repo_name (string, 6-112 chars) | path (string, 4-204 chars) | copies (string, 1-3 chars) | size (string, 4-6 chars) | content (string, 714-810k chars) | license (string, 15 classes) |
---|---|---|---|---|---|
tlnagy/seq-analysis | utils.py | 1 | 8432 | import pandas as pd
import numpy as np
import sklearn.neighbors
import os
import pickle
from multiprocessing import Pool, Manager, cpu_count
from Bio.Seq import Seq
import time, itertools
from timeit import default_timer as timer
import warnings
canonical_yeast_ubq = list(" QIFVKTLTGKTITLEVESSDTIDNVKSKIQDKEGIPPDQQRLIFAGKQLEDGRTLSDYNIQKESTLHLVLRLRGG ")
def load_mutant_map(hdf5_datastorepath, allele_pkl_path = None):
if allele_pkl_path is None:
allele_pkl_path = os.path.join(*[os.path.split(hdf5_datastorepath)[0], "allele_dic_with_WT.pkl"])
with open(allele_pkl_path, "rb") as f:
barcode_mutant_map = pd.DataFrame(pickle.load(f)).T.reset_index()
barcode_mutant_map.columns = ["barcodes", "positions", "codons"]
barcode_mutant_map["barcodes"] = barcode_mutant_map["barcodes"].apply(lambda x: str(Seq(x).reverse_complement()))
barcode_mutant_map["barcodes"] = barcode_mutant_map["barcodes"].astype(np.str)
barcode_mutant_map["WT"] = barcode_mutant_map["codons"] == "WT"
# add dummy value for WT barcodes
barcode_mutant_map["amino acids"] = "WT"
barcode_mutant_map.loc[~barcode_mutant_map["WT"], "codons"] = barcode_mutant_map.loc[~barcode_mutant_map["WT"], "codons"].apply(lambda x: str(Seq(x).transcribe()))
barcode_mutant_map.loc[~barcode_mutant_map["WT"], "amino acids"] = barcode_mutant_map.loc[~barcode_mutant_map["WT"], "codons"].apply(lambda x: str(Seq(x).translate()))
return barcode_mutant_map
def load_gen_times(df, experimental_info_csv_path):
"""
Loads generation times from the experimental info csv on the Pubs website and
adds it to the dataframe
:param df:
:param experimental_info_csv_path:
:return:
"""
primers = pd.read_csv(experimental_info_csv_path)
primers["exp"] = primers["Sample"].str.replace("Day", "d").str.replace(" T=", "t").str.split(" ").str.get(1)
primers["group"] = primers["Sample"].str.split(" ").str.get(0)
primers = primers[["group", "exp", "Generations"]]
primers = primers[~pd.isnull(primers["group"])].fillna(0).replace("-", 0)
primers[["days", "timepoints"]] = primers["exp"].str.replace(r"t", "_t").str.split("_", expand=True)
primers = primers.drop("exp", axis=1)
primers = primers.set_index(["group", "days", "timepoints"])
primers = primers.astype(np.float)
df = df.stack("timepoints").reset_index().set_index(["group", "days", "timepoints"]).join(primers)
return df.set_index(["barcodes", "codons", "amino acids", "positions"], append=True).unstack("timepoints")
btree = None
def nearest_neighbors(args):
x, q = args
dist, ind = btree.query(x, k=2)
q.put(0)
return dist, ind
def parallel_hamming(arr, func):
start = timer()
num_cpus = cpu_count()
print("Using {} cpus to compute hamming distances for {} items...\n\nPercent complete: ".format(num_cpus, len(arr)), end="")
with Pool(num_cpus) as pool:
m = Manager()
q = m.Queue()
result = pool.map_async(func, [(arr[i:i+1000], q) for i in range(0, len(arr), 1000)])
for i, elem in enumerate(itertools.cycle('\|/-')):
if result.ready():
break
size = q.qsize()
print("Percent complete: {:.0%} {}".format(size/(len(arr)/1000), elem), end="\r")
time.sleep(0.25)
print("Percent complete: {:.0%}".format(1))
return (np.concatenate(data) for data in zip(*result.get()))
def hamming_correct(raw_barcode_data, mapped_barcode_data, barcode_mutant_map, max_distance=3, barcode_length=18):
"""
High performance mapping of unmapped barcodes onto barcode library. Only unambiguous and small errors will be corrected.
:param raw_barcode_data:
:param mapped_barcode_data:
:param barcode_mutant_map:
:param max_distance: The maximum Hamming distance allowed (inclusive)
:return: new_mapped_barcode_data
"""
unmapped_raw_barcode_data = raw_barcode_data[~raw_barcode_data["barcodes"].isin(barcode_mutant_map["barcodes"])]
print("Recovering {} unmapped barcodes".format(len(unmapped_raw_barcode_data)))
print("Recovering {} counts".format(unmapped_raw_barcode_data["counts"].sum()))
unmapped_barcodes = unmapped_raw_barcode_data["barcodes"].unique().astype("str")
unmapped_as_int = unmapped_barcodes.view('S4').reshape((unmapped_barcodes.size, -1)).view(np.uint32)
barcode_lib = barcode_mutant_map["barcodes"].unique().astype("str")
barcode_lib_as_int = barcode_lib.view('S4').reshape((barcode_lib.size, -1)).view(np.uint32)
global btree
btree = sklearn.neighbors.BallTree(barcode_lib_as_int, leaf_size=40, metric="hamming")
print("Mapping {} unique unmapped barcodes onto {} library barcodes".format(len(unmapped_barcodes), len(barcode_lib)), flush=True)
dist, ind = parallel_hamming(unmapped_as_int, nearest_neighbors)
dist_from_muts = dist*barcode_length
# find only lib barcodes that are nonambiguous and only < max_dist steps away from the
# unmapped barcode
dist_upper_bound = max_distance + 1 # better performance for doing < rather than <=
mask = (np.diff(dist_from_muts).flatten() >= 1) & (dist_from_muts[:, 0] < dist_upper_bound)
output = ind[mask]
# preserve index position so we know which unmapped barcode each value corresponds to
og_idx = np.arange(len(ind))
corrected = np.vstack([unmapped_barcodes[og_idx[mask]], barcode_lib[output][:, 0]]).T
print("{} barcodes will be remapped".format(len(corrected)))
# mapping of the erroneous barcode to its closest neighbor in the barcode lib
new_mapping = unmapped_raw_barcode_data.merge(pd.DataFrame(corrected, columns=["barcodes", "corrected_bc"]), on="barcodes")
new_mapping.rename(columns={new_mapping.columns[-1]:"barcodes", new_mapping.columns[-3]:"old_barcodes"}, inplace=True)
new_mapping.set_index(["group", "days", "timepoints", "barcodes"], inplace=True)
new_mapping.sort_index(inplace=True)
# barcodes used to be unique prior to the correction so now group them together because
# they represent the same barcode
grouped_new_mapping = new_mapping.groupby(level=[i for i in range(len(new_mapping.index.levels))]).sum()
mapped_barcode_data_reindexed = mapped_barcode_data.set_index(["group", "days", "timepoints", "barcodes"]).sort_index()
mapped_barcode_data_reindexed["new_counts"] = mapped_barcode_data_reindexed.loc[:, "counts"].add(grouped_new_mapping["counts"], fill_value=0)
# len(np.unique(test3.index.get_level_values("barcodes")))
new_unmapped = grouped_new_mapping[~grouped_new_mapping.index.isin(mapped_barcode_data_reindexed.index)].loc[:, "counts"]
new_raw_barcode_data = pd.concat([mapped_barcode_data_reindexed["new_counts"], new_unmapped])
new_raw_barcode_data = new_raw_barcode_data.reset_index(name="counts")
new_mapped_barcode_data = new_raw_barcode_data.merge(barcode_mutant_map, on="barcodes")
return new_mapped_barcode_data
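# A minimal usage sketch of the correction workflow above (not part of the original
# pipeline; the path and the raw_counts dataframe below are hypothetical placeholders):
#
#   barcode_mutant_map = load_mutant_map("data/datastore.h5")      # hypothetical HDF5 path
#   mapped = raw_counts.merge(barcode_mutant_map, on="barcodes")   # raw_counts: assumed counts dataframe
#   corrected = hamming_correct(raw_counts, mapped, barcode_mutant_map,
#                               max_distance=3, barcode_length=18)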
def subtract_control(df, merge_on=["amino acids", "positions"]):
"""
Subtracts control data from the perturbation data
:param df: either aa_weighted or codons_weighted
:param merge_on: the columns to merge on either AA-pos or codons-pos
:return: a dataframe with a difference column
Warning: this function is deprecated. Please see calc_case_control_diff in comparison_and_significance.py instead.
"""
warnings.warn("deprecated", DeprecationWarning)
s1 = df.loc[df.index.get_level_values("group") != "Control", "weighted mean slope"]
s2 = df.loc["Control", "weighted mean slope"]
merged = pd.merge(s1.reset_index(), s2.reset_index(), on=merge_on)
merged["diff"] = merged["weighted mean slope_x"] - merged["weighted mean slope_y"]
merged.drop("days_y", axis=1, inplace=True)
merged.rename(columns={'days_x':'days'}, inplace=True)
merged = merged.set_index(["group", "days", "amino acids", "positions"]).sort_index()
merged.drop("WT", level="amino acids", inplace=True)
merged.drop(["weighted mean slope_x", "weighted mean slope_y"], axis=1, inplace=True)
return merged
def set_column_sequence(dataframe, seq):
'''Takes a dataframe and a subsequence of its columns, returns dataframe with seq as first columns'''
cols = seq[:] # copy so we don't mutate seq
for x in dataframe.columns:
if x not in cols:
cols.append(x)
return dataframe[cols]
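# Example (hypothetical dataframe): set_column_sequence(df, ["barcodes", "counts"])
# returns df with "barcodes" and "counts" as the first two columns, followed by the
# remaining columns in their original order.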
| mpl-2.0 |
NunoEdgarGub1/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/backends/backend_gtkagg.py | 70 | 4184 | """
Render to gtk from agg
"""
from __future__ import division
import os
import matplotlib
from matplotlib.figure import Figure
from matplotlib.backends.backend_agg import FigureCanvasAgg
from matplotlib.backends.backend_gtk import gtk, FigureManagerGTK, FigureCanvasGTK,\
show, draw_if_interactive,\
error_msg_gtk, NavigationToolbar, PIXELS_PER_INCH, backend_version, \
NavigationToolbar2GTK
from matplotlib.backends._gtkagg import agg_to_gtk_drawable
DEBUG = False
class NavigationToolbar2GTKAgg(NavigationToolbar2GTK):
def _get_canvas(self, fig):
return FigureCanvasGTKAgg(fig)
class FigureManagerGTKAgg(FigureManagerGTK):
def _get_toolbar(self, canvas):
# must be inited after the window, drawingArea and figure
# attrs are set
if matplotlib.rcParams['toolbar']=='classic':
toolbar = NavigationToolbar (canvas, self.window)
elif matplotlib.rcParams['toolbar']=='toolbar2':
toolbar = NavigationToolbar2GTKAgg (canvas, self.window)
else:
toolbar = None
return toolbar
def new_figure_manager(num, *args, **kwargs):
"""
Create a new figure manager instance
"""
if DEBUG: print 'backend_gtkagg.new_figure_manager'
FigureClass = kwargs.pop('FigureClass', Figure)
thisFig = FigureClass(*args, **kwargs)
canvas = FigureCanvasGTKAgg(thisFig)
return FigureManagerGTKAgg(canvas, num)
if DEBUG: print 'backend_gtkagg.new_figure_manager done'
class FigureCanvasGTKAgg(FigureCanvasGTK, FigureCanvasAgg):
filetypes = FigureCanvasGTK.filetypes.copy()
filetypes.update(FigureCanvasAgg.filetypes)
def configure_event(self, widget, event=None):
if DEBUG: print 'FigureCanvasGTKAgg.configure_event'
if widget.window is None:
return
try:
del self.renderer
except AttributeError:
pass
w,h = widget.window.get_size()
if w==1 or h==1: return # empty fig
# compute desired figure size in inches
dpival = self.figure.dpi
winch = w/dpival
hinch = h/dpival
self.figure.set_size_inches(winch, hinch)
self._need_redraw = True
self.resize_event()
if DEBUG: print 'FigureCanvasGTKAgg.configure_event end'
return True
def _render_figure(self, pixmap, width, height):
if DEBUG: print 'FigureCanvasGTKAgg.render_figure'
FigureCanvasAgg.draw(self)
if DEBUG: print 'FigureCanvasGTKAgg.render_figure pixmap', pixmap
#agg_to_gtk_drawable(pixmap, self.renderer._renderer, None)
buf = self.buffer_rgba(0,0)
ren = self.get_renderer()
w = int(ren.width)
h = int(ren.height)
pixbuf = gtk.gdk.pixbuf_new_from_data(
buf, gtk.gdk.COLORSPACE_RGB, True, 8, w, h, w*4)
pixmap.draw_pixbuf(pixmap.new_gc(), pixbuf, 0, 0, 0, 0, w, h,
gtk.gdk.RGB_DITHER_NONE, 0, 0)
if DEBUG: print 'FigureCanvasGTKAgg.render_figure done'
def blit(self, bbox=None):
if DEBUG: print 'FigureCanvasGTKAgg.blit'
if DEBUG: print 'FigureCanvasGTKAgg.blit', self._pixmap
agg_to_gtk_drawable(self._pixmap, self.renderer._renderer, bbox)
x, y, w, h = self.allocation
self.window.draw_drawable (self.style.fg_gc[self.state], self._pixmap,
0, 0, 0, 0, w, h)
if DEBUG: print 'FigureCanvasGTKAgg.done'
def print_png(self, filename, *args, **kwargs):
# Do this so we can save the resolution of figure in the PNG file
agg = self.switch_backends(FigureCanvasAgg)
return agg.print_png(filename, *args, **kwargs)
"""\
Traceback (most recent call last):
File "/home/titan/johnh/local/lib/python2.3/site-packages/matplotlib/backends/backend_gtk.py", line 304, in expose_event
self._render_figure(self._pixmap, w, h)
File "/home/titan/johnh/local/lib/python2.3/site-packages/matplotlib/backends/backend_gtkagg.py", line 77, in _render_figure
pixbuf = gtk.gdk.pixbuf_new_from_data(
ValueError: data length (3156672) is less then required by the other parameters (3160608)
"""
| gpl-3.0 |
DonBeo/scikit-learn | examples/neural_networks/plot_rbm_logistic_classification.py | 258 | 4609 | """
==============================================================
Restricted Boltzmann Machine features for digit classification
==============================================================
For greyscale image data where pixel values can be interpreted as degrees of
blackness on a white background, like handwritten digit recognition, the
Bernoulli Restricted Boltzmann machine model (:class:`BernoulliRBM
<sklearn.neural_network.BernoulliRBM>`) can perform effective non-linear
feature extraction.
In order to learn good latent representations from a small dataset, we
artificially generate more labeled data by perturbing the training data with
linear shifts of 1 pixel in each direction.
This example shows how to build a classification pipeline with a BernoulliRBM
feature extractor and a :class:`LogisticRegression
<sklearn.linear_model.LogisticRegression>` classifier. The hyperparameters
of the entire model (learning rate, hidden layer size, regularization)
were optimized by grid search, but the search is not reproduced here because
of runtime constraints.
Logistic regression on raw pixel values is presented for comparison. The
example shows that the features extracted by the BernoulliRBM help improve the
classification accuracy.
"""
from __future__ import print_function
print(__doc__)
# Authors: Yann N. Dauphin, Vlad Niculae, Gabriel Synnaeve
# License: BSD
import numpy as np
import matplotlib.pyplot as plt
from scipy.ndimage import convolve
from sklearn import linear_model, datasets, metrics
from sklearn.cross_validation import train_test_split
from sklearn.neural_network import BernoulliRBM
from sklearn.pipeline import Pipeline
###############################################################################
# Setting up
def nudge_dataset(X, Y):
"""
This produces a dataset 5 times bigger than the original one,
by moving the 8x8 images in X around by 1px to left, right, down, up
"""
direction_vectors = [
[[0, 1, 0],
[0, 0, 0],
[0, 0, 0]],
[[0, 0, 0],
[1, 0, 0],
[0, 0, 0]],
[[0, 0, 0],
[0, 0, 1],
[0, 0, 0]],
[[0, 0, 0],
[0, 0, 0],
[0, 1, 0]]]
shift = lambda x, w: convolve(x.reshape((8, 8)), mode='constant',
weights=w).ravel()
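# Convolving with a 3x3 kernel containing a single 1 offset from the centre
# translates the 8x8 image by one pixel in the corresponding direction;
# mode='constant' zero-pads, so pixels shifted in from the border are blank.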
X = np.concatenate([X] +
[np.apply_along_axis(shift, 1, X, vector)
for vector in direction_vectors])
Y = np.concatenate([Y for _ in range(5)], axis=0)
return X, Y
# Load Data
digits = datasets.load_digits()
X = np.asarray(digits.data, 'float32')
X, Y = nudge_dataset(X, digits.target)
X = (X - np.min(X, 0)) / (np.max(X, 0) + 0.0001) # 0-1 scaling
X_train, X_test, Y_train, Y_test = train_test_split(X, Y,
test_size=0.2,
random_state=0)
# Models we will use
logistic = linear_model.LogisticRegression()
rbm = BernoulliRBM(random_state=0, verbose=True)
classifier = Pipeline(steps=[('rbm', rbm), ('logistic', logistic)])
###############################################################################
# Training
# Hyper-parameters. These were set by cross-validation,
# using a GridSearchCV. Here we are not performing cross-validation to
# save time.
rbm.learning_rate = 0.06
rbm.n_iter = 20
# More components tend to give better prediction performance, but larger
# fitting time
rbm.n_components = 100
logistic.C = 6000.0
# Training RBM-Logistic Pipeline
classifier.fit(X_train, Y_train)
# Training Logistic regression
logistic_classifier = linear_model.LogisticRegression(C=100.0)
logistic_classifier.fit(X_train, Y_train)
###############################################################################
# Evaluation
print()
print("Logistic regression using RBM features:\n%s\n" % (
metrics.classification_report(
Y_test,
classifier.predict(X_test))))
print("Logistic regression using raw pixel features:\n%s\n" % (
metrics.classification_report(
Y_test,
logistic_classifier.predict(X_test))))
###############################################################################
# Plotting
plt.figure(figsize=(4.2, 4))
for i, comp in enumerate(rbm.components_):
plt.subplot(10, 10, i + 1)
plt.imshow(comp.reshape((8, 8)), cmap=plt.cm.gray_r,
interpolation='nearest')
plt.xticks(())
plt.yticks(())
plt.suptitle('100 components extracted by RBM', fontsize=16)
plt.subplots_adjust(0.08, 0.02, 0.92, 0.85, 0.08, 0.23)
plt.show()
| bsd-3-clause |
macks22/scikit-learn | sklearn/metrics/cluster/tests/test_unsupervised.py | 230 | 2823 | import numpy as np
from scipy.sparse import csr_matrix
from sklearn import datasets
from sklearn.metrics.cluster.unsupervised import silhouette_score
from sklearn.metrics import pairwise_distances
from sklearn.utils.testing import assert_false, assert_almost_equal
from sklearn.utils.testing import assert_raises_regexp
def test_silhouette():
# Tests the Silhouette Coefficient.
dataset = datasets.load_iris()
X = dataset.data
y = dataset.target
D = pairwise_distances(X, metric='euclidean')
# Given that the actual labels are used, we can assume that S would be
# positive.
silhouette = silhouette_score(D, y, metric='precomputed')
assert(silhouette > 0)
# Test without calculating D
silhouette_metric = silhouette_score(X, y, metric='euclidean')
assert_almost_equal(silhouette, silhouette_metric)
# Test with sampling
silhouette = silhouette_score(D, y, metric='precomputed',
sample_size=int(X.shape[0] / 2),
random_state=0)
silhouette_metric = silhouette_score(X, y, metric='euclidean',
sample_size=int(X.shape[0] / 2),
random_state=0)
assert(silhouette > 0)
assert(silhouette_metric > 0)
assert_almost_equal(silhouette_metric, silhouette)
# Test with sparse X
X_sparse = csr_matrix(X)
D = pairwise_distances(X_sparse, metric='euclidean')
silhouette = silhouette_score(D, y, metric='precomputed')
assert(silhouette > 0)
def test_no_nan():
# Assert Silhouette Coefficient != nan when there is 1 sample in a class.
# This tests for the condition that caused issue 960.
# Note that there is only one sample in cluster 0. This used to cause the
# silhouette_score to return nan (see bug #960).
labels = np.array([1, 0, 1, 1, 1])
# The distance matrix doesn't actually matter.
D = np.random.RandomState(0).rand(len(labels), len(labels))
silhouette = silhouette_score(D, labels, metric='precomputed')
assert_false(np.isnan(silhouette))
def test_correct_labelsize():
# Assert 1 < n_labels < n_samples
dataset = datasets.load_iris()
X = dataset.data
# n_labels = n_samples
y = np.arange(X.shape[0])
assert_raises_regexp(ValueError,
'Number of labels is %d\. Valid values are 2 '
'to n_samples - 1 \(inclusive\)' % len(np.unique(y)),
silhouette_score, X, y)
# n_labels = 1
y = np.zeros(X.shape[0])
assert_raises_regexp(ValueError,
'Number of labels is %d\. Valid values are 2 '
'to n_samples - 1 \(inclusive\)' % len(np.unique(y)),
silhouette_score, X, y)
| bsd-3-clause |
jayflo/scikit-learn | sklearn/metrics/cluster/supervised.py | 207 | 27395 | """Utilities to evaluate the clustering performance of models
Functions named as *_score return a scalar value to maximize: the higher the
better.
"""
# Authors: Olivier Grisel <[email protected]>
# Wei LI <[email protected]>
# Diego Molla <[email protected]>
# License: BSD 3 clause
from math import log
from scipy.misc import comb
from scipy.sparse import coo_matrix
import numpy as np
from .expected_mutual_info_fast import expected_mutual_information
from ...utils.fixes import bincount
def comb2(n):
# the exact version is faster for k == 2: use it by default globally in
# this module instead of the float approximate variant
return comb(n, 2, exact=1)
def check_clusterings(labels_true, labels_pred):
"""Check that the two clusterings matching 1D integer arrays"""
labels_true = np.asarray(labels_true)
labels_pred = np.asarray(labels_pred)
# input checks
if labels_true.ndim != 1:
raise ValueError(
"labels_true must be 1D: shape is %r" % (labels_true.shape,))
if labels_pred.ndim != 1:
raise ValueError(
"labels_pred must be 1D: shape is %r" % (labels_pred.shape,))
if labels_true.shape != labels_pred.shape:
raise ValueError(
"labels_true and labels_pred must have same size, got %d and %d"
% (labels_true.shape[0], labels_pred.shape[0]))
return labels_true, labels_pred
def contingency_matrix(labels_true, labels_pred, eps=None):
"""Build a contengency matrix describing the relationship between labels.
Parameters
----------
labels_true : int array, shape = [n_samples]
Ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
Cluster labels to evaluate
eps: None or float
If a float, that value is added to all values in the contingency
matrix. This helps to stop NaN propagation.
If ``None``, nothing is adjusted.
Returns
-------
contingency: array, shape=[n_classes_true, n_classes_pred]
Matrix :math:`C` such that :math:`C_{i, j}` is the number of samples in
true class :math:`i` and in predicted class :math:`j`. If
``eps is None``, the dtype of this array will be integer. If ``eps`` is
given, the dtype will be float.
"""
classes, class_idx = np.unique(labels_true, return_inverse=True)
clusters, cluster_idx = np.unique(labels_pred, return_inverse=True)
n_classes = classes.shape[0]
n_clusters = clusters.shape[0]
# Using coo_matrix to accelerate simple histogram calculation,
# i.e. bins are consecutive integers
# Currently, coo_matrix is faster than histogram2d for simple cases
contingency = coo_matrix((np.ones(class_idx.shape[0]),
(class_idx, cluster_idx)),
shape=(n_classes, n_clusters),
dtype=np.int).toarray()
if eps is not None:
# don't use += as contingency is integer
contingency = contingency + eps
return contingency
# clustering measures
def adjusted_rand_score(labels_true, labels_pred):
"""Rand index adjusted for chance
The Rand Index computes a similarity measure between two clusterings
by considering all pairs of samples and counting pairs that are
assigned in the same or different clusters in the predicted and
true clusterings.
The raw RI score is then "adjusted for chance" into the ARI score
using the following scheme::
ARI = (RI - Expected_RI) / (max(RI) - Expected_RI)
The adjusted Rand index is thus ensured to have a value close to
0.0 for random labeling independently of the number of clusters and
samples and exactly 1.0 when the clusterings are identical (up to
a permutation).
ARI is a symmetric measure::
adjusted_rand_score(a, b) == adjusted_rand_score(b, a)
Read more in the :ref:`User Guide <adjusted_rand_score>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
Ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
Cluster labels to evaluate
Returns
-------
ari : float
Similarity score between -1.0 and 1.0. Random labelings have an ARI
close to 0.0. 1.0 stands for perfect match.
Examples
--------
Perfectly matching labelings have a score of 1 even
>>> from sklearn.metrics.cluster import adjusted_rand_score
>>> adjusted_rand_score([0, 0, 1, 1], [0, 0, 1, 1])
1.0
>>> adjusted_rand_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
Labelings that assign all classes members to the same clusters
are complete but not always pure, hence penalized::
>>> adjusted_rand_score([0, 0, 1, 2], [0, 0, 1, 1]) # doctest: +ELLIPSIS
0.57...
ARI is symmetric, so labelings that have pure clusters with members
coming from the same classes but unnecessary splits are penalized::
>>> adjusted_rand_score([0, 0, 1, 1], [0, 0, 1, 2]) # doctest: +ELLIPSIS
0.57...
If classes members are completely split across different clusters, the
assignment is totally incomplete, hence the ARI is very low::
>>> adjusted_rand_score([0, 0, 0, 0], [0, 1, 2, 3])
0.0
References
----------
.. [Hubert1985] `L. Hubert and P. Arabie, Comparing Partitions,
Journal of Classification 1985`
http://www.springerlink.com/content/x64124718341j1j0/
.. [wk] http://en.wikipedia.org/wiki/Rand_index#Adjusted_Rand_index
See also
--------
adjusted_mutual_info_score: Adjusted Mutual Information
"""
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
n_samples = labels_true.shape[0]
classes = np.unique(labels_true)
clusters = np.unique(labels_pred)
# Special limit cases: no clustering since the data is not split;
# or trivial clustering where each document is assigned a unique cluster.
# These are perfect matches hence return 1.0.
if (classes.shape[0] == clusters.shape[0] == 1
or classes.shape[0] == clusters.shape[0] == 0
or classes.shape[0] == clusters.shape[0] == len(labels_true)):
return 1.0
contingency = contingency_matrix(labels_true, labels_pred)
# Compute the ARI using the contingency data
sum_comb_c = sum(comb2(n_c) for n_c in contingency.sum(axis=1))
sum_comb_k = sum(comb2(n_k) for n_k in contingency.sum(axis=0))
sum_comb = sum(comb2(n_ij) for n_ij in contingency.flatten())
prod_comb = (sum_comb_c * sum_comb_k) / float(comb(n_samples, 2))
mean_comb = (sum_comb_k + sum_comb_c) / 2.
return ((sum_comb - prod_comb) / (mean_comb - prod_comb))
def homogeneity_completeness_v_measure(labels_true, labels_pred):
"""Compute the homogeneity and completeness and V-Measure scores at once
Those metrics are based on normalized conditional entropy measures of
the clustering labeling to evaluate given the knowledge of a Ground
Truth class labels of the same samples.
A clustering result satisfies homogeneity if all of its clusters
contain only data points which are members of a single class.
A clustering result satisfies completeness if all the data points
that are members of a given class are elements of the same cluster.
Both scores have positive values between 0.0 and 1.0, larger values
being desirable.
Those 3 metrics are independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score values in any way.
V-Measure is furthermore symmetric: swapping ``labels_true`` and
``label_pred`` will give the same score. This does not hold for
homogeneity and completeness.
Read more in the :ref:`User Guide <homogeneity_completeness>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
cluster labels to evaluate
Returns
-------
homogeneity: float
score between 0.0 and 1.0. 1.0 stands for perfectly homogeneous labeling
completeness: float
score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling
v_measure: float
harmonic mean of the first two
See also
--------
homogeneity_score
completeness_score
v_measure_score
"""
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
if len(labels_true) == 0:
return 1.0, 1.0, 1.0
entropy_C = entropy(labels_true)
entropy_K = entropy(labels_pred)
MI = mutual_info_score(labels_true, labels_pred)
homogeneity = MI / (entropy_C) if entropy_C else 1.0
completeness = MI / (entropy_K) if entropy_K else 1.0
if homogeneity + completeness == 0.0:
v_measure_score = 0.0
else:
v_measure_score = (2.0 * homogeneity * completeness
/ (homogeneity + completeness))
return homogeneity, completeness, v_measure_score
def homogeneity_score(labels_true, labels_pred):
"""Homogeneity metric of a cluster labeling given a ground truth
A clustering result satisfies homogeneity if all of its clusters
contain only data points which are members of a single class.
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is not symmetric: switching ``label_true`` with ``label_pred``
will return the :func:`completeness_score` which will be different in
general.
Read more in the :ref:`User Guide <homogeneity_completeness>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
cluster labels to evaluate
Returns
-------
homogeneity: float
score between 0.0 and 1.0. 1.0 stands for perfectly homogeneous labeling
References
----------
.. [1] `Andrew Rosenberg and Julia Hirschberg, 2007. V-Measure: A
conditional entropy-based external cluster evaluation measure
<http://aclweb.org/anthology/D/D07/D07-1043.pdf>`_
See also
--------
completeness_score
v_measure_score
Examples
--------
Perfect labelings are homogeneous::
>>> from sklearn.metrics.cluster import homogeneity_score
>>> homogeneity_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
Non-perfect labelings that further split classes into more clusters can be
perfectly homogeneous::
>>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 0, 1, 2]))
... # doctest: +ELLIPSIS
1.0...
>>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 1, 2, 3]))
... # doctest: +ELLIPSIS
1.0...
Clusters that include samples from different classes do not make for an
homogeneous labeling::
>>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 1, 0, 1]))
... # doctest: +ELLIPSIS
0.0...
>>> print("%.6f" % homogeneity_score([0, 0, 1, 1], [0, 0, 0, 0]))
... # doctest: +ELLIPSIS
0.0...
"""
return homogeneity_completeness_v_measure(labels_true, labels_pred)[0]
def completeness_score(labels_true, labels_pred):
"""Completeness metric of a cluster labeling given a ground truth
A clustering result satisfies completeness if all the data points
that are members of a given class are elements of the same cluster.
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is not symmetric: switching ``label_true`` with ``label_pred``
will return the :func:`homogeneity_score` which will be different in
general.
Read more in the :ref:`User Guide <homogeneity_completeness>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
cluster labels to evaluate
Returns
-------
completeness: float
score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling
References
----------
.. [1] `Andrew Rosenberg and Julia Hirschberg, 2007. V-Measure: A
conditional entropy-based external cluster evaluation measure
<http://aclweb.org/anthology/D/D07/D07-1043.pdf>`_
See also
--------
homogeneity_score
v_measure_score
Examples
--------
Perfect labelings are complete::
>>> from sklearn.metrics.cluster import completeness_score
>>> completeness_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
Non-perfect labelings that assign all classes members to the same clusters
are still complete::
>>> print(completeness_score([0, 0, 1, 1], [0, 0, 0, 0]))
1.0
>>> print(completeness_score([0, 1, 2, 3], [0, 0, 1, 1]))
1.0
If classes members are split across different clusters, the
assignment cannot be complete::
>>> print(completeness_score([0, 0, 1, 1], [0, 1, 0, 1]))
0.0
>>> print(completeness_score([0, 0, 0, 0], [0, 1, 2, 3]))
0.0
"""
return homogeneity_completeness_v_measure(labels_true, labels_pred)[1]
def v_measure_score(labels_true, labels_pred):
"""V-measure cluster labeling given a ground truth.
This score is identical to :func:`normalized_mutual_info_score`.
The V-measure is the harmonic mean between homogeneity and completeness::
v = 2 * (homogeneity * completeness) / (homogeneity + completeness)
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is furthermore symmetric: switching ``label_true`` with
``label_pred`` will return the same score value. This can be useful to
measure the agreement of two independent label assignments strategies
on the same dataset when the real ground truth is not known.
Read more in the :ref:`User Guide <homogeneity_completeness>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
cluster labels to evaluate
Returns
-------
v_measure: float
score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling
References
----------
.. [1] `Andrew Rosenberg and Julia Hirschberg, 2007. V-Measure: A
conditional entropy-based external cluster evaluation measure
<http://aclweb.org/anthology/D/D07/D07-1043.pdf>`_
See also
--------
homogeneity_score
completeness_score
Examples
--------
Perfect labelings are both homogeneous and complete, hence have score 1.0::
>>> from sklearn.metrics.cluster import v_measure_score
>>> v_measure_score([0, 0, 1, 1], [0, 0, 1, 1])
1.0
>>> v_measure_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
Labelings that assign all classes members to the same clusters
are complete but not homogeneous, hence penalized::
>>> print("%.6f" % v_measure_score([0, 0, 1, 2], [0, 0, 1, 1]))
... # doctest: +ELLIPSIS
0.8...
>>> print("%.6f" % v_measure_score([0, 1, 2, 3], [0, 0, 1, 1]))
... # doctest: +ELLIPSIS
0.66...
Labelings that have pure clusters with members coming from the same
classes are homogeneous but unnecessary splits harm completeness
and thus penalize V-measure as well::
>>> print("%.6f" % v_measure_score([0, 0, 1, 1], [0, 0, 1, 2]))
... # doctest: +ELLIPSIS
0.8...
>>> print("%.6f" % v_measure_score([0, 0, 1, 1], [0, 1, 2, 3]))
... # doctest: +ELLIPSIS
0.66...
If classes members are completely split across different clusters,
the assignment is totally incomplete, hence the V-Measure is null::
>>> print("%.6f" % v_measure_score([0, 0, 0, 0], [0, 1, 2, 3]))
... # doctest: +ELLIPSIS
0.0...
Clusters that include samples from totally different classes totally
destroy the homogeneity of the labeling, hence::
>>> print("%.6f" % v_measure_score([0, 0, 1, 1], [0, 0, 0, 0]))
... # doctest: +ELLIPSIS
0.0...
"""
return homogeneity_completeness_v_measure(labels_true, labels_pred)[2]
def mutual_info_score(labels_true, labels_pred, contingency=None):
"""Mutual Information between two clusterings
The Mutual Information is a measure of the similarity between two labels of
the same data. Where :math:`P(i)` is the probability of a random sample
occurring in cluster :math:`U_i` and :math:`P'(j)` is the probability of a
random sample occurring in cluster :math:`V_j`, the Mutual Information
between clusterings :math:`U` and :math:`V` is given as:
.. math::
MI(U,V)=\sum_{i=1}^R \sum_{j=1}^C P(i,j)\log\\frac{P(i,j)}{P(i)P'(j)}
This is equal to the Kullback-Leibler divergence of the joint distribution
with the product distribution of the marginals.
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is furthermore symmetric: switching ``label_true`` with
``label_pred`` will return the same score value. This can be useful to
measure the agreement of two independent label assignments strategies
on the same dataset when the real ground truth is not known.
Read more in the :ref:`User Guide <mutual_info_score>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
A clustering of the data into disjoint subsets.
labels_pred : array, shape = [n_samples]
A clustering of the data into disjoint subsets.
contingency: None or array, shape = [n_classes_true, n_classes_pred]
A contingency matrix given by the :func:`contingency_matrix` function.
If value is ``None``, it will be computed, otherwise the given value is
used, with ``labels_true`` and ``labels_pred`` ignored.
Returns
-------
mi: float
Mutual information, a non-negative value
See also
--------
adjusted_mutual_info_score: Adjusted against chance Mutual Information
normalized_mutual_info_score: Normalized Mutual Information
"""
if contingency is None:
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
contingency = contingency_matrix(labels_true, labels_pred)
contingency = np.array(contingency, dtype='float')
contingency_sum = np.sum(contingency)
pi = np.sum(contingency, axis=1)
pj = np.sum(contingency, axis=0)
outer = np.outer(pi, pj)
nnz = contingency != 0.0
# normalized contingency
contingency_nm = contingency[nnz]
log_contingency_nm = np.log(contingency_nm)
contingency_nm /= contingency_sum
# log(a / b) should be calculated as log(a) - log(b) to avoid
# possible loss of precision
log_outer = -np.log(outer[nnz]) + log(pi.sum()) + log(pj.sum())
mi = (contingency_nm * (log_contingency_nm - log(contingency_sum))
+ contingency_nm * log_outer)
return mi.sum()
def adjusted_mutual_info_score(labels_true, labels_pred):
"""Adjusted Mutual Information between two clusterings
Adjusted Mutual Information (AMI) is an adjustment of the Mutual
Information (MI) score to account for chance. It accounts for the fact that
the MI is generally higher for two clusterings with a larger number of
clusters, regardless of whether there is actually more information shared.
For two clusterings :math:`U` and :math:`V`, the AMI is given as::
AMI(U, V) = [MI(U, V) - E(MI(U, V))] / [max(H(U), H(V)) - E(MI(U, V))]
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is furthermore symmetric: switching ``label_true`` with
``label_pred`` will return the same score value. This can be useful to
measure the agreement of two independent label assignments strategies
on the same dataset when the real ground truth is not known.
Be mindful that this function is an order of magnitude slower than other
metrics, such as the Adjusted Rand Index.
Read more in the :ref:`User Guide <mutual_info_score>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
A clustering of the data into disjoint subsets.
labels_pred : array, shape = [n_samples]
A clustering of the data into disjoint subsets.
Returns
-------
ami: float (upper bound of 1.0)
The AMI returns a value of 1 when the two partitions are identical
(i.e. perfectly matched). Random partitions (independent labellings) have
an expected AMI of around 0 on average and can therefore be negative.
See also
--------
adjusted_rand_score: Adjusted Rand Index
mutual_information_score: Mutual Information (not adjusted for chance)
Examples
--------
Perfect labelings are both homogeneous and complete, hence have
score 1.0::
>>> from sklearn.metrics.cluster import adjusted_mutual_info_score
>>> adjusted_mutual_info_score([0, 0, 1, 1], [0, 0, 1, 1])
1.0
>>> adjusted_mutual_info_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
If classes members are completely split across different clusters,
the assignment is totally incomplete, hence the AMI is null::
>>> adjusted_mutual_info_score([0, 0, 0, 0], [0, 1, 2, 3])
0.0
References
----------
.. [1] `Vinh, Epps, and Bailey, (2010). Information Theoretic Measures for
Clusterings Comparison: Variants, Properties, Normalization and
Correction for Chance, JMLR
<http://jmlr.csail.mit.edu/papers/volume11/vinh10a/vinh10a.pdf>`_
.. [2] `Wikipedia entry for the Adjusted Mutual Information
<http://en.wikipedia.org/wiki/Adjusted_Mutual_Information>`_
"""
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
n_samples = labels_true.shape[0]
classes = np.unique(labels_true)
clusters = np.unique(labels_pred)
# Special limit cases: no clustering since the data is not split.
# This is a perfect match hence return 1.0.
if (classes.shape[0] == clusters.shape[0] == 1
or classes.shape[0] == clusters.shape[0] == 0):
return 1.0
contingency = contingency_matrix(labels_true, labels_pred)
contingency = np.array(contingency, dtype='float')
# Calculate the MI for the two clusterings
mi = mutual_info_score(labels_true, labels_pred,
contingency=contingency)
# Calculate the expected value for the mutual information
emi = expected_mutual_information(contingency, n_samples)
# Calculate entropy for each labeling
h_true, h_pred = entropy(labels_true), entropy(labels_pred)
ami = (mi - emi) / (max(h_true, h_pred) - emi)
return ami
def normalized_mutual_info_score(labels_true, labels_pred):
"""Normalized Mutual Information between two clusterings
Normalized Mutual Information (NMI) is a normalization of the Mutual
Information (MI) score to scale the results between 0 (no mutual
information) and 1 (perfect correlation). In this function, mutual
information is normalized by ``sqrt(H(labels_true) * H(labels_pred))``
This measure is not adjusted for chance. Therefore
:func:`adjusted_mutual_info_score` might be preferred.
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is furthermore symmetric: switching ``label_true`` with
``label_pred`` will return the same score value. This can be useful to
measure the agreement of two independent label assignments strategies
on the same dataset when the real ground truth is not known.
Read more in the :ref:`User Guide <mutual_info_score>`.
Parameters
----------
labels_true : int array, shape = [n_samples]
A clustering of the data into disjoint subsets.
labels_pred : array, shape = [n_samples]
A clustering of the data into disjoint subsets.
Returns
-------
nmi: float
score between 0.0 and 1.0. 1.0 stands for perfectly complete labeling
See also
--------
adjusted_rand_score: Adjusted Rand Index
adjusted_mutual_info_score: Adjusted Mutual Information (adjusted
against chance)
Examples
--------
Perfect labelings are both homogeneous and complete, hence have
score 1.0::
>>> from sklearn.metrics.cluster import normalized_mutual_info_score
>>> normalized_mutual_info_score([0, 0, 1, 1], [0, 0, 1, 1])
1.0
>>> normalized_mutual_info_score([0, 0, 1, 1], [1, 1, 0, 0])
1.0
If classes members are completely split across different clusters,
the assignment is totally incomplete, hence the NMI is null::
>>> normalized_mutual_info_score([0, 0, 0, 0], [0, 1, 2, 3])
0.0
"""
labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
classes = np.unique(labels_true)
clusters = np.unique(labels_pred)
# Special limit cases: no clustering since the data is not split.
# This is a perfect match hence return 1.0.
if (classes.shape[0] == clusters.shape[0] == 1
or classes.shape[0] == clusters.shape[0] == 0):
return 1.0
contingency = contingency_matrix(labels_true, labels_pred)
contingency = np.array(contingency, dtype='float')
# Calculate the MI for the two clusterings
mi = mutual_info_score(labels_true, labels_pred,
contingency=contingency)
# Calculate the expected value for the mutual information
# Calculate entropy for each labeling
h_true, h_pred = entropy(labels_true), entropy(labels_pred)
nmi = mi / max(np.sqrt(h_true * h_pred), 1e-10)
return nmi
def entropy(labels):
"""Calculates the entropy for a labeling."""
if len(labels) == 0:
return 1.0
label_idx = np.unique(labels, return_inverse=True)[1]
pi = bincount(label_idx).astype(np.float)
pi = pi[pi > 0]
pi_sum = np.sum(pi)
# log(a / b) should be calculated as log(a) - log(b) to avoid
# possible loss of precision
return -np.sum((pi / pi_sum) * (np.log(pi) - log(pi_sum)))
| bsd-3-clause |
karstenw/nodebox-pyobjc | examples/Extended Application/sklearn/examples/ensemble/plot_bias_variance.py | 1 | 8132 | """
============================================================
Single estimator versus bagging: bias-variance decomposition
============================================================
This example illustrates and compares the bias-variance decomposition of the
expected mean squared error of a single estimator against a bagging ensemble.
In regression, the expected mean squared error of an estimator can be
decomposed in terms of bias, variance and noise. On average over datasets of
the regression problem, the bias term measures the average amount by which the
predictions of the estimator differ from the predictions of the best possible
estimator for the problem (i.e., the Bayes model). The variance term measures
the variability of the predictions of the estimator when fit over different
instances LS of the problem. Finally, the noise measures the irreducible part
of the error which is due to the variability in the data.
The upper left figure illustrates the predictions (in dark red) of a single
decision tree trained over a random dataset LS (the blue dots) of a toy 1d
regression problem. It also illustrates the predictions (in light red) of other
single decision trees trained over other (and different) randomly drawn
instances LS of the problem. Intuitively, the variance term here corresponds to
the width of the beam of predictions (in light red) of the individual
estimators. The larger the variance, the more sensitive are the predictions for
`x` to small changes in the training set. The bias term corresponds to the
difference between the average prediction of the estimator (in cyan) and the
best possible model (in dark blue). On this problem, we can thus observe that
the bias is quite low (both the cyan and the blue curves are close to each
other) while the variance is large (the red beam is rather wide).
The lower left figure plots the pointwise decomposition of the expected mean
squared error of a single decision tree. It confirms that the bias term (in
blue) is low while the variance is large (in green). It also illustrates the
noise part of the error which, as expected, appears to be constant and around
`0.01`.
The right figures correspond to the same plots but using instead a bagging
ensemble of decision trees. In both figures, we can observe that the bias term
is larger than in the previous case. In the upper right figure, the difference
between the average prediction (in cyan) and the best possible model is larger
(e.g., notice the offset around `x=2`). In the lower right figure, the bias
curve is also slightly higher than in the lower left figure. In terms of
variance however, the beam of predictions is narrower, which suggests that the
variance is lower. Indeed, as the lower right figure confirms, the variance
term (in green) is lower than for single decision trees. Overall, the bias-
variance decomposition is therefore no longer the same. The tradeoff is better
for bagging: averaging several decision trees fit on bootstrap copies of the
dataset slightly increases the bias term but allows for a larger reduction of
the variance, which results in a lower overall mean squared error (compare the
red curves in the lower figures). The script output also confirms this
intuition. The total error of the bagging ensemble is lower than the total
error of a single decision tree, and this difference indeed mainly stems from a
reduced variance.
For further details on bias-variance decomposition, see section 7.3 of [1]_.
References
----------
.. [1] T. Hastie, R. Tibshirani and J. Friedman,
"Elements of Statistical Learning", Springer, 2009.
"""
print(__doc__)
# Author: Gilles Louppe <[email protected]>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.ensemble import BaggingRegressor
from sklearn.tree import DecisionTreeRegressor
# nodebox section
if __name__ == '__builtin__':
# were in nodebox
import os
import tempfile
W = 800
inset = 20
size(W, 600)
plt.cla()
plt.clf()
plt.close('all')
def tempimage():
fob = tempfile.NamedTemporaryFile(mode='w+b', suffix='.png', delete=False)
fname = fob.name
fob.close()
return fname
imgx = 20
imgy = 0
def pltshow(plt, dpi=150):
global imgx, imgy
temppath = tempimage()
plt.savefig(temppath, dpi=dpi)
dx,dy = imagesize(temppath)
w = min(W,dx)
image(temppath,imgx,imgy,width=w)
imgy = imgy + dy + 20
os.remove(temppath)
size(W, HEIGHT+dy+40)
else:
def pltshow(mplpyplot):
mplpyplot.show()
# nodebox section end
# Settings
n_repeat = 50 # Number of iterations for computing expectations
n_train = 50 # Size of the training set
n_test = 1000 # Size of the test set
noise = 0.1 # Standard deviation of the noise
np.random.seed(0)
# Change this for exploring the bias-variance decomposition of other
# estimators. This should work well for estimators with high variance (e.g.,
# decision trees or KNN), but poorly for estimators with low variance (e.g.,
# linear models).
estimators = [("Tree", DecisionTreeRegressor()),
("Bagging(Tree)", BaggingRegressor(DecisionTreeRegressor()))]
n_estimators = len(estimators)
# Generate data
def f(x):
x = x.ravel()
return np.exp(-x ** 2) + 1.5 * np.exp(-(x - 2) ** 2)
def generate(n_samples, noise, n_repeat=1):
X = np.random.rand(n_samples) * 10 - 5
X = np.sort(X)
if n_repeat == 1:
y = f(X) + np.random.normal(0.0, noise, n_samples)
else:
y = np.zeros((n_samples, n_repeat))
for i in range(n_repeat):
y[:, i] = f(X) + np.random.normal(0.0, noise, n_samples)
X = X.reshape((n_samples, 1))
return X, y
X_train = []
y_train = []
for i in range(n_repeat):
X, y = generate(n_samples=n_train, noise=noise)
X_train.append(X)
y_train.append(y)
X_test, y_test = generate(n_samples=n_test, noise=noise, n_repeat=n_repeat)
# Loop over estimators to compare
for n, (name, estimator) in enumerate(estimators):
# Compute predictions
y_predict = np.zeros((n_test, n_repeat))
for i in range(n_repeat):
estimator.fit(X_train[i], y_train[i])
y_predict[:, i] = estimator.predict(X_test)
# Bias^2 + Variance + Noise decomposition of the mean squared error
y_error = np.zeros(n_test)
for i in range(n_repeat):
for j in range(n_repeat):
y_error += (y_test[:, j] - y_predict[:, i]) ** 2
y_error /= (n_repeat * n_repeat)
y_noise = np.var(y_test, axis=1)
y_bias = (f(X_test) - np.mean(y_predict, axis=1)) ** 2
y_var = np.var(y_predict, axis=1)
print("{0}: {1:.4f} (error) = {2:.4f} (bias^2) "
" + {3:.4f} (var) + {4:.4f} (noise)".format(name,
np.mean(y_error),
np.mean(y_bias),
np.mean(y_var),
np.mean(y_noise)))
# Plot figures
plt.subplot(2, n_estimators, n + 1)
plt.plot(X_test, f(X_test), "b", label="$f(x)$")
plt.plot(X_train[0], y_train[0], ".b", label="LS ~ $y = f(x)+noise$")
for i in range(n_repeat):
if i == 0:
plt.plot(X_test, y_predict[:, i], "r", label="$\^y(x)$")
else:
plt.plot(X_test, y_predict[:, i], "r", alpha=0.05)
plt.plot(X_test, np.mean(y_predict, axis=1), "c",
label="$\mathbb{E}_{LS} \^y(x)$")
plt.xlim([-5, 5])
plt.title(name)
if n == 0:
plt.legend(loc="upper left", prop={"size": 11})
plt.subplot(2, n_estimators, n_estimators + n + 1)
plt.plot(X_test, y_error, "r", label="$error(x)$")
plt.plot(X_test, y_bias, "b", label="$bias^2(x)$"),
plt.plot(X_test, y_var, "g", label="$variance(x)$"),
plt.plot(X_test, y_noise, "c", label="$noise(x)$")
plt.xlim([-5, 5])
plt.ylim([0, 0.1])
if n == 0:
plt.legend(loc="upper left", prop={"size": 11})
# plt.show()
pltshow(plt)
| mit |
godfreyhe/flink | flink-python/pyflink/fn_execution/coders.py | 6 | 21474 | ################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
import os
from abc import ABC, abstractmethod
import pyarrow as pa
import pytz
from pyflink.fn_execution import flink_fn_execution_pb2
from pyflink.table.types import TinyIntType, SmallIntType, IntType, BigIntType, BooleanType, \
FloatType, DoubleType, VarCharType, VarBinaryType, DecimalType, DateType, TimeType, \
LocalZonedTimestampType, RowType, RowField, to_arrow_type, TimestampType, ArrayType
try:
from pyflink.fn_execution import coder_impl_fast as coder_impl
except:
from pyflink.fn_execution import coder_impl_slow as coder_impl
__all__ = ['FlattenRowCoder', 'RowCoder', 'BigIntCoder', 'TinyIntCoder', 'BooleanCoder',
'SmallIntCoder', 'IntCoder', 'FloatCoder', 'DoubleCoder', 'BinaryCoder', 'CharCoder',
'DateCoder', 'TimeCoder', 'TimestampCoder', 'LocalZonedTimestampCoder',
'GenericArrayCoder', 'PrimitiveArrayCoder', 'MapCoder', 'DecimalCoder',
'BigDecimalCoder', 'TupleCoder', 'TimeWindowCoder', 'CountWindowCoder']
# LengthPrefixBaseCoder will be used in Operations and other coders will be the field coder
# of LengthPrefixBaseCoder
class LengthPrefixBaseCoder(ABC):
def __init__(self, field_coder: 'FieldCoder'):
self._field_coder = field_coder
@abstractmethod
def get_impl(self):
pass
@classmethod
def from_coder_param_proto(cls, coder_param_proto):
field_coder = cls._to_field_coder(coder_param_proto)
output_mode = coder_param_proto.output_mode
if output_mode == flink_fn_execution_pb2.CoderParam.SINGLE:
return ValueCoder(field_coder)
else:
return IterableCoder(field_coder, output_mode)
@classmethod
def _to_field_coder(cls, coder_param_proto):
data_type = coder_param_proto.data_type
if data_type == flink_fn_execution_pb2.CoderParam.FLATTEN_ROW:
if coder_param_proto.HasField('schema'):
schema_proto = coder_param_proto.schema
field_coders = [from_proto(f.type) for f in schema_proto.fields]
else:
type_info_proto = coder_param_proto.type_info
field_coders = [from_type_info_proto(f.field_type)
for f in type_info_proto.row_type_info.fields]
return FlattenRowCoder(field_coders)
elif data_type == flink_fn_execution_pb2.CoderParam.ROW:
schema_proto = coder_param_proto.schema
field_coders = [from_proto(f.type) for f in schema_proto.fields]
field_names = [f.name for f in schema_proto.fields]
return RowCoder(field_coders, field_names)
elif data_type == flink_fn_execution_pb2.CoderParam.RAW:
type_info_proto = coder_param_proto.type_info
field_coder = from_type_info_proto(type_info_proto)
return field_coder
elif data_type == flink_fn_execution_pb2.CoderParam.ARROW:
timezone = pytz.timezone(os.environ['table.exec.timezone'])
schema_proto = coder_param_proto.schema
row_type = cls._to_row_type(schema_proto)
return ArrowCoder(cls._to_arrow_schema(row_type), row_type, timezone)
elif data_type == flink_fn_execution_pb2.CoderParam.OVER_WINDOW_ARROW:
timezone = pytz.timezone(os.environ['table.exec.timezone'])
schema_proto = coder_param_proto.schema
row_type = cls._to_row_type(schema_proto)
return OverWindowArrowCoder(
cls._to_arrow_schema(row_type), row_type, timezone)
else:
raise ValueError("Unexpected coder type %s" % data_type)
@classmethod
def _to_arrow_schema(cls, row_type):
return pa.schema([pa.field(n, to_arrow_type(t), t._nullable)
for n, t in zip(row_type.field_names(), row_type.field_types())])
@classmethod
def _to_data_type(cls, field_type):
if field_type.type_name == flink_fn_execution_pb2.Schema.TINYINT:
return TinyIntType(field_type.nullable)
elif field_type.type_name == flink_fn_execution_pb2.Schema.SMALLINT:
return SmallIntType(field_type.nullable)
elif field_type.type_name == flink_fn_execution_pb2.Schema.INT:
return IntType(field_type.nullable)
elif field_type.type_name == flink_fn_execution_pb2.Schema.BIGINT:
return BigIntType(field_type.nullable)
elif field_type.type_name == flink_fn_execution_pb2.Schema.BOOLEAN:
return BooleanType(field_type.nullable)
elif field_type.type_name == flink_fn_execution_pb2.Schema.FLOAT:
return FloatType(field_type.nullable)
elif field_type.type_name == flink_fn_execution_pb2.Schema.DOUBLE:
return DoubleType(field_type.nullable)
elif field_type.type_name == flink_fn_execution_pb2.Schema.VARCHAR:
return VarCharType(0x7fffffff, field_type.nullable)
elif field_type.type_name == flink_fn_execution_pb2.Schema.VARBINARY:
return VarBinaryType(0x7fffffff, field_type.nullable)
elif field_type.type_name == flink_fn_execution_pb2.Schema.DECIMAL:
return DecimalType(field_type.decimal_info.precision,
field_type.decimal_info.scale,
field_type.nullable)
elif field_type.type_name == flink_fn_execution_pb2.Schema.DATE:
return DateType(field_type.nullable)
elif field_type.type_name == flink_fn_execution_pb2.Schema.TIME:
return TimeType(field_type.time_info.precision, field_type.nullable)
elif field_type.type_name == \
flink_fn_execution_pb2.Schema.LOCAL_ZONED_TIMESTAMP:
return LocalZonedTimestampType(field_type.local_zoned_timestamp_info.precision,
field_type.nullable)
elif field_type.type_name == flink_fn_execution_pb2.Schema.TIMESTAMP:
return TimestampType(field_type.timestamp_info.precision, field_type.nullable)
elif field_type.type_name == flink_fn_execution_pb2.Schema.BASIC_ARRAY:
return ArrayType(cls._to_data_type(field_type.collection_element_type),
field_type.nullable)
elif field_type.type_name == flink_fn_execution_pb2.Schema.TypeName.ROW:
return RowType(
[RowField(f.name, cls._to_data_type(f.type), f.description)
for f in field_type.row_schema.fields], field_type.nullable)
else:
raise ValueError("field_type %s is not supported." % field_type)
@classmethod
def _to_row_type(cls, row_schema):
return RowType([RowField(f.name, cls._to_data_type(f.type)) for f in row_schema.fields])
class FieldCoder(ABC):
def get_impl(self) -> coder_impl.FieldCoderImpl:
pass
class IterableCoder(LengthPrefixBaseCoder):
"""
Coder for iterable data.
"""
def __init__(self, field_coder: FieldCoder, output_mode):
super(IterableCoder, self).__init__(field_coder)
self._output_mode = output_mode
def get_impl(self):
return coder_impl.IterableCoderImpl(self._field_coder.get_impl(), self._output_mode)
class ValueCoder(LengthPrefixBaseCoder):
"""
Coder for single data.
"""
def __init__(self, field_coder: FieldCoder):
super(ValueCoder, self).__init__(field_coder)
def get_impl(self):
if isinstance(self._field_coder, (ArrowCoder, OverWindowArrowCoder)):
            # ArrowCoder and OverWindowArrowCoder don't support the fast coder currently.
from pyflink.fn_execution import coder_impl_slow
return coder_impl_slow.ValueCoderImpl(self._field_coder.get_impl())
else:
return coder_impl.ValueCoderImpl(self._field_coder.get_impl())
class FlattenRowCoder(FieldCoder):
"""
Coder for Row. The decoded result will be flattened as a list of column values of a row instead
of a row object.
"""
def __init__(self, field_coders):
self._field_coders = field_coders
def get_impl(self):
return coder_impl.FlattenRowCoderImpl([c.get_impl() for c in self._field_coders])
def __repr__(self):
return 'FlattenRowCoder[%s]' % ', '.join(str(c) for c in self._field_coders)
    def __eq__(self, other: 'FlattenRowCoder'):
        return (self.__class__ == other.__class__
                and len(self._field_coders) == len(other._field_coders)
                and all(self._field_coders[i] == other._field_coders[i]
                        for i in range(len(self._field_coders))))
def __ne__(self, other):
return not self == other
    def __hash__(self):
        return hash(tuple(self._field_coders))
class ArrowCoder(FieldCoder):
"""
Coder for Arrow.
"""
def __init__(self, schema, row_type, timezone):
self._schema = schema
self._row_type = row_type
self._timezone = timezone
def get_impl(self):
# ArrowCoder doesn't support fast coder implementation currently.
from pyflink.fn_execution import coder_impl_slow
return coder_impl_slow.ArrowCoderImpl(self._schema, self._row_type, self._timezone)
def __repr__(self):
return 'ArrowCoder[%s]' % self._schema
class OverWindowArrowCoder(FieldCoder):
"""
Coder for batch pandas over window aggregation.
"""
def __init__(self, schema, row_type, timezone):
self._arrow_coder = ArrowCoder(schema, row_type, timezone)
def get_impl(self):
# OverWindowArrowCoder doesn't support fast coder implementation currently.
from pyflink.fn_execution import coder_impl_slow
return coder_impl_slow.OverWindowArrowCoderImpl(self._arrow_coder.get_impl())
def __repr__(self):
return 'OverWindowArrowCoder[%s]' % self._arrow_coder
class RowCoder(FieldCoder):
"""
Coder for Row.
"""
def __init__(self, field_coders, field_names):
self._field_coders = field_coders
self._field_names = field_names
def get_impl(self):
return coder_impl.RowCoderImpl([c.get_impl() for c in self._field_coders],
self._field_names)
def __repr__(self):
return 'RowCoder[%s]' % ', '.join(str(c) for c in self._field_coders)
    def __eq__(self, other: 'RowCoder'):
        return (self.__class__ == other.__class__
                and self._field_names == other._field_names
                and all(self._field_coders[i] == other._field_coders[i]
                        for i in range(len(self._field_coders))))
def __ne__(self, other):
return not self == other
    def __hash__(self):
        return hash(tuple(self._field_coders))
class CollectionCoder(FieldCoder):
"""
Base coder for collection.
"""
def __init__(self, elem_coder):
self._elem_coder = elem_coder
def is_deterministic(self):
return self._elem_coder.is_deterministic()
def __eq__(self, other: 'CollectionCoder'):
return (self.__class__ == other.__class__
and self._elem_coder == other._elem_coder)
def __repr__(self):
return '%s[%s]' % (self.__class__.__name__, repr(self._elem_coder))
def __ne__(self, other):
return not self == other
def __hash__(self):
return hash(self._elem_coder)
class GenericArrayCoder(CollectionCoder):
"""
Coder for generic array such as basic array or object array.
"""
def __init__(self, elem_coder):
super(GenericArrayCoder, self).__init__(elem_coder)
def get_impl(self):
return coder_impl.GenericArrayCoderImpl(self._elem_coder.get_impl())
class PrimitiveArrayCoder(CollectionCoder):
"""
Coder for Primitive Array.
"""
def __init__(self, elem_coder):
super(PrimitiveArrayCoder, self).__init__(elem_coder)
def get_impl(self):
return coder_impl.PrimitiveArrayCoderImpl(self._elem_coder.get_impl())
class MapCoder(FieldCoder):
"""
Coder for Map.
"""
def __init__(self, key_coder, value_coder):
self._key_coder = key_coder
self._value_coder = value_coder
def get_impl(self):
return coder_impl.MapCoderImpl(self._key_coder.get_impl(), self._value_coder.get_impl())
def is_deterministic(self):
return self._key_coder.is_deterministic() and self._value_coder.is_deterministic()
def __repr__(self):
return 'MapCoder[%s]' % ','.join([repr(self._key_coder), repr(self._value_coder)])
def __eq__(self, other: 'MapCoder'):
return (self.__class__ == other.__class__
and self._key_coder == other._key_coder
and self._value_coder == other._value_coder)
def __ne__(self, other):
return not self == other
    def __hash__(self):
        return hash((self._key_coder, self._value_coder))
class BigIntCoder(FieldCoder):
"""
Coder for 8 bytes long.
"""
def get_impl(self):
return coder_impl.BigIntCoderImpl()
class TinyIntCoder(FieldCoder):
"""
Coder for Byte.
"""
def get_impl(self):
return coder_impl.TinyIntCoderImpl()
class BooleanCoder(FieldCoder):
"""
Coder for Boolean.
"""
def get_impl(self):
return coder_impl.BooleanCoderImpl()
class SmallIntCoder(FieldCoder):
"""
Coder for Short.
"""
def get_impl(self):
return coder_impl.SmallIntCoderImpl()
class IntCoder(FieldCoder):
"""
Coder for 4 bytes int.
"""
def get_impl(self):
return coder_impl.IntCoderImpl()
class FloatCoder(FieldCoder):
"""
Coder for Float.
"""
def get_impl(self):
return coder_impl.FloatCoderImpl()
class DoubleCoder(FieldCoder):
"""
Coder for Double.
"""
def get_impl(self):
return coder_impl.DoubleCoderImpl()
class DecimalCoder(FieldCoder):
"""
Coder for Decimal.
"""
def __init__(self, precision, scale):
self.precision = precision
self.scale = scale
def get_impl(self):
return coder_impl.DecimalCoderImpl(self.precision, self.scale)
class BigDecimalCoder(FieldCoder):
"""
    Coder for Basic Decimal that does not need precision and scale to be specified.
"""
def get_impl(self):
return coder_impl.BigDecimalCoderImpl()
class BinaryCoder(FieldCoder):
"""
Coder for Byte Array.
"""
def get_impl(self):
return coder_impl.BinaryCoderImpl()
class CharCoder(FieldCoder):
"""
Coder for Character String.
"""
def get_impl(self):
return coder_impl.CharCoderImpl()
class DateCoder(FieldCoder):
"""
    Coder for Date.
"""
def get_impl(self):
return coder_impl.DateCoderImpl()
class TimeCoder(FieldCoder):
"""
Coder for Time.
"""
def get_impl(self):
return coder_impl.TimeCoderImpl()
class TimestampCoder(FieldCoder):
"""
Coder for Timestamp.
"""
def __init__(self, precision):
self.precision = precision
def get_impl(self):
return coder_impl.TimestampCoderImpl(self.precision)
class LocalZonedTimestampCoder(FieldCoder):
"""
Coder for LocalZonedTimestamp.
"""
def __init__(self, precision, timezone):
self.precision = precision
self.timezone = timezone
def get_impl(self):
return coder_impl.LocalZonedTimestampCoderImpl(self.precision, self.timezone)
class PickledBytesCoder(FieldCoder):
def get_impl(self):
return coder_impl.PickledBytesCoderImpl()
class TupleCoder(FieldCoder):
"""
Coder for Tuple.
"""
def __init__(self, field_coders):
self._field_coders = field_coders
def get_impl(self):
return coder_impl.TupleCoderImpl([c.get_impl() for c in self._field_coders])
def __repr__(self):
return 'TupleCoder[%s]' % ', '.join(str(c) for c in self._field_coders)
class TimeWindowCoder(FieldCoder):
"""
Coder for TimeWindow.
"""
def get_impl(self):
return coder_impl.TimeWindowCoderImpl()
class CountWindowCoder(FieldCoder):
"""
Coder for CountWindow.
"""
def get_impl(self):
return coder_impl.CountWindowCoderImpl()
type_name = flink_fn_execution_pb2.Schema
_type_name_mappings = {
type_name.TINYINT: TinyIntCoder(),
type_name.SMALLINT: SmallIntCoder(),
type_name.INT: IntCoder(),
type_name.BIGINT: BigIntCoder(),
type_name.BOOLEAN: BooleanCoder(),
type_name.FLOAT: FloatCoder(),
type_name.DOUBLE: DoubleCoder(),
type_name.BINARY: BinaryCoder(),
type_name.VARBINARY: BinaryCoder(),
type_name.CHAR: CharCoder(),
type_name.VARCHAR: CharCoder(),
type_name.DATE: DateCoder(),
type_name.TIME: TimeCoder(),
}
def from_proto(field_type):
"""
Creates the corresponding :class:`Coder` given the protocol representation of the field type.
:param field_type: the protocol representation of the field type
:return: :class:`Coder`
"""
field_type_name = field_type.type_name
coder = _type_name_mappings.get(field_type_name)
if coder is not None:
return coder
if field_type_name == type_name.ROW:
return RowCoder([from_proto(f.type) for f in field_type.row_schema.fields],
[f.name for f in field_type.row_schema.fields])
if field_type_name == type_name.TIMESTAMP:
return TimestampCoder(field_type.timestamp_info.precision)
if field_type_name == type_name.LOCAL_ZONED_TIMESTAMP:
timezone = pytz.timezone(os.environ['table.exec.timezone'])
return LocalZonedTimestampCoder(field_type.local_zoned_timestamp_info.precision, timezone)
elif field_type_name == type_name.BASIC_ARRAY:
return GenericArrayCoder(from_proto(field_type.collection_element_type))
elif field_type_name == type_name.MAP:
return MapCoder(from_proto(field_type.map_info.key_type),
from_proto(field_type.map_info.value_type))
elif field_type_name == type_name.DECIMAL:
return DecimalCoder(field_type.decimal_info.precision,
field_type.decimal_info.scale)
else:
raise ValueError("field_type %s is not supported." % field_type)
# for data stream type information.
type_info_name = flink_fn_execution_pb2.TypeInfo
_type_info_name_mappings = {
type_info_name.STRING: CharCoder(),
type_info_name.BYTE: TinyIntCoder(),
type_info_name.BOOLEAN: BooleanCoder(),
type_info_name.SHORT: SmallIntCoder(),
type_info_name.INT: IntCoder(),
type_info_name.LONG: BigIntCoder(),
type_info_name.FLOAT: FloatCoder(),
type_info_name.DOUBLE: DoubleCoder(),
type_info_name.CHAR: CharCoder(),
type_info_name.BIG_INT: BigIntCoder(),
type_info_name.BIG_DEC: BigDecimalCoder(),
type_info_name.SQL_DATE: DateCoder(),
type_info_name.SQL_TIME: TimeCoder(),
type_info_name.SQL_TIMESTAMP: TimestampCoder(3),
type_info_name.PICKLED_BYTES: PickledBytesCoder()
}
def from_type_info_proto(type_info):
field_type_name = type_info.type_name
try:
return _type_info_name_mappings[field_type_name]
except KeyError:
if field_type_name == type_info_name.ROW:
return RowCoder(
[from_type_info_proto(f.field_type) for f in type_info.row_type_info.fields],
[f.field_name for f in type_info.row_type_info.fields])
elif field_type_name == type_info_name.PRIMITIVE_ARRAY:
if type_info.collection_element_type.type_name == type_info_name.BYTE:
return BinaryCoder()
return PrimitiveArrayCoder(from_type_info_proto(type_info.collection_element_type))
elif field_type_name in (type_info_name.BASIC_ARRAY,
type_info_name.OBJECT_ARRAY,
type_info_name.LIST):
return GenericArrayCoder(from_type_info_proto(type_info.collection_element_type))
elif field_type_name == type_info_name.TUPLE:
return TupleCoder([from_type_info_proto(field_type)
for field_type in type_info.tuple_type_info.field_types])
elif field_type_name == type_info_name.MAP:
return MapCoder(from_type_info_proto(type_info.map_type_info.key_type),
from_type_info_proto(type_info.map_type_info.value_type))
else:
raise ValueError("Unsupported type_info %s." % type_info)
| apache-2.0 |
CGATOxford/proj029 | Proj029Pipelines/pipeline_proj029_replication.py | 1 | 13769 | """
=======================================
Compare original and new RNA-seq data
at Day14 vs. Day0
=======================================
:Author: Nick Ilott
:Release: $Id$
:Date: |today|
:Tags: Python
"""
# load modules
from ruffus import *
import CGAT.Experiment as E
import logging as L
import CGAT.Database as Database
import CGAT.CSV as CSV
import sys
import os
import re
import shutil
import itertools
import math
import glob
import time
import gzip
import collections
import random
import numpy as np
import sqlite3
import CGAT.GTF as GTF
import CGAT.IOTools as IOTools
import CGAT.IndexedFasta as IndexedFasta
from rpy2.robjects import r as R
import rpy2.robjects as ro
import rpy2.robjects.vectors as rovectors
from rpy2.rinterface import RRuntimeError
import CGATPipelines.PipelineMetagenomeCommunities as PipelineMetagenomeCommunities
import pandas
###################################################
###################################################
###################################################
# Pipeline configuration
###################################################
# load options from the config file
import CGATPipelines.Pipeline as P
P.getParameters(
["pipeline.ini"])
PARAMS = P.PARAMS
###################################################################
# connecting to database
###################################################################
def connect():
'''connect to database.
This method also attaches to helper databases.
'''
dbh = sqlite3.connect(PARAMS["database"])
return dbh
###################################################
###################################################
###################################################
@follows(mkdir("pca.dir"))
@jobs_limit(1, "R")
@transform([os.path.join(PARAMS.get("communitiesdir"), "genes.dir/gene_counts.norm.matrix"),
os.path.join(PARAMS.get("communitiesdir"), "counts.dir/genus.diamond.aggregated.counts.norm.matrix")],
regex("(\S+)/(\S+).matrix"),
r"pca.dir/\2.loadings.tsv")
def buildPCALoadings(infile, outfile):
'''
    run PCA, plot the principal components and write out the loadings
'''
outname_plot = P.snip(outfile, ".loadings.tsv") + ".pca.pdf"
R('''dat <- read.csv("%s", header = T, stringsAsFactors = F, sep = "\t")''' % infile)
# just get day14 and day0
R('''remove <- c("day3", "day6", "day28")''')
R('''for (day in remove){; dat <- dat[, grep(day, colnames(dat), invert=T)]}''')
R('''rownames(dat) <- dat$taxa''')
R('''dat <- dat[, 1:ncol(dat)-1]''')
R('''pc <- prcomp(t(dat))''')
R('''conds <- unlist(strsplit(colnames(dat), ".R[0-9]"))[seq(1, ncol(dat)*2, 2)]''')
R('''conds <- unlist(strsplit(conds, ".", fixed = T))[seq(2, length(conds)*2, 2)]''')
    # plot the principal components
R('''library(ggplot2)''')
R('''pcs <- data.frame(pc$x)''')
R('''pcs$cond <- conds''')
# get variance explained
R('''imps <- c(summary(pc)$importance[2], summary(pc)$importance[5])''')
R('''p <- ggplot(pcs, aes(x = PC1, y = PC2, colour = cond, size = 3)) + geom_point()''')
R('''p2 <- p + xlab(imps[1]) + ylab(imps[2])''')
R('''p3 <- p2 + scale_colour_manual(values = c("slateGrey", "red"))''')
R('''ggsave("%s")''' % outname_plot)
# get the loadings
R('''loads <- data.frame(pc$rotation)''')
R('''loads$taxa <- rownames(loads)''')
# write out data
R('''write.table(loads, file = "%s", sep = "\t", row.names = F, quote = F)''' % outfile.replace("/", "/%s_" % suffix))
P.touch(outfile)
#########################################
#########################################
#########################################
@transform([os.path.join(PARAMS.get("communitiesdir"), "genes.dir/gene_counts.norm.matrix"),
os.path.join(PARAMS.get("communitiesdir"), "counts.dir/genus.diamond.aggregated.counts.norm.matrix")],
regex("(\S+)/(\S+).matrix"),
r"pca.dir/\2.sig")
def testDistSignificance(infile, outfile):
'''
test whether the colitic samples
cluster significantly
'''
PipelineMetagenomeCommunities.testDistSignificance(infile,
outfile)
#########################################
#########################################
#########################################
@follows(mkdir("correlation.dir"))
@merge(["original_gene_counts.diff.tsv",
"replication_gene_counts.diff.tsv"],
"correlation.dir/gene_abundance_scatter.png")
def scatterplotAbundanceEstimates(infiles, outfile):
'''
scatterplot abundance estimates for NOGs
'''
R('''dat.orig <- read.csv("%s", header=T, stringsAsFactors=F, sep="\t")''' % infiles[0])
R('''dat.orig <- dat.orig[dat.orig$group2 == "WT" & dat.orig$group1 == "HhaIL10R",]''')
R('''dat.rep <- read.csv("%s", header=T, stringsAsFactors=F, sep="\t")''' % infiles[1])
R('''dat.rep <- dat.rep[dat.rep$group1 == "day0" & dat.rep$group2 == "day14",]''')
R('''rownames(dat.orig) <- dat.orig$taxa''')
R('''dat.orig <- dat.orig[dat.rep$taxa,]''')
R('''png("%s")''' % outfile)
R('''plot(dat.orig$AveExpr, dat.rep$AveExpr, pch=16, col="slateGrey")''')
R('''abline(0,1)''')
R('''text(x=3, y=15, labels=c(paste("r =", round(cor(dat.orig$AveExpr, dat.rep$AveExpr),2), sep=" ")))''')
R["dev.off"]()
#########################################
#########################################
#########################################
@follows(mkdir("correlation.dir"))
@merge(["original_gene_counts.diff.tsv",
"replication_gene_counts.diff.tsv"],
"correlation.dir/gene_fold_changes_scatter.png")
def scatterplotFoldChanges(infiles, outfile):
'''
    scatterplot fold changes for NOGs
'''
R('''dat.orig <- read.csv("%s", header=T, stringsAsFactors=F, sep="\t")''' % infiles[0])
R('''dat.orig <- dat.orig[dat.orig$group2 == "WT" & dat.orig$group1 == "HhaIL10R",]''')
R('''dat.rep <- read.csv("%s", header=T, stringsAsFactors=F, sep="\t")''' % infiles[1])
R('''dat.rep <- dat.rep[dat.rep$group1 == "day0" & dat.rep$group2 == "day14",]''')
R('''rownames(dat.orig) <- dat.orig$taxa''')
R('''dat.orig <- dat.orig[dat.rep$taxa,]''')
R('''png("%s")''' % outfile)
R('''plot(dat.orig$logFC, -1*dat.rep$logFC, pch=16)''')
R('''text(x=-4, y=5, labels=c(paste("r =", round(cor(dat.orig$logFC, -1*dat.rep$logFC),2), sep=" ")))''')
R["dev.off"]()
#########################################
#########################################
#########################################
@follows(mkdir("correlation.dir"))
@merge(["original_gene_counts.diff.tsv",
"replication_gene_counts.diff.tsv"],
"correlation.dir/gene_diff_overlap.tsv")
def buildGeneDifferentialExpressionOverlap(infiles, outfile):
'''
    compute the overlap of differentially abundant NOGs between the original and replication datasets
'''
R('''dat.orig <- read.csv("%s", header=T, stringsAsFactors=F, sep="\t")''' % infiles[0])
R('''dat.orig <- dat.orig[dat.orig$group2 == "WT" & dat.orig$group1 == "HhaIL10R",]''')
R('''dat.rep <- read.csv("%s", header=T, stringsAsFactors=F, sep="\t")''' % infiles[1])
R('''dat.rep <- dat.rep[dat.rep$group1 == "day0" & dat.rep$group2 == "day14",]''')
R('''rownames(dat.orig) <- dat.orig$taxa''')
R('''dat.orig <- dat.orig[dat.rep$taxa,]''')
R('''diff.orig <- dat.orig$taxa[dat.orig$adj.P.Val < 0.05]''')
R('''diff.rep <- dat.rep$taxa[dat.rep$adj.P.Val < 0.05]''')
R('''overlap <- intersect(diff.orig, diff.rep)''')
R('''write.table(overlap, file="%s", sep="\t")''' % outfile)
R('''norig <- length(diff.orig)''')
R('''nrep <- length(diff.rep)''')
R('''noverlap <- length(overlap)''')
# significance testing
R('''x <- length(intersect(dat.orig$taxa, dat.rep$taxa))''')
R('''m <- nrep''')
R('''n <- x - nrep''')
R('''k <- norig''')
R('''print(1-phyper(x,m,n,k))''')
R('''write.table(data.frame(c(norig, nrep,noverlap)), file="correlation.dir/noverlap.tsv")''')
#########################################
#########################################
#########################################
@follows(mkdir("wolinella_weisella.dir"))
@transform("original_genus.diamond.aggregated.counts.norm.matrix",
regex("(\S+).norm.matrix"),
r"wolinella_weisella.dir/\1.wolinella.pdf")
def plotOriginalWolinella(infile, outfile):
'''
    plot the abundance of Wolinella in the original dataset;
    one of the genera that replicated
'''
R('''library(reshape)''')
R('''library(ggplot2)''')
R('''dat <- read.csv("%s",
header=T,
stringsAsFactors=F,
sep="\t")''' % infile)
R('''dat <- melt(dat)''')
R('''conds <- unlist(strsplit(as.character(dat$variable), ".R[0-9]"))''')
R('''conds <- conds[seq(1,length(conds),2)]''')
R('''dat$cond <- conds''')
R('''dat <- dat[dat$taxa == "Wolinella",]''')
R('''plot1 <- ggplot(dat, aes(x=factor(cond, levels=c("stool.WT","stool.aIL10R", "stool.Hh", "stool.HhaIL10R")),
y=value, group=cond, colour=cond))''')
R('''plot2 <- plot1 + geom_boxplot() + geom_jitter(size=3)''')
R('''plot2 + scale_colour_manual(values=c("blue", "darkGreen", "red", "grey")) + ylim(c(0,3))''')
R('''ggsave("%s")''' % outfile)
#########################################
#########################################
#########################################
@follows(mkdir("wolinella_weisella.dir"))
@transform("replication_genus.diamond.aggregated.counts.norm.matrix",
regex("(\S+).norm.matrix"),
r"wolinella_weisella.dir/\1.wolinella.pdf")
def plotReplicationWolinella(infile, outfile):
'''
    plot the abundance of Wolinella in the replication dataset;
    one of the genera that replicated
'''
R('''library(reshape)''')
R('''library(ggplot2)''')
R('''dat <- read.csv("%s",
header=T,
stringsAsFactors=F,
sep="\t")''' % infile)
# just get day14 and day0
R('''remove <- c("day3", "day6", "day28")''')
R('''for (day in remove){; dat <- dat[, grep(day, colnames(dat), invert=T)]}''')
R('''dat <- melt(dat)''')
R('''conds <- unlist(strsplit(as.character(dat$variable), ".R[0-9]"))''')
R('''conds <- conds[seq(1,length(conds),2)]''')
R('''dat$cond <- conds''')
R('''dat <- dat[dat$taxa == "Wolinella",]''')
R('''plot1 <- ggplot(dat, aes(x=factor(cond, levels=c("stool.day0","stool.day14")),
y=value, group=cond, colour=cond))''')
R('''plot2 <- plot1 + geom_boxplot() + geom_jitter(size=3)''')
R('''plot2 + scale_colour_manual(values=c("grey", "red")) + ylim(c(0,3))''')
R('''ggsave("%s")''' % outfile)
#########################################
#########################################
#########################################
@follows(mkdir("wolinella_weisella.dir"))
@transform("original_genus.diamond.aggregated.counts.norm.matrix",
regex("(\S+).norm.matrix"),
r"wolinella_weisella.dir/\1.weissella.pdf")
def plotOriginalWeissella(infile, outfile):
'''
    plot the abundance of Weissella in the original dataset;
    one of the genera that replicated
'''
R('''library(reshape)''')
R('''library(ggplot2)''')
R('''dat <- read.csv("%s",
header=T,
stringsAsFactors=F,
sep="\t")''' % infile)
R('''dat <- melt(dat)''')
R('''conds <- unlist(strsplit(as.character(dat$variable), ".R[0-9]"))''')
R('''conds <- conds[seq(1,length(conds),2)]''')
R('''dat$cond <- conds''')
R('''dat <- dat[dat$taxa == "Weissella",]''')
R('''plot1 <- ggplot(dat, aes(x=factor(cond, levels=c("stool.WT","stool.aIL10R", "stool.Hh", "stool.HhaIL10R")),
y=value, group=cond, colour=cond))''')
R('''plot2 <- plot1 + geom_boxplot() + geom_jitter(size=3)''')
R('''plot2 + scale_colour_manual(values=c("blue", "darkGreen", "red", "grey")) + ylim(c(0,4))''')
R('''ggsave("%s")''' % outfile)
#########################################
#########################################
#########################################
@follows(mkdir("wolinella_weisella.dir"))
@transform("replication_genus.diamond.aggregated.counts.norm.matrix",
regex("(\S+).norm.matrix"),
r"wolinella_weisella.dir/\1.weissella.pdf")
def plotReplicationWeissella(infile, outfile):
'''
    plot the abundance of Weissella in the replication dataset;
    one of the genera that replicated
'''
R('''library(reshape)''')
R('''library(ggplot2)''')
R('''dat <- read.csv("%s",
header=T,
stringsAsFactors=F,
sep="\t")''' % infile)
# just get day14 and day0
R('''remove <- c("day3", "day6", "day28")''')
R('''for (day in remove){; dat <- dat[, grep(day, colnames(dat), invert=T)]}''')
R('''dat <- melt(dat)''')
R('''conds <- unlist(strsplit(as.character(dat$variable), ".R[0-9]"))''')
R('''conds <- conds[seq(1,length(conds),2)]''')
R('''dat$cond <- conds''')
R('''dat <- dat[dat$taxa == "Weissella",]''')
R('''plot1 <- ggplot(dat, aes(x=factor(cond, levels=c("stool.day0","stool.day14")),
y=value, group=cond, colour=cond))''')
R('''plot2 <- plot1 + geom_boxplot() + geom_jitter(size=3)''')
R('''plot2 + scale_colour_manual(values=c("grey", "red")) + ylim(c(0,4))''')
R('''ggsave("%s")''' % outfile)
@follows(plotOriginalWeissella,
plotOriginalWolinella,
plotReplicationWeissella,
plotReplicationWolinella)
def plotWolinellaWeissella():
pass
if __name__ == "__main__":
sys.exit(P.main(sys.argv))
| bsd-3-clause |
maheshakya/scikit-learn | sklearn/utils/tests/test_shortest_path.py | 42 | 2894 | from collections import defaultdict
import numpy as np
from numpy.testing import assert_array_almost_equal
from sklearn.utils.graph import (graph_shortest_path,
single_source_shortest_path_length)
def floyd_warshall_slow(graph, directed=False):
N = graph.shape[0]
    # set zero entries (missing edges) to infinity
graph[np.where(graph == 0)] = np.inf
#set diagonal to zero
graph.flat[::N + 1] = 0
if not directed:
graph = np.minimum(graph, graph.T)
for k in range(N):
for i in range(N):
for j in range(N):
graph[i, j] = min(graph[i, j], graph[i, k] + graph[k, j])
graph[np.where(np.isinf(graph))] = 0
return graph
def generate_graph(N=20):
#sparse grid of distances
rng = np.random.RandomState(0)
dist_matrix = rng.random_sample((N, N))
#make symmetric: distances are not direction-dependent
dist_matrix += dist_matrix.T
#make graph sparse
i = (rng.randint(N, size=N * N // 2), rng.randint(N, size=N * N // 2))
dist_matrix[i] = 0
#set diagonal to zero
dist_matrix.flat[::N + 1] = 0
return dist_matrix
def test_floyd_warshall():
dist_matrix = generate_graph(20)
for directed in (True, False):
graph_FW = graph_shortest_path(dist_matrix, directed, 'FW')
graph_py = floyd_warshall_slow(dist_matrix.copy(), directed)
assert_array_almost_equal(graph_FW, graph_py)
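def test_floyd_warshall_toy_example():
    # Illustrative, hand-checked example (added for clarity; not part of the
    # original test suite): three nodes with undirected edges 0-1 (weight 1),
    # 1-2 (weight 2) and 0-2 (weight 5).  The shortest 0 -> 2 path goes
    # through node 1 and has total length 3.
    dist_matrix = np.array([[0., 1., 5.],
                            [1., 0., 2.],
                            [5., 2., 0.]])
    expected = np.array([[0., 1., 3.],
                         [1., 0., 2.],
                         [3., 2., 0.]])
    graph_FW = graph_shortest_path(dist_matrix, directed=False, method='FW')
    graph_py = floyd_warshall_slow(dist_matrix.copy(), directed=False)
    assert_array_almost_equal(graph_FW, expected)
    assert_array_almost_equal(graph_py, expected)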
def test_dijkstra():
dist_matrix = generate_graph(20)
for directed in (True, False):
graph_D = graph_shortest_path(dist_matrix, directed, 'D')
graph_py = floyd_warshall_slow(dist_matrix.copy(), directed)
assert_array_almost_equal(graph_D, graph_py)
def test_shortest_path():
dist_matrix = generate_graph(20)
# We compare path length and not costs (-> set distances to 0 or 1)
dist_matrix[dist_matrix != 0] = 1
for directed in (True, False):
if not directed:
dist_matrix = np.minimum(dist_matrix, dist_matrix.T)
graph_py = floyd_warshall_slow(dist_matrix.copy(), directed)
for i in range(dist_matrix.shape[0]):
# Non-reachable nodes have distance 0 in graph_py
dist_dict = defaultdict(int)
dist_dict.update(single_source_shortest_path_length(dist_matrix,
i))
for j in range(graph_py[i].shape[0]):
assert_array_almost_equal(dist_dict[j], graph_py[i, j])
def test_dijkstra_bug_fix():
X = np.array([[0., 0., 4.],
[1., 0., 2.],
[0., 5., 0.]])
dist_FW = graph_shortest_path(X, directed=False, method='FW')
dist_D = graph_shortest_path(X, directed=False, method='D')
assert_array_almost_equal(dist_D, dist_FW)
if __name__ == '__main__':
import nose
nose.runmodule()
| bsd-3-clause |
chutsu/slam | slam_optimization/scripts/plot_3d_data.py | 1 | 3075 | #!/usr/bin/env python2
import csv
from math import pow
import numpy as np
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pylab as plt
TEST_DATA_3D = "pts3d.dat"
TEST_DATA_1 = "pts1.dat"
TEST_DATA_2 = "pts2.dat"
def load_2d_data(fp, skip_header=True):
csv_file = open(fp, 'r')
csv_reader = csv.reader(csv_file)
if skip_header:
next(csv_reader, None)
data = {
"x": [], "y": [],
}
for line in csv_reader:
data["x"].append(float(line[0]))
data["y"].append(float(line[1]))
return data
def load_3d_data(fp, skip_header=True):
csv_file = open(fp, 'r')
csv_reader = csv.reader(csv_file)
if skip_header:
next(csv_reader, None)
data = {
"x": [], "y": [], "z": [],
}
for line in csv_reader:
data["x"].append(float(line[0]))
data["y"].append(float(line[1]))
data["z"].append(float(line[2]))
return data
def rotation_matrix(q):
R_00 = 1.0 - 2.0 * pow(q[1], 2) - 2.0 * pow(q[2], 2)
R_01 = 2.0 * q[0] * q[1] + 2.0 * q[3] * q[2]
R_02 = 2.0 * q[0] * q[2] - 2.0 * q[3] * q[1]
R_10 = 2.0 * q[0] * q[1] - 2.0 * q[3] * q[2]
R_11 = 1.0 - 2.0 * pow(q[0], 2) - 2.0 * pow(q[2], 2)
    R_12 = 2.0 * q[1] * q[2] + 2.0 * q[3] * q[0]
    R_20 = 2.0 * q[0] * q[2] + 2.0 * q[3] * q[1]
R_21 = 2.0 * q[1] * q[2] - 2.0 * q[3] * q[0]
R_22 = 1.0 - 2.0 * pow(q[0], 2) - 2.0 * pow(q[1], 2)
R = [
[R_00, R_01, R_02],
[R_10, R_11, R_12],
[R_20, R_21, R_22]
]
return np.matrix(R)
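# Quick illustrative sanity check (not part of the original script): the
# quaternion convention assumed above is q = [x, y, z, w], matching the
# identity quaternion [0.0, 0.0, 0.0, 1.0] used in plot() below.
def _rotation_matrix_sanity_check():
    # The identity quaternion should map to the 3x3 identity matrix.
    return np.allclose(rotation_matrix([0.0, 0.0, 0.0, 1.0]), np.eye(3))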
def plot_2dpts(q, t, pts, ax, color):
pts["z"] = []
R = rotation_matrix(q)
t = np.array(t)
# transform point
for i in range(len(pts["x"])):
pt = np.array([pts["x"][i], pts["y"][i], 0.0])
pt = R.dot(pt + t).tolist()[0]
pts["x"][i] = pt[0]
pts["y"][i] = pt[1]
pts["z"].append(pt[2])
# plot
ax.scatter(pts["x"], pts["y"], pts["z"], c=color)
def plot(pts3d, pts1, pts2):
fig = plt.figure()
ax = fig.add_subplot(111, projection="3d")
# plot 3d points
ax.scatter(pts3d["x"], pts3d["y"], pts3d["z"], c="r")
# plot points 1
q = [0.0, 0.0, 0.0, 1.0]
t = [0.0, 0.0, 0.0]
plot_2dpts(q, t, pts1, ax, "g")
# plot points 2
q = [0.0, -0.174, 0.0, 0.985]
t = [1.0, 0.0, 0.0]
plot_2dpts(q, t, pts2, ax, "b")
for i in range(len(pts3d["x"])):
ax.plot(
[pts3d["x"][i], pts1["x"][i]],
[pts3d["y"][i], pts1["y"][i]],
[pts3d["z"][i], pts1["z"][i]],
c="g"
)
for i in range(len(pts3d["x"])):
ax.plot(
[pts3d["x"][i], pts2["x"][i]],
[pts3d["y"][i], pts2["y"][i]],
[pts3d["z"][i], pts2["z"][i]],
c="b"
)
# plot labels
ax.set_xlabel("x")
ax.set_ylabel("y")
ax.set_zlabel("z")
plt.show()
if __name__ == "__main__":
pts3d = load_3d_data(TEST_DATA_3D)
pts1 = load_2d_data(TEST_DATA_1)
pts2 = load_2d_data(TEST_DATA_2)
plot(pts3d, pts1, pts2)
| gpl-3.0 |
etendue/deep-learning | image-classification/helper.py | 155 | 5631 | import pickle
import numpy as np
import matplotlib.pyplot as plt
from sklearn.preprocessing import LabelBinarizer
def _load_label_names():
"""
Load the label names from file
"""
return ['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
def load_cfar10_batch(cifar10_dataset_folder_path, batch_id):
"""
Load a batch of the dataset
"""
with open(cifar10_dataset_folder_path + '/data_batch_' + str(batch_id), mode='rb') as file:
batch = pickle.load(file, encoding='latin1')
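    # CIFAR-10 stores each image as a flat row of 3072 values in channel-major
    # order (1024 red, then 1024 green, then 1024 blue); reshape to
    # (N, 3, 32, 32) and move the channel axis last so each image becomes a
    # (32, 32, 3) array suitable for plotting.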
features = batch['data'].reshape((len(batch['data']), 3, 32, 32)).transpose(0, 2, 3, 1)
labels = batch['labels']
return features, labels
def display_stats(cifar10_dataset_folder_path, batch_id, sample_id):
"""
    Display stats of the dataset
"""
batch_ids = list(range(1, 6))
if batch_id not in batch_ids:
print('Batch Id out of Range. Possible Batch Ids: {}'.format(batch_ids))
return None
features, labels = load_cfar10_batch(cifar10_dataset_folder_path, batch_id)
if not (0 <= sample_id < len(features)):
print('{} samples in batch {}. {} is out of range.'.format(len(features), batch_id, sample_id))
return None
print('\nStats of batch {}:'.format(batch_id))
print('Samples: {}'.format(len(features)))
print('Label Counts: {}'.format(dict(zip(*np.unique(labels, return_counts=True)))))
print('First 20 Labels: {}'.format(labels[:20]))
sample_image = features[sample_id]
sample_label = labels[sample_id]
label_names = _load_label_names()
print('\nExample of Image {}:'.format(sample_id))
print('Image - Min Value: {} Max Value: {}'.format(sample_image.min(), sample_image.max()))
print('Image - Shape: {}'.format(sample_image.shape))
print('Label - Label Id: {} Name: {}'.format(sample_label, label_names[sample_label]))
plt.axis('off')
plt.imshow(sample_image)
def _preprocess_and_save(normalize, one_hot_encode, features, labels, filename):
"""
Preprocess data and save it to file
"""
features = normalize(features)
labels = one_hot_encode(labels)
pickle.dump((features, labels), open(filename, 'wb'))
def preprocess_and_save_data(cifar10_dataset_folder_path, normalize, one_hot_encode):
"""
Preprocess Training and Validation Data
"""
n_batches = 5
valid_features = []
valid_labels = []
for batch_i in range(1, n_batches + 1):
features, labels = load_cfar10_batch(cifar10_dataset_folder_path, batch_i)
validation_count = int(len(features) * 0.1)
        # Preprocess and save a batch of training data
_preprocess_and_save(
normalize,
one_hot_encode,
features[:-validation_count],
labels[:-validation_count],
'preprocess_batch_' + str(batch_i) + '.p')
# Use a portion of training batch for validation
valid_features.extend(features[-validation_count:])
valid_labels.extend(labels[-validation_count:])
# Preprocess and Save all validation data
_preprocess_and_save(
normalize,
one_hot_encode,
np.array(valid_features),
np.array(valid_labels),
'preprocess_validation.p')
with open(cifar10_dataset_folder_path + '/test_batch', mode='rb') as file:
batch = pickle.load(file, encoding='latin1')
# load the test data
test_features = batch['data'].reshape((len(batch['data']), 3, 32, 32)).transpose(0, 2, 3, 1)
test_labels = batch['labels']
# Preprocess and Save all test data
_preprocess_and_save(
normalize,
one_hot_encode,
np.array(test_features),
np.array(test_labels),
'preprocess_test.p')
def batch_features_labels(features, labels, batch_size):
"""
Split features and labels into batches
"""
for start in range(0, len(features), batch_size):
end = min(start + batch_size, len(features))
yield features[start:end], labels[start:end]
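# Illustrative note (added for clarity): with 10 samples and batch_size=4,
# batch_features_labels yields mini-batches of size 4, 4 and 2, so the final
# mini-batch may be smaller than batch_size.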
def load_preprocess_training_batch(batch_id, batch_size):
"""
Load the Preprocessed Training data and return them in batches of <batch_size> or less
"""
filename = 'preprocess_batch_' + str(batch_id) + '.p'
features, labels = pickle.load(open(filename, mode='rb'))
# Return the training data in batches of size <batch_size> or less
return batch_features_labels(features, labels, batch_size)
def display_image_predictions(features, labels, predictions):
n_classes = 10
label_names = _load_label_names()
label_binarizer = LabelBinarizer()
label_binarizer.fit(range(n_classes))
label_ids = label_binarizer.inverse_transform(np.array(labels))
fig, axies = plt.subplots(nrows=4, ncols=2)
fig.tight_layout()
fig.suptitle('Softmax Predictions', fontsize=20, y=1.1)
n_predictions = 3
margin = 0.05
ind = np.arange(n_predictions)
width = (1. - 2. * margin) / n_predictions
for image_i, (feature, label_id, pred_indicies, pred_values) in enumerate(zip(features, label_ids, predictions.indices, predictions.values)):
pred_names = [label_names[pred_i] for pred_i in pred_indicies]
correct_name = label_names[label_id]
axies[image_i][0].imshow(feature)
axies[image_i][0].set_title(correct_name)
axies[image_i][0].set_axis_off()
axies[image_i][1].barh(ind + margin, pred_values[::-1], width)
axies[image_i][1].set_yticks(ind + margin)
axies[image_i][1].set_yticklabels(pred_names[::-1])
axies[image_i][1].set_xticks([0, 0.5, 1.0])
| mit |
waterponey/scikit-learn | sklearn/datasets/lfw.py | 2 | 20048 | """Loader for the Labeled Faces in the Wild (LFW) dataset
This dataset is a collection of JPEG pictures of famous people collected
over the internet, all details are available on the official website:
http://vis-www.cs.umass.edu/lfw/
Each picture is centered on a single face. The typical task is called
Face Verification: given a pair of two pictures, a binary classifier
must predict whether the two images are from the same person.
An alternative task, Face Recognition or Face Identification is:
given the picture of the face of an unknown person, identify the name
of the person by referring to a gallery of previously seen pictures of
identified persons.
Both Face Verification and Face Recognition are tasks that are typically
performed on the output of a model trained to perform Face Detection. The
most popular model for Face Detection is called Viola-Jones and is
implemented in the OpenCV library. The LFW faces were extracted by this face
detector from various online websites.
"""
# Copyright (c) 2011 Olivier Grisel <[email protected]>
# License: BSD 3 clause
from os import listdir, makedirs, remove, rename
from os.path import join, exists, isdir
from sklearn.utils import deprecated
import logging
import numpy as np
try:
import urllib.request as urllib # for backwards compatibility
except ImportError:
import urllib
from .base import get_data_home, Bunch
from ..externals.joblib import Memory
from ..externals.six import b
logger = logging.getLogger(__name__)
BASE_URL = "http://vis-www.cs.umass.edu/lfw/"
ARCHIVE_NAME = "lfw.tgz"
FUNNELED_ARCHIVE_NAME = "lfw-funneled.tgz"
TARGET_FILENAMES = [
'pairsDevTrain.txt',
'pairsDevTest.txt',
'pairs.txt',
]
def scale_face(face):
"""Scale back to 0-1 range in case of normalization for plotting"""
scaled = face - face.min()
scaled /= scaled.max()
return scaled
#
# Common private utilities for data fetching from the original LFW website
# local disk caching, and image decoding.
#
def check_fetch_lfw(data_home=None, funneled=True, download_if_missing=True):
"""Helper function to download any missing LFW data"""
data_home = get_data_home(data_home=data_home)
lfw_home = join(data_home, "lfw_home")
if funneled:
archive_path = join(lfw_home, FUNNELED_ARCHIVE_NAME)
data_folder_path = join(lfw_home, "lfw_funneled")
archive_url = BASE_URL + FUNNELED_ARCHIVE_NAME
else:
archive_path = join(lfw_home, ARCHIVE_NAME)
data_folder_path = join(lfw_home, "lfw")
archive_url = BASE_URL + ARCHIVE_NAME
if not exists(lfw_home):
makedirs(lfw_home)
for target_filename in TARGET_FILENAMES:
target_filepath = join(lfw_home, target_filename)
if not exists(target_filepath):
if download_if_missing:
url = BASE_URL + target_filename
logger.warning("Downloading LFW metadata: %s", url)
urllib.urlretrieve(url, target_filepath)
else:
raise IOError("%s is missing" % target_filepath)
if not exists(data_folder_path):
if not exists(archive_path):
if download_if_missing:
archive_path_temp = archive_path + ".tmp"
logger.warning("Downloading LFW data (~200MB): %s",
archive_url)
urllib.urlretrieve(archive_url, archive_path_temp)
rename(archive_path_temp, archive_path)
else:
raise IOError("%s is missing" % target_filepath)
import tarfile
logger.info("Decompressing the data archive to %s", data_folder_path)
tarfile.open(archive_path, "r:gz").extractall(path=lfw_home)
remove(archive_path)
return lfw_home, data_folder_path
def _load_imgs(file_paths, slice_, color, resize):
"""Internally used to load images"""
# Try to import imread and imresize from PIL. We do this here to prevent
# the whole sklearn.datasets module from depending on PIL.
try:
try:
from scipy.misc import imread
except ImportError:
from scipy.misc.pilutil import imread
from scipy.misc import imresize
except ImportError:
raise ImportError("The Python Imaging Library (PIL)"
" is required to load data from jpeg files")
# compute the portion of the images to load to respect the slice_ parameter
# given by the caller
default_slice = (slice(0, 250), slice(0, 250))
if slice_ is None:
slice_ = default_slice
else:
slice_ = tuple(s or ds for s, ds in zip(slice_, default_slice))
h_slice, w_slice = slice_
h = (h_slice.stop - h_slice.start) // (h_slice.step or 1)
w = (w_slice.stop - w_slice.start) // (w_slice.step or 1)
if resize is not None:
resize = float(resize)
h = int(resize * h)
w = int(resize * w)
# allocate some contiguous memory to host the decoded image slices
n_faces = len(file_paths)
if not color:
faces = np.zeros((n_faces, h, w), dtype=np.float32)
else:
faces = np.zeros((n_faces, h, w, 3), dtype=np.float32)
# iterate over the collected file path to load the jpeg files as numpy
# arrays
for i, file_path in enumerate(file_paths):
if i % 1000 == 0:
logger.info("Loading face #%05d / %05d", i + 1, n_faces)
# Checks if jpeg reading worked. Refer to issue #3594 for more
# details.
img = imread(file_path)
        if img.ndim == 0:
raise RuntimeError("Failed to read the image file %s, "
"Please make sure that libjpeg is installed"
% file_path)
face = np.asarray(img[slice_], dtype=np.float32)
face /= 255.0 # scale uint8 coded colors to the [0.0, 1.0] floats
if resize is not None:
face = imresize(face, resize)
if not color:
# average the color channels to compute a gray levels
# representation
face = face.mean(axis=2)
faces[i, ...] = face
return faces
#
# Task #1: Face Identification on picture with names
#
def _fetch_lfw_people(data_folder_path, slice_=None, color=False, resize=None,
min_faces_per_person=0):
"""Perform the actual data loading for the lfw people dataset
This operation is meant to be cached by a joblib wrapper.
"""
# scan the data folder content to retain people with more that
# `min_faces_per_person` face pictures
person_names, file_paths = [], []
for person_name in sorted(listdir(data_folder_path)):
folder_path = join(data_folder_path, person_name)
if not isdir(folder_path):
continue
paths = [join(folder_path, f) for f in listdir(folder_path)]
n_pictures = len(paths)
if n_pictures >= min_faces_per_person:
person_name = person_name.replace('_', ' ')
person_names.extend([person_name] * n_pictures)
file_paths.extend(paths)
n_faces = len(file_paths)
if n_faces == 0:
raise ValueError("min_faces_per_person=%d is too restrictive" %
min_faces_per_person)
target_names = np.unique(person_names)
target = np.searchsorted(target_names, person_names)
faces = _load_imgs(file_paths, slice_, color, resize)
# shuffle the faces with a deterministic RNG scheme to avoid having
# all faces of the same person in a row, as it would break some
# cross validation and learning algorithms such as SGD and online
# k-means that make an IID assumption
indices = np.arange(n_faces)
np.random.RandomState(42).shuffle(indices)
faces, target = faces[indices], target[indices]
return faces, target, target_names
def fetch_lfw_people(data_home=None, funneled=True, resize=0.5,
min_faces_per_person=0, color=False,
slice_=(slice(70, 195), slice(78, 172)),
download_if_missing=True):
"""Loader for the Labeled Faces in the Wild (LFW) people dataset
This dataset is a collection of JPEG pictures of famous people
collected on the internet, all details are available on the
official website:
http://vis-www.cs.umass.edu/lfw/
Each picture is centered on a single face. Each pixel of each channel
(color in RGB) is encoded by a float in range 0.0 - 1.0.
The task is called Face Recognition (or Identification): given the
picture of a face, find the name of the person given a training set
(gallery).
The original images are 250 x 250 pixels, but the default slice and resize
arguments reduce them to 62 x 47.
Parameters
----------
data_home : optional, default: None
Specify another download and cache folder for the datasets. By default
all scikit learn data is stored in '~/scikit_learn_data' subfolders.
funneled : boolean, optional, default: True
Download and use the funneled variant of the dataset.
resize : float, optional, default 0.5
        Ratio used to resize each face picture.
min_faces_per_person : int, optional, default None
The extracted dataset will only retain pictures of people that have at
least `min_faces_per_person` different pictures.
color : boolean, optional, default False
Keep the 3 RGB channels instead of averaging them to a single
gray level channel. If color is True the shape of the data has
one more dimension than the shape with color = False.
slice_ : optional
Provide a custom 2D slice (height, width) to extract the
'interesting' part of the jpeg files and avoid use statistical
correlation from the background
download_if_missing : optional, True by default
If False, raise a IOError if the data is not locally available
instead of trying to download the data from the source site.
Returns
-------
dataset : dict-like object with the following attributes:
dataset.data : numpy array of shape (13233, 2914)
Each row corresponds to a ravelled face image of original size 62 x 47
pixels. Changing the ``slice_`` or resize parameters will change the
shape of the output.
dataset.images : numpy array of shape (13233, 62, 47)
Each row is a face image corresponding to one of the 5749 people in
the dataset. Changing the ``slice_`` or resize parameters will change
the shape of the output.
dataset.target : numpy array of shape (13233,)
Labels associated to each face image. Those labels range from 0-5748
and correspond to the person IDs.
dataset.DESCR : string
Description of the Labeled Faces in the Wild (LFW) dataset.
"""
lfw_home, data_folder_path = check_fetch_lfw(
data_home=data_home, funneled=funneled,
download_if_missing=download_if_missing)
logger.info('Loading LFW people faces from %s', lfw_home)
# wrap the loader in a memoizing function that will return memmaped data
# arrays for optimal memory usage
m = Memory(cachedir=lfw_home, compress=6, verbose=0)
load_func = m.cache(_fetch_lfw_people)
# load and memoize the pairs as np arrays
faces, target, target_names = load_func(
data_folder_path, resize=resize,
min_faces_per_person=min_faces_per_person, color=color, slice_=slice_)
# pack the results as a Bunch instance
return Bunch(data=faces.reshape(len(faces), -1), images=faces,
target=target, target_names=target_names,
DESCR="LFW faces dataset")
#
# Task #2: Face Verification on pairs of face pictures
#
def _fetch_lfw_pairs(index_file_path, data_folder_path, slice_=None,
color=False, resize=None):
"""Perform the actual data loading for the LFW pairs dataset
This operation is meant to be cached by a joblib wrapper.
"""
# parse the index file to find the number of pairs to be able to allocate
# the right amount of memory before starting to decode the jpeg files
with open(index_file_path, 'rb') as index_file:
split_lines = [ln.strip().split(b('\t')) for ln in index_file]
pair_specs = [sl for sl in split_lines if len(sl) > 2]
n_pairs = len(pair_specs)
# iterating over the metadata lines for each pair to find the filename to
# decode and load in memory
target = np.zeros(n_pairs, dtype=np.int)
file_paths = list()
for i, components in enumerate(pair_specs):
if len(components) == 3:
target[i] = 1
pair = (
(components[0], int(components[1]) - 1),
(components[0], int(components[2]) - 1),
)
elif len(components) == 4:
target[i] = 0
pair = (
(components[0], int(components[1]) - 1),
(components[2], int(components[3]) - 1),
)
else:
raise ValueError("invalid line %d: %r" % (i + 1, components))
for j, (name, idx) in enumerate(pair):
try:
person_folder = join(data_folder_path, name)
except TypeError:
person_folder = join(data_folder_path, str(name, 'UTF-8'))
filenames = list(sorted(listdir(person_folder)))
file_path = join(person_folder, filenames[idx])
file_paths.append(file_path)
pairs = _load_imgs(file_paths, slice_, color, resize)
shape = list(pairs.shape)
n_faces = shape.pop(0)
shape.insert(0, 2)
shape.insert(0, n_faces // 2)
pairs.shape = shape
return pairs, target, np.array(['Different persons', 'Same person'])
@deprecated("Function 'load_lfw_people' has been deprecated in 0.17 and will "
"be removed in 0.19."
"Use fetch_lfw_people(download_if_missing=False) instead.")
def load_lfw_people(download_if_missing=False, **kwargs):
"""
Alias for fetch_lfw_people(download_if_missing=False)
.. deprecated:: 0.17
This function will be removed in 0.19.
Use :func:`sklearn.datasets.fetch_lfw_people` with parameter
``download_if_missing=False`` instead.
Check fetch_lfw_people.__doc__ for the documentation and parameter list.
"""
return fetch_lfw_people(download_if_missing=download_if_missing, **kwargs)
def fetch_lfw_pairs(subset='train', data_home=None, funneled=True, resize=0.5,
color=False, slice_=(slice(70, 195), slice(78, 172)),
download_if_missing=True):
"""Loader for the Labeled Faces in the Wild (LFW) pairs dataset
This dataset is a collection of JPEG pictures of famous people
collected on the internet, all details are available on the
official website:
http://vis-www.cs.umass.edu/lfw/
Each picture is centered on a single face. Each pixel of each channel
(color in RGB) is encoded by a float in range 0.0 - 1.0.
The task is called Face Verification: given a pair of two pictures,
a binary classifier must predict whether the two images are from
the same person.
In the official `README.txt`_ this task is described as the
"Restricted" task. As I am not sure as to implement the
"Unrestricted" variant correctly, I left it as unsupported for now.
.. _`README.txt`: http://vis-www.cs.umass.edu/lfw/README.txt
The original images are 250 x 250 pixels, but the default slice and resize
arguments reduce them to 62 x 47.
Read more in the :ref:`User Guide <labeled_faces_in_the_wild>`.
Parameters
----------
subset : optional, default: 'train'
Select the dataset to load: 'train' for the development training
set, 'test' for the development test set, and '10_folds' for the
official evaluation set that is meant to be used with a 10-folds
cross validation.
data_home : optional, default: None
Specify another download and cache folder for the datasets. By
default all scikit learn data is stored in '~/scikit_learn_data'
subfolders.
funneled : boolean, optional, default: True
Download and use the funneled variant of the dataset.
resize : float, optional, default 0.5
        Ratio used to resize each face picture.
color : boolean, optional, default False
Keep the 3 RGB channels instead of averaging them to a single
gray level channel. If color is True the shape of the data has
one more dimension than the shape with color = False.
slice_ : optional
Provide a custom 2D slice (height, width) to extract the
'interesting' part of the jpeg files and avoid use statistical
correlation from the background
download_if_missing : optional, True by default
If False, raise a IOError if the data is not locally available
instead of trying to download the data from the source site.
Returns
-------
The data is returned as a Bunch object with the following attributes:
data : numpy array of shape (2200, 5828). Shape depends on ``subset``.
Each row corresponds to 2 ravel'd face images of original size 62 x 47
pixels. Changing the ``slice_``, ``resize`` or ``subset`` parameters
will change the shape of the output.
pairs : numpy array of shape (2200, 2, 62, 47). Shape depends on
``subset``.
Each row has 2 face images corresponding to same or different person
from the dataset containing 5749 people. Changing the ``slice_``,
``resize`` or ``subset`` parameters will change the shape of the
output.
target : numpy array of shape (2200,). Shape depends on ``subset``.
Labels associated to each pair of images. The two label values being
different persons or the same person.
DESCR : string
Description of the Labeled Faces in the Wild (LFW) dataset.
"""
lfw_home, data_folder_path = check_fetch_lfw(
data_home=data_home, funneled=funneled,
download_if_missing=download_if_missing)
logger.info('Loading %s LFW pairs from %s', subset, lfw_home)
# wrap the loader in a memoizing function that will return memmaped data
# arrays for optimal memory usage
m = Memory(cachedir=lfw_home, compress=6, verbose=0)
load_func = m.cache(_fetch_lfw_pairs)
# select the right metadata file according to the requested subset
label_filenames = {
'train': 'pairsDevTrain.txt',
'test': 'pairsDevTest.txt',
'10_folds': 'pairs.txt',
}
if subset not in label_filenames:
raise ValueError("subset='%s' is invalid: should be one of %r" % (
subset, list(sorted(label_filenames.keys()))))
index_file_path = join(lfw_home, label_filenames[subset])
# load and memoize the pairs as np arrays
pairs, target, target_names = load_func(
index_file_path, data_folder_path, resize=resize, color=color,
slice_=slice_)
# pack the results as a Bunch instance
return Bunch(data=pairs.reshape(len(pairs), -1), pairs=pairs,
target=target, target_names=target_names,
DESCR="'%s' segment of the LFW pairs dataset" % subset)
@deprecated("Function 'load_lfw_pairs' has been deprecated in 0.17 and will "
"be removed in 0.19."
"Use fetch_lfw_pairs(download_if_missing=False) instead.")
def load_lfw_pairs(download_if_missing=False, **kwargs):
"""
Alias for fetch_lfw_pairs(download_if_missing=False)
.. deprecated:: 0.17
This function will be removed in 0.19.
Use :func:`sklearn.datasets.fetch_lfw_pairs` with parameter
``download_if_missing=False`` instead.
Check fetch_lfw_pairs.__doc__ for the documentation and parameter list.
"""
return fetch_lfw_pairs(download_if_missing=download_if_missing, **kwargs)
| bsd-3-clause |
ashhher3/scikit-learn | sklearn/metrics/tests/test_ranking.py | 11 | 37239 | from __future__ import division, print_function
import numpy as np
from itertools import product
import warnings
from sklearn import datasets
from sklearn import svm
from sklearn import ensemble
from sklearn.datasets import make_multilabel_classification
from sklearn.random_projection import sparse_random_matrix
from sklearn.utils.validation import check_array, check_consistent_length
from sklearn.utils.validation import check_random_state
from sklearn.utils.testing import assert_raises, clean_warning_registry
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn.metrics import auc
from sklearn.metrics import average_precision_score
from sklearn.metrics import coverage_error
from sklearn.metrics import label_ranking_average_precision_score
from sklearn.metrics import precision_recall_curve
from sklearn.metrics import roc_auc_score
from sklearn.metrics import roc_curve
from sklearn.metrics.base import UndefinedMetricWarning
###############################################################################
# Utilities for testing
def make_prediction(dataset=None, binary=False):
"""Make some classification predictions on a toy dataset using a SVC
If binary is True restrict to a binary classification problem instead of a
multiclass classification problem
"""
if dataset is None:
# import some data to play with
dataset = datasets.load_iris()
X = dataset.data
y = dataset.target
if binary:
# restrict to a binary classification task
X, y = X[y < 2], y[y < 2]
n_samples, n_features = X.shape
p = np.arange(n_samples)
rng = check_random_state(37)
rng.shuffle(p)
X, y = X[p], y[p]
half = int(n_samples / 2)
# add noisy features to make the problem harder and avoid perfect results
rng = np.random.RandomState(0)
X = np.c_[X, rng.randn(n_samples, 200 * n_features)]
# run classifier, get class probabilities and label predictions
clf = svm.SVC(kernel='linear', probability=True, random_state=0)
probas_pred = clf.fit(X[:half], y[:half]).predict_proba(X[half:])
if binary:
# only interested in probabilities of the positive case
# XXX: do we really want a special API for the binary case?
probas_pred = probas_pred[:, 1]
y_pred = clf.predict(X[half:])
y_true = y[half:]
return y_true, y_pred, probas_pred
###############################################################################
# Tests
def _auc(y_true, y_score):
"""Alternative implementation to check for correctness of
`roc_auc_score`."""
pos_label = np.unique(y_true)[1]
# Count the number of times positive samples are correctly ranked above
# negative samples.
pos = y_score[y_true == pos_label]
neg = y_score[y_true != pos_label]
diff_matrix = pos.reshape(1, -1) - neg.reshape(-1, 1)
n_correct = np.sum(diff_matrix > 0)
return n_correct / float(len(pos) * len(neg))
def _average_precision(y_true, y_score):
"""Alternative implementation to check for correctness of
`average_precision_score`."""
pos_label = np.unique(y_true)[1]
n_pos = np.sum(y_true == pos_label)
order = np.argsort(y_score)[::-1]
y_score = y_score[order]
y_true = y_true[order]
score = 0
for i in range(len(y_score)):
if y_true[i] == pos_label:
# Compute precision up to document i
# i.e, percentage of relevant documents up to document i.
prec = 0
for j in range(0, i + 1):
if y_true[j] == pos_label:
prec += 1.0
prec /= (i + 1.0)
score += prec
return score / n_pos
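def test_auc_and_average_precision_toy_example():
    # Illustrative, hand-checked toy example (added for clarity; not part of
    # the original test suite).  With y_true = [0, 0, 1, 1] and scores
    # [0.1, 0.4, 0.35, 0.8]:
    # * three of the four (positive, negative) pairs are ranked correctly
    #   (0.35 < 0.4 is the only inversion), so AUC = 3 / 4 = 0.75;
    # * ranked by decreasing score the positives sit at ranks 1 and 3, so
    #   average precision = (1/1 + 2/3) / 2 = 5 / 6.
    y_true = np.array([0, 0, 1, 1])
    y_score = np.array([0.1, 0.4, 0.35, 0.8])
    assert_almost_equal(_auc(y_true, y_score), 0.75)
    assert_almost_equal(roc_auc_score(y_true, y_score), 0.75)
    assert_almost_equal(_average_precision(y_true, y_score), 5. / 6)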
def test_roc_curve():
"""Test Area under Receiver Operating Characteristic (ROC) curve"""
y_true, _, probas_pred = make_prediction(binary=True)
fpr, tpr, thresholds = roc_curve(y_true, probas_pred)
roc_auc = auc(fpr, tpr)
expected_auc = _auc(y_true, probas_pred)
assert_array_almost_equal(roc_auc, expected_auc, decimal=2)
assert_almost_equal(roc_auc, roc_auc_score(y_true, probas_pred))
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
def test_roc_curve_end_points():
    # Make sure that roc_curve returns a curve that starts at 0 and ends at 1,
    # even in corner cases
rng = np.random.RandomState(0)
y_true = np.array([0] * 50 + [1] * 50)
y_pred = rng.randint(3, size=100)
fpr, tpr, thr = roc_curve(y_true, y_pred)
assert_equal(fpr[0], 0)
assert_equal(fpr[-1], 1)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thr.shape)
def test_roc_returns_consistency():
"""Test whether the returned threshold matches up with tpr"""
# make small toy dataset
y_true, _, probas_pred = make_prediction(binary=True)
fpr, tpr, thresholds = roc_curve(y_true, probas_pred)
# use the given thresholds to determine the tpr
tpr_correct = []
for t in thresholds:
tp = np.sum((probas_pred >= t) & y_true)
p = np.sum(y_true)
tpr_correct.append(1.0 * tp / p)
# compare tpr and tpr_correct to see if the thresholds' order was correct
assert_array_almost_equal(tpr, tpr_correct, decimal=2)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
def test_roc_nonrepeating_thresholds():
"""Test to ensure that we don't return spurious repeating thresholds.
Duplicated thresholds can arise due to machine precision issues.
"""
dataset = datasets.load_digits()
X = dataset['data']
y = dataset['target']
# This random forest classifier can only return probabilities
# significant to two decimal places
clf = ensemble.RandomForestClassifier(n_estimators=100, random_state=0)
# How well can the classifier predict whether a digit is less than 5?
# This task contributes floating point roundoff errors to the probabilities
train, test = slice(None, None, 2), slice(1, None, 2)
probas_pred = clf.fit(X[train], y[train]).predict_proba(X[test])
y_score = probas_pred[:, :5].sum(axis=1) # roundoff errors begin here
y_true = [yy < 5 for yy in y[test]]
# Check for repeating values in the thresholds
fpr, tpr, thresholds = roc_curve(y_true, y_score)
assert_equal(thresholds.size, np.unique(np.round(thresholds, 2)).size)
def test_roc_curve_multi():
"""roc_curve not applicable for multi-class problems"""
y_true, _, probas_pred = make_prediction(binary=False)
assert_raises(ValueError, roc_curve, y_true, probas_pred)
def test_roc_curve_confidence():
"""roc_curve for confidence scores"""
y_true, _, probas_pred = make_prediction(binary=True)
fpr, tpr, thresholds = roc_curve(y_true, probas_pred - 0.5)
roc_auc = auc(fpr, tpr)
assert_array_almost_equal(roc_auc, 0.90, decimal=2)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
def test_roc_curve_hard():
"""roc_curve for hard decisions"""
y_true, pred, probas_pred = make_prediction(binary=True)
# always predict one
trivial_pred = np.ones(y_true.shape)
fpr, tpr, thresholds = roc_curve(y_true, trivial_pred)
roc_auc = auc(fpr, tpr)
assert_array_almost_equal(roc_auc, 0.50, decimal=2)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
# always predict zero
trivial_pred = np.zeros(y_true.shape)
fpr, tpr, thresholds = roc_curve(y_true, trivial_pred)
roc_auc = auc(fpr, tpr)
assert_array_almost_equal(roc_auc, 0.50, decimal=2)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
# hard decisions
fpr, tpr, thresholds = roc_curve(y_true, pred)
roc_auc = auc(fpr, tpr)
assert_array_almost_equal(roc_auc, 0.78, decimal=2)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
def test_roc_curve_one_label():
y_true = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
y_pred = [0, 1, 0, 1, 0, 1, 0, 1, 0, 1]
# assert there are warnings
w = UndefinedMetricWarning
fpr, tpr, thresholds = assert_warns(w, roc_curve, y_true, y_pred)
# all true labels, all fpr should be nan
assert_array_equal(fpr,
np.nan * np.ones(len(thresholds)))
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
# assert there are warnings
fpr, tpr, thresholds = assert_warns(w, roc_curve,
[1 - x for x in y_true],
y_pred)
# all negative labels, all tpr should be nan
assert_array_equal(tpr,
np.nan * np.ones(len(thresholds)))
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
def test_roc_curve_toydata():
# Binary classification
y_true = [0, 1]
y_score = [0, 1]
tpr, fpr, _ = roc_curve(y_true, y_score)
roc_auc = roc_auc_score(y_true, y_score)
assert_array_almost_equal(tpr, [0, 1])
assert_array_almost_equal(fpr, [1, 1])
assert_almost_equal(roc_auc, 1.)
y_true = [0, 1]
y_score = [1, 0]
tpr, fpr, _ = roc_curve(y_true, y_score)
roc_auc = roc_auc_score(y_true, y_score)
assert_array_almost_equal(tpr, [0, 1, 1])
assert_array_almost_equal(fpr, [0, 0, 1])
assert_almost_equal(roc_auc, 0.)
y_true = [1, 0]
y_score = [1, 1]
tpr, fpr, _ = roc_curve(y_true, y_score)
roc_auc = roc_auc_score(y_true, y_score)
assert_array_almost_equal(tpr, [0, 1])
assert_array_almost_equal(fpr, [0, 1])
assert_almost_equal(roc_auc, 0.5)
y_true = [1, 0]
y_score = [1, 0]
tpr, fpr, _ = roc_curve(y_true, y_score)
roc_auc = roc_auc_score(y_true, y_score)
assert_array_almost_equal(tpr, [0, 1])
assert_array_almost_equal(fpr, [1, 1])
assert_almost_equal(roc_auc, 1.)
y_true = [1, 0]
y_score = [0.5, 0.5]
tpr, fpr, _ = roc_curve(y_true, y_score)
roc_auc = roc_auc_score(y_true, y_score)
assert_array_almost_equal(tpr, [0, 1])
assert_array_almost_equal(fpr, [0, 1])
assert_almost_equal(roc_auc, .5)
y_true = [0, 0]
y_score = [0.25, 0.75]
tpr, fpr, _ = roc_curve(y_true, y_score)
assert_raises(ValueError, roc_auc_score, y_true, y_score)
assert_array_almost_equal(tpr, [0., 0.5, 1.])
assert_array_almost_equal(fpr, [np.nan, np.nan, np.nan])
y_true = [1, 1]
y_score = [0.25, 0.75]
tpr, fpr, _ = roc_curve(y_true, y_score)
assert_raises(ValueError, roc_auc_score, y_true, y_score)
assert_array_almost_equal(tpr, [np.nan, np.nan])
assert_array_almost_equal(fpr, [0.5, 1.])
# Multi-label classification task
y_true = np.array([[0, 1], [0, 1]])
y_score = np.array([[0, 1], [0, 1]])
assert_raises(ValueError, roc_auc_score, y_true, y_score, average="macro")
assert_raises(ValueError, roc_auc_score, y_true, y_score,
average="weighted")
assert_almost_equal(roc_auc_score(y_true, y_score, average="samples"), 1.)
assert_almost_equal(roc_auc_score(y_true, y_score, average="micro"), 1.)
y_true = np.array([[0, 1], [0, 1]])
y_score = np.array([[0, 1], [1, 0]])
assert_raises(ValueError, roc_auc_score, y_true, y_score, average="macro")
assert_raises(ValueError, roc_auc_score, y_true, y_score,
average="weighted")
assert_almost_equal(roc_auc_score(y_true, y_score, average="samples"), 0.5)
assert_almost_equal(roc_auc_score(y_true, y_score, average="micro"), 0.5)
y_true = np.array([[1, 0], [0, 1]])
y_score = np.array([[0, 1], [1, 0]])
assert_almost_equal(roc_auc_score(y_true, y_score, average="macro"), 0)
assert_almost_equal(roc_auc_score(y_true, y_score, average="weighted"), 0)
assert_almost_equal(roc_auc_score(y_true, y_score, average="samples"), 0)
assert_almost_equal(roc_auc_score(y_true, y_score, average="micro"), 0)
y_true = np.array([[1, 0], [0, 1]])
y_score = np.array([[0.5, 0.5], [0.5, 0.5]])
assert_almost_equal(roc_auc_score(y_true, y_score, average="macro"), .5)
assert_almost_equal(roc_auc_score(y_true, y_score, average="weighted"), .5)
assert_almost_equal(roc_auc_score(y_true, y_score, average="samples"), .5)
assert_almost_equal(roc_auc_score(y_true, y_score, average="micro"), .5)
def test_auc():
"""Test Area Under Curve (AUC) computation"""
x = [0, 1]
y = [0, 1]
assert_array_almost_equal(auc(x, y), 0.5)
x = [1, 0]
y = [0, 1]
assert_array_almost_equal(auc(x, y), 0.5)
x = [1, 0, 0]
y = [0, 1, 1]
assert_array_almost_equal(auc(x, y), 0.5)
x = [0, 1]
y = [1, 1]
assert_array_almost_equal(auc(x, y), 1)
x = [0, 0.5, 1]
y = [0, 0.5, 1]
assert_array_almost_equal(auc(x, y), 0.5)
def test_auc_duplicate_values():
# Test Area Under Curve (AUC) computation with duplicate values
# auc() was previously sorting the x and y arrays according to the indices
# from numpy.argsort(x), which was reordering the tied 0's in this example
# and resulting in an incorrect area computation. This test detects the
# error.
x = [-2.0, 0.0, 0.0, 0.0, 1.0]
y1 = [2.0, 0.0, 0.5, 1.0, 1.0]
y2 = [2.0, 1.0, 0.0, 0.5, 1.0]
y3 = [2.0, 1.0, 0.5, 0.0, 1.0]
for y in (y1, y2, y3):
assert_array_almost_equal(auc(x, y, reorder=True), 3.0)
def test_auc_errors():
# Incompatible shapes
assert_raises(ValueError, auc, [0.0, 0.5, 1.0], [0.1, 0.2])
# Too few x values
assert_raises(ValueError, auc, [0.0], [0.1])
# x is not in order
assert_raises(ValueError, auc, [1.0, 0.0, 0.5], [0.0, 0.0, 0.0])
def test_auc_score_non_binary_class():
"""Test that roc_auc_score function returns an error when trying
to compute AUC for non-binary class values.
"""
rng = check_random_state(404)
y_pred = rng.rand(10)
# y_true contains only one class value
y_true = np.zeros(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
y_true = np.ones(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
y_true = -np.ones(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
# y_true contains three different class values
y_true = rng.randint(0, 3, size=10)
assert_raise_message(ValueError, "multiclass format is not supported",
roc_auc_score, y_true, y_pred)
clean_warning_registry()
with warnings.catch_warnings(record=True):
rng = check_random_state(404)
y_pred = rng.rand(10)
# y_true contains only one class value
y_true = np.zeros(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
y_true = np.ones(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
y_true = -np.ones(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
# y_true contains three different class values
y_true = rng.randint(0, 3, size=10)
assert_raise_message(ValueError, "multiclass format is not supported",
roc_auc_score, y_true, y_pred)
def test_precision_recall_curve():
y_true, _, probas_pred = make_prediction(binary=True)
_test_precision_recall_curve(y_true, probas_pred)
# Use {-1, 1} for labels; make sure original labels aren't modified
y_true[np.where(y_true == 0)] = -1
y_true_copy = y_true.copy()
_test_precision_recall_curve(y_true, probas_pred)
assert_array_equal(y_true_copy, y_true)
labels = [1, 0, 0, 1]
predict_probas = [1, 2, 3, 4]
p, r, t = precision_recall_curve(labels, predict_probas)
assert_array_almost_equal(p, np.array([0.5, 0.33333333, 0.5, 1., 1.]))
assert_array_almost_equal(r, np.array([1., 0.5, 0.5, 0.5, 0.]))
assert_array_almost_equal(t, np.array([1, 2, 3, 4]))
assert_equal(p.size, r.size)
assert_equal(p.size, t.size + 1)
def test_precision_recall_curve_pos_label():
y_true, _, probas_pred = make_prediction(binary=False)
pos_label = 2
p, r, thresholds = precision_recall_curve(y_true,
probas_pred[:, pos_label],
pos_label=pos_label)
p2, r2, thresholds2 = precision_recall_curve(y_true == pos_label,
probas_pred[:, pos_label])
assert_array_almost_equal(p, p2)
assert_array_almost_equal(r, r2)
assert_array_almost_equal(thresholds, thresholds2)
assert_equal(p.size, r.size)
assert_equal(p.size, thresholds.size + 1)
def _test_precision_recall_curve(y_true, probas_pred):
"""Test Precision-Recall and aread under PR curve"""
p, r, thresholds = precision_recall_curve(y_true, probas_pred)
precision_recall_auc = auc(r, p)
assert_array_almost_equal(precision_recall_auc, 0.85, 2)
assert_array_almost_equal(precision_recall_auc,
average_precision_score(y_true, probas_pred))
assert_almost_equal(_average_precision(y_true, probas_pred),
precision_recall_auc, 1)
assert_equal(p.size, r.size)
assert_equal(p.size, thresholds.size + 1)
# Smoke test in the case of proba having only one value
p, r, thresholds = precision_recall_curve(y_true,
np.zeros_like(probas_pred))
precision_recall_auc = auc(r, p)
assert_array_almost_equal(precision_recall_auc, 0.75, 3)
assert_equal(p.size, r.size)
assert_equal(p.size, thresholds.size + 1)
def test_precision_recall_curve_errors():
# Contains non-binary labels
assert_raises(ValueError, precision_recall_curve,
[0, 1, 2], [[0.0], [1.0], [1.0]])
def test_precision_recall_curve_toydata():
with np.errstate(all="raise"):
# Binary classification
y_true = [0, 1]
y_score = [0, 1]
p, r, _ = precision_recall_curve(y_true, y_score)
auc_prc = average_precision_score(y_true, y_score)
assert_array_almost_equal(p, [1, 1])
assert_array_almost_equal(r, [1, 0])
assert_almost_equal(auc_prc, 1.)
y_true = [0, 1]
y_score = [1, 0]
p, r, _ = precision_recall_curve(y_true, y_score)
auc_prc = average_precision_score(y_true, y_score)
assert_array_almost_equal(p, [0.5, 0., 1.])
assert_array_almost_equal(r, [1., 0., 0.])
assert_almost_equal(auc_prc, 0.25)
y_true = [1, 0]
y_score = [1, 1]
p, r, _ = precision_recall_curve(y_true, y_score)
auc_prc = average_precision_score(y_true, y_score)
assert_array_almost_equal(p, [0.5, 1])
assert_array_almost_equal(r, [1., 0])
assert_almost_equal(auc_prc, .75)
y_true = [1, 0]
y_score = [1, 0]
p, r, _ = precision_recall_curve(y_true, y_score)
auc_prc = average_precision_score(y_true, y_score)
assert_array_almost_equal(p, [1, 1])
assert_array_almost_equal(r, [1, 0])
assert_almost_equal(auc_prc, 1.)
y_true = [1, 0]
y_score = [0.5, 0.5]
p, r, _ = precision_recall_curve(y_true, y_score)
auc_prc = average_precision_score(y_true, y_score)
assert_array_almost_equal(p, [0.5, 1])
assert_array_almost_equal(r, [1, 0.])
assert_almost_equal(auc_prc, .75)
y_true = [0, 0]
y_score = [0.25, 0.75]
assert_raises(Exception, precision_recall_curve, y_true, y_score)
assert_raises(Exception, average_precision_score, y_true, y_score)
y_true = [1, 1]
y_score = [0.25, 0.75]
p, r, _ = precision_recall_curve(y_true, y_score)
assert_almost_equal(average_precision_score(y_true, y_score), 1.)
assert_array_almost_equal(p, [1., 1., 1.])
assert_array_almost_equal(r, [1, 0.5, 0.])
# Multi-label classification task
y_true = np.array([[0, 1], [0, 1]])
y_score = np.array([[0, 1], [0, 1]])
assert_raises(Exception, average_precision_score, y_true, y_score,
average="macro")
assert_raises(Exception, average_precision_score, y_true, y_score,
average="weighted")
assert_almost_equal(average_precision_score(y_true, y_score,
average="samples"), 1.)
assert_almost_equal(average_precision_score(y_true, y_score,
average="micro"), 1.)
y_true = np.array([[0, 1], [0, 1]])
y_score = np.array([[0, 1], [1, 0]])
assert_raises(Exception, average_precision_score, y_true, y_score,
average="macro")
assert_raises(Exception, average_precision_score, y_true, y_score,
average="weighted")
assert_almost_equal(average_precision_score(y_true, y_score,
average="samples"), 0.625)
assert_almost_equal(average_precision_score(y_true, y_score,
average="micro"), 0.625)
y_true = np.array([[1, 0], [0, 1]])
y_score = np.array([[0, 1], [1, 0]])
assert_almost_equal(average_precision_score(y_true, y_score,
average="macro"), 0.25)
assert_almost_equal(average_precision_score(y_true, y_score,
average="weighted"), 0.25)
assert_almost_equal(average_precision_score(y_true, y_score,
average="samples"), 0.25)
assert_almost_equal(average_precision_score(y_true, y_score,
average="micro"), 0.25)
y_true = np.array([[1, 0], [0, 1]])
y_score = np.array([[0.5, 0.5], [0.5, 0.5]])
assert_almost_equal(average_precision_score(y_true, y_score,
average="macro"), 0.75)
assert_almost_equal(average_precision_score(y_true, y_score,
average="weighted"), 0.75)
assert_almost_equal(average_precision_score(y_true, y_score,
average="samples"), 0.75)
assert_almost_equal(average_precision_score(y_true, y_score,
average="micro"), 0.75)
def test_score_scale_invariance():
    # Test that average_precision_score and roc_auc_score are invariant under
    # scaling or shifting of the probabilities
y_true, _, probas_pred = make_prediction(binary=True)
roc_auc = roc_auc_score(y_true, probas_pred)
roc_auc_scaled = roc_auc_score(y_true, 100 * probas_pred)
roc_auc_shifted = roc_auc_score(y_true, probas_pred - 10)
assert_equal(roc_auc, roc_auc_scaled)
assert_equal(roc_auc, roc_auc_shifted)
pr_auc = average_precision_score(y_true, probas_pred)
pr_auc_scaled = average_precision_score(y_true, 100 * probas_pred)
pr_auc_shifted = average_precision_score(y_true, probas_pred - 10)
assert_equal(pr_auc, pr_auc_scaled)
assert_equal(pr_auc, pr_auc_shifted)
def check_lrap_toy(lrap_score):
"""Check on several small example that it works """
assert_almost_equal(lrap_score([[0, 1]], [[0.25, 0.75]]), 1)
assert_almost_equal(lrap_score([[0, 1]], [[0.75, 0.25]]), 1 / 2)
assert_almost_equal(lrap_score([[1, 1]], [[0.75, 0.25]]), 1)
assert_almost_equal(lrap_score([[0, 0, 1]], [[0.25, 0.5, 0.75]]), 1)
assert_almost_equal(lrap_score([[0, 1, 0]], [[0.25, 0.5, 0.75]]), 1 / 2)
assert_almost_equal(lrap_score([[0, 1, 1]], [[0.25, 0.5, 0.75]]), 1)
assert_almost_equal(lrap_score([[1, 0, 0]], [[0.25, 0.5, 0.75]]), 1 / 3)
assert_almost_equal(lrap_score([[1, 0, 1]], [[0.25, 0.5, 0.75]]),
(2 / 3 + 1 / 1) / 2)
assert_almost_equal(lrap_score([[1, 1, 0]], [[0.25, 0.5, 0.75]]),
(2 / 3 + 1 / 2) / 2)
assert_almost_equal(lrap_score([[0, 0, 1]], [[0.75, 0.5, 0.25]]), 1 / 3)
assert_almost_equal(lrap_score([[0, 1, 0]], [[0.75, 0.5, 0.25]]), 1 / 2)
assert_almost_equal(lrap_score([[0, 1, 1]], [[0.75, 0.5, 0.25]]),
(1 / 2 + 2 / 3) / 2)
assert_almost_equal(lrap_score([[1, 0, 0]], [[0.75, 0.5, 0.25]]), 1)
assert_almost_equal(lrap_score([[1, 0, 1]], [[0.75, 0.5, 0.25]]),
(1 + 2 / 3) / 2)
assert_almost_equal(lrap_score([[1, 1, 0]], [[0.75, 0.5, 0.25]]), 1)
assert_almost_equal(lrap_score([[1, 1, 1]], [[0.75, 0.5, 0.25]]), 1)
assert_almost_equal(lrap_score([[0, 0, 1]], [[0.5, 0.75, 0.25]]), 1 / 3)
assert_almost_equal(lrap_score([[0, 1, 0]], [[0.5, 0.75, 0.25]]), 1)
assert_almost_equal(lrap_score([[0, 1, 1]], [[0.5, 0.75, 0.25]]),
(1 + 2 / 3) / 2)
assert_almost_equal(lrap_score([[1, 0, 0]], [[0.5, 0.75, 0.25]]), 1 / 2)
assert_almost_equal(lrap_score([[1, 0, 1]], [[0.5, 0.75, 0.25]]),
(1 / 2 + 2 / 3) / 2)
assert_almost_equal(lrap_score([[1, 1, 0]], [[0.5, 0.75, 0.25]]), 1)
assert_almost_equal(lrap_score([[1, 1, 1]], [[0.5, 0.75, 0.25]]), 1)
# Tie handling
assert_almost_equal(lrap_score([[1, 0]], [[0.5, 0.5]]), 0.5)
assert_almost_equal(lrap_score([[0, 1]], [[0.5, 0.5]]), 0.5)
assert_almost_equal(lrap_score([[1, 1]], [[0.5, 0.5]]), 1)
assert_almost_equal(lrap_score([[0, 0, 1]], [[0.25, 0.5, 0.5]]), 0.5)
assert_almost_equal(lrap_score([[0, 1, 0]], [[0.25, 0.5, 0.5]]), 0.5)
assert_almost_equal(lrap_score([[0, 1, 1]], [[0.25, 0.5, 0.5]]), 1)
assert_almost_equal(lrap_score([[1, 0, 0]], [[0.25, 0.5, 0.5]]), 1 / 3)
assert_almost_equal(lrap_score([[1, 0, 1]], [[0.25, 0.5, 0.5]]),
(2 / 3 + 1 / 2) / 2)
assert_almost_equal(lrap_score([[1, 1, 0]], [[0.25, 0.5, 0.5]]),
(2 / 3 + 1 / 2) / 2)
assert_almost_equal(lrap_score([[1, 1, 1]], [[0.25, 0.5, 0.5]]), 1)
assert_almost_equal(lrap_score([[1, 1, 0]], [[0.5, 0.5, 0.5]]), 2 / 3)
assert_almost_equal(lrap_score([[1, 1, 1, 0]], [[0.5, 0.5, 0.5, 0.5]]),
3 / 4)
def check_zero_or_all_relevant_labels(lrap_score):
random_state = check_random_state(0)
for n_labels in range(2, 5):
y_score = random_state.uniform(size=(1, n_labels))
y_score_ties = np.zeros_like(y_score)
# No relevant labels
y_true = np.zeros((1, n_labels))
assert_equal(lrap_score(y_true, y_score), 1.)
assert_equal(lrap_score(y_true, y_score_ties), 1.)
# Only relevant labels
y_true = np.ones((1, n_labels))
assert_equal(lrap_score(y_true, y_score), 1.)
assert_equal(lrap_score(y_true, y_score_ties), 1.)
# Degenerate case: only one label
assert_almost_equal(lrap_score([[1], [0], [1], [0]],
[[0.5], [0.5], [0.5], [0.5]]), 1.)
def check_lrap_error_raised(lrap_score):
# Raise value error if not appropriate format
assert_raises(ValueError, lrap_score,
[0, 1, 0], [0.25, 0.3, 0.2])
assert_raises(ValueError, lrap_score, [0, 1, 2],
[[0.25, 0.75, 0.0], [0.7, 0.3, 0.0], [0.8, 0.2, 0.0]])
assert_raises(ValueError, lrap_score, [(0), (1), (2)],
[[0.25, 0.75, 0.0], [0.7, 0.3, 0.0], [0.8, 0.2, 0.0]])
    # Check that y_true.shape != y_score.shape raises the proper exception
assert_raises(ValueError, lrap_score, [[0, 1], [0, 1]], [0, 1])
assert_raises(ValueError, lrap_score, [[0, 1], [0, 1]], [[0, 1]])
assert_raises(ValueError, lrap_score, [[0, 1], [0, 1]], [[0], [1]])
assert_raises(ValueError, lrap_score, [[0, 1]], [[0, 1], [0, 1]])
assert_raises(ValueError, lrap_score, [[0], [1]], [[0, 1], [0, 1]])
assert_raises(ValueError, lrap_score, [[0, 1], [0, 1]], [[0], [1]])
def check_lrap_only_ties(lrap_score):
"""Check tie handling in score"""
# Basic check with only ties and increasing label space
for n_labels in range(2, 10):
y_score = np.ones((1, n_labels))
        # Check for a growing number of consecutive relevant labels
for n_relevant in range(1, n_labels):
# Check for a bunch of positions
for pos in range(n_labels - n_relevant):
y_true = np.zeros((1, n_labels))
y_true[0, pos:pos + n_relevant] = 1
assert_almost_equal(lrap_score(y_true, y_score),
n_relevant / n_labels)
def check_lrap_without_tie_and_increasing_score(lrap_score):
""" Check that Label ranking average precision works for various"""
# Basic check with increasing label space size and decreasing score
for n_labels in range(2, 10):
y_score = n_labels - (np.arange(n_labels).reshape((1, n_labels)) + 1)
# First and last
y_true = np.zeros((1, n_labels))
y_true[0, 0] = 1
y_true[0, -1] = 1
assert_almost_equal(lrap_score(y_true, y_score),
(2 / n_labels + 1) / 2)
        # Check for a growing number of consecutive relevant labels
for n_relevant in range(1, n_labels):
            # Check for a bunch of positions
for pos in range(n_labels - n_relevant):
y_true = np.zeros((1, n_labels))
y_true[0, pos:pos + n_relevant] = 1
assert_almost_equal(lrap_score(y_true, y_score),
sum((r + 1) / ((pos + r + 1) * n_relevant)
for r in range(n_relevant)))
def _my_lrap(y_true, y_score):
"""Simple implementation of label ranking average precision"""
check_consistent_length(y_true, y_score)
y_true = check_array(y_true)
y_score = check_array(y_score)
n_samples, n_labels = y_true.shape
score = np.empty((n_samples, ))
for i in range(n_samples):
        # The best rank corresponds to 1. Ranks higher than 1 are worse.
        # The best inverse ranking corresponds to n_labels.
unique_rank, inv_rank = np.unique(y_score[i], return_inverse=True)
n_ranks = unique_rank.size
rank = n_ranks - inv_rank
        # Ranks need to be corrected to take ties into account
        # ex: rank 1 ex aequo means that both labels are rank 2.
corr_rank = np.bincount(rank, minlength=n_ranks + 1).cumsum()
rank = corr_rank[rank]
relevant = y_true[i].nonzero()[0]
if relevant.size == 0 or relevant.size == n_labels:
score[i] = 1
continue
score[i] = 0.
for label in relevant:
            # Let's count the number of relevant labels with a better rank
# (smaller rank).
n_ranked_above = sum(rank[r] <= rank[label] for r in relevant)
# Weight by the rank of the actual label
score[i] += n_ranked_above / rank[label]
score[i] /= relevant.size
return score.mean()
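# Sanity-check sketch for the reference implementation above, reusing a value
# from check_lrap_toy (no new data is assumed):
#
#   _my_lrap(np.array([[1, 0, 1]]), np.array([[0.75, 0.5, 0.25]]))
#   # == (1 + 2/3) / 2, matching label_ranking_average_precision_score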
def check_alternative_lrap_implementation(lrap_score, n_classes=5,
n_samples=20, random_state=0):
_, y_true = make_multilabel_classification(n_features=1,
allow_unlabeled=False,
return_indicator=True,
random_state=random_state,
n_classes=n_classes,
n_samples=n_samples)
# Score with ties
y_score = sparse_random_matrix(n_components=y_true.shape[0],
n_features=y_true.shape[1],
random_state=random_state)
if hasattr(y_score, "toarray"):
y_score = y_score.toarray()
score_lrap = label_ranking_average_precision_score(y_true, y_score)
score_my_lrap = _my_lrap(y_true, y_score)
assert_almost_equal(score_lrap, score_my_lrap)
# Uniform score
random_state = check_random_state(random_state)
y_score = random_state.uniform(size=(n_samples, n_classes))
score_lrap = label_ranking_average_precision_score(y_true, y_score)
score_my_lrap = _my_lrap(y_true, y_score)
assert_almost_equal(score_lrap, score_my_lrap)
def test_label_ranking_avp():
for fn in [label_ranking_average_precision_score, _my_lrap]:
yield check_lrap_toy, fn
yield check_lrap_without_tie_and_increasing_score, fn
yield check_lrap_only_ties, fn
yield check_zero_or_all_relevant_labels, fn
yield check_lrap_error_raised, label_ranking_average_precision_score
for n_samples, n_classes, random_state in product((1, 2, 8, 20),
(2, 5, 10),
range(1)):
yield (check_alternative_lrap_implementation,
label_ranking_average_precision_score,
n_classes, n_samples, random_state)
def test_coverage_error():
# Toy case
assert_almost_equal(coverage_error([[0, 1]], [[0.25, 0.75]]), 1)
assert_almost_equal(coverage_error([[0, 1]], [[0.75, 0.25]]), 2)
assert_almost_equal(coverage_error([[1, 1]], [[0.75, 0.25]]), 2)
assert_almost_equal(coverage_error([[0, 0]], [[0.75, 0.25]]), 0)
assert_almost_equal(coverage_error([[0, 0, 0]], [[0.25, 0.5, 0.75]]), 0)
assert_almost_equal(coverage_error([[0, 0, 1]], [[0.25, 0.5, 0.75]]), 1)
assert_almost_equal(coverage_error([[0, 1, 0]], [[0.25, 0.5, 0.75]]), 2)
assert_almost_equal(coverage_error([[0, 1, 1]], [[0.25, 0.5, 0.75]]), 2)
assert_almost_equal(coverage_error([[1, 0, 0]], [[0.25, 0.5, 0.75]]), 3)
assert_almost_equal(coverage_error([[1, 0, 1]], [[0.25, 0.5, 0.75]]), 3)
assert_almost_equal(coverage_error([[1, 1, 0]], [[0.25, 0.5, 0.75]]), 3)
assert_almost_equal(coverage_error([[1, 1, 1]], [[0.25, 0.5, 0.75]]), 3)
assert_almost_equal(coverage_error([[0, 0, 0]], [[0.75, 0.5, 0.25]]), 0)
assert_almost_equal(coverage_error([[0, 0, 1]], [[0.75, 0.5, 0.25]]), 3)
assert_almost_equal(coverage_error([[0, 1, 0]], [[0.75, 0.5, 0.25]]), 2)
assert_almost_equal(coverage_error([[0, 1, 1]], [[0.75, 0.5, 0.25]]), 3)
assert_almost_equal(coverage_error([[1, 0, 0]], [[0.75, 0.5, 0.25]]), 1)
assert_almost_equal(coverage_error([[1, 0, 1]], [[0.75, 0.5, 0.25]]), 3)
assert_almost_equal(coverage_error([[1, 1, 0]], [[0.75, 0.5, 0.25]]), 2)
assert_almost_equal(coverage_error([[1, 1, 1]], [[0.75, 0.5, 0.25]]), 3)
assert_almost_equal(coverage_error([[0, 0, 0]], [[0.5, 0.75, 0.25]]), 0)
assert_almost_equal(coverage_error([[0, 0, 1]], [[0.5, 0.75, 0.25]]), 3)
assert_almost_equal(coverage_error([[0, 1, 0]], [[0.5, 0.75, 0.25]]), 1)
assert_almost_equal(coverage_error([[0, 1, 1]], [[0.5, 0.75, 0.25]]), 3)
assert_almost_equal(coverage_error([[1, 0, 0]], [[0.5, 0.75, 0.25]]), 2)
assert_almost_equal(coverage_error([[1, 0, 1]], [[0.5, 0.75, 0.25]]), 3)
assert_almost_equal(coverage_error([[1, 1, 0]], [[0.5, 0.75, 0.25]]), 2)
assert_almost_equal(coverage_error([[1, 1, 1]], [[0.5, 0.75, 0.25]]), 3)
    # Non-trivial cases
assert_almost_equal(coverage_error([[0, 1, 0], [1, 1, 0]],
[[0.1, 10., -3], [0, 1, 3]]),
(1 + 3) / 2.)
assert_almost_equal(coverage_error([[0, 1, 0], [1, 1, 0], [0, 1, 1]],
[[0.1, 10, -3], [0, 1, 3], [0, 2, 0]]),
(1 + 3 + 3) / 3.)
assert_almost_equal(coverage_error([[0, 1, 0], [1, 1, 0], [0, 1, 1]],
[[0.1, 10, -3], [3, 1, 3], [0, 2, 0]]),
(1 + 3 + 3) / 3.)
def test_coverage_tie_handling():
assert_almost_equal(coverage_error([[0, 0]], [[0.5, 0.5]]), 0)
assert_almost_equal(coverage_error([[1, 0]], [[0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[0, 1]], [[0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[1, 1]], [[0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[0, 0, 0]], [[0.25, 0.5, 0.5]]), 0)
assert_almost_equal(coverage_error([[0, 0, 1]], [[0.25, 0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[0, 1, 0]], [[0.25, 0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[0, 1, 1]], [[0.25, 0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[1, 0, 0]], [[0.25, 0.5, 0.5]]), 3)
assert_almost_equal(coverage_error([[1, 0, 1]], [[0.25, 0.5, 0.5]]), 3)
assert_almost_equal(coverage_error([[1, 1, 0]], [[0.25, 0.5, 0.5]]), 3)
assert_almost_equal(coverage_error([[1, 1, 1]], [[0.25, 0.5, 0.5]]), 3)
| bsd-3-clause |
nist-ionstorage/electrode | electrode/electrode.py | 1 | 18612 | # -*- coding: utf8 -*-
#
# electrode: numeric tools for Paul traps
#
# Copyright (C) 2011-2012 Robert Jordens <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, print_function,
unicode_literals, division)
import numpy as np
from scipy.ndimage.interpolation import map_coordinates
from .utils import area_centroid, construct_derivative
try:
if False: # test slow python only or fast numba expressions
raise ImportError
from .cexpressions import (point_potential, polygon_potential,
mesh_potential)
except ImportError:
from .expressions import (point_potential, polygon_potential,
mesh_potential)
class Electrode(object):
"""An electrode of a Paul trap.
Encapsulates the name, the dc and rf voltages and the electrical
potential contribution of an electrode.
Parameters
----------
name : str
dc : float
DC potential associated with the constituents of this electrode.
        The electrode's electrical potential is proportional to the DC
potential. Does not influence the pseudopotential contribution.
rf : float
        RF potential of this electrode. The pseudopotential contribution
of this electrode is proportional to the square of its RF
potential.
"""
__slots__ = "name dc rf".split()
def __init__(self, name="", dc=0., rf=0.):
self.name = name
self.dc = dc
self.rf = rf
def potential(self, x, derivative=0, potential=1., out=None):
"""Electrical potential contribution.
Return the specified derivative of the eletrical potential
contribution of this electrode assuming all other electrodes in
the system are grounded.
Parameters
----------
x : array_like, shape (n, 3)
Position to evaluate the electrical potential at. The first
dimension is used to evaluate at several points in parallel.
derivative : int
Derivative order of the potential. `derivative=0` returns
the potential, `derivative=1` the field/force,
`derivative=2` the curvature/hessian.
potential : float
        Scaling of the potential. Could be set to `self.rf` or
        `self.dc`, since this method is used to determine both
        potentials (electrical and pseudo). Scaling with `self.dc`
        and `self.rf` is done in the respective methods of the `System`
        instance that contains this electrode.
out : None or array_like, shape (n, 2*derivative + 1), double
Array to add the potential contribution to. Needs to be
zeroed before. If None, an array is created and returned.
Returns
-------
potential : array, shape(n, 2*derivative + 1), double
Output potential or `out` if given. The first dimension is
the point index (same as `x`) the second is the derivative
index. There are only `2*derivative + 1` values as the
            others are linearly dependent. See `utils.expand_tensor` and
`utils.select_tensor` for details and utility methods.
See Also
--------
utils.expand_tensor
Expand this reduced tensor to full form.
utils.cartesian_to_spherical_harmonics
Convert the reduced tensor to spherical harmonics.
utils.find_laplace
            Find partial derivatives that can be used to construct others
            using the vanishing trace of the Laplacian.
"""
raise NotImplementedError
def orientations(self):
"""Return the orientation of the electrode surfaces with respect
to the `z > 0` half space.
Positive orientation yields positive potential for
positive voltage and z>0.
.. note:: Only fully implemented for `PolygonPixelElectrode`.
"""
return np.array([])
def plot(self, ax, label=None, color=None, **kw):
"""Plot this electrode in the supplied axes.
Visualize a 2D projection of the electrode in the plot.
        .. note:: Only fully implemented in `PolygonPixelElectrode` and
`PointPixelElectrode`.
"""
pass
class CoverElectrode(Electrode):
"""
Continuous infinite conducting cover or mesh electrode.
Parameters
----------
height : float
height above `z = 0`
Notes
-----
* Only valid as part of a `System` consisting purely of
`SurfaceElectrode`.
* The other electrodes in the `System` all need to have their
`cover_height` adjusted and set to the same value as `height`
here. Otherwise their contributions are calculated wrong.
"""
__slots__ = "height".split()
def __init__(self, height=50., **kwargs):
super(CoverElectrode, self).__init__(**kwargs)
self.height = height
def potential(self, x, derivative=0, potential=1., out=None):
if out is None:
out = np.zeros((x.shape[0], 2*derivative+1), np.double)
if derivative == 0:
out[:, 0] += potential*x[:, 2]/self.height
elif derivative == 1:
out[:, 2] += potential/self.height
else:
pass
return out
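# Minimal usage sketch (hypothetical numbers, not part of the library): a cover
# plane at height 50 with unit potential contributes the linear potential z/50,
# so evaluating at z = 25 gives 0.5, and the field contribution is constant:
#
#   cover = CoverElectrode(name="cover", height=50.)
#   x = np.array([[0., 0., 25.]])
#   cover.potential(x, derivative=0)  # array([[0.5]])
#   cover.potential(x, derivative=1)  # z-component equals 1/50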
class SurfaceElectrode(Electrode):
"""A patch set embedded in a gapless infinite grounded conducting
plane at `z = 0`.
Subclasses of this class can make use of the cover electrode
potential expansion [1]_ and play together with a `CoverElectrode`
instance in a `System`.
Parameters
----------
cover_height : float
The height of the CoverElectrode plane above the `z=0` plane.
cover_nmax : int
Expansion order of the effect of the cover plane onto this
electrode's potential contribution.
See Also
--------
Electrode
`name`, `dc`, `rf` attributes/parameters
CoverElectrode
References
----------
.. [1] Roman Schmied et al. 2011 New J. Phys. 13 115011
http://dx.doi.org/10.1088/1367-2630/13/11/115011
"""
__slots__ = "cover_height cover_nmax".split()
def __init__(self, cover_height=50., cover_nmax=0, **kwargs):
super(SurfaceElectrode, self).__init__(**kwargs)
# cover plane height
self.cover_height = cover_height
# max components in cover plane potential expansion
self.cover_nmax = cover_nmax
class PointPixelElectrode(SurfaceElectrode):
"""Surface electrode comprising several small pixels.
The pixels are approximated as potential points. Their potential
contribution is scaled by `areas`.
Parameters
----------
points : array_like, shape (n, s)
Point pixel positions
areas : array_like, shape (n,)
Point pixel areas
See Also
--------
Electrode
`name`, `dc`, `rf` attributes/parameters
SurfaceElectrode
`cover_nmax` and `cover_height` attributes/constructor parameters
"""
__slots__ = "points areas".split()
def __init__(self, points=[], areas=[], **kwargs):
super(PointPixelElectrode, self).__init__(**kwargs)
self.points = np.asanyarray(points, np.double)
self.areas = np.asanyarray(areas, np.double)
def orientations(self):
return np.ones_like(self.areas)
def plot(self, ax, label=None, color=None, **kw):
import matplotlib as mpl
# color="red"?
p = self.points
a = (self.areas/np.pi)**.5*2
col = mpl.collections.EllipseCollection(
edgecolors="none",
#cmap=plt.cm.binary, norm=plt.Normalize(0, 1.),
facecolor=color,
# FIXME/workaround: x in matplotlib<r8111
widths=a, heights=a, units="xy",
angles=np.zeros(a.shape),
offsets=p[:, (0, 1)], transOffset=ax.transData)
ax.add_collection(col)
if label is None:
label = self.name
if label:
ax.text(p[:,0].mean(), p[:,1].mean(), label,
horizontalalignment="center",
verticalalignment="center")
def potential(self, x, derivative=0, potential=1., out=None):
return point_potential(x, self.points, self.areas, potential,
derivative, self.cover_nmax, self.cover_height, out)
class PolygonPixelElectrode(SurfaceElectrode):
"""Surface electrode comprising several polygonal patches.
Parameters
----------
paths : list of array_like, shape (n, 2)
Polygon boundaries as lists of points. Polygons with positive
        orientation (counterclockwise) contribute with positive sign.
        Those with negative orientation contribute with negative sign.
See Also
--------
Electrode
`name`, `dc`, `rf` attributes/parameters
SurfaceElectrode
`cover_nmax` and `cover_height` attributes/constructor parameters
"""
__slots__ = "paths".split()
def __init__(self, paths=[], **kwargs):
super(PolygonPixelElectrode, self).__init__(**kwargs)
self.paths = [np.asanyarray(i, np.double) for i in paths]
def orientations(self):
return np.sign([area_centroid(pi)[0] for pi in self.paths])
def plot(self, ax, label=None, color=None, **kw):
import matplotlib as mpl
# we already store the right order for interior/exterior
vertices = np.concatenate([np.r_[p, [p[0]]] for p in self.paths])
codes = np.concatenate([np.r_[
mpl.path.Path.MOVETO, np.ones(len(p))*mpl.path.Path.LINETO
].astype(mpl.path.Path.code_type)
for p in self.paths])
path = mpl.path.Path(vertices, codes)
patch = mpl.patches.PathPatch(path, facecolor=color,
edgecolor=kw.pop("edgecolor", "none"), **kw)
ax.add_patch(patch)
if label is None:
label = self.name
if label:
for p in self.paths:
ax.text(p[:,0].mean(), p[:,1].mean(), label,
horizontalalignment="center",
verticalalignment="center")
def to_points(self):
"""Convert all polygons to points at their centroids with the
appropriate area.
Returns
-------
PointPixelElectrode
"""
a, c = [], []
for p in self.paths:
ai, ci = area_centroid(p)
a.append(ai)
c.append(ci)
e = PointPixelElectrode(name=self.name, dc=self.dc, rf=self.rf,
cover_nmax=self.cover_nmax, cover_height=self.cover_height,
areas=a, points=c)
return e
def potential(self, x, derivative=0, potential=1., out=None):
return polygon_potential(x, self.paths, potential, derivative,
self.cover_nmax, self.cover_height, out)
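# Minimal usage sketch (hypothetical electrode geometry, not part of the
# library): a unit-square electrode evaluated above its centre.
#
#   square = PolygonPixelElectrode(name="dc1",
#       paths=[[(0, 0), (1, 0), (1, 1), (0, 1)]])
#   x = np.array([[0.5, 0.5, 1.0]])
#   square.potential(x, derivative=0)  # shape (1, 1), electrical potential
#   square.potential(x, derivative=1)  # shape (1, 3), field contribution
#   square.to_points()                 # PointPixelElectrode at the centroid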
class MeshPixelElectrode(SurfaceElectrode):
"""A surface electrode consisting of a polygonal mesh with
different potential for each polygon.
.. note:: untested, unused
Parameters
----------
points : array_like, shape (n, 2)
Vertex coordinates
edges : array_like, shape (m, 2)
The two vertex indices comprising each edge.
Each value is an index into the first axis of `points`.
polygons : array_like, shape (m,)
Polygon associations of each edge. Each value is an index into
`potentials`.
potentials : array_like, shape (k,)
Polygon potential prefactors.
See Also
--------
Electrode
`name`, `dc`, `rf` attributes/parameters
SurfaceElectrode
`cover_nmax` and `cover_height` attributes/constructor parameters
"""
__slots__ = "points edges polygons potentials".split()
def __init__(self, points=[], edges=[], polygons=[], potentials=[],
**kwargs):
super(MeshPixelElectrode, self).__init__(**kwargs)
self.points = np.asanyarray(points, np.double)
self.edges = np.asanyarray(edges, np.intc)
self.polygons = np.asanyarray(polygons, np.intc)
self.potentials = np.asanyarray(potentials, np.double)
@classmethod
def from_polygon_system(cls, s):
points = []
edges = []
polygons = []
potentials = []
for p in s:
assert isinstance(p, PolygonPixelElectrode), p
for i in p.paths:
ei = len(points)+np.arange(len(i))
points.extend(i)
edges.extend(np.c_[np.roll(ei, 1, 0), ei])
polygons.extend(len(potentials)*np.ones(len(ei)))
potentials.append(p.dc)
return cls(dc=1, points=points, edges=edges, polygons=polygons,
potentials=potentials)
def potential(self, x, derivative=0, potential=1., out=None):
return mesh_potential(x, self.points, self.edges, self.polygons,
self.potentials*potential, derivative,
self.cover_nmax, self.cover_height, out)
class GridElectrode(Electrode):
"""Electrode based on a precalculated grid of electrical potentials.
Parameters
----------
data : list of array_like, shape (n, m, k, l)
List of potential derivatives. The ith data entry is of order
(l - 1)/2. Each entry is shaped as a (n, m, k) grid.
origin : array_like, shape (3,)
Position of the (n, m, k) = (0, 0, 0) voxel.
spacing : array_like, shape (3,)
Voxel pitch.
See Also
--------
Electrode
`name`, `dc`, `rf` attributes/parameters
"""
__slots__ = "data origin spacing".split()
def __init__(self, data=[], origin=(0, 0, 0), spacing=(1, 1, 1),
**kwargs):
super(GridElectrode, self).__init__(**kwargs)
self.data = [np.asanyarray(i, np.double) for i in data]
self.origin = np.asanyarray(origin, np.double)
self.spacing = np.asanyarray(spacing, np.double)
@classmethod
def from_result(cls, result, maxderiv=4):
"""Create a `GridElectrode` from a `bem.result.Result` instance.
Parameters
----------
result : bem.result.Result
maxderiv : int
Maximum derivative order to precompute based on the
available data.
Returns
-------
GridElectrode
"""
origin = result.grid.get_origin()
spacing = result.grid.step
data = [result.potential[:, :, :, None]]
if result.field is not None:
data.append(result.field.transpose(1, 2, 3, 0))
obj = cls(origin=origin, spacing=spacing, data=data)
obj.generate(maxderiv)
return obj
@classmethod
def from_vtk(cls, fil, maxderiv=4):
"""Load grid potential data from vtk StructuredPoints.
.. note:: needs `tvtk`
Parameters
----------
fil : str
File name of the VTK StructuredPoints file containing the
gridded data.
maxderiv : int
Maximum derivative order to precompute.
Returns
-------
GridElectrode
"""
from tvtk.api import tvtk
#sgr = tvtk.XMLImageDataReader(file_name=fil)
sgr = tvtk.StructuredPointsReader(file_name=fil)
sgr.update()
sg = sgr.output
pot = [None, None]
for i in range(sg.point_data.number_of_arrays):
name = sg.point_data.get_array_name(i)
if "_pondpot" in name:
continue # not harmonic, do not use it
elif name not in ("potential", "field"):
continue
sp = sg.point_data.get_array(i)
data = sp.to_array()
spacing = sg.spacing
origin = sg.origin
dimensions = tuple(sg.dimensions)
dim = sp.number_of_components
data = data.reshape(dimensions[::-1]+(dim,)).transpose(2, 1, 0, 3)
pot[int((dim-1)/2)] = data
obj = cls(origin=origin, spacing=spacing, data=pot)
obj.generate(maxderiv)
return obj
def generate(self, maxderiv=4):
"""Generate missing derivative orders by successive finite
differences from the already present derivative orders.
.. note:: Finite differences amplify noise and discontinuities
in the original data.
Parameters
----------
maxderiv : int
Maximum derivative order to precompute if not already
present.
"""
for deriv in range(maxderiv):
if len(self.data) < deriv+1:
self.data.append(self.derive(deriv))
ddata = self.data[deriv]
assert ddata.ndim == 4, ddata.ndim
assert ddata.shape[-1] == 2*deriv+1, ddata.shape
if deriv > 0:
assert ddata.shape[:-1] == self.data[deriv-1].shape[:-1]
def derive(self, deriv):
"""Take finite differences along each axis.
Parameters
----------
deriv : derivative order to generate
Returns
-------
data : array, shape (n, m, k, l)
New derivative data, l = 2*deriv + 1
"""
odata = self.data[deriv-1]
ddata = np.empty(odata.shape[:-1] + (2*deriv+1,), np.double)
for i in range(2*deriv+1):
(e, j), k = construct_derivative(deriv, i)
# TODO triple work
grad = np.gradient(odata[..., j], *self.spacing)[k]
ddata[..., i] = grad
return ddata
def potential(self, x, derivative=0, potential=1., out=None):
x = (x - self.origin[None, :])/self.spacing[None, :]
if out is None:
out = np.zeros((x.shape[0], 2*derivative+1), np.double)
dat = self.data[derivative]
for i in range(2*derivative+1):
out[:, i] += potential*map_coordinates(dat[..., i], x.T,
order=1, mode="nearest")
return out
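# Minimal usage sketch (hypothetical file name and coordinates, not part of the
# library):
#
#   grid = GridElectrode.from_vtk("trap_potential.vtk", maxderiv=2)
#   x = np.array([[0., 0., 1.]])
#   grid.potential(x, derivative=0)  # shape (1, 1)
#   grid.potential(x, derivative=2)  # shape (1, 5), reduced-tensor curvature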
| gpl-3.0 |
reynoldsk/pySCA | scaCore.py | 1 | 4983 | #!/usr/bin/env python
"""
The scaCore script runs the core calculations for SCA, and stores the output using the python tool pickle. These calculations can be divided into two parts:
1) Sequence correlations:
a) Compute simMat = the global sequence similarity matrix for the alignment
b) Compute Useq and Uica = the eigenvectors (and independent components) for the following sequence correlation matrices:
* unweighted (:math:`U^0`)
* sequence weights applied (:math:`U^1`)
* both sequence and position weights applied (:math:`U^2`)
2) Positional correlations:
a) Compute the single-site position weights and positional conservation values (:math:`D_i` and :math:`D_i^a`)
b) Compute the dimension-reduced SCA correlation matrix :math:`\\tilde{C_{ij}}`, the projected alignment :math:`tX`,
and the projector
c) Compute Ntrials of the randomized SCA matrix, and the eigenvectors and eigenvalues associated with each
:Arguments:
*.db (the database produced by running scaProcessMSA.py).
:Keyword Arguments:
-n norm type for dimension-reducing the sca matrix. Options are: 'spec' (the spectral norm) or 'frob' (frobenius norm). Default: frob
-l lambda parameter for pseudo-counting the alignment. Default: 0.03
--Ntrials, -t number of randomization trials
--matlab, -m write out the results of these calculations to a matlab workspace for further analysis
:Example:
>>> ./scaCore.py PF00071_full.db
:By: Rama Ranganathan, Kim Reynolds
:On: 8.5.2014
Copyright (C) 2015 Olivier Rivoire, Rama Ranganathan, Kimberly Reynolds
This program is free software distributed under the BSD 3-clause
license, please see the file LICENSE for details.
"""
from __future__ import division
import sys, time
import os
import numpy as np
import copy
import scipy.cluster.hierarchy as sch
import matplotlib.pyplot as plt
import scaTools as sca
import pickle
import argparse
from Bio import SeqIO
from scipy.stats import t
from scipy.stats import scoreatpercentile
from scipy.io import savemat
if __name__ == '__main__':
#parse inputs
parser = argparse.ArgumentParser()
parser.add_argument("database", help='database from running scaProcessMSA')
parser.add_argument("-n", dest = "norm", default='frob', help="norm type for dimension-reducing the sca matrix. Options are: 'spec' (the spectral norm) or 'frob' (frobenius norm). Default: frob")
parser.add_argument("-t", "--Ntrials", dest ="Ntrials", default=10, type=int, help="number of randomization trials")
parser.add_argument("-l", dest = "lbda", default=0.03, type=float, help="lambda parameter for pseudo-counting the alignment. Default: 0.03")
parser.add_argument("-m","--matlab", dest = "matfile", action = "store_true", default = False, help="write out the results of these calculations to a matlab workspace for further analysis")
options = parser.parse_args()
if (options.norm != 'frob') & (options.norm != 'spec'):
sys.exit("The option -n must be set to 'frob' or 'spec' - other keywords are not allowed.")
# extract the necessary stuff from the database...
db_in = pickle.load(open(options.database,"rb"))
D_in = db_in['sequence']
msa_num = D_in['msa_num']
seqw = D_in['seqw']
Nseq = D_in['Nseq']
Npos = D_in['Npos']
ats = D_in['ats']
hd = D_in['hd']
# sequence analysis
print("Computing the sequence projections.")
Useq, Uica = sca.seqProj(msa_num, seqw, kseq = 30, kica = 15)
simMat = sca.seqSim(msa_num)
# SCA calculations
print("Computing the SCA conservation and correlation values.")
Wia,Dia,Di = sca.posWeights(msa_num, seqw, options.lbda)
Csca, tX, Proj = sca.scaMat(msa_num, seqw, options.norm, options.lbda)
# Matrix randomizations
print("Computing matrix randomizations...")
start = time.time()
Vrand, Lrand, Crand = sca.randomize(msa_num, options.Ntrials, seqw, options.lbda)
end = time.time()
print("Randomizations complete, %i trials, time: %.1f minutes" % (options.Ntrials, (end-start)/60))
# saving...
path_list = options.database.split(os.sep)
fn = path_list[-1]
fn_noext = fn.split(".")[0]
D={}
D['Useq'] = Useq
D['Uica'] = Uica
D['simMat'] = simMat
D['lbda'] = options.lbda
D['Dia'] = Dia
D['Di'] = Di
D['Csca'] = Csca
D['tX'] = tX
D['Proj'] = Proj
D['Ntrials'] = options.Ntrials
D['Vrand'] = Vrand
D['Lrand'] = Lrand
D['Crand'] = Crand
db = {}
db['sequence']=D_in
db['sca']=D
print("Calculations complete, writing to database file "+"Outputs/"+ fn_noext)
if options.matfile:
savemat("Outputs/"+fn_noext,db,appendmat = True, oned_as = 'column')
time.sleep(10)
pickle.dump(db,open("Outputs/"+ fn_noext + ".db","wb"))
| bsd-3-clause |
great-expectations/great_expectations | contrib/experimental/great_expectations_experimental/expectations/expect_column_skew_to_be_between.py | 1 | 14524 | import json
from typing import Any, Dict, Optional, Tuple
import numpy as np
import pandas as pd
import scipy.stats as stats
from great_expectations.core import ExpectationConfiguration
from great_expectations.execution_engine import (
ExecutionEngine,
PandasExecutionEngine,
SparkDFExecutionEngine,
)
from great_expectations.execution_engine.execution_engine import MetricDomainTypes
from great_expectations.execution_engine.sqlalchemy_execution_engine import (
SqlAlchemyExecutionEngine,
)
from great_expectations.expectations.expectation import (
ColumnExpectation,
Expectation,
ExpectationConfiguration,
InvalidExpectationConfigurationError,
_format_map_output,
)
from great_expectations.expectations.metrics.column_aggregate_metric import (
ColumnMetricProvider,
column_aggregate_value,
)
from great_expectations.expectations.metrics.import_manager import F, sa
from great_expectations.expectations.metrics.metric_provider import (
MetricProvider,
metric_value,
)
from great_expectations.expectations.util import render_evaluation_parameter_string
from great_expectations.render.renderer.renderer import renderer
from great_expectations.render.types import RenderedStringTemplateContent
from great_expectations.render.util import (
handle_strict_min_max,
num_to_str,
parse_row_condition_string_pandas_engine,
substitute_none_for_missing,
)
from great_expectations.validator.validation_graph import MetricConfiguration
class ColumnSkew(ColumnMetricProvider):
"""MetricProvider Class for Aggregate Mean MetricProvider"""
metric_name = "column.custom.skew"
value_keys = ("abs",)
@column_aggregate_value(engine=PandasExecutionEngine)
def _pandas(cls, column, abs=False, **kwargs):
if abs:
return np.abs(stats.skew(column))
return stats.skew(column)
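    # Illustrative sketch (hypothetical data, not one of the gallery examples):
    # for a pandas Series such as pd.Series([1., 2., 3., 10.]) the long right
    # tail gives stats.skew(...) a positive value, and passing abs=True makes
    # the metric return np.abs(stats.skew(...)) instead.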
#
# @metric_value(engine=SqlAlchemyExecutionEngine, metric_fn_type="value")
# def _sqlalchemy(
# cls,
# execution_engine: "SqlAlchemyExecutionEngine",
# metric_domain_kwargs: Dict,
# metric_value_kwargs: Dict,
# metrics: Dict[Tuple, Any],
# runtime_configuration: Dict,
# ):
# (
# selectable,
# compute_domain_kwargs,
# accessor_domain_kwargs,
# ) = execution_engine.get_compute_domain(
# metric_domain_kwargs, MetricDomainTypes.COLUMN
# )
# column_name = accessor_domain_kwargs["column"]
# column = sa.column(column_name)
# sqlalchemy_engine = execution_engine.engine
# dialect = sqlalchemy_engine.dialect
#
# column_median = None
#
# # TODO: compute the value and return it
#
# return column_median
#
# @metric_value(engine=SparkDFExecutionEngine, metric_fn_type="value")
# def _spark(
# cls,
# execution_engine: "SqlAlchemyExecutionEngine",
# metric_domain_kwargs: Dict,
# metric_value_kwargs: Dict,
# metrics: Dict[Tuple, Any],
# runtime_configuration: Dict,
# ):
# (
# df,
# compute_domain_kwargs,
# accessor_domain_kwargs,
# ) = execution_engine.get_compute_domain(
# metric_domain_kwargs, MetricDomainTypes.COLUMN
# )
# column = accessor_domain_kwargs["column"]
#
# column_median = None
#
# # TODO: compute the value and return it
#
# return column_median
#
# @classmethod
# def _get_evaluation_dependencies(
# cls,
# metric: MetricConfiguration,
# configuration: Optional[ExpectationConfiguration] = None,
# execution_engine: Optional[ExecutionEngine] = None,
# runtime_configuration: Optional[dict] = None,
# ):
# """This should return a dictionary:
#
# {
# "dependency_name": MetricConfiguration,
# ...
# }
# """
#
# dependencies = super()._get_evaluation_dependencies(
# metric=metric,
# configuration=configuration,
# execution_engine=execution_engine,
# runtime_configuration=runtime_configuration,
# )
#
# table_domain_kwargs = {
# k: v for k, v in metric.metric_domain_kwargs.items() if k != "column"
# }
#
# dependencies.update(
# {
# "table.row_count": MetricConfiguration(
# "table.row_count", table_domain_kwargs
# )
# }
# )
#
# if isinstance(execution_engine, SqlAlchemyExecutionEngine):
# dependencies["column_values.nonnull.count"] = MetricConfiguration(
# "column_values.nonnull.count", metric.metric_domain_kwargs
# )
#
# return dependencies
class ExpectColumnSkewToBeBetween(ColumnExpectation):
"""Expect column skew to be between. Currently tests against Gamma and Beta distributions"""
# These examples will be shown in the public gallery, and also executed as unit tests for your Expectation
examples = [
{
"data": {
"a": [
5.27071512,
7.05981507,
8.46671693,
10.20629973,
6.15519149,
7.11709362,
5.31915535,
6.56441299,
5.69143401,
5.0389317,
6.48222587,
5.62433534,
5.46219467,
5.74686441,
6.05413964,
7.09435276,
6.43876861,
6.05301145,
6.12727457,
6.80603351,
], # sampled from Gamma(1, 5)
"b": [
81.11265955,
76.7836479,
85.25019592,
93.93285666,
83.63587009,
81.88712944,
80.37321975,
86.786491,
80.05277435,
70.36302516,
79.4907302,
84.1288281,
87.79298488,
78.02771047,
80.63975023,
88.59461893,
84.05632481,
84.54128192,
78.74152549,
83.60684806,
], # sampled from Beta(50, 10)
"c": [
95.74648827,
80.4031074,
85.41863916,
93.98001949,
97.84607818,
89.01205412,
89.55045229,
97.32734707,
93.94199505,
88.19992377,
98.3336087,
97.66984436,
97.39464709,
95.55637873,
96.10980996,
90.18004343,
96.2019293,
89.19519753,
94.01807868,
93.23978285,
], # sampled from Beta(20, 2)
},
"tests": [
{
"title": "positive_test_positive_skew",
"exact_match_out": False,
"include_in_gallery": True,
"in": {"column": "a", "min_value": 0.25, "max_value": 10},
"out": {"success": True, "observed_value": 1.6974323016687487},
},
{
"title": "negative_test_no_skew",
"exact_match_out": False,
"include_in_gallery": True,
"in": {"column": "b", "min_value": 0.25, "max_value": 10},
"out": {"success": False, "observed_value": -0.07638895580386174},
},
{
"title": "positive_test_negative_skew",
"exact_match_out": False,
"include_in_gallery": True,
"in": {"column": "c", "min_value": -10, "max_value": -0.5},
"out": {"success": True, "observed_value": -0.9979514313860596},
},
{
"title": "negative_test_abs_skew",
"exact_match_out": False,
"include_in_gallery": True,
"in": {
"column": "c",
"abs": True,
"min_value": 0,
"max_value": 0.5,
},
"out": {"success": False, "observed_value": 0.9979514313860596},
},
{
"title": "positive_test_abs_skew",
"exact_match_out": False,
"include_in_gallery": True,
"in": {
"column": "c",
"abs": True,
"min_value": 0.5,
"max_value": 10,
},
"out": {"success": True, "observed_value": 0.9979514313860596},
},
],
},
]
# This dictionary contains metadata for display in the public gallery
library_metadata = {
"maturity": "experimental", # "experimental", "beta", or "production"
"tags": [ # Tags for this Expectation in the gallery
# "experimental"
],
"contributors": [ # Github handles for all contributors to this Expectation.
"@lodeous",
"@rexboyce",
"@bragleg",
],
"package": "experimental_expectations",
}
    # Setting necessary computation metric dependencies and defining kwargs, as well as assigning kwargs default values
metric_dependencies = ("column.custom.skew",)
success_keys = ("min_value", "strict_min", "max_value", "strict_max", "abs")
# Default values
default_kwarg_values = {
"min_value": None,
"max_value": None,
"strict_min": None,
"strict_max": None,
"abs": False,
"result_format": "BASIC",
"include_config": True,
"catch_exceptions": False,
}
# def validate_configuration(self, configuration: Optional[ExpectationConfiguration]):
# """
# Validates that a configuration has been set, and sets a configuration if it has yet to be set. Ensures that
# neccessary configuration arguments have been provided for the validation of the expectation.
#
# Args:
# configuration (OPTIONAL[ExpectationConfiguration]): \
# An optional Expectation Configuration entry that will be used to configure the expectation
# Returns:
# True if the configuration has been validated successfully. Otherwise, raises an exception
# """
# super().validate_configuration(configuration)
# self.validate_metric_value_between_configuration(configuration=configuration)
# @classmethod
# @renderer(renderer_type="renderer.prescriptive")
# @render_evaluation_parameter_string
# def _prescriptive_renderer(
# cls,
# configuration=None,
# result=None,
# language=None,
# runtime_configuration=None,
# **kwargs,
# ):
# runtime_configuration = runtime_configuration or {}
# include_column_name = runtime_configuration.get("include_column_name", True)
# include_column_name = (
# include_column_name if include_column_name is not None else True
# )
# styling = runtime_configuration.get("styling")
# params = substitute_none_for_missing(
# configuration.kwargs,
# [
# "column",
# "min_value",
# "max_value",
# "row_condition",
# "condition_parser",
# "strict_min",
# "strict_max",
# ],
# )
#
# if (params["min_value"] is None) and (params["max_value"] is None):
# template_str = "median may have any numerical value."
# else:
# at_least_str, at_most_str = handle_strict_min_max(params)
# if params["min_value"] is not None and params["max_value"] is not None:
# template_str = f"median must be {at_least_str} $min_value and {at_most_str} $max_value."
# elif params["min_value"] is None:
# template_str = f"median must be {at_most_str} $max_value."
# elif params["max_value"] is None:
# template_str = f"median must be {at_least_str} $min_value."
#
# if include_column_name:
# template_str = "$column " + template_str
#
# if params["row_condition"] is not None:
# (
# conditional_template_str,
# conditional_params,
# ) = parse_row_condition_string_pandas_engine(params["row_condition"])
# template_str = conditional_template_str + ", then " + template_str
# params.update(conditional_params)
#
# return [
# RenderedStringTemplateContent(
# **{
# "content_block_type": "string_template",
# "string_template": {
# "template": template_str,
# "params": params,
# "styling": styling,
# },
# }
# )
# ]
def _validate(
self,
configuration: ExpectationConfiguration,
metrics: Dict,
runtime_configuration: dict = None,
execution_engine: ExecutionEngine = None,
):
return self._validate_metric_value_between(
metric_name="column.custom.skew",
configuration=configuration,
metrics=metrics,
runtime_configuration=runtime_configuration,
execution_engine=execution_engine,
)
if __name__ == "__main__":
self_check_report = ExpectColumnSkewToBeBetween().run_diagnostics()
print(json.dumps(self_check_report, indent=2))
| apache-2.0 |
ywcui1990/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/legend.py | 69 | 30705 | """
Place a legend on the axes at location loc. Labels are a
sequence of strings and loc can be a string or an integer
specifying the legend location
The location codes are
'best' : 0, (only implemented for axis legends)
'upper right' : 1,
'upper left' : 2,
'lower left' : 3,
'lower right' : 4,
'right' : 5,
'center left' : 6,
'center right' : 7,
'lower center' : 8,
'upper center' : 9,
'center' : 10,
Return value is a sequence of text, line instances that make
up the legend
"""
from __future__ import division
import warnings
import numpy as np
from matplotlib import rcParams
from matplotlib.artist import Artist
from matplotlib.cbook import is_string_like, iterable, silent_list, safezip
from matplotlib.font_manager import FontProperties
from matplotlib.lines import Line2D
from matplotlib.patches import Patch, Rectangle, Shadow, FancyBboxPatch
from matplotlib.collections import LineCollection, RegularPolyCollection
from matplotlib.transforms import Bbox
from matplotlib.offsetbox import HPacker, VPacker, PackerBase, TextArea, DrawingArea
class Legend(Artist):
"""
Place a legend on the axes at location loc. Labels are a
sequence of strings and loc can be a string or an integer
specifying the legend location
The location codes are::
'best' : 0, (only implemented for axis legends)
'upper right' : 1,
'upper left' : 2,
'lower left' : 3,
'lower right' : 4,
'right' : 5,
'center left' : 6,
'center right' : 7,
'lower center' : 8,
'upper center' : 9,
'center' : 10,
loc can be a tuple of the normalized coordinate values with
respect to its parent.
Return value is a sequence of text, line instances that make
up the legend
"""
codes = {'best' : 0, # only implemented for axis legends
'upper right' : 1,
'upper left' : 2,
'lower left' : 3,
'lower right' : 4,
'right' : 5,
'center left' : 6,
'center right' : 7,
'lower center' : 8,
'upper center' : 9,
'center' : 10,
}
zorder = 5
def __str__(self):
return "Legend"
def __init__(self, parent, handles, labels,
loc = None,
numpoints = None, # the number of points in the legend line
markerscale = None, # the relative size of legend markers vs. original
scatterpoints = 3, # TODO: may be an rcParam
scatteryoffsets=None,
prop = None, # properties for the legend texts
# the following dimensions are in axes coords
pad = None, # deprecated; use borderpad
labelsep = None, # deprecated; use labelspacing
handlelen = None, # deprecated; use handlelength
handletextsep = None, # deprecated; use handletextpad
axespad = None, # deprecated; use borderaxespad
# spacing & pad defined as a fraction of the font-size
borderpad = None, # the whitespace inside the legend border
labelspacing=None, #the vertical space between the legend entries
handlelength=None, # the length of the legend handles
handletextpad=None, # the pad between the legend handle and text
borderaxespad=None, # the pad between the axes and legend border
columnspacing=None, # spacing between columns
ncol=1, # number of columns
mode=None, # mode for horizontal distribution of columns. None, "expand"
fancybox=None, # True use a fancy box, false use a rounded box, none use rc
shadow = None,
):
"""
- *parent* : the artist that contains the legend
- *handles* : a list of artists (lines, patches) to add to the legend
- *labels* : a list of strings to label the legend
Optional keyword arguments:
================ ==================================================================
Keyword Description
================ ==================================================================
loc a location code or a tuple of coordinates
numpoints the number of points in the legend line
prop the font property
markerscale the relative size of legend markers vs. original
fancybox if True, draw a frame with a round fancybox. If None, use rc
shadow if True, draw a shadow behind legend
scatteryoffsets a list of yoffsets for scatter symbols in legend
borderpad the fractional whitespace inside the legend border
labelspacing the vertical space between the legend entries
handlelength the length of the legend handles
handletextpad the pad between the legend handle and text
borderaxespad the pad between the axes and legend border
columnspacing the spacing between columns
================ ==================================================================
The dimensions of pad and spacing are given as a fraction of the
fontsize. Values from rcParams will be used if None.
"""
from matplotlib.axes import Axes # local import only to avoid circularity
from matplotlib.figure import Figure # local import only to avoid circularity
Artist.__init__(self)
if prop is None:
self.prop=FontProperties(size=rcParams["legend.fontsize"])
else:
self.prop=prop
self.fontsize = self.prop.get_size_in_points()
propnames=['numpoints', 'markerscale', 'shadow', "columnspacing",
"scatterpoints"]
localdict = locals()
for name in propnames:
if localdict[name] is None:
value = rcParams["legend."+name]
else:
value = localdict[name]
setattr(self, name, value)
# Take care the deprecated keywords
deprecated_kwds = {"pad":"borderpad",
"labelsep":"labelspacing",
"handlelen":"handlelength",
"handletextsep":"handletextpad",
"axespad":"borderaxespad"}
# convert values of deprecated keywords (given in axes coords)
# to new values in a fraction of the font size
# conversion factor
bbox = parent.bbox
axessize_fontsize = min(bbox.width, bbox.height)/self.fontsize
for k, v in deprecated_kwds.items():
# use deprecated value if not None and if their newer
# counter part is None.
if localdict[k] is not None and localdict[v] is None:
warnings.warn("Use '%s' instead of '%s'." % (v, k),
DeprecationWarning)
setattr(self, v, localdict[k]*axessize_fontsize)
continue
# Otherwise, use new keywords
if localdict[v] is None:
setattr(self, v, rcParams["legend."+v])
else:
setattr(self, v, localdict[v])
del localdict
self._ncol = ncol
if self.numpoints <= 0:
raise ValueError("numpoints must be >= 0; it was %d"% numpoints)
# introduce y-offset for handles of the scatter plot
if scatteryoffsets is None:
self._scatteryoffsets = np.array([3./8., 4./8., 2.5/8.])
else:
self._scatteryoffsets = np.asarray(scatteryoffsets)
reps = int(self.numpoints / len(self._scatteryoffsets)) + 1
self._scatteryoffsets = np.tile(self._scatteryoffsets, reps)[:self.scatterpoints]
# _legend_box is an OffsetBox instance that contains all
# legend items and will be initialized from _init_legend_box()
# method.
self._legend_box = None
if isinstance(parent,Axes):
self.isaxes = True
self.set_figure(parent.figure)
elif isinstance(parent,Figure):
self.isaxes = False
self.set_figure(parent)
else:
raise TypeError("Legend needs either Axes or Figure as parent")
self.parent = parent
if loc is None:
loc = rcParams["legend.loc"]
if not self.isaxes and loc in [0,'best']:
loc = 'upper right'
if is_string_like(loc):
if loc not in self.codes:
if self.isaxes:
warnings.warn('Unrecognized location "%s". Falling back on "best"; '
'valid locations are\n\t%s\n'
% (loc, '\n\t'.join(self.codes.keys())))
loc = 0
else:
warnings.warn('Unrecognized location "%s". Falling back on "upper right"; '
'valid locations are\n\t%s\n'
% (loc, '\n\t'.join(self.codes.keys())))
loc = 1
else:
loc = self.codes[loc]
if not self.isaxes and loc == 0:
warnings.warn('Automatic legend placement (loc="best") not implemented for figure legend. '
'Falling back on "upper right".')
loc = 1
self._loc = loc
self._mode = mode
# We use FancyBboxPatch to draw a legend frame. The location
# and size of the box will be updated during the drawing time.
self.legendPatch = FancyBboxPatch(
xy=(0.0, 0.0), width=1., height=1.,
facecolor='w', edgecolor='k',
mutation_scale=self.fontsize,
snap=True
)
# The width and height of the legendPatch will be set (in the
# draw()) to the length that includes the padding. Thus we set
# pad=0 here.
if fancybox is None:
fancybox = rcParams["legend.fancybox"]
if fancybox == True:
self.legendPatch.set_boxstyle("round",pad=0,
rounding_size=0.2)
else:
self.legendPatch.set_boxstyle("square",pad=0)
self._set_artist_props(self.legendPatch)
self._drawFrame = True
# init with null renderer
self._init_legend_box(handles, labels)
self._last_fontsize_points = self.fontsize
def _set_artist_props(self, a):
"""
set the boilerplate props for artists added to axes
"""
a.set_figure(self.figure)
for c in self.get_children():
c.set_figure(self.figure)
a.set_transform(self.get_transform())
def _findoffset_best(self, width, height, xdescent, ydescent, renderer):
"Heper function to locate the legend at its best position"
ox, oy = self._find_best_position(width, height, renderer)
return ox+xdescent, oy+ydescent
def _findoffset_loc(self, width, height, xdescent, ydescent, renderer):
"Heper function to locate the legend using the location code"
if iterable(self._loc) and len(self._loc)==2:
# when loc is a tuple of axes(or figure) coordinates.
fx, fy = self._loc
bbox = self.parent.bbox
x, y = bbox.x0 + bbox.width * fx, bbox.y0 + bbox.height * fy
else:
bbox = Bbox.from_bounds(0, 0, width, height)
x, y = self._get_anchored_bbox(self._loc, bbox, self.parent.bbox, renderer)
return x+xdescent, y+ydescent
def draw(self, renderer):
"Draw everything that belongs to the legend"
if not self.get_visible(): return
self._update_legend_box(renderer)
renderer.open_group('legend')
# find_offset function will be provided to _legend_box and
# _legend_box will draw itself at the location of the return
# value of the find_offset.
if self._loc == 0:
_findoffset = self._findoffset_best
else:
_findoffset = self._findoffset_loc
def findoffset(width, height, xdescent, ydescent):
return _findoffset(width, height, xdescent, ydescent, renderer)
self._legend_box.set_offset(findoffset)
fontsize = renderer.points_to_pixels(self.fontsize)
# if mode == "expand", set the width of the legend_box to the
# width of the parent (minus pads)
if self._mode in ["expand"]:
pad = 2*(self.borderaxespad+self.borderpad)*fontsize
self._legend_box.set_width(self.parent.bbox.width-pad)
if self._drawFrame:
# update the location and size of the legend
bbox = self._legend_box.get_window_extent(renderer)
self.legendPatch.set_bounds(bbox.x0, bbox.y0,
bbox.width, bbox.height)
self.legendPatch.set_mutation_scale(fontsize)
if self.shadow:
shadow = Shadow(self.legendPatch, 2, -2)
shadow.draw(renderer)
self.legendPatch.draw(renderer)
self._legend_box.draw(renderer)
renderer.close_group('legend')
def _approx_text_height(self, renderer=None):
"""
Return the approximate height of the text. This is used to place
the legend handle.
"""
if renderer is None:
return self.fontsize
else:
return renderer.points_to_pixels(self.fontsize)
def _init_legend_box(self, handles, labels):
"""
Initialize the legend_box. The legend_box is an instance of
the OffsetBox, which is packed with legend handles and
texts. Once packed, their location is calculated during the
drawing time.
"""
fontsize = self.fontsize
# legend_box is a HPacker, horizontally packed with
# columns. Each column is a VPacker, vertically packed with
# legend items. Each legend item is HPacker packed with
# legend handleBox and labelBox. handleBox is an instance of
# offsetbox.DrawingArea which contains legend handle. labelBox
# is an instance of offsetbox.TextArea which contains legend
# text.
text_list = [] # the list of text instances
handle_list = [] # the list of handle instances
label_prop = dict(verticalalignment='baseline',
horizontalalignment='left',
fontproperties=self.prop,
)
labelboxes = []
for l in labels:
textbox = TextArea(l, textprops=label_prop,
multilinebaseline=True, minimumdescent=True)
text_list.append(textbox._text)
labelboxes.append(textbox)
handleboxes = []
# The approximate height and descent of text. These values are
# only used for plotting the legend handle.
height = self._approx_text_height() * 0.7
descent = 0.
# each handle needs to be drawn inside a box of (x, y, w, h) =
# (0, -descent, width, height). And their coordinates should
# be given in the display coordinates.
# NOTE : the coordinates will be updated again in
# _update_legend_box() method.
# The transformation of each handle will be automatically set
# to self.get_transform(). If the artist does not use its
# default transform (e.g., Collections), you need to
# manually set their transform to the self.get_transform().
for handle in handles:
if isinstance(handle, RegularPolyCollection):
npoints = self.scatterpoints
else:
npoints = self.numpoints
if npoints > 1:
# we put some pad here to compensate for the size of the
# marker
xdata = np.linspace(0.3*fontsize,
(self.handlelength-0.3)*fontsize,
npoints)
xdata_marker = xdata
elif npoints == 1:
xdata = np.linspace(0, self.handlelength*fontsize, 2)
xdata_marker = [0.5*self.handlelength*fontsize]
if isinstance(handle, Line2D):
ydata = ((height-descent)/2.)*np.ones(xdata.shape, float)
legline = Line2D(xdata, ydata)
legline.update_from(handle)
self._set_artist_props(legline) # after update
legline.set_clip_box(None)
legline.set_clip_path(None)
legline.set_drawstyle('default')
legline.set_marker('None')
handle_list.append(legline)
legline_marker = Line2D(xdata_marker, ydata[:len(xdata_marker)])
legline_marker.update_from(handle)
self._set_artist_props(legline_marker)
legline_marker.set_clip_box(None)
legline_marker.set_clip_path(None)
legline_marker.set_linestyle('None')
# we don't want to add this to the return list because
# the texts and handles are assumed to be in one-to-one
# correspondence.
legline._legmarker = legline_marker
elif isinstance(handle, Patch):
p = Rectangle(xy=(0., 0.),
width = self.handlelength*fontsize,
height=(height-descent),
)
p.update_from(handle)
self._set_artist_props(p)
p.set_clip_box(None)
p.set_clip_path(None)
handle_list.append(p)
elif isinstance(handle, LineCollection):
ydata = ((height-descent)/2.)*np.ones(xdata.shape, float)
legline = Line2D(xdata, ydata)
self._set_artist_props(legline)
legline.set_clip_box(None)
legline.set_clip_path(None)
lw = handle.get_linewidth()[0]
dashes = handle.get_dashes()[0]
color = handle.get_colors()[0]
legline.set_color(color)
legline.set_linewidth(lw)
legline.set_dashes(dashes)
handle_list.append(legline)
elif isinstance(handle, RegularPolyCollection):
#ydata = self._scatteryoffsets
ydata = height*self._scatteryoffsets
size_max, size_min = max(handle.get_sizes()),\
min(handle.get_sizes())
# we may need to scale these sizes by "markerscale"
# attribute. But other handle types do not seem
# to care about this attribute and it is currently ignored.
if self.scatterpoints < 4:
sizes = [.5*(size_max+size_min), size_max,
size_min]
else:
sizes = (size_max-size_min)*np.linspace(0,1,self.scatterpoints)+size_min
p = type(handle)(handle.get_numsides(),
rotation=handle.get_rotation(),
sizes=sizes,
offsets=zip(xdata_marker,ydata),
transOffset=self.get_transform(),
)
p.update_from(handle)
p.set_figure(self.figure)
p.set_clip_box(None)
p.set_clip_path(None)
handle_list.append(p)
else:
handle_list.append(None)
handlebox = DrawingArea(width=self.handlelength*fontsize,
height=height,
xdescent=0., ydescent=descent)
handle = handle_list[-1]
handlebox.add_artist(handle)
if hasattr(handle, "_legmarker"):
handlebox.add_artist(handle._legmarker)
handleboxes.append(handlebox)
# We calculate the number of rows in each column. The first
# (num_largecol) columns will have (nrows+1) rows, and the remaining
# (num_smallcol) columns will have (nrows) rows.
nrows, num_largecol = divmod(len(handleboxes), self._ncol)
num_smallcol = self._ncol-num_largecol
# starting index of each column and number of rows in it.
largecol = safezip(range(0, num_largecol*(nrows+1), (nrows+1)),
[nrows+1] * num_largecol)
smallcol = safezip(range(num_largecol*(nrows+1), len(handleboxes), nrows),
[nrows] * num_smallcol)
handle_label = safezip(handleboxes, labelboxes)
columnbox = []
for i0, di in largecol+smallcol:
# pack handleBox and labelBox into itemBox
itemBoxes = [HPacker(pad=0,
sep=self.handletextpad*fontsize,
children=[h, t], align="baseline")
for h, t in handle_label[i0:i0+di]]
# minimumdescent=False for the text of the last row of the column
itemBoxes[-1].get_children()[1].set_minimumdescent(False)
# pack columnBox
columnbox.append(VPacker(pad=0,
sep=self.labelspacing*fontsize,
align="baseline",
children=itemBoxes))
if self._mode == "expand":
mode = "expand"
else:
mode = "fixed"
sep = self.columnspacing*fontsize
self._legend_box = HPacker(pad=self.borderpad*fontsize,
sep=sep, align="baseline",
mode=mode,
children=columnbox)
self._legend_box.set_figure(self.figure)
self.texts = text_list
self.legendHandles = handle_list
def _update_legend_box(self, renderer):
"""
Update the dimension of the legend_box. This is required
because the paddings, the handle size, etc. depend on the dpi
of the renderer.
"""
# font size in pixels (self.fontsize is given in points).
fontsize = renderer.points_to_pixels(self.fontsize)
if self._last_fontsize_points == fontsize:
# no update is needed
return
# each handle needs to be drawn inside a box of
# (x, y, w, h) = (0, -descent, width, height).
# And their coordinates should be given in the display coordinates.
# The approximate height and descent of text. These values are
# only used for plotting the legend handle.
height = self._approx_text_height(renderer) * 0.7
descent = 0.
for handle in self.legendHandles:
if isinstance(handle, RegularPolyCollection):
npoints = self.scatterpoints
else:
npoints = self.numpoints
if npoints > 1:
# we put some pad here to compensate for the size of the
# marker
xdata = np.linspace(0.3*fontsize,
(self.handlelength-0.3)*fontsize,
npoints)
xdata_marker = xdata
elif npoints == 1:
xdata = np.linspace(0, self.handlelength*fontsize, 2)
xdata_marker = [0.5*self.handlelength*fontsize]
if isinstance(handle, Line2D):
legline = handle
ydata = ((height-descent)/2.)*np.ones(xdata.shape, float)
legline.set_data(xdata, ydata)
legline_marker = legline._legmarker
legline_marker.set_data(xdata_marker, ydata[:len(xdata_marker)])
elif isinstance(handle, Patch):
p = handle
p.set_bounds(0., 0.,
self.handlelength*fontsize,
(height-descent),
)
elif isinstance(handle, RegularPolyCollection):
p = handle
ydata = height*self._scatteryoffsets
p.set_offsets(zip(xdata_marker,ydata))
# correction factor
cor = fontsize / self._last_fontsize_points
# helper function to iterate over all children
def all_children(parent):
yield parent
for c in parent.get_children():
for cc in all_children(c): yield cc
#now update paddings
for box in all_children(self._legend_box):
if isinstance(box, PackerBase):
box.pad = box.pad * cor
box.sep = box.sep * cor
elif isinstance(box, DrawingArea):
box.width = self.handlelength*fontsize
box.height = height
box.xdescent = 0.
box.ydescent=descent
self._last_fontsize_points = fontsize
def _auto_legend_data(self):
"""
Returns list of vertices and extents covered by the plot.
Returns a three-element list: [vertices, bboxes, lines].
First element is a list of (x, y) vertices (in
display-coordinates) covered by all the lines and line
collections, in the legend's handles.
Second element is a list of bounding boxes for all the patches in
the legend's handles.
"""
assert self.isaxes # should always hold because function is only called internally
ax = self.parent
vertices = []
bboxes = []
lines = []
for handle in ax.lines:
assert isinstance(handle, Line2D)
path = handle.get_path()
trans = handle.get_transform()
tpath = trans.transform_path(path)
lines.append(tpath)
for handle in ax.patches:
assert isinstance(handle, Patch)
if isinstance(handle, Rectangle):
transform = handle.get_data_transform()
bboxes.append(handle.get_bbox().transformed(transform))
else:
transform = handle.get_transform()
bboxes.append(handle.get_path().get_extents(transform))
return [vertices, bboxes, lines]
def draw_frame(self, b):
'b is a boolean. Set draw frame to b'
self._drawFrame = b
def get_children(self):
'return a list of child artists'
children = []
if self._legend_box:
children.append(self._legend_box)
return children
def get_frame(self):
'return the Rectangle instance used to frame the legend'
return self.legendPatch
def get_lines(self):
'return a list of lines.Line2D instances in the legend'
return [h for h in self.legendHandles if isinstance(h, Line2D)]
def get_patches(self):
'return a list of patch instances in the legend'
return silent_list('Patch', [h for h in self.legendHandles if isinstance(h, Patch)])
def get_texts(self):
'return a list of text.Text instance in the legend'
return silent_list('Text', self.texts)
def get_window_extent(self):
'return the extent of the legend'
return self.legendPatch.get_window_extent()
def _get_anchored_bbox(self, loc, bbox, parentbbox, renderer):
"""
Place the *bbox* inside the *parentbbox* according to a given
location code. Return the (x,y) coordinate of the bbox.
- loc: a location code in range(1, 11).
This corresponds to the possible values for self._loc, excluding "best".
- bbox: bbox to be placed, display coordinate units.
- parentbbox: a parent box which will contain the bbox. In
display coordinates.
"""
assert loc in range(1,11) # called only internally
BEST, UR, UL, LL, LR, R, CL, CR, LC, UC, C = range(11)
anchor_coefs={UR:"NE",
UL:"NW",
LL:"SW",
LR:"SE",
R:"E",
CL:"W",
CR:"E",
LC:"S",
UC:"N",
C:"C"}
c = anchor_coefs[loc]
fontsize = renderer.points_to_pixels(self.fontsize)
container = parentbbox.padded(-(self.borderaxespad) * fontsize)
anchored_box = bbox.anchored(c, container=container)
return anchored_box.x0, anchored_box.y0
def _find_best_position(self, width, height, renderer, consider=None):
"""
Determine the best location to place the legend.
`consider` is a list of (x, y) pairs to consider as a potential
lower-left corner of the legend. All are display coords.
"""
assert self.isaxes # should always hold because function is only called internally
verts, bboxes, lines = self._auto_legend_data()
bbox = Bbox.from_bounds(0, 0, width, height)
consider = [self._get_anchored_bbox(x, bbox, self.parent.bbox, renderer) for x in range(1, len(self.codes))]
#tx, ty = self.legendPatch.get_x(), self.legendPatch.get_y()
candidates = []
for l, b in consider:
legendBox = Bbox.from_bounds(l, b, width, height)
badness = 0
badness = legendBox.count_contains(verts)
badness += legendBox.count_overlaps(bboxes)
for line in lines:
if line.intersects_bbox(legendBox):
badness += 1
ox, oy = l, b
if badness == 0:
return ox, oy
candidates.append((badness, (l, b)))
# rather than use min() or list.sort(), do this so that we are assured
# that in the case of two equal badnesses, the one first considered is
# returned.
# NOTE: list.sort() is stable. But leave as it is for now. -JJL
minCandidate = candidates[0]
for candidate in candidates:
if candidate[0] < minCandidate[0]:
minCandidate = candidate
ox, oy = minCandidate[1]
return ox, oy
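# Hedged usage sketch (not part of this module): exercising the location codes
# defined on Legend from the pyplot API. loc="upper left" and loc=2 map to the
# same entry in Legend.codes, and loc="best"/0 triggers _find_best_position().
def _legend_loc_demo():
    import matplotlib.pyplot as plt
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.plot([0, 1], [0, 1], label="diagonal")
    ax.legend(loc="upper left", numpoints=1, fancybox=True)
    return fig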
| agpl-3.0 |
wishdasher/hypopotamus | entity/entity_only.py | 1 | 1391 | import tensorflow as tf
from tensorflow.contrib import learn
import numpy as np
import csv
from sklearn.metrics import precision_recall_fscore_support
vector_file = 'glove.6B.300d.txt'#'small_vector.txt'
train_file = 'datasets/dataset_lex/train.tsv'#'small_train.tsv'
test_file = 'datasets/dataset_lex/test.tsv'#'small_test.tsv'
# extract vectors into dictionary
word_dict = {}
with open(vector_file) as f:
for line in f:
entry = line.split()
key = entry.pop(0).lower()
word_dict[key] = list(map(float, entry))
# create train and test vectors
def file_to_data(file_name):
data = []
labels = []
with open(file_name) as tsvfile:
tsvreader = csv.reader(tsvfile, delimiter="\t")
for line in tsvreader:
ent1, ent2, rel = line
if ent1 in word_dict and ent2 in word_dict:
x = word_dict[ent1] + word_dict[ent2]
y = int(rel == 'True')
data.append(x)
labels.append(y)
return (np.array(data), np.array(labels))
train_x, train_y = file_to_data(train_file)
test_x, test_y = file_to_data(test_file)
classifier = learn.DNNClassifier(hidden_units=[10, 7], n_classes=2)
classifier.fit(train_x, train_y.T, steps=75, batch_size=100)
pred = classifier.predict(test_x)
xor = np.logical_xor(pred, test_y)
total = len(pred)
correct = total - np.sum(xor)
accuracy = correct / total
print(correct, total, accuracy)
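# Hedged illustration of the feature construction used above: each candidate
# (ent1, ent2) pair is represented by concatenating its two GloVe vectors,
# giving one 600-dimensional input row for the 300-d vectors in vector_file.
def _pair_features(ent1, ent2):
    if ent1 in word_dict and ent2 in word_dict:
        return np.array(word_dict[ent1] + word_dict[ent2])
    return None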
print(precision_recall_fscore_support(test_y, pred)) | mit |
wackymaster/QTClock | Libraries/matplotlib/backends/backend_pgf.py | 7 | 36822 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
from matplotlib.externals import six
import math
import os
import sys
import errno
import re
import shutil
import tempfile
import codecs
import atexit
import weakref
import warnings
import numpy as np
import matplotlib as mpl
from matplotlib.backend_bases import RendererBase, GraphicsContextBase,\
FigureManagerBase, FigureCanvasBase
from matplotlib.backends.backend_mixed import MixedModeRenderer
from matplotlib.figure import Figure
from matplotlib.text import Text
from matplotlib.path import Path
from matplotlib import _png, rcParams
from matplotlib.cbook import is_string_like, is_writable_file_like
from matplotlib.compat import subprocess
from matplotlib.compat.subprocess import check_output
###############################################################################
# create a list of system fonts, all of these should work with xe/lua-latex
system_fonts = []
if sys.platform.startswith('win'):
from matplotlib import font_manager
from matplotlib.ft2font import FT2Font
for f in font_manager.win32InstalledFonts():
try:
system_fonts.append(FT2Font(str(f)).family_name)
except:
pass # unknown error, skip this font
else:
# assuming fontconfig is installed and the command 'fc-list' exists
try:
# list scalable (non-bitmap) fonts
fc_list = check_output(['fc-list', ':outline,scalable', 'family'])
fc_list = fc_list.decode('utf8')
system_fonts = [f.split(',')[0] for f in fc_list.splitlines()]
system_fonts = list(set(system_fonts))
except:
warnings.warn('error getting fonts from fc-list', UserWarning)
def get_texcommand():
"""Get chosen TeX system from rc."""
texsystem_options = ["xelatex", "lualatex", "pdflatex"]
texsystem = rcParams.get("pgf.texsystem", "xelatex")
return texsystem if texsystem in texsystem_options else "xelatex"
def get_fontspec():
"""Build fontspec preamble from rc."""
latex_fontspec = []
texcommand = get_texcommand()
if texcommand != "pdflatex":
latex_fontspec.append("\\usepackage{fontspec}")
if texcommand != "pdflatex" and rcParams.get("pgf.rcfonts", True):
# try to find fonts from rc parameters
families = ["serif", "sans-serif", "monospace"]
fontspecs = [r"\setmainfont{%s}", r"\setsansfont{%s}",
r"\setmonofont{%s}"]
for family, fontspec in zip(families, fontspecs):
matches = [f for f in rcParams["font." + family]
if f in system_fonts]
if matches:
latex_fontspec.append(fontspec % matches[0])
else:
pass # no fonts found, fall back to the LaTeX default
return "\n".join(latex_fontspec)
def get_preamble():
"""Get LaTeX preamble from rc."""
latex_preamble = rcParams.get("pgf.preamble", "")
if type(latex_preamble) == list:
latex_preamble = "\n".join(latex_preamble)
return latex_preamble
###############################################################################
# This almost made me cry!!!
# In the end, it's better to use only one unit for all coordinates, since the
# arithmetic in latex seems to produce inaccurate conversions.
latex_pt_to_in = 1. / 72.27
latex_in_to_pt = 1. / latex_pt_to_in
mpl_pt_to_in = 1. / 72.
mpl_in_to_pt = 1. / mpl_pt_to_in
###############################################################################
# helper functions
NO_ESCAPE = r"(?<!\\)(?:\\\\)*"
re_mathsep = re.compile(NO_ESCAPE + r"\$")
re_escapetext = re.compile(NO_ESCAPE + "([_^$%])")
repl_escapetext = lambda m: "\\" + m.group(1)
re_mathdefault = re.compile(NO_ESCAPE + r"(\\mathdefault)")
repl_mathdefault = lambda m: m.group(0)[:-len(m.group(1))]
def common_texification(text):
"""
Do some necessary and/or useful substitutions for texts to be included in
LaTeX documents.
"""
# Sometimes, matplotlib adds the unknown command \mathdefault.
# Not using \mathnormal instead since this looks odd for the latex cm font.
text = re_mathdefault.sub(repl_mathdefault, text)
# split text into normaltext and inline math parts
parts = re_mathsep.split(text)
for i, s in enumerate(parts):
if not i % 2:
# textmode replacements
s = re_escapetext.sub(repl_escapetext, s)
else:
# mathmode replacements
s = r"\(\displaystyle %s\)" % s
parts[i] = s
return "".join(parts)
def writeln(fh, line):
# every line of a file included with \input must be terminated with %
# if not, latex will create additional vertical spaces for some reason
fh.write(line)
fh.write("%\n")
def _font_properties_str(prop):
# translate font properties to latex commands, return as string
commands = []
families = {"serif": r"\rmfamily", "sans": r"\sffamily",
"sans-serif": r"\sffamily", "monospace": r"\ttfamily"}
family = prop.get_family()[0]
if family in families:
commands.append(families[family])
elif family in system_fonts and get_texcommand() != "pdflatex":
commands.append(r"\setmainfont{%s}\rmfamily" % family)
else:
pass # print warning?
size = prop.get_size_in_points()
commands.append(r"\fontsize{%f}{%f}" % (size, size * 1.2))
styles = {"normal": r"", "italic": r"\itshape", "oblique": r"\slshape"}
commands.append(styles[prop.get_style()])
boldstyles = ["semibold", "demibold", "demi", "bold", "heavy",
"extra bold", "black"]
if prop.get_weight() in boldstyles:
commands.append(r"\bfseries")
commands.append(r"\selectfont")
return "".join(commands)
def make_pdf_to_png_converter():
"""
Returns a function that converts a pdf file to a png file.
"""
tools_available = []
# check for pdftocairo
try:
check_output(["pdftocairo", "-v"], stderr=subprocess.STDOUT)
tools_available.append("pdftocairo")
except:
pass
# check for ghostscript
gs, ver = mpl.checkdep_ghostscript()
if gs:
tools_available.append("gs")
# pick converter
if "pdftocairo" in tools_available:
def cairo_convert(pdffile, pngfile, dpi):
cmd = ["pdftocairo", "-singlefile", "-png",
"-r %d" % dpi, pdffile, os.path.splitext(pngfile)[0]]
# for some reason this doesn't work without shell
check_output(" ".join(cmd), shell=True, stderr=subprocess.STDOUT)
return cairo_convert
elif "gs" in tools_available:
def gs_convert(pdffile, pngfile, dpi):
cmd = [gs, '-dQUIET', '-dSAFER', '-dBATCH', '-dNOPAUSE', '-dNOPROMPT',
'-sDEVICE=png16m', '-dUseCIEColor', '-dTextAlphaBits=4',
'-dGraphicsAlphaBits=4', '-dDOINTERPOLATE', '-sOutputFile=%s' % pngfile,
'-r%d' % dpi, pdffile]
check_output(cmd, stderr=subprocess.STDOUT)
return gs_convert
else:
raise RuntimeError("No suitable pdf to png renderer found.")
class LatexError(Exception):
def __init__(self, message, latex_output=""):
Exception.__init__(self, message)
self.latex_output = latex_output
class LatexManagerFactory(object):
previous_instance = None
@staticmethod
def get_latex_manager():
texcommand = get_texcommand()
latex_header = LatexManager._build_latex_header()
prev = LatexManagerFactory.previous_instance
# check if the previous instance of LatexManager can be reused
if prev and prev.latex_header == latex_header and prev.texcommand == texcommand:
if rcParams.get("pgf.debug", False):
print("reusing LatexManager")
return prev
else:
if rcParams.get("pgf.debug", False):
print("creating LatexManager")
new_inst = LatexManager()
LatexManagerFactory.previous_instance = new_inst
return new_inst
class WeakSet(object):
# TODO: Poor man's weakref.WeakSet.
# Remove this once python 2.6 support is dropped from matplotlib.
def __init__(self):
self.weak_key_dict = weakref.WeakKeyDictionary()
def add(self, item):
self.weak_key_dict[item] = None
def discard(self, item):
if item in self.weak_key_dict:
del self.weak_key_dict[item]
def __iter__(self):
return six.iterkeys(self.weak_key_dict)
class LatexManager(object):
"""
The LatexManager opens an instance of the LaTeX application for
determining the metrics of text elements. The LaTeX environment can be
modified by setting fonts and/or a custom preamble in the rc parameters.
"""
_unclean_instances = WeakSet()
@staticmethod
def _build_latex_header():
latex_preamble = get_preamble()
latex_fontspec = get_fontspec()
# Create LaTeX header with some content, else LaTeX will load some
# math fonts later when we don't expect the additional output on stdout.
# TODO: is this sufficient?
latex_header = [r"\documentclass{minimal}",
latex_preamble,
latex_fontspec,
r"\begin{document}",
r"text $math \mu$", # force latex to load fonts now
r"\typeout{pgf_backend_query_start}"]
return "\n".join(latex_header)
@staticmethod
def _cleanup_remaining_instances():
unclean_instances = list(LatexManager._unclean_instances)
for latex_manager in unclean_instances:
latex_manager._cleanup()
def _stdin_writeln(self, s):
self.latex_stdin_utf8.write(s)
self.latex_stdin_utf8.write("\n")
self.latex_stdin_utf8.flush()
def _expect(self, s):
exp = s.encode("utf8")
buf = bytearray()
while True:
b = self.latex.stdout.read(1)
buf += b
if buf[-len(exp):] == exp:
break
if not len(b):
raise LatexError("LaTeX process halted", buf.decode("utf8"))
return buf.decode("utf8")
def _expect_prompt(self):
return self._expect("\n*")
def __init__(self):
# store references for __del__
self._os_path = os.path
self._shutil = shutil
self._debug = rcParams.get("pgf.debug", False)
# create a tmp directory for running latex, remember to cleanup
self.tmpdir = tempfile.mkdtemp(prefix="mpl_pgf_lm_")
LatexManager._unclean_instances.add(self)
# test the LaTeX setup to ensure a clean startup of the subprocess
self.texcommand = get_texcommand()
self.latex_header = LatexManager._build_latex_header()
latex_end = "\n\\makeatletter\n\\@@end\n"
try:
latex = subprocess.Popen([self.texcommand, "-halt-on-error"],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
cwd=self.tmpdir)
except OSError as e:
if e.errno == errno.ENOENT:
raise RuntimeError("Latex command not found. "
"Install '%s' or change pgf.texsystem to the desired command."
% self.texcommand
)
else:
raise RuntimeError("Error starting process '%s'" % self.texcommand)
test_input = self.latex_header + latex_end
stdout, stderr = latex.communicate(test_input.encode("utf-8"))
if latex.returncode != 0:
raise LatexError("LaTeX returned an error, probably missing font or error in preamble:\n%s" % stdout)
# open LaTeX process for real work
latex = subprocess.Popen([self.texcommand, "-halt-on-error"],
stdin=subprocess.PIPE, stdout=subprocess.PIPE,
cwd=self.tmpdir)
self.latex = latex
self.latex_stdin_utf8 = codecs.getwriter("utf8")(self.latex.stdin)
# write header with 'pgf_backend_query_start' token
self._stdin_writeln(self._build_latex_header())
# read all lines until our 'pgf_backend_query_start' token appears
self._expect("*pgf_backend_query_start")
self._expect_prompt()
# cache for strings already processed
self.str_cache = {}
def _cleanup(self):
if not self._os_path.isdir(self.tmpdir):
return
try:
self.latex.communicate()
self.latex_stdin_utf8.close()
self.latex.stdout.close()
except:
pass
try:
self._shutil.rmtree(self.tmpdir)
LatexManager._unclean_instances.discard(self)
except:
sys.stderr.write("error deleting tmp directory %s\n" % self.tmpdir)
def __del__(self):
if self._debug:
print("deleting LatexManager")
self._cleanup()
def get_width_height_descent(self, text, prop):
"""
Get the width, total height and descent for a text typesetted by the
current LaTeX environment.
"""
# apply font properties and define textbox
prop_cmds = _font_properties_str(prop)
textbox = "\\sbox0{%s %s}" % (prop_cmds, text)
# check cache
if textbox in self.str_cache:
return self.str_cache[textbox]
# send textbox to LaTeX and wait for prompt
self._stdin_writeln(textbox)
try:
self._expect_prompt()
except LatexError as e:
msg = "Error processing '%s'\nLaTeX Output:\n%s"
raise ValueError(msg % (text, e.latex_output))
# typeout width, height and text offset of the last textbox
self._stdin_writeln(r"\typeout{\the\wd0,\the\ht0,\the\dp0}")
# read answer from latex and advance to the next prompt
try:
answer = self._expect_prompt()
except LatexError as e:
msg = "Error processing '%s'\nLaTeX Output:\n%s"
raise ValueError(msg % (text, e.latex_output))
# parse metrics from the answer string
try:
width, height, offset = answer.splitlines()[0].split(",")
except:
msg = "Error processing '%s'\nLaTeX Output:\n%s" % (text, answer)
raise ValueError(msg)
w, h, o = float(width[:-2]), float(height[:-2]), float(offset[:-2])
# the height returned from LaTeX goes from base to top.
# the height matplotlib expects goes from bottom to top.
self.str_cache[textbox] = (w, h + o, o)
return w, h + o, o
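# Hedged illustration (assumes a working LaTeX installation): measuring a string
# through the manager shared by RendererPgf below; the returned values are in
# LaTeX points (width, height from bottom to top, descent).
def _measure_text_demo():
    from matplotlib.font_manager import FontProperties
    manager = LatexManagerFactory.get_latex_manager()
    return manager.get_width_height_descent("Example text", FontProperties(size=10))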
class RendererPgf(RendererBase):
def __init__(self, figure, fh, dummy=False):
"""
Creates a new PGF renderer that translates any drawing instruction
into text commands to be interpreted in a latex pgfpicture environment.
Attributes:
* figure: Matplotlib figure to initialize height, width and dpi from.
* fh: File handle for the output of the drawing commands.
"""
RendererBase.__init__(self)
self.dpi = figure.dpi
self.fh = fh
self.figure = figure
self.image_counter = 0
# get LatexManager instance
self.latexManager = LatexManagerFactory.get_latex_manager()
if dummy:
# dummy==True deactivate all methods
nop = lambda *args, **kwargs: None
for m in RendererPgf.__dict__.keys():
if m.startswith("draw_"):
self.__dict__[m] = nop
else:
# if fh does not belong to a filename, deactivate draw_image
if not hasattr(fh, 'name') or not os.path.exists(fh.name):
warnings.warn("streamed pgf-code does not support raster "
"graphics, consider using the pgf-to-pdf option",
UserWarning)
self.__dict__["draw_image"] = lambda *args, **kwargs: None
def draw_markers(self, gc, marker_path, marker_trans, path, trans, rgbFace=None):
writeln(self.fh, r"\begin{pgfscope}")
# convert from display units to in
f = 1. / self.dpi
# set style and clip
self._print_pgf_clip(gc)
self._print_pgf_path_styles(gc, rgbFace)
# build marker definition
bl, tr = marker_path.get_extents(marker_trans).get_points()
coords = bl[0] * f, bl[1] * f, tr[0] * f, tr[1] * f
writeln(self.fh, r"\pgfsys@defobject{currentmarker}{\pgfqpoint{%fin}{%fin}}{\pgfqpoint{%fin}{%fin}}{" % coords)
self._print_pgf_path(None, marker_path, marker_trans)
self._pgf_path_draw(stroke=gc.get_linewidth() != 0.0,
fill=rgbFace is not None)
writeln(self.fh, r"}")
# draw marker for each vertex
for point, code in path.iter_segments(trans, simplify=False):
x, y = point[0] * f, point[1] * f
writeln(self.fh, r"\begin{pgfscope}")
writeln(self.fh, r"\pgfsys@transformshift{%fin}{%fin}" % (x, y))
writeln(self.fh, r"\pgfsys@useobject{currentmarker}{}")
writeln(self.fh, r"\end{pgfscope}")
writeln(self.fh, r"\end{pgfscope}")
def draw_path(self, gc, path, transform, rgbFace=None):
writeln(self.fh, r"\begin{pgfscope}")
# draw the path
self._print_pgf_clip(gc)
self._print_pgf_path_styles(gc, rgbFace)
self._print_pgf_path(gc, path, transform, rgbFace)
self._pgf_path_draw(stroke=gc.get_linewidth() != 0.0,
fill=rgbFace is not None)
writeln(self.fh, r"\end{pgfscope}")
# if present, draw pattern on top
if gc.get_hatch():
writeln(self.fh, r"\begin{pgfscope}")
self._print_pgf_path_styles(gc, rgbFace)
# combine clip and path for clipping
self._print_pgf_clip(gc)
self._print_pgf_path(gc, path, transform, rgbFace)
writeln(self.fh, r"\pgfusepath{clip}")
# build pattern definition
writeln(self.fh, r"\pgfsys@defobject{currentpattern}{\pgfqpoint{0in}{0in}}{\pgfqpoint{1in}{1in}}{")
writeln(self.fh, r"\begin{pgfscope}")
writeln(self.fh, r"\pgfpathrectangle{\pgfqpoint{0in}{0in}}{\pgfqpoint{1in}{1in}}")
writeln(self.fh, r"\pgfusepath{clip}")
scale = mpl.transforms.Affine2D().scale(self.dpi)
self._print_pgf_path(None, gc.get_hatch_path(), scale)
self._pgf_path_draw(stroke=True)
writeln(self.fh, r"\end{pgfscope}")
writeln(self.fh, r"}")
# repeat pattern, filling the bounding rect of the path
f = 1. / self.dpi
(xmin, ymin), (xmax, ymax) = path.get_extents(transform).get_points()
xmin, xmax = f * xmin, f * xmax
ymin, ymax = f * ymin, f * ymax
repx, repy = int(math.ceil(xmax-xmin)), int(math.ceil(ymax-ymin))
writeln(self.fh, r"\pgfsys@transformshift{%fin}{%fin}" % (xmin, ymin))
for iy in range(repy):
for ix in range(repx):
writeln(self.fh, r"\pgfsys@useobject{currentpattern}{}")
writeln(self.fh, r"\pgfsys@transformshift{1in}{0in}")
writeln(self.fh, r"\pgfsys@transformshift{-%din}{0in}" % repx)
writeln(self.fh, r"\pgfsys@transformshift{0in}{1in}")
writeln(self.fh, r"\end{pgfscope}")
def _print_pgf_clip(self, gc):
f = 1. / self.dpi
# check for clip box
bbox = gc.get_clip_rectangle()
if bbox:
p1, p2 = bbox.get_points()
w, h = p2 - p1
coords = p1[0] * f, p1[1] * f, w * f, h * f
writeln(self.fh, r"\pgfpathrectangle{\pgfqpoint{%fin}{%fin}}{\pgfqpoint{%fin}{%fin}} " % coords)
writeln(self.fh, r"\pgfusepath{clip}")
# check for clip path
clippath, clippath_trans = gc.get_clip_path()
if clippath is not None:
self._print_pgf_path(gc, clippath, clippath_trans)
writeln(self.fh, r"\pgfusepath{clip}")
def _print_pgf_path_styles(self, gc, rgbFace):
# cap style
capstyles = {"butt": r"\pgfsetbuttcap",
"round": r"\pgfsetroundcap",
"projecting": r"\pgfsetrectcap"}
writeln(self.fh, capstyles[gc.get_capstyle()])
# join style
joinstyles = {"miter": r"\pgfsetmiterjoin",
"round": r"\pgfsetroundjoin",
"bevel": r"\pgfsetbeveljoin"}
writeln(self.fh, joinstyles[gc.get_joinstyle()])
# filling
has_fill = rgbFace is not None
if gc.get_forced_alpha():
fillopacity = strokeopacity = gc.get_alpha()
else:
strokeopacity = gc.get_rgb()[3]
fillopacity = rgbFace[3] if has_fill and len(rgbFace) > 3 else 1.0
if has_fill:
writeln(self.fh, r"\definecolor{currentfill}{rgb}{%f,%f,%f}" % tuple(rgbFace[:3]))
writeln(self.fh, r"\pgfsetfillcolor{currentfill}")
if has_fill and fillopacity != 1.0:
writeln(self.fh, r"\pgfsetfillopacity{%f}" % fillopacity)
# linewidth and color
lw = gc.get_linewidth() * mpl_pt_to_in * latex_in_to_pt
stroke_rgba = gc.get_rgb()
writeln(self.fh, r"\pgfsetlinewidth{%fpt}" % lw)
writeln(self.fh, r"\definecolor{currentstroke}{rgb}{%f,%f,%f}" % stroke_rgba[:3])
writeln(self.fh, r"\pgfsetstrokecolor{currentstroke}")
if strokeopacity != 1.0:
writeln(self.fh, r"\pgfsetstrokeopacity{%f}" % strokeopacity)
# line style
dash_offset, dash_list = gc.get_dashes()
if dash_list is None:
writeln(self.fh, r"\pgfsetdash{}{0pt}")
else:
dash_str = r"\pgfsetdash{"
for dash in dash_list:
dash_str += r"{%fpt}" % dash
dash_str += r"}{%fpt}" % dash_offset
writeln(self.fh, dash_str)
def _print_pgf_path(self, gc, path, transform, rgbFace=None):
f = 1. / self.dpi
# check for clip box / ignore clip for filled paths
bbox = gc.get_clip_rectangle() if gc else None
if bbox and (rgbFace is None):
p1, p2 = bbox.get_points()
clip = (p1[0], p1[1], p2[0], p2[1])
else:
clip = None
# build path
for points, code in path.iter_segments(transform, clip=clip):
if code == Path.MOVETO:
x, y = tuple(points)
writeln(self.fh, r"\pgfpathmoveto{\pgfqpoint{%fin}{%fin}}" %
(f * x, f * y))
elif code == Path.CLOSEPOLY:
writeln(self.fh, r"\pgfpathclose")
elif code == Path.LINETO:
x, y = tuple(points)
writeln(self.fh, r"\pgfpathlineto{\pgfqpoint{%fin}{%fin}}" %
(f * x, f * y))
elif code == Path.CURVE3:
cx, cy, px, py = tuple(points)
coords = cx * f, cy * f, px * f, py * f
writeln(self.fh, r"\pgfpathquadraticcurveto{\pgfqpoint{%fin}{%fin}}{\pgfqpoint{%fin}{%fin}}" % coords)
elif code == Path.CURVE4:
c1x, c1y, c2x, c2y, px, py = tuple(points)
coords = c1x * f, c1y * f, c2x * f, c2y * f, px * f, py * f
writeln(self.fh, r"\pgfpathcurveto{\pgfqpoint{%fin}{%fin}}{\pgfqpoint{%fin}{%fin}}{\pgfqpoint{%fin}{%fin}}" % coords)
def _pgf_path_draw(self, stroke=True, fill=False):
actions = []
if stroke:
actions.append("stroke")
if fill:
actions.append("fill")
writeln(self.fh, r"\pgfusepath{%s}" % ",".join(actions))
def draw_image(self, gc, x, y, im):
# TODO: Almost no documentation for the behavior of this function.
# Something missing?
# save the images to png files
path = os.path.dirname(self.fh.name)
fname = os.path.splitext(os.path.basename(self.fh.name))[0]
fname_img = "%s-img%d.png" % (fname, self.image_counter)
self.image_counter += 1
_png.write_png(np.array(im)[::-1], os.path.join(path, fname_img))
# reference the image in the pgf picture
writeln(self.fh, r"\begin{pgfscope}")
self._print_pgf_clip(gc)
h, w = im.get_size_out()
f = 1. / self.dpi # from display coords to inch
writeln(self.fh, r"\pgftext[at=\pgfqpoint{%fin}{%fin},left,bottom]{\pgfimage[interpolate=true,width=%fin,height=%fin]{%s}}" % (x * f, y * f, w * f, h * f, fname_img))
writeln(self.fh, r"\end{pgfscope}")
def draw_tex(self, gc, x, y, s, prop, angle, ismath="TeX!", mtext=None):
self.draw_text(gc, x, y, s, prop, angle, ismath, mtext)
def draw_text(self, gc, x, y, s, prop, angle, ismath=False, mtext=None):
# prepare string for tex
s = common_texification(s)
prop_cmds = _font_properties_str(prop)
s = r"%s %s" % (prop_cmds, s)
writeln(self.fh, r"\begin{pgfscope}")
alpha = gc.get_alpha()
if alpha != 1.0:
writeln(self.fh, r"\pgfsetfillopacity{%f}" % alpha)
writeln(self.fh, r"\pgfsetstrokeopacity{%f}" % alpha)
rgb = tuple(gc.get_rgb())[:3]
if rgb != (0, 0, 0):
writeln(self.fh, r"\definecolor{textcolor}{rgb}{%f,%f,%f}" % rgb)
writeln(self.fh, r"\pgfsetstrokecolor{textcolor}")
writeln(self.fh, r"\pgfsetfillcolor{textcolor}")
s = r"\color{textcolor}" + s
f = 1.0 / self.figure.dpi
text_args = []
if mtext and (angle == 0 or mtext.get_rotation_mode() == "anchor"):
# if text anchoring can be supported, get the original coordinates
# and add alignment information
x, y = mtext.get_transform().transform_point(mtext.get_position())
text_args.append("x=%fin" % (x * f))
text_args.append("y=%fin" % (y * f))
halign = {"left": "left", "right": "right", "center": ""}
valign = {"top": "top", "bottom": "bottom",
"baseline": "base", "center": ""}
text_args.append(halign[mtext.get_ha()])
text_args.append(valign[mtext.get_va()])
else:
# if not, use the text layout provided by matplotlib
text_args.append("x=%fin" % (x * f))
text_args.append("y=%fin" % (y * f))
text_args.append("left")
text_args.append("base")
if angle != 0:
text_args.append("rotate=%f" % angle)
writeln(self.fh, r"\pgftext[%s]{%s}" % (",".join(text_args), s))
writeln(self.fh, r"\end{pgfscope}")
def get_text_width_height_descent(self, s, prop, ismath):
# check if the math is supposed to be displaystyled
s = common_texification(s)
# get text metrics in units of latex pt, convert to display units
w, h, d = self.latexManager.get_width_height_descent(s, prop)
# TODO: this should be latex_pt_to_in instead of mpl_pt_to_in
# but having a little bit more space around the text looks better,
# plus the bounding box reported by LaTeX is VERY narrow
f = mpl_pt_to_in * self.dpi
return w * f, h * f, d * f
def flipy(self):
return False
def get_canvas_width_height(self):
return self.figure.get_figwidth(), self.figure.get_figheight()
def points_to_pixels(self, points):
return points * mpl_pt_to_in * self.dpi
def new_gc(self):
return GraphicsContextPgf()
class GraphicsContextPgf(GraphicsContextBase):
pass
########################################################################
def draw_if_interactive():
pass
def new_figure_manager(num, *args, **kwargs):
"""
Create a new figure manager instance
"""
# if a main-level app must be created, this is the usual place to
# do it -- see backend_wx, backend_wxagg and backend_tkagg for
# examples. Not all GUIs require explicit instantiation of a
# main-level app (e.g. backend_gtk, backend_gtkagg) for pylab
FigureClass = kwargs.pop('FigureClass', Figure)
thisFig = FigureClass(*args, **kwargs)
return new_figure_manager_given_figure(num, thisFig)
def new_figure_manager_given_figure(num, figure):
"""
Create a new figure manager instance for the given figure.
"""
canvas = FigureCanvasPgf(figure)
manager = FigureManagerPgf(canvas, num)
return manager
class TmpDirCleaner(object):
remaining_tmpdirs = set()
@staticmethod
def add(tmpdir):
TmpDirCleaner.remaining_tmpdirs.add(tmpdir)
@staticmethod
def cleanup_remaining_tmpdirs():
for tmpdir in TmpDirCleaner.remaining_tmpdirs:
try:
shutil.rmtree(tmpdir)
except:
sys.stderr.write("error deleting tmp directory %s\n" % tmpdir)
class FigureCanvasPgf(FigureCanvasBase):
filetypes = {"pgf": "LaTeX PGF picture",
"pdf": "LaTeX compiled PGF picture",
"png": "Portable Network Graphics", }
def get_default_filetype(self):
return 'pdf'
def _print_pgf_to_fh(self, fh, *args, **kwargs):
if kwargs.get("dryrun", False):
renderer = RendererPgf(self.figure, None, dummy=True)
self.figure.draw(renderer)
return
header_text = """%% Creator: Matplotlib, PGF backend
%%
%% To include the figure in your LaTeX document, write
%% \\input{<filename>.pgf}
%%
%% Make sure the required packages are loaded in your preamble
%% \\usepackage{pgf}
%%
%% Figures using additional raster images can only be included by \input if
%% they are in the same directory as the main LaTeX file. For loading figures
%% from other directories you can use the `import` package
%% \\usepackage{import}
%% and then include the figures with
%% \\import{<path to file>}{<filename>.pgf}
%%
"""
# append the preamble used by the backend as a comment for debugging
header_info_preamble = ["%% Matplotlib used the following preamble"]
for line in get_preamble().splitlines():
header_info_preamble.append("%% " + line)
for line in get_fontspec().splitlines():
header_info_preamble.append("%% " + line)
header_info_preamble.append("%%")
header_info_preamble = "\n".join(header_info_preamble)
# get figure size in inch
w, h = self.figure.get_figwidth(), self.figure.get_figheight()
dpi = self.figure.get_dpi()
# create pgfpicture environment and write the pgf code
fh.write(header_text)
fh.write(header_info_preamble)
fh.write("\n")
writeln(fh, r"\begingroup")
writeln(fh, r"\makeatletter")
writeln(fh, r"\begin{pgfpicture}")
writeln(fh, r"\pgfpathrectangle{\pgfpointorigin}{\pgfqpoint{%fin}{%fin}}" % (w, h))
writeln(fh, r"\pgfusepath{use as bounding box, clip}")
_bbox_inches_restore = kwargs.pop("bbox_inches_restore", None)
renderer = MixedModeRenderer(self.figure, w, h, dpi,
RendererPgf(self.figure, fh),
bbox_inches_restore=_bbox_inches_restore)
self.figure.draw(renderer)
# end the pgfpicture environment
writeln(fh, r"\end{pgfpicture}")
writeln(fh, r"\makeatother")
writeln(fh, r"\endgroup")
def print_pgf(self, fname_or_fh, *args, **kwargs):
"""
Output pgf commands for drawing the figure so it can be included and
rendered in latex documents.
"""
if kwargs.get("dryrun", False):
self._print_pgf_to_fh(None, *args, **kwargs)
return
# figure out where the pgf is to be written to
if is_string_like(fname_or_fh):
with codecs.open(fname_or_fh, "w", encoding="utf-8") as fh:
self._print_pgf_to_fh(fh, *args, **kwargs)
elif is_writable_file_like(fname_or_fh):
fh = codecs.getwriter("utf-8")(fname_or_fh)
self._print_pgf_to_fh(fh, *args, **kwargs)
else:
raise ValueError("filename must be a path")
def _print_pdf_to_fh(self, fh, *args, **kwargs):
w, h = self.figure.get_figwidth(), self.figure.get_figheight()
try:
# create temporary directory for compiling the figure
tmpdir = tempfile.mkdtemp(prefix="mpl_pgf_")
fname_pgf = os.path.join(tmpdir, "figure.pgf")
fname_tex = os.path.join(tmpdir, "figure.tex")
fname_pdf = os.path.join(tmpdir, "figure.pdf")
# print figure to pgf and compile it with latex
self.print_pgf(fname_pgf, *args, **kwargs)
latex_preamble = get_preamble()
latex_fontspec = get_fontspec()
latexcode = """
\\documentclass[12pt]{minimal}
\\usepackage[paperwidth=%fin, paperheight=%fin, margin=0in]{geometry}
%s
%s
\\usepackage{pgf}
\\begin{document}
\\centering
\\input{figure.pgf}
\\end{document}""" % (w, h, latex_preamble, latex_fontspec)
with codecs.open(fname_tex, "w", "utf-8") as fh_tex:
fh_tex.write(latexcode)
texcommand = get_texcommand()
cmdargs = [texcommand, "-interaction=nonstopmode",
"-halt-on-error", "figure.tex"]
try:
check_output(cmdargs, stderr=subprocess.STDOUT, cwd=tmpdir)
except subprocess.CalledProcessError as e:
raise RuntimeError("%s was not able to process your file.\n\nFull log:\n%s" % (texcommand, e.output))
# copy file contents to target
with open(fname_pdf, "rb") as fh_src:
shutil.copyfileobj(fh_src, fh)
finally:
try:
shutil.rmtree(tmpdir)
except:
TmpDirCleaner.add(tmpdir)
def print_pdf(self, fname_or_fh, *args, **kwargs):
"""
Use LaTeX to compile a Pgf generated figure to PDF.
"""
if kwargs.get("dryrun", False):
self._print_pgf_to_fh(None, *args, **kwargs)
return
# figure out where the pdf is to be written to
if is_string_like(fname_or_fh):
with open(fname_or_fh, "wb") as fh:
self._print_pdf_to_fh(fh, *args, **kwargs)
elif is_writable_file_like(fname_or_fh):
self._print_pdf_to_fh(fname_or_fh, *args, **kwargs)
else:
raise ValueError("filename must be a path or a file-like object")
def _print_png_to_fh(self, fh, *args, **kwargs):
converter = make_pdf_to_png_converter()
try:
# create temporary directory for pdf creation and png conversion
tmpdir = tempfile.mkdtemp(prefix="mpl_pgf_")
fname_pdf = os.path.join(tmpdir, "figure.pdf")
fname_png = os.path.join(tmpdir, "figure.png")
# create pdf and try to convert it to png
self.print_pdf(fname_pdf, *args, **kwargs)
converter(fname_pdf, fname_png, dpi=self.figure.dpi)
# copy file contents to target
with open(fname_png, "rb") as fh_src:
shutil.copyfileobj(fh_src, fh)
finally:
try:
shutil.rmtree(tmpdir)
except:
TmpDirCleaner.add(tmpdir)
def print_png(self, fname_or_fh, *args, **kwargs):
"""
Use LaTeX to compile a pgf figure to pdf and convert it to png.
"""
if kwargs.get("dryrun", False):
self._print_pgf_to_fh(None, *args, **kwargs)
return
if is_string_like(fname_or_fh):
with open(fname_or_fh, "wb") as fh:
self._print_png_to_fh(fh, *args, **kwargs)
elif is_writable_file_like(fname_or_fh):
self._print_png_to_fh(fname_or_fh, *args, **kwargs)
else:
raise ValueError("filename must be a path or a file-like object")
def get_renderer(self):
return RendererPgf(self.figure, None, dummy=True)
class FigureManagerPgf(FigureManagerBase):
def __init__(self, *args):
FigureManagerBase.__init__(self, *args)
FigureCanvas = FigureCanvasPgf
FigureManager = FigureManagerPgf
def _cleanup_all():
LatexManager._cleanup_remaining_instances()
TmpDirCleaner.cleanup_remaining_tmpdirs()
atexit.register(_cleanup_all)
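# Hedged usage sketch (not part of this module): how this backend is normally
# selected and driven. "pgf.texsystem" and "pgf.rcfonts" are the rc keys read by
# get_texcommand() and get_fontspec() above, and the output format is picked
# from the file extension (.pgf, .pdf or .png).
def _pgf_backend_demo():
    import matplotlib
    matplotlib.use("pgf")
    matplotlib.rcParams["pgf.texsystem"] = "xelatex"
    matplotlib.rcParams["pgf.rcfonts"] = False
    import matplotlib.pyplot as plt
    fig = plt.figure(figsize=(3, 2))
    ax = fig.add_subplot(111)
    ax.plot([0, 1], [0, 1])
    fig.savefig("figure.pgf")  # .pdf and .png are also handled by this backend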
| mit |
yhsunshining/fuzzy-c-means | src/transfer.py | 1 | 12064 | import numpy as np
import pandas as pd
from optmize import *
import cv2
from sklearn import metrics
from sklearn.metrics import pairwise_distances
def evaluate(membership,dataSet):
exp = np.array(getExpResult(membership))
index = metrics.silhouette_score(np.array(dataSet), exp, metric='euclidean')
print index
return index
class TS(TabuSearch):
def start(self, U, V, J, VQue):
_U, _V, _J = U, V, J
_accuracy = evaluate(U, self.dataSet)  # best silhouette score seen so far
curTimes = 0
_tabuLength = 0
epsilon = 1e-6
lastlocationJ = _J
while (curTimes < self.MAX_ITERATION):
locationJ = float('inf')
locationA = 0
locationU = locationV = None
neighbourhoodVs = deque([])
judge = deque([])
for i in xrange(self.maxSearchNum):
neighbourV = self.circleNeighbourhoodV(V)
neighbourhoodVs.append(neighbourV)
judge.append(self.tabuJudge(neighbourV))
neighbourhoodVs = np.array(neighbourhoodVs)
judge = np.array(judge)
if not judge.all():
neighbourhoodVs = neighbourhoodVs[judge == False]
for neighbourV in neighbourhoodVs:
temU, temV, temJ, temVQue = fcmIteration(
U, neighbourV, self.dataSet, self.m, self.c, 1)
temA = evaluate(temU, self.dataSet)
# if temJ < locationJ:
if temA > locationA:
locationU = temU
locationV = temV
locationJ = temJ
locationA = temA
locationVQue = temVQue
# if locationJ < _J:
if locationA <= _accuracy:
self.neighbourhoodTimes = max(1, self.neighbourhoodTimes - 1)
else:
if locationA > _accuracy:
_U, _V, _J, _accuracy, _VQue = locationU, locationV, locationJ, locationA, locationVQue
self.neighbourhoodTimes = min(10, self.neighbourhoodTimes + 1)
U, V = locationU, locationV
if _tabuLength < self.tabuLength:
self.addTabuObj(locationV)
_tabuLength += 1
else:
self.updateList(locationV)
# if -epsilon <= lastlocationJ - locationJ <= epsilon:
# break
# else:
# lastlocationJ = locationJ
curTimes += 1
return _U, _V, _J
def convert2D(origin, shape, colNum=1):
return origin.reshape(shape[0] * shape[1], colNum)
def convert3D(origin, shape, colNum=3):
return origin.reshape(shape[0], shape[1], colNum)
def rangeMat(data):
res = np.zeros((2, data.shape[1]))
res[0] = np.max(data, axis=0)
res[1] = np.min(data, axis=0)
return res
def cosMat(verctor, mat):
dot = np.sum(verctor * mat, axis=1)
norm = np.linalg.norm(mat, axis=1) * np.linalg.norm(verctor)
return np.float_(dot) / norm
def inv_normalization(data, rangeMat):
data = data * (rangeMat[0] - rangeMat[1]) + rangeMat[1]
return data
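# Illustrative note: rangeMat() stores per-column maxima (row 0) and minima
# (row 1), so inv_normalization(normalization(data), rangeMat(data)) is expected
# to restore the original column ranges (assuming normalization() from optmize
# maps each column into [0, 1]).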
def matchByFrequency(originExp, targetExp):
""" match by the order of the number of points within the cluster """
series = pd.Series(targetExp)
targetKeys = series.value_counts().keys()
series = pd.Series(originExp)
originKeys = series.value_counts().keys()
originLength = len(originKeys)
targetLength = len(targetKeys)
matchMap = {}
for i in range(originLength):
matchMap[originKeys[i]] = targetKeys[i % originLength]
return matchMap
def stdMat(data, V, exp):
c = V.shape[0]
mat = np.zeros(V.shape)
for i in range(c):
mask = exp == i
dataSlice = data[mask]
mat[i, :] = np.std(dataSlice, axis=0)
return mat
def matchByCos(origin, target, seq=None):
""" match by the cosine similarity of (means,std) vector """
originLen = len(origin)
targetLen = len(target)
targetDict = {}
matchMap = {}
for i in range(targetLen):
targetDict[i] = True
_targetDict = targetDict.copy()
iteration = seq if seq else range(originLen)
for i in iteration:
keys = targetDict.keys()
if not len(keys):
targetDict = _targetDict.copy()
keys = targetDict.keys()
mat = cosMat(origin[i], target[[int(item) for item in targetDict]])
selectIndex = np.argmax(mat)
""" match by the Euclidean distance """
# mat = distanceMat(
# origin[i].reshape(1,origin.shape[1]),
# target[[int(item) for item in targetDict]])
# selectIndex = np.argmin(mat)
matchMap[i] = int(keys[selectIndex])
del targetDict[keys[selectIndex]]
return matchMap
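# Illustrative note: the returned dict maps each origin cluster index to its
# most similar target cluster, e.g. {0: 2, 1: 0, 2: 1}; once every target has
# been used the candidate pool is refilled, so targets can be reused when there
# are more origin clusters than target clusters.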
def matchByChannel(origin, target):
""" match by the order of the color channel """
originIndex = np.argsort(origin[:, 0])
targetIndex = np.argsort(target[:, 0])
originLength = len(originIndex)
targetLength = len(targetIndex)
matchMap = {}
for i in range(originLength):
ti = i if i < targetLength else targetLength - 1
matchMap[originIndex[i]] = targetIndex[ti]
return matchMap
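# Illustrative example (not part of the original source): centers are paired by
# the rank of their first channel value; when the origin has more clusters than
# the target, the extra ones fall back to the last target cluster.
#
#   originV = np.array([[50.], [10.], [90.]])   # sort order: 1, 0, 2
#   targetV = np.array([[30.], [70.]])          # sort order: 0, 1
#   matchByChannel(originV, targetV)            # -> {1: 0, 0: 1, 2: 1}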
def transferInRGB(originExp, originKeys, targetV, targetKeys):
""" color transfer in RGB space instead of lab space """
img = cv2.imread(originImagePath, 0)
img = cv2.equalizeHist(img)
out = convert2D(img, img.shape, 1) * np.ones(3)
out = normalization(out) * 0.5 + 0.5
for i in range(c):
mask = originExp == originKeys[i]
out[mask] = out[mask] * targetV[targetKeys[i]]
out[:, [0, -1]] = out[:, [-1, 0]]
return out
def data2image(data, shape, type='lab'):
""" convert a 2-d dataSet to a image data
Args:
data: 2-d dataSet, every item is the color of a pixel
shape: the shape of the out image
*type: the color space of the data
Returns:
a image mat in RGB color space
"""
data = np.uint8(data)
data = convert3D(data, shape, 3)
return cv2.cvtColor(data, cv2.COLOR_LAB2RGB)
def transfer(originU, originV, originData, originRange, targetU, targetV,
targetData, targetRange):
c = originV.shape[0]
targetV = inv_normalization(targetV, targetRange)
targetData = inv_normalization(targetData, targetRange)
originV = inv_normalization(originV, originRange)
originData = inv_normalization(originData, originRange)
targetExp = getExpResult(targetU)
originExp = getExpResult(originU)
originStd = stdMat(originData, originV, originExp)
targetStd = stdMat(targetData, targetV, targetExp)
# use l channel to match
series = pd.Series(originExp)
originKeys = series.value_counts().keys()
# matchMap = matchByCos(originStd, targetStd, originKeys.tolist())
# matchMap = matchByCos(
# np.column_stack((normalization(originV, axis=None), normalization(
# originStd, axis=None))),
# np.column_stack((normalization(targetV, axis=None), normalization(
# targetStd, axis=None))), originKeys.tolist())
matchMap = matchByChannel(originV, targetV)
# print targetV
# use frequency to match
# matchMap = matchByFrequency(originExp,targetExp)
img = cv2.imread(originImagePath)
# out = convert3D(img,img.shape,1)* np.ones(3)
img = cv2.cvtColor(img, cv2.COLOR_BGR2LAB)
out = np.float_(convert2D(img, img.shape, 3))
out_color = out.copy()
for i in range(c):
originMask = originExp == i
targetMask = targetExp == matchMap[i]
originSlice = out[originMask]
originStd = np.std(originSlice, axis=0)
originMeans = originV[i]
targetMeans = targetV[matchMap[i]]
out[originMask] = (
originSlice - originMeans[0:3]
) * targetStd[matchMap[i]][0:3] / originStd[0:3] + targetMeans[0:3]
out_color[originMask] = np.zeros(
originSlice.shape) + targetV[matchMap[i]][0:3]
# normalization
outRange = rangeMat(out)
outRange[outRange > 255] = 255
outRange[outRange < 0] = 0
out = normalization(out)
out = inv_normalization(out, outRange)
out = data2image(out, img.shape)
out_color = data2image(out_color, img.shape)
timeString = time.strftime('%Y_%m_%d-%H%M%S')
plt.imsave('./' + timeString + '.color.png', out_color)
plt.imsave('./' + timeString + '.transfer.png', out)
def showClustering(U, V, rangeMat, data, shape):
""" save color segement result of the origin image """
V = inv_normalization(V, rangeMat)
data = inv_normalization(data, rangeMat)
exp = getExpResult(U)
for i in range(V.shape[0]):
mask = exp == i
dataSlice = data[mask]
data[mask] = np.zeros(dataSlice.shape) + V[i]
data = np.uint8(data[:, 0:3])
data = convert3D(data, shape, 3)
data = cv2.cvtColor(data, cv2.COLOR_LAB2RGB)
plt.imsave('./' + time.strftime('%Y_%m_%d-%H%M%S') + '.clustering.png',
data)
def loadImageData(url, meanshift=False, position=True):
""" load data form a image
Args:
url: the path of the image
meanshift: bool, whether use meanshift on the image
position: bool, whether add position information to the dataSet
Returns:
a tuple(dataSet,shape) consist of image information and the image shape
the dataSet include the color information of the image in lab color space,
the location(x,y) of every pixels is optional
"""
img = cv2.imread(url)
img = cv2.cvtColor(img, cv2.COLOR_BGR2LAB)
if meanshift:
img = cv2.pyrMeanShiftFiltering(img, 9, 50)
out = conv2D = np.float32(convert2D(img, img.shape, 3))
if position:
out = np.zeros((conv2D.shape[0], conv2D.shape[1] + 2))
out[:, 0:-2] = conv2D
out[:, -2] = np.repeat(range(img.shape[0]), img.shape[1])
out[:, -1] = np.tile(range(img.shape[1]), img.shape[0])
return out, img.shape
if __name__ == '__main__':
originImagePath = '../images/filter/scream_200.jpg'
targetImagePath = '../images/filter/picasso_1_200.jpg'
start = time.clock()
filterData, filterShape = loadImageData(targetImagePath, True, False)
originData, originShape = loadImageData(originImagePath, True, False)
filterRange = rangeMat(filterData)
originRange = rangeMat(originData)
filterData = normalization(filterData)
originData = normalization(originData)
c = int(5) # number of cluster
m = int(2) # power of membership
originU, originV, originJ, originVque = fcm(originData, m, c, 1)
print evaluate(originU,originData)
# targetU, targetV, targetJ,targetVque = fcm(filterData, m, c,1)
# showClustering(originU, originV, originRange, originData, originShape)
# showClustering(targetU, targetV, filterRange, filterData, filterShape)
# transfer(originU, originV, originData, originRange, targetU, targetV,
# filterData, filterRange)
# print('before origin J:{}').format(originJ)
# print('before target J:{}').format(targetJ)
# print time.clock() - start
# start = time.clock()
# filterTs = TS(MAX_ITERATION=20,
# extra={'dataSet': filterData,
# 'm': m,
# 'c': c})
# targetU, targetV, targetJ = filterTs.start(targetU, targetV, targetJ, targetVque)
# originTs = TS(MAX_ITERATION=40,
# extra={'dataSet': originData,
# 'm': m,
# 'c': c})
# originU, originV, originJ = originTs.start(
# originU, originV, originJ, originVque)
# print evaluate(originU,originData)
# showClustering(originU, originV, originRange, originData, originShape)
# showClustering(targetU, targetV, filterRange, filterData, filterShape)
# transfer(originU, originV, originData, originRange, targetU, targetV,
# filterData, filterRange)
# print('after origin J:{}').format(originJ)
# print('after target J:{}').format(targetJ)
print time.clock() - start
| mit |
ivannz/study_notes | year_15_16/thesis/notebooks/utils/functions.py | 1 | 3370 | """Function for testing"""
import numpy as np
from scipy.linalg import cholesky
from sklearn.metrics.pairwise import pairwise_kernels as kernel
from sklearn.utils import check_random_state
def rosenbrock(X, random_state=None, scale=1.0, **kwargs):
"""Test function: `rosenbrock`"""
val_ = 100 * np.sum((X[:, 1:] - X[:, :-1]**2)**2, axis=1)
val_ += np.sum((X[:, :-1] - 1)**2, axis=1)
if scale > 0:
val_ /= val_.std()
val_ *= scale
return val_
def auckley(X, random_state=None, scale=1.0, **kwargs):
"""Test function: `auckley`"""
x, y = X[:, 0] * 4, X[:, 1] * 4
val_ = -20 * np.exp(-0.2 * np.sqrt((x**2 + y**2) / 2))
val_ -= np.exp((np.cos(2 * np.pi * x) + np.cos(2 * np.pi * y)) / 2)
val_ += 20 + np.e
if scale > 0:
val_ /= val_.std()
val_ *= scale
return val_
def eggholder(X, random_state=None, scale=1.0, **kwargs):
"""Test function: `eggholder`"""
x, y = X[:, 0] * 400, X[:, 1] * 400 + 47
val_ = - y * np.sin(np.sqrt(np.abs(0.5*x + y))) - x * np.sin(np.sqrt(np.abs(x - y)))
if scale > 0:
val_ /= val_.std()
val_ *= scale
return val_
def levi(X, random_state=None, scale=1.0, **kwargs):
"""Test function: `levi`"""
x, y = X[:, 0] + 1, X[:, 1] + 1
v1_ = 1 + np.sin(3 * np.pi * y)**2
v2_ = 1 + np.sin(2 * np.pi * y)**2
v1_ *= (x - 1)**2
v2_ *= (y - 1)**2
val_ = np.sin(3 * np.pi * x)**2 + v1_ + v2_
if scale > 0:
val_ /= val_.std()
val_ *= scale
return val_
def holder(X, random_state=None, scale=1.0, **kwargs):
"""Test function: `holder`"""
x, y = X[:, 0] * 4, X[:, 1] * 4
val_ = - np.abs(np.sin(x) * np.cos(y) * np.exp(np.abs(1 - np.sqrt(x**2 + y**2) / np.pi)))
if scale > 0:
val_ /= val_.std()
val_ *= scale
return val_
def schaffer(X, random_state=None, scale=1.0, **kwargs):
"""Test function: `schaffer`"""
x, y = 20 * (X[:, 0]**2), 20 * (X[:, 1]**2)
val_ = 0.5 + (np.sin(x - y)**2 - 0.5) / (1 + (x + y) / 1000)
if scale > 0:
val_ /= val_.std()
val_ *= scale
return val_
def gaussian(X, size=None, scale=1.0, random_state=None, nugget=1e-6, metric="rbf", **kwargs):
"""Generate a realisation of a Gaussian process sampled at `X`.
Parameters
----------
nugget : float, optional, default=1e-6
        Cholesky decomposition is applicable to strictly positive definite
        matrices. Since kernel matrices are only positive semi-definite,
        `nugget` adds some regularization to the kernel, which is equivalent
        to mixing in some white noise with variance `nugget` at each x in X.
"""
## Prepare the kernel matrix
Kxx = np.asfortranarray(kernel(X, metric=metric, **kwargs))
## Add some White noise and do the in-place cholesky decomposition.
Kxx[np.diag_indices_from(Kxx)] += nugget
cholesky(Kxx, lower=True, overwrite_a=True)
## Draw some independent gaussian variates.
random_state = check_random_state(random_state)
size_ = size if size is not None else tuple()
return np.dot(Kxx, scale * random_state.normal(size=(Kxx.shape[1],) + size_))
def get_functions():
"""Returns a dictionary"""
functions_ = [rosenbrock, auckley, eggholder,
levi, holder, schaffer, gaussian]
return {fn.__name__: fn for fn in functions_}
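# Minimal usage sketch (illustrative, not part of the original module): evaluate
# a named test function on a random design and draw one Gaussian-process
# realisation on the same points; `gamma` is simply forwarded to the rbf kernel.
#
#   import numpy as np
#   fns = get_functions()
#   X = np.random.RandomState(0).uniform(-1, 1, size=(100, 2))
#   y_rosen = fns["rosenbrock"](X)                        # deterministic surface
#   y_gp = fns["gaussian"](X, random_state=0, gamma=2.0)  # one GP sample path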
| mit |
johndowns/azure-quickstart-templates | quickstarts/microsoft.machinelearningservices/machine-learning-service-create-aci/prereqs/Driver.py | 19 | 1049 | import json
import numpy
from sklearn.externals import joblib
from azureml.core.model import Model
from azureml.contrib.services.aml_request import AMLRequest, rawhttp
from azureml.contrib.services.aml_response import AMLResponse
def init():
global model
model_path = Model.get_model_path('sklearn_regression_model.pkl')
model = joblib.load(model_path)
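# Illustrative note (not part of the original file): the POST branch of run()
# below expects a JSON body of the form {"data": [[<feature row>], ...]}. The
# number of features per row must match whatever sklearn_regression_model.pkl
# was trained on; the shapes and values here are assumptions, not taken from
# the actual model.
#
#   example request body : {"data": [[1.0, 2.0, 3.0, 4.0]]}
#   example response body: "[42.0]"   (a JSON-encoded list of predictions)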
@rawhttp
def run(request):
if request.method == 'GET':
respBody = str.encode(request.full_path)
return AMLResponse(respBody, 200)
elif request.method == 'POST':
try:
reqBody = request.get_data(False)
raw_data = reqBody.decode("utf-8")
data = json.loads(raw_data)['data']
data = numpy.array(data)
result = model.predict(data)
result_string = json.dumps(result.tolist())
return AMLResponse(result_string, 200)
except Exception as e:
error = str(e)
return AMLResponse(error, 500)
else:
return AMLResponse("bad request", 500) | mit |
syl20bnr/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/axes.py | 69 | 259904 | from __future__ import division, generators
import math, sys, warnings, datetime, new
import numpy as np
from numpy import ma
import matplotlib
rcParams = matplotlib.rcParams
import matplotlib.artist as martist
import matplotlib.axis as maxis
import matplotlib.cbook as cbook
import matplotlib.collections as mcoll
import matplotlib.colors as mcolors
import matplotlib.contour as mcontour
import matplotlib.dates as mdates
import matplotlib.font_manager as font_manager
import matplotlib.image as mimage
import matplotlib.legend as mlegend
import matplotlib.lines as mlines
import matplotlib.mlab as mlab
import matplotlib.patches as mpatches
import matplotlib.quiver as mquiver
import matplotlib.scale as mscale
import matplotlib.table as mtable
import matplotlib.text as mtext
import matplotlib.ticker as mticker
import matplotlib.transforms as mtransforms
iterable = cbook.iterable
is_string_like = cbook.is_string_like
def _process_plot_format(fmt):
"""
Process a matlab(TM) style color/line style format string. Return a
(*linestyle*, *color*) tuple as a result of the processing. Default
values are ('-', 'b'). Example format strings include:
* 'ko': black circles
* '.b': blue dots
* 'r--': red dashed lines
.. seealso::
:func:`~matplotlib.Line2D.lineStyles` and
:func:`~matplotlib.pyplot.colors`:
for all possible styles and color format string.
"""
linestyle = None
marker = None
color = None
# Is fmt just a colorspec?
try:
color = mcolors.colorConverter.to_rgb(fmt)
return linestyle, marker, color # Yes.
except ValueError:
pass # No, not just a color.
# handle the multi char special cases and strip them from the
# string
if fmt.find('--')>=0:
linestyle = '--'
fmt = fmt.replace('--', '')
if fmt.find('-.')>=0:
linestyle = '-.'
fmt = fmt.replace('-.', '')
if fmt.find(' ')>=0:
linestyle = 'None'
fmt = fmt.replace(' ', '')
chars = [c for c in fmt]
for c in chars:
if c in mlines.lineStyles:
if linestyle is not None:
raise ValueError(
'Illegal format string "%s"; two linestyle symbols' % fmt)
linestyle = c
elif c in mlines.lineMarkers:
if marker is not None:
raise ValueError(
'Illegal format string "%s"; two marker symbols' % fmt)
marker = c
elif c in mcolors.colorConverter.colors:
if color is not None:
raise ValueError(
'Illegal format string "%s"; two color symbols' % fmt)
color = c
else:
raise ValueError(
'Unrecognized character %c in format string' % c)
if linestyle is None and marker is None:
linestyle = rcParams['lines.linestyle']
if linestyle is None:
linestyle = 'None'
if marker is None:
marker = 'None'
return linestyle, marker, color
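# Illustrative examples (not part of the original source), worked by hand from
# the parsing logic above:
#
#   _process_plot_format('r--')   # -> ('--', 'None', 'r')
#   _process_plot_format('ko')    # -> ('None', 'o', 'k')
#   _process_plot_format('g')     # bare colorspec -> (None, None, (0.0, 0.5, 0.0))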
def set_default_color_cycle(clist):
"""
Change the default cycle of colors that will be used by the plot
command. This must be called before creating the
:class:`Axes` to which it will apply; it will
apply to all future axes.
*clist* is a sequence of mpl color specifiers
"""
_process_plot_var_args.defaultColors = clist[:]
rcParams['lines.color'] = clist[0]
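# Minimal usage sketch (illustrative, not part of the original source): the
# call must happen before the Axes that should pick up the new cycle exist.
#
#   import matplotlib.axes as maxes
#   maxes.set_default_color_cycle(['c', 'm', 'y', 'k'])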
class _process_plot_var_args:
"""
Process variable length arguments to the plot command, so that
plot commands like the following are supported::
plot(t, s)
plot(t1, s1, t2, s2)
plot(t1, s1, 'ko', t2, s2)
plot(t1, s1, 'ko', t2, s2, 'r--', t3, e3)
an arbitrary number of *x*, *y*, *fmt* are allowed
"""
defaultColors = ['b','g','r','c','m','y','k']
def __init__(self, axes, command='plot'):
self.axes = axes
self.command = command
self._clear_color_cycle()
def _clear_color_cycle(self):
self.colors = _process_plot_var_args.defaultColors[:]
# if the default line color is a color format string, move it up
        # in the queue
try: ind = self.colors.index(rcParams['lines.color'])
except ValueError:
self.firstColor = rcParams['lines.color']
else:
self.colors[0], self.colors[ind] = self.colors[ind], self.colors[0]
self.firstColor = self.colors[0]
self.Ncolors = len(self.colors)
self.count = 0
def set_color_cycle(self, clist):
self.colors = clist[:]
self.firstColor = self.colors[0]
self.Ncolors = len(self.colors)
self.count = 0
def _get_next_cycle_color(self):
if self.count==0:
color = self.firstColor
else:
color = self.colors[int(self.count % self.Ncolors)]
self.count += 1
return color
def __call__(self, *args, **kwargs):
if self.axes.xaxis is not None and self.axes.yaxis is not None:
xunits = kwargs.pop( 'xunits', self.axes.xaxis.units)
yunits = kwargs.pop( 'yunits', self.axes.yaxis.units)
if xunits!=self.axes.xaxis.units:
self.axes.xaxis.set_units(xunits)
if yunits!=self.axes.yaxis.units:
self.axes.yaxis.set_units(yunits)
ret = self._grab_next_args(*args, **kwargs)
return ret
def set_lineprops(self, line, **kwargs):
assert self.command == 'plot', 'set_lineprops only works with "plot"'
for key, val in kwargs.items():
funcName = "set_%s"%key
if not hasattr(line,funcName):
raise TypeError, 'There is no line property "%s"'%key
func = getattr(line,funcName)
func(val)
def set_patchprops(self, fill_poly, **kwargs):
assert self.command == 'fill', 'set_patchprops only works with "fill"'
for key, val in kwargs.items():
funcName = "set_%s"%key
if not hasattr(fill_poly,funcName):
raise TypeError, 'There is no patch property "%s"'%key
func = getattr(fill_poly,funcName)
func(val)
def _xy_from_y(self, y):
if self.axes.yaxis is not None:
b = self.axes.yaxis.update_units(y)
if b: return np.arange(len(y)), y, False
if not ma.isMaskedArray(y):
y = np.asarray(y)
if len(y.shape) == 1:
y = y[:,np.newaxis]
nr, nc = y.shape
x = np.arange(nr)
if len(x.shape) == 1:
x = x[:,np.newaxis]
return x,y, True
def _xy_from_xy(self, x, y):
if self.axes.xaxis is not None and self.axes.yaxis is not None:
bx = self.axes.xaxis.update_units(x)
by = self.axes.yaxis.update_units(y)
# right now multicol is not supported if either x or y are
# unit enabled but this can be fixed..
if bx or by: return x, y, False
x = ma.asarray(x)
y = ma.asarray(y)
if len(x.shape) == 1:
x = x[:,np.newaxis]
if len(y.shape) == 1:
y = y[:,np.newaxis]
nrx, ncx = x.shape
nry, ncy = y.shape
assert nrx == nry, 'Dimensions of x and y are incompatible'
if ncx == ncy:
return x, y, True
if ncx == 1:
x = np.repeat(x, ncy, axis=1)
if ncy == 1:
y = np.repeat(y, ncx, axis=1)
assert x.shape == y.shape, 'Dimensions of x and y are incompatible'
return x, y, True
def _plot_1_arg(self, y, **kwargs):
assert self.command == 'plot', 'fill needs at least 2 arguments'
ret = []
x, y, multicol = self._xy_from_y(y)
if multicol:
for j in xrange(y.shape[1]):
color = self._get_next_cycle_color()
seg = mlines.Line2D(x, y[:,j],
color = color,
axes=self.axes,
)
self.set_lineprops(seg, **kwargs)
ret.append(seg)
else:
color = self._get_next_cycle_color()
seg = mlines.Line2D(x, y,
color = color,
axes=self.axes,
)
self.set_lineprops(seg, **kwargs)
ret.append(seg)
return ret
def _plot_2_args(self, tup2, **kwargs):
ret = []
if is_string_like(tup2[1]):
assert self.command == 'plot', ('fill needs at least 2 non-string '
'arguments')
y, fmt = tup2
x, y, multicol = self._xy_from_y(y)
linestyle, marker, color = _process_plot_format(fmt)
def makeline(x, y):
_color = color
if _color is None:
_color = self._get_next_cycle_color()
seg = mlines.Line2D(x, y,
color=_color,
linestyle=linestyle, marker=marker,
axes=self.axes,
)
self.set_lineprops(seg, **kwargs)
ret.append(seg)
if multicol:
for j in xrange(y.shape[1]):
makeline(x[:,j], y[:,j])
else:
makeline(x, y)
return ret
else:
x, y = tup2
x, y, multicol = self._xy_from_xy(x, y)
def makeline(x, y):
color = self._get_next_cycle_color()
seg = mlines.Line2D(x, y,
color=color,
axes=self.axes,
)
self.set_lineprops(seg, **kwargs)
ret.append(seg)
def makefill(x, y):
x = self.axes.convert_xunits(x)
y = self.axes.convert_yunits(y)
facecolor = self._get_next_cycle_color()
seg = mpatches.Polygon(np.hstack(
(x[:,np.newaxis],y[:,np.newaxis])),
facecolor = facecolor,
fill=True,
closed=closed
)
self.set_patchprops(seg, **kwargs)
ret.append(seg)
if self.command == 'plot':
func = makeline
else:
closed = kwargs.get('closed', True)
func = makefill
if multicol:
for j in xrange(y.shape[1]):
func(x[:,j], y[:,j])
else:
func(x, y)
return ret
def _plot_3_args(self, tup3, **kwargs):
ret = []
x, y, fmt = tup3
x, y, multicol = self._xy_from_xy(x, y)
linestyle, marker, color = _process_plot_format(fmt)
def makeline(x, y):
_color = color
if _color is None:
_color = self._get_next_cycle_color()
seg = mlines.Line2D(x, y,
color=_color,
linestyle=linestyle, marker=marker,
axes=self.axes,
)
self.set_lineprops(seg, **kwargs)
ret.append(seg)
def makefill(x, y):
facecolor = color
x = self.axes.convert_xunits(x)
y = self.axes.convert_yunits(y)
seg = mpatches.Polygon(np.hstack(
(x[:,np.newaxis],y[:,np.newaxis])),
facecolor = facecolor,
fill=True,
closed=closed
)
self.set_patchprops(seg, **kwargs)
ret.append(seg)
if self.command == 'plot':
func = makeline
else:
closed = kwargs.get('closed', True)
func = makefill
if multicol:
for j in xrange(y.shape[1]):
func(x[:,j], y[:,j])
else:
func(x, y)
return ret
def _grab_next_args(self, *args, **kwargs):
remaining = args
while 1:
if len(remaining)==0: return
if len(remaining)==1:
for seg in self._plot_1_arg(remaining[0], **kwargs):
yield seg
remaining = []
continue
if len(remaining)==2:
for seg in self._plot_2_args(remaining, **kwargs):
yield seg
remaining = []
continue
if len(remaining)==3:
if not is_string_like(remaining[2]):
raise ValueError, 'third arg must be a format string'
for seg in self._plot_3_args(remaining, **kwargs):
yield seg
remaining=[]
continue
if is_string_like(remaining[2]):
for seg in self._plot_3_args(remaining[:3], **kwargs):
yield seg
remaining=remaining[3:]
else:
for seg in self._plot_2_args(remaining[:2], **kwargs):
yield seg
remaining=remaining[2:]
class Axes(martist.Artist):
"""
The :class:`Axes` contains most of the figure elements:
:class:`~matplotlib.axis.Axis`, :class:`~matplotlib.axis.Tick`,
:class:`~matplotlib.lines.Line2D`, :class:`~matplotlib.text.Text`,
:class:`~matplotlib.patches.Polygon`, etc., and sets the
coordinate system.
The :class:`Axes` instance supports callbacks through a callbacks
attribute which is a :class:`~matplotlib.cbook.CallbackRegistry`
instance. The events you can connect to are 'xlim_changed' and
'ylim_changed' and the callback will be called with func(*ax*)
where *ax* is the :class:`Axes` instance.
"""
name = "rectilinear"
_shared_x_axes = cbook.Grouper()
_shared_y_axes = cbook.Grouper()
def __str__(self):
return "Axes(%g,%g;%gx%g)" % tuple(self._position.bounds)
def __init__(self, fig, rect,
axisbg = None, # defaults to rc axes.facecolor
frameon = True,
sharex=None, # use Axes instance's xaxis info
sharey=None, # use Axes instance's yaxis info
label='',
**kwargs
):
"""
Build an :class:`Axes` instance in
:class:`~matplotlib.figure.Figure` *fig* with
*rect=[left, bottom, width, height]* in
:class:`~matplotlib.figure.Figure` coordinates
Optional keyword arguments:
================ =========================================
Keyword Description
================ =========================================
*adjustable* [ 'box' | 'datalim' ]
*alpha* float: the alpha transparency
*anchor* [ 'C', 'SW', 'S', 'SE', 'E', 'NE', 'N',
'NW', 'W' ]
*aspect* [ 'auto' | 'equal' | aspect_ratio ]
*autoscale_on* [ *True* | *False* ] whether or not to
autoscale the *viewlim*
*axis_bgcolor* any matplotlib color, see
:func:`~matplotlib.pyplot.colors`
*axisbelow* draw the grids and ticks below the other
artists
*cursor_props* a (*float*, *color*) tuple
*figure* a :class:`~matplotlib.figure.Figure`
instance
*frame_on* a boolean - draw the axes frame
*label* the axes label
*navigate* [ *True* | *False* ]
*navigate_mode* [ 'PAN' | 'ZOOM' | None ] the navigation
toolbar button status
*position* [left, bottom, width, height] in
class:`~matplotlib.figure.Figure` coords
*sharex* an class:`~matplotlib.axes.Axes` instance
to share the x-axis with
*sharey* an class:`~matplotlib.axes.Axes` instance
to share the y-axis with
*title* the title string
*visible* [ *True* | *False* ] whether the axes is
visible
*xlabel* the xlabel
*xlim* (*xmin*, *xmax*) view limits
*xscale* [%(scale)s]
*xticklabels* sequence of strings
*xticks* sequence of floats
*ylabel* the ylabel strings
*ylim* (*ymin*, *ymax*) view limits
*yscale* [%(scale)s]
*yticklabels* sequence of strings
*yticks* sequence of floats
================ =========================================
""" % {'scale': ' | '.join([repr(x) for x in mscale.get_scale_names()])}
martist.Artist.__init__(self)
if isinstance(rect, mtransforms.Bbox):
self._position = rect
else:
self._position = mtransforms.Bbox.from_bounds(*rect)
self._originalPosition = self._position.frozen()
self.set_axes(self)
self.set_aspect('auto')
self._adjustable = 'box'
self.set_anchor('C')
self._sharex = sharex
self._sharey = sharey
if sharex is not None:
self._shared_x_axes.join(self, sharex)
if sharex._adjustable == 'box':
sharex._adjustable = 'datalim'
#warnings.warn(
# 'shared axes: "adjustable" is being changed to "datalim"')
self._adjustable = 'datalim'
if sharey is not None:
self._shared_y_axes.join(self, sharey)
if sharey._adjustable == 'box':
sharey._adjustable = 'datalim'
#warnings.warn(
# 'shared axes: "adjustable" is being changed to "datalim"')
self._adjustable = 'datalim'
self.set_label(label)
self.set_figure(fig)
# this call may differ for non-sep axes, eg polar
self._init_axis()
if axisbg is None: axisbg = rcParams['axes.facecolor']
self._axisbg = axisbg
self._frameon = frameon
self._axisbelow = rcParams['axes.axisbelow']
self._hold = rcParams['axes.hold']
self._connected = {} # a dict from events to (id, func)
self.cla()
# funcs used to format x and y - fall back on major formatters
self.fmt_xdata = None
self.fmt_ydata = None
self.set_cursor_props((1,'k')) # set the cursor properties for axes
self._cachedRenderer = None
self.set_navigate(True)
self.set_navigate_mode(None)
if len(kwargs): martist.setp(self, **kwargs)
if self.xaxis is not None:
self._xcid = self.xaxis.callbacks.connect('units finalize',
self.relim)
if self.yaxis is not None:
self._ycid = self.yaxis.callbacks.connect('units finalize',
self.relim)
def get_window_extent(self, *args, **kwargs):
'''
get the axes bounding box in display space; *args* and
*kwargs* are empty
'''
return self.bbox
def _init_axis(self):
"move this out of __init__ because non-separable axes don't use it"
self.xaxis = maxis.XAxis(self)
self.yaxis = maxis.YAxis(self)
self._update_transScale()
def set_figure(self, fig):
"""
Set the class:`~matplotlib.axes.Axes` figure
accepts a class:`~matplotlib.figure.Figure` instance
"""
martist.Artist.set_figure(self, fig)
self.bbox = mtransforms.TransformedBbox(self._position, fig.transFigure)
#these will be updated later as data is added
self.dataLim = mtransforms.Bbox.unit()
self.viewLim = mtransforms.Bbox.unit()
self.transScale = mtransforms.TransformWrapper(
mtransforms.IdentityTransform())
self._set_lim_and_transforms()
def _set_lim_and_transforms(self):
"""
set the *dataLim* and *viewLim*
:class:`~matplotlib.transforms.Bbox` attributes and the
*transScale*, *transData*, *transLimits* and *transAxes*
transformations.
"""
self.transAxes = mtransforms.BboxTransformTo(self.bbox)
# Transforms the x and y axis separately by a scale factor
# It is assumed that this part will have non-linear components
self.transScale = mtransforms.TransformWrapper(
mtransforms.IdentityTransform())
# An affine transformation on the data, generally to limit the
# range of the axes
self.transLimits = mtransforms.BboxTransformFrom(
mtransforms.TransformedBbox(self.viewLim, self.transScale))
# The parentheses are important for efficiency here -- they
# group the last two (which are usually affines) separately
# from the first (which, with log-scaling can be non-affine).
self.transData = self.transScale + (self.transLimits + self.transAxes)
self._xaxis_transform = mtransforms.blended_transform_factory(
self.axes.transData, self.axes.transAxes)
self._yaxis_transform = mtransforms.blended_transform_factory(
self.axes.transAxes, self.axes.transData)
def get_xaxis_transform(self):
"""
Get the transformation used for drawing x-axis labels, ticks
and gridlines. The x-direction is in data coordinates and the
y-direction is in axis coordinates.
.. note::
This transformation is primarily used by the
:class:`~matplotlib.axis.Axis` class, and is meant to be
overridden by new kinds of projections that may need to
place axis elements in different locations.
"""
return self._xaxis_transform
def get_xaxis_text1_transform(self, pad_points):
"""
Get the transformation used for drawing x-axis labels, which
will add the given amount of padding (in points) between the
axes and the label. The x-direction is in data coordinates
and the y-direction is in axis coordinates. Returns a
3-tuple of the form::
(transform, valign, halign)
where *valign* and *halign* are requested alignments for the
text.
.. note::
This transformation is primarily used by the
:class:`~matplotlib.axis.Axis` class, and is meant to be
overridden by new kinds of projections that may need to
place axis elements in different locations.
"""
return (self._xaxis_transform +
mtransforms.ScaledTranslation(0, -1 * pad_points / 72.0,
self.figure.dpi_scale_trans),
"top", "center")
def get_xaxis_text2_transform(self, pad_points):
"""
Get the transformation used for drawing the secondary x-axis
labels, which will add the given amount of padding (in points)
between the axes and the label. The x-direction is in data
coordinates and the y-direction is in axis coordinates.
Returns a 3-tuple of the form::
(transform, valign, halign)
where *valign* and *halign* are requested alignments for the
text.
.. note::
This transformation is primarily used by the
:class:`~matplotlib.axis.Axis` class, and is meant to be
overridden by new kinds of projections that may need to
place axis elements in different locations.
"""
return (self._xaxis_transform +
mtransforms.ScaledTranslation(0, pad_points / 72.0,
self.figure.dpi_scale_trans),
"bottom", "center")
def get_yaxis_transform(self):
"""
Get the transformation used for drawing y-axis labels, ticks
and gridlines. The x-direction is in axis coordinates and the
y-direction is in data coordinates.
.. note::
This transformation is primarily used by the
:class:`~matplotlib.axis.Axis` class, and is meant to be
overridden by new kinds of projections that may need to
place axis elements in different locations.
"""
return self._yaxis_transform
def get_yaxis_text1_transform(self, pad_points):
"""
Get the transformation used for drawing y-axis labels, which
will add the given amount of padding (in points) between the
axes and the label. The x-direction is in axis coordinates
and the y-direction is in data coordinates. Returns a 3-tuple
of the form::
(transform, valign, halign)
where *valign* and *halign* are requested alignments for the
text.
.. note::
This transformation is primarily used by the
:class:`~matplotlib.axis.Axis` class, and is meant to be
overridden by new kinds of projections that may need to
place axis elements in different locations.
"""
return (self._yaxis_transform +
mtransforms.ScaledTranslation(-1 * pad_points / 72.0, 0,
self.figure.dpi_scale_trans),
"center", "right")
def get_yaxis_text2_transform(self, pad_points):
"""
Get the transformation used for drawing the secondary y-axis
labels, which will add the given amount of padding (in points)
between the axes and the label. The x-direction is in axis
coordinates and the y-direction is in data coordinates.
Returns a 3-tuple of the form::
(transform, valign, halign)
where *valign* and *halign* are requested alignments for the
text.
.. note::
This transformation is primarily used by the
:class:`~matplotlib.axis.Axis` class, and is meant to be
overridden by new kinds of projections that may need to
place axis elements in different locations.
"""
return (self._yaxis_transform +
mtransforms.ScaledTranslation(pad_points / 72.0, 0,
self.figure.dpi_scale_trans),
"center", "left")
def _update_transScale(self):
self.transScale.set(
mtransforms.blended_transform_factory(
self.xaxis.get_transform(), self.yaxis.get_transform()))
if hasattr(self, "lines"):
for line in self.lines:
line._transformed_path.invalidate()
def get_position(self, original=False):
'Return the a copy of the axes rectangle as a Bbox'
if original:
return self._originalPosition.frozen()
else:
return self._position.frozen()
def set_position(self, pos, which='both'):
"""
Set the axes position with::
pos = [left, bottom, width, height]
in relative 0,1 coords, or *pos* can be a
:class:`~matplotlib.transforms.Bbox`
There are two position variables: one which is ultimately
used, but which may be modified by :meth:`apply_aspect`, and a
second which is the starting point for :meth:`apply_aspect`.
Optional keyword arguments:
*which*
========== ====================
value description
========== ====================
'active' to change the first
'original' to change the second
'both' to change both
========== ====================
"""
if not isinstance(pos, mtransforms.BboxBase):
pos = mtransforms.Bbox.from_bounds(*pos)
if which in ('both', 'active'):
self._position.set(pos)
if which in ('both', 'original'):
self._originalPosition.set(pos)
def reset_position(self):
'Make the original position the active position'
pos = self.get_position(original=True)
self.set_position(pos, which='active')
def _set_artist_props(self, a):
'set the boilerplate props for artists added to axes'
a.set_figure(self.figure)
if not a.is_transform_set():
a.set_transform(self.transData)
a.set_axes(self)
def _gen_axes_patch(self):
"""
Returns the patch used to draw the background of the axes. It
is also used as the clipping path for any data elements on the
axes.
In the standard axes, this is a rectangle, but in other
projections it may not be.
.. note::
Intended to be overridden by new projection types.
"""
return mpatches.Rectangle((0.0, 0.0), 1.0, 1.0)
def cla(self):
'Clear the current axes'
# Note: this is called by Axes.__init__()
self.xaxis.cla()
self.yaxis.cla()
self.ignore_existing_data_limits = True
self.callbacks = cbook.CallbackRegistry(('xlim_changed',
'ylim_changed'))
if self._sharex is not None:
# major and minor are class instances with
# locator and formatter attributes
self.xaxis.major = self._sharex.xaxis.major
self.xaxis.minor = self._sharex.xaxis.minor
x0, x1 = self._sharex.get_xlim()
self.set_xlim(x0, x1, emit=False)
self.xaxis.set_scale(self._sharex.xaxis.get_scale())
else:
self.xaxis.set_scale('linear')
if self._sharey is not None:
self.yaxis.major = self._sharey.yaxis.major
self.yaxis.minor = self._sharey.yaxis.minor
y0, y1 = self._sharey.get_ylim()
self.set_ylim(y0, y1, emit=False)
self.yaxis.set_scale(self._sharey.yaxis.get_scale())
else:
self.yaxis.set_scale('linear')
self._autoscaleon = True
self._update_transScale() # needed?
self._get_lines = _process_plot_var_args(self)
self._get_patches_for_fill = _process_plot_var_args(self, 'fill')
self._gridOn = rcParams['axes.grid']
self.lines = []
self.patches = []
self.texts = []
self.tables = []
self.artists = []
self.images = []
self.legend_ = None
self.collections = [] # collection.Collection instances
self.grid(self._gridOn)
props = font_manager.FontProperties(size=rcParams['axes.titlesize'])
self.titleOffsetTrans = mtransforms.ScaledTranslation(
0.0, 5.0 / 72.0, self.figure.dpi_scale_trans)
self.title = mtext.Text(
x=0.5, y=1.0, text='',
fontproperties=props,
verticalalignment='bottom',
horizontalalignment='center',
)
self.title.set_transform(self.transAxes + self.titleOffsetTrans)
self.title.set_clip_box(None)
self._set_artist_props(self.title)
# the patch draws the background of the axes. we want this to
# be below the other artists; the axesPatch name is
# deprecated. We use the frame to draw the edges so we are
# setting the edgecolor to None
self.patch = self.axesPatch = self._gen_axes_patch()
self.patch.set_figure(self.figure)
self.patch.set_facecolor(self._axisbg)
self.patch.set_edgecolor('None')
self.patch.set_linewidth(0)
self.patch.set_transform(self.transAxes)
# the frame draws the border around the axes and we want this
# above. this is a place holder for a more sophisticated
# artist that might just draw a left, bottom frame, or a
# centered frame, etc the axesFrame name is deprecated
self.frame = self.axesFrame = self._gen_axes_patch()
self.frame.set_figure(self.figure)
self.frame.set_facecolor('none')
self.frame.set_edgecolor(rcParams['axes.edgecolor'])
self.frame.set_linewidth(rcParams['axes.linewidth'])
self.frame.set_transform(self.transAxes)
self.frame.set_zorder(2.5)
self.axison = True
self.xaxis.set_clip_path(self.patch)
self.yaxis.set_clip_path(self.patch)
self._shared_x_axes.clean()
self._shared_y_axes.clean()
def clear(self):
'clear the axes'
self.cla()
def set_color_cycle(self, clist):
"""
Set the color cycle for any future plot commands on this Axes.
clist is a list of mpl color specifiers.
"""
self._get_lines.set_color_cycle(clist)
def ishold(self):
'return the HOLD status of the axes'
return self._hold
def hold(self, b=None):
"""
call signature::
hold(b=None)
Set the hold state. If *hold* is *None* (default), toggle the
*hold* state. Else set the *hold* state to boolean value *b*.
Examples:
* toggle hold:
>>> hold()
* turn hold on:
>>> hold(True)
* turn hold off
>>> hold(False)
When hold is True, subsequent plot commands will be added to
the current axes. When hold is False, the current axes and
figure will be cleared on the next plot command
"""
if b is None:
self._hold = not self._hold
else:
self._hold = b
def get_aspect(self):
return self._aspect
def set_aspect(self, aspect, adjustable=None, anchor=None):
"""
*aspect*
======== ================================================
value description
======== ================================================
'auto' automatic; fill position rectangle with data
'normal' same as 'auto'; deprecated
'equal' same scaling from data to plot units for x and y
num a circle will be stretched such that the height
is num times the width. aspect=1 is the same as
aspect='equal'.
======== ================================================
*adjustable*
========= ============================
value description
========= ============================
'box' change physical size of axes
'datalim' change xlim or ylim
========= ============================
*anchor*
===== =====================
value description
===== =====================
'C' centered
'SW' lower left corner
'S' middle of bottom edge
'SE' lower right corner
etc.
===== =====================
"""
if aspect in ('normal', 'auto'):
self._aspect = 'auto'
elif aspect == 'equal':
self._aspect = 'equal'
else:
self._aspect = float(aspect) # raise ValueError if necessary
if adjustable is not None:
self.set_adjustable(adjustable)
if anchor is not None:
self.set_anchor(anchor)
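    # Illustrative usage (not part of the original source), assuming `ax` is an
    # Axes instance:
    #
    #   ax.set_aspect('equal', adjustable='datalim')  # equal data scaling, keep the box
    #   ax.set_aspect(2.0)                            # circles drawn twice as tall as wide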
def get_adjustable(self):
return self._adjustable
def set_adjustable(self, adjustable):
"""
ACCEPTS: [ 'box' | 'datalim' ]
"""
if adjustable in ('box', 'datalim'):
if self in self._shared_x_axes or self in self._shared_y_axes:
if adjustable == 'box':
raise ValueError(
'adjustable must be "datalim" for shared axes')
self._adjustable = adjustable
else:
raise ValueError('argument must be "box", or "datalim"')
def get_anchor(self):
return self._anchor
def set_anchor(self, anchor):
"""
*anchor*
===== ============
value description
===== ============
'C' Center
'SW' bottom left
'S' bottom
'SE' bottom right
'E' right
'NE' top right
'N' top
'NW' top left
'W' left
===== ============
"""
if anchor in mtransforms.Bbox.coefs.keys() or len(anchor) == 2:
self._anchor = anchor
else:
raise ValueError('argument must be among %s' %
                             ', '.join(mtransforms.Bbox.coefs.keys()))
def get_data_ratio(self):
"""
Returns the aspect ratio of the raw data.
This method is intended to be overridden by new projection
types.
"""
xmin,xmax = self.get_xbound()
xsize = max(math.fabs(xmax-xmin), 1e-30)
ymin,ymax = self.get_ybound()
ysize = max(math.fabs(ymax-ymin), 1e-30)
return ysize/xsize
def apply_aspect(self, position=None):
'''
Use :meth:`_aspect` and :meth:`_adjustable` to modify the
axes box or the view limits.
'''
if position is None:
position = self.get_position(original=True)
aspect = self.get_aspect()
if aspect == 'auto':
self.set_position( position , which='active')
return
if aspect == 'equal':
A = 1
else:
A = aspect
#Ensure at drawing time that any Axes involved in axis-sharing
# does not have its position changed.
if self in self._shared_x_axes or self in self._shared_y_axes:
if self._adjustable == 'box':
self._adjustable = 'datalim'
warnings.warn(
'shared axes: "adjustable" is being changed to "datalim"')
figW,figH = self.get_figure().get_size_inches()
fig_aspect = figH/figW
if self._adjustable == 'box':
box_aspect = A * self.get_data_ratio()
pb = position.frozen()
pb1 = pb.shrunk_to_aspect(box_aspect, pb, fig_aspect)
self.set_position(pb1.anchored(self.get_anchor(), pb), 'active')
return
# reset active to original in case it had been changed
# by prior use of 'box'
self.set_position(position, which='active')
xmin,xmax = self.get_xbound()
xsize = max(math.fabs(xmax-xmin), 1e-30)
ymin,ymax = self.get_ybound()
ysize = max(math.fabs(ymax-ymin), 1e-30)
l,b,w,h = position.bounds
box_aspect = fig_aspect * (h/w)
data_ratio = box_aspect / A
y_expander = (data_ratio*xsize/ysize - 1.0)
#print 'y_expander', y_expander
# If y_expander > 0, the dy/dx viewLim ratio needs to increase
if abs(y_expander) < 0.005:
#print 'good enough already'
return
dL = self.dataLim
xr = 1.05 * dL.width
yr = 1.05 * dL.height
xmarg = xsize - xr
ymarg = ysize - yr
Ysize = data_ratio * xsize
Xsize = ysize / data_ratio
Xmarg = Xsize - xr
Ymarg = Ysize - yr
xm = 0 # Setting these targets to, e.g., 0.05*xr does not seem to help.
ym = 0
#print 'xmin, xmax, ymin, ymax', xmin, xmax, ymin, ymax
#print 'xsize, Xsize, ysize, Ysize', xsize, Xsize, ysize, Ysize
changex = (self in self._shared_y_axes
and self not in self._shared_x_axes)
changey = (self in self._shared_x_axes
and self not in self._shared_y_axes)
if changex and changey:
warnings.warn("adjustable='datalim' cannot work with shared "
"x and y axes")
return
if changex:
adjust_y = False
else:
#print 'xmarg, ymarg, Xmarg, Ymarg', xmarg, ymarg, Xmarg, Ymarg
if xmarg > xm and ymarg > ym:
adjy = ((Ymarg > 0 and y_expander < 0)
or (Xmarg < 0 and y_expander > 0))
else:
adjy = y_expander > 0
#print 'y_expander, adjy', y_expander, adjy
adjust_y = changey or adjy #(Ymarg > xmarg)
if adjust_y:
yc = 0.5*(ymin+ymax)
y0 = yc - Ysize/2.0
y1 = yc + Ysize/2.0
self.set_ybound((y0, y1))
#print 'New y0, y1:', y0, y1
#print 'New ysize, ysize/xsize', y1-y0, (y1-y0)/xsize
else:
xc = 0.5*(xmin+xmax)
x0 = xc - Xsize/2.0
x1 = xc + Xsize/2.0
self.set_xbound((x0, x1))
#print 'New x0, x1:', x0, x1
#print 'New xsize, ysize/xsize', x1-x0, ysize/(x1-x0)
def axis(self, *v, **kwargs):
'''
Convenience method for manipulating the x and y view limits
and the aspect ratio of the plot.
*kwargs* are passed on to :meth:`set_xlim` and
:meth:`set_ylim`
'''
if len(v)==1 and is_string_like(v[0]):
s = v[0].lower()
if s=='on': self.set_axis_on()
elif s=='off': self.set_axis_off()
elif s in ('equal', 'tight', 'scaled', 'normal', 'auto', 'image'):
self.set_autoscale_on(True)
self.set_aspect('auto')
self.autoscale_view()
# self.apply_aspect()
if s=='equal':
self.set_aspect('equal', adjustable='datalim')
elif s == 'scaled':
self.set_aspect('equal', adjustable='box', anchor='C')
self.set_autoscale_on(False) # Req. by Mark Bakker
elif s=='tight':
self.autoscale_view(tight=True)
self.set_autoscale_on(False)
elif s == 'image':
self.autoscale_view(tight=True)
self.set_autoscale_on(False)
self.set_aspect('equal', adjustable='box', anchor='C')
else:
raise ValueError('Unrecognized string %s to axis; '
'try on or off' % s)
xmin, xmax = self.get_xlim()
ymin, ymax = self.get_ylim()
return xmin, xmax, ymin, ymax
try: v[0]
except IndexError:
emit = kwargs.get('emit', True)
xmin = kwargs.get('xmin', None)
xmax = kwargs.get('xmax', None)
xmin, xmax = self.set_xlim(xmin, xmax, emit)
ymin = kwargs.get('ymin', None)
ymax = kwargs.get('ymax', None)
ymin, ymax = self.set_ylim(ymin, ymax, emit)
return xmin, xmax, ymin, ymax
v = v[0]
if len(v) != 4:
raise ValueError('v must contain [xmin xmax ymin ymax]')
self.set_xlim([v[0], v[1]])
self.set_ylim([v[2], v[3]])
return v
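    # Illustrative usage (not part of the original source), assuming `ax` is an
    # Axes instance:
    #
    #   ax.axis([0, 10, -1, 1])   # set xmin, xmax, ymin, ymax explicitly
    #   ax.axis('equal')          # same scaling for x and y data units
    #   ax.axis('off')            # hide the axis lines, ticks and labels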
def get_child_artists(self):
"""
Return a list of artists the axes contains.
.. deprecated:: 0.98
"""
raise DeprecationWarning('Use get_children instead')
def get_frame(self):
'Return the axes Rectangle frame'
warnings.warn('use ax.patch instead', DeprecationWarning)
return self.patch
def get_legend(self):
'Return the legend.Legend instance, or None if no legend is defined'
return self.legend_
def get_images(self):
'return a list of Axes images contained by the Axes'
return cbook.silent_list('AxesImage', self.images)
def get_lines(self):
'Return a list of lines contained by the Axes'
return cbook.silent_list('Line2D', self.lines)
def get_xaxis(self):
'Return the XAxis instance'
return self.xaxis
def get_xgridlines(self):
'Get the x grid lines as a list of Line2D instances'
return cbook.silent_list('Line2D xgridline', self.xaxis.get_gridlines())
def get_xticklines(self):
'Get the xtick lines as a list of Line2D instances'
return cbook.silent_list('Text xtickline', self.xaxis.get_ticklines())
def get_yaxis(self):
'Return the YAxis instance'
return self.yaxis
def get_ygridlines(self):
'Get the y grid lines as a list of Line2D instances'
return cbook.silent_list('Line2D ygridline', self.yaxis.get_gridlines())
def get_yticklines(self):
'Get the ytick lines as a list of Line2D instances'
return cbook.silent_list('Line2D ytickline', self.yaxis.get_ticklines())
#### Adding and tracking artists
def has_data(self):
'''Return *True* if any artists have been added to axes.
This should not be used to determine whether the *dataLim*
need to be updated, and may not actually be useful for
anything.
'''
return (
len(self.collections) +
len(self.images) +
len(self.lines) +
len(self.patches))>0
def add_artist(self, a):
'Add any :class:`~matplotlib.artist.Artist` to the axes'
a.set_axes(self)
self.artists.append(a)
self._set_artist_props(a)
a.set_clip_path(self.patch)
a._remove_method = lambda h: self.artists.remove(h)
def add_collection(self, collection, autolim=True):
'''
add a :class:`~matplotlib.collections.Collection` instance
to the axes
'''
label = collection.get_label()
if not label:
collection.set_label('collection%d'%len(self.collections))
self.collections.append(collection)
self._set_artist_props(collection)
collection.set_clip_path(self.patch)
if autolim:
if collection._paths and len(collection._paths):
self.update_datalim(collection.get_datalim(self.transData))
collection._remove_method = lambda h: self.collections.remove(h)
def add_line(self, line):
'''
Add a :class:`~matplotlib.lines.Line2D` to the list of plot
lines
'''
self._set_artist_props(line)
line.set_clip_path(self.patch)
self._update_line_limits(line)
if not line.get_label():
line.set_label('_line%d'%len(self.lines))
self.lines.append(line)
line._remove_method = lambda h: self.lines.remove(h)
def _update_line_limits(self, line):
p = line.get_path()
if p.vertices.size > 0:
self.dataLim.update_from_path(p, self.ignore_existing_data_limits,
updatex=line.x_isdata,
updatey=line.y_isdata)
self.ignore_existing_data_limits = False
def add_patch(self, p):
"""
Add a :class:`~matplotlib.patches.Patch` *p* to the list of
axes patches; the clipbox will be set to the Axes clipping
box. If the transform is not set, it will be set to
:attr:`transData`.
"""
self._set_artist_props(p)
p.set_clip_path(self.patch)
self._update_patch_limits(p)
self.patches.append(p)
p._remove_method = lambda h: self.patches.remove(h)
def _update_patch_limits(self, patch):
'update the data limits for patch *p*'
# hist can add zero height Rectangles, which is useful to keep
# the bins, counts and patches lined up, but it throws off log
# scaling. We'll ignore rects with zero height or width in
# the auto-scaling
if (isinstance(patch, mpatches.Rectangle) and
(patch.get_width()==0 or patch.get_height()==0)):
return
vertices = patch.get_path().vertices
if vertices.size > 0:
xys = patch.get_patch_transform().transform(vertices)
if patch.get_data_transform() != self.transData:
transform = (patch.get_data_transform() +
self.transData.inverted())
xys = transform.transform(xys)
self.update_datalim(xys, updatex=patch.x_isdata,
updatey=patch.y_isdata)
def add_table(self, tab):
'''
Add a :class:`~matplotlib.tables.Table` instance to the
list of axes tables
'''
self._set_artist_props(tab)
self.tables.append(tab)
tab.set_clip_path(self.patch)
tab._remove_method = lambda h: self.tables.remove(h)
def relim(self):
'recompute the data limits based on current artists'
# Collections are deliberately not supported (yet); see
# the TODO note in artists.py.
self.dataLim.ignore(True)
self.ignore_existing_data_limits = True
for line in self.lines:
self._update_line_limits(line)
for p in self.patches:
self._update_patch_limits(p)
def update_datalim(self, xys, updatex=True, updatey=True):
'Update the data lim bbox with seq of xy tups or equiv. 2-D array'
# if no data is set currently, the bbox will ignore its
# limits and set the bound to be the bounds of the xydata.
        # Otherwise, it will compute the bounds of its current data
# and the data in xydata
if iterable(xys) and not len(xys): return
if not ma.isMaskedArray(xys):
xys = np.asarray(xys)
self.dataLim.update_from_data_xy(xys, self.ignore_existing_data_limits,
updatex=updatex, updatey=updatey)
self.ignore_existing_data_limits = False
def update_datalim_numerix(self, x, y):
'Update the data lim bbox with seq of xy tups'
        # if no data is set currently, the bbox will ignore its
        # limits and set the bound to be the bounds of the xydata.
        # Otherwise, it will compute the bounds of its current data
# and the data in xydata
if iterable(x) and not len(x): return
self.dataLim.update_from_data(x, y, self.ignore_existing_data_limits)
self.ignore_existing_data_limits = False
def update_datalim_bounds(self, bounds):
'''
Update the datalim to include the given
:class:`~matplotlib.transforms.Bbox` *bounds*
'''
self.dataLim.set(mtransforms.Bbox.union([self.dataLim, bounds]))
def _process_unit_info(self, xdata=None, ydata=None, kwargs=None):
'look for unit *kwargs* and update the axis instances as necessary'
if self.xaxis is None or self.yaxis is None: return
#print 'processing', self.get_geometry()
if xdata is not None:
# we only need to update if there is nothing set yet.
if not self.xaxis.have_units():
self.xaxis.update_units(xdata)
#print '\tset from xdata', self.xaxis.units
if ydata is not None:
# we only need to update if there is nothing set yet.
if not self.yaxis.have_units():
self.yaxis.update_units(ydata)
#print '\tset from ydata', self.yaxis.units
# process kwargs 2nd since these will override default units
if kwargs is not None:
xunits = kwargs.pop( 'xunits', self.xaxis.units)
if xunits!=self.xaxis.units:
#print '\tkw setting xunits', xunits
self.xaxis.set_units(xunits)
# If the units being set imply a different converter,
# we need to update.
if xdata is not None:
self.xaxis.update_units(xdata)
yunits = kwargs.pop('yunits', self.yaxis.units)
if yunits!=self.yaxis.units:
#print '\tkw setting yunits', yunits
self.yaxis.set_units(yunits)
# If the units being set imply a different converter,
# we need to update.
if ydata is not None:
self.yaxis.update_units(ydata)
def in_axes(self, mouseevent):
'''
return *True* if the given *mouseevent* (in display coords)
is in the Axes
'''
return self.patch.contains(mouseevent)[0]
def get_autoscale_on(self):
"""
Get whether autoscaling is applied on plot commands
"""
return self._autoscaleon
def set_autoscale_on(self, b):
"""
Set whether autoscaling is applied on plot commands
accepts: [ *True* | *False* ]
"""
self._autoscaleon = b
def autoscale_view(self, tight=False, scalex=True, scaley=True):
"""
autoscale the view limits using the data limits. You can
selectively autoscale only a single axis, eg, the xaxis by
setting *scaley* to *False*. The autoscaling preserves any
axis direction reversal that has already been done.
"""
# if image data only just use the datalim
if not self._autoscaleon: return
if scalex:
xshared = self._shared_x_axes.get_siblings(self)
dl = [ax.dataLim for ax in xshared]
bb = mtransforms.BboxBase.union(dl)
x0, x1 = bb.intervalx
if scaley:
yshared = self._shared_y_axes.get_siblings(self)
dl = [ax.dataLim for ax in yshared]
bb = mtransforms.BboxBase.union(dl)
y0, y1 = bb.intervaly
if (tight or (len(self.images)>0 and
len(self.lines)==0 and
len(self.patches)==0)):
if scalex:
self.set_xbound(x0, x1)
if scaley:
self.set_ybound(y0, y1)
return
if scalex:
XL = self.xaxis.get_major_locator().view_limits(x0, x1)
self.set_xbound(XL)
if scaley:
YL = self.yaxis.get_major_locator().view_limits(y0, y1)
self.set_ybound(YL)
#### Drawing
def draw(self, renderer=None, inframe=False):
"Draw everything (plot lines, axes, labels)"
if renderer is None:
renderer = self._cachedRenderer
if renderer is None:
raise RuntimeError('No renderer defined')
if not self.get_visible(): return
renderer.open_group('axes')
self.apply_aspect()
# the patch draws the background rectangle -- the frame below
# will draw the edges
if self.axison and self._frameon:
self.patch.draw(renderer)
artists = []
if len(self.images)<=1 or renderer.option_image_nocomposite():
for im in self.images:
im.draw(renderer)
else:
# make a composite image blending alpha
# list of (mimage.Image, ox, oy)
mag = renderer.get_image_magnification()
ims = [(im.make_image(mag),0,0)
for im in self.images if im.get_visible()]
l, b, r, t = self.bbox.extents
width = mag*((round(r) + 0.5) - (round(l) - 0.5))
height = mag*((round(t) + 0.5) - (round(b) - 0.5))
im = mimage.from_images(height,
width,
ims)
im.is_grayscale = False
l, b, w, h = self.bbox.bounds
# composite images need special args so they will not
# respect z-order for now
renderer.draw_image(
round(l), round(b), im, self.bbox,
self.patch.get_path(),
self.patch.get_transform())
artists.extend(self.collections)
artists.extend(self.patches)
artists.extend(self.lines)
artists.extend(self.texts)
artists.extend(self.artists)
if self.axison and not inframe:
if self._axisbelow:
self.xaxis.set_zorder(0.5)
self.yaxis.set_zorder(0.5)
else:
self.xaxis.set_zorder(2.5)
self.yaxis.set_zorder(2.5)
artists.extend([self.xaxis, self.yaxis])
if not inframe: artists.append(self.title)
artists.extend(self.tables)
if self.legend_ is not None:
artists.append(self.legend_)
# the frame draws the edges around the axes patch -- we
# decouple these so the patch can be in the background and the
# frame in the foreground.
if self.axison and self._frameon:
artists.append(self.frame)
dsu = [ (a.zorder, i, a) for i, a in enumerate(artists)
if not a.get_animated() ]
dsu.sort()
for zorder, i, a in dsu:
a.draw(renderer)
renderer.close_group('axes')
self._cachedRenderer = renderer
def draw_artist(self, a):
"""
This method can only be used after an initial draw which
caches the renderer. It is used to efficiently update Axes
data (axis ticks, labels, etc are not updated)
"""
assert self._cachedRenderer is not None
a.draw(self._cachedRenderer)
def redraw_in_frame(self):
"""
This method can only be used after an initial draw which
caches the renderer. It is used to efficiently update Axes
data (axis ticks, labels, etc are not updated)
"""
assert self._cachedRenderer is not None
self.draw(self._cachedRenderer, inframe=True)
def get_renderer_cache(self):
return self._cachedRenderer
def __draw_animate(self):
# ignore for now; broken
if self._lastRenderer is None:
raise RuntimeError('You must first call ax.draw()')
dsu = [(a.zorder, a) for a in self.animated.keys()]
dsu.sort()
renderer = self._lastRenderer
renderer.blit()
for tmp, a in dsu:
a.draw(renderer)
#### Axes rectangle characteristics
def get_frame_on(self):
"""
Get whether the axes rectangle patch is drawn
"""
return self._frameon
def set_frame_on(self, b):
"""
Set whether the axes rectangle patch is drawn
ACCEPTS: [ *True* | *False* ]
"""
self._frameon = b
def get_axisbelow(self):
"""
Get whether axis below is true or not
"""
return self._axisbelow
def set_axisbelow(self, b):
"""
Set whether the axis ticks and gridlines are above or below most artists
ACCEPTS: [ *True* | *False* ]
"""
self._axisbelow = b
def grid(self, b=None, **kwargs):
"""
call signature::
grid(self, b=None, **kwargs)
Set the axes grids on or off; *b* is a boolean
If *b* is *None* and ``len(kwargs)==0``, toggle the grid state. If
*kwargs* are supplied, it is assumed that you want a grid and *b*
is thus set to *True*
        *kwargs* are used to set the grid line properties, eg::
ax.grid(color='r', linestyle='-', linewidth=2)
Valid :class:`~matplotlib.lines.Line2D` kwargs are
%(Line2D)s
"""
if len(kwargs): b = True
self.xaxis.grid(b, **kwargs)
self.yaxis.grid(b, **kwargs)
grid.__doc__ = cbook.dedent(grid.__doc__) % martist.kwdocd
def ticklabel_format(self, **kwargs):
"""
Convenience method for manipulating the ScalarFormatter
used by default for linear axes.
Optional keyword arguments:
============ =====================================
Keyword Description
============ =====================================
*style* [ 'sci' (or 'scientific') | 'plain' ]
plain turns off scientific notation
*scilimits* (m, n), pair of integers; if *style*
is 'sci', scientific notation will
be used for numbers outside the range
10`-m`:sup: to 10`n`:sup:.
Use (0,0) to include all numbers.
*axis* [ 'x' | 'y' | 'both' ]
============ =====================================
Only the major ticks are affected.
If the method is called when the
:class:`~matplotlib.ticker.ScalarFormatter` is not the
:class:`~matplotlib.ticker.Formatter` being used, an
:exc:`AttributeError` will be raised.
"""
style = kwargs.pop('style', '').lower()
scilimits = kwargs.pop('scilimits', None)
if scilimits is not None:
try:
m, n = scilimits
m+n+1 # check that both are numbers
except (ValueError, TypeError):
raise ValueError("scilimits must be a sequence of 2 integers")
axis = kwargs.pop('axis', 'both').lower()
if style[:3] == 'sci':
sb = True
elif style in ['plain', 'comma']:
sb = False
if style == 'plain':
cb = False
else:
cb = True
raise NotImplementedError, "comma style remains to be added"
elif style == '':
sb = None
else:
raise ValueError, "%s is not a valid style value"
try:
if sb is not None:
if axis == 'both' or axis == 'x':
self.xaxis.major.formatter.set_scientific(sb)
if axis == 'both' or axis == 'y':
self.yaxis.major.formatter.set_scientific(sb)
if scilimits is not None:
if axis == 'both' or axis == 'x':
self.xaxis.major.formatter.set_powerlimits(scilimits)
if axis == 'both' or axis == 'y':
self.yaxis.major.formatter.set_powerlimits(scilimits)
except AttributeError:
raise AttributeError(
"This method only works with the ScalarFormatter.")
def set_axis_off(self):
"""turn off the axis"""
self.axison = False
def set_axis_on(self):
"""turn on the axis"""
self.axison = True
def get_axis_bgcolor(self):
'Return the axis background color'
return self._axisbg
def set_axis_bgcolor(self, color):
"""
set the axes background color
ACCEPTS: any matplotlib color - see
:func:`~matplotlib.pyplot.colors`
"""
self._axisbg = color
self.patch.set_facecolor(color)
### data limits, ticks, tick labels, and formatting
def invert_xaxis(self):
"Invert the x-axis."
left, right = self.get_xlim()
self.set_xlim(right, left)
def xaxis_inverted(self):
'Returns True if the x-axis is inverted.'
left, right = self.get_xlim()
return right < left
def get_xbound(self):
"""
Returns the x-axis numerical bounds where::
lowerBound < upperBound
"""
left, right = self.get_xlim()
if left < right:
return left, right
else:
return right, left
def set_xbound(self, lower=None, upper=None):
"""
Set the lower and upper numerical bounds of the x-axis.
This method will honor axes inversion regardless of parameter order.
"""
if upper is None and iterable(lower):
lower,upper = lower
old_lower,old_upper = self.get_xbound()
if lower is None: lower = old_lower
if upper is None: upper = old_upper
if self.xaxis_inverted():
if lower < upper:
self.set_xlim(upper, lower)
else:
self.set_xlim(lower, upper)
else:
if lower < upper:
self.set_xlim(lower, upper)
else:
self.set_xlim(upper, lower)
def get_xlim(self):
"""
Get the x-axis range [*xmin*, *xmax*]
"""
return tuple(self.viewLim.intervalx)
def set_xlim(self, xmin=None, xmax=None, emit=True, **kwargs):
"""
call signature::
set_xlim(self, *args, **kwargs)
Set the limits for the xaxis
Returns the current xlimits as a length 2 tuple: [*xmin*, *xmax*]
Examples::
set_xlim((valmin, valmax))
set_xlim(valmin, valmax)
set_xlim(xmin=1) # xmax unchanged
set_xlim(xmax=1) # xmin unchanged
Keyword arguments:
*xmin*: scalar
the min of the xlim
*xmax*: scalar
the max of the xlim
*emit*: [ True | False ]
notify observers of lim change
ACCEPTS: len(2) sequence of floats
"""
if xmax is None and iterable(xmin):
xmin,xmax = xmin
self._process_unit_info(xdata=(xmin, xmax))
if xmin is not None:
xmin = self.convert_xunits(xmin)
if xmax is not None:
xmax = self.convert_xunits(xmax)
old_xmin,old_xmax = self.get_xlim()
if xmin is None: xmin = old_xmin
if xmax is None: xmax = old_xmax
xmin, xmax = mtransforms.nonsingular(xmin, xmax, increasing=False)
xmin, xmax = self.xaxis.limit_range_for_scale(xmin, xmax)
self.viewLim.intervalx = (xmin, xmax)
if emit:
self.callbacks.process('xlim_changed', self)
# Call all of the other x-axes that are shared with this one
for other in self._shared_x_axes.get_siblings(self):
if other is not self:
other.set_xlim(self.viewLim.intervalx, emit=False)
if (other.figure != self.figure and
other.figure.canvas is not None):
other.figure.canvas.draw_idle()
return xmin, xmax
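# Usage sketch for the x-limit helpers: set_xlim() honors argument order (so it
# can invert the axis), while set_xbound() preserves any existing inversion.
# `ax` stands for an existing Axes instance.
#
#   ax.set_xlim(0, 10)          # xmin=0, xmax=10
#   ax.set_xlim(xmax=8)         # xmin unchanged
#   ax.set_xlim(10, 0)          # inverted x-axis
#   ax.xaxis_inverted()         # -> True
#   ax.set_xbound(2, 6)         # keeps the inversion, bounds become 2..6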
def get_xscale(self):
'return the xaxis scale string: %s' % (
", ".join(mscale.get_scale_names()))
return self.xaxis.get_scale()
def set_xscale(self, value, **kwargs):
"""
call signature::
set_xscale(value)
Set the scaling of the x-axis: %(scale)s
ACCEPTS: [%(scale)s]
Different kwargs are accepted, depending on the scale:
%(scale_docs)s
"""
self.xaxis.set_scale(value, **kwargs)
self.autoscale_view()
self._update_transScale()
set_xscale.__doc__ = cbook.dedent(set_xscale.__doc__) % {
'scale': ' | '.join([repr(x) for x in mscale.get_scale_names()]),
'scale_docs': mscale.get_scale_docs().strip()}
def get_xticks(self, minor=False):
'Return the x ticks as a list of locations'
return self.xaxis.get_ticklocs(minor=minor)
def set_xticks(self, ticks, minor=False):
"""
Set the x ticks with list of *ticks*
ACCEPTS: sequence of floats
"""
return self.xaxis.set_ticks(ticks, minor=minor)
def get_xmajorticklabels(self):
'Get the xtick labels as a list of Text instances'
return cbook.silent_list('Text xticklabel',
self.xaxis.get_majorticklabels())
def get_xminorticklabels(self):
'Get the xtick labels as a list of Text instances'
return cbook.silent_list('Text xticklabel',
self.xaxis.get_minorticklabels())
def get_xticklabels(self, minor=False):
'Get the xtick labels as a list of Text instances'
return cbook.silent_list('Text xticklabel',
self.xaxis.get_ticklabels(minor=minor))
def set_xticklabels(self, labels, fontdict=None, minor=False, **kwargs):
"""
call signature::
set_xticklabels(labels, fontdict=None, minor=False, **kwargs)
Set the xtick labels with list of strings *labels*. Return a
list of axis text instances.
*kwargs* set the :class:`~matplotlib.text.Text` properties.
Valid properties are
%(Text)s
ACCEPTS: sequence of strings
"""
return self.xaxis.set_ticklabels(labels, fontdict,
minor=minor, **kwargs)
set_xticklabels.__doc__ = cbook.dedent(
set_xticklabels.__doc__) % martist.kwdocd
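# Usage sketch for tick placement and labelling: set explicit x tick locations
# and give them string labels; extra kwargs pass through as Text properties.
#
#   ax.set_xticks([0, 1, 2, 3])
#   ax.set_xticklabels(['zero', 'one', 'two', 'three'], rotation=45)
#   labels = ax.get_xticklabels()      # list of Text instances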
def invert_yaxis(self):
"Invert the y-axis."
left, right = self.get_ylim()
self.set_ylim(right, left)
def yaxis_inverted(self):
'Returns True if the y-axis is inverted.'
left, right = self.get_ylim()
return right < left
def get_ybound(self):
"Return y-axis numerical bounds in the form of lowerBound < upperBound"
left, right = self.get_ylim()
if left < right:
return left, right
else:
return right, left
def set_ybound(self, lower=None, upper=None):
"""Set the lower and upper numerical bounds of the y-axis.
This method will honor axes inversion regardless of parameter order.
"""
if upper is None and iterable(lower):
lower,upper = lower
old_lower,old_upper = self.get_ybound()
if lower is None: lower = old_lower
if upper is None: upper = old_upper
if self.yaxis_inverted():
if lower < upper:
self.set_ylim(upper, lower)
else:
self.set_ylim(lower, upper)
else:
if lower < upper:
self.set_ylim(lower, upper)
else:
self.set_ylim(upper, lower)
def get_ylim(self):
"""
Get the y-axis range [*ymin*, *ymax*]
"""
return tuple(self.viewLim.intervaly)
def set_ylim(self, ymin=None, ymax=None, emit=True, **kwargs):
"""
call signature::
set_ylim(self, *args, **kwargs):
Set the limits for the yaxis; v = [ymin, ymax]::
set_ylim((valmin, valmax))
set_ylim(valmin, valmax)
set_ylim(ymin=1) # ymax unchanged
set_ylim(ymax=1) # ymin unchanged
Keyword arguments:
*ymin*: scalar
the min of the ylim
*ymax*: scalar
the max of the ylim
*emit*: [ True | False ]
notify observers of lim change
Returns the current ylimits as a length 2 tuple
ACCEPTS: len(2) sequence of floats
"""
if ymax is None and iterable(ymin):
ymin,ymax = ymin
if ymin is not None:
ymin = self.convert_yunits(ymin)
if ymax is not None:
ymax = self.convert_yunits(ymax)
old_ymin,old_ymax = self.get_ylim()
if ymin is None: ymin = old_ymin
if ymax is None: ymax = old_ymax
ymin, ymax = mtransforms.nonsingular(ymin, ymax, increasing=False)
ymin, ymax = self.yaxis.limit_range_for_scale(ymin, ymax)
self.viewLim.intervaly = (ymin, ymax)
if emit:
self.callbacks.process('ylim_changed', self)
# Call all of the other y-axes that are shared with this one
for other in self._shared_y_axes.get_siblings(self):
if other is not self:
other.set_ylim(self.viewLim.intervaly, emit=False)
if (other.figure != self.figure and
other.figure.canvas is not None):
other.figure.canvas.draw_idle()
return ymin, ymax
def get_yscale(self):
'return the yaxis scale string: %s' % (
", ".join(mscale.get_scale_names()))
return self.yaxis.get_scale()
def set_yscale(self, value, **kwargs):
"""
call signature::
set_yscale(value)
Set the scaling of the y-axis: %(scale)s
ACCEPTS: [%(scale)s]
Different kwargs are accepted, depending on the scale:
%(scale_docs)s
"""
self.yaxis.set_scale(value, **kwargs)
self.autoscale_view()
self._update_transScale()
set_yscale.__doc__ = cbook.dedent(set_yscale.__doc__) % {
'scale': ' | '.join([repr(x) for x in mscale.get_scale_names()]),
'scale_docs': mscale.get_scale_docs().strip()}
def get_yticks(self, minor=False):
'Return the y ticks as a list of locations'
return self.yaxis.get_ticklocs(minor=minor)
def set_yticks(self, ticks, minor=False):
"""
Set the y ticks with list of *ticks*
ACCEPTS: sequence of floats
Keyword arguments:
*minor*: [ False | True ]
Sets the minor ticks if True
"""
return self.yaxis.set_ticks(ticks, minor=minor)
def get_ymajorticklabels(self):
'Get the ytick labels as a list of Text instances'
return cbook.silent_list('Text yticklabel',
self.yaxis.get_majorticklabels())
def get_yminorticklabels(self):
'Get the ytick labels as a list of Text instances'
return cbook.silent_list('Text yticklabel',
self.yaxis.get_minorticklabels())
def get_yticklabels(self, minor=False):
'Get the ytick labels as a list of Text instances'
return cbook.silent_list('Text yticklabel',
self.yaxis.get_ticklabels(minor=minor))
def set_yticklabels(self, labels, fontdict=None, minor=False, **kwargs):
"""
call signature::
set_yticklabels(labels, fontdict=None, minor=False, **kwargs)
Set the ytick labels with list of strings *labels*. Return a list of
:class:`~matplotlib.text.Text` instances.
*kwargs* set :class:`~matplotlib.text.Text` properties for the labels.
Valid properties are
%(Text)s
ACCEPTS: sequence of strings
"""
return self.yaxis.set_ticklabels(labels, fontdict,
minor=minor, **kwargs)
set_yticklabels.__doc__ = cbook.dedent(
set_yticklabels.__doc__) % martist.kwdocd
def xaxis_date(self, tz=None):
"""Sets up x-axis ticks and labels that treat the x data as dates.
*tz* is the time zone to use in labeling dates. Defaults to rc value.
"""
xmin, xmax = self.dataLim.intervalx
if xmin==0.:
# no data has been added - let's set the default datalim.
# We should probably use a better proxy for the datalim having
# been updated than the ignore setting
dmax = today = datetime.date.today()
dmin = today-datetime.timedelta(days=10)
self._process_unit_info(xdata=(dmin, dmax))
dmin, dmax = self.convert_xunits([dmin, dmax])
self.viewLim.intervalx = dmin, dmax
self.dataLim.intervalx = dmin, dmax
locator = self.xaxis.get_major_locator()
if not isinstance(locator, mdates.DateLocator):
locator = mdates.AutoDateLocator(tz)
self.xaxis.set_major_locator(locator)
# the autolocator uses the viewlim to pick the right date
# locator, but it may not have correct viewlim before an
# autoscale. If the viewlim is still zero..1, set it to the
# datalim and the autoscaler will update it on request
if self.viewLim.intervalx[0]==0.:
self.viewLim.intervalx = tuple(self.dataLim.intervalx)
locator.refresh()
formatter = self.xaxis.get_major_formatter()
if not isinstance(formatter, mdates.DateFormatter):
formatter = mdates.AutoDateFormatter(locator, tz)
self.xaxis.set_major_formatter(formatter)
def yaxis_date(self, tz=None):
"""Sets up y-axis ticks and labels that treat the y data as dates.
*tz* is the time zone to use in labeling dates. Defaults to rc value.
"""
ymin, ymax = self.dataLim.intervaly
if ymin==0.:
# no data has been added - let's set the default datalim.
# We should probably use a better proxy for the datalim having
# been updated than the ignore setting
dmax = today = datetime.date.today()
dmin = today-datetime.timedelta(days=10)
self._process_unit_info(ydata=(dmin, dmax))
dmin, dmax = self.convert_yunits([dmin, dmax])
self.viewLim.intervaly = dmin, dmax
self.dataLim.intervaly = dmin, dmax
locator = self.yaxis.get_major_locator()
if not isinstance(locator, mdates.DateLocator):
locator = mdates.AutoDateLocator(tz)
self.yaxis.set_major_locator(locator)
# the autolocator uses the viewlim to pick the right date
# locator, but it may not have correct viewlim before an
# autoscale. If the viewlim is still zero..1, set it to the
# datalim and the autoscaler will update it on request
if self.viewLim.intervaly[0]==0.:
self.viewLim.intervaly = tuple(self.dataLim.intervaly)
locator.refresh()
formatter = self.yaxis.get_major_formatter()
if not isinstance(formatter, mdates.DateFormatter):
formatter = mdates.AutoDateFormatter(locator, tz)
self.yaxis.set_major_formatter(formatter)
def format_xdata(self, x):
"""
Return *x* string formatted. This function will use the attribute
self.fmt_xdata if it is callable, else will fall back on the xaxis
major formatter
"""
try: return self.fmt_xdata(x)
except TypeError:
func = self.xaxis.get_major_formatter().format_data_short
val = func(x)
return val
def format_ydata(self, y):
"""
Return y string formatted. This function will use the
:attr:`fmt_ydata` attribute if it is callable, else will fall
back on the yaxis major formatter
"""
try: return self.fmt_ydata(y)
except TypeError:
func = self.yaxis.get_major_formatter().format_data_short
val = func(y)
return val
def format_coord(self, x, y):
'return a format string formatting the *x*, *y* coord'
if x is None:
x = '???'
if y is None:
y = '???'
xs = self.format_xdata(x)
ys = self.format_ydata(y)
return 'x=%s, y=%s'%(xs,ys)
#### Interactive manipulation
def can_zoom(self):
"""
Return *True* if this axes support the zoom box
"""
return True
def get_navigate(self):
"""
Get whether the axes responds to navigation commands
"""
return self._navigate
def set_navigate(self, b):
"""
Set whether the axes responds to navigation toolbar commands
ACCEPTS: [ True | False ]
"""
self._navigate = b
def get_navigate_mode(self):
"""
Get the navigation toolbar button status: 'PAN', 'ZOOM', or None
"""
return self._navigate_mode
def set_navigate_mode(self, b):
"""
Set the navigation toolbar button status;
.. warning::
this is not a user-API function.
"""
self._navigate_mode = b
def start_pan(self, x, y, button):
"""
Called when a pan operation has started.
*x*, *y* are the mouse coordinates in display coords.
button is the mouse button number:
* 1: LEFT
* 2: MIDDLE
* 3: RIGHT
.. note::
Intended to be overridden by new projection types.
"""
self._pan_start = cbook.Bunch(
lim = self.viewLim.frozen(),
trans = self.transData.frozen(),
trans_inverse = self.transData.inverted().frozen(),
bbox = self.bbox.frozen(),
x = x,
y = y
)
def end_pan(self):
"""
Called when a pan operation completes (when the mouse button
is up.)
.. note::
Intended to be overridden by new projection types.
"""
del self._pan_start
def drag_pan(self, button, key, x, y):
"""
Called when the mouse moves during a pan operation.
*button* is the mouse button number:
* 1: LEFT
* 2: MIDDLE
* 3: RIGHT
*key* is a "shift" key
*x*, *y* are the mouse coordinates in display coords.
.. note::
Intended to be overridden by new projection types.
"""
def format_deltas(key, dx, dy):
if key=='control':
if(abs(dx)>abs(dy)):
dy = dx
else:
dx = dy
elif key=='x':
dy = 0
elif key=='y':
dx = 0
elif key=='shift':
if 2*abs(dx) < abs(dy):
dx=0
elif 2*abs(dy) < abs(dx):
dy=0
elif(abs(dx)>abs(dy)):
dy=dy/abs(dy)*abs(dx)
else:
dx=dx/abs(dx)*abs(dy)
return (dx,dy)
p = self._pan_start
dx = x - p.x
dy = y - p.y
if dx == 0 and dy == 0:
return
if button == 1:
dx, dy = format_deltas(key, dx, dy)
result = p.bbox.translated(-dx, -dy) \
.transformed(p.trans_inverse)
elif button == 3:
try:
dx = -dx / float(self.bbox.width)
dy = -dy / float(self.bbox.height)
dx, dy = format_deltas(key, dx, dy)
if self.get_aspect() != 'auto':
dx = 0.5 * (dx + dy)
dy = dx
alpha = np.power(10.0, (dx, dy))
start = p.trans_inverse.transform_point((p.x, p.y))
lim_points = p.lim.get_points()
result = start + alpha * (lim_points - start)
result = mtransforms.Bbox(result)
except OverflowError:
warnings.warn('Overflow while panning')
return
self.set_xlim(*result.intervalx)
self.set_ylim(*result.intervaly)
def get_cursor_props(self):
"""
return the cursor properties as a (*linewidth*, *color*)
tuple, where *linewidth* is a float and *color* is an RGBA
tuple
"""
return self._cursorProps
def set_cursor_props(self, *args):
"""
Set the cursor property as::
ax.set_cursor_props(linewidth, color)
or::
ax.set_cursor_props((linewidth, color))
ACCEPTS: a (*float*, *color*) tuple
"""
if len(args)==1:
lw, c = args[0]
elif len(args)==2:
lw, c = args
else:
raise ValueError('args must be a (linewidth, color) tuple')
c =mcolors.colorConverter.to_rgba(c)
self._cursorProps = lw, c
def connect(self, s, func):
"""
Register observers to be notified when certain events occur. Register
with callback functions with the following signatures. The function
has the following signature::
func(ax) # where ax is the instance making the callback.
The following events can be connected to:
'xlim_changed','ylim_changed'
The connection id is returned - you can use this with
disconnect to disconnect from the axes event
"""
raise DeprecationWarning('use the callbacks CallbackRegistry instance '
'instead')
def disconnect(self, cid):
'disconnect from the Axes event.'
raise DeprecationWarning('use the callbacks CallbackRegistry instance '
'instead')
def get_children(self):
'return a list of child artists'
children = []
children.append(self.xaxis)
children.append(self.yaxis)
children.extend(self.lines)
children.extend(self.patches)
children.extend(self.texts)
children.extend(self.tables)
children.extend(self.artists)
children.extend(self.images)
if self.legend_ is not None:
children.append(self.legend_)
children.extend(self.collections)
children.append(self.title)
children.append(self.patch)
children.append(self.frame)
return children
def contains(self,mouseevent):
"""Test whether the mouse event occured in the axes.
Returns T/F, {}
"""
if callable(self._contains): return self._contains(self,mouseevent)
return self.patch.contains(mouseevent)
def pick(self, *args):
"""
call signature::
pick(mouseevent)
each child artist will fire a pick event if mouseevent is over
the artist and the artist has picker set
"""
if len(args)>1:
raise DeprecationWarning('New pick API implemented -- '
'see API_CHANGES in the src distribution')
martist.Artist.pick(self,args[0])
def __pick(self, x, y, trans=None, among=None):
"""
Return the artist under point that is closest to the *x*, *y*.
If *trans* is *None*, *x*, and *y* are in window coords,
(0,0 = lower left). Otherwise, *trans* is a
:class:`~matplotlib.transforms.Transform` that specifies the
coordinate system of *x*, *y*.
The selection of artists from amongst which the pick function
finds an artist can be narrowed using the optional keyword
argument *among*. If provided, this should be either a sequence
of permitted artists or a function taking an artist as its
argument and returning a true value if and only if that artist
can be selected.
Note this algorithm calculates distance to the vertices of the
polygon, so if you want to pick a patch, click on the edge!
"""
# MGDTODO: Needs updating
if trans is not None:
xywin = trans.transform_point((x,y))
else:
xywin = x,y
def dist_points(p1, p2):
'return the distance between two points'
x1, y1 = p1
x2, y2 = p2
return math.sqrt((x1-x2)**2+(y1-y2)**2)
def dist_x_y(p1, x, y):
'*x* and *y* are arrays; return the distance to the closest point'
x1, y1 = p1
return min(np.sqrt((x-x1)**2+(y-y1)**2))
def dist(a):
if isinstance(a, Text):
bbox = a.get_window_extent()
l,b,w,h = bbox.bounds
verts = (l,b), (l,b+h), (l+w,b+h), (l+w, b)
xt, yt = zip(*verts)
elif isinstance(a, Patch):
path = a.get_path()
tverts = a.get_transform().transform_path(path)
xt, yt = zip(*tverts)
elif isinstance(a, mlines.Line2D):
xdata = a.get_xdata(orig=False)
ydata = a.get_ydata(orig=False)
xt, yt = a.get_transform().numerix_x_y(xdata, ydata)
return dist_x_y(xywin, np.asarray(xt), np.asarray(yt))
artists = self.lines + self.patches + self.texts
if callable(among):
artists = filter(among, artists)
elif iterable(among):
amongd = dict([(k,1) for k in among])
artists = [a for a in artists if a in amongd]
elif among is None:
pass
else:
raise ValueError('among must be callable or iterable')
if not len(artists): return None
ds = [ (dist(a),a) for a in artists]
ds.sort()
return ds[0][1]
#### Labelling
def get_title(self):
"""
Get the title text string.
"""
return self.title.get_text()
def set_title(self, label, fontdict=None, **kwargs):
"""
call signature::
set_title(label, fontdict=None, **kwargs):
Set the title for the axes.
kwargs are Text properties:
%(Text)s
ACCEPTS: str
.. seealso::
:meth:`text`:
for information on how overriding and the optional args work
"""
default = {
'fontsize':rcParams['axes.titlesize'],
'verticalalignment' : 'bottom',
'horizontalalignment' : 'center'
}
self.title.set_text(label)
self.title.update(default)
if fontdict is not None: self.title.update(fontdict)
self.title.update(kwargs)
return self.title
set_title.__doc__ = cbook.dedent(set_title.__doc__) % martist.kwdocd
def get_xlabel(self):
"""
Get the xlabel text string.
"""
label = self.xaxis.get_label()
return label.get_text()
def set_xlabel(self, xlabel, fontdict=None, **kwargs):
"""
call signature::
set_xlabel(xlabel, fontdict=None, **kwargs)
Set the label for the xaxis.
Valid kwargs are Text properties:
%(Text)s
ACCEPTS: str
.. seealso::
:meth:`text`:
for information on how overriding and the optional args work
"""
label = self.xaxis.get_label()
label.set_text(xlabel)
if fontdict is not None: label.update(fontdict)
label.update(kwargs)
return label
set_xlabel.__doc__ = cbook.dedent(set_xlabel.__doc__) % martist.kwdocd
def get_ylabel(self):
"""
Get the ylabel text string.
"""
label = self.yaxis.get_label()
return label.get_text()
def set_ylabel(self, ylabel, fontdict=None, **kwargs):
"""
call signature::
set_ylabel(ylabel, fontdict=None, **kwargs)
Set the label for the yaxis
Valid kwargs are Text properties:
%(Text)s
ACCEPTS: str
.. seealso::
:meth:`text`:
for information on how overriding and the optional args work
"""
label = self.yaxis.get_label()
label.set_text(ylabel)
if fontdict is not None: label.update(fontdict)
label.update(kwargs)
return label
set_ylabel.__doc__ = cbook.dedent(set_ylabel.__doc__) % martist.kwdocd
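# Usage sketch for the labelling helpers: title and axis labels accept Text
# properties either via a fontdict or as individual kwargs.
#
#   ax.set_title('Response curve', fontsize=14)
#   ax.set_xlabel('time (s)')
#   ax.set_ylabel('voltage (mV)', fontdict={'fontsize': 10})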
def text(self, x, y, s, fontdict=None,
withdash=False, **kwargs):
"""
call signature::
text(x, y, s, fontdict=None, **kwargs)
Add text in string *s* to axis at location *x*, *y*, data
coordinates.
Keyword arguments:
*fontdict*:
A dictionary to override the default text properties.
If *fontdict* is *None*, the defaults are determined by your rc
parameters.
*withdash*: [ False | True ]
Creates a :class:`~matplotlib.text.TextWithDash` instance
instead of a :class:`~matplotlib.text.Text` instance.
Individual keyword arguments can be used to override any given
parameter::
text(x, y, s, fontsize=12)
The default transform specifies that text is in data coords,
alternatively, you can specify text in axis coords (0,0 is
lower-left and 1,1 is upper-right). The example below places
text in the center of the axes::
text(0.5, 0.5,'matplotlib',
horizontalalignment='center',
verticalalignment='center',
transform = ax.transAxes)
You can put a rectangular box around the text instance (eg. to
set a background color) by using the keyword *bbox*. *bbox* is
a dictionary of :class:`matplotlib.patches.Rectangle`
properties. For example::
text(x, y, s, bbox=dict(facecolor='red', alpha=0.5))
Valid kwargs are :class:`matplotlib.text.Text` properties:
%(Text)s
"""
default = {
'verticalalignment' : 'bottom',
'horizontalalignment' : 'left',
#'verticalalignment' : 'top',
'transform' : self.transData,
}
# At some point if we feel confident that TextWithDash
# is robust as a drop-in replacement for Text and that
# the performance impact of the heavier-weight class
# isn't too significant, it may make sense to eliminate
# the withdash kwarg and simply delegate whether there's
# a dash to TextWithDash and dashlength.
if withdash:
t = mtext.TextWithDash(
x=x, y=y, text=s,
)
else:
t = mtext.Text(
x=x, y=y, text=s,
)
self._set_artist_props(t)
t.update(default)
if fontdict is not None: t.update(fontdict)
t.update(kwargs)
self.texts.append(t)
t._remove_method = lambda h: self.texts.remove(h)
#if t.get_clip_on(): t.set_clip_box(self.bbox)
if 'clip_on' in kwargs: t.set_clip_box(self.bbox)
return t
text.__doc__ = cbook.dedent(text.__doc__) % martist.kwdocd
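# Usage sketch for text(): place a string in data coordinates, and a second
# one centred in axes coordinates with a background box.
#
#   ax.text(2.0, 5.0, 'data-coords label')
#   ax.text(0.5, 0.5, 'centred', transform=ax.transAxes,
#           horizontalalignment='center', verticalalignment='center',
#           bbox=dict(facecolor='red', alpha=0.5))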
def annotate(self, *args, **kwargs):
"""
call signature::
annotate(s, xy, xytext=None, xycoords='data',
textcoords='data', arrowprops=None, **kwargs)
Keyword arguments:
%(Annotation)s
.. plot:: mpl_examples/pylab_examples/annotation_demo2.py
"""
a = mtext.Annotation(*args, **kwargs)
a.set_transform(mtransforms.IdentityTransform())
self._set_artist_props(a)
if kwargs.has_key('clip_on'): a.set_clip_path(self.patch)
self.texts.append(a)
return a
annotate.__doc__ = cbook.dedent(annotate.__doc__) % martist.kwdocd
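# Usage sketch for annotate(): label a data point and draw an arrow from
# offset text back to it (both points given in data coordinates here).
#
#   ax.annotate('local max', xy=(2, 1), xytext=(3, 1.5),
#               arrowprops=dict(facecolor='black', shrink=0.05))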
#### Lines and spans
def axhline(self, y=0, xmin=0, xmax=1, **kwargs):
"""
call signature::
axhline(y=0, xmin=0, xmax=1, **kwargs)
Axis Horizontal Line
Draw a horizontal line at *y* from *xmin* to *xmax*. With the
default values of *xmin* = 0 and *xmax* = 1, this line will
always span the horizontal extent of the axes, regardless of
the xlim settings, even if you change them, eg. with the
:meth:`set_xlim` command. That is, the horizontal extent is
in axes coords: 0=left, 0.5=middle, 1.0=right but the *y*
location is in data coordinates.
Return value is the :class:`~matplotlib.lines.Line2D`
instance. kwargs are the same as kwargs to plot, and can be
used to control the line properties. Eg.,
* draw a thick red hline at *y* = 0 that spans the xrange
>>> axhline(linewidth=4, color='r')
* draw a default hline at *y* = 1 that spans the xrange
>>> axhline(y=1)
* draw a default hline at *y* = .5 that spans the middle half of
the xrange
>>> axhline(y=.5, xmin=0.25, xmax=0.75)
Valid kwargs are :class:`~matplotlib.lines.Line2D` properties:
%(Line2D)s
.. seealso::
:meth:`axhspan`:
for example plot and source code
"""
ymin, ymax = self.get_ybound()
# We need to strip away the units for comparison with
# non-unitized bounds
yy = self.convert_yunits( y )
scaley = (yy<ymin) or (yy>ymax)
trans = mtransforms.blended_transform_factory(
self.transAxes, self.transData)
l = mlines.Line2D([xmin,xmax], [y,y], transform=trans, **kwargs)
l.x_isdata = False
self.add_line(l)
self.autoscale_view(scalex=False, scaley=scaley)
return l
axhline.__doc__ = cbook.dedent(axhline.__doc__) % martist.kwdocd
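# Usage sketch for axhline(): reference lines that always span the full
# horizontal extent of the axes, with the y position given in data units.
#
#   ax.axhline(linewidth=4, color='r')          # thick red line at y=0
#   ax.axhline(y=0.5, xmin=0.25, xmax=0.75)     # middle half of the x range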
def axvline(self, x=0, ymin=0, ymax=1, **kwargs):
"""
call signature::
axvline(x=0, ymin=0, ymax=1, **kwargs)
Axis Vertical Line
Draw a vertical line at *x* from *ymin* to *ymax*. With the
default values of *ymin* = 0 and *ymax* = 1, this line will
always span the vertical extent of the axes, regardless of the
ylim settings, even if you change them, eg. with the
:meth:`set_ylim` command. That is, the vertical extent is in
axes coords: 0=bottom, 0.5=middle, 1.0=top but the *x* location
is in data coordinates.
Return value is the :class:`~matplotlib.lines.Line2D`
instance. kwargs are the same as kwargs to plot, and can be
used to control the line properties. Eg.,
* draw a thick red vline at *x* = 0 that spans the yrange
>>> axvline(linewidth=4, color='r')
* draw a default vline at *x* = 1 that spans the yrange
>>> axvline(x=1)
* draw a default vline at *x* = .5 that spans the middle half of
the yrange
>>> axvline(x=.5, ymin=0.25, ymax=0.75)
Valid kwargs are :class:`~matplotlib.lines.Line2D` properties:
%(Line2D)s
.. seealso::
:meth:`axhspan`:
for example plot and source code
"""
xmin, xmax = self.get_xbound()
# We need to strip away the units for comparison with
# non-unitized bounds
xx = self.convert_xunits( x )
scalex = (xx<xmin) or (xx>xmax)
trans = mtransforms.blended_transform_factory(
self.transData, self.transAxes)
l = mlines.Line2D([x,x], [ymin,ymax] , transform=trans, **kwargs)
l.y_isdata = False
self.add_line(l)
self.autoscale_view(scalex=scalex, scaley=False)
return l
axvline.__doc__ = cbook.dedent(axvline.__doc__) % martist.kwdocd
def axhspan(self, ymin, ymax, xmin=0, xmax=1, **kwargs):
"""
call signature::
axhspan(ymin, ymax, xmin=0, xmax=1, **kwargs)
Axis Horizontal Span.
*y* coords are in data units and *x* coords are in axes (relative
0-1) units.
Draw a horizontal span (rectangle) from *ymin* to *ymax*.
With the default values of *xmin* = 0 and *xmax* = 1, this
always spans the xrange, regardless of the xlim settings, even
if you change them, eg. with the :meth:`set_xlim` command.
That is, the horizontal extent is in axes coords: 0=left,
0.5=middle, 1.0=right but the *y* location is in data
coordinates.
Return value is a :class:`matplotlib.patches.Polygon`
instance.
Examples:
* draw a gray rectangle from *y* = 0.25-0.75 that spans the
horizontal extent of the axes
>>> axhspan(0.25, 0.75, facecolor='0.5', alpha=0.5)
Valid kwargs are :class:`~matplotlib.patches.Polygon` properties:
%(Polygon)s
**Example:**
.. plot:: mpl_examples/pylab_examples/axhspan_demo.py
"""
trans = mtransforms.blended_transform_factory(
self.transAxes, self.transData)
# process the unit information
self._process_unit_info( [xmin, xmax], [ymin, ymax], kwargs=kwargs )
# first we need to strip away the units
xmin, xmax = self.convert_xunits( [xmin, xmax] )
ymin, ymax = self.convert_yunits( [ymin, ymax] )
verts = (xmin, ymin), (xmin, ymax), (xmax, ymax), (xmax, ymin)
p = mpatches.Polygon(verts, **kwargs)
p.set_transform(trans)
p.x_isdata = False
self.add_patch(p)
return p
axhspan.__doc__ = cbook.dedent(axhspan.__doc__) % martist.kwdocd
def axvspan(self, xmin, xmax, ymin=0, ymax=1, **kwargs):
"""
call signature::
axvspan(xmin, xmax, ymin=0, ymax=1, **kwargs)
Axis Vertical Span.
*x* coords are in data units and *y* coords are in axes (relative
0-1) units.
Draw a vertical span (rectangle) from *xmin* to *xmax*. With
the default values of *ymin* = 0 and *ymax* = 1, this always
spans the yrange, regardless of the ylim settings, even if you
change them, eg. with the :meth:`set_ylim` command. That is,
the vertical extent is in axes coords: 0=bottom, 0.5=middle,
1.0=top but the *x* location is in data coordinates.
Return value is the :class:`matplotlib.patches.Polygon`
instance.
Examples:
* draw a vertical green translucent rectangle from x=1.25 to 1.55 that
spans the yrange of the axes
>>> axvspan(1.25, 1.55, facecolor='g', alpha=0.5)
Valid kwargs are :class:`~matplotlib.patches.Polygon`
properties:
%(Polygon)s
.. seealso::
:meth:`axhspan`:
for example plot and source code
"""
trans = mtransforms.blended_transform_factory(
self.transData, self.transAxes)
# process the unit information
self._process_unit_info( [xmin, xmax], [ymin, ymax], kwargs=kwargs )
# first we need to strip away the units
xmin, xmax = self.convert_xunits( [xmin, xmax] )
ymin, ymax = self.convert_yunits( [ymin, ymax] )
verts = [(xmin, ymin), (xmin, ymax), (xmax, ymax), (xmax, ymin)]
p = mpatches.Polygon(verts, **kwargs)
p.set_transform(trans)
p.y_isdata = False
self.add_patch(p)
return p
axvspan.__doc__ = cbook.dedent(axvspan.__doc__) % martist.kwdocd
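# Usage sketch for the span helpers: shaded rectangles whose spanning
# direction is given in axes coordinates and whose other direction is in
# data coordinates.
#
#   ax.axhspan(0.25, 0.75, facecolor='0.5', alpha=0.5)   # horizontal band
#   ax.axvspan(1.25, 1.55, facecolor='g', alpha=0.5)     # vertical band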
def hlines(self, y, xmin, xmax, colors='k', linestyles='solid',
label='', **kwargs):
"""
call signature::
hlines(y, xmin, xmax, colors='k', linestyles='solid', **kwargs)
Plot horizontal lines at each *y* from *xmin* to *xmax*.
Returns the :class:`~matplotlib.collections.LineCollection`
that was added.
Required arguments:
*y*:
a 1-D numpy array or iterable.
*xmin* and *xmax*:
can be scalars or ``len(y)`` numpy arrays. If they are
scalars, then the respective values are constant, else the
widths of the lines are determined by *xmin* and *xmax*.
Optional keyword arguments:
*colors*:
a line collections color argument, either a single color
or a ``len(y)`` list of colors
*linestyles*:
[ 'solid' | 'dashed' | 'dashdot' | 'dotted' ]
**Example:**
.. plot:: mpl_examples/pylab_examples/hline_demo.py
"""
if kwargs.get('fmt') is not None:
raise DeprecationWarning('hlines now uses a '
'collections.LineCollection and not a '
'list of Line2D to draw; see API_CHANGES')
# We do the conversion first since not all unitized data is uniform
y = self.convert_yunits( y )
xmin = self.convert_xunits( xmin )
xmax = self.convert_xunits( xmax )
if not iterable(y): y = [y]
if not iterable(xmin): xmin = [xmin]
if not iterable(xmax): xmax = [xmax]
y = np.asarray(y)
xmin = np.asarray(xmin)
xmax = np.asarray(xmax)
if len(xmin)==1:
xmin = np.resize( xmin, y.shape )
if len(xmax)==1:
xmax = np.resize( xmax, y.shape )
if len(xmin)!=len(y):
raise ValueError, 'xmin and y are unequal sized sequences'
if len(xmax)!=len(y):
raise ValueError, 'xmax and y are unequal sized sequences'
verts = [ ((thisxmin, thisy), (thisxmax, thisy))
for thisxmin, thisxmax, thisy in zip(xmin, xmax, y)]
coll = mcoll.LineCollection(verts, colors=colors,
linestyles=linestyles, label=label)
self.add_collection(coll)
coll.update(kwargs)
minx = min(xmin.min(), xmax.min())
maxx = max(xmin.max(), xmax.max())
miny = y.min()
maxy = y.max()
corners = (minx, miny), (maxx, maxy)
self.update_datalim(corners)
self.autoscale_view()
return coll
hlines.__doc__ = cbook.dedent(hlines.__doc__)
def vlines(self, x, ymin, ymax, colors='k', linestyles='solid',
label='', **kwargs):
"""
call signature::
vlines(x, ymin, ymax, colors='k', linestyles='solid')
Plot vertical lines at each *x* from *ymin* to *ymax*. *ymin*
or *ymax* can be scalars or len(*x*) numpy arrays. If they are
scalars, then the respective values are constant, else the
heights of the lines are determined by *ymin* and *ymax*.
*colors*
a line collections color argument, either a single color
or a len(*x*) list of colors
*linestyles*
one of [ 'solid' | 'dashed' | 'dashdot' | 'dotted' ]
Returns the :class:`matplotlib.collections.LineCollection`
that was added.
kwargs are :class:`~matplotlib.collections.LineCollection` properties:
%(LineCollection)s
"""
if kwargs.get('fmt') is not None:
raise DeprecationWarning('vlines now uses a '
'collections.LineCollection and not a '
'list of Line2D to draw; see API_CHANGES')
self._process_unit_info(xdata=x, ydata=ymin, kwargs=kwargs)
# We do the conversion first since not all unitized data is uniform
x = self.convert_xunits( x )
ymin = self.convert_yunits( ymin )
ymax = self.convert_yunits( ymax )
if not iterable(x): x = [x]
if not iterable(ymin): ymin = [ymin]
if not iterable(ymax): ymax = [ymax]
x = np.asarray(x)
ymin = np.asarray(ymin)
ymax = np.asarray(ymax)
if len(ymin)==1:
ymin = np.resize( ymin, x.shape )
if len(ymax)==1:
ymax = np.resize( ymax, x.shape )
if len(ymin)!=len(x):
raise ValueError, 'ymin and x are unequal sized sequences'
if len(ymax)!=len(x):
raise ValueError, 'ymax and x are unequal sized sequences'
Y = np.array([ymin, ymax]).T
verts = [ ((thisx, thisymin), (thisx, thisymax))
for thisx, (thisymin, thisymax) in zip(x,Y)]
#print 'creating line collection'
coll = mcoll.LineCollection(verts, colors=colors,
linestyles=linestyles, label=label)
self.add_collection(coll)
coll.update(kwargs)
minx = min( x )
maxx = max( x )
miny = min( min(ymin), min(ymax) )
maxy = max( max(ymin), max(ymax) )
corners = (minx, miny), (maxx, maxy)
self.update_datalim(corners)
self.autoscale_view()
return coll
vlines.__doc__ = cbook.dedent(vlines.__doc__) % martist.kwdocd
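# Usage sketch for hlines()/vlines(): both return a LineCollection and
# broadcast scalar extents over the sequence of positions.
#
#   import numpy as np
#   y = np.arange(5)
#   ax.hlines(y, xmin=0, xmax=y)                     # per-line xmax
#   ax.vlines([1, 2, 3], ymin=0, ymax=4, colors='k', linestyles='dashed')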
#### Basic plotting
def plot(self, *args, **kwargs):
"""
Plot lines and/or markers to the
:class:`~matplotlib.axes.Axes`. *args* is a variable length
argument, allowing for multiple *x*, *y* pairs with an
optional format string. For example, each of the following is
legal::
plot(x, y) # plot x and y using default line style and color
plot(x, y, 'bo') # plot x and y using blue circle markers
plot(y) # plot y using x as index array 0..N-1
plot(y, 'r+') # ditto, but with red plusses
If *x* and/or *y* is 2-dimensional, then the corresponding columns
will be plotted.
An arbitrary number of *x*, *y*, *fmt* groups can be
specified, as in::
a.plot(x1, y1, 'g^', x2, y2, 'g-')
Return value is a list of lines that were added.
The following format string characters are accepted to control
the line style or marker:
================ ===============================
character description
================ ===============================
'-' solid line style
'--' dashed line style
'-.' dash-dot line style
':' dotted line style
'.' point marker
',' pixel marker
'o' circle marker
'v' triangle_down marker
'^' triangle_up marker
'<' triangle_left marker
'>' triangle_right marker
'1' tri_down marker
'2' tri_up marker
'3' tri_left marker
'4' tri_right marker
's' square marker
'p' pentagon marker
'*' star marker
'h' hexagon1 marker
'H' hexagon2 marker
'+' plus marker
'x' x marker
'D' diamond marker
'd' thin_diamond marker
'|' vline marker
'_' hline marker
================ ===============================
The following color abbreviations are supported:
========== ========
character color
========== ========
'b' blue
'g' green
'r' red
'c' cyan
'm' magenta
'y' yellow
'k' black
'w' white
========== ========
In addition, you can specify colors in many weird and
wonderful ways, including full names (``'green'``), hex
strings (``'#008000'``), RGB or RGBA tuples (``(0,1,0,1)``) or
grayscale intensities as a string (``'0.8'``). Of these, the
string specifications can be used in place of a ``fmt`` group,
but the tuple forms can be used only as ``kwargs``.
Line styles and colors are combined in a single format string, as in
``'bo'`` for blue circles.
The *kwargs* can be used to set line properties (any property that has
a ``set_*`` method). You can use this to set a line label (for auto
legends), linewidth, antialiasing, marker face color, etc. Here is an
example::
plot([1,2,3], [1,2,3], 'go-', label='line 1', linewidth=2)
plot([1,2,3], [1,4,9], 'rs', label='line 2')
axis([0, 4, 0, 10])
legend()
If you make multiple lines with one plot command, the kwargs
apply to all those lines, e.g.::
plot(x1, y1, x2, y2, antialiased=False)
Neither line will be antialiased.
You do not need to use format strings, which are just
abbreviations. All of the line properties can be controlled
by keyword arguments. For example, you can set the color,
marker, linestyle, and markercolor with::
plot(x, y, color='green', linestyle='dashed', marker='o',
markerfacecolor='blue', markersize=12). See
:class:`~matplotlib.lines.Line2D` for details.
The kwargs are :class:`~matplotlib.lines.Line2D` properties:
%(Line2D)s
kwargs *scalex* and *scaley*, if defined, are passed on to
:meth:`~matplotlib.axes.Axes.autoscale_view` to determine
whether the *x* and *y* axes are autoscaled; the default is
*True*.
"""
scalex = kwargs.pop( 'scalex', True)
scaley = kwargs.pop( 'scaley', True)
if not self._hold: self.cla()
lines = []
for line in self._get_lines(*args, **kwargs):
self.add_line(line)
lines.append(line)
self.autoscale_view(scalex=scalex, scaley=scaley)
return lines
plot.__doc__ = cbook.dedent(plot.__doc__) % martist.kwdocd
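# Usage sketch for plot(): format strings combine a color and a line/marker
# style; keyword args map to Line2D properties and apply to every line created
# by the call. x1/y1/x2/y2 below stand for arbitrary data sequences.
#
#   ax.plot([1, 2, 3], [1, 2, 3], 'go-', label='line 1', linewidth=2)
#   ax.plot([1, 2, 3], [1, 4, 9], 'rs', label='line 2')
#   ax.plot(x1, y1, x2, y2, 'k--')      # several x, y pairs in one call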
def plot_date(self, x, y, fmt='bo', tz=None, xdate=True, ydate=False,
**kwargs):
"""
call signature::
plot_date(x, y, fmt='bo', tz=None, xdate=True, ydate=False, **kwargs)
Similar to the :func:`~matplotlib.pyplot.plot` command, except
the *x* or *y* (or both) data is considered to be dates, and the
axis is labeled accordingly.
*x* and/or *y* can be a sequence of dates represented as float
days since 0001-01-01 UTC.
Keyword arguments:
*fmt*: string
The plot format string.
*tz*: [ None | timezone string ]
The time zone to use in labeling dates. If *None*, defaults to rc
value.
*xdate*: [ True | False ]
If *True*, the *x*-axis will be labeled with dates.
*ydate*: [ False | True ]
If *True*, the *y*-axis will be labeled with dates.
Note if you are using custom date tickers and formatters, it
may be necessary to set the formatters/locators after the call
to :meth:`plot_date` since :meth:`plot_date` will set the
default tick locator to
:class:`matplotlib.ticker.AutoDateLocator` (if the tick
locator is not already set to a
:class:`matplotlib.ticker.DateLocator` instance) and the
default tick formatter to
:class:`matplotlib.ticker.AutoDateFormatter` (if the tick
formatter is not already set to a
:class:`matplotlib.ticker.DateFormatter` instance).
Valid kwargs are :class:`~matplotlib.lines.Line2D` properties:
%(Line2D)s
.. seealso::
:mod:`~matplotlib.dates`:
for helper functions
:func:`~matplotlib.dates.date2num`,
:func:`~matplotlib.dates.num2date` and
:func:`~matplotlib.dates.drange`:
for help on creating the required floating point
dates.
"""
if not self._hold: self.cla()
ret = self.plot(x, y, fmt, **kwargs)
if xdate:
self.xaxis_date(tz)
if ydate:
self.yaxis_date(tz)
self.autoscale_view()
return ret
plot_date.__doc__ = cbook.dedent(plot_date.__doc__) % martist.kwdocd
def loglog(self, *args, **kwargs):
"""
call signature::
loglog(*args, **kwargs)
Make a plot with log scaling on the *x* and *y* axis.
:func:`~matplotlib.pyplot.loglog` supports all the keyword
arguments of :func:`~matplotlib.pyplot.plot` and
:meth:`matplotlib.axes.Axes.set_xscale` /
:meth:`matplotlib.axes.Axes.set_yscale`.
Notable keyword arguments:
*basex*/*basey*: scalar > 1
base of the *x*/*y* logarithm
*subsx*/*subsy*: [ None | sequence ]
the location of the minor *x*/*y* ticks; *None* defaults
to autosubs, which depend on the number of decades in the
plot; see :meth:`matplotlib.axes.Axes.set_xscale` /
:meth:`matplotlib.axes.Axes.set_yscale` for details
The remaining valid kwargs are
:class:`~matplotlib.lines.Line2D` properties:
%(Line2D)s
**Example:**
.. plot:: mpl_examples/pylab_examples/log_demo.py
"""
if not self._hold: self.cla()
dx = {'basex': kwargs.pop('basex', 10),
'subsx': kwargs.pop('subsx', None),
}
dy = {'basey': kwargs.pop('basey', 10),
'subsy': kwargs.pop('subsy', None),
}
self.set_xscale('log', **dx)
self.set_yscale('log', **dy)
b = self._hold
self._hold = True # we've already processed the hold
l = self.plot(*args, **kwargs)
self._hold = b # restore the hold
return l
loglog.__doc__ = cbook.dedent(loglog.__doc__) % martist.kwdocd
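# Usage sketch for the log-scale convenience wrappers; basex/basey control the
# logarithm base, everything else is forwarded to plot().
#
#   import numpy as np
#   t = np.arange(1, 100)
#   ax.loglog(t, t**2, basex=10, basey=2)
#   ax.semilogx(t, np.log(t))           # log scale on x only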
def semilogx(self, *args, **kwargs):
"""
call signature::
semilogx(*args, **kwargs)
Make a plot with log scaling on the *x* axis.
:func:`semilogx` supports all the keyword arguments of
:func:`~matplotlib.pyplot.plot` and
:meth:`matplotlib.axes.Axes.set_xscale`.
Notable keyword arguments:
*basex*: scalar > 1
base of the *x* logarithm
*subsx*: [ None | sequence ]
The location of the minor xticks; *None* defaults to
autosubs, which depend on the number of decades in the
plot; see :meth:`~matplotlib.axes.Axes.set_xscale` for
details.
The remaining valid kwargs are
:class:`~matplotlib.lines.Line2D` properties:
%(Line2D)s
.. seealso::
:meth:`loglog`:
For example code and figure
"""
if not self._hold: self.cla()
d = {'basex': kwargs.pop( 'basex', 10),
'subsx': kwargs.pop( 'subsx', None),
}
self.set_xscale('log', **d)
b = self._hold
self._hold = True # we've already processed the hold
l = self.plot(*args, **kwargs)
self._hold = b # restore the hold
return l
semilogx.__doc__ = cbook.dedent(semilogx.__doc__) % martist.kwdocd
def semilogy(self, *args, **kwargs):
"""
call signature::
semilogy(*args, **kwargs)
Make a plot with log scaling on the *y* axis.
:func:`semilogy` supports all the keyword arguments of
:func:`~matplotlib.pylab.plot` and
:meth:`matplotlib.axes.Axes.set_yscale`.
Notable keyword arguments:
*basey*: scalar > 1
Base of the *y* logarithm
*subsy*: [ None | sequence ]
The location of the minor yticks; *None* defaults to
autosubs, which depend on the number of decades in the
plot; see :meth:`~matplotlib.axes.Axes.set_yscale` for
details.
The remaining valid kwargs are
:class:`~matplotlib.lines.Line2D` properties:
%(Line2D)s
.. seealso::
:meth:`loglog`:
For example code and figure
"""
if not self._hold: self.cla()
d = {'basey': kwargs.pop('basey', 10),
'subsy': kwargs.pop('subsy', None),
}
self.set_yscale('log', **d)
b = self._hold
self._hold = True # we've already processed the hold
l = self.plot(*args, **kwargs)
self._hold = b # restore the hold
return l
semilogy.__doc__ = cbook.dedent(semilogy.__doc__) % martist.kwdocd
def acorr(self, x, **kwargs):
"""
call signature::
acorr(x, normed=False, detrend=mlab.detrend_none, usevlines=False,
maxlags=None, **kwargs)
Plot the autocorrelation of *x*. If *normed* = *True*,
normalize the data by the autocorrelation at 0-th lag. *x* is
detrended by the *detrend* callable (default: no detrending).
Data are plotted as ``plot(lags, c, **kwargs)``
Return value is a tuple (*lags*, *c*, *line*) where:
- *lags* are a length 2*maxlags+1 lag vector
- *c* is the 2*maxlags+1 auto correlation vector
- *line* is a :class:`~matplotlib.lines.Line2D` instance
returned by :meth:`plot`
The default *linestyle* is None and the default *marker* is
``'o'``, though these can be overridden with keyword args.
The cross correlation is performed with
:func:`numpy.correlate` with *mode* = 2.
If *usevlines* is *True*, :meth:`~matplotlib.axes.Axes.vlines`
rather than :meth:`~matplotlib.axes.Axes.plot` is used to draw
vertical lines from the origin to the acorr. Otherwise, the
plot style is determined by the kwargs, which are
:class:`~matplotlib.lines.Line2D` properties.
*maxlags* is a positive integer detailing the number of lags
to show. The default value of *None* will return all
:math:`2 \mathrm{len}(x) - 1` lags.
The return value is a tuple (*lags*, *c*, *linecol*, *b*)
where
- *linecol* is the
:class:`~matplotlib.collections.LineCollection`
- *b* is the *x*-axis.
.. seealso::
:meth:`~matplotlib.axes.Axes.plot` or
:meth:`~matplotlib.axes.Axes.vlines`: For documentation on
valid kwargs.
**Example:**
:func:`~matplotlib.pyplot.xcorr` above, and
:func:`~matplotlib.pyplot.acorr` below.
**Example:**
.. plot:: mpl_examples/pylab_examples/xcorr_demo.py
"""
return self.xcorr(x, x, **kwargs)
acorr.__doc__ = cbook.dedent(acorr.__doc__) % martist.kwdocd
def xcorr(self, x, y, normed=False, detrend=mlab.detrend_none,
usevlines=False, maxlags=None, **kwargs):
"""
call signature::
xcorr(x, y, normed=False, detrend=mlab.detrend_none,
usevlines=False, **kwargs):
Plot the cross correlation between *x* and *y*. If *normed* =
*True*, normalize the data by the cross correlation at 0-th
lag. *x* and *y* are detrended by the *detrend* callable
(default: no detrending). *x* and *y* must be equal length.
Data are plotted as ``plot(lags, c, **kwargs)``
Return value is a tuple (*lags*, *c*, *line*) where:
- *lags* are a length ``2*maxlags+1`` lag vector
- *c* is the ``2*maxlags+1`` cross correlation vector
- *line* is a :class:`~matplotlib.lines.Line2D` instance
returned by :func:`~matplotlib.pyplot.plot`.
The default *linestyle* is *None* and the default *marker* is
'o', though these can be overridden with keyword args. The
cross correlation is performed with :func:`numpy.correlate`
with *mode* = 2.
If *usevlines* is *True*:
:func:`~matplotlib.pyplot.vlines`
rather than :func:`~matplotlib.pyplot.plot` is used to draw
vertical lines from the origin to the xcorr. Otherwise the
plotstyle is determined by the kwargs, which are
:class:`~matplotlib.lines.Line2D` properties.
The return value is a tuple (*lags*, *c*, *linecol*, *b*)
where *linecol* is the
:class:`matplotlib.collections.LineCollection` instance and
*b* is the *x*-axis.
*maxlags* is a positive integer detailing the number of lags to show.
The default value of *None* will return all ``(2*len(x)-1)`` lags.
**Example:**
:func:`~matplotlib.pyplot.xcorr` above, and
:func:`~matplotlib.pyplot.acorr` below.
**Example:**
.. plot:: mpl_examples/pylab_examples/xcorr_demo.py
"""
Nx = len(x)
if Nx!=len(y):
raise ValueError('x and y must be equal length')
x = detrend(np.asarray(x))
y = detrend(np.asarray(y))
c = np.correlate(x, y, mode=2)
if normed: c/= np.sqrt(np.dot(x,x) * np.dot(y,y))
if maxlags is None: maxlags = Nx - 1
if maxlags >= Nx or maxlags < 1:
raise ValueError('maxlags must be None or strictly '
'positive < %d'%Nx)
lags = np.arange(-maxlags,maxlags+1)
c = c[Nx-1-maxlags:Nx+maxlags]
if usevlines:
a = self.vlines(lags, [0], c, **kwargs)
b = self.axhline(**kwargs)
else:
kwargs.setdefault('marker', 'o')
kwargs.setdefault('linestyle', 'None')
a, = self.plot(lags, c, **kwargs)
b = None
return lags, c, a, b
xcorr.__doc__ = cbook.dedent(xcorr.__doc__) % martist.kwdocd
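# Usage sketch for acorr()/xcorr(): with usevlines=True the correlation is
# drawn as vertical lines from the origin and a LineCollection is returned as
# part of the tuple; with usevlines=False the last element is None.
#
#   import numpy as np
#   x, y = np.random.randn(2, 200)
#   lags, c, linecol, baseline = ax.xcorr(x, y, usevlines=True,
#                                         maxlags=20, normed=True)
#   lags, c, line, _ = ax.acorr(x, usevlines=False, maxlags=20)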
def legend(self, *args, **kwargs):
"""
call signature::
legend(*args, **kwargs)
Place a legend on the current axes at location *loc*. Labels are a
sequence of strings and *loc* can be a string or an integer specifying
the legend location.
To make a legend with existing lines::
legend()
:meth:`legend` by itself will try and build a legend using the label
property of the lines/patches/collections. You can set the label of
a line by doing::
plot(x, y, label='my data')
or::
line.set_label('my data').
If label is set to '_nolegend_', the item will not be shown in
legend.
To automatically generate the legend from labels::
legend( ('label1', 'label2', 'label3') )
To make a legend for a list of lines and labels::
legend( (line1, line2, line3), ('label1', 'label2', 'label3') )
To make a legend at a given location, using a location argument::
legend( ('label1', 'label2', 'label3'), loc='upper left')
or::
legend( (line1, line2, line3), ('label1', 'label2', 'label3'), loc=2)
The location codes are
=============== =============
Location String Location Code
=============== =============
'best' 0
'upper right' 1
'upper left' 2
'lower left' 3
'lower right' 4
'right' 5
'center left' 6
'center right' 7
'lower center' 8
'upper center' 9
'center' 10
=============== =============
If none of these locations are suitable, loc can be a 2-tuple
giving x,y in axes coords, ie::
loc = 0, 1 # left top
loc = 0.5, 0.5 # center
Keyword arguments:
*isaxes*: [ True | False ]
Indicates that this is an axes legend
*numpoints*: integer
The number of points in the legend line, default is 4
*prop*: [ None | FontProperties ]
A :class:`matplotlib.font_manager.FontProperties`
instance, or *None* to use rc settings.
*pad*: [ None | scalar ]
The fractional whitespace inside the legend border, between 0 and 1.
If *None*, use rc settings.
*markerscale*: [ None | scalar ]
The relative size of legend markers vs. original. If *None*, use rc
settings.
*shadow*: [ None | False | True ]
If *True*, draw a shadow behind legend. If *None*, use rc settings.
*labelsep*: [ None | scalar ]
The vertical space between the legend entries. If *None*, use rc
settings.
*handlelen*: [ None | scalar ]
The length of the legend lines. If *None*, use rc settings.
*handletextsep*: [ None | scalar ]
The space between the legend line and legend text. If *None*, use rc
settings.
*axespad*: [ None | scalar ]
The border between the axes and legend edge. If *None*, use rc
settings.
**Example:**
.. plot:: mpl_examples/api/legend_demo.py
"""
def get_handles():
handles = self.lines[:]
handles.extend(self.patches)
handles.extend([c for c in self.collections
if isinstance(c, mcoll.LineCollection)])
handles.extend([c for c in self.collections
if isinstance(c, mcoll.RegularPolyCollection)])
return handles
if len(args)==0:
handles = []
labels = []
for handle in get_handles():
label = handle.get_label()
if (label is not None and
label != '' and not label.startswith('_')):
handles.append(handle)
labels.append(label)
if len(handles) == 0:
warnings.warn("No labeled objects found. "
"Use label='...' kwarg on individual plots.")
return None
elif len(args)==1:
# LABELS
labels = args[0]
handles = [h for h, label in zip(get_handles(), labels)]
elif len(args)==2:
if is_string_like(args[1]) or isinstance(args[1], int):
# LABELS, LOC
labels, loc = args
handles = [h for h, label in zip(get_handles(), labels)]
kwargs['loc'] = loc
else:
# LINES, LABELS
handles, labels = args
elif len(args)==3:
# LINES, LABELS, LOC
handles, labels, loc = args
kwargs['loc'] = loc
else:
raise TypeError('Invalid arguments to legend')
handles = cbook.flatten(handles)
self.legend_ = mlegend.Legend(self, handles, labels, **kwargs)
return self.legend_
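# Usage sketch for legend(): the simplest form harvests the label kwarg of
# previously added lines; a location can be given by string or code. line1 and
# line2 in the commented variant stand for existing Line2D handles.
#
#   ax.plot([1, 2, 3], label='rising')
#   ax.plot([3, 2, 1], label='falling')
#   ax.legend(loc='upper left')
#   # explicit handles and labels:
#   # ax.legend((line1, line2), ('label1', 'label2'), loc=0)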
#### Specialized plotting
def step(self, x, y, *args, **kwargs):
'''
call signature::
step(x, y, *args, **kwargs)
Make a step plot. Additional keyword args to :func:`step` are the same
as those for :func:`~matplotlib.pyplot.plot`.
*x* and *y* must be 1-D sequences, and it is assumed, but not checked,
that *x* is uniformly increasing.
Keyword arguments:
*where*: [ 'pre' | 'post' | 'mid' ]
If 'pre', the interval from x[i] to x[i+1] has level y[i]
If 'post', that interval has level y[i+1]
If 'mid', the jumps in *y* occur half-way between the
*x*-values.
'''
where = kwargs.pop('where', 'pre')
if where not in ('pre', 'post', 'mid'):
raise ValueError("'where' argument to step must be "
"'pre', 'post' or 'mid'")
kwargs['linestyle'] = 'steps-' + where
return self.plot(x, y, *args, **kwargs)
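# Usage sketch for step(): the where keyword picks which side of each x value
# holds the corresponding y level.
#
#   import numpy as np
#   x = np.arange(10)
#   ax.step(x, x**2, where='mid')
#   ax.step(x, x**2 + 5, 'r', where='post')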
def bar(self, left, height, width=0.8, bottom=None,
color=None, edgecolor=None, linewidth=None,
yerr=None, xerr=None, ecolor=None, capsize=3,
align='edge', orientation='vertical', log=False,
**kwargs
):
"""
call signature::
bar(left, height, width=0.8, bottom=0,
color=None, edgecolor=None, linewidth=None,
yerr=None, xerr=None, ecolor=None, capsize=3,
align='edge', orientation='vertical', log=False)
Make a bar plot with rectangles bounded by:
*left*, *left* + *width*, *bottom*, *bottom* + *height*
(left, right, bottom and top edges)
*left*, *height*, *width*, and *bottom* can be either scalars
or sequences
Return value is a list of
:class:`matplotlib.patches.Rectangle` instances.
Required arguments:
======== ===============================================
Argument Description
======== ===============================================
*left* the x coordinates of the left sides of the bars
*height* the heights of the bars
======== ===============================================
Optional keyword arguments:
=============== ==========================================
Keyword Description
=============== ==========================================
*width* the widths of the bars
*bottom* the y coordinates of the bottom edges of
the bars
*color* the colors of the bars
*edgecolor* the colors of the bar edges
*linewidth* width of bar edges; None means use default
linewidth; 0 means don't draw edges.
*xerr* if not None, will be used to generate
errorbars on the bar chart
*yerr* if not None, will be used to generate
errorbars on the bar chart
*ecolor* specifies the color of any errorbar
*capsize* (default 3) determines the length in
points of the error bar caps
*align* 'edge' (default) | 'center'
*orientation* 'vertical' | 'horizontal'
*log* [False|True] False (default) leaves the
orientation axis as-is; True sets it to
log scale
=============== ==========================================
For vertical bars, *align* = 'edge' aligns bars by their left
edges in left, while *align* = 'center' interprets these
values as the *x* coordinates of the bar centers. For
horizontal bars, *align* = 'edge' aligns bars by their bottom
edges in bottom, while *align* = 'center' interprets these
values as the *y* coordinates of the bar centers.
The optional arguments *color*, *edgecolor*, *linewidth*,
*xerr*, and *yerr* can be either scalars or sequences of
length equal to the number of bars. This enables you to use
bar as the basis for stacked bar charts, or candlestick plots.
Other optional kwargs:
%(Rectangle)s
**Example:** A stacked bar chart.
.. plot:: mpl_examples/pylab_examples/bar_stacked.py
"""
if not self._hold: self.cla()
label = kwargs.pop('label', '')
def make_iterable(x):
if not iterable(x):
return [x]
else:
return x
# make them safe to take len() of
_left = left
left = make_iterable(left)
height = make_iterable(height)
width = make_iterable(width)
_bottom = bottom
bottom = make_iterable(bottom)
linewidth = make_iterable(linewidth)
adjust_ylim = False
adjust_xlim = False
if orientation == 'vertical':
self._process_unit_info(xdata=left, ydata=height, kwargs=kwargs)
if log:
self.set_yscale('log')
# size width and bottom according to length of left
if _bottom is None:
if self.get_yscale() == 'log':
bottom = [1e-100]
adjust_ylim = True
else:
bottom = [0]
nbars = len(left)
if len(width) == 1:
width *= nbars
if len(bottom) == 1:
bottom *= nbars
elif orientation == 'horizontal':
self._process_unit_info(xdata=width, ydata=bottom, kwargs=kwargs)
if log:
self.set_xscale('log')
# size left and height according to length of bottom
if _left is None:
if self.get_xscale() == 'log':
left = [1e-100]
adjust_xlim = True
else:
left = [0]
nbars = len(bottom)
if len(left) == 1:
left *= nbars
if len(height) == 1:
height *= nbars
else:
raise ValueError, 'invalid orientation: %s' % orientation
# do not convert to array here as unit info is lost
#left = np.asarray(left)
#height = np.asarray(height)
#width = np.asarray(width)
#bottom = np.asarray(bottom)
if len(linewidth) < nbars:
linewidth *= nbars
if color is None:
color = [None] * nbars
else:
color = list(mcolors.colorConverter.to_rgba_array(color))
if len(color) < nbars:
color *= nbars
if edgecolor is None:
edgecolor = [None] * nbars
else:
edgecolor = list(mcolors.colorConverter.to_rgba_array(edgecolor))
if len(edgecolor) < nbars:
edgecolor *= nbars
if yerr is not None:
if not iterable(yerr):
yerr = [yerr]*nbars
if xerr is not None:
if not iterable(xerr):
xerr = [xerr]*nbars
# FIXME: convert the following to proper input validation
# raising ValueError; don't use assert for this.
assert len(left)==nbars, "argument 'left' must be %d or scalar" % nbars
assert len(height)==nbars, ("argument 'height' must be %d or scalar" %
nbars)
assert len(width)==nbars, ("argument 'width' must be %d or scalar" %
nbars)
assert len(bottom)==nbars, ("argument 'bottom' must be %d or scalar" %
nbars)
if yerr is not None and len(yerr)!=nbars:
raise ValueError(
"bar() argument 'yerr' must be len(%s) or scalar" % nbars)
if xerr is not None and len(xerr)!=nbars:
raise ValueError(
"bar() argument 'xerr' must be len(%s) or scalar" % nbars)
patches = []
# lets do some conversions now since some types cannot be
# subtracted uniformly
if self.xaxis is not None:
xconv = self.xaxis.converter
if xconv is not None:
units = self.xaxis.get_units()
left = xconv.convert( left, units )
width = xconv.convert( width, units )
if self.yaxis is not None:
yconv = self.yaxis.converter
if yconv is not None :
units = self.yaxis.get_units()
bottom = yconv.convert( bottom, units )
height = yconv.convert( height, units )
if align == 'edge':
pass
elif align == 'center':
if orientation == 'vertical':
left = [left[i] - width[i]/2. for i in xrange(len(left))]
elif orientation == 'horizontal':
bottom = [bottom[i] - height[i]/2. for i in xrange(len(bottom))]
else:
raise ValueError('invalid alignment: %s' % align)
args = zip(left, bottom, width, height, color, edgecolor, linewidth)
for l, b, w, h, c, e, lw in args:
if h<0:
b += h
h = abs(h)
if w<0:
l += w
w = abs(w)
r = mpatches.Rectangle(
xy=(l, b), width=w, height=h,
facecolor=c,
edgecolor=e,
linewidth=lw,
label=label
)
label = '_nolegend_'
r.update(kwargs)
#print r.get_label(), label, 'label' in kwargs
self.add_patch(r)
patches.append(r)
holdstate = self._hold
self.hold(True) # ensure hold is on before plotting errorbars
if xerr is not None or yerr is not None:
if orientation == 'vertical':
# using list comps rather than arrays to preserve unit info
x = [l+0.5*w for l, w in zip(left, width)]
y = [b+h for b,h in zip(bottom, height)]
elif orientation == 'horizontal':
# using list comps rather than arrays to preserve unit info
x = [l+w for l,w in zip(left, width)]
y = [b+0.5*h for b,h in zip(bottom, height)]
self.errorbar(
x, y,
yerr=yerr, xerr=xerr,
fmt=None, ecolor=ecolor, capsize=capsize)
self.hold(holdstate) # restore previous hold state
if adjust_xlim:
xmin, xmax = self.dataLim.intervalx
xmin = np.amin(width[width!=0]) # filter out the 0 width rects
if xerr is not None:
xmin = xmin - np.amax(xerr)
xmin = max(xmin*0.9, 1e-100)
self.dataLim.intervalx = (xmin, xmax)
if adjust_ylim:
ymin, ymax = self.dataLim.intervaly
ymin = np.amin(height[height!=0]) # filter out the 0 height rects
if yerr is not None:
ymin = ymin - np.amax(yerr)
ymin = max(ymin*0.9, 1e-100)
self.dataLim.intervaly = (ymin, ymax)
self.autoscale_view()
return patches
bar.__doc__ = cbook.dedent(bar.__doc__) % martist.kwdocd
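# Illustrative sketch (not part of the original source): a minimal call to
# bar(), assuming a standard pyplot session; the axes name `ax` and the data
# are hypothetical.
#
#   import matplotlib.pyplot as plt
#   ax = plt.figure().add_subplot(111)
#   ax.bar([0, 1, 2], [3, 1, 2], width=0.8, yerr=[0.2, 0.1, 0.3], color='c')
#   plt.show()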
def barh(self, bottom, width, height=0.8, left=None, **kwargs):
"""
call signature::
barh(bottom, width, height=0.8, left=0, **kwargs)
Make a horizontal bar plot with rectangles bounded by:
*left*, *left* + *width*, *bottom*, *bottom* + *height*
(left, right, bottom and top edges)
*bottom*, *width*, *height*, and *left* can be either scalars
or sequences
Return value is a list of
:class:`matplotlib.patches.Rectangle` instances.
Required arguments:
======== ======================================================
Argument Description
======== ======================================================
*bottom* the vertical positions of the bottom edges of the bars
*width* the lengths of the bars
======== ======================================================
Optional keyword arguments:
=============== ==========================================
Keyword Description
=============== ==========================================
*height* the heights (thicknesses) of the bars
*left* the x coordinates of the left edges of the
bars
*color* the colors of the bars
*edgecolor* the colors of the bar edges
*linewidth* width of bar edges; None means use default
linewidth; 0 means don't draw edges.
*xerr* if not None, will be used to generate
errorbars on the bar chart
*yerr* if not None, will be used to generate
errorbars on the bar chart
*ecolor* specifies the color of any errorbar
*capsize* (default 3) determines the length in
points of the error bar caps
*align* 'edge' (default) | 'center'
*log* [False|True] False (default) leaves the
horizontal axis as-is; True sets it to log
scale
=============== ==========================================
Setting *align* = 'edge' aligns bars by their bottom edges in
bottom, while *align* = 'center' interprets these values as
the *y* coordinates of the bar centers.
The optional arguments *color*, *edgecolor*, *linewidth*,
*xerr*, and *yerr* can be either scalars or sequences of
length equal to the number of bars. This enables you to use
barh as the basis for stacked bar charts, or candlestick
plots.
other optional kwargs:
%(Rectangle)s
"""
patches = self.bar(left=left, height=height, width=width, bottom=bottom,
orientation='horizontal', **kwargs)
return patches
barh.__doc__ = cbook.dedent(barh.__doc__) % martist.kwdocd
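# Illustrative sketch (hypothetical data, assumes a pyplot session): barh()
# mirrors bar() with the roles of the two axes swapped.
#
#   import matplotlib.pyplot as plt
#   ax = plt.figure().add_subplot(111)
#   ax.barh([0, 1, 2], [3, 1, 2], height=0.5, xerr=[0.2, 0.1, 0.3])
#   plt.show()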
def broken_barh(self, xranges, yrange, **kwargs):
"""
call signature::
broken_barh(self, xranges, yrange, **kwargs)
A collection of horizontal bars spanning *yrange* with a sequence of
*xranges*.
Required arguments:
========= ==============================
Argument Description
========= ==============================
*xranges* sequence of (*xmin*, *xwidth*)
*yrange* sequence of (*ymin*, *ywidth*)
========= ==============================
kwargs are
:class:`matplotlib.collections.BrokenBarHCollection`
properties:
%(BrokenBarHCollection)s
these can either be a single argument, ie::
facecolors = 'black'
or a sequence of arguments for the various bars, ie::
facecolors = ('black', 'red', 'green')
**Example:**
.. plot:: mpl_examples/pylab_examples/broken_barh.py
"""
col = mcoll.BrokenBarHCollection(xranges, yrange, **kwargs)
self.add_collection(col, autolim=True)
self.autoscale_view()
return col
broken_barh.__doc__ = cbook.dedent(broken_barh.__doc__) % martist.kwdocd
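# Illustrative sketch (hypothetical values): each (xmin, xwidth) pair in the
# first argument to broken_barh() becomes one bar spanning the single
# (ymin, ywidth) range.
#
#   import matplotlib.pyplot as plt
#   ax = plt.figure().add_subplot(111)
#   ax.broken_barh([(10, 5), (20, 3)], (0, 2), facecolors='blue')
#   plt.show()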
def stem(self, x, y, linefmt='b-', markerfmt='bo', basefmt='r-'):
"""
call signature::
stem(x, y, linefmt='b-', markerfmt='bo', basefmt='r-')
A stem plot plots vertical lines (using *linefmt*) at each *x*
location from the baseline to *y*, and places a marker there
using *markerfmt*. A horizontal line at 0 is plotted using
*basefmt*.
Return value is a tuple (*markerline*, *stemlines*,
*baseline*).
.. seealso::
`this document`__ for details
:file:`examples/pylab_examples/stem_plot.py`:
for a demo
__ http://www.mathworks.com/access/helpdesk/help/techdoc/ref/stem.html
"""
remember_hold=self._hold
if not self._hold: self.cla()
self.hold(True)
markerline, = self.plot(x, y, markerfmt)
stemlines = []
for thisx, thisy in zip(x, y):
l, = self.plot([thisx,thisx], [0, thisy], linefmt)
stemlines.append(l)
baseline, = self.plot([np.amin(x), np.amax(x)], [0,0], basefmt)
self.hold(remember_hold)
return markerline, stemlines, baseline
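# Illustrative sketch (hypothetical data): stem() with the default blue stems,
# blue circle markers and red baseline.
#
#   import numpy as np
#   import matplotlib.pyplot as plt
#   ax = plt.figure().add_subplot(111)
#   x = np.linspace(0.1, 2 * np.pi, 10)
#   ax.stem(x, np.cos(x))
#   plt.show()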
def pie(self, x, explode=None, labels=None, colors=None,
autopct=None, pctdistance=0.6, shadow=False,
labeldistance=1.1):
r"""
call signature::
pie(x, explode=None, labels=None,
colors=('b', 'g', 'r', 'c', 'm', 'y', 'k', 'w'),
autopct=None, pctdistance=0.6, labeldistance=1.1, shadow=False)
Make a pie chart of array *x*. The fractional area of each
wedge is given by x/sum(x). If sum(x) <= 1, then the values
of x give the fractional area directly and the array will not
be normalized.
Keyword arguments:
*explode*: [ None | len(x) sequence ]
If not *None*, is a len(*x*) array which specifies the
fraction of the radius with which to offset each wedge.
*colors*: [ None | color sequence ]
A sequence of matplotlib color args through which the pie chart
will cycle.
*labels*: [ None | len(x) sequence of strings ]
A sequence of strings providing the labels for each wedge
*autopct*: [ None | format string | format function ]
If not *None*, is a string or function used to label the
wedges with their numeric value. The label will be placed inside
the wedge. If it is a format string, the label will be ``fmt%pct``.
If it is a function, it will be called.
*pctdistance*: scalar
The ratio between the center of each pie slice and the
start of the text generated by *autopct*. Ignored if
*autopct* is *None*; default is 0.6.
*labeldistance*: scalar
The radial distance at which the pie labels are drawn
*shadow*: [ False | True ]
Draw a shadow beneath the pie.
The pie chart will probably look best if the figure and axes are
square. Eg.::
figure(figsize=(8,8))
ax = axes([0.1, 0.1, 0.8, 0.8])
Return value:
If *autopct* is None, return the tuple (*patches*, *texts*):
- *patches* is a sequence of
:class:`matplotlib.patches.Wedge` instances
- *texts* is a list of the label
:class:`matplotlib.text.Text` instances.
If *autopct* is not *None*, return the tuple (*patches*,
*texts*, *autotexts*), where *patches* and *texts* are as
above, and *autotexts* is a list of
:class:`~matplotlib.text.Text` instances for the numeric
labels.
"""
self.set_frame_on(False)
x = np.asarray(x).astype(np.float32)
sx = float(x.sum())
if sx>1: x = np.divide(x,sx)
if labels is None: labels = ['']*len(x)
if explode is None: explode = [0]*len(x)
assert(len(x)==len(labels))
assert(len(x)==len(explode))
if colors is None: colors = ('b', 'g', 'r', 'c', 'm', 'y', 'k', 'w')
center = 0,0
radius = 1
theta1 = 0
i = 0
texts = []
slices = []
autotexts = []
for frac, label, expl in cbook.safezip(x,labels, explode):
x, y = center
theta2 = theta1 + frac
thetam = 2*math.pi*0.5*(theta1+theta2)
x += expl*math.cos(thetam)
y += expl*math.sin(thetam)
w = mpatches.Wedge((x,y), radius, 360.*theta1, 360.*theta2,
facecolor=colors[i%len(colors)])
slices.append(w)
self.add_patch(w)
w.set_label(label)
if shadow:
# make sure to add a shadow after the call to
# add_patch so the figure and transform props will be
# set
shad = mpatches.Shadow(w, -0.02, -0.02,
#props={'facecolor':w.get_facecolor()}
)
shad.set_zorder(0.9*w.get_zorder())
self.add_patch(shad)
xt = x + labeldistance*radius*math.cos(thetam)
yt = y + labeldistance*radius*math.sin(thetam)
label_alignment = xt > 0 and 'left' or 'right'
t = self.text(xt, yt, label,
size=rcParams['xtick.labelsize'],
horizontalalignment=label_alignment,
verticalalignment='center')
texts.append(t)
if autopct is not None:
xt = x + pctdistance*radius*math.cos(thetam)
yt = y + pctdistance*radius*math.sin(thetam)
if is_string_like(autopct):
s = autopct%(100.*frac)
elif callable(autopct):
s = autopct(100.*frac)
else:
raise TypeError(
'autopct must be callable or a format string')
t = self.text(xt, yt, s,
horizontalalignment='center',
verticalalignment='center')
autotexts.append(t)
theta1 = theta2
i += 1
self.set_xlim((-1.25, 1.25))
self.set_ylim((-1.25, 1.25))
self.set_xticks([])
self.set_yticks([])
if autopct is None: return slices, texts
else: return slices, texts, autotexts
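# Illustrative sketch (hypothetical fractions and labels): a square figure is
# used so the pie is not distorted.
#
#   import matplotlib.pyplot as plt
#   fig = plt.figure(figsize=(6, 6))
#   ax = fig.add_subplot(111)
#   ax.pie([15, 30, 45, 10], labels=['a', 'b', 'c', 'd'], autopct='%1.1f%%')
#   plt.show()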
def errorbar(self, x, y, yerr=None, xerr=None,
fmt='-', ecolor=None, elinewidth=None, capsize=3,
barsabove=False, lolims=False, uplims=False,
xlolims=False, xuplims=False, **kwargs):
"""
call signature::
errorbar(x, y, yerr=None, xerr=None,
fmt='-', ecolor=None, elinewidth=None, capsize=3,
barsabove=False, lolims=False, uplims=False,
xlolims=False, xuplims=False)
Plot *x* versus *y* with error deltas in *yerr* and *xerr*.
Vertical errorbars are plotted if *yerr* is not *None*.
Horizontal errorbars are plotted if *xerr* is not *None*.
*x*, *y*, *xerr*, and *yerr* can all be scalars, which plots a
single error bar at *x*, *y*.
Optional keyword arguments:
*xerr*/*yerr*: [ scalar | N, Nx1, Nx2 array-like ]
If a scalar number, len(N) array-like object, or an Nx1 array-like
object, errorbars are drawn +/- value.
If a 2xN array-like (two rows, each of length N), errorbars are drawn at
-row1 and +row2 respectively.
*fmt*: '-'
The plot format symbol for *y*. If *fmt* is *None*, just plot the
errorbars with no line symbols. This can be useful for creating a
bar plot with errorbars.
*ecolor*: [ None | mpl color ]
a matplotlib color arg which gives the color the errorbar lines; if
*None*, use the marker color.
*elinewidth*: scalar
the linewidth of the errorbar lines. If *None*, use the linewidth.
*capsize*: scalar
the size of the error bar caps in points
*barsabove*: [ True | False ]
if *True*, will plot the errorbars above the plot
symbols. Default is below.
*lolims*/*uplims*/*xlolims*/*xuplims*: [ False | True ]
These arguments can be used to indicate that a value gives
only upper/lower limits. In that case a caret symbol is
used to indicate this. lims-arguments may be of the same
type as *xerr* and *yerr*.
All other keyword arguments are passed on to the plot command for the
markers, so you can add additional key=value pairs to control the
errorbar markers. For example, this code makes big red squares with
thick green edges::
x,y,yerr = rand(3,10)
errorbar(x, y, yerr, marker='s',
mfc='red', mec='green', ms=20, mew=4)
where *mfc*, *mec*, *ms* and *mew* are aliases for the longer
property names, *markerfacecolor*, *markeredgecolor*, *markersize*
and *markeredgewidth*.
valid kwargs for the marker properties are
%(Line2D)s
Return value is a length 3 tuple. The first element is the
:class:`~matplotlib.lines.Line2D` instance for the *y* symbol
lines. The second element is a list of error bar cap lines,
the third element is a list of
:class:`~matplotlib.collections.LineCollection` instances for
the horizontal and vertical error ranges.
**Example:**
.. plot:: mpl_examples/pylab_examples/errorbar_demo.py
"""
self._process_unit_info(xdata=x, ydata=y, kwargs=kwargs)
if not self._hold: self.cla()
# make sure all the args are iterable; use lists not arrays to
# preserve units
if not iterable(x):
x = [x]
if not iterable(y):
y = [y]
if xerr is not None:
if not iterable(xerr):
xerr = [xerr]*len(x)
if yerr is not None:
if not iterable(yerr):
yerr = [yerr]*len(y)
l0 = None
if barsabove and fmt is not None:
l0, = self.plot(x,y,fmt,**kwargs)
barcols = []
caplines = []
lines_kw = {'label':'_nolegend_'}
if elinewidth:
lines_kw['linewidth'] = elinewidth
else:
if 'linewidth' in kwargs:
lines_kw['linewidth']=kwargs['linewidth']
if 'lw' in kwargs:
lines_kw['lw']=kwargs['lw']
if 'transform' in kwargs:
lines_kw['transform'] = kwargs['transform']
# arrays fine here, they are booleans and hence not units
if not iterable(lolims):
lolims = np.asarray([lolims]*len(x), bool)
else: lolims = np.asarray(lolims, bool)
if not iterable(uplims): uplims = np.array([uplims]*len(x), bool)
else: uplims = np.asarray(uplims, bool)
if not iterable(xlolims): xlolims = np.array([xlolims]*len(x), bool)
else: xlolims = np.asarray(xlolims, bool)
if not iterable(xuplims): xuplims = np.array([xuplims]*len(x), bool)
else: xuplims = np.asarray(xuplims, bool)
def xywhere(xs, ys, mask):
"""
return xs[mask], ys[mask] where mask is True but xs and
ys are not arrays
"""
assert len(xs)==len(ys)
assert len(xs)==len(mask)
xs = [thisx for thisx, b in zip(xs, mask) if b]
ys = [thisy for thisy, b in zip(ys, mask) if b]
return xs, ys
if capsize > 0:
plot_kw = {
'ms':2*capsize,
'label':'_nolegend_'}
if 'markeredgewidth' in kwargs:
plot_kw['markeredgewidth']=kwargs['markeredgewidth']
if 'mew' in kwargs:
plot_kw['mew']=kwargs['mew']
if 'transform' in kwargs:
plot_kw['transform'] = kwargs['transform']
if xerr is not None:
if (iterable(xerr) and len(xerr)==2 and
iterable(xerr[0]) and iterable(xerr[1])):
# using list comps rather than arrays to preserve units
left = [thisx-thiserr for (thisx, thiserr)
in cbook.safezip(x,xerr[0])]
right = [thisx+thiserr for (thisx, thiserr)
in cbook.safezip(x,xerr[1])]
else:
# using list comps rather than arrays to preserve units
left = [thisx-thiserr for (thisx, thiserr)
in cbook.safezip(x,xerr)]
right = [thisx+thiserr for (thisx, thiserr)
in cbook.safezip(x,xerr)]
barcols.append( self.hlines(y, left, right, **lines_kw ) )
if capsize > 0:
if xlolims.any():
# can't use numpy logical indexing since left and
# y are lists
leftlo, ylo = xywhere(left, y, xlolims)
caplines.extend(
self.plot(leftlo, ylo, ls='None',
marker=mlines.CARETLEFT, **plot_kw) )
xlolims = ~xlolims
leftlo, ylo = xywhere(left, y, xlolims)
caplines.extend( self.plot(leftlo, ylo, 'k|', **plot_kw) )
else:
caplines.extend( self.plot(left, y, 'k|', **plot_kw) )
if xuplims.any():
rightup, yup = xywhere(right, y, xuplims)
caplines.extend(
self.plot(rightup, yup, ls='None',
marker=mlines.CARETRIGHT, **plot_kw) )
xuplims = ~xuplims
rightup, yup = xywhere(right, y, xuplims)
caplines.extend( self.plot(rightup, yup, 'k|', **plot_kw) )
else:
caplines.extend( self.plot(right, y, 'k|', **plot_kw) )
if yerr is not None:
if (iterable(yerr) and len(yerr)==2 and
iterable(yerr[0]) and iterable(yerr[1])):
# using list comps rather than arrays to preserve units
lower = [thisy-thiserr for (thisy, thiserr)
in cbook.safezip(y,yerr[0])]
upper = [thisy+thiserr for (thisy, thiserr)
in cbook.safezip(y,yerr[1])]
else:
# using list comps rather than arrays to preserve units
lower = [thisy-thiserr for (thisy, thiserr)
in cbook.safezip(y,yerr)]
upper = [thisy+thiserr for (thisy, thiserr)
in cbook.safezip(y,yerr)]
barcols.append( self.vlines(x, lower, upper, **lines_kw) )
if capsize > 0:
if lolims.any():
xlo, lowerlo = xywhere(x, lower, lolims)
caplines.extend(
self.plot(xlo, lowerlo, ls='None',
marker=mlines.CARETDOWN, **plot_kw) )
lolims = ~lolims
xlo, lowerlo = xywhere(x, lower, lolims)
caplines.extend( self.plot(xlo, lowerlo, 'k_', **plot_kw) )
else:
caplines.extend( self.plot(x, lower, 'k_', **plot_kw) )
if uplims.any():
xup, upperup = xywhere(x, upper, uplims)
caplines.extend(
self.plot(xup, upperup, ls='None',
marker=mlines.CARETUP, **plot_kw) )
uplims = ~uplims
xup, upperup = xywhere(x, upper, uplims)
caplines.extend( self.plot(xup, upperup, 'k_', **plot_kw) )
else:
caplines.extend( self.plot(x, upper, 'k_', **plot_kw) )
if not barsabove and fmt is not None:
l0, = self.plot(x,y,fmt,**kwargs)
if ecolor is None:
if l0 is None:
ecolor = self._get_lines._get_next_cycle_color()
else:
ecolor = l0.get_color()
for l in barcols:
l.set_color(ecolor)
for l in caplines:
l.set_color(ecolor)
self.autoscale_view()
return (l0, caplines, barcols)
errorbar.__doc__ = cbook.dedent(errorbar.__doc__) % martist.kwdocd
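# Illustrative sketch (hypothetical data): symmetric y errors drawn around
# circle markers; passing a 2xN array instead would give asymmetric bars.
#
#   import numpy as np
#   import matplotlib.pyplot as plt
#   ax = plt.figure().add_subplot(111)
#   x = np.arange(5)
#   ax.errorbar(x, x ** 2, yerr=0.5, fmt='o', ecolor='r', capsize=4)
#   plt.show()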
def boxplot(self, x, notch=0, sym='b+', vert=1, whis=1.5,
positions=None, widths=None):
"""
call signature::
boxplot(x, notch=0, sym='+', vert=1, whis=1.5,
positions=None, widths=None)
Make a box and whisker plot for each column of *x* or each
vector in sequence *x*. The box extends from the lower to
upper quartile values of the data, with a line at the median.
The whiskers extend from the box to show the range of the
data. Flier points are those past the end of the whiskers.
- *notch* = 0 (default) produces a rectangular box plot.
- *notch* = 1 will produce a notched box plot
*sym* (default 'b+') is the default symbol for flier points.
Enter an empty string ('') if you don't want to show fliers.
- *vert* = 1 (default) makes the boxes vertical.
- *vert* = 0 makes horizontal boxes. This seems goofy, but
that's how Matlab did it.
*whis* (default 1.5) defines the length of the whiskers as
a function of the inner quartile range. They extend to the
most extreme data point within ( ``whis*(75%-25%)`` ) data range.
*positions* (default 1,2,...,n) sets the horizontal positions of
the boxes. The ticks and limits are automatically set to match
the positions.
*widths* is either a scalar or a vector and sets the width of
each box. The default is 0.5, or ``0.15*(distance between extreme
positions)`` if that is smaller.
*x* is an array or a sequence of vectors.
Returns a dictionary mapping each component of the boxplot
to a list of the :class:`matplotlib.lines.Line2D`
instances created.
**Example:**
.. plot:: pyplots/boxplot_demo.py
"""
if not self._hold: self.cla()
holdStatus = self._hold
whiskers, caps, boxes, medians, fliers = [], [], [], [], []
# convert x to a list of vectors
if hasattr(x, 'shape'):
if len(x.shape) == 1:
if hasattr(x[0], 'shape'):
x = list(x)
else:
x = [x,]
elif len(x.shape) == 2:
nr, nc = x.shape
if nr == 1:
x = [x]
elif nc == 1:
x = [x.ravel()]
else:
x = [x[:,i] for i in xrange(nc)]
else:
raise ValueError("input x can have no more than 2 dimensions")
if not hasattr(x[0], '__len__'):
x = [x]
col = len(x)
# get some plot info
if positions is None:
positions = range(1, col + 1)
if widths is None:
distance = max(positions) - min(positions)
widths = min(0.15*max(distance,1.0), 0.5)
if isinstance(widths, float) or isinstance(widths, int):
widths = np.ones((col,), float) * widths
# loop through columns, adding each to plot
self.hold(True)
for i,pos in enumerate(positions):
d = np.ravel(x[i])
row = len(d)
# get median and quartiles
q1, med, q3 = mlab.prctile(d,[25,50,75])
# get high extreme
iq = q3 - q1
hi_val = q3 + whis*iq
wisk_hi = np.compress( d <= hi_val , d )
if len(wisk_hi) == 0:
wisk_hi = q3
else:
wisk_hi = max(wisk_hi)
# get low extreme
lo_val = q1 - whis*iq
wisk_lo = np.compress( d >= lo_val, d )
if len(wisk_lo) == 0:
wisk_lo = q1
else:
wisk_lo = min(wisk_lo)
# get fliers - if we are showing them
flier_hi = []
flier_lo = []
flier_hi_x = []
flier_lo_x = []
if len(sym) != 0:
flier_hi = np.compress( d > wisk_hi, d )
flier_lo = np.compress( d < wisk_lo, d )
flier_hi_x = np.ones(flier_hi.shape[0]) * pos
flier_lo_x = np.ones(flier_lo.shape[0]) * pos
# get x locations for fliers, whisker, whisker cap and box sides
box_x_min = pos - widths[i] * 0.5
box_x_max = pos + widths[i] * 0.5
wisk_x = np.ones(2) * pos
cap_x_min = pos - widths[i] * 0.25
cap_x_max = pos + widths[i] * 0.25
cap_x = [cap_x_min, cap_x_max]
# get y location for median
med_y = [med, med]
# calculate 'regular' plot
if notch == 0:
# make our box vectors
box_x = [box_x_min, box_x_max, box_x_max, box_x_min, box_x_min ]
box_y = [q1, q1, q3, q3, q1 ]
# make our median line vectors
med_x = [box_x_min, box_x_max]
# calculate 'notch' plot
else:
notch_max = med + 1.57*iq/np.sqrt(row)
notch_min = med - 1.57*iq/np.sqrt(row)
if notch_max > q3:
notch_max = q3
if notch_min < q1:
notch_min = q1
# make our notched box vectors
box_x = [box_x_min, box_x_max, box_x_max, cap_x_max, box_x_max,
box_x_max, box_x_min, box_x_min, cap_x_min, box_x_min,
box_x_min ]
box_y = [q1, q1, notch_min, med, notch_max, q3, q3, notch_max,
med, notch_min, q1]
# make our median line vectors
med_x = [cap_x_min, cap_x_max]
med_y = [med, med]
# vertical or horizontal plot?
if vert:
def doplot(*args):
return self.plot(*args)
else:
def doplot(*args):
shuffled = []
for i in xrange(0, len(args), 3):
shuffled.extend([args[i+1], args[i], args[i+2]])
return self.plot(*shuffled)
whiskers.extend(doplot(wisk_x, [q1, wisk_lo], 'b--',
wisk_x, [q3, wisk_hi], 'b--'))
caps.extend(doplot(cap_x, [wisk_hi, wisk_hi], 'k-',
cap_x, [wisk_lo, wisk_lo], 'k-'))
boxes.extend(doplot(box_x, box_y, 'b-'))
medians.extend(doplot(med_x, med_y, 'r-'))
fliers.extend(doplot(flier_hi_x, flier_hi, sym,
flier_lo_x, flier_lo, sym))
# fix our axes/ticks up a little
if 1 == vert:
setticks, setlim = self.set_xticks, self.set_xlim
else:
setticks, setlim = self.set_yticks, self.set_ylim
newlimits = min(positions)-0.5, max(positions)+0.5
setlim(newlimits)
setticks(positions)
# reset hold status
self.hold(holdStatus)
return dict(whiskers=whiskers, caps=caps, boxes=boxes,
medians=medians, fliers=fliers)
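# Illustrative sketch (hypothetical samples): one box per sequence in the
# input list; the returned dict gives access to the Line2D artists.
#
#   import numpy as np
#   import matplotlib.pyplot as plt
#   ax = plt.figure().add_subplot(111)
#   data = [np.random.randn(100), np.random.randn(100) + 1]
#   artists = ax.boxplot(data, notch=0, sym='b+', whis=1.5)
#   plt.show()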
def scatter(self, x, y, s=20, c='b', marker='o', cmap=None, norm=None,
vmin=None, vmax=None, alpha=1.0, linewidths=None,
faceted=True, verts=None,
**kwargs):
"""
call signatures::
scatter(x, y, s=20, c='b', marker='o', cmap=None, norm=None,
vmin=None, vmax=None, alpha=1.0, linewidths=None,
verts=None, **kwargs)
Make a scatter plot of *x* versus *y*, where *x*, *y* are 1-D
sequences of the same length, *N*.
Keyword arguments:
*s*:
size in points^2. It is a scalar or an array of the same
length as *x* and *y*.
*c*:
a color. *c* can be a single color format string, or a
sequence of color specifications of length *N*, or a
sequence of *N* numbers to be mapped to colors using the
*cmap* and *norm* specified via kwargs (see below). Note
that *c* should not be a single numeric RGB or RGBA
sequence because that is indistinguishable from an array
of values to be colormapped. *c* can be a 2-D array in
which the rows are RGB or RGBA, however.
*marker*:
can be one of:
===== ==============
Value Description
===== ==============
's' square
'o' circle
'^' triangle up
'>' triangle right
'v' triangle down
'<' triangle left
'd' diamond
'p' pentagram
'h' hexagon
'8' octagon
'+' plus
'x' cross
===== ==============
The marker can also be a tuple (*numsides*, *style*,
*angle*), which will create a custom, regular symbol.
*numsides*:
the number of sides
*style*:
the style of the regular symbol:
===== =============================================
Value Description
===== =============================================
0 a regular polygon
1 a star-like symbol
2 an asterisk
3 a circle (*numsides* and *angle* are ignored)
===== =============================================
*angle*:
the angle of rotation of the symbol
Finally, *marker* can be (*verts*, 0): *verts* is a
sequence of (*x*, *y*) vertices for a custom scatter
symbol. Alternatively, use the kwarg combination
*marker* = *None*, *verts* = *verts*.
Any or all of *x*, *y*, *s*, and *c* may be masked arrays, in
which case all masks will be combined and only unmasked points
will be plotted.
Other keyword arguments: the color mapping and normalization
arguments will be used only if *c* is an array of floats.
*cmap*: [ None | Colormap ]
A :class:`matplotlib.colors.Colormap` instance. If *None*,
defaults to rc ``image.cmap``. *cmap* is only used if *c*
is an array of floats.
*norm*: [ None | Normalize ]
A :class:`matplotlib.colors.Normalize` instance is used to
scale luminance data to 0, 1. If *None*, use the default
:func:`normalize`. *norm* is only used if *c* is an array
of floats.
*vmin*/*vmax*:
*vmin* and *vmax* are used in conjunction with norm to
normalize luminance data. If either are None, the min and
max of the color array *C* is used. Note if you pass a
*norm* instance, your settings for *vmin* and *vmax* will
be ignored.
*alpha*: 0 <= scalar <= 1
The alpha value for the patches
*linewidths*: [ None | scalar | sequence ]
If *None*, defaults to (lines.linewidth,). Note that this
is a tuple, and if you set the linewidths argument you
must set it as a sequence of floats, as required by
:class:`~matplotlib.collections.RegularPolyCollection`.
Optional kwargs control the
:class:`~matplotlib.collections.Collection` properties; in
particular:
*edgecolors*:
'none' to plot faces with no outlines
*facecolors*:
'none' to plot unfilled outlines
Here are the standard descriptions of all the
:class:`~matplotlib.collections.Collection` kwargs:
%(Collection)s
A :class:`~matplotlib.collections.Collection` instance is
returned.
"""
if not self._hold: self.cla()
syms = { # a dict from symbol to (numsides, angle)
's' : (4,math.pi/4.0,0), # square
'o' : (20,3,0), # circle
'^' : (3,0,0), # triangle up
'>' : (3,math.pi/2.0,0), # triangle right
'v' : (3,math.pi,0), # triangle down
'<' : (3,3*math.pi/2.0,0), # triangle left
'd' : (4,0,0), # diamond
'p' : (5,0,0), # pentagram
'h' : (6,0,0), # hexagon
'8' : (8,0,0), # octagon
'+' : (4,0,2), # plus
'x' : (4,math.pi/4.0,2) # cross
}
self._process_unit_info(xdata=x, ydata=y, kwargs=kwargs)
x, y, s, c = cbook.delete_masked_points(x, y, s, c)
if is_string_like(c) or cbook.is_sequence_of_strings(c):
colors = mcolors.colorConverter.to_rgba_array(c, alpha)
else:
sh = np.shape(c)
# The inherent ambiguity is resolved in favor of color
# mapping, not interpretation as rgb or rgba:
if len(sh) == 1 and sh[0] == len(x):
colors = None # use cmap, norm after collection is created
else:
colors = mcolors.colorConverter.to_rgba_array(c, alpha)
if not iterable(s):
scales = (s,)
else:
scales = s
if faceted:
edgecolors = None
else:
edgecolors = 'none'
warnings.warn(
'''replace "faceted=False" with "edgecolors='none'"''',
DeprecationWarning) #2008/04/18
sym = None
symstyle = 0
# to be API compatible
if marker is None and not (verts is None):
marker = (verts, 0)
verts = None
if is_string_like(marker):
# the standard way to define symbols using a string character
sym = syms.get(marker)
if sym is None and verts is None:
raise ValueError('Unknown marker symbol to scatter')
numsides, rotation, symstyle = syms[marker]
elif iterable(marker):
# accept marker to be:
# (numsides, style, [angle])
# or
# (verts[], style, [angle])
if len(marker)<2 or len(marker)>3:
raise ValueError('Cannot create markersymbol from marker')
if cbook.is_numlike(marker[0]):
# (numsides, style, [angle])
if len(marker)==2:
numsides, rotation = marker[0], 0.
elif len(marker)==3:
numsides, rotation = marker[0], marker[2]
sym = True
if marker[1] in (1,2):
symstyle = marker[1]
else:
verts = np.asarray(marker[0])
if sym is not None:
if symstyle==0:
collection = mcoll.RegularPolyCollection(
numsides, rotation, scales,
facecolors = colors,
edgecolors = edgecolors,
linewidths = linewidths,
offsets = zip(x,y),
transOffset = self.transData,
)
elif symstyle==1:
collection = mcoll.StarPolygonCollection(
numsides, rotation, scales,
facecolors = colors,
edgecolors = edgecolors,
linewidths = linewidths,
offsets = zip(x,y),
transOffset = self.transData,
)
elif symstyle==2:
collection = mcoll.AsteriskPolygonCollection(
numsides, rotation, scales,
facecolors = colors,
edgecolors = edgecolors,
linewidths = linewidths,
offsets = zip(x,y),
transOffset = self.transData,
)
elif symstyle==3:
collection = mcoll.CircleCollection(
scales,
facecolors = colors,
edgecolors = edgecolors,
linewidths = linewidths,
offsets = zip(x,y),
transOffset = self.transData,
)
else:
rescale = np.sqrt(max(verts[:,0]**2+verts[:,1]**2))
verts /= rescale
collection = mcoll.PolyCollection(
(verts,), scales,
facecolors = colors,
edgecolors = edgecolors,
linewidths = linewidths,
offsets = zip(x,y),
transOffset = self.transData,
)
collection.set_transform(mtransforms.IdentityTransform())
collection.set_alpha(alpha)
collection.update(kwargs)
if colors is None:
if norm is not None: assert(isinstance(norm, mcolors.Normalize))
if cmap is not None: assert(isinstance(cmap, mcolors.Colormap))
collection.set_array(np.asarray(c))
collection.set_cmap(cmap)
collection.set_norm(norm)
if vmin is not None or vmax is not None:
collection.set_clim(vmin, vmax)
else:
collection.autoscale_None()
temp_x = x
temp_y = y
minx = np.amin(temp_x)
maxx = np.amax(temp_x)
miny = np.amin(temp_y)
maxy = np.amax(temp_y)
w = maxx-minx
h = maxy-miny
# the pad is a little hack to deal with the fact that we don't
# want to transform all the symbols whose scales are in points
# to data coords to get the exact bounding box for efficiency
# reasons. It can be done right if this is deemed important
padx, pady = 0.05*w, 0.05*h
corners = (minx-padx, miny-pady), (maxx+padx, maxy+pady)
self.update_datalim( corners)
self.autoscale_view()
# add the collection last
self.add_collection(collection)
return collection
scatter.__doc__ = cbook.dedent(scatter.__doc__) % martist.kwdocd
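# Illustrative sketch (hypothetical data): `s` is the marker area in points^2,
# and a float sequence for `c` is run through the colormap.
#
#   import numpy as np
#   import matplotlib.pyplot as plt
#   ax = plt.figure().add_subplot(111)
#   x, y, c = np.random.rand(3, 50)
#   ax.scatter(x, y, s=50, c=c, marker='o')
#   plt.show()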
def hexbin(self, x, y, C = None, gridsize = 100, bins = None,
xscale = 'linear', yscale = 'linear',
cmap=None, norm=None, vmin=None, vmax=None,
alpha=1.0, linewidths=None, edgecolors='none',
reduce_C_function = np.mean,
**kwargs):
"""
call signature::
hexbin(x, y, C = None, gridsize = 100, bins = None,
xscale = 'linear', yscale = 'linear',
cmap=None, norm=None, vmin=None, vmax=None,
alpha=1.0, linewidths=None, edgecolors='none'
reduce_C_function = np.mean,
**kwargs)
Make a hexagonal binning plot of *x* versus *y*, where *x*,
*y* are 1-D sequences of the same length, *N*. If *C* is None
(the default), this is a histogram of the number of occurrences
of the observations at (x[i],y[i]).
If *C* is specified, it specifies values at the coordinate
(x[i],y[i]). These values are accumulated for each hexagonal
bin and then reduced according to *reduce_C_function*, which
defaults to numpy's mean function (np.mean). (If *C* is
specified, it must also be a 1-D sequence of the same length
as *x* and *y*.)
*x*, *y* and/or *C* may be masked arrays, in which case only
unmasked points will be plotted.
Optional keyword arguments:
*gridsize*: [ 100 | integer ]
The number of hexagons in the *x*-direction, default is
100. The corresponding number of hexagons in the
*y*-direction is chosen such that the hexagons are
approximately regular. Alternatively, gridsize can be a
tuple with two elements specifying the number of hexagons
in the *x*-direction and the *y*-direction.
*bins*: [ None | 'log' | integer | sequence ]
If *None*, no binning is applied; the color of each hexagon
directly corresponds to its count value.
If 'log', use a logarithmic scale for the color
map. Internally, :math:`log_{10}(i+1)` is used to
determine the hexagon color.
If an integer, divide the counts in the specified number
of bins, and color the hexagons accordingly.
If a sequence of values, the values of the lower bound of
the bins to be used.
*xscale*: [ 'linear' | 'log' ]
Use a linear or log10 scale on the horizontal axis.
*yscale*: [ 'linear' | 'log' ]
Use a linear or log10 scale on the vertical axis.
Other keyword arguments controlling color mapping and normalization
arguments:
*cmap*: [ None | Colormap ]
a :class:`matplotlib.cm.Colormap` instance. If *None*,
defaults to rc ``image.cmap``.
*norm*: [ None | Normalize ]
:class:`matplotlib.colors.Normalize` instance is used to
scale luminance data to 0,1.
*vmin*/*vmax*: scalar
*vmin* and *vmax* are used in conjunction with *norm* to normalize
luminance data. If either are *None*, the min and max of the color
array *C* is used. Note if you pass a norm instance, your settings
for *vmin* and *vmax* will be ignored.
*alpha*: scalar
the alpha value for the patches
*linewidths*: [ None | scalar ]
If *None*, defaults to rc lines.linewidth. Note that this
is a tuple, and if you set the linewidths argument you
must set it as a sequence of floats, as required by
:class:`~matplotlib.collections.RegularPolyCollection`.
Other keyword arguments controlling the Collection properties:
*edgecolors*: [ None | mpl color | color sequence ]
If 'none', draws the edges in the same color as the fill color.
This is the default, as it avoids unsightly unpainted pixels
between the hexagons.
If *None*, draws the outlines in the default color.
If a matplotlib color arg or sequence of rgba tuples, draws the
outlines in the specified color.
Here are the standard descriptions of all the
:class:`~matplotlib.collections.Collection` kwargs:
%(Collection)s
The return value is a
:class:`~matplotlib.collections.PolyCollection` instance; use
:meth:`~matplotlib.collections.PolyCollection.get_array` on
this :class:`~matplotlib.collections.PolyCollection` to get
the counts in each hexagon.
**Example:**
.. plot:: mpl_examples/pylab_examples/hexbin_demo.py
"""
if not self._hold: self.cla()
self._process_unit_info(xdata=x, ydata=y, kwargs=kwargs)
x, y, C = cbook.delete_masked_points(x, y, C)
# Set the size of the hexagon grid
if iterable(gridsize):
nx, ny = gridsize
else:
nx = gridsize
ny = int(nx/math.sqrt(3))
# Count the number of data in each hexagon
x = np.array(x, float)
y = np.array(y, float)
if xscale=='log':
x = np.log10(x)
if yscale=='log':
y = np.log10(y)
xmin = np.amin(x)
xmax = np.amax(x)
ymin = np.amin(y)
ymax = np.amax(y)
# In the x-direction, the hexagons exactly cover the region from
# xmin to xmax. Need some padding to avoid roundoff errors.
padding = 1.e-9 * (xmax - xmin)
xmin -= padding
xmax += padding
sx = (xmax-xmin) / nx
sy = (ymax-ymin) / ny
x = (x-xmin)/sx
y = (y-ymin)/sy
ix1 = np.round(x).astype(int)
iy1 = np.round(y).astype(int)
ix2 = np.floor(x).astype(int)
iy2 = np.floor(y).astype(int)
nx1 = nx + 1
ny1 = ny + 1
nx2 = nx
ny2 = ny
n = nx1*ny1+nx2*ny2
d1 = (x-ix1)**2 + 3.0 * (y-iy1)**2
d2 = (x-ix2-0.5)**2 + 3.0 * (y-iy2-0.5)**2
bdist = (d1<d2)
if C is None:
accum = np.zeros(n)
# Create appropriate views into "accum" array.
lattice1 = accum[:nx1*ny1]
lattice2 = accum[nx1*ny1:]
lattice1.shape = (nx1,ny1)
lattice2.shape = (nx2,ny2)
for i in xrange(len(x)):
if bdist[i]:
lattice1[ix1[i], iy1[i]]+=1
else:
lattice2[ix2[i], iy2[i]]+=1
else:
# create accumulation arrays
lattice1 = np.empty((nx1,ny1),dtype=object)
for i in xrange(nx1):
for j in xrange(ny1):
lattice1[i,j] = []
lattice2 = np.empty((nx2,ny2),dtype=object)
for i in xrange(nx2):
for j in xrange(ny2):
lattice2[i,j] = []
for i in xrange(len(x)):
if bdist[i]:
lattice1[ix1[i], iy1[i]].append( C[i] )
else:
lattice2[ix2[i], iy2[i]].append( C[i] )
for i in xrange(nx1):
for j in xrange(ny1):
vals = lattice1[i,j]
if len(vals):
lattice1[i,j] = reduce_C_function( vals )
else:
lattice1[i,j] = np.nan
for i in xrange(nx2):
for j in xrange(ny2):
vals = lattice2[i,j]
if len(vals):
lattice2[i,j] = reduce_C_function( vals )
else:
lattice2[i,j] = np.nan
accum = np.hstack((
lattice1.astype(float).ravel(), lattice2.astype(float).ravel()))
good_idxs = ~np.isnan(accum)
px = xmin + sx * np.array([ 0.5, 0.5, 0.0, -0.5, -0.5, 0.0])
py = ymin + sy * np.array([-0.5, 0.5, 1.0, 0.5, -0.5, -1.0]) / 3.0
polygons = np.zeros((6, n, 2), float)
polygons[:,:nx1*ny1,0] = np.repeat(np.arange(nx1), ny1)
polygons[:,:nx1*ny1,1] = np.tile(np.arange(ny1), nx1)
polygons[:,nx1*ny1:,0] = np.repeat(np.arange(nx2) + 0.5, ny2)
polygons[:,nx1*ny1:,1] = np.tile(np.arange(ny2), nx2) + 0.5
if C is not None:
# remove accumulation bins with no data
polygons = polygons[:,good_idxs,:]
accum = accum[good_idxs]
polygons = np.transpose(polygons, axes=[1,0,2])
polygons[:,:,0] *= sx
polygons[:,:,1] *= sy
polygons[:,:,0] += px
polygons[:,:,1] += py
if xscale=='log':
polygons[:,:,0] = 10**(polygons[:,:,0])
xmin = 10**xmin
xmax = 10**xmax
self.set_xscale('log')
if yscale=='log':
polygons[:,:,1] = 10**(polygons[:,:,1])
ymin = 10**ymin
ymax = 10**ymax
self.set_yscale('log')
if edgecolors=='none':
edgecolors = 'face'
collection = mcoll.PolyCollection(
polygons,
edgecolors = edgecolors,
linewidths = linewidths,
transOffset = self.transData,
)
# Transform accum if needed
if bins=='log':
accum = np.log10(accum+1)
elif bins is not None:
if not iterable(bins):
minimum, maximum = min(accum), max(accum)
bins-=1 # one less edge than bins
bins = minimum + (maximum-minimum)*np.arange(bins)/bins
bins = np.sort(bins)
accum = bins.searchsorted(accum)
if norm is not None: assert(isinstance(norm, mcolors.Normalize))
if cmap is not None: assert(isinstance(cmap, mcolors.Colormap))
collection.set_array(accum)
collection.set_cmap(cmap)
collection.set_norm(norm)
collection.set_alpha(alpha)
collection.update(kwargs)
if vmin is not None or vmax is not None:
collection.set_clim(vmin, vmax)
else:
collection.autoscale_None()
corners = ((xmin, ymin), (xmax, ymax))
self.update_datalim( corners)
self.autoscale_view()
# add the collection last
self.add_collection(collection)
return collection
hexbin.__doc__ = cbook.dedent(hexbin.__doc__) % martist.kwdocd
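# Illustrative sketch (hypothetical data): counts per hexagon on a log color
# scale; get_array() on the returned collection gives the per-hexagon values.
#
#   import numpy as np
#   import matplotlib.pyplot as plt
#   ax = plt.figure().add_subplot(111)
#   x, y = np.random.randn(2, 10000)
#   polys = ax.hexbin(x, y, gridsize=30, bins='log')
#   plt.show()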
def arrow(self, x, y, dx, dy, **kwargs):
"""
call signature::
arrow(x, y, dx, dy, **kwargs)
Draws arrow on specified axis from (*x*, *y*) to (*x* + *dx*,
*y* + *dy*).
Optional kwargs control the arrow properties:
%(FancyArrow)s
**Example:**
.. plot:: mpl_examples/pylab_examples/arrow_demo.py
"""
a = mpatches.FancyArrow(x, y, dx, dy, **kwargs)
self.add_artist(a)
return a
arrow.__doc__ = cbook.dedent(arrow.__doc__) % martist.kwdocd
def quiverkey(self, *args, **kw):
qk = mquiver.QuiverKey(*args, **kw)
self.add_artist(qk)
return qk
quiverkey.__doc__ = mquiver.QuiverKey.quiverkey_doc
def quiver(self, *args, **kw):
if not self._hold: self.cla()
q = mquiver.Quiver(self, *args, **kw)
self.add_collection(q, False)
self.update_datalim(q.XY)
self.autoscale_view()
return q
quiver.__doc__ = mquiver.Quiver.quiver_doc
def barbs(self, *args, **kw):
"""
%(barbs_doc)s
**Example:**
.. plot:: mpl_examples/pylab_examples/barb_demo.py
"""
if not self._hold: self.cla()
b = mquiver.Barbs(self, *args, **kw)
self.add_collection(b)
self.update_datalim(b.get_offsets())
self.autoscale_view()
return b
barbs.__doc__ = cbook.dedent(barbs.__doc__) % {
'barbs_doc': mquiver.Barbs.barbs_doc}
def fill(self, *args, **kwargs):
"""
call signature::
fill(*args, **kwargs)
Plot filled polygons. *args* is a variable length argument,
allowing for multiple *x*, *y* pairs with an optional color
format string; see :func:`~matplotlib.pyplot.plot` for details
on the argument parsing. For example, to plot a polygon with
vertices at *x*, *y* in blue.::
ax.fill(x,y, 'b' )
An arbitrary number of *x*, *y*, *color* groups can be specified::
ax.fill(x1, y1, 'g', x2, y2, 'r')
Return value is a list of :class:`~matplotlib.patches.Patch`
instances that were added.
The same color strings that :func:`~matplotlib.pyplot.plot`
supports are supported by the fill format string.
If you would like to fill below a curve, eg. shade a region
between 0 and *y* along *x*, use :meth:`fill_between`
The *closed* kwarg will close the polygon when *True* (default).
kwargs control the Polygon properties:
%(Polygon)s
**Example:**
.. plot:: mpl_examples/pylab_examples/fill_demo.py
"""
if not self._hold: self.cla()
patches = []
for poly in self._get_patches_for_fill(*args, **kwargs):
self.add_patch( poly )
patches.append( poly )
self.autoscale_view()
return patches
fill.__doc__ = cbook.dedent(fill.__doc__) % martist.kwdocd
def fill_between(self, x, y1, y2=0, where=None, **kwargs):
"""
call signature::
fill_between(x, y1, y2=0, where=None, **kwargs)
Create a :class:`~matplotlib.collections.PolyCollection`
filling the regions between *y1* and *y2* where
``where==True``
*x*
an N length np array of the x data
*y1*
an N length scalar or np array of the y data
*y2*
an N length scalar or np array of the y data
*where*
if None, default to fill between everywhere. If not None,
it is an N length numpy boolean array and the fill will
only happen over the regions where ``where==True``
*kwargs*
keyword args passed on to the :class:`PolyCollection`
kwargs control the Polygon properties:
%(PolyCollection)s
.. plot:: mpl_examples/pylab_examples/fill_between.py
"""
# Handle united data, such as dates
self._process_unit_info(xdata=x, ydata=y1, kwargs=kwargs)
self._process_unit_info(ydata=y2)
# Convert the arrays so we can work with them
x = np.asarray(self.convert_xunits(x))
y1 = np.asarray(self.convert_yunits(y1))
y2 = np.asarray(self.convert_yunits(y2))
if not cbook.iterable(y1):
y1 = np.ones_like(x)*y1
if not cbook.iterable(y2):
y2 = np.ones_like(x)*y2
if where is None:
where = np.ones(len(x), np.bool)
where = np.asarray(where)
assert( (len(x)==len(y1)) and (len(x)==len(y2)) and len(x)==len(where))
polys = []
for ind0, ind1 in mlab.contiguous_regions(where):
theseverts = []
xslice = x[ind0:ind1]
y1slice = y1[ind0:ind1]
y2slice = y2[ind0:ind1]
if not len(xslice):
continue
N = len(xslice)
X = np.zeros((2*N+2, 2), np.float)
# the purpose of the next two lines is for when y2 is a
# scalar like 0 and we want the fill to go all the way
# down to 0 even if none of the y1 sample points do
X[0] = xslice[0], y2slice[0]
X[N+1] = xslice[-1], y2slice[-1]
X[1:N+1,0] = xslice
X[1:N+1,1] = y1slice
X[N+2:,0] = xslice[::-1]
X[N+2:,1] = y2slice[::-1]
polys.append(X)
collection = mcoll.PolyCollection(polys, **kwargs)
# now update the datalim and autoscale
XY1 = np.array([x[where], y1[where]]).T
XY2 = np.array([x[where], y2[where]]).T
self.dataLim.update_from_data_xy(XY1, self.ignore_existing_data_limits,
updatex=True, updatey=True)
self.dataLim.update_from_data_xy(XY2, self.ignore_existing_data_limits,
updatex=False, updatey=True)
self.add_collection(collection)
self.autoscale_view()
return collection
fill_between.__doc__ = cbook.dedent(fill_between.__doc__) % martist.kwdocd
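# Illustrative sketch (hypothetical curves): fill only where the condition in
# `where` holds, leaving gaps elsewhere.
#
#   import numpy as np
#   import matplotlib.pyplot as plt
#   ax = plt.figure().add_subplot(111)
#   x = np.linspace(0, 2 * np.pi, 200)
#   y = np.sin(x)
#   ax.fill_between(x, y, 0, where=y >= 0, facecolors='green')
#   plt.show()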
#### plotting z(x,y): imshow, pcolor and relatives, contour
def imshow(self, X, cmap=None, norm=None, aspect=None,
interpolation=None, alpha=1.0, vmin=None, vmax=None,
origin=None, extent=None, shape=None, filternorm=1,
filterrad=4.0, imlim=None, resample=None, url=None, **kwargs):
"""
call signature::
imshow(X, cmap=None, norm=None, aspect=None, interpolation=None,
alpha=1.0, vmin=None, vmax=None, origin=None, extent=None,
**kwargs)
Display the image in *X* to current axes. *X* may be a float
array, a uint8 array or a PIL image. If *X* is an array, *X*
can have the following shapes:
* MxN -- luminance (grayscale, float array only)
* MxNx3 -- RGB (float or uint8 array)
* MxNx4 -- RGBA (float or uint8 array)
The value for each component of MxNx3 and MxNx4 float arrays should be
in the range 0.0 to 1.0; MxN float arrays may be normalised.
An :class:`matplotlib.image.AxesImage` instance is returned.
Keyword arguments:
*cmap*: [ None | Colormap ]
A :class:`matplotlib.cm.Colormap` instance, eg. cm.jet.
If *None*, default to rc ``image.cmap`` value.
*cmap* is ignored when *X* has RGB(A) information
*aspect*: [ None | 'auto' | 'equal' | scalar ]
If 'auto', changes the image aspect ratio to match that of the axes
If 'equal', and *extent* is *None*, changes the axes
aspect ratio to match that of the image. If *extent* is
not *None*, the axes aspect ratio is changed to match that
of the extent.
If *None*, default to rc ``image.aspect`` value.
*interpolation*:
Acceptable values are *None*, 'nearest', 'bilinear',
'bicubic', 'spline16', 'spline36', 'hanning', 'hamming',
'hermite', 'kaiser', 'quadric', 'catrom', 'gaussian',
'bessel', 'mitchell', 'sinc', 'lanczos',
If *interpolation* is *None*, default to rc
``image.interpolation``. See also the *filternorm* and
*filterrad* parameters
*norm*: [ None | Normalize ]
An :class:`matplotlib.colors.Normalize` instance; if
*None*, default is ``normalize()``. This scales
luminance -> 0-1
*norm* is only used for an MxN float array.
*vmin*/*vmax*: [ None | scalar ]
Used to scale a luminance image to 0-1. If either is
*None*, the min and max of the luminance values will be
used. Note if *norm* is not *None*, the settings for
*vmin* and *vmax* will be ignored.
*alpha*: scalar
The alpha blending value, between 0 (transparent) and 1 (opaque)
*origin*: [ None | 'upper' | 'lower' ]
Place the [0,0] index of the array in the upper left or lower left
corner of the axes. If *None*, default to rc ``image.origin``.
*extent*: [ None | scalars (left, right, bottom, top) ]
Data values of the axes. The default assigns zero-based row,
column indices to the *x*, *y* centers of the pixels.
*shape*: [ None | scalars (columns, rows) ]
For raw buffer images
*filternorm*:
A parameter for the antigrain image resize filter. From the
antigrain documentation, if *filternorm* = 1, the filter normalizes
integer values and corrects the rounding errors. It doesn't do
anything with the source floating point values, it corrects only
integers according to the rule of 1.0 which means that any sum of
pixel weights must be equal to 1.0. So, the filter function must
produce a graph of the proper shape.
*filterrad*:
The filter radius for filters that have a radius
parameter, i.e. when interpolation is one of: 'sinc',
'lanczos' or 'blackman'
Additional kwargs are :class:`~matplotlib.artist.Artist` properties:
%(Artist)s
**Example:**
.. plot:: mpl_examples/pylab_examples/image_demo.py
"""
if not self._hold: self.cla()
if norm is not None: assert(isinstance(norm, mcolors.Normalize))
if cmap is not None: assert(isinstance(cmap, mcolors.Colormap))
if aspect is None: aspect = rcParams['image.aspect']
self.set_aspect(aspect)
im = mimage.AxesImage(self, cmap, norm, interpolation, origin, extent,
filternorm=filternorm,
filterrad=filterrad, resample=resample, **kwargs)
im.set_data(X)
im.set_alpha(alpha)
self._set_artist_props(im)
im.set_clip_path(self.patch)
#if norm is None and shape is None:
# im.set_clim(vmin, vmax)
if vmin is not None or vmax is not None:
im.set_clim(vmin, vmax)
else:
im.autoscale_None()
im.set_url(url)
xmin, xmax, ymin, ymax = im.get_extent()
corners = (xmin, ymin), (xmax, ymax)
self.update_datalim(corners)
if self._autoscaleon:
self.set_xlim((xmin, xmax))
self.set_ylim((ymin, ymax))
self.images.append(im)
return im
imshow.__doc__ = cbook.dedent(imshow.__doc__) % martist.kwdocd
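# Illustrative sketch (hypothetical image): display a 2-D float array with the
# origin in the lower-left corner and no interpolation smoothing.
#
#   import numpy as np
#   import matplotlib.pyplot as plt
#   ax = plt.figure().add_subplot(111)
#   Z = np.random.rand(20, 20)
#   im = ax.imshow(Z, interpolation='nearest', origin='lower', vmin=0, vmax=1)
#   plt.show()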
def _pcolorargs(self, funcname, *args):
if len(args)==1:
C = args[0]
numRows, numCols = C.shape
X, Y = np.meshgrid(np.arange(numCols+1), np.arange(numRows+1) )
elif len(args)==3:
X, Y, C = args
else:
raise TypeError(
'Illegal arguments to %s; see help(%s)' % (funcname, funcname))
Nx = X.shape[-1]
Ny = Y.shape[0]
if len(X.shape) != 2 or X.shape[0] == 1:
x = X.reshape(1,Nx)
X = x.repeat(Ny, axis=0)
if len(Y.shape) != 2 or Y.shape[1] == 1:
y = Y.reshape(Ny, 1)
Y = y.repeat(Nx, axis=1)
if X.shape != Y.shape:
raise TypeError(
'Incompatible X, Y inputs to %s; see help(%s)' % (
funcname, funcname))
return X, Y, C
def pcolor(self, *args, **kwargs):
"""
call signatures::
pcolor(C, **kwargs)
pcolor(X, Y, C, **kwargs)
Create a pseudocolor plot of a 2-D array.
*C* is the array of color values.
*X* and *Y*, if given, specify the (*x*, *y*) coordinates of
the colored quadrilaterals; the quadrilateral for C[i,j] has
corners at::
(X[i, j], Y[i, j]),
(X[i, j+1], Y[i, j+1]),
(X[i+1, j], Y[i+1, j]),
(X[i+1, j+1], Y[i+1, j+1]).
Ideally the dimensions of *X* and *Y* should be one greater
than those of *C*; if the dimensions are the same, then the
last row and column of *C* will be ignored.
Note that the column index corresponds to the
*x*-coordinate, and the row index corresponds to *y*; for
details, see the :ref:`Grid Orientation
<axes-pcolor-grid-orientation>` section below.
If either or both of *X* and *Y* are 1-D arrays or column vectors,
they will be expanded as needed into the appropriate 2-D arrays,
making a rectangular grid.
*X*, *Y* and *C* may be masked arrays. If either C[i, j], or one
of the vertices surrounding C[i,j] (*X* or *Y* at [i, j], [i+1, j],
[i, j+1],[i+1, j+1]) is masked, nothing is plotted.
Keyword arguments:
*cmap*: [ None | Colormap ]
A :class:`matplotlib.cm.Colormap` instance. If *None*, use
rc settings.
norm: [ None | Normalize ]
An :class:`matplotlib.colors.Normalize` instance is used
to scale luminance data to 0,1. If *None*, defaults to
:func:`normalize`.
*vmin*/*vmax*: [ None | scalar ]
*vmin* and *vmax* are used in conjunction with *norm* to
normalize luminance data. If either are *None*, the min
and max of the color array *C* is used. If you pass a
*norm* instance, *vmin* and *vmax* will be ignored.
*shading*: [ 'flat' | 'faceted' ]
If 'faceted', a black grid is drawn around each rectangle; if
'flat', edges are not drawn. Default is 'flat', contrary to
Matlab(TM).
This kwarg is deprecated; please use 'edgecolors' instead:
* shading='flat' -- edgecolors='None'
* shading='faceted' -- edgecolors='k'
*edgecolors*: [ None | 'None' | color | color sequence]
If *None*, the rc setting is used by default.
If 'None', edges will not be visible.
An mpl color or sequence of colors will set the edge color
*alpha*: 0 <= scalar <= 1
the alpha blending value
Return value is a :class:`matplotlib.collections.Collection`
instance.
.. _axes-pcolor-grid-orientation:
The grid orientation follows the Matlab(TM) convention: an
array *C* with shape (*nrows*, *ncolumns*) is plotted with
the column number as *X* and the row number as *Y*, increasing
up; hence it is plotted the way the array would be printed,
except that the *Y* axis is reversed. That is, *C* is taken
as *C*(*y*, *x*).
Similarly for :func:`~matplotlib.pyplot.meshgrid`::
x = np.arange(5)
y = np.arange(3)
X, Y = meshgrid(x,y)
is equivalent to:
X = array([[0, 1, 2, 3, 4],
[0, 1, 2, 3, 4],
[0, 1, 2, 3, 4]])
Y = array([[0, 0, 0, 0, 0],
[1, 1, 1, 1, 1],
[2, 2, 2, 2, 2]])
so if you have::
C = rand( len(x), len(y))
then you need::
pcolor(X, Y, C.T)
or::
pcolor(C.T)
Matlab :func:`pcolor` always discards the last row and column
of *C*, but matplotlib displays the last row and column if *X* and
*Y* are not specified, or if *X* and *Y* have one more row and
column than *C*.
kwargs can be used to control the
:class:`~matplotlib.collections.PolyCollection` properties:
%(PolyCollection)s
"""
if not self._hold: self.cla()
alpha = kwargs.pop('alpha', 1.0)
norm = kwargs.pop('norm', None)
cmap = kwargs.pop('cmap', None)
vmin = kwargs.pop('vmin', None)
vmax = kwargs.pop('vmax', None)
shading = kwargs.pop('shading', 'flat')
X, Y, C = self._pcolorargs('pcolor', *args)
Ny, Nx = X.shape
# convert to MA, if necessary.
C = ma.asarray(C)
X = ma.asarray(X)
Y = ma.asarray(Y)
mask = ma.getmaskarray(X)+ma.getmaskarray(Y)
xymask = mask[0:-1,0:-1]+mask[1:,1:]+mask[0:-1,1:]+mask[1:,0:-1]
# don't plot if C or any of the surrounding vertices are masked.
mask = ma.getmaskarray(C)[0:Ny-1,0:Nx-1]+xymask
newaxis = np.newaxis
compress = np.compress
ravelmask = (mask==0).ravel()
X1 = compress(ravelmask, ma.filled(X[0:-1,0:-1]).ravel())
Y1 = compress(ravelmask, ma.filled(Y[0:-1,0:-1]).ravel())
X2 = compress(ravelmask, ma.filled(X[1:,0:-1]).ravel())
Y2 = compress(ravelmask, ma.filled(Y[1:,0:-1]).ravel())
X3 = compress(ravelmask, ma.filled(X[1:,1:]).ravel())
Y3 = compress(ravelmask, ma.filled(Y[1:,1:]).ravel())
X4 = compress(ravelmask, ma.filled(X[0:-1,1:]).ravel())
Y4 = compress(ravelmask, ma.filled(Y[0:-1,1:]).ravel())
npoly = len(X1)
xy = np.concatenate((X1[:,newaxis], Y1[:,newaxis],
X2[:,newaxis], Y2[:,newaxis],
X3[:,newaxis], Y3[:,newaxis],
X4[:,newaxis], Y4[:,newaxis],
X1[:,newaxis], Y1[:,newaxis]),
axis=1)
verts = xy.reshape((npoly, 5, 2))
#verts = zip(zip(X1,Y1),zip(X2,Y2),zip(X3,Y3),zip(X4,Y4))
C = compress(ravelmask, ma.filled(C[0:Ny-1,0:Nx-1]).ravel())
if shading == 'faceted':
edgecolors = (0,0,0,1),
linewidths = (0.25,)
else:
edgecolors = 'face'
linewidths = (1.0,)
kwargs.setdefault('edgecolors', edgecolors)
kwargs.setdefault('antialiaseds', (0,))
kwargs.setdefault('linewidths', linewidths)
collection = mcoll.PolyCollection(verts, **kwargs)
collection.set_alpha(alpha)
collection.set_array(C)
if norm is not None: assert(isinstance(norm, mcolors.Normalize))
if cmap is not None: assert(isinstance(cmap, mcolors.Colormap))
collection.set_cmap(cmap)
collection.set_norm(norm)
if vmin is not None or vmax is not None:
collection.set_clim(vmin, vmax)
else:
collection.autoscale_None()
self.grid(False)
x = X.compressed()
y = Y.compressed()
minx = np.amin(x)
maxx = np.amax(x)
miny = np.amin(y)
maxy = np.amax(y)
corners = (minx, miny), (maxx, maxy)
self.update_datalim( corners)
self.autoscale_view()
self.add_collection(collection)
return collection
pcolor.__doc__ = cbook.dedent(pcolor.__doc__) % martist.kwdocd
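# Illustrative sketch (hypothetical grid): X and Y carry one more row and
# column than C, so every value in C is drawn.
#
#   import numpy as np
#   import matplotlib.pyplot as plt
#   ax = plt.figure().add_subplot(111)
#   x = np.arange(6)
#   y = np.arange(4)
#   X, Y = np.meshgrid(x, y)
#   C = np.random.rand(3, 5)
#   ax.pcolor(X, Y, C)
#   plt.show()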
def pcolormesh(self, *args, **kwargs):
"""
call signatures::
pcolormesh(C)
pcolormesh(X, Y, C)
pcolormesh(C, **kwargs)
*C* may be a masked array, but *X* and *Y* may not. Masked
array support is implemented via *cmap* and *norm*; in
contrast, :func:`~matplotlib.pyplot.pcolor` simply does not
draw quadrilaterals with masked colors or vertices.
Keyword arguments:
*cmap*: [ None | Colormap ]
A :class:`matplotlib.cm.Colormap` instance. If None, use
rc settings.
*norm*: [ None | Normalize ]
A :class:`matplotlib.colors.Normalize` instance is used to
scale luminance data to 0,1. If None, defaults to
:func:`normalize`.
*vmin*/*vmax*: [ None | scalar ]
*vmin* and *vmax* are used in conjunction with *norm* to
normalize luminance data. If either are *None*, the min
and max of the color array *C* is used. If you pass a
*norm* instance, *vmin* and *vmax* will be ignored.
*shading*: [ 'flat' | 'faceted' ]
If 'faceted', a black grid is drawn around each rectangle; if
'flat', edges are not drawn. Default is 'flat', contrary to
Matlab(TM).
This kwarg is deprecated; please use 'edgecolors' instead:
* shading='flat' -- edgecolors='None'
* shading='faceted' -- edgecolors='k'
*edgecolors*: [ None | 'None' | color | color sequence]
If None, the rc setting is used by default.
If 'None', edges will not be visible.
An mpl color or sequence of colors will set the edge color
*alpha*: 0 <= scalar <= 1
the alpha blending value
Return value is a :class:`matplotlib.collections.QuadMesh`
object.
kwargs can be used to control the
:class:`matplotlib.collections.QuadMesh`
properties:
%(QuadMesh)s
.. seealso::
:func:`~matplotlib.pyplot.pcolor`:
For an explanation of the grid orientation and the
expansion of 1-D *X* and/or *Y* to 2-D arrays.
"""
if not self._hold: self.cla()
alpha = kwargs.pop('alpha', 1.0)
norm = kwargs.pop('norm', None)
cmap = kwargs.pop('cmap', None)
vmin = kwargs.pop('vmin', None)
vmax = kwargs.pop('vmax', None)
shading = kwargs.pop('shading', 'flat')
edgecolors = kwargs.pop('edgecolors', 'None')
antialiased = kwargs.pop('antialiased', False)
X, Y, C = self._pcolorargs('pcolormesh', *args)
Ny, Nx = X.shape
# convert to one dimensional arrays
C = ma.ravel(C[0:Ny-1, 0:Nx-1]) # data point in each cell is value at
# lower left corner
X = X.ravel()
Y = Y.ravel()
coords = np.zeros(((Nx * Ny), 2), dtype=float)
coords[:, 0] = X
coords[:, 1] = Y
if shading == 'faceted' or edgecolors != 'None':
showedges = 1
else:
showedges = 0
collection = mcoll.QuadMesh(
Nx - 1, Ny - 1, coords, showedges,
antialiased=antialiased) # kwargs are not used
collection.set_alpha(alpha)
collection.set_array(C)
if norm is not None: assert(isinstance(norm, mcolors.Normalize))
if cmap is not None: assert(isinstance(cmap, mcolors.Colormap))
collection.set_cmap(cmap)
collection.set_norm(norm)
if vmin is not None or vmax is not None:
collection.set_clim(vmin, vmax)
else:
collection.autoscale_None()
self.grid(False)
minx = np.amin(X)
maxx = np.amax(X)
miny = np.amin(Y)
maxy = np.amax(Y)
corners = (minx, miny), (maxx, maxy)
self.update_datalim( corners)
self.autoscale_view()
self.add_collection(collection)
return collection
pcolormesh.__doc__ = cbook.dedent(pcolormesh.__doc__) % martist.kwdocd
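    # A rough usage sketch of the call above (illustrative user-level code;
    # the grid shapes are assumptions): X and Y hold the quadrilateral
    # corners, so they have one more row and column than C.
    #
    #   import numpy as np
    #   import matplotlib.pyplot as plt
    #   x = np.linspace(0.0, 1.0, 11)          # 11 column edges
    #   y = np.linspace(0.0, 2.0, 6)           # 6 row edges
    #   X, Y = np.meshgrid(x, y)               # both have shape (6, 11)
    #   C = np.random.rand(5, 10)              # one value per cell
    #   plt.pcolormesh(X, Y, C)
    #   plt.colorbar()
    #   plt.show()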
def pcolorfast(self, *args, **kwargs):
"""
pseudocolor plot of a 2-D array
Experimental; this is a version of pcolor that
does not draw lines, that provides the fastest
possible rendering with the Agg backend, and that
can handle any quadrilateral grid.
Call signatures::
pcolor(C, **kwargs)
pcolor(xr, yr, C, **kwargs)
pcolor(x, y, C, **kwargs)
pcolor(X, Y, C, **kwargs)
C is the 2D array of color values corresponding to quadrilateral
cells. Let (nr, nc) be its shape. C may be a masked array.
``pcolor(C, **kwargs)`` is equivalent to
``pcolor([0,nc], [0,nr], C, **kwargs)``
*xr*, *yr* specify the ranges of *x* and *y* corresponding to the
rectangular region bounding *C*. If::
xr = [x0, x1]
and::
yr = [y0,y1]
then *x* goes from *x0* to *x1* as the second index of *C* goes
from 0 to *nc*, etc. (*x0*, *y0*) is the outermost corner of
cell (0,0), and (*x1*, *y1*) is the outermost corner of cell
(*nr*-1, *nc*-1). All cells are rectangles of the same size.
This is the fastest version.
*x*, *y* are 1D arrays of length *nc* +1 and *nr* +1, respectively,
giving the x and y boundaries of the cells. Hence the cells are
rectangular but the grid may be nonuniform. The speed is
intermediate. (The grid is checked, and if found to be
uniform the fast version is used.)
*X* and *Y* are 2D arrays with shape (*nr* +1, *nc* +1) that specify
the (x,y) coordinates of the corners of the colored
quadrilaterals; the quadrilateral for C[i,j] has corners at
(X[i,j],Y[i,j]), (X[i,j+1],Y[i,j+1]), (X[i+1,j],Y[i+1,j]),
(X[i+1,j+1],Y[i+1,j+1]). The cells need not be rectangular.
This is the most general, but the slowest to render. It may
produce faster and more compact output using ps, pdf, and
svg backends, however.
        Note that the column index corresponds to the x-coordinate,
and the row index corresponds to y; for details, see
the "Grid Orientation" section below.
Optional keyword arguments:
*cmap*: [ None | Colormap ]
A cm Colormap instance from cm. If None, use rc settings.
*norm*: [ None | Normalize ]
An mcolors.Normalize instance is used to scale luminance data to
0,1. If None, defaults to normalize()
*vmin*/*vmax*: [ None | scalar ]
*vmin* and *vmax* are used in conjunction with norm to normalize
luminance data. If either are *None*, the min and max of the color
array *C* is used. If you pass a norm instance, *vmin* and *vmax*
          will be ignored.
*alpha*: 0 <= scalar <= 1
the alpha blending value
Return value is an image if a regular or rectangular grid
is specified, and a QuadMesh collection in the general
quadrilateral case.
"""
if not self._hold: self.cla()
alpha = kwargs.pop('alpha', 1.0)
norm = kwargs.pop('norm', None)
cmap = kwargs.pop('cmap', None)
vmin = kwargs.pop('vmin', None)
vmax = kwargs.pop('vmax', None)
if norm is not None: assert(isinstance(norm, mcolors.Normalize))
if cmap is not None: assert(isinstance(cmap, mcolors.Colormap))
C = args[-1]
nr, nc = C.shape
if len(args) == 1:
style = "image"
x = [0, nc]
y = [0, nr]
elif len(args) == 3:
x, y = args[:2]
x = np.asarray(x)
y = np.asarray(y)
if x.ndim == 1 and y.ndim == 1:
if x.size == 2 and y.size == 2:
style = "image"
else:
dx = np.diff(x)
dy = np.diff(y)
if (np.ptp(dx) < 0.01*np.abs(dx.mean()) and
np.ptp(dy) < 0.01*np.abs(dy.mean())):
style = "image"
else:
style = "pcolorimage"
elif x.ndim == 2 and y.ndim == 2:
style = "quadmesh"
else:
raise TypeError("arguments do not match valid signatures")
else:
raise TypeError("need 1 argument or 3 arguments")
if style == "quadmesh":
# convert to one dimensional arrays
# This should also be moved to the QuadMesh class
C = ma.ravel(C) # data point in each cell is value
# at lower left corner
X = x.ravel()
Y = y.ravel()
Nx = nc+1
Ny = nr+1
# The following needs to be cleaned up; the renderer
# requires separate contiguous arrays for X and Y,
# but the QuadMesh class requires the 2D array.
coords = np.empty(((Nx * Ny), 2), np.float64)
coords[:, 0] = X
coords[:, 1] = Y
# The QuadMesh class can also be changed to
# handle relevant superclass kwargs; the initializer
# should do much more than it does now.
collection = mcoll.QuadMesh(nc, nr, coords, 0)
collection.set_alpha(alpha)
collection.set_array(C)
collection.set_cmap(cmap)
collection.set_norm(norm)
self.add_collection(collection)
xl, xr, yb, yt = X.min(), X.max(), Y.min(), Y.max()
ret = collection
else:
# One of the image styles:
xl, xr, yb, yt = x[0], x[-1], y[0], y[-1]
if style == "image":
im = mimage.AxesImage(self, cmap, norm,
interpolation='nearest',
origin='lower',
extent=(xl, xr, yb, yt),
**kwargs)
im.set_data(C)
im.set_alpha(alpha)
self.images.append(im)
ret = im
if style == "pcolorimage":
im = mimage.PcolorImage(self, x, y, C,
cmap=cmap,
norm=norm,
alpha=alpha,
**kwargs)
self.images.append(im)
ret = im
self._set_artist_props(ret)
if vmin is not None or vmax is not None:
ret.set_clim(vmin, vmax)
else:
ret.autoscale_None()
self.update_datalim(np.array([[xl, yb], [xr, yt]]))
self.autoscale_view(tight=True)
return ret
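    # A rough usage sketch (illustrative; the data are assumptions): the call
    # styles below hit the "image" and "pcolorimage" paths described in the
    # docstring; 2-D *X*/*Y* arrays would select the quadmesh path instead.
    #
    #   import numpy as np
    #   import matplotlib.pyplot as plt
    #   C = np.random.rand(20, 30)
    #   fig = plt.figure()
    #   ax = fig.add_subplot(111)
    #   ax.pcolorfast(C)                                          # uniform grid -> AxesImage
    #   # ax.pcolorfast([0, 3], [0, 2], C)                        # x/y ranges   -> AxesImage
    #   # ax.pcolorfast(np.logspace(0, 1, 31), np.arange(21), C)  # 1D edges     -> PcolorImage
    #   plt.show()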
def contour(self, *args, **kwargs):
if not self._hold: self.cla()
kwargs['filled'] = False
return mcontour.ContourSet(self, *args, **kwargs)
contour.__doc__ = mcontour.ContourSet.contour_doc
def contourf(self, *args, **kwargs):
if not self._hold: self.cla()
kwargs['filled'] = True
return mcontour.ContourSet(self, *args, **kwargs)
contourf.__doc__ = mcontour.ContourSet.contour_doc
def clabel(self, CS, *args, **kwargs):
return CS.clabel(*args, **kwargs)
clabel.__doc__ = mcontour.ContourSet.clabel.__doc__
def table(self, **kwargs):
"""
call signature::
table(cellText=None, cellColours=None,
cellLoc='right', colWidths=None,
rowLabels=None, rowColours=None, rowLoc='left',
colLabels=None, colColours=None, colLoc='center',
loc='bottom', bbox=None):
Add a table to the current axes. Returns a
:class:`matplotlib.table.Table` instance. For finer grained
control over tables, use the :class:`~matplotlib.table.Table`
class and add it to the axes with
:meth:`~matplotlib.axes.Axes.add_table`.
Thanks to John Gill for providing the class and table.
kwargs control the :class:`~matplotlib.table.Table`
properties:
%(Table)s
"""
return mtable.table(self, **kwargs)
table.__doc__ = cbook.dedent(table.__doc__) % martist.kwdocd
def twinx(self):
"""
call signature::
ax = twinx()
create a twin of Axes for generating a plot with a sharex
x-axis but independent y axis. The y-axis of self will have
ticks on left and the returned axes will have ticks on the
right
"""
ax2 = self.figure.add_axes(self.get_position(True), sharex=self,
frameon=False)
ax2.yaxis.tick_right()
ax2.yaxis.set_label_position('right')
self.yaxis.tick_left()
return ax2
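    # A rough usage sketch (illustrative; the data are assumptions): plot two
    # quantities with different y scales against one shared x axis.
    #
    #   import numpy as np
    #   import matplotlib.pyplot as plt
    #   t = np.arange(0.0, 10.0, 0.01)
    #   fig = plt.figure()
    #   ax1 = fig.add_subplot(111)
    #   ax1.plot(t, np.exp(t), 'b-')
    #   ax2 = ax1.twinx()                      # shared x, independent y on the right
    #   ax2.plot(t, np.sin(2*np.pi*t), 'r.')
    #   plt.show()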
def twiny(self):
"""
call signature::
ax = twiny()
create a twin of Axes for generating a plot with a shared
y-axis but independent x axis. The x-axis of self will have
ticks on bottom and the returned axes will have ticks on the
top
"""
ax2 = self.figure.add_axes(self.get_position(True), sharey=self,
frameon=False)
ax2.xaxis.tick_top()
ax2.xaxis.set_label_position('top')
self.xaxis.tick_bottom()
return ax2
def get_shared_x_axes(self):
'Return a copy of the shared axes Grouper object for x axes'
return self._shared_x_axes
def get_shared_y_axes(self):
'Return a copy of the shared axes Grouper object for y axes'
return self._shared_y_axes
#### Data analysis
def hist(self, x, bins=10, range=None, normed=False, cumulative=False,
bottom=None, histtype='bar', align='mid',
orientation='vertical', rwidth=None, log=False, **kwargs):
"""
call signature::
hist(x, bins=10, range=None, normed=False, cumulative=False,
bottom=None, histtype='bar', align='mid',
orientation='vertical', rwidth=None, log=False, **kwargs)
Compute and draw the histogram of *x*. The return value is a
tuple (*n*, *bins*, *patches*) or ([*n0*, *n1*, ...], *bins*,
[*patches0*, *patches1*,...]) if the input contains multiple
data.
Keyword arguments:
*bins*:
Either an integer number of bins or a sequence giving the
bins. *x* are the data to be binned. *x* can be an array,
a 2D array with multiple data in its columns, or a list of
arrays with data of different length. Note, if *bins*
            is an integer, *bins* + 1 bin edges
will be returned, compatible with the semantics of
:func:`numpy.histogram` with the *new* = True argument.
Unequally spaced bins are supported if *bins* is a sequence.
*range*:
The lower and upper range of the bins. Lower and upper outliers
are ignored. If not provided, *range* is (x.min(), x.max()).
Range has no effect if *bins* is a sequence.
If *bins* is a sequence or *range* is specified, autoscaling is
set off (*autoscale_on* is set to *False*) and the xaxis limits
are set to encompass the full specified bin range.
*normed*:
If *True*, the first element of the return tuple will
be the counts normalized to form a probability density, i.e.,
``n/(len(x)*dbin)``. In a probability density, the integral of
the histogram should be 1; you can verify that with a
trapezoidal integration of the probability density function::
pdf, bins, patches = ax.hist(...)
print np.sum(pdf * np.diff(bins))
*cumulative*:
If *True*, then a histogram is computed where each bin
gives the counts in that bin plus all bins for smaller values.
The last bin gives the total number of datapoints. If *normed*
is also *True* then the histogram is normalized such that the
last bin equals 1. If *cumulative* evaluates to less than 0
(e.g. -1), the direction of accumulation is reversed. In this
case, if *normed* is also *True*, then the histogram is normalized
such that the first bin equals 1.
*histtype*: [ 'bar' | 'barstacked' | 'step' | 'stepfilled' ]
The type of histogram to draw.
- 'bar' is a traditional bar-type histogram. If multiple data
              are given the bars are arranged side by side.
- 'barstacked' is a bar-type histogram where multiple
data are stacked on top of each other.
- 'step' generates a lineplot that is by default
unfilled.
- 'stepfilled' generates a lineplot that is by default
filled.
*align*: ['left' | 'mid' | 'right' ]
Controls how the histogram is plotted.
- 'left': bars are centered on the left bin edges.
- 'mid': bars are centered between the bin edges.
- 'right': bars are centered on the right bin edges.
*orientation*: [ 'horizontal' | 'vertical' ]
If 'horizontal', :func:`~matplotlib.pyplot.barh` will be
used for bar-type histograms and the *bottom* kwarg will be
the left edges.
*rwidth*:
The relative width of the bars as a fraction of the bin
width. If *None*, automatically compute the width. Ignored
if *histtype* = 'step' or 'stepfilled'.
*log*:
If *True*, the histogram axis will be set to a log scale.
If *log* is *True* and *x* is a 1D array, empty bins will
be filtered out and only the non-empty (*n*, *bins*,
*patches*) will be returned.
kwargs are used to update the properties of the hist
:class:`~matplotlib.patches.Rectangle` instances:
%(Rectangle)s
You can use labels for your histogram, and only the first
:class:`~matplotlib.patches.Rectangle` gets the label (the
        others get the magic string '_nolegend_').  This will make the
histograms work in the intuitive way for bar charts::
ax.hist(10+2*np.random.randn(1000), label='men')
ax.hist(12+3*np.random.randn(1000), label='women', alpha=0.5)
ax.legend()
**Example:**
.. plot:: mpl_examples/pylab_examples/histogram_demo.py
"""
if not self._hold: self.cla()
# NOTE: the range keyword overwrites the built-in func range !!!
        # needs to be fixed with numpy !!!
if kwargs.get('width') is not None:
raise DeprecationWarning(
'hist now uses the rwidth to give relative width '
'and not absolute width')
try:
# make sure a copy is created: don't use asarray
x = np.transpose(np.array(x))
if len(x.shape)==1:
x.shape = (1,x.shape[0])
elif len(x.shape)==2 and x.shape[1]<x.shape[0]:
warnings.warn('2D hist should be nsamples x nvariables; '
'this looks transposed')
except ValueError:
# multiple hist with data of different length
if iterable(x[0]) and not is_string_like(x[0]):
tx = []
for i in xrange(len(x)):
tx.append( np.array(x[i]) )
x = tx
else:
                raise ValueError, 'Cannot use provided data to create a histogram'
# Check whether bins or range are given explicitly. In that
# case do not autoscale axes.
binsgiven = (cbook.iterable(bins) or range != None)
# check the version of the numpy
if np.__version__ < "1.3": # version 1.1 and 1.2
hist_kwargs = dict(range=range,
normed=bool(normed), new=True)
else: # version 1.3 and later, drop new=True
hist_kwargs = dict(range=range,
normed=bool(normed))
n = []
for i in xrange(len(x)):
# this will automatically overwrite bins,
# so that each histogram uses the same bins
m, bins = np.histogram(x[i], bins, **hist_kwargs)
n.append(m)
if cumulative:
slc = slice(None)
if cbook.is_numlike(cumulative) and cumulative < 0:
slc = slice(None,None,-1)
if normed:
n = [(m * np.diff(bins))[slc].cumsum()[slc] for m in n]
else:
n = [m[slc].cumsum()[slc] for m in n]
patches = []
if histtype.startswith('bar'):
totwidth = np.diff(bins)
stacked = False
if rwidth is not None: dr = min(1., max(0., rwidth))
elif len(n)>1: dr = 0.8
else: dr = 1.0
if histtype=='bar':
width = dr*totwidth/len(n)
dw = width
if len(n)>1:
boffset = -0.5*dr*totwidth*(1.-1./len(n))
else:
boffset = 0.0
elif histtype=='barstacked':
width = dr*totwidth
boffset, dw = 0.0, 0.0
stacked = True
else:
raise ValueError, 'invalid histtype: %s' % histtype
if align == 'mid' or align == 'edge':
boffset += 0.5*totwidth
elif align == 'right':
boffset += totwidth
elif align != 'left' and align != 'center':
raise ValueError, 'invalid align: %s' % align
if orientation == 'horizontal':
for m in n:
color = self._get_lines._get_next_cycle_color()
patch = self.barh(bins[:-1]+boffset, m, height=width,
left=bottom, align='center', log=log,
color=color)
patches.append(patch)
if stacked:
if bottom is None: bottom = 0.0
bottom += m
boffset += dw
elif orientation == 'vertical':
for m in n:
color = self._get_lines._get_next_cycle_color()
patch = self.bar(bins[:-1]+boffset, m, width=width,
bottom=bottom, align='center', log=log,
color=color)
patches.append(patch)
if stacked:
if bottom is None: bottom = 0.0
bottom += m
boffset += dw
else:
raise ValueError, 'invalid orientation: %s' % orientation
elif histtype.startswith('step'):
x = np.zeros( 2*len(bins), np.float )
y = np.zeros( 2*len(bins), np.float )
x[0::2], x[1::2] = bins, bins
if align == 'left' or align == 'center':
x -= 0.5*(bins[1]-bins[0])
elif align == 'right':
x += 0.5*(bins[1]-bins[0])
elif align != 'mid' and align != 'edge':
raise ValueError, 'invalid align: %s' % align
if log:
y[0],y[-1] = 1e-100, 1e-100
if orientation == 'horizontal':
self.set_xscale('log')
elif orientation == 'vertical':
self.set_yscale('log')
fill = False
if histtype == 'stepfilled':
fill = True
elif histtype != 'step':
raise ValueError, 'invalid histtype: %s' % histtype
for m in n:
y[1:-1:2], y[2::2] = m, m
if orientation == 'horizontal':
x,y = y,x
elif orientation != 'vertical':
raise ValueError, 'invalid orientation: %s' % orientation
color = self._get_lines._get_next_cycle_color()
if fill:
patches.append( self.fill(x, y,
closed=False, facecolor=color) )
else:
patches.append( self.fill(x, y,
closed=False, edgecolor=color, fill=False) )
# adopted from adjust_x/ylim part of the bar method
if orientation == 'horizontal':
xmin, xmax = 0, self.dataLim.intervalx[1]
for m in n:
xmin = np.amin(m[m!=0]) # filter out the 0 height bins
xmin = max(xmin*0.9, 1e-100)
self.dataLim.intervalx = (xmin, xmax)
elif orientation == 'vertical':
ymin, ymax = 0, self.dataLim.intervaly[1]
for m in n:
ymin = np.amin(m[m!=0]) # filter out the 0 height bins
ymin = max(ymin*0.9, 1e-100)
self.dataLim.intervaly = (ymin, ymax)
self.autoscale_view()
else:
raise ValueError, 'invalid histtype: %s' % histtype
label = kwargs.pop('label', '')
for patch in patches:
for p in patch:
p.update(kwargs)
p.set_label(label)
label = '_nolegend_'
if binsgiven:
self.set_autoscale_on(False)
if orientation == 'vertical':
self.autoscale_view(scalex=False, scaley=True)
XL = self.xaxis.get_major_locator().view_limits(bins[0], bins[-1])
self.set_xbound(XL)
else:
self.autoscale_view(scalex=True, scaley=False)
YL = self.yaxis.get_major_locator().view_limits(bins[0], bins[-1])
self.set_ybound(YL)
if len(n)==1:
return n[0], bins, cbook.silent_list('Patch', patches[0])
else:
return n, bins, cbook.silent_list('Lists of Patches', patches)
hist.__doc__ = cbook.dedent(hist.__doc__) % martist.kwdocd
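    # A rough usage sketch (illustrative; the data are assumptions): with
    # normed=True the returned values form a probability density, which the
    # integration check from the docstring confirms.
    #
    #   import numpy as np
    #   import matplotlib.pyplot as plt
    #   data = np.random.randn(1000)
    #   fig = plt.figure()
    #   ax = fig.add_subplot(111)
    #   pdf, bins, patches = ax.hist(data, bins=30, normed=True,
    #                                histtype='stepfilled')
    #   print np.sum(pdf * np.diff(bins))      # should be close to 1.0
    #   plt.show()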
def psd(self, x, NFFT=256, Fs=2, Fc=0, detrend=mlab.detrend_none,
window=mlab.window_hanning, noverlap=0, pad_to=None,
sides='default', scale_by_freq=None, **kwargs):
"""
call signature::
psd(x, NFFT=256, Fs=2, Fc=0, detrend=mlab.detrend_none,
window=mlab.window_hanning, noverlap=0, pad_to=None,
sides='default', scale_by_freq=None, **kwargs)
The power spectral density by Welch's average periodogram
method. The vector *x* is divided into *NFFT* length
segments. Each segment is detrended by function *detrend* and
windowed by function *window*. *noverlap* gives the length of
the overlap between segments. The :math:`|\mathrm{fft}(i)|^2`
of each segment :math:`i` are averaged to compute *Pxx*, with a
scaling to correct for power loss due to windowing. *Fs* is the
sampling frequency.
%(PSD)s
*Fc*: integer
The center frequency of *x* (defaults to 0), which offsets
the x extents of the plot to reflect the frequency range used
when a signal is acquired and then filtered and downsampled to
baseband.
Returns the tuple (*Pxx*, *freqs*).
For plotting, the power is plotted as
:math:`10\log_{10}(P_{xx})` for decibels, though *Pxx* itself
is returned.
References:
Bendat & Piersol -- Random Data: Analysis and Measurement
Procedures, John Wiley & Sons (1986)
kwargs control the :class:`~matplotlib.lines.Line2D` properties:
%(Line2D)s
**Example:**
.. plot:: mpl_examples/pylab_examples/psd_demo.py
"""
if not self._hold: self.cla()
pxx, freqs = mlab.psd(x, NFFT, Fs, detrend, window, noverlap, pad_to,
sides, scale_by_freq)
pxx.shape = len(freqs),
freqs += Fc
if scale_by_freq in (None, True):
psd_units = 'dB/Hz'
else:
psd_units = 'dB'
self.plot(freqs, 10*np.log10(pxx), **kwargs)
self.set_xlabel('Frequency')
self.set_ylabel('Power Spectral Density (%s)' % psd_units)
self.grid(True)
vmin, vmax = self.viewLim.intervaly
intv = vmax-vmin
logi = int(np.log10(intv))
if logi==0: logi=.1
step = 10*logi
#print vmin, vmax, step, intv, math.floor(vmin), math.ceil(vmax)+1
ticks = np.arange(math.floor(vmin), math.ceil(vmax)+1, step)
self.set_yticks(ticks)
return pxx, freqs
psd_doc_dict = dict()
psd_doc_dict.update(martist.kwdocd)
psd_doc_dict.update(mlab.kwdocd)
psd_doc_dict['PSD'] = cbook.dedent(psd_doc_dict['PSD'])
psd.__doc__ = cbook.dedent(psd.__doc__) % psd_doc_dict
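    # A rough usage sketch (illustrative; the signal is an assumption):
    # estimate the spectrum of a noisy 5 Hz sine sampled at Fs = 100 Hz; the
    # plotted power should peak near 5 Hz.
    #
    #   import numpy as np
    #   import matplotlib.pyplot as plt
    #   Fs = 100.0
    #   t = np.arange(0.0, 10.0, 1.0/Fs)
    #   x = np.sin(2*np.pi*5.0*t) + 0.5*np.random.randn(len(t))
    #   fig = plt.figure()
    #   ax = fig.add_subplot(111)
    #   Pxx, freqs = ax.psd(x, NFFT=256, Fs=Fs)
    #   plt.show()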
def csd(self, x, y, NFFT=256, Fs=2, Fc=0, detrend=mlab.detrend_none,
window=mlab.window_hanning, noverlap=0, pad_to=None,
sides='default', scale_by_freq=None, **kwargs):
"""
call signature::
csd(x, y, NFFT=256, Fs=2, Fc=0, detrend=mlab.detrend_none,
window=mlab.window_hanning, noverlap=0, pad_to=None,
sides='default', scale_by_freq=None, **kwargs)
The cross spectral density :math:`P_{xy}` by Welch's average
periodogram method. The vectors *x* and *y* are divided into
*NFFT* length segments. Each segment is detrended by function
*detrend* and windowed by function *window*. The product of
the direct FFTs of *x* and *y* are averaged over each segment
to compute :math:`P_{xy}`, with a scaling to correct for power
loss due to windowing.
        Returns the tuple (*Pxy*, *freqs*).  *Pxy* is the cross spectrum
(complex valued), and :math:`10\log_{10}|P_{xy}|` is
plotted.
%(PSD)s
*Fc*: integer
The center frequency of *x* (defaults to 0), which offsets
the x extents of the plot to reflect the frequency range used
when a signal is acquired and then filtered and downsampled to
baseband.
References:
Bendat & Piersol -- Random Data: Analysis and Measurement
Procedures, John Wiley & Sons (1986)
kwargs control the Line2D properties:
%(Line2D)s
**Example:**
.. plot:: mpl_examples/pylab_examples/csd_demo.py
        .. seealso::
:meth:`psd`
For a description of the optional parameters.
"""
if not self._hold: self.cla()
pxy, freqs = mlab.csd(x, y, NFFT, Fs, detrend, window, noverlap,
pad_to, sides, scale_by_freq)
pxy.shape = len(freqs),
# pxy is complex
freqs += Fc
self.plot(freqs, 10*np.log10(np.absolute(pxy)), **kwargs)
self.set_xlabel('Frequency')
self.set_ylabel('Cross Spectrum Magnitude (dB)')
self.grid(True)
vmin, vmax = self.viewLim.intervaly
        intv = vmax-vmin
        logi = int(np.log10(intv))
        if logi==0: logi=.1
        step = 10*logi
        ticks = np.arange(math.floor(vmin), math.ceil(vmax)+1, step)
self.set_yticks(ticks)
return pxy, freqs
csd.__doc__ = cbook.dedent(csd.__doc__) % psd_doc_dict
def cohere(self, x, y, NFFT=256, Fs=2, Fc=0, detrend=mlab.detrend_none,
window=mlab.window_hanning, noverlap=0, pad_to=None,
sides='default', scale_by_freq=None, **kwargs):
"""
call signature::
cohere(x, y, NFFT=256, Fs=2, Fc=0, detrend = mlab.detrend_none,
window = mlab.window_hanning, noverlap=0, pad_to=None,
sides='default', scale_by_freq=None, **kwargs)
        Plot the coherence between *x* and *y*.  Coherence is the normalized
cross spectral density:
.. math::
C_{xy} = \\frac{|P_{xy}|^2}{P_{xx}P_{yy}}
%(PSD)s
*Fc*: integer
The center frequency of *x* (defaults to 0), which offsets
the x extents of the plot to reflect the frequency range used
when a signal is acquired and then filtered and downsampled to
baseband.
The return value is a tuple (*Cxy*, *f*), where *f* are the
frequencies of the coherence vector.
kwargs are applied to the lines.
References:
* Bendat & Piersol -- Random Data: Analysis and Measurement
Procedures, John Wiley & Sons (1986)
kwargs control the :class:`~matplotlib.lines.Line2D`
properties of the coherence plot:
%(Line2D)s
**Example:**
.. plot:: mpl_examples/pylab_examples/cohere_demo.py
"""
if not self._hold: self.cla()
        cxy, freqs = mlab.cohere(x, y, NFFT, Fs, detrend, window, noverlap,
                                 pad_to, sides, scale_by_freq)
freqs += Fc
self.plot(freqs, cxy, **kwargs)
self.set_xlabel('Frequency')
self.set_ylabel('Coherence')
self.grid(True)
return cxy, freqs
cohere.__doc__ = cbook.dedent(cohere.__doc__) % psd_doc_dict
def specgram(self, x, NFFT=256, Fs=2, Fc=0, detrend=mlab.detrend_none,
window=mlab.window_hanning, noverlap=128,
cmap=None, xextent=None, pad_to=None, sides='default',
scale_by_freq=None):
"""
call signature::
specgram(x, NFFT=256, Fs=2, Fc=0, detrend=mlab.detrend_none,
window=mlab.window_hanning, noverlap=128,
cmap=None, xextent=None, pad_to=None, sides='default',
scale_by_freq=None)
Compute a spectrogram of data in *x*. Data are split into
*NFFT* length segments and the PSD of each section is
computed. The windowing function *window* is applied to each
segment, and the amount of overlap of each segment is
specified with *noverlap*.
%(PSD)s
*Fc*: integer
The center frequency of *x* (defaults to 0), which offsets
the y extents of the plot to reflect the frequency range used
when a signal is acquired and then filtered and downsampled to
baseband.
*cmap*:
A :class:`matplotlib.cm.Colormap` instance; if *None* use
default determined by rc
*xextent*:
The image extent along the x-axis. xextent = (xmin,xmax)
The default is (0,max(bins)), where bins is the return
value from :func:`mlab.specgram`
Return value is (*Pxx*, *freqs*, *bins*, *im*):
- *bins* are the time points the spectrogram is calculated over
- *freqs* is an array of frequencies
          - *Pxx* is a len(*freqs*) x len(*bins*) array of power
- *im* is a :class:`matplotlib.image.AxesImage` instance
Note: If *x* is real (i.e. non-complex), only the positive
spectrum is shown. If *x* is complex, both positive and
negative parts of the spectrum are shown. This can be
overridden using the *sides* keyword argument.
**Example:**
.. plot:: mpl_examples/pylab_examples/specgram_demo.py
"""
if not self._hold: self.cla()
Pxx, freqs, bins = mlab.specgram(x, NFFT, Fs, detrend,
window, noverlap, pad_to, sides, scale_by_freq)
Z = 10. * np.log10(Pxx)
Z = np.flipud(Z)
if xextent is None: xextent = 0, np.amax(bins)
xmin, xmax = xextent
freqs += Fc
extent = xmin, xmax, freqs[0], freqs[-1]
im = self.imshow(Z, cmap, extent=extent)
self.axis('auto')
return Pxx, freqs, bins, im
specgram.__doc__ = cbook.dedent(specgram.__doc__) % psd_doc_dict
del psd_doc_dict #So that this does not become an Axes attribute
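    # A rough usage sketch (illustrative; the swept-frequency signal is an
    # assumption): the returned AxesImage can be given a colorbar like any
    # other image.
    #
    #   import numpy as np
    #   import matplotlib.pyplot as plt
    #   Fs = 1000.0
    #   t = np.arange(0.0, 5.0, 1.0/Fs)
    #   x = np.sin(2*np.pi*(50.0 + 20.0*t)*t)   # frequency rises over time
    #   fig = plt.figure()
    #   ax = fig.add_subplot(111)
    #   Pxx, freqs, bins, im = ax.specgram(x, NFFT=256, Fs=Fs, noverlap=128)
    #   fig.colorbar(im)
    #   plt.show()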
def spy(self, Z, precision=0, marker=None, markersize=None,
aspect='equal', **kwargs):
"""
call signature::
spy(Z, precision=0, marker=None, markersize=None,
aspect='equal', **kwargs)
``spy(Z)`` plots the sparsity pattern of the 2-D array *Z*.
If *precision* is 0, any non-zero value will be plotted;
else, values of :math:`|Z| > precision` will be plotted.
For :class:`scipy.sparse.spmatrix` instances, there is a
special case: if *precision* is 'present', any value present in
the array will be plotted, even if it is identically zero.
The array will be plotted as it would be printed, with
the first index (row) increasing down and the second
index (column) increasing to the right.
By default aspect is 'equal', so that each array element
occupies a square space; set the aspect kwarg to 'auto'
to allow the plot to fill the plot box, or to any scalar
number to specify the aspect ratio of an array element
directly.
Two plotting styles are available: image or marker. Both
are available for full arrays, but only the marker style
works for :class:`scipy.sparse.spmatrix` instances.
If *marker* and *markersize* are *None*, an image will be
returned and any remaining kwargs are passed to
:func:`~matplotlib.pyplot.imshow`; else, a
:class:`~matplotlib.lines.Line2D` object will be returned with
the value of marker determining the marker type, and any
remaining kwargs passed to the
:meth:`~matplotlib.axes.Axes.plot` method.
If *marker* and *markersize* are *None*, useful kwargs include:
* *cmap*
* *alpha*
.. seealso::
:func:`~matplotlib.pyplot.imshow`
For controlling colors, e.g. cyan background and red marks,
use::
cmap = mcolors.ListedColormap(['c','r'])
If *marker* or *markersize* is not *None*, useful kwargs include:
* *marker*
* *markersize*
* *color*
Useful values for *marker* include:
* 's' square (default)
* 'o' circle
* '.' point
* ',' pixel
.. seealso::
:func:`~matplotlib.pyplot.plot`
"""
if precision is None:
precision = 0
            warnings.warn("Use precision=0 instead of None",
                          DeprecationWarning)
# 2008/10/03
if marker is None and markersize is None and hasattr(Z, 'tocoo'):
marker = 's'
if marker is None and markersize is None:
Z = np.asarray(Z)
mask = np.absolute(Z)>precision
if 'cmap' not in kwargs:
kwargs['cmap'] = mcolors.ListedColormap(['w', 'k'],
name='binary')
nr, nc = Z.shape
extent = [-0.5, nc-0.5, nr-0.5, -0.5]
ret = self.imshow(mask, interpolation='nearest', aspect=aspect,
extent=extent, origin='upper', **kwargs)
else:
if hasattr(Z, 'tocoo'):
c = Z.tocoo()
if precision == 'present':
y = c.row
x = c.col
else:
nonzero = np.absolute(c.data) > precision
y = c.row[nonzero]
x = c.col[nonzero]
else:
Z = np.asarray(Z)
nonzero = np.absolute(Z)>precision
y, x = np.nonzero(nonzero)
if marker is None: marker = 's'
if markersize is None: markersize = 10
marks = mlines.Line2D(x, y, linestyle='None',
marker=marker, markersize=markersize, **kwargs)
self.add_line(marks)
nr, nc = Z.shape
self.set_xlim(xmin=-0.5, xmax=nc-0.5)
self.set_ylim(ymin=nr-0.5, ymax=-0.5)
self.set_aspect(aspect)
ret = marks
self.title.set_y(1.05)
self.xaxis.tick_top()
self.xaxis.set_ticks_position('both')
self.xaxis.set_major_locator(mticker.MaxNLocator(nbins=9,
steps=[1, 2, 5, 10],
integer=True))
self.yaxis.set_major_locator(mticker.MaxNLocator(nbins=9,
steps=[1, 2, 5, 10],
integer=True))
return ret
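    # A rough usage sketch (illustrative; the matrix is an assumption): image
    # style on the left, marker style on the right for the same sparse data.
    #
    #   import numpy as np
    #   import matplotlib.pyplot as plt
    #   Z = np.random.rand(40, 40)
    #   Z[Z < 0.9] = 0.0                       # mostly zeros
    #   fig = plt.figure()
    #   fig.add_subplot(121).spy(Z)                            # image style
    #   fig.add_subplot(122).spy(Z, marker='.', markersize=5)  # Line2D style
    #   plt.show()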
def matshow(self, Z, **kwargs):
'''
Plot a matrix or array as an image.
The matrix will be shown the way it would be printed,
with the first row at the top. Row and column numbering
is zero-based.
Argument:
*Z* anything that can be interpreted as a 2-D array
kwargs all are passed to :meth:`~matplotlib.axes.Axes.imshow`.
:meth:`matshow` sets defaults for *extent*, *origin*,
*interpolation*, and *aspect*; use care in overriding the
*extent* and *origin* kwargs, because they interact. (Also,
if you want to change them, you probably should be using
imshow directly in your own version of matshow.)
Returns: an :class:`matplotlib.image.AxesImage` instance.
'''
Z = np.asarray(Z)
nr, nc = Z.shape
extent = [-0.5, nc-0.5, nr-0.5, -0.5]
kw = {'extent': extent,
'origin': 'upper',
'interpolation': 'nearest',
'aspect': 'equal'} # (already the imshow default)
kw.update(kwargs)
im = self.imshow(Z, **kw)
self.title.set_y(1.05)
self.xaxis.tick_top()
self.xaxis.set_ticks_position('both')
self.xaxis.set_major_locator(mticker.MaxNLocator(nbins=9,
steps=[1, 2, 5, 10],
integer=True))
self.yaxis.set_major_locator(mticker.MaxNLocator(nbins=9,
steps=[1, 2, 5, 10],
integer=True))
return im
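    # A rough usage sketch (illustrative): the (0, 0) entry ends up at the top
    # left and the x tick labels move to the top, matching printed output.
    #
    #   import numpy as np
    #   import matplotlib.pyplot as plt
    #   A = np.diag(np.arange(1, 6))
    #   fig = plt.figure()
    #   ax = fig.add_subplot(111)
    #   im = ax.matshow(A)
    #   fig.colorbar(im)
    #   plt.show()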
class SubplotBase:
"""
Base class for subplots, which are :class:`Axes` instances with
additional methods to facilitate generating and manipulating a set
of :class:`Axes` within a figure.
"""
def __init__(self, fig, *args, **kwargs):
"""
*fig* is a :class:`matplotlib.figure.Figure` instance.
*args* is the tuple (*numRows*, *numCols*, *plotNum*), where
the array of subplots in the figure has dimensions *numRows*,
*numCols*, and where *plotNum* is the number of the subplot
being created. *plotNum* starts at 1 in the upper left
corner and increases to the right.
        If *numRows*, *numCols* and *plotNum* are each less than 10, *args* can be the
decimal integer *numRows* * 100 + *numCols* * 10 + *plotNum*.
"""
self.figure = fig
if len(args)==1:
s = str(args[0])
if len(s) != 3:
                raise ValueError('Argument to subplot must be 3 digits long')
rows, cols, num = map(int, s)
elif len(args)==3:
rows, cols, num = args
else:
raise ValueError( 'Illegal argument to subplot')
total = rows*cols
num -= 1 # convert from matlab to python indexing
# ie num in range(0,total)
if num >= total:
raise ValueError( 'Subplot number exceeds total subplots')
self._rows = rows
self._cols = cols
self._num = num
self.update_params()
# _axes_class is set in the subplot_class_factory
self._axes_class.__init__(self, fig, self.figbox, **kwargs)
def get_geometry(self):
'get the subplot geometry, eg 2,2,3'
return self._rows, self._cols, self._num+1
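    # A small worked example of the 3-digit form (illustrative; `fig` is an
    # assumed Figure): the integer 223 unpacks to rows=2, cols=2, plotNum=3,
    # i.e. the lower-left cell of a 2x2 grid.
    #
    #   ax = fig.add_subplot(223)       # same as fig.add_subplot(2, 2, 3)
    #   ax.get_geometry()               # -> (2, 2, 3)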
# COVERAGE NOTE: Never used internally or from examples
def change_geometry(self, numrows, numcols, num):
'change subplot geometry, eg. from 1,1,1 to 2,2,3'
self._rows = numrows
self._cols = numcols
self._num = num-1
self.update_params()
self.set_position(self.figbox)
def update_params(self):
'update the subplot position from fig.subplotpars'
rows = self._rows
cols = self._cols
num = self._num
pars = self.figure.subplotpars
left = pars.left
right = pars.right
bottom = pars.bottom
top = pars.top
wspace = pars.wspace
hspace = pars.hspace
totWidth = right-left
totHeight = top-bottom
figH = totHeight/(rows + hspace*(rows-1))
sepH = hspace*figH
figW = totWidth/(cols + wspace*(cols-1))
sepW = wspace*figW
rowNum, colNum = divmod(num, cols)
figBottom = top - (rowNum+1)*figH - rowNum*sepH
figLeft = left + colNum*(figW + sepW)
self.figbox = mtransforms.Bbox.from_bounds(figLeft, figBottom,
figW, figH)
self.rowNum = rowNum
self.colNum = colNum
self.numRows = rows
self.numCols = cols
if 0:
print 'rcn', rows, cols, num
print 'lbrt', left, bottom, right, top
print 'self.figBottom', self.figBottom
print 'self.figLeft', self.figLeft
print 'self.figW', self.figW
print 'self.figH', self.figH
print 'self.rowNum', self.rowNum
print 'self.colNum', self.colNum
print 'self.numRows', self.numRows
print 'self.numCols', self.numCols
def is_first_col(self):
return self.colNum==0
def is_first_row(self):
return self.rowNum==0
def is_last_row(self):
return self.rowNum==self.numRows-1
def is_last_col(self):
return self.colNum==self.numCols-1
# COVERAGE NOTE: Never used internally or from examples
def label_outer(self):
"""
set the visible property on ticklabels so xticklabels are
visible only if the subplot is in the last row and yticklabels
are visible only if the subplot is in the first column
"""
lastrow = self.is_last_row()
firstcol = self.is_first_col()
for label in self.get_xticklabels():
label.set_visible(lastrow)
for label in self.get_yticklabels():
label.set_visible(firstcol)
_subplot_classes = {}
def subplot_class_factory(axes_class=None):
    # This makes a new class that inherits from SubplotBase and the
# given axes_class (which is assumed to be a subclass of Axes).
# This is perhaps a little bit roundabout to make a new class on
# the fly like this, but it means that a new Subplot class does
# not have to be created for every type of Axes.
if axes_class is None:
axes_class = Axes
new_class = _subplot_classes.get(axes_class)
if new_class is None:
new_class = new.classobj("%sSubplot" % (axes_class.__name__),
(SubplotBase, axes_class),
{'_axes_class': axes_class})
_subplot_classes[axes_class] = new_class
return new_class
# This is provided for backward compatibility
Subplot = subplot_class_factory()
martist.kwdocd['Axes'] = martist.kwdocd['Subplot'] = martist.kwdoc(Axes)
"""
# this is some discarded code I was using to find the minimum positive
# data point for some log scaling fixes. I realized there was a
# cleaner way to do it, but am keeping this around as an example for
# how to get the data out of the axes. Might want to make something
# like this a method one day, or better yet make get_verts an Artist
# method
minx, maxx = self.get_xlim()
if minx<=0 or maxx<=0:
# find the min pos value in the data
xs = []
for line in self.lines:
xs.extend(line.get_xdata(orig=False))
for patch in self.patches:
xs.extend([x for x,y in patch.get_verts()])
for collection in self.collections:
xs.extend([x for x,y in collection.get_verts()])
posx = [x for x in xs if x>0]
if len(posx):
minx = min(posx)
maxx = max(posx)
# warning, probably breaks inverted axis
self.set_xlim((0.1*minx, maxx))
"""
| gpl-3.0 |
heli522/scikit-learn | sklearn/__check_build/__init__.py | 345 | 1671 | """ Module to give helpful messages to the user that did not
compile the scikit properly.
"""
import os
INPLACE_MSG = """
It appears that you are importing a local scikit-learn source tree. For
this, you need to have an inplace install. Maybe you are in the source
directory and you need to try from another location."""
STANDARD_MSG = """
If you have used an installer, please check that it is suited for your
Python version, your operating system and your platform."""
def raise_build_error(e):
# Raise a comprehensible error and list the contents of the
# directory to help debugging on the mailing list.
local_dir = os.path.split(__file__)[0]
msg = STANDARD_MSG
if local_dir == "sklearn/__check_build":
# Picking up the local install: this will work only if the
# install is an 'inplace build'
msg = INPLACE_MSG
dir_content = list()
for i, filename in enumerate(os.listdir(local_dir)):
if ((i + 1) % 3):
dir_content.append(filename.ljust(26))
else:
dir_content.append(filename + '\n')
raise ImportError("""%s
___________________________________________________________________________
Contents of %s:
%s
___________________________________________________________________________
It seems that scikit-learn has not been built correctly.
If you have installed scikit-learn from source, please do not forget
to build the package before using it: run `python setup.py install` or
`make` in the source directory.
%s""" % (e, local_dir, ''.join(dir_content).strip(), msg))
try:
from ._check_build import check_build
except ImportError as e:
raise_build_error(e)
| bsd-3-clause |
sumspr/scikit-learn | sklearn/__check_build/__init__.py | 345 | 1671 | """ Module to give helpful messages to the user that did not
compile the scikit properly.
"""
import os
INPLACE_MSG = """
It appears that you are importing a local scikit-learn source tree. For
this, you need to have an inplace install. Maybe you are in the source
directory and you need to try from another location."""
STANDARD_MSG = """
If you have used an installer, please check that it is suited for your
Python version, your operating system and your platform."""
def raise_build_error(e):
# Raise a comprehensible error and list the contents of the
# directory to help debugging on the mailing list.
local_dir = os.path.split(__file__)[0]
msg = STANDARD_MSG
if local_dir == "sklearn/__check_build":
# Picking up the local install: this will work only if the
# install is an 'inplace build'
msg = INPLACE_MSG
dir_content = list()
for i, filename in enumerate(os.listdir(local_dir)):
if ((i + 1) % 3):
dir_content.append(filename.ljust(26))
else:
dir_content.append(filename + '\n')
raise ImportError("""%s
___________________________________________________________________________
Contents of %s:
%s
___________________________________________________________________________
It seems that scikit-learn has not been built correctly.
If you have installed scikit-learn from source, please do not forget
to build the package before using it: run `python setup.py install` or
`make` in the source directory.
%s""" % (e, local_dir, ''.join(dir_content).strip(), msg))
try:
from ._check_build import check_build
except ImportError as e:
raise_build_error(e)
| bsd-3-clause |
pnedunuri/scikit-learn | examples/neighbors/plot_digits_kde_sampling.py | 251 | 2022 | """
=========================
Kernel Density Estimation
=========================
This example shows how kernel density estimation (KDE), a powerful
non-parametric density estimation technique, can be used to learn
a generative model for a dataset. With this generative model in place,
new samples can be drawn. These new samples reflect the underlying model
of the data.
"""
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import load_digits
from sklearn.neighbors import KernelDensity
from sklearn.decomposition import PCA
from sklearn.grid_search import GridSearchCV
# load the data
digits = load_digits()
data = digits.data
# project the 64-dimensional data to a lower dimension
pca = PCA(n_components=15, whiten=False)
data = pca.fit_transform(digits.data)
# use grid search cross-validation to optimize the bandwidth
params = {'bandwidth': np.logspace(-1, 1, 20)}
grid = GridSearchCV(KernelDensity(), params)
grid.fit(data)
print("best bandwidth: {0}".format(grid.best_estimator_.bandwidth))
# use the best estimator to compute the kernel density estimate
kde = grid.best_estimator_
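# optional sanity check (illustrative; not needed for the sampling below): the
# total log-likelihood of the training data under the fitted model is
#   print("data log-likelihood: {0:.1f}".format(kde.score(data)))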
# sample 44 new points from the data
new_data = kde.sample(44, random_state=0)
new_data = pca.inverse_transform(new_data)
# turn data into a 4x11 grid
new_data = new_data.reshape((4, 11, -1))
real_data = digits.data[:44].reshape((4, 11, -1))
# plot real digits and resampled digits
fig, ax = plt.subplots(9, 11, subplot_kw=dict(xticks=[], yticks=[]))
for j in range(11):
ax[4, j].set_visible(False)
for i in range(4):
im = ax[i, j].imshow(real_data[i, j].reshape((8, 8)),
cmap=plt.cm.binary, interpolation='nearest')
im.set_clim(0, 16)
im = ax[i + 5, j].imshow(new_data[i, j].reshape((8, 8)),
cmap=plt.cm.binary, interpolation='nearest')
im.set_clim(0, 16)
ax[0, 5].set_title('Selection from the input data')
ax[5, 5].set_title('"New" digits drawn from the kernel density model')
plt.show()
| bsd-3-clause |
M4573R/BuildingMachineLearningSystemsWithPython | ch11/demo_corr.py | 25 | 2288 | # This code is supporting material for the book
# Building Machine Learning Systems with Python
# by Willi Richert and Luis Pedro Coelho
# published by PACKT Publishing
#
# It is made available under the MIT License
import os
from matplotlib import pylab
import numpy as np
import scipy
from scipy.stats import norm, pearsonr
from utils import CHART_DIR
def _plot_correlation_func(x, y):
r, p = pearsonr(x, y)
title = "Cor($X_1$, $X_2$) = %.3f" % r
pylab.scatter(x, y)
pylab.title(title)
pylab.xlabel("$X_1$")
pylab.ylabel("$X_2$")
f1 = scipy.poly1d(scipy.polyfit(x, y, 1))
pylab.plot(x, f1(x), "r--", linewidth=2)
# pylab.xticks([w*7*24 for w in [0,1,2,3,4]], ['week %i'%(w+1) for w in
# [0,1,2,3,4]])
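# A small worked example of what pearsonr reports (illustrative; the numbers
# are approximate): a perfectly linear relation gives r = 1, and r drops as
# noise is added, which is what the panels produced below illustrate.
#
#   from scipy.stats import pearsonr
#   import numpy as np
#   x = np.arange(0, 10, 0.2)
#   r, p = pearsonr(x, 0.5 * x)                                   # r == 1.0
#   r, p = pearsonr(x, 0.5 * x + np.random.normal(0, 1, len(x)))  # r < 1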
def plot_correlation_demo():
np.random.seed(0) # to reproduce the data later on
pylab.clf()
pylab.figure(num=None, figsize=(8, 8))
x = np.arange(0, 10, 0.2)
pylab.subplot(221)
y = 0.5 * x + norm.rvs(1, scale=.01, size=len(x))
_plot_correlation_func(x, y)
pylab.subplot(222)
y = 0.5 * x + norm.rvs(1, scale=.1, size=len(x))
_plot_correlation_func(x, y)
pylab.subplot(223)
y = 0.5 * x + norm.rvs(1, scale=1, size=len(x))
_plot_correlation_func(x, y)
pylab.subplot(224)
y = norm.rvs(1, scale=10, size=len(x))
_plot_correlation_func(x, y)
pylab.autoscale(tight=True)
pylab.grid(True)
filename = "corr_demo_1.png"
pylab.savefig(os.path.join(CHART_DIR, filename), bbox_inches="tight")
pylab.clf()
pylab.figure(num=None, figsize=(8, 8))
x = np.arange(-5, 5, 0.2)
pylab.subplot(221)
y = 0.5 * x ** 2 + norm.rvs(1, scale=.01, size=len(x))
_plot_correlation_func(x, y)
pylab.subplot(222)
y = 0.5 * x ** 2 + norm.rvs(1, scale=.1, size=len(x))
_plot_correlation_func(x, y)
pylab.subplot(223)
y = 0.5 * x ** 2 + norm.rvs(1, scale=1, size=len(x))
_plot_correlation_func(x, y)
pylab.subplot(224)
y = 0.5 * x ** 2 + norm.rvs(1, scale=10, size=len(x))
_plot_correlation_func(x, y)
pylab.autoscale(tight=True)
pylab.grid(True)
filename = "corr_demo_2.png"
pylab.savefig(os.path.join(CHART_DIR, filename), bbox_inches="tight")
if __name__ == '__main__':
plot_correlation_demo()
| mit |
pkruskal/scikit-learn | sklearn/linear_model/tests/test_ridge.py | 130 | 22974 | import numpy as np
import scipy.sparse as sp
from scipy import linalg
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import ignore_warnings
from sklearn import datasets
from sklearn.metrics import mean_squared_error
from sklearn.metrics import make_scorer
from sklearn.metrics import get_scorer
from sklearn.linear_model.base import LinearRegression
from sklearn.linear_model.ridge import ridge_regression
from sklearn.linear_model.ridge import Ridge
from sklearn.linear_model.ridge import _RidgeGCV
from sklearn.linear_model.ridge import RidgeCV
from sklearn.linear_model.ridge import RidgeClassifier
from sklearn.linear_model.ridge import RidgeClassifierCV
from sklearn.linear_model.ridge import _solve_cholesky
from sklearn.linear_model.ridge import _solve_cholesky_kernel
from sklearn.grid_search import GridSearchCV
from sklearn.cross_validation import KFold
diabetes = datasets.load_diabetes()
X_diabetes, y_diabetes = diabetes.data, diabetes.target
ind = np.arange(X_diabetes.shape[0])
rng = np.random.RandomState(0)
rng.shuffle(ind)
ind = ind[:200]
X_diabetes, y_diabetes = X_diabetes[ind], y_diabetes[ind]
iris = datasets.load_iris()
X_iris = sp.csr_matrix(iris.data)
y_iris = iris.target
DENSE_FILTER = lambda X: X
SPARSE_FILTER = lambda X: sp.csr_matrix(X)
def test_ridge():
# Ridge regression convergence test using score
# TODO: for this test to be robust, we should use a dataset instead
# of np.random.
rng = np.random.RandomState(0)
alpha = 1.0
for solver in ("svd", "sparse_cg", "cholesky", "lsqr"):
# With more samples than features
n_samples, n_features = 6, 5
y = rng.randn(n_samples)
X = rng.randn(n_samples, n_features)
ridge = Ridge(alpha=alpha, solver=solver)
ridge.fit(X, y)
assert_equal(ridge.coef_.shape, (X.shape[1], ))
assert_greater(ridge.score(X, y), 0.47)
if solver == "cholesky":
# Currently the only solver to support sample_weight.
ridge.fit(X, y, sample_weight=np.ones(n_samples))
assert_greater(ridge.score(X, y), 0.47)
# With more features than samples
n_samples, n_features = 5, 10
y = rng.randn(n_samples)
X = rng.randn(n_samples, n_features)
ridge = Ridge(alpha=alpha, solver=solver)
ridge.fit(X, y)
assert_greater(ridge.score(X, y), .9)
if solver == "cholesky":
# Currently the only solver to support sample_weight.
ridge.fit(X, y, sample_weight=np.ones(n_samples))
assert_greater(ridge.score(X, y), 0.9)
def test_primal_dual_relationship():
y = y_diabetes.reshape(-1, 1)
coef = _solve_cholesky(X_diabetes, y, alpha=[1e-2])
K = np.dot(X_diabetes, X_diabetes.T)
dual_coef = _solve_cholesky_kernel(K, y, alpha=[1e-2])
coef2 = np.dot(X_diabetes.T, dual_coef).T
assert_array_almost_equal(coef, coef2)
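# The identity exercised above, in equation form (a sketch of the reasoning,
# not part of the test): with K = X X^T, the dual solution
#     dual_coef = (K + alpha*I)^-1 y
# gives back the primal coefficients via coef = X^T dual_coef, because
#     X^T (X X^T + alpha*I)^-1 = (X^T X + alpha*I)^-1 X^T
# which is why coef and coef2 agree.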
def test_ridge_singular():
# test on a singular matrix
rng = np.random.RandomState(0)
n_samples, n_features = 6, 6
y = rng.randn(n_samples // 2)
y = np.concatenate((y, y))
X = rng.randn(n_samples // 2, n_features)
X = np.concatenate((X, X), axis=0)
ridge = Ridge(alpha=0)
ridge.fit(X, y)
assert_greater(ridge.score(X, y), 0.9)
def test_ridge_sample_weights():
rng = np.random.RandomState(0)
for solver in ("cholesky", ):
for n_samples, n_features in ((6, 5), (5, 10)):
for alpha in (1.0, 1e-2):
y = rng.randn(n_samples)
X = rng.randn(n_samples, n_features)
sample_weight = 1 + rng.rand(n_samples)
coefs = ridge_regression(X, y,
alpha=alpha,
sample_weight=sample_weight,
solver=solver)
# Sample weight can be implemented via a simple rescaling
# for the square loss.
coefs2 = ridge_regression(
X * np.sqrt(sample_weight)[:, np.newaxis],
y * np.sqrt(sample_weight),
alpha=alpha, solver=solver)
assert_array_almost_equal(coefs, coefs2)
# Test for fit_intercept = True
est = Ridge(alpha=alpha, solver=solver)
est.fit(X, y, sample_weight=sample_weight)
# Check using Newton's Method
# Quadratic function should be solved in a single step.
# Initialize
sample_weight = np.sqrt(sample_weight)
X_weighted = sample_weight[:, np.newaxis] * (
np.column_stack((np.ones(n_samples), X)))
y_weighted = y * sample_weight
# Gradient is (X*coef-y)*X + alpha*coef_[1:]
# Remove coef since it is initialized to zero.
grad = -np.dot(y_weighted, X_weighted)
# Hessian is (X.T*X) + alpha*I except that the first
# diagonal element should be zero, since there is no
# penalization of intercept.
diag = alpha * np.ones(n_features + 1)
diag[0] = 0.
hess = np.dot(X_weighted.T, X_weighted)
hess.flat[::n_features + 2] += diag
coef_ = - np.dot(linalg.inv(hess), grad)
assert_almost_equal(coef_[0], est.intercept_)
assert_array_almost_equal(coef_[1:], est.coef_)
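# The rescaling identity behind the check above (a sketch of the reasoning,
# not part of the test): for the squared loss,
#     sum_i w_i * (y_i - x_i . beta)**2
#         == sum_i (sqrt(w_i)*y_i - (sqrt(w_i)*x_i) . beta)**2
# so fitting with sample_weight=w is equivalent to an unweighted fit on
# sqrt(w)[:, np.newaxis] * X and sqrt(w) * y, which is what coefs2 verifies.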
def test_ridge_shapes():
# Test shape of coef_ and intercept_
rng = np.random.RandomState(0)
n_samples, n_features = 5, 10
X = rng.randn(n_samples, n_features)
y = rng.randn(n_samples)
Y1 = y[:, np.newaxis]
Y = np.c_[y, 1 + y]
ridge = Ridge()
ridge.fit(X, y)
assert_equal(ridge.coef_.shape, (n_features,))
assert_equal(ridge.intercept_.shape, ())
ridge.fit(X, Y1)
assert_equal(ridge.coef_.shape, (1, n_features))
assert_equal(ridge.intercept_.shape, (1, ))
ridge.fit(X, Y)
assert_equal(ridge.coef_.shape, (2, n_features))
assert_equal(ridge.intercept_.shape, (2, ))
def test_ridge_intercept():
# Test intercept with multiple targets GH issue #708
rng = np.random.RandomState(0)
n_samples, n_features = 5, 10
X = rng.randn(n_samples, n_features)
y = rng.randn(n_samples)
Y = np.c_[y, 1. + y]
ridge = Ridge()
ridge.fit(X, y)
intercept = ridge.intercept_
ridge.fit(X, Y)
assert_almost_equal(ridge.intercept_[0], intercept)
assert_almost_equal(ridge.intercept_[1], intercept + 1.)
def test_toy_ridge_object():
# Test BayesianRegression ridge classifier
# TODO: test also n_samples > n_features
X = np.array([[1], [2]])
Y = np.array([1, 2])
clf = Ridge(alpha=0.0)
clf.fit(X, Y)
X_test = [[1], [2], [3], [4]]
assert_almost_equal(clf.predict(X_test), [1., 2, 3, 4])
assert_equal(len(clf.coef_.shape), 1)
assert_equal(type(clf.intercept_), np.float64)
Y = np.vstack((Y, Y)).T
clf.fit(X, Y)
X_test = [[1], [2], [3], [4]]
assert_equal(len(clf.coef_.shape), 2)
assert_equal(type(clf.intercept_), np.ndarray)
def test_ridge_vs_lstsq():
# On alpha=0., Ridge and OLS yield the same solution.
rng = np.random.RandomState(0)
# we need more samples than features
n_samples, n_features = 5, 4
y = rng.randn(n_samples)
X = rng.randn(n_samples, n_features)
ridge = Ridge(alpha=0., fit_intercept=False)
ols = LinearRegression(fit_intercept=False)
ridge.fit(X, y)
ols.fit(X, y)
assert_almost_equal(ridge.coef_, ols.coef_)
ridge.fit(X, y)
ols.fit(X, y)
assert_almost_equal(ridge.coef_, ols.coef_)
def test_ridge_individual_penalties():
# Tests the ridge object using individual penalties
rng = np.random.RandomState(42)
n_samples, n_features, n_targets = 20, 10, 5
X = rng.randn(n_samples, n_features)
y = rng.randn(n_samples, n_targets)
penalties = np.arange(n_targets)
coef_cholesky = np.array([
Ridge(alpha=alpha, solver="cholesky").fit(X, target).coef_
for alpha, target in zip(penalties, y.T)])
coefs_indiv_pen = [
Ridge(alpha=penalties, solver=solver, tol=1e-6).fit(X, y).coef_
for solver in ['svd', 'sparse_cg', 'lsqr', 'cholesky']]
for coef_indiv_pen in coefs_indiv_pen:
assert_array_almost_equal(coef_cholesky, coef_indiv_pen)
# Test error is raised when number of targets and penalties do not match.
ridge = Ridge(alpha=penalties[:3])
assert_raises(ValueError, ridge.fit, X, y)
def _test_ridge_loo(filter_):
# test that can work with both dense or sparse matrices
n_samples = X_diabetes.shape[0]
ret = []
ridge_gcv = _RidgeGCV(fit_intercept=False)
ridge = Ridge(alpha=1.0, fit_intercept=False)
# generalized cross-validation (efficient leave-one-out)
decomp = ridge_gcv._pre_compute(X_diabetes, y_diabetes)
errors, c = ridge_gcv._errors(1.0, y_diabetes, *decomp)
values, c = ridge_gcv._values(1.0, y_diabetes, *decomp)
# brute-force leave-one-out: remove one example at a time
errors2 = []
values2 = []
for i in range(n_samples):
sel = np.arange(n_samples) != i
X_new = X_diabetes[sel]
y_new = y_diabetes[sel]
ridge.fit(X_new, y_new)
value = ridge.predict([X_diabetes[i]])[0]
error = (y_diabetes[i] - value) ** 2
errors2.append(error)
values2.append(value)
# check that efficient and brute-force LOO give same results
assert_almost_equal(errors, errors2)
assert_almost_equal(values, values2)
# generalized cross-validation (efficient leave-one-out,
# SVD variation)
decomp = ridge_gcv._pre_compute_svd(X_diabetes, y_diabetes)
errors3, c = ridge_gcv._errors_svd(ridge.alpha, y_diabetes, *decomp)
values3, c = ridge_gcv._values_svd(ridge.alpha, y_diabetes, *decomp)
# check that efficient and SVD efficient LOO give same results
assert_almost_equal(errors, errors3)
assert_almost_equal(values, values3)
# check best alpha
ridge_gcv.fit(filter_(X_diabetes), y_diabetes)
alpha_ = ridge_gcv.alpha_
ret.append(alpha_)
# check that we get same best alpha with custom loss_func
f = ignore_warnings
scoring = make_scorer(mean_squared_error, greater_is_better=False)
ridge_gcv2 = RidgeCV(fit_intercept=False, scoring=scoring)
f(ridge_gcv2.fit)(filter_(X_diabetes), y_diabetes)
assert_equal(ridge_gcv2.alpha_, alpha_)
# check that we get same best alpha with custom score_func
func = lambda x, y: -mean_squared_error(x, y)
scoring = make_scorer(func)
ridge_gcv3 = RidgeCV(fit_intercept=False, scoring=scoring)
f(ridge_gcv3.fit)(filter_(X_diabetes), y_diabetes)
assert_equal(ridge_gcv3.alpha_, alpha_)
# check that we get same best alpha with a scorer
scorer = get_scorer('mean_squared_error')
ridge_gcv4 = RidgeCV(fit_intercept=False, scoring=scorer)
ridge_gcv4.fit(filter_(X_diabetes), y_diabetes)
assert_equal(ridge_gcv4.alpha_, alpha_)
# check that we get same best alpha with sample weights
ridge_gcv.fit(filter_(X_diabetes), y_diabetes,
sample_weight=np.ones(n_samples))
assert_equal(ridge_gcv.alpha_, alpha_)
# simulate several responses
Y = np.vstack((y_diabetes, y_diabetes)).T
ridge_gcv.fit(filter_(X_diabetes), Y)
Y_pred = ridge_gcv.predict(filter_(X_diabetes))
ridge_gcv.fit(filter_(X_diabetes), y_diabetes)
y_pred = ridge_gcv.predict(filter_(X_diabetes))
assert_array_almost_equal(np.vstack((y_pred, y_pred)).T,
Y_pred, decimal=5)
return ret
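# The shortcut behind the "efficient leave-one-out" comparison above (a sketch
# of the reasoning, not part of the test): for a linear smoother with hat
# matrix H = X (X^T X + alpha*I)^-1 X^T, the held-out residual for sample i is
#     y_i - yhat_{-i} = (y_i - yhat_i) / (1 - H_ii)
# so all leave-one-out errors come from a single fit, which is what _RidgeGCV
# computes and the brute-force loop cross-checks.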
def _test_ridge_cv(filter_):
n_samples = X_diabetes.shape[0]
ridge_cv = RidgeCV()
ridge_cv.fit(filter_(X_diabetes), y_diabetes)
ridge_cv.predict(filter_(X_diabetes))
assert_equal(len(ridge_cv.coef_.shape), 1)
assert_equal(type(ridge_cv.intercept_), np.float64)
cv = KFold(n_samples, 5)
ridge_cv.set_params(cv=cv)
ridge_cv.fit(filter_(X_diabetes), y_diabetes)
ridge_cv.predict(filter_(X_diabetes))
assert_equal(len(ridge_cv.coef_.shape), 1)
assert_equal(type(ridge_cv.intercept_), np.float64)
def _test_ridge_diabetes(filter_):
ridge = Ridge(fit_intercept=False)
ridge.fit(filter_(X_diabetes), y_diabetes)
return np.round(ridge.score(filter_(X_diabetes), y_diabetes), 5)
def _test_multi_ridge_diabetes(filter_):
# simulate several responses
Y = np.vstack((y_diabetes, y_diabetes)).T
n_features = X_diabetes.shape[1]
ridge = Ridge(fit_intercept=False)
ridge.fit(filter_(X_diabetes), Y)
assert_equal(ridge.coef_.shape, (2, n_features))
Y_pred = ridge.predict(filter_(X_diabetes))
ridge.fit(filter_(X_diabetes), y_diabetes)
y_pred = ridge.predict(filter_(X_diabetes))
assert_array_almost_equal(np.vstack((y_pred, y_pred)).T,
Y_pred, decimal=3)
def _test_ridge_classifiers(filter_):
n_classes = np.unique(y_iris).shape[0]
n_features = X_iris.shape[1]
for clf in (RidgeClassifier(), RidgeClassifierCV()):
clf.fit(filter_(X_iris), y_iris)
assert_equal(clf.coef_.shape, (n_classes, n_features))
y_pred = clf.predict(filter_(X_iris))
assert_greater(np.mean(y_iris == y_pred), .79)
n_samples = X_iris.shape[0]
cv = KFold(n_samples, 5)
clf = RidgeClassifierCV(cv=cv)
clf.fit(filter_(X_iris), y_iris)
y_pred = clf.predict(filter_(X_iris))
assert_true(np.mean(y_iris == y_pred) >= 0.8)
def _test_tolerance(filter_):
ridge = Ridge(tol=1e-5)
ridge.fit(filter_(X_diabetes), y_diabetes)
score = ridge.score(filter_(X_diabetes), y_diabetes)
ridge2 = Ridge(tol=1e-3)
ridge2.fit(filter_(X_diabetes), y_diabetes)
score2 = ridge2.score(filter_(X_diabetes), y_diabetes)
assert_true(score >= score2)
def test_dense_sparse():
for test_func in (_test_ridge_loo,
_test_ridge_cv,
_test_ridge_diabetes,
_test_multi_ridge_diabetes,
_test_ridge_classifiers,
_test_tolerance):
# test dense matrix
ret_dense = test_func(DENSE_FILTER)
# test sparse matrix
ret_sparse = test_func(SPARSE_FILTER)
# test that the outputs are the same
if ret_dense is not None and ret_sparse is not None:
assert_array_almost_equal(ret_dense, ret_sparse, decimal=3)
def test_ridge_cv_sparse_svd():
X = sp.csr_matrix(X_diabetes)
ridge = RidgeCV(gcv_mode="svd")
assert_raises(TypeError, ridge.fit, X)
def test_ridge_sparse_svd():
X = sp.csc_matrix(rng.rand(100, 10))
y = rng.rand(100)
ridge = Ridge(solver='svd')
assert_raises(TypeError, ridge.fit, X, y)
def test_class_weights():
# Test class weights.
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y = [1, 1, 1, -1, -1]
clf = RidgeClassifier(class_weight=None)
clf.fit(X, y)
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([1]))
# we give a small weights to class 1
clf = RidgeClassifier(class_weight={1: 0.001})
clf.fit(X, y)
# now the hyperplane should rotate clock-wise and
# the prediction on this point should shift
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([-1]))
# check if class_weight = 'balanced' can handle negative labels.
clf = RidgeClassifier(class_weight='balanced')
clf.fit(X, y)
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([1]))
# class_weight = 'balanced', and class_weight = None should return
# same values when y has equal number of all labels
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0], [1.0, 1.0]])
y = [1, 1, -1, -1]
clf = RidgeClassifier(class_weight=None)
clf.fit(X, y)
clfa = RidgeClassifier(class_weight='balanced')
clfa.fit(X, y)
assert_equal(len(clfa.classes_), 2)
assert_array_almost_equal(clf.coef_, clfa.coef_)
assert_array_almost_equal(clf.intercept_, clfa.intercept_)
def test_class_weight_vs_sample_weight():
"""Check class_weights resemble sample_weights behavior."""
for clf in (RidgeClassifier, RidgeClassifierCV):
# Iris is balanced, so no effect expected for using 'balanced' weights
clf1 = clf()
clf1.fit(iris.data, iris.target)
clf2 = clf(class_weight='balanced')
clf2.fit(iris.data, iris.target)
assert_almost_equal(clf1.coef_, clf2.coef_)
# Inflate importance of class 1, check against user-defined weights
sample_weight = np.ones(iris.target.shape)
sample_weight[iris.target == 1] *= 100
class_weight = {0: 1., 1: 100., 2: 1.}
clf1 = clf()
clf1.fit(iris.data, iris.target, sample_weight)
clf2 = clf(class_weight=class_weight)
clf2.fit(iris.data, iris.target)
assert_almost_equal(clf1.coef_, clf2.coef_)
# Check that sample_weight and class_weight are multiplicative
clf1 = clf()
clf1.fit(iris.data, iris.target, sample_weight ** 2)
clf2 = clf(class_weight=class_weight)
clf2.fit(iris.data, iris.target, sample_weight)
assert_almost_equal(clf1.coef_, clf2.coef_)
def test_class_weights_cv():
# Test class weights for cross validated ridge classifier.
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y = [1, 1, 1, -1, -1]
clf = RidgeClassifierCV(class_weight=None, alphas=[.01, .1, 1])
clf.fit(X, y)
    # we give a small weight to class 1
clf = RidgeClassifierCV(class_weight={1: 0.001}, alphas=[.01, .1, 1, 10])
clf.fit(X, y)
assert_array_equal(clf.predict([[-.2, 2]]), np.array([-1]))
def test_ridgecv_store_cv_values():
# Test _RidgeCV's store_cv_values attribute.
    rng = np.random.RandomState(42)
n_samples = 8
n_features = 5
x = rng.randn(n_samples, n_features)
alphas = [1e-1, 1e0, 1e1]
n_alphas = len(alphas)
r = RidgeCV(alphas=alphas, store_cv_values=True)
# with len(y.shape) == 1
y = rng.randn(n_samples)
r.fit(x, y)
assert_equal(r.cv_values_.shape, (n_samples, n_alphas))
# with len(y.shape) == 2
n_responses = 3
y = rng.randn(n_samples, n_responses)
r.fit(x, y)
assert_equal(r.cv_values_.shape, (n_samples, n_responses, n_alphas))
def test_ridgecv_sample_weight():
rng = np.random.RandomState(0)
alphas = (0.1, 1.0, 10.0)
# There are different algorithms for n_samples > n_features
# and the opposite, so test them both.
for n_samples, n_features in ((6, 5), (5, 10)):
y = rng.randn(n_samples)
X = rng.randn(n_samples, n_features)
sample_weight = 1 + rng.rand(n_samples)
cv = KFold(n_samples, 5)
ridgecv = RidgeCV(alphas=alphas, cv=cv)
ridgecv.fit(X, y, sample_weight=sample_weight)
# Check using GridSearchCV directly
parameters = {'alpha': alphas}
fit_params = {'sample_weight': sample_weight}
gs = GridSearchCV(Ridge(), parameters, fit_params=fit_params,
cv=cv)
gs.fit(X, y)
assert_equal(ridgecv.alpha_, gs.best_estimator_.alpha)
assert_array_almost_equal(ridgecv.coef_, gs.best_estimator_.coef_)
def test_raises_value_error_if_sample_weights_greater_than_1d():
# Sample weights must be either scalar or 1D
n_sampless = [2, 3]
n_featuress = [3, 2]
rng = np.random.RandomState(42)
for n_samples, n_features in zip(n_sampless, n_featuress):
X = rng.randn(n_samples, n_features)
y = rng.randn(n_samples)
sample_weights_OK = rng.randn(n_samples) ** 2 + 1
sample_weights_OK_1 = 1.
sample_weights_OK_2 = 2.
sample_weights_not_OK = sample_weights_OK[:, np.newaxis]
sample_weights_not_OK_2 = sample_weights_OK[np.newaxis, :]
ridge = Ridge(alpha=1)
# make sure the "OK" sample weights actually work
ridge.fit(X, y, sample_weights_OK)
ridge.fit(X, y, sample_weights_OK_1)
ridge.fit(X, y, sample_weights_OK_2)
def fit_ridge_not_ok():
ridge.fit(X, y, sample_weights_not_OK)
def fit_ridge_not_ok_2():
ridge.fit(X, y, sample_weights_not_OK_2)
assert_raise_message(ValueError,
"Sample weights must be 1D array or scalar",
fit_ridge_not_ok)
assert_raise_message(ValueError,
"Sample weights must be 1D array or scalar",
fit_ridge_not_ok_2)
def test_sparse_design_with_sample_weights():
# Sample weights must work with sparse matrices
n_sampless = [2, 3]
n_featuress = [3, 2]
rng = np.random.RandomState(42)
sparse_matrix_converters = [sp.coo_matrix,
sp.csr_matrix,
sp.csc_matrix,
sp.lil_matrix,
sp.dok_matrix
]
sparse_ridge = Ridge(alpha=1., fit_intercept=False)
dense_ridge = Ridge(alpha=1., fit_intercept=False)
for n_samples, n_features in zip(n_sampless, n_featuress):
X = rng.randn(n_samples, n_features)
y = rng.randn(n_samples)
sample_weights = rng.randn(n_samples) ** 2 + 1
for sparse_converter in sparse_matrix_converters:
X_sparse = sparse_converter(X)
sparse_ridge.fit(X_sparse, y, sample_weight=sample_weights)
dense_ridge.fit(X, y, sample_weight=sample_weights)
assert_array_almost_equal(sparse_ridge.coef_, dense_ridge.coef_,
decimal=6)
def test_raises_value_error_if_solver_not_supported():
# Tests whether a ValueError is raised if a non-identified solver
# is passed to ridge_regression
wrong_solver = "This is not a solver (MagritteSolveCV QuantumBitcoin)"
exception = ValueError
message = "Solver %s not understood" % wrong_solver
def func():
X = np.eye(3)
y = np.ones(3)
ridge_regression(X, y, alpha=1., solver=wrong_solver)
assert_raise_message(exception, message, func)
def test_sparse_cg_max_iter():
reg = Ridge(solver="sparse_cg", max_iter=1)
reg.fit(X_diabetes, y_diabetes)
assert_equal(reg.coef_.shape[0], X_diabetes.shape[1])
| bsd-3-clause |
DataViva/dataviva-scripts | commands/load_metadata/countries.py | 1 | 1745 | import click
import pandas
import json
from clients import s3, redis
@click.command()
@click.option('--both', 'upload', flag_value='s3_and_redis', default=True, help='Upload metadata to both s3 and Redis')
@click.option('--s3', 'upload', flag_value='only_s3', help='Upload metadata only to s3')
@click.option('--redis', 'upload', flag_value='only_redis', help='Upload metadata only to Redis')
def countries(upload):
csv = s3.get('metadata/continents.csv')
df_continents = pandas.read_csv(
csv,
sep=';',
header=0,
names=['id', 'country_id', 'name_en', 'name_pt'],
converters={
"country_id": lambda x: '%03d' % int(x)
}
)
continents = {}
for _, row in df_continents.iterrows():
continents[row['country_id']] = {
'id': row["id"],
'name_en': row["name_en"],
'name_pt': row["name_pt"],
}
csv = s3.get('metadata/wld.csv')
df = pandas.read_csv(
csv,
sep=';',
header=0,
names=['id', 'name_pt', 'name_en', 'abbreviation'],
converters={
"id": str
}
)
countries = {}
for _, row in df.iterrows():
country = {
'id': row["id"],
'name_pt': row["name_pt"],
'name_en': row["name_en"],
'abbrv': row["abbreviation"],
'continent': continents.get(row["id"], {})
}
countries[row['id']] = country
if upload != 'only_s3':
redis.set('country/' + str(row['id']), json.dumps(country, ensure_ascii=False))
if upload != 'only_redis':
s3.put('country.json', json.dumps(countries, ensure_ascii=False))
click.echo("Countries loaded.")
| mit |
adolfocorreia/portfolio | retriever/bovespa.py | 1 | 2969 | import glob
import pandas as pd
from .retriever import ValueRetriever
class BovespaRetriever(ValueRetriever):
def __init__(self):
ValueRetriever.__init__(self, "bovespa")
def _get_data_file_patterns(self):
return [self.data_directory + "/COTAHIST_A%s.ZIP"]
def _available_codes(self):
return self._data.index.levels[1].values
def _load_data_files(self):
print "Loading stocks TXT files..."
fields = [
( 1, 2, "TIPREG"),
( 3, 10, "DATA" ), # Data
( 11, 12, "CODBDI"), # Codigo BDI
( 13, 24, "CODNEG"), # Codigo de negociacao
( 25, 27, "TPMERC"), # Tipo de mercado
( 28, 39, "NOMRES"), # Nome resumido
( 40, 49, "ESPECI"), # Especificacao do papel
( 50, 52, "PRAZOT"),
( 53, 56, "MODREF"),
( 57, 69, "PREABE"), # Preco abertura
( 70, 82, "PREMAX"), # Preco maximo
( 83, 95, "PREMIN"), # Preco minimo
( 96, 108, "PREMED"), # Preco medio
(109, 121, "PREULT"), # Preco fechamento
(122, 134, "PREOFC"),
(135, 147, "PREOFV"),
(148, 152, "TOTNEG"), # Total de negocios
(153, 170, "QUATOT"), # Quantidade de titulos negociados
(171, 188, "VOLTOT"), # Volume negociado
(189, 201, "PREEXE"),
(202, 202, "INDOPC"),
(203, 210, "DATVEN"),
(211, 217, "FATCOT"),
(218, 230, "PTOEXE"),
(231, 242, "CODISI"),
(243, 245, "DISMES"),
]
colspecs = [(item[0]-1, item[1]) for item in fields]
names = [item[2] for item in fields]
# filter_CODBDI = [
# "02", # Lote padrao
# "12", # Fundos imobiliarios
# ]
prices = [
"PREABE",
"PREMAX",
"PREMIN",
"PREMED",
"PREULT",
"PREOFC",
"PREOFV",
# "VOLTOT",
"PREEXE",
]
self._data = pd.DataFrame()
file_list = sorted(glob.glob(self.data_directory + "/COTAHIST_A*.TXT"))
for file_name in file_list:
print "Loading file %s..." % file_name
df = pd.read_fwf(
file_name,
names=names,
header=0,
parse_dates=['DATA'],
index_col=['DATA', 'CODNEG'],
colspecs=colspecs,
skipfooter=1)
self._data = self._data.append(df)
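        # COTAHIST price fields carry two implied decimal places, so scale them to currency units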
for col in prices:
self._data[col] /= 100.0
print "Done loading stocks TXT files."
def get_value(self, code, date):
ValueRetriever.get_value(self, code, date)
ts = pd.Timestamp(date)
sub_df = self._data.xs(code, level='CODNEG')
asof_ts = sub_df.index.asof(ts)
return sub_df.ix[asof_ts].PREULT
| gpl-2.0 |
h2oai/h2o-dev | h2o-py/tests/testdir_scikit_grid/pyunit_scal_pca_rf_grid.py | 1 | 3717 | from __future__ import print_function
import sys
sys.path.insert(1,"../../")
import h2o
from tests import pyunit_utils
def scale_pca_rf_pipe():
from h2o.transforms.preprocessing import H2OScaler
from h2o.transforms import H2OPCA
from h2o.estimators.random_forest import H2ORandomForestEstimator
from sklearn.pipeline import Pipeline
from sklearn.grid_search import RandomizedSearchCV
from h2o.cross_validation import H2OKFold
from h2o.model.regression import h2o_r2_score
from sklearn.metrics.scorer import make_scorer
from scipy.stats import randint
iris = h2o.import_file(path=pyunit_utils.locate("smalldata/iris/iris_wheader.csv"))
# build transformation pipeline using sklearn's Pipeline and H2O transforms
pipe = Pipeline([("standardize", H2OScaler()),
("pca", H2OPCA()),
("rf", H2ORandomForestEstimator())])
params = {"standardize__center": [True, False], # Parameters to test
"standardize__scale": [True, False],
"pca__k": randint(2, iris[1:].shape[1]),
"rf__ntrees": randint(50,60),
"rf__max_depth": randint(4,8),
"rf__min_rows": randint(5,10),
"pca__transform": ["none", "standardize"],
}
custom_cv = H2OKFold(iris, n_folds=5, seed=42)
random_search = RandomizedSearchCV(pipe, params,
n_iter=5,
scoring=make_scorer(h2o_r2_score),
cv=custom_cv,
random_state=42,
n_jobs=1)
random_search.fit(iris[1:],iris[0])
print(random_search.best_estimator_)
def scale_pca_rf_pipe_new_import():
from h2o.transforms.preprocessing import H2OScaler
from h2o.estimators.pca import H2OPrincipalComponentAnalysisEstimator
from h2o.estimators.random_forest import H2ORandomForestEstimator
from sklearn.pipeline import Pipeline
from sklearn.grid_search import RandomizedSearchCV
from h2o.cross_validation import H2OKFold
from h2o.model.regression import h2o_r2_score
from sklearn.metrics.scorer import make_scorer
from scipy.stats import randint
iris = h2o.import_file(path=pyunit_utils.locate("smalldata/iris/iris_wheader.csv"))
# build transformation pipeline using sklearn's Pipeline and H2O transforms
pipe = Pipeline([("standardize", H2OScaler()),
("pca", H2OPrincipalComponentAnalysisEstimator().init_for_pipeline()),
("rf", H2ORandomForestEstimator())])
params = {"standardize__center": [True, False], # Parameters to test
"standardize__scale": [True, False],
"pca__k": randint(2, iris[1:].shape[1]),
"rf__ntrees": randint(50,60),
"rf__max_depth": randint(4,8),
"rf__min_rows": randint(5,10),
"pca__transform": ["none", "standardize"],
}
custom_cv = H2OKFold(iris, n_folds=5, seed=42)
random_search = RandomizedSearchCV(pipe, params,
n_iter=5,
scoring=make_scorer(h2o_r2_score),
cv=custom_cv,
random_state=42,
n_jobs=1)
random_search.fit(iris[1:],iris[0])
print(random_search.best_estimator_)
if __name__ == "__main__":
pyunit_utils.standalone_test(scale_pca_rf_pipe)
pyunit_utils.standalone_test(scale_pca_rf_pipe_new_import)
else:
scale_pca_rf_pipe()
scale_pca_rf_pipe_new_import()
| apache-2.0 |
kubeflow/kfp-tekton-backend | components/deprecated/dataproc/analyze/src/analyze_run.py | 1 | 3782 | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from pyspark.sql.types import StructType, StructField
from pyspark.sql.types import DoubleType, IntegerType, StringType
import pandas as pd
from tensorflow.python.lib.io import file_io
from pyspark.sql.session import SparkSession
import json
import os
VOCAB_ANALYSIS_FILE = 'vocab_%s.csv'
STATS_FILE = 'stats.json'
def load_schema(schema_file):
type_map = {
'KEY': StringType(),
'NUMBER': DoubleType(),
'CATEGORY': StringType(),
'TEXT': StringType(),
'IMAGE_URL': StringType()
}
schema_json = json.loads(file_io.read_file_to_string(schema_file))
fields = [StructField(x['name'], type_map[x['type']]) for x in schema_json]
return schema_json, StructType(fields)
def get_columns_of_type(datatype, schema_json):
return [x['name'] for x in schema_json if x['type'] == datatype]
parser = argparse.ArgumentParser(description='ML')
parser.add_argument('--output', type=str)
parser.add_argument('--train', type=str)
parser.add_argument('--schema', type=str)
args = parser.parse_args()
schema_json, schema = load_schema(args.schema)
text_columns = get_columns_of_type('TEXT', schema_json)
category_columns = get_columns_of_type('CATEGORY', schema_json)
number_columns = get_columns_of_type('NUMBER', schema_json)
spark = SparkSession.builder.appName("MLAnalyzer").getOrCreate()
df = spark.read.schema(schema).csv(args.train)
df.createOrReplaceTempView("train")
num_examples = df.sql_ctx.sql(
'SELECT COUNT(*) AS num_examples FROM train').collect()[0].num_examples
stats = {'column_stats': {}, 'num_examples': num_examples}
for col in text_columns:
col_data = df.sql_ctx.sql("""
SELECT token, COUNT(token) AS token_count
FROM (SELECT EXPLODE(SPLIT({name}, \' \')) AS token FROM train)
GROUP BY token
ORDER BY token_count DESC, token ASC""".format(name=col))
token_counts = [(r.token, r.token_count) for r in col_data.collect()]
csv_string = pd.DataFrame(token_counts).to_csv(index=False, header=False)
file_io.write_string_to_file(os.path.join(args.output, VOCAB_ANALYSIS_FILE % col), csv_string)
stats['column_stats'][col] = {'vocab_size': len(token_counts)}
for col in category_columns:
col_data = df.sql_ctx.sql("""
SELECT {name} as token, COUNT({name}) AS token_count
FROM train
GROUP BY token
ORDER BY token_count DESC, token ASC
""".format(name=col))
token_counts = [(r.token, r.token_count) for r in col_data.collect()]
csv_string = pd.DataFrame(token_counts).to_csv(index=False, header=False)
file_io.write_string_to_file(os.path.join(args.output, VOCAB_ANALYSIS_FILE % col), csv_string)
stats['column_stats'][col] = {'vocab_size': len(token_counts)}
for col in number_columns:
col_stats = df.sql_ctx.sql("""
SELECT MAX({name}) AS max_value, MIN({name}) AS min_value, AVG({name}) AS mean_value
FROM train""".format(name=col)).collect()
stats['column_stats'][col] = {'min': col_stats[0].min_value, 'max': col_stats[0].max_value, 'mean': col_stats[0].mean_value}
file_io.write_string_to_file(os.path.join(args.output, STATS_FILE), json.dumps(stats, indent=2, separators=(',', ': ')))
file_io.write_string_to_file(os.path.join(args.output, 'schema.json'), json.dumps(schema_json, indent=2, separators=(',', ': ')))
| apache-2.0 |
poryfly/scikit-learn | examples/plot_kernel_ridge_regression.py | 230 | 6222 | """
=============================================
Comparison of kernel ridge regression and SVR
=============================================
Both kernel ridge regression (KRR) and SVR learn a non-linear function by
employing the kernel trick, i.e., they learn a linear function in the space
induced by the respective kernel which corresponds to a non-linear function in
the original space. They differ in the loss functions (ridge versus
epsilon-insensitive loss). In contrast to SVR, fitting a KRR can be done in
closed-form and is typically faster for medium-sized datasets. On the other
hand, the learned model is non-sparse and thus slower than SVR at
prediction-time.
This example illustrates both methods on an artificial dataset, which
consists of a sinusoidal target function and strong noise added to every fifth
datapoint. The first figure compares the learned model of KRR and SVR when both
complexity/regularization and bandwidth of the RBF kernel are optimized using
grid-search. The learned functions are very similar; however, fitting KRR is
approx. seven times faster than fitting SVR (both with grid-search). However,
prediction of 100000 target values is more than three times faster with SVR
since it has learned a sparse model using only approx. 1/3 of the 100 training
datapoints as support vectors.
The next figure compares the time for fitting and prediction of KRR and SVR for
different sizes of the training set. Fitting KRR is faster than SVR for medium-
sized training sets (less than 1000 samples); however, for larger training sets
SVR scales better. With regard to prediction time, SVR is faster than
KRR for all sizes of the training set because of the learned sparse
solution. Note that the degree of sparsity and thus the prediction time depends
on the parameters epsilon and C of the SVR.
"""
# Authors: Jan Hendrik Metzen <[email protected]>
# License: BSD 3 clause
from __future__ import division
import time
import numpy as np
from sklearn.svm import SVR
from sklearn.grid_search import GridSearchCV
from sklearn.learning_curve import learning_curve
from sklearn.kernel_ridge import KernelRidge
import matplotlib.pyplot as plt
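
# A minimal, self-contained sketch of the API contrast described in the docstring
# above: KernelRidge solves a closed-form system over all training points, while
# SVR fits a sparse model and predicts from its support vectors only. The shapes
# and hyperparameters below are illustrative assumptions, separate from the
# benchmark that follows.
def _krr_vs_svr_sketch():
    sketch_rng = np.random.RandomState(1)
    X_sketch = 5 * sketch_rng.rand(200, 1)
    y_sketch = np.sin(X_sketch).ravel()

    krr_sketch = KernelRidge(kernel='rbf', alpha=1.0, gamma=0.1).fit(X_sketch, y_sketch)
    svr_sketch = SVR(kernel='rbf', C=1.0, gamma=0.1).fit(X_sketch, y_sketch)

    # KRR evaluates the kernel against all 200 training points at prediction time;
    # SVR only needs its support vectors (len(svr_sketch.support_) <= 200).
    return (krr_sketch.predict(X_sketch[:5]),
            svr_sketch.predict(X_sketch[:5]),
            len(svr_sketch.support_))
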
rng = np.random.RandomState(0)
#############################################################################
# Generate sample data
X = 5 * rng.rand(10000, 1)
y = np.sin(X).ravel()
# Add noise to targets
y[::5] += 3 * (0.5 - rng.rand(X.shape[0] // 5))
X_plot = np.linspace(0, 5, 100000)[:, None]
#############################################################################
# Fit regression model
train_size = 100
svr = GridSearchCV(SVR(kernel='rbf', gamma=0.1), cv=5,
param_grid={"C": [1e0, 1e1, 1e2, 1e3],
"gamma": np.logspace(-2, 2, 5)})
kr = GridSearchCV(KernelRidge(kernel='rbf', gamma=0.1), cv=5,
param_grid={"alpha": [1e0, 0.1, 1e-2, 1e-3],
"gamma": np.logspace(-2, 2, 5)})
t0 = time.time()
svr.fit(X[:train_size], y[:train_size])
svr_fit = time.time() - t0
print("SVR complexity and bandwidth selected and model fitted in %.3f s"
% svr_fit)
t0 = time.time()
kr.fit(X[:train_size], y[:train_size])
kr_fit = time.time() - t0
print("KRR complexity and bandwidth selected and model fitted in %.3f s"
% kr_fit)
sv_ratio = svr.best_estimator_.support_.shape[0] / train_size
print("Support vector ratio: %.3f" % sv_ratio)
t0 = time.time()
y_svr = svr.predict(X_plot)
svr_predict = time.time() - t0
print("SVR prediction for %d inputs in %.3f s"
% (X_plot.shape[0], svr_predict))
t0 = time.time()
y_kr = kr.predict(X_plot)
kr_predict = time.time() - t0
print("KRR prediction for %d inputs in %.3f s"
% (X_plot.shape[0], kr_predict))
#############################################################################
# look at the results
sv_ind = svr.best_estimator_.support_
plt.scatter(X[sv_ind], y[sv_ind], c='r', s=50, label='SVR support vectors')
plt.scatter(X[:100], y[:100], c='k', label='data')
plt.hold('on')
plt.plot(X_plot, y_svr, c='r',
label='SVR (fit: %.3fs, predict: %.3fs)' % (svr_fit, svr_predict))
plt.plot(X_plot, y_kr, c='g',
label='KRR (fit: %.3fs, predict: %.3fs)' % (kr_fit, kr_predict))
plt.xlabel('data')
plt.ylabel('target')
plt.title('SVR versus Kernel Ridge')
plt.legend()
# Visualize training and prediction time
plt.figure()
# Generate sample data
X = 5 * rng.rand(10000, 1)
y = np.sin(X).ravel()
y[::5] += 3 * (0.5 - rng.rand(X.shape[0] // 5))
sizes = np.logspace(1, 4, 7).astype(int)
for name, estimator in {"KRR": KernelRidge(kernel='rbf', alpha=0.1,
gamma=10),
"SVR": SVR(kernel='rbf', C=1e1, gamma=10)}.items():
train_time = []
test_time = []
for train_test_size in sizes:
t0 = time.time()
estimator.fit(X[:train_test_size], y[:train_test_size])
train_time.append(time.time() - t0)
t0 = time.time()
estimator.predict(X_plot[:1000])
test_time.append(time.time() - t0)
plt.plot(sizes, train_time, 'o-', color="r" if name == "SVR" else "g",
label="%s (train)" % name)
plt.plot(sizes, test_time, 'o--', color="r" if name == "SVR" else "g",
label="%s (test)" % name)
plt.xscale("log")
plt.yscale("log")
plt.xlabel("Train size")
plt.ylabel("Time (seconds)")
plt.title('Execution Time')
plt.legend(loc="best")
# Visualize learning curves
plt.figure()
svr = SVR(kernel='rbf', C=1e1, gamma=0.1)
kr = KernelRidge(kernel='rbf', alpha=0.1, gamma=0.1)
train_sizes, train_scores_svr, test_scores_svr = \
learning_curve(svr, X[:100], y[:100], train_sizes=np.linspace(0.1, 1, 10),
scoring="mean_squared_error", cv=10)
train_sizes_abs, train_scores_kr, test_scores_kr = \
learning_curve(kr, X[:100], y[:100], train_sizes=np.linspace(0.1, 1, 10),
scoring="mean_squared_error", cv=10)
plt.plot(train_sizes, test_scores_svr.mean(1), 'o-', color="r",
label="SVR")
plt.plot(train_sizes, test_scores_kr.mean(1), 'o-', color="g",
label="KRR")
plt.xlabel("Train size")
plt.ylabel("Mean Squared Error")
plt.title('Learning curves')
plt.legend(loc="best")
plt.show()
| bsd-3-clause |
nan86150/ImageFusion | lib/python2.7/site-packages/matplotlib/backends/backend_gtkagg.py | 11 | 4354 | """
Render to gtk from agg
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import os
import matplotlib
from matplotlib.figure import Figure
from matplotlib.backends.backend_agg import FigureCanvasAgg
from matplotlib.backends.backend_gtk import gtk, FigureManagerGTK, FigureCanvasGTK,\
show, draw_if_interactive,\
error_msg_gtk, PIXELS_PER_INCH, backend_version, \
NavigationToolbar2GTK
from matplotlib.backends._gtkagg import agg_to_gtk_drawable
DEBUG = False
class NavigationToolbar2GTKAgg(NavigationToolbar2GTK):
def _get_canvas(self, fig):
return FigureCanvasGTKAgg(fig)
class FigureManagerGTKAgg(FigureManagerGTK):
def _get_toolbar(self, canvas):
# must be inited after the window, drawingArea and figure
# attrs are set
if matplotlib.rcParams['toolbar']=='toolbar2':
toolbar = NavigationToolbar2GTKAgg (canvas, self.window)
else:
toolbar = None
return toolbar
def new_figure_manager(num, *args, **kwargs):
"""
Create a new figure manager instance
"""
if DEBUG: print('backend_gtkagg.new_figure_manager')
FigureClass = kwargs.pop('FigureClass', Figure)
thisFig = FigureClass(*args, **kwargs)
return new_figure_manager_given_figure(num, thisFig)
def new_figure_manager_given_figure(num, figure):
"""
Create a new figure manager instance for the given figure.
"""
    canvas = FigureCanvasGTKAgg(figure)
    if DEBUG: print('backend_gtkagg.new_figure_manager done')
    return FigureManagerGTKAgg(canvas, num)
class FigureCanvasGTKAgg(FigureCanvasGTK, FigureCanvasAgg):
filetypes = FigureCanvasGTK.filetypes.copy()
filetypes.update(FigureCanvasAgg.filetypes)
def configure_event(self, widget, event=None):
if DEBUG: print('FigureCanvasGTKAgg.configure_event')
if widget.window is None:
return
try:
del self.renderer
except AttributeError:
pass
w,h = widget.window.get_size()
if w==1 or h==1: return # empty fig
# compute desired figure size in inches
dpival = self.figure.dpi
winch = w/dpival
hinch = h/dpival
self.figure.set_size_inches(winch, hinch)
self._need_redraw = True
self.resize_event()
if DEBUG: print('FigureCanvasGTKAgg.configure_event end')
return True
def _render_figure(self, pixmap, width, height):
if DEBUG: print('FigureCanvasGTKAgg.render_figure')
FigureCanvasAgg.draw(self)
if DEBUG: print('FigureCanvasGTKAgg.render_figure pixmap', pixmap)
#agg_to_gtk_drawable(pixmap, self.renderer._renderer, None)
buf = self.buffer_rgba()
ren = self.get_renderer()
w = int(ren.width)
h = int(ren.height)
pixbuf = gtk.gdk.pixbuf_new_from_data(
buf, gtk.gdk.COLORSPACE_RGB, True, 8, w, h, w*4)
pixmap.draw_pixbuf(pixmap.new_gc(), pixbuf, 0, 0, 0, 0, w, h,
gtk.gdk.RGB_DITHER_NONE, 0, 0)
if DEBUG: print('FigureCanvasGTKAgg.render_figure done')
def blit(self, bbox=None):
if DEBUG: print('FigureCanvasGTKAgg.blit', self._pixmap)
agg_to_gtk_drawable(self._pixmap, self.renderer._renderer, bbox)
x, y, w, h = self.allocation
self.window.draw_drawable (self.style.fg_gc[self.state], self._pixmap,
0, 0, 0, 0, w, h)
if DEBUG: print('FigureCanvasGTKAgg.done')
def print_png(self, filename, *args, **kwargs):
# Do this so we can save the resolution of figure in the PNG file
agg = self.switch_backends(FigureCanvasAgg)
return agg.print_png(filename, *args, **kwargs)
"""\
Traceback (most recent call last):
File "/home/titan/johnh/local/lib/python2.3/site-packages/matplotlib/backends/backend_gtk.py", line 304, in expose_event
self._render_figure(self._pixmap, w, h)
File "/home/titan/johnh/local/lib/python2.3/site-packages/matplotlib/backends/backend_gtkagg.py", line 77, in _render_figure
pixbuf = gtk.gdk.pixbuf_new_from_data(
ValueError: data length (3156672) is less then required by the other parameters (3160608)
"""
FigureCanvas = FigureCanvasGTKAgg
FigureManager = FigureManagerGTKAgg
| mit |
schets/scikit-learn | sklearn/manifold/tests/test_locally_linear.py | 41 | 4827 | from itertools import product
from nose.tools import assert_true
import numpy as np
from numpy.testing import assert_almost_equal, assert_array_almost_equal
from scipy import linalg
from sklearn import neighbors, manifold
from sklearn.manifold.locally_linear import barycenter_kneighbors_graph
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import ignore_warnings
eigen_solvers = ['dense', 'arpack']
#----------------------------------------------------------------------
# Test utility routines
def test_barycenter_kneighbors_graph():
X = np.array([[0, 1], [1.01, 1.], [2, 0]])
A = barycenter_kneighbors_graph(X, 1)
assert_array_almost_equal(
A.toarray(),
[[0., 1., 0.],
[1., 0., 0.],
[0., 1., 0.]])
A = barycenter_kneighbors_graph(X, 2)
# check that columns sum to one
assert_array_almost_equal(np.sum(A.toarray(), 1), np.ones(3))
pred = np.dot(A.toarray(), X)
assert_less(linalg.norm(pred - X) / X.shape[0], 1)
#----------------------------------------------------------------------
# Test LLE by computing the reconstruction error on some manifolds.
def test_lle_simple_grid():
# note: ARPACK is numerically unstable, so this test will fail for
# some random seeds. We choose 2 because the tests pass.
rng = np.random.RandomState(2)
tol = 0.1
# grid of equidistant points in 2D, n_components = n_dim
X = np.array(list(product(range(5), repeat=2)))
X = X + 1e-10 * rng.uniform(size=X.shape)
n_components = 2
clf = manifold.LocallyLinearEmbedding(n_neighbors=5,
n_components=n_components,
random_state=rng)
tol = 0.1
N = barycenter_kneighbors_graph(X, clf.n_neighbors).toarray()
reconstruction_error = linalg.norm(np.dot(N, X) - X, 'fro')
assert_less(reconstruction_error, tol)
for solver in eigen_solvers:
clf.set_params(eigen_solver=solver)
clf.fit(X)
assert_true(clf.embedding_.shape[1] == n_components)
reconstruction_error = linalg.norm(
np.dot(N, clf.embedding_) - clf.embedding_, 'fro') ** 2
assert_less(reconstruction_error, tol)
assert_almost_equal(clf.reconstruction_error_,
reconstruction_error, decimal=1)
# re-embed a noisy version of X using the transform method
noise = rng.randn(*X.shape) / 100
X_reembedded = clf.transform(X + noise)
assert_less(linalg.norm(X_reembedded - clf.embedding_), tol)
def test_lle_manifold():
rng = np.random.RandomState(0)
# similar test on a slightly more complex manifold
X = np.array(list(product(np.arange(18), repeat=2)))
X = np.c_[X, X[:, 0] ** 2 / 18]
X = X + 1e-10 * rng.uniform(size=X.shape)
n_components = 2
for method in ["standard", "hessian", "modified", "ltsa"]:
clf = manifold.LocallyLinearEmbedding(n_neighbors=6,
n_components=n_components,
method=method, random_state=0)
tol = 1.5 if method == "standard" else 3
N = barycenter_kneighbors_graph(X, clf.n_neighbors).toarray()
reconstruction_error = linalg.norm(np.dot(N, X) - X)
assert_less(reconstruction_error, tol)
for solver in eigen_solvers:
clf.set_params(eigen_solver=solver)
clf.fit(X)
assert_true(clf.embedding_.shape[1] == n_components)
reconstruction_error = linalg.norm(
np.dot(N, clf.embedding_) - clf.embedding_, 'fro') ** 2
details = ("solver: %s, method: %s" % (solver, method))
assert_less(reconstruction_error, tol, msg=details)
assert_less(np.abs(clf.reconstruction_error_ -
reconstruction_error),
tol * reconstruction_error, msg=details)
def test_pipeline():
# check that LocallyLinearEmbedding works fine as a Pipeline
# only checks that no error is raised.
# TODO check that it actually does something useful
from sklearn import pipeline, datasets
X, y = datasets.make_blobs(random_state=0)
clf = pipeline.Pipeline(
[('filter', manifold.LocallyLinearEmbedding(random_state=0)),
('clf', neighbors.KNeighborsClassifier())])
clf.fit(X, y)
assert_less(.9, clf.score(X, y))
# Test the error raised when the weight matrix is singular
def test_singular_matrix():
from nose.tools import assert_raises
M = np.ones((10, 3))
f = ignore_warnings
assert_raises(ValueError, f(manifold.locally_linear_embedding),
M, 2, 1, method='standard', eigen_solver='arpack')
if __name__ == '__main__':
import nose
nose.runmodule()
| bsd-3-clause |
ryfeus/lambda-packs | Sklearn_scipy_numpy/source/sklearn/ensemble/tests/test_gradient_boosting_loss_functions.py | 221 | 5517 | """
Testing for the gradient boosting loss functions and initial estimators.
"""
import numpy as np
from numpy.testing import assert_array_equal
from numpy.testing import assert_almost_equal
from numpy.testing import assert_equal
from nose.tools import assert_raises
from sklearn.utils import check_random_state
from sklearn.ensemble.gradient_boosting import BinomialDeviance
from sklearn.ensemble.gradient_boosting import LogOddsEstimator
from sklearn.ensemble.gradient_boosting import LeastSquaresError
from sklearn.ensemble.gradient_boosting import RegressionLossFunction
from sklearn.ensemble.gradient_boosting import LOSS_FUNCTIONS
from sklearn.ensemble.gradient_boosting import _weighted_percentile
def test_binomial_deviance():
# Check binomial deviance loss.
# Check against alternative definitions in ESLII.
bd = BinomialDeviance(2)
# pred has the same BD for y in {0, 1}
assert_equal(bd(np.array([0.0]), np.array([0.0])),
bd(np.array([1.0]), np.array([0.0])))
assert_almost_equal(bd(np.array([1.0, 1.0, 1.0]),
np.array([100.0, 100.0, 100.0])),
0.0)
assert_almost_equal(bd(np.array([1.0, 0.0, 0.0]),
np.array([100.0, -100.0, -100.0])), 0)
# check if same results as alternative definition of deviance (from ESLII)
alt_dev = lambda y, pred: np.mean(np.logaddexp(0.0, -2.0 *
(2.0 * y - 1) * pred))
test_data = [(np.array([1.0, 1.0, 1.0]), np.array([100.0, 100.0, 100.0])),
(np.array([0.0, 0.0, 0.0]), np.array([100.0, 100.0, 100.0])),
(np.array([0.0, 0.0, 0.0]),
np.array([-100.0, -100.0, -100.0])),
(np.array([1.0, 1.0, 1.0]),
np.array([-100.0, -100.0, -100.0]))]
for datum in test_data:
assert_almost_equal(bd(*datum), alt_dev(*datum))
# check the gradient against the
alt_ng = lambda y, pred: (2 * y - 1) / (1 + np.exp(2 * (2 * y - 1) * pred))
for datum in test_data:
assert_almost_equal(bd.negative_gradient(*datum), alt_ng(*datum))
def test_log_odds_estimator():
# Check log odds estimator.
est = LogOddsEstimator()
assert_raises(ValueError, est.fit, None, np.array([1]))
est.fit(None, np.array([1.0, 0.0]))
assert_equal(est.prior, 0.0)
assert_array_equal(est.predict(np.array([[1.0], [1.0]])),
np.array([[0.0], [0.0]]))
def test_sample_weight_smoke():
rng = check_random_state(13)
y = rng.rand(100)
pred = rng.rand(100)
# least squares
loss = LeastSquaresError(1)
loss_wo_sw = loss(y, pred)
loss_w_sw = loss(y, pred, np.ones(pred.shape[0], dtype=np.float32))
assert_almost_equal(loss_wo_sw, loss_w_sw)
def test_sample_weight_init_estimators():
# Smoke test for init estimators with sample weights.
rng = check_random_state(13)
X = rng.rand(100, 2)
sample_weight = np.ones(100)
reg_y = rng.rand(100)
clf_y = rng.randint(0, 2, size=100)
for Loss in LOSS_FUNCTIONS.values():
if Loss is None:
continue
if issubclass(Loss, RegressionLossFunction):
k = 1
y = reg_y
else:
k = 2
y = clf_y
if Loss.is_multi_class:
# skip multiclass
continue
loss = Loss(k)
init_est = loss.init_estimator()
init_est.fit(X, y)
out = init_est.predict(X)
assert_equal(out.shape, (y.shape[0], 1))
sw_init_est = loss.init_estimator()
sw_init_est.fit(X, y, sample_weight=sample_weight)
sw_out = init_est.predict(X)
assert_equal(sw_out.shape, (y.shape[0], 1))
# check if predictions match
assert_array_equal(out, sw_out)
def test_weighted_percentile():
y = np.empty(102, dtype=np.float)
y[:50] = 0
y[-51:] = 2
y[-1] = 100000
y[50] = 1
sw = np.ones(102, dtype=np.float)
sw[-1] = 0.0
score = _weighted_percentile(y, sw, 50)
assert score == 1
def test_weighted_percentile_equal():
y = np.empty(102, dtype=np.float)
y.fill(0.0)
sw = np.ones(102, dtype=np.float)
sw[-1] = 0.0
score = _weighted_percentile(y, sw, 50)
assert score == 0
def test_weighted_percentile_zero_weight():
y = np.empty(102, dtype=np.float)
y.fill(1.0)
sw = np.ones(102, dtype=np.float)
sw.fill(0.0)
score = _weighted_percentile(y, sw, 50)
assert score == 1.0
def test_sample_weight_deviance():
# Test if deviance supports sample weights.
rng = check_random_state(13)
X = rng.rand(100, 2)
sample_weight = np.ones(100)
reg_y = rng.rand(100)
clf_y = rng.randint(0, 2, size=100)
mclf_y = rng.randint(0, 3, size=100)
for Loss in LOSS_FUNCTIONS.values():
if Loss is None:
continue
if issubclass(Loss, RegressionLossFunction):
k = 1
y = reg_y
p = reg_y
else:
k = 2
y = clf_y
p = clf_y
if Loss.is_multi_class:
k = 3
y = mclf_y
# one-hot encoding
p = np.zeros((y.shape[0], k), dtype=np.float64)
for i in range(k):
p[:, i] = y == i
loss = Loss(k)
deviance_w_w = loss(y, p, sample_weight)
deviance_wo_w = loss(y, p)
assert deviance_wo_w == deviance_w_w
| mit |
kashif/scikit-learn | sklearn/decomposition/tests/test_truncated_svd.py | 73 | 6086 | """Test truncated SVD transformer."""
import numpy as np
import scipy.sparse as sp
from sklearn.decomposition import TruncatedSVD
from sklearn.utils import check_random_state
from sklearn.utils.testing import (assert_array_almost_equal, assert_equal,
assert_raises, assert_greater,
assert_array_less)
# Make an X that looks somewhat like a small tf-idf matrix.
# XXX newer versions of SciPy have scipy.sparse.rand for this.
shape = 60, 55
n_samples, n_features = shape
rng = check_random_state(42)
X = rng.randint(-100, 20, np.product(shape)).reshape(shape)
X = sp.csr_matrix(np.maximum(X, 0), dtype=np.float64)
X.data[:] = 1 + np.log(X.data)
Xdense = X.A
def test_algorithms():
svd_a = TruncatedSVD(30, algorithm="arpack")
svd_r = TruncatedSVD(30, algorithm="randomized", random_state=42)
Xa = svd_a.fit_transform(X)[:, :6]
Xr = svd_r.fit_transform(X)[:, :6]
assert_array_almost_equal(Xa, Xr, decimal=5)
comp_a = np.abs(svd_a.components_)
comp_r = np.abs(svd_r.components_)
# All elements are equal, but some elements are more equal than others.
assert_array_almost_equal(comp_a[:9], comp_r[:9])
assert_array_almost_equal(comp_a[9:], comp_r[9:], decimal=2)
def test_attributes():
for n_components in (10, 25, 41):
tsvd = TruncatedSVD(n_components).fit(X)
assert_equal(tsvd.n_components, n_components)
assert_equal(tsvd.components_.shape, (n_components, n_features))
def test_too_many_components():
for algorithm in ["arpack", "randomized"]:
for n_components in (n_features, n_features + 1):
tsvd = TruncatedSVD(n_components=n_components, algorithm=algorithm)
assert_raises(ValueError, tsvd.fit, X)
def test_sparse_formats():
for fmt in ("array", "csr", "csc", "coo", "lil"):
Xfmt = Xdense if fmt == "dense" else getattr(X, "to" + fmt)()
tsvd = TruncatedSVD(n_components=11)
Xtrans = tsvd.fit_transform(Xfmt)
assert_equal(Xtrans.shape, (n_samples, 11))
Xtrans = tsvd.transform(Xfmt)
assert_equal(Xtrans.shape, (n_samples, 11))
def test_inverse_transform():
for algo in ("arpack", "randomized"):
# We need a lot of components for the reconstruction to be "almost
# equal" in all positions. XXX Test means or sums instead?
tsvd = TruncatedSVD(n_components=52, random_state=42)
Xt = tsvd.fit_transform(X)
Xinv = tsvd.inverse_transform(Xt)
assert_array_almost_equal(Xinv, Xdense, decimal=1)
def test_integers():
Xint = X.astype(np.int64)
tsvd = TruncatedSVD(n_components=6)
Xtrans = tsvd.fit_transform(Xint)
assert_equal(Xtrans.shape, (n_samples, tsvd.n_components))
def test_explained_variance():
# Test sparse data
svd_a_10_sp = TruncatedSVD(10, algorithm="arpack")
svd_r_10_sp = TruncatedSVD(10, algorithm="randomized", random_state=42)
svd_a_20_sp = TruncatedSVD(20, algorithm="arpack")
svd_r_20_sp = TruncatedSVD(20, algorithm="randomized", random_state=42)
X_trans_a_10_sp = svd_a_10_sp.fit_transform(X)
X_trans_r_10_sp = svd_r_10_sp.fit_transform(X)
X_trans_a_20_sp = svd_a_20_sp.fit_transform(X)
X_trans_r_20_sp = svd_r_20_sp.fit_transform(X)
# Test dense data
svd_a_10_de = TruncatedSVD(10, algorithm="arpack")
svd_r_10_de = TruncatedSVD(10, algorithm="randomized", random_state=42)
svd_a_20_de = TruncatedSVD(20, algorithm="arpack")
svd_r_20_de = TruncatedSVD(20, algorithm="randomized", random_state=42)
X_trans_a_10_de = svd_a_10_de.fit_transform(X.toarray())
X_trans_r_10_de = svd_r_10_de.fit_transform(X.toarray())
X_trans_a_20_de = svd_a_20_de.fit_transform(X.toarray())
X_trans_r_20_de = svd_r_20_de.fit_transform(X.toarray())
# helper arrays for tests below
svds = (svd_a_10_sp, svd_r_10_sp, svd_a_20_sp, svd_r_20_sp, svd_a_10_de,
svd_r_10_de, svd_a_20_de, svd_r_20_de)
svds_trans = (
(svd_a_10_sp, X_trans_a_10_sp),
(svd_r_10_sp, X_trans_r_10_sp),
(svd_a_20_sp, X_trans_a_20_sp),
(svd_r_20_sp, X_trans_r_20_sp),
(svd_a_10_de, X_trans_a_10_de),
(svd_r_10_de, X_trans_r_10_de),
(svd_a_20_de, X_trans_a_20_de),
(svd_r_20_de, X_trans_r_20_de),
)
svds_10_v_20 = (
(svd_a_10_sp, svd_a_20_sp),
(svd_r_10_sp, svd_r_20_sp),
(svd_a_10_de, svd_a_20_de),
(svd_r_10_de, svd_r_20_de),
)
svds_sparse_v_dense = (
(svd_a_10_sp, svd_a_10_de),
(svd_a_20_sp, svd_a_20_de),
(svd_r_10_sp, svd_r_10_de),
(svd_r_20_sp, svd_r_20_de),
)
# Assert the 1st component is equal
for svd_10, svd_20 in svds_10_v_20:
assert_array_almost_equal(
svd_10.explained_variance_ratio_,
svd_20.explained_variance_ratio_[:10],
decimal=5,
)
# Assert that 20 components has higher explained variance than 10
for svd_10, svd_20 in svds_10_v_20:
assert_greater(
svd_20.explained_variance_ratio_.sum(),
svd_10.explained_variance_ratio_.sum(),
)
# Assert that all the values are greater than 0
for svd in svds:
assert_array_less(0.0, svd.explained_variance_ratio_)
# Assert that total explained variance is less than 1
for svd in svds:
assert_array_less(svd.explained_variance_ratio_.sum(), 1.0)
# Compare sparse vs. dense
for svd_sparse, svd_dense in svds_sparse_v_dense:
assert_array_almost_equal(svd_sparse.explained_variance_ratio_,
svd_dense.explained_variance_ratio_)
# Test that explained_variance is correct
for svd, transformed in svds_trans:
total_variance = np.var(X.toarray(), axis=0).sum()
variances = np.var(transformed, axis=0)
true_explained_variance_ratio = variances / total_variance
assert_array_almost_equal(
svd.explained_variance_ratio_,
true_explained_variance_ratio,
)
| bsd-3-clause |
smartscheduling/scikit-learn-categorical-tree | examples/svm/plot_svm_kernels.py | 329 | 1971 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
SVM-Kernels
=========================================================
Three different types of SVM-Kernels are displayed below.
The polynomial and RBF are especially useful when the
data-points are not linearly separable.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
# Our dataset and targets
X = np.c_[(.4, -.7),
(-1.5, -1),
(-1.4, -.9),
(-1.3, -1.2),
(-1.1, -.2),
(-1.2, -.4),
(-.5, 1.2),
(-1.5, 2.1),
(1, 1),
# --
(1.3, .8),
(1.2, .5),
(.2, -2),
(.5, -2.4),
(.2, -2.3),
(0, -2.7),
(1.3, 2.1)].T
Y = [0] * 8 + [1] * 8
# figure number
fignum = 1
# fit the model
for kernel in ('linear', 'poly', 'rbf'):
clf = svm.SVC(kernel=kernel, gamma=2)
clf.fit(X, Y)
# plot the line, the points, and the nearest vectors to the plane
plt.figure(fignum, figsize=(4, 3))
plt.clf()
plt.scatter(clf.support_vectors_[:, 0], clf.support_vectors_[:, 1], s=80,
facecolors='none', zorder=10)
plt.scatter(X[:, 0], X[:, 1], c=Y, zorder=10, cmap=plt.cm.Paired)
plt.axis('tight')
x_min = -3
x_max = 3
y_min = -3
y_max = 3
XX, YY = np.mgrid[x_min:x_max:200j, y_min:y_max:200j]
Z = clf.decision_function(np.c_[XX.ravel(), YY.ravel()])
# Put the result into a color plot
Z = Z.reshape(XX.shape)
plt.figure(fignum, figsize=(4, 3))
plt.pcolormesh(XX, YY, Z > 0, cmap=plt.cm.Paired)
plt.contour(XX, YY, Z, colors=['k', 'k', 'k'], linestyles=['--', '-', '--'],
levels=[-.5, 0, .5])
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.xticks(())
plt.yticks(())
fignum = fignum + 1
plt.show()
| bsd-3-clause |
bmazin/ARCONS-pipeline | fluxcal/test/testFluxCal.py | 1 | 1609 | from fluxcal.fluxCal import FluxCal
from util.ObsFile import ObsFile
from util.FileName import FileName
import matplotlib.pyplot as plt
import numpy as np
import os, sys
from matplotlib.backends.backend_pdf import PdfPages
"""
Created 2/7/2013 by Seth Meeker

Routine for testing the generation of FluxCal files.

Usage: > python testFluxCal.py paramfilename

Example parameter files are included in fluxcal/test for all of the Pal2012 standards.
Outputs the fluxCalSoln h5 file and a .pdf of the debug plots; set plots=False to turn this off.
"""
def main():
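    # The parameter file is parsed line-by-line below: ten "key = value" entries in
    # this exact order (only the text after '=' is used). A made-up illustration of
    # the expected layout (the real examples live in fluxcal/test):
    #     datadir = /ScienceData/PAL2012/20121211
    #     flatdir = /Intermediate/flatCalSolnFiles
    #     wvldir = /Intermediate/waveCalSolnFiles
    #     fluxfile = obs_flux_standard.h5
    #     skyfile = obs_sky.h5
    #     flatfile = flatsol_20121211.h5
    #     wvlfile = calsol_20121211.h5
    #     fluxdir = /Intermediate/fluxCalSolnFiles
    #     fluxoutfile = fluxsol_20121211.h5
    #     objectName = G158-100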
params = []
paramfile = sys.argv[1]
f = open(paramfile,'r')
for line in f:
params.append(line)
f.close()
datadir = params[0].split('=')[1].strip()
flatdir = params[1].split('=')[1].strip()
wvldir = params[2].split('=')[1].strip()
fluxfile = params[3].split('=')[1].strip()
skyfile = params[4].split('=')[1].strip()
flatfile = params[5].split('=')[1].strip()
wvlfile = params[6].split('=')[1].strip()
fluxdir = params[7].split('=')[1].strip()
fluxoutfile = params[8].split('=')[1].strip()
objectName = params[9].split('=')[1].strip()
fluxFileName = os.path.join(datadir, fluxfile)
skyFileName = os.path.join(datadir, skyfile)
wvlCalFileName = os.path.join(wvldir, wvlfile)
flatCalFileName = os.path.join(flatdir, flatfile)
fluxCalFileName = os.path.join(fluxdir, fluxoutfile)
print objectName
fc = FluxCal(fluxFileName, skyFileName, wvlCalFileName,flatCalFileName, objectName, fluxCalFileName, plots=True)
if __name__ == '__main__':
main()
| gpl-2.0 |
bd-j/sedpy | sedpy/boneyard/plotsed.py | 1 | 2519 | # This code is an unfinished attempt to plot SEDs as probability
# distributions p(flux, wave)
import numpy as np
class Bunch(object):
""" Simple storage.
"""
def __init__(self, **kwargs):
self.__dict__.update(kwargs)
class SedPoint(object):
def __init__(self, flux=0., unc=0., band=None):
self.flux = flux
self.unc = unc
self.band = band
@property
def var(self):
return self.unc**2
@property
def pnorm(self):
return (self.var * 2 * np.pi)**(-0.5)
@property
def wave_min(self):
return self.band.wave_effective - self.band.effective_width/2.
@property
def wave_max(self):
return self.band.wave_effective + self.band.effective_width/2.
def flux_lnprob(self, fluxes):
fnp = (np.array(fluxes) - self.flux)**2 / (2 * self.var)
return np.log(self.pnorm) - fnp
def spans(self, wavelengths):
waves = np.array(wavelengths)
return (waves < self.wave_max) & (waves > self.wave_min)
def sed_prob(self, wavelength_grid, flux_grid):
return self.spans(wavelength_grid), self.flux_lnprob(flux_grid)
def sed_to_psed(filters, fluxes, uncs, wgrid, fgrid):
sed = []
lnpsed = np.zeros(( len(wgrid), len(fgrid)))
for filt, flux, unc in zip(filters, fluxes, uncs):
sed_point = SedPoint(flux, unc, filt)
winds, lnprob = sed_point.sed_prob(wgrid, fgrid)
lnpsed[winds,:] += lnprob[None, :]
sed += [sed_point]
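    # Normalize each wavelength row to integrate to 1 over the flux grid, then rescale
    # so a flat distribution over the flux range has density 1 (i.e. express
    # p(flux | wave) relative to a uniform distribution).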
lnpsed -= np.log(np.trapz(np.exp(lnpsed), fgrid, axis = 1))[:, None]
lnpsed += np.log(np.trapz(np.ones(len(fgrid)), fgrid))
return lnpsed, sed
def test():
from sedpy import observate
import fsps
import matplotlib.pyplot as pl
filters = ['galex_NUV', 'sdss_u0', 'sdss_r0', 'sdss_r0', 'sdss_i0', 'sdss_z0',
'bessell_U', 'bessell_B', 'bessell_V', 'bessell_R', 'bessell_I',
'twomass_J','twomass_H']
flist = observate.load_filters(filters)
sps = fsps.StellarPopulation(compute_vega_mags=False)
wave, spec = sps.get_spectrum(tage=1.0, zmet=2, peraa=True)
sed = observate.getSED(wave, spec, flist)
sed_unc = np.abs(np.random.normal(1, 0.3, len(sed)))
wgrid = np.linspace( 2e3, 13e3, 1000)
fgrid = np.linspace( -13, -9, 100)
psed, sedpoints = sed_to_psed(flist, sed, sed_unc, wgrid, fgrid)
pl.imshow(np.exp(psed).T, cmap='Greys_r',
interpolation='nearest', origin ='upper', aspect='auto')
| gpl-2.0 |
henridwyer/scikit-learn | examples/ensemble/plot_gradient_boosting_regularization.py | 355 | 2843 | """
================================
Gradient Boosting regularization
================================
Illustration of the effect of different regularization strategies
for Gradient Boosting. The example is taken from Hastie et al 2009.
The loss function used is binomial deviance. Regularization via
shrinkage (``learning_rate < 1.0``) improves performance considerably.
In combination with shrinkage, stochastic gradient boosting
(``subsample < 1.0``) can produce more accurate models by reducing the
variance via bagging.
Subsampling without shrinkage usually does poorly.
Another strategy to reduce the variance is by subsampling the features
analogous to the random splits in Random Forests
(via the ``max_features`` parameter).
.. [1] T. Hastie, R. Tibshirani and J. Friedman, "Elements of Statistical
Learning Ed. 2", Springer, 2009.
"""
print(__doc__)
# Author: Peter Prettenhofer <[email protected]>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import ensemble
from sklearn import datasets
X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
X = X.astype(np.float32)
# map labels from {-1, 1} to {0, 1}
labels, y = np.unique(y, return_inverse=True)
X_train, X_test = X[:2000], X[2000:]
y_train, y_test = y[:2000], y[2000:]
original_params = {'n_estimators': 1000, 'max_leaf_nodes': 4, 'max_depth': None, 'random_state': 2,
'min_samples_split': 5}
plt.figure()
for label, color, setting in [('No shrinkage', 'orange',
{'learning_rate': 1.0, 'subsample': 1.0}),
('learning_rate=0.1', 'turquoise',
{'learning_rate': 0.1, 'subsample': 1.0}),
('subsample=0.5', 'blue',
{'learning_rate': 1.0, 'subsample': 0.5}),
('learning_rate=0.1, subsample=0.5', 'gray',
{'learning_rate': 0.1, 'subsample': 0.5}),
('learning_rate=0.1, max_features=2', 'magenta',
{'learning_rate': 0.1, 'max_features': 2})]:
params = dict(original_params)
params.update(setting)
clf = ensemble.GradientBoostingClassifier(**params)
clf.fit(X_train, y_train)
# compute test set deviance
test_deviance = np.zeros((params['n_estimators'],), dtype=np.float64)
for i, y_pred in enumerate(clf.staged_decision_function(X_test)):
# clf.loss_ assumes that y_test[i] in {0, 1}
test_deviance[i] = clf.loss_(y_test, y_pred)
plt.plot((np.arange(test_deviance.shape[0]) + 1)[::5], test_deviance[::5],
'-', color=color, label=label)
plt.legend(loc='upper left')
plt.xlabel('Boosting Iterations')
plt.ylabel('Test Set Deviance')
plt.show()
| bsd-3-clause |
cjayb/kingjr_natmeg_arhus | JR_toolbox/skl_king_parallel_gs2.py | 2 | 14960 |
print("######################################################################")
print("# Parallel n-split k-stratified-fold continuous SVM Scikitlearn MVPA #")
print("# (c) Jean-Remi King 2012, jeanremi.king [at] gmail [dot] com #")
print("######################################################################")
# Implementation of a multivariate pattern analysis based on the scikit-learn
# toolbox (http://scikit-learn.org/stable/). It reads two .mat files
# (filenameX, filenamey) created by 'jr_classify.m'
#
# Function:
# skl_king_parallel.py filenameX filenamey [number_of_cores]
#
# Inputs:
# in filenameX:
# Xm: samples x features x classification matrix (e.g. trials x
# chans x time)
# in filenamey:
# y: vector indicating the class of each sample. Negative values
# will be used for generalization only. 0 indicates to-be-
# ignored samples.
# y2: cost/weights applied on each sample
# path: export directory
# nameX: export filename X
# namey: export filename y
# folding: type of folding (e.g. stratified)
# n_splits: number of splits
# n_folds: number of folds
# C: SVM penalization parameter
# compute_probas: compute logit fit
# compute_predict: compute traditional SVM
# fs_n: number of univariate features selected for classification
# dims: classification performed on dims dimensions
# dims_tg: classification generalized on dims_tg dimensions
#
# Outputs:
# predict: prediction matrix (split x samples x dims x dimsg)
# predictg: same as predict for generalized samples
# probas: probas matrix (split x samples x dims x dimsg x class)
# probasg: same as probas for generalized samples
# coef: weight hyperplane vector
# all_folds: folding report (split x fold x samples)
# y_all: original y
# y: training y
# yg: generalized y
# filenameX:
# filenamey:
#
# Results are reported in: path + nameX + '_' + namey + "_results.mat"
###############################################################################
# (c) Jean-Remi King: jeanremi.king [at] gmail [dot] com
###############################################################################
# update 2013 01 03: input binary format
# update 2012 12 20: remove np.copy, add compute distance
# update 2012 11 29: fix 3rd dimension issue
# update 2012 11 13: fix bug str output on some python versions
# update 2012 11 02: change stratified kfolding y by y2
# update 2012 11 02: add np.copy to Xtrain and Xtest
# update 2012 11 01: correct feature selection coef bug when at 100 %
# update 2012 10 23: correct leaveoneout bug
# update 2012 10 23: correct major n_split new_order error
# update 2012 10 18: correct python/matlab dim incompatibility
# update 2012 10 18: correct error fs between 99 and 100 && remove Kbest
# update 2012 10 17: correct error n_features shape and add nice
# update 2012 10 01: correct prediction error+change loading results option
# update 2012 09 14: handle fs float error
# update 2012 09 14: pass n_cores to sys.arg
# version 2012 09 13: implementation of parallelization
###############################################################################
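
# Hypothetical helper (not used by this script): a sketch of how the two input
# files read below could be produced from Python instead of jr_classify.m. Field
# names follow the loading code further down; the shapes, values and filenames
# are illustrative assumptions only.
def _example_write_inputs(X, y, filenameX='example_X.dat', filenamey='example_y.mat'):
    """X: samples x features x time (float64); y: >0 train classes, <0 generalization only, 0 ignored."""
    import numpy as _np
    import scipy.io as _sio
    n_samples, n_features, n_time = X.shape
    # the binary X file is laid out time x features x samples, matching the
    # fromfile/reshape/transpose sequence used below
    X.astype(_np.float64).transpose([2, 1, 0]).tofile(filenameX)
    _sio.savemat(filenamey, {
        'Xdims': _np.array([n_samples, n_features, n_time]),
        'y': y,
        'y2': y,  # classes/weights used for stratified folding
        'dims': _np.arange(1, n_time + 1),  # 1-based, converted to 0-based below
        'dims_tg': _np.tile(_np.arange(1, n_time + 1), (n_time, 1)),
        'path': './', 'nameX': 'example_X', 'namey': 'example_y',
        'folding': 'stratified', 'n_splits': 1, 'n_folds': 5,
        'C': [1.0, 10.0],  # grid of SVM penalties explored by the grid search
        'compute_probas': True, 'compute_predict': False, 'compute_distance': False,
        'fs': 0.99,  # fraction of features kept by univariate selection
    })
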
print("LIBRARY")
import sys as sys
import numpy as np
from scipy import stats
from sklearn import svm
from sklearn.cross_validation import StratifiedKFold, LeaveOneOut, KFold
from sklearn.feature_selection import SelectPercentile, SelectKBest, f_classif
from sklearn.externals.joblib import Parallel, delayed
import scipy.io as sio
from sklearn.preprocessing import Scaler
from sklearn.grid_search import GridSearchCV
from sklearn.metrics import precision_score
###############################################################################
print("INPUT DATA")
#-- get argument to load specific file
filenameX = str(sys.argv[1])
filenamey = str(sys.argv[2])
if len(sys.argv) <= 3:
n_cores = -1
else:
n_cores = int(sys.argv[3])
print("cores: " + str(n_cores))
print(filenameX)
print(filenamey)
#-- load data dimension
mat = sio.loadmat(filenamey)
Xdims = mat["Xdims"].reshape(3)
#-- Load binary data into python
Xm_all = np.fromfile(filenameX, dtype=np.float64)
Xm_all = Xm_all.reshape(Xdims[2], Xdims[1], Xdims[0]).transpose([2, 1, 0])
#-- load classification parameters
mat = sio.loadmat(filenamey)
dims = mat["dims"] # select time windows to compute
dims = np.reshape(dims, dims.size) - 1 # reshape for skl compatibility
dims_tg = mat["dims_tg"] - 1
mat = sio.loadmat(filenamey, squeeze_me=True)
path = mat["path"]
nameX = mat["nameX"]
namey = mat["namey"]
folding = mat["folding"]
n_splits = mat["n_splits"]
n_folds = mat["n_folds"] # fold number
svm_C = mat["C"] # svm penalization parameter
compute_probas = mat["compute_probas"]
compute_predict = mat["compute_predict"]
compute_distance = mat["compute_distance"]
fs_n = mat["fs"] # feature selection
y_all = mat["y"] # class used for train and test
print(Xm_all.shape)
print(y_all.shape)
y2_all = mat["y2"] # class used for sample weights
#-- build training and generalizing classes
Xm = Xm_all[y_all > 0, :, :] # training categories
Xmg = Xm_all[y_all < 0, :, :] # generalization categories
y = y_all[y_all > 0]
yg = y_all[y_all < 0]
y2 = y2_all[y_all > 0]
n_samples, n_features, unused = Xm.shape
n_samplesg, unused, unused = Xmg.shape
n_featuresg = n_features
n_dims = dims.shape[0]
n_dimsg = n_dims
n_dims_tg = dims_tg.shape[1]
n_dimsg_tg = dims_tg.shape[1]
n_classes = np.unique(y).shape[0]
#deal with sample_weight
sample_weight = np.ones(y.shape[0])
classes = np.unique(y2)
for c in range(classes.shape[0]):
sample_weight[y2 == classes[c]] = 1. / (np.sum(y2 == classes[c]))
###############################################################################
print("PREPARE CLASSIFICATION")
#-- classifier
clf = GridSearchCV(svm.SVC(kernel='linear', probability=True),
{'C': svm_C}, score_func=precision_score)
#-- normalizer
scaler = Scaler()
#-- feature selection
if fs_n > 1:
fs = SelectKBest(f_classif, k=fs_n)
elif fs_n == -1:
fs = SelectKBest(f_classif, k=1)
else:
fs = SelectPercentile(f_classif, percentile=fs_n * 100)
#-- results initialization
if compute_predict:
predict = np.zeros([n_splits, n_samples, n_dims, n_dims_tg]) ** np.nan
predictg = np.zeros([n_splits, n_samplesg, n_dimsg, n_dimsg_tg, n_folds]) ** np.nan
else:
predict = []
predictg = []
if compute_probas:
probas = np.zeros([n_splits, n_samples, n_dims, n_dims_tg, n_classes]) ** np.nan
probasg = np.zeros([n_splits, n_samplesg, n_dimsg, n_dimsg_tg, n_classes, n_folds]) ** np.nan
else:
probas = []
probasg = []
if compute_distance:
distance = np.zeros([n_splits, n_samples, n_dims, n_dims_tg, n_classes]) ** np.nan
distanceg = np.zeros([n_splits, n_samplesg, n_dimsg, n_dimsg_tg, n_classes, n_folds]) ** np.nan
else:
distance = []
distanceg = []
coef = np.empty([n_splits, n_folds, n_dims, n_classes * (n_classes - 1) / 2, n_features]) ** 0
all_folds = np.zeros([n_splits, n_folds, n_samples]) ** np.nan
###############################################################################
#-- Define parallel cross validation
def my_pipeline(train, test,
Xm_shfl, y_shfl, sw_shfl, Xmg,
dims, fs, scaler, clf,
n_samples, n_dims, n_dims_tg, n_classes):
# indicate opened fold
sys.stdout.write("<")
sys.stdout.flush()
# initialize results within a given fold
if compute_predict:
predict = np.zeros([n_samples, n_dims, n_dims_tg]) ** np.nan
predictg = np.zeros([n_samplesg, n_dimsg, n_dimsg_tg]) ** np.nan
else:
predict = []
predictg = []
if compute_probas:
probas = np.zeros([n_samples, n_dims, n_dims_tg, n_classes]) ** np.nan
probasg = np.zeros([n_samplesg, n_dimsg, n_dimsg_tg, n_classes]) ** np.nan
else:
probas = []
probasg = []
if compute_distance:
distance = np.zeros([n_samples, n_dims, n_dims_tg, n_classes]) ** np.nan
distanceg = np.zeros([n_samplesg, n_dimsg, n_dimsg_tg, n_classes]) ** np.nan
else:
distance = []
distanceg = []
coef = np.empty([n_dims, n_classes * (n_classes - 1) / 2, n_features]) ** 0
# apply different classification along dimension 0
for d in range(0, dims.shape[0]):
Xtrain = Xm_shfl[train, :, dims[d]]
ytrain = y_shfl[train]
sw_train = sw_shfl[train]
# (deal with NaN samples in training)
ytrain = ytrain[~np.isnan(np.nansum(Xtrain, axis=1))]
sw_train = sw_train[~np.isnan(np.nansum(Xtrain, axis=1))]
Xtrain = Xtrain[~np.isnan(np.nansum(Xtrain, axis=1)), :]
if np.unique(ytrain).shape[0] > 1:
# feature selection
fs.fit(Xtrain, ytrain)
Xtrain = fs.transform(Xtrain)
# normalization
scaler.fit(Xtrain)
Xtrain = scaler.transform(Xtrain)
# Grid search
clf.fit(Xtrain, ytrain, sample_weight=sw_train, cv=5)
# select best classifier
_clf = clf.best_estimator_
# print(clf.best_params_)
# retrieve features selected during univariate selection
if fs_n == -1 or (fs_n > 1):
# uni_features = sorted(range(len(fs.pvalues_)),key=lambda x:fs.pvalues_[x])
uni_features = range(0, _clf.coef_.shape[1])
else:
uni_features = fs.pvalues_ <= stats.scoreatpercentile(fs.pvalues_, fs.percentile)
# retrieve hyperplan (unselected features as 0)
coef[d, :, uni_features] = scaler.inverse_transform(_clf.coef_).T
# generalize across all time points
for d_tg in range(0, n_dims_tg):
# select data
Xtest = Xm_shfl[test, :, dims_tg[d, d_tg]]
# handles NaNs
test_nan = np.isnan(np.nansum(Xtest, axis=1))
Xtest = Xtest[~test_nan, :]
# feature selection from training
Xtest = fs.transform(Xtest)
# normalize from training
Xtest = scaler.transform(Xtest)
# generalize test samples
if (Xtest.shape[0] - np.sum(test_nan)) > 0:
if compute_predict:
predict[test[~test_nan], d, d_tg] = _clf.predict(Xtest)
if compute_probas:
probas[test[~test_nan], d, d_tg, :] = _clf.predict_proba(Xtest)
if compute_distance:
distance[test[~test_nan], d, d_tg, :] = _clf.decision_function(Xtest) # correct!
# predict on generalization sample
# select data
Xtestg = Xmg[:, :, dims_tg[d, d_tg]]
# handles NaNs
test_nan = np.isnan(np.nansum(Xtestg, axis=1))
if (Xtestg.shape[0] - np.sum(test_nan)) > 0:
Xtestg = Xtestg[~test_nan, :]
# preproc feature selection and normalization
Xtestg = fs.transform(Xtestg)
Xtestg = scaler.transform(Xtestg)
# compute prediction
if compute_predict:
predictg[~test_nan, d, d_tg] = clf.predict(Xtestg)
if compute_probas:
probasg[~test_nan, d, d_tg, :] = clf.predict_proba(Xtestg)
if compute_distance:
distanceg[~test_nan, d, d_tg, :] = clf.decision_function(Xtestg) # correct!
# summarize fold results
out = {
'coef': coef,
'predict': predict,
'predictg': predictg,
'probas': probas,
'probasg': probasg,
'distance': distance,
'distanceg': distanceg}
# indicate end of fold
sys.stdout.write(">")
sys.stdout.flush()
return out
###############################################################################
print("CLASSIFY")
#-- Shuffle split
for split in range(n_splits):
print("split " + str(split))
#-- shuffle order in case this is not the first split
new_order = np.array(range(y.shape[0]))
if split > 0:
np.random.shuffle(new_order)
y_shfl = y
y_shfl = y_shfl[new_order]
y2_shfl = y2
y2_shfl = y2_shfl[new_order]
Xm_shfl = Xm
Xm_shfl = Xm_shfl[new_order, :, :]
sw_shfl = sample_weight
sw_shfl = sw_shfl[new_order]
else:
y_shfl = y
y2_shfl = y2
Xm_shfl = Xm
sw_shfl = sample_weight
#-- define crossvalidation
if folding == 'stratified':
cv = StratifiedKFold(y2_shfl, k=n_folds)
elif folding == 'kfolding':
cv = KFold(n=y2_shfl.shape[0], k=n_folds)
elif folding == 'leaveoneout':
n_folds = y_shfl.shape[0]
cv = LeaveOneOut(n=y_shfl.shape[0])
else:
print("unknown crossvalidation method!")
# Cross-validation computed in parallel
out = Parallel(n_jobs=n_cores)(delayed(my_pipeline)(
train=train,
test=test,
Xm_shfl=Xm_shfl,
y_shfl=y_shfl,
sw_shfl=sw_shfl,
Xmg=Xmg,
dims=dims,
fs=fs,
scaler=scaler,
clf=clf,
n_samples=n_samples,
n_dims=n_dims,
n_dims_tg=n_dims_tg,
n_classes=n_classes) for fold, (train, test) in enumerate(cv))
# reorder results folds and splits
for fold, (train, test) in enumerate(cv):
all_folds[split, fold, train] = 1
all_folds[split, fold, test] = 0
coef[split, fold, :, :, :] = out[fold]['coef']
if compute_predict:
predict[split, new_order[test], :, :] = out[fold]['predict'][test, :, :]
predictg[split, :, :, :, fold] = out[fold]['predictg']
if compute_probas:
probas[split, new_order[test], :, :, :] = out[fold]['probas'][test, :, :, :]
probasg[split, :, :, :, :, fold] = out[fold]['probasg']
if compute_distance:
distance[split, new_order[test], :, :, :] = out[fold]['distance'][test, :, :, :]
distanceg[split, :, :, :, :, fold] = out[fold]['distanceg']
all_folds[split, :, new_order] = all_folds[split, :, :].T
###############################################################################
print("EXPORT DATA")
mat['predict'] = predict
mat['predictg'] = predictg
mat['probas'] = probas
mat['probasg'] = probasg
mat['distance'] = distance
mat['distanceg'] = distanceg
mat['coef'] = coef
mat['all_folds'] = all_folds
mat['y_all'] = y_all
mat['y'] = y
mat['yg'] = yg
mat['filenameX'] = filenameX
mat['filenamey'] = filenamey
print(nameX)
print(namey)
print(path)
output = str(path) + str(nameX) + '_' + str(namey) + "_results.mat"
print(output)
sio.savemat(output, mat)
| bsd-3-clause |
zmr/namsel | experimental/formatter.py | 1 | 19420 | # encoding: utf-8
import cPickle as pickle
import numpy as np
from sklearn.mixture import GMM
from collections import Counter
from scipy.stats import mode as statsmode
from yik import alphabet
from sklearn.mixture.dpgmm import DPGMM
import json
import zlib
import re
import sys
import webbrowser
import multiprocessing
alphabet = set(alphabet)
def num_stacks(chars):
return len(alphabet.intersection(chars))
def get_gmm_for_stack(vals, thresh=10, num_sizes=2):
''' Assume no more than num_sizes font sizes present... '''
if len(vals) > 30:
# vals = [v for v in vals if vals.count(v) > 2]
# counts = Counter(vals)
# vals = [v for v in counts if counts[v] > 2]
n_components = num_sizes
elif len(vals) == 1:
gmm = GMM()
gmm.means_ = np.array(vals).reshape((1,1))
# gmm.labels_ = np.array([0])
return gmm
else:
n_components = 1
    gmm = GMM(n_components=n_components)  # use the number of components chosen above
gmm.fit(np.array(vals).reshape(len(vals), 1))
return gmm
# while True:
# gmm = GMM(n_components=n_components)
# try:
# gmm.fit(vals)
# except:
# print vals, n_components
# raise
# # gmm.labels_ = np.argsort(gmm.means_)
# means = list(gmm.means_.copy().flatten())
# means.sort()
# if n_components > 1:
# for i, m in enumerate(means[:-1]):
# if means[i+1] - m < thresh or not gmm.converged_:
#
# n_components -= 1
# break
# else:
# return gmm
# elif n_components == 1:
# return gmm
def big_or_small(gmm, width, stack_mean):
order = np.argsort(gmm.means_.copy().flatten())
# print order
# if len(order) == 1:
# if width - stack_mean > 10:
# return 'b'
# else:
# return 's' #... and what about medium?
#FIXME: add setting for medium?
# label = gmm.predict([width])
label = np.argmax(gmm.predict_proba([width]))
# print
# print width, gmm.means_.flatten(), gmm.predict_proba([width]), label, order[label]
pos = order[label]
# print label, 'LABEL --------<<<'
# for k, ix in enumerate(order):
# # ixlabel = gmm.labels_[ix]
# if ix == label:
# if k == 0:
# return 's'
# elif k == 1:
# if len(order) == 2:
# return 'b'
# else:
# return 'm'
# else:
# return 'b'
if pos == 0:
return 's'
elif pos == 1:
if len(order) == 2:
return 'b'
else:
return 'm'
else:
return 'b'
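# (added note) big_or_small is intended to label a glyph width by the rank of
# its most likely mixture component among the fitted component means:
# smallest mean -> 's' (small), middle -> 'm', largest -> 'b' (big).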
def granular_smooth(all_lines):
'''this is broken'''
second_pass = []
punc = set(u'་། ')
def reset_chars(s):
r = set(u' )\t')
if s in r:
reset = True
else:
reset = False
return reset
all_lines = zip(range(len(all_lines)), all_lines)
all_lines = map(list, all_lines)
spass_items = []
just_punc = []
for i in all_lines:
if i[1] not in u'་།':
spass_items.append(i)
else:
just_punc.append(i)
prev_size = all_lines[0][1]
## Iterate through all conditions that would be reason to smooth over
## the size of a stack using the previous (and next) size
for i, s in enumerate(spass_items[:-1]):
prev = [k[1] for k in spass_items[max(0,i-2):i]]
nxt = [k[1] for k in spass_items[i+1:min(i+3, len(all_lines))]]
if prev:
if prev[-1] in ' )':
reset_before_change_b = True
elif prev[-1] != s[1]:
reset_before_change_b = False
else:
reset_before_change_b = True
if nxt:
if nxt[0] == ' ':
reset_before_change_f = True
elif nxt[0] != s[1] :
reset_before_change_f = False
else:
reset_before_change_f = True
if s[1] in punc:
s[1] = prev_size
elif not reset_before_change_b and not reset_before_change_f:
s[1] = prev[-1]
second_pass.append(s)
prev_size = s[1]
s = all_lines[-1]
if s[1] in punc:
s[1] = prev_size
elif s[1] != spass_items[-2][1] and spass_items[-2][1] not in u' )':
s[1] = spass_items[-2][1]
second_pass.append(s)
second_pass.extend(just_punc)
second_pass.sort()
final_pass = []
for i in second_pass:
if i[1] in punc:
final_pass.append(prev_size)
else:
final_pass.append(i[1])
prev_size = i[1]
# if all_lines[-2] not in u' )':
# for j in range(3): # look ahead
# fut_char = all_lines[i+j+1]
# prev_char = all_lines[i-j-1]
# # if fut_char in u'་།' or fut_char != ' ':
# # reset = False
# # else:
# # reset = True
# if prev_char in u'་།' or prev_char != ' ':
# reset = False
# else:
# reset = True
#
# if i + 2 < len(all_lines):
# if s in punc:
# s = prev_size
# reset = reset_chars(s)
# # There was a change, then punc, then change back --> smooth over
# elif not reset and prev_size != s and (all_lines[i+1] in punc and all_lines[i+1] != ' ' and all_lines[i+2] != s) :
# s = prev_size
# reset = reset_chars(s)
# # There was a change, then change back --> smooth over
# elif not reset and prev_size != s and all_lines[i+1] != s :
# s = prev_size
# reset = reset_chars(s)
#
# elif i + 1 < len(all_lines):
# if s in punc:
# s = prev_size
# reset = reset_chars(s)
# # There was a change and then abrupt change back --> smooth over
# elif not reset and prev_size != s and all_lines[i+1] != s :
# s = prev_size
# reset = reset_chars(s)
# elif i == len(all_lines) - 1:
# if s != prev_size and not reset:
# s = prev_size
# reset = reset_chars(s)
# if s not in punc:
# prev_size = s
# second_pass.append(s)
return final_pass
def line_smooth(all_lines):
smoothed = []
cnts = Counter()
map(cnts.update, all_lines)
if cnts['b'] > cnts['s']:
sizemode = 'b'
else:
sizemode = 's'
for line in all_lines:
for s in _majority_smooth(line, sizemode):
smoothed.append(s)
return smoothed
def _majority_smooth(cur_seg, sizemode):
scount = cur_seg.count('s')
bcount = cur_seg.count('b')
if scount > bcount:
dom_size = 's'
elif bcount > scount:
dom_size = 'b'
else:
dom_size = sizemode
for c in cur_seg:
yield dom_size
def segment_smooth(all_lines):
cur_seg = []
smoothed_sizes = []
tbcount = all_lines.count('b')
tscount = all_lines.count('s')
if tbcount > tscount:
sizemode = 'b'
else:
sizemode = 's'
for j, s in enumerate(all_lines):
if s in u'། ()〈〉༽༼༔༑':
# print 'val is', s,
if s in u'། )〉༽༔༑':
cur_seg.append(s)
# print 'segment added', s
# print
for sz in _majority_smooth(cur_seg, sizemode):
smoothed_sizes.append(sz)
if s in u'(〈༼':
cur_seg = [s]
# print 'segment added', s
# print
else:
cur_seg = []
else:
cur_seg.append(s)
# print j, len(smoothed_sizes) + len(cur_seg) - 2, 'CUR S', s
for sz in _majority_smooth(cur_seg, sizemode):
smoothed_sizes.append(sz)
assert len(smoothed_sizes) == len(all_lines), 'Length of input does not equal length of output. %d != %d' % (len(smoothed_sizes), len(all_lines))
return smoothed_sizes
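# (added note) segment_smooth homogenises sizes within punctuation-delimited
# segments: e.g. a segment tagged ['s', 'b', 'b', u'།'] is rewritten as
# ['b', 'b', 'b', 'b'] because 'b' is the majority size in that segment;
# ties fall back to the page-wide majority (sizemode).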
def assign_sizes_for_page(content, stack_inx_pointer, stacks_gmms, stack_mean, prev_size='', smooth_method=''):
all_lines = []
if not content:
return None, all_lines
# First pass
for line in content:
# prev_size = 'b'
cur_line = []
for s in line:
if s[-1] in u'་། ()〈〉༽༼༔༑' or num_stacks(s[-1]) > 1 :
# size = prev_size
if smooth_method == 'line_smooth':
cur_line.append(s[-1])
else:
all_lines.append(s[-1])
continue
# print s
try:
# size = big_or_small(stacks_gmms[s[-1]], s[2], stack_mean)
size = big_or_small(stacks_gmms[stack_inx_pointer[s[-1]]], s[2], stack_mean)
except KeyError:
if smooth_method == 'line_smooth':
cur_line.append(s[-1])
else:
all_lines.append(s[-1])
# print 'KEY ERROR', s[-1]
continue
# prev_size = size
# assert size in 'bs', 'fatal error in first assignment pass'
if smooth_method == 'line_smooth':
cur_line.append(size)
else:
all_lines.append(size)
if smooth_method == 'line_smooth':
all_lines.append(cur_line)
cur_line = []
# all_lines.append(size)
# all_lines.append(cur_line)
# print '\n'
# second pass. smooth over abrupt size changes
if smooth_method == 'line_smooth':
second_pass = line_smooth(all_lines)
elif smooth_method == 'segment_smooth':
second_pass = segment_smooth(all_lines)
elif smooth_method == 'granular':
second_pass = granular_smooth(all_lines)
else:
second_pass = []
for i, s in enumerate(all_lines):
if s in u'་། ()〈〉༽༼༔༑':
s = prev_size
second_pass.append(prev_size)
else:
second_pass.append(s)
prev_size = s
if not second_pass:
mode = 's'
print 'WARNING: using default value for mode'
else:
mode = statsmode(second_pass)[0][0]
final_lines = []
prev_inx = 0
for line in content:
cur_inx = len(line) + prev_inx
final_lines.append(second_pass[prev_inx:cur_inx])
prev_inx = cur_inx
return mode, final_lines
def fit_dpgmm(arrs):
# dpgmm = DPGMM(n_components = 45, alpha=10, n_iter=50)
# dpgmm = DPGMM(n_components = 15, alpha=2.25, n_iter=50) ### good for kandze histories
dpgmm = DPGMM(n_components = 20, alpha=.0, n_iter=50)
# dpgmm = DPGMM(n_components = 55, alpha=2.75, n_iter=50)
dpgmm.fit(arrs)
# print 'dpgmm means'
# print dpgmm.means_
# print len(dpgmm.means_)
return dpgmm
# def generate_formatting(page_objects, smooth_method='line_smooth'):
# def generate_formatting(page_objects, smooth_method='segment_smooth'):
def generate_formatting(page_objects, smooth_method='', ignore_first_line=False):
from matplotlib import pyplot as plt
allsizes = []
single_chars = {}
# print page_objects
##### First, gather widths into lists, one global (all widths) and one for
##### each stack
for p in page_objects:
# content = p['other_page_info']
content = p['page_info']
# content = pickle.loads(content.decode('base64'))
# content = json.loads(zlib.decompress(str(content).decode('string-escape')))
p['content'] = content['content']
for i, line in enumerate(p['content']):
if ignore_first_line and i == 0:
continue
for char in line:
try:
if char[2] not in (-1, 0) and num_stacks(char[-1]) == 1 and char[-1] not in u'་།':
# allchars.append(char[-1])
# allsizes.append(char[3]) # using height rather than width
allsizes.append(char[2])
cl = single_chars.get(char[-1], [])
cl.append(char[2])
# cl.append(char[3])
single_chars[char[-1]] = cl
except:
print char, line, content
raise
stack_mean = np.mean(allsizes)
### Modeling distributions of sizes
print 'modeling distribution of widths'
maxsize = np.min([200, np.max(allsizes)])
arrs = []
keys = single_chars.keys()
for c in keys:
vals = single_chars[c]
# plt.hist(vals, max(10, len(vals)/10))
# plt.savefig(u'/home/zr/width-plots/%s-%d.png' % (c, len(vals)))
# plt.clf()
counts = Counter(vals)
arr = np.zeros((maxsize+1), int)
for cn in counts:
if cn < 200:
# if cn < 200 and counts[cn] > 5:
arr[cn] = counts[cn]
arrs.append(arr.astype(float))
# arrs = np.array(arrs).astype(float)
# print arrs.shape
# dpgmm = DPGMM(n_components = 10, alpha=1.0, n_iter=50)
dpgmm = fit_dpgmm(arrs)
ndists = len(dpgmm.means_)
letter_groups = [[] for i in range(ndists)]
for i, c in enumerate(keys):
label = dpgmm.predict(arrs[i].reshape((1,maxsize+1)))[0]
letter_groups[label].append(c)
from matplotlib import pyplot as plt
pooled_gmms = {}
stack_group_inx = {}
for i, gr in enumerate(letter_groups):
print 'group ------'
group_ws = []
for l in gr:
print l,
group_ws.extend(single_chars[l])
stack_group_inx[l] = i
print
if group_ws:
gmm = get_gmm_for_stack(group_ws)
pooled_gmms[i] = gmm
print '\tgroup', i, 'has', len(gmm.means_), 'dists. Converged? ', gmm.converged_
# if u'ཚ' in gr:
# from matplotlib.mlab import normpdf
n,bins,p = plt.hist(group_ws, maxsize+1, normed=True)
# for i in range(2):
# plt.plot(bins, normpdf(bins, gmm.means_[i], gmm.covars_[i]), label='fit', linewidth=1)
# plt.show()
plt.savefig('/media/zr/zr-mechanical/kandze3_hists/%s' % gr[0])
plt.clf()
# import sys; sys.exit()
print
print '-------'
print "converged?", dpgmm.converged_
print 'number of chars in each group:'
for i in letter_groups:
print len(i),
stacks_gmms = pooled_gmms
# import sys; sys.exit()
##### Create a GMM for widths corresponding to each stack
# stacks_gmms = {}
# for c in single_chars:
# # vals = gaussian_filter1d(single_chars[c], 2)
# vals = single_chars[c]
# stacks_gmms[c] = get_gmm_for_stack(vals)
# print c, 'number of sizes' ,len(stacks_gmms[c].means_)
from functools import partial
assign_sizes = partial(assign_sizes_for_page, stack_inx_pointer=stack_group_inx, stacks_gmms=stacks_gmms, stack_mean=stack_mean, prev_size='', smooth_method=smooth_method)
pool = multiprocessing.Pool()
prev_size = ''
modes = Counter()
mapped = pool.map(assign_sizes, [p['content'] for p in page_objects])
for i, p in enumerate(page_objects):
mode, size_info = mapped[i]
# mode, size_info = assign_sizes_for_page(p['content'], stack_group_inx, stacks_gmms, stack_mean, prev_size, smooth_method=smooth_method)
modes.update(mode)
p['size_info'] = size_info
mode = modes.most_common(1)[0][0]
# truncated_page_objects = []
for p in page_objects:
# pn = {}
if not p['size_info']:
continue
size_info = p['size_info']
buffer = 'volume-mode\t' + mode + '\n'
# print size_info
for l in size_info:
if l:
buffer += ''.join(l)
buffer += '\n'
prev_size = l[-1]
# print buffer
# print
p['page_info']['size_info'] = buffer
# pn['id'] = p['id']
# truncated_page_objects.append(pn)
# reduce info being sent back to database by throwing out all info that
# hasn't been changed for each page
# print 'trying to visualize'
# for i, p in page_objects:
# p = page_objects[55]
# show_sample_page(p)
# for i, p in enumerate(page_objects):
# if p['tiffname'] == 'tbocrtifs/ngb_vol05/out_0017.tif':
# print 'showing sample_page', i
# show_sample_page(p)
# break
# else:
# print 'tiffnotfound'
# sys.exit()
return page_objects
def show_sample_page(page_object):
print 'generating a sample image to view'
pginfo = page_object['other_page_info']
print page_object['tiffname']
contents = json.loads(zlib.decompress(pginfo.decode('string-escape')))
size_info = page_object['line_boundaries']
slines = size_info.split('\n')
mode = slines[0].split('\t')[1]
# if mode == 'b':
# special = 's'
# else:
# special = 'b'
prev_size = slines[1][0]
all_lines = []
for i, s in enumerate(contents):
line = contents[i]
sizes = slines[i+1]
cur_line = []
for j, char in enumerate(line):
size = sizes[j]
if j == 0 and size != mode:
cur_line.append('<span class="format_size">')
cur_line.append(char[-1])
elif size != mode and prev_size == mode:
cur_line.append('<span class="format_size">')
cur_line.append(char[-1])
elif size == mode and prev_size != mode:
cur_line.append('</span>')
cur_line.append(char[-1])
elif j == len(line) -1 and size != mode and prev_size != mode:
cur_line.append(char[-1])
cur_line.append('</span>')
elif j == len(line) -1 and size != mode and prev_size == mode:
cur_line.append('<span class="format_size">')
cur_line.append(char[-1])
cur_line.append('</span>')
else:
cur_line.append(char[-1])
prev_size = size
all_lines.append(''.join(cur_line))
template = '''
<html>
<head>
<style>
body {
font-family: "Qomolangma-Uchen Sarchung";
}
.format_size {
color: blue;
}
</style>
</head>
<body>
%s
<hr>
%s
</body>
</html>
''' % ('<br>'.join(all_lines), '\n'.join(size_info))
import codecs
codecs.open('/tmp/format_test.html', 'w', 'utf-8').write(template)
import webbrowser
webbrowser.open_new_tab('file:///tmp/format_test.html')
# return mode, all_lines | mit |
f3r/scikit-learn | examples/ensemble/plot_partial_dependence.py | 4 | 4443 | """
========================
Partial Dependence Plots
========================
Partial dependence plots show the dependence between the target function [2]_
and a set of 'target' features, marginalizing over the
values of all other features (the complement features). Due to the limits
of human perception the size of the target feature set must be small (usually,
one or two) thus the target features are usually chosen among the most
important features
(see :attr:`~sklearn.ensemble.GradientBoostingRegressor.feature_importances_`).
This example shows how to obtain partial dependence plots from a
:class:`~sklearn.ensemble.GradientBoostingRegressor` trained on the California
housing dataset. The example is taken from [1]_.
The plot shows four one-way and one two-way partial dependence plots.
The target variables for the one-way PDP are:
median income (`MedInc`), avg. occupants per household (`AveOccup`),
median house age (`HouseAge`), and avg. rooms per household (`AveRooms`).
We can clearly see that the median house price shows a linear relationship
with the median income (top left) and that the house price drops when the
avg. occupants per household increases (top middle).
The top right plot shows that the house age in a district does not have
a strong influence on the (median) house price; nor does the average number
of rooms per household.
The tick marks on the x-axis represent the deciles of the feature values
in the training data.
Partial dependence plots with two target features enable us to visualize
interactions among them. The two-way partial dependence plot shows the
dependence of median house price on joint values of house age and avg.
occupants per household. We can clearly see an interaction between the
two features:
For an avg. occupancy greater than two, the house price is nearly independent
of the house age, whereas for values less than two there is a strong dependence
on age.
.. [1] T. Hastie, R. Tibshirani and J. Friedman,
"Elements of Statistical Learning Ed. 2", Springer, 2009.
.. [2] For classification you can think of it as the regression score before
the link function.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from sklearn.model_selection import train_test_split
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.ensemble.partial_dependence import plot_partial_dependence
from sklearn.ensemble.partial_dependence import partial_dependence
from sklearn.datasets.california_housing import fetch_california_housing
# fetch California housing dataset
cal_housing = fetch_california_housing()
# split 80/20 train-test
X_train, X_test, y_train, y_test = train_test_split(cal_housing.data,
cal_housing.target,
test_size=0.2,
random_state=1)
names = cal_housing.feature_names
print('_' * 80)
print("Training GBRT...")
clf = GradientBoostingRegressor(n_estimators=100, max_depth=4,
learning_rate=0.1, loss='huber',
random_state=1)
clf.fit(X_train, y_train)
print("done.")
print('_' * 80)
print('Convenience plot with ``partial_dependence_plots``')
print
features = [0, 5, 1, 2, (5, 1)]
fig, axs = plot_partial_dependence(clf, X_train, features, feature_names=names,
n_jobs=3, grid_resolution=50)
fig.suptitle('Partial dependence of house value on nonlocation features\n'
'for the California housing dataset')
plt.subplots_adjust(top=0.9) # tight_layout causes overlap with suptitle
print('_' * 80)
print('Custom 3d plot via ``partial_dependence``')
print
fig = plt.figure()
target_feature = (1, 5)
pdp, (x_axis, y_axis) = partial_dependence(clf, target_feature,
X=X_train, grid_resolution=50)
XX, YY = np.meshgrid(x_axis, y_axis)
Z = pdp.T.reshape(XX.shape).T
ax = Axes3D(fig)
surf = ax.plot_surface(XX, YY, Z, rstride=1, cstride=1, cmap=plt.cm.BuPu)
ax.set_xlabel(names[target_feature[0]])
ax.set_ylabel(names[target_feature[1]])
ax.set_zlabel('Partial dependence')
# pretty init view
ax.view_init(elev=22, azim=122)
plt.colorbar(surf)
plt.suptitle('Partial dependence of house value on median age and '
'average occupancy')
plt.subplots_adjust(top=0.9)
plt.show()
| bsd-3-clause |
rdussurget/py-altiwaves | kernel/io.py | 1 | 10570 | # -*- coding: latin-1 -*-
'''
kernel.io module
@summary : I/O tools for wavelet analysis
@requires: altimetry.tools.nctools
@since: Created on 6 Dec. 2012
@author: rdussurg
@copyright: Renaud Dussurget 2012.
@license: GNU Lesser General Public License
This file is part of PyAltiWAVES.
PyAltiWAVES is free software: you can redistribute it and/or modify it under
the terms of the GNU Lesser General Public License as published by the Free
Software Foundation, either version 3 of the License, or (at your option)
any later version.
PyAltiWAVES is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License
for more details.
You should have received a copy of the GNU Lesser General Public License along
with PyAltiWAVES. If not, see <http://www.gnu.org/licenses/>.
'''
import os
import numpy as np
from collections import OrderedDict
from altimetry.tools.nctools import nc
if __debug__ : import matplotlib.pyplot as plt
def save_analysis(filename,
lon,
lat,
time,
cycle,
track,
sla,
sa_spectrum,
sa_lscales,
wvsla,
daughter,
dimensions=None,
attributes=None,
verbose=1,
clobber=True):
#Init netcdf object
obj=nc(verbose=verbose) #Init NetCDF object
#Setup dimensions if required
if dimensions is None :
nx=len(lon)
ncyc=len(cycle)
nt=len(track)
dimensions=OrderedDict({'_ndims':3,'record':0,'cycle':ncyc,'track':nt})
#Setup default attributes
attrStr=OrderedDict()
attrStr['description']='Along-track wavelet analysis'
attrStr['input']=filename
if attributes is not None : #Append attributes if asked
        for a in [k for k in attributes.keys() if (k != 'description') and (k != 'input')]:
attrStr.update({a:attributes[a]})
#Setup output structure
dataStr=OrderedDict()
dataStr['_dimensions']=dimensions
dataStr['_attributes']=attrStr
dataStr['record']={'data':np.arange(nx),'long_name':'record_number','units':'1','_dimensions':('record',)}
dataStr['lon']={'data':lon,'long_name':'longitude','units':'degrees_north','_dimensions':('record',)}
dataStr['lat']={'data':lat,'long_name':'latitude','units':'degrees_east','_dimensions':('record',)}
dataStr['time']={'data':time,'long_name':'time of measurement','units':'days since 1950-01-01 00:00:00UTC','_dimensions':('cycle',)}
dataStr['cycle']={'data':cycle,'long_name':'cycle number','units':'1','_dimensions':('cycle',)}
dataStr['track']={'data':track,'long_name':'track number','units':'1','_dimensions':('track',)}
dataStr['sla']={'data':sla,'long_name':'sea_level_anomaly','units':'m','_dimensions':('cycle','record')}
dataStr['sa_spectrum']={'data':sa_spectrum,'long_name':'scale_averaged_spectrum','units':'m^^2','_dimensions':('cycle','record')}
dataStr['sa_lscales']={'data':sa_lscales,'long_name':'spatial_lengthscale','units':'km','_dimensions':('cycle','record')}
dataStr['wvsla']={'data':wvsla,'long_name':'filtered_sea_level_anomaly','units':'m','_dimensions':('cycle','record')}
dataStr['daughter']={'data':daughter,'long_name':'most_energetic_wavelet','units':'m','_dimensions':('cycle','record')}
#Write data
res=obj.write(dataStr,filename,clobber=clobber)
return res
def save_detection(filename,
eind,
lon,
lat,
amplitude, diameter, relvort, ugdiameter, ugamplitude, rk_relvort, rk_center, self_advect,
dimensions=None,
attributes=None,
verbose=1,
clobber=True):
#Init netcdf object
obj=nc(verbose=verbose) #Init NetCDF object
#Setup dimensions if required
if dimensions is None :
nx=len(amplitude)
dimensions=OrderedDict({'_ndims':1,'record':0})
#Setup default attributes
attrStr=OrderedDict()
attrStr['description']='Eddy-like features detected from wavelet analysis'
attrStr['input']=filename
if attributes is not None : #Append attributes if asked
        for a in [k for k in attributes.keys() if (k != 'description') and (k != 'input')]:
attrStr.update({a:attributes[a]})
#Setup output structure
dataStr=OrderedDict()
dataStr['_dimensions']=dimensions
dataStr['_attributes']=attrStr
dataStr['record']={'data':np.arange(nx),'long_name':'record_number','units':'1','_dimensions':('record',)}
dataStr['xind']={'data':eind[0,:],'long_name':'along_track_index','units':'1','_dimensions':('record',)}
dataStr['yind']={'data':eind[1,:],'long_name':'time_index','units':'1','_dimensions':('record',)}
dataStr['lon']={'data':lon[eind[0,:]],'long_name':'longitude','units':'degrees_north','_dimensions':('record',)}
dataStr['lat']={'data':lat[eind[0,:]],'long_name':'latitude','units':'degrees_east','_dimensions':('record',)}
dataStr['amplitude']={'data':amplitude,'long_name':'amplitude','units':'cm','_dimensions':('record',)}
dataStr['diameter']={'data':diameter,'long_name':'diameter','units':'km','_dimensions':('record',)}
dataStr['relvort']={'data':relvort,'long_name':'relative_vorticity','units':'s-1','_dimensions':('record',)}
dataStr['ugdiameter']={'data':ugdiameter,'long_name':'eddy_core_diameter','units':'km','_dimensions':('record',)}
dataStr['ugamplitude']={'data':ugamplitude,'long_name':'eddy_core_amplitude','units':'cm','_dimensions':('record',)}
dataStr['rk_relvort']={'data':rk_relvort,'long_name':'rankine_eddy_vorticity','units':'s-1','_dimensions':('record',)}
dataStr['rkxind']={'data':rk_center,'long_name':'rankine_eddy_index','units':'1','_dimensions':('record',)}
dataStr['rk_lon']={'data':lon[rk_center],'long_name':'rankine_eddy_longitude','units':'degrees_north','_dimensions':('record',)}
dataStr['rk_lat']={'data':lat[rk_center],'long_name':'rankine_eddy_latitude','units':'degrees_north','_dimensions':('record',)}
dataStr['advection']={'data':self_advect,'long_name':'eddy_self_advection','units':'m.s-1','_dimensions':('record',)}
#Write data
res=obj.write(dataStr,filename,clobber=clobber)
return res
def save_binning(filename,
blon, blat, hist, ampmn, lenmn, rvmn, amprms, lenrms, rvrms, \
btime, thist, tampmn, tlenmn, trvmn, tamprms, tlenrms, trvrms, \
# blon, blat, hist, ampmn, lenmn, rvmn, uglmn, wvlenmn, amprms, lenrms, rvrms, uglrms, wvlenrms,
# btime, thist, tampmn, tlenmn, trvmn, tuglmn, twvlmn, tamprms, tlenrms, trvrms, tuglrms, twvlrms,
dimensions=None,
attributes=None,
description='Climatology of eddy-like processes variability',
verbose=1,
clobber=True):
#Init netcdf object
obj=nc(verbose=verbose) #Init NetCDF object
#Setup dimensions if required
if dimensions is None :
nx=len(blat)
nt=len(btime)
sdimensions=OrderedDict({'_ndims':1,'lat':0})
tdimensions=OrderedDict({'_ndims':1,'time':0})
#Setup default attributes
attrStr=OrderedDict()
attrStr['description']=description+' - space'
if attributes is not None : #Append attributes if asked
        for a in [k for k in attributes.keys() if (k != 'description') and (k != 'input')]:
attrStr.update({a:attributes[a]})
#Setup output structure
sStr=OrderedDict()
sStr['_dimensions']=sdimensions
sStr['_attributes']=attrStr
sStr['lat']={'data':blat,'long_name':'latitude','units':'degrees_east','_dimensions':('lat',)}
sStr['lon']={'data':blon,'long_name':'longitude','units':'degrees_north','_dimensions':('lat',)}
sStr['hist']={'data':hist,'long_name':'spatial_occurence_frequency','units':'%','_dimensions':('lat',)}
sStr['amplitude']={'data':ampmn,'long_name':'amplitude','units':'cm','_dimensions':('lat',)}
sStr['diameter']={'data':lenmn,'long_name':'diameter','units':'km','_dimensions':('lat',)}
sStr['relvort']={'data':rvmn,'long_name':'relative_vorticity','units':'s-1','_dimensions':('lat',)}
sStr['amplitude_rms']={'data':amprms,'long_name':'RMS_of_amplitude','units':'cm','_dimensions':('lat',)}
sStr['diameter_rms']={'data':lenrms,'long_name':'RMS_of_diameter','units':'km','_dimensions':('lat',)}
sStr['relvort_rms']={'data':rvrms,'long_name':'RMS_of_relative_vorticity','units':'s-1','_dimensions':('lat',)}
tStr=OrderedDict()
attrStr['description']=description+' - time'
tStr['_dimensions']=tdimensions
tStr['_attributes']=attrStr
tStr['time']={'data':btime,'long_name':'time','units':'days since 1950-01-01 00:00:00UTC','_dimensions':('time',)}
tStr['hist']={'data':thist,'long_name':'time_occurence_frequency','units':'%','_dimensions':('time',)}
tStr['amplitude']={'data':tampmn,'long_name':'amplitude','units':'cm','_dimensions':('time',)}
tStr['diameter']={'data':tlenmn,'long_name':'diameter','units':'km','_dimensions':('time',)}
tStr['relvort']={'data':trvmn,'long_name':'relative_vorticity','units':'s-1','_dimensions':('time',)}
tStr['amplitude_rms']={'data':tamprms,'long_name':'RMS_of_amplitude','units':'cm','_dimensions':('time',)}
tStr['diameter_rms']={'data':tlenrms,'long_name':'RMS_of_diameter','units':'km','_dimensions':('time',)}
tStr['relvort_rms']={'data':trvrms,'long_name':'RMS_of_relative_vorticity','units':'s-1','_dimensions':('time',)}
#Setup file name
sfname=os.path.dirname(filename)+os.path.sep+'space_clim.'+os.path.splitext(os.path.basename(filename))[0]+os.path.splitext(os.path.basename(filename))[1]
tfname=os.path.dirname(filename)+os.path.sep+'time_clim.'+os.path.splitext(os.path.basename(filename))[0]+os.path.splitext(os.path.basename(filename))[1]
#Write data
res=obj.write(sStr,sfname,clobber=clobber)
return res
| lgpl-3.0 |
crichardson17/starburst_atlas | Low_resolution_sims/Dusty_LowRes/Padova_cont/padova_cont_2/fullgrid/Optical1.py | 30 | 9342 | import csv
import matplotlib.pyplot as plt
from numpy import *
import scipy.interpolate
import math
from pylab import *
from matplotlib.ticker import MultipleLocator, FormatStrFormatter
import matplotlib.patches as patches
from matplotlib.path import Path
import os
# ------------------------------------------------------------------------------------------------------
#inputs
for file in os.listdir('.'):
if file.endswith("1.grd"):
gridfile1 = file
for file in os.listdir('.'):
if file.endswith("2.grd"):
gridfile2 = file
for file in os.listdir('.'):
if file.endswith("3.grd"):
gridfile3 = file
# ------------------------
for file in os.listdir('.'):
if file.endswith("1.txt"):
Elines1 = file
for file in os.listdir('.'):
if file.endswith("2.txt"):
Elines2 = file
for file in os.listdir('.'):
if file.endswith("3.txt"):
Elines3 = file
# ------------------------------------------------------------------------------------------------------
#Patches data
#for the Kewley and Levesque data
verts = [
(1., 7.97712125471966000000), # left, bottom
(1., 9.57712125471966000000), # left, top
(2., 10.57712125471970000000), # right, top
(2., 8.97712125471966000000), # right, bottom
(0., 0.), # ignored
]
codes = [Path.MOVETO,
Path.LINETO,
Path.LINETO,
Path.LINETO,
Path.CLOSEPOLY,
]
path = Path(verts, codes)
# ------------------------
#for the Kewley 01 data
verts2 = [
(2.4, 9.243038049), # left, bottom
(2.4, 11.0211893), # left, top
(2.6, 11.0211893), # right, top
(2.6, 9.243038049), # right, bottom
(0, 0.), # ignored
]
path = Path(verts, codes)
path2 = Path(verts2, codes)
# -------------------------
#for the Moy et al data
verts3 = [
(1., 6.86712125471966000000), # left, bottom
(1., 10.18712125471970000000), # left, top
(3., 12.18712125471970000000), # right, top
(3., 8.86712125471966000000), # right, bottom
(0., 0.), # ignored
]
path = Path(verts, codes)
path3 = Path(verts3, codes)
# ------------------------------------------------------------------------------------------------------
#the routine to add patches for others peoples' data onto our plots.
def add_patches(ax):
patch3 = patches.PathPatch(path3, facecolor='yellow', lw=0)
patch2 = patches.PathPatch(path2, facecolor='green', lw=0)
patch = patches.PathPatch(path, facecolor='red', lw=0)
ax1.add_patch(patch3)
ax1.add_patch(patch2)
ax1.add_patch(patch)
# ------------------------------------------------------------------------------------------------------
#the subplot routine
def add_sub_plot(sub_num):
numplots = 16
plt.subplot(numplots/4.,4,sub_num)
rbf = scipy.interpolate.Rbf(x, y, z[:,sub_num-1], function='linear')
zi = rbf(xi, yi)
contour = plt.contour(xi,yi,zi, levels, colors='c', linestyles = 'dashed')
contour2 = plt.contour(xi,yi,zi, levels2, colors='k', linewidths=1.5)
plt.scatter(max_values[line[sub_num-1],2], max_values[line[sub_num-1],3], c ='k',marker = '*')
plt.annotate(headers[line[sub_num-1]], xy=(8,11), xytext=(6,8.5), fontsize = 10)
plt.annotate(max_values[line[sub_num-1],0], xy= (max_values[line[sub_num-1],2], max_values[line[sub_num-1],3]), xytext = (0, -10), textcoords = 'offset points', ha = 'right', va = 'bottom', fontsize=10)
if sub_num == numplots / 2.:
print "half the plots are complete"
#axis limits
yt_min = 8
yt_max = 23
xt_min = 0
xt_max = 12
plt.ylim(yt_min,yt_max)
plt.xlim(xt_min,xt_max)
plt.yticks(arange(yt_min+1,yt_max,1),fontsize=10)
plt.xticks(arange(xt_min+1,xt_max,1), fontsize = 10)
if sub_num in [2,3,4,6,7,8,10,11,12,14,15,16]:
plt.tick_params(labelleft = 'off')
else:
plt.tick_params(labelleft = 'on')
plt.ylabel('Log ($ \phi _{\mathrm{H}} $)')
if sub_num in [1,2,3,4,5,6,7,8,9,10,11,12]:
plt.tick_params(labelbottom = 'off')
else:
plt.tick_params(labelbottom = 'on')
plt.xlabel('Log($n _{\mathrm{H}} $)')
if sub_num == 1:
plt.yticks(arange(yt_min+1,yt_max+1,1),fontsize=10)
if sub_num == 13:
plt.yticks(arange(yt_min,yt_max,1),fontsize=10)
plt.xticks(arange(xt_min,xt_max,1), fontsize = 10)
if sub_num == 16 :
plt.xticks(arange(xt_min+1,xt_max+1,1), fontsize = 10)
# ---------------------------------------------------
#this is where the grid information (phi and hdens) is read in and saved to grid.
grid1 = [];
grid2 = [];
grid3 = [];
with open(gridfile1, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
for row in csvReader:
grid1.append(row);
grid1 = asarray(grid1)
with open(gridfile2, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
for row in csvReader:
grid2.append(row);
grid2 = asarray(grid2)
with open(gridfile3, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
for row in csvReader:
grid3.append(row);
grid3 = asarray(grid3)
#here is where the data for each line is read in and saved to dataEmissionlines
dataEmissionlines1 = [];
dataEmissionlines2 = [];
dataEmissionlines3 = [];
with open(Elines1, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
headers = csvReader.next()
for row in csvReader:
dataEmissionlines1.append(row);
dataEmissionlines1 = asarray(dataEmissionlines1)
with open(Elines2, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
headers2 = csvReader.next()
for row in csvReader:
dataEmissionlines2.append(row);
dataEmissionlines2 = asarray(dataEmissionlines2)
with open(Elines3, 'rb') as f:
csvReader = csv.reader(f,delimiter='\t')
headers3 = csvReader.next()
for row in csvReader:
dataEmissionlines3.append(row);
dataEmissionlines3 = asarray(dataEmissionlines3)
print "import files complete"
# ---------------------------------------------------
#for concatenating grid
#pull the phi and hdens values from each of the runs. exclude header lines
grid1new = zeros((len(grid1[:,0])-1,2))
grid1new[:,0] = grid1[1:,6]
grid1new[:,1] = grid1[1:,7]
grid2new = zeros((len(grid2[:,0])-1,2))
x = array(17.00000)
grid2new[:,0] = repeat(x,len(grid2[:,0])-1)
grid2new[:,1] = grid2[1:,6]
grid3new = zeros((len(grid3[:,0])-1,2))
grid3new[:,0] = grid3[1:,6]
grid3new[:,1] = grid3[1:,7]
grid = concatenate((grid1new,grid2new,grid3new))
hdens_values = grid[:,1]
phi_values = grid[:,0]
# ---------------------------------------------------
#for concatenating Emission lines data
Emissionlines = concatenate((dataEmissionlines1[:,1:],dataEmissionlines2[:,1:],dataEmissionlines3[:,1:]))
#for lines
headers = headers[1:]
concatenated_data = zeros((len(Emissionlines),len(Emissionlines[0])))
max_values = zeros((len(concatenated_data[0]),4))
# ---------------------------------------------------
#constructing grid by scaling
#select the scaling factor
#for 1215
#incident = Emissionlines[1:,4]
#for 4860
incident = concatenated_data[:,57]
#take the ratio of incident and all the lines and put it all in an array concatenated_data
for i in range(len(Emissionlines)):
for j in range(len(Emissionlines[0])):
if math.log(4860.*(float(Emissionlines[i,j])/float(Emissionlines[i,57])), 10) > 0:
concatenated_data[i,j] = math.log(4860.*(float(Emissionlines[i,j])/float(Emissionlines[i,57])), 10)
else:
			concatenated_data[i,j] = 0
# for 1215
#for i in range(len(Emissionlines)):
# for j in range(len(Emissionlines[0])):
# if math.log(1215.*(float(Emissionlines[i,j])/float(Emissionlines[i,4])), 10) > 0:
# concatenated_data[i,j] = math.log(1215.*(float(Emissionlines[i,j])/float(Emissionlines[i,4])), 10)
# else:
# concatenated_data[i,j] == 0
# ---------------------------------------------------
#find the maxima to plot onto the contour plots
for j in range(len(concatenated_data[0])):
max_values[j,0] = max(concatenated_data[:,j])
max_values[j,1] = argmax(concatenated_data[:,j], axis = 0)
max_values[j,2] = hdens_values[max_values[j,1]]
max_values[j,3] = phi_values[max_values[j,1]]
#to round off the maxima
max_values[:,0] = [ '%.1f' % elem for elem in max_values[:,0] ]
print "data arranged"
# ---------------------------------------------------
#Creating the grid to interpolate with for contours.
gridarray = zeros((len(concatenated_data),2))
gridarray[:,0] = hdens_values
gridarray[:,1] = phi_values
x = gridarray[:,0]
y = gridarray[:,1]
# ---------------------------------------------------
#change desired lines here!
line = [36, #NE 3 3343A
38, #BA C
39, #3646
40, #3726
41, #3727
42, #3729
43, #3869
44, #3889
45, #3933
46, #4026
47, #4070
48, #4074
49, #4078
50, #4102
51, #4340
52] #4363
#create z array for this plot
z = concatenated_data[:,line[:]]
# ---------------------------------------------------
# Interpolate
print "starting interpolation"
xi, yi = linspace(x.min(), x.max(), 10), linspace(y.min(), y.max(), 10)
xi, yi = meshgrid(xi, yi)
# ---------------------------------------------------
print "interpolatation complete; now plotting"
#plot
plt.subplots_adjust(wspace=0, hspace=0) #remove space between plots
levels = arange(10**-1,10, .2)
levels2 = arange(10**-2,10**2, 1)
plt.suptitle("Dusty Optical Lines", fontsize=14)
# ---------------------------------------------------
for i in range(16):
add_sub_plot(i)
ax1 = plt.subplot(4,4,1)
add_patches(ax1)
print "complete"
plt.savefig('Dusty_optical_lines.pdf')
plt.clf()
print "figure saved"
| gpl-2.0 |
mikebenfield/scikit-learn | sklearn/datasets/tests/test_rcv1.py | 322 | 2414 | """Test the rcv1 loader.
Skipped if rcv1 is not already downloaded to data_home.
"""
import errno
import scipy.sparse as sp
import numpy as np
from sklearn.datasets import fetch_rcv1
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import SkipTest
def test_fetch_rcv1():
try:
data1 = fetch_rcv1(shuffle=False, download_if_missing=False)
except IOError as e:
if e.errno == errno.ENOENT:
raise SkipTest("Download RCV1 dataset to run this test.")
X1, Y1 = data1.data, data1.target
cat_list, s1 = data1.target_names.tolist(), data1.sample_id
# test sparsity
assert_true(sp.issparse(X1))
assert_true(sp.issparse(Y1))
assert_equal(60915113, X1.data.size)
assert_equal(2606875, Y1.data.size)
# test shapes
assert_equal((804414, 47236), X1.shape)
assert_equal((804414, 103), Y1.shape)
assert_equal((804414,), s1.shape)
assert_equal(103, len(cat_list))
# test ordering of categories
first_categories = [u'C11', u'C12', u'C13', u'C14', u'C15', u'C151']
assert_array_equal(first_categories, cat_list[:6])
# test number of sample for some categories
some_categories = ('GMIL', 'E143', 'CCAT')
number_non_zero_in_cat = (5, 1206, 381327)
for num, cat in zip(number_non_zero_in_cat, some_categories):
j = cat_list.index(cat)
assert_equal(num, Y1[:, j].data.size)
# test shuffling and subset
data2 = fetch_rcv1(shuffle=True, subset='train', random_state=77,
download_if_missing=False)
X2, Y2 = data2.data, data2.target
s2 = data2.sample_id
# The first 23149 samples are the training samples
assert_array_equal(np.sort(s1[:23149]), np.sort(s2))
# test some precise values
some_sample_ids = (2286, 3274, 14042)
for sample_id in some_sample_ids:
idx1 = s1.tolist().index(sample_id)
idx2 = s2.tolist().index(sample_id)
feature_values_1 = X1[idx1, :].toarray()
feature_values_2 = X2[idx2, :].toarray()
assert_almost_equal(feature_values_1, feature_values_2)
target_values_1 = Y1[idx1, :].toarray()
target_values_2 = Y2[idx2, :].toarray()
assert_almost_equal(target_values_1, target_values_2)
| bsd-3-clause |
pythonvietnam/scikit-learn | examples/decomposition/plot_faces_decomposition.py | 204 | 4452 | """
============================
Faces dataset decompositions
============================
This example applies to :ref:`olivetti_faces` different unsupervised
matrix decomposition (dimension reduction) methods from the module
:py:mod:`sklearn.decomposition` (see the documentation chapter
:ref:`decompositions`) .
"""
print(__doc__)
# Authors: Vlad Niculae, Alexandre Gramfort
# License: BSD 3 clause
import logging
from time import time
from numpy.random import RandomState
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_olivetti_faces
from sklearn.cluster import MiniBatchKMeans
from sklearn import decomposition
# Display progress logs on stdout
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(levelname)s %(message)s')
n_row, n_col = 2, 3
n_components = n_row * n_col
image_shape = (64, 64)
rng = RandomState(0)
###############################################################################
# Load faces data
dataset = fetch_olivetti_faces(shuffle=True, random_state=rng)
faces = dataset.data
n_samples, n_features = faces.shape
# global centering
faces_centered = faces - faces.mean(axis=0)
# local centering
faces_centered -= faces_centered.mean(axis=1).reshape(n_samples, -1)
print("Dataset consists of %d faces" % n_samples)
###############################################################################
def plot_gallery(title, images, n_col=n_col, n_row=n_row):
plt.figure(figsize=(2. * n_col, 2.26 * n_row))
plt.suptitle(title, size=16)
for i, comp in enumerate(images):
plt.subplot(n_row, n_col, i + 1)
vmax = max(comp.max(), -comp.min())
plt.imshow(comp.reshape(image_shape), cmap=plt.cm.gray,
interpolation='nearest',
vmin=-vmax, vmax=vmax)
plt.xticks(())
plt.yticks(())
plt.subplots_adjust(0.01, 0.05, 0.99, 0.93, 0.04, 0.)
###############################################################################
# List of the different estimators, whether to center and transpose the
# problem, and whether the transformer uses the clustering API.
estimators = [
('Eigenfaces - RandomizedPCA',
decomposition.RandomizedPCA(n_components=n_components, whiten=True),
True),
('Non-negative components - NMF',
decomposition.NMF(n_components=n_components, init='nndsvda', beta=5.0,
tol=5e-3, sparseness='components'),
False),
('Independent components - FastICA',
decomposition.FastICA(n_components=n_components, whiten=True),
True),
('Sparse comp. - MiniBatchSparsePCA',
decomposition.MiniBatchSparsePCA(n_components=n_components, alpha=0.8,
n_iter=100, batch_size=3,
random_state=rng),
True),
('MiniBatchDictionaryLearning',
decomposition.MiniBatchDictionaryLearning(n_components=15, alpha=0.1,
n_iter=50, batch_size=3,
random_state=rng),
True),
('Cluster centers - MiniBatchKMeans',
MiniBatchKMeans(n_clusters=n_components, tol=1e-3, batch_size=20,
max_iter=50, random_state=rng),
True),
('Factor Analysis components - FA',
decomposition.FactorAnalysis(n_components=n_components, max_iter=2),
True),
]
###############################################################################
# Plot a sample of the input data
plot_gallery("First centered Olivetti faces", faces_centered[:n_components])
###############################################################################
# Do the estimation and plot it
for name, estimator, center in estimators:
print("Extracting the top %d %s..." % (n_components, name))
t0 = time()
data = faces
if center:
data = faces_centered
estimator.fit(data)
train_time = (time() - t0)
print("done in %0.3fs" % train_time)
if hasattr(estimator, 'cluster_centers_'):
components_ = estimator.cluster_centers_
else:
components_ = estimator.components_
if hasattr(estimator, 'noise_variance_'):
plot_gallery("Pixelwise variance",
estimator.noise_variance_.reshape(1, -1), n_col=1,
n_row=1)
plot_gallery('%s - Train time %.1fs' % (name, train_time),
components_[:n_components])
plt.show()
| bsd-3-clause |
fatcloud/PyCV-time | opencv-official-samples/2.4.9/common.py | 23 | 6299 | #!/usr/bin/env python
'''
This module contains some common routines used by other samples.
'''
import numpy as np
import cv2
import os
from contextlib import contextmanager
import itertools as it
image_extensions = ['.bmp', '.jpg', '.jpeg', '.png', '.tif', '.tiff', '.pbm', '.pgm', '.ppm']
class Bunch(object):
def __init__(self, **kw):
self.__dict__.update(kw)
def __str__(self):
return str(self.__dict__)
def splitfn(fn):
path, fn = os.path.split(fn)
name, ext = os.path.splitext(fn)
return path, name, ext
def anorm2(a):
return (a*a).sum(-1)
def anorm(a):
return np.sqrt( anorm2(a) )
def homotrans(H, x, y):
xs = H[0, 0]*x + H[0, 1]*y + H[0, 2]
ys = H[1, 0]*x + H[1, 1]*y + H[1, 2]
s = H[2, 0]*x + H[2, 1]*y + H[2, 2]
return xs/s, ys/s
def to_rect(a):
a = np.ravel(a)
if len(a) == 2:
a = (0, 0, a[0], a[1])
return np.array(a, np.float64).reshape(2, 2)
def rect2rect_mtx(src, dst):
src, dst = to_rect(src), to_rect(dst)
cx, cy = (dst[1] - dst[0]) / (src[1] - src[0])
tx, ty = dst[0] - src[0] * (cx, cy)
M = np.float64([[ cx, 0, tx],
[ 0, cy, ty],
[ 0, 0, 1]])
return M
def lookat(eye, target, up = (0, 0, 1)):
fwd = np.asarray(target, np.float64) - eye
fwd /= anorm(fwd)
right = np.cross(fwd, up)
right /= anorm(right)
down = np.cross(fwd, right)
R = np.float64([right, down, fwd])
tvec = -np.dot(R, eye)
return R, tvec
def mtx2rvec(R):
w, u, vt = cv2.SVDecomp(R - np.eye(3))
p = vt[0] + u[:,0]*w[0] # same as np.dot(R, vt[0])
c = np.dot(vt[0], p)
s = np.dot(vt[1], p)
axis = np.cross(vt[0], vt[1])
return axis * np.arctan2(s, c)
def draw_str(dst, (x, y), s):
cv2.putText(dst, s, (x+1, y+1), cv2.FONT_HERSHEY_PLAIN, 1.0, (0, 0, 0), thickness = 2, lineType=cv2.CV_AA)
cv2.putText(dst, s, (x, y), cv2.FONT_HERSHEY_PLAIN, 1.0, (255, 255, 255), lineType=cv2.CV_AA)
class Sketcher:
def __init__(self, windowname, dests, colors_func):
self.prev_pt = None
self.windowname = windowname
self.dests = dests
self.colors_func = colors_func
self.dirty = False
self.show()
cv2.setMouseCallback(self.windowname, self.on_mouse)
def show(self):
cv2.imshow(self.windowname, self.dests[0])
def on_mouse(self, event, x, y, flags, param):
pt = (x, y)
if event == cv2.EVENT_LBUTTONDOWN:
self.prev_pt = pt
if self.prev_pt and flags & cv2.EVENT_FLAG_LBUTTON:
for dst, color in zip(self.dests, self.colors_func()):
cv2.line(dst, self.prev_pt, pt, color, 5)
self.dirty = True
self.prev_pt = pt
self.show()
else:
self.prev_pt = None
# palette data from matplotlib/_cm.py
_jet_data = {'red': ((0., 0, 0), (0.35, 0, 0), (0.66, 1, 1), (0.89,1, 1),
(1, 0.5, 0.5)),
'green': ((0., 0, 0), (0.125,0, 0), (0.375,1, 1), (0.64,1, 1),
(0.91,0,0), (1, 0, 0)),
'blue': ((0., 0.5, 0.5), (0.11, 1, 1), (0.34, 1, 1), (0.65,0, 0),
(1, 0, 0))}
cmap_data = { 'jet' : _jet_data }
def make_cmap(name, n=256):
data = cmap_data[name]
xs = np.linspace(0.0, 1.0, n)
channels = []
eps = 1e-6
for ch_name in ['blue', 'green', 'red']:
ch_data = data[ch_name]
xp, yp = [], []
for x, y1, y2 in ch_data:
xp += [x, x+eps]
yp += [y1, y2]
ch = np.interp(xs, xp, yp)
channels.append(ch)
return np.uint8(np.array(channels).T*255)
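# (added usage note) e.g. lut = make_cmap('jet'); vis = lut[gray_img]
# maps an 8-bit single-channel image to a BGR 'jet' palette via fancy indexing.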
def nothing(*arg, **kw):
pass
def clock():
return cv2.getTickCount() / cv2.getTickFrequency()
@contextmanager
def Timer(msg):
print msg, '...',
start = clock()
try:
yield
finally:
print "%.2f ms" % ((clock()-start)*1000)
class StatValue:
def __init__(self, smooth_coef = 0.5):
self.value = None
self.smooth_coef = smooth_coef
def update(self, v):
if self.value is None:
self.value = v
else:
c = self.smooth_coef
self.value = c * self.value + (1.0-c) * v
class RectSelector:
def __init__(self, win, callback):
self.win = win
self.callback = callback
cv2.setMouseCallback(win, self.onmouse)
self.drag_start = None
self.drag_rect = None
def onmouse(self, event, x, y, flags, param):
x, y = np.int16([x, y]) # BUG
if event == cv2.EVENT_LBUTTONDOWN:
self.drag_start = (x, y)
if self.drag_start:
if flags & cv2.EVENT_FLAG_LBUTTON:
xo, yo = self.drag_start
x0, y0 = np.minimum([xo, yo], [x, y])
x1, y1 = np.maximum([xo, yo], [x, y])
self.drag_rect = None
if x1-x0 > 0 and y1-y0 > 0:
self.drag_rect = (x0, y0, x1, y1)
else:
rect = self.drag_rect
self.drag_start = None
self.drag_rect = None
if rect:
self.callback(rect)
def draw(self, vis):
if not self.drag_rect:
return False
x0, y0, x1, y1 = self.drag_rect
cv2.rectangle(vis, (x0, y0), (x1, y1), (0, 255, 0), 2)
return True
@property
def dragging(self):
return self.drag_rect is not None
def grouper(n, iterable, fillvalue=None):
'''grouper(3, 'ABCDEFG', 'x') --> ABC DEF Gxx'''
args = [iter(iterable)] * n
return it.izip_longest(fillvalue=fillvalue, *args)
def mosaic(w, imgs):
'''Make a grid from images.
w -- number of grid columns
imgs -- images (must have same size and format)
'''
imgs = iter(imgs)
img0 = imgs.next()
pad = np.zeros_like(img0)
imgs = it.chain([img0], imgs)
rows = grouper(w, imgs, pad)
return np.vstack(map(np.hstack, rows))
def getsize(img):
h, w = img.shape[:2]
return w, h
def mdot(*args):
return reduce(np.dot, args)
def draw_keypoints(vis, keypoints, color = (0, 255, 255)):
for kp in keypoints:
x, y = kp.pt
cv2.circle(vis, (int(x), int(y)), 2, color)
| mit |
clk8908/pymc3 | pymc3/examples/ARM5_4.py | 14 | 1026 | '''
Created on May 18, 2012
@author: jsalvatier
'''
import numpy as np
from pymc3 import *
import theano.tensor as t
import pandas as pd
wells = get_data_file('pymc3.examples', 'data/wells.dat')
data = pd.read_csv(wells, delimiter=u' ', index_col=u'id',
dtype={u'switch': np.int8})
data.dist /= 100
data.educ /= 4
col = data.columns
P = data[col[1:]]
P = P - P.mean()
P['1'] = 1
Pa = np.array(P)
with Model() as model:
effects = Normal(
'effects', mu=0, tau=100. ** -2, shape=len(P.columns))
p = sigmoid(dot(Pa, effects))
s = Bernoulli('s', p, observed=np.array(data.switch))
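    # (added note) this is a Bayesian logistic regression: well-switching
    # (0/1) is modelled as Bernoulli with p = sigmoid(dot(Pa, effects)),
    # under a wide Normal prior (sd = 100) on the coefficients.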
def run(n=3000):
if n == "short":
n = 50
with model:
# move the chain to the MAP which should be a good starting point
start = find_MAP()
H = model.fastd2logp() # find a good orientation using the hessian at the MAP
h = H(start)
step = HamiltonianMC(model.vars, h)
trace = sample(n, step, start)
if __name__ == '__main__':
run()
| apache-2.0 |
marcocaccin/scikit-learn | examples/calibration/plot_calibration_curve.py | 225 | 5903 | """
==============================
Probability Calibration curves
==============================
When performing classification one often wants to predict not only the class
label, but also the associated probability. This probability gives some
kind of confidence on the prediction. This example demonstrates how to display
how well calibrated the predicted probabilities are and how to calibrate an
uncalibrated classifier.
The experiment is performed on an artificial dataset for binary classification
with 100.000 samples (1.000 of them are used for model fitting) with 20
features. Of the 20 features, only 2 are informative and 10 are redundant. The
first figure shows the estimated probabilities obtained with logistic
regression, Gaussian naive Bayes, and Gaussian naive Bayes with both isotonic
calibration and sigmoid calibration. The calibration performance is evaluated
with Brier score, reported in the legend (the smaller the better). One can
observe here that logistic regression is well calibrated while raw Gaussian
naive Bayes performs very badly. This is because of the redundant features
which violate the assumption of feature-independence and result in an overly
confident classifier, which is indicated by the typical transposed-sigmoid
curve.
Calibration of the probabilities of Gaussian naive Bayes with isotonic
regression can fix this issue as can be seen from the nearly diagonal
calibration curve. Sigmoid calibration also improves the brier score slightly,
albeit not as strongly as the non-parametric isotonic regression. This can be
attributed to the fact that we have plenty of calibration data such that the
greater flexibility of the non-parametric model can be exploited.
The second figure shows the calibration curve of a linear support-vector
classifier (LinearSVC). LinearSVC shows the opposite behavior as Gaussian
naive Bayes: the calibration curve has a sigmoid curve, which is typical for
an under-confident classifier. In the case of LinearSVC, this is caused by the
margin property of the hinge loss, which lets the model focus on hard samples
that are close to the decision boundary (the support vectors).
Both kinds of calibration can fix this issue and yield nearly identical
results. This shows that sigmoid calibration can deal with situations where
the calibration curve of the base classifier is sigmoid (e.g., for LinearSVC)
but not where it is transposed-sigmoid (e.g., Gaussian naive Bayes).
"""
print(__doc__)
# Author: Alexandre Gramfort <[email protected]>
# Jan Hendrik Metzen <[email protected]>
# License: BSD Style.
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import LinearSVC
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import (brier_score_loss, precision_score, recall_score,
f1_score)
from sklearn.calibration import CalibratedClassifierCV, calibration_curve
from sklearn.cross_validation import train_test_split
# Create dataset of classification task with many redundant and few
# informative features
X, y = datasets.make_classification(n_samples=100000, n_features=20,
n_informative=2, n_redundant=10,
random_state=42)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.99,
random_state=42)
def plot_calibration_curve(est, name, fig_index):
"""Plot calibration curve for est w/o and with calibration. """
# Calibrated with isotonic calibration
isotonic = CalibratedClassifierCV(est, cv=2, method='isotonic')
# Calibrated with sigmoid calibration
sigmoid = CalibratedClassifierCV(est, cv=2, method='sigmoid')
# Logistic regression with no calibration as baseline
lr = LogisticRegression(C=1., solver='lbfgs')
fig = plt.figure(fig_index, figsize=(10, 10))
ax1 = plt.subplot2grid((3, 1), (0, 0), rowspan=2)
ax2 = plt.subplot2grid((3, 1), (2, 0))
ax1.plot([0, 1], [0, 1], "k:", label="Perfectly calibrated")
for clf, name in [(lr, 'Logistic'),
(est, name),
(isotonic, name + ' + Isotonic'),
(sigmoid, name + ' + Sigmoid')]:
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
if hasattr(clf, "predict_proba"):
prob_pos = clf.predict_proba(X_test)[:, 1]
else: # use decision function
prob_pos = clf.decision_function(X_test)
prob_pos = \
(prob_pos - prob_pos.min()) / (prob_pos.max() - prob_pos.min())
clf_score = brier_score_loss(y_test, prob_pos, pos_label=y.max())
print("%s:" % name)
print("\tBrier: %1.3f" % (clf_score))
print("\tPrecision: %1.3f" % precision_score(y_test, y_pred))
print("\tRecall: %1.3f" % recall_score(y_test, y_pred))
print("\tF1: %1.3f\n" % f1_score(y_test, y_pred))
fraction_of_positives, mean_predicted_value = \
calibration_curve(y_test, prob_pos, n_bins=10)
ax1.plot(mean_predicted_value, fraction_of_positives, "s-",
label="%s (%1.3f)" % (name, clf_score))
ax2.hist(prob_pos, range=(0, 1), bins=10, label=name,
histtype="step", lw=2)
ax1.set_ylabel("Fraction of positives")
ax1.set_ylim([-0.05, 1.05])
ax1.legend(loc="lower right")
ax1.set_title('Calibration plots (reliability curve)')
ax2.set_xlabel("Mean predicted value")
ax2.set_ylabel("Count")
ax2.legend(loc="upper center", ncol=2)
plt.tight_layout()
# Plot calibration curve for Gaussian Naive Bayes
plot_calibration_curve(GaussianNB(), "Naive Bayes", 1)
# Plot calibration curve for Linear SVC
plot_calibration_curve(LinearSVC(), "SVC", 2)
plt.show()
| bsd-3-clause |
shenzebang/scikit-learn | sklearn/linear_model/ransac.py | 191 | 14261 | # coding: utf-8
# Author: Johannes Schönberger
#
# License: BSD 3 clause
import numpy as np
from ..base import BaseEstimator, MetaEstimatorMixin, RegressorMixin, clone
from ..utils import check_random_state, check_array, check_consistent_length
from ..utils.random import sample_without_replacement
from ..utils.validation import check_is_fitted
from .base import LinearRegression
_EPSILON = np.spacing(1)
def _dynamic_max_trials(n_inliers, n_samples, min_samples, probability):
"""Determine number trials such that at least one outlier-free subset is
sampled for the given inlier/outlier ratio.
Parameters
----------
n_inliers : int
Number of inliers in the data.
n_samples : int
Total number of samples in the data.
min_samples : int
Minimum number of samples chosen randomly from original data.
probability : float
Probability (confidence) that one outlier-free sample is generated.
Returns
-------
trials : int
Number of trials.
"""
inlier_ratio = n_inliers / float(n_samples)
nom = max(_EPSILON, 1 - probability)
denom = max(_EPSILON, 1 - inlier_ratio ** min_samples)
if nom == 1:
return 0
if denom == 1:
return float('inf')
return abs(float(np.ceil(np.log(nom) / np.log(denom))))
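# Illustrative check of the bound computed above: with an inlier ratio of 0.5,
# ``min_samples=2`` and ``probability=0.99`` the formula gives
#     ceil(log(1 - 0.99) / log(1 - 0.5 ** 2)) = ceil(16.008) = 17
# so, for example, ``_dynamic_max_trials(50, 100, 2, 0.99)`` evaluates to 17.0.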
class RANSACRegressor(BaseEstimator, MetaEstimatorMixin, RegressorMixin):
"""RANSAC (RANdom SAmple Consensus) algorithm.
RANSAC is an iterative algorithm for the robust estimation of parameters
from a subset of inliers from the complete data set. More information can
be found in the general documentation of linear models.
A detailed description of the algorithm can be found in the documentation
of the ``linear_model`` sub-package.
Read more in the :ref:`User Guide <RansacRegression>`.
Parameters
----------
base_estimator : object, optional
Base estimator object which implements the following methods:
* `fit(X, y)`: Fit model to given training data and target values.
* `score(X, y)`: Returns the mean accuracy on the given test data,
which is used for the stop criterion defined by `stop_score`.
Additionally, the score is used to decide which of two equally
large consensus sets is chosen as the better one.
If `base_estimator` is None, then
``base_estimator=sklearn.linear_model.LinearRegression()`` is used for
target values of dtype float.
Note that the current implementation only supports regression
estimators.
min_samples : int (>= 1) or float ([0, 1]), optional
Minimum number of samples chosen randomly from original data. Treated
as an absolute number of samples for `min_samples >= 1`, treated as a
relative number `ceil(min_samples * X.shape[0]`) for
`min_samples < 1`. This is typically chosen as the minimal number of
samples necessary to estimate the given `base_estimator`. By default a
``sklearn.linear_model.LinearRegression()`` estimator is assumed and
`min_samples` is chosen as ``X.shape[1] + 1``.
residual_threshold : float, optional
Maximum residual for a data sample to be classified as an inlier.
By default the threshold is chosen as the MAD (median absolute
deviation) of the target values `y`.
is_data_valid : callable, optional
This function is called with the randomly selected data before the
model is fitted to it: `is_data_valid(X, y)`. If its return value is
False the current randomly chosen sub-sample is skipped.
is_model_valid : callable, optional
This function is called with the estimated model and the randomly
selected data: `is_model_valid(model, X, y)`. If its return value is
False the current randomly chosen sub-sample is skipped.
Rejecting samples with this function is computationally costlier than
with `is_data_valid`. `is_model_valid` should therefore only be used if
the estimated model is needed for making the rejection decision.
max_trials : int, optional
Maximum number of iterations for random sample selection.
stop_n_inliers : int, optional
Stop iteration if at least this number of inliers are found.
stop_score : float, optional
        Stop iteration if the score is greater than or equal to this threshold.
stop_probability : float in range [0, 1], optional
RANSAC iteration stops if at least one outlier-free set of the training
        data is sampled in RANSAC. This requires generating at least N
        samples (iterations)::
N >= log(1 - probability) / log(1 - e**m)
        where the probability (confidence) is typically set to a high value such
as 0.99 (the default) and e is the current fraction of inliers w.r.t.
the total number of samples.
residual_metric : callable, optional
Metric to reduce the dimensionality of the residuals to 1 for
multi-dimensional target values ``y.shape[1] > 1``. By default the sum
of absolute differences is used::
lambda dy: np.sum(np.abs(dy), axis=1)
random_state : integer or numpy.RandomState, optional
The generator used to initialize the centers. If an integer is
given, it fixes the seed. Defaults to the global numpy random
number generator.
Attributes
----------
estimator_ : object
Best fitted model (copy of the `base_estimator` object).
n_trials_ : int
Number of random selection trials until one of the stop criteria is
met. It is always ``<= max_trials``.
inlier_mask_ : bool array of shape [n_samples]
Boolean mask of inliers classified as ``True``.
References
----------
.. [1] http://en.wikipedia.org/wiki/RANSAC
.. [2] http://www.cs.columbia.edu/~belhumeur/courses/compPhoto/ransac.pdf
.. [3] http://www.bmva.org/bmvc/2009/Papers/Paper355/Paper355.pdf
"""
def __init__(self, base_estimator=None, min_samples=None,
residual_threshold=None, is_data_valid=None,
is_model_valid=None, max_trials=100,
stop_n_inliers=np.inf, stop_score=np.inf,
stop_probability=0.99, residual_metric=None,
random_state=None):
self.base_estimator = base_estimator
self.min_samples = min_samples
self.residual_threshold = residual_threshold
self.is_data_valid = is_data_valid
self.is_model_valid = is_model_valid
self.max_trials = max_trials
self.stop_n_inliers = stop_n_inliers
self.stop_score = stop_score
self.stop_probability = stop_probability
self.residual_metric = residual_metric
self.random_state = random_state
def fit(self, X, y):
"""Fit estimator using RANSAC algorithm.
Parameters
----------
X : array-like or sparse matrix, shape [n_samples, n_features]
Training data.
y : array-like, shape = [n_samples] or [n_samples, n_targets]
Target values.
Raises
------
ValueError
If no valid consensus set could be found. This occurs if
`is_data_valid` and `is_model_valid` return False for all
`max_trials` randomly chosen sub-samples.
"""
X = check_array(X, accept_sparse='csr')
y = check_array(y, ensure_2d=False)
check_consistent_length(X, y)
if self.base_estimator is not None:
base_estimator = clone(self.base_estimator)
else:
base_estimator = LinearRegression()
if self.min_samples is None:
# assume linear model by default
min_samples = X.shape[1] + 1
elif 0 < self.min_samples < 1:
min_samples = np.ceil(self.min_samples * X.shape[0])
elif self.min_samples >= 1:
if self.min_samples % 1 != 0:
raise ValueError("Absolute number of samples must be an "
"integer value.")
min_samples = self.min_samples
else:
raise ValueError("Value for `min_samples` must be scalar and "
"positive.")
if min_samples > X.shape[0]:
raise ValueError("`min_samples` may not be larger than number "
"of samples ``X.shape[0]``.")
if self.stop_probability < 0 or self.stop_probability > 1:
raise ValueError("`stop_probability` must be in range [0, 1].")
if self.residual_threshold is None:
# MAD (median absolute deviation)
residual_threshold = np.median(np.abs(y - np.median(y)))
else:
residual_threshold = self.residual_threshold
if self.residual_metric is None:
residual_metric = lambda dy: np.sum(np.abs(dy), axis=1)
else:
residual_metric = self.residual_metric
random_state = check_random_state(self.random_state)
        try:  # Not all estimators accept a random_state
base_estimator.set_params(random_state=random_state)
except ValueError:
pass
n_inliers_best = 0
score_best = np.inf
inlier_mask_best = None
X_inlier_best = None
y_inlier_best = None
# number of data samples
n_samples = X.shape[0]
sample_idxs = np.arange(n_samples)
n_samples, _ = X.shape
for self.n_trials_ in range(1, self.max_trials + 1):
# choose random sample set
subset_idxs = sample_without_replacement(n_samples, min_samples,
random_state=random_state)
X_subset = X[subset_idxs]
y_subset = y[subset_idxs]
# check if random sample set is valid
if (self.is_data_valid is not None
and not self.is_data_valid(X_subset, y_subset)):
continue
# fit model for current random sample set
base_estimator.fit(X_subset, y_subset)
# check if estimated model is valid
if (self.is_model_valid is not None and not
self.is_model_valid(base_estimator, X_subset, y_subset)):
continue
# residuals of all data for current random sample model
y_pred = base_estimator.predict(X)
diff = y_pred - y
if diff.ndim == 1:
diff = diff.reshape(-1, 1)
residuals_subset = residual_metric(diff)
# classify data into inliers and outliers
inlier_mask_subset = residuals_subset < residual_threshold
n_inliers_subset = np.sum(inlier_mask_subset)
            # fewer inliers -> skip current random sample
if n_inliers_subset < n_inliers_best:
continue
if n_inliers_subset == 0:
raise ValueError("No inliers found, possible cause is "
"setting residual_threshold ({0}) too low.".format(
self.residual_threshold))
# extract inlier data set
inlier_idxs_subset = sample_idxs[inlier_mask_subset]
X_inlier_subset = X[inlier_idxs_subset]
y_inlier_subset = y[inlier_idxs_subset]
# score of inlier data set
score_subset = base_estimator.score(X_inlier_subset,
y_inlier_subset)
# same number of inliers but worse score -> skip current random
# sample
if (n_inliers_subset == n_inliers_best
and score_subset < score_best):
continue
# save current random sample as best sample
n_inliers_best = n_inliers_subset
score_best = score_subset
inlier_mask_best = inlier_mask_subset
X_inlier_best = X_inlier_subset
y_inlier_best = y_inlier_subset
# break if sufficient number of inliers or score is reached
if (n_inliers_best >= self.stop_n_inliers
or score_best >= self.stop_score
or self.n_trials_
>= _dynamic_max_trials(n_inliers_best, n_samples,
min_samples,
self.stop_probability)):
break
# if none of the iterations met the required criteria
if inlier_mask_best is None:
raise ValueError(
"RANSAC could not find valid consensus set, because"
" either the `residual_threshold` rejected all the samples or"
" `is_data_valid` and `is_model_valid` returned False for all"
" `max_trials` randomly ""chosen sub-samples. Consider "
"relaxing the ""constraints.")
# estimate final model using all inliers
base_estimator.fit(X_inlier_best, y_inlier_best)
self.estimator_ = base_estimator
self.inlier_mask_ = inlier_mask_best
return self
def predict(self, X):
"""Predict using the estimated model.
This is a wrapper for `estimator_.predict(X)`.
Parameters
----------
X : numpy array of shape [n_samples, n_features]
Returns
-------
y : array, shape = [n_samples] or [n_samples, n_targets]
Returns predicted values.
"""
check_is_fitted(self, 'estimator_')
return self.estimator_.predict(X)
def score(self, X, y):
"""Returns the score of the prediction.
This is a wrapper for `estimator_.score(X, y)`.
Parameters
----------
X : numpy array or sparse matrix of shape [n_samples, n_features]
Training data.
y : array, shape = [n_samples] or [n_samples, n_targets]
Target values.
Returns
-------
z : float
Score of the prediction.
"""
check_is_fitted(self, 'estimator_')
return self.estimator_.score(X, y)
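# ----------------------------------------------------------------------
# Minimal usage sketch (illustrative only; assumes the public import path
# ``sklearn.linear_model.RANSACRegressor`` and synthetic data):
#
#     import numpy as np
#     from sklearn.linear_model import RANSACRegressor
#
#     rng = np.random.RandomState(0)
#     X = np.arange(100, dtype=float)[:, None]
#     y = 3.0 * X.ravel() + rng.normal(scale=0.5, size=100)
#     y[::10] += 50.0                       # inject gross outliers
#
#     ransac = RANSACRegressor(residual_threshold=5.0, random_state=0)
#     ransac.fit(X, y)
#     print(ransac.estimator_.coef_)        # slope close to 3.0
#     print(ransac.inlier_mask_.sum())      # roughly 90 of 100 samples kept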
| bsd-3-clause |
HACKMESEU/DicomBrowser | dwilib/dwi/autoroi.py | 2 | 3796 | """Automatic ROI search."""
import numpy as np
from .types import AlgParams
ADCM_MIN = 0.00050680935535585281
ADCM_MAX = 0.0017784125828491648
def get_score_param(img, param):
"""Return parameter score of given ROI."""
if param.startswith('ADC'):
# return 1 - np.mean(img)
# return 1 / (np.mean(img) - 0.0008)
if np.mean(img) > 0:
return 1 / np.mean(img)
return 0
# NOTE The following min/max limit seems to make things worse.
# if (img < ADCM_MIN).any() or (img > ADCM_MAX).any():
# return 0
elif param.startswith('K'):
return np.mean(img) / 1000
elif param.startswith('score'):
return np.mean(img)
elif param == 'prostate_mask':
# Ban areas more than a certain amount outside of prostate.
if img.sum() / img.size > 0.20:
return 1
return -1e20
elif param == 'prostate_mask_strict':
# Ban areas even partly outside of prostate.
if img.all():
return 1
return -1e20
return 0 # Unknown parameter
def get_roi_scores(img, d, params):
"""Return array of all scores for each possible ROI of given dimension."""
shape = [img.shape[i]-d[i]+1 for i in range(3)] + [len(params)]
scores = np.empty(shape, dtype=np.float32)
for z, y, x, i in np.ndindex(scores.shape):
roi = img[z:z+d[0], y:y+d[1], x:x+d[2], i]
scores[z, y, x, i] = get_score_param(roi, params[i])
return scores
def scale_scores(scores):
"""Scale scores in-place."""
scores[...] /= scores[...].max()
# import sklearn.preprocessing
# shape = scores.shape
# a = scores.ravel()
# sklearn.preprocessing.scale(a, copy=False)
# a.shape = shape
# scores[...] = a
def get_scoremap(img, d, params, nrois):
"""Return array like original image, with scores of nrois best ROI's."""
scores = get_roi_scores(img, d, params)
for i in range(len(params)):
scale_scores(scores[..., i])
scores = np.sum(scores, axis=-1) # Sum scores parameter-wise.
indices = scores.ravel().argsort() # Sort ROI's by score.
indices = indices[-nrois:] # Select best ones.
indices = [np.unravel_index(i, scores.shape) for i in indices]
scoremap = np.zeros(img.shape[0:3] + (1,), dtype=np.float32)
for z, y, x in indices:
scoremap[z:z+d[0], y:y+d[1], x:x+d[2], 0] += scores[z, y, x]
return scoremap
def add_mask(img, mask):
"""Add mask to image as an extra parameter."""
m = mask.array.view()
m.shape += (1,)
return np.concatenate((img, m), axis=3)
def find_roi(img, roidim, params, prostate_mask=None, ap=None):
if ap is None:
ap = AlgParams(depthmin=2, depthmax=3, sidemin=10, sidemax=10,
nrois=500)
assert ap.depthmin <= ap.depthmax
assert ap.sidemin <= ap.sidemax
# Draw score map.
dims = [(j, i, i) for i in range(ap.sidemin, ap.sidemax+1)
for j in range(ap.depthmin, ap.depthmax+1)]
if prostate_mask:
img = add_mask(img, prostate_mask)
params = params + ['prostate_mask']
scoremaps = [get_scoremap(img, d, params, ap.nrois) for d in dims]
scoremap = sum(scoremaps)
# Find optimal ROI.
scoremap_params = ['score']
if prostate_mask:
scoremap = add_mask(scoremap, prostate_mask)
scoremap_params += ['prostate_mask_strict']
roimap = get_scoremap(scoremap, roidim, scoremap_params, 1)
# Get first nonzero position at each axis.
corner = [axis[0] for axis in roimap[..., 0].nonzero()]
# Convert to [(start, stop), ...] notation.
coords = [(x, x+d) for x, d in zip(corner, roidim)]
return dict(algparams=ap, scoremap=scoremap[..., 0], roi_corner=corner,
roi_coords=coords)
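# Illustrative sketch of calling the search on a random 4-D parameter map
# (shape: slices, rows, cols, params); the module path and parameter names
# below are assumptions for demonstration only:
#
#     import numpy as np
#     from dwi.autoroi import find_roi
#
#     img = np.random.rand(4, 40, 40, 2)          # e.g. ADC and K maps
#     result = find_roi(img, roidim=(2, 10, 10), params=['ADCm', 'K'])
#     print(result['roi_corner'])                 # [z, y, x] of the chosen ROI
#     print(result['roi_coords'])                 # [(z0, z1), (y0, y1), (x0, x1)]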
| gpl-3.0 |
sunzhxjs/JobGIS | lib/python2.7/site-packages/pandas/tests/test_graphics.py | 9 | 152089 | #!/usr/bin/env python
# coding: utf-8
import nose
import itertools
import os
import string
import warnings
from distutils.version import LooseVersion
from datetime import datetime, date
import pandas as pd
from pandas import (Series, DataFrame, MultiIndex, PeriodIndex, date_range,
bdate_range)
from pandas.compat import (range, lrange, StringIO, lmap, lzip, u, zip,
iteritems, OrderedDict, PY3)
from pandas.util.decorators import cache_readonly
import pandas.core.common as com
import pandas.util.testing as tm
from pandas.util.testing import ensure_clean
from pandas.core.config import set_option
import numpy as np
from numpy import random
from numpy.random import rand, randn
from numpy.testing import assert_allclose
from numpy.testing.decorators import slow
import pandas.tools.plotting as plotting
"""
These tests are for ``DataFrame.plot`` and ``Series.plot``.
Other plot methods such as ``.hist``, ``.boxplot`` and other miscellaneous
plots are tested in test_graphics_others.py
"""
def _skip_if_no_scipy_gaussian_kde():
try:
import scipy
from scipy.stats import gaussian_kde
except ImportError:
raise nose.SkipTest("scipy version doesn't support gaussian_kde")
def _ok_for_gaussian_kde(kind):
if kind in ['kde','density']:
try:
import scipy
from scipy.stats import gaussian_kde
except ImportError:
return False
return True
@tm.mplskip
class TestPlotBase(tm.TestCase):
def setUp(self):
import matplotlib as mpl
mpl.rcdefaults()
n = 100
with tm.RNGContext(42):
gender = tm.choice(['Male', 'Female'], size=n)
classroom = tm.choice(['A', 'B', 'C'], size=n)
self.hist_df = DataFrame({'gender': gender,
'classroom': classroom,
'height': random.normal(66, 4, size=n),
'weight': random.normal(161, 32, size=n),
'category': random.randint(4, size=n)})
self.mpl_le_1_2_1 = plotting._mpl_le_1_2_1()
self.mpl_ge_1_3_1 = plotting._mpl_ge_1_3_1()
self.mpl_ge_1_4_0 = plotting._mpl_ge_1_4_0()
self.mpl_ge_1_5_0 = plotting._mpl_ge_1_5_0()
if self.mpl_ge_1_4_0:
self.bp_n_objects = 7
else:
self.bp_n_objects = 8
if self.mpl_ge_1_5_0:
# 1.5 added PolyCollections to legend handler
# so we have twice as many items.
self.polycollection_factor = 2
else:
self.polycollection_factor = 1
def tearDown(self):
tm.close()
@cache_readonly
def plt(self):
import matplotlib.pyplot as plt
return plt
@cache_readonly
def colorconverter(self):
import matplotlib.colors as colors
return colors.colorConverter
def _check_legend_labels(self, axes, labels=None, visible=True):
"""
Check each axes has expected legend labels
Parameters
----------
axes : matplotlib Axes object, or its list-like
labels : list-like
expected legend labels
visible : bool
            expected legend visibility. Labels are checked only when visible is True.
"""
if visible and (labels is None):
raise ValueError('labels must be specified when visible is True')
axes = self._flatten_visible(axes)
for ax in axes:
if visible:
self.assertTrue(ax.get_legend() is not None)
self._check_text_labels(ax.get_legend().get_texts(), labels)
else:
self.assertTrue(ax.get_legend() is None)
def _check_data(self, xp, rs):
"""
Check each axes has identical lines
Parameters
----------
xp : matplotlib Axes object
rs : matplotlib Axes object
"""
xp_lines = xp.get_lines()
rs_lines = rs.get_lines()
def check_line(xpl, rsl):
xpdata = xpl.get_xydata()
rsdata = rsl.get_xydata()
assert_allclose(xpdata, rsdata)
self.assertEqual(len(xp_lines), len(rs_lines))
[check_line(xpl, rsl) for xpl, rsl in zip(xp_lines, rs_lines)]
tm.close()
def _check_visible(self, collections, visible=True):
"""
Check each artist is visible or not
Parameters
----------
collections : matplotlib Artist or its list-like
target Artist or its list or collection
visible : bool
expected visibility
"""
from matplotlib.collections import Collection
if not isinstance(collections, Collection) and not com.is_list_like(collections):
collections = [collections]
for patch in collections:
self.assertEqual(patch.get_visible(), visible)
def _get_colors_mapped(self, series, colors):
unique = series.unique()
        # the lengths of unique and colors can differ
        # depending on the slice value
mapped = dict(zip(unique, colors))
return [mapped[v] for v in series.values]
def _check_colors(self, collections, linecolors=None, facecolors=None,
mapping=None):
"""
Check each artist has expected line colors and face colors
Parameters
----------
collections : list-like
list or collection of target artist
linecolors : list-like which has the same length as collections
list of expected line colors
facecolors : list-like which has the same length as collections
list of expected face colors
mapping : Series
Series used for color grouping key
used for andrew_curves, parallel_coordinates, radviz test
"""
from matplotlib.lines import Line2D
from matplotlib.collections import Collection, PolyCollection
conv = self.colorconverter
if linecolors is not None:
if mapping is not None:
linecolors = self._get_colors_mapped(mapping, linecolors)
linecolors = linecolors[:len(collections)]
self.assertEqual(len(collections), len(linecolors))
for patch, color in zip(collections, linecolors):
if isinstance(patch, Line2D):
result = patch.get_color()
                # Line2D may contain a string color expression
result = conv.to_rgba(result)
elif isinstance(patch, PolyCollection):
result = tuple(patch.get_edgecolor()[0])
else:
result = patch.get_edgecolor()
expected = conv.to_rgba(color)
self.assertEqual(result, expected)
if facecolors is not None:
if mapping is not None:
facecolors = self._get_colors_mapped(mapping, facecolors)
facecolors = facecolors[:len(collections)]
self.assertEqual(len(collections), len(facecolors))
for patch, color in zip(collections, facecolors):
if isinstance(patch, Collection):
# returned as list of np.array
result = patch.get_facecolor()[0]
else:
result = patch.get_facecolor()
if isinstance(result, np.ndarray):
result = tuple(result)
expected = conv.to_rgba(color)
self.assertEqual(result, expected)
def _check_text_labels(self, texts, expected):
"""
Check each text has expected labels
Parameters
----------
texts : matplotlib Text object, or its list-like
target text, or its list
expected : str or list-like which has the same length as texts
expected text label, or its list
"""
if not com.is_list_like(texts):
self.assertEqual(texts.get_text(), expected)
else:
labels = [t.get_text() for t in texts]
self.assertEqual(len(labels), len(expected))
for l, e in zip(labels, expected):
self.assertEqual(l, e)
def _check_ticks_props(self, axes, xlabelsize=None, xrot=None,
ylabelsize=None, yrot=None):
"""
Check each axes has expected tick properties
Parameters
----------
axes : matplotlib Axes object, or its list-like
xlabelsize : number
expected xticks font size
xrot : number
expected xticks rotation
ylabelsize : number
expected yticks font size
yrot : number
expected yticks rotation
"""
from matplotlib.ticker import NullFormatter
axes = self._flatten_visible(axes)
for ax in axes:
if xlabelsize or xrot:
if isinstance(ax.xaxis.get_minor_formatter(), NullFormatter):
                    # If minor ticks have a NullFormatter, rot / fontsize are not retained
labels = ax.get_xticklabels()
else:
labels = ax.get_xticklabels() + ax.get_xticklabels(minor=True)
for label in labels:
if xlabelsize is not None:
self.assertAlmostEqual(label.get_fontsize(), xlabelsize)
if xrot is not None:
self.assertAlmostEqual(label.get_rotation(), xrot)
if ylabelsize or yrot:
if isinstance(ax.yaxis.get_minor_formatter(), NullFormatter):
labels = ax.get_yticklabels()
else:
labels = ax.get_yticklabels() + ax.get_yticklabels(minor=True)
for label in labels:
if ylabelsize is not None:
self.assertAlmostEqual(label.get_fontsize(), ylabelsize)
if yrot is not None:
self.assertAlmostEqual(label.get_rotation(), yrot)
def _check_ax_scales(self, axes, xaxis='linear', yaxis='linear'):
"""
Check each axes has expected scales
Parameters
----------
axes : matplotlib Axes object, or its list-like
xaxis : {'linear', 'log'}
expected xaxis scale
yaxis : {'linear', 'log'}
expected yaxis scale
"""
axes = self._flatten_visible(axes)
for ax in axes:
self.assertEqual(ax.xaxis.get_scale(), xaxis)
self.assertEqual(ax.yaxis.get_scale(), yaxis)
def _check_axes_shape(self, axes, axes_num=None, layout=None, figsize=(8.0, 6.0)):
"""
Check expected number of axes is drawn in expected layout
Parameters
----------
axes : matplotlib Axes object, or its list-like
axes_num : number
expected number of axes. Unnecessary axes should be set to invisible.
layout : tuple
            expected layout: (expected number of rows, columns)
figsize : tuple
expected figsize. default is matplotlib default
"""
visible_axes = self._flatten_visible(axes)
if axes_num is not None:
self.assertEqual(len(visible_axes), axes_num)
for ax in visible_axes:
# check something drawn on visible axes
self.assertTrue(len(ax.get_children()) > 0)
if layout is not None:
result = self._get_axes_layout(plotting._flatten(axes))
self.assertEqual(result, layout)
self.assert_numpy_array_equal(np.round(visible_axes[0].figure.get_size_inches()),
np.array(figsize))
def _get_axes_layout(self, axes):
x_set = set()
y_set = set()
for ax in axes:
# check axes coordinates to estimate layout
points = ax.get_position().get_points()
x_set.add(points[0][0])
y_set.add(points[0][1])
return (len(y_set), len(x_set))
def _flatten_visible(self, axes):
"""
Flatten axes, and filter only visible
Parameters
----------
axes : matplotlib Axes object, or its list-like
"""
axes = plotting._flatten(axes)
axes = [ax for ax in axes if ax.get_visible()]
return axes
def _check_has_errorbars(self, axes, xerr=0, yerr=0):
"""
Check axes has expected number of errorbars
Parameters
----------
axes : matplotlib Axes object, or its list-like
xerr : number
expected number of x errorbar
yerr : number
expected number of y errorbar
"""
axes = self._flatten_visible(axes)
for ax in axes:
containers = ax.containers
xerr_count = 0
yerr_count = 0
for c in containers:
has_xerr = getattr(c, 'has_xerr', False)
has_yerr = getattr(c, 'has_yerr', False)
if has_xerr:
xerr_count += 1
if has_yerr:
yerr_count += 1
self.assertEqual(xerr, xerr_count)
self.assertEqual(yerr, yerr_count)
def _check_box_return_type(self, returned, return_type, expected_keys=None,
check_ax_title=True):
"""
Check box returned type is correct
Parameters
----------
returned : object to be tested, returned from boxplot
return_type : str
return_type passed to boxplot
expected_keys : list-like, optional
group labels in subplot case. If not passed,
the function checks assuming boxplot uses single ax
check_ax_title : bool
Whether to check the ax.title is the same as expected_key
Intended to be checked by calling from ``boxplot``.
            Normal ``plot`` doesn't attach ``ax.title``, so this check must be disabled.
"""
from matplotlib.axes import Axes
types = {'dict': dict, 'axes': Axes, 'both': tuple}
if expected_keys is None:
# should be fixed when the returning default is changed
if return_type is None:
return_type = 'dict'
self.assertTrue(isinstance(returned, types[return_type]))
if return_type == 'both':
self.assertIsInstance(returned.ax, Axes)
self.assertIsInstance(returned.lines, dict)
else:
# should be fixed when the returning default is changed
if return_type is None:
for r in self._flatten_visible(returned):
self.assertIsInstance(r, Axes)
return
self.assertTrue(isinstance(returned, OrderedDict))
self.assertEqual(sorted(returned.keys()), sorted(expected_keys))
for key, value in iteritems(returned):
self.assertTrue(isinstance(value, types[return_type]))
# check returned dict has correct mapping
if return_type == 'axes':
if check_ax_title:
self.assertEqual(value.get_title(), key)
elif return_type == 'both':
if check_ax_title:
self.assertEqual(value.ax.get_title(), key)
self.assertIsInstance(value.ax, Axes)
self.assertIsInstance(value.lines, dict)
elif return_type == 'dict':
line = value['medians'][0]
if check_ax_title:
self.assertEqual(line.get_axes().get_title(), key)
else:
raise AssertionError
def _check_grid_settings(self, obj, kinds, kws={}):
# Make sure plot defaults to rcParams['axes.grid'] setting, GH 9792
import matplotlib as mpl
def is_grid_on():
xoff = all(not g.gridOn for g in self.plt.gca().xaxis.get_major_ticks())
yoff = all(not g.gridOn for g in self.plt.gca().yaxis.get_major_ticks())
return not(xoff and yoff)
spndx=1
for kind in kinds:
if not _ok_for_gaussian_kde(kind):
continue
self.plt.subplot(1,4*len(kinds),spndx); spndx+=1
mpl.rc('axes',grid=False)
obj.plot(kind=kind, **kws)
self.assertFalse(is_grid_on())
self.plt.subplot(1,4*len(kinds),spndx); spndx+=1
mpl.rc('axes',grid=True)
obj.plot(kind=kind, grid=False, **kws)
self.assertFalse(is_grid_on())
if kind != 'pie':
self.plt.subplot(1,4*len(kinds),spndx); spndx+=1
mpl.rc('axes',grid=True)
obj.plot(kind=kind, **kws)
self.assertTrue(is_grid_on())
self.plt.subplot(1,4*len(kinds),spndx); spndx+=1
mpl.rc('axes',grid=False)
obj.plot(kind=kind, grid=True, **kws)
self.assertTrue(is_grid_on())
def _maybe_unpack_cycler(self, rcParams, field='color'):
"""
Compat layer for MPL 1.5 change to color cycle
Before: plt.rcParams['axes.color_cycle'] -> ['b', 'g', 'r'...]
After : plt.rcParams['axes.prop_cycle'] -> cycler(...)
"""
if self.mpl_ge_1_5_0:
cyl = rcParams['axes.prop_cycle']
colors = [v[field] for v in cyl]
else:
colors = rcParams['axes.color_cycle']
return colors
@tm.mplskip
class TestSeriesPlots(TestPlotBase):
def setUp(self):
TestPlotBase.setUp(self)
import matplotlib as mpl
mpl.rcdefaults()
self.ts = tm.makeTimeSeries()
self.ts.name = 'ts'
self.series = tm.makeStringSeries()
self.series.name = 'series'
self.iseries = tm.makePeriodSeries()
self.iseries.name = 'iseries'
@slow
def test_plot(self):
_check_plot_works(self.ts.plot, label='foo')
_check_plot_works(self.ts.plot, use_index=False)
axes = _check_plot_works(self.ts.plot, rot=0)
self._check_ticks_props(axes, xrot=0)
ax = _check_plot_works(self.ts.plot, style='.', logy=True)
self._check_ax_scales(ax, yaxis='log')
ax = _check_plot_works(self.ts.plot, style='.', logx=True)
self._check_ax_scales(ax, xaxis='log')
ax = _check_plot_works(self.ts.plot, style='.', loglog=True)
self._check_ax_scales(ax, xaxis='log', yaxis='log')
_check_plot_works(self.ts[:10].plot.bar)
_check_plot_works(self.ts.plot.area, stacked=False)
_check_plot_works(self.iseries.plot)
for kind in ['line', 'bar', 'barh', 'kde', 'hist', 'box']:
if not _ok_for_gaussian_kde(kind):
continue
_check_plot_works(self.series[:5].plot, kind=kind)
_check_plot_works(self.series[:10].plot.barh)
ax = _check_plot_works(Series(randn(10)).plot.bar, color='black')
self._check_colors([ax.patches[0]], facecolors=['black'])
# GH 6951
ax = _check_plot_works(self.ts.plot, subplots=True)
self._check_axes_shape(ax, axes_num=1, layout=(1, 1))
ax = _check_plot_works(self.ts.plot, subplots=True, layout=(-1, 1))
self._check_axes_shape(ax, axes_num=1, layout=(1, 1))
ax = _check_plot_works(self.ts.plot, subplots=True, layout=(1, -1))
self._check_axes_shape(ax, axes_num=1, layout=(1, 1))
@slow
def test_plot_figsize_and_title(self):
# figsize and title
ax = self.series.plot(title='Test', figsize=(16, 8))
self._check_text_labels(ax.title, 'Test')
self._check_axes_shape(ax, axes_num=1, layout=(1, 1), figsize=(16, 8))
def test_dont_modify_rcParams(self):
# GH 8242
if self.mpl_ge_1_5_0:
key = 'axes.prop_cycle'
else:
key = 'axes.color_cycle'
colors = self.plt.rcParams[key]
Series([1, 2, 3]).plot()
self.assertEqual(colors, self.plt.rcParams[key])
def test_ts_line_lim(self):
ax = self.ts.plot()
xmin, xmax = ax.get_xlim()
lines = ax.get_lines()
self.assertEqual(xmin, lines[0].get_data(orig=False)[0][0])
self.assertEqual(xmax, lines[0].get_data(orig=False)[0][-1])
tm.close()
ax = self.ts.plot(secondary_y=True)
xmin, xmax = ax.get_xlim()
lines = ax.get_lines()
self.assertEqual(xmin, lines[0].get_data(orig=False)[0][0])
self.assertEqual(xmax, lines[0].get_data(orig=False)[0][-1])
def test_ts_area_lim(self):
ax = self.ts.plot.area(stacked=False)
xmin, xmax = ax.get_xlim()
line = ax.get_lines()[0].get_data(orig=False)[0]
self.assertEqual(xmin, line[0])
self.assertEqual(xmax, line[-1])
tm.close()
# GH 7471
ax = self.ts.plot.area(stacked=False, x_compat=True)
xmin, xmax = ax.get_xlim()
line = ax.get_lines()[0].get_data(orig=False)[0]
self.assertEqual(xmin, line[0])
self.assertEqual(xmax, line[-1])
tm.close()
tz_ts = self.ts.copy()
tz_ts.index = tz_ts.tz_localize('GMT').tz_convert('CET')
ax = tz_ts.plot.area(stacked=False, x_compat=True)
xmin, xmax = ax.get_xlim()
line = ax.get_lines()[0].get_data(orig=False)[0]
self.assertEqual(xmin, line[0])
self.assertEqual(xmax, line[-1])
tm.close()
ax = tz_ts.plot.area(stacked=False, secondary_y=True)
xmin, xmax = ax.get_xlim()
line = ax.get_lines()[0].get_data(orig=False)[0]
self.assertEqual(xmin, line[0])
self.assertEqual(xmax, line[-1])
def test_label(self):
s = Series([1, 2])
ax = s.plot(label='LABEL', legend=True)
self._check_legend_labels(ax, labels=['LABEL'])
self.plt.close()
ax = s.plot(legend=True)
self._check_legend_labels(ax, labels=['None'])
self.plt.close()
# get name from index
s.name = 'NAME'
ax = s.plot(legend=True)
self._check_legend_labels(ax, labels=['NAME'])
self.plt.close()
# override the default
ax = s.plot(legend=True, label='LABEL')
self._check_legend_labels(ax, labels=['LABEL'])
self.plt.close()
        # Add label info, but don't draw
ax = s.plot(legend=False, label='LABEL')
self.assertEqual(ax.get_legend(), None) # Hasn't been drawn
ax.legend() # draw it
self._check_legend_labels(ax, labels=['LABEL'])
def test_line_area_nan_series(self):
values = [1, 2, np.nan, 3]
s = Series(values)
ts = Series(values, index=tm.makeDateIndex(k=4))
for d in [s, ts]:
ax = _check_plot_works(d.plot)
masked = ax.lines[0].get_ydata()
# remove nan for comparison purpose
self.assert_numpy_array_equal(np.delete(masked.data, 2), np.array([1, 2, 3]))
self.assert_numpy_array_equal(masked.mask, np.array([False, False, True, False]))
expected = np.array([1, 2, 0, 3])
ax = _check_plot_works(d.plot, stacked=True)
self.assert_numpy_array_equal(ax.lines[0].get_ydata(), expected)
ax = _check_plot_works(d.plot.area)
self.assert_numpy_array_equal(ax.lines[0].get_ydata(), expected)
ax = _check_plot_works(d.plot.area, stacked=False)
self.assert_numpy_array_equal(ax.lines[0].get_ydata(), expected)
def test_line_use_index_false(self):
s = Series([1, 2, 3], index=['a', 'b', 'c'])
s.index.name = 'The Index'
ax = s.plot(use_index=False)
label = ax.get_xlabel()
self.assertEqual(label, '')
ax2 = s.plot.bar(use_index=False)
label2 = ax2.get_xlabel()
self.assertEqual(label2, '')
@slow
def test_bar_log(self):
expected = np.array([1., 10., 100., 1000.])
if not self.mpl_le_1_2_1:
expected = np.hstack((.1, expected, 1e4))
ax = Series([200, 500]).plot.bar(log=True)
tm.assert_numpy_array_equal(ax.yaxis.get_ticklocs(), expected)
tm.close()
ax = Series([200, 500]).plot.barh(log=True)
tm.assert_numpy_array_equal(ax.xaxis.get_ticklocs(), expected)
tm.close()
# GH 9905
expected = np.array([1.0e-03, 1.0e-02, 1.0e-01, 1.0e+00])
if not self.mpl_le_1_2_1:
expected = np.hstack((1.0e-04, expected, 1.0e+01))
ax = Series([0.1, 0.01, 0.001]).plot(log=True, kind='bar')
tm.assert_numpy_array_equal(ax.get_ylim(), (0.001, 0.10000000000000001))
tm.assert_numpy_array_equal(ax.yaxis.get_ticklocs(), expected)
tm.close()
ax = Series([0.1, 0.01, 0.001]).plot(log=True, kind='barh')
tm.assert_numpy_array_equal(ax.get_xlim(), (0.001, 0.10000000000000001))
tm.assert_numpy_array_equal(ax.xaxis.get_ticklocs(), expected)
@slow
def test_bar_ignore_index(self):
df = Series([1, 2, 3, 4], index=['a', 'b', 'c', 'd'])
ax = df.plot.bar(use_index=False)
self._check_text_labels(ax.get_xticklabels(), ['0', '1', '2', '3'])
def test_rotation(self):
df = DataFrame(randn(5, 5))
# Default rot 0
axes = df.plot()
self._check_ticks_props(axes, xrot=0)
axes = df.plot(rot=30)
self._check_ticks_props(axes, xrot=30)
def test_irregular_datetime(self):
rng = date_range('1/1/2000', '3/1/2000')
rng = rng[[0, 1, 2, 3, 5, 9, 10, 11, 12]]
ser = Series(randn(len(rng)), rng)
ax = ser.plot()
xp = datetime(1999, 1, 1).toordinal()
ax.set_xlim('1/1/1999', '1/1/2001')
self.assertEqual(xp, ax.get_xlim()[0])
@slow
def test_pie_series(self):
        # if the sum of values is less than 1.0, pie handles them as fractions and draws a semicircle.
series = Series(np.random.randint(1, 5),
index=['a', 'b', 'c', 'd', 'e'], name='YLABEL')
ax = _check_plot_works(series.plot.pie)
self._check_text_labels(ax.texts, series.index)
self.assertEqual(ax.get_ylabel(), 'YLABEL')
# without wedge labels
ax = _check_plot_works(series.plot.pie, labels=None)
self._check_text_labels(ax.texts, [''] * 5)
# with less colors than elements
color_args = ['r', 'g', 'b']
ax = _check_plot_works(series.plot.pie, colors=color_args)
color_expected = ['r', 'g', 'b', 'r', 'g']
self._check_colors(ax.patches, facecolors=color_expected)
# with labels and colors
labels = ['A', 'B', 'C', 'D', 'E']
color_args = ['r', 'g', 'b', 'c', 'm']
ax = _check_plot_works(series.plot.pie, labels=labels, colors=color_args)
self._check_text_labels(ax.texts, labels)
self._check_colors(ax.patches, facecolors=color_args)
# with autopct and fontsize
ax = _check_plot_works(series.plot.pie, colors=color_args,
autopct='%.2f', fontsize=7)
pcts = ['{0:.2f}'.format(s * 100) for s in series.values / float(series.sum())]
iters = [iter(series.index), iter(pcts)]
expected_texts = list(next(it) for it in itertools.cycle(iters))
self._check_text_labels(ax.texts, expected_texts)
for t in ax.texts:
self.assertEqual(t.get_fontsize(), 7)
# includes negative value
with tm.assertRaises(ValueError):
series = Series([1, 2, 0, 4, -1], index=['a', 'b', 'c', 'd', 'e'])
series.plot.pie()
# includes nan
series = Series([1, 2, np.nan, 4],
index=['a', 'b', 'c', 'd'], name='YLABEL')
ax = _check_plot_works(series.plot.pie)
self._check_text_labels(ax.texts, ['a', 'b', '', 'd'])
def test_pie_nan(self):
s = Series([1, np.nan, 1, 1])
ax = s.plot.pie(legend=True)
expected = ['0', '', '2', '3']
result = [x.get_text() for x in ax.texts]
self.assertEqual(result, expected)
@slow
def test_hist_df_kwargs(self):
df = DataFrame(np.random.randn(10, 2))
ax = df.plot.hist(bins=5)
self.assertEqual(len(ax.patches), 10)
@slow
def test_hist_df_with_nonnumerics(self):
# GH 9853
with tm.RNGContext(1):
df = DataFrame(np.random.randn(10, 4), columns=['A', 'B', 'C', 'D'])
df['E'] = ['x', 'y'] * 5
ax = df.plot.hist(bins=5)
self.assertEqual(len(ax.patches), 20)
ax = df.plot.hist() # bins=10
self.assertEqual(len(ax.patches), 40)
@slow
def test_hist_legacy(self):
_check_plot_works(self.ts.hist)
_check_plot_works(self.ts.hist, grid=False)
_check_plot_works(self.ts.hist, figsize=(8, 10))
_check_plot_works(self.ts.hist, filterwarnings='ignore', by=self.ts.index.month)
_check_plot_works(self.ts.hist, filterwarnings='ignore', by=self.ts.index.month, bins=5)
fig, ax = self.plt.subplots(1, 1)
_check_plot_works(self.ts.hist, ax=ax)
_check_plot_works(self.ts.hist, ax=ax, figure=fig)
_check_plot_works(self.ts.hist, figure=fig)
tm.close()
fig, (ax1, ax2) = self.plt.subplots(1, 2)
_check_plot_works(self.ts.hist, figure=fig, ax=ax1)
_check_plot_works(self.ts.hist, figure=fig, ax=ax2)
with tm.assertRaises(ValueError):
self.ts.hist(by=self.ts.index, figure=fig)
@slow
def test_hist_bins_legacy(self):
df = DataFrame(np.random.randn(10, 2))
ax = df.hist(bins=2)[0][0]
self.assertEqual(len(ax.patches), 2)
@slow
def test_hist_layout(self):
df = self.hist_df
with tm.assertRaises(ValueError):
df.height.hist(layout=(1, 1))
with tm.assertRaises(ValueError):
df.height.hist(layout=[1, 1])
@slow
def test_hist_layout_with_by(self):
df = self.hist_df
axes = _check_plot_works(df.height.hist, filterwarnings='ignore',
by=df.gender, layout=(2, 1))
self._check_axes_shape(axes, axes_num=2, layout=(2, 1))
axes = _check_plot_works(df.height.hist, filterwarnings='ignore',
by=df.gender, layout=(3, -1))
self._check_axes_shape(axes, axes_num=2, layout=(3, 1))
axes = _check_plot_works(df.height.hist, filterwarnings='ignore',
by=df.category, layout=(4, 1))
self._check_axes_shape(axes, axes_num=4, layout=(4, 1))
axes = _check_plot_works(df.height.hist, filterwarnings='ignore',
by=df.category, layout=(2, -1))
self._check_axes_shape(axes, axes_num=4, layout=(2, 2))
axes = _check_plot_works(df.height.hist, filterwarnings='ignore',
by=df.category, layout=(3, -1))
self._check_axes_shape(axes, axes_num=4, layout=(3, 2))
axes = _check_plot_works(df.height.hist, filterwarnings='ignore',
by=df.category, layout=(-1, 4))
self._check_axes_shape(axes, axes_num=4, layout=(1, 4))
axes = _check_plot_works(df.height.hist, filterwarnings='ignore',
by=df.classroom, layout=(2, 2))
self._check_axes_shape(axes, axes_num=3, layout=(2, 2))
axes = df.height.hist(by=df.category, layout=(4, 2), figsize=(12, 7))
self._check_axes_shape(axes, axes_num=4, layout=(4, 2), figsize=(12, 7))
@slow
def test_hist_no_overlap(self):
from matplotlib.pyplot import subplot, gcf
x = Series(randn(2))
y = Series(randn(2))
subplot(121)
x.hist()
subplot(122)
y.hist()
fig = gcf()
axes = fig.get_axes()
self.assertEqual(len(axes), 2)
@slow
def test_hist_secondary_legend(self):
# GH 9610
df = DataFrame(np.random.randn(30, 4), columns=list('abcd'))
# primary -> secondary
ax = df['a'].plot.hist(legend=True)
df['b'].plot.hist(ax=ax, legend=True, secondary_y=True)
        # both legends are drawn on left ax
        # left and right axes must be visible
self._check_legend_labels(ax, labels=['a', 'b (right)'])
self.assertTrue(ax.get_yaxis().get_visible())
self.assertTrue(ax.right_ax.get_yaxis().get_visible())
tm.close()
# secondary -> secondary
ax = df['a'].plot.hist(legend=True, secondary_y=True)
df['b'].plot.hist(ax=ax, legend=True, secondary_y=True)
        # both legends are drawn on left ax
# left axis must be invisible, right axis must be visible
self._check_legend_labels(ax.left_ax, labels=['a (right)', 'b (right)'])
self.assertFalse(ax.left_ax.get_yaxis().get_visible())
self.assertTrue(ax.get_yaxis().get_visible())
tm.close()
# secondary -> primary
ax = df['a'].plot.hist(legend=True, secondary_y=True)
# right axes is returned
df['b'].plot.hist(ax=ax, legend=True)
        # both legends are drawn on left ax
        # left and right axes must be visible
self._check_legend_labels(ax.left_ax, labels=['a (right)', 'b'])
self.assertTrue(ax.left_ax.get_yaxis().get_visible())
self.assertTrue(ax.get_yaxis().get_visible())
tm.close()
@slow
def test_df_series_secondary_legend(self):
# GH 9779
df = DataFrame(np.random.randn(30, 3), columns=list('abc'))
s = Series(np.random.randn(30), name='x')
# primary -> secondary (without passing ax)
ax = df.plot()
s.plot(legend=True, secondary_y=True)
        # both legends are drawn on left ax
        # left and right axes must be visible
self._check_legend_labels(ax, labels=['a', 'b', 'c', 'x (right)'])
self.assertTrue(ax.get_yaxis().get_visible())
self.assertTrue(ax.right_ax.get_yaxis().get_visible())
tm.close()
# primary -> secondary (with passing ax)
ax = df.plot()
s.plot(ax=ax, legend=True, secondary_y=True)
        # both legends are drawn on left ax
        # left and right axes must be visible
self._check_legend_labels(ax, labels=['a', 'b', 'c', 'x (right)'])
self.assertTrue(ax.get_yaxis().get_visible())
self.assertTrue(ax.right_ax.get_yaxis().get_visible())
tm.close()
        # secondary -> secondary (without passing ax)
ax = df.plot(secondary_y=True)
s.plot(legend=True, secondary_y=True)
        # both legends are drawn on left ax
# left axis must be invisible and right axis must be visible
expected = ['a (right)', 'b (right)', 'c (right)', 'x (right)']
self._check_legend_labels(ax.left_ax, labels=expected)
self.assertFalse(ax.left_ax.get_yaxis().get_visible())
self.assertTrue(ax.get_yaxis().get_visible())
tm.close()
# secondary -> secondary (with passing ax)
ax = df.plot(secondary_y=True)
s.plot(ax=ax, legend=True, secondary_y=True)
        # both legends are drawn on left ax
# left axis must be invisible and right axis must be visible
expected = ['a (right)', 'b (right)', 'c (right)', 'x (right)']
self._check_legend_labels(ax.left_ax, expected)
self.assertFalse(ax.left_ax.get_yaxis().get_visible())
self.assertTrue(ax.get_yaxis().get_visible())
tm.close()
# secondary -> secondary (with passing ax)
ax = df.plot(secondary_y=True, mark_right=False)
s.plot(ax=ax, legend=True, secondary_y=True)
        # both legends are drawn on left ax
# left axis must be invisible and right axis must be visible
expected = ['a', 'b', 'c', 'x (right)']
self._check_legend_labels(ax.left_ax, expected)
self.assertFalse(ax.left_ax.get_yaxis().get_visible())
self.assertTrue(ax.get_yaxis().get_visible())
tm.close()
@slow
def test_plot_fails_with_dupe_color_and_style(self):
x = Series(randn(2))
with tm.assertRaises(ValueError):
x.plot(style='k--', color='k')
@slow
def test_hist_kde(self):
ax = self.ts.plot.hist(logy=True)
self._check_ax_scales(ax, yaxis='log')
xlabels = ax.get_xticklabels()
# ticks are values, thus ticklabels are blank
self._check_text_labels(xlabels, [''] * len(xlabels))
ylabels = ax.get_yticklabels()
self._check_text_labels(ylabels, [''] * len(ylabels))
tm._skip_if_no_scipy()
_skip_if_no_scipy_gaussian_kde()
_check_plot_works(self.ts.plot.kde)
_check_plot_works(self.ts.plot.density)
ax = self.ts.plot.kde(logy=True)
self._check_ax_scales(ax, yaxis='log')
xlabels = ax.get_xticklabels()
self._check_text_labels(xlabels, [''] * len(xlabels))
ylabels = ax.get_yticklabels()
self._check_text_labels(ylabels, [''] * len(ylabels))
@slow
def test_kde_kwargs(self):
tm._skip_if_no_scipy()
_skip_if_no_scipy_gaussian_kde()
from numpy import linspace
_check_plot_works(self.ts.plot.kde, bw_method=.5, ind=linspace(-100,100,20))
_check_plot_works(self.ts.plot.density, bw_method=.5, ind=linspace(-100,100,20))
ax = self.ts.plot.kde(logy=True, bw_method=.5, ind=linspace(-100,100,20))
self._check_ax_scales(ax, yaxis='log')
self._check_text_labels(ax.yaxis.get_label(), 'Density')
@slow
def test_kde_missing_vals(self):
tm._skip_if_no_scipy()
_skip_if_no_scipy_gaussian_kde()
s = Series(np.random.uniform(size=50))
s[0] = np.nan
ax = _check_plot_works(s.plot.kde)
@slow
def test_hist_kwargs(self):
ax = self.ts.plot.hist(bins=5)
self.assertEqual(len(ax.patches), 5)
self._check_text_labels(ax.yaxis.get_label(), 'Frequency')
tm.close()
if self.mpl_ge_1_3_1:
ax = self.ts.plot.hist(orientation='horizontal')
self._check_text_labels(ax.xaxis.get_label(), 'Frequency')
tm.close()
ax = self.ts.plot.hist(align='left', stacked=True)
tm.close()
@slow
def test_hist_kde_color(self):
ax = self.ts.plot.hist(logy=True, bins=10, color='b')
self._check_ax_scales(ax, yaxis='log')
self.assertEqual(len(ax.patches), 10)
self._check_colors(ax.patches, facecolors=['b'] * 10)
tm._skip_if_no_scipy()
_skip_if_no_scipy_gaussian_kde()
ax = self.ts.plot.kde(logy=True, color='r')
self._check_ax_scales(ax, yaxis='log')
lines = ax.get_lines()
self.assertEqual(len(lines), 1)
self._check_colors(lines, ['r'])
@slow
def test_boxplot_series(self):
ax = self.ts.plot.box(logy=True)
self._check_ax_scales(ax, yaxis='log')
xlabels = ax.get_xticklabels()
self._check_text_labels(xlabels, [self.ts.name])
ylabels = ax.get_yticklabels()
self._check_text_labels(ylabels, [''] * len(ylabels))
@slow
def test_kind_both_ways(self):
s = Series(range(3))
for kind in plotting._common_kinds + plotting._series_kinds:
if not _ok_for_gaussian_kde(kind):
continue
s.plot(kind=kind)
getattr(s.plot, kind)()
@slow
def test_invalid_plot_data(self):
s = Series(list('abcd'))
for kind in plotting._common_kinds:
if not _ok_for_gaussian_kde(kind):
continue
with tm.assertRaises(TypeError):
s.plot(kind=kind)
@slow
def test_valid_object_plot(self):
s = Series(lrange(10), dtype=object)
for kind in plotting._common_kinds:
if not _ok_for_gaussian_kde(kind):
continue
_check_plot_works(s.plot, kind=kind)
def test_partially_invalid_plot_data(self):
s = Series(['a', 'b', 1.0, 2])
for kind in plotting._common_kinds:
if not _ok_for_gaussian_kde(kind):
continue
with tm.assertRaises(TypeError):
s.plot(kind=kind)
def test_invalid_kind(self):
s = Series([1, 2])
with tm.assertRaises(ValueError):
s.plot(kind='aasdf')
@slow
def test_dup_datetime_index_plot(self):
dr1 = date_range('1/1/2009', periods=4)
dr2 = date_range('1/2/2009', periods=4)
index = dr1.append(dr2)
values = randn(index.size)
s = Series(values, index=index)
_check_plot_works(s.plot)
@slow
def test_errorbar_plot(self):
s = Series(np.arange(10), name='x')
s_err = np.random.randn(10)
d_err = DataFrame(randn(10, 2), index=s.index, columns=['x', 'y'])
# test line and bar plots
kinds = ['line', 'bar']
for kind in kinds:
ax = _check_plot_works(s.plot, yerr=Series(s_err), kind=kind)
self._check_has_errorbars(ax, xerr=0, yerr=1)
ax = _check_plot_works(s.plot, yerr=s_err, kind=kind)
self._check_has_errorbars(ax, xerr=0, yerr=1)
ax = _check_plot_works(s.plot, yerr=s_err.tolist(), kind=kind)
self._check_has_errorbars(ax, xerr=0, yerr=1)
ax = _check_plot_works(s.plot, yerr=d_err, kind=kind)
self._check_has_errorbars(ax, xerr=0, yerr=1)
ax = _check_plot_works(s.plot, xerr=0.2, yerr=0.2, kind=kind)
self._check_has_errorbars(ax, xerr=1, yerr=1)
ax = _check_plot_works(s.plot, xerr=s_err)
self._check_has_errorbars(ax, xerr=1, yerr=0)
# test time series plotting
ix = date_range('1/1/2000', '1/1/2001', freq='M')
ts = Series(np.arange(12), index=ix, name='x')
ts_err = Series(np.random.randn(12), index=ix)
td_err = DataFrame(randn(12, 2), index=ix, columns=['x', 'y'])
ax = _check_plot_works(ts.plot, yerr=ts_err)
self._check_has_errorbars(ax, xerr=0, yerr=1)
ax = _check_plot_works(ts.plot, yerr=td_err)
self._check_has_errorbars(ax, xerr=0, yerr=1)
# check incorrect lengths and types
with tm.assertRaises(ValueError):
s.plot(yerr=np.arange(11))
s_err = ['zzz']*10
# in mpl 1.5+ this is a TypeError
with tm.assertRaises((ValueError, TypeError)):
s.plot(yerr=s_err)
def test_table(self):
_check_plot_works(self.series.plot, table=True)
_check_plot_works(self.series.plot, table=self.series)
@slow
def test_series_grid_settings(self):
# Make sure plot defaults to rcParams['axes.grid'] setting, GH 9792
self._check_grid_settings(Series([1,2,3]),
plotting._series_kinds + plotting._common_kinds)
@slow
def test_standard_colors(self):
for c in ['r', 'red', 'green', '#FF0000']:
result = plotting._get_standard_colors(1, color=c)
self.assertEqual(result, [c])
result = plotting._get_standard_colors(1, color=[c])
self.assertEqual(result, [c])
result = plotting._get_standard_colors(3, color=c)
self.assertEqual(result, [c] * 3)
result = plotting._get_standard_colors(3, color=[c])
self.assertEqual(result, [c] * 3)
@slow
def test_standard_colors_all(self):
import matplotlib.colors as colors
# multiple colors like mediumaquamarine
for c in colors.cnames:
result = plotting._get_standard_colors(num_colors=1, color=c)
self.assertEqual(result, [c])
result = plotting._get_standard_colors(num_colors=1, color=[c])
self.assertEqual(result, [c])
result = plotting._get_standard_colors(num_colors=3, color=c)
self.assertEqual(result, [c] * 3)
result = plotting._get_standard_colors(num_colors=3, color=[c])
self.assertEqual(result, [c] * 3)
# single letter colors like k
for c in colors.ColorConverter.colors:
result = plotting._get_standard_colors(num_colors=1, color=c)
self.assertEqual(result, [c])
result = plotting._get_standard_colors(num_colors=1, color=[c])
self.assertEqual(result, [c])
result = plotting._get_standard_colors(num_colors=3, color=c)
self.assertEqual(result, [c] * 3)
result = plotting._get_standard_colors(num_colors=3, color=[c])
self.assertEqual(result, [c] * 3)
def test_series_plot_color_kwargs(self):
# GH1890
ax = Series(np.arange(12) + 1).plot(color='green')
self._check_colors(ax.get_lines(), linecolors=['green'])
def test_time_series_plot_color_kwargs(self):
# #1890
ax = Series(np.arange(12) + 1, index=date_range(
'1/1/2000', periods=12)).plot(color='green')
self._check_colors(ax.get_lines(), linecolors=['green'])
def test_time_series_plot_color_with_empty_kwargs(self):
import matplotlib as mpl
if self.mpl_ge_1_5_0:
def_colors = self._maybe_unpack_cycler(mpl.rcParams)
else:
def_colors = mpl.rcParams['axes.color_cycle']
index = date_range('1/1/2000', periods=12)
s = Series(np.arange(1, 13), index=index)
ncolors = 3
for i in range(ncolors):
ax = s.plot()
self._check_colors(ax.get_lines(), linecolors=def_colors[:ncolors])
def test_xticklabels(self):
# GH11529
s = Series(np.arange(10), index=['P%02d' % i for i in range(10)])
ax = s.plot(xticks=[0,3,5,9])
exp = ['P%02d' % i for i in [0,3,5,9]]
self._check_text_labels(ax.get_xticklabels(), exp)
@tm.mplskip
class TestDataFramePlots(TestPlotBase):
def setUp(self):
TestPlotBase.setUp(self)
import matplotlib as mpl
mpl.rcdefaults()
self.tdf = tm.makeTimeDataFrame()
self.hexbin_df = DataFrame({"A": np.random.uniform(size=20),
"B": np.random.uniform(size=20),
"C": np.arange(20) + np.random.uniform(size=20)})
from pandas import read_csv
path = os.path.join(curpath(), 'data', 'iris.csv')
self.iris = read_csv(path)
@slow
def test_plot(self):
df = self.tdf
_check_plot_works(df.plot, filterwarnings='ignore', grid=False)
axes = _check_plot_works(df.plot, filterwarnings='ignore', subplots=True)
self._check_axes_shape(axes, axes_num=4, layout=(4, 1))
axes = _check_plot_works(df.plot, filterwarnings='ignore',
subplots=True, layout=(-1, 2))
self._check_axes_shape(axes, axes_num=4, layout=(2, 2))
axes = _check_plot_works(df.plot, filterwarnings='ignore',
subplots=True, use_index=False)
self._check_axes_shape(axes, axes_num=4, layout=(4, 1))
df = DataFrame({'x': [1, 2], 'y': [3, 4]})
with tm.assertRaises(TypeError):
df.plot.line(blarg=True)
df = DataFrame(np.random.rand(10, 3),
index=list(string.ascii_letters[:10]))
_check_plot_works(df.plot, use_index=True)
_check_plot_works(df.plot, sort_columns=False)
_check_plot_works(df.plot, yticks=[1, 5, 10])
_check_plot_works(df.plot, xticks=[1, 5, 10])
_check_plot_works(df.plot, ylim=(-100, 100), xlim=(-100, 100))
_check_plot_works(df.plot, filterwarnings='ignore', subplots=True, title='blah')
# We have to redo it here because _check_plot_works does two plots, once without an ax
        # kwarg and once with an ax kwarg, and the new sharex behaviour does not remove the
# visibility of the latter axis (as ax is present).
# see: https://github.com/pydata/pandas/issues/9737
axes = df.plot(subplots=True, title='blah')
self._check_axes_shape(axes, axes_num=3, layout=(3, 1))
#axes[0].figure.savefig("test.png")
for ax in axes[:2]:
self._check_visible(ax.xaxis) # xaxis must be visible for grid
self._check_visible(ax.get_xticklabels(), visible=False)
self._check_visible(ax.get_xticklabels(minor=True), visible=False)
self._check_visible([ax.xaxis.get_label()], visible=False)
for ax in [axes[2]]:
self._check_visible(ax.xaxis)
self._check_visible(ax.get_xticklabels())
self._check_visible([ax.xaxis.get_label()])
self._check_ticks_props(ax, xrot=0)
_check_plot_works(df.plot, title='blah')
tuples = lzip(string.ascii_letters[:10], range(10))
df = DataFrame(np.random.rand(10, 3),
index=MultiIndex.from_tuples(tuples))
_check_plot_works(df.plot, use_index=True)
# unicode
index = MultiIndex.from_tuples([(u('\u03b1'), 0),
(u('\u03b1'), 1),
(u('\u03b2'), 2),
(u('\u03b2'), 3),
(u('\u03b3'), 4),
(u('\u03b3'), 5),
(u('\u03b4'), 6),
(u('\u03b4'), 7)], names=['i0', 'i1'])
columns = MultiIndex.from_tuples([('bar', u('\u0394')),
('bar', u('\u0395'))], names=['c0',
'c1'])
df = DataFrame(np.random.randint(0, 10, (8, 2)),
columns=columns,
index=index)
_check_plot_works(df.plot, title=u('\u03A3'))
# GH 6951
# Test with single column
df = DataFrame({'x': np.random.rand(10)})
axes = _check_plot_works(df.plot.bar, subplots=True)
self._check_axes_shape(axes, axes_num=1, layout=(1, 1))
axes = _check_plot_works(df.plot.bar, subplots=True,
layout=(-1, 1))
self._check_axes_shape(axes, axes_num=1, layout=(1, 1))
# When ax is supplied and required number of axes is 1,
# passed ax should be used:
fig, ax = self.plt.subplots()
axes = df.plot.bar(subplots=True, ax=ax)
self.assertEqual(len(axes), 1)
if self.mpl_ge_1_5_0:
result = ax.axes
else:
result = ax.get_axes() # deprecated
self.assertIs(result, axes[0])
def test_color_and_style_arguments(self):
df = DataFrame({'x': [1, 2], 'y': [3, 4]})
# passing both 'color' and 'style' arguments should be allowed
# if there is no color symbol in the style strings:
ax = df.plot(color = ['red', 'black'], style = ['-', '--'])
# check that the linestyles are correctly set:
linestyle = [line.get_linestyle() for line in ax.lines]
self.assertEqual(linestyle, ['-', '--'])
# check that the colors are correctly set:
color = [line.get_color() for line in ax.lines]
self.assertEqual(color, ['red', 'black'])
# passing both 'color' and 'style' arguments should not be allowed
# if there is a color symbol in the style strings:
with tm.assertRaises(ValueError):
df.plot(color = ['red', 'black'], style = ['k-', 'r--'])
def test_nonnumeric_exclude(self):
df = DataFrame({'A': ["x", "y", "z"], 'B': [1, 2, 3]})
ax = df.plot()
self.assertEqual(len(ax.get_lines()), 1) # B was plotted
@slow
def test_implicit_label(self):
df = DataFrame(randn(10, 3), columns=['a', 'b', 'c'])
ax = df.plot(x='a', y='b')
self._check_text_labels(ax.xaxis.get_label(), 'a')
@slow
def test_donot_overwrite_index_name(self):
# GH 8494
df = DataFrame(randn(2, 2), columns=['a', 'b'])
df.index.name = 'NAME'
df.plot(y='b', label='LABEL')
self.assertEqual(df.index.name, 'NAME')
@slow
def test_plot_xy(self):
# columns.inferred_type == 'string'
df = self.tdf
self._check_data(df.plot(x=0, y=1),
df.set_index('A')['B'].plot())
self._check_data(df.plot(x=0), df.set_index('A').plot())
self._check_data(df.plot(y=0), df.B.plot())
self._check_data(df.plot(x='A', y='B'),
df.set_index('A').B.plot())
self._check_data(df.plot(x='A'), df.set_index('A').plot())
self._check_data(df.plot(y='B'), df.B.plot())
# columns.inferred_type == 'integer'
df.columns = lrange(1, len(df.columns) + 1)
self._check_data(df.plot(x=1, y=2),
df.set_index(1)[2].plot())
self._check_data(df.plot(x=1), df.set_index(1).plot())
self._check_data(df.plot(y=1), df[1].plot())
# figsize and title
ax = df.plot(x=1, y=2, title='Test', figsize=(16, 8))
self._check_text_labels(ax.title, 'Test')
self._check_axes_shape(ax, axes_num=1, layout=(1, 1), figsize=(16., 8.))
# columns.inferred_type == 'mixed'
# TODO add MultiIndex test
@slow
def test_logscales(self):
df = DataFrame({'a': np.arange(100)},
index=np.arange(100))
ax = df.plot(logy=True)
self._check_ax_scales(ax, yaxis='log')
ax = df.plot(logx=True)
self._check_ax_scales(ax, xaxis='log')
ax = df.plot(loglog=True)
self._check_ax_scales(ax, xaxis='log', yaxis='log')
@slow
def test_xcompat(self):
import pandas as pd
df = self.tdf
ax = df.plot(x_compat=True)
lines = ax.get_lines()
self.assertNotIsInstance(lines[0].get_xdata(), PeriodIndex)
tm.close()
pd.plot_params['xaxis.compat'] = True
ax = df.plot()
lines = ax.get_lines()
self.assertNotIsInstance(lines[0].get_xdata(), PeriodIndex)
tm.close()
pd.plot_params['x_compat'] = False
ax = df.plot()
lines = ax.get_lines()
self.assertNotIsInstance(lines[0].get_xdata(), PeriodIndex)
self.assertIsInstance(PeriodIndex(lines[0].get_xdata()), PeriodIndex)
tm.close()
# useful if you're plotting a bunch together
with pd.plot_params.use('x_compat', True):
ax = df.plot()
lines = ax.get_lines()
self.assertNotIsInstance(lines[0].get_xdata(), PeriodIndex)
tm.close()
ax = df.plot()
lines = ax.get_lines()
self.assertNotIsInstance(lines[0].get_xdata(), PeriodIndex)
self.assertIsInstance(PeriodIndex(lines[0].get_xdata()), PeriodIndex)
def test_period_compat(self):
# GH 9012
# period-array conversions
df = DataFrame(
np.random.rand(21, 2),
index=bdate_range(datetime(2000, 1, 1), datetime(2000, 1, 31)),
columns=['a', 'b'])
df.plot()
self.plt.axhline(y=0)
tm.close()
def test_unsorted_index(self):
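        # A frame with a descending (unsorted) index should still plot
        # correctly: the line's x-data comes out sorted and the x/y pairs
        # round-trip back to the original column.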
df = DataFrame({'y': np.arange(100)},
index=np.arange(99, -1, -1), dtype=np.int64)
ax = df.plot()
l = ax.get_lines()[0]
rs = l.get_xydata()
rs = Series(rs[:, 1], rs[:, 0], dtype=np.int64, name='y')
tm.assert_series_equal(rs, df.y, check_index_type=False)
tm.close()
df.index = pd.Index(np.arange(99, -1, -1), dtype=np.float64)
ax = df.plot()
l = ax.get_lines()[0]
rs = l.get_xydata()
rs = Series(rs[:, 1], rs[:, 0], dtype=np.int64, name='y')
tm.assert_series_equal(rs, df.y)
@slow
def test_subplots(self):
df = DataFrame(np.random.rand(10, 3),
index=list(string.ascii_letters[:10]))
for kind in ['bar', 'barh', 'line', 'area']:
axes = df.plot(kind=kind, subplots=True, sharex=True, legend=True)
self._check_axes_shape(axes, axes_num=3, layout=(3, 1))
self.assertEqual(axes.shape, (3, ))
for ax, column in zip(axes, df.columns):
self._check_legend_labels(ax, labels=[com.pprint_thing(column)])
for ax in axes[:-2]:
self._check_visible(ax.xaxis) # xaxis must be visible for grid
self._check_visible(ax.get_xticklabels(), visible=False)
self._check_visible(ax.get_xticklabels(minor=True), visible=False)
self._check_visible(ax.xaxis.get_label(), visible=False)
self._check_visible(ax.get_yticklabels())
self._check_visible(axes[-1].xaxis)
self._check_visible(axes[-1].get_xticklabels())
self._check_visible(axes[-1].get_xticklabels(minor=True))
self._check_visible(axes[-1].xaxis.get_label())
self._check_visible(axes[-1].get_yticklabels())
axes = df.plot(kind=kind, subplots=True, sharex=False)
for ax in axes:
self._check_visible(ax.xaxis)
self._check_visible(ax.get_xticklabels())
self._check_visible(ax.get_xticklabels(minor=True))
self._check_visible(ax.xaxis.get_label())
self._check_visible(ax.get_yticklabels())
axes = df.plot(kind=kind, subplots=True, legend=False)
for ax in axes:
self.assertTrue(ax.get_legend() is None)
@slow
def test_subplots_timeseries(self):
idx = date_range(start='2014-07-01', freq='M', periods=10)
df = DataFrame(np.random.rand(10, 3), index=idx)
for kind in ['line', 'area']:
axes = df.plot(kind=kind, subplots=True, sharex=True)
self._check_axes_shape(axes, axes_num=3, layout=(3, 1))
for ax in axes[:-2]:
# GH 7801
self._check_visible(ax.xaxis) # xaxis must be visible for grid
self._check_visible(ax.get_xticklabels(), visible=False)
self._check_visible(ax.get_xticklabels(minor=True), visible=False)
self._check_visible(ax.xaxis.get_label(), visible=False)
self._check_visible(ax.get_yticklabels())
self._check_visible(axes[-1].xaxis)
self._check_visible(axes[-1].get_xticklabels())
self._check_visible(axes[-1].get_xticklabels(minor=True))
self._check_visible(axes[-1].xaxis.get_label())
self._check_visible(axes[-1].get_yticklabels())
self._check_ticks_props(axes, xrot=0)
axes = df.plot(kind=kind, subplots=True, sharex=False, rot=45, fontsize=7)
for ax in axes:
self._check_visible(ax.xaxis)
self._check_visible(ax.get_xticklabels())
self._check_visible(ax.get_xticklabels(minor=True))
self._check_visible(ax.xaxis.get_label())
self._check_visible(ax.get_yticklabels())
self._check_ticks_props(ax, xlabelsize=7, xrot=45, ylabelsize=7)
@slow
def test_subplots_layout(self):
# GH 6667
df = DataFrame(np.random.rand(10, 3),
index=list(string.ascii_letters[:10]))
axes = df.plot(subplots=True, layout=(2, 2))
self._check_axes_shape(axes, axes_num=3, layout=(2, 2))
self.assertEqual(axes.shape, (2, 2))
axes = df.plot(subplots=True, layout=(-1, 2))
self._check_axes_shape(axes, axes_num=3, layout=(2, 2))
self.assertEqual(axes.shape, (2, 2))
axes = df.plot(subplots=True, layout=(2, -1))
self._check_axes_shape(axes, axes_num=3, layout=(2, 2))
self.assertEqual(axes.shape, (2, 2))
axes = df.plot(subplots=True, layout=(1, 4))
self._check_axes_shape(axes, axes_num=3, layout=(1, 4))
self.assertEqual(axes.shape, (1, 4))
axes = df.plot(subplots=True, layout=(-1, 4))
self._check_axes_shape(axes, axes_num=3, layout=(1, 4))
self.assertEqual(axes.shape, (1, 4))
axes = df.plot(subplots=True, layout=(4, -1))
self._check_axes_shape(axes, axes_num=3, layout=(4, 1))
self.assertEqual(axes.shape, (4, 1))
with tm.assertRaises(ValueError):
axes = df.plot(subplots=True, layout=(1, 1))
with tm.assertRaises(ValueError):
axes = df.plot(subplots=True, layout=(-1, -1))
# single column
df = DataFrame(np.random.rand(10, 1),
index=list(string.ascii_letters[:10]))
axes = df.plot(subplots=True)
self._check_axes_shape(axes, axes_num=1, layout=(1, 1))
self.assertEqual(axes.shape, (1, ))
axes = df.plot(subplots=True, layout=(3, 3))
self._check_axes_shape(axes, axes_num=1, layout=(3, 3))
self.assertEqual(axes.shape, (3, 3))
@slow
def test_subplots_warnings(self):
# GH 9464
warnings.simplefilter('error')
try:
df = DataFrame(np.random.randn(100, 4))
df.plot(subplots=True, layout=(3, 2))
df = DataFrame(np.random.randn(100, 4),
index=date_range('1/1/2000', periods=100))
df.plot(subplots=True, layout=(3, 2))
except Warning as w:
self.fail(w)
warnings.simplefilter('default')
@slow
def test_subplots_multiple_axes(self):
# GH 5353, 6970, GH 7069
fig, axes = self.plt.subplots(2, 3)
df = DataFrame(np.random.rand(10, 3),
index=list(string.ascii_letters[:10]))
returned = df.plot(subplots=True, ax=axes[0], sharex=False, sharey=False)
self._check_axes_shape(returned, axes_num=3, layout=(1, 3))
self.assertEqual(returned.shape, (3, ))
self.assertIs(returned[0].figure, fig)
# draw on second row
returned = df.plot(subplots=True, ax=axes[1], sharex=False, sharey=False)
self._check_axes_shape(returned, axes_num=3, layout=(1, 3))
self.assertEqual(returned.shape, (3, ))
self.assertIs(returned[0].figure, fig)
self._check_axes_shape(axes, axes_num=6, layout=(2, 3))
tm.close()
with tm.assertRaises(ValueError):
fig, axes = self.plt.subplots(2, 3)
# pass different number of axes from required
df.plot(subplots=True, ax=axes)
# pass 2-dim axes and invalid layout
        # an invalid layout should not affect the input or the return value
        # (the warning it raises is tested in
        # TestDataFrameGroupByPlots.test_grouped_box_multiple_axes)
fig, axes = self.plt.subplots(2, 2)
with warnings.catch_warnings():
warnings.simplefilter('ignore')
df = DataFrame(np.random.rand(10, 4),
index=list(string.ascii_letters[:10]))
returned = df.plot(subplots=True, ax=axes, layout=(2, 1),
sharex=False, sharey=False)
self._check_axes_shape(returned, axes_num=4, layout=(2, 2))
self.assertEqual(returned.shape, (4, ))
returned = df.plot(subplots=True, ax=axes, layout=(2, -1),
sharex=False, sharey=False)
self._check_axes_shape(returned, axes_num=4, layout=(2, 2))
self.assertEqual(returned.shape, (4, ))
returned = df.plot(subplots=True, ax=axes, layout=(-1, 2),
sharex=False, sharey=False)
self._check_axes_shape(returned, axes_num=4, layout=(2, 2))
self.assertEqual(returned.shape, (4, ))
# single column
fig, axes = self.plt.subplots(1, 1)
df = DataFrame(np.random.rand(10, 1),
index=list(string.ascii_letters[:10]))
axes = df.plot(subplots=True, ax=[axes], sharex=False, sharey=False)
self._check_axes_shape(axes, axes_num=1, layout=(1, 1))
self.assertEqual(axes.shape, (1, ))
def test_subplots_ts_share_axes(self):
# GH 3964
fig, axes = self.plt.subplots(3, 3, sharex=True, sharey=True)
self.plt.subplots_adjust(left=0.05, right=0.95, hspace=0.3, wspace=0.3)
df = DataFrame(np.random.randn(10, 9), index=date_range(start='2014-07-01', freq='M', periods=10))
for i, ax in enumerate(axes.ravel()):
df[i].plot(ax=ax, fontsize=5)
        # Rows other than the bottom should not have visible x tick labels
for ax in axes[0:-1].ravel():
self._check_visible(ax.get_xticklabels(), visible=False)
        # The bottom row's x tick labels should be visible
for ax in axes[-1].ravel():
self._check_visible(ax.get_xticklabels(), visible=True)
        # The first column's y tick labels should be visible
for ax in axes[[0, 1, 2], [0]].ravel():
self._check_visible(ax.get_yticklabels(), visible=True)
        # The other columns' y tick labels should not be visible
for ax in axes[[0, 1, 2], [1]].ravel():
self._check_visible(ax.get_yticklabels(), visible=False)
for ax in axes[[0, 1, 2], [2]].ravel():
self._check_visible(ax.get_yticklabels(), visible=False)
def test_subplots_sharex_axes_existing_axes(self):
# GH 9158
d = {'A': [1., 2., 3., 4.], 'B': [4., 3., 2., 1.], 'C': [5, 1, 3, 4]}
df = DataFrame(d, index=date_range('2014 10 11', '2014 10 14'))
axes = df[['A', 'B']].plot(subplots=True)
df['C'].plot(ax=axes[0], secondary_y=True)
self._check_visible(axes[0].get_xticklabels(), visible=False)
self._check_visible(axes[1].get_xticklabels(), visible=True)
for ax in axes.ravel():
self._check_visible(ax.get_yticklabels(), visible=True)
@slow
def test_subplots_dup_columns(self):
# GH 10962
df = DataFrame(np.random.rand(5, 5), columns=list('aaaaa'))
axes = df.plot(subplots=True)
for ax in axes:
self._check_legend_labels(ax, labels=['a'])
self.assertEqual(len(ax.lines), 1)
tm.close()
axes = df.plot(subplots=True, secondary_y='a')
for ax in axes:
# (right) is only attached when subplots=False
self._check_legend_labels(ax, labels=['a'])
self.assertEqual(len(ax.lines), 1)
tm.close()
ax = df.plot(secondary_y='a')
self._check_legend_labels(ax, labels=['a (right)'] * 5)
self.assertEqual(len(ax.lines), 0)
self.assertEqual(len(ax.right_ax.lines), 5)
def test_negative_log(self):
df = - DataFrame(rand(6, 4),
index=list(string.ascii_letters[:6]),
columns=['x', 'y', 'z', 'four'])
with tm.assertRaises(ValueError):
df.plot.area(logy=True)
with tm.assertRaises(ValueError):
df.plot.area(loglog=True)
def _compare_stacked_y_cood(self, normal_lines, stacked_lines):
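        # Helper: each stacked line's y-data should equal the running sum of
        # the corresponding unstacked lines' y-data.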
base = np.zeros(len(normal_lines[0].get_data()[1]))
for nl, sl in zip(normal_lines, stacked_lines):
            base += nl.get_data()[1]  # accumulate y coordinates
sy = sl.get_data()[1]
self.assert_numpy_array_equal(base, sy)
def test_line_area_stacked(self):
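        # Stacked line/area y-values should be the cumulative sum of the
        # unstacked ones; columns mixing positive and negative values cannot
        # be stacked and should raise.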
with tm.RNGContext(42):
df = DataFrame(rand(6, 4),
columns=['w', 'x', 'y', 'z'])
neg_df = - df
# each column has either positive or negative value
sep_df = DataFrame({'w': rand(6), 'x': rand(6),
'y': - rand(6), 'z': - rand(6)})
# each column has positive-negative mixed value
mixed_df = DataFrame(randn(6, 4), index=list(string.ascii_letters[:6]),
columns=['w', 'x', 'y', 'z'])
for kind in ['line', 'area']:
ax1 = _check_plot_works(df.plot, kind=kind, stacked=False)
ax2 = _check_plot_works(df.plot, kind=kind, stacked=True)
self._compare_stacked_y_cood(ax1.lines, ax2.lines)
ax1 = _check_plot_works(neg_df.plot, kind=kind, stacked=False)
ax2 = _check_plot_works(neg_df.plot, kind=kind, stacked=True)
self._compare_stacked_y_cood(ax1.lines, ax2.lines)
ax1 = _check_plot_works(sep_df.plot, kind=kind, stacked=False)
ax2 = _check_plot_works(sep_df.plot, kind=kind, stacked=True)
self._compare_stacked_y_cood(ax1.lines[:2], ax2.lines[:2])
self._compare_stacked_y_cood(ax1.lines[2:], ax2.lines[2:])
_check_plot_works(mixed_df.plot, stacked=False)
with tm.assertRaises(ValueError):
mixed_df.plot(stacked=True)
_check_plot_works(df.plot, kind=kind, logx=True, stacked=True)
def test_line_area_nan_df(self):
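        # NaNs should be masked out of unstacked line data and treated as
        # zero when stacking or area-filling.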
values1 = [1, 2, np.nan, 3]
values2 = [3, np.nan, 2, 1]
df = DataFrame({'a': values1, 'b': values2})
tdf = DataFrame({'a': values1, 'b': values2}, index=tm.makeDateIndex(k=4))
for d in [df, tdf]:
ax = _check_plot_works(d.plot)
masked1 = ax.lines[0].get_ydata()
masked2 = ax.lines[1].get_ydata()
            # remove NaN for comparison purposes
self.assert_numpy_array_equal(np.delete(masked1.data, 2), np.array([1, 2, 3]))
self.assert_numpy_array_equal(np.delete(masked2.data, 1), np.array([3, 2, 1]))
self.assert_numpy_array_equal(masked1.mask, np.array([False, False, True, False]))
self.assert_numpy_array_equal(masked2.mask, np.array([False, True, False, False]))
expected1 = np.array([1, 2, 0, 3])
expected2 = np.array([3, 0, 2, 1])
ax = _check_plot_works(d.plot, stacked=True)
self.assert_numpy_array_equal(ax.lines[0].get_ydata(), expected1)
self.assert_numpy_array_equal(ax.lines[1].get_ydata(), expected1 + expected2)
ax = _check_plot_works(d.plot.area)
self.assert_numpy_array_equal(ax.lines[0].get_ydata(), expected1)
self.assert_numpy_array_equal(ax.lines[1].get_ydata(), expected1 + expected2)
ax = _check_plot_works(d.plot.area, stacked=False)
self.assert_numpy_array_equal(ax.lines[0].get_ydata(), expected1)
self.assert_numpy_array_equal(ax.lines[1].get_ydata(), expected2)
def test_line_lim(self):
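        # The x-limits should exactly span the plotted x-data, also with
        # secondary_y and with subplots (where each returned axes is the
        # secondary one and carries a left_ax).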
df = DataFrame(rand(6, 3), columns=['x', 'y', 'z'])
ax = df.plot()
xmin, xmax = ax.get_xlim()
lines = ax.get_lines()
self.assertEqual(xmin, lines[0].get_data()[0][0])
self.assertEqual(xmax, lines[0].get_data()[0][-1])
ax = df.plot(secondary_y=True)
xmin, xmax = ax.get_xlim()
lines = ax.get_lines()
self.assertEqual(xmin, lines[0].get_data()[0][0])
self.assertEqual(xmax, lines[0].get_data()[0][-1])
axes = df.plot(secondary_y=True, subplots=True)
self._check_axes_shape(axes, axes_num=3, layout=(3, 1))
for ax in axes:
self.assertTrue(hasattr(ax, 'left_ax'))
self.assertFalse(hasattr(ax, 'right_ax'))
xmin, xmax = ax.get_xlim()
lines = ax.get_lines()
self.assertEqual(xmin, lines[0].get_data()[0][0])
self.assertEqual(xmax, lines[0].get_data()[0][-1])
def test_area_lim(self):
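        # Area plots should anchor the y-limits at zero: ymin == 0 for
        # all-positive data and ymax == 0 for all-negative data.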
df = DataFrame(rand(6, 4),
columns=['x', 'y', 'z', 'four'])
neg_df = - df
for stacked in [True, False]:
ax = _check_plot_works(df.plot.area, stacked=stacked)
xmin, xmax = ax.get_xlim()
ymin, ymax = ax.get_ylim()
lines = ax.get_lines()
self.assertEqual(xmin, lines[0].get_data()[0][0])
self.assertEqual(xmax, lines[0].get_data()[0][-1])
self.assertEqual(ymin, 0)
ax = _check_plot_works(neg_df.plot.area, stacked=stacked)
ymin, ymax = ax.get_ylim()
self.assertEqual(ymax, 0)
@slow
def test_bar_colors(self):
import matplotlib.pyplot as plt
default_colors = self._maybe_unpack_cycler(plt.rcParams)
df = DataFrame(randn(5, 5))
ax = df.plot.bar()
self._check_colors(ax.patches[::5], facecolors=default_colors[:5])
tm.close()
custom_colors = 'rgcby'
ax = df.plot.bar(color=custom_colors)
self._check_colors(ax.patches[::5], facecolors=custom_colors)
tm.close()
from matplotlib import cm
# Test str -> colormap functionality
ax = df.plot.bar(colormap='jet')
rgba_colors = lmap(cm.jet, np.linspace(0, 1, 5))
self._check_colors(ax.patches[::5], facecolors=rgba_colors)
tm.close()
# Test colormap functionality
ax = df.plot.bar(colormap=cm.jet)
rgba_colors = lmap(cm.jet, np.linspace(0, 1, 5))
self._check_colors(ax.patches[::5], facecolors=rgba_colors)
tm.close()
ax = df.ix[:, [0]].plot.bar(color='DodgerBlue')
self._check_colors([ax.patches[0]], facecolors=['DodgerBlue'])
tm.close()
ax = df.plot(kind='bar', color='green')
self._check_colors(ax.patches[::5], facecolors=['green'] * 5)
tm.close()
@slow
def test_bar_linewidth(self):
df = DataFrame(randn(5, 5))
# regular
ax = df.plot.bar(linewidth=2)
for r in ax.patches:
self.assertEqual(r.get_linewidth(), 2)
# stacked
ax = df.plot.bar(stacked=True, linewidth=2)
for r in ax.patches:
self.assertEqual(r.get_linewidth(), 2)
# subplots
axes = df.plot.bar(linewidth=2, subplots=True)
self._check_axes_shape(axes, axes_num=5, layout=(5, 1))
for ax in axes:
for r in ax.patches:
self.assertEqual(r.get_linewidth(), 2)
@slow
def test_bar_barwidth(self):
df = DataFrame(randn(5, 5))
width = 0.9
# regular
ax = df.plot.bar(width=width)
for r in ax.patches:
self.assertEqual(r.get_width(), width / len(df.columns))
# stacked
ax = df.plot.bar(stacked=True, width=width)
for r in ax.patches:
self.assertEqual(r.get_width(), width)
# horizontal regular
ax = df.plot.barh(width=width)
for r in ax.patches:
self.assertEqual(r.get_height(), width / len(df.columns))
# horizontal stacked
ax = df.plot.barh(stacked=True, width=width)
for r in ax.patches:
self.assertEqual(r.get_height(), width)
# subplots
axes = df.plot.bar(width=width, subplots=True)
for ax in axes:
for r in ax.patches:
self.assertEqual(r.get_width(), width)
# horizontal subplots
axes = df.plot.barh(width=width, subplots=True)
for ax in axes:
for r in ax.patches:
self.assertEqual(r.get_height(), width)
@slow
def test_bar_barwidth_position(self):
df = DataFrame(randn(5, 5))
self._check_bar_alignment(df, kind='bar', stacked=False, width=0.9, position=0.2)
self._check_bar_alignment(df, kind='bar', stacked=True, width=0.9, position=0.2)
self._check_bar_alignment(df, kind='barh', stacked=False, width=0.9, position=0.2)
self._check_bar_alignment(df, kind='barh', stacked=True, width=0.9, position=0.2)
self._check_bar_alignment(df, kind='bar', subplots=True, width=0.9, position=0.2)
self._check_bar_alignment(df, kind='barh', subplots=True, width=0.9, position=0.2)
@slow
def test_bar_bottom_left(self):
df = DataFrame(rand(5, 5))
ax = df.plot.bar(stacked=False, bottom=1)
result = [p.get_y() for p in ax.patches]
self.assertEqual(result, [1] * 25)
ax = df.plot.bar(stacked=True, bottom=[-1, -2, -3, -4, -5])
result = [p.get_y() for p in ax.patches[:5]]
self.assertEqual(result, [-1, -2, -3, -4, -5])
ax = df.plot.barh(stacked=False, left=np.array([1, 1, 1, 1, 1]))
result = [p.get_x() for p in ax.patches]
self.assertEqual(result, [1] * 25)
ax = df.plot.barh(stacked=True, left=[1, 2, 3, 4, 5])
result = [p.get_x() for p in ax.patches[:5]]
self.assertEqual(result, [1, 2, 3, 4, 5])
axes = df.plot.bar(subplots=True, bottom=-1)
for ax in axes:
result = [p.get_y() for p in ax.patches]
self.assertEqual(result, [-1] * 5)
axes = df.plot.barh(subplots=True, left=np.array([1, 1, 1, 1, 1]))
for ax in axes:
result = [p.get_x() for p in ax.patches]
self.assertEqual(result, [1] * 5)
@slow
def test_bar_nan(self):
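        # NaN values should produce zero-height bars, and stacked bars should
        # accumulate only the non-NaN heights beneath them.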
df = DataFrame({'A': [10, np.nan, 20], 'B': [5, 10, 20],
'C': [1, 2, 3]})
ax = df.plot.bar()
expected = [10, 0, 20, 5, 10, 20, 1, 2, 3]
result = [p.get_height() for p in ax.patches]
self.assertEqual(result, expected)
ax = df.plot.bar(stacked=True)
result = [p.get_height() for p in ax.patches]
self.assertEqual(result, expected)
result = [p.get_y() for p in ax.patches]
expected = [0.0, 0.0, 0.0, 10.0, 0.0, 20.0, 15.0, 10.0, 40.0]
self.assertEqual(result, expected)
@slow
def test_plot_scatter(self):
df = DataFrame(randn(6, 4),
index=list(string.ascii_letters[:6]),
columns=['x', 'y', 'z', 'four'])
_check_plot_works(df.plot.scatter, x='x', y='y')
_check_plot_works(df.plot.scatter, x=1, y=2)
with tm.assertRaises(TypeError):
df.plot.scatter(x='x')
with tm.assertRaises(TypeError):
df.plot.scatter(y='y')
# GH 6951
axes = df.plot(x='x', y='y', kind='scatter', subplots=True)
self._check_axes_shape(axes, axes_num=1, layout=(1, 1))
@slow
def test_plot_scatter_with_c(self):
df = DataFrame(randn(6, 4),
index=list(string.ascii_letters[:6]),
columns=['x', 'y', 'z', 'four'])
axes = [df.plot.scatter(x='x', y='y', c='z'),
df.plot.scatter(x=0, y=1, c=2)]
for ax in axes:
# default to Greys
self.assertEqual(ax.collections[0].cmap.name, 'Greys')
if self.mpl_ge_1_3_1:
# n.b. there appears to be no public method to get the colorbar
# label
self.assertEqual(ax.collections[0].colorbar._label, 'z')
cm = 'cubehelix'
ax = df.plot.scatter(x='x', y='y', c='z', colormap=cm)
self.assertEqual(ax.collections[0].cmap.name, cm)
# verify turning off colorbar works
ax = df.plot.scatter(x='x', y='y', c='z', colorbar=False)
self.assertIs(ax.collections[0].colorbar, None)
# verify that we can still plot a solid color
ax = df.plot.scatter(x=0, y=1, c='red')
self.assertIs(ax.collections[0].colorbar, None)
self._check_colors(ax.collections, facecolors=['r'])
# Ensure that we can pass an np.array straight through to matplotlib,
# this functionality was accidentally removed previously.
# See https://github.com/pydata/pandas/issues/8852 for bug report
#
# Exercise colormap path and non-colormap path as they are independent
#
df = DataFrame({'A': [1, 2], 'B': [3, 4]})
red_rgba = [1.0, 0.0, 0.0, 1.0]
green_rgba = [0.0, 1.0, 0.0, 1.0]
rgba_array = np.array([red_rgba, green_rgba])
ax = df.plot.scatter(x='A', y='B', c=rgba_array)
        # The face colors of the points in the non-colormap path are expected
        # to be identical to the values we supplied; normally we'd be on shaky
        # ground comparing floats for equality, but here they should match
        # exactly.
self.assertTrue(
np.array_equal(
ax.collections[0].get_facecolor(),
rgba_array))
# we don't test the colors of the faces in this next plot because they
# are dependent on the spring colormap, which may change its colors
# later.
float_array = np.array([0.0, 1.0])
df.plot.scatter(x='A', y='B', c=float_array, cmap='spring')
def test_scatter_colors(self):
df = DataFrame({'a': [1, 2, 3], 'b': [1, 2, 3], 'c': [1, 2, 3]})
with tm.assertRaises(TypeError):
df.plot.scatter(x='a', y='b', c='c', color='green')
ax = df.plot.scatter(x='a', y='b', c='c')
tm.assert_numpy_array_equal(ax.collections[0].get_facecolor()[0],
(0, 0, 1, 1))
ax = df.plot.scatter(x='a', y='b', color='white')
tm.assert_numpy_array_equal(ax.collections[0].get_facecolor()[0],
(1, 1, 1, 1))
@slow
def test_plot_bar(self):
df = DataFrame(randn(6, 4),
index=list(string.ascii_letters[:6]),
columns=['one', 'two', 'three', 'four'])
_check_plot_works(df.plot.bar)
_check_plot_works(df.plot.bar, legend=False)
_check_plot_works(df.plot.bar, filterwarnings='ignore', subplots=True)
_check_plot_works(df.plot.bar, stacked=True)
df = DataFrame(randn(10, 15),
index=list(string.ascii_letters[:10]),
columns=lrange(15))
_check_plot_works(df.plot.bar)
df = DataFrame({'a': [0, 1], 'b': [1, 0]})
ax = _check_plot_works(df.plot.bar)
self._check_ticks_props(ax, xrot=90)
ax = df.plot.bar(rot=35, fontsize=10)
self._check_ticks_props(ax, xrot=35, xlabelsize=10, ylabelsize=10)
ax = _check_plot_works(df.plot.barh)
self._check_ticks_props(ax, yrot=0)
ax = df.plot.barh(rot=55, fontsize=11)
self._check_ticks_props(ax, yrot=55, ylabelsize=11, xlabelsize=11)
def _check_bar_alignment(self, df, kind='bar', stacked=False,
subplots=False, align='center',
width=0.5, position=0.5):
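        # Helper: plot ``df`` with the given kind/stacked/subplots settings
        # and verify that the bar edges line up with the axis limits and that
        # each bar sits where the requested ``align``/``width``/``position``
        # place it relative to the integer ticks.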
axes = df.plot(kind=kind, stacked=stacked, subplots=subplots,
align=align, width=width, position=position,
grid=True)
axes = self._flatten_visible(axes)
for ax in axes:
if kind == 'bar':
axis = ax.xaxis
ax_min, ax_max = ax.get_xlim()
min_edge = min([p.get_x() for p in ax.patches])
max_edge = max([p.get_x() + p.get_width() for p in ax.patches])
elif kind == 'barh':
axis = ax.yaxis
ax_min, ax_max = ax.get_ylim()
min_edge = min([p.get_y() for p in ax.patches])
max_edge = max([p.get_y() + p.get_height() for p in ax.patches])
else:
raise ValueError
# GH 7498
# compare margins between lim and bar edges
self.assertAlmostEqual(ax_min, min_edge - 0.25)
self.assertAlmostEqual(ax_max, max_edge + 0.25)
p = ax.patches[0]
if kind == 'bar' and (stacked is True or subplots is True):
edge = p.get_x()
center = edge + p.get_width() * position
elif kind == 'bar' and stacked is False:
center = p.get_x() + p.get_width() * len(df.columns) * position
edge = p.get_x()
elif kind == 'barh' and (stacked is True or subplots is True):
center = p.get_y() + p.get_height() * position
edge = p.get_y()
elif kind == 'barh' and stacked is False:
center = p.get_y() + p.get_height() * len(df.columns) * position
edge = p.get_y()
else:
raise ValueError
            # Check that the ticks are located at integer positions
self.assertTrue((axis.get_ticklocs() == np.arange(len(df))).all())
if align == 'center':
                # Check whether the bar is centered on the tick
self.assertAlmostEqual(axis.get_ticklocs()[0], center)
elif align == 'edge':
# Check whether the bar's edge starts from the tick
self.assertAlmostEqual(axis.get_ticklocs()[0], edge)
else:
raise ValueError
return axes
@slow
def test_bar_stacked_center(self):
# GH2157
df = DataFrame({'A': [3] * 5, 'B': lrange(5)}, index=lrange(5))
self._check_bar_alignment(df, kind='bar', stacked=True)
self._check_bar_alignment(df, kind='bar', stacked=True, width=0.9)
self._check_bar_alignment(df, kind='barh', stacked=True)
self._check_bar_alignment(df, kind='barh', stacked=True, width=0.9)
@slow
def test_bar_center(self):
df = DataFrame({'A': [3] * 5, 'B': lrange(5)}, index=lrange(5))
self._check_bar_alignment(df, kind='bar', stacked=False)
self._check_bar_alignment(df, kind='bar', stacked=False, width=0.9)
self._check_bar_alignment(df, kind='barh', stacked=False)
self._check_bar_alignment(df, kind='barh', stacked=False, width=0.9)
@slow
def test_bar_subplots_center(self):
df = DataFrame({'A': [3] * 5, 'B': lrange(5)}, index=lrange(5))
self._check_bar_alignment(df, kind='bar', subplots=True)
self._check_bar_alignment(df, kind='bar', subplots=True, width=0.9)
self._check_bar_alignment(df, kind='barh', subplots=True)
self._check_bar_alignment(df, kind='barh', subplots=True, width=0.9)
@slow
def test_bar_align_single_column(self):
df = DataFrame(randn(5))
self._check_bar_alignment(df, kind='bar', stacked=False)
self._check_bar_alignment(df, kind='bar', stacked=True)
self._check_bar_alignment(df, kind='barh', stacked=False)
self._check_bar_alignment(df, kind='barh', stacked=True)
self._check_bar_alignment(df, kind='bar', subplots=True)
self._check_bar_alignment(df, kind='barh', subplots=True)
@slow
def test_bar_edge(self):
df = DataFrame({'A': [3] * 5, 'B': lrange(5)}, index=lrange(5))
self._check_bar_alignment(df, kind='bar', stacked=True, align='edge')
self._check_bar_alignment(df, kind='bar', stacked=True,
width=0.9, align='edge')
self._check_bar_alignment(df, kind='barh', stacked=True, align='edge')
self._check_bar_alignment(df, kind='barh', stacked=True,
width=0.9, align='edge')
self._check_bar_alignment(df, kind='bar', stacked=False, align='edge')
self._check_bar_alignment(df, kind='bar', stacked=False,
width=0.9, align='edge')
self._check_bar_alignment(df, kind='barh', stacked=False, align='edge')
self._check_bar_alignment(df, kind='barh', stacked=False,
width=0.9, align='edge')
self._check_bar_alignment(df, kind='bar', subplots=True, align='edge')
self._check_bar_alignment(df, kind='bar', subplots=True,
width=0.9, align='edge')
self._check_bar_alignment(df, kind='barh', subplots=True, align='edge')
self._check_bar_alignment(df, kind='barh', subplots=True,
width=0.9, align='edge')
@slow
def test_bar_log_no_subplots(self):
# GH3254, GH3298 matplotlib/matplotlib#1882, #1892
# regressions in 1.2.1
expected = np.array([1., 10.])
if not self.mpl_le_1_2_1:
expected = np.hstack((.1, expected, 100))
# no subplots
df = DataFrame({'A': [3] * 5, 'B': lrange(1, 6)}, index=lrange(5))
ax = df.plot.bar(grid=True, log=True)
tm.assert_numpy_array_equal(ax.yaxis.get_ticklocs(), expected)
@slow
def test_bar_log_subplots(self):
expected = np.array([1., 10., 100., 1000.])
if not self.mpl_le_1_2_1:
expected = np.hstack((.1, expected, 1e4))
ax = DataFrame([Series([200, 300]),
Series([300, 500])]).plot.bar(log=True, subplots=True)
tm.assert_numpy_array_equal(ax[0].yaxis.get_ticklocs(), expected)
tm.assert_numpy_array_equal(ax[1].yaxis.get_ticklocs(), expected)
@slow
def test_boxplot(self):
df = self.hist_df
series = df['height']
numeric_cols = df._get_numeric_data().columns
labels = [com.pprint_thing(c) for c in numeric_cols]
ax = _check_plot_works(df.plot.box)
self._check_text_labels(ax.get_xticklabels(), labels)
tm.assert_numpy_array_equal(ax.xaxis.get_ticklocs(),
np.arange(1, len(numeric_cols) + 1))
self.assertEqual(len(ax.lines),
self.bp_n_objects * len(numeric_cols))
# different warning on py3
if not PY3:
axes = _check_plot_works(df.plot.box,
subplots=True, logy=True)
self._check_axes_shape(axes, axes_num=3, layout=(1, 3))
self._check_ax_scales(axes, yaxis='log')
for ax, label in zip(axes, labels):
self._check_text_labels(ax.get_xticklabels(), [label])
self.assertEqual(len(ax.lines), self.bp_n_objects)
axes = series.plot.box(rot=40)
self._check_ticks_props(axes, xrot=40, yrot=0)
tm.close()
ax = _check_plot_works(series.plot.box)
positions = np.array([1, 6, 7])
ax = df.plot.box(positions=positions)
numeric_cols = df._get_numeric_data().columns
labels = [com.pprint_thing(c) for c in numeric_cols]
self._check_text_labels(ax.get_xticklabels(), labels)
tm.assert_numpy_array_equal(ax.xaxis.get_ticklocs(), positions)
self.assertEqual(len(ax.lines), self.bp_n_objects * len(numeric_cols))
@slow
def test_boxplot_vertical(self):
df = self.hist_df
numeric_cols = df._get_numeric_data().columns
labels = [com.pprint_thing(c) for c in numeric_cols]
# if horizontal, yticklabels are rotated
ax = df.plot.box(rot=50, fontsize=8, vert=False)
self._check_ticks_props(ax, xrot=0, yrot=50, ylabelsize=8)
self._check_text_labels(ax.get_yticklabels(), labels)
self.assertEqual(len(ax.lines), self.bp_n_objects * len(numeric_cols))
axes = _check_plot_works(df.plot.box, filterwarnings='ignore', subplots=True,
vert=False, logx=True)
self._check_axes_shape(axes, axes_num=3, layout=(1, 3))
self._check_ax_scales(axes, xaxis='log')
for ax, label in zip(axes, labels):
self._check_text_labels(ax.get_yticklabels(), [label])
self.assertEqual(len(ax.lines), self.bp_n_objects)
positions = np.array([3, 2, 8])
ax = df.plot.box(positions=positions, vert=False)
self._check_text_labels(ax.get_yticklabels(), labels)
tm.assert_numpy_array_equal(ax.yaxis.get_ticklocs(), positions)
self.assertEqual(len(ax.lines), self.bp_n_objects * len(numeric_cols))
@slow
def test_boxplot_return_type(self):
df = DataFrame(randn(6, 4),
index=list(string.ascii_letters[:6]),
columns=['one', 'two', 'three', 'four'])
with tm.assertRaises(ValueError):
df.plot.box(return_type='NOTATYPE')
result = df.plot.box(return_type='dict')
self._check_box_return_type(result, 'dict')
result = df.plot.box(return_type='axes')
self._check_box_return_type(result, 'axes')
result = df.plot.box(return_type='both')
self._check_box_return_type(result, 'both')
@slow
def test_boxplot_subplots_return_type(self):
df = self.hist_df
# normal style: return_type=None
result = df.plot.box(subplots=True)
self.assertIsInstance(result, np.ndarray)
self._check_box_return_type(result, None,
expected_keys=['height', 'weight', 'category'])
for t in ['dict', 'axes', 'both']:
returned = df.plot.box(return_type=t, subplots=True)
self._check_box_return_type(returned, t,
expected_keys=['height', 'weight', 'category'],
check_ax_title=False)
@slow
def test_kde_df(self):
tm._skip_if_no_scipy()
_skip_if_no_scipy_gaussian_kde()
df = DataFrame(randn(100, 4))
ax = _check_plot_works(df.plot, kind='kde')
expected = [com.pprint_thing(c) for c in df.columns]
self._check_legend_labels(ax, labels=expected)
self._check_ticks_props(ax, xrot=0)
ax = df.plot(kind='kde', rot=20, fontsize=5)
self._check_ticks_props(ax, xrot=20, xlabelsize=5, ylabelsize=5)
axes = _check_plot_works(df.plot, filterwarnings='ignore', kind='kde', subplots=True)
self._check_axes_shape(axes, axes_num=4, layout=(4, 1))
axes = df.plot(kind='kde', logy=True, subplots=True)
self._check_ax_scales(axes, yaxis='log')
@slow
def test_kde_missing_vals(self):
tm._skip_if_no_scipy()
_skip_if_no_scipy_gaussian_kde()
df = DataFrame(np.random.uniform(size=(100, 4)))
df.loc[0, 0] = np.nan
ax = _check_plot_works(df.plot, kind='kde')
@slow
def test_hist_df(self):
from matplotlib.patches import Rectangle
if self.mpl_le_1_2_1:
raise nose.SkipTest("not supported in matplotlib <= 1.2.x")
df = DataFrame(randn(100, 4))
series = df[0]
ax = _check_plot_works(df.plot.hist)
expected = [com.pprint_thing(c) for c in df.columns]
self._check_legend_labels(ax, labels=expected)
axes = _check_plot_works(df.plot.hist, filterwarnings='ignore', subplots=True, logy=True)
self._check_axes_shape(axes, axes_num=4, layout=(4, 1))
self._check_ax_scales(axes, yaxis='log')
axes = series.plot.hist(rot=40)
self._check_ticks_props(axes, xrot=40, yrot=0)
tm.close()
ax = series.plot.hist(normed=True, cumulative=True, bins=4)
        # height of the last bin must be 1.0
rects = [x for x in ax.get_children() if isinstance(x, Rectangle)]
self.assertAlmostEqual(rects[-1].get_height(), 1.0)
tm.close()
ax = series.plot.hist(cumulative=True, bins=4)
rects = [x for x in ax.get_children() if isinstance(x, Rectangle)]
self.assertAlmostEqual(rects[-2].get_height(), 100.0)
tm.close()
# if horizontal, yticklabels are rotated
axes = df.plot.hist(rot=50, fontsize=8, orientation='horizontal')
self._check_ticks_props(axes, xrot=0, yrot=50, ylabelsize=8)
def _check_box_coord(self, patches, expected_y=None, expected_h=None,
expected_x=None, expected_w=None):
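        # Helper: compare the patches' x/y/width/height arrays against
        # whichever expected arrays were supplied.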
result_y = np.array([p.get_y() for p in patches])
result_height = np.array([p.get_height() for p in patches])
result_x = np.array([p.get_x() for p in patches])
result_width = np.array([p.get_width() for p in patches])
if expected_y is not None:
self.assert_numpy_array_equal(result_y, expected_y)
if expected_h is not None:
self.assert_numpy_array_equal(result_height, expected_h)
if expected_x is not None:
self.assert_numpy_array_equal(result_x, expected_x)
if expected_w is not None:
self.assert_numpy_array_equal(result_width, expected_w)
@slow
def test_hist_df_coord(self):
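        # Histogram patch coordinates (y/height, or x/width when horizontal)
        # should reflect the known per-bin counts for plain, stacked, and
        # subplot variants, and NaNs should not add to any bin.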
normal_df = DataFrame({'A': np.repeat(np.array([1, 2, 3, 4, 5]),
np.array([10, 9, 8, 7, 6])),
'B': np.repeat(np.array([1, 2, 3, 4, 5]),
np.array([8, 8, 8, 8, 8])),
'C': np.repeat(np.array([1, 2, 3, 4, 5]),
np.array([6, 7, 8, 9, 10]))},
columns=['A', 'B', 'C'])
nan_df = DataFrame({'A': np.repeat(np.array([np.nan, 1, 2, 3, 4, 5]),
np.array([3, 10, 9, 8, 7, 6])),
'B': np.repeat(np.array([1, np.nan, 2, 3, 4, 5]),
np.array([8, 3, 8, 8, 8, 8])),
'C': np.repeat(np.array([1, 2, 3, np.nan, 4, 5]),
np.array([6, 7, 8, 3, 9, 10]))},
columns=['A', 'B', 'C'])
for df in [normal_df, nan_df]:
ax = df.plot.hist(bins=5)
self._check_box_coord(ax.patches[:5], expected_y=np.array([0, 0, 0, 0, 0]),
expected_h=np.array([10, 9, 8, 7, 6]))
self._check_box_coord(ax.patches[5:10], expected_y=np.array([0, 0, 0, 0, 0]),
expected_h=np.array([8, 8, 8, 8, 8]))
self._check_box_coord(ax.patches[10:], expected_y=np.array([0, 0, 0, 0, 0]),
expected_h=np.array([6, 7, 8, 9, 10]))
ax = df.plot.hist(bins=5, stacked=True)
self._check_box_coord(ax.patches[:5], expected_y=np.array([0, 0, 0, 0, 0]),
expected_h=np.array([10, 9, 8, 7, 6]))
self._check_box_coord(ax.patches[5:10], expected_y=np.array([10, 9, 8, 7, 6]),
expected_h=np.array([8, 8, 8, 8, 8]))
self._check_box_coord(ax.patches[10:], expected_y=np.array([18, 17, 16, 15, 14]),
expected_h=np.array([6, 7, 8, 9, 10]))
axes = df.plot.hist(bins=5, stacked=True, subplots=True)
self._check_box_coord(axes[0].patches, expected_y=np.array([0, 0, 0, 0, 0]),
expected_h=np.array([10, 9, 8, 7, 6]))
self._check_box_coord(axes[1].patches, expected_y=np.array([0, 0, 0, 0, 0]),
expected_h=np.array([8, 8, 8, 8, 8]))
self._check_box_coord(axes[2].patches, expected_y=np.array([0, 0, 0, 0, 0]),
expected_h=np.array([6, 7, 8, 9, 10]))
if self.mpl_ge_1_3_1:
# horizontal
ax = df.plot.hist(bins=5, orientation='horizontal')
self._check_box_coord(ax.patches[:5], expected_x=np.array([0, 0, 0, 0, 0]),
expected_w=np.array([10, 9, 8, 7, 6]))
self._check_box_coord(ax.patches[5:10], expected_x=np.array([0, 0, 0, 0, 0]),
expected_w=np.array([8, 8, 8, 8, 8]))
self._check_box_coord(ax.patches[10:], expected_x=np.array([0, 0, 0, 0, 0]),
expected_w=np.array([6, 7, 8, 9, 10]))
ax = df.plot.hist(bins=5, stacked=True, orientation='horizontal')
self._check_box_coord(ax.patches[:5], expected_x=np.array([0, 0, 0, 0, 0]),
expected_w=np.array([10, 9, 8, 7, 6]))
self._check_box_coord(ax.patches[5:10], expected_x=np.array([10, 9, 8, 7, 6]),
expected_w=np.array([8, 8, 8, 8, 8]))
self._check_box_coord(ax.patches[10:], expected_x=np.array([18, 17, 16, 15, 14]),
expected_w=np.array([6, 7, 8, 9, 10]))
axes = df.plot.hist(bins=5, stacked=True,
subplots=True, orientation='horizontal')
self._check_box_coord(axes[0].patches, expected_x=np.array([0, 0, 0, 0, 0]),
expected_w=np.array([10, 9, 8, 7, 6]))
self._check_box_coord(axes[1].patches, expected_x=np.array([0, 0, 0, 0, 0]),
expected_w=np.array([8, 8, 8, 8, 8]))
self._check_box_coord(axes[2].patches, expected_x=np.array([0, 0, 0, 0, 0]),
expected_w=np.array([6, 7, 8, 9, 10]))
@slow
def test_plot_int_columns(self):
df = DataFrame(randn(100, 4)).cumsum()
_check_plot_works(df.plot, legend=True)
@slow
def test_df_legend_labels(self):
kinds = ['line', 'bar', 'barh', 'kde', 'area', 'hist']
df = DataFrame(rand(3, 3), columns=['a', 'b', 'c'])
df2 = DataFrame(rand(3, 3), columns=['d', 'e', 'f'])
df3 = DataFrame(rand(3, 3), columns=['g', 'h', 'i'])
df4 = DataFrame(rand(3, 3), columns=['j', 'k', 'l'])
for kind in kinds:
if not _ok_for_gaussian_kde(kind):
continue
ax = df.plot(kind=kind, legend=True)
self._check_legend_labels(ax, labels=df.columns)
ax = df2.plot(kind=kind, legend=False, ax=ax)
self._check_legend_labels(ax, labels=df.columns)
ax = df3.plot(kind=kind, legend=True, ax=ax)
self._check_legend_labels(ax, labels=df.columns.union(df3.columns))
ax = df4.plot(kind=kind, legend='reverse', ax=ax)
expected = list(df.columns.union(df3.columns)) + list(reversed(df4.columns))
self._check_legend_labels(ax, labels=expected)
# Secondary Y
ax = df.plot(legend=True, secondary_y='b')
self._check_legend_labels(ax, labels=['a', 'b (right)', 'c'])
ax = df2.plot(legend=False, ax=ax)
self._check_legend_labels(ax, labels=['a', 'b (right)', 'c'])
ax = df3.plot(kind='bar', legend=True, secondary_y='h', ax=ax)
self._check_legend_labels(ax, labels=['a', 'b (right)', 'c', 'g', 'h (right)', 'i'])
# Time Series
ind = date_range('1/1/2014', periods=3)
df = DataFrame(randn(3, 3), columns=['a', 'b', 'c'], index=ind)
df2 = DataFrame(randn(3, 3), columns=['d', 'e', 'f'], index=ind)
df3 = DataFrame(randn(3, 3), columns=['g', 'h', 'i'], index=ind)
ax = df.plot(legend=True, secondary_y='b')
self._check_legend_labels(ax, labels=['a', 'b (right)', 'c'])
ax = df2.plot(legend=False, ax=ax)
self._check_legend_labels(ax, labels=['a', 'b (right)', 'c'])
ax = df3.plot(legend=True, ax=ax)
self._check_legend_labels(ax, labels=['a', 'b (right)', 'c', 'g', 'h', 'i'])
# scatter
ax = df.plot.scatter(x='a', y='b', label='data1')
self._check_legend_labels(ax, labels=['data1'])
ax = df2.plot.scatter(x='d', y='e', legend=False,
label='data2', ax=ax)
self._check_legend_labels(ax, labels=['data1'])
ax = df3.plot.scatter(x='g', y='h', label='data3', ax=ax)
self._check_legend_labels(ax, labels=['data1', 'data3'])
        # ensure label args pass through and that the index name and
        # column names do not mutate
df5 = df.set_index('a')
ax = df5.plot(y='b')
self._check_legend_labels(ax, labels=['b'])
ax = df5.plot(y='b', label='LABEL_b')
self._check_legend_labels(ax, labels=['LABEL_b'])
self._check_text_labels(ax.xaxis.get_label(), 'a')
ax = df5.plot(y='c', label='LABEL_c', ax=ax)
self._check_legend_labels(ax, labels=['LABEL_b','LABEL_c'])
self.assertTrue(df5.columns.tolist() == ['b','c'])
def test_legend_name(self):
multi = DataFrame(randn(4, 4),
columns=[np.array(['a', 'a', 'b', 'b']),
np.array(['x', 'y', 'x', 'y'])])
multi.columns.names = ['group', 'individual']
ax = multi.plot()
leg_title = ax.legend_.get_title()
self._check_text_labels(leg_title, 'group,individual')
df = DataFrame(randn(5, 5))
ax = df.plot(legend=True, ax=ax)
leg_title = ax.legend_.get_title()
self._check_text_labels(leg_title, 'group,individual')
df.columns.name = 'new'
ax = df.plot(legend=False, ax=ax)
leg_title = ax.legend_.get_title()
self._check_text_labels(leg_title, 'group,individual')
ax = df.plot(legend=True, ax=ax)
leg_title = ax.legend_.get_title()
self._check_text_labels(leg_title, 'new')
@slow
def test_no_legend(self):
kinds = ['line', 'bar', 'barh', 'kde', 'area', 'hist']
df = DataFrame(rand(3, 3), columns=['a', 'b', 'c'])
for kind in kinds:
if not _ok_for_gaussian_kde(kind):
continue
ax = df.plot(kind=kind, legend=False)
self._check_legend_labels(ax, visible=False)
@slow
def test_style_by_column(self):
import matplotlib.pyplot as plt
fig = plt.gcf()
df = DataFrame(randn(100, 3))
for markers in [{0: '^', 1: '+', 2: 'o'},
{0: '^', 1: '+'},
['^', '+', 'o'],
['^', '+']]:
fig.clf()
fig.add_subplot(111)
ax = df.plot(style=markers)
for i, l in enumerate(ax.get_lines()[:len(markers)]):
self.assertEqual(l.get_marker(), markers[i])
@slow
def test_line_label_none(self):
s = Series([1, 2])
ax = s.plot()
self.assertEqual(ax.get_legend(), None)
ax = s.plot(legend=True)
self.assertEqual(ax.get_legend().get_texts()[0].get_text(),
'None')
@slow
def test_line_colors(self):
import sys
from matplotlib import cm
custom_colors = 'rgcby'
df = DataFrame(randn(5, 5))
ax = df.plot(color=custom_colors)
self._check_colors(ax.get_lines(), linecolors=custom_colors)
tmp = sys.stderr
sys.stderr = StringIO()
try:
tm.close()
ax2 = df.plot(colors=custom_colors)
lines2 = ax2.get_lines()
for l1, l2 in zip(ax.get_lines(), lines2):
self.assertEqual(l1.get_color(), l2.get_color())
finally:
sys.stderr = tmp
tm.close()
ax = df.plot(colormap='jet')
rgba_colors = lmap(cm.jet, np.linspace(0, 1, len(df)))
self._check_colors(ax.get_lines(), linecolors=rgba_colors)
tm.close()
ax = df.plot(colormap=cm.jet)
rgba_colors = lmap(cm.jet, np.linspace(0, 1, len(df)))
self._check_colors(ax.get_lines(), linecolors=rgba_colors)
tm.close()
        # make color a list when plotting a one-column frame
# handles cases like df.plot(color='DodgerBlue')
ax = df.ix[:, [0]].plot(color='DodgerBlue')
self._check_colors(ax.lines, linecolors=['DodgerBlue'])
ax = df.plot(color='red')
self._check_colors(ax.get_lines(), linecolors=['red'] * 5)
tm.close()
# GH 10299
custom_colors = ['#FF0000', '#0000FF', '#FFFF00', '#000000', '#FFFFFF']
ax = df.plot(color=custom_colors)
self._check_colors(ax.get_lines(), linecolors=custom_colors)
tm.close()
with tm.assertRaises(ValueError):
            # A color list containing shorthand hex values results in a ValueError
custom_colors = ['#F00', '#00F', '#FF0', '#000', '#FFF']
            # _check_plot_works forces the plot to actually be drawn
_check_plot_works(df.plot, color=custom_colors)
@slow
def test_line_colors_and_styles_subplots(self):
# GH 9894
from matplotlib import cm
default_colors = self._maybe_unpack_cycler(self.plt.rcParams)
df = DataFrame(randn(5, 5))
axes = df.plot(subplots=True)
for ax, c in zip(axes, list(default_colors)):
self._check_colors(ax.get_lines(), linecolors=c)
tm.close()
# single color char
axes = df.plot(subplots=True, color='k')
for ax in axes:
self._check_colors(ax.get_lines(), linecolors=['k'])
tm.close()
# single color str
axes = df.plot(subplots=True, color='green')
for ax in axes:
self._check_colors(ax.get_lines(), linecolors=['green'])
tm.close()
custom_colors = 'rgcby'
axes = df.plot(color=custom_colors, subplots=True)
for ax, c in zip(axes, list(custom_colors)):
self._check_colors(ax.get_lines(), linecolors=[c])
tm.close()
axes = df.plot(color=list(custom_colors), subplots=True)
for ax, c in zip(axes, list(custom_colors)):
self._check_colors(ax.get_lines(), linecolors=[c])
tm.close()
# GH 10299
custom_colors = ['#FF0000', '#0000FF', '#FFFF00', '#000000', '#FFFFFF']
axes = df.plot(color=custom_colors, subplots=True)
for ax, c in zip(axes, list(custom_colors)):
self._check_colors(ax.get_lines(), linecolors=[c])
tm.close()
with tm.assertRaises(ValueError):
            # A color list containing shorthand hex values results in a ValueError
custom_colors = ['#F00', '#00F', '#FF0', '#000', '#FFF']
            # _check_plot_works forces the plot to actually be drawn
_check_plot_works(df.plot, color=custom_colors, subplots=True,
filterwarnings='ignore')
rgba_colors = lmap(cm.jet, np.linspace(0, 1, len(df)))
for cmap in ['jet', cm.jet]:
axes = df.plot(colormap=cmap, subplots=True)
for ax, c in zip(axes, rgba_colors):
self._check_colors(ax.get_lines(), linecolors=[c])
tm.close()
        # make color a list when plotting a one-column frame
# handles cases like df.plot(color='DodgerBlue')
axes = df.ix[:, [0]].plot(color='DodgerBlue', subplots=True)
self._check_colors(axes[0].lines, linecolors=['DodgerBlue'])
# single character style
axes = df.plot(style='r', subplots=True)
for ax in axes:
self._check_colors(ax.get_lines(), linecolors=['r'])
tm.close()
# list of styles
styles = list('rgcby')
axes = df.plot(style=styles, subplots=True)
for ax, c in zip(axes, styles):
self._check_colors(ax.get_lines(), linecolors=[c])
tm.close()
@slow
def test_area_colors(self):
from matplotlib import cm
from matplotlib.collections import PolyCollection
custom_colors = 'rgcby'
df = DataFrame(rand(5, 5))
ax = df.plot.area(color=custom_colors)
self._check_colors(ax.get_lines(), linecolors=custom_colors)
poly = [o for o in ax.get_children() if isinstance(o, PolyCollection)]
self._check_colors(poly, facecolors=custom_colors)
handles, labels = ax.get_legend_handles_labels()
# legend is stored as Line2D, thus check linecolors
linehandles = [x for x in handles if not isinstance(x, PolyCollection)]
self._check_colors(linehandles, linecolors=custom_colors)
for h in handles:
self.assertTrue(h.get_alpha() is None)
tm.close()
ax = df.plot.area(colormap='jet')
jet_colors = lmap(cm.jet, np.linspace(0, 1, len(df)))
self._check_colors(ax.get_lines(), linecolors=jet_colors)
poly = [o for o in ax.get_children() if isinstance(o, PolyCollection)]
self._check_colors(poly, facecolors=jet_colors)
handles, labels = ax.get_legend_handles_labels()
linehandles = [x for x in handles if not isinstance(x, PolyCollection)]
self._check_colors(linehandles, linecolors=jet_colors)
for h in handles:
self.assertTrue(h.get_alpha() is None)
tm.close()
# When stacked=False, alpha is set to 0.5
ax = df.plot.area(colormap=cm.jet, stacked=False)
self._check_colors(ax.get_lines(), linecolors=jet_colors)
poly = [o for o in ax.get_children() if isinstance(o, PolyCollection)]
jet_with_alpha = [(c[0], c[1], c[2], 0.5) for c in jet_colors]
self._check_colors(poly, facecolors=jet_with_alpha)
handles, labels = ax.get_legend_handles_labels()
# Line2D can't have alpha in its linecolor
self._check_colors(handles[:len(jet_colors)], linecolors=jet_colors)
for h in handles:
self.assertEqual(h.get_alpha(), 0.5)
@slow
def test_hist_colors(self):
default_colors = self._maybe_unpack_cycler(self.plt.rcParams)
df = DataFrame(randn(5, 5))
ax = df.plot.hist()
self._check_colors(ax.patches[::10], facecolors=default_colors[:5])
tm.close()
custom_colors = 'rgcby'
        ax = df.plot.hist(color=custom_colors)
self._check_colors(ax.patches[::10], facecolors=custom_colors)
tm.close()
from matplotlib import cm
# Test str -> colormap functionality
        ax = df.plot.hist(colormap='jet')
rgba_colors = lmap(cm.jet, np.linspace(0, 1, 5))
self._check_colors(ax.patches[::10], facecolors=rgba_colors)
tm.close()
# Test colormap functionality
        ax = df.plot.hist(colormap=cm.jet)
rgba_colors = lmap(cm.jet, np.linspace(0, 1, 5))
self._check_colors(ax.patches[::10], facecolors=rgba_colors)
tm.close()
ax = df.ix[:, [0]].plot.hist(color='DodgerBlue')
self._check_colors([ax.patches[0]], facecolors=['DodgerBlue'])
ax = df.plot(kind='hist', color='green')
self._check_colors(ax.patches[::10], facecolors=['green'] * 5)
tm.close()
@slow
def test_kde_colors(self):
tm._skip_if_no_scipy()
_skip_if_no_scipy_gaussian_kde()
from matplotlib import cm
custom_colors = 'rgcby'
df = DataFrame(rand(5, 5))
ax = df.plot.kde(color=custom_colors)
self._check_colors(ax.get_lines(), linecolors=custom_colors)
tm.close()
ax = df.plot.kde(colormap='jet')
rgba_colors = lmap(cm.jet, np.linspace(0, 1, len(df)))
self._check_colors(ax.get_lines(), linecolors=rgba_colors)
tm.close()
ax = df.plot.kde(colormap=cm.jet)
rgba_colors = lmap(cm.jet, np.linspace(0, 1, len(df)))
self._check_colors(ax.get_lines(), linecolors=rgba_colors)
@slow
def test_kde_colors_and_styles_subplots(self):
tm._skip_if_no_scipy()
_skip_if_no_scipy_gaussian_kde()
from matplotlib import cm
default_colors = self._maybe_unpack_cycler(self.plt.rcParams)
df = DataFrame(randn(5, 5))
axes = df.plot(kind='kde', subplots=True)
for ax, c in zip(axes, list(default_colors)):
self._check_colors(ax.get_lines(), linecolors=[c])
tm.close()
# single color char
axes = df.plot(kind='kde', color='k', subplots=True)
for ax in axes:
self._check_colors(ax.get_lines(), linecolors=['k'])
tm.close()
# single color str
axes = df.plot(kind='kde', color='red', subplots=True)
for ax in axes:
self._check_colors(ax.get_lines(), linecolors=['red'])
tm.close()
custom_colors = 'rgcby'
axes = df.plot(kind='kde', color=custom_colors, subplots=True)
for ax, c in zip(axes, list(custom_colors)):
self._check_colors(ax.get_lines(), linecolors=[c])
tm.close()
rgba_colors = lmap(cm.jet, np.linspace(0, 1, len(df)))
for cmap in ['jet', cm.jet]:
axes = df.plot(kind='kde', colormap=cmap, subplots=True)
for ax, c in zip(axes, rgba_colors):
self._check_colors(ax.get_lines(), linecolors=[c])
tm.close()
        # make color a list when plotting a one-column frame
# handles cases like df.plot(color='DodgerBlue')
axes = df.ix[:, [0]].plot(kind='kde', color='DodgerBlue', subplots=True)
self._check_colors(axes[0].lines, linecolors=['DodgerBlue'])
# single character style
axes = df.plot(kind='kde', style='r', subplots=True)
for ax in axes:
self._check_colors(ax.get_lines(), linecolors=['r'])
tm.close()
# list of styles
styles = list('rgcby')
axes = df.plot(kind='kde', style=styles, subplots=True)
for ax, c in zip(axes, styles):
self._check_colors(ax.get_lines(), linecolors=[c])
tm.close()
@slow
def test_boxplot_colors(self):
def _check_colors(bp, box_c, whiskers_c, medians_c, caps_c='k', fliers_c='b'):
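            # Nested helper: compare the line colors of each boxplot artist
            # group; caps default to black and fliers to blue.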
self._check_colors(bp['boxes'], linecolors=[box_c] * len(bp['boxes']))
self._check_colors(bp['whiskers'], linecolors=[whiskers_c] * len(bp['whiskers']))
self._check_colors(bp['medians'], linecolors=[medians_c] * len(bp['medians']))
self._check_colors(bp['fliers'], linecolors=[fliers_c] * len(bp['fliers']))
self._check_colors(bp['caps'], linecolors=[caps_c] * len(bp['caps']))
default_colors = self._maybe_unpack_cycler(self.plt.rcParams)
df = DataFrame(randn(5, 5))
bp = df.plot.box(return_type='dict')
_check_colors(bp, default_colors[0], default_colors[0], default_colors[2])
tm.close()
dict_colors = dict(boxes='#572923', whiskers='#982042',
medians='#804823', caps='#123456')
bp = df.plot.box(color=dict_colors, sym='r+', return_type='dict')
_check_colors(bp, dict_colors['boxes'], dict_colors['whiskers'],
dict_colors['medians'], dict_colors['caps'], 'r')
tm.close()
# partial colors
dict_colors = dict(whiskers='c', medians='m')
bp = df.plot.box(color=dict_colors, return_type='dict')
_check_colors(bp, default_colors[0], 'c', 'm')
tm.close()
from matplotlib import cm
# Test str -> colormap functionality
bp = df.plot.box(colormap='jet', return_type='dict')
jet_colors = lmap(cm.jet, np.linspace(0, 1, 3))
_check_colors(bp, jet_colors[0], jet_colors[0], jet_colors[2])
tm.close()
# Test colormap functionality
bp = df.plot.box(colormap=cm.jet, return_type='dict')
_check_colors(bp, jet_colors[0], jet_colors[0], jet_colors[2])
tm.close()
# string color is applied to all artists except fliers
bp = df.plot.box(color='DodgerBlue', return_type='dict')
_check_colors(bp, 'DodgerBlue', 'DodgerBlue', 'DodgerBlue',
'DodgerBlue')
# tuple is also applied to all artists except fliers
bp = df.plot.box(color=(0, 1, 0), sym='#123456', return_type='dict')
_check_colors(bp, (0, 1, 0), (0, 1, 0), (0, 1, 0), (0, 1, 0), '#123456')
with tm.assertRaises(ValueError):
            # A color dict containing an invalid key results in a ValueError
df.plot.box(color=dict(boxes='red', xxxx='blue'))
def test_default_color_cycle(self):
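        # Lines should follow the rcParams color cycle (axes.prop_cycle on
        # mpl >= 1.5, axes.color_cycle on older versions).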
import matplotlib.pyplot as plt
colors = list('rgbk')
if self.mpl_ge_1_5_0:
import cycler
plt.rcParams['axes.prop_cycle'] = cycler.cycler('color', colors)
else:
plt.rcParams['axes.color_cycle'] = colors
df = DataFrame(randn(5, 3))
ax = df.plot()
expected = self._maybe_unpack_cycler(plt.rcParams)[:3]
self._check_colors(ax.get_lines(), linecolors=expected)
def test_unordered_ts(self):
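        # An unsorted datetime index should be sorted before plotting, so the
        # x-data is increasing and the y-data follows the sorted order.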
df = DataFrame(np.array([3.0, 2.0, 1.0]),
index=[date(2012, 10, 1),
date(2012, 9, 1),
date(2012, 8, 1)],
columns=['test'])
ax = df.plot()
xticks = ax.lines[0].get_xdata()
self.assertTrue(xticks[0] < xticks[1])
ydata = ax.lines[0].get_ydata()
tm.assert_numpy_array_equal(ydata, np.array([1.0, 2.0, 3.0]))
def test_kind_both_ways(self):
df = DataFrame({'x': [1, 2, 3]})
for kind in plotting._common_kinds:
if not _ok_for_gaussian_kde(kind):
continue
df.plot(kind=kind)
getattr(df.plot, kind)()
for kind in ['scatter', 'hexbin']:
df.plot('x', 'x', kind=kind)
getattr(df.plot, kind)('x', 'x')
def test_all_invalid_plot_data(self):
df = DataFrame(list('abcd'))
for kind in plotting._common_kinds:
if not _ok_for_gaussian_kde(kind):
continue
with tm.assertRaises(TypeError):
df.plot(kind=kind)
@slow
def test_partially_invalid_plot_data(self):
with tm.RNGContext(42):
df = DataFrame(randn(10, 2), dtype=object)
df[np.random.rand(df.shape[0]) > 0.5] = 'a'
for kind in plotting._common_kinds:
if not _ok_for_gaussian_kde(kind):
continue
with tm.assertRaises(TypeError):
df.plot(kind=kind)
with tm.RNGContext(42):
# area plot doesn't support positive/negative mixed data
kinds = ['area']
df = DataFrame(rand(10, 2), dtype=object)
df[np.random.rand(df.shape[0]) > 0.5] = 'a'
for kind in kinds:
with tm.assertRaises(TypeError):
df.plot(kind=kind)
def test_invalid_kind(self):
df = DataFrame(randn(10, 2))
with tm.assertRaises(ValueError):
df.plot(kind='aasdf')
@slow
def test_hexbin_basic(self):
df = self.hexbin_df
ax = df.plot.hexbin(x='A', y='B', gridsize=10)
        # TODO: need a better way to test; this only checks existence.
self.assertEqual(len(ax.collections), 1)
# GH 6951
axes = df.plot.hexbin(x='A', y='B', subplots=True)
        # hexbin should have 2 axes in the figure: 1 for plotting and another for the colorbar
self.assertEqual(len(axes[0].figure.axes), 2)
# return value is single axes
self._check_axes_shape(axes, axes_num=1, layout=(1, 1))
@slow
def test_hexbin_with_c(self):
df = self.hexbin_df
ax = df.plot.hexbin(x='A', y='B', C='C')
self.assertEqual(len(ax.collections), 1)
ax = df.plot.hexbin(x='A', y='B', C='C', reduce_C_function=np.std)
self.assertEqual(len(ax.collections), 1)
@slow
def test_hexbin_cmap(self):
df = self.hexbin_df
# Default to BuGn
ax = df.plot.hexbin(x='A', y='B')
self.assertEqual(ax.collections[0].cmap.name, 'BuGn')
cm = 'cubehelix'
ax = df.plot.hexbin(x='A', y='B', colormap=cm)
self.assertEqual(ax.collections[0].cmap.name, cm)
@slow
def test_no_color_bar(self):
df = self.hexbin_df
ax = df.plot.hexbin(x='A', y='B', colorbar=None)
self.assertIs(ax.collections[0].colorbar, None)
@slow
def test_allow_cmap(self):
df = self.hexbin_df
ax = df.plot.hexbin(x='A', y='B', cmap='YlGn')
self.assertEqual(ax.collections[0].cmap.name, 'YlGn')
with tm.assertRaises(TypeError):
df.plot.hexbin(x='A', y='B', cmap='YlGn',
colormap='BuGn')
@slow
def test_pie_df(self):
df = DataFrame(np.random.rand(5, 3), columns=['X', 'Y', 'Z'],
index=['a', 'b', 'c', 'd', 'e'])
with tm.assertRaises(ValueError):
df.plot.pie()
ax = _check_plot_works(df.plot.pie, y='Y')
self._check_text_labels(ax.texts, df.index)
ax = _check_plot_works(df.plot.pie, y=2)
self._check_text_labels(ax.texts, df.index)
axes = _check_plot_works(df.plot.pie, filterwarnings='ignore', subplots=True)
self.assertEqual(len(axes), len(df.columns))
for ax in axes:
self._check_text_labels(ax.texts, df.index)
for ax, ylabel in zip(axes, df.columns):
self.assertEqual(ax.get_ylabel(), ylabel)
labels = ['A', 'B', 'C', 'D', 'E']
color_args = ['r', 'g', 'b', 'c', 'm']
axes = _check_plot_works(df.plot.pie, filterwarnings='ignore', subplots=True,
labels=labels, colors=color_args)
self.assertEqual(len(axes), len(df.columns))
for ax in axes:
self._check_text_labels(ax.texts, labels)
self._check_colors(ax.patches, facecolors=color_args)
def test_pie_df_nan(self):
df = DataFrame(np.random.rand(4, 4))
for i in range(4):
df.iloc[i, i] = np.nan
fig, axes = self.plt.subplots(ncols=4)
df.plot.pie(subplots=True, ax=axes, legend=True)
base_expected = ['0', '1', '2', '3']
for i, ax in enumerate(axes):
expected = list(base_expected) # force copy
expected[i] = ''
result = [x.get_text() for x in ax.texts]
self.assertEqual(result, expected)
# legend labels
            # NaNs are not included in the legend with subplots
# see https://github.com/pydata/pandas/issues/8390
self.assertEqual([x.get_text() for x in
ax.get_legend().get_texts()],
base_expected[:i] + base_expected[i+1:])
@slow
def test_errorbar_plot(self):
d = {'x': np.arange(12), 'y': np.arange(12, 0, -1)}
df = DataFrame(d)
d_err = {'x': np.ones(12)*0.2, 'y': np.ones(12)*0.4}
df_err = DataFrame(d_err)
# check line plots
ax = _check_plot_works(df.plot, yerr=df_err, logy=True)
self._check_has_errorbars(ax, xerr=0, yerr=2)
ax = _check_plot_works(df.plot, yerr=df_err, logx=True, logy=True)
self._check_has_errorbars(ax, xerr=0, yerr=2)
ax = _check_plot_works(df.plot, yerr=df_err, loglog=True)
self._check_has_errorbars(ax, xerr=0, yerr=2)
kinds = ['line', 'bar', 'barh']
for kind in kinds:
ax = _check_plot_works(df.plot, yerr=df_err['x'], kind=kind)
self._check_has_errorbars(ax, xerr=0, yerr=2)
ax = _check_plot_works(df.plot, yerr=d_err, kind=kind)
self._check_has_errorbars(ax, xerr=0, yerr=2)
ax = _check_plot_works(df.plot, yerr=df_err, xerr=df_err, kind=kind)
self._check_has_errorbars(ax, xerr=2, yerr=2)
ax = _check_plot_works(df.plot, yerr=df_err['x'], xerr=df_err['x'], kind=kind)
self._check_has_errorbars(ax, xerr=2, yerr=2)
ax = _check_plot_works(df.plot, xerr=0.2, yerr=0.2, kind=kind)
self._check_has_errorbars(ax, xerr=2, yerr=2)
axes = _check_plot_works(df.plot, filterwarnings='ignore', yerr=df_err,
xerr=df_err, subplots=True, kind=kind)
self._check_has_errorbars(axes, xerr=1, yerr=1)
ax = _check_plot_works((df+1).plot, yerr=df_err, xerr=df_err, kind='bar', log=True)
self._check_has_errorbars(ax, xerr=2, yerr=2)
# yerr is raw error values
ax = _check_plot_works(df['y'].plot, yerr=np.ones(12)*0.4)
self._check_has_errorbars(ax, xerr=0, yerr=1)
ax = _check_plot_works(df.plot, yerr=np.ones((2, 12))*0.4)
self._check_has_errorbars(ax, xerr=0, yerr=2)
# yerr is iterator
import itertools
ax = _check_plot_works(df.plot, yerr=itertools.repeat(0.1, len(df)))
self._check_has_errorbars(ax, xerr=0, yerr=2)
# yerr is column name
for yerr in ['yerr', u('誤差')]:
s_df = df.copy()
s_df[yerr] = np.ones(12)*0.2
ax = _check_plot_works(s_df.plot, yerr=yerr)
self._check_has_errorbars(ax, xerr=0, yerr=2)
ax = _check_plot_works(s_df.plot, y='y', x='x', yerr=yerr)
self._check_has_errorbars(ax, xerr=0, yerr=1)
with tm.assertRaises(ValueError):
df.plot(yerr=np.random.randn(11))
df_err = DataFrame({'x': ['zzz']*12, 'y': ['zzz']*12})
with tm.assertRaises((ValueError, TypeError)):
df.plot(yerr=df_err)
@slow
def test_errorbar_with_integer_column_names(self):
# test with integer column names
df = DataFrame(np.random.randn(10, 2))
df_err = DataFrame(np.random.randn(10, 2))
ax = _check_plot_works(df.plot, yerr=df_err)
self._check_has_errorbars(ax, xerr=0, yerr=2)
ax = _check_plot_works(df.plot, y=0, yerr=1)
self._check_has_errorbars(ax, xerr=0, yerr=1)
@slow
def test_errorbar_with_partial_columns(self):
df = DataFrame(np.random.randn(10, 3))
df_err = DataFrame(np.random.randn(10, 2), columns=[0, 2])
kinds = ['line', 'bar']
for kind in kinds:
ax = _check_plot_works(df.plot, yerr=df_err, kind=kind)
self._check_has_errorbars(ax, xerr=0, yerr=2)
ix = date_range('1/1/2000', periods=10, freq='M')
df.set_index(ix, inplace=True)
df_err.set_index(ix, inplace=True)
ax = _check_plot_works(df.plot, yerr=df_err, kind='line')
self._check_has_errorbars(ax, xerr=0, yerr=2)
d = {'x': np.arange(12), 'y': np.arange(12, 0, -1)}
df = DataFrame(d)
d_err = {'x': np.ones(12)*0.2, 'z': np.ones(12)*0.4}
df_err = DataFrame(d_err)
for err in [d_err, df_err]:
ax = _check_plot_works(df.plot, yerr=err)
self._check_has_errorbars(ax, xerr=0, yerr=1)
@slow
def test_errorbar_timeseries(self):
d = {'x': np.arange(12), 'y': np.arange(12, 0, -1)}
d_err = {'x': np.ones(12)*0.2, 'y': np.ones(12)*0.4}
# check time-series plots
ix = date_range('1/1/2000', '1/1/2001', freq='M')
tdf = DataFrame(d, index=ix)
tdf_err = DataFrame(d_err, index=ix)
kinds = ['line', 'bar', 'barh']
for kind in kinds:
ax = _check_plot_works(tdf.plot, yerr=tdf_err, kind=kind)
self._check_has_errorbars(ax, xerr=0, yerr=2)
ax = _check_plot_works(tdf.plot, yerr=d_err, kind=kind)
self._check_has_errorbars(ax, xerr=0, yerr=2)
ax = _check_plot_works(tdf.plot, y='y', yerr=tdf_err['x'], kind=kind)
self._check_has_errorbars(ax, xerr=0, yerr=1)
ax = _check_plot_works(tdf.plot, y='y', yerr='x', kind=kind)
self._check_has_errorbars(ax, xerr=0, yerr=1)
ax = _check_plot_works(tdf.plot, yerr=tdf_err, kind=kind)
self._check_has_errorbars(ax, xerr=0, yerr=2)
axes = _check_plot_works(tdf.plot, filterwarnings='ignore', kind=kind,
yerr=tdf_err, subplots=True)
self._check_has_errorbars(axes, xerr=0, yerr=1)
def test_errorbar_asymmetrical(self):
np.random.seed(0)
err = np.random.rand(3, 2, 5)
data = np.random.randn(5, 3)
df = DataFrame(data)
ax = df.plot(yerr=err, xerr=err/2)
self.assertEqual(ax.lines[7].get_ydata()[0], data[0,1]-err[1,0,0])
self.assertEqual(ax.lines[8].get_ydata()[0], data[0,1]+err[1,1,0])
self.assertEqual(ax.lines[5].get_xdata()[0], -err[1,0,0]/2)
self.assertEqual(ax.lines[6].get_xdata()[0], err[1,1,0]/2)
with tm.assertRaises(ValueError):
df.plot(yerr=err.T)
tm.close()
def test_table(self):
df = DataFrame(np.random.rand(10, 3),
index=list(string.ascii_letters[:10]))
_check_plot_works(df.plot, table=True)
_check_plot_works(df.plot, table=df)
ax = df.plot()
self.assertTrue(len(ax.tables) == 0)
plotting.table(ax, df.T)
self.assertTrue(len(ax.tables) == 1)
def test_errorbar_scatter(self):
df = DataFrame(np.random.randn(5, 2), index=range(5), columns=['x', 'y'])
df_err = DataFrame(np.random.randn(5, 2) / 5,
index=range(5), columns=['x', 'y'])
ax = _check_plot_works(df.plot.scatter, x='x', y='y')
self._check_has_errorbars(ax, xerr=0, yerr=0)
ax = _check_plot_works(df.plot.scatter, x='x', y='y', xerr=df_err)
self._check_has_errorbars(ax, xerr=1, yerr=0)
ax = _check_plot_works(df.plot.scatter, x='x', y='y', yerr=df_err)
self._check_has_errorbars(ax, xerr=0, yerr=1)
ax = _check_plot_works(df.plot.scatter, x='x', y='y',
xerr=df_err, yerr=df_err)
self._check_has_errorbars(ax, xerr=1, yerr=1)
def _check_errorbar_color(containers, expected, has_err='has_xerr'):
errs = [c.lines[1][0] for c in ax.containers if getattr(c, has_err, False)]
self._check_colors(errs, linecolors=[expected] * len(errs))
# GH 8081
df = DataFrame(np.random.randn(10, 5), columns=['a', 'b', 'c', 'd', 'e'])
ax = df.plot.scatter(x='a', y='b', xerr='d', yerr='e', c='red')
self._check_has_errorbars(ax, xerr=1, yerr=1)
_check_errorbar_color(ax.containers, 'red', has_err='has_xerr')
_check_errorbar_color(ax.containers, 'red', has_err='has_yerr')
ax = df.plot.scatter(x='a', y='b', yerr='e', color='green')
self._check_has_errorbars(ax, xerr=0, yerr=1)
_check_errorbar_color(ax.containers, 'green', has_err='has_yerr')
@slow
def test_sharex_and_ax(self):
# https://github.com/pydata/pandas/issues/9737
        # using gridspec, the axes in fig.get_axes() are sorted differently than
        # pandas expects them, so make sure that only the right ones are removed
import matplotlib.pyplot as plt
plt.close('all')
gs, axes = _generate_4_axes_via_gridspec()
df = DataFrame({"a": [1, 2, 3, 4, 5, 6],
"b": [1, 2, 3, 4, 5, 6],
"c": [1, 2, 3, 4, 5, 6],
"d": [1, 2, 3, 4, 5, 6]})
def _check(axes):
for ax in axes:
self.assertEqual(len(ax.lines), 1)
self._check_visible(ax.get_yticklabels(), visible=True)
for ax in [axes[0], axes[2]]:
self._check_visible(ax.get_xticklabels(), visible=False)
self._check_visible(ax.get_xticklabels(minor=True), visible=False)
for ax in [axes[1], axes[3]]:
self._check_visible(ax.get_xticklabels(), visible=True)
self._check_visible(ax.get_xticklabels(minor=True), visible=True)
for ax in axes:
df.plot(x="a", y="b", title="title", ax=ax, sharex=True)
gs.tight_layout(plt.gcf())
_check(axes)
tm.close()
gs, axes = _generate_4_axes_via_gridspec()
with tm.assert_produces_warning(UserWarning):
axes = df.plot(subplots=True, ax=axes, sharex=True)
_check(axes)
tm.close()
gs, axes = _generate_4_axes_via_gridspec()
# without sharex, no labels should be touched!
for ax in axes:
df.plot(x="a", y="b", title="title", ax=ax)
gs.tight_layout(plt.gcf())
for ax in axes:
self.assertEqual(len(ax.lines), 1)
self._check_visible(ax.get_yticklabels(), visible=True)
self._check_visible(ax.get_xticklabels(), visible=True)
self._check_visible(ax.get_xticklabels(minor=True), visible=True)
tm.close()
@slow
def test_sharey_and_ax(self):
# https://github.com/pydata/pandas/issues/9737
        # using gridspec, the axes in fig.get_axes() are sorted differently than
        # pandas expects them, so make sure that only the right ones are removed
import matplotlib.pyplot as plt
gs, axes = _generate_4_axes_via_gridspec()
df = DataFrame({"a": [1, 2, 3, 4, 5, 6],
"b": [1, 2, 3, 4, 5, 6],
"c": [1, 2, 3, 4, 5, 6],
"d": [1, 2, 3, 4, 5, 6]})
def _check(axes):
for ax in axes:
self.assertEqual(len(ax.lines), 1)
self._check_visible(ax.get_xticklabels(), visible=True)
self._check_visible(ax.get_xticklabels(minor=True), visible=True)
for ax in [axes[0], axes[1]]:
self._check_visible(ax.get_yticklabels(), visible=True)
for ax in [axes[2], axes[3]]:
self._check_visible(ax.get_yticklabels(), visible=False)
for ax in axes:
df.plot(x="a", y="b", title="title", ax=ax, sharey=True)
gs.tight_layout(plt.gcf())
_check(axes)
tm.close()
gs, axes = _generate_4_axes_via_gridspec()
with tm.assert_produces_warning(UserWarning):
axes = df.plot(subplots=True, ax=axes, sharey=True)
gs.tight_layout(plt.gcf())
_check(axes)
tm.close()
gs, axes = _generate_4_axes_via_gridspec()
        # without sharey, no labels should be touched!
for ax in axes:
df.plot(x="a", y="b", title="title", ax=ax)
gs.tight_layout(plt.gcf())
for ax in axes:
self.assertEqual(len(ax.lines), 1)
self._check_visible(ax.get_yticklabels(), visible=True)
self._check_visible(ax.get_xticklabels(), visible=True)
self._check_visible(ax.get_xticklabels(minor=True), visible=True)
def test_memory_leak(self):
""" Check that every plot type gets properly collected. """
import weakref
import gc
results = {}
for kind in plotting._plot_klass.keys():
if not _ok_for_gaussian_kde(kind):
continue
args = {}
if kind in ['hexbin', 'scatter', 'pie']:
df = self.hexbin_df
args = {'x': 'A', 'y': 'B'}
elif kind == 'area':
df = self.tdf.abs()
else:
df = self.tdf
# Use a weakref so we can see if the object gets collected without
# also preventing it from being collected
results[kind] = weakref.proxy(df.plot(kind=kind, **args))
# have matplotlib delete all the figures
tm.close()
# force a garbage collection
gc.collect()
for key in results:
# check that every plot was collected
with tm.assertRaises(ReferenceError):
# need to actually access something to get an error
results[key].lines
@slow
def test_df_subplots_patterns_minorticks(self):
# GH 10657
import matplotlib.pyplot as plt
df = DataFrame(np.random.randn(10, 2),
index=date_range('1/1/2000', periods=10),
columns=list('AB'))
# shared subplots
fig, axes = plt.subplots(2, 1, sharex=True)
axes = df.plot(subplots=True, ax=axes)
for ax in axes:
self.assertEqual(len(ax.lines), 1)
self._check_visible(ax.get_yticklabels(), visible=True)
# xaxis of 1st ax must be hidden
self._check_visible(axes[0].get_xticklabels(), visible=False)
self._check_visible(axes[0].get_xticklabels(minor=True), visible=False)
self._check_visible(axes[1].get_xticklabels(), visible=True)
self._check_visible(axes[1].get_xticklabels(minor=True), visible=True)
tm.close()
fig, axes = plt.subplots(2, 1)
with tm.assert_produces_warning(UserWarning):
axes = df.plot(subplots=True, ax=axes, sharex=True)
for ax in axes:
self.assertEqual(len(ax.lines), 1)
self._check_visible(ax.get_yticklabels(), visible=True)
# xaxis of 1st ax must be hidden
self._check_visible(axes[0].get_xticklabels(), visible=False)
self._check_visible(axes[0].get_xticklabels(minor=True), visible=False)
self._check_visible(axes[1].get_xticklabels(), visible=True)
self._check_visible(axes[1].get_xticklabels(minor=True), visible=True)
tm.close()
# not shared
fig, axes = plt.subplots(2, 1)
axes = df.plot(subplots=True, ax=axes)
for ax in axes:
self.assertEqual(len(ax.lines), 1)
self._check_visible(ax.get_yticklabels(), visible=True)
self._check_visible(ax.get_xticklabels(), visible=True)
self._check_visible(ax.get_xticklabels(minor=True), visible=True)
tm.close()
@slow
def test_df_gridspec_patterns(self):
# GH 10819
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
ts = Series(np.random.randn(10),
index=date_range('1/1/2000', periods=10))
df = DataFrame(np.random.randn(10, 2), index=ts.index,
columns=list('AB'))
def _get_vertical_grid():
gs = gridspec.GridSpec(3, 1)
fig = plt.figure()
ax1 = fig.add_subplot(gs[:2, :])
ax2 = fig.add_subplot(gs[2, :])
return ax1, ax2
def _get_horizontal_grid():
gs = gridspec.GridSpec(1, 3)
fig = plt.figure()
ax1 = fig.add_subplot(gs[:, :2])
ax2 = fig.add_subplot(gs[:, 2])
return ax1, ax2
for ax1, ax2 in [_get_vertical_grid(), _get_horizontal_grid()]:
ax1 = ts.plot(ax=ax1)
self.assertEqual(len(ax1.lines), 1)
ax2 = df.plot(ax=ax2)
self.assertEqual(len(ax2.lines), 2)
for ax in [ax1, ax2]:
self._check_visible(ax.get_yticklabels(), visible=True)
self._check_visible(ax.get_xticklabels(), visible=True)
self._check_visible(ax.get_xticklabels(minor=True), visible=True)
tm.close()
# subplots=True
for ax1, ax2 in [_get_vertical_grid(), _get_horizontal_grid()]:
axes = df.plot(subplots=True, ax=[ax1, ax2])
self.assertEqual(len(ax1.lines), 1)
self.assertEqual(len(ax2.lines), 1)
for ax in axes:
self._check_visible(ax.get_yticklabels(), visible=True)
self._check_visible(ax.get_xticklabels(), visible=True)
self._check_visible(ax.get_xticklabels(minor=True), visible=True)
tm.close()
# vertical / subplots / sharex=True / sharey=True
ax1, ax2 = _get_vertical_grid()
with tm.assert_produces_warning(UserWarning):
axes = df.plot(subplots=True, ax=[ax1, ax2],
sharex=True, sharey=True)
self.assertEqual(len(axes[0].lines), 1)
self.assertEqual(len(axes[1].lines), 1)
for ax in [ax1, ax2]:
# yaxis are visible because there is only one column
self._check_visible(ax.get_yticklabels(), visible=True)
# xaxis of axes0 (top) are hidden
self._check_visible(axes[0].get_xticklabels(), visible=False)
self._check_visible(axes[0].get_xticklabels(minor=True), visible=False)
self._check_visible(axes[1].get_xticklabels(), visible=True)
self._check_visible(axes[1].get_xticklabels(minor=True), visible=True)
tm.close()
# horizontal / subplots / sharex=True / sharey=True
ax1, ax2 = _get_horizontal_grid()
with tm.assert_produces_warning(UserWarning):
axes = df.plot(subplots=True, ax=[ax1, ax2],
sharex=True, sharey=True)
self.assertEqual(len(axes[0].lines), 1)
self.assertEqual(len(axes[1].lines), 1)
self._check_visible(axes[0].get_yticklabels(), visible=True)
# yaxis of axes1 (right) are hidden
self._check_visible(axes[1].get_yticklabels(), visible=False)
for ax in [ax1, ax2]:
# xaxis are visible because there is only one column
self._check_visible(ax.get_xticklabels(), visible=True)
self._check_visible(ax.get_xticklabels(minor=True), visible=True)
tm.close()
# boxed
def _get_boxed_grid():
gs = gridspec.GridSpec(3,3)
fig = plt.figure()
ax1 = fig.add_subplot(gs[:2, :2])
ax2 = fig.add_subplot(gs[:2, 2])
ax3 = fig.add_subplot(gs[2, :2])
ax4 = fig.add_subplot(gs[2, 2])
return ax1, ax2, ax3, ax4
axes = _get_boxed_grid()
df = DataFrame(np.random.randn(10, 4),
index=ts.index, columns=list('ABCD'))
axes = df.plot(subplots=True, ax=axes)
for ax in axes:
self.assertEqual(len(ax.lines), 1)
# axis are visible because these are not shared
self._check_visible(ax.get_yticklabels(), visible=True)
self._check_visible(ax.get_xticklabels(), visible=True)
self._check_visible(ax.get_xticklabels(minor=True), visible=True)
tm.close()
# subplots / sharex=True / sharey=True
axes = _get_boxed_grid()
with tm.assert_produces_warning(UserWarning):
axes = df.plot(subplots=True, ax=axes, sharex=True, sharey=True)
for ax in axes:
self.assertEqual(len(ax.lines), 1)
for ax in [axes[0], axes[2]]: # left column
self._check_visible(ax.get_yticklabels(), visible=True)
for ax in [axes[1], axes[3]]: # right column
self._check_visible(ax.get_yticklabels(), visible=False)
for ax in [axes[0], axes[1]]: # top row
self._check_visible(ax.get_xticklabels(), visible=False)
self._check_visible(ax.get_xticklabels(minor=True), visible=False)
for ax in [axes[2], axes[3]]: # bottom row
self._check_visible(ax.get_xticklabels(), visible=True)
self._check_visible(ax.get_xticklabels(minor=True), visible=True)
tm.close()
@slow
def test_df_grid_settings(self):
# Make sure plot defaults to rcParams['axes.grid'] setting, GH 9792
self._check_grid_settings(DataFrame({'a':[1,2,3],'b':[2,3,4]}),
plotting._dataframe_kinds, kws={'x':'a','y':'b'})
def test_option_mpl_style(self):
set_option('display.mpl_style', 'default')
set_option('display.mpl_style', None)
set_option('display.mpl_style', False)
with tm.assertRaises(ValueError):
set_option('display.mpl_style', 'default2')
def test_invalid_colormap(self):
df = DataFrame(randn(3, 2), columns=['A', 'B'])
with tm.assertRaises(ValueError):
df.plot(colormap='invalid_colormap')
def test_plain_axes(self):
# supplied ax itself is a SubplotAxes, but figure contains also
# a plain Axes object (GH11556)
fig, ax = self.plt.subplots()
fig.add_axes([0.2, 0.2, 0.2, 0.2])
Series(rand(10)).plot(ax=ax)
        # supplied ax itself is a plain Axes, but because of the cmap keyword
        # a new ax is created for the colorbar -> also multiple axes (GH11520)
df = DataFrame({'a': randn(8), 'b': randn(8)})
fig = self.plt.figure()
ax = fig.add_axes((0,0,1,1))
df.plot(kind='scatter', ax=ax, x='a', y='b', c='a', cmap='hsv')
# other examples
fig, ax = self.plt.subplots()
from mpl_toolkits.axes_grid1 import make_axes_locatable
divider = make_axes_locatable(ax)
cax = divider.append_axes("right", size="5%", pad=0.05)
Series(rand(10)).plot(ax=ax)
Series(rand(10)).plot(ax=cax)
fig, ax = self.plt.subplots()
from mpl_toolkits.axes_grid.inset_locator import inset_axes
iax = inset_axes(ax, width="30%", height=1., loc=3)
Series(rand(10)).plot(ax=ax)
Series(rand(10)).plot(ax=iax)
@tm.mplskip
class TestDataFrameGroupByPlots(TestPlotBase):
def test_series_groupby_plotting_nominally_works(self):
n = 10
weight = Series(np.random.normal(166, 20, size=n))
height = Series(np.random.normal(60, 10, size=n))
with tm.RNGContext(42):
gender = tm.choice(['male', 'female'], size=n)
weight.groupby(gender).plot()
tm.close()
height.groupby(gender).hist()
tm.close()
#Regression test for GH8733
height.groupby(gender).plot(alpha=0.5)
tm.close()
def test_plotting_with_float_index_works(self):
# GH 7025
df = DataFrame({'def': [1,1,1,2,2,2,3,3,3],
'val': np.random.randn(9)},
index=[1.0,2.0,3.0,1.0,2.0,3.0,1.0,2.0,3.0])
df.groupby('def')['val'].plot()
tm.close()
df.groupby('def')['val'].apply(lambda x: x.plot())
tm.close()
def test_hist_single_row(self):
# GH10214
bins = np.arange(80, 100 + 2, 1)
df = DataFrame({"Name": ["AAA", "BBB"], "ByCol": [1, 2], "Mark": [85, 89]})
df["Mark"].hist(by=df["ByCol"], bins=bins)
df = DataFrame({"Name": ["AAA"], "ByCol": [1], "Mark": [85]})
df["Mark"].hist(by=df["ByCol"], bins=bins)
def test_plot_submethod_works(self):
df = DataFrame({'x': [1, 2, 3, 4, 5],
'y': [1, 2, 3, 2, 1],
'z': list('ababa')})
df.groupby('z').plot.scatter('x', 'y')
tm.close()
df.groupby('z')['x'].plot.line()
tm.close()
def assert_is_valid_plot_return_object(objs):
import matplotlib.pyplot as plt
if isinstance(objs, np.ndarray):
for el in objs.flat:
assert isinstance(el, plt.Axes), ('one of \'objs\' is not a '
'matplotlib Axes instance, '
'type encountered {0!r}'
''.format(el.__class__.__name__))
else:
assert isinstance(objs, (plt.Artist, tuple, dict)), \
('objs is neither an ndarray of Artist instances nor a '
'single Artist instance, tuple, or dict, "objs" is a {0!r} '
''.format(objs.__class__.__name__))
def _check_plot_works(f, filterwarnings='always', **kwargs):
import matplotlib.pyplot as plt
ret = None
with warnings.catch_warnings():
warnings.simplefilter(filterwarnings)
try:
try:
fig = kwargs['figure']
except KeyError:
fig = plt.gcf()
plt.clf()
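            # First pass: draw onto the top subplot (or the caller-supplied ax)
            # and check that a valid matplotlib object comes back.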
ax = kwargs.get('ax', fig.add_subplot(211))
ret = f(**kwargs)
assert_is_valid_plot_return_object(ret)
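            # Second pass: retry on a fresh lower subplot; plot kinds that do not
            # accept an explicit ax are allowed to fail silently here.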
try:
kwargs['ax'] = fig.add_subplot(212)
ret = f(**kwargs)
except Exception:
pass
else:
assert_is_valid_plot_return_object(ret)
with ensure_clean(return_filelike=True) as path:
plt.savefig(path)
finally:
tm.close(fig)
return ret
def _generate_4_axes_via_gridspec():
import matplotlib.pyplot as plt
import matplotlib as mpl
import matplotlib.gridspec
gs = mpl.gridspec.GridSpec(2, 2)
ax_tl = plt.subplot(gs[0,0])
ax_ll = plt.subplot(gs[1,0])
ax_tr = plt.subplot(gs[0,1])
ax_lr = plt.subplot(gs[1,1])
return gs, [ax_tl, ax_ll, ax_tr, ax_lr]
def curpath():
pth, _ = os.path.split(os.path.abspath(__file__))
return pth
if __name__ == '__main__':
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
exit=False)
| mit |
jhektor/Puffin | inputfiles/calibration_crypla/calibration_main_elab.py | 1 | 8217 | from subprocess import call #interface to the command line
import scipy.optimize as spo
import scipy.io as sio
import numpy as np
import matplotlib.pyplot as plt
plt.ion() #interactive plotting
fig = plt.figure()
ax1 = fig.add_subplot(131)
ax1.set_title('010 loading')
ax1.set_ylim([0,35])
ax2 = fig.add_subplot(132)
ax2.set_title('100 loading')
ax2.set_ylim([0,35])
ax3 = fig.add_subplot(133)
ax3.set_title('101 loading')
ax3.set_ylim([0,35])
#Define the objective function, minimizing the 2-norm of the simulation-experiment difference
def calibfcn(x, pltstring = '--'):
error = 0
for loadcase in [0, 1, 2]:
#Read experimental data from mat files
        if loadcase == 0:
data_file = '/home/viktor/projects/Puffin/inputfiles/calibration_crypla/data/5compDirTest_010.mat'
eul2 = 'Materials/elasticity_tensor/euler_angle_1=90 Materials/elasticity_tensor/euler_angle_2=90 Materials/elasticity_tensor/euler_angle_3=0' #changes the rotation of crystal to 001 along loading in input file
        elif loadcase == 1:
data_file = '/home/viktor/projects/Puffin/inputfiles/calibration_crypla/data/5compDirTest_100.mat'
eul2 = 'Materials/elasticity_tensor/euler_angle_1=180 Materials/elasticity_tensor/euler_angle_2=90 Materials/elasticity_tensor/euler_angle_3=90' #changes the rotation of crystal to 100 along loading in input file
        elif loadcase == 2:
data_file = '/home/viktor/projects/Puffin/inputfiles/calibration_crypla/data/5compDirTest_101.mat'
eul2 = 'Materials/elasticity_tensor/euler_angle_1=0 Materials/elasticity_tensor/euler_angle_2=45 Materials/elasticity_tensor/euler_angle_3=90' #changes the rotation of crystal to 101 along loading in input file
data = sio.loadmat(data_file)
strain_exp = data['xx'][:,0]
stress_exp = data['yy'][:,0]*1e-6 #in MPa
#Set up moose input file
inputfile = '1element_calib.i '
# names of properties to calibrate
slip_rate_props_name = 'UserObjects/slip_rate_gss/flowprops='
state_var_props_name = 'UserObjects/state_var_gss/group_values='
state_var_rate_h0_name = 'UserObjects/state_var_evol_rate_comp_gss/h0_group_values='
state_var_rate_tauSat_name = 'UserObjects/state_var_evol_rate_comp_gss/tauSat_group_values='
state_var_rate_hardeningExponent_name = 'UserObjects/state_var_evol_rate_comp_gss/hardeningExponent_group_values='
# initial values (from Darbandi2012)
slip_rate_props_vals = [1, 4, x[0], x[1], 5, 8, x[2], x[3], 9, 12, x[4], x[5], 13, 16, x[6], x[7], 17, 20, x[8], x[9], 21, 32, x[10], x[11]] #start_ss end_ss gamma0 1/m m = 20??
state_var_props_vals = [x[12], x[13], x[14], x[15], x[16], x[17]]# initial slip resistance values of each ss
state_var_rate_h0_vals = [x[18], x[19], x[20], x[21], x[22], x[23]] # h0 of each ss
state_var_rate_tauSat_vals = [x[24], x[25], x[26], x[27], x[28], x[29]] # tau saturation of each ss
state_var_rate_hardeningExponent_vals = [x[30], x[31], x[32], x[33], x[34], x[35]] # the hardening exponent c
slip_rate_props = '\''+" ".join(str(x) for x in slip_rate_props_vals)+'\' '
state_var_props = '\''+" ".join(str(x) for x in state_var_props_vals)+'\' '
state_var_rate_h0 = '\''+" ".join(str(x) for x in state_var_rate_h0_vals)+'\' '
state_var_rate_tauSat = '\''+" ".join(str(x) for x in state_var_rate_tauSat_vals)+'\' '
state_var_rate_hardeningExponent = '\''+" ".join(str(x) for x in state_var_rate_hardeningExponent_vals)+'\' '
#Run moose simulation
print 'Load case:', loadcase
print "\033[94mCurrent material parameters [MPa]:" + "\033[94m{}\033[0m".format(x*160.217662)
print "\033[95mCurrent material parameters:" + "\033[95m{}\033[0m".format(x)
runcmd = 'mpirun -n 1 ../../puffin-opt -i ' + inputfile + slip_rate_props_name + slip_rate_props + state_var_props_name + state_var_props + state_var_rate_h0_name + state_var_rate_h0 + state_var_rate_tauSat_name + state_var_rate_tauSat + state_var_rate_hardeningExponent_name + state_var_rate_hardeningExponent + eul2 + ' > mooselog.txt'
print 'Running this command:\n' + runcmd + "\033[0m"
call(runcmd, shell=True)
#Get stress strain curve from csv file
# aa = np.recfromcsv('calibrationSn.csv')
aa = np.loadtxt('calibrationSn.csv',delimiter = ',', skiprows = 1)
# idx = (np.abs(-aa[:,-3] - 0.12)).argmin()
#idx = -1
strain_sim = -aa[:,-3] #eps_yy
stress_sim = -aa[:,-1]*160.217662 #sigma_yy in MPa (compression positive)
if np.max(strain_sim) < 0.048: #this means the simulation failed ???
error += 20
else:
#Interpolate experimental values to simulated times
stress_exp_interp = np.interp(strain_sim,strain_exp,stress_exp)
#Calculate error
error += np.linalg.norm((stress_sim-stress_exp_interp)/stress_exp_interp)
            if loadcase == 0:
# error = np.linalg.norm((stress_sim-stress_exp_interp)/stress_exp_interp)
ax1.plot(strain_exp,stress_exp,'ko')
ax1.plot(strain_sim,stress_sim,pltstring)
            elif loadcase == 1:
ax2.plot(strain_exp,stress_exp,'ko')
ax2.plot(strain_sim,stress_sim,pltstring)
            elif loadcase == 2:
ax3.plot(strain_exp,stress_exp,'ko')
ax3.plot(strain_sim,stress_sim,pltstring)
plt.pause(0.05)
print "\033[91mError is: \033[00m"+"\033[91m {}\033[00m".format(error)
return error
# Minimize the objective function
# x = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
#bounds = (((1e-3, 1e-1),) + ((5e-3, 1e-1),))*6 + ((5e-2, 0.8),)*6 + ((0.15, 0.75),)*6 + ((5e-2, 0.4),)*6 + ((1.1, 3),)*6 # set bounds for all variables
bounds = ((0, None),)*36 # set bounds for all variables
#Initial values of parameters to calibrate (from Darbandi2012) [h0, ss, s0] used to scale x
#Add more s0 variables. Take initial values from tables 8.4 and 8.5 in Darbandi's thesis
# mpar = [0.468, 0.075, 0.053, 0.0268, 0.0649, 0.0281, 0.0350, 0.0318, 0.0462, 0.0936, 0.0412, 0.0749]
# mpar = [0.44401723, 0.01013415, 0.05331709, 0.02615524, 0.06898327, 0.02759656, 0.03323911, 0.01238013, 0.04818832, 0.09332103, 0.6712851, 0.02919173]
# mpar = [0.44401723, 0.075, 0.05331709, 0.02615524, 0.064912, 0.0281, 0.034952, 0.031832, 0.046187, 0.093623, 0.041194, 0.074898]
# x = mpar*x
#Results of optimization on 001 to 12% strain
#mpar001 = [0.39584498,0.11386226,0.05122774,0.03452174,0.064912,0.01450561,0.034952,0.03256501,0.04525029,0.08612754,0.0749127,0.04518588, 1, 1] # added 2 extra values hacks
#Results of optimization on 110 only
#mpar110 = [0.17458343,0.05236679,0.05122774,0.03452174,0.03696807,0.00912421,0.02046358,0.01612225,0.04525029,0.08612754,0.29181706,0.02277457, 1, 1]
#mpar = np.array([0]*14)
#mpar = 0.5*(np.array(mpar001)+np.array(mpar110))
#mpar[2] = mpar001[2]
#mpar[3] = mpar001[3]
#mpar[4] = mpar110[4]
#mpar[6] = mpar110[6]
#mpar[8] = mpar001[8]
# mpar[10] = 0.04
mpar = ([0.1, 0.1, 0.03815724, 0.005, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1,
0.05, 0.35717711, 0.05, 0.05, 0.05, 0.05, 0.56760849, 0.6, 0.15, 0.51982378,
0.53509619, 0.54767919, 0.05000003, 0.22, 0.05000002, 0.05, 0.05, 0.05000002, 1.81232383, 1.75,
1.66587885, 2.11896068, 1.47774703, 1.25959834])
#mpar = np.array([0.1]*36) # --- TODO insert result from an optimization
#mpar[0:12] = [0.03816292, 0.18873662]*6
#mpar[12:18] = [0.25020678, 0.2502589, 0.21444156, 0.21446406, 0.2501918, 0.21424638]
#mpar[18:24] = 0.6
#mpar[24:30] = [0.22, 0.22, 0.19147265, 0.19120634, 0.22, 0.19149677]
#mpar[30:36] = 1.75
results = spo.minimize(calibfcn,mpar, bounds=bounds)
print results.x*160.217662 # calibrated parameters in MPa
if not results.success:
print results.message
else:
print "Successful optimization!, %5d, iterations" % (results.nit)
#Run simulation with the calibrated parameters
calibfcn(results.x,pltstring='-')
# calibfcn(mpar,pltstring='-')
# plt.pause()
# ax.plot(strain_exp,stress_exp,strain_sim,stress_sim,strain_sim,stress_exp_interp)
plt.show(block=True)
# calibfcn(mpar,(strain_exp, stress_exp))
| lgpl-2.1 |
asampat3090/keras | examples/kaggle_otto_nn.py | 70 | 3775 | from __future__ import absolute_import
from __future__ import print_function
import numpy as np
import pandas as pd
np.random.seed(1337) # for reproducibility
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation
from keras.layers.normalization import BatchNormalization
from keras.layers.advanced_activations import PReLU
from keras.utils import np_utils, generic_utils
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import StandardScaler
'''
This demonstrates how to reach a score of 0.4890 (local validation)
on the Kaggle Otto challenge, with a deep net using Keras.
Compatible Python 2.7-3.4. Requires Scikit-Learn and Pandas.
Recommended to run on GPU:
Command: THEANO_FLAGS=mode=FAST_RUN,device=gpu,floatX=float32 python kaggle_otto_nn.py
On EC2 g2.2xlarge instance: 19s/epoch. 6-7 minutes total training time.
Best validation score at epoch 21: 0.4881
Try it at home:
- with/without BatchNormalization (BatchNormalization helps!)
- with ReLU or with PReLU (PReLU helps!)
    - with smaller layers, larger layers
    - with more layers, fewer layers
- with different optimizers (SGD+momentum+decay is probably better than Adam!)
Get the data from Kaggle: https://www.kaggle.com/c/otto-group-product-classification-challenge/data
'''
def load_data(path, train=True):
df = pd.read_csv(path)
X = df.values.copy()
if train:
np.random.shuffle(X) # https://youtu.be/uyUXoap67N8
X, labels = X[:, 1:-1].astype(np.float32), X[:, -1]
return X, labels
else:
X, ids = X[:, 1:].astype(np.float32), X[:, 0].astype(str)
return X, ids
def preprocess_data(X, scaler=None):
if not scaler:
scaler = StandardScaler()
scaler.fit(X)
X = scaler.transform(X)
return X, scaler
def preprocess_labels(labels, encoder=None, categorical=True):
if not encoder:
encoder = LabelEncoder()
encoder.fit(labels)
y = encoder.transform(labels).astype(np.int32)
if categorical:
y = np_utils.to_categorical(y)
return y, encoder
def make_submission(y_prob, ids, encoder, fname):
with open(fname, 'w') as f:
f.write('id,')
f.write(','.join([str(i) for i in encoder.classes_]))
f.write('\n')
for i, probs in zip(ids, y_prob):
probas = ','.join([i] + [str(p) for p in probs.tolist()])
f.write(probas)
f.write('\n')
print("Wrote submission to file {}.".format(fname))
print("Loading data...")
X, labels = load_data('train.csv', train=True)
X, scaler = preprocess_data(X)
y, encoder = preprocess_labels(labels)
X_test, ids = load_data('test.csv', train=False)
X_test, _ = preprocess_data(X_test, scaler)
nb_classes = y.shape[1]
print(nb_classes, 'classes')
dims = X.shape[1]
print(dims, 'dims')
print("Building model...")
model = Sequential()
model.add(Dense(dims, 512, init='glorot_uniform'))
model.add(PReLU((512,)))
model.add(BatchNormalization((512,)))
model.add(Dropout(0.5))
model.add(Dense(512, 512, init='glorot_uniform'))
model.add(PReLU((512,)))
model.add(BatchNormalization((512,)))
model.add(Dropout(0.5))
model.add(Dense(512, 512, init='glorot_uniform'))
model.add(PReLU((512,)))
model.add(BatchNormalization((512,)))
model.add(Dropout(0.5))
model.add(Dense(512, nb_classes, init='glorot_uniform'))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy', optimizer="adam")
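# The docstring above suggests trying SGD with momentum + decay instead of Adam.
# Hedged, untuned sketch (uncomment to try; the hyperparameters below are
# illustrative assumptions, not values taken from this example):
# from keras.optimizers import SGD
# sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
# model.compile(loss='categorical_crossentropy', optimizer=sgd)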
print("Training model...")
model.fit(X, y, nb_epoch=20, batch_size=128, validation_split=0.15)
print("Generating submission...")
proba = model.predict_proba(X_test)
make_submission(proba, ids, encoder, fname='keras-otto.csv')
| mit |
wwf5067/statsmodels | statsmodels/tsa/statespace/tests/test_kalman.py | 19 | 22530 | """
Tests for _statespace module
Author: Chad Fulton
License: Simplified-BSD
References
----------
Kim, Chang-Jin, and Charles R. Nelson. 1999.
"State-Space Models with Regime Switching:
Classical and Gibbs-Sampling Approaches with Applications".
MIT Press Books. The MIT Press.
Hamilton, James D. 1994.
Time Series Analysis.
Princeton, N.J.: Princeton University Press.
"""
from __future__ import division, absolute_import, print_function
import numpy as np
import pandas as pd
import os
try:
from scipy.linalg.blas import find_best_blas_type
except ImportError:
# Shim for SciPy 0.11, derived from tag=0.11 scipy.linalg.blas
_type_conv = {'f': 's', 'd': 'd', 'F': 'c', 'D': 'z', 'G': 'z'}
def find_best_blas_type(arrays):
dtype, index = max(
[(ar.dtype, i) for i, ar in enumerate(arrays)])
prefix = _type_conv.get(dtype.char, 'd')
return (prefix, dtype, None)
from statsmodels.tsa.statespace.sarimax import SARIMAX
from statsmodels.tsa.statespace import _statespace as ss
from .results import results_kalman_filter
from numpy.testing import assert_almost_equal, assert_allclose
from nose.exc import SkipTest
prefix_statespace_map = {
's': ss.sStatespace, 'd': ss.dStatespace,
'c': ss.cStatespace, 'z': ss.zStatespace
}
prefix_kalman_filter_map = {
's': ss.sKalmanFilter, 'd': ss.dKalmanFilter,
'c': ss.cKalmanFilter, 'z': ss.zKalmanFilter
}
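# The s/d/c/z prefixes follow BLAS naming: single or double precision, real or complex.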
current_path = os.path.dirname(os.path.abspath(__file__))
class Clark1987(object):
"""
Clark's (1987) univariate unobserved components model of real GDP (as
presented in Kim and Nelson, 1999)
Test data produced using GAUSS code described in Kim and Nelson (1999) and
found at http://econ.korea.ac.kr/~cjkim/SSMARKOV.htm
See `results.results_kalman_filter` for more information.
"""
def __init__(self, dtype=float, conserve_memory=0, loglikelihood_burn=0):
self.true = results_kalman_filter.uc_uni
self.true_states = pd.DataFrame(self.true['states'])
# GDP, Quarterly, 1947.1 - 1995.3
data = pd.DataFrame(
self.true['data'],
index=pd.date_range('1947-01-01', '1995-07-01', freq='QS'),
columns=['GDP']
)
data['lgdp'] = np.log(data['GDP'])
# Parameters
self.conserve_memory = conserve_memory
self.loglikelihood_burn = loglikelihood_burn
# Observed data
self.obs = np.array(data['lgdp'], ndmin=2, dtype=dtype, order="F")
# Measurement equation
self.k_endog = k_endog = 1 # dimension of observed data
# design matrix
self.design = np.zeros((k_endog, 4, 1), dtype=dtype, order="F")
self.design[:, :, 0] = [1, 1, 0, 0]
# observation intercept
self.obs_intercept = np.zeros((k_endog, 1), dtype=dtype, order="F")
# observation covariance matrix
self.obs_cov = np.zeros((k_endog, k_endog, 1), dtype=dtype, order="F")
# Transition equation
self.k_states = k_states = 4 # dimension of state space
# transition matrix
self.transition = np.zeros((k_states, k_states, 1),
dtype=dtype, order="F")
self.transition[([0, 0, 1, 1, 2, 3],
[0, 3, 1, 2, 1, 3],
[0, 0, 0, 0, 0, 0])] = [1, 1, 0, 0, 1, 1]
# state intercept
self.state_intercept = np.zeros((k_states, 1), dtype=dtype, order="F")
# selection matrix
self.selection = np.asfortranarray(np.eye(k_states)[:, :, None],
dtype=dtype)
# state covariance matrix
self.state_cov = np.zeros((k_states, k_states, 1),
dtype=dtype, order="F")
# Initialization: Diffuse priors
self.initial_state = np.zeros((k_states,), dtype=dtype, order="F")
self.initial_state_cov = np.asfortranarray(np.eye(k_states)*100,
dtype=dtype)
# Update matrices with given parameters
(sigma_v, sigma_e, sigma_w, phi_1, phi_2) = np.array(
self.true['parameters'], dtype=dtype
)
self.transition[([1, 1], [1, 2], [0, 0])] = [phi_1, phi_2]
self.state_cov[
np.diag_indices(k_states)+(np.zeros(k_states, dtype=int),)] = [
sigma_v**2, sigma_e**2, 0, sigma_w**2
]
# Initialization: modification
# Due to the difference in the way Kim and Nelson (1999) and Durbin
# and Koopman (2012) define the order of the Kalman filter routines,
# we need to modify the initial state covariance matrix to match
# Kim and Nelson's results, since the *Statespace models follow Durbin
# and Koopman.
self.initial_state_cov = np.asfortranarray(
np.dot(
np.dot(self.transition[:, :, 0], self.initial_state_cov),
self.transition[:, :, 0].T
)
)
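        # i.e. P0 <- T * P0 * T', so the diffuse prior matches Kim and Nelson's
        # ordering of the filter recursions.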
def init_filter(self):
# Use the appropriate Statespace model
prefix = find_best_blas_type((self.obs,))
cls = prefix_statespace_map[prefix[0]]
# Instantiate the statespace model
self.model = cls(
self.obs, self.design, self.obs_intercept, self.obs_cov,
self.transition, self.state_intercept, self.selection,
self.state_cov
)
self.model.initialize_known(self.initial_state, self.initial_state_cov)
# Initialize the appropriate Kalman filter
cls = prefix_kalman_filter_map[prefix[0]]
self.filter = cls(self.model, conserve_memory=self.conserve_memory,
loglikelihood_burn=self.loglikelihood_burn)
def run_filter(self):
# Filter the data
self.filter()
# Get results
self.result = {
'loglike': lambda burn: np.sum(self.filter.loglikelihood[burn:]),
'state': np.array(self.filter.filtered_state),
}
def test_loglike(self):
assert_almost_equal(
self.result['loglike'](self.true['start']), self.true['loglike'], 5
)
def test_filtered_state(self):
assert_almost_equal(
self.result['state'][0][self.true['start']:],
self.true_states.iloc[:, 0], 4
)
assert_almost_equal(
self.result['state'][1][self.true['start']:],
self.true_states.iloc[:, 1], 4
)
assert_almost_equal(
self.result['state'][3][self.true['start']:],
self.true_states.iloc[:, 2], 4
)
class TestClark1987Single(Clark1987):
"""
Basic single precision test for the loglikelihood and filtered states.
"""
def __init__(self):
raise SkipTest('Not implemented')
super(TestClark1987Single, self).__init__(
dtype=np.float32, conserve_memory=0
)
self.init_filter()
self.run_filter()
def test_loglike(self):
assert_allclose(
self.result['loglike'](self.true['start']), self.true['loglike'],
rtol=1e-3
)
def test_filtered_state(self):
assert_allclose(
self.result['state'][0][self.true['start']:],
self.true_states.iloc[:, 0],
atol=1e-2
)
assert_allclose(
self.result['state'][1][self.true['start']:],
self.true_states.iloc[:, 1],
atol=1e-2
)
assert_allclose(
self.result['state'][3][self.true['start']:],
self.true_states.iloc[:, 2],
atol=1e-2
)
class TestClark1987Double(Clark1987):
"""
Basic double precision test for the loglikelihood and filtered states.
"""
def __init__(self):
super(TestClark1987Double, self).__init__(
dtype=float, conserve_memory=0
)
self.init_filter()
self.run_filter()
class TestClark1987SingleComplex(Clark1987):
"""
Basic single precision complex test for the loglikelihood and filtered
states.
"""
def __init__(self):
raise SkipTest('Not implemented')
super(TestClark1987SingleComplex, self).__init__(
dtype=np.complex64, conserve_memory=0
)
self.init_filter()
self.run_filter()
def test_loglike(self):
assert_allclose(
self.result['loglike'](self.true['start']), self.true['loglike'],
rtol=1e-3
)
def test_filtered_state(self):
assert_allclose(
self.result['state'][0][self.true['start']:],
self.true_states.iloc[:, 0],
atol=1e-2
)
assert_allclose(
self.result['state'][1][self.true['start']:],
self.true_states.iloc[:, 1],
atol=1e-2
)
assert_allclose(
self.result['state'][3][self.true['start']:],
self.true_states.iloc[:, 2],
atol=1e-2
)
class TestClark1987DoubleComplex(Clark1987):
"""
Basic double precision complex test for the loglikelihood and filtered
states.
"""
def __init__(self):
super(TestClark1987DoubleComplex, self).__init__(
dtype=complex, conserve_memory=0
)
self.init_filter()
self.run_filter()
class TestClark1987Conserve(Clark1987):
"""
Memory conservation test for the loglikelihood and filtered states.
"""
def __init__(self):
super(TestClark1987Conserve, self).__init__(
dtype=float, conserve_memory=0x01 | 0x02
)
self.init_filter()
self.run_filter()
class Clark1987Forecast(Clark1987):
"""
Forecasting test for the loglikelihood and filtered states.
"""
def __init__(self, dtype=float, nforecast=100, conserve_memory=0):
super(Clark1987Forecast, self).__init__(
dtype, conserve_memory
)
self.nforecast = nforecast
# Add missing observations to the end (to forecast)
self._obs = self.obs
self.obs = np.array(np.r_[self.obs[0, :], [np.nan]*nforecast],
ndmin=2, dtype=dtype, order="F")
def test_filtered_state(self):
assert_almost_equal(
self.result['state'][0][self.true['start']:-self.nforecast],
self.true_states.iloc[:, 0], 4
)
assert_almost_equal(
self.result['state'][1][self.true['start']:-self.nforecast],
self.true_states.iloc[:, 1], 4
)
assert_almost_equal(
self.result['state'][3][self.true['start']:-self.nforecast],
self.true_states.iloc[:, 2], 4
)
class TestClark1987ForecastDouble(Clark1987Forecast):
"""
Basic double forecasting test for the loglikelihood and filtered states.
"""
def __init__(self):
super(TestClark1987ForecastDouble, self).__init__()
self.init_filter()
self.run_filter()
class TestClark1987ForecastDoubleComplex(Clark1987Forecast):
"""
Basic double complex forecasting test for the loglikelihood and filtered
states.
"""
def __init__(self):
super(TestClark1987ForecastDoubleComplex, self).__init__(
dtype=complex
)
self.init_filter()
self.run_filter()
class TestClark1987ForecastConserve(Clark1987Forecast):
"""
Memory conservation forecasting test for the loglikelihood and filtered
states.
"""
def __init__(self):
super(TestClark1987ForecastConserve, self).__init__(
dtype=float, conserve_memory=0x01 | 0x02
)
self.init_filter()
self.run_filter()
class TestClark1987ConserveAll(Clark1987):
"""
Memory conservation forecasting test for the loglikelihood and filtered
states.
"""
def __init__(self):
super(TestClark1987ConserveAll, self).__init__(
dtype=float, conserve_memory=0x01 | 0x02 | 0x04 | 0x08
)
self.loglikelihood_burn = self.true['start']
self.init_filter()
self.run_filter()
def test_loglike(self):
assert_almost_equal(
self.result['loglike'](0), self.true['loglike'], 5
)
def test_filtered_state(self):
end = self.true_states.shape[0]
assert_almost_equal(
self.result['state'][0][-1],
self.true_states.iloc[end-1, 0], 4
)
assert_almost_equal(
self.result['state'][1][-1],
self.true_states.iloc[end-1, 1], 4
)
class Clark1989(object):
"""
Clark's (1989) bivariate unobserved components model of real GDP (as
presented in Kim and Nelson, 1999)
Tests two-dimensional observation data.
Test data produced using GAUSS code described in Kim and Nelson (1999) and
found at http://econ.korea.ac.kr/~cjkim/SSMARKOV.htm
See `results.results_kalman_filter` for more information.
"""
def __init__(self, dtype=float, conserve_memory=0, loglikelihood_burn=0):
self.true = results_kalman_filter.uc_bi
self.true_states = pd.DataFrame(self.true['states'])
# GDP and Unemployment, Quarterly, 1948.1 - 1995.3
data = pd.DataFrame(
self.true['data'],
index=pd.date_range('1947-01-01', '1995-07-01', freq='QS'),
columns=['GDP', 'UNEMP']
)[4:]
data['GDP'] = np.log(data['GDP'])
data['UNEMP'] = (data['UNEMP']/100)
# Observed data
self.obs = np.array(data, ndmin=2, dtype=dtype, order="C").T
# Parameters
self.k_endog = k_endog = 2 # dimension of observed data
self.k_states = k_states = 6 # dimension of state space
self.conserve_memory = conserve_memory
self.loglikelihood_burn = loglikelihood_burn
# Measurement equation
# design matrix
self.design = np.zeros((k_endog, k_states, 1), dtype=dtype, order="F")
self.design[:, :, 0] = [[1, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 1]]
# observation intercept
self.obs_intercept = np.zeros((k_endog, 1), dtype=dtype, order="F")
# observation covariance matrix
self.obs_cov = np.zeros((k_endog, k_endog, 1), dtype=dtype, order="F")
# Transition equation
# transition matrix
self.transition = np.zeros((k_states, k_states, 1),
dtype=dtype, order="F")
self.transition[([0, 0, 1, 1, 2, 3, 4, 5],
[0, 4, 1, 2, 1, 2, 4, 5],
[0, 0, 0, 0, 0, 0, 0, 0])] = [1, 1, 0, 0, 1, 1, 1, 1]
# state intercept
self.state_intercept = np.zeros((k_states, 1), dtype=dtype, order="F")
# selection matrix
self.selection = np.asfortranarray(np.eye(k_states)[:, :, None],
dtype=dtype)
# state covariance matrix
self.state_cov = np.zeros((k_states, k_states, 1),
dtype=dtype, order="F")
# Initialization: Diffuse priors
self.initial_state = np.zeros((k_states,), dtype=dtype)
self.initial_state_cov = np.asfortranarray(np.eye(k_states)*100,
dtype=dtype)
# Update matrices with given parameters
(sigma_v, sigma_e, sigma_w, sigma_vl, sigma_ec,
phi_1, phi_2, alpha_1, alpha_2, alpha_3) = np.array(
self.true['parameters'], dtype=dtype
)
self.design[([1, 1, 1], [1, 2, 3], [0, 0, 0])] = [
alpha_1, alpha_2, alpha_3
]
self.transition[([1, 1], [1, 2], [0, 0])] = [phi_1, phi_2]
self.obs_cov[1, 1, 0] = sigma_ec**2
self.state_cov[
np.diag_indices(k_states)+(np.zeros(k_states, dtype=int),)] = [
sigma_v**2, sigma_e**2, 0, 0, sigma_w**2, sigma_vl**2
]
# Initialization: modification
        # Due to the difference in the way Kim and Nelson (1999) and Durbin
# and Koopman (2012) define the order of the Kalman filter routines,
# we need to modify the initial state covariance matrix to match
# Kim and Nelson's results, since the *Statespace models follow Durbin
# and Koopman.
self.initial_state_cov = np.asfortranarray(
np.dot(
np.dot(self.transition[:, :, 0], self.initial_state_cov),
self.transition[:, :, 0].T
)
)
def init_filter(self):
# Use the appropriate Statespace model
prefix = find_best_blas_type((self.obs,))
cls = prefix_statespace_map[prefix[0]]
# Instantiate the statespace model
self.model = cls(
self.obs, self.design, self.obs_intercept, self.obs_cov,
self.transition, self.state_intercept, self.selection,
self.state_cov
)
self.model.initialize_known(self.initial_state, self.initial_state_cov)
# Initialize the appropriate Kalman filter
cls = prefix_kalman_filter_map[prefix[0]]
self.filter = cls(self.model, conserve_memory=self.conserve_memory,
loglikelihood_burn=self.loglikelihood_burn)
def run_filter(self):
# Filter the data
self.filter()
# Get results
self.result = {
'loglike': lambda burn: np.sum(self.filter.loglikelihood[burn:]),
'state': np.array(self.filter.filtered_state),
}
def test_loglike(self):
assert_almost_equal(
# self.result['loglike'](self.true['start']),
self.result['loglike'](0),
self.true['loglike'], 2
)
def test_filtered_state(self):
assert_almost_equal(
self.result['state'][0][self.true['start']:],
self.true_states.iloc[:, 0], 4
)
assert_almost_equal(
self.result['state'][1][self.true['start']:],
self.true_states.iloc[:, 1], 4
)
assert_almost_equal(
self.result['state'][4][self.true['start']:],
self.true_states.iloc[:, 2], 4
)
assert_almost_equal(
self.result['state'][5][self.true['start']:],
self.true_states.iloc[:, 3], 4
)
class TestClark1989(Clark1989):
"""
Basic double precision test for the loglikelihood and filtered
states with two-dimensional observation vector.
"""
def __init__(self):
super(TestClark1989, self).__init__(dtype=float, conserve_memory=0)
self.init_filter()
self.run_filter()
class TestClark1989Conserve(Clark1989):
"""
Memory conservation test for the loglikelihood and filtered states with
two-dimensional observation vector.
"""
def __init__(self):
super(TestClark1989Conserve, self).__init__(
dtype=float, conserve_memory=0x01 | 0x02
)
self.init_filter()
self.run_filter()
class Clark1989Forecast(Clark1989):
"""
Memory conservation test for the loglikelihood and filtered states with
two-dimensional observation vector.
"""
def __init__(self, dtype=float, nforecast=100, conserve_memory=0):
super(Clark1989Forecast, self).__init__(dtype, conserve_memory)
self.nforecast = nforecast
# Add missing observations to the end (to forecast)
self._obs = self.obs
self.obs = np.array(
np.c_[
self._obs,
np.r_[[np.nan, np.nan]*nforecast].reshape(2, nforecast)
],
ndmin=2, dtype=dtype, order="F"
)
self.init_filter()
self.run_filter()
def test_filtered_state(self):
assert_almost_equal(
self.result['state'][0][self.true['start']:-self.nforecast],
self.true_states.iloc[:, 0], 4
)
assert_almost_equal(
self.result['state'][1][self.true['start']:-self.nforecast],
self.true_states.iloc[:, 1], 4
)
assert_almost_equal(
self.result['state'][4][self.true['start']:-self.nforecast],
self.true_states.iloc[:, 2], 4
)
assert_almost_equal(
self.result['state'][5][self.true['start']:-self.nforecast],
self.true_states.iloc[:, 3], 4
)
class TestClark1989ForecastDouble(Clark1989Forecast):
"""
Basic double forecasting test for the loglikelihood and filtered states.
"""
def __init__(self):
super(TestClark1989ForecastDouble, self).__init__()
self.init_filter()
self.run_filter()
class TestClark1989ForecastDoubleComplex(Clark1989Forecast):
"""
Basic double complex forecasting test for the loglikelihood and filtered
states.
"""
def __init__(self):
super(TestClark1989ForecastDoubleComplex, self).__init__(
dtype=complex
)
self.init_filter()
self.run_filter()
class TestClark1989ForecastConserve(Clark1989Forecast):
"""
Memory conservation forecasting test for the loglikelihood and filtered
states.
"""
def __init__(self):
super(TestClark1989ForecastConserve, self).__init__(
dtype=float, conserve_memory=0x01 | 0x02
)
self.init_filter()
self.run_filter()
class TestClark1989ConserveAll(Clark1989):
"""
Memory conservation forecasting test for the loglikelihood and filtered
states.
"""
def __init__(self):
super(TestClark1989ConserveAll, self).__init__(
dtype=float, conserve_memory=0x01 | 0x02 | 0x04 | 0x08,
)
# self.loglikelihood_burn = self.true['start']
self.loglikelihood_burn = 0
self.init_filter()
self.run_filter()
def test_loglike(self):
assert_almost_equal(
self.result['loglike'](0), self.true['loglike'], 2
)
def test_filtered_state(self):
end = self.true_states.shape[0]
assert_almost_equal(
self.result['state'][0][-1],
self.true_states.iloc[end-1, 0], 4
)
assert_almost_equal(
self.result['state'][1][-1],
self.true_states.iloc[end-1, 1], 4
)
assert_almost_equal(
self.result['state'][4][-1],
self.true_states.iloc[end-1, 2], 4
)
assert_almost_equal(
self.result['state'][5][-1],
self.true_states.iloc[end-1, 3], 4
)
| bsd-3-clause |
ened/scancode-toolkit | tests/cluecode/data/copyrights/copyright_btt_plot1_py-btt_plot_py.py | 13 | 11120 | #! /usr/bin/env python
#
# btt_plot.py: Generate matplotlib plots for BTT generate data files
#
# (C) Copyright 2009 Hewlett-Packard Development Company, L.P.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
"""
btt_plot.py: Generate matplotlib plots for BTT generated data files
Files handled:
AQD - Average Queue Depth Running average of queue depths
BNOS - Block numbers accessed Markers for each block
Q2D - Queue to Issue latencies Running averages
D2C - Issue to Complete latencies Running averages
Q2C - Queue to Complete latencies Running averages
Usage:
btt_plot_aqd.py equivalent to: btt_plot.py -t aqd <type>=aqd
btt_plot_bnos.py equivalent to: btt_plot.py -t bnos <type>=bnos
btt_plot_q2d.py equivalent to: btt_plot.py -t q2d <type>=q2d
btt_plot_d2c.py equivalent to: btt_plot.py -t d2c <type>=d2c
btt_plot_q2c.py equivalent to: btt_plot.py -t q2c <type>=q2c
Arguments:
[ -A | --generate-all ] Default: False
[ -L | --no-legend ] Default: Legend table produced
[ -o <file> | --output=<file> ] Default: <type>.png
[ -T <string> | --title=<string> ] Default: Based upon <type>
[ -v | --verbose ] Default: False
<data-files...>
The -A (--generate-all) argument is different: when this is specified,
an attempt is made to generate default plots for all 5 types (aqd, bnos,
q2d, d2c and q2c). It will find files with the appropriate suffix for
each type ('aqd.dat' for example). If such files are found, a plot for
that type will be made. The output file name will be the default for
each type. The -L (--no-legend) option will be obeyed for all plots,
but the -o (--output) and -T (--title) options will be ignored.
"""
__author__ = 'Alan D. Brunelle <[email protected]>'
#------------------------------------------------------------------------------
import matplotlib
matplotlib.use('Agg')
import getopt, glob, os, sys
import matplotlib.pyplot as plt
plot_size = [10.9, 8.4] # inches...
add_legend = True
generate_all = False
output_file = None
title_str = None
type = None
verbose = False
types = [ 'aqd', 'q2d', 'd2c', 'q2c', 'bnos' ]
progs = [ 'btt_plot_%s.py' % t for t in types ]
get_base = lambda file: file[file.find('_')+1:file.rfind('_')]
#------------------------------------------------------------------------------
def fatal(msg):
"""Generate fatal error message and exit"""
print >>sys.stderr, 'FATAL: %s' % msg
sys.exit(1)
#----------------------------------------------------------------------
def get_data(files):
"""Retrieve data from files provided.
Returns a database containing:
'min_x', 'max_x' - Minimum and maximum X values found
'min_y', 'max_y' - Minimum and maximum Y values found
'x', 'y' - X & Y value arrays
'ax', 'ay' - Running average over X & Y --
if > 10 values provided...
"""
#--------------------------------------------------------------
def check(mn, mx, v):
"""Returns new min, max, and float value for those passed in"""
v = float(v)
if mn == None or v < mn: mn = v
if mx == None or v > mx: mx = v
return mn, mx, v
#--------------------------------------------------------------
def avg(xs, ys):
"""Computes running average for Xs and Ys"""
#------------------------------------------------------
def _avg(vals):
"""Computes average for array of values passed"""
total = 0.0
for val in vals:
total += val
return total / len(vals)
#------------------------------------------------------
if len(xs) < 1000:
return xs, ys
axs = [xs[0]]
ays = [ys[0]]
_xs = [xs[0]]
_ys = [ys[0]]
x_range = (xs[-1] - xs[0]) / 100
for idx in range(1, len(ys)):
if (xs[idx] - _xs[0]) > x_range:
axs.append(_avg(_xs))
ays.append(_avg(_ys))
del _xs, _ys
_xs = [xs[idx]]
_ys = [ys[idx]]
else:
_xs.append(xs[idx])
_ys.append(ys[idx])
if len(_xs) > 1:
axs.append(_avg(_xs))
ays.append(_avg(_ys))
return axs, ays
#--------------------------------------------------------------
global verbose
db = {}
min_x = max_x = min_y = max_y = None
for file in files:
if not os.path.exists(file):
fatal('%s not found' % file)
elif verbose:
print 'Processing %s' % file
xs = []
ys = []
for line in open(file, 'r'):
f = line.rstrip().split(None)
if line.find('#') == 0 or len(f) < 2:
continue
(min_x, max_x, x) = check(min_x, max_x, f[0])
(min_y, max_y, y) = check(min_y, max_y, f[1])
xs.append(x)
ys.append(y)
db[file] = {'x':xs, 'y':ys}
if len(xs) > 10:
db[file]['ax'], db[file]['ay'] = avg(xs, ys)
else:
db[file]['ax'] = db[file]['ay'] = None
db['min_x'] = min_x
db['max_x'] = max_x
db['min_y'] = min_y
db['max_y'] = max_y
return db
#----------------------------------------------------------------------
def parse_args(args):
"""Parse command line arguments.
Returns list of (data) files that need to be processed -- /unless/
the -A (--generate-all) option is passed, in which case superfluous
data files are ignored...
"""
global add_legend, output_file, title_str, type, verbose
global generate_all
prog = args[0][args[0].rfind('/')+1:]
if prog == 'btt_plot.py':
pass
elif not prog in progs:
fatal('%s not a valid command name' % prog)
else:
type = prog[prog.rfind('_')+1:prog.rfind('.py')]
s_opts = 'ALo:t:T:v'
l_opts = [ 'generate-all', 'type', 'no-legend', 'output', 'title',
'verbose' ]
try:
(opts, args) = getopt.getopt(args[1:], s_opts, l_opts)
except getopt.error, msg:
print >>sys.stderr, msg
fatal(__doc__)
for (o, a) in opts:
if o in ('-A', '--generate-all'):
generate_all = True
elif o in ('-L', '--no-legend'):
add_legend = False
elif o in ('-o', '--output'):
output_file = a
elif o in ('-t', '--type'):
if not a in types:
fatal('Type %s not supported' % a)
type = a
elif o in ('-T', '--title'):
title_str = a
elif o in ('-v', '--verbose'):
verbose = True
if type == None and not generate_all:
fatal('Need type of data files to process - (-t <type>)')
return args
#------------------------------------------------------------------------------
def gen_title(fig, type, title_str):
"""Sets the title for the figure based upon the type /or/ user title"""
if title_str != None:
pass
elif type == 'aqd':
title_str = 'Average Queue Depth'
elif type == 'bnos':
title_str = 'Block Numbers Accessed'
elif type == 'q2d':
title_str = 'Queue (Q) To Issue (D) Average Latencies'
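	# Note: 'd2c' and 'q2c' are not handled above, so title_str stays None unless
	# -T/--title is supplied (upstream btt_plot.py presumably sets their titles here).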
title = fig.text(.5, .95, title_str, horizontalalignment='center')
title.set_fontsize('large')
#------------------------------------------------------------------------------
def gen_labels(db, ax, type):
"""Generate X & Y 'axis'"""
#----------------------------------------------------------------------
def gen_ylabel(ax, type):
"""Set the Y axis label based upon the type"""
if type == 'aqd':
str = 'Number of Requests Queued'
elif type == 'bnos':
str = 'Block Number'
else:
str = 'Seconds'
ax.set_ylabel(str)
#----------------------------------------------------------------------
xdelta = 0.1 * (db['max_x'] - db['min_x'])
ydelta = 0.1 * (db['max_y'] - db['min_y'])
ax.set_xlim(db['min_x'] - xdelta, db['max_x'] + xdelta)
ax.set_ylim(db['min_y'] - ydelta, db['max_y'] + ydelta)
ax.set_xlabel('Runtime (seconds)')
ax.grid(True)
gen_ylabel(ax, type)
#------------------------------------------------------------------------------
def generate_output(type, db):
"""Generate the output plot based upon the type and database"""
#----------------------------------------------------------------------
def color(idx, style):
"""Returns a color/symbol type based upon the index passed."""
colors = [ 'b', 'g', 'r', 'c', 'm', 'y', 'k' ]
l_styles = [ '-', ':', '--', '-.' ]
m_styles = [ 'o', '+', '.', ',', 's', 'v', 'x', '<', '>' ]
color = colors[idx % len(colors)]
if style == 'line':
style = l_styles[(idx / len(l_styles)) % len(l_styles)]
elif style == 'marker':
style = m_styles[(idx / len(m_styles)) % len(m_styles)]
return '%s%s' % (color, style)
#----------------------------------------------------------------------
    def gen_legends(ax, legends):
        leg = ax.legend(legends, 'best', shadow=True)
frame = leg.get_frame()
frame.set_facecolor('0.80')
for t in leg.get_texts():
t.set_fontsize('xx-small')
#----------------------------------------------------------------------
global add_legend, output_file, title_str, verbose
    if output_file is not None:
ofile = output_file
else:
ofile = '%s.png' % type
if verbose:
print 'Generating plot into %s' % ofile
fig = plt.figure(figsize=plot_size)
ax = fig.add_subplot(111)
gen_title(fig, type, title_str)
gen_labels(db, ax, type)
idx = 0
if add_legend:
legends = []
else:
legends = None
keys = []
for file in db.iterkeys():
if not file in ['min_x', 'max_x', 'min_y', 'max_y']:
keys.append(file)
keys.sort()
for file in keys:
dat = db[file]
if type == 'bnos':
ax.plot(dat['x'], dat['y'], color(idx, 'marker'),
markersize=1)
        elif dat['ax'] is None:
continue # Don't add legend
else:
ax.plot(dat['ax'], dat['ay'], color(idx, 'line'),
linewidth=1.0)
if add_legend:
legends.append(get_base(file))
idx += 1
if add_legend and len(legends) > 0:
gen_legends(ax, legends)
plt.savefig(ofile)
#------------------------------------------------------------------------------
def get_files(type):
"""Returns the list of files for the -A option based upon type"""
if type == 'bnos':
files = []
for fn in glob.glob('*c.dat'):
for t in [ 'q2q', 'd2d', 'q2c', 'd2c' ]:
if fn.find(t) >= 0:
break
else:
files.append(fn)
else:
files = glob.glob('*%s.dat' % type)
return files
#------------------------------------------------------------------------------
if __name__ == '__main__':
files = parse_args(sys.argv)
if generate_all:
output_file = title_str = type = None
for t in types:
files = get_files(t)
if len(files) == 0:
continue
elif t != 'bnos':
generate_output(t, get_data(files))
continue
for file in files:
base = get_base(file)
title_str = 'Block Numbers Accessed: %s' % base
output_file = 'bnos_%s.png' % base
generate_output(t, get_data([file]))
elif len(files) < 1:
fatal('Need data files to process')
else:
generate_output(type, get_data(files))
sys.exit(0)
| apache-2.0 |
shikhardb/scikit-learn | sklearn/manifold/tests/test_isomap.py | 28 | 4007 | from itertools import product
import numpy as np
from numpy.testing import assert_almost_equal, assert_array_almost_equal
from sklearn import datasets
from sklearn import manifold
from sklearn import neighbors
from sklearn import pipeline
from sklearn import preprocessing
from sklearn.utils.testing import assert_less
eigen_solvers = ['auto', 'dense', 'arpack']
path_methods = ['auto', 'FW', 'D']
def test_isomap_simple_grid():
# Isomap should preserve distances when all neighbors are used
N_per_side = 5
Npts = N_per_side ** 2
n_neighbors = Npts - 1
# grid of equidistant points in 2D, n_components = n_dim
X = np.array(list(product(range(N_per_side), repeat=2)))
# distances from each point to all others
G = neighbors.kneighbors_graph(X, n_neighbors,
mode='distance').toarray()
for eigen_solver in eigen_solvers:
for path_method in path_methods:
clf = manifold.Isomap(n_neighbors=n_neighbors, n_components=2,
eigen_solver=eigen_solver,
path_method=path_method)
clf.fit(X)
G_iso = neighbors.kneighbors_graph(clf.embedding_,
n_neighbors,
mode='distance').toarray()
assert_array_almost_equal(G, G_iso)
def test_isomap_reconstruction_error():
# Same setup as in test_isomap_simple_grid, with an added dimension
N_per_side = 5
Npts = N_per_side ** 2
n_neighbors = Npts - 1
# grid of equidistant points in 2D, n_components = n_dim
X = np.array(list(product(range(N_per_side), repeat=2)))
# add noise in a third dimension
rng = np.random.RandomState(0)
noise = 0.1 * rng.randn(Npts, 1)
X = np.concatenate((X, noise), 1)
# compute input kernel
G = neighbors.kneighbors_graph(X, n_neighbors,
mode='distance').toarray()
centerer = preprocessing.KernelCenterer()
K = centerer.fit_transform(-0.5 * G ** 2)
for eigen_solver in eigen_solvers:
for path_method in path_methods:
clf = manifold.Isomap(n_neighbors=n_neighbors, n_components=2,
eigen_solver=eigen_solver,
path_method=path_method)
clf.fit(X)
# compute output kernel
G_iso = neighbors.kneighbors_graph(clf.embedding_,
n_neighbors,
mode='distance').toarray()
K_iso = centerer.fit_transform(-0.5 * G_iso ** 2)
# make sure error agrees
reconstruction_error = np.linalg.norm(K - K_iso) / Npts
assert_almost_equal(reconstruction_error,
clf.reconstruction_error())
def test_transform():
n_samples = 200
n_components = 10
noise_scale = 0.01
# Create S-curve dataset
X, y = datasets.samples_generator.make_s_curve(n_samples, random_state=0)
# Compute isomap embedding
iso = manifold.Isomap(n_components, 2)
X_iso = iso.fit_transform(X)
# Re-embed a noisy version of the points
rng = np.random.RandomState(0)
noise = noise_scale * rng.randn(*X.shape)
X_iso2 = iso.transform(X + noise)
# Make sure the rms error on re-embedding is comparable to noise_scale
assert_less(np.sqrt(np.mean((X_iso - X_iso2) ** 2)), 2 * noise_scale)
def test_pipeline():
# check that Isomap works fine as a transformer in a Pipeline
# only checks that no error is raised.
# TODO check that it actually does something useful
X, y = datasets.make_blobs(random_state=0)
clf = pipeline.Pipeline(
[('isomap', manifold.Isomap()),
('clf', neighbors.KNeighborsClassifier())])
clf.fit(X, y)
assert_less(.9, clf.score(X, y))
if __name__ == '__main__':
import nose
nose.runmodule()
| bsd-3-clause |
clancia/EDA | condno_rhoq.py | 1 | 2229 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# condno_rhoq.py
#
# Copyright 2014 Carlo Lancia <clancia@g6-laptop>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
#
#
import numpy as np
from pnl import systemFactory, binomial
import matplotlib.pyplot as plt
#from matplotlib.ticker import MaxNLocator
from matplotlib import colors, ticker, cm
from datetime import datetime
ALPHA = 100
mtxComputer = systemFactory(ALPHA)
dr, dq = 0.04125, 0.04125
steps = np.linspace(0, 0.99, 25)
q, r = np.meshgrid(np.array(steps), np.array(steps))
z = np.empty(r.size).reshape(r.shape)
ofile = open('./figures/eda.log', 'w', 0)
ofile.write('%s :: Computation has started\n' % (datetime.now()))
for i in range(r.shape[0]):
for j in range(r.shape[1]):
#print 'Using rho =', r[i,j], 'and q =', q[i,j]
z[i,j] = np.linalg.cond(mtxComputer(r[i,j], q[i,j]),1)
ofile.write('%s :: Completed rho=%.5f, q=%.5f\n' % (datetime.now(), r[i,j], q[i,j]))
z = np.ma.masked_where(z <= 0, z)
levs = np.logspace(np.floor(np.log10(z.min())),
np.ceil(np.log10(z.max())), 20)
lev_exp = np.log10(levs)
plt.contourf(r + dr / 2., q + dq / 2., z, levs, norm=colors.LogNorm(), cmap=cm.Greys_r)
cbar = plt.colorbar(ticks=levs)
cbar.ax.set_yticklabels(['%.2f' % (l) for l in lev_exp])
plt.xlabel('rho')
plt.ylabel('q')
plt.title('Log10 of condition number (alpha_max = %d)' % (ALPHA))
plt.savefig('CondNo.alpha.100.png')
plt.savefig('/home/clancia/Dropbox/EDA/CondNo.alpha.100.png')
ofile.write('%s :: Computation completed\n' % (datetime.now()))
| gpl-2.0 |
cbmoore/statsmodels | statsmodels/sandbox/tsa/examples/example_var.py | 37 | 1218 | """
Look at some macro plots, then do some VARs and IRFs.
"""
import numpy as np
import statsmodels.api as sm
import scikits.timeseries as ts
import scikits.timeseries.lib.plotlib as tplt
from matplotlib import pyplot as plt
data = sm.datasets.macrodata.load()
data = data.data
### Create Timeseries Representations of a few vars
dates = ts.date_array(start_date=ts.Date('Q', year=1959, quarter=1),
end_date=ts.Date('Q', year=2009, quarter=3))
ts_data = data[['realgdp','realcons','cpi']].view(float).reshape(-1,3)
ts_data = np.column_stack((ts_data, (1 - data['unemp']/100) * data['pop']))
ts_series = ts.time_series(ts_data, dates)
fig = tplt.tsfigure()
fsp = fig.add_tsplot(221)
fsp.tsplot(ts_series[:,0],'-')
fsp.set_title("Real GDP")
fsp = fig.add_tsplot(222)
fsp.tsplot(ts_series[:,1],'r-')
fsp.set_title("Real Consumption")
fsp = fig.add_tsplot(223)
fsp.tsplot(ts_series[:,2],'g-')
fsp.set_title("CPI")
fsp = fig.add_tsplot(224)
fsp.tsplot(ts_series[:,3],'y-')
fsp.set_title("Employment")
# Plot real GDP
#plt.subplot(221)
#plt.plot(data['realgdp'])
#plt.title("Real GDP")
# Plot employment
#plt.subplot(222)
# Plot cpi
#plt.subplot(223)
# Plot real consumption
#plt.subplot(224)
#plt.show()
| bsd-3-clause |
compops/pmh-joe2015 | scripts-draft1/example2-correlation-versus-sigmau.py | 2 | 4940 | ##############################################################################
##############################################################################
#
# Replicates results in section 4.2
#
# J. Dahlin, F. Lindsten, J. Kronander and T. B. Schön,
# Accelerating pmMH by correlating auxiliary variables.
# Pre-print, arXiv:1512:05483v1, 2015.
#
# Copyright (c) 2016 Johan Dahlin [ johan.dahlin (at) liu.se ]
# Distributed under the MIT license.
#
##############################################################################
##############################################################################
import numpy as np
import Quandl
from state import smc
from para import pmh_correlatedRVs
from models import hwsv_4parameters
##############################################################################
# Arrange the data structures
##############################################################################
pmh = pmh_correlatedRVs.stcPMH();
##############################################################################
# Arrange the data structures
##############################################################################
sm = smc.smcSampler();
pmh = pmh_correlatedRVs.stcPMH();
##############################################################################
# Setup the system
##############################################################################
sys = hwsv_4parameters.ssm()
sys.par = np.zeros((sys.nPar,1))
sys.par[0] = 0.00;
sys.par[1] = 0.98;
sys.par[2] = 0.16;
sys.par[3] = -0.70;
sys.T = 748;
sys.xo = 0.0;
sys.version = "standard"
##############################################################################
# Generate data
##############################################################################
sys.generateData();
d = Quandl.get("NASDAQOMX/OMXS30", trim_start="2011-01-02", trim_end="2014-01-02")
y = 100.0 * np.diff(np.log(d['Index Value']))
sys.y = y[~np.isnan(y)]
##############################################################################
# Setup the parameters
##############################################################################
th = hwsv_4parameters.ssm()
th.nParInference = 4;
th.copyData(sys);
##############################################################################
# Setup the SMC algorithm
##############################################################################
sm.filter = sm.bPFrv;
sm.sortParticles = True;
sm.nPart = 50;
sm.resampFactor = 2.0;
sm.genInitialState = True;
##############################################################################
# Check correlation in the likelihood estimator
##############################################################################
nIter = 600;
ll0 = np.zeros( nIter )
ll1 = np.zeros( nIter )
pmh.rvpGlobal = 0.0;
sigmauGrid = np.arange( 0.00,1.05,0.05 );
nPartGrid = ( 1, 2, 5, 10, 20, 50 )
covPMH = np.zeros( ( len(sigmauGrid), len(nPartGrid), 3 ) )
for ii in range(len(sigmauGrid)):
pmh.sigmaU = sigmauGrid[ii];
pmh.alpha = 0.0;
for jj in range( len(nPartGrid) ):
sm.nPart = nPartGrid[jj];
pmh.rvnSamples = 1 + sm.nPart;
for kk in range(nIter):
# Sample initial random variables and compute likelihood estimate
pmh.rv = np.random.normal( size=( pmh.rvnSamples, sys.T ) );
sm.rv = pmh.rv;
sm.filter( th );
ll0[ kk ] = sm.ll;
# Propose new random variables ( Local move )
u = np.random.uniform()
pmh.rvp = np.sqrt( 1.0 - pmh.sigmaU**2 ) * pmh.rv + pmh.sigmaU * np.random.normal( size=(pmh.rvnSamples,sys.T) );
# Compute likelihood estimate
sm.rv = pmh.rvp;
sm.filter( th );
ll1[ kk ] = sm.ll;
covPMH[ii,jj,0] = np.var( ll0 )
covPMH[ii,jj,1] = np.cov( ll0, ll1 )[0,1]
covPMH[ii,jj,2] = np.corrcoef( ll0, ll1 )[0,1]
print( (ii,len(sigmauGrid),jj,len(nPartGrid) ) );
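# Note (added sketch, not from the original script): the local move above is a
# Crank-Nicolson style proposal,
#     u' = sqrt(1 - sigmaU^2) * u + sigmaU * eps,   eps ~ N(0, 1),
# which preserves the N(0,1) marginal of the auxiliary variables and induces
# correlation sqrt(1 - sigmaU^2) between u and u'; covPMH[:, :, 2] estimates
# the correlation this transfers to the log-likelihood estimates. A quick
# stand-alone check of the proposal property:
#
#   u   = np.random.normal(size=100000)
#   eps = np.random.normal(size=100000)
#   up  = np.sqrt(1.0 - 0.5**2) * u + 0.5 * eps
#   np.corrcoef(u, up)[0, 1]          # approximately sqrt(0.75) ~ 0.866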
import matplotlib.pyplot as plt
plt.figure(1)
for jj in range( len(nPartGrid) ):
    plt.plot(sigmauGrid, covPMH[:,jj,2])
#
import pandas
fileOut = pandas.DataFrame( covPMH[:,:,0],index=sigmauGrid, columns=nPartGrid);
fileOut.to_csv('example3-correlation-versus-sigmau-llvar.csv');
fileOut = pandas.DataFrame( covPMH[:,:,1],index=sigmauGrid, columns=nPartGrid);
fileOut.to_csv('example3-correlation-versus-sigmau-llcov.csv');
fileOut = pandas.DataFrame( covPMH[:,:,2],index=sigmauGrid, columns=nPartGrid);
fileOut.to_csv('example3-correlation-versus-sigmau-llcorr.csv');
#sqrt( var( ll0 ) )
########################################################################
# End of file
########################################################################
| mit |
aquavitae/rst2pdf-py3-dev | setup.py | 2 | 3099 | # -*- coding: utf-8 -*-
# $HeadURL$
# $LastChangedDate$
# $LastChangedRevision$
import os
from setuptools import setup, find_packages
version = '0.93'
def read(*rnames):
return open(os.path.join(os.path.dirname(__file__), *rnames)).read()
long_description = (
read('LICENSE.txt')
+ '\n' +
'Detailed Documentation\n'
'**********************\n'
+ '\n' +
read('README.md')
+ '\n' +
'Contributors\n'
'************\n'
+ '\n' +
read('Contributors.txt')
+ '\n' +
'Change history\n'
'**************\n'
+ '\n' +
read('CHANGES.txt')
+ '\n' +
'Download\n'
'********\n'
)
install_requires = [
'Pygments',
'docutils',
'pdfrw',
'reportlab>=3.0',
'rson',
'setuptools',
'smartypants',
'tenjin',
]
tests_require = ['pyPdf2', 'nose']
sphinx_require = ['sphinx']
hyphenation_require = ['wordaxe>=1.0']
images_require = ['pillow']
pdfimages_require = ['pyPdf2', 'PythonMagick']
pdfimages2_require = ['pyPdf2', 'SWFTools']
svgsupport_require = ['svg2rlg']
aafiguresupport_require = ['aafigure>=0.4']
mathsupport_require = ['matplotlib']
rawhtmlsupport_require = ['xhtml2pdf']
setup(
name="rst2pdf",
version=version,
packages=find_packages(exclude=['ez_setup', 'examples', 'tests']),
package_data=dict(rst2pdf=['styles/*.json',
'styles/*.style',
'images/*png',
'images/*jpg',
'templates/*tmpl'
]),
include_package_data=True,
dependency_links=[
],
install_requires=install_requires,
tests_require=tests_require,
extras_require=dict(
tests=tests_require,
sphinx=sphinx_require,
hyphenation=hyphenation_require,
images=images_require,
pdfimages=pdfimages_require,
pdfimages2=pdfimages2_require,
svgsupport=svgsupport_require,
aafiguresupport=aafiguresupport_require,
mathsupport=mathsupport_require,
rawhtmlsupport=rawhtmlsupport_require,
),
# metadata for upload to PyPI
# Get strings from http://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Console',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Documentation',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Text Processing',
'Topic :: Utilities',
],
author="Roberto Alsina",
author_email="ralsina at netmanagers dot com dot ar",
description="Convert restructured text to PDF via reportlab.",
long_description=long_description,
license="MIT",
keywords="restructured convert rst pdf docutils pygments reportlab",
url="http://rst2pdf.googlecode.com",
download_url="http://code.google.com/p/rst2pdf/downloads/list",
entry_points={'console_scripts': ['rst2pdf = rst2pdf.createpdf:main']},
test_suite='rst2pdf.tests.test_rst2pdf.test_suite',
)
| mit |
majetideepak/arrow | python/pyarrow/__init__.py | 1 | 11976 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# flake8: noqa
from __future__ import absolute_import
import os as _os
import sys as _sys
try:
from ._generated_version import version as __version__
except ImportError:
# Package is not installed, parse git tag at runtime
try:
import setuptools_scm
# Code duplicated from setup.py to avoid a dependency on each other
def parse_git(root, **kwargs):
"""
Parse function for setuptools_scm that ignores tags for non-C++
subprojects, e.g. apache-arrow-js-XXX tags.
"""
from setuptools_scm.git import parse
kwargs['describe_command'] = \
"git describe --dirty --tags --long --match 'apache-arrow-[0-9].*'"
return parse(root, **kwargs)
__version__ = setuptools_scm.get_version('../',
parse=parse_git)
except ImportError:
__version__ = None
import pyarrow.compat as compat
from pyarrow.lib import cpu_count, set_cpu_count
from pyarrow.lib import (null, bool_,
int8, int16, int32, int64,
uint8, uint16, uint32, uint64,
time32, time64, timestamp, date32, date64,
float16, float32, float64,
binary, string, utf8,
large_binary, large_string, large_utf8,
decimal128,
list_, large_list, struct, union, dictionary, field,
type_for_alias,
DataType, DictionaryType, StructType,
ListType, LargeListType,
UnionType, TimestampType, Time32Type, Time64Type,
FixedSizeBinaryType, Decimal128Type,
BaseExtensionType, ExtensionType,
UnknownExtensionType,
DictionaryMemo,
Field,
Schema,
schema,
Array, Tensor,
array, chunked_array, table,
SparseTensorCSR, SparseTensorCOO,
infer_type, from_numpy_dtype,
NullArray,
NumericArray, IntegerArray, FloatingPointArray,
BooleanArray,
Int8Array, UInt8Array,
Int16Array, UInt16Array,
Int32Array, UInt32Array,
Int64Array, UInt64Array,
ListArray, LargeListArray, UnionArray,
BinaryArray, StringArray,
LargeBinaryArray, LargeStringArray,
FixedSizeBinaryArray,
DictionaryArray,
Date32Array, Date64Array,
TimestampArray, Time32Array, Time64Array,
Decimal128Array, StructArray, ExtensionArray,
ArrayValue, Scalar, NA, _NULL as NULL,
BooleanValue,
Int8Value, Int16Value, Int32Value, Int64Value,
UInt8Value, UInt16Value, UInt32Value, UInt64Value,
HalfFloatValue, FloatValue, DoubleValue,
ListValue, LargeListValue,
BinaryValue, StringValue,
LargeBinaryValue, LargeStringValue,
FixedSizeBinaryValue,
DecimalValue, UnionValue, StructValue, DictionaryValue,
Date32Value, Date64Value,
Time32Value, Time64Value,
TimestampValue)
# Buffers, allocation
from pyarrow.lib import (Buffer, ResizableBuffer, foreign_buffer, py_buffer,
compress, decompress, allocate_buffer)
from pyarrow.lib import (MemoryPool, LoggingMemoryPool, ProxyMemoryPool,
total_allocated_bytes, set_memory_pool,
default_memory_pool, logging_memory_pool,
proxy_memory_pool, log_memory_allocations)
# I/O
from pyarrow.lib import (HdfsFile, NativeFile, PythonFile,
CompressedInputStream, CompressedOutputStream,
FixedSizeBufferWriter,
BufferReader, BufferOutputStream,
OSFile, MemoryMappedFile, memory_map,
create_memory_map, have_libhdfs, have_libhdfs3,
MockOutputStream, input_stream, output_stream)
from pyarrow.lib import (ChunkedArray, RecordBatch, Table,
concat_arrays, concat_tables)
# Exceptions
from pyarrow.lib import (ArrowException,
ArrowKeyError,
ArrowInvalid,
ArrowIOError,
ArrowMemoryError,
ArrowNotImplementedError,
ArrowTypeError,
ArrowSerializationError)
# Serialization
from pyarrow.lib import (deserialize_from, deserialize,
deserialize_components,
serialize, serialize_to, read_serialized,
SerializedPyObject, SerializationContext,
SerializationCallbackError,
DeserializationCallbackError)
from pyarrow.filesystem import FileSystem, LocalFileSystem
from pyarrow.hdfs import HadoopFileSystem
import pyarrow.hdfs as hdfs
from pyarrow.ipc import (Message, MessageReader,
RecordBatchFileReader, RecordBatchFileWriter,
RecordBatchStreamReader, RecordBatchStreamWriter,
read_message, read_record_batch, read_schema,
read_tensor, write_tensor,
get_record_batch_size, get_tensor_size,
open_stream,
open_file,
serialize_pandas, deserialize_pandas)
import pyarrow.ipc as ipc
def open_stream(source):
"""
pyarrow.open_stream deprecated since 0.12, use pyarrow.ipc.open_stream
"""
import warnings
warnings.warn("pyarrow.open_stream is deprecated, please use "
"pyarrow.ipc.open_stream")
return ipc.open_stream(source)
def open_file(source):
"""
pyarrow.open_file deprecated since 0.12, use pyarrow.ipc.open_file
"""
import warnings
warnings.warn("pyarrow.open_file is deprecated, please use "
"pyarrow.ipc.open_file")
return ipc.open_file(source)
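# Illustrative replacement for the deprecated wrappers above (a sketch;
# 'example.arrow' is a hypothetical file name):
#
#   import pyarrow as pa
#   with pa.OSFile('example.arrow', 'rb') as source:
#       reader = pa.ipc.open_file(source)
#       table = reader.read_all()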
localfs = LocalFileSystem.get_instance()
from pyarrow.serialization import (default_serialization_context,
register_default_serialization_handlers,
register_torch_serialization_handlers)
import pyarrow.types as types
# Entry point for starting the plasma store
def _plasma_store_entry_point():
"""Entry point for starting the plasma store.
This can be used by invoking e.g.
``plasma_store -s /tmp/plasma -m 1000000000``
from the command line and will start the plasma_store executable with the
given arguments.
"""
import pyarrow
plasma_store_executable = _os.path.join(pyarrow.__path__[0],
"plasma_store_server")
_os.execv(plasma_store_executable, _sys.argv)
# ----------------------------------------------------------------------
# Deprecations
from pyarrow.util import _deprecate_api # noqa
# ----------------------------------------------------------------------
# Returning absolute path to the pyarrow include directory (if bundled, e.g. in
# wheels)
def get_include():
"""
Return absolute path to directory containing Arrow C++ include
headers. Similar to numpy.get_include
"""
return _os.path.join(_os.path.dirname(__file__), 'include')
def _get_pkg_config_executable():
return _os.environ.get('PKG_CONFIG', 'pkg-config')
def _has_pkg_config(pkgname):
import subprocess
try:
return subprocess.call([_get_pkg_config_executable(),
'--exists', pkgname]) == 0
except OSError:
# TODO: replace with FileNotFoundError once we ditch 2.7
return False
def _read_pkg_config_variable(pkgname, cli_args):
import subprocess
cmd = [_get_pkg_config_executable(), pkgname] + cli_args
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
out, err = proc.communicate()
if proc.returncode != 0:
raise RuntimeError("pkg-config failed: " + err.decode('utf8'))
return out.rstrip().decode('utf8')
def get_libraries():
"""
Return list of library names to include in the `libraries` argument for C
or Cython extensions using pyarrow
"""
return ['arrow', 'arrow_python']
def get_library_dirs():
"""
Return lists of directories likely to contain Arrow C++ libraries for
linking C or Cython extensions using pyarrow
"""
package_cwd = _os.path.dirname(__file__)
library_dirs = [package_cwd]
def append_library_dir(library_dir):
if library_dir not in library_dirs:
library_dirs.append(library_dir)
# Search library paths via pkg-config. This is necessary if the user
# installed libarrow and the other shared libraries manually and they
# are not shipped inside the pyarrow package (see also ARROW-2976).
pkg_config_executable = _os.environ.get('PKG_CONFIG') or 'pkg-config'
for pkgname in ["arrow", "arrow_python"]:
if _has_pkg_config(pkgname):
library_dir = _read_pkg_config_variable(pkgname,
["--libs-only-L"])
# pkg-config output could be empty if Arrow is installed
# as a system package.
if library_dir:
if not library_dir.startswith("-L"):
raise ValueError(
"pkg-config --libs-only-L returned unexpected "
"value {0!r}".format(library_dir))
append_library_dir(library_dir[2:])
if _sys.platform == 'win32':
# TODO(wesm): Is this necessary, or does setuptools within a conda
# installation add Library\lib to the linker path for MSVC?
python_base_install = _os.path.dirname(_sys.executable)
library_dir = _os.path.join(python_base_install, 'Library', 'lib')
if _os.path.exists(_os.path.join(library_dir, 'arrow.lib')):
append_library_dir(library_dir)
# ARROW-4074: Allow for ARROW_HOME to be set to some other directory
if _os.environ.get('ARROW_HOME'):
append_library_dir(_os.path.join(_os.environ['ARROW_HOME'], 'lib'))
else:
# Python wheels bundle the Arrow libraries in the pyarrow directory.
append_library_dir(_os.path.dirname(_os.path.abspath(__file__)))
return library_dirs
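# Illustrative sketch (not part of pyarrow itself): a downstream setup.py for a
# Cython extension linking against Arrow might combine the helpers above like
# this; 'my_ext' and 'my_ext.pyx' are hypothetical names and a real build would
# also need Cython's cythonize step.
#
#   from setuptools import setup, Extension
#   import pyarrow as pa
#
#   ext = Extension('my_ext', sources=['my_ext.pyx'],
#                   include_dirs=[pa.get_include()],
#                   libraries=pa.get_libraries(),
#                   library_dirs=pa.get_library_dirs())
#   setup(name='my-ext-package', ext_modules=[ext])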
| apache-2.0 |
krikru/tensorflow-opencl | tensorflow/contrib/learn/python/learn/learn_io/__init__.py | 37 | 2375 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tools to allow different io formats."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.learn.python.learn.learn_io.dask_io import extract_dask_data
from tensorflow.contrib.learn.python.learn.learn_io.dask_io import extract_dask_labels
from tensorflow.contrib.learn.python.learn.learn_io.dask_io import HAS_DASK
from tensorflow.contrib.learn.python.learn.learn_io.graph_io import _read_keyed_batch_examples_shared_queue
from tensorflow.contrib.learn.python.learn.learn_io.graph_io import _read_keyed_batch_features_shared_queue
from tensorflow.contrib.learn.python.learn.learn_io.graph_io import queue_parsed_features
from tensorflow.contrib.learn.python.learn.learn_io.graph_io import read_batch_examples
from tensorflow.contrib.learn.python.learn.learn_io.graph_io import read_batch_features
from tensorflow.contrib.learn.python.learn.learn_io.graph_io import read_batch_record_features
from tensorflow.contrib.learn.python.learn.learn_io.graph_io import read_keyed_batch_examples
from tensorflow.contrib.learn.python.learn.learn_io.graph_io import read_keyed_batch_features
from tensorflow.contrib.learn.python.learn.learn_io.numpy_io import numpy_input_fn
from tensorflow.contrib.learn.python.learn.learn_io.pandas_io import extract_pandas_data
from tensorflow.contrib.learn.python.learn.learn_io.pandas_io import extract_pandas_labels
from tensorflow.contrib.learn.python.learn.learn_io.pandas_io import extract_pandas_matrix
from tensorflow.contrib.learn.python.learn.learn_io.pandas_io import HAS_PANDAS
from tensorflow.contrib.learn.python.learn.learn_io.pandas_io import pandas_input_fn
| apache-2.0 |
imaculate/scikit-learn | sklearn/utils/tests/test_sparsefuncs.py | 78 | 17611 | import numpy as np
import scipy.sparse as sp
from scipy import linalg
from numpy.testing import (assert_array_almost_equal,
assert_array_equal,
assert_equal)
from numpy.random import RandomState
from sklearn.datasets import make_classification
from sklearn.utils.sparsefuncs import (mean_variance_axis,
incr_mean_variance_axis,
inplace_column_scale,
inplace_row_scale,
inplace_swap_row, inplace_swap_column,
min_max_axis,
count_nonzero, csc_median_axis_0)
from sklearn.utils.sparsefuncs_fast import (assign_rows_csr,
inplace_csr_row_normalize_l1,
inplace_csr_row_normalize_l2)
from sklearn.utils.testing import assert_raises
def test_mean_variance_axis0():
X, _ = make_classification(5, 4, random_state=0)
# Sparsify the array a little bit
X[0, 0] = 0
X[2, 1] = 0
X[4, 3] = 0
X_lil = sp.lil_matrix(X)
X_lil[1, 0] = 0
X[1, 0] = 0
assert_raises(TypeError, mean_variance_axis, X_lil, axis=0)
X_csr = sp.csr_matrix(X_lil)
X_csc = sp.csc_matrix(X_lil)
expected_dtypes = [(np.float32, np.float32),
(np.float64, np.float64),
(np.int32, np.float64),
(np.int64, np.float64)]
for input_dtype, output_dtype in expected_dtypes:
X_test = X.astype(input_dtype)
for X_sparse in (X_csr, X_csc):
X_sparse = X_sparse.astype(input_dtype)
X_means, X_vars = mean_variance_axis(X_sparse, axis=0)
assert_equal(X_means.dtype, output_dtype)
assert_equal(X_vars.dtype, output_dtype)
assert_array_almost_equal(X_means, np.mean(X_test, axis=0))
assert_array_almost_equal(X_vars, np.var(X_test, axis=0))
def test_mean_variance_axis1():
X, _ = make_classification(5, 4, random_state=0)
# Sparsify the array a little bit
X[0, 0] = 0
X[2, 1] = 0
X[4, 3] = 0
X_lil = sp.lil_matrix(X)
X_lil[1, 0] = 0
X[1, 0] = 0
assert_raises(TypeError, mean_variance_axis, X_lil, axis=1)
X_csr = sp.csr_matrix(X_lil)
X_csc = sp.csc_matrix(X_lil)
expected_dtypes = [(np.float32, np.float32),
(np.float64, np.float64),
(np.int32, np.float64),
(np.int64, np.float64)]
for input_dtype, output_dtype in expected_dtypes:
X_test = X.astype(input_dtype)
for X_sparse in (X_csr, X_csc):
X_sparse = X_sparse.astype(input_dtype)
            X_means, X_vars = mean_variance_axis(X_sparse, axis=1)
            assert_equal(X_means.dtype, output_dtype)
            assert_equal(X_vars.dtype, output_dtype)
            assert_array_almost_equal(X_means, np.mean(X_test, axis=1))
            assert_array_almost_equal(X_vars, np.var(X_test, axis=1))
def test_incr_mean_variance_axis():
for axis in [0, 1]:
rng = np.random.RandomState(0)
n_features = 50
n_samples = 10
data_chunks = [rng.randint(0, 2, size=n_features)
for i in range(n_samples)]
# default params for incr_mean_variance
last_mean = np.zeros(n_features)
last_var = np.zeros_like(last_mean)
last_n = 0
# Test errors
X = np.array(data_chunks[0])
X = np.atleast_2d(X)
X_lil = sp.lil_matrix(X)
X_csr = sp.csr_matrix(X_lil)
assert_raises(TypeError, incr_mean_variance_axis, axis,
last_mean, last_var, last_n)
assert_raises(TypeError, incr_mean_variance_axis, axis,
last_mean, last_var, last_n)
assert_raises(TypeError, incr_mean_variance_axis, X_lil, axis,
last_mean, last_var, last_n)
# Test _incr_mean_and_var with a 1 row input
X_means, X_vars = mean_variance_axis(X_csr, axis)
X_means_incr, X_vars_incr, n_incr = \
incr_mean_variance_axis(X_csr, axis, last_mean, last_var, last_n)
assert_array_almost_equal(X_means, X_means_incr)
assert_array_almost_equal(X_vars, X_vars_incr)
assert_equal(X.shape[axis], n_incr) # X.shape[axis] picks # samples
X_csc = sp.csc_matrix(X_lil)
X_means, X_vars = mean_variance_axis(X_csc, axis)
assert_array_almost_equal(X_means, X_means_incr)
assert_array_almost_equal(X_vars, X_vars_incr)
assert_equal(X.shape[axis], n_incr)
# Test _incremental_mean_and_var with whole data
X = np.vstack(data_chunks)
X_lil = sp.lil_matrix(X)
X_csr = sp.csr_matrix(X_lil)
X_csc = sp.csc_matrix(X_lil)
expected_dtypes = [(np.float32, np.float32),
(np.float64, np.float64),
(np.int32, np.float64),
(np.int64, np.float64)]
for input_dtype, output_dtype in expected_dtypes:
for X_sparse in (X_csr, X_csc):
X_sparse = X_sparse.astype(input_dtype)
X_means, X_vars = mean_variance_axis(X_sparse, axis)
X_means_incr, X_vars_incr, n_incr = \
incr_mean_variance_axis(X_sparse, axis, last_mean,
last_var, last_n)
assert_equal(X_means_incr.dtype, output_dtype)
assert_equal(X_vars_incr.dtype, output_dtype)
assert_array_almost_equal(X_means, X_means_incr)
assert_array_almost_equal(X_vars, X_vars_incr)
assert_equal(X.shape[axis], n_incr)
def test_mean_variance_illegal_axis():
X, _ = make_classification(5, 4, random_state=0)
# Sparsify the array a little bit
X[0, 0] = 0
X[2, 1] = 0
X[4, 3] = 0
X_csr = sp.csr_matrix(X)
assert_raises(ValueError, mean_variance_axis, X_csr, axis=-3)
assert_raises(ValueError, mean_variance_axis, X_csr, axis=2)
assert_raises(ValueError, mean_variance_axis, X_csr, axis=-1)
assert_raises(ValueError, incr_mean_variance_axis, X_csr, axis=-3,
last_mean=None, last_var=None, last_n=None)
assert_raises(ValueError, incr_mean_variance_axis, X_csr, axis=2,
last_mean=None, last_var=None, last_n=None)
assert_raises(ValueError, incr_mean_variance_axis, X_csr, axis=-1,
last_mean=None, last_var=None, last_n=None)
def test_densify_rows():
for dtype in (np.float32, np.float64):
X = sp.csr_matrix([[0, 3, 0],
[2, 4, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=dtype)
X_rows = np.array([0, 2, 3], dtype=np.intp)
out = np.ones((6, X.shape[1]), dtype=dtype)
out_rows = np.array([1, 3, 4], dtype=np.intp)
expect = np.ones_like(out)
expect[out_rows] = X[X_rows, :].toarray()
assign_rows_csr(X, X_rows, out_rows, out)
assert_array_equal(out, expect)
def test_inplace_column_scale():
rng = np.random.RandomState(0)
X = sp.rand(100, 200, 0.05)
Xr = X.tocsr()
Xc = X.tocsc()
XA = X.toarray()
scale = rng.rand(200)
XA *= scale
inplace_column_scale(Xc, scale)
inplace_column_scale(Xr, scale)
assert_array_almost_equal(Xr.toarray(), Xc.toarray())
assert_array_almost_equal(XA, Xc.toarray())
assert_array_almost_equal(XA, Xr.toarray())
assert_raises(TypeError, inplace_column_scale, X.tolil(), scale)
X = X.astype(np.float32)
scale = scale.astype(np.float32)
Xr = X.tocsr()
Xc = X.tocsc()
XA = X.toarray()
XA *= scale
inplace_column_scale(Xc, scale)
inplace_column_scale(Xr, scale)
assert_array_almost_equal(Xr.toarray(), Xc.toarray())
assert_array_almost_equal(XA, Xc.toarray())
assert_array_almost_equal(XA, Xr.toarray())
assert_raises(TypeError, inplace_column_scale, X.tolil(), scale)
def test_inplace_row_scale():
rng = np.random.RandomState(0)
X = sp.rand(100, 200, 0.05)
Xr = X.tocsr()
Xc = X.tocsc()
XA = X.toarray()
scale = rng.rand(100)
XA *= scale.reshape(-1, 1)
inplace_row_scale(Xc, scale)
inplace_row_scale(Xr, scale)
assert_array_almost_equal(Xr.toarray(), Xc.toarray())
assert_array_almost_equal(XA, Xc.toarray())
assert_array_almost_equal(XA, Xr.toarray())
assert_raises(TypeError, inplace_column_scale, X.tolil(), scale)
X = X.astype(np.float32)
scale = scale.astype(np.float32)
Xr = X.tocsr()
Xc = X.tocsc()
XA = X.toarray()
XA *= scale.reshape(-1, 1)
inplace_row_scale(Xc, scale)
inplace_row_scale(Xr, scale)
assert_array_almost_equal(Xr.toarray(), Xc.toarray())
assert_array_almost_equal(XA, Xc.toarray())
assert_array_almost_equal(XA, Xr.toarray())
assert_raises(TypeError, inplace_column_scale, X.tolil(), scale)
def test_inplace_swap_row():
X = np.array([[0, 3, 0],
[2, 4, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float64)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
swap = linalg.get_blas_funcs(('swap',), (X,))
swap = swap[0]
X[0], X[-1] = swap(X[0], X[-1])
inplace_swap_row(X_csr, 0, -1)
inplace_swap_row(X_csc, 0, -1)
assert_array_equal(X_csr.toarray(), X_csc.toarray())
assert_array_equal(X, X_csc.toarray())
assert_array_equal(X, X_csr.toarray())
X[2], X[3] = swap(X[2], X[3])
inplace_swap_row(X_csr, 2, 3)
inplace_swap_row(X_csc, 2, 3)
assert_array_equal(X_csr.toarray(), X_csc.toarray())
assert_array_equal(X, X_csc.toarray())
assert_array_equal(X, X_csr.toarray())
assert_raises(TypeError, inplace_swap_row, X_csr.tolil())
X = np.array([[0, 3, 0],
[2, 4, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float32)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
swap = linalg.get_blas_funcs(('swap',), (X,))
swap = swap[0]
X[0], X[-1] = swap(X[0], X[-1])
inplace_swap_row(X_csr, 0, -1)
inplace_swap_row(X_csc, 0, -1)
assert_array_equal(X_csr.toarray(), X_csc.toarray())
assert_array_equal(X, X_csc.toarray())
assert_array_equal(X, X_csr.toarray())
X[2], X[3] = swap(X[2], X[3])
inplace_swap_row(X_csr, 2, 3)
inplace_swap_row(X_csc, 2, 3)
assert_array_equal(X_csr.toarray(), X_csc.toarray())
assert_array_equal(X, X_csc.toarray())
assert_array_equal(X, X_csr.toarray())
assert_raises(TypeError, inplace_swap_row, X_csr.tolil())
def test_inplace_swap_column():
X = np.array([[0, 3, 0],
[2, 4, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float64)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
swap = linalg.get_blas_funcs(('swap',), (X,))
swap = swap[0]
X[:, 0], X[:, -1] = swap(X[:, 0], X[:, -1])
inplace_swap_column(X_csr, 0, -1)
inplace_swap_column(X_csc, 0, -1)
assert_array_equal(X_csr.toarray(), X_csc.toarray())
assert_array_equal(X, X_csc.toarray())
assert_array_equal(X, X_csr.toarray())
X[:, 0], X[:, 1] = swap(X[:, 0], X[:, 1])
inplace_swap_column(X_csr, 0, 1)
inplace_swap_column(X_csc, 0, 1)
assert_array_equal(X_csr.toarray(), X_csc.toarray())
assert_array_equal(X, X_csc.toarray())
assert_array_equal(X, X_csr.toarray())
assert_raises(TypeError, inplace_swap_column, X_csr.tolil())
X = np.array([[0, 3, 0],
[2, 4, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float32)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
swap = linalg.get_blas_funcs(('swap',), (X,))
swap = swap[0]
X[:, 0], X[:, -1] = swap(X[:, 0], X[:, -1])
inplace_swap_column(X_csr, 0, -1)
inplace_swap_column(X_csc, 0, -1)
assert_array_equal(X_csr.toarray(), X_csc.toarray())
assert_array_equal(X, X_csc.toarray())
assert_array_equal(X, X_csr.toarray())
X[:, 0], X[:, 1] = swap(X[:, 0], X[:, 1])
inplace_swap_column(X_csr, 0, 1)
inplace_swap_column(X_csc, 0, 1)
assert_array_equal(X_csr.toarray(), X_csc.toarray())
assert_array_equal(X, X_csc.toarray())
assert_array_equal(X, X_csr.toarray())
assert_raises(TypeError, inplace_swap_column, X_csr.tolil())
def test_min_max_axis0():
X = np.array([[0, 3, 0],
[2, -1, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float64)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
mins_csr, maxs_csr = min_max_axis(X_csr, axis=0)
assert_array_equal(mins_csr, X.min(axis=0))
assert_array_equal(maxs_csr, X.max(axis=0))
mins_csc, maxs_csc = min_max_axis(X_csc, axis=0)
assert_array_equal(mins_csc, X.min(axis=0))
assert_array_equal(maxs_csc, X.max(axis=0))
X = X.astype(np.float32)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
mins_csr, maxs_csr = min_max_axis(X_csr, axis=0)
assert_array_equal(mins_csr, X.min(axis=0))
assert_array_equal(maxs_csr, X.max(axis=0))
mins_csc, maxs_csc = min_max_axis(X_csc, axis=0)
assert_array_equal(mins_csc, X.min(axis=0))
assert_array_equal(maxs_csc, X.max(axis=0))
def test_min_max_axis1():
X = np.array([[0, 3, 0],
[2, -1, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float64)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
mins_csr, maxs_csr = min_max_axis(X_csr, axis=1)
assert_array_equal(mins_csr, X.min(axis=1))
assert_array_equal(maxs_csr, X.max(axis=1))
mins_csc, maxs_csc = min_max_axis(X_csc, axis=1)
assert_array_equal(mins_csc, X.min(axis=1))
assert_array_equal(maxs_csc, X.max(axis=1))
X = X.astype(np.float32)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
mins_csr, maxs_csr = min_max_axis(X_csr, axis=1)
assert_array_equal(mins_csr, X.min(axis=1))
assert_array_equal(maxs_csr, X.max(axis=1))
mins_csc, maxs_csc = min_max_axis(X_csc, axis=1)
assert_array_equal(mins_csc, X.min(axis=1))
assert_array_equal(maxs_csc, X.max(axis=1))
def test_min_max_axis_errors():
X = np.array([[0, 3, 0],
[2, -1, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float64)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
assert_raises(TypeError, min_max_axis, X_csr.tolil(), axis=0)
assert_raises(ValueError, min_max_axis, X_csr, axis=2)
assert_raises(ValueError, min_max_axis, X_csc, axis=-3)
def test_count_nonzero():
X = np.array([[0, 3, 0],
[2, -1, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float64)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
X_nonzero = X != 0
sample_weight = [.5, .2, .3, .1, .1]
X_nonzero_weighted = X_nonzero * np.array(sample_weight)[:, None]
for axis in [0, 1, -1, -2, None]:
assert_array_almost_equal(count_nonzero(X_csr, axis=axis),
X_nonzero.sum(axis=axis))
assert_array_almost_equal(count_nonzero(X_csr, axis=axis,
sample_weight=sample_weight),
X_nonzero_weighted.sum(axis=axis))
assert_raises(TypeError, count_nonzero, X_csc)
assert_raises(ValueError, count_nonzero, X_csr, axis=2)
def test_csc_row_median():
# Test csc_row_median actually calculates the median.
# Test that it gives the same output when X is dense.
rng = np.random.RandomState(0)
X = rng.rand(100, 50)
dense_median = np.median(X, axis=0)
csc = sp.csc_matrix(X)
sparse_median = csc_median_axis_0(csc)
assert_array_equal(sparse_median, dense_median)
# Test that it gives the same output when X is sparse
X = rng.rand(51, 100)
X[X < 0.7] = 0.0
ind = rng.randint(0, 50, 10)
X[ind] = -X[ind]
csc = sp.csc_matrix(X)
dense_median = np.median(X, axis=0)
sparse_median = csc_median_axis_0(csc)
assert_array_equal(sparse_median, dense_median)
# Test for toy data.
X = [[0, -2], [-1, -1], [1, 0], [2, 1]]
csc = sp.csc_matrix(X)
assert_array_equal(csc_median_axis_0(csc), np.array([0.5, -0.5]))
X = [[0, -2], [-1, -5], [1, -3]]
csc = sp.csc_matrix(X)
assert_array_equal(csc_median_axis_0(csc), np.array([0., -3]))
# Test that it raises an Error for non-csc matrices.
assert_raises(TypeError, csc_median_axis_0, sp.csr_matrix(X))
def test_inplace_normalize():
ones = np.ones((10, 1))
rs = RandomState(10)
for inplace_csr_row_normalize in (inplace_csr_row_normalize_l1,
inplace_csr_row_normalize_l2):
for dtype in (np.float64, np.float32):
X = rs.randn(10, 5).astype(dtype)
X_csr = sp.csr_matrix(X)
inplace_csr_row_normalize(X_csr)
assert_equal(X_csr.dtype, dtype)
if inplace_csr_row_normalize is inplace_csr_row_normalize_l2:
X_csr.data **= 2
assert_array_almost_equal(np.abs(X_csr).sum(axis=1), ones)
| bsd-3-clause |
GuessWhoSamFoo/pandas | pandas/tests/io/msgpack/test_except.py | 2 | 1177 | # coding: utf-8
from datetime import datetime
import pytest
from pandas.io.msgpack import packb, unpackb
class DummyException(Exception):
pass
class TestExceptions(object):
def test_raise_on_find_unsupported_value(self):
msg = "can\'t serialize datetime"
with pytest.raises(TypeError, match=msg):
packb(datetime.now())
def test_raise_from_object_hook(self):
def hook(_):
raise DummyException()
with pytest.raises(DummyException):
unpackb(packb({}), object_hook=hook)
with pytest.raises(DummyException):
unpackb(packb({'fizz': 'buzz'}), object_hook=hook)
with pytest.raises(DummyException):
unpackb(packb({'fizz': 'buzz'}), object_pairs_hook=hook)
with pytest.raises(DummyException):
unpackb(packb({'fizz': {'buzz': 'spam'}}), object_hook=hook)
with pytest.raises(DummyException):
unpackb(packb({'fizz': {'buzz': 'spam'}}), object_pairs_hook=hook)
def test_invalid_value(self):
msg = "Unpack failed: error"
with pytest.raises(ValueError, match=msg):
unpackb(b"\xd9\x97#DL_")
| bsd-3-clause |
spallavolu/scikit-learn | examples/ensemble/plot_gradient_boosting_regularization.py | 355 | 2843 | """
================================
Gradient Boosting regularization
================================
Illustration of the effect of different regularization strategies
for Gradient Boosting. The example is taken from Hastie et al 2009.
The loss function used is binomial deviance. Regularization via
shrinkage (``learning_rate < 1.0``) improves performance considerably.
In combination with shrinkage, stochastic gradient boosting
(``subsample < 1.0``) can produce more accurate models by reducing the
variance via bagging.
Subsampling without shrinkage usually does poorly.
Another strategy to reduce the variance is by subsampling the features
analogous to the random splits in Random Forests
(via the ``max_features`` parameter).
.. [1] T. Hastie, R. Tibshirani and J. Friedman, "Elements of Statistical
Learning Ed. 2", Springer, 2009.
"""
print(__doc__)
# Author: Peter Prettenhofer <[email protected]>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import ensemble
from sklearn import datasets
X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
X = X.astype(np.float32)
# map labels from {-1, 1} to {0, 1}
labels, y = np.unique(y, return_inverse=True)
X_train, X_test = X[:2000], X[2000:]
y_train, y_test = y[:2000], y[2000:]
original_params = {'n_estimators': 1000, 'max_leaf_nodes': 4, 'max_depth': None, 'random_state': 2,
'min_samples_split': 5}
plt.figure()
for label, color, setting in [('No shrinkage', 'orange',
{'learning_rate': 1.0, 'subsample': 1.0}),
('learning_rate=0.1', 'turquoise',
{'learning_rate': 0.1, 'subsample': 1.0}),
('subsample=0.5', 'blue',
{'learning_rate': 1.0, 'subsample': 0.5}),
('learning_rate=0.1, subsample=0.5', 'gray',
{'learning_rate': 0.1, 'subsample': 0.5}),
('learning_rate=0.1, max_features=2', 'magenta',
{'learning_rate': 0.1, 'max_features': 2})]:
params = dict(original_params)
params.update(setting)
clf = ensemble.GradientBoostingClassifier(**params)
clf.fit(X_train, y_train)
# compute test set deviance
test_deviance = np.zeros((params['n_estimators'],), dtype=np.float64)
for i, y_pred in enumerate(clf.staged_decision_function(X_test)):
# clf.loss_ assumes that y_test[i] in {0, 1}
test_deviance[i] = clf.loss_(y_test, y_pred)
plt.plot((np.arange(test_deviance.shape[0]) + 1)[::5], test_deviance[::5],
'-', color=color, label=label)
plt.legend(loc='upper left')
plt.xlabel('Boosting Iterations')
plt.ylabel('Test Set Deviance')
plt.show()
| bsd-3-clause |
cedadev/jasmin_cis | cis/data_io/ungridded_data.py | 2 | 53343 | """
Module for the UngriddedData class
"""
import logging
from time import gmtime, strftime
import numpy
import six
from cis.data_io.netcdf import get_data as netcdf_get_data
from cis.data_io.hdf_vd import get_data as hdf_vd_get_data
from cis.data_io.hdf_sd import get_data as hdf_sd_get_data
from cis.data_io.common_data import CommonData, CommonDataList
from cis.data_io.hyperpoint_view import UngriddedHyperPointView
from cis.data_io.write_netcdf import add_data_to_file, write_coordinates
from cis.utils import listify
import cis.maths
class Metadata(object):
@classmethod
def from_CubeMetadata(cls, cube_meta):
return cls(name=cube_meta.var_name, standard_name=cube_meta.standard_name, long_name=cube_meta.long_name,
units=str(cube_meta.units), misc=cube_meta.attributes)
def __init__(self, name='', standard_name='', long_name='', shape=None, units='', range=None, factor=None,
offset=None, missing_value=None, history='', misc=None):
self._name = name
self._standard_name = ''
if standard_name:
self.standard_name = standard_name
elif name:
self.standard_name = Metadata.guess_standard_name(name)
self.long_name = long_name
self.shape = shape
self.units = units
self.range = range
self.factor = factor
self.offset = offset
self.missing_value = missing_value
self.history = history
if misc is None:
self.misc = {}
else:
self.misc = misc
def __eq__(self, other):
result = NotImplemented
if isinstance(other, Metadata):
result = self._name == other._name and \
self._standard_name == other._standard_name and \
self.long_name == other.long_name and \
self.units == other.units
return result
# Must supply __ne__, Python does not defer to __eq__ for negative equality
def __ne__(self, other):
result = self.__eq__(other)
if result is not NotImplemented:
result = not result
return result
# Must supply __hash__, Python 3 does not enable it if __eq__ is defined
def __hash__(self):
return hash(id(self))
def summary(self, offset=5):
"""
Creates a unicode summary of the metadata object
:param offset: The left hand padding to apply to the text
:return: The summary
"""
from datetime import datetime
string = ''
string += '{pad:{width}}Long name = {lname}\n'.format(pad=' ', width=offset, lname=self.long_name)
string += '{pad:{width}}Standard name = {sname}\n'.format(pad=' ', width=offset, sname=self.standard_name)
string += '{pad:{width}}Units = {units}\n'.format(pad=' ', width=offset, units=self.units)
string += '{pad:{width}}Missing value = {mval}\n'.format(pad=' ', width=offset, mval=self.missing_value)
# str(tuple) returns repr(obj) on each item in the tuple, if we have a datetime tuple then we want str(obj)
# instead. Just make that ourselves here instead (as a str to avoid the extra quotes if we make a 'real' tuple)
        if self.range and isinstance(self.range[0], datetime):
range_tuple = '({}, {})'.format(*self.range)
else:
range_tuple = self.range
string += '{pad:{width}}Range = {range}\n'.format(pad=' ', width=offset, range=range_tuple)
string += '{pad:{width}}History = {history}\n'.format(pad=' ', width=offset, history=self.history)
if self.misc:
string += '{pad:{width}}Misc attributes: \n'.format(pad=' ', width=offset)
for k, v in self.misc.items():
string += '{pad:{width}}{att} = {val}\n'.format(pad=' ', width=offset + 2, att=k.title(), val=v)
return string
def __str__(self):
# six has a decorator for this bit, but it doesn't do errors='replace'.
if six.PY3:
return self.summary()
else:
return self.summary().encode(errors='replace')
def __unicode__(self):
return self.summary()
@property
def standard_name(self):
return self._standard_name
@standard_name.setter
def standard_name(self, standard_name):
from iris.std_names import STD_NAMES
if standard_name is None or standard_name in STD_NAMES:
# If the standard name is actually changing from one to another then log the fact
if self.standard_name is not None \
                    and self.standard_name.strip() != "" \
and self.standard_name != standard_name:
logging.debug("Changing standard name for dataset from '{}' to '{}'".format(self.standard_name,
standard_name))
self._standard_name = standard_name
else:
raise ValueError('%r is not a valid standard_name' % standard_name)
@property
def units(self):
return self._units
@units.setter
def units(self, units):
from cf_units import Unit
if not isinstance(units, Unit):
try:
# Try some basic tidying up of unit
if isinstance(units, six.string_types):
if 'since' in units.lower():
# Often this time since epoch units are weirdly capitalised, or include extra punctuation
units = units.lower().replace("since:", "since").replace(",", "")
else:
# Replace number with 1 (e.g. #/cm3 == cm-3)
units = units.replace('#', '1')
units = Unit(units)
except ValueError:
logging.info("Unable to parse cf-units: {}. Some operations may not be available.".format(units))
self._units = units
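    # Illustrative tidy-ups performed above (a sketch of the behaviour, not an
    # exhaustive list): '#/cm3' is rewritten to '1/cm3' before parsing, and
    # 'Days Since: 1600-01-01' becomes 'days since 1600-01-01'; strings that
    # still fail to parse are stored as plain strings, so unit-aware
    # operations may be unavailable for them.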
@staticmethod
def guess_standard_name(name):
standard_name = None
if name.lower().startswith('lat'):
standard_name = 'latitude'
elif name.lower().startswith('lon'):
standard_name = 'longitude'
elif name.lower().startswith('alt') or name.lower() == 'height':
standard_name = 'altitude'
elif name.lower().startswith('pres') or name.lower() == 'air_pressure':
standard_name = 'air_pressure'
elif name.lower() == 'time':
standard_name = 'time'
return standard_name
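# Illustrative results of the guess above (these follow directly from the
# prefix checks and are not an exhaustive list):
#
#   Metadata(name='lat_1').standard_name      # -> 'latitude'
#   Metadata(name='Pressure').standard_name   # -> 'air_pressure'
#   Metadata(name='flux').standard_name       # -> None (no guess made)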
# This defines the mappings for each of the ungridded data types to their reading routines, this allows 'lazy loading'
static_mappings = {"SDS": hdf_sd_get_data,
"HDF_SDS": hdf_sd_get_data,
"VDS": hdf_vd_get_data,
"Variable": netcdf_get_data,
"_Variable": netcdf_get_data}
class LazyData(object):
"""
Wrapper (adaptor) class for the different types of possible ungridded data.
"""
def __init__(self, data, metadata, data_retrieval_callback=None):
"""
:param data: The data handler (e.g. SDS instance) for the specific data type, or a numpy array of data
This can be a list of data handlers, or a single data handler
:param metadata: Any associated metadata
:param data_retrieval_callback: An, optional, method for retrieving data when needed
"""
from cis.exceptions import InvalidDataTypeError
from iris.cube import CubeMetadata
import numpy as np
self._data_flattened = None
self.attributes = {}
self.metadata = Metadata.from_CubeMetadata(metadata) if isinstance(metadata, CubeMetadata) else metadata
if isinstance(data, np.ndarray):
# If the data input is a numpy array we can just copy it in and ignore the data_manager
self._data = data
self._data_manager = None
self._post_process()
else:
# If the data input wasn't a numpy array we assume it is a data reference (e.g. SDS) and we refer
# this as a 'data manager' as it is responsible for getting the actual data.
self._data = None
# Although the data can be a list or a single item it's useful to cast it
# to a list here to make accessing it consistent
self._data_manager = listify(data)
if data_retrieval_callback is not None:
# Use the given data retrieval method
self.retrieve_raw_data = data_retrieval_callback
elif type(self._data_manager[0]).__name__ in static_mappings and \
all([type(d).__name__ == type(self._data_manager[0]).__name__ for d in self._data_manager]):
# Check that we recognise the data manager and that they are all the same
# Set the retrieve_raw_data method to it's mapped function name
self.retrieve_raw_data = static_mappings[type(self._data_manager[0]).__name__]
else:
raise InvalidDataTypeError
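    # Illustrative construction (a sketch, not taken from the original source):
    # the simplest path through __init__ wraps an in-memory numpy array,
    #
    #   values = numpy.ma.masked_invalid(numpy.array([1.0, 2.0, numpy.nan]))
    #   meta = Metadata(name='aod550', long_name='Aerosol optical depth', units='1')
    #   lazy = LazyData(values, meta)    # data held directly, no data manager
    #
    # whereas passing one or more SDS/VDS/netCDF variable handles defers the
    # actual read until the .data property is first accessed.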
def name(self, default='unknown'):
"""
This routine returns the first name property which is not empty out of: standard_name, long_name and var_name.
If they are all empty it returns the default string (which is 'unknown' by default).
:return: The name of the data object as a string
"""
return self.standard_name or self.long_name or self.var_name or default
@property
def shape(self):
return self.metadata.shape
@shape.setter
def shape(self, shape):
self.metadata.shape = shape
@property
def long_name(self):
return self.metadata.long_name
@long_name.setter
def long_name(self, long_name):
self.metadata.long_name = long_name
@property
def standard_name(self):
return self.metadata.standard_name
@standard_name.setter
def standard_name(self, standard_name):
self.metadata.standard_name = standard_name
@property
def var_name(self):
return self.metadata._name
@var_name.setter
def var_name(self, var_name):
self.metadata._name = var_name
@property
def units(self):
return self.metadata.units
@units.setter
def units(self, units):
self.metadata.units = units
@property
def data(self):
"""
This is a getter for the data property. It caches the raw data if it has not already been read.
Throws a MemoryError when reading for the first time if the data is too large.
"""
import numpy.ma as ma
if self._data is None:
try:
# If we were given a list of data managers then we need to concatenate them now...
self._data = self.retrieve_raw_data(self._data_manager[0])
if len(self._data_manager) > 1:
for manager in self._data_manager[1:]:
self._data = ma.concatenate((self._data, self.retrieve_raw_data(manager)), axis=0)
self._post_process()
except MemoryError:
raise MemoryError(
"Failed to read the ungridded data as there was not enough memory available.\n"
"Consider freeing up variables or indexing the cube before getting its data.")
return self._data
def _post_process(self):
"""
Perform a post-processing step on lazy loaded data
:return: None
"""
pass
def __eq__(self, other):
import numpy as np
result = NotImplemented
if isinstance(other, LazyData):
# Check the metadata
result = self.metadata == other.metadata
# Then, if that is OK, check the data
if result:
result = np.allclose(self.data, other.data)
return result
# Must supply __ne__, Python does not defer to __eq__ for negative equality
def __ne__(self, other):
result = self.__eq__(other)
if result is not NotImplemented:
result = not result
return result
# Must supply __hash__, Python 3 does not enable it if __eq__ is defined
def __hash__(self):
return hash(id(self))
# Maths operator overloads
__add__ = cis.maths.add
__radd__ = __add__
    __sub__ = cis.maths.subtract
__mul__ = cis.maths.multiply
__rmul__ = cis.maths.multiply
__div__ = cis.maths.divide
__truediv__ = cis.maths.divide
__pow__ = cis.maths.exponentiate
@data.setter
def data(self, value):
self._data = value
self._data_flattened = None
@property
def data_flattened(self):
"""Returns a 1D flattened view (or copy, if necessary) of the data.
"""
if self._data_flattened is None:
data = self.data
self._data_flattened = data.ravel()
return self._data_flattened
def add_history(self, new_history):
"""Appends to, or creates, the metadata history attribute using the supplied history string.
The new entry is prefixed with a timestamp.
:param new_history: history string
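        Example (an illustrative sketch; ``data`` stands for any LazyData or
        UngriddedData instance)::
            data.add_history("Subsetted to the tropics")
            # metadata.history now ends with a timestamped entry, e.g.
            # "2015-06-01T12:00:00Z Subsetted to the tropics"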
"""
timestamp = strftime("%Y-%m-%dT%H:%M:%SZ ", gmtime())
if hasattr(self.metadata, 'history') and len(self.metadata.history) > 0:
self.metadata.history += '\n' + timestamp + new_history
else:
self.metadata.history = timestamp + new_history
def add_attributes(self, attributes):
"""
Add a variable attribute to this data
:param attributes: Dictionary of attribute names (keys) and values.
:return:
"""
self.attributes.update(attributes)
def remove_attribute(self, key):
"""
Remove a variable attribute from this data
:param key: Attribute key to remove
:return:
"""
self.attributes.pop(key, None)
def save_data(self, output_file):
logging.info('Saving data to %s' % output_file)
write_coordinates(self, output_file)
add_data_to_file(self, output_file)
def update_shape(self, shape=None):
if shape:
self.metadata.shape = shape
else:
self.metadata.shape = self.data.shape
def update_range(self, range=None):
from cis.time_util import cis_standard_time_unit
# If the user hasn't specified a range then work it out...
if not range:
standard_time = False
try:
standard_time = self.units == cis_standard_time_unit
except ValueError:
# If UDUNITS can't compare the units then it will raise a ValueError, in which case it's definitely not
# our standard time
pass
try:
if standard_time:
range = (cis_standard_time_unit.num2date(self.data.min()),
cis_standard_time_unit.num2date(self.data.max()))
else:
range = (self.data.min(), self.data.max())
except ValueError as e:
# If we can't set a range for some reason then just leave it blank
range = ()
self.metadata.range = range
def convert_units(self, new_units):
"""
        Convert the units of this LazyData object to new_units in place
:param cf_units.Unit or str new_units:
:raises ValueError if new_units can't be converted to standard units, or units are incompatible
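        Example (an illustrative sketch; assumes ``data`` currently has units of
        'm', which are compatible with the requested units)::
            data.convert_units('km')                  # plain unit string
            data.convert_units(cf_units.Unit('km'))   # or a cf_units.Unit object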
"""
from cf_units import Unit
if not isinstance(new_units, Unit):
new_units = Unit(new_units)
if not isinstance(self.units, Unit):
            # If our units aren't cf_units objects then they can't be converted by this method
raise ValueError("Unable to convert non-standard LazyData units: {}".format(self.units))
self.units.convert(self.data, new_units, inplace=True)
self.units = new_units
class UngriddedData(LazyData, CommonData):
"""
Wrapper (adaptor) class for the different types of possible ungridded data.
"""
def __init__(self, data, metadata, coords, data_retrieval_callback=None):
"""
Constructor
:param data: The data handler (e.g. SDS instance) for the specific data type, or a numpy array of data.
This can be a list of data handlers, or a single data handler
:param metadata: Any associated metadata
:param coords: A list of the associated Coord objects
:param data_retrieval_callback: A method for retrieving data when needed
"""
from cis.data_io.Coord import CoordList, Coord
if isinstance(coords, list):
self._coords = CoordList(coords)
elif isinstance(coords, CoordList):
self._coords = coords
elif isinstance(coords, Coord):
self._coords = CoordList([coords])
else:
raise ValueError("Invalid Coords type")
# TODO Find a cleaner workaround for this, for some reason UDUNITS can not parse 'per kilometer per steradian'
if str(metadata.units) == 'per kilometer per steradian':
metadata.units = 'kilometer^-1 steradian^-1'
super(UngriddedData, self).__init__(data, metadata, data_retrieval_callback)
@property
def coords_flattened(self):
all_coords = self.coords().find_standard_coords()
return [(c.data_flattened if c is not None else None) for c in all_coords]
def _post_process(self):
"""
Perform a post processing step on lazy loaded Ungridded Data.
:return:
"""
# Load the data if not already loaded
if self._data is None:
data = self.data
else:
# Remove any points with missing coordinate values:
combined_mask = numpy.zeros(self._data.shape, dtype=bool).flatten()
for coord in self._coords:
combined_mask |= numpy.ma.getmaskarray(coord.data).flatten()
if coord.data.dtype != 'object':
combined_mask |= numpy.isnan(coord.data).flatten()
coord.update_shape()
coord.update_range()
if combined_mask.any():
n_points = numpy.count_nonzero(combined_mask)
logging.warning(
"Identified {n_points} point(s) which were missing values for some or all coordinates - "
"these points have been removed from the data.".format(n_points=n_points))
for coord in self._coords:
coord.data = numpy.ma.masked_array(coord.data.flatten(), mask=combined_mask).compressed()
coord.update_shape()
coord.update_range()
if numpy.ma.is_masked(self._data):
new_data_mask = numpy.ma.masked_array(self._data.mask.flatten(), mask=combined_mask).compressed()
new_data = numpy.ma.masked_array(self._data.data.flatten(), mask=combined_mask).compressed()
self._data = numpy.ma.masked_array(new_data, mask=new_data_mask)
else:
self._data = numpy.ma.masked_array(self._data.flatten(), mask=combined_mask).compressed()
self.update_shape()
self.update_range()
def make_new_with_same_coordinates(self, data=None, var_name=None, standard_name=None,
long_name=None, history=None, units=None, flatten=False):
"""
Create a new, empty UngriddedData object with the same coordinates as this one.
:param data: Data to use (if None then defaults to all zeros)
:param var_name: Variable name
:param standard_name: Variable CF standard name
:param long_name: Variable long name
:param history: Data history string
:param units: Variable units
:param flatten: Whether to flatten the data and coordinates (for ungridded data only)
:return: UngriddedData instance
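        Example (an illustrative sketch; ``ug`` is an assumed, already-loaded
        UngriddedData object)::
            import numpy
            anomaly = ug.make_new_with_same_coordinates(
                data=ug.data - numpy.mean(ug.data), var_name='anomaly',
                long_name='Anomaly from the mean', units=ug.units,
                history='Computed anomaly from the mean')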
"""
if data is None:
data = numpy.zeros(self.shape)
metadata = Metadata(name=var_name, standard_name=standard_name,
long_name=long_name, history='', units=units)
if flatten:
from cis.data_io.Coord import Coord
data = data.flatten()
new_coords = []
for coord in self._coords:
new_coords.append(Coord(coord.data_flattened, coord.metadata, coord.axis))
else:
new_coords = self._coords
ug_data = UngriddedData(data=data, metadata=metadata, coords=new_coords)
# Copy the history in separately so it gets the timestamp added.
if history:
ug_data.add_history(history)
return ug_data
def __getitem__(self, keys):
"""
Return a COPY of the data with the given slice. We copy to emulate the Iris Cube behaviour
"""
from copy import deepcopy
# Create a copy of the slice of each of the coords
new_coords = []
for c in self.coords():
new_coords.append(c[keys])
        # The data is just a new object with the sliced data. Note this is a slice of the whole (concatenated)
# data, and will lead to post-processing before slicing.
# TODO: We could be cleverer and figure out the right slice across the various data managers to only read the
# right data from disk.
return UngriddedData(data=self.data[keys].copy(), metadata=deepcopy(self.metadata), coords=new_coords)
def copy(self, data=None):
"""
Create a copy of this UngriddedData object with new data and coordinates
        so that they can be modified without held references being affected.
Will call any lazy loading methods in the data and coordinates
:param ndarray data: Replace the data of the ungridded data copy with provided data
:return: Copied UngriddedData object
"""
from copy import deepcopy
data = data if data is not None else numpy.ma.copy(self.data) # This will load the data if lazy load
coords = self.coords().copy()
return UngriddedData(data=data, metadata=deepcopy(self.metadata), coords=coords)
@property
def size(self):
return self.data.size
def count(self):
return self.data.count() if hasattr(self.data, 'count') else self.data.size
@property
def history(self):
return self.metadata.history
@property
def x(self):
return self.coord(axis='X')
@property
def y(self):
return self.coord(axis='Y')
@property
def lat(self):
return self.coord(standard_name='latitude')
@property
def lon(self):
return self.coord(standard_name='longitude')
@property
def time(self):
return self.coord(standard_name='time')
def hyper_point(self, index):
"""
:param index: The index in the array to find the point for
:return: A hyperpoint representing the data at that point
"""
from cis.data_io.hyperpoint import HyperPoint
return HyperPoint(self.coord(standard_name='latitude').data.flat[index],
self.coord(standard_name='longitude').data.flat[index],
self.coord(standard_name='altitude').data.flat[index],
self.coord(standard_name='time').data.flat[index],
self.coord(standard_name='air_pressure').data.flat[index],
self.data.flat[index])
def as_data_frame(self, copy=True, time_index=True, name=None):
"""
Convert an UngriddedData object to a Pandas DataFrame.
:param copy: Create a copy of the data for the new DataFrame? Default is True.
:return: A Pandas DataFrame representing the data and coordinates. Note that this won't include any metadata.
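        Example (an illustrative sketch; assumes ``ug`` has a time coordinate, so
        the returned frame is indexed by datetime)::
            df = ug.as_data_frame()
            daily_mean = df[ug.name()].resample('D').mean()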
"""
df = _coords_as_data_frame(self.coords(), time_index=time_index)
try:
df[name or self.name()] = _to_flat_ndarray(self.data, copy)
except ValueError:
logging.warn("Copy created of MaskedArray for {} when creating Pandas DataFrame".format(self.name()))
df[name or self.name()] = _to_flat_ndarray(self.data, True)
return df
def coords(self, name_or_coord=None, standard_name=None, long_name=None, attributes=None, axis=None, var_name=None,
dim_coords=True):
"""
:return: A list of coordinates in this UngriddedData object fitting the given criteria
"""
self._post_process()
return self._coords.get_coords(name_or_coord, standard_name, long_name, attributes, axis, var_name)
def coord(self, name_or_coord=None, standard_name=None, long_name=None, attributes=None, axis=None, var_name=None):
"""
:raise: CoordinateNotFoundError
:return: A single coord given the same arguments as :meth:`coords`.
"""
return self.coords().get_coord(name_or_coord, standard_name, long_name, attributes, axis, var_name)
def get_coordinates_points(self):
"""Returns a HyperPointView of the coordinates of points.
:return: HyperPointView of the coordinates of points
"""
return UngriddedHyperPointView(self.coords_flattened, None)
def get_all_points(self):
"""Returns a HyperPointView of the points.
:return: HyperPointView of all the data points
"""
return UngriddedHyperPointView(self.coords_flattened, self.data_flattened)
def get_non_masked_points(self):
"""Returns a HyperPointView for which the default iterator omits masked points.
:return: HyperPointView of the data points
"""
return UngriddedHyperPointView(self.coords_flattened, self.data_flattened, non_masked_iteration=True)
def find_standard_coords(self):
"""Constructs a list of the standard coordinates.
The standard coordinates are latitude, longitude, altitude, air_pressure and time; they occur in the return
list in this order.
:return: list of coordinates or None if coordinate not present
"""
return self.coords().find_standard_coords()
@property
def is_gridded(self):
"""Returns value indicating whether the data/coordinates are gridded.
"""
return False
@classmethod
def from_points_array(cls, hyperpoints):
"""
        Constructor for building an UngriddedData object from a list of hyper points
:param hyperpoints: list of HyperPoints
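        Example (an illustrative sketch; the positional argument order for
        HyperPoint is assumed from its use in :meth:`hyper_point` above, i.e.
        latitude, longitude, altitude, time, air_pressure, value)::
            from cis.data_io.hyperpoint import HyperPoint
            points = [HyperPoint(10.0, 0.0, None, None, None, 300.0),
                      HyperPoint(20.0, 5.0, None, None, None, 301.5)]
            ug = UngriddedData.from_points_array(points)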
"""
from cis.data_io.Coord import Coord, CoordList
from cis.data_io.hyperpoint import HyperPointList
from cis.time_util import cis_standard_time_unit
if not isinstance(hyperpoints, HyperPointList):
hyperpoints = HyperPointList(hyperpoints)
values = hyperpoints.vals
latitude = hyperpoints.latitudes
longitude = hyperpoints.longitudes
air_pressure = hyperpoints.air_pressures
altitude = hyperpoints.altitudes
time = hyperpoints.times
coord_list = []
if latitude is not None:
coord_list.append(Coord(latitude, Metadata(standard_name='latitude', units='degrees north')))
if longitude is not None:
coord_list.append(Coord(longitude, Metadata(standard_name='longitude', units='degrees east')))
if air_pressure is not None:
coord_list.append(Coord(air_pressure, Metadata(standard_name='air_pressure', units='Pa')))
if altitude is not None:
coord_list.append(Coord(altitude, Metadata(standard_name='altitude', units='meters')))
if time is not None:
coord_list.append(Coord(time, Metadata(standard_name='time', units=cis_standard_time_unit)))
coords = CoordList(coord_list)
return cls(values, Metadata(), coords)
def summary(self, shorten=False):
"""
Unicode summary of the UngriddedData with metadata of itself and its coordinates
"""
summary = 'Ungridded data: {name} / ({units}) \n'.format(name=self.name(), units=self.units)
if shorten:
return summary
summary += ' Shape = {}\n'.format(self.data.shape) + '\n'
summary += ' Total number of points = {}\n'.format(self.size)
summary += ' Number of non-masked points = {}\n'.format(self.count())
summary += str(self.metadata)
summary += ' Coordinates: \n'
for c in self.coords():
summary += '{pad:{width}}{name}\n'.format(pad=' ', width=7, name=c.name())
c.update_range()
summary += c.metadata.summary(offset=10)
return summary
def __repr__(self):
return "<cis 'UngriddedData' of %s>" % self.summary(shorten=True)
def __str__(self):
# six has a decorator for this bit, but it doesn't do errors='replace'.
if six.PY3:
return self.summary()
else:
return self.summary().encode(errors='replace')
def __unicode__(self):
return self.summary()
def set_longitude_range(self, range_start):
"""
Rotates the longitude coordinate array and changes its values by
360 as necessary to force the values to be within a 360 range starting
at the specified value.
:param range_start: starting value of required longitude range
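        Example (illustrative; assumes a longitude coordinate is present)::
            ug.set_longitude_range(-180.0)   # wrap longitudes into [-180, 180)
            ug.set_longitude_range(0.0)      # wrap longitudes into [0, 360)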
"""
from cis.utils import fix_longitude_range
self.coord(standard_name='longitude').data = fix_longitude_range(self.lon.points, range_start)
def subset(self, **kwargs):
"""
Subset the data based on the specified constraints. Note that the limits are inclusive.
The subset region is defined by passing keyword arguments for each dimension to be subset over, each argument
must be a slice, or have two entries (a maximum and a minimum). Datetime objects can be used to specify upper
and lower datetime limits, or a single PartialDateTime object can be used to specify a datetime range.
The keyword keys are used to find the relevant coordinate, they are looked for in order of name, standard_name,
axis and var_name.
For example:
data.subset(x=[0, 80], y=slice(10, 50))
or:
        data.subset(t=PartialDateTime(2008,9))
A shape keyword can also be supplied as a WKT string or shapely object to subset in lat/lon by an arbitrary
shape. In this case the lat/lon bounds are taken as the bounding box of the shape.
:param kwargs: The constraints for each coordinate dimension
:return CommonData:
"""
from cis.subsetting.subset import subset, UngriddedSubsetConstraint
return subset(self, UngriddedSubsetConstraint, **kwargs)
def aggregate(self, how=None, **kwargs):
"""
Aggregate the UngriddedData object based on the specified grids. The grid is defined by passing keyword
arguments for each dimension, each argument must be a slice, or have three entries (a maximum, a minimum and a
gridstep). The default aggregation method ('moments') returns the mean, standard deviation and number of points
as separate GriddedData objects.
Datetime objects can be used to specify upper and lower datetime limits, or a
single PartialDateTime object can be used to specify a datetime range. The gridstep can be specified as a
DateTimeDelta object.
The keyword keys are used to find the relevant coordinate, they are looked for in order of name, standard_name,
axis and var_name.
For example:
data.aggregate(x=[-180, 180, 360], y=slice(-90, 90, 10))
or:
        data.aggregate(how='mean', t=[PartialDateTime(2008,9), timedelta(days=1)])
:param str how: The kernel to use in the aggregation (moments, mean, min, etc...). Default is moments
:param kwargs: The grid specifications for each coordinate dimension
:return GriddedData:
"""
agg = _aggregate_ungridded(self, how, **kwargs)
# Return the single item if there's only one (this depends on the kernel used)
if len(agg) == 1:
agg = agg[0]
return agg
def sampled_from(self, data, how='', kernel=None, missing_data_for_missing_sample=True, fill_value=None,
var_name='', var_long_name='', var_units='', **kwargs):
"""
Collocate the CommonData object with another CommonData object using the specified collocator and kernel
:param CommonData or CommonDataList data: The data to resample
:param str how: Collocation method (e.g. lin, nn, bin or box)
:param str or cis.collocation.col_framework.Kernel kernel:
:param bool missing_data_for_missing_sample: Should missing values in sample data be ignored for collocation?
:param float fill_value: Value to use for missing data
:param str var_name: The output variable name
:param str var_long_name: The output variable's long name
:param str var_units: The output variable's units
:return CommonData: The collocated dataset
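        Example (an illustrative sketch, not a prescription; ``aircraft_obs`` and
        ``satellite_obs`` are assumed, pre-loaded UngriddedData objects and the
        'box'/'mean' choices simply name one plausible collocation setup)::
            collocated = aircraft_obs.sampled_from(satellite_obs, how='box',
                                                   kernel='mean')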
"""
return _ungridded_sampled_from(self, data, how=how, kernel=kernel,
missing_data_for_missing_sample=missing_data_for_missing_sample,
fill_value=fill_value, var_name=var_name, var_long_name=var_long_name,
var_units=var_units, **kwargs)
def _get_default_plot_type(self, lat_lon=False):
if lat_lon:
return 'scatter2d'
else:
return 'line'
class UngriddedCoordinates(CommonData):
"""
    Wrapper (adaptor) class for the different types of possible ungridded coordinate data.
"""
def __init__(self, coords):
"""
Constructor
:param coords: A list of the associated Coord objects
"""
from cis.data_io.Coord import CoordList, Coord
if isinstance(coords, list):
self._coords = CoordList(coords)
elif isinstance(coords, CoordList):
self._coords = coords
elif isinstance(coords, Coord):
self._coords = CoordList([coords])
else:
raise ValueError("Invalid Coords type")
self._post_process()
def _post_process(self):
"""
Perform a post processing step on lazy loaded Coordinate Data
:return:
"""
# Remove any points with missing coordinate values:
combined_mask = numpy.zeros(self._coords[0].data_flattened.shape, dtype=bool)
for coord in self._coords:
combined_mask |= numpy.ma.getmaskarray(coord.data_flattened)
if coord.data.dtype != 'object':
combined_mask |= numpy.isnan(coord.data).flatten()
if combined_mask.any():
n_points = numpy.count_nonzero(combined_mask)
logging.warning("Identified {n_points} point(s) which were missing values for some or all coordinates - "
"these points have been removed from the data.".format(n_points=n_points))
for coord in self._coords:
coord.data = numpy.ma.masked_array(coord.data_flattened, mask=combined_mask).compressed()
coord.update_shape()
coord.update_range()
@property
def coords_flattened(self):
all_coords = self.coords().find_standard_coords()
return [(c.data_flattened if c is not None else None) for c in all_coords]
@property
def history(self):
return "UngriddedCoordinates have no history"
@property
def size(self):
if len(self._coords) > 1:
return self._coords[0].data.size
else:
return 0
def count(self):
# There can be no masked coordinate points
return self.size
@property
def x(self):
return self.coord(axis='X')
@property
def y(self):
return self.coord(axis='Y')
@property
def lat(self):
return self.coord(standard_name='latitude')
@property
def lon(self):
return self.coord(standard_name='longitude')
@property
def time(self):
return self.coord(standard_name='time')
@property
def var_name(self):
return ''
def hyper_point(self, index):
"""
:param index: The index in the array to find the point for
:return: A hyperpoint representing the data at that point
"""
from cis.data_io.hyperpoint import HyperPoint
return HyperPoint(self.coord(standard_name='latitude').data.flat[index],
self.coord(standard_name='longitude').data.flat[index],
self.coord(standard_name='altitude').data.flat[index],
self.coord(standard_name='time').data.flat[index],
self.coord(standard_name='air_pressure').data.flat[index],
None)
def as_data_frame(self, copy=True, time_index=True, name=None):
"""
Convert an UngriddedCoordinates object to a Pandas DataFrame.
:param copy: Create a copy of the data for the new DataFrame? Default is True.
:return: A Pandas DataFrame representing the data and coordinates. Note that this won't include any metadata.
"""
return _coords_as_data_frame(self._coords, time_index=time_index)
def coords(self, name_or_coord=None, standard_name=None, long_name=None, attributes=None, axis=None, var_name=None,
dim_coords=True):
"""
:return: A list of coordinates in this UngriddedData object fitting the given criteria
"""
return self._coords.get_coords(name_or_coord, standard_name, long_name, attributes, axis, var_name)
def coord(self, name_or_coord=None, standard_name=None, long_name=None, attributes=None, axis=None, var_name=None):
"""
:raise: CoordinateNotFoundError
:return: A single coord given the same arguments as :meth:`coords`.
"""
return self._coords.get_coord(name_or_coord, standard_name, long_name, attributes, axis, var_name)
def get_coordinates_points(self):
return UngriddedHyperPointView(self.coords_flattened, None)
def get_all_points(self):
"""Returns a HyperPointView of the points.
:return: HyperPointView of all the data points
"""
return UngriddedHyperPointView(self.coords_flattened, None)
def get_non_masked_points(self):
"""Returns a HyperPointView for which the default iterator omits masked points.
:return: HyperPointView of the data points
"""
return UngriddedHyperPointView(self.coords_flattened, None, non_masked_iteration=True)
@property
def is_gridded(self):
"""Returns value indicating whether the data/coordinates are gridded.
"""
return False
def set_longitude_range(self, range_start):
"""
Rotates the longitude coordinate array and changes its values by
360 as necessary to force the values to be within a 360 range starting
at the specified value.
:param range_start: starting value of required longitude range
"""
from cis.utils import fix_longitude_range
self.coord(standard_name='longitude').data = fix_longitude_range(self.lon.points, range_start)
def subset(self, **kwargs):
raise NotImplementedError("Subset is not available for UngriddedCoordinates objects")
def collocated_onto(self, sample, how='', kernel=None, **kwargs):
raise NotImplementedError("UngriddedCoordinates objects cannot be used as sources of data for collocation.")
def sampled_from(self, data, how='', kernel=None, missing_data_for_missing_sample=False, fill_value=None,
var_name='', var_long_name='', var_units='', **kwargs):
"""
Collocate the CommonData object with another CommonData object using the specified collocator and kernel
Note - that the default value for missing_data_for_missing_sample is different in this implementation as
compared to the UngriddedData implementation.
:param CommonData or CommonDataList data: The data to resample
:param str how: Collocation method (e.g. lin, nn, bin or box)
:param str or cis.collocation.col_framework.Kernel kernel:
:param bool missing_data_for_missing_sample: Should missing values in sample data be ignored for collocation?
:param float fill_value: Value to use for missing data
:param str var_name: The output variable name
:param str var_long_name: The output variable's long name
:param str var_units: The output variable's units
:return CommonData: The collocated dataset
"""
return _ungridded_sampled_from(self, data, how=how, kernel=kernel,
missing_data_for_missing_sample=missing_data_for_missing_sample,
fill_value=fill_value, var_name=var_name, var_long_name=var_long_name,
var_units=var_units, **kwargs)
def _get_default_plot_type(self, lat_lon=False):
raise NotImplementedError("UngriddedCoordinates have no default plot type")
def var_name(self):
raise NotImplementedError("UngriddedCoordinates have no var name")
class UngriddedDataList(CommonDataList):
"""
Class which represents multiple UngriddedData objects (e.g. from reading multiple variables)
"""
def __str__(self):
return "UngriddedDataList: \n%s" % super(UngriddedDataList, self).__str__()
@property
def is_gridded(self):
"""Returns value indicating whether the data/coordinates are gridded.
"""
return False
def save_data(self, output_file):
"""
Save the UngriddedDataList to a file
:param output_file: output filename
:return:
"""
logging.info('Saving data to %s' % output_file)
# Should only write coordinates out once
write_coordinates(self[0], output_file)
for data in self:
add_data_to_file(data, output_file)
def get_non_masked_points(self):
"""
        Returns a list containing a HyperPointView for each item in this UngriddedDataList; the default iterator of
        each view omits masked points.
:return: List of HyperPointViews of the data points
"""
points_list = []
for data in self:
points_list.append(data.get_non_masked_points())
return points_list
def coord(self, *args, **kwargs):
"""
        Call :func:`UngriddedData.coord(*args, **kwargs)` for the first item of data (assumes all data in the list
        share the same coordinates)
:param args:
:param kwargs:
:return:
"""
return self[0].coord(*args, **kwargs)
def copy(self):
"""
Create a copy of this UngriddedDataList with new data and coordinates
        so that they can be modified without held references being affected.
Will call any lazy loading methods in the data and coordinates
:return: Copied UngriddedData object
"""
output = UngriddedDataList()
for variable in self:
output.append(variable.copy())
return output
def as_data_frame(self, copy=True):
"""
Convert an UngriddedDataList object to a Pandas DataFrame. Note that UngriddedDataList objects are expected to
share coordinates, so only the coordinates from the first object in the list are used.
:param copy: Create a copy of the data for the new DataFrame? Default is True.
:return: A Pandas DataFrame representing the data and coordinates. Note that this won't include any metadata.
.. note::
This function will copy your data by default.
If you have a large array that cannot be copied,
make sure it is not masked and use copy=False.
"""
import numpy as np
df = self[0].as_data_frame(copy=copy)
for d in self[1:]:
try:
data = _to_flat_ndarray(d.data, copy)
except ValueError:
logging.warn("Copy created of MaskedArray for {} when creating Pandas DataFrame".format(d.name()))
data = _to_flat_ndarray(d.data, True)
df[d.name()] = data
return df
def subset(self, **kwargs):
from cis.subsetting.subset import subset, UngriddedSubsetConstraint
return subset(self, UngriddedSubsetConstraint, **kwargs)
def aggregate(self, how='', **kwargs):
"""
Aggregate the UngriddedDataList object based on the specified grids. The grid is defined by passing keyword
arguments for each dimension, each argument must be a slice, or have three entries (a maximum, a minimum and a
gridstep). The default aggregation method ('moments') returns the mean, standard deviation and number of points
as separate GriddedData objects (for each UngriddedData object in the list).
Datetime objects can be used to specify upper and lower datetime limits, or a
single PartialDateTime object can be used to specify a datetime range. The gridstep can be specified as a
DateTimeDelta object.
The keyword keys are used to find the relevant coordinate, they are looked for in order of name, standard_name,
axis and var_name.
For example:
data.aggregate(x=[-180, 180, 360], y=slice(-90, 90, 10))
or:
        data.aggregate(how='mean', t=[PartialDateTime(2008,9), timedelta(days=1)])
:param str how: The kernel to use in the aggregation (moments, mean, min, etc...)
:param kwargs: The grid specifications for each coordinate dimension
:return GriddedDataList:
"""
return _aggregate_ungridded(self, how, **kwargs)
def _coords_as_data_frame(coord_list, copy=True, time_index=True):
"""
Convert a CoordList object to a Pandas DataFrame.
:param copy: Create a copy of the data for the new DataFrame? Default is True.
:return: A Pandas DataFrame representing the data and coordinates. Note that this won't include any metadata.
"""
import pandas as pd
from cis.time_util import cis_standard_time_unit
from cf_units import Unit
columns = {}
time = None
for coord in coord_list.get_coords():
try:
data = _to_flat_ndarray(coord.data, copy)
except ValueError:
logging.warn("Copy created of MaskedArray for {} when creating Pandas DataFrame".format(coord.name()))
data = _to_flat_ndarray(coord.data, True)
if time_index and coord.standard_name == 'time':
if str(coord.units).lower() == 'datetime object':
time = data
elif isinstance(coord.units, Unit):
time = coord.units.num2date(data)
else:
time = cis_standard_time_unit.num2date(data)
else:
columns[coord.standard_name] = data
return pd.DataFrame(columns, index=time)
def _to_flat_ndarray(data, copy=True):
"""
Convert a (possibly masked) numpy array into its flat equivalent, with or without copying it.
:param data:
:param copy:
:return:
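    Example (illustrative)::
        import numpy as np
        masked = np.ma.masked_array([1, 2, 3], mask=[False, True, False])
        _to_flat_ndarray(masked)               # -> array([ 1., nan,  3.])
        _to_flat_ndarray(masked, copy=False)   # raises ValueError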
"""
import numpy as np
if isinstance(data, np.ma.MaskedArray):
if not copy:
raise ValueError("Masked arrays must always be copied.")
# We need to cast the array to a float so that we can fill the array with NaNs for Pandas (which would do the
# same trick itself anyway)
ndarr = data.astype(np.float64).filled(np.NaN).flatten()
elif copy:
ndarr = data.flatten()
else:
ndarr = data.ravel()
return ndarr
def _ungridded_sampled_from(sample, data, how='', kernel=None, missing_data_for_missing_sample=True, fill_value=None,
var_name='', var_long_name='', var_units='', **kwargs):
"""
Collocate the CommonData object with another CommonData object using the specified collocator and kernel
:param CommonData or CommonDataList data: The data to resample
:param str how: Collocation method (e.g. lin, nn, bin or box)
:param str or cis.collocation.col_framework.Kernel kernel:
:param bool missing_data_for_missing_sample: Should missing values in sample data be ignored for collocation?
:param float fill_value: Value to use for missing data
:param str var_name: The output variable name
:param str var_long_name: The output variable's long name
:param str var_units: The output variable's units
:return CommonData: The collocated dataset
"""
from cis.collocation import col_implementations as ci
from cis.data_io.gridded_data import GriddedData, GriddedDataList
from cis.collocation.col import collocate, get_kernel
if isinstance(data, UngriddedData) or isinstance(data, UngriddedDataList):
col = ci.GeneralUngriddedCollocator(fill_value=fill_value, var_name=var_name, var_long_name=var_long_name,
var_units=var_units,
missing_data_for_missing_sample=missing_data_for_missing_sample)
# Box is the default, and only option for ungridded -> ungridded collocation
if how not in ['', 'box']:
raise ValueError("Invalid method specified for ungridded -> ungridded collocation: " + how)
con = ci.SepConstraintKdtree(**kwargs)
# We can have any kernel, default to moments
kernel = get_kernel(kernel)
elif isinstance(data, GriddedData) or isinstance(data, GriddedDataList):
col = ci.GriddedUngriddedCollocator(fill_value=fill_value, var_name=var_name, var_long_name=var_long_name,
var_units=var_units,
missing_data_for_missing_sample=missing_data_for_missing_sample)
con = None
kernel = 'lin'
else:
raise ValueError("Invalid argument, data must be either GriddedData or UngriddedData")
return collocate(data, sample, col, con, kernel)
def _aggregate_ungridded(data, how, **kwargs):
"""
Aggregate an UngriddedData or UngriddedDataList based on the specified grids
:param UngriddedData or UngriddedDataList data: The data object to aggregate
:param cis.collocation.col_framework.Kernel kernel: The kernel to use in the aggregation
:param kwargs: The grid specifications for each coordinate dimension
:return:
"""
from cis.aggregation.ungridded_aggregator import UngriddedAggregator
from cis.collocation.col import get_kernel
from cis.time_util import PartialDateTime
from datetime import datetime, timedelta
from cis import __version__
kernel = get_kernel(how)
grid_spec = {}
for dim_name, grid in kwargs.items():
c = data._get_coord(dim_name)
if all(hasattr(grid, att) for att in ('start', 'stop', 'step')):
g = grid
elif len(grid) == 2 and isinstance(grid[0], PartialDateTime):
g = slice(grid[0].min(), grid[0].max(), grid[1])
elif len(grid) == 3:
g = slice(grid[0], grid[1], grid[2])
else:
raise ValueError("Invalid subset arguments: {}".format(grid))
# Fill in defaults
grid_start = g.start if g.start is not None else c.points.min()
if isinstance(grid_start, datetime):
grid_start = c.units.date2num(grid_start)
grid_end = g.stop if g.stop is not None else c.points.max()
if isinstance(grid_end, datetime):
grid_end = c.units.date2num(grid_end)
if g.step is None:
raise ValueError("Grid step must not be None")
else:
grid_step = g.step
if isinstance(grid_step, timedelta):
# Standard time is days since, so turn this into a fractional number of days
grid_step = grid_step.total_seconds() / (24*60*60)
grid_spec[c.name()] = slice(grid_start, grid_end, grid_step)
# We have to make the history before doing the aggregation as the grid dims get popped-off during the operation
history = "Aggregated using CIS version " + __version__ + \
"\n variables: " + str(getattr(data, "var_name", "Unknown")) + \
"\n from files: " + str(getattr(data, "filenames", "Unknown")) + \
"\n using new grid: " + str(grid_spec) + \
"\n with kernel: " + str(kernel) + "."
aggregator = UngriddedAggregator(grid_spec)
data = aggregator.aggregate(data, kernel)
data.add_history(history)
return data
| gpl-3.0 |
msimet/Stile | setup.py | 1 | 1343 | #!/usr/bin/env python
from setuptools import setup
from io import open
# read the contents of the README file
with open('README.md', encoding="utf-8") as f:
long_description = f.read()
setup(name='Stile',
version='0.1',
description='Stile: Systematics Tests in Lensing pipeline',
author='The Stile team',
install_requires=['numpy', 'treecorr', 'matplotlib',
'astropy<3;python_version<"3.0"',
'astropy;python_version>="3.0"'],
author_email='[email protected]',
url='https://github.com/msimet/Stile',
packages=['stile', 'stile.hsc'],
scripts=['bin/StileVisit.py', 'bin/StileVisitNoTract.py', 'bin/StileCCD.py',
'bin/StileCCDNoTract.py', 'bin/StilePatch.py', 'bin/StileTract.py'],
test_suite='nose.collector',
tests_require=['nose'],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Science/Research',
'License :: OSI Approved',
'Operating System :: MacOS :: MacOS X',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Topic :: Scientific/Engineering :: Physics'
]
)
| bsd-3-clause |
RPGOne/Skynet | scikit-learn-0.18.1/examples/applications/plot_model_complexity_influence.py | 323 | 6372 | """
==========================
Model Complexity Influence
==========================
Demonstrate how model complexity influences both prediction accuracy and
computational performance.
The dataset is the Boston Housing dataset (resp. 20 Newsgroups) for
regression (resp. classification).
For each class of models we make the model complexity vary through the choice
of relevant model parameters and measure the influence on both computational
performance (latency) and predictive power (MSE or Hamming Loss).
"""
print(__doc__)
# Author: Eustache Diemert <[email protected]>
# License: BSD 3 clause
import time
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1.parasite_axes import host_subplot
from mpl_toolkits.axisartist.axislines import Axes
from scipy.sparse.csr import csr_matrix
from sklearn import datasets
from sklearn.utils import shuffle
from sklearn.metrics import mean_squared_error
from sklearn.svm.classes import NuSVR
from sklearn.ensemble.gradient_boosting import GradientBoostingRegressor
from sklearn.linear_model.stochastic_gradient import SGDClassifier
from sklearn.metrics import hamming_loss
###############################################################################
# Routines
# initialize random generator
np.random.seed(0)
def generate_data(case, sparse=False):
"""Generate regression/classification data."""
bunch = None
if case == 'regression':
bunch = datasets.load_boston()
elif case == 'classification':
bunch = datasets.fetch_20newsgroups_vectorized(subset='all')
X, y = shuffle(bunch.data, bunch.target)
offset = int(X.shape[0] * 0.8)
X_train, y_train = X[:offset], y[:offset]
X_test, y_test = X[offset:], y[offset:]
if sparse:
X_train = csr_matrix(X_train)
X_test = csr_matrix(X_test)
else:
X_train = np.array(X_train)
X_test = np.array(X_test)
y_test = np.array(y_test)
y_train = np.array(y_train)
data = {'X_train': X_train, 'X_test': X_test, 'y_train': y_train,
'y_test': y_test}
return data
def benchmark_influence(conf):
"""
Benchmark influence of :changing_param: on both MSE and latency.
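    The ``conf`` argument is a dict of settings; an illustrative configuration
    (mirroring the ``configurations`` list at the bottom of this script) with the
    keys this function reads looks like::
        conf = {'estimator': SGDClassifier,
                'tuned_params': {'penalty': 'elasticnet', 'alpha': 0.001},
                'changing_param': 'l1_ratio',
                'changing_param_values': [0.25, 0.5, 0.75],
                'complexity_label': 'non_zero coefficients',
                'complexity_computer': _count_nonzero_coefficients,
                'prediction_performance_computer': hamming_loss,
                'prediction_performance_label': 'Hamming Loss',
                'postfit_hook': lambda clf: clf.sparsify(),
                'data': generate_data('classification', sparse=True),
                'n_samples': 30}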
"""
prediction_times = []
prediction_powers = []
complexities = []
for param_value in conf['changing_param_values']:
conf['tuned_params'][conf['changing_param']] = param_value
estimator = conf['estimator'](**conf['tuned_params'])
print("Benchmarking %s" % estimator)
estimator.fit(conf['data']['X_train'], conf['data']['y_train'])
conf['postfit_hook'](estimator)
complexity = conf['complexity_computer'](estimator)
complexities.append(complexity)
start_time = time.time()
for _ in range(conf['n_samples']):
y_pred = estimator.predict(conf['data']['X_test'])
elapsed_time = (time.time() - start_time) / float(conf['n_samples'])
prediction_times.append(elapsed_time)
pred_score = conf['prediction_performance_computer'](
conf['data']['y_test'], y_pred)
prediction_powers.append(pred_score)
print("Complexity: %d | %s: %.4f | Pred. Time: %fs\n" % (
complexity, conf['prediction_performance_label'], pred_score,
elapsed_time))
return prediction_powers, prediction_times, complexities
def plot_influence(conf, mse_values, prediction_times, complexities):
"""
Plot influence of model complexity on both accuracy and latency.
"""
plt.figure(figsize=(12, 6))
host = host_subplot(111, axes_class=Axes)
plt.subplots_adjust(right=0.75)
par1 = host.twinx()
host.set_xlabel('Model Complexity (%s)' % conf['complexity_label'])
y1_label = conf['prediction_performance_label']
y2_label = "Time (s)"
host.set_ylabel(y1_label)
par1.set_ylabel(y2_label)
p1, = host.plot(complexities, mse_values, 'b-', label="prediction error")
p2, = par1.plot(complexities, prediction_times, 'r-',
label="latency")
host.legend(loc='upper right')
host.axis["left"].label.set_color(p1.get_color())
par1.axis["right"].label.set_color(p2.get_color())
plt.title('Influence of Model Complexity - %s' % conf['estimator'].__name__)
plt.show()
def _count_nonzero_coefficients(estimator):
a = estimator.coef_.toarray()
return np.count_nonzero(a)
###############################################################################
# main code
regression_data = generate_data('regression')
classification_data = generate_data('classification', sparse=True)
configurations = [
{'estimator': SGDClassifier,
'tuned_params': {'penalty': 'elasticnet', 'alpha': 0.001, 'loss':
'modified_huber', 'fit_intercept': True},
'changing_param': 'l1_ratio',
'changing_param_values': [0.25, 0.5, 0.75, 0.9],
'complexity_label': 'non_zero coefficients',
'complexity_computer': _count_nonzero_coefficients,
'prediction_performance_computer': hamming_loss,
'prediction_performance_label': 'Hamming Loss (Misclassification Ratio)',
'postfit_hook': lambda x: x.sparsify(),
'data': classification_data,
'n_samples': 30},
{'estimator': NuSVR,
'tuned_params': {'C': 1e3, 'gamma': 2 ** -15},
'changing_param': 'nu',
'changing_param_values': [0.1, 0.25, 0.5, 0.75, 0.9],
'complexity_label': 'n_support_vectors',
'complexity_computer': lambda x: len(x.support_vectors_),
'data': regression_data,
'postfit_hook': lambda x: x,
'prediction_performance_computer': mean_squared_error,
'prediction_performance_label': 'MSE',
'n_samples': 30},
{'estimator': GradientBoostingRegressor,
'tuned_params': {'loss': 'ls'},
'changing_param': 'n_estimators',
'changing_param_values': [10, 50, 100, 200, 500],
'complexity_label': 'n_trees',
'complexity_computer': lambda x: x.n_estimators,
'data': regression_data,
'postfit_hook': lambda x: x,
'prediction_performance_computer': mean_squared_error,
'prediction_performance_label': 'MSE',
'n_samples': 30},
]
for conf in configurations:
prediction_performances, prediction_times, complexities = \
benchmark_influence(conf)
plot_influence(conf, prediction_performances, prediction_times,
complexities)
| bsd-3-clause |
wolverton-research-group/qmpy | qmpy/utils/rendering/text.py | 1 | 1813 | import logging
from .renderable import *
import qmpy
from . import point
logger = logging.getLogger(__name__)
class Text(Renderable):
def __init__(self, pt, text, **kwargs):
self.point = point.Point(pt)
self.text = text
self.options = {"ha": "left", "va": "top"}
# if self.point.coord[0] == 0:
# self.options['va'] = 'bottom'
# if self.point.coord[1] == 1:
# self.options['ha'] = 'right'
self.options.update(kwargs)
@property
def dim(self):
return self.point.dim
def draw_in_matplotlib(self, **kwargs):
if not kwargs.get("axes"):
axes = plt.gca()
else:
axes = kwargs["axes"]
if len(self.point.coord) == 2:
x, y = self.point.coord
axes.text(x, y, self.text)
elif len(self.point.coord) == 3:
x, y, z = self.point.coord
axes.text(x, y, z, self.text)
def get_flot_series(self, **kwargs):
cmd = "\no = plot.pointOffset({{ x: {x}, y: {y} }});".format(
x=self.point.coord[0], y=self.point.coord[1]
)
opts = {}
if self.options["va"] == "top":
opts["top"] = '"+( o.top )+"px'
elif self.options["va"] == "bottom":
opts["bottom"] = '"+( o.top )+"px'
if self.options["ha"] == "left":
opts["left"] = '"+( o.left )+"px'
elif self.options["ha"] == "right":
opts["right"] = '"+( o.left )+"px'
opts["position"] = "absolute"
opts = ";".join(["%s:%s" % (k, v) for k, v in list(opts.items())])
div = "<div style={options}>{string}</div>"
div = div.format(string=self.text, options=opts)
cmd += '\nplaceholder.append("{div}");'.format(div=div)
return cmd
| mit |
balavenkatesan/yellowbrick | yellowbrick/bestfit.py | 2 | 6232 | # yellowbrick.bestfit
# Uses Scikit-Learn to compute a best fit function, then draws it in the plot.
#
# Author: Benjamin Bengfort <[email protected]>
# Created: Sun Jun 26 17:27:08 2016 -0400
#
# Copyright (C) 2016 District Data Labs
# For license information, see LICENSE.txt
#
# ID: bestfit.py [56236f3] [email protected] $
"""
Uses Scikit-Learn to compute a best fit function, then draws it in the plot.
"""
##########################################################################
## Imports
##########################################################################
import numpy as np
from sklearn import linear_model
from sklearn.preprocessing import PolynomialFeatures
from sklearn.pipeline import make_pipeline
from sklearn.metrics import mean_squared_error as mse
from operator import itemgetter
from yellowbrick.style.palettes import LINE_COLOR
from yellowbrick.exceptions import YellowbrickValueError
##########################################################################
## Module Constants
##########################################################################
# Names of the various estimator functions
LINEAR = 'linear'
QUADRATIC = 'quadratic'
EXPONENTIAL = 'exponential'
LOG = 'log'
SELECT_BEST = 'select_best'
##########################################################################
## Draw Line of Best Fit
##########################################################################
def draw_best_fit(X, y, ax, estimator='linear', **kwargs):
"""
Uses Scikit-Learn to fit a model to X and y then uses the resulting model
to predict the curve based on the X values. This curve is drawn to the ax
(matplotlib axis) which must be passed as the third variable.
The estimator function can be one of the following:
'linear': Uses OLS to fit the regression
'quadratic': Uses OLS with Polynomial order 2
'exponential': Not implemented yet
'log': Not implemented yet
'select_best': Selects the best fit via MSE
The remaining keyword arguments are passed to ax.plot to define and
describe the line of best fit.
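    Example (an illustrative sketch; ``X`` and ``y`` are assumed to be existing
    one-dimensional arrays of equal length)::
        import matplotlib.pyplot as plt
        fig, ax = plt.subplots()
        ax.scatter(X, y)
        draw_best_fit(X, y, ax, estimator='select_best', ls='--', lw=2)
        plt.show()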
"""
# Estimators are the types of best fit lines that can be drawn.
estimators = {
LINEAR: fit_linear, # Uses OLS to fit the regression
QUADRATIC: fit_quadratic, # Uses OLS with Polynomial order 2
EXPONENTIAL: fit_exponential, # Not implemented yet
LOG: fit_log, # Not implemented yet
SELECT_BEST: fit_select_best, # Selects the best fit via MSE
}
# Check to make sure that a correct estimator value was passed in.
if estimator not in estimators:
raise YellowbrickValueError(
"'{}' not a valid type of estimator; choose from {}".format(
estimator, ", ".join(estimators.keys())
)
)
# Then collect the estimator function from the mapping.
estimator = estimators[estimator]
# Ensure that X and y are the same length
if len(X) != len(y):
raise YellowbrickValueError((
"X and y must have same length:"
" X len {} doesn't match y len {}!"
).format(len(X), len(y)))
# Ensure that X and y are np.arrays
X = np.array(X)
y = np.array(y)
# Verify that X is a two dimensional array for Scikit-Learn esitmators
# and that its dimensions are (n, 1) where n is the number of rows.
if X.ndim < 2:
X = X[:,np.newaxis] # Reshape X into the correct dimensions
if X.ndim > 2:
raise YellowbrickValueError(
"X must be a (1,) or (n,1) dimensional array not {}".format(x.shape)
)
# Verify that y is a (n,) dimensional array
if y.ndim > 1:
raise YellowbrickValueError(
"y must be a (1,) dimensional array not {}".format(y.shape)
)
# Uses the estimator to fit the data and get the model back.
model = estimator(X, y)
# Set the color if not passed in.
if 'c' not in kwargs and 'color' not in kwargs:
kwargs['color'] = LINE_COLOR
# Plot line of best fit onto the axes that were passed in.
# TODO: determine if xlim or X.min(), X.max() are better params
xr = np.linspace(*ax.get_xlim(), num=100)
ax.plot(xr, model.predict(xr[:,np.newaxis]), **kwargs)
return ax
##########################################################################
## Estimator Functions
##########################################################################
def fit_select_best(X, y):
"""
Selects the best fit of the estimators already implemented by choosing the
model with the smallest mean square error metric for the trained values.
"""
models = [fit(X,y) for fit in [fit_linear, fit_quadratic]]
errors = map(lambda model: mse(y, model.predict(X)), models)
return min(zip(models, errors), key=itemgetter(1))[0]
def fit_linear(X, y):
"""
Uses OLS to fit the regression.
"""
model = linear_model.LinearRegression()
model.fit(X, y)
return model
def fit_quadratic(X, y):
"""
Uses OLS with Polynomial order 2.
"""
model = make_pipeline(
PolynomialFeatures(2), linear_model.LinearRegression()
)
model.fit(X, y)
return model
def fit_exponential(X, y):
"""
Fits an exponential curve to the data.
"""
raise NotImplementedError("Exponential best fit lines are not implemented")
def fit_log(X, y):
"""
    Fit a logarithmic curve to the data.
"""
raise NotImplementedError("Logrithmic best fit lines are not implemented")
if __name__ == '__main__':
import os
import pandas as pd
import matplotlib.pyplot as plt
path = os.path.join(os.path.dirname(__file__), "..", "examples", "data", "concrete.xls")
if not os.path.exists(path):
raise Exception("Could not find path for testing")
xkey = 'Fine Aggregate (component 7)(kg in a m^3 mixture)'
ykey = 'Coarse Aggregate (component 6)(kg in a m^3 mixture)'
data = pd.read_excel(path)
fig, axe = plt.subplots()
axe.scatter(data[xkey], data[ykey])
draw_best_fit(data[xkey], data[ykey], axe, 'select_best')
plt.show()
| apache-2.0 |
Achuth17/scikit-learn | sklearn/datasets/tests/test_20news.py | 280 | 3045 | """Test the 20news downloader, if the data is available."""
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import SkipTest
from sklearn import datasets
def test_20news():
try:
data = datasets.fetch_20newsgroups(
subset='all', download_if_missing=False, shuffle=False)
except IOError:
raise SkipTest("Download 20 newsgroups to run this test")
# Extract a reduced dataset
data2cats = datasets.fetch_20newsgroups(
subset='all', categories=data.target_names[-1:-3:-1], shuffle=False)
# Check that the ordering of the target_names is the same
# as the ordering in the full dataset
assert_equal(data2cats.target_names,
data.target_names[-2:])
# Assert that we have only 0 and 1 as labels
assert_equal(np.unique(data2cats.target).tolist(), [0, 1])
# Check that the number of filenames is consistent with data/target
assert_equal(len(data2cats.filenames), len(data2cats.target))
assert_equal(len(data2cats.filenames), len(data2cats.data))
# Check that the first entry of the reduced dataset corresponds to
# the first entry of the corresponding category in the full dataset
entry1 = data2cats.data[0]
category = data2cats.target_names[data2cats.target[0]]
label = data.target_names.index(category)
entry2 = data.data[np.where(data.target == label)[0][0]]
assert_equal(entry1, entry2)
def test_20news_length_consistency():
"""Checks the length consistencies within the bunch
This is a non-regression test for a bug present in 0.16.1.
"""
try:
data = datasets.fetch_20newsgroups(
subset='all', download_if_missing=False, shuffle=False)
except IOError:
raise SkipTest("Download 20 newsgroups to run this test")
# Extract the full dataset
data = datasets.fetch_20newsgroups(subset='all')
assert_equal(len(data['data']), len(data.data))
assert_equal(len(data['target']), len(data.target))
assert_equal(len(data['filenames']), len(data.filenames))
def test_20news_vectorized():
# This test is slow.
raise SkipTest("Test too slow.")
bunch = datasets.fetch_20newsgroups_vectorized(subset="train")
assert_true(sp.isspmatrix_csr(bunch.data))
assert_equal(bunch.data.shape, (11314, 107428))
assert_equal(bunch.target.shape[0], 11314)
assert_equal(bunch.data.dtype, np.float64)
bunch = datasets.fetch_20newsgroups_vectorized(subset="test")
assert_true(sp.isspmatrix_csr(bunch.data))
assert_equal(bunch.data.shape, (7532, 107428))
assert_equal(bunch.target.shape[0], 7532)
assert_equal(bunch.data.dtype, np.float64)
bunch = datasets.fetch_20newsgroups_vectorized(subset="all")
assert_true(sp.isspmatrix_csr(bunch.data))
assert_equal(bunch.data.shape, (11314 + 7532, 107428))
assert_equal(bunch.target.shape[0], 11314 + 7532)
assert_equal(bunch.data.dtype, np.float64)
| bsd-3-clause |
MuhammadVT/davitpy | davitpy/pydarn/proc/fov/test_update_backscatter.py | 1 | 105835 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#-----------------------------------------------------------------------------
# test_update_backscatter.py, Angeline G. Burrell (AGB), UoL
#
# Comments: Scripts to create plots and calculate statistics that test
# the routines in update_backscatter that determine the origin
# field-of-view (FoV) for radar backscatter
#-----------------------------------------------------------------------------
"""test_update_backscatter
Scripts to create plots and calculate statistics that test the routines in
update_backscatter that determine the origin field-of-view (FoV) for radar
backscatter
Functions
-------------------------------------------------------------
add_colorbar add to existing figure
get_sorted_legend_labels sort by hop and region
get_fractional_hop_labels hop decimal to fraction
plot_yeoman_plate1 Yeoman(2001) based plot
plot_milan_figure9 Milan(1997) based plot
plot_storm_figures plot E-region scatter
plot_single_column plot subplots
load_test_beams data for a test period
plot_scan_and_beam plot attribute for front/rear FoV
plot_meteor_figure compare HWM14 with LoS velocity
plot_map plot a FoV map
-------------------------------------------------------------
Author: Angeline G. Burrell (AGB)
Date: August 12, 2015
Inst: University of Leicester (UoL)
"""
# Import python packages
import os
import numpy as np
import datetime as dt
import logging
import matplotlib.ticker as ticker
import matplotlib.cm as cm
import matplotlib.dates as mdates
import matplotlib.pyplot as plt
import mpl_toolkits.basemap as basemap
import matplotlib.gridspec as gridspec
import matplotlib.collections as mcol
import matplotlib.colors as mcolors
# Import DaViTpy packages
import update_backscatter as ub
#--------------------------------------------------------------------------
# Define the colors (can be overwritten)
clinear = "Spectral_r"
ccenter = "Spectral"
morder = {"region":{"D":0, "E":1, "F":2},
"reg":{"0.5D":0, "1.0D":1, "0.5E":2, "1.0E":3, "1.5E":4, "2.0E":5,
"2.5E":6, "3.0E":7, "0.5F":8, "1.0F":9, "1.5F":10, "2.0F":11,
"2.5F":12, "3.0F":13},
"hop":{0.5:0, 1.0:1, 1.5:2, 2.0:3, 2.5:4, 3.0:5},}
mc = {"region":{"D":"g", "E":"m", "F":"b"},
"reg":{"0.5D":"g", "1.0D":"y", "0.5E":"r", "1.0E":"m",
"1.5E":(1.0, 0.5, 1.0), "2.0E":(0.5, 0, 0.25),
"2.5E":(1.0,0.7,0.2), "3.0E":(0.5, 0, 0.1),
"0.5F":(0.0, 0.0, 0.75), "1.0F":(0.0, 0.5, 0.5), "1.5F":"b",
"2.0F":"c", "2.5F":(0.25, 0.75, 1.0), "3.0F":(0.0, 1.0, 1.0)},
"hop":{0.5:"b", 1.0:"r", 1.5:"c", 2.0:"m",
2.5:(0.25, 0.75, 1.0), 3.0:(0.5, 0, 0.25)},}
mm = {"region":{"D":"d", "E":"o", "F":"^", "all":"|"},
"reg":{"0.5D":"d", "1.0D":"Y", "0.5E":"^", "1.0E":"o", "1.5E":">",
"2.0E":"8", "2.5E":"*", "3.0E":"H", "0.5F":"v", "1.0F":"p",
"1.5F":"<", "2.0F":"h", "2.5F":"D", "3.0F":"s", "all":"|"},
"hop":{0.5:"d", 1.0:"o", 1.5:"s", 2.0:"^", 2.5:"v", 3.0:"p",
"all":"|"},}
#--------------------------------------------------------------------------
def add_colorbar(figure_handle, contour_handle, zmin, zmax, zinc=6, name=None,
units=None, orient="vertical", scale="linear", width=1,
loc=[0.9,0.1,0.03,0.8]):
"""Add a colorbar to an existing figure
Parameters
--------
figure_handle : (pointer)
handle to the figure
contour_handle : (pointer)
handle to contour plot
zmin : (float or int)
minimum z value
zmax : (float or int)
maximum z value
zinc : (float or int)
        z tick increment (default=6)
name : (str or NoneType)
z variable name (default=None)
units : (str or NoneType)
z variable units (default=None)
orient : (str)
bar orientation (horizontal, default=vertical)
scale : (str)
linear (default) or exponential
width : (float or int)
fraction of width (0-1), default=1
loc : (list)
        location of colorbar (default=[0.9,0.1,0.03,0.8])
Returns
-------
ax2 : (pointer)
handle to the colorbar axis
cb : (pointer)
handle to the colorbar
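    Example
    -------
    An illustrative call (assumes ``fig`` is a matplotlib figure and ``con`` is
    the handle returned by a contourf or pcolormesh call on one of its axes)::
        ax2, cb = add_colorbar(fig, con, 0.0, 30.0, zinc=6, name="Power",
                               units="dB", orient="vertical")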
"""
# Set the z range and output the colorbar
w = np.linspace(zmin, zmax, zinc, endpoint=True)
ax2 = figure_handle.add_axes(loc)
cb = plt.colorbar(contour_handle, cax=ax2, ticks=w, orientation=orient)
# See if the upper and lower limits are multiples of pi
if zmin % np.pi == 0 and zmax % np.pi == 0:
wfac = w / np.pi
w = list(w)
for i,wval in enumerate(wfac):
if wval == 0.0:
w[i] = "{:.0f}".format(wval)
elif wval == 1.0:
w[i] = "$\pi$"
elif wval == -1.0:
w[i] = "-$\pi$"
elif wval == int(wval):
w[i] = "{:.0f}$\pi$".format(wval)
else:
w[i] = "{:.2f}$\pi$".format(wval)
        if orient == "vertical":
cb.ax.set_yticklabels(w)
else:
cb.ax.set_xticklabels(w)
# Change the z scale, if necessary
    if scale == "exponential":
        cb.formatter = ticker.FormatStrFormatter('%7.2E')
# Set the label and update the ticks
if name is not None:
if units is not None:
cb.set_label(r'{:s} (${:s}$)'.format(name, units))
else:
cb.set_label(r'{:s}'.format(name))
cb.update_ticks()
# Return the handle for the colorbar (which is treated as a subplot)
# to allow for additional adjustments
return ax2, cb
#--------------------------------------------------------------------------
def get_sorted_legend_labels(ax, marker_key="reg"):
"""Sort legend labels by hop and region
Parameters
-----------
ax : (pointer)
handle to figure axis
marker_key : (str)
key to denote what type of marker labels are being used (default="reg")
Returns
---------
handles : (list)
ordered list of marker handles
labels : (list)
ordered list of marker labels
"""
handles, labels = ax.get_legend_handles_labels()
try:
lind = {morder[marker_key][ll]:il for il,ll in enumerate(labels)}
except:
lind = {morder[marker_key][float(ll)]:il for il,ll in enumerate(labels)}
order = [lind[k] for k in sorted(lind.keys())]
return [handles[i] for i in order], [labels[i] for i in order]
#--------------------------------------------------------------------------
def get_fractional_hop_labels(legend_labels):
"""Change decimal hop labels to traditional fractions
Parameters
-----------
legend_labels : (list)
List of strings containing decimal hops (eg ["0.5", "1.0", "1.5"])
Returns
--------
legend_labels : (list)
        List of strings containing fractional hops
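    Example
    -------
    Illustrative call (output strings use LaTeX fraction markup)::
        get_fractional_hop_labels(["0.5", "1.0", "1.5"])
        # returns ["$\\frac{1}{2}$", "1", "1$\\frac{1}{2}$"]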
"""
for i,ll in enumerate(legend_labels):
ll = ll.replace("0.5", r"$\frac{1}{2}$")
ll = ll.replace(".5", r"$\frac{1}{2}$")
ll = ll.replace(".0", "")
legend_labels[i] = ll
return(legend_labels)
#--------------------------------------------------------------------------
def plot_yeoman_plate1(intensity_all="p_l", intensity_sep="fovelv",
marker_key="reg", color={"p_l":clinear,"fovelv":clinear},
acolor="0.9", stime=dt.datetime(1998,10,15,12,10),
etime=dt.datetime(1998,10,15,12,50),
imin={"p_l":0.0, "fovelv":0.0},
imax={"p_l":30.0, "fovelv":40.0},
iinc={"p_l":6, "fovelv":6}, ymin=20, ymax=60,
rad_bms={"han":5, "pyk":15},
rad_cp={"han":-6312, "pyk":-6312},
fix_gs={"han":[[0,76]]}, figname=None, password=True,
file_type="fitacf", logfile=None, log_level=logging.WARN,
min_pnts=3, region_hmin={"D":75.0,"E":115.0,"F":150.0},
region_hmax={"D":115.0,"E":150.0,"F":900.0},
rg_box=[2,5,10,20], vh_box=[50.0,50.0,50.0,150.0],
max_rg=[5,25,40,76], max_hop=3.0,
ut_box=dt.timedelta(minutes=20.0), tdiff=list(),
tdiff_e=list(), tdiff_time=list(), ptest=True, step=6,
strict_gs=True, draw=True, label_type="frac",
beams=dict()):
"""Plot based on Plate 1 in Yeoman et al (2001) Radio Science, 36, 801-813.
Parameters
-----------
intensity_all : (str)
        Intensity attribute to plot in the top figure with unseparated fields-
of-view. Uses SuperDARN beam fit attribute names. (default="p_l")
intensity_sep : (str)
Intensity attribute to plot in the separated fields-of-view. Uses
SuperDARN beam fit attribute names. (default="fovelv")
marker_key : (str)
key to denote what type of marker labels are being used (default="reg")
color : (dict)
        Intensity color scheme. Defaults to the standard centered color
        scheme for this program. Yeoman et al (2001) used "jet".
        (default={"p_l":clinear, "fovelv":clinear})
acolor : (str or tuple)
Background color for subplots (default="0.9" - light grey)
stime : (dt.datetime)
Starting time of plot (will pad loaded data).
(default=dt.datetime(1998,10,15,12,10))
etime : (dt.datetime)
Ending time of plot (will pad loaded data).
(default=dt.datetime(1998,10,15,12,50))
imin : (dict)
Intensity minimums (default={"p_l":0.0, "fovelv":0.0})
imax : (dict)
Intensity maximums (default={"p_l":30.0, "fovelv":40.0})
iinc : (dict)
        Intensity tick increments (default={"p_l":6, "fovelv":6})
ymin : (int or float)
Lowest plotted range gate (default=20)
ymax : (int or float)
Highest plotted range gate (default=60)
rad_bms : (dict)
Dictionary with radar code names as keys and the beam to process
as the value. (default={"han":5,"pyk":15})
rad_cp : (dict)
Dictionary with radar program mode to load
(default={"han":-6312, "pyk":-6312})
fix_gs : (dict)
        Dictionary with radar code names as keys and min/max range gate pairs
        in a list, specifying the ranges where groundscatter should be flagged
        as ionospheric backscatter (heater backscatter behaves slightly
        differently than normal ionospheric backscatter).
        (default={"han":[[0,76]]})
figname : (str or NoneType)
Figure name or None if no figure is to be saved (default=None)
    password : (boolean or str)
When downloading data from your specified SuperDARN mirror site, a
password may be needed. It may be included here or, if True is used,
a prompt will appear for you to enter it securely. (default=True)
file_type : (str)
Type of data file to download (default="fitacf")
logfile : (str or NoneType)
Name of file to hold the log output or None for stdout. (default=None)
log_level : (int)
Level of output to report. Flag values explained in logging module.
(default=logging.WARNING)
min_pnts : (int)
The minimum number of points necessary to perform certain range gate
or beam specific evaluations. (default=3)
region_hmin : (dict)
Minimum virtual heights allowed in each ionospheric layer.
(default={"D":75.0,"E":115.0,"F":150.0})
region_hmax : (dict)
Maximum virtual heights allowed in each ionospheric layer.
(default={"D":115.0,"E":150.0,"F":900.0})
rg_box : (list of int)
The total number of range gates to include when examining the elevation
angle across all beams. (default=[2,5,10,20])
vh_box : (list of float)
The total width of the altitude box to consider when examining the
elevation angle across all beams at a given range gate.
(default=[50.0,50.0,50.0,150.0])
max_rg : (list)
        Maximum range gate to which the corresponding rg_box and vh_box values apply
(default=[5,25,40,76])
max_hop : (float)
Maximum hop that the corresponding rg_box and vh_box values applies
to. (default=3.0)
ut_box : (class dt.timedelta)
Total width of universal time box to examine for backscatter FoV
continuity. (default=20.0 minutes)
tdiff : (list)
A list of tdiff values (in microsec) or an empty list (to use the
hardware value) (default=list())
tdiff_e : (list)
A list containing the tdiff error (in microsec) or an empty list
(no elevation/virtual height error will be computed). (default=list())
tdiff_time : (list)
A list containing the starting time (datetimes) for each tdiff.
(default=list())
    ptest : (boolean)
        Test to see if a propagation path is realistic (default=True)
    step : (int)
        Level of processing to perform (1-6). 6 performs all steps. (default=6)
    strict_gs : (boolean)
        Remove indeterminately flagged backscatter (default=True)
    draw : (boolean)
Output figure to display? (default=True)
label_type : (str)
Type of hop label to use (frac/decimal) (default="frac")
beams : (dict)
Dictionary with radar codes as keys for the dictionaries containing
beams with the data used to create the plots. Will create this data
if it is not provided (default=dict())
Returns
---------
f : (pointer)
Figure handle
ax : (dict)
Dictionary of axis handles
beams : (dict)
Dictionary with radar codes as keys for the dictionaries containing
beams with the data used to create the plots
"""
import davitpy.pydarn.radar as pyrad
rn = "plot_yeoman_plate1"
# Load and process the desired data
dout = load_test_beams(intensity_all, intensity_sep, stime, etime,
rad_bms, rad_cp, fix_gs=fix_gs,
marker_key=marker_key, password=password,
file_type=file_type, logfile=logfile,
log_level=log_level, min_pnts=min_pnts,
region_hmax=region_hmax, region_hmin=region_hmin,
rg_box=rg_box, vh_box=vh_box, max_rg=max_rg,
max_hop=max_hop, ut_box=ut_box, tdiff=tdiff,
tdiff_e=tdiff_e, tdiff_time=tdiff_time, ptest=ptest,
step=step, strict_gs=strict_gs, beams=beams)
rad = rad_bms.keys()[0]
if not dout[0].has_key(rad) or len(dout[0][rad]) == 0:
return(dout[0], dout[1], dout[2], beams)
# Recast the data as numpy arrays
xtime = {rad:np.array(dout[0][rad]) for rad in rad_bms.keys()}
yrange = {rad:np.array(dout[1][rad]) for rad in rad_bms.keys()}
zdata = {rad:{ii:np.array(dout[2][ii][rad]) for ii in dout[2].keys()}
for rad in rad_bms.keys()}
zi = {rad:{ff:{hh:dout[3][ff][rad][hh] for hh in dout[3][ff][rad].keys()}
for ff in dout[3].keys()} for rad in rad_bms.keys()}
# Initialize the figure
iax = {ff:i for i,ff in enumerate(["all",1,-1,0])}
irad = {rad:i for i,rad in enumerate(rad_bms.keys())}
f = plt.figure(figsize=(12,10))
ax = {rad:{ff:f.add_subplot(4,2,2*(iax[ff]+1)-irad[rad])
for ff in iax.keys()} for rad in irad.keys()}
pos = {ff:[.91,.14+(3-iax[ff])*.209,.01,.183] for ff in iax.keys()}
xpos = {13:2.3, 12:2.2, 11:2.1, 10:1.95, 9:1.9, 8:1.8, 7:1.7, 6:1.6, 5:1.5,
4:1.4, 3:1.3, 2:1.2, 1:1.1}
ylabel = {"all":"Range Gate",1:"Front\nRange Gate",-1:"Rear\nRange Gate",
0:"Unassigned\nRange Gate"}
cb = dict()
handles = list()
labels = list()
# Cycle through each plot, adding the appropriate data
for rad in irad.keys():
# Cycle through the field-of-view keys
for ff in ax[rad].keys():
# Add a background color to the subplot
ax[rad][ff].set_axis_bgcolor(acolor)
# Plot the data
            if ff == "all":
zz = zi[rad][ff]['all']
ii = intensity_all
con = ax[rad][ff].scatter(xtime[rad][zz], yrange[rad][zz],
c=zdata[rad][ii][zz],
cmap=cm.get_cmap(color[ii]),
vmin=imin[ii], vmax=imax[ii], s=20,
edgecolor="face", linewidth=2.0,
marker=mm[marker_key][ff])
else:
ii = intensity_sep
for hh in mc[marker_key].keys():
zz = zi[rad][ff][hh]
try:
label = "{:.1f}".format(hh)
except:
label = "{:}".format(hh)
                    if intensity_sep == "hop":
ax[rad][ff].plot(xtime[rad][zz], yrange[rad][zz], ms=8,
edgecolor="face",
marker=mm[marker_key][hh],
color=mc[marker_key][hh], label=label)
elif len(zz) > 0:
con = ax[rad][ff].scatter(xtime[rad][zz],
yrange[rad][zz],
c=zdata[rad][ii][zz],
cmap=cm.get_cmap(color[ii]),
vmin=imin[ii], vmax=imax[ii],
s=20, edgecolor="face",
marker=mm[marker_key][hh],
label=label)
# Format the axes; add titles, colorbars, and labels
ax[rad][ff].set_xlim(mdates.date2num(stime),
mdates.date2num(etime))
ax[rad][ff].set_ylim(ymin-1,ymax+2)
ax[rad][ff].xaxis.set_major_locator( \
mdates.MinuteLocator(interval=10))
ax[rad][ff].yaxis.set_major_locator(ticker.MultipleLocator(10))
if irad[rad] == 1:
ax[rad][ff].set_ylabel(ylabel[ff])
if irad[rad] == 0:
tfmt = ticker.FormatStrFormatter("")
ax[rad][ff].yaxis.set_major_formatter(tfmt)
            if ii != "hop":
label = pyrad.radUtils.getParamDict(ii)['label']
unit = pyrad.radUtils.getParamDict(ii)['unit']
if not iinc.has_key(ii):
iinc[ii] = 6
cb[ff] = add_colorbar(f, con, imin[ii], imax[ii], iinc[ii],
label, unit, loc=pos[ff])
            if ff == "all":
ax[rad][ff].set_title("{:s} Beam {:d}".format(rad.upper(),
rad_bms[rad]),
fontsize="medium")
if iax[ff] == 3:
tfmt = mdates.DateFormatter("%H:%M")
ax[rad][ff].set_xlabel("Universal Time (HH:MM)")
# Add legend labels and handles
hl, ll = get_sorted_legend_labels(ax[rad][ff], marker_key)
if len(handles) == 0:
handles = hl
labels = ll
else:
for il,label in enumerate(ll):
try:
labels.index(label)
except:
# This label is not currently available, add it
lind = morder[marker_key][label]
jl = 0
while lind > morder[marker_key][labels[jl]]:
jl += 1
labels.insert(jl, label)
handles.insert(jl, hl[il])
else:
tfmt = mdates.DateFormatter("")
ax[rad][ff].xaxis.set_major_formatter(tfmt)
if label_type.find("frac") >= 0:
flabels = get_fractional_hop_labels(labels)
ax[rad][0].legend(handles, flabels, fontsize="medium", scatterpoints=1,
ncol=len(flabels), title="Hop", labelspacing=.1,
columnspacing=0, markerscale=2, borderpad=0.3,
handletextpad=0, bbox_to_anchor=(xpos[len(flabels)],-0.3))
plt.subplots_adjust(hspace=.15, wspace=.15, bottom=.14, left=.1, top=.95)
if draw:
# Draw to screen.
if plt.isinteractive():
plt.draw() #In interactive mode, you just "draw".
else:
# W/o interactive mode, "show" stops the user from typing more
# at the terminal until plots are drawn.
plt.show()
if figname is not None:
f.savefig(figname)
return(f, ax, beams)
#-------------------------------------------------------------------------
def plot_milan_figure9(intensity_all="p_l", intensity_sep="p_l",
marker_key="reg", color={"p_l":clinear, "p_l":clinear},
acolor="0.9", stime=dt.datetime(1995,12,14,5),
etime=dt.datetime(1995,12,14,16), imin={"p_l":0.0},
imax={"p_l":30.0}, rad="han", bmnum=2, cp=127,
figname=None, password=True, file_type="fitacf",
logfile=None, log_level=logging.WARN, min_pnts=3,
region_hmax={"D":115.0,"E":150.0,"F":900.0},
region_hmin={"D":75.0,"E":115.0,"F":150.0},
rg_box=[2,5,10,20], vh_box=[50.0,50.0,50.0,150.0],
max_rg=[5,25,40,76], max_hop=3.0,
ut_box=dt.timedelta(minutes=20.0), tdiff=list(),
tdiff_e=list(), tdiff_time=list(), ptest=True, step=6,
strict_gs=True, draw=True, label_type="frac",
beams=dict()):
"""Plot based on Figure 9 in Milan et al (1997) Annales Geophysicae, 15,
29-39.
Parameters
-----------
intensity_all : (str)
        Intensity attribute to plot in the top figure with unseparated fields-
of-view. Uses SuperDARN beam fit attribute names. (default="p_l")
intensity_sep : (str)
Intensity attribute to plot in the separated fields-of-view. Uses
        SuperDARN beam fit attribute names. (default="p_l")
marker_key : (str)
key to denote what type of marker labels are being used (default="reg")
color : (dict of str)
Intensity color scheme. Defaults to the standard centered color
scheme for this program. Yeoman et al (2001) used "jet".
acolor : (str or tuple)
Background color for subplots (default="0.9" - light grey)
stime : (dt.datetime)
Starting time of plot (will pad loaded data).
        (default=dt.datetime(1995,12,14,5))
etime : (dt.datetime)
Ending time of plot (will pad loaded data).
        (default=dt.datetime(1995,12,14,16))
imin : (dict)
Intensity minimums (default={"p_l":0.0})
imax : (dict)
Intensity maximums (default={"p_l":30.0})
rad : (str)
radar code (default="han")
bmnum : (int)
Beam number (default=2)
cp : (int)
Radar programming code number. (default=127)
figname : (str or NoneType)
Figure name or None if no figure is to be saved (default=None)
    password : (boolean or str)
When downloading data from your specified SuperDARN mirror site, a
password may be needed. It may be included here or, if True is used,
a prompt will appear for you to enter it securely. (default=True)
file_type : (str)
Type of data file to download (default="fitacf")
logfile : (str or NoneType)
Name of file to hold the log output or None for stdout. (default=None)
log_level : (int)
Level of output to report. Flag values explained in logging module.
(default=logging.WARNING)
min_pnts : (int)
The minimum number of points necessary to perform certain range gate
or beam specific evaluations. (default=3)
region_hmax : (dict)
Maximum virtual heights allowed in each ionospheric layer.
(default={"D":115.0,"E":150.0,"F":900.0})
region_hmin : (dict)
Minimum virtual heights allowed in each ionospheric layer.
(default={"D":75.0,"E":115.0,"F":150.0})
rg_box : (list of int)
The total number of range gates to include when examining the elevation
angle across all beams. (default=[2,5,10,20])
vh_box : (list of float)
The total width of the altitude box to consider when examining the
elevation angle across all beams at a given range gate.
(default=[50.0,50.0,50.0,150.0])
max_rg : (list)
        Maximum range gate to which the corresponding rg_box and vh_box values apply
(default=[5,25,40,76])
max_hop : (float)
Maximum hop that the corresponding rg_box and vh_box values applies
to. (default=3.0)
ut_box : (class dt.timedelta)
Total width of universal time box to examine for backscatter FoV
continuity. (default=20.0 minutes)
tdiff : (list)
A list of tdiff values (in microsec) or an empty list (to use the
hardware value) (default=list())
tdiff_e : (list)
A list containing the tdiff error (in microsec) or an empty list
(no elevation/virtual height error will be computed). (default=list())
tdiff_time : (list)
A list containing the starting time (datetimes) for each tdiff.
(default=list())
    ptest : (boolean)
        Test to see if a propagation path is realistic (default=True)
    step : (int)
        Level of processing to perform (1-6). 6 performs all steps. (default=6)
    strict_gs : (boolean)
        Remove indeterminately flagged backscatter (default=True)
    draw : (boolean)
Output figure to display? (default=True)
label_type : (str)
Type of hop label to use (frac/decimal) (default="frac")
beams : (dict)
Dictionary with radar codes as keys for the dictionaries containing
beams with the data used to create the plots. Will create this data
if it is not provided (default=dict())
Returns
---------
f : (pointer)
Figure handle
ax : (dict)
Dictionary of axis handles
cb : (dict)
Dictionary of colorbar handles
beams : (dict)
Dictionary with radar codes as keys for the dictionaries containing
beams with the data used to create the plots
"""
# Load and process the desired data
dout = load_test_beams(intensity_all, intensity_sep, stime, etime,
{rad:bmnum}, {rad:cp}, marker_key=marker_key,
password=password, file_type=file_type,
logfile=logfile,log_level=log_level,
min_pnts=min_pnts, region_hmax=region_hmax,
region_hmin=region_hmin, rg_box=rg_box,
vh_box=vh_box, max_rg=max_rg, max_hop=max_hop,
ut_box=ut_box, tdiff=tdiff, tdiff_e=tdiff_e,
tdiff_time=tdiff_time, ptest=ptest, step=step,
strict_gs=strict_gs, beams=beams)
if not dout[0].has_key(rad) or len(dout[0][rad]) == 0:
logging.error("can't find radar [" + rad + "] in data:" + dout[0].keys())
return(dout[0], dout[1], dout[2], beams)
# Recast the data as numpy arrays
xtime = np.array(dout[0][rad])
yrange = np.array(dout[1][rad])
zdata = {ff:np.array(dout[2][ff][rad]) for ff in dout[2].keys()}
zi = {ff:{hh:dout[3][ff][rad][hh] for hh in dout[3][ff][rad].keys()}
for ff in dout[3].keys()}
# Initialize the figure
f = plt.figure(figsize=(7,10))
if cp is not None:
ftitle = "{:s} Beam {:d} CP {:d} on {:}".format(rad.upper(), bmnum, cp,
stime.date())
else:
ftitle = "{:s} Beam {:d} on {:}".format(rad.upper(), bmnum,
stime.date())
ax, cb = plot_single_column(f, xtime, yrange, zdata, zi,
{"all":intensity_all, "sep":intensity_sep},
color, marker_key=marker_key,
xmin=mdates.date2num(stime),
xmax=mdates.date2num(etime), ymin=-1, ymax=76,
zmin=imin, zmax=imax,
xfmt=mdates.DateFormatter("%H:%M"), yfmt=None,
xinc=mdates.HourLocator(interval=2),
yinc=ticker.MultipleLocator(15),
xlabel="Universal Time (HH:MM)",
ylabels=["Range Gate","Front\nRange Gate",
"Rear\nRange Gate",
"Unassigned\nRange Gate"],
titles=["","","",""], plot_title=ftitle,
label_type=label_type, acolor=acolor,
draw=False)
if draw:
# Draw to screen.
if plt.isinteractive():
plt.draw() #In interactive mode, you just "draw".
else:
# W/o interactive mode, "show" stops the user from typing more
# at the terminal until plots are drawn.
plt.show()
if figname is not None:
f.savefig(figname)
return(f, ax, cb, beams)
#-------------------------------------------------------------------------
def plot_storm_figures(intensity_all="v", intensity_sep="v", marker_key="reg",
color={"v":ccenter}, acolor="0.9",
stime=dt.datetime(1997,10,10,15),
etime=dt.datetime(1997,10,10,20),
mtimes=[dt.datetime(1997,10,10,16),
dt.datetime(1997,10,10,17,30),
dt.datetime(1997,10,10,18,30),
dt.datetime(1997,10,10,19,30)],
imin={"v":-500.0}, imax={"v":500.0}, zinc={"v":5.0},
rad="pyk", bmnum=0, cp=None, figname_time=None,
figname_maps=None, password=True, file_type="fitacf",
logfile=None, log_level=logging.WARN, min_pnts=3,
region_hmax={"D":115.0,"E":150.0,"F":900.0},
region_hmin={"D":75.0,"E":115.0,"F":150.0},
rg_box=[2,5,10,20], vh_box=[50.0,50.0,50.0,150.0],
max_rg=[5,25,40,76], max_hop=3.0,
ut_box=dt.timedelta(minutes=20.0), tdiff=list(),
tdiff_e=list(), tdiff_time=list(), ptest=True, step=6,
strict_gs=True, draw=True, label_type="frac",
beams=dict()):
"""Plot showing a period of time where E-region scatter past over the
radar at pyk.
Parameters
-----------
intensity_all : (str)
        Intensity attribute to plot in the top figure with unseparated fields-
of-view. Uses SuperDARN beam fit attribute names. (default="v")
intensity_sep : (str)
Intensity attribute to plot in the separated fields-of-view. Uses
        SuperDARN beam fit attribute names. (default="v")
marker_key : (str)
key to denote what type of marker labels are being used (default="reg")
color : (dict of str)
Intensity color scheme. Defaults to the standard centered color
scheme for this program.
acolor : (str or tuple)
Background color for subplots (default="0.9" - light grey)
stime : (dt.datetime)
Starting time of plot (will pad loaded data).
(default=dt.datetime(1997,10,10,15))
etime : (dt.datetime)
Ending time of plot (will pad loaded data).
(default=dt.datetime(1997,10,10,20))
mtimes : (list of dt.datetimes)
Times to plot maps, unless no times are provided.
(default=[dt.datetime(1997,10,10,16), dt.datetime(1997,10,10,17,30),
dt.datetime(1997,10,10,18,30), dt.datetime(1997,10,10,19,30)])
imin : (dict)
Dictionary of intensity minimums (default={"v":-500.0})
imax : (dict)
Dictionary of intensity maximums (default={"v":500.0})
zinc : (dict)
        Dictionary of intensity colorbar tick increments (default={"v":5.0})
rad : (str)
radar code (default="pyk")
bmnum : (int)
Beam number (default=0)
cp : (int)
        Radar programming code number. (default=None)
figname_time : (str or NoneType)
Figure name or None if the time figure is not to be saved (default=None)
figname_maps : (str or NoneType)
Figure name or None if the map figure is not to be saved (default=None)
    password : (boolean or str)
When downloading data from your specified SuperDARN mirror site, a
password may be needed. It may be included here or, if True is used,
a prompt will appear for you to enter it securely. (default=True)
file_type : (str)
Type of data file to download (default="fitacf")
logfile : (str or NoneType)
Name of file to hold the log output or None for stdout. (default=None)
log_level : (int)
Level of output to report. Flag values explained in logging module.
(default=logging.WARNING)
min_pnts : (int)
The minimum number of points necessary to perform certain range gate
or beam specific evaluations. (default=3)
region_hmax : (dict)
Maximum virtual heights allowed in each ionospheric layer.
(default={"D":115.0,"E":150.0,"F":900.0})
region_hmin : (dict)
Minimum virtual heights allowed in each ionospheric layer.
(default={"D":75.0,"E":115.0,"F":150.0})
rg_box : (list of int)
The total number of range gates to include when examining the elevation
angle across all beams. (default=[2,5,10,20])
vh_box : (list of float)
The total width of the altitude box to consider when examining the
elevation angle across all beams at a given range gate.
(default=[50.0,50.0,50.0,150.0])
max_rg : (list)
        Maximum range gate to which the corresponding rg_box and vh_box values apply
        (default=[5,25,40,76])
    max_hop : (float)
Maximum hop that the corresponding rg_box and vh_box values applies
to. (default=3.0)
ut_box : (class dt.timedelta)
Total width of universal time box to examine for backscatter FoV
continuity. (default=20.0 minutes)
tdiff : (list)
A list of tdiff values (in microsec) or an empty list (to use the
hardware value) (default=list())
tdiff_e : (list)
A list containing the tdiff error (in microsec) or an empty list
(no elevation/virtual height error will be computed). (default=list())
tdiff_time : (list)
A list containing the starting time (datetimes) for each tdiff.
(default=list())
    ptest : (boolean)
        Test to see if a propagation path is realistic (default=True)
    step : (int)
        Level of processing to perform (1-6). 6 performs all steps. (default=6)
    strict_gs : (boolean)
        Remove indeterminately flagged backscatter (default=True)
    draw : (boolean)
        Output figure to display? (default=True)
    label_type : (str)
        Type of hop label to use (frac/decimal) (default="frac")
beams : (dict)
Dictionary with radar codes as keys for the dictionaries containing
beams with the data used to create the plots. Will create this data
if it is not provided (default=dict())
Returns
---------
    f : (dict)
        Dictionary of figure handles with keys "time" and "maps"
    ax : (dict)
        Dictionary of axis handles with keys "time" and "maps"
    cb : (dict)
        Dictionary of colorbar handles with keys "time" and "maps"
beams : (dict)
Dictionary with radar codes as keys for the dictionaries containing
beams with the data used to create the plots
"""
import davitpy.pydarn.radar as pyrad
# Load and process the desired data
dout = load_test_beams(intensity_all, intensity_sep, stime, etime,
{rad:bmnum}, {rad:None}, marker_key=marker_key,
password=password, file_type=file_type,
logfile=logfile, log_level=log_level,
min_pnts=min_pnts, region_hmax=region_hmax,
region_hmin=region_hmin, rg_box=rg_box,
vh_box=vh_box, max_rg=max_rg, max_hop=max_hop,
ut_box=ut_box, tdiff=tdiff, tdiff_e=tdiff_e,
tdiff_time=tdiff_time, ptest=ptest, step=step,
strict_gs=strict_gs, beams=beams)
if not dout[0].has_key(rad) or len(dout[0][rad]) == 0:
return(dout[0], dout[1], dout[2], beams)
# Recast the data as numpy arrays
xtime = np.array(dout[0][rad])
yrange = np.array(dout[1][rad])
zdata = {ff:np.array(dout[2][ff][rad]) for ff in dout[2].keys()}
zi = {ff:{hh:dout[3][ff][rad][hh] for hh in dout[3][ff][rad].keys()}
for ff in dout[3].keys()}
# Initialize the time figure
f = plt.figure(figsize=(7,10))
if cp is not None:
ftitle = "{:s} Beam {:d} CP {:d} on {:}".format(rad.upper(), bmnum, cp,
stime.date())
else:
ftitle = "{:s} Beam {:d} on {:}".format(rad.upper(), bmnum,
stime.date())
ax, cb = plot_single_column(f, xtime, yrange, zdata, zi,
{"all":intensity_all, "sep":intensity_sep},
color, marker_key=marker_key,
xmin=mdates.date2num(stime),
xmax=mdates.date2num(etime), ymin=-1,
ymax=76, zmin=imin, zmax=imax, zinc=zinc,
xfmt=mdates.DateFormatter("%H:%M"), yfmt=None,
xinc=mdates.HourLocator(interval=2),
yinc=ticker.MultipleLocator(15),
xlabel="Universal Time (HH:MM)",
ylabels=["Range Gate","Front\nRange Gate",
"Rear\nRange Gate",
"Unassigned\nRange Gate"],
titles=["","","",""], plot_title=ftitle,
label_type=label_type, acolor=acolor,
draw=False)
mlen = len(mtimes)
if mlen > 0:
# Add lines corresponding to map times to each line of the plot
for mt in mtimes:
for a in ax.values():
a.plot([mt, mt], [-1, 76], "k--")
# Initialize the map figure
fmap = plt.figure(figsize=(12, 3 * mlen))
nrows = int(np.ceil(0.5*mlen))
axmap = [fmap.add_subplot(2, nrows, ia+1) for ia in range(mlen)]
hard = None
fovs = {1:None, -1:None}
mm = None
for ia,mt in enumerate(sorted(mtimes)):
scan = list()
for k in beams[rad].keys():
j = 0
while j < len(beams[rad][k]):
if beams[rad][k][j].scan_time > mt:
break
elif beams[rad][k][j].scan_time == mt:
scan.append(beams[rad][k][j])
j += 1
llab = True if ia % 2 == 0 else False
axmap[ia].set_axis_bgcolor(acolor)
mm, fovs, hard, con = plot_map(axmap[ia], scan, hard=hard,
map_handle=mm, fovs=fovs,
plot_beams={1:[bmnum],-1:[bmnum]},
color_beams={1:["0.6"],-1:["0.6"]},
maxgates=45, dat_attr=intensity_all,
fov_attr="fovflg",
dmax=imax[intensity_all],
dmin=imin[intensity_all],
dcolor=color[intensity_all],
lat_label=llab, draw=False)
label = pyrad.radUtils.getParamDict(intensity_all)['label']
unit = pyrad.radUtils.getParamDict(intensity_all)['unit']
cbmap = add_colorbar(fmap, con, imin[intensity_all],
imax[intensity_all], zinc[intensity_all],
label, unit, loc=[0.91,.1,.01,.8])
plt.subplots_adjust(wspace=.05)
else:
fmap = None
axmap = None
cbmap = None
if draw:
# Draw to screen.
if plt.isinteractive():
plt.draw() #In interactive mode, you just "draw".
else:
# W/o interactive mode, "show" stops the user from typing more
# at the terminal until plots are drawn.
plt.show()
# Save the figures, if desired
if figname_time is not None:
f.savefig(figname_time)
if figname_maps is not None and fmap is not None:
fmap.savefig(figname_maps)
# Return figure, axis, and colorbar handles, as well as beams
return({"time":f, "maps":fmap}, {"time":ax, "maps":axmap},
{"time":cb, "maps":cbmap}, beams)
#-------------------------------------------------------------------------
def plot_single_column(f, xdata, ydata, zdata, zindices, zname, color,
marker_key="reg", xmin=None, xmax=None, ymin=None,
ymax=None, zmin=None, zmax=None, xfmt=None, yfmt=None,
xinc=None, yinc=None, zinc=dict(), xlabel="",
ylabels=["All","Front","Rear","Unassigned"],
titles=["","","",""], plot_title="", label_type="frac",
acolor="w", draw=True):
"""Plot single column of subplots with all data in the first row using one
type of data in the z-axis, and a second type of data in the z-axis for the
final rows, which plot only data belonging to the front, rear, and no field-
of-view
Parameters
-----------
f : (pointer)
Figure handle
xdata : (numpy array)
x data (typically time)
ydata : (numpy array)
        y data (typically range gate)
zdata : (dict of numpy arrays)
Dictionary of numpy arrays containing z data
zindices : (dict of dicts/lists)
Dictionary of dictionaries or lists containing z indices
zname : (dict of str)
Dictionary of key names for data. Keys are "all" and "sep".
color : (dict of str)
Intensity color scheme. Defaults to the standard linear color
scheme for this program.
marker_key : (str)
key to denote what type of marker labels are being used (default="reg")
xmin : (float)
Minimum x value to plot (default=None)
xmax : (float)
Maximum x value to plot (default=None)
ymin : (float)
Minimum y value to plot (default=None)
ymax : (float)
Maximum y value to plot (default=None)
    zmin : (dict of float)
        Dictionary of minimum z values to plot (default=None)
    zmax : (dict of float)
        Dictionary of maximum z values to plot (default=None)
xfmt : (class)
x-axis formatting class (default=None)
yfmt : (class)
y-axis formatting class (default=None)
    xinc : (float)
        x-axis tick increment (default=None)
    yinc : (float)
        y-axis tick increment (default=None)
    zinc : (dict of int)
        Dictionary of z colorbar tick increments (default=dict())
xlabel : (str)
x axis label (default="")
ylabels : (list of str)
list of y axis labels (default=["All","Front","Rear","Unassigned"])
titles : (list of str)
list of subplot titles (default=["","","",""])
plot_title : (str)
Plot title (default="")
label_type : (str)
Type of hop label to use (frac/decimal) (default="frac")
acolor : (str or tuple)
        Background color for subplots (default="w" - white)
    draw : (boolean)
Output figure to display? (default=True)
Returns
---------
    ax : (dict)
        Dictionary of axis handles
    cb : (dict)
        Dictionary of colorbar handles
"""
import davitpy.pydarn.radar as pyrad
# Initialize the subplots
xpos = {7:1.1, 6:1.0, 5:0.9, 4:0.8, 3:0.7, 2:0.5, 1:0.0}
iax = {ff:i for i,ff in enumerate(["all",1,-1,0])}
ax = {ff:f.add_subplot(4,1,iax[ff]+1) for ff in iax.keys()}
ypos = 0.89
for zz in zmax.keys():
if abs(zmin[zz]) > 100.0 or abs(zmax[zz]) > 100.0:
ypos = 0.85
pos = {ff:[ypos,.14+(3-iax[ff])*.209,.01,.184] for ff in iax.keys()}
cb = dict()
hops = list()
for ff in zindices.keys():
hops.extend([hh for hh in zindices[ff].keys()
if len(zindices[ff][hh]) > 0 and hh != "all"])
hops = list(set(hops))
handles = list()
labels = list()
# Cycle through the field-of-view keys
for ff in iax.keys():
# Set the plot background color
ax[ff].set_axis_bgcolor(acolor)
# Plot the data
        if ff == "all":
ii = zname[ff]
zz = zindices[ff]['all']
cmap = cm.get_cmap(color[ii]) if isinstance(color[ii],
str) else color[ii]
con = ax[ff].scatter(xdata[zz], ydata[zz], c=zdata[ii][zz],
cmap=cmap, vmin=zmin[ii],
vmax=zmax[ii], s=20, edgecolor="face",
marker=mm[marker_key][ff], linewidth=2.5)
else:
ii = zname['sep']
for hh in hops:
zz = zindices[ff][hh]
ll = hh if isinstance(hh, str) else "{:.1f}".format(hh)
                if ii == 'hop' or ii == 'reg':
ax[ff].plot(xdata[zz], ydata[zz], mm[marker_key][hh], ms=5,
markeredgecolor="face",
color=mc[marker_key][hh], label=ll)
elif len(zz) > 0:
if isinstance(color[ii], str):
cmap = cm.get_cmap(color[ii])
else:
cmap = color[ii]
con = ax[ff].scatter(xdata[zz], ydata[zz], c=zdata[ii][zz],
cmap=cmap, vmin=zmin[ii],
vmax=zmax[ii], s=20, edgecolor="none",
marker=mm[marker_key][hh], label=ll)
# Save legend handles
hl, ll = get_sorted_legend_labels(ax[ff], marker_key)
for il,label in enumerate(ll):
try:
labels.index(label)
except:
# This label is not currently available, add it
lind = morder[marker_key][label]
jl = 0
try:
while lind > morder[marker_key][labels[jl]]:
jl += 1
except:
pass
labels.insert(jl, label)
handles.insert(jl, hl[il])
# Format the axes; add titles, colorbars, and labels
if xmin is not None and xmax is not None:
ax[ff].set_xlim(xmin, xmax)
if ymin is not None and ymax is not None:
ax[ff].set_ylim(ymin, ymax)
if xinc is not None:
ax[ff].xaxis.set_major_locator(xinc)
if yinc is not None:
ax[ff].yaxis.set_major_locator(yinc)
if iax[ff] == 3:
ax[ff].set_xlabel(xlabel)
if xfmt is not None:
ax[ff].xaxis.set_major_formatter(xfmt)
if label_type.find("frac") >= 0:
flabels = get_fractional_hop_labels(labels)
ax[ff].legend(handles, flabels, fontsize="medium",
scatterpoints=1, ncol=len(hops), title="Hop",
labelspacing=.1, columnspacing=0, markerscale=2,
borderpad=0.3, handletextpad=0,
bbox_to_anchor=(xpos[len(hops)],-0.275))
else:
ax[ff].xaxis.set_major_formatter(ticker.FormatStrFormatter(""))
ax[ff].set_ylabel(ylabels[iax[ff]])
if yfmt is not None:
ax[ff].yaxis.set_major_formatter(yfmt)
if titles[iax[ff]] is not None:
ax[ff].set_title(titles[iax[ff]], fontsize="medium")
        if ii != "hop" and ii != "reg":
label = pyrad.radUtils.getParamDict(ii)['label']
unit = pyrad.radUtils.getParamDict(ii)['unit']
if not zinc.has_key(ii):
zinc[ii] = 6
cb[ff] = add_colorbar(f, con, zmin[ii], zmax[ii], zinc[ii], label,
unit, loc=pos[ff])
if plot_title is not None:
f.suptitle(plot_title)
plt.subplots_adjust(hspace=.15, wspace=.15, bottom=.14, left=.15,
right=ypos-.04, top=.95)
if draw:
# Draw to screen.
if plt.isinteractive():
plt.draw() #In interactive mode, you just "draw".
else:
# W/o interactive mode, "show" stops the user from typing more
# at the terminal until plots are drawn.
plt.show()
return ax, cb
#-----------------------------------------------------------------------
def load_test_beams(intensity_all, intensity_sep, stime, etime, rad_bms,
rad_cp, fix_gs=dict(), marker_key="hop", password=True,
file_type="fitacf", logfile=None, log_level=logging.WARN,
min_pnts=3, region_hmin={"D":75.0,"E":115.0,"F":150.0},
region_hmax={"D":115.0,"E":150.0,"F":900.0},
rg_box=[2,5,10,20], vh_box=[50.0,50.0,50.0,150.0],
max_rg=[5,25,40,76], max_hop=3.0,
ut_box=dt.timedelta(minutes=20.0),tdiff=list(),
tdiff_e=list(), tdiff_time=list(), ptest=True, step=6,
strict_gs=True, beams=dict()):
"""Load data for a test period, updating the beams to include origin field-
of-view data and returning dictionaries of lists with time, range,
and intensity data for a specified radar/beam combination.
Parameters
-----------
intensity_all : (str)
        Intensity attribute to plot in the top figure with unseparated fields-
of-view. Uses SuperDARN beam fit attribute names.
intensity_sep : (str)
Intensity attribute to plot in the separated fields-of-view. Uses
SuperDARN beam fit attribute names.
stime : (dt.datetime)
Starting time of plot (will pad loaded data).
etime : (dt.datetime)
Ending time of plot (will pad loaded data).
rad_bms : (dict)
Dictionary with radar code names as keys and the beam to process
as the value. (example={"han":5,"pyk":15})
fix_gs : (dict)
Dictionary with radar code names as keys and min/max range gate pairs
in a list, specifying the ranges where groundscatter should be flagged
as ionospheric backscatter (heater backscatter behaves slightly
        differently than normal ionospheric backscatter).
(example={"han":[[20,40]]})
marker_key : (str)
        key to denote what type of marker labels are being used (default="hop")
    password : (boolean or str)
When downloading data from your specified SuperDARN mirror site, a
password may be needed. It may be included here or, if True is used,
a prompt will appear for you to enter it securely. (default=True)
file_type : (str)
Type of data file to download (default="fitacf")
logfile : (str or NoneType)
Name of file to hold the log output or None for stdout. (default=None)
log_level : (int)
Level of output to report. Flag values explained in logging module.
(default=logging.WARNING)
min_pnts : (int)
The minimum number of points necessary to perform certain range gate
or beam specific evaluations. (default=3)
region_hmax : (dict)
Maximum virtual heights allowed in each ionospheric layer.
(default={"D":115.0,"E":150.0,"F":900.0})
region_hmin : (dict)
Minimum virtual heights allowed in each ionospheric layer.
(default={"D":75.0,"E":115.0,"F":150.0})
rg_box : (list of int)
The total number of range gates to include when examining the elevation
angle across all beams. (default=[2,5,10,20])
vh_box : (list of float)
The total width of the altitude box to consider when examining the
elevation angle across all beams at a given range gate.
(default=[50.0,50.0,50.0,150.0])
max_rg : (list)
        Maximum range gate to which the corresponding rg_box and vh_box values apply
(default=[5,25,40,76])
max_hop : (float)
Maximum hop that the corresponding rg_box and vh_box values applies
to. (default=3.0)
ut_box : (class dt.timedelta)
Total width of universal time box to examine for backscatter FoV
continuity. (default=20.0 minutes)
tdiff : (list)
A list of tdiff values (in microsec) or an empty list (to use the
hardware value) (default=list())
tdiff_e : (list)
A list containing the tdiff error (in microsec) or an empty list
(no elevation/virtual height error will be computed). (default=list())
tdiff_time : (list)
A list containing the starting time (datetimes) for each tdiff.
(default=list())
    ptest : (boolean)
        Test to see if a propagation path is realistic (default=True)
    step : (int)
        Level of processing to perform (1-6). 6 performs all steps. (default=6)
    strict_gs : (boolean)
        Remove indeterminately flagged backscatter (default=True)
beams : (dict)
Dictionary with radar codes as keys for the dictionaries containing
beams with the data used to create the plots. Will create this data
if it is not provided (default=dict())
Returns
---------
    xtime : (dict)
        Dictionary of time lists, keyed by radar code
    yrange : (dict)
        Dictionary of range gate lists, keyed by radar code
    zdata : (dict)
        Dictionary of intensity value lists, keyed by fit attribute and radar code
    zi : (dict)
        Dictionary of index lists, keyed by field-of-view flag, radar code, and hop
beams : (dict)
Dictionary with radar codes as keys for the dictionaries containing
beams with the data used to create the plots
"""
import davitpy.pydarn.sdio as sdio
# Define local routines
def range_gate_limits(rg_limits, rg):
for lim in rg_limits:
if rg >= lim[0] and rg < lim[1]:
return True
return False
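    # Illustrative behavior of the helper above: range_gate_limits([[20, 40]], 25)
    # is True, while gate 40 falls outside the half-open interval and is False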
# Initialize the data dictionaries
xtime = dict()
yrange = dict()
zdata = {k:dict() for k in set([intensity_all, intensity_sep])}
zi = {"all":dict(), 1:dict(), 0:dict(), -1:dict()}
# For each radar, load and process the desired data
for rad in rad_bms.keys():
if not beams.has_key(rad):
# Load data for one radar, padding data based on the largest
# temporal boxcar window used in the FoV processing
rad_ptr = sdio.radDataRead.radDataOpen(stime-ut_box, rad,
eTime=etime+ut_box,
cp=rad_cp[rad],
fileType=file_type,
password=password)
if fix_gs.has_key(rad):
read_ptr = list()
i = 0
bm, i = ub.get_beam(rad_ptr, i)
while bm is not None:
if(hasattr(bm, "fit") and hasattr(bm.fit, "gflg") and
bm.fit.gflg is not None):
bm.fit.gflg = [0 if range_gate_limits(fix_gs[rad],
bm.fit.slist[j])
else gg for j,gg in
enumerate(bm.fit.gflg)]
read_ptr.append(bm)
bm, i = ub.get_beam(rad_ptr, i)
else:
read_ptr = rad_ptr
# Process the beams for this radar
beams[rad] = ub.update_backscatter(read_ptr, min_pnts=min_pnts,
region_hmax=region_hmax,
region_hmin=region_hmin,
rg_box=rg_box, vh_box=vh_box,
max_rg=max_rg, max_hop=max_hop,
ut_box=ut_box, tdiff=tdiff,
tdiff_e=tdiff_e,
tdiff_time=tdiff_time,
ptest=ptest, strict_gs=strict_gs,
logfile=logfile,
log_level=log_level, step=step)
# Load the data for this beam and radar
xtime[rad] = list()
yrange[rad] = list()
idat = dict()
for k in zdata.keys():
zdata[k][rad] = list()
idat[k] = list()
for ff in zi.keys():
zi[ff][rad] = {hh:list() for hh in mm[marker_key].keys()}
if len(beams[rad][rad_bms[rad]]) > 0:
j = 0
for bm in beams[rad][rad_bms[rad]]:
if(hasattr(bm, "fit") and hasattr(bm.fit, intensity_all)
and (hasattr(bm.fit, intensity_sep))):
for k in idat.keys():
idat[k] = getattr(bm.fit, k)
if hasattr(bm.fit, marker_key):
ikey = getattr(bm.fit, marker_key)
else:
ireg = getattr(bm.fit, "region")
ikey = ["{:.1f}{:s}".format(hh, ireg[ii])
if not np.isnan(hh) and len(ireg[ii]) == 1
else ""
for ii,hh in enumerate(getattr(bm.fit, "hop"))]
for i,s in enumerate(bm.fit.slist):
if(not np.isnan(bm.fit.hop[i]) and
len(bm.fit.region[i]) == 1 and
(not strict_gs or
(strict_gs and bm.fit.gflg[i] >= 0))):
xtime[rad].append(bm.time)
yrange[rad].append(s)
for k in idat.keys():
zdata[k][rad].append(idat[k][i])
zi[bm.fit.fovflg[i]][rad][ikey[i]].append(j)
zi["all"][rad][ikey[i]].append(j)
zi[bm.fit.fovflg[i]][rad]["all"].append(j)
zi["all"][rad]["all"].append(j)
j += 1
return(xtime, yrange, zdata, zi, beams)
#------------------------------------------------------------------------
def plot_scan_and_beam(scan, beam, fattr="felv", rattr="belv", fhop_attr="fhop",
bhop_attr="bhop", fov_attr="fovflg",
contour_color=clinear, mcolor="k", bmin=0, bmax=15,
tmin=None, tmax=None, ymin=0, ymax=75, zmin=None,
zmax=None, bfmt=None, tlabel="Universal Time (HH:MM)",
tfmt=mdates.DateFormatter("%H:%M"), yfmt=None,
binc=ticker.MultipleLocator(3),
tinc=mdates.MinuteLocator(interval=15),
yinc=ticker.MultipleLocator(15), zinc=6, plot_title="",
label_type="frac", make_plot=True, draw=True):
"""Plot a specified attribute (elevation angle or virtual height) for the
front and rear field-of-view, using a scan of beams and a single beam for
a longer period of time.
Parameters
-----------
scan : (list of beams)
List of beam class objects denoting one scan across all beams
beam : (list of beams)
List of beam class objects denoting one beam for a period of time
fattr : (str)
Front field-of-view fit attribute (default="felv")
rattr : (str)
Rear field-of-view fit attribute (default="belv")
fhop_attr : (str)
Front field-of-view fit attribute containing hop (default="fhop")
    bhop_attr : (str)
        Rear field-of-view fit attribute containing hop (default="bhop")
fov_attr : (str)
Beam fit attribute containing the field-of-view flag (default="fovflg")
contour_color : (str)
        Contour colormap (default=clinear)
mcolor : (str)
Marker color (default="k")
bmin : (float or int)
Minimum beam number to plot (default=0)
bmax : (float or int)
Maximum beam number to plot (default=15)
tmin : (datetime or NoneType)
Minimum time to plot (default=None)
tmax : (datetime or NoneType)
Maximum time to plot (default=None)
ymin : (float or int)
Minimum range gate to plot (default=0)
ymax : (float or int)
Maximum range gate to plot (default=75)
zmin : (float or int)
Minimum z value to plot (default=None)
zmax : (float or int)
Maximum z value to plot (default=None)
bfmt : (str or NoneType)
Beam axis format (default=None)
tlabel : (str)
Time axis label (default="Universal Time (HH:MM)")
tfmt : (class)
Time axis format (default=mdates.DateFormatter("%H:%M"))
yfmt : (class or NoneType)
Range gate axis format (default=None)
binc : (class)
Beam axis multiple locator (default=ticker.MultipleLocator(3))
tinc : (class)
Time axis multiple locator (default=mdates.MinuteLocator(interval=15))
yinc : (class)
Range gate axis multiple locator
(default=ticker.MultipleLocator(15))
zinc : (float or int)
        Z data colorbar increment (default=6)
plot_title : (str)
Plot title (default="")
label_type : (str)
Type of hop label to use (frac/decimal) (default="frac")
    make_plot : (boolean)
        Make plot (True) or just process data (False) (default=True)
    draw : (boolean)
Output figure to display? (default=True)
Returns
---------
f : (pointer)
Figure handle
ax : (list)
List of axis handles
    cb : (tuple)
        Colorbar axis and colorbar handles returned by add_colorbar
"""
import davitpy.pydarn.radar as pyrad
rn = "plot_scan_and_beam"
mkey = fhop_attr if mm.has_key(fhop_attr) else fhop_attr[1:]
xpos = {7:1.1, 6:0.49, 5:0.35, 4:0.8, 3:0.7, 2:0.5, 1:0.0}
# Extract the scan data
scan_times = list()
xbeam = list()
brange = list()
fbeam = list()
rbeam = list()
bhop = {ff:{hh:list() for hh in mc[mkey].keys()} for ff in [1,0,-1]}
bfov = {ff:{hh:list() for hh in mm[mkey].keys()} for ff in [1,0,-1]}
j = 0
for bm in scan:
scan_times.append(bm.time)
if(hasattr(bm, "fit") and hasattr(bm.fit, fattr)
and hasattr(bm.fit, rattr) and hasattr(bm.fit, fov_attr)
and (hasattr(bm.fit, fhop_attr) or fhop_attr.find("reg") >= 0)
and (hasattr(bm.fit, bhop_attr) or bhop_attr.find("reg") >= 0)
and hasattr(bm.fit, "region")):
fd = getattr(bm.fit, fattr)
rd = getattr(bm.fit, rattr)
ff = getattr(bm.fit, fov_attr)
hh = dict()
if fhop_attr == "freg":
temp = getattr(bm.fit, "fhop")
hh[1] = ["{:.1f}{:s}".format(temp[i],rr)
if not np.isnan(temp[i]) and len(rr) == 1 else ""
for i,rr in enumerate(getattr(bm.fit, "fregion"))]
else:
hh[1] = getattr(bm.fit, fhop_attr)
if bhop_attr == "breg":
temp = getattr(bm.fit, "bhop")
hh[-1] = ["{:.1f}{:s}".format(temp[i],rr)
if not np.isnan(temp[i]) and len(rr) == 1 else ""
for i,rr in enumerate(getattr(bm.fit, "bregion"))]
else:
hh[-1] = getattr(bm.fit, bhop_attr)
for i,s in enumerate(bm.fit.slist):
fi = ff[i] if abs(ff[i]) == 1 else (1 if not np.isnan(fd[i])
else -1)
fe = fd[i] if fi == 1 else rd[i]
if not np.isnan(fe) and bhop[fi].has_key(hh[fi][i]):
xbeam.append(bm.bmnum)
brange.append(s)
fbeam.append(fd[i])
rbeam.append(rd[i])
bfov[ff[i]][hh[fi][i]].append(j)
bfov[ff[i]]['all'].append(j)
bhop[fi][hh[fi][i]].append(j)
if bhop[fi].has_key(hh[-fi][i]):
bhop[-fi][hh[-fi][i]].append(j)
j += 1
xbeam = np.array(xbeam)
brange = np.array(brange)
fbeam = np.array(fbeam)
rbeam = np.array(rbeam)
# Extract the beam data
bmnum = beam[0].bmnum
xtime=list()
trange=list()
ftime=list()
rtime=list()
thop={ff:{hh:list() for hh in mc[mkey].keys()} for ff in [1,0,-1]}
tfov={ff:{hh:list() for hh in mm[mkey].keys()} for ff in [1,0,-1]}
j = 0
for bm in beam:
if(hasattr(bm.fit, fattr) and hasattr(bm.fit, rattr)
and (hasattr(bm.fit, fhop_attr) or fhop_attr.find("reg") >= 0)
and (hasattr(bm.fit, bhop_attr) or bhop_attr.find("reg") >= 0)
and hasattr(bm.fit, fov_attr) and hasattr(bm.fit, "region")):
fd = getattr(bm.fit, fattr)
rd = getattr(bm.fit, rattr)
ff = getattr(bm.fit, fov_attr)
hh = dict()
if fhop_attr == "freg":
temp = getattr(bm.fit, "fhop")
hh[1] = ["{:.1f}{:s}".format(temp[i],rr)
if not np.isnan(temp[i]) and len(rr) == 1 else ""
for i,rr in enumerate(getattr(bm.fit, "fregion"))]
else:
hh[1] = getattr(bm.fit, fhop_attr)
if bhop_attr == "breg":
temp = getattr(bm.fit, "bhop")
hh[-1] = ["{:.1f}{:s}".format(temp[i],rr)
if not np.isnan(temp[i]) and len(rr) == 1 else ""
for i,rr in enumerate(getattr(bm.fit, "bregion"))]
else:
hh[-1] = getattr(bm.fit, bhop_attr)
for i,s in enumerate(bm.fit.slist):
fi = ff[i] if abs(ff[i]) == 1 else (1 if not np.isnan(fd[i])
else -1)
fe = fd[i] if fi == 1 else rd[i]
if not np.isnan(fe) and thop[fi].has_key(hh[fi][i]):
xtime.append(bm.time)
trange.append(s)
ftime.append(fd[i])
rtime.append(rd[i])
tfov[ff[i]][hh[fi][i]].append(j)
tfov[ff[i]]['all'].append(j)
thop[fi][hh[fi][i]].append(j)
if thop[-fi].has_key(hh[-fi][i]):
thop[-fi][hh[-fi][i]].append(j)
j += 1
xtime = np.array(xtime)
trange = np.array(trange)
ftime = np.array(ftime)
rtime = np.array(rtime)
if not make_plot:
return({1:[xbeam[bfov[1]['all']], xtime[tfov[1]['all']]],
-1:[xbeam[bfov[-1]['all']], xtime[tfov[-1]['all']]]},
{1:[brange[bfov[1]['all']], trange[tfov[1]['all']]],
-1:[brange[bfov[-1]['all']], trange[tfov[-1]['all']]]},
{1:[fbeam[bfov[1]['all']], ftime[tfov[1]['all']]],
-1:[rbeam[bfov[-1]['all']], rtime[tfov[-1]['all']]]})
# Initialize the figure
f = plt.figure(figsize=(12,8))
gb = gridspec.GridSpec(1, 2)
gb.update(left=0.075, wspace=0.05, right=0.48, top=.85)
gt = gridspec.GridSpec(2, 1)
gt.update(left=0.55, hspace=0.075, right=0.91, top=.85)
# Initialize the subplots
fbax = plt.subplot(gb[:,0])
rbax = plt.subplot(gb[:,1])
ftax = plt.subplot(gt[0,:])
rtax = plt.subplot(gt[1,:])
if zmin is None:
zmin = min(np.nanmin(ftime), np.nanmin(rtime), np.nanmin(fbeam),
np.nanmin(rbeam))
    if zmax is None:
        # Use the largest maximum so no data are clipped by the color scale
        zmax = max(np.nanmax(ftime), np.nanmax(rtime), np.nanmax(fbeam),
                   np.nanmax(rbeam))
if len(xtime) > 0:
if tmin is None:
tmin = mdates.date2num(min(xtime))
if tmax is None:
tmax = mdates.date2num(max(xtime))
# Plot the contours and markers
for hh in mc[mkey].keys():
if len(bhop[1][hh]) > 0:
# Scans
con = fbax.scatter(xbeam[bhop[1][hh]], brange[bhop[1][hh]],
c=fbeam[bhop[1][hh]], vmin=zmin, vmax=zmax,
cmap=cm.get_cmap(contour_color), s=80,
edgecolor="face", marker=mm[mkey][hh])
fbax.plot(xbeam[bfov[1][hh]], brange[bfov[1][hh]], mm[mkey][hh],
ms=8, markerfacecolor="none", markeredgecolor=mcolor)
if len(bhop[-1][hh]) > 0:
con = rbax.scatter(xbeam[bhop[-1][hh]], brange[bhop[-1][hh]],
c=rbeam[bhop[-1][hh]], vmin=zmin, vmax=zmax,
cmap=cm.get_cmap(contour_color), s=80,
edgecolor="face", marker=mm[mkey][hh])
rbax.plot(xbeam[bfov[-1][hh]], brange[bfov[-1][hh]], mm[mkey][hh],
ms=8, markerfacecolor="none", markeredgecolor=mcolor)
if len(thop[1][hh]) > 0:
# Beams
try:
label = "{:.1f}".format(hh)
except:
label = "{:}".format(hh)
con = ftax.scatter(xtime[thop[1][hh]], trange[thop[1][hh]],
c=ftime[thop[1][hh]], vmin=zmin, vmax=zmax,
cmap=cm.get_cmap(contour_color), s=80,
edgecolor="face", marker=mm[mkey][hh],
label=label)
ftax.plot(xtime[tfov[1][hh]], trange[tfov[1][hh]], mm[mkey][hh],
ms=8, markerfacecolor="none", markeredgecolor=mcolor)
if len(thop[-1][hh]) > 0:
# Beams
try:
label = "{:.1f}".format(hh)
except:
label = "{:}".format(hh)
con = rtax.scatter(xtime[thop[-1][hh]], trange[thop[-1][hh]],
c=rtime[thop[-1][hh]], vmin=zmin, vmax=zmax,
cmap=cm.get_cmap(contour_color), s=80,
edgecolor="face", marker=mm[mkey][hh])
rtax.plot(xtime[tfov[-1][hh]], trange[tfov[-1][hh]], mm[mkey][hh],
ms=8, markerfacecolor="none", markeredgecolor=mcolor)
# Add lines indicating the beam plotted versus time in the scans
fbax.plot([bmnum-.5, bmnum-.5], [ymin-1, ymax+1], "k--")
rbax.plot([bmnum-.5, bmnum-.5], [ymin-1, ymax+1], "k--")
fbax.plot([bmnum+.5, bmnum+.5], [ymin-1, ymax+1], "k--")
rbax.plot([bmnum+.5, bmnum+.5], [ymin-1, ymax+1], "k--")
# Add lines indicating the time plotted in the beam vs time plots
stime = min(scan_times)
etime = max(scan_times)
ftax.plot([stime, stime], [ymin-1, ymax+1], "k--")
ftax.plot([etime, etime], [ymin-1, ymax+1], "k--")
rtax.plot([stime, stime], [ymin-1, ymax+1], "k--")
rtax.plot([etime, etime], [ymin-1, ymax+1], "k--")
# Add legend
handles, labels = get_sorted_legend_labels(ftax, mkey)
hl, ll = get_sorted_legend_labels(rtax, mkey)
for il,label in enumerate(ll):
try:
labels.index(label)
except:
# This label is not currently available, add it
lind = morder[mkey][label]
jl = 0
while lind > morder[mkey][labels[jl]]:
jl += 1
labels.insert(jl, label)
handles.insert(jl, hl[il])
if label_type.find("frac") >= 0:
flabels = get_fractional_hop_labels(labels)
else:
flabels = labels
ftax.legend(handles, flabels, fontsize="medium", scatterpoints=1,
ncol=len(flabels), title="Hop", labelspacing=.1,
columnspacing=0, markerscale=1, borderpad=0.3, handletextpad=0,
bbox_to_anchor=(xpos[len(flabels)],1.4))
# Add colorbar
label = pyrad.radUtils.getParamDict(fattr)['label']
unit = pyrad.radUtils.getParamDict(fattr)['unit']
cb = add_colorbar(f, con, zmin, zmax, zinc, label, unit,
loc=[.92,.1,.01,.75])
# Add a global title, if desired
if len(plot_title) > 0:
f.suptitle(plot_title)
# For subplot, adjust the axis and labels
fbax.set_title("Front", fontsize="medium")
fbax.set_ylabel("Range Gate")
fbax.set_ylim(ymin-.5, ymax+.5)
fbax.set_xlim(bmin-.5, bmax+.5)
fbax.text(6.0, -6, "Beams at {:}".format(stime))
rbax.set_title("Rear", fontsize="medium")
rbax.set_ylim(ymin-.5, ymax+.5)
rbax.set_xlim(bmin-.5, bmax+.5)
rbax.yaxis.set_major_formatter(ticker.FormatStrFormatter(""))
ftax.set_title("Beam {:d}".format(bmnum), fontsize="medium")
ftax.set_ylabel("Front Range Gate")
ftax.set_ylim(ymin-.5, ymax+.5)
ftax.xaxis.set_major_formatter(ticker.FormatStrFormatter(""))
rtax.set_ylabel("Rear Range Gate")
rtax.set_xlabel(tlabel)
rtax.set_ylim(ymin-.5, ymax+.5)
if tmin is not None and tmax is not None:
ftax.set_xlim(tmin, tmax)
rtax.set_xlim(tmin, tmax)
# Set the tick formats and locations
if bfmt is not None:
fbax.xaxis.set_major_formatter(bfmt)
rbax.xaxis.set_major_formatter(bfmt)
if tfmt is not None:
rtax.xaxis.set_major_formatter(tfmt)
if yfmt is not None:
fbax.yaxis.set_major_formatter(yfmt)
ftax.yaxis.set_major_formatter(yfmt)
rtax.yaxis.set_major_formatter(yfmt)
if binc is not None:
fbax.xaxis.set_major_locator(binc)
rbax.xaxis.set_major_locator(binc)
if tinc is not None:
ftax.xaxis.set_major_locator(tinc)
rtax.xaxis.set_major_locator(tinc)
if yinc is not None:
fbax.yaxis.set_major_locator(yinc)
rbax.yaxis.set_major_locator(yinc)
ftax.yaxis.set_major_locator(yinc)
rtax.yaxis.set_major_locator(yinc)
if draw:
# Draw to screen.
if plt.isinteractive():
plt.draw() #In interactive mode, you just "draw".
else:
# W/o interactive mode, "show" stops the user from typing more
# at the terminal until plots are drawn.
plt.show()
return f, [fbax, rbax, ftax, rtax], cb
#-------------------------------------------------------------------------
def plot_meteor_figure(fcolor="b", rcolor="m", stime=dt.datetime(2001,12,14),
etime=dt.datetime(2001,12,28), rad="sas", radar_ns=-1.0,
fbmnum=0, rbmnum=15, cp=150, malt=102.7, figname=None,
password=True, file_type="fitacf", logfile=None,
log_level=logging.WARN, min_pnts=3,
region_hmax={"D":115.0,"E":150.0,"F":900.0},
region_hmin={"D":75.0,"E":115.0,"F":150.0},
rg_box=[2,5,10,20], vh_box=[50.0,50.0,50.0,150.0],
max_rg=[5,25,40,76], max_hop=3.0,
ut_box=dt.timedelta(minutes=20.0), tdiff=list(),
tdiff_e=list(), tdiff_time=list(), ptest=True, step=6,
strict_gs=True, draw=True, beams=dict()):
"""Plot comparing HWM14 neutral winds with the line-of-site velocity
for two beams at Saskatoon
Parameters
-----------
fcolor : (str)
Front field-of-view color (default="b")
rcolor : (str)
Rear field-of-view color (default="m")
stime : (dt.datetime)
Starting time for finding meteor data (default=dt.datetime(2001,12,14))
etime : (dt.datetime)
Ending time for finding meteor data (default=dt.datetime(2001,12,28))
rad : (str)
radar code (default="sas")
radar_ns : (float)
        Sign denoting whether the front field-of-view line-of-sight velocity
        is positive toward the North (positive) or not (negative)
        (default=-1.0)
fbmnum : (int)
Front field-of-view beam number (default=0)
rbmnum : (int)
Rear field-of-view beam number (default=15)
cp : (int)
Radar programming code number. (default=150)
malt : (float)
Meteor altitude to use for map (default=102.7)
figname : (str or NoneType)
Figure name or None if no figure is to be saved (default=None)
    password : (boolean or str)
When downloading data from your specified SuperDARN mirror site, a
password may be needed. It may be included here or, if True is used,
a prompt will appear for you to enter it securely. (default=True)
file_type : (str)
Type of data file to download (default="fitacf")
logfile : (str or NoneType)
Name of file to hold the log output or None for stdout. (default=None)
log_level : (int)
Level of output to report. Flag values explained in logging module.
(default=logging.WARNING)
min_pnts : (int)
The minimum number of points necessary to perform certain range gate
or beam specific evaluations. (default=3)
region_hmax : (dict)
Maximum virtual heights allowed in each ionospheric layer.
(default={"D":125.0,"E":200.0,"F":900.0})
region_hmin : (dict)
Minimum virtual heights allowed in each ionospheric layer.
(default={"D":75.0,"E":125.0,"F":200.0})
rg_box : (list of int)
The total number of range gates to include when examining the elevation
angle across all beams. (default=[2,5,10,20])
vh_box : (list of float)
The total width of the altitude box to consider when examining the
elevation angle across all beams at a given range gate.
(default=[50.0,50.0,50.0,150.0])
max_rg : (list)
The maximum range gates to apply the range gate and virtual height
boxes (default=[5,25,40,76])
max_hop : (float)
Maximum hop that the corresponding rg_box and vh_box values applies
to. (default=3.0)
ut_box : (class dt.timedelta)
Total width of universal time box to examine for backscatter FoV
continuity. (default=20.0 minutes)
tdiff : (list)
A list of tdiff values (in microsec) or an empty list (to use the
hardware value) (default=list())
tdiff_e : (list)
A list containing the tdiff error (in microsec) or an empty list
(no elevation/virtual height error will be computed). (default=list())
tdiff_time : (list)
A list containing the starting time (datetimes) for each tdiff.
(default=list())
    ptest : (boolean)
        Test to see if a propagation path is realistic (default=True)
    step : (int)
        Level of processing to perform (1-6). 6 performs all steps. (default=6)
    strict_gs : (bool)
        Remove indeterminately flagged backscatter (default=True)
    draw : (boolean)
Output figure to display? (default=True)
beams : (dict)
Dictionary with radar codes as keys for the dictionaries containing
beams with the data used to create the plots. Will create this data
if it is not provided (default=dict())
Returns
---------
f : (pointer)
Figure handle
ax : (dict)
Dictionary of axis handles
cb : (dict)
Dictionary of colorbar handles
beams : (dict)
Dictionary with radar codes as keys for the dictionaries containing
beams with the data used to create the plots
"""
import davitpy.pydarn.plotting as plotting
import davitpy.pydarn.radar as pyrad
import davitpy.pydarn.sdio as sdio
import davitpy.models.hwm as hwm
#-------------------------------------------------------------------------
# Define local routines
def ismeteor(p, verr, werr):
"""Gareth's threshold test for meteor scatter (Chisham and Freeman 2013)
Parameters
----------
        p : (float)
            Backscatter power
        verr : (float)
            Velocity error
        werr : (float)
            Spectral width error
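        Returns
        -------
        good : (bool)
            True if the errors fall below the power-dependent ceilings
        Example
        -------
        Illustrative values (not from the original source), computed from the
        constants defined in the function body:
            vtest = 10.0 * np.exp(-np.log(50.0) / 50.0 * 20.0)       # ~2.09
            wtest = 40.0 * np.exp(-np.log(40.0 / .2) / 50.0 * 20.0)  # ~4.80
            # so a point with p=20.0, verr=5.0, werr=10.0 would be rejected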
"""
# Initialize output
good = False
# Set constants for exponential function that defines the upper limit of
# velocity and spectral width error at a given power
va = 10.0
vb = -np.log(50.0) / 50.0
wa = 40.0
wb = -np.log(40.0 / .2) / 50.0
# Only test the values if the power and error values are set
if(not np.isnan(p) and p >= 0.0 and not np.isnan(verr)
and verr >= 0.0 and not np.isnan(werr) and werr >= 0.0):
# Calculate the upper limits of the error
vtest = va * np.exp(vb * p)
wtest = wa * np.exp(wb * p)
# Test the calculated errors
if verr <= vtest and werr <= wtest:
good = True
return good
def dec2001ap(bm_time):
"""Look up Ap using time. Only available for December 2001.
Data from: ftp://ftp.ngdc.noaa.gov/STP/GEOMAGNETIC_DATA/INDICES/KP_AP/
Parameters
-----------
bm_time : (datetime)
Time to find Ap
Returns
---------
        bm_ap : (float)
            Ap index for the hour containing bm_time
"""
ap_times = [dt.datetime(2001,12,1) + dt.timedelta(hours=i)
for i in range(744)]
ap_vals = [3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0,
3.0, 3.0, 3.0, 3.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0,
4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0,
2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 6.0, 6.0, 6.0, 6.0,
6.0, 6.0, 6.0, 6.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0,
15.0, 15.0, 15.0, 15.0, 15.0, 15.0, 15.0, 15.0, 9.0, 9.0,
9.0, 9.0, 9.0, 9.0, 9.0, 9.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0,
6.0, 6.0, 27.0, 27.0, 27.0, 27.0, 27.0, 27.0, 27.0, 27.0,
12.0, 12.0, 12.0, 12.0, 12.0, 12.0, 12.0, 12.0, 5.0, 5.0,
5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 15.0, 15.0, 15.0, 15.0, 15.0,
15.0, 15.0, 15.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0,
7.0, 7.0, 7.0, 7.0, 7.0, 7.0, 7.0, 7.0, 9.0, 9.0, 9.0, 9.0,
9.0, 9.0, 9.0, 9.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0,
3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 18.0, 18.0, 18.0,
18.0, 18.0, 18.0, 18.0, 18.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0,
5.0, 5.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 2.0, 2.0,
2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0,
3.0, 3.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 2.0, 2.0,
2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0,
2.0, 2.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 3.0, 3.0,
3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0,
4.0, 4.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 7.0, 7.0,
7.0, 7.0, 7.0, 7.0, 7.0, 7.0, 9.0, 9.0, 9.0, 9.0, 9.0, 9.0,
9.0, 9.0, 9.0, 9.0, 9.0, 9.0, 9.0, 9.0, 9.0, 9.0, 7.0, 7.0,
7.0, 7.0, 7.0, 7.0, 7.0, 7.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0,
6.0, 6.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0,
3.0, 3.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 9.0, 9.0,
9.0, 9.0, 9.0, 9.0, 9.0, 9.0, 15.0, 15.0, 15.0, 15.0, 15.0,
15.0, 15.0, 15.0, 7.0, 7.0, 7.0, 7.0, 7.0, 7.0, 7.0, 7.0,
12.0, 12.0, 12.0, 12.0, 12.0, 12.0, 12.0, 12.0, 18.0, 18.0,
18.0, 18.0, 18.0, 18.0, 18.0, 18.0, 18.0, 18.0, 18.0, 18.0,
18.0, 18.0, 18.0, 18.0, 12.0, 12.0, 12.0, 12.0, 12.0, 12.0,
12.0, 12.0, 22.0, 22.0, 22.0, 22.0, 22.0, 22.0, 22.0, 22.0,
15.0, 15.0, 15.0, 15.0, 15.0, 15.0, 15.0, 15.0, 22.0, 22.0,
22.0, 22.0, 22.0, 22.0, 22.0, 22.0, 7.0, 7.0, 7.0, 7.0, 7.0,
7.0, 7.0, 7.0, 7.0, 7.0, 7.0, 7.0, 7.0, 7.0, 7.0, 7.0, 15.0,
15.0, 15.0, 15.0, 15.0, 15.0, 15.0, 15.0, 9.0, 9.0, 9.0, 9.0,
9.0, 9.0, 9.0, 9.0, 7.0, 7.0, 7.0, 7.0, 7.0, 7.0, 7.0, 7.0,
4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0,
2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 7.0, 7.0, 7.0, 7.0,
7.0, 7.0, 7.0, 7.0, 9.0, 9.0, 9.0, 9.0, 9.0, 9.0, 9.0, 9.0,
15.0, 15.0, 15.0, 15.0, 15.0, 15.0, 15.0, 15.0, 15.0, 15.0,
15.0, 15.0, 15.0, 15.0, 15.0, 15.0, 9.0, 9.0, 9.0, 9.0, 9.0,
9.0, 9.0, 9.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 7.0,
7.0, 7.0, 7.0, 7.0, 7.0, 7.0, 7.0, 5.0, 5.0, 5.0, 5.0, 5.0,
5.0, 5.0, 5.0, 7.0, 7.0, 7.0, 7.0, 7.0, 7.0, 7.0, 7.0, 18.0,
18.0, 18.0, 18.0, 18.0, 18.0, 18.0, 18.0, 39.0, 39.0, 39.0,
39.0, 39.0, 39.0, 39.0, 39.0, 12.0, 12.0, 12.0, 12.0, 12.0,
12.0, 12.0, 12.0, 9.0, 9.0, 9.0, 9.0, 9.0, 9.0, 9.0, 9.0,
6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 9.0, 9.0, 9.0, 9.0,
9.0, 9.0, 9.0, 9.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0,
3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 9.0, 9.0, 9.0, 9.0,
9.0, 9.0, 9.0, 9.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0,
4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 3.0, 3.0, 3.0, 3.0,
3.0, 3.0, 3.0, 3.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0,
6.0, 6.0, 6.0, 6.0, 27.0, 27.0, 27.0, 27.0, 27.0, 27.0, 27.0,
27.0, 9.0, 9.0, 9.0, 9.0, 9.0, 9.0, 9.0, 9.0, 18.0, 18.0,
18.0, 18.0, 18.0, 18.0, 18.0, 18.0, 39.0, 39.0, 39.0, 39.0,
39.0, 39.0, 39.0, 39.0, 18.0, 18.0, 18.0, 18.0, 18.0, 18.0,
18.0, 18.0, 18.0, 18.0, 18.0, 18.0, 18.0, 18.0, 18.0, 18.0,
18.0, 18.0, 18.0, 18.0, 18.0, 18.0, 18.0, 18.0, 7.0, 7.0,
7.0, 7.0, 7.0, 7.0, 7.0, 7.0]
# Indices apply to the entire hour. To avoid rounding by minutes,
# recast requested time without minutes, seconds, or microseconds
htime = dt.datetime(bm_time.year, bm_time.month, bm_time.day,
bm_time.hour)
tdelta = np.array([abs((t-htime).total_seconds()) for t in ap_times])
bm_ap = ap_vals[tdelta.argmin()]
return bm_ap
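# Quick sanity check: the lookup truncates the requested time to the hour
# and returns the closest tabulated value, e.g.
# >>> dec2001ap(dt.datetime(2001, 12, 1, 3, 45))
# 3.0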
# End local routines
#-------------------------------------------------------------------------
# Load and process the desired data
if not beams.has_key(fbmnum) or not beams.has_key(rbmnum):
# Load the SuperDARN data, padding data based on the largest
# temporal boxcar window used in the FoV processing
rad_ptr = sdio.radDataRead.radDataOpen(stime-ut_box, rad,
eTime=etime+ut_box, cp=cp,
fileType=file_type,
password=password)
beams = ub.update_backscatter(rad_ptr, min_pnts=min_pnts,
region_hmax=region_hmax,
region_hmin=region_hmin, rg_box=rg_box,
vh_box=vh_box, max_rg=max_rg,
max_hop=max_hop, ut_box=ut_box,
tdiff=tdiff, tdiff_e=tdiff_e,
tdiff_time=tdiff_time, ptest=ptest,
logfile=logfile, log_level=log_level,
step=step)
if not beams.has_key(fbmnum) or not beams.has_key(rbmnum):
return(None, None, beams)
if len(beams[fbmnum]) == 0 or len(beams[rbmnum]) == 0:
return(None, None, beams)
# Load the radar location data
hard = pyrad.site(code=rad, dt=stime)
fovs = {1:pyrad.radFov.fov(site=hard, ngates=5, altitude=malt, coords="geo",
fov_dir="front"),
-1:pyrad.radFov.fov(site=hard, ngates=5, altitude=malt,
coords="geo", fov_dir="back")}
# Select the meteor data
bmnum = {1:fbmnum, -1:rbmnum}
yspeed = {fbmnum:list(), rbmnum:list(), "reject":list()} # FoV speeds
hspeed = {fbmnum:list(), rbmnum:list(), "reject":list()} # HWM speeds
for ff in bmnum.keys():
for bm in beams[bmnum[ff]]:
for i,ifov in enumerate(bm.fit.fovflg):
if bm.fit.slist[i] >= 5:
break
if((ifov == ff or ifov == 0) and bm.fit.hop[i] == 0.5 and
bm.fit.vheight[i] < 125.0 and len(bm.fit.region[i]) == 1):
# Test to see if this is meteor backscatter using the
# rules outlined by Chisham and Freeman
if ismeteor(bm.fit.p_l[i], bm.fit.v_e[i], bm.fit.w_l_e[i]):
skey = None
if ifov == ff:
skey = bmnum[ff]
yspeed[skey].append(ff*bm.fit.v[i]*radar_ns)
glat = fovs[ff].latCenter[bmnum[ff],bm.fit.slist[i]]
glon = fovs[ff].lonCenter[bmnum[ff],bm.fit.slist[i]]
alt = bm.fit.vheight[i]
elif ff == 1:
skey = "reject"
yspeed[skey].append(bm.fit.v[i]*radar_ns)
glat = fovs[1].latCenter[bmnum[1],bm.fit.slist[i]]
glon = fovs[1].lonCenter[bmnum[1],bm.fit.slist[i]]
alt = bm.fit.fvheight[i]
if skey is not None:
ap = dec2001ap(bm.time)
ihwm = hwm.hwm_input.format_hwm_input(bm.time, alt,
glat, glon,
ap)
try:
winds = hwm.hwm14.hwm14(*ihwm)
except:
# The first call to hwm14 creates the module,
# but it works just the same as calling the
# module
winds = hwm.hwm14(*ihwm)
hspeed[skey].append(winds[0])
# Recast the data as numpy arrays
for skey in yspeed.keys():
yspeed[skey] = np.array(yspeed[skey])
hspeed[skey] = np.array(hspeed[skey])
# Initialize the figure
f = plt.figure(figsize=(12,8))
f.suptitle("{:} to {:}".format(stime.date(), etime.date()))
# Add a map with the field-of-view and beams highlighted
ax = f.add_subplot(1,2,1)
urlat = np.ceil(fovs[1].latFull.max())
urlon = np.ceil(max(fovs[1].lonFull.max(), fovs[-1].lonFull.max()))
lllat = np.floor(fovs[-1].latFull.min()) - 1.0
lllon = np.ceil(min(fovs[1].lonFull.min(), fovs[-1].lonFull.min())) - 1.0
m = basemap.Basemap(ax=ax, projection="stere", lon_0=hard.geolon,
lat_0=hard.geolat, llcrnrlon=lllon, llcrnrlat=lllat,
urcrnrlon=urlon, urcrnrlat=urlat, resolution="l")
m.drawcoastlines(linewidth=0.5, color="0.6")
m.fillcontinents(color="0.6", alpha=.1)
m.drawmeridians(np.arange(min(lllon, urlon), max(lllon, urlon), 2.0),
labels=[0,0,0,1])
m.drawparallels(np.arange(lllat, urlat+1.0, 2.0), labels=[1,0,0,0])
# Add the field-of-view boundaries
plotting.mapOverlay.overlayFov(m, codes=rad, dateTime=stime, beams=[fbmnum],
beamsColors=[fcolor], fovObj=fovs[1])
plotting.mapOverlay.overlayFov(m, codes=rad, dateTime=stime, beams=[rbmnum],
beamsColors=[rcolor], fovObj=fovs[-1])
# Add the radar location and name
plotting.mapOverlay.overlayRadar(m, codes=rad, dateTime=stime,
annotate=True, fontSize=16)
# Add the velocity difference histograms
diff_range = (-200.0, 200.0)
diff_inc = int((diff_range[1] - diff_range[0]) / 5.0)
fax = f.add_subplot(3,2,2)
rax = f.add_subplot(3,2,4)
nax = f.add_subplot(3,2,6)
vdiff = {skey:yspeed[skey]-hspeed[skey] for skey in yspeed.keys()}
fnum = fax.hist(vdiff[bmnum[1]], diff_inc, range=diff_range, color=fcolor)
rnum = rax.hist(vdiff[bmnum[-1]], diff_inc, range=diff_range, color=rcolor)
nnum = nax.hist(vdiff["reject"], diff_inc, range=diff_range, color="0.6")
fax.set_ylabel("Front\nNumber Points")
rax.set_ylabel("Rear\nNumber Points")
nax.set_ylabel("Removed from Front\nNumber Points")
fax.xaxis.set_major_locator(ticker.MultipleLocator(75))
rax.xaxis.set_major_locator(ticker.MultipleLocator(75))
nax.xaxis.set_major_locator(ticker.MultipleLocator(75))
fax.xaxis.set_major_formatter(ticker.FormatStrFormatter(""))
rax.xaxis.set_major_formatter(ticker.FormatStrFormatter(""))
nax.set_xlabel("LoS Velocity - HWM Meridional Wind (m s$^{-1}$)")
fax.set_xlim(diff_range[0], diff_range[1])
rax.set_xlim(diff_range[0], diff_range[1])
nax.set_xlim(diff_range[0], diff_range[1])
mnum = (int(max(fnum[0].max(), rnum[0].max(), nnum[0].max()))/10 + 1) * 10.0
fax.set_ylim(0,mnum)
rax.set_ylim(0,mnum)
nax.set_ylim(0,mnum)
fax.plot([0, 0], [0, mnum], "k:")
rax.plot([0, 0], [0, mnum], "k:")
nax.plot([0, 0], [0, mnum], "k:")
unit = "(m s$^{-1}$)"
fax.text(-190, 0.65*mnum,
"$\mu$={:.1f} {:s}\n$\sigma$={:.1f} {:s}".format( \
np.mean(vdiff[bmnum[1]]), unit, np.std(vdiff[bmnum[1]]), unit))
rax.text(-190, 0.65*mnum,
"$\mu$={:.1f} {:s}\n$\sigma$={:.1f} {:s}".format( \
np.mean(vdiff[bmnum[-1]]), unit, np.std(vdiff[bmnum[-1]]), unit))
nax.text(-190, 0.65*mnum,
"$\mu$={:.1f} {:s}\n$\sigma$={:.1f} {:s}".format( \
np.mean(vdiff["reject"]), unit, np.std(vdiff["reject"]), unit))
plt.subplots_adjust(wspace=.3, hspace=.1, right=.93)
if draw:
# Draw to screen.
if plt.isinteractive():
plt.draw() #In interactive mode, you just "draw".
else:
# W/o interactive mode, "show" stops the user from typing more
# at the terminal until plots are drawn.
plt.show()
# Save figure
if figname is not None:
f.savefig(figname)
return(f, [ax, fax, rax, nax], beams)
#-------------------------------------------------------------------------
def plot_map(ax, scan, hard=None, map_handle=None, fovs={1:None,-1:None},
plot_beams={1:list(),-1:list()}, color_beams={1:list(),-1:list()},
maxgates=None, fan_model='IS', model_alt=300.0, elv_attr="fovelv",
alt_attr="vheight", dat_attr="v", fov_attr="fovflg", dmax=500.0,
dmin=-500.0, dcolor=ccenter, lat_label=True, lon_label=True,
gscatter=True, draw=True):
"""Plot a fov map
Parameters
-----------
ax : (axis handle)
Axis handle
scan : (list of beams)
List of beam class objects
hard : (pydarn.radar.site or NoneType)
Hardware data (default=None)
map_handle : (basemap or NoneType)
Basemap handle or NoneType (default=None)
fovs : (dict)
Dictionary containing radar fields-of-view. If None, FoV will be
loaded. If False, FoV will not be plotted.
(default={1:None,-1:None})
plot_beams : (dict)
Dictionary containing lists of radar beam numbers to highlight
(default={1:list(),-1:list()})
color_beams : (dict)
Dictionary containing lists of colors to use to highlight radar beams
(default={1:list(),-1:list()})
maxgates : (int, float, or NoneType)
Maximum range gate to plot (default=None)
fan_model : (str or NoneType)
Type of model to use when plotting data (default="IS")
IS : Ionospheric Backscatter model
GS : Ground Backscatter model
None : No model, use elevation and altitude
model_alt : (float)
Model altitude if IS or GS is used (default=300.0)
elv_attr : (str)
Beam fit attribute containing elevation (default="fovelv")
alt_attr : (str)
Beam fit attribute containing altitude (default="vheight")
dat_attr : (str)
Beam fit attribute containing data to plot (default="v")
fov_attr : (str)
Beam fit attribute containing field-of-view flag (default="fovflg")
dmax : (float)
Maximum data value to plot (default=500.0)
dmin : (float)
Minimum data value to plot (default=-500.0)
dcolor : (str)
Color map for data (default="RdYlBu")
lat_label : (boolean)
Include latitude label on the y-axis (default=True)
lon_label : (boolean)
Include longitude label on the x-axis (default=True)
gscatter : (boolean)
Include groundscatter (default=True)
draw : (boolean)
Output figure to display? (default=True)
Returns
---------
map_handle : (basemap)
map handle
fovs : (dict)
Dictionary of fields-of-view
hard : (pydarn.radar.site or NoneType)
Hardware data
pcoll : (matplotlib.collections.PolyCollection or NoneType)
Polygon collection of the plotted data, or None if no data was plotted
"""
import davitpy.pydarn.plotting as plotting
import davitpy.pydarn.radar as pyrad
fov_dir = {1:"front", -1:"back"}
# Load the radar location data, if necessary
if hard is None:
try:
hard = pyrad.site(radId=scan[0].stid, dt=scan[0].time)
except:
return None, None, None, None
# Initialize the FoV model parameters, and save the data as well
maxgates = maxgates if maxgates is not None else scan[0].prm.nrang+1
fan_data = np.ones(shape=(hard.maxbeam, maxgates), dtype=float) * np.nan
fan_fov = np.zeros(shape=(hard.maxbeam, maxgates), dtype=int)
if fan_model is None or (fan_model.find('IS') != 0 and fan_model.find('GS') != 0):
fan_model = None
fan_elv = np.ones(shape=(hard.maxbeam, maxgates), dtype=float) * np.nan
fan_alt = np.ones(shape=(hard.maxbeam, maxgates), dtype=float) * np.nan
else:
fan_elv = None
fan_alt = model_alt
for bm in scan:
try:
dat = getattr(bm.fit, dat_attr)
except:
continue
try:
fovflg = getattr(bm.fit, fov_attr)
except:
fovflg = [1 for d in dat]
if fan_model is None:
try:
elv = getattr(bm.fit, elv_attr)
alt = getattr(bm.fit, alt_attr)
except:
continue
for i,d in enumerate(dat):
if bm.fit.slist[i] >= maxgates:
break
gflg = False
if fan_model is None:
fan_elv[bm.bmnum, bm.fit.slist[i]] = elv[i]
fan_alt[bm.bmnum, bm.fit.slist[i]] = alt[i]
gflg = True
elif gscatter:
gflg = True
elif((fan_model.find('IS') == 0 and bm.fit.gflg[i] == 0)
or (fan_model.find('GS') == 0 and bm.fit.gflg[i] == 1)):
gflg = True
if gflg:
fan_data[bm.bmnum, bm.fit.slist[i]] = d
fan_fov[bm.bmnum, bm.fit.slist[i]] = fovflg[i]
# Load the field-of-view data, if necessary
for ff in fovs.keys():
if fovs[ff] is None:
fovs[ff] = pyrad.radFov.fov(site=hard, rsep=scan[0].prm.rsep,
nbeams=hard.maxbeam, ngates=maxgates,
bmsep=hard.bmsep, elevation=fan_elv,
altitude=fan_alt, model = fan_model,
coords="geo", date_time=scan[0].time,
fov_dir=fov_dir[ff])
# Add a map with the field-of-view and beams highlighted
urlat = np.ceil(fovs[1].latFull.max())
urlon = np.ceil(max(fovs[1].lonFull.max(), fovs[-1].lonFull.max()))
urlon = urlon + 15.0 if urlat > 65.0 else urlon + 1.0
lllat = np.floor(fovs[-1].latFull.min()) - 1.0
lllon = np.ceil(min(fovs[1].lonFull.min(), fovs[-1].lonFull.min()))
lllon = lllon - 15.0 if lllat < -65.0 else lllon - 1.0
if map_handle is None:
map_handle = basemap.Basemap(projection="stere", lon_0=hard.geolon,
lat_0=hard.geolat, llcrnrlon=lllon,
llcrnrlat=lllat, urcrnrlon=urlon,
urcrnrlat=urlat, resolution="l")
map_handle.ax = ax
map_handle.drawcoastlines(linewidth=0.5, color="0.6")
map_handle.fillcontinents(color="0.6", alpha=.1)
map_handle.drawmeridians(np.arange(-180.0, 180.0, 15.0),
labels=[0,0,0,lon_label])
map_handle.drawparallels(np.arange(lllat, urlat+1.0, 10.0),
labels=[lat_label,0,0,0])
# Add the field-of-view boundaries
for ff in fovs.keys():
plotting.mapOverlay.overlayFov(map_handle, ids=scan[0].stid,
dateTime=scan[0].time,
beams=plot_beams[ff],
beamsColors=color_beams[ff],
fovObj=fovs[ff])
# Add the radar location and name
norm = mcolors.Normalize(vmin=dmin, vmax=dmax)
plotting.mapOverlay.overlayRadar(map_handle, ids=scan[0].stid,
dateTime=scan[0].time, annotate=True,
fontSize=16)
# Add the data to each field-of-view
bi, si = np.where(fan_fov != 0)
verts = list()
vals = np.ones(shape=bi.shape, dtype=float) * np.nan
for ii,bb in enumerate(bi):
# Get the field-of-view
ff = fovs[fan_fov[bb,si[ii]]]
# Get the polygon vertices
x1, y1 = map_handle(ff.lonFull[bb, si[ii]], ff.latFull[bb, si[ii]])
x2, y2 = map_handle(ff.lonFull[bb, si[ii]+1], ff.latFull[bb, si[ii]+1])
x3, y3 = map_handle(ff.lonFull[bb+1, si[ii]+1],
ff.latFull[bb+1, si[ii]+1])
x4, y4 = map_handle(ff.lonFull[bb+1, si[ii]], ff.latFull[bb+1, si[ii]])
verts.append(((x1,y1),(x2,y2),(x3,y3),(x4,y4),(x1,y1)))
# Assign the data
vals[ii] = (fan_data[bb,si[ii]] if(fan_data[bb,si[ii]] < dmax and
fan_data[bb,si[ii]] > dmin)
else (dmin if fan_data[bb,si[ii]] <= dmin else dmax))
# Overlay the data over the fields-of-view
pcoll = None
if len(verts) > 0:
inx = np.arange(len(verts))
pcoll = mcol.PolyCollection(np.array(verts)[inx], edgecolors='face',
linewidths=0, closed=False, zorder=4,
cmap=cm.get_cmap(dcolor), norm=norm)
pcoll.set_array(vals[inx])
ax.add_collection(pcoll, autolim=True)
if hasattr(scan[0], "scan_time"):
ax.set_title("{:}".format(scan[0].scan_time), fontsize="medium")
else:
ax.set_title("{:}".format(scan[0].time), fontsize="medium")
if draw:
# Draw to screen.
if plt.isinteractive():
plt.draw() #In interactive mode, you just "draw".
else:
# W/o interactive mode, "show" stops the user from typing more
# at the terminal until plots are drawn.
plt.show()
# Return
return(map_handle, fovs, hard, pcoll)
| gpl-3.0 |
Shinichi-Nakagawa/no-ball-db-server | site-cookbooks/sean_lahman/files/default/script/sabr_metrics.py | 2 | 4890 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'Shinichi Nakagawa'
from script.tables import Team
class SabrMetrics(object):
OUTPUT_DATA_TYPE_JSON = 'json'
OUTPUT_DATA_TYPE_FLAME = 'flame'
def __init__(self, session=None):
# Store the database session and related settings
self.session = session
def get_pytagorian__filter_by_league(self, year, lg, data_type=OUTPUT_DATA_TYPE_JSON):
"""
Compute the Pythagorean expectation (filtered by league)
:param year: year(required)
:param lg: league(required)
:param data_type: output data type(default:json)
:return:
"""
return self._get_pytagorian(
self.session.query(Team).filter(
and_(
Team.yearID == year,
Team.lgID == lg,
)
),
data_type=data_type
)
def get_pytagorian__filter_by_division(self, year, lg, div, data_type=OUTPUT_DATA_TYPE_JSON):
"""
Compute the Pythagorean expectation (filtered by division)
:param year: year(required)
:param lg: league(required)
:param div: division(required)
:param data_type: output data type(default:json)
:return:
"""
return self._get_pytagorian(
self.session.query(Team).filter(
and_(
Team.yearID == year,
Team.lgID == lg,
Team.divID == div
)
),
data_type=data_type
)
def get_pytagorian__filter_by_team(self, year, lg, team, data_type=OUTPUT_DATA_TYPE_JSON):
"""
Compute the Pythagorean expectation (filtered by team)
:param year: year(required)
:param lg: league(required)
:param team: team name(required)
:param data_type: output data type(default:json)
:return:
"""
return self._get_pytagorian(
self.session.query(Team).filter(
and_(
Team.yearID == year,
Team.lgID == lg,
Team.teamID == team,
)
),
data_type=data_type
)
def _get_pytagorian(self, query, data_type=OUTPUT_DATA_TYPE_JSON):
"""
Compute the Pythagorean expectation for the teams matched by the query
:param query: query object(required)
:param data_type: output data type(default:json)
:return:
"""
values = []
for row in query.order_by(
Team.yearID.asc(),
Team.lgID.asc(),
Team.divID.asc(),
Team.Rank.asc()
).all():
# Basic team information
values.append(
{
'year': row.yearID,
'team': row.teamID,
'W': row.W,
'L': row.L,
'R': row.R,
'ER': row.ER,
'pytagorian': SabrMetrics._calc_pytagorian(row.R, row.ER),
'win_percent': SabrMetrics._calc_win_percent(row.G, row.W),
}
)
if data_type == SabrMetrics.OUTPUT_DATA_TYPE_JSON:
return values
elif data_type == SabrMetrics.OUTPUT_DATA_TYPE_FLAME:
return []
else:
return values
@classmethod
def _calc_win_percent(cls, g, w):
"""
Compute the winning percentage
:param g: games played
:param w: wins
:return: winning percentage (float)
"""
# float() guards against integer division if this is ever run on Python 2
return float(w) / g
@classmethod
def _calc_pytagorian(cls, r, er):
"""
Compute the Pythagorean expectation
:param r: runs scored
:param er: runs allowed
:return: Pythagorean expectation (float)
"""
# float() guards against integer division if this is ever run on Python 2
return float(r ** 2) / ((r ** 2) + (er ** 2))
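# Worked example: a team scoring 800 runs and allowing 600 is expected to
# win 800**2 / (800**2 + 600**2) = 0.64 of its games.
# >>> SabrMetrics._calc_pytagorian(800, 600)
# 0.64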
from sqlalchemy import *
from sqlalchemy.orm import *
from script.database_config import CONNECTION_TEXT, ENCODING
import matplotlib.pyplot as plt
def main():
engine = create_engine(CONNECTION_TEXT, encoding=ENCODING)
Session = sessionmaker(bind=engine, autoflush=True)
Session.configure(bind=engine)
lh = SabrMetrics(session=Session())
values = lh.get_pytagorian__filter_by_league(2013, 'AL')
print(values)
x, y, labels = [], [], []
for value in values:
x.append(value['win_percent'])
y.append(value['pytagorian'])
labels.append({'x': value['win_percent']-0.01, 'y':value['pytagorian'], 'text': "{team}".format(**value)})
print(x)
print(y)
plt.title('Pytagorean expectation & Winning percentage')
plt.xlabel('Winning percentage')
plt.ylabel('Pythagorean expectation')
for label in labels:
plt.text(label['x'], label['y'], label['text'])
plt.plot(x, y, 'o')
plt.show()
# values = lh.get_pytagorian__filter_by_division(2013, 'NL', 'W')
# print(values)
# values = lh.get_pytagorian__filter_by_team(2011, 'AL', 'OAK')
# print(values)
if __name__ == '__main__':
main() | mit |
ltiao/scikit-learn | sklearn/linear_model/randomized_l1.py | 18 | 23449 | """
Randomized Lasso/Logistic: feature selection based on Lasso and
sparse Logistic Regression
"""
# Author: Gael Varoquaux, Alexandre Gramfort
#
# License: BSD 3 clause
import itertools
from abc import ABCMeta, abstractmethod
import warnings
import numpy as np
from scipy.sparse import issparse
from scipy import sparse
from scipy.interpolate import interp1d
from .base import center_data
from ..base import BaseEstimator, TransformerMixin
from ..externals import six
from ..externals.joblib import Memory, Parallel, delayed
from ..utils import (as_float_array, check_random_state, check_X_y,
check_array, safe_mask)
from ..utils.validation import check_is_fitted
from .least_angle import lars_path, LassoLarsIC
from .logistic import LogisticRegression
from ..exceptions import ConvergenceWarning
###############################################################################
# Randomized linear model: feature selection
def _resample_model(estimator_func, X, y, scaling=.5, n_resampling=200,
n_jobs=1, verbose=False, pre_dispatch='3*n_jobs',
random_state=None, sample_fraction=.75, **params):
random_state = check_random_state(random_state)
# We are generating 1 - weights, and not weights
n_samples, n_features = X.shape
if not (0 < scaling < 1):
raise ValueError(
"'scaling' should be between 0 and 1. Got %r instead." % scaling)
scaling = 1. - scaling
scores_ = 0.0
for active_set in Parallel(n_jobs=n_jobs, verbose=verbose,
pre_dispatch=pre_dispatch)(
delayed(estimator_func)(
X, y, weights=scaling * random_state.random_integers(
0, 1, size=(n_features,)),
mask=(random_state.rand(n_samples) < sample_fraction),
verbose=max(0, verbose - 1),
**params)
for _ in range(n_resampling)):
scores_ += active_set
scores_ /= n_resampling
return scores_
class BaseRandomizedLinearModel(six.with_metaclass(ABCMeta, BaseEstimator,
TransformerMixin)):
"""Base class to implement randomized linear models for feature selection
This implements the strategy by Meinshausen and Buhlmann:
stability selection with randomized sampling, and random re-weighting of
the penalty.
"""
@abstractmethod
def __init__(self):
pass
_center_data = staticmethod(center_data)
def fit(self, X, y):
"""Fit the model using X, y as training data.
Parameters
----------
X : array-like, sparse matrix shape = [n_samples, n_features]
Training data.
y : array-like, shape = [n_samples]
Target values.
Returns
-------
self : object
Returns an instance of self.
"""
X, y = check_X_y(X, y, ['csr', 'csc'], y_numeric=True,
ensure_min_samples=2, estimator=self)
X = as_float_array(X, copy=False)
n_samples, n_features = X.shape
X, y, X_mean, y_mean, X_std = self._center_data(X, y,
self.fit_intercept,
self.normalize)
estimator_func, params = self._make_estimator_and_params(X, y)
memory = self.memory
if isinstance(memory, six.string_types):
memory = Memory(cachedir=memory)
scores_ = memory.cache(
_resample_model, ignore=['verbose', 'n_jobs', 'pre_dispatch']
)(
estimator_func, X, y,
scaling=self.scaling, n_resampling=self.n_resampling,
n_jobs=self.n_jobs, verbose=self.verbose,
pre_dispatch=self.pre_dispatch, random_state=self.random_state,
sample_fraction=self.sample_fraction, **params)
if scores_.ndim == 1:
scores_ = scores_[:, np.newaxis]
self.all_scores_ = scores_
self.scores_ = np.max(self.all_scores_, axis=1)
return self
def _make_estimator_and_params(self, X, y):
"""Return the parameters passed to the estimator"""
raise NotImplementedError
def get_support(self, indices=False):
"""Return a mask, or list, of the features/indices selected."""
check_is_fitted(self, 'scores_')
mask = self.scores_ > self.selection_threshold
return mask if not indices else np.where(mask)[0]
# XXX: the two function below are copy/pasted from feature_selection,
# Should we add an intermediate base class?
def transform(self, X):
"""Transform a new matrix using the selected features"""
mask = self.get_support()
X = check_array(X)
if len(mask) != X.shape[1]:
raise ValueError("X has a different shape than during fitting.")
return check_array(X)[:, safe_mask(X, mask)]
def inverse_transform(self, X):
"""Transform a new matrix using the selected features"""
support = self.get_support()
if X.ndim == 1:
X = X[None, :]
Xt = np.zeros((X.shape[0], support.size))
Xt[:, support] = X
return Xt
###############################################################################
# Randomized lasso: regression settings
def _randomized_lasso(X, y, weights, mask, alpha=1., verbose=False,
precompute=False, eps=np.finfo(np.float).eps,
max_iter=500):
X = X[safe_mask(X, mask)]
y = y[mask]
# Center X and y to avoid fit the intercept
X -= X.mean(axis=0)
y -= y.mean()
alpha = np.atleast_1d(np.asarray(alpha, dtype=np.float64))
X = (1 - weights) * X
with warnings.catch_warnings():
warnings.simplefilter('ignore', ConvergenceWarning)
alphas_, _, coef_ = lars_path(X, y,
Gram=precompute, copy_X=False,
copy_Gram=False, alpha_min=np.min(alpha),
method='lasso', verbose=verbose,
max_iter=max_iter, eps=eps)
if len(alpha) > 1:
if len(alphas_) > 1: # np.min(alpha) < alpha_min
interpolator = interp1d(alphas_[::-1], coef_[:, ::-1],
bounds_error=False, fill_value=0.)
scores = (interpolator(alpha) != 0.0)
else:
scores = np.zeros((X.shape[1], len(alpha)), dtype=np.bool)
else:
scores = coef_[:, -1] != 0.0
return scores
class RandomizedLasso(BaseRandomizedLinearModel):
"""Randomized Lasso.
Randomized Lasso works by resampling the train data and computing
a Lasso on each resampling. In short, the features selected more
often are good features. It is also known as stability selection.
Read more in the :ref:`User Guide <randomized_l1>`.
Parameters
----------
alpha : float, 'aic', or 'bic', optional
The regularization parameter alpha parameter in the Lasso.
Warning: this is not the alpha parameter in the stability selection
article which is scaling.
scaling : float, optional
The alpha parameter in the stability selection article used to
randomly scale the features. Should be between 0 and 1.
sample_fraction : float, optional
The fraction of samples to be used in each randomized design.
Should be between 0 and 1. If 1, all samples are used.
n_resampling : int, optional
Number of randomized models.
selection_threshold : float, optional
The score above which features should be selected.
fit_intercept : boolean, optional
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
verbose : boolean or integer, optional
Sets the verbosity amount
normalize : boolean, optional, default True
If True, the regressors X will be normalized before regression.
precompute : True | False | 'auto'
Whether to use a precomputed Gram matrix to speed up
calculations. If set to 'auto' let us decide. The Gram
matrix can also be passed as argument.
max_iter : integer, optional
Maximum number of iterations to perform in the Lars algorithm.
eps : float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems. Unlike the 'tol' parameter in some iterative
optimization-based algorithms, this parameter does not control
the tolerance of the optimization.
n_jobs : integer, optional
Number of CPUs to use during the resampling. If '-1', use
all the CPUs
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
memory : Instance of joblib.Memory or string
Used for internal caching. By default, no caching is done.
If a string is given, it is the path to the caching directory.
Attributes
----------
scores_ : array, shape = [n_features]
Feature scores between 0 and 1.
all_scores_ : array, shape = [n_features, n_reg_parameter]
Feature scores between 0 and 1 for all values of the regularization \
parameter. The reference article suggests ``scores_`` is the max of \
``all_scores_``.
Examples
--------
>>> from sklearn.linear_model import RandomizedLasso
>>> randomized_lasso = RandomizedLasso()
Notes
-----
See examples/linear_model/plot_sparse_recovery.py for an example.
References
----------
Stability selection
Nicolai Meinshausen, Peter Buhlmann
Journal of the Royal Statistical Society: Series B
Volume 72, Issue 4, pages 417-473, September 2010
DOI: 10.1111/j.1467-9868.2010.00740.x
See also
--------
RandomizedLogisticRegression, LogisticRegression
"""
def __init__(self, alpha='aic', scaling=.5, sample_fraction=.75,
n_resampling=200, selection_threshold=.25,
fit_intercept=True, verbose=False,
normalize=True, precompute='auto',
max_iter=500,
eps=np.finfo(np.float).eps, random_state=None,
n_jobs=1, pre_dispatch='3*n_jobs',
memory=Memory(cachedir=None, verbose=0)):
self.alpha = alpha
self.scaling = scaling
self.sample_fraction = sample_fraction
self.n_resampling = n_resampling
self.fit_intercept = fit_intercept
self.max_iter = max_iter
self.verbose = verbose
self.normalize = normalize
self.precompute = precompute
self.eps = eps
self.random_state = random_state
self.n_jobs = n_jobs
self.selection_threshold = selection_threshold
self.pre_dispatch = pre_dispatch
self.memory = memory
def _make_estimator_and_params(self, X, y):
assert self.precompute in (True, False, None, 'auto')
alpha = self.alpha
if alpha in ('aic', 'bic'):
model = LassoLarsIC(precompute=self.precompute,
criterion=self.alpha,
max_iter=self.max_iter,
eps=self.eps)
model.fit(X, y)
self.alpha_ = alpha = model.alpha_
return _randomized_lasso, dict(alpha=alpha, max_iter=self.max_iter,
eps=self.eps,
precompute=self.precompute)
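# Typical stability-selection usage (sketch; `X`, `y` and the alpha value are
# placeholders, not values recommended by this module):
# >>> rlasso = RandomizedLasso(alpha=0.025, n_resampling=200, random_state=0)
# >>> rlasso.fit(X, y)
# >>> selected = rlasso.get_support(indices=True)  # indices of stable features
# >>> scores = rlasso.scores_  # selection frequency of each feature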
###############################################################################
# Randomized logistic: classification settings
def _randomized_logistic(X, y, weights, mask, C=1., verbose=False,
fit_intercept=True, tol=1e-3):
X = X[safe_mask(X, mask)]
y = y[mask]
if issparse(X):
size = len(weights)
weight_dia = sparse.dia_matrix((1 - weights, 0), (size, size))
X = X * weight_dia
else:
X *= (1 - weights)
C = np.atleast_1d(np.asarray(C, dtype=np.float64))
scores = np.zeros((X.shape[1], len(C)), dtype=np.bool)
for this_C, this_scores in zip(C, scores.T):
# XXX : would be great to do it with a warm_start ...
clf = LogisticRegression(C=this_C, tol=tol, penalty='l1', dual=False,
fit_intercept=fit_intercept)
clf.fit(X, y)
this_scores[:] = np.any(
np.abs(clf.coef_) > 10 * np.finfo(np.float).eps, axis=0)
return scores
class RandomizedLogisticRegression(BaseRandomizedLinearModel):
"""Randomized Logistic Regression
Randomized Regression works by resampling the train data and computing
a LogisticRegression on each resampling. In short, the features selected
more often are good features. It is also known as stability selection.
Read more in the :ref:`User Guide <randomized_l1>`.
Parameters
----------
C : float, optional, default=1
The regularization parameter C in the LogisticRegression.
scaling : float, optional, default=0.5
The alpha parameter in the stability selection article used to
randomly scale the features. Should be between 0 and 1.
sample_fraction : float, optional, default=0.75
The fraction of samples to be used in each randomized design.
Should be between 0 and 1. If 1, all samples are used.
n_resampling : int, optional, default=200
Number of randomized models.
selection_threshold : float, optional, default=0.25
The score above which features should be selected.
fit_intercept : boolean, optional, default=True
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
verbose : boolean or integer, optional
Sets the verbosity amount
normalize : boolean, optional, default=True
If True, the regressors X will be normalized before regression.
tol : float, optional, default=1e-3
tolerance for stopping criteria of LogisticRegression
n_jobs : integer, optional
Number of CPUs to use during the resampling. If '-1', use
all the CPUs
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
memory : Instance of joblib.Memory or string
Used for internal caching. By default, no caching is done.
If a string is given, it is the path to the caching directory.
Attributes
----------
scores_ : array, shape = [n_features]
Feature scores between 0 and 1.
all_scores_ : array, shape = [n_features, n_reg_parameter]
Feature scores between 0 and 1 for all values of the regularization \
parameter. The reference article suggests ``scores_`` is the max \
of ``all_scores_``.
Examples
--------
>>> from sklearn.linear_model import RandomizedLogisticRegression
>>> randomized_logistic = RandomizedLogisticRegression()
Notes
-----
See examples/linear_model/plot_sparse_recovery.py for an example.
References
----------
Stability selection
Nicolai Meinshausen, Peter Buhlmann
Journal of the Royal Statistical Society: Series B
Volume 72, Issue 4, pages 417-473, September 2010
DOI: 10.1111/j.1467-9868.2010.00740.x
See also
--------
RandomizedLasso, Lasso, ElasticNet
"""
def __init__(self, C=1, scaling=.5, sample_fraction=.75,
n_resampling=200,
selection_threshold=.25, tol=1e-3,
fit_intercept=True, verbose=False,
normalize=True,
random_state=None,
n_jobs=1, pre_dispatch='3*n_jobs',
memory=Memory(cachedir=None, verbose=0)):
self.C = C
self.scaling = scaling
self.sample_fraction = sample_fraction
self.n_resampling = n_resampling
self.fit_intercept = fit_intercept
self.verbose = verbose
self.normalize = normalize
self.tol = tol
self.random_state = random_state
self.n_jobs = n_jobs
self.selection_threshold = selection_threshold
self.pre_dispatch = pre_dispatch
self.memory = memory
def _make_estimator_and_params(self, X, y):
params = dict(C=self.C, tol=self.tol,
fit_intercept=self.fit_intercept)
return _randomized_logistic, params
def _center_data(self, X, y, fit_intercept, normalize=False):
"""Center the data in X but not in y"""
X, _, Xmean, _, X_std = center_data(X, y, fit_intercept,
normalize=normalize)
return X, y, Xmean, y, X_std
###############################################################################
# Stability paths
def _lasso_stability_path(X, y, mask, weights, eps):
"Inner loop of lasso_stability_path"
X = X * weights[np.newaxis, :]
X = X[safe_mask(X, mask), :]
y = y[mask]
alpha_max = np.max(np.abs(np.dot(X.T, y))) / X.shape[0]
alpha_min = eps * alpha_max # set for early stopping in path
with warnings.catch_warnings():
warnings.simplefilter('ignore', ConvergenceWarning)
alphas, _, coefs = lars_path(X, y, method='lasso', verbose=False,
alpha_min=alpha_min)
# Scale alpha by alpha_max
alphas /= alphas[0]
# Sort alphas in ascending order
alphas = alphas[::-1]
coefs = coefs[:, ::-1]
# Get rid of the alphas that are too small
mask = alphas >= eps
# We also want to keep the first one: it should be close to the OLS
# solution
mask[0] = True
alphas = alphas[mask]
coefs = coefs[:, mask]
return alphas, coefs
def lasso_stability_path(X, y, scaling=0.5, random_state=None,
n_resampling=200, n_grid=100,
sample_fraction=0.75,
eps=4 * np.finfo(np.float).eps, n_jobs=1,
verbose=False):
"""Stabiliy path based on randomized Lasso estimates
Read more in the :ref:`User Guide <randomized_l1>`.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
training data.
y : array-like, shape = [n_samples]
target values.
scaling : float, optional, default=0.5
The alpha parameter in the stability selection article used to
randomly scale the features. Should be between 0 and 1.
random_state : integer or numpy.random.RandomState, optional
The generator used to randomize the design.
n_resampling : int, optional, default=200
Number of randomized models.
n_grid : int, optional, default=100
Number of grid points. The path is linearly reinterpolated
on a grid between 0 and 1 before computing the scores.
sample_fraction : float, optional, default=0.75
The fraction of samples to be used in each randomized design.
Should be between 0 and 1. If 1, all samples are used.
eps : float, optional
Smallest value of alpha / alpha_max considered
n_jobs : integer, optional
Number of CPUs to use during the resampling. If '-1', use
all the CPUs
verbose : boolean or integer, optional
Sets the verbosity amount
Returns
-------
alphas_grid : array, shape ~ [n_grid]
The grid points between 0 and 1: alpha/alpha_max
scores_path : array, shape = [n_features, n_grid]
The scores for each feature along the path.
Notes
-----
See examples/linear_model/plot_sparse_recovery.py for an example.
"""
rng = check_random_state(random_state)
if not (0 < scaling < 1):
raise ValueError("Parameter 'scaling' should be between 0 and 1."
" Got %r instead." % scaling)
n_samples, n_features = X.shape
paths = Parallel(n_jobs=n_jobs, verbose=verbose)(
delayed(_lasso_stability_path)(
X, y, mask=rng.rand(n_samples) < sample_fraction,
weights=1. - scaling * rng.random_integers(0, 1,
size=(n_features,)),
eps=eps)
for k in range(n_resampling))
all_alphas = sorted(list(set(itertools.chain(*[p[0] for p in paths]))))
# Take approximately n_grid values
stride = int(max(1, int(len(all_alphas) / float(n_grid))))
all_alphas = all_alphas[::stride]
if not all_alphas[-1] == 1:
all_alphas.append(1.)
all_alphas = np.array(all_alphas)
scores_path = np.zeros((n_features, len(all_alphas)))
for alphas, coefs in paths:
if alphas[0] != 0:
alphas = np.r_[0, alphas]
coefs = np.c_[np.ones((n_features, 1)), coefs]
if alphas[-1] != all_alphas[-1]:
alphas = np.r_[alphas, all_alphas[-1]]
coefs = np.c_[coefs, np.zeros((n_features, 1))]
scores_path += (interp1d(alphas, coefs,
kind='nearest', bounds_error=False,
fill_value=0, axis=-1)(all_alphas) != 0)
scores_path /= n_resampling
return all_alphas, scores_path
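# Plotting sketch (hypothetical; `X` and `y` are placeholders for the design
# matrix and target):
# >>> import matplotlib.pyplot as plt
# >>> alpha_grid, scores_path = lasso_stability_path(X, y, random_state=42)
# >>> plt.plot(alpha_grid, scores_path.T)
# >>> plt.xlabel('alpha / alpha_max')
# >>> plt.ylabel('selection frequency')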
| bsd-3-clause |
timpalpant/KaggleTSTextClassification | scripts/prepare_features.py | 1 | 11216 | '''
Prepare features from "raw" data for predictors
(note the "raw" data must first be converted to npz
using csv_to_npz.py)
@author Timothy Palpant <[email protected]>
@date October 18, 2014
'''
import os, logging, gc
import cPickle as pickle
import numpy as np
from scipy import sparse
from sklearn.utils import murmurhash3_32
class EncodingCache(object):
'''Cache the encoding materializations we have generated before'''
cachedir = '/Users/timpalpant/Documents/Workspace/kaggle/TextClassification/data/materializations/'
enabled = True
@classmethod
def get(cls, encoder, data, indices):
# Check if there is a saved copy of this encoding
if cls.contains(encoder, data, indices):
return cls.cache_get(encoder, data, indices)
enc = encoder()
X = enc.prepare(data, indices)
if cls.enabled:
cls.put(encoder, data, indices, enc, X)
return enc, X
@classmethod
def cache_get(cls, encoder, data, indices):
logging.info("Loading encoded materialization from cache")
pkl, npz = cls.hash(encoder, data, indices)
with open(pkl, 'r') as fd:
enc = pickle.load(fd)
loader = np.load(npz)
try: # reconstruct sparse arrays
X = sparse.csr_matrix((loader['data'], loader['indices'], loader['indptr']),
shape=loader['shape'])
except: # dense arrays
X = loader['X']
return enc, X
@classmethod
def contains(cls, encoder, data, indices):
pkl, npz = cls.hash(encoder, data, indices)
return os.path.isfile(pkl)
@classmethod
def put(cls, encoder, data, indices, enc, X):
logging.info("Saving encoded materialization to cache")
pkl, npz = cls.hash(encoder, data, indices)
with open(pkl, 'w') as fd:
pickle.dump(enc, fd, pickle.HIGHEST_PROTOCOL)
try: # sparse arrays
np.savez(npz, data=X.data, indices=X.indices,
indptr=X.indptr, shape=X.shape)
except: # dense arrays
np.savez(npz, X=X)
@classmethod
def hash(cls, encoder, data, indices):
npzname = os.path.abspath(data.fid.name)
ih = None
if indices is not None:
tmp = indices.flags.writeable
indices.flags.writeable = False
ih = hash(indices.data)
indices.flags.writeable = tmp
h = str(abs(hash((str(encoder), npzname, ih))))
pkl = cls.cachedir+h+'.pkl'
npz = cls.cachedir+h+'.npz'
return pkl, npz
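# Typical use (sketch; `data` is the npz loader produced by csv_to_npz.py and
# `indices` an optional row subset for cross-validation):
# >>> enc, X = EncodingCache.get(TSOneHotAllEncoder, data, indices=None)
# Repeated calls with the same encoder/data/indices reuse the cached
# materialization instead of re-encoding.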
class TSFeatureEncoder(object):
'''
Takes the "raw" features from npz files
and performs various encoding / engineering operations.
If indices are provided, they should represent a subset
of the rows in the feature matrix (i.e. for cross-validation)
'''
pass
class TSRawEncoder(TSFeatureEncoder):
'''Just return all of the "raw" features, no processing'''
def prepare(self, features, indices=None):
logging.info("Preparing raw feature matrix")
bfs = features['bfeatures']
ffs = features['ffeatures']
ifs = features['ifeatures']
sfs = features['sfeatures']
if indices is not None:
bfs = bfs[indices]
ffs = ffs[indices]
ifs = ifs[indices]
sfs = sfs[indices]
X = np.hstack((bfs, ffs, ifs, sfs))
del bfs, ffs, ifs, sfs
return X
class TSOneHotAllEncoder(TSFeatureEncoder):
'''one-hot encode everything exceeding frequency cutoff'''
freq_cutoff = 5
float_decimals = 2
def __init__(self):
# The first time prepare is called, it will make a new encoder.
# Subsequent calls will re-use this encoder.
self.encoder = None
def prepare(self, features, indices=None, dtype=float):
logging.info("One-hot encoding all features")
bfs = features['bfeatures']
ffs = features['ffeatures']
ifs = features['ifeatures']
sfs = features['sfeatures']
if indices is not None:
bfs = bfs[indices]
ffs = ffs[indices]
ifs = ifs[indices]
sfs = sfs[indices]
X = np.hstack((bfs, ffs, ifs, sfs))
del bfs, ffs, ifs, sfs
if self.encoder is None:
self.encoder = OneHotEncoder()
self.encoder.fit(X, self.freq_cutoff)
X = self.encoder.transform(X, dtype)
return X
class TSOneHotHashingEncoder(TSFeatureEncoder):
'''one-hot encode everything with hashing trick'''
D = 2 ** 20
float_decimals = 2
def prepare(self, features, indices=None, dtype=float):
logging.info("One-hot hashing all features")
bfs = features['bfeatures']
ffs = features['ffeatures']
ifs = features['ifeatures']
sfs = features['sfeatures']
if indices is not None:
bfs = bfs[indices]
ffs = ffs[indices]
ifs = ifs[indices]
sfs = sfs[indices]
X = np.hstack((bfs, ffs, ifs, sfs))
del bfs, ffs, ifs, sfs
nrows = X.shape[0]
ncols = X.shape[1]
ij = np.zeros((2, nrows*ncols), dtype=int) # row, col indices
for i, row in enumerate(X):
if i % 100000 == 0: logging.debug(i)
start = i * ncols
end = (i+1) * ncols
ij[0,start:end] = i
for j, x in enumerate(row):
ij[1,start+j] = murmurhash3_32('%d_%s' % (j,x), seed=42, positive=True) % self.D
data = np.ones(ij.shape[1], dtype=dtype) # all ones
X = sparse.csr_matrix((data, ij), shape=(nrows, self.D), dtype=dtype)
return X
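# Hashing-trick illustration: each (column index, value) pair is hashed
# directly to one of D = 2**20 columns, so no vocabulary needs to be stored
# and unseen test values still map to a stable column, e.g.
# >>> murmurhash3_32('%d_%s' % (3, 'foo'), seed=42, positive=True) % (2 ** 20)
# gives the column used for value 'foo' of feature 3 (hash collisions are
# possible but rare relative to D).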
class TSOneHotHashingStringPairsEncoder(TSOneHotHashingEncoder):
'''one-hot encode everything with hashing trick, plus pairs of string features'''
def prepare(self, features, indices=None, dtype=float):
X1 = super(TSOneHotHashingStringPairsEncoder, self).prepare(features, indices)
logging.info("One-hot hashing pairs of string features")
sfs = features['sfeatures']
if indices is not None:
sfs = sfs[indices]
nrows = sfs.shape[0]
ncols = sfs.shape[1]*(sfs.shape[1]-1) / 2
ij = np.zeros((2, nrows*ncols), dtype=int) # row, col indices
for i, row in enumerate(sfs):
if i % 100000 == 0: logging.debug(i)
start = i * ncols
end = (i+1) * ncols
ij[0,start:end] = i
ij[1,start:end] = [murmurhash3_32('%d_%s_x_%d_%s' % (j1,x1,j2,row[j2]),
seed=42, positive=True) % self.D
for j1, x1 in enumerate(row)
for j2 in xrange(j1)]
data = np.ones(ij.shape[1], dtype=dtype) # all ones
X2 = sparse.csr_matrix((data, ij), shape=(nrows, self.D), dtype=dtype)
X = X1 + X2
X.data[X.data > 1] = 1
return X
class TSOneHotHashingPairsEncoder(TSOneHotHashingEncoder):
'''
one-hot encode everything with hashing trick,
plus pairs of string and boolean features
'''
def prepare(self, features, indices=None, dtype=float):
X1 = super(TSOneHotHashingPairsEncoder, self).prepare(features, indices)
logging.info("One-hot hashing pairs of string and boolean features")
sfs = features['sfeatures']
bfs = features['bfeatures']
if indices is not None:
sfs = sfs[indices]
bfs = bfs[indices]
X = np.hstack((sfs, bfs))
del sfs, bfs
nrows = X.shape[0]
ncols = X.shape[1]*(X.shape[1]-1) / 2
ij = np.zeros((2, nrows*ncols), dtype=int) # row, col indices
for i, row in enumerate(X):
if i % 100000 == 0: logging.debug(i)
start = i * ncols
end = (i+1) * ncols
ij[0,start:end] = i
ij[1,start:end] = [murmurhash3_32('%d_%s_x_%d_%s' % (j1,x1,j2,row[j2]),
seed=42, positive=True) % self.D
for j1, x1 in enumerate(row)
for j2 in xrange(j1)]
data = np.ones(ij.shape[1], dtype=dtype) # all ones
X2 = sparse.csr_matrix((data, ij), shape=(nrows, self.D), dtype=dtype)
X = X1 + X2
X.data[X.data > 1] = 1
return X
class OneHotEncoder(object):
'''
will transform categorical feature X into one-hot encoded features X_hot
I tried to use sklearn's OneHotEncoder, but it doesn't offer a
good way to reapply the same encoding to the test data if the
test data contains new (never seen) values. There's an open ticket
to address this.
'''
def fit(self, X, freq_cutoff=0):
'''
Fit encoder to values in @X having frequency > @freq_cutoff
'''
logging.debug("Making one-hot encoder for %dx%d feature matrix" % X.shape)
self.value_to_col = []
offset = 0
for i, x in enumerate(X.T):
logging.debug("processing column %d" % i)
values = self.unique_values(x, freq_cutoff)
d = {v: j+offset for j, v in enumerate(values)}
offset += len(d)
self.value_to_col.append(d)
self.ncols = offset + len(self.value_to_col)
def transform(self, X, dtype=np.float64):
'''
Apply encoder to values in @X.
Returns a sparse boolean matrix.
'''
# Make a sparse boolean matrix with one-hot encoded features
# one column for each categorical value of each colum in @X
# plus one column to signify 'other' for each column of @X
nrows = X.shape[0]
ncols = X.shape[1]
logging.debug("Making %dx%d one-hot matrix" % (nrows, self.ncols))
i = np.zeros(nrows*ncols, dtype=np.uint32)
j = np.zeros(nrows*ncols, dtype=np.uint32)
data = np.ones(i.shape[0], dtype=np.uint8) # all ones
for k in xrange(nrows): # for each data row in original matrix
if k % 100000 == 0:
gc.collect()
logging.debug(k)
start = k * ncols
end = (k+1) * ncols
i[start:end] = k # set row indices to current row index
for l in xrange(ncols):
# unseen values fall into the per-column 'other' bucket at the end
j[start+l] = self.value_to_col[l].get(X[k,l], self.ncols-l-1)
X_hot = sparse.csr_matrix((data, (i,j)), shape=(nrows, self.ncols), dtype=dtype)
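# Typical use (sketch; X_train / X_test are placeholder categorical matrices):
# >>> enc = OneHotEncoder()
# >>> enc.fit(X_train, freq_cutoff=5)
# >>> X_train_hot = enc.transform(X_train)
# >>> X_test_hot = enc.transform(X_test)  # unseen values -> 'other' columns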
return X_hot
def unique_values(self, x, freq_cutoff=0):
'''
Return unique values in @x having frequency > cutoff
'''
# sort values by frequency - note scipy.stats.itemfreq is much slower
values, inv = np.unique(x, return_inverse=True)
freq = np.bincount(inv)
logging.debug("%d unique features" % len(values))
idx = np.argsort(freq)[::-1]
values = values[idx]
freq = freq[idx]
if freq_cutoff is not None:
values = values[freq > freq_cutoff]
logging.debug("%d features retained" % len(values))
return values | gpl-3.0 |
deepmind/dm_alchemy | setup.py | 1 | 4824 | # Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Install script for setuptools."""
from distutils import cmd
import imp
import os
import pkg_resources
from setuptools import find_namespace_packages
from setuptools import setup
from setuptools.command.build_ext import build_ext
from setuptools.command.build_py import build_py
_ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
# Tuple of proto message definitions to build Python bindings for. Paths must
# be relative to root directory.
_DM_ALCHEMY_PROTOS = (
'dm_alchemy/protos/alchemy.proto',
'dm_alchemy/protos/trial.proto',
'dm_alchemy/protos/color_info.proto',
'dm_alchemy/protos/unity_types.proto',
'dm_alchemy/protos/episode_info.proto',
'dm_alchemy/protos/events.proto',
'dm_alchemy/protos/hypercube.proto',
'dm_alchemy/encode/chemistries.proto',
'dm_alchemy/encode/symbolic_actions.proto',
'dm_alchemy/encode/precomputed_maps.proto')
class _GenerateProtoFiles(cmd.Command):
"""Command to generate protobuf bindings for dm_alchemy."""
description = 'Generates Python protobuf bindings.'
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
# Import grpc_tools here, after setuptools has installed setup_requires
# dependencies.
from grpc_tools import protoc # pylint: disable=g-import-not-at-top
grpc_protos_include = pkg_resources.resource_filename(
'grpc_tools', '_proto')
for proto_path in _DM_ALCHEMY_PROTOS:
proto_args = [
'grpc_tools.protoc',
'--proto_path={}'.format(grpc_protos_include),
'--proto_path={}'.format(_ROOT_DIR),
'--python_out={}'.format(_ROOT_DIR),
'--grpc_python_out={}'.format(_ROOT_DIR),
os.path.join(_ROOT_DIR, proto_path),
]
if protoc.main(proto_args) != 0:
raise RuntimeError('ERROR: {}'.format(proto_args))
class _BuildExt(build_ext):
"""Generate protobuf bindings in build_ext stage."""
def run(self):
self.run_command('generate_protos')
build_ext.run(self)
class _BuildPy(build_py):
"""Generate protobuf bindings in build_py stage."""
def run(self):
self.run_command('generate_protos')
build_py.run(self)
setup(
name='dm-alchemy',
version=imp.load_source('_version',
'dm_alchemy/_version.py').__version__,
description=('DeepMind Alchemy environment, a meta-reinforcement learning'
'benchmark environment for deep RL agents.'),
author='DeepMind',
license='Apache License, Version 2.0',
keywords='reinforcement-learning python machine learning',
packages=find_namespace_packages(exclude=['examples']),
package_data={
'dm_alchemy.encode': ['*.proto'],
'dm_alchemy.protos': ['*.proto'],
'dm_alchemy.chemistries': ['**/**'],
'dm_alchemy.ideal_observer.data': ['**/**'],
'dm_alchemy.agent_events': ['**'],
},
install_requires=[
'absl-py',
'dataclasses',
'dm-env',
'dm-env-rpc>=1.0.4',
'dm-tree',
'docker',
'grpcio',
'numpy',
'scipy>=1.4.0',
'portpicker',
],
tests_require=['nose'],
python_requires='>=3.6.1',
setup_requires=['grpcio-tools'],
extras_require={
'examples': [
'pygame',
'ipykernel',
'matplotlib',
'seaborn',
]},
cmdclass={
'build_ext': _BuildExt,
'build_py': _BuildPy,
'generate_protos': _GenerateProtoFiles,
},
test_suite='nose.collector',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: Apache Software License',
'Operating System :: POSIX :: Linux',
'Operating System :: Microsoft :: Windows',
'Operating System :: MacOS :: MacOS X',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
],
)
| apache-2.0 |
maciejkula/scipy | scipy/stats/_distn_infrastructure.py | 1 | 112850 | #
# Author: Travis Oliphant 2002-2011 with contributions from
# SciPy Developers 2004-2011
#
from __future__ import division, print_function, absolute_import
from scipy.lib.six import string_types, exec_
import sys
import keyword
import re
import inspect
import types
import warnings
from scipy.misc import doccer
from ._distr_params import distcont, distdiscrete
from scipy.lib._util import check_random_state
from scipy.special import (comb, chndtr, gammaln, hyp0f1,
entr, kl_div)
# for root finding for discrete distribution ppf, and max likelihood estimation
from scipy import optimize
# for functions of continuous distributions (e.g. moments, entropy, cdf)
from scipy import integrate
# to approximate the pdf of a continuous distribution given its cdf
from scipy.misc import derivative
from numpy import (arange, putmask, ravel, take, ones, sum, shape,
product, reshape, zeros, floor, logical_and, log, sqrt, exp,
ndarray)
from numpy import (place, any, argsort, argmax, vectorize,
asarray, nan, inf, isinf, NINF, empty)
import numpy as np
from ._constants import _EPS, _XMAX
try:
from new import instancemethod
except ImportError:
# Python 3
def instancemethod(func, obj, cls):
return types.MethodType(func, obj)
# These are the docstring parts used for substitution in specific
# distribution docstrings
docheaders = {'methods': """\nMethods\n-------\n""",
'parameters': """\nParameters\n---------\n""",
'notes': """\nNotes\n-----\n""",
'examples': """\nExamples\n--------\n"""}
_doc_rvs = """\
``rvs(%(shapes)s, loc=0, scale=1, size=1, random_state=None)``
Random variates.
"""
_doc_pdf = """\
``pdf(x, %(shapes)s, loc=0, scale=1)``
Probability density function.
"""
_doc_logpdf = """\
``logpdf(x, %(shapes)s, loc=0, scale=1)``
Log of the probability density function.
"""
_doc_pmf = """\
``pmf(x, %(shapes)s, loc=0, scale=1)``
Probability mass function.
"""
_doc_logpmf = """\
``logpmf(x, %(shapes)s, loc=0, scale=1)``
Log of the probability mass function.
"""
_doc_cdf = """\
``cdf(x, %(shapes)s, loc=0, scale=1)``
Cumulative distribution function.
"""
_doc_logcdf = """\
``logcdf(x, %(shapes)s, loc=0, scale=1)``
Log of the cumulative distribution function.
"""
_doc_sf = """\
``sf(x, %(shapes)s, loc=0, scale=1)``
Survival function (1-cdf --- sometimes more accurate).
"""
_doc_logsf = """\
``logsf(x, %(shapes)s, loc=0, scale=1)``
Log of the survival function.
"""
_doc_ppf = """\
``ppf(q, %(shapes)s, loc=0, scale=1)``
Percent point function (inverse of cdf --- percentiles).
"""
_doc_isf = """\
``isf(q, %(shapes)s, loc=0, scale=1)``
Inverse survival function (inverse of sf).
"""
_doc_moment = """\
``moment(n, %(shapes)s, loc=0, scale=1)``
Non-central moment of order n
"""
_doc_stats = """\
``stats(%(shapes)s, loc=0, scale=1, moments='mv')``
Mean('m'), variance('v'), skew('s'), and/or kurtosis('k').
"""
_doc_entropy = """\
``entropy(%(shapes)s, loc=0, scale=1)``
(Differential) entropy of the RV.
"""
_doc_fit = """\
``fit(data, %(shapes)s, loc=0, scale=1)``
Parameter estimates for generic data.
"""
_doc_expect = """\
``expect(func, %(shapes)s, loc=0, scale=1, lb=None, ub=None, conditional=False, **kwds)``
Expected value of a function (of one argument) with respect to the distribution.
"""
_doc_expect_discrete = """\
``expect(func, %(shapes)s, loc=0, lb=None, ub=None, conditional=False)``
Expected value of a function (of one argument) with respect to the distribution.
"""
_doc_median = """\
``median(%(shapes)s, loc=0, scale=1)``
Median of the distribution.
"""
_doc_mean = """\
``mean(%(shapes)s, loc=0, scale=1)``
Mean of the distribution.
"""
_doc_var = """\
``var(%(shapes)s, loc=0, scale=1)``
Variance of the distribution.
"""
_doc_std = """\
``std(%(shapes)s, loc=0, scale=1)``
Standard deviation of the distribution.
"""
_doc_interval = """\
``interval(alpha, %(shapes)s, loc=0, scale=1)``
Endpoints of the range that contains alpha percent of the distribution
"""
_doc_allmethods = ''.join([docheaders['methods'], _doc_rvs, _doc_pdf,
_doc_logpdf, _doc_cdf, _doc_logcdf, _doc_sf,
_doc_logsf, _doc_ppf, _doc_isf, _doc_moment,
_doc_stats, _doc_entropy, _doc_fit,
_doc_expect, _doc_median,
_doc_mean, _doc_var, _doc_std, _doc_interval])
# Note that the two lines for %(shapes) are searched for and replaced in
# rv_continuous and rv_discrete - update there if the exact string changes
_doc_default_callparams = """
Parameters
----------
x : array_like
quantiles
q : array_like
lower or upper tail probability
%(shapes)s : array_like
shape parameters
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
size : int or tuple of ints, optional
shape of random variates (default computed from input arguments)
moments : str, optional
composed of letters ['mvsk'] specifying which moments to compute where
'm' = mean, 'v' = variance, 's' = (Fisher's) skew and
'k' = (Fisher's) kurtosis.
Default is 'mv'.
"""
_doc_default_longsummary = """\
Continuous random variables are defined from a standard form and may
require some shape parameters to complete their specification. Any
optional keyword parameters can be passed to the methods of the RV
object as given below:
"""
_doc_default_frozen_note = """
Alternatively, the object may be called (as a function) to fix the shape,
location, and scale parameters returning a "frozen" continuous RV object:
rv = %(name)s(%(shapes)s, loc=0, scale=1)
- Frozen RV object with the same methods but holding the given shape,
location, and scale fixed.
"""
_doc_default_example = """\
Examples
--------
>>> from scipy.stats import %(name)s
>>> import matplotlib.pyplot as plt
>>> fig, ax = plt.subplots(1, 1)
Calculate a few first moments:
%(set_vals_stmt)s
>>> mean, var, skew, kurt = %(name)s.stats(%(shapes)s, moments='mvsk')
Display the probability density function (``pdf``):
>>> x = np.linspace(%(name)s.ppf(0.01, %(shapes)s),
... %(name)s.ppf(0.99, %(shapes)s), 100)
>>> ax.plot(x, %(name)s.pdf(x, %(shapes)s),
... 'r-', lw=5, alpha=0.6, label='%(name)s pdf')
Alternatively, freeze the distribution and display the frozen pdf:
>>> rv = %(name)s(%(shapes)s)
>>> ax.plot(x, rv.pdf(x), 'k-', lw=2, label='frozen pdf')
Check accuracy of ``cdf`` and ``ppf``:
>>> vals = %(name)s.ppf([0.001, 0.5, 0.999], %(shapes)s)
>>> np.allclose([0.001, 0.5, 0.999], %(name)s.cdf(vals, %(shapes)s))
True
Generate random numbers:
>>> r = %(name)s.rvs(%(shapes)s, size=1000)
And compare the histogram:
>>> ax.hist(r, normed=True, histtype='stepfilled', alpha=0.2)
>>> ax.legend(loc='best', frameon=False)
>>> plt.show()
"""
_doc_default = ''.join([_doc_default_longsummary,
_doc_allmethods,
_doc_default_callparams,
_doc_default_frozen_note,
_doc_default_example])
_doc_default_before_notes = ''.join([_doc_default_longsummary,
_doc_allmethods,
_doc_default_callparams,
_doc_default_frozen_note])
docdict = {
'rvs': _doc_rvs,
'pdf': _doc_pdf,
'logpdf': _doc_logpdf,
'cdf': _doc_cdf,
'logcdf': _doc_logcdf,
'sf': _doc_sf,
'logsf': _doc_logsf,
'ppf': _doc_ppf,
'isf': _doc_isf,
'stats': _doc_stats,
'entropy': _doc_entropy,
'fit': _doc_fit,
'moment': _doc_moment,
'expect': _doc_expect,
'interval': _doc_interval,
'mean': _doc_mean,
'std': _doc_std,
'var': _doc_var,
'median': _doc_median,
'allmethods': _doc_allmethods,
'callparams': _doc_default_callparams,
'longsummary': _doc_default_longsummary,
'frozennote': _doc_default_frozen_note,
'example': _doc_default_example,
'default': _doc_default,
'before_notes': _doc_default_before_notes
}
# Reuse common content between continuous and discrete docs, change some
# minor bits.
docdict_discrete = docdict.copy()
docdict_discrete['pmf'] = _doc_pmf
docdict_discrete['logpmf'] = _doc_logpmf
docdict_discrete['expect'] = _doc_expect_discrete
_doc_disc_methods = ['rvs', 'pmf', 'logpmf', 'cdf', 'logcdf', 'sf', 'logsf',
'ppf', 'isf', 'stats', 'entropy', 'expect', 'median',
'mean', 'var', 'std', 'interval']
for obj in _doc_disc_methods:
docdict_discrete[obj] = docdict_discrete[obj].replace(', scale=1', '')
docdict_discrete.pop('pdf')
docdict_discrete.pop('logpdf')
_doc_allmethods = ''.join([docdict_discrete[obj] for obj in _doc_disc_methods])
docdict_discrete['allmethods'] = docheaders['methods'] + _doc_allmethods
docdict_discrete['longsummary'] = _doc_default_longsummary.replace(
'Continuous', 'Discrete')
_doc_default_frozen_note = """
Alternatively, the object may be called (as a function) to fix the shape and
location parameters returning a "frozen" discrete RV object:
rv = %(name)s(%(shapes)s, loc=0)
- Frozen RV object with the same methods but holding the given shape and
location fixed.
"""
docdict_discrete['frozennote'] = _doc_default_frozen_note
_doc_default_discrete_example = """\
Examples
--------
>>> from scipy.stats import %(name)s
>>> import matplotlib.pyplot as plt
>>> fig, ax = plt.subplots(1, 1)
Calculate a few first moments:
%(set_vals_stmt)s
>>> mean, var, skew, kurt = %(name)s.stats(%(shapes)s, moments='mvsk')
Display the probability mass function (``pmf``):
>>> x = np.arange(%(name)s.ppf(0.01, %(shapes)s),
... %(name)s.ppf(0.99, %(shapes)s))
>>> ax.plot(x, %(name)s.pmf(x, %(shapes)s), 'bo', ms=8, label='%(name)s pmf')
>>> ax.vlines(x, 0, %(name)s.pmf(x, %(shapes)s), colors='b', lw=5, alpha=0.5)
Alternatively, freeze the distribution and display the frozen ``pmf``:
>>> rv = %(name)s(%(shapes)s)
>>> ax.vlines(x, 0, rv.pmf(x), colors='k', linestyles='-', lw=1,
... label='frozen pmf')
>>> ax.legend(loc='best', frameon=False)
>>> plt.show()
Check accuracy of ``cdf`` and ``ppf``:
>>> prob = %(name)s.cdf(x, %(shapes)s)
>>> np.allclose(x, %(name)s.ppf(prob, %(shapes)s))
True
Generate random numbers:
>>> r = %(name)s.rvs(%(shapes)s, size=1000)
"""
docdict_discrete['example'] = _doc_default_discrete_example
_doc_default_before_notes = ''.join([docdict_discrete['longsummary'],
docdict_discrete['allmethods'],
docdict_discrete['callparams'],
docdict_discrete['frozennote']])
docdict_discrete['before_notes'] = _doc_default_before_notes
_doc_default_disc = ''.join([docdict_discrete['longsummary'],
docdict_discrete['allmethods'],
docdict_discrete['frozennote'],
docdict_discrete['example']])
docdict_discrete['default'] = _doc_default_disc
# clean up all the separate docstring elements, we do not need them anymore
for obj in [s for s in dir() if s.startswith('_doc_')]:
exec('del ' + obj)
del obj
try:
del s
except NameError:
# in Python 3, loop variables are not visible after the loop
pass
def _moment(data, n, mu=None):
if mu is None:
mu = data.mean()
return ((data - mu)**n).mean()
def _moment_from_stats(n, mu, mu2, g1, g2, moment_func, args):
if (n == 0):
return 1.0
elif (n == 1):
if mu is None:
val = moment_func(1, *args)
else:
val = mu
elif (n == 2):
if mu2 is None or mu is None:
val = moment_func(2, *args)
else:
val = mu2 + mu*mu
elif (n == 3):
if g1 is None or mu2 is None or mu is None:
val = moment_func(3, *args)
else:
mu3 = g1 * np.power(mu2, 1.5) # 3rd central moment
val = mu3+3*mu*mu2+mu*mu*mu # 3rd non-central moment
elif (n == 4):
if g1 is None or g2 is None or mu2 is None or mu is None:
val = moment_func(4, *args)
else:
mu4 = (g2+3.0)*(mu2**2.0) # 4th central moment
mu3 = g1*np.power(mu2, 1.5) # 3rd central moment
val = mu4+4*mu*mu3+6*mu*mu*mu2+mu*mu*mu*mu
else:
val = moment_func(n, *args)
return val
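# Quick sanity check of the central-to-noncentral conversion above (an
# illustrative sketch only, not part of the library): with the stats of the
# standard normal (mu=0, mu2=1, g1=0, g2=0), the 4th non-central moment is
# mu4 = (g2 + 3) * mu2**2 = 3, and moment_func is never consulted, so it can
# be passed as None here.
# >>> _moment_from_stats(4, 0.0, 1.0, 0.0, 0.0, None, ())
# 3.0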
def _skew(data):
"""
skew is third central moment / variance**(1.5)
"""
data = np.ravel(data)
mu = data.mean()
m2 = ((data - mu)**2).mean()
m3 = ((data - mu)**3).mean()
return m3 / np.power(m2, 1.5)
def _kurtosis(data):
"""
kurtosis is fourth central moment / variance**2 - 3
"""
data = np.ravel(data)
mu = data.mean()
m2 = ((data - mu)**2).mean()
m4 = ((data - mu)**4).mean()
return m4 / m2**2 - 3
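# Sketch of how the two sample-statistic helpers above behave (illustrative
# only): for a large standard-normal sample both the sample skew and the
# excess kurtosis should be close to 0.
# >>> rng = np.random.RandomState(0)
# >>> x = rng.standard_normal(100000)
# >>> _skew(x), _kurtosis(x)    # both values close to 0 for a normal sample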
# Frozen RV class
class rv_frozen(object):
def __init__(self, dist, *args, **kwds):
self.args = args
self.kwds = kwds
# create a new instance
self.dist = dist.__class__(**dist._ctor_param)
# a, b may be set in _argcheck, depending on *args, **kwds. Ouch.
shapes, _, _ = self.dist._parse_args(*args, **kwds)
self.dist._argcheck(*shapes)
self.a, self.b = self.dist.a, self.dist.b
@property
def random_state(self):
return self.dist._random_state
@random_state.setter
def random_state(self, seed):
self.dist._random_state = check_random_state(seed)
def pdf(self, x): # raises AttributeError in frozen discrete distribution
return self.dist.pdf(x, *self.args, **self.kwds)
def logpdf(self, x):
return self.dist.logpdf(x, *self.args, **self.kwds)
def cdf(self, x):
return self.dist.cdf(x, *self.args, **self.kwds)
def logcdf(self, x):
return self.dist.logcdf(x, *self.args, **self.kwds)
def ppf(self, q):
return self.dist.ppf(q, *self.args, **self.kwds)
def isf(self, q):
return self.dist.isf(q, *self.args, **self.kwds)
def rvs(self, size=None, random_state=None):
kwds = self.kwds.copy()
kwds.update({'size': size, 'random_state': random_state})
return self.dist.rvs(*self.args, **kwds)
def sf(self, x):
return self.dist.sf(x, *self.args, **self.kwds)
def logsf(self, x):
return self.dist.logsf(x, *self.args, **self.kwds)
def stats(self, moments='mv'):
kwds = self.kwds.copy()
kwds.update({'moments': moments})
return self.dist.stats(*self.args, **kwds)
def median(self):
return self.dist.median(*self.args, **self.kwds)
def mean(self):
return self.dist.mean(*self.args, **self.kwds)
def var(self):
return self.dist.var(*self.args, **self.kwds)
def std(self):
return self.dist.std(*self.args, **self.kwds)
def moment(self, n):
return self.dist.moment(n, *self.args, **self.kwds)
def entropy(self):
return self.dist.entropy(*self.args, **self.kwds)
def pmf(self, k):
return self.dist.pmf(k, *self.args, **self.kwds)
def logpmf(self, k):
return self.dist.logpmf(k, *self.args, **self.kwds)
def interval(self, alpha):
return self.dist.interval(alpha, *self.args, **self.kwds)
def expect(self, func=None, lb=None, ub=None,
conditional=False, **kwds):
# expect method only accepts shape parameters as positional args
# hence convert self.args, self.kwds, also loc/scale
# See the .expect method docstrings for the meaning of
# other parameters.
a, loc, scale = self.dist._parse_args(*self.args, **self.kwds)
if isinstance(self.dist, rv_discrete):
if kwds:
raise ValueError("Discrete expect does not accept **kwds.")
return self.dist.expect(func, a, loc, lb, ub, conditional)
else:
return self.dist.expect(func, a, loc, scale, lb, ub,
conditional, **kwds)
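# Example of the frozen-RV mechanics defined above (a sketch; ``gamma`` is
# simply a convenient existing distribution): calling a distribution instance
# returns an rv_frozen that holds shape/loc/scale, so its methods need no
# further arguments.
# >>> from scipy.stats import gamma
# >>> rv = gamma(2.0, loc=0, scale=3.0)   # rv is an rv_frozen instance
# >>> rv.mean(), rv.var()                 # a*scale and a*scale**2
# (6.0, 18.0)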
def valarray(shape, value=nan, typecode=None):
"""Return an array of all value.
"""
out = ones(shape, dtype=bool) * value
if typecode is not None:
out = out.astype(typecode)
if not isinstance(out, ndarray):
out = asarray(out)
return out
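# Minimal illustration of ``valarray`` (sketch only): the bool ones array is
# upcast by the multiplication, so the default fill of nan yields a float
# array of the requested shape.
# >>> out = valarray((2, 3), value=nan)
# >>> out.shape, out.dtype
# ((2, 3), dtype('float64'))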
def _lazywhere(cond, arrays, f, fillvalue=None, f2=None):
"""
np.where(cond, x, fillvalue) always evaluates x even where cond is False.
This one only evaluates f(arr1[cond], arr2[cond], ...).
For example,
>>> a, b = np.array([1, 2, 3, 4]), np.array([5, 6, 7, 8])
>>> def f(a, b):
...     return a*b
>>> _lazywhere(a > 2, (a, b), f, np.nan)
array([ nan, nan, 21., 32.])
Notice it assumes that all `arrays` are of the same shape, or can be
broadcasted together.
"""
if fillvalue is None:
if f2 is None:
raise ValueError("One of (fillvalue, f2) must be given.")
else:
fillvalue = np.nan
else:
if f2 is not None:
raise ValueError("Only one of (fillvalue, f2) can be given.")
arrays = np.broadcast_arrays(*arrays)
temp = tuple(np.extract(cond, arr) for arr in arrays)
out = valarray(shape(arrays[0]), value=fillvalue)
np.place(out, cond, f(*temp))
if f2 is not None:
temp = tuple(np.extract(~cond, arr) for arr in arrays)
np.place(out, ~cond, f2(*temp))
return out
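# The docstring above shows the ``fillvalue`` form; the ``f2`` form instead
# evaluates a second function on the complementary entries (sketch only,
# output spacing may differ between numpy versions):
# >>> a, b = np.array([1, 2, 3, 4]), np.array([5, 6, 7, 8])
# >>> _lazywhere(a > 2, (a, b), f=lambda a, b: a*b, f2=lambda a, b: a+b)
# array([  6.,   8.,  21.,  32.])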
# This should be rewritten
def argsreduce(cond, *args):
"""Return the sequence of ravel(args[i]) where ravel(condition) is
True in 1D.
Examples
--------
>>> import numpy as np
>>> rand = np.random.random_sample
>>> A = rand((4, 5))
>>> B = 2
>>> C = rand((1, 5))
>>> cond = np.ones(A.shape)
>>> [A1, B1, C1] = argsreduce(cond, A, B, C)
>>> B1.shape
(20,)
>>> cond[2,:] = 0
>>> [A2, B2, C2] = argsreduce(cond, A, B, C)
>>> B2.shape
(15,)
"""
newargs = np.atleast_1d(*args)
if not isinstance(newargs, list):
newargs = [newargs, ]
expand_arr = (cond == cond)
return [np.extract(cond, arr1 * expand_arr) for arr1 in newargs]
parse_arg_template = """
def _parse_args(self, %(shape_arg_str)s %(locscale_in)s):
return (%(shape_arg_str)s), %(locscale_out)s
def _parse_args_rvs(self, %(shape_arg_str)s %(locscale_in)s, size=None):
return (%(shape_arg_str)s), %(locscale_out)s, size
def _parse_args_stats(self, %(shape_arg_str)s %(locscale_in)s, moments='mv'):
return (%(shape_arg_str)s), %(locscale_out)s, moments
"""
# Both the continuous and discrete distributions depend on ncx2.
# I think the function name ncx2 is an abbreviation for noncentral chi squared.
def _ncx2_log_pdf(x, df, nc):
a = asarray(df/2.0)
fac = -nc/2.0 - x/2.0 + (a-1)*log(x) - a*log(2) - gammaln(a)
return fac + np.nan_to_num(log(hyp0f1(a, nc * x/4.0)))
def _ncx2_pdf(x, df, nc):
return np.exp(_ncx2_log_pdf(x, df, nc))
def _ncx2_cdf(x, df, nc):
return chndtr(x, df, nc)
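# Consistency sketch for the helpers above (assumes scipy.stats.ncx2 is
# available; the two formulations should agree to numerical precision):
# >>> from scipy.stats import ncx2
# >>> np.allclose(_ncx2_pdf(3.0, df=4, nc=1.5), ncx2.pdf(3.0, 4, 1.5))
# True
# >>> np.allclose(_ncx2_cdf(3.0, df=4, nc=1.5), ncx2.cdf(3.0, 4, 1.5))
# True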
class rv_generic(object):
"""Class which encapsulates common functionality between rv_discrete
and rv_continuous.
"""
def __init__(self, seed=None):
super(rv_generic, self).__init__()
# figure out if _stats signature has 'moments' keyword
sign = inspect.getargspec(self._stats)
self._stats_has_moments = ((sign[2] is not None) or
('moments' in sign[0]))
self._random_state = check_random_state(seed)
@property
def random_state(self):
""" Get or set the RandomState object for generating random variates.
This can be either None or an existing RandomState object.
If None (or np.random), use the RandomState singleton used by np.random.
If already a RandomState instance, use it.
If an int, use a new RandomState instance seeded with seed.
"""
return self._random_state
@random_state.setter
def random_state(self, seed):
self._random_state = check_random_state(seed)
def _construct_argparser(
self, meths_to_inspect, locscale_in, locscale_out):
"""Construct the parser for the shape arguments.
Generates the argument-parsing functions dynamically and attaches
them to the instance.
Is supposed to be called in __init__ of a class for each distribution.
If self.shapes is a non-empty string, interprets it as a
comma-separated list of shape parameters.
Otherwise inspects the call signatures of `meths_to_inspect`
and constructs the argument-parsing functions from these.
In this case also sets `shapes` and `numargs`.
"""
if self.shapes:
# sanitize the user-supplied shapes
if not isinstance(self.shapes, string_types):
raise TypeError('shapes must be a string.')
shapes = self.shapes.replace(',', ' ').split()
for field in shapes:
if keyword.iskeyword(field):
raise SyntaxError('keywords cannot be used as shapes.')
if not re.match('^[_a-zA-Z][_a-zA-Z0-9]*$', field):
raise SyntaxError(
'shapes must be valid python identifiers')
else:
# find out the call signatures (_pdf, _cdf etc), deduce shape
# arguments
shapes_list = []
for meth in meths_to_inspect:
shapes_args = inspect.getargspec(meth)
shapes_list.append(shapes_args.args)
# *args or **kwargs are not allowed w/automatic shapes
# (generic methods have 'self, x' only)
if len(shapes_args.args) > 2:
if shapes_args.varargs is not None:
raise TypeError(
'*args are not allowed w/out explicit shapes')
if shapes_args.keywords is not None:
raise TypeError(
'**kwds are not allowed w/out explicit shapes')
if shapes_args.defaults is not None:
raise TypeError('defaults are not allowed for shapes')
shapes = max(shapes_list, key=lambda x: len(x))
shapes = shapes[2:] # remove self, x,
# make sure the signatures are consistent
# (generic methods have 'self, x' only)
for item in shapes_list:
if len(item) > 2 and item[2:] != shapes:
raise TypeError('Shape arguments are inconsistent.')
# have the arguments, construct the method from template
shapes_str = ', '.join(shapes) + ', ' if shapes else '' # NB: not None
dct = dict(shape_arg_str=shapes_str,
locscale_in=locscale_in,
locscale_out=locscale_out,
)
ns = {}
exec_(parse_arg_template % dct, ns)
# NB: attach to the instance, not class
for name in ['_parse_args', '_parse_args_stats', '_parse_args_rvs']:
setattr(self, name,
instancemethod(ns[name], self, self.__class__)
)
self.shapes = ', '.join(shapes) if shapes else None
if not hasattr(self, 'numargs'):
# allows more general subclassing with *args
self.numargs = len(shapes)
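# Sketch of the introspection path described above (illustrative subclass
# only, defined here purely for demonstration): a distribution that defines
# ``_pdf(self, x, c)`` and passes no explicit ``shapes`` has the shape
# argument deduced from the signature.
# >>> class demo_gen(rv_continuous):
# ...     def _pdf(self, x, c):
# ...         return c * np.exp(-c*x)
# >>> demo = demo_gen(a=0.0, name='demo')
# >>> demo.shapes, demo.numargs
# ('c', 1)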
def _construct_doc(self, docdict, shapes_vals=None):
"""Construct the instance docstring with string substitutions."""
tempdict = docdict.copy()
tempdict['name'] = self.name or 'distname'
tempdict['shapes'] = self.shapes or ''
if shapes_vals is None:
shapes_vals = ()
vals = ', '.join(str(_) for _ in shapes_vals)
tempdict['vals'] = vals
if self.shapes:
tempdict['set_vals_stmt'] = '>>> %s = %s' % (self.shapes, vals)
else:
tempdict['set_vals_stmt'] = ''
if self.shapes is None:
# remove shapes from call parameters if there are none
for item in ['callparams', 'default', 'before_notes']:
tempdict[item] = tempdict[item].replace(
"\n%(shapes)s : array_like\n shape parameters", "")
for i in range(2):
if self.shapes is None:
# necessary because %(shapes)s is used in two forms (with and without ", ")
self.__doc__ = self.__doc__.replace("%(shapes)s, ", "")
self.__doc__ = doccer.docformat(self.__doc__, tempdict)
# correct for empty shapes
self.__doc__ = self.__doc__.replace('(, ', '(').replace(', )', ')')
def freeze(self, *args, **kwds):
"""Freeze the distribution for the given arguments.
Parameters
----------
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution. Should include all
the non-optional arguments, may include ``loc`` and ``scale``.
Returns
-------
rv_frozen : rv_frozen instance
The frozen distribution.
"""
return rv_frozen(self, *args, **kwds)
def __call__(self, *args, **kwds):
return self.freeze(*args, **kwds)
# The actual calculation functions (no basic checking need be done)
# If these are defined, the others won't be looked at.
# Otherwise, the other set can be defined.
def _stats(self, *args, **kwds):
return None, None, None, None
# Non-central moments
def _munp(self, n, *args):
# Silence floating point warnings from integration.
olderr = np.seterr(all='ignore')
vals = self.generic_moment(n, *args)
np.seterr(**olderr)
return vals
## These are the methods you must define (standard form functions)
## NB: generic _pdf, _logpdf, _cdf are different for
## rv_continuous and rv_discrete hence are defined in there
def _argcheck(self, *args):
"""Default check for correct values on args and keywords.
Returns condition array of 1's where arguments are correct and
0's where they are not.
"""
cond = 1
for arg in args:
cond = logical_and(cond, (asarray(arg) > 0))
return cond
## (return a 1-D array, using self._size to get the number of variates)
def _rvs(self, *args):
## Use basic inverse cdf algorithm for RV generation as default.
U = self._random_state.random_sample(self._size)
Y = self._ppf(U, *args)
return Y
def _logcdf(self, x, *args):
return log(self._cdf(x, *args))
def _sf(self, x, *args):
return 1.0-self._cdf(x, *args)
def _logsf(self, x, *args):
return log(self._sf(x, *args))
def _ppf(self, q, *args):
return self._ppfvec(q, *args)
def _isf(self, q, *args):
return self._ppf(1.0-q, *args) # use correct _ppf for subclasses
# These are actually called, and should not be overwritten if you
# want to keep error checking.
def rvs(self, *args, **kwds):
"""
Random variates of given type.
Parameters
----------
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
Location parameter (default=0).
scale : array_like, optional
Scale parameter (default=1).
size : int or tuple of ints, optional
Defining number of random variates (default=1).
random_state : None or int or np.random.RandomState instance, optional
If int or RandomState, use it for drawing the random variates
If None, rely on self.random_state
Default is None.
Returns
-------
rvs : ndarray or scalar
Random variates of given `size`.
"""
discrete = kwds.pop('discrete', None)
rndm = kwds.pop('random_state', None)
args, loc, scale, size = self._parse_args_rvs(*args, **kwds)
cond = logical_and(self._argcheck(*args), (scale >= 0))
if not np.all(cond):
raise ValueError("Domain error in arguments.")
# self._size is total size of all output values
self._size = product(size, axis=0)
if self._size is not None and self._size > 1:
size = np.array(size, ndmin=1)
if np.all(scale == 0):
return loc*ones(size, 'd')
# extra gymnastics needed for a custom random_state
if rndm is not None:
random_state_saved = self._random_state
self._random_state = rndm
vals = self._rvs(*args)
if self._size is not None:
vals = reshape(vals, size)
vals = vals * scale + loc
# do not forget to restore the _random_state
if rndm is not None:
self._random_state = random_state_saved
# Cast to int if discrete
if discrete:
if np.isscalar(vals):
vals = int(vals)
else:
vals = vals.astype(int)
return vals
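# Typical use of the public ``rvs`` wrapper above (sketch; ``norm`` is just a
# convenient existing distribution, and the drawn values are not reproduced
# here):
# >>> from scipy.stats import norm
# >>> norm.rvs(loc=10, scale=2, size=3, random_state=123)   # reproducible draw
# array([...])   # three values scattered around 10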
def stats(self, *args, **kwds):
"""
Some statistics of the given RV
Parameters
----------
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional (discrete RVs only)
scale parameter (default=1)
moments : str, optional
composed of letters ['mvsk'] defining which moments to compute:
'm' = mean,
'v' = variance,
's' = (Fisher's) skew,
'k' = (Fisher's) kurtosis.
(default='mv')
Returns
-------
stats : sequence
of requested moments.
"""
args, loc, scale, moments = self._parse_args_stats(*args, **kwds)
# scale = 1 by construction for discrete RVs
loc, scale = map(asarray, (loc, scale))
args = tuple(map(asarray, args))
cond = self._argcheck(*args) & (scale > 0) & (loc == loc)
output = []
default = valarray(shape(cond), self.badvalue)
# Use only entries that are valid in calculation
if any(cond):
goodargs = argsreduce(cond, *(args+(scale, loc)))
scale, loc, goodargs = goodargs[-2], goodargs[-1], goodargs[:-2]
if self._stats_has_moments:
mu, mu2, g1, g2 = self._stats(*goodargs,
**{'moments': moments})
else:
mu, mu2, g1, g2 = self._stats(*goodargs)
if g1 is None:
mu3 = None
else:
if mu2 is None:
mu2 = self._munp(2, *goodargs)
# (mu2**1.5) breaks down for nan and inf
mu3 = g1 * np.power(mu2, 1.5)
if 'm' in moments:
if mu is None:
mu = self._munp(1, *goodargs)
out0 = default.copy()
place(out0, cond, mu * scale + loc)
output.append(out0)
if 'v' in moments:
if mu2 is None:
mu2p = self._munp(2, *goodargs)
if mu is None:
mu = self._munp(1, *goodargs)
mu2 = mu2p - mu * mu
if np.isinf(mu):
# if mean is inf then var is also inf
mu2 = np.inf
out0 = default.copy()
place(out0, cond, mu2 * scale * scale)
output.append(out0)
if 's' in moments:
if g1 is None:
mu3p = self._munp(3, *goodargs)
if mu is None:
mu = self._munp(1, *goodargs)
if mu2 is None:
mu2p = self._munp(2, *goodargs)
mu2 = mu2p - mu * mu
mu3 = mu3p - 3 * mu * mu2 - mu**3
g1 = mu3 / np.power(mu2, 1.5)
out0 = default.copy()
place(out0, cond, g1)
output.append(out0)
if 'k' in moments:
if g2 is None:
mu4p = self._munp(4, *goodargs)
if mu is None:
mu = self._munp(1, *goodargs)
if mu2 is None:
mu2p = self._munp(2, *goodargs)
mu2 = mu2p - mu * mu
if mu3 is None:
mu3p = self._munp(3, *goodargs)
mu3 = mu3p - 3 * mu * mu2 - mu**3
mu4 = mu4p - 4 * mu * mu3 - 6 * mu * mu * mu2 - mu**4
g2 = mu4 / mu2**2.0 - 3.0
out0 = default.copy()
place(out0, cond, g2)
output.append(out0)
else: # no valid args
output = []
for _ in moments:
out0 = default.copy()
output.append(out0)
if len(output) == 1:
return output[0]
else:
return tuple(output)
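# Example of the moment dispatch implemented above (sketch): the standard
# exponential distribution has mean 1, variance 1, skew 2 and excess
# kurtosis 6.
# >>> from scipy.stats import expon
# >>> m, v, s, k = expon.stats(moments='mvsk')
# >>> float(m), float(v), float(s), float(k)
# (1.0, 1.0, 2.0, 6.0)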
def entropy(self, *args, **kwds):
"""
Differential entropy of the RV.
Parameters
----------
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
Location parameter (default=0).
scale : array_like, optional (continuous distributions only).
Scale parameter (default=1).
Notes
-----
Entropy is defined base `e`:
>>> drv = rv_discrete(values=((0, 1), (0.5, 0.5)))
>>> np.allclose(drv.entropy(), np.log(2.0))
True
"""
args, loc, scale = self._parse_args(*args, **kwds)
# NB: for discrete distributions scale=1 by construction in _parse_args
args = tuple(map(asarray, args))
cond0 = self._argcheck(*args) & (scale > 0) & (loc == loc)
output = zeros(shape(cond0), 'd')
place(output, (1-cond0), self.badvalue)
goodargs = argsreduce(cond0, *args)
# I don't know when or why vecentropy got broken when numargs == 0
# 09.08.2013: is this still relevant? cf check_vecentropy test
# in tests/test_continuous_basic.py
if self.numargs == 0:
place(output, cond0, self._entropy() + log(scale))
else:
place(output, cond0, self.vecentropy(*goodargs) + log(scale))
return output
def moment(self, n, *args, **kwds):
"""
n-th order non-central moment of the distribution.
Parameters
----------
n : int, n>=1
Order of moment.
arg1, arg2, arg3,... : float
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
kwds : keyword arguments, optional
These can include "loc" and "scale", as well as other keyword
arguments relevant for a given distribution.
"""
args, loc, scale = self._parse_args(*args, **kwds)
if not (self._argcheck(*args) and (scale > 0)):
return nan
if (floor(n) != n):
raise ValueError("Moment must be an integer.")
if (n < 0):
raise ValueError("Moment must be positive.")
mu, mu2, g1, g2 = None, None, None, None
if (n > 0) and (n < 5):
if self._stats_has_moments:
mdict = {'moments': {1: 'm', 2: 'v', 3: 'vs', 4: 'vk'}[n]}
else:
mdict = {}
mu, mu2, g1, g2 = self._stats(*args, **mdict)
val = _moment_from_stats(n, mu, mu2, g1, g2, self._munp, args)
# Convert to transformed X = L + S*Y
# E[X^n] = E[(L+S*Y)^n] = L^n sum(comb(n, k)*(S/L)^k E[Y^k], k=0...n)
if loc == 0:
return scale**n * val
else:
result = 0
fac = float(scale) / float(loc)
for k in range(n):
valk = _moment_from_stats(k, mu, mu2, g1, g2, self._munp, args)
result += comb(n, k, exact=True)*(fac**k) * valk
result += fac**n * val
return result * loc**n
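# The loc/scale transform in ``moment`` above can be checked by hand
# (sketch): for the standard normal E[Y] = 0 and E[Y**2] = 1, so with loc=3
# the second non-central moment is E[(3 + Y)**2] = 9 + 0 + 1 = 10.
# >>> from scipy.stats import norm
# >>> norm.moment(2), norm.moment(2, loc=3)
# (1.0, 10.0)    # up to floating-point rounding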
def median(self, *args, **kwds):
"""
Median of the distribution.
Parameters
----------
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
Location parameter, Default is 0.
scale : array_like, optional
Scale parameter, Default is 1.
Returns
-------
median : float
The median of the distribution.
See Also
--------
stats.distributions.rv_discrete.ppf
Inverse of the CDF
"""
return self.ppf(0.5, *args, **kwds)
def mean(self, *args, **kwds):
"""
Mean of the distribution
Parameters
----------
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
mean : float
the mean of the distribution
"""
kwds['moments'] = 'm'
res = self.stats(*args, **kwds)
if isinstance(res, ndarray) and res.ndim == 0:
return res[()]
return res
def var(self, *args, **kwds):
"""
Variance of the distribution
Parameters
----------
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
var : float
the variance of the distribution
"""
kwds['moments'] = 'v'
res = self.stats(*args, **kwds)
if isinstance(res, ndarray) and res.ndim == 0:
return res[()]
return res
def std(self, *args, **kwds):
"""
Standard deviation of the distribution.
Parameters
----------
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
std : float
standard deviation of the distribution
"""
kwds['moments'] = 'v'
res = sqrt(self.stats(*args, **kwds))
return res
def interval(self, alpha, *args, **kwds):
"""
Confidence interval with equal areas around the median.
Parameters
----------
alpha : array_like of float
Probability that an rv will be drawn from the returned range.
Each value should be in the range [0, 1].
arg1, arg2, ... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
location parameter, Default is 0.
scale : array_like, optional
scale parameter, Default is 1.
Returns
-------
a, b : ndarray of float
end-points of range that contain ``100 * alpha %`` of the rv's
possible values.
"""
alpha = asarray(alpha)
if any((alpha > 1) | (alpha < 0)):
raise ValueError("alpha must be between 0 and 1 inclusive")
q1 = (1.0-alpha)/2
q2 = (1.0+alpha)/2
a = self.ppf(q1, *args, **kwds)
b = self.ppf(q2, *args, **kwds)
return a, b
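# Example of the equal-tail interval above (sketch): for the standard normal
# the central 95% interval is approximately (-1.96, 1.96).
# >>> from scipy.stats import norm
# >>> norm.interval(0.95)
# (-1.96..., 1.96...)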
## continuous random variables: implement maybe later
##
## hf --- Hazard Function (PDF / SF)
## chf --- Cumulative hazard function (-log(SF))
## psf --- Probability sparsity function (reciprocal of the pdf) in
## units of percent-point-function (as a function of q).
## Also, the derivative of the percent-point function.
class rv_continuous(rv_generic):
"""
A generic continuous random variable class meant for subclassing.
`rv_continuous` is a base class to construct specific distribution classes
and instances from for continuous random variables. It cannot be used
directly as a distribution.
Parameters
----------
momtype : int, optional
The type of generic moment calculation to use: 0 for pdf, 1 (default)
for ppf.
a : float, optional
Lower bound of the support of the distribution, default is minus
infinity.
b : float, optional
Upper bound of the support of the distribution, default is plus
infinity.
xtol : float, optional
The tolerance for fixed point calculation for generic ppf.
badvalue : object, optional
The value in the result arrays that indicates a value for which
some argument restriction is violated; default is np.nan.
name : str, optional
The name of the instance. This string is used to construct the default
example for distributions.
longname : str, optional
This string is used as part of the first line of the docstring returned
when a subclass has no docstring of its own. Note: `longname` exists
for backwards compatibility, do not use for new subclasses.
shapes : str, optional
The shape of the distribution. For example ``"m, n"`` for a
distribution that takes two integers as the two shape arguments for all
its methods.
extradoc : str, optional, deprecated
This string is used as the last part of the docstring returned when a
subclass has no docstring of its own. Note: `extradoc` exists for
backwards compatibility, do not use for new subclasses.
seed : None or int or np.random.RandomState instance, optional
This parameter defines the RandomState object to use for drawing
random variates.
If None (or np.random), the global np.random state is used.
If integer, it is used to seed the local RandomState instance
Default is None.
Methods
-------
``rvs(<shape(s)>, loc=0, scale=1, size=1)``
random variates
``pdf(x, <shape(s)>, loc=0, scale=1)``
probability density function
``logpdf(x, <shape(s)>, loc=0, scale=1)``
log of the probability density function
``cdf(x, <shape(s)>, loc=0, scale=1)``
cumulative distribution function
``logcdf(x, <shape(s)>, loc=0, scale=1)``
log of the cumulative distribution function
``sf(x, <shape(s)>, loc=0, scale=1)``
survival function (1-cdf --- sometimes more accurate)
``logsf(x, <shape(s)>, loc=0, scale=1)``
log of the survival function
``ppf(q, <shape(s)>, loc=0, scale=1)``
percent point function (inverse of cdf --- quantiles)
``isf(q, <shape(s)>, loc=0, scale=1)``
inverse survival function (inverse of sf)
``moment(n, <shape(s)>, loc=0, scale=1)``
non-central n-th moment of the distribution. May not work for array
arguments.
``stats(<shape(s)>, loc=0, scale=1, moments='mv')``
mean('m'), variance('v'), skew('s'), and/or kurtosis('k')
``entropy(<shape(s)>, loc=0, scale=1)``
(differential) entropy of the RV.
``fit(data, <shape(s)>, loc=0, scale=1)``
Parameter estimates for generic data
``expect(func=None, args=(), loc=0, scale=1, lb=None, ub=None, conditional=False, **kwds)``
Expected value of a function with respect to the distribution.
Additional kwd arguments passed to integrate.quad
``median(<shape(s)>, loc=0, scale=1)``
Median of the distribution.
``mean(<shape(s)>, loc=0, scale=1)``
Mean of the distribution.
``std(<shape(s)>, loc=0, scale=1)``
Standard deviation of the distribution.
``var(<shape(s)>, loc=0, scale=1)``
Variance of the distribution.
``interval(alpha, <shape(s)>, loc=0, scale=1)``
Interval that with `alpha` percent probability contains a random
realization of this distribution.
``__call__(<shape(s)>, loc=0, scale=1)``
Calling a distribution instance creates a frozen RV object with the
same methods but holding the given shape, location, and scale fixed.
See Notes section.
**Parameters for Methods**
x : array_like
quantiles
q : array_like
lower or upper tail probability
<shape(s)> : array_like
shape parameters
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
size : int or tuple of ints, optional
shape of random variates (default computed from input arguments)
moments : string, optional
composed of letters ['mvsk'] specifying which moments to compute where
'm' = mean, 'v' = variance, 's' = (Fisher's) skew and
'k' = (Fisher's) kurtosis. (default='mv')
n : int
order of moment to calculate in method moments
Notes
-----
**Methods that can be overwritten by subclasses**
::
_rvs
_pdf
_cdf
_sf
_ppf
_isf
_stats
_munp
_entropy
_argcheck
There are additional (internal and private) generic methods that can
be useful for cross-checking and for debugging, but might not work in all
cases when called directly.
**Frozen Distribution**
Alternatively, the object may be called (as a function) to fix the shape,
location, and scale parameters returning a "frozen" continuous RV object:
rv = generic(<shape(s)>, loc=0, scale=1)
frozen RV object with the same methods but holding the given shape,
location, and scale fixed
**Subclassing**
New random variables can be defined by subclassing rv_continuous class
and re-defining at least the ``_pdf`` or the ``_cdf`` method (normalized
to location 0 and scale 1) which will be given clean arguments (in between
a and b) and passing the argument check method.
If positive argument checking is not correct for your RV
then you will also need to re-define the ``_argcheck`` method.
Correct, but potentially slow defaults exist for the remaining
methods but for speed and/or accuracy you can over-ride::
_logpdf, _cdf, _logcdf, _ppf, _rvs, _isf, _sf, _logsf
Rarely would you override ``_isf``, ``_sf`` or ``_logsf``, but you could.
Statistics are computed using numerical integration by default.
For speed you can redefine this using ``_stats``:
- take shape parameters and return mu, mu2, g1, g2
- If you can't compute one of these, return it as None
- Can also be defined with a keyword argument ``moments=<str>``,
where <str> is a string composed of 'm', 'v', 's',
and/or 'k'. Only the components appearing in string
should be computed and returned in the order 'm', 'v',
's', or 'k' with missing values returned as None.
Alternatively, you can override ``_munp``, which takes n and shape
parameters and returns the nth non-central moment of the distribution.
A note on ``shapes``: subclasses need not specify them explicitly. In this
case, the `shapes` will be automatically deduced from the signatures of the
overridden methods.
If, for some reason, you prefer to avoid relying on introspection, you can
specify ``shapes`` explicitly as an argument to the instance constructor.
Examples
--------
To create a new Gaussian distribution, we would do the following::
class gaussian_gen(rv_continuous):
"Gaussian distribution"
def _pdf(self, x):
...
...
"""
def __init__(self, momtype=1, a=None, b=None, xtol=1e-14,
badvalue=None, name=None, longname=None,
shapes=None, extradoc=None, seed=None):
super(rv_continuous, self).__init__(seed)
# save the ctor parameters, cf generic freeze
self._ctor_param = dict(
momtype=momtype, a=a, b=b, xtol=xtol,
badvalue=badvalue, name=name, longname=longname,
shapes=shapes, extradoc=extradoc, seed=seed)
if badvalue is None:
badvalue = nan
if name is None:
name = 'Distribution'
self.badvalue = badvalue
self.name = name
self.a = a
self.b = b
if a is None:
self.a = -inf
if b is None:
self.b = inf
self.xtol = xtol
self._size = 1
self.moment_type = momtype
self.shapes = shapes
self._construct_argparser(meths_to_inspect=[self._pdf, self._cdf],
locscale_in='loc=0, scale=1',
locscale_out='loc, scale')
# nin correction
self._ppfvec = vectorize(self._ppf_single, otypes='d')
self._ppfvec.nin = self.numargs + 1
self.vecentropy = vectorize(self._entropy, otypes='d')
self._cdfvec = vectorize(self._cdf_single, otypes='d')
self._cdfvec.nin = self.numargs + 1
# backwards compat. these were removed in 0.14.0, put back but
# deprecated in 0.14.1:
self.vecfunc = np.deprecate(self._ppfvec, "vecfunc")
self.veccdf = np.deprecate(self._cdfvec, "veccdf")
self.extradoc = extradoc
if momtype == 0:
self.generic_moment = vectorize(self._mom0_sc, otypes='d')
else:
self.generic_moment = vectorize(self._mom1_sc, otypes='d')
# Because of the *args argument of _mom0_sc, vectorize cannot count the
# number of arguments correctly.
self.generic_moment.nin = self.numargs + 1
if longname is None:
if name[0] in 'aeiouAEIOU':
hstr = "An "
else:
hstr = "A "
longname = hstr + name
if sys.flags.optimize < 2:
# Skip adding docstrings if interpreter is run with -OO
if self.__doc__ is None:
self._construct_default_doc(longname=longname,
extradoc=extradoc)
else:
dct = dict(distcont)
self._construct_doc(docdict, dct.get(self.name))
def _construct_default_doc(self, longname=None, extradoc=None):
"""Construct instance docstring from the default template."""
if longname is None:
longname = 'A'
if extradoc is None:
extradoc = ''
if extradoc.startswith('\n\n'):
extradoc = extradoc[2:]
self.__doc__ = ''.join(['%s continuous random variable.' % longname,
'\n\n%(before_notes)s\n', docheaders['notes'],
extradoc, '\n%(example)s'])
self._construct_doc(docdict)
def _ppf_to_solve(self, x, q, *args):
return self.cdf(*(x, )+args)-q
def _ppf_single(self, q, *args):
left = right = None
if self.a > -np.inf:
left = self.a
if self.b < np.inf:
right = self.b
factor = 10.
if left is None:  # i.e. self.a = -inf
left = -1.*factor
while self._ppf_to_solve(left, q, *args) > 0.:
right = left
left *= factor
# left is now such that cdf(left) < q
if right is None:  # i.e. self.b = inf
right = factor
while self._ppf_to_solve(right, q, *args) < 0.:
left = right
right *= factor
# right is now such that cdf(right) > q
return optimize.brentq(self._ppf_to_solve,
left, right, args=(q,)+args, xtol=self.xtol)
# moment from definition
def _mom_integ0(self, x, m, *args):
return x**m * self.pdf(x, *args)
def _mom0_sc(self, m, *args):
return integrate.quad(self._mom_integ0, self.a, self.b,
args=(m,)+args)[0]
# moment calculated using ppf
def _mom_integ1(self, q, m, *args):
return (self.ppf(q, *args))**m
def _mom1_sc(self, m, *args):
return integrate.quad(self._mom_integ1, 0, 1, args=(m,)+args)[0]
def _pdf(self, x, *args):
return derivative(self._cdf, x, dx=1e-5, args=args, order=5)
## Could also define any of these
def _logpdf(self, x, *args):
return log(self._pdf(x, *args))
def _cdf_single(self, x, *args):
return integrate.quad(self._pdf, self.a, x, args=args)[0]
def _cdf(self, x, *args):
return self._cdfvec(x, *args)
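# Minimal subclassing sketch tying the generic machinery together
# (illustrative only, not part of the module): defining just ``_pdf`` yields
# a working ``cdf`` (numerical integration via ``_cdf_single`` above) and a
# working ``ppf`` (Brent root finding in ``_ppf_single``).
# >>> class unif_demo_gen(rv_continuous):
# ...     def _pdf(self, x):
# ...         return np.ones_like(x)        # uniform density on [a, b] = [0, 1]
# >>> u = unif_demo_gen(a=0.0, b=1.0, name='unif_demo')
# >>> u.cdf(0.25), u.ppf(0.5)               # both computed generically
# (0.25, 0.5)    # up to integration/root-finding tolerance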
## generic _argcheck, _logcdf, _sf, _logsf, _ppf, _isf, _rvs are defined
## in rv_generic
def pdf(self, x, *args, **kwds):
"""
Probability density function at x of the given RV.
Parameters
----------
x : array_like
quantiles
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
pdf : ndarray
Probability density function evaluated at x
"""
args, loc, scale = self._parse_args(*args, **kwds)
x, loc, scale = map(asarray, (x, loc, scale))
args = tuple(map(asarray, args))
x = asarray((x-loc)*1.0/scale)
cond0 = self._argcheck(*args) & (scale > 0)
cond1 = (scale > 0) & (x >= self.a) & (x <= self.b)
cond = cond0 & cond1
output = zeros(shape(cond), 'd')
putmask(output, (1-cond0)+np.isnan(x), self.badvalue)
if any(cond):
goodargs = argsreduce(cond, *((x,)+args+(scale,)))
scale, goodargs = goodargs[-1], goodargs[:-1]
place(output, cond, self._pdf(*goodargs) / scale)
if output.ndim == 0:
return output[()]
return output
def logpdf(self, x, *args, **kwds):
"""
Log of the probability density function at x of the given RV.
This uses a more numerically accurate calculation if available.
Parameters
----------
x : array_like
quantiles
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
logpdf : array_like
Log of the probability density function evaluated at x
"""
args, loc, scale = self._parse_args(*args, **kwds)
x, loc, scale = map(asarray, (x, loc, scale))
args = tuple(map(asarray, args))
x = asarray((x-loc)*1.0/scale)
cond0 = self._argcheck(*args) & (scale > 0)
cond1 = (scale > 0) & (x >= self.a) & (x <= self.b)
cond = cond0 & cond1
output = empty(shape(cond), 'd')
output.fill(NINF)
putmask(output, (1-cond0)+np.isnan(x), self.badvalue)
if any(cond):
goodargs = argsreduce(cond, *((x,)+args+(scale,)))
scale, goodargs = goodargs[-1], goodargs[:-1]
place(output, cond, self._logpdf(*goodargs) - log(scale))
if output.ndim == 0:
return output[()]
return output
def cdf(self, x, *args, **kwds):
"""
Cumulative distribution function of the given RV.
Parameters
----------
x : array_like
quantiles
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
cdf : ndarray
Cumulative distribution function evaluated at `x`
"""
args, loc, scale = self._parse_args(*args, **kwds)
x, loc, scale = map(asarray, (x, loc, scale))
args = tuple(map(asarray, args))
x = (x-loc)*1.0/scale
cond0 = self._argcheck(*args) & (scale > 0)
cond1 = (scale > 0) & (x > self.a) & (x < self.b)
cond2 = (x >= self.b) & cond0
cond = cond0 & cond1
output = zeros(shape(cond), 'd')
place(output, (1-cond0)+np.isnan(x), self.badvalue)
place(output, cond2, 1.0)
if any(cond): # call only if at least 1 entry
goodargs = argsreduce(cond, *((x,)+args))
place(output, cond, self._cdf(*goodargs))
if output.ndim == 0:
return output[()]
return output
def logcdf(self, x, *args, **kwds):
"""
Log of the cumulative distribution function at x of the given RV.
Parameters
----------
x : array_like
quantiles
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
logcdf : array_like
Log of the cumulative distribution function evaluated at x
"""
args, loc, scale = self._parse_args(*args, **kwds)
x, loc, scale = map(asarray, (x, loc, scale))
args = tuple(map(asarray, args))
x = (x-loc)*1.0/scale
cond0 = self._argcheck(*args) & (scale > 0)
cond1 = (scale > 0) & (x > self.a) & (x < self.b)
cond2 = (x >= self.b) & cond0
cond = cond0 & cond1
output = empty(shape(cond), 'd')
output.fill(NINF)
place(output, (1-cond0)*(cond1 == cond1)+np.isnan(x), self.badvalue)
place(output, cond2, 0.0)
if any(cond): # call only if at least 1 entry
goodargs = argsreduce(cond, *((x,)+args))
place(output, cond, self._logcdf(*goodargs))
if output.ndim == 0:
return output[()]
return output
def sf(self, x, *args, **kwds):
"""
Survival function (1-cdf) at x of the given RV.
Parameters
----------
x : array_like
quantiles
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
sf : array_like
Survival function evaluated at x
"""
args, loc, scale = self._parse_args(*args, **kwds)
x, loc, scale = map(asarray, (x, loc, scale))
args = tuple(map(asarray, args))
x = (x-loc)*1.0/scale
cond0 = self._argcheck(*args) & (scale > 0)
cond1 = (scale > 0) & (x > self.a) & (x < self.b)
cond2 = cond0 & (x <= self.a)
cond = cond0 & cond1
output = zeros(shape(cond), 'd')
place(output, (1-cond0)+np.isnan(x), self.badvalue)
place(output, cond2, 1.0)
if any(cond):
goodargs = argsreduce(cond, *((x,)+args))
place(output, cond, self._sf(*goodargs))
if output.ndim == 0:
return output[()]
return output
def logsf(self, x, *args, **kwds):
"""
Log of the survival function of the given RV.
Returns the log of the "survival function," defined as (1 - `cdf`),
evaluated at `x`.
Parameters
----------
x : array_like
quantiles
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
logsf : ndarray
Log of the survival function evaluated at `x`.
"""
args, loc, scale = self._parse_args(*args, **kwds)
x, loc, scale = map(asarray, (x, loc, scale))
args = tuple(map(asarray, args))
x = (x-loc)*1.0/scale
cond0 = self._argcheck(*args) & (scale > 0)
cond1 = (scale > 0) & (x > self.a) & (x < self.b)
cond2 = cond0 & (x <= self.a)
cond = cond0 & cond1
output = empty(shape(cond), 'd')
output.fill(NINF)
place(output, (1-cond0)+np.isnan(x), self.badvalue)
place(output, cond2, 0.0)
if any(cond):
goodargs = argsreduce(cond, *((x,)+args))
place(output, cond, self._logsf(*goodargs))
if output.ndim == 0:
return output[()]
return output
def ppf(self, q, *args, **kwds):
"""
Percent point function (inverse of cdf) at q of the given RV.
Parameters
----------
q : array_like
lower tail probability
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
x : array_like
quantile corresponding to the lower tail probability q.
"""
args, loc, scale = self._parse_args(*args, **kwds)
q, loc, scale = map(asarray, (q, loc, scale))
args = tuple(map(asarray, args))
cond0 = self._argcheck(*args) & (scale > 0) & (loc == loc)
cond1 = (0 < q) & (q < 1)
cond2 = cond0 & (q == 0)
cond3 = cond0 & (q == 1)
cond = cond0 & cond1
output = valarray(shape(cond), value=self.badvalue)
lower_bound = self.a * scale + loc
upper_bound = self.b * scale + loc
place(output, cond2, argsreduce(cond2, lower_bound)[0])
place(output, cond3, argsreduce(cond3, upper_bound)[0])
if any(cond): # call only if at least 1 entry
goodargs = argsreduce(cond, *((q,)+args+(scale, loc)))
scale, loc, goodargs = goodargs[-2], goodargs[-1], goodargs[:-2]
place(output, cond, self._ppf(*goodargs) * scale + loc)
if output.ndim == 0:
return output[()]
return output
def isf(self, q, *args, **kwds):
"""
Inverse survival function at q of the given RV.
Parameters
----------
q : array_like
upper tail probability
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
x : ndarray or scalar
Quantile corresponding to the upper tail probability q.
"""
args, loc, scale = self._parse_args(*args, **kwds)
q, loc, scale = map(asarray, (q, loc, scale))
args = tuple(map(asarray, args))
cond0 = self._argcheck(*args) & (scale > 0) & (loc == loc)
cond1 = (0 < q) & (q < 1)
cond2 = cond0 & (q == 1)
cond3 = cond0 & (q == 0)
cond = cond0 & cond1
output = valarray(shape(cond), value=self.badvalue)
lower_bound = self.a * scale + loc
upper_bound = self.b * scale + loc
place(output, cond2, argsreduce(cond2, lower_bound)[0])
place(output, cond3, argsreduce(cond3, upper_bound)[0])
if any(cond):
goodargs = argsreduce(cond, *((q,)+args+(scale, loc)))
scale, loc, goodargs = goodargs[-2], goodargs[-1], goodargs[:-2]
place(output, cond, self._isf(*goodargs) * scale + loc)
if output.ndim == 0:
return output[()]
return output
def _nnlf(self, x, *args):
return -sum(self._logpdf(x, *args), axis=0)
def nnlf(self, theta, x):
'''Return negative loglikelihood function
Notes
-----
This is ``-sum(log pdf(x, theta), axis=0)`` where theta are the
parameters (including loc and scale).
'''
try:
loc = theta[-2]
scale = theta[-1]
args = tuple(theta[:-2])
except IndexError:
raise ValueError("Not enough input arguments.")
if not self._argcheck(*args) or scale <= 0:
return inf
x = asarray((x-loc) / scale)
cond0 = (x <= self.a) | (self.b <= x)
if (any(cond0)):
return inf
else:
N = len(x)
return self._nnlf(x, *args) + N * log(scale)
def _penalized_nnlf(self, theta, x):
''' Return negative loglikelihood function,
i.e., - sum (log pdf(x, theta), axis=0)
where theta are the parameters (including loc and scale)
'''
try:
loc = theta[-2]
scale = theta[-1]
args = tuple(theta[:-2])
except IndexError:
raise ValueError("Not enough input arguments.")
if not self._argcheck(*args) or scale <= 0:
return inf
x = asarray((x-loc) / scale)
loginf = log(_XMAX)
if np.isneginf(self.a).all() and np.isinf(self.b).all():
Nbad = 0
else:
cond0 = (x <= self.a) | (self.b <= x)
Nbad = sum(cond0)
if Nbad > 0:
x = argsreduce(~cond0, x)[0]
N = len(x)
return self._nnlf(x, *args) + N*log(scale) + Nbad * 100.0 * loginf
# return starting point for fit (shape arguments + loc + scale)
def _fitstart(self, data, args=None):
if args is None:
args = (1.0,)*self.numargs
return args + self.fit_loc_scale(data, *args)
# Return the (possibly reduced) function to optimize in order to find MLE
# estimates for the .fit method
def _reduce_func(self, args, kwds):
args = list(args)
Nargs = len(args)
fixedn = []
index = list(range(Nargs))
names = ['f%d' % n for n in range(Nargs - 2)] + ['floc', 'fscale']
x0 = []
for n, key in zip(index, names):
if key in kwds:
fixedn.append(n)
args[n] = kwds[key]
else:
x0.append(args[n])
if len(fixedn) == 0:
func = self._penalized_nnlf
restore = None
else:
if len(fixedn) == len(index):
raise ValueError(
"All parameters fixed. There is nothing to optimize.")
def restore(args, theta):
# Replace with theta for all numbers not in fixedn
# This allows the non-fixed values to vary, but
# we still call self.nnlf with all parameters.
i = 0
for n in range(Nargs):
if n not in fixedn:
args[n] = theta[i]
i += 1
return args
def func(theta, x):
newtheta = restore(args[:], theta)
return self._penalized_nnlf(newtheta, x)
return x0, func, restore, args
def fit(self, data, *args, **kwds):
"""
Return MLEs for shape, location, and scale parameters from data.
MLE stands for Maximum Likelihood Estimate. Starting estimates for
the fit are given by input arguments; for any arguments not provided
with starting estimates, ``self._fitstart(data)`` is called to generate
them.
One can hold some parameters fixed to specific values by passing in
keyword arguments ``f0``, ``f1``, ..., ``fn`` (for shape parameters)
and ``floc`` and ``fscale`` (for location and scale parameters,
respectively).
Parameters
----------
data : array_like
Data to use in calculating the MLEs.
args : floats, optional
Starting value(s) for any shape-characterizing arguments (those not
provided will be determined by a call to ``_fitstart(data)``).
No default value.
kwds : floats, optional
Starting values for the location and scale parameters; no default.
Special keyword arguments are recognized as holding certain
parameters fixed:
f0...fn : hold respective shape parameters fixed.
floc : hold location parameter fixed to specified value.
fscale : hold scale parameter fixed to specified value.
optimizer : The optimizer to use. The optimizer must take ``func``
and the starting position as its first two arguments,
plus ``args`` (for extra arguments to pass to the
function to be optimized) and ``disp=0`` to suppress
output, as keyword arguments.
Returns
-------
shape, loc, scale : tuple of floats
MLEs for any shape statistics, followed by those for location and
scale.
Notes
-----
This fit is computed by maximizing a log-likelihood function, with
penalty applied for samples outside the range of the distribution. The
returned answer is not guaranteed to be the globally optimal MLE; it
may only be locally optimal, or the optimization may fail altogether.
"""
Narg = len(args)
if Narg > self.numargs:
raise TypeError("Too many input arguments.")
start = [None]*2
if (Narg < self.numargs) or not ('loc' in kwds and
'scale' in kwds):
# get distribution specific starting locations
start = self._fitstart(data)
args += start[Narg:-2]
loc = kwds.get('loc', start[-2])
scale = kwds.get('scale', start[-1])
args += (loc, scale)
x0, func, restore, args = self._reduce_func(args, kwds)
optimizer = kwds.get('optimizer', optimize.fmin)
# convert string to function in scipy.optimize
if not callable(optimizer) and isinstance(optimizer, string_types):
if not optimizer.startswith('fmin_'):
optimizer = "fmin_"+optimizer
if optimizer == 'fmin_':
optimizer = 'fmin'
try:
optimizer = getattr(optimize, optimizer)
except AttributeError:
raise ValueError("%s is not a valid optimizer" % optimizer)
vals = optimizer(func, x0, args=(ravel(data),), disp=0)
if restore is not None:
vals = restore(args, vals)
vals = tuple(vals)
return vals
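# Sketch of typical ``fit`` usage, including holding a parameter fixed
# (``norm`` is used purely for illustration; the exact estimates depend on
# the data):
# >>> from scipy.stats import norm
# >>> data = norm.rvs(loc=5.0, scale=2.0, size=1000, random_state=0)
# >>> loc_hat, scale_hat = norm.fit(data)        # both estimated, roughly 5 and 2
# >>> loc0, scale0 = norm.fit(data, floc=0.0)    # location held fixed
# >>> loc0
# 0.0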
def fit_loc_scale(self, data, *args):
"""
Estimate loc and scale parameters from data using 1st and 2nd moments.
Parameters
----------
data : array_like
Data to fit.
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
Returns
-------
Lhat : float
Estimated location parameter for the data.
Shat : float
Estimated scale parameter for the data.
"""
mu, mu2 = self.stats(*args, **{'moments': 'mv'})
tmp = asarray(data)
muhat = tmp.mean()
mu2hat = tmp.var()
Shat = sqrt(mu2hat / mu2)
Lhat = muhat - Shat*mu
if not np.isfinite(Lhat):
Lhat = 0
if not (np.isfinite(Shat) and (0 < Shat)):
Shat = 1
return Lhat, Shat
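# Worked example of the method-of-moments start above (sketch): for the
# standard normal mu = 0 and mu2 = 1, so Lhat is just the sample mean and
# Shat the (population) sample standard deviation.
# >>> from scipy.stats import norm
# >>> norm.fit_loc_scale([1.0, 2.0, 3.0, 4.0])
# (2.5, 1.118...)    # mean 2.5, sqrt(1.25)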
@np.deprecate
def est_loc_scale(self, data, *args):
"""This function is deprecated, use self.fit_loc_scale(data) instead.
"""
return self.fit_loc_scale(data, *args)
def _entropy(self, *args):
def integ(x):
val = self._pdf(x, *args)
return entr(val)
# upper limit is often inf, so suppress warnings when integrating
olderr = np.seterr(over='ignore')
h = integrate.quad(integ, self.a, self.b)[0]
np.seterr(**olderr)
if not np.isnan(h):
return h
else:
# try with different limits if integration problems
low, upp = self.ppf([1e-10, 1. - 1e-10], *args)
if np.isinf(self.b):
upper = upp
else:
upper = self.b
if np.isinf(self.a):
lower = low
else:
lower = self.a
return integrate.quad(integ, lower, upper)[0]
def entropy(self, *args, **kwds):
"""
Differential entropy of the RV.
Parameters
----------
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
Location parameter (default=0).
scale : array_like, optional
Scale parameter (default=1).
"""
args, loc, scale = self._parse_args(*args, **kwds)
args = tuple(map(asarray, args))
cond0 = self._argcheck(*args) & (scale > 0) & (loc == loc)
output = zeros(shape(cond0), 'd')
place(output, (1-cond0), self.badvalue)
goodargs = argsreduce(cond0, *args)
# np.vectorize doesn't work when numargs == 0 in numpy 1.6.2. Once the
# lowest supported numpy version is >= 1.7.0, this special case can be
# removed (see gh-4314).
if self.numargs == 0:
place(output, cond0, self._entropy() + log(scale))
else:
place(output, cond0, self.vecentropy(*goodargs) + log(scale))
return output
def expect(self, func=None, args=(), loc=0, scale=1, lb=None, ub=None,
conditional=False, **kwds):
"""Calculate expected value of a function with respect to the
distribution.
The expected value of a function ``f(x)`` with respect to a
distribution ``dist`` is defined as::
E[f(x)] = Integral(f(x) * dist.pdf(x)) over [lbound, ubound]
Parameters
----------
func : callable, optional
Function for which integral is calculated. Takes only one argument.
The default is the identity mapping f(x) = x.
args : tuple, optional
Argument (parameters) of the distribution.
lb, ub : scalar, optional
Lower and upper bound for integration. default is set to the
support of the distribution.
conditional : bool, optional
If True, the integral is corrected by the conditional probability
of the integration interval. The return value is the expectation
of the function, conditional on being in the given interval.
Default is False.
Additional keyword arguments are passed to the integration routine.
Returns
-------
expect : float
The calculated expected value.
Notes
-----
The integration behavior of this function is inherited from
`integrate.quad`.
"""
lockwds = {'loc': loc,
'scale': scale}
self._argcheck(*args)
if func is None:
def fun(x, *args):
return x * self.pdf(x, *args, **lockwds)
else:
def fun(x, *args):
return func(x) * self.pdf(x, *args, **lockwds)
if lb is None:
lb = loc + self.a * scale
if ub is None:
ub = loc + self.b * scale
if conditional:
invfac = (self.sf(lb, *args, **lockwds)
- self.sf(ub, *args, **lockwds))
else:
invfac = 1.0
kwds['args'] = args
# Silence floating point warnings from integration.
olderr = np.seterr(all='ignore')
vals = integrate.quad(fun, lb, ub, **kwds)[0] / invfac
np.seterr(**olderr)
return vals
## Handlers for generic case where xk and pk are given
## The _drv prefix probably means discrete random variable.
def _drv_pmf(self, xk, *args):
try:
return self.P[xk]
except KeyError:
return 0.0
def _drv_cdf(self, xk, *args):
indx = argmax((self.xk > xk), axis=-1)-1
return self.F[self.xk[indx]]
def _drv_ppf(self, q, *args):
indx = argmax((self.qvals >= q), axis=-1)
return self.Finv[self.qvals[indx]]
def _drv_nonzero(self, k, *args):
return 1
def _drv_moment(self, n, *args):
n = asarray(n)
return sum(self.xk**n[np.newaxis, ...] * self.pk, axis=0)
def _drv_moment_gen(self, t, *args):
t = asarray(t)
return sum(exp(self.xk * t[np.newaxis, ...]) * self.pk, axis=0)
def _drv2_moment(self, n, *args):
"""Non-central moment of discrete distribution."""
# many changes, originally not even a return
tot = 0.0
diff = 1e100
# pos = self.a
pos = max(0.0, 1.0*self.a)
count = 0
# handle cases with infinite support
ulimit = max(1000, (min(self.b, 1000) + max(self.a, -1000))/2.0)
llimit = min(-1000, (min(self.b, 1000) + max(self.a, -1000))/2.0)
while (pos <= self.b) and ((pos <= ulimit) or
(diff > self.moment_tol)):
diff = np.power(pos, n) * self.pmf(pos, *args)
# use pmf because _pmf does not check support in randint and there
# might be problems ? with correct self.a, self.b at this stage
tot += diff
pos += self.inc
count += 1
if self.a < 0: # handle case when self.a = -inf
diff = 1e100
pos = -self.inc
while (pos >= self.a) and ((pos >= llimit) or
(diff > self.moment_tol)):
diff = np.power(pos, n) * self.pmf(pos, *args)
# using pmf instead of _pmf, see above
tot += diff
pos -= self.inc
count += 1
return tot
def _drv2_ppfsingle(self, q, *args): # Use basic bisection algorithm
b = self.b
a = self.a
if isinf(b): # Be sure ending point is > q
b = int(max(100*q, 10))
while 1:
if b >= self.b:
qb = 1.0
break
qb = self._cdf(b, *args)
if (qb < q):
b += 10
else:
break
else:
qb = 1.0
if isinf(a): # be sure starting point < q
a = int(min(-100*q, -10))
while 1:
if a <= self.a:
qb = 0.0
break
qa = self._cdf(a, *args)
if (qa > q):
a -= 10
else:
break
else:
qa = self._cdf(a, *args)
while 1:
if (qa == q):
return a
if (qb == q):
return b
if b <= a+1:
# testcase: return wrong number at lower index
# python -c "from scipy.stats import zipf;print zipf.ppf(0.01, 2)" wrong
# python -c "from scipy.stats import zipf;print zipf.ppf([0.01, 0.61, 0.77, 0.83], 2)"
# python -c "from scipy.stats import logser;print logser.ppf([0.1, 0.66, 0.86, 0.93], 0.6)"
if qa > q:
return a
else:
return b
c = int((a+b)/2.0)
qc = self._cdf(c, *args)
if (qc < q):
if a != c:
a = c
else:
raise RuntimeError('updating stopped, endless loop')
qa = qc
elif (qc > q):
if b != c:
b = c
else:
raise RuntimeError('updating stopped, endless loop')
qb = qc
else:
return c
def entropy(pk, qk=None, base=None):
"""Calculate the entropy of a distribution for given probability values.
If only probabilities `pk` are given, the entropy is calculated as
``S = -sum(pk * log(pk), axis=0)``.
If `qk` is not None, then compute the Kullback-Leibler divergence
``S = sum(pk * log(pk / qk), axis=0)``.
This routine will normalize `pk` and `qk` if they don't sum to 1.
Parameters
----------
pk : sequence
Defines the (discrete) distribution. ``pk[i]`` is the (possibly
unnormalized) probability of event ``i``.
qk : sequence, optional
Sequence against which the relative entropy is computed. Should be in
the same format as `pk`.
base : float, optional
The logarithmic base to use, defaults to ``e`` (natural logarithm).
Returns
-------
S : float
The calculated entropy.
"""
pk = asarray(pk)
pk = 1.0*pk / sum(pk, axis=0)
if qk is None:
vec = entr(pk)
else:
qk = asarray(qk)
if len(qk) != len(pk):
raise ValueError("qk and pk must have same length.")
qk = 1.0*qk / sum(qk, axis=0)
vec = kl_div(pk, qk)
S = sum(vec, axis=0)
if base is not None:
S /= log(base)
return S
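if __name__ == "__main__":
    # Hedged usage sketch (not part of the original module): Shannon entropy
    # and KL divergence for two illustrative probability vectors.
    _p = [0.5, 0.5]
    _q = [0.9, 0.1]
    print(entropy(_p))            # -sum(p*log(p)) = ln(2) ~ 0.693 nats
    print(entropy(_p, _q))        # sum(p*log(p/q)) ~ 0.511 nats
    print(entropy(_p, base=2))    # the same entropy expressed in bits: 1.0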
# Must over-ride one of _pmf or _cdf or pass in
# x_k, p(x_k) lists in initialization
class rv_discrete(rv_generic):
"""
A generic discrete random variable class meant for subclassing.
    `rv_discrete` is a base class to construct specific distribution classes
    and instances for discrete random variables. It can also be used
    to construct an arbitrary distribution defined by a list of support
    points and the corresponding probabilities.
Parameters
----------
a : float, optional
Lower bound of the support of the distribution, default: 0
b : float, optional
Upper bound of the support of the distribution, default: plus infinity
moment_tol : float, optional
The tolerance for the generic calculation of moments
values : tuple of two array_like
(xk, pk) where xk are points (integers) with positive probability pk
with sum(pk) = 1
inc : integer
increment for the support of the distribution, default: 1
other values have not been tested
badvalue : object, optional
The value in (masked) arrays that indicates a value that should be
ignored.
name : str, optional
The name of the instance. This string is used to construct the default
example for distributions.
longname : str, optional
This string is used as part of the first line of the docstring returned
when a subclass has no docstring of its own. Note: `longname` exists
for backwards compatibility, do not use for new subclasses.
shapes : str, optional
The shape of the distribution. For example ``"m, n"`` for a
distribution that takes two integers as the first two arguments for all
its methods.
extradoc : str, optional
This string is used as the last part of the docstring returned when a
subclass has no docstring of its own. Note: `extradoc` exists for
backwards compatibility, do not use for new subclasses.
seed : None or int or np.random.RandomState instance, optional
This parameter defines the RandomState object to use for drawing
random variates.
If None, the global np.random state is used.
If integer, it is used to seed the local RandomState instance
Default is None.
Methods
-------
``generic.rvs(<shape(s)>, loc=0, size=1)``
random variates
``generic.pmf(x, <shape(s)>, loc=0)``
probability mass function
    ``logpmf(x, <shape(s)>, loc=0)``
        log of the probability mass function
    ``generic.cdf(x, <shape(s)>, loc=0)``
        cumulative distribution function
    ``generic.logcdf(x, <shape(s)>, loc=0)``
        log of the cumulative distribution function
``generic.sf(x, <shape(s)>, loc=0)``
survival function (1-cdf --- sometimes more accurate)
``generic.logsf(x, <shape(s)>, loc=0, scale=1)``
log of the survival function
``generic.ppf(q, <shape(s)>, loc=0)``
percent point function (inverse of cdf --- percentiles)
``generic.isf(q, <shape(s)>, loc=0)``
inverse survival function (inverse of sf)
``generic.moment(n, <shape(s)>, loc=0)``
non-central n-th moment of the distribution. May not work for array
arguments.
``generic.stats(<shape(s)>, loc=0, moments='mv')``
mean('m', axis=0), variance('v'), skew('s'), and/or kurtosis('k')
``generic.entropy(<shape(s)>, loc=0)``
entropy of the RV
``generic.expect(func=None, args=(), loc=0, lb=None, ub=None, conditional=False)``
Expected value of a function with respect to the distribution.
Additional kwd arguments passed to integrate.quad
``generic.median(<shape(s)>, loc=0)``
Median of the distribution.
``generic.mean(<shape(s)>, loc=0)``
Mean of the distribution.
``generic.std(<shape(s)>, loc=0)``
Standard deviation of the distribution.
``generic.var(<shape(s)>, loc=0)``
Variance of the distribution.
``generic.interval(alpha, <shape(s)>, loc=0)``
Interval that with `alpha` percent probability contains a random
realization of this distribution.
``generic(<shape(s)>, loc=0)``
calling a distribution instance returns a frozen distribution
Notes
-----
You can construct an arbitrary discrete rv where ``P{X=xk} = pk``
by passing to the rv_discrete initialization method (through the
values=keyword) a tuple of sequences (xk, pk) which describes only those
values of X (xk) that occur with nonzero probability (pk).
To create a new discrete distribution, we would do the following::
class poisson_gen(rv_discrete):
# "Poisson distribution"
def _pmf(self, k, mu):
...
and create an instance::
poisson = poisson_gen(name="poisson",
longname='A Poisson')
The docstring can be created from a template.
Alternatively, the object may be called (as a function) to fix the shape
and location parameters returning a "frozen" discrete RV object::
myrv = generic(<shape(s)>, loc=0)
- frozen RV object with the same methods but holding the given
shape and location fixed.
A note on ``shapes``: subclasses need not specify them explicitly. In this
case, the `shapes` will be automatically deduced from the signatures of the
overridden methods.
If, for some reason, you prefer to avoid relying on introspection, you can
specify ``shapes`` explicitly as an argument to the instance constructor.
Examples
--------
Custom made discrete distribution:
>>> from scipy import stats
>>> xk = np.arange(7)
>>> pk = (0.1, 0.2, 0.3, 0.1, 0.1, 0.0, 0.2)
>>> custm = stats.rv_discrete(name='custm', values=(xk, pk))
>>>
>>> import matplotlib.pyplot as plt
>>> fig, ax = plt.subplots(1, 1)
>>> ax.plot(xk, custm.pmf(xk), 'ro', ms=12, mec='r')
>>> ax.vlines(xk, 0, custm.pmf(xk), colors='r', lw=4)
>>> plt.show()
Random number generation:
>>> R = custm.rvs(size=100)
"""
def __init__(self, a=0, b=inf, name=None, badvalue=None,
moment_tol=1e-8, values=None, inc=1, longname=None,
shapes=None, extradoc=None, seed=None):
super(rv_discrete, self).__init__(seed)
# cf generic freeze
self._ctor_param = dict(
a=a, b=b, name=name, badvalue=badvalue,
moment_tol=moment_tol, values=values, inc=inc,
longname=longname, shapes=shapes, extradoc=extradoc, seed=seed)
if badvalue is None:
badvalue = nan
if name is None:
name = 'Distribution'
self.badvalue = badvalue
self.a = a
self.b = b
self.name = name
self.moment_tol = moment_tol
self.inc = inc
self._cdfvec = vectorize(self._cdf_single, otypes='d')
self.return_integers = 1
self.vecentropy = vectorize(self._entropy)
self.shapes = shapes
self.extradoc = extradoc
if values is not None:
self.xk, self.pk = values
self.return_integers = 0
indx = argsort(ravel(self.xk))
self.xk = take(ravel(self.xk), indx, 0)
self.pk = take(ravel(self.pk), indx, 0)
self.a = self.xk[0]
self.b = self.xk[-1]
self.P = dict(zip(self.xk, self.pk))
self.qvals = np.cumsum(self.pk, axis=0)
self.F = dict(zip(self.xk, self.qvals))
decreasing_keys = sorted(self.F.keys(), reverse=True)
self.Finv = dict((self.F[k], k) for k in decreasing_keys)
self._ppf = instancemethod(vectorize(_drv_ppf, otypes='d'),
self, rv_discrete)
self._pmf = instancemethod(vectorize(_drv_pmf, otypes='d'),
self, rv_discrete)
self._cdf = instancemethod(vectorize(_drv_cdf, otypes='d'),
self, rv_discrete)
self._nonzero = instancemethod(_drv_nonzero, self, rv_discrete)
self.generic_moment = instancemethod(_drv_moment,
self, rv_discrete)
self.moment_gen = instancemethod(_drv_moment_gen,
self, rv_discrete)
self._construct_argparser(meths_to_inspect=[_drv_pmf],
locscale_in='loc=0',
# scale=1 for discrete RVs
locscale_out='loc, 1')
else:
self._construct_argparser(meths_to_inspect=[self._pmf, self._cdf],
locscale_in='loc=0',
# scale=1 for discrete RVs
locscale_out='loc, 1')
# nin correction needs to be after we know numargs
# correct nin for generic moment vectorization
_vec_generic_moment = vectorize(_drv2_moment, otypes='d')
_vec_generic_moment.nin = self.numargs + 2
self.generic_moment = instancemethod(_vec_generic_moment,
self, rv_discrete)
# backwards compat. was removed in 0.14.0, put back but
# deprecated in 0.14.1:
self.vec_generic_moment = np.deprecate(_vec_generic_moment,
"vec_generic_moment",
"generic_moment")
# correct nin for ppf vectorization
_vppf = vectorize(_drv2_ppfsingle, otypes='d')
_vppf.nin = self.numargs + 2 # +1 is for self
self._ppfvec = instancemethod(_vppf,
self, rv_discrete)
# now that self.numargs is defined, we can adjust nin
self._cdfvec.nin = self.numargs + 1
# generate docstring for subclass instances
if longname is None:
            if name[0] in 'aeiouAEIOU':
hstr = "An "
else:
hstr = "A "
longname = hstr + name
if sys.flags.optimize < 2:
# Skip adding docstrings if interpreter is run with -OO
if self.__doc__ is None:
self._construct_default_doc(longname=longname,
extradoc=extradoc)
else:
dct = dict(distdiscrete)
self._construct_doc(docdict_discrete, dct.get(self.name))
                # discrete RVs do not have the scale parameter, remove it
self.__doc__ = self.__doc__.replace(
'\n scale : array_like, '
'optional\n scale parameter (default=1)', '')
def _construct_default_doc(self, longname=None, extradoc=None):
"""Construct instance docstring from the rv_discrete template."""
if extradoc is None:
extradoc = ''
if extradoc.startswith('\n\n'):
extradoc = extradoc[2:]
self.__doc__ = ''.join(['%s discrete random variable.' % longname,
'\n\n%(before_notes)s\n', docheaders['notes'],
extradoc, '\n%(example)s'])
self._construct_doc(docdict_discrete)
def _nonzero(self, k, *args):
return floor(k) == k
def _pmf(self, k, *args):
return self._cdf(k, *args) - self._cdf(k-1, *args)
def _logpmf(self, k, *args):
return log(self._pmf(k, *args))
def _cdf_single(self, k, *args):
m = arange(int(self.a), k+1)
return sum(self._pmf(m, *args), axis=0)
def _cdf(self, x, *args):
k = floor(x)
return self._cdfvec(k, *args)
# generic _logcdf, _sf, _logsf, _ppf, _isf, _rvs defined in rv_generic
def rvs(self, *args, **kwargs):
"""
Random variates of given type.
Parameters
----------
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
Location parameter (default=0).
size : int or tuple of ints, optional
Defining number of random variates (default=1). Note that `size`
has to be given as keyword, not as positional argument.
random_state : None or int or np.random.RandomState instance, optional
If int or RandomState, use it for drawing the random variates
If None, rely on self.random_state
Default is None.
Returns
-------
rvs : ndarray or scalar
Random variates of given `size`.
"""
kwargs['discrete'] = True
return super(rv_discrete, self).rvs(*args, **kwargs)
def pmf(self, k, *args, **kwds):
"""
Probability mass function at k of the given RV.
Parameters
----------
k : array_like
quantiles
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
Location parameter (default=0).
Returns
-------
pmf : array_like
Probability mass function evaluated at k
"""
args, loc, _ = self._parse_args(*args, **kwds)
k, loc = map(asarray, (k, loc))
args = tuple(map(asarray, args))
k = asarray((k-loc))
cond0 = self._argcheck(*args)
cond1 = (k >= self.a) & (k <= self.b) & self._nonzero(k, *args)
cond = cond0 & cond1
output = zeros(shape(cond), 'd')
place(output, (1-cond0) + np.isnan(k), self.badvalue)
if any(cond):
goodargs = argsreduce(cond, *((k,)+args))
place(output, cond, np.clip(self._pmf(*goodargs), 0, 1))
if output.ndim == 0:
return output[()]
return output
def logpmf(self, k, *args, **kwds):
"""
Log of the probability mass function at k of the given RV.
Parameters
----------
k : array_like
Quantiles.
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
Location parameter. Default is 0.
Returns
-------
logpmf : array_like
Log of the probability mass function evaluated at k.
"""
args, loc, _ = self._parse_args(*args, **kwds)
k, loc = map(asarray, (k, loc))
args = tuple(map(asarray, args))
k = asarray((k-loc))
cond0 = self._argcheck(*args)
cond1 = (k >= self.a) & (k <= self.b) & self._nonzero(k, *args)
cond = cond0 & cond1
output = empty(shape(cond), 'd')
output.fill(NINF)
place(output, (1-cond0) + np.isnan(k), self.badvalue)
if any(cond):
goodargs = argsreduce(cond, *((k,)+args))
place(output, cond, self._logpmf(*goodargs))
if output.ndim == 0:
return output[()]
return output
def cdf(self, k, *args, **kwds):
"""
Cumulative distribution function of the given RV.
Parameters
----------
k : array_like, int
Quantiles.
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
Location parameter (default=0).
Returns
-------
cdf : ndarray
Cumulative distribution function evaluated at `k`.
"""
args, loc, _ = self._parse_args(*args, **kwds)
k, loc = map(asarray, (k, loc))
args = tuple(map(asarray, args))
k = asarray((k-loc))
cond0 = self._argcheck(*args)
cond1 = (k >= self.a) & (k < self.b)
cond2 = (k >= self.b)
cond = cond0 & cond1
output = zeros(shape(cond), 'd')
place(output, (1-cond0) + np.isnan(k), self.badvalue)
place(output, cond2*(cond0 == cond0), 1.0)
if any(cond):
goodargs = argsreduce(cond, *((k,)+args))
place(output, cond, np.clip(self._cdf(*goodargs), 0, 1))
if output.ndim == 0:
return output[()]
return output
def logcdf(self, k, *args, **kwds):
"""
Log of the cumulative distribution function at k of the given RV
Parameters
----------
k : array_like, int
Quantiles.
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
Location parameter (default=0).
Returns
-------
logcdf : array_like
Log of the cumulative distribution function evaluated at k.
"""
args, loc, _ = self._parse_args(*args, **kwds)
k, loc = map(asarray, (k, loc))
args = tuple(map(asarray, args))
k = asarray((k-loc))
cond0 = self._argcheck(*args)
cond1 = (k >= self.a) & (k < self.b)
cond2 = (k >= self.b)
cond = cond0 & cond1
output = empty(shape(cond), 'd')
output.fill(NINF)
place(output, (1-cond0) + np.isnan(k), self.badvalue)
place(output, cond2*(cond0 == cond0), 0.0)
if any(cond):
goodargs = argsreduce(cond, *((k,)+args))
place(output, cond, self._logcdf(*goodargs))
if output.ndim == 0:
return output[()]
return output
def sf(self, k, *args, **kwds):
"""
Survival function (1-cdf) at k of the given RV.
Parameters
----------
k : array_like
Quantiles.
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
Location parameter (default=0).
Returns
-------
sf : array_like
Survival function evaluated at k.
"""
args, loc, _ = self._parse_args(*args, **kwds)
k, loc = map(asarray, (k, loc))
args = tuple(map(asarray, args))
k = asarray(k-loc)
cond0 = self._argcheck(*args)
cond1 = (k >= self.a) & (k <= self.b)
cond2 = (k < self.a) & cond0
cond = cond0 & cond1
output = zeros(shape(cond), 'd')
place(output, (1-cond0) + np.isnan(k), self.badvalue)
place(output, cond2, 1.0)
if any(cond):
goodargs = argsreduce(cond, *((k,)+args))
place(output, cond, np.clip(self._sf(*goodargs), 0, 1))
if output.ndim == 0:
return output[()]
return output
def logsf(self, k, *args, **kwds):
"""
Log of the survival function of the given RV.
Returns the log of the "survival function," defined as ``1 - cdf``,
evaluated at `k`.
Parameters
----------
k : array_like
Quantiles.
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
Location parameter (default=0).
Returns
-------
logsf : ndarray
Log of the survival function evaluated at `k`.
"""
args, loc, _ = self._parse_args(*args, **kwds)
k, loc = map(asarray, (k, loc))
args = tuple(map(asarray, args))
k = asarray(k-loc)
cond0 = self._argcheck(*args)
cond1 = (k >= self.a) & (k <= self.b)
cond2 = (k < self.a) & cond0
cond = cond0 & cond1
output = empty(shape(cond), 'd')
output.fill(NINF)
place(output, (1-cond0) + np.isnan(k), self.badvalue)
place(output, cond2, 0.0)
if any(cond):
goodargs = argsreduce(cond, *((k,)+args))
place(output, cond, self._logsf(*goodargs))
if output.ndim == 0:
return output[()]
return output
def ppf(self, q, *args, **kwds):
"""
Percent point function (inverse of cdf) at q of the given RV
Parameters
----------
q : array_like
Lower tail probability.
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
Location parameter (default=0).
scale : array_like, optional
Scale parameter (default=1).
Returns
-------
k : array_like
Quantile corresponding to the lower tail probability, q.
"""
args, loc, _ = self._parse_args(*args, **kwds)
q, loc = map(asarray, (q, loc))
args = tuple(map(asarray, args))
cond0 = self._argcheck(*args) & (loc == loc)
cond1 = (q > 0) & (q < 1)
cond2 = (q == 1) & cond0
cond = cond0 & cond1
output = valarray(shape(cond), value=self.badvalue, typecode='d')
        # output type 'd' to handle nan and inf
place(output, (q == 0)*(cond == cond), self.a-1)
place(output, cond2, self.b)
if any(cond):
goodargs = argsreduce(cond, *((q,)+args+(loc,)))
loc, goodargs = goodargs[-1], goodargs[:-1]
place(output, cond, self._ppf(*goodargs) + loc)
if output.ndim == 0:
return output[()]
return output
def isf(self, q, *args, **kwds):
"""
Inverse survival function (inverse of `sf`) at q of the given RV.
Parameters
----------
q : array_like
Upper tail probability.
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
Location parameter (default=0).
Returns
-------
k : ndarray or scalar
Quantile corresponding to the upper tail probability, q.
"""
args, loc, _ = self._parse_args(*args, **kwds)
q, loc = map(asarray, (q, loc))
args = tuple(map(asarray, args))
cond0 = self._argcheck(*args) & (loc == loc)
cond1 = (q > 0) & (q < 1)
cond2 = (q == 1) & cond0
cond = cond0 & cond1
# same problem as with ppf; copied from ppf and changed
output = valarray(shape(cond), value=self.badvalue, typecode='d')
        # output type 'd' to handle nan and inf
place(output, (q == 0)*(cond == cond), self.b)
place(output, cond2, self.a-1)
# call place only if at least 1 valid argument
if any(cond):
goodargs = argsreduce(cond, *((q,)+args+(loc,)))
loc, goodargs = goodargs[-1], goodargs[:-1]
# PB same as ticket 766
place(output, cond, self._isf(*goodargs) + loc)
if output.ndim == 0:
return output[()]
return output
def _entropy(self, *args):
if hasattr(self, 'pk'):
return entropy(self.pk)
else:
mu = int(self.stats(*args, **{'moments': 'm'}))
val = self.pmf(mu, *args)
ent = entr(val)
k = 1
term = 1.0
while (abs(term) > _EPS):
val = self.pmf(mu+k, *args)
term = entr(val)
val = self.pmf(mu-k, *args)
term += entr(val)
k += 1
ent += term
return ent
def expect(self, func=None, args=(), loc=0, lb=None, ub=None,
conditional=False):
"""
Calculate expected value of a function with respect to the distribution
for discrete distribution
Parameters
----------
        func : callable, optional
            Function for which the sum is calculated. Takes only one argument.
            The default is the identity mapping f(k) = k.
        args : tuple, optional
            Argument (parameters) of the distribution.
        lb, ub : numbers, optional
            Lower and upper bound for the summation, default is set to the
            support of the distribution; lb and ub are inclusive (lb <= k <= ub).
conditional : bool, optional
Default is False.
If true then the expectation is corrected by the conditional
probability of the integration interval. The return value is the
expectation of the function, conditional on being in the given
            interval (k such that lb <= k <= ub).
Returns
-------
expect : float
Expected value.
Notes
-----
* function is not vectorized
        * accuracy: uses self.moment_tol as the stopping criterion;
          for a heavy-tailed distribution such as zipf(4), the accuracy of the
          mean and variance in this example is only about 1e-5, and
          increasing the precision (moment_tol) makes zipf very slow
* suppnmin=100 internal parameter for minimum number of points to
evaluate could be added as keyword parameter, to evaluate functions
with non-monotonic shapes, points include integers in (-suppnmin,
suppnmin)
* uses maxcount=1000 limits the number of points that are evaluated
to break loop for infinite sums
(a maximum of suppnmin+1000 positive plus suppnmin+1000 negative
integers are evaluated)
"""
# moment_tol = 1e-12 # increase compared to self.moment_tol,
# too slow for only small gain in precision for zipf
# avoid endless loop with unbound integral, eg. var of zipf(2)
maxcount = 1000
suppnmin = 100 # minimum number of points to evaluate (+ and -)
if func is None:
def fun(x):
# loc and args from outer scope
return (x+loc)*self._pmf(x, *args)
else:
def fun(x):
# loc and args from outer scope
return func(x+loc)*self._pmf(x, *args)
# used pmf because _pmf does not check support in randint and there
# might be problems(?) with correct self.a, self.b at this stage maybe
# not anymore, seems to work now with _pmf
self._argcheck(*args) # (re)generate scalar self.a and self.b
if lb is None:
lb = (self.a)
else:
lb = lb - loc # convert bound for standardized distribution
if ub is None:
ub = (self.b)
else:
ub = ub - loc # convert bound for standardized distribution
if conditional:
if np.isposinf(ub)[()]:
# work around bug: stats.poisson.sf(stats.poisson.b, 2) is nan
invfac = 1 - self.cdf(lb-1, *args)
else:
invfac = 1 - self.cdf(lb-1, *args) - self.sf(ub, *args)
else:
invfac = 1.0
tot = 0.0
low, upp = self._ppf(0.001, *args), self._ppf(0.999, *args)
low = max(min(-suppnmin, low), lb)
upp = min(max(suppnmin, upp), ub)
supp = np.arange(low, upp+1, self.inc) # check limits
tot = np.sum(fun(supp))
diff = 1e100
pos = upp + self.inc
count = 0
# handle cases with infinite support
while (pos <= ub) and (diff > self.moment_tol) and count <= maxcount:
diff = fun(pos)
tot += diff
pos += self.inc
count += 1
if self.a < 0: # handle case when self.a = -inf
diff = 1e100
pos = low - self.inc
while ((pos >= lb) and (diff > self.moment_tol) and
count <= maxcount):
diff = fun(pos)
tot += diff
pos -= self.inc
count += 1
if count > maxcount:
warnings.warn('expect(): sum did not converge', RuntimeWarning)
return tot/invfac
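if __name__ == "__main__":
    # Hedged sketch (illustrative values, not part of the original module):
    # expected values of a small custom discrete distribution via expect().
    _xk = [0, 1, 2]
    _pk = [0.2, 0.5, 0.3]
    _demo = rv_discrete(name='demo', values=(_xk, _pk))
    print(_demo.expect())                   # E[X] = 0*0.2 + 1*0.5 + 2*0.3 = 1.1
    print(_demo.expect(lambda x: x ** 2))   # E[X^2] = 0.5 + 4*0.3 = 1.7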
def get_distribution_names(namespace_pairs, rv_base_class):
"""
Collect names of statistical distributions and their generators.
Parameters
----------
namespace_pairs : sequence
A snapshot of (name, value) pairs in the namespace of a module.
rv_base_class : class
The base class of random variable generator classes in a module.
Returns
-------
distn_names : list of strings
Names of the statistical distributions.
distn_gen_names : list of strings
Names of the generators of the statistical distributions.
Note that these are not simply the names of the statistical
distributions, with a _gen suffix added.
"""
distn_names = []
distn_gen_names = []
for name, value in namespace_pairs:
if name.startswith('_'):
continue
if name.endswith('_gen') and issubclass(value, rv_base_class):
distn_gen_names.append(name)
if isinstance(value, rv_base_class):
distn_names.append(name)
return distn_names, distn_gen_names
| bsd-3-clause |
Titan-C/scikit-learn | examples/classification/plot_classification_probability.py | 138 | 2871 | """
===============================
Plot classification probability
===============================
Plot the classification probability for different classifiers. We use a 3
class dataset, and we classify it with a Support Vector classifier, L1
and L2 penalized logistic regression with either a One-Vs-Rest or multinomial
setting, and Gaussian process classification.
The logistic regression is not a multiclass classifier out of the box. As
a result it can identify only the first class.
"""
print(__doc__)
# Author: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import RBF
from sklearn import datasets
iris = datasets.load_iris()
X = iris.data[:, 0:2] # we only take the first two features for visualization
y = iris.target
n_features = X.shape[1]
C = 1.0
kernel = 1.0 * RBF([1.0, 1.0]) # for GPC
# Create different classifiers. The logistic regression cannot do
# multiclass out of the box.
classifiers = {'L1 logistic': LogisticRegression(C=C, penalty='l1'),
'L2 logistic (OvR)': LogisticRegression(C=C, penalty='l2'),
'Linear SVC': SVC(kernel='linear', C=C, probability=True,
random_state=0),
'L2 logistic (Multinomial)': LogisticRegression(
C=C, solver='lbfgs', multi_class='multinomial'),
'GPC': GaussianProcessClassifier(kernel)
}
n_classifiers = len(classifiers)
plt.figure(figsize=(3 * 2, n_classifiers * 2))
plt.subplots_adjust(bottom=.2, top=.95)
xx = np.linspace(3, 9, 100)
yy = np.linspace(1, 5, 100).T
xx, yy = np.meshgrid(xx, yy)
Xfull = np.c_[xx.ravel(), yy.ravel()]
for index, (name, classifier) in enumerate(classifiers.items()):
classifier.fit(X, y)
y_pred = classifier.predict(X)
classif_rate = np.mean(y_pred.ravel() == y.ravel()) * 100
print("classif_rate for %s : %f " % (name, classif_rate))
    # View probabilities
probas = classifier.predict_proba(Xfull)
n_classes = np.unique(y_pred).size
for k in range(n_classes):
plt.subplot(n_classifiers, n_classes, index * n_classes + k + 1)
plt.title("Class %d" % k)
if k == 0:
plt.ylabel(name)
imshow_handle = plt.imshow(probas[:, k].reshape((100, 100)),
extent=(3, 9, 1, 5), origin='lower')
plt.xticks(())
plt.yticks(())
idx = (y_pred == k)
if idx.any():
plt.scatter(X[idx, 0], X[idx, 1], marker='o', c='k')
ax = plt.axes([0.15, 0.04, 0.7, 0.05])
plt.title("Probability")
plt.colorbar(imshow_handle, cax=ax, orientation='horizontal')
plt.show()
| bsd-3-clause |
QUANTAXIS/QUANTAXIS | QUANTAXIS/QAApplication/QAAnalysis.py | 2 | 9291 | # Encoding:UTF-8
#
# The MIT License (MIT)
#
# Copyright (c) 2016-2021 yutiansut/QUANTAXIS
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
Analysis Center for Backtest
we will give some function
"""
import math
import sys
import numpy
import pandas as pd
from QUANTAXIS.QAFetch.QAQuery import QA_fetch_stock_day
from QUANTAXIS.QAUtil import QA_util_log_info, trade_date_sse
def QA_backtest_analysis_backtest(client, code_list, assets_d, account_days, message, total_date, benchmark_data):
    # The analysis is mainly based on message_history:
    # 1. rate of return
    # 2. win rate
    # 3. drawdown
"""
    Annualized Returns: the strategy's annualized rate of return, i.e. the expected return over a one-year horizon.
    Computed as (final strategy value / initial strategy value) ^ (250 / number of backtest trading days) - 1.
    Alpha:
    Computed as (strategy annualized return - risk-free return) - beta * (benchmark annualized return - risk-free return),
    where the risk-free return is the annualized yield to maturity of the 10-year bond on China's fixed-rate treasury yield curve.
    Beta:
    Computed as covariance(strategy daily returns, benchmark daily returns) / variance(benchmark daily returns).
    Sharpe Ratio: the excess return earned per unit of total risk taken.
    Computed as (strategy annualized return - risk-free rate on the first backtest trading day) / strategy return volatility.
    Volatility: the volatility of the strategy's returns, used to measure the riskiness of the asset.
    Computed as the annualized standard deviation of the strategy's daily returns.
    Information Ratio: the excess return obtained per unit of excess risk.
    Computed as the annualized mean of (strategy daily return - benchmark daily return) / its annualized standard deviation.
    Max Drawdown: describes the worst case the strategy may run into.
    Computed as max(1 - strategy value on a given day / highest virtual account value before that day).
    Per-trade profit
    Histogram of profit vs. number of trades
    Maximum single-day position
"""
    # sanity-check the input data
if (len(benchmark_data)) < 1:
QA_util_log_info('Wrong with benchmark data ! ')
sys.exit()
    # build a benchmark
    # the benchmark buys, at market price on the starting day, all the stocks selected by the strategy and then holds them throughout
data = pd.concat([pd.DataFrame(message['body']['account']['history'],
columns=['time', 'code', 'price', 'towards', 'amount', 'order_id', 'trade_id', 'commission']),
pd.DataFrame(message['body']['account']['assets'], columns=['assets'])], axis=1)
data['time'] = pd.to_datetime(data['time'], utc=False)
data.set_index('time', drop=False, inplace=True)
trade_history = message['body']['account']['history']
cash = message['body']['account']['cash']
assets = message['body']['account']['assets']
#assets_= data.resample('D').last().dropna()
    # compute the trading days
    trade_date = account_days
    # benchmark assets
benchmark_assets = QA_backtest_calc_benchmark(
benchmark_data, assets[0])
# d2=pd.concat([data.resample('D').last(),pd.DataFrame(benchmark_assets,columns=['benchmark'])])
    # benchmark annualized return
benchmark_annualized_returns = QA_backtest_calc_profit_per_year(
benchmark_assets, len(total_date))
    # compute the account's returns
    # days=len(assest_history)-1
    # strategy annualized return
annualized_returns = QA_backtest_calc_profit_per_year(
assets_d, len(total_date))
    # profit (return) series
assest_profit = QA_backtest_calc_profit_matrix(assets)
benchmark_profit = QA_backtest_calc_profit_matrix(benchmark_assets)
    # strategy daily returns
profit_day = QA_backtest_calc_profit_matrix(assets_d)
    # win rate
win_rate = QA_backtest_calc_win_rate(assest_profit)
    # daily win rate
win_rate_day = QA_backtest_calc_win_rate(profit_day)
    # annualized volatility
volatility_year = QA_backtest_calc_volatility(profit_day)
benchmark_volatility_year = QA_backtest_calc_volatility(benchmark_profit)
    # Sharpe ratio
sharpe = QA_backtest_calc_sharpe(
annualized_returns, 0.05, volatility_year)
    # max drawdown
max_drop = QA_backtest_calc_dropback_max(assets_d)
    # compute beta
beta = QA_backtest_calc_beta(profit_day, benchmark_profit)
    # compute alpha
alpha = QA_backtest_calc_alpha(
annualized_returns, benchmark_annualized_returns, beta, 0.05)
message = {
'code': code_list,
'annualized_returns': annualized_returns,
'benchmark_annualized_returns': benchmark_annualized_returns,
'assets': assets_d[1:],
'benchmark_assets': benchmark_assets[1:],
'vol': volatility_year,
'benchmark_vol': benchmark_volatility_year,
'sharpe': sharpe,
'alpha': alpha,
'beta': beta,
'total_date': total_date,
'trade_date': trade_date,
'max_drop': max_drop,
'win_rate': win_rate}
return message
def QA_backtest_calc_assets(trade_history, assets):
assets_d = []
trade_date = []
for i in range(0, len(trade_history), 1):
if trade_history[i][0] not in trade_date:
trade_date.append(trade_history[i][0])
assets_d.append(assets[i])
else:
assets_d.pop(-1)
assets_d.append(assets[i])
return assets_d
def QA_backtest_calc_benchmark(benchmark_data, init_assets):
return list(benchmark_data['close'] / float(benchmark_data['open'][0]) * float(init_assets))
def QA_backtest_calc_alpha(annualized_returns, benchmark_annualized_returns, beta, r):
alpha = (annualized_returns - r) - (beta) * \
(benchmark_annualized_returns - r)
return alpha
def QA_backtest_calc_beta(assest_profit, benchmark_profit):
if len(assest_profit) < len(benchmark_profit):
for i in range(0, len(benchmark_profit) - len(assest_profit), 1):
assest_profit.append(0)
elif len(assest_profit) > len(benchmark_profit):
for i in range(0, len(assest_profit) - len(benchmark_profit), 1):
benchmark_profit.append(0)
calc_cov = numpy.cov(assest_profit, benchmark_profit)
beta = calc_cov[0, 1] / calc_cov[1, 1]
return beta
def QA_backtest_calc_profit(assest_history):
return (assest_history[-1] / assest_history[1]) - 1
def QA_backtest_calc_profit_per_year(assest_history, days):
return math.pow(float(assest_history[-1]) / float(assest_history[0]), 250.0 / float(days)) - 1.0
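if __name__ == "__main__":
    # Hedged sketch with made-up numbers: equity grows from 100 to 110 over
    # 125 trading days -> (110/100) ** (250/125) - 1 = 0.21 annualized.
    print(QA_backtest_calc_profit_per_year([100.0, 105.0, 110.0], 125))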
def QA_backtest_calc_profit_matrix(assest_history):
assest_profit = []
if len(assest_history) > 1:
assest_profit = [assest_history[i + 1] / assest_history[i] -
1.0 for i in range(len(assest_history) - 1)]
return assest_profit
def QA_backtest_calc_volatility(assest_profit_matrix):
    # annualized standard deviation of the strategy's daily returns
assest_profit = assest_profit_matrix
volatility_day = numpy.std(assest_profit)
volatility_year = volatility_day * math.sqrt(250)
return volatility_year
def QA_backtest_calc_dropback_max(history):
drops = []
for i in range(1, len(history), 1):
maxs = max(history[:i])
cur = history[i - 1]
drop = 1 - cur / maxs
drops.append(drop)
max_drop = max(drops)
return max_drop
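if __name__ == "__main__":
    # Hedged sketch: for the equity path 100 -> 120 -> 90 -> 110 the largest
    # peak-to-trough loss is 1 - 90/120 = 0.25.
    print(QA_backtest_calc_dropback_max([100.0, 120.0, 90.0, 110.0]))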
def QA_backtest_calc_sharpe(annualized_returns, r, volatility_year):
    'Compute the Sharpe ratio.'
return (annualized_returns - r) / volatility_year
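if __name__ == "__main__":
    # Hedged sketch with made-up inputs: 21% annualized return, 5% risk-free
    # rate, 20% annualized volatility -> Sharpe = (0.21 - 0.05) / 0.20 = 0.8.
    print(QA_backtest_calc_sharpe(0.21, 0.05, 0.20))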
def QA_backtest_calc_trade_date(history):
    'Compute the list of trading dates.'
trade_date = []
# trade_date_sse.index(history[-1][0])-trade_date_sse.index(history[0][0])
for i in range(0, len(history), 1):
if history[i][0] not in trade_date:
trade_date.append(history[i][0])
return trade_date
def calc_trade_time(history):
return len(history)
def calc_every_pnl(detail):
pass
def QA_backtest_calc_win_rate(profit_day):
    # number of days with returns above zero
abovez = 0
belowz = 0
for i in range(0, len(profit_day) - 1, 1):
if profit_day[i] > 0:
abovez = abovez + 1
elif profit_day[i] < 0:
belowz = belowz + 1
if belowz == 0:
belowz = 1
if abovez == 0:
abovez = 1
win_rate = abovez / (abovez + belowz)
return win_rate
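if __name__ == "__main__":
    # Hedged sketch with an illustrative daily-return series: four up days and
    # two down days among the six values evaluated -> win rate 4/6 ~ 0.67.
    print(QA_backtest_calc_win_rate([0.01, -0.02, 0.03, 0.005, -0.01, 0.02, 0.0]))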
| mit |
jskDr/jamespy_py3 | jutil.py | 1 | 50740 | """
Some utilities which I made.
Editor - Sungjin Kim, 2015-4-17
"""
#Common library
from sklearn import linear_model, svm, model_selection, metrics
import matplotlib.pyplot as plt
import numpy as np
import time
#import subprocess
import pandas as pd
import itertools
import random
#My personal library
import jchem
import j3x.jpyx
from maml.gp import gaussian_process as gp
def _sleast_r0( a = '1000', ln = 10):
"It returns 0 filled string with the length of ln."
if ln > len(a):
return '0'*(ln - len(a)) + a
else:
return a[-ln:]
def sleast( a = '1000', ln = 10):
"It returns 0 filled string with the length of ln."
if ln > len(a):
return a + '0'*(ln - len(a))
else:
return a
def int_bp( b_ch):
"map '0' --> -1, '1' --> -1"
b_int = int( b_ch)
return 1 - 2 * b_int
def prange( pat, st, ed, ic=1):
ar = []
for ii in range( st, ed, ic):
ar.extend( [ii + jj for jj in pat])
return [x for x in ar if x < ed]
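if __name__ == "__main__":
    # Hedged sketch: repeat the offset pattern [0, 1] every 5 positions within
    # [0, 10) and keep only the indices below the end value.
    print(prange([0, 1], 0, 10, 5))  # -> [0, 1, 5, 6]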
class Timer:
    def __enter__(self):
        self.start = time.perf_counter()  # time.clock() was removed in Python 3.8
        return self
    def __exit__(self, *args):
        self.end = time.perf_counter()
        self.interval = self.end - self.start
        print('Elapsed time: {}sec'.format(self.interval))
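if __name__ == "__main__":
    # Hedged sketch: time a small computation with the context manager above.
    with Timer() as t:
        _ = sum(x * x for x in range(100000))
    # t.interval holds the elapsed seconds (also printed on exit).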
def mlr( RM, yE, disp = True, graph = True):
clf = linear_model.LinearRegression()
clf.fit( RM, yE)
mlr_show( clf, RM, yE, disp = disp, graph = graph)
def mlr3( RM, yE, disp = True, graph = True):
clf = linear_model.LinearRegression()
clf.fit( RM, yE)
mlr_show3( clf, RM, yE, disp = disp, graph = graph)
def mlr3_coef( RM, yE, disp = True, graph = True):
clf = linear_model.LinearRegression()
clf.fit( RM, yE)
mlr_show3( clf, RM, yE, disp = disp, graph = graph)
return clf.coef_, clf.intercept_
def mlr4_coef( RM, yE, disp = True, graph = True):
clf = linear_model.LinearRegression()
clf.fit( RM, yE)
mlr_show4( clf, RM, yE, disp = disp, graph = graph)
return clf.coef_, clf.intercept_
def mlr_ridge( RM, yE, alpha = 0.5, disp = True, graph = True):
clf = linear_model.Ridge( alpha = alpha)
clf.fit( RM, yE)
mlr_show( clf, RM, yE, disp = disp, graph = graph)
def mlr3_coef_ridge( RM, yE, alpha = 0.5, disp = True, graph = True):
"""
Return regression coefficients and intercept
"""
clf = linear_model.Ridge( alpha = alpha)
clf.fit( RM, yE)
mlr_show( clf, RM, yE, disp = disp, graph = graph)
return clf.coef_, clf.intercept_
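if __name__ == "__main__":
    # Hedged sketch with synthetic data; it assumes the companion jchem module
    # used by the *_show helpers is importable in this environment.
    X_demo = np.random.randn(40, 3)
    y_demo = X_demo.dot(np.array([[1.0], [0.5], [-2.0]])) + 0.01 * np.random.randn(40, 1)
    coef_demo, intercept_demo = mlr3_coef_ridge(X_demo, y_demo, alpha=0.5, graph=False)
    print(coef_demo, intercept_demo)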
def ann_pre( RM, yE, disp = True, graph = True):
"""
    In the ANN case, separate pre- and post-processing functions are used,
    while in the MLR case all processing is completed by one function (mlr).
    The ANN processing itself is performed by a shell command.
"""
jchem.gen_input_files_valid( RM, yE, RM)
def ann_post( yv, disp = True, graph = True):
"""
After ann_pre and shell command, ann_post can be used.
"""
df_ann = pd.read_csv( 'ann_out.csv')
yv_ann = np.mat( df_ann['out'].tolist()).T
r_sqr, RMSE = ann_show( yv, yv_ann, disp = disp, graph = graph)
return r_sqr, RMSE
def ann_post_range( range_tr, range_val, yv, disp = True, graph = True):
"""
After ann_pre and shell command, ann_post can be used.
"""
df_ann = pd.read_csv( 'ann_out.csv')
yv_ann = np.mat( df_ann['out'].tolist()).T
print("Traning:")
ann_show( yv[range_tr, 0], yv_ann[range_tr, 0], disp = disp, graph = graph)
print("Validation:")
r_sqr, RMSE = ann_show( yv[range_val, 0] , yv_ann[range_val, 0], disp = disp, graph = graph)
return r_sqr, RMSE
def _ann_show_r0( yEv, yEv_calc, disp = True, graph = True):
r_sqr, RMSE = jchem.estimate_accuracy( yEv, yEv_calc, disp = disp)
if graph:
plt.scatter( yEv.tolist(), yEv_calc.tolist())
ax = plt.gca()
lims = [
np.min([ax.get_xlim(), ax.get_ylim()]), # min of both axes
np.max([ax.get_xlim(), ax.get_ylim()]), # max of both axes
]
        # now plot both limits against each other
#ax.plot(lims, lims, 'k-', alpha=0.75, zorder=0)
ax.plot(lims, lims, '-', color = 'pink')
plt.xlabel('Target')
plt.ylabel('Prediction')
plt.show()
return r_sqr, RMSE
def _regress_show_r0( yEv, yEv_calc, disp = True, graph = True, plt_title = None):
    # if the output is a vector and the original is a matrix,
# the output is translated to a matrix.
if len( np.shape(yEv)) == 2 and len( np.shape(yEv_calc)) == 1:
yEv_calc = np.mat( yEv_calc).T
r_sqr, RMSE = jchem.estimate_accuracy( yEv, yEv_calc, disp = disp)
if graph:
plt.scatter( yEv.tolist(), yEv_calc.tolist())
ax = plt.gca()
lims = [
np.min([ax.get_xlim(), ax.get_ylim()]), # min of both axes
np.max([ax.get_xlim(), ax.get_ylim()]), # max of both axes
]
        # now plot both limits against each other
#ax.plot(lims, lims, 'k-', alpha=0.75, zorder=0)
ax.plot(lims, lims, '-', color = 'pink')
plt.xlabel('Target')
plt.ylabel('Prediction')
if plt_title:
plt.title( plt_title)
plt.show()
return r_sqr, RMSE
def regress_show( yEv, yEv_calc, disp = True, graph = True, plt_title = None):
    # if the output is a vector and the original is a matrix,
# the output is translated to a matrix.
if len( np.shape(yEv_calc)) == 1:
yEv_calc = np.mat( yEv_calc).T
if len( np.shape(yEv)) == 1:
yEv = np.mat( yEv).T
r_sqr, RMSE = jchem.estimate_accuracy( yEv, yEv_calc, disp = disp)
if graph:
#plt.scatter( yEv.tolist(), yEv_calc.tolist())
plt.figure()
ms_sz = max(min( 4000 / yEv.shape[0], 8), 1)
plt.plot( yEv.tolist(), yEv_calc.tolist(), '.', ms = ms_sz) # Change ms
ax = plt.gca()
lims = [
np.min([ax.get_xlim(), ax.get_ylim()]), # min of both axes
np.max([ax.get_xlim(), ax.get_ylim()]), # max of both axes
]
        # now plot both limits against each other
#ax.plot(lims, lims, 'k-', alpha=0.75, zorder=0)
ax.plot(lims, lims, '-', color = 'pink')
plt.xlabel('Experiment')
plt.ylabel('Prediction')
if plt_title:
plt.title( plt_title)
else:
plt.title( '$r^2$ = {0:.2e}, RMSE = {1:.2e}'.format( r_sqr, RMSE))
plt.show()
return r_sqr, RMSE
def regress_show3( yEv, yEv_calc, disp = True, graph = True, plt_title = None):
    # if the output is a vector and the original is a matrix,
# the output is translated to a matrix.
r_sqr, RMSE, MAE = jchem.estimate_score3( yEv, yEv_calc, disp = disp)
if graph:
#plt.scatter( yEv.tolist(), yEv_calc.tolist())
plt.figure()
ms_sz = max(min( 6000 / yEv.shape[0], 8), 3)
plt.plot( yEv.tolist(), yEv_calc.tolist(), '.', ms = ms_sz) # Change ms
ax = plt.gca()
lims = [
np.min([ax.get_xlim(), ax.get_ylim()]), # min of both axes
np.max([ax.get_xlim(), ax.get_ylim()]), # max of both axes
]
        # now plot both limits against each other
#ax.plot(lims, lims, 'k-', alpha=0.75, zorder=0)
ax.plot(lims, lims, '-', color = 'pink')
plt.xlabel('Experiment')
plt.ylabel('Prediction')
if plt_title:
plt.title( plt_title)
else:
plt.title( '$r^2$ = {0:.2e}, RMSE = {1:.2e}, MAE = {2:.2e}'.format( r_sqr, RMSE, MAE))
plt.show()
return r_sqr, RMSE, MAE
def _regress_show4_r0( yEv, yEv_calc, disp = True, graph = True, plt_title = None, ms_sz = None):
    # if the output is a vector and the original is a matrix,
# the output is translated to a matrix.
r_sqr, RMSE, MAE, DAE = estimate_accuracy4( yEv, yEv_calc, disp = disp)
if graph:
#plt.scatter( yEv.tolist(), yEv_calc.tolist())
plt.figure()
if ms_sz is None:
ms_sz = max(min( 6000 / yEv.shape[0], 8), 3)
plt.plot( yEv.tolist(), yEv_calc.tolist(), '.', ms = ms_sz) # Change ms
ax = plt.gca()
lims = [
np.min([ax.get_xlim(), ax.get_ylim()]), # min of both axes
np.max([ax.get_xlim(), ax.get_ylim()]), # max of both axes
]
        # now plot both limits against each other
#ax.plot(lims, lims, 'k-', alpha=0.75, zorder=0)
ax.plot(lims, lims, '-', color = 'pink')
plt.xlabel('Experiment')
plt.ylabel('Prediction')
if plt_title is None:
plt.title( '$r^2$={0:.1e}, RMSE={1:.1e}, MAE={2:.1e}, MedAE={3:.1e}'.format( r_sqr, RMSE, MAE, DAE))
elif plt_title != "":
plt.title( plt_title)
# plt.show()
return r_sqr, RMSE, MAE, DAE
def regress_show4( yEv, yEv_calc, disp = True, graph = True, plt_title = None, ms_sz = None):
    # if the output is a vector and the original is a matrix,
# the output is translated to a matrix.
r_sqr, RMSE, MAE, DAE = estimate_accuracy4( yEv, yEv_calc, disp = disp)
if graph:
#plt.scatter( yEv.tolist(), yEv_calc.tolist())
plt.figure()
if ms_sz is None:
ms_sz = max(min( 6000 / yEv.shape[0], 8), 3)
# plt.plot( yEv.tolist(), yEv_calc.tolist(), '.', ms = ms_sz) # Change ms
plt.scatter( yEv.tolist(), yEv_calc.tolist(), s = ms_sz)
ax = plt.gca()
lims = [
np.min([ax.get_xlim(), ax.get_ylim()]), # min of both axes
np.max([ax.get_xlim(), ax.get_ylim()]), # max of both axes
]
        # now plot both limits against each other
#ax.plot(lims, lims, 'k-', alpha=0.75, zorder=0)
ax.plot(lims, lims, '-', color = 'pink')
plt.xlabel('Experiment')
plt.ylabel('Prediction')
if plt_title is None:
plt.title( '$r^2$={0:.1e}, RMSE={1:.1e}, MAE={2:.1e}, MedAE={3:.1e}'.format( r_sqr, RMSE, MAE, DAE))
elif plt_title != "":
plt.title( plt_title)
# plt.show()
return r_sqr, RMSE, MAE, DAE
def cv_show( yEv, yEv_calc, disp = True, graph = True, grid_std = None):
    # if the output is a vector and the original is a matrix,
# the output is translated to a matrix.
if len( np.shape(yEv_calc)) == 1:
yEv_calc = np.mat( yEv_calc).T
if len( np.shape(yEv)) == 1:
yEv = np.mat( yEv).T
r_sqr, RMSE = jchem.estimate_accuracy( yEv, yEv_calc, disp = disp)
if graph:
#plt.scatter( yEv.tolist(), yEv_calc.tolist())
plt.figure()
ms_sz = max(min( 4000 / yEv.shape[0], 8), 1)
plt.plot( yEv.tolist(), yEv_calc.tolist(), '.', ms = ms_sz) # Change ms
ax = plt.gca()
lims = [
np.min([ax.get_xlim(), ax.get_ylim()]), # min of both axes
np.max([ax.get_xlim(), ax.get_ylim()]), # max of both axes
]
        # now plot both limits against each other
#ax.plot(lims, lims, 'k-', alpha=0.75, zorder=0)
ax.plot(lims, lims, '-', color = 'pink')
plt.xlabel('Experiment')
plt.ylabel('Prediction')
if grid_std:
plt.title( '($r^2$, std) = ({0:.2e}, {1:.2e}), RMSE = {2:.2e}'.format( r_sqr, grid_std, RMSE))
else:
plt.title( '$r^2$ = {0:.2e}, RMSE = {1:.2e}'.format( r_sqr, RMSE))
plt.show()
return r_sqr, RMSE
ann_show = regress_show
def mlr_show( clf, RMv, yEv, disp = True, graph = True):
yEv_calc = clf.predict( RMv)
if len( np.shape(yEv)) == 2 and len( np.shape(yEv_calc)) == 1:
yEv_calc = np.mat( yEv_calc).T
r_sqr, RMSE = jchem.estimate_accuracy( yEv, yEv_calc, disp = disp)
if graph:
plt.figure()
ms_sz = max(min( 4000 / yEv.shape[0], 8), 1)
plt.plot( yEv.tolist(), yEv_calc.tolist(), '.', ms = ms_sz)
ax = plt.gca()
lims = [
np.min([ax.get_xlim(), ax.get_ylim()]), # min of both axes
np.max([ax.get_xlim(), ax.get_ylim()]), # max of both axes
]
        # now plot both limits against each other
#ax.plot(lims, lims, 'k-', alpha=0.75, zorder=0)
ax.plot(lims, lims, '-', color = 'pink')
plt.xlabel('Experiment')
plt.ylabel('Prediction')
plt.title( '$r^2$ = {0:.2e}, RMSE = {1:.2e}'.format( r_sqr, RMSE))
plt.show()
return r_sqr, RMSE
def estimate_accuracy4(yEv, yEv_calc, disp = False):
"""
    It was originally located in jchem. However, it now lives here
    since the functionality is more in line with jutil than jchem.
"""
r_sqr = metrics.r2_score( yEv, yEv_calc)
RMSE = np.sqrt( metrics.mean_squared_error( yEv, yEv_calc))
MAE = metrics.mean_absolute_error( yEv, yEv_calc)
DAE = metrics.median_absolute_error( yEv, yEv_calc)
if disp:
print("r^2={0:.2e}, RMSE={1:.2e}, MAE={2:.2e}, DAE={3:.2e}".format( r_sqr, RMSE, MAE, DAE))
return r_sqr, RMSE, MAE, DAE
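if __name__ == "__main__":
    # Hedged sketch with illustrative targets and predictions.
    y_true_demo = np.array([1.0, 2.0, 3.0, 4.0])
    y_pred_demo = np.array([1.1, 1.9, 3.2, 3.8])
    print(estimate_accuracy4(y_true_demo, y_pred_demo, disp=True))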
def mlr_show3( clf, RMv, yEv, disp = True, graph = True):
yEv_calc = clf.predict( RMv)
if len( np.shape(yEv)) == 2 and len( np.shape(yEv_calc)) == 1:
yEv_calc = np.mat( yEv_calc).T
r_sqr, RMSE, aae = jchem.estimate_accuracy3( yEv, yEv_calc, disp = disp)
if graph:
plt.figure()
ms_sz = max(min( 4000 / yEv.shape[0], 8), 1)
plt.plot( yEv.tolist(), yEv_calc.tolist(), '.', ms = ms_sz)
ax = plt.gca()
lims = [
np.min([ax.get_xlim(), ax.get_ylim()]), # min of both axes
np.max([ax.get_xlim(), ax.get_ylim()]), # max of both axes
]
        # now plot both limits against each other
#ax.plot(lims, lims, 'k-', alpha=0.75, zorder=0)
ax.plot(lims, lims, '-', color = 'pink')
plt.xlabel('Experiment')
plt.ylabel('Prediction')
plt.title( '$r^2$={0:.2e}, RMSE={1:.2e}, AAE={2:.2e}'.format( r_sqr, RMSE, aae))
plt.show()
return r_sqr, RMSE, aae
def mlr_show4( clf, RMv, yEv, disp = True, graph = True):
yEv_calc = clf.predict( RMv)
if len( np.shape(yEv)) == 2 and len( np.shape(yEv_calc)) == 1:
yEv_calc = np.mat( yEv_calc).T
r_sqr, RMSE, MAE, DAE = estimate_accuracy4( yEv, yEv_calc, disp = disp)
if graph:
plt.figure()
ms_sz = max(min( 4000 / yEv.shape[0], 8), 1)
plt.plot( yEv.tolist(), yEv_calc.tolist(), '.', ms = ms_sz)
ax = plt.gca()
lims = [
np.min([ax.get_xlim(), ax.get_ylim()]), # min of both axes
np.max([ax.get_xlim(), ax.get_ylim()]), # max of both axes
]
        # now plot both limits against each other
#ax.plot(lims, lims, 'k-', alpha=0.75, zorder=0)
ax.plot(lims, lims, '-', color = 'pink')
plt.xlabel('Experiment')
plt.ylabel('Prediction')
#plt.title( '$r^2$={0:.2e}, RMSE={1:.2e}, AAE={2:.2e}'.format( r_sqr, RMSE, aae))
plt.title( '$r^2$={0:.1e},$\sigma$={1:.1e},MAE={2:.1e},DAE={3:.1e}'.format( r_sqr, RMSE, MAE, DAE))
plt.show()
return r_sqr, RMSE, MAE, DAE
def _mlr_val_r0( RM, yE, disp = True, graph = True):
clf = linear_model.LinearRegression()
clf.fit( RM[::2,:], yE[::2,0])
print('Training result')
mlr_show( clf, RM[::2, :], yE[::2, 0], disp = disp, graph = graph)
print('Validation result')
mlr_show( clf, RM[1::2, :], yE[1::2, 0], disp = disp, graph = graph)
def mlr_val( RM, yE, disp = True, graph = True, rate = 2, more_train = True, center = None):
"""
    Validation is performed on a held-out portion of the data determined by the given rate.
"""
RMt, yEt, RMv, yEv = jchem.get_valid_mode_data( RM, yE, rate = rate, more_train = more_train, center = center)
clf = linear_model.LinearRegression()
clf.fit( RMt, yEt)
print('Training result')
mlr_show( clf, RMt, yEt, disp = disp, graph = graph)
print('Validation result')
r_sqr, RMSE = mlr_show( clf, RMv, yEv, disp = disp, graph = graph)
return r_sqr, RMSE
def svr_val( RM, yE, C = 1.0, epsilon = 0.1, disp = True, graph = True, rate = 2, more_train = True, center = None):
"""
    Validation is performed on a held-out portion of the data determined by the given rate.
"""
RMt, yEt, RMv, yEv = jchem.get_valid_mode_data( RM, yE, rate = rate, more_train = more_train, center = center)
clf = svm.SVR( C = C, epsilon = epsilon)
clf.fit( RMt, yEt.A1)
print('Training result')
mlr_show( clf, RMt, yEt, disp = disp, graph = graph)
print('Validation result')
r_sqr, RMSE = mlr_show( clf, RMv, yEv, disp = disp, graph = graph)
return r_sqr, RMSE
def mlr_val_ridge( RM, yE, rate = 2, more_train = True, center = None, alpha = 0.5, disp = True, graph = True):
"""
    Validation is performed on a held-out portion of the data determined by the given rate.
"""
RMt, yEt, RMv, yEv = jchem.get_valid_mode_data( RM, yE, rate = rate, more_train = more_train, center = center)
print("Ridge: alpha = {}".format( alpha))
clf = linear_model.Ridge( alpha = alpha)
clf.fit( RMt, yEt)
print('Weight value')
#print clf.coef_.flatten()
plt.plot( clf.coef_.flatten())
plt.grid()
plt.xlabel('Tap')
plt.ylabel('Weight')
plt.title('Linear Regression Weights')
plt.show()
print('Training result')
mlr_show( clf, RMt, yEt, disp = disp, graph = graph)
print('Validation result')
r_sqr, RMSE = mlr_show( clf, RMv, yEv, disp = disp, graph = graph)
return r_sqr, RMSE
def mlr_val_avg_2( RM, yE, disp = False, graph = False):
"""
    Validation is performed on held-out pairs of samples (all combinations of two).
"""
r_sqr_list, RMSE_list = [], []
vseq_list = []
org_seq = list(range( len( yE)))
for v_seq in itertools.combinations( org_seq, 2):
t_seq = [x for x in org_seq if x not in v_seq]
RMt, yEt = RM[ t_seq, :], yE[ t_seq, 0]
RMv, yEv = RM[ v_seq, :], yE[ v_seq, 0]
#RMt, yEt, RMv, yEv = jchem.get_valid_mode_data( RM, yE, rate = rate, more_train = more_train, center = center)
clf = linear_model.LinearRegression()
clf.fit( RMt, yEt)
#print 'Training result'
mlr_show( clf, RMt, yEt, disp = disp, graph = graph)
#print 'Validation result'
r_sqr, RMSE = mlr_show( clf, RMv, yEv, disp = disp, graph = graph)
"""
#This is blocked since vseq_list is returned.
if r_sqr < 0:
print 'v_seq:', v_seq, '--> r_sqr = ', r_sqr
"""
r_sqr_list.append( r_sqr)
RMSE_list.append( RMSE)
vseq_list.append( v_seq)
print("average r_sqr = {0}, average RMSE = {1}".format( np.average( r_sqr_list), np.average( RMSE_list)))
return r_sqr_list, RMSE_list, v_seq
def gen_rand_seq( ln, rate):
vseq = choose( ln, int( ln / rate))
org_seq = list(range( ln))
tseq = [x for x in org_seq if x not in vseq]
return tseq, vseq
def mlr_val_vseq( RM, yE, v_seq, disp = True, graph = True):
"""
Validation is performed using vseq indexed values.
"""
org_seq = list(range( len( yE)))
t_seq = [x for x in org_seq if x not in v_seq]
RMt, yEt = RM[ t_seq, :], yE[ t_seq, 0]
RMv, yEv = RM[ v_seq, :], yE[ v_seq, 0]
clf = linear_model.LinearRegression()
clf.fit( RMt, yEt)
print('Weight value')
#print clf.coef_.flatten()
plt.plot( clf.coef_.flatten())
plt.grid()
plt.xlabel('Tap')
plt.ylabel('Weight')
plt.title('Linear Regression Weights')
plt.show()
if disp: print('Training result')
mlr_show( clf, RMt, yEt, disp = disp, graph = graph)
if disp: print('Validation result')
r_sqr, RMSE = mlr_show( clf, RMv, yEv, disp = disp, graph = graph)
#if r_sqr < 0:
# print 'v_seq:', v_seq, '--> r_sqr = ', r_sqr
return r_sqr, RMSE
def mlr_val_vseq_rand(RM, yE, disp = True, graph = True, rate = 5):
"""
    Validation is performed using the vseq-indexed values.
    vseq is randomly selected with respect to rate.
"""
vseq = choose( len( yE), int(len( yE) / rate));
r_sqr, RMSE = mlr_val_vseq( RM, yE, vseq, disp = disp, graph = graph)
return r_sqr, RMSE
def mlr_val_vseq_ridge_rand( RM, yE, alpha = .5, rate = 2, disp = True, graph = True):
vseq = choose( len( yE), int(len( yE) / rate));
r_sqr, RMSE = mlr_val_vseq_ridge( RM, yE, vseq, alpha = alpha, disp = disp, graph = graph)
return r_sqr, RMSE
def mlr_val_vseq_lasso_rand( RM, yE, alpha = .5, rate = 2, disp = True, graph = True):
vseq = choose( len( yE), int(len( yE) / rate));
r_sqr, RMSE = mlr_val_vseq_lasso( RM, yE, vseq, alpha = alpha, disp = disp, graph = graph)
return r_sqr, RMSE
def mlr_val_vseq_MMSE_rand( RM, yE, alpha = .5, rate = 2, disp = True, graph = True):
vseq = choose( len( yE), int(len( yE) / rate));
r_sqr, RMSE = mlr_val_vseq_MMSE( RM, yE, vseq, alpha = alpha, disp = disp, graph = graph)
return r_sqr, RMSE
def mlr_val_vseq_ridge_rand_profile( RM, yE, alpha = .5, rate = 2, iterN = 10, disp = True, graph = False, hist = True):
r2_rms_list = []
for ii in range( iterN):
vseq = choose( len( yE), int(len( yE) / rate));
r_sqr, RMSE = mlr_val_vseq_ridge( RM, yE, vseq, alpha = alpha, disp = disp, graph = graph)
r2_rms_list.append( (r_sqr, RMSE))
r2_list, rms_list = list(zip( *r2_rms_list))
#Showing r2 as histogram
pd_r2 = pd.DataFrame( {'r_sqr': r2_list})
pd_r2.plot( kind = 'hist', alpha = 0.5)
#Showing rms as histogram
pd_rms = pd.DataFrame( {'rms': rms_list})
pd_rms.plot( kind = 'hist', alpha = 0.5)
print("r2: mean = {0}, std = {1}".format( np.mean( r2_list), np.std( r2_list)))
print("RMSE: mean = {0}, std = {1}".format( np.mean( rms_list), np.std( rms_list)))
return r2_list, rms_list
def mlr_val_vseq_lasso_rand_profile( RM, yE, alpha = .001, rate = 2, iterN = 10, disp = True, graph = False, hist = True):
r2_rms_list = []
for ii in range( iterN):
vseq = choose( len( yE), int(len( yE) / rate));
r_sqr, RMSE = mlr_val_vseq_lasso( RM, yE, vseq, alpha = alpha, disp = disp, graph = graph)
r2_rms_list.append( (r_sqr, RMSE))
r2_list, rms_list = list(zip( *r2_rms_list))
#Showing r2 as histogram
pd_r2 = pd.DataFrame( {'r_sqr': r2_list})
pd_r2.plot( kind = 'hist', alpha = 0.5)
#Showing rms as histogram
pd_rms = pd.DataFrame( {'rms': rms_list})
pd_rms.plot( kind = 'hist', alpha = 0.5)
print("r2: mean = {0}, std = {1}".format( np.mean( r2_list), np.std( r2_list)))
print("RMSE: mean = {0}, std = {1}".format( np.mean( rms_list), np.std( rms_list)))
return r2_list, rms_list
def mlr_val_vseq_ridge( RM, yE, v_seq, alpha = .5, disp = True, graph = True):
"""
Validation is performed using vseq indexed values.
"""
org_seq = list(range( len( yE)))
t_seq = [x for x in org_seq if x not in v_seq]
RMt, yEt = RM[ t_seq, :], yE[ t_seq, 0]
RMv, yEv = RM[ v_seq, :], yE[ v_seq, 0]
clf = linear_model.Ridge( alpha = alpha)
clf.fit( RMt, yEt)
if disp: print('Training result')
mlr_show( clf, RMt, yEt, disp = disp, graph = graph)
if disp: print('Validation result')
r_sqr, RMSE = mlr_show( clf, RMv, yEv, disp = disp, graph = graph)
#if r_sqr < 0:
# print 'v_seq:', v_seq, '--> r_sqr = ', r_sqr
return r_sqr, RMSE
def mlr_val_vseq_lasso( RM, yE, v_seq, alpha = .5, disp = True, graph = True):
"""
Validation is performed using vseq indexed values.
"""
org_seq = list(range( len( yE)))
t_seq = [x for x in org_seq if x not in v_seq]
RMt, yEt = RM[ t_seq, :], yE[ t_seq, 0]
RMv, yEv = RM[ v_seq, :], yE[ v_seq, 0]
clf = linear_model.Lasso( alpha = alpha)
clf.fit( RMt, yEt)
if disp: print('Training result')
mlr_show( clf, RMt, yEt, disp = disp, graph = graph)
if disp: print('Validation result')
r_sqr, RMSE = mlr_show( clf, RMv, yEv, disp = disp, graph = graph)
#if r_sqr < 0:
# print 'v_seq:', v_seq, '--> r_sqr = ', r_sqr
return r_sqr, RMSE
def mlr_val_vseq_MMSE( RM, yE, v_seq, alpha = .5, disp = True, graph = True):
"""
Validation is performed using vseq indexed values.
"""
org_seq = list(range( len( yE)))
t_seq = [x for x in org_seq if x not in v_seq]
RMt, yEt = RM[ t_seq, :], yE[ t_seq, 0]
RMv, yEv = RM[ v_seq, :], yE[ v_seq, 0]
w, RMt_1 = mmse_with_bias( RMt, yEt)
yEt_c = RMt_1*w
print('Weight values')
#print clf.coef_.flatten()
plt.plot( w.A1)
plt.grid()
plt.xlabel('Tap')
plt.ylabel('Weight')
plt.title('Linear Regression Weights')
plt.show()
RMv_1 = add_bias_xM( RMv)
yEv_c = RMv_1*w
if disp: print('Training result')
regress_show( yEt, yEt_c, disp = disp, graph = graph)
if disp: print('Validation result')
r_sqr, RMSE = regress_show( yEv, yEv_c, disp = disp, graph = graph)
#if r_sqr < 0:
# print 'v_seq:', v_seq, '--> r_sqr = ', r_sqr
return r_sqr, RMSE
def _ann_val_pre_r0( RM, yE, disp = True, graph = True):
"""
In the ANN case, separate pre- and post-processing steps are used,
while in the MLR case all processing is completed by one function (mlr).
The ANN processing itself is performed by a shell command.
"""
jchem.gen_input_files_valid( RM[::2,:], yE[::2,0], RM)
def ann_val_pre( RM, yE, rate = 2, more_train = True, center = None):
"""
In the ANN case, separate pre- and post-processing steps are used,
while in the MLR case all processing is completed by one function (mlr).
The ANN processing itself is performed by a shell command.
Any percentage of validation data can now be specified via rate.
Random selection may be added later; currently the selection is deterministic.
"""
RMt, yEt, RMv, yEv = jchem.get_valid_mode_data( RM, yE, rate = rate, more_train = more_train, center = center)
jchem.gen_input_files_valid( RMt, yEt, RM)
def _ann_val_post_r0( yE, disp = True, graph = True):
"""
After the ANN pre-processing step and the external shell command have been run, this post-processing step can be used.
"""
df_ann = pd.read_csv( 'ann_out.csv')
yv_ann = np.mat( df_ann['out'].tolist()).T
print('Training result')
ann_show( yE[::2,0], yv_ann[::2,0], disp = disp, graph = graph)
print('Validation result')
r_sqr, RMSE = ann_show( yE[1::2,0], yv_ann[1::2,0], disp = disp, graph = graph)
return r_sqr, RMSE
def ann_val_post( yE, disp = True, graph = True, rate = 2, more_train = True, center = None):
"""
After the ANN pre-processing step and the external shell command have been run, this post-processing step can be used.
"""
df_ann = pd.read_csv( 'ann_out.csv')
yE_c = np.mat( df_ann['out'].tolist()).T
yEt, yEt_c, yEv, yEv_c = jchem.get_valid_mode_data( yE, yE_c, rate = rate, more_train = more_train, center = center)
print('Training result')
ann_show( yEt, yEt_c, disp = disp, graph = graph)
print('Validation result')
r_sqr, RMSE = ann_show( yEv, yEv_c, disp = disp, graph = graph)
return r_sqr, RMSE
def writeparam_txt( fname = 'param.txt', dic = {"num_neurons_hidden": 4, "desired_error": 0.00001}):
"save param.txt with dictionary"
with open(fname, 'w') as f:
print("Saving", fname)
for di in dic:
f.write("{} {}\n".format( di, dic[di]))
def choose(N, n):
"""
Returns n randomly chosen values between 0 to N-1.
"""
x = list(range( N))
n_list = []
for ii in range( n):
xi = random.choice( x)
n_list.append( xi)
x.remove( xi)
return n_list
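# Illustrative usage (added): choose(N, n) draws n distinct values from range(N) without
# replacement, which behaves like random.sample(range(N), n). The sketch below is only a
# usage illustration and is not part of the original module.
def _example_choose():
    import random
    picked = choose(100, 10)
    assert len(picked) == 10 and len(set(picked)) == 10
    alt = random.sample(range(100), 10)  # standard-library equivalent
    return picked, alt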
def pd_remove_duplist_ID( pdr, dup_l):
pdw = pdr.copy()
for d in dup_l:
for x in d[1:]:
print(x, pdw.ID[ x], pdw.Smile[ x])
pdw = pdw[ pdw.ID != pdr.ID[ x]]
return pdw
def pd_remove_faillist_ID( pdr, fail_l):
pdw = pdr.copy()
for x in fail_l:
pdw = pdw[ pdw.ID != pdr.ID[ x]]
return pdw
def mmse( xM_1, yV):
Rxx = xM_1.T * xM_1
Rxy = xM_1.T * yV
w = np.linalg.pinv( Rxx) * Rxy
return w
def add_bias_xM( xM):
xMT_list = xM.T.tolist()
xMT_list.append( np.ones( xM.shape[0], dtype = int).tolist())
xM_1 = np.mat( xMT_list).T
return xM_1
def mmse_with_bias( xM, yV):
xM_1 = add_bias_xM( xM)
w_1 = mmse( xM_1, yV)
return w_1, xM_1
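# Illustrative usage (added): mmse() solves the least-squares normal equations,
# w = pinv(X^T X) X^T y, and mmse_with_bias() appends a column of ones so that the last
# weight acts as an intercept. The sketch below fits the known line y = 2x + 1; it is
# only a shape/usage illustration, not part of the original module.
def _example_mmse_with_bias():
    xM = np.mat(np.arange(10.0)).T      # 10x1 design matrix
    yV = 2.0 * xM + 1.0                 # targets lying on y = 2x + 1
    w_1, xM_1 = mmse_with_bias(xM, yV)  # w_1 should be close to [[2.0], [1.0]]
    return w_1, xM_1 * w_1              # predictions should reproduce yV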
def svm_SVR_C( xM, yV, c_l, graph = True):
"""
SVR is performed iteratively with different C values
until all C in the list are used.
"""
r2_l, sd_l = [], []
for C in c_l:
print('sklearn.svm.SVR(C={})'.format( C))
clf = svm.SVR( C = C)
clf.fit( xM, yV.A1)
yV_pred = clf.predict(xM)
r2, sd = regress_show( yV, np.mat( yV_pred).T, graph = graph)
for X, x in [[r2_l, r2], [sd_l, sd]]:
X.append( x)
print('average r2, sd are', np.mean( r2_l), np.mean( sd_l))
if graph:
pdw = pd.DataFrame( { 'log10(C)': np.log10(c_l), 'r2': r2_l, 'sd': sd_l})
pdw.plot( x = 'log10(C)')
return r2_l, sd_l
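# Illustrative usage (added): svm_SVR_C() sweeps the SVR regularisation parameter C over
# the supplied list, so a log-spaced grid is the natural input. The sketch assumes xM is
# an (n_samples x n_features) np.matrix and yV an (n_samples x 1) np.matrix, as in the
# rest of this module; it is only a usage illustration.
def _example_svm_SVR_C(xM, yV):
    c_l = np.logspace(-2, 3, 6)  # C = 0.01 ... 1000
    r2_l, sd_l = svm_SVR_C(xM, yV, c_l, graph=False)
    return r2_l, sd_l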
def corr_xy( x_vec, y_vec):
print(type( x_vec), type( y_vec))
if type( x_vec) != np.matrixlib.defmatrix.matrix:
molw_x = np.mat( x_vec).T
else:
molw_x = x_vec
if type( y_vec) != np.matrixlib.defmatrix.matrix:
yV = np.mat( y_vec).T
else:
yV = y_vec
print(molw_x.shape, yV.shape)
normal_molw_x = molw_x / np.linalg.norm( molw_x)
yV0 = yV - np.mean( yV)
normal_yV0 = yV0 / np.linalg.norm( yV0)
return normal_molw_x.T * normal_yV0
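# Illustrative note (added): corr_xy() normalises x by its norm without mean-centering it,
# while y is mean-centered before normalisation, so the returned inner product is close
# to -- but not exactly -- the Pearson correlation. The sketch below contrasts it with
# np.corrcoef on the same vectors and is only an illustration.
def _example_corr_xy():
    x = np.arange(10.0)
    y = 3.0 * x + np.random.randn(10)
    approx = corr_xy(x, y)              # module's normalised inner product (1x1 matrix)
    pearson = np.corrcoef(x, y)[0, 1]   # reference Pearson correlation
    return approx, pearson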
def gs_Lasso( xM, yV, alphas_log = (-1, 1, 9)):
print(xM.shape, yV.shape)
clf = linear_model.Lasso()
#parmas = {'alpha': np.logspace(1, -1, 9)}
parmas = {'alpha': np.logspace( *alphas_log)}
kf5_c = model_selection.KFold( n_folds=5, shuffle=True)
kf5 = kf5_c.split( xM)
gs = model_selection.GridSearchCV( clf, parmas, scoring = 'r2', cv = kf5, n_jobs = 1)
gs.fit( xM, yV)
return gs
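# Illustrative usage (added): alphas_log is a (start, stop, num) tuple that is unpacked
# into np.logspace, so (-3, 1, 9) searches nine alpha values between 1e-3 and 10. The
# sketch assumes the usual xM / yV matrix conventions of this module and is only an
# illustration of the call pattern.
def _example_gs_Lasso(xM, yV):
    gs = gs_Lasso(xM, yV, alphas_log=(-3, 1, 9))
    return gs.best_params_['alpha'], gs.best_score_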
def gs_Lasso_norm( xM, yV, alphas_log = (-1, 1, 9)):
print(xM.shape, yV.shape)
clf = linear_model.Lasso( normalize = True)
#parmas = {'alpha': np.logspace(1, -1, 9)}
parmas = {'alpha': np.logspace( *alphas_log)}
kf5_c = model_selection.KFold( n_folds=5, shuffle=True)
kf5 = kf5_c.split( xM)
gs = model_selection.GridSearchCV( clf, parmas, scoring = 'r2', cv = kf5, n_jobs = -1)
gs.fit( xM, yV)
return gs
def gs_Lasso_kf( xM, yV, alphas_log_l):
kf5_ext_c = model_selection.KFold( n_folds=5, shuffle=True)
kf5_ext = kf5_ext_c.split( xM)
score_l = []
for ix, (tr, te) in enumerate( kf5_ext):
print('{}th fold external validation stage ============================'.format( ix + 1))
xM_in = xM[ tr, :]
yV_in = yV[ tr, 0]
print('First Lasso Stage')
gs1 = gs_Lasso( xM_in, yV_in, alphas_log_l[0])
print('Best score:', gs1.best_score_)
print('Best param:', gs1.best_params_)
print(gs1.grid_scores_)
nz_idx = gs1.best_estimator_.sparse_coef_.indices
xM_in_nz = xM_in[ :, nz_idx]
print('Second Lasso Stage')
gs2 = gs_Lasso( xM_in_nz, yV_in, alphas_log_l[1])
print('Best score:', gs2.best_score_)
print('Best param:', gs2.best_params_)
print(gs2.grid_scores_)
print('External Validation Stage')
xM_out = xM[ te, :]
yV_out = yV[ te, 0]
xM_out_nz = xM_out[:, nz_idx]
score = gs2.score( xM_out_nz, yV_out)
print(score)
score_l.append( score)
print('')
print('all scores:', score_l)
print('average scores:', np.mean( score_l))
return score_l
def gs_Lasso_kf_ext( xM, yV, alphas_log_l):
kf5_ext_c = model_selection.KFold( n_folds=5, shuffle=True)
kf5_ext = kf5_ext_c.split( xM)
score_l = []
for ix, (tr, te) in enumerate( kf5_ext):
print('{}th fold external validation stage ============================'.format( ix + 1))
xM_in = xM[ tr, :]
yV_in = yV[ tr, 0]
print('First Lasso Stage')
gs1 = gs_Lasso( xM_in, yV_in, alphas_log_l[0])
print('Best score:', gs1.best_score_)
print('Best param:', gs1.best_params_)
print(gs1.grid_scores_)
nz_idx = gs1.best_estimator_.sparse_coef_.indices
xM_in_nz = xM_in[ :, nz_idx]
print('Second Lasso Stage')
gs2 = gs_Lasso( xM_in_nz, yV_in, alphas_log_l[1])
print('Best score:', gs2.best_score_)
print('Best param:', gs2.best_params_)
print(gs2.grid_scores_)
print('External Validation Stage')
# Obtain prediction model by whole data including internal validation data
alpha = gs2.best_params_['alpha']
clf = linear_model.Lasso( alpha = alpha)
clf.fit( xM_in_nz, yV_in)
xM_out = xM[ te, :]
yV_out = yV[ te, 0]
xM_out_nz = xM_out[:, nz_idx]
score = clf.score( xM_out_nz, yV_out)
print(score)
score_l.append( score)
print('')
print('all scores:', score_l)
print('average scores:', np.mean( score_l))
return score_l
def gs_Ridge( xM, yV, alphas_log = (1, -1, 9)):
print(xM.shape, yV.shape)
clf = linear_model.Ridge()
#parmas = {'alpha': np.logspace(1, -1, 9)}
parmas = {'alpha': np.logspace( *alphas_log)}
kf5_c = model_selection.KFold( n_folds=5, shuffle=True)
kf5 = kf5_c.split( xM)
gs = model_selection.GridSearchCV( clf, parmas, scoring = 'r2', cv = kf5, n_jobs = 1)
gs.fit( xM, yV)
return gs
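# Note (added): the gs_Ridge defined immediately below redefines the function above with
# an extra n_folds argument, so it shadows this version once the module is loaded. Its
# KFold call also appears to follow the older cross_validation-style signature (a leading
# size argument plus n_folds) rather than the model_selection API, so it may need
# adjusting depending on the installed scikit-learn version.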
def gs_Ridge( xM, yV, alphas_log = (1, -1, 9), n_folds = 5):
print(xM.shape, yV.shape)
clf = linear_model.Ridge()
#parmas = {'alpha': np.logspace(1, -1, 9)}
parmas = {'alpha': np.logspace( *alphas_log)}
kf_n = model_selection.KFold( xM.shape[0], n_folds=n_folds, shuffle=True)
gs = model_selection.GridSearchCV( clf, parmas, scoring = 'r2', cv = kf_n, n_jobs = 1)
gs.fit( xM, yV)
return gs
def _cv_LinearRegression_r0( xM, yV):
print(xM.shape, yV.shape)
clf = linear_model.Ridge()
kf5_c = model_selection.KFold( n_folds=5, shuffle=True)
kf5 = kf5_c.split( xM)
cv_scores = model_selection.cross_val_score( clf, xM, yV, scoring = 'r2', cv = kf5, n_jobs = -1)
return cv_scores
def _cv_LinearRegression_r1( xM, yV):
print(xM.shape, yV.shape)
clf = linear_model.LinearRegression()
kf5_c = model_selection.KFold( n_folds=5, shuffle=True)
kf5 = kf5_c.split( xM)
cv_scores = model_selection.cross_val_score( clf, xM, yV, scoring = 'r2', cv = kf5, n_jobs = -1)
print('R^2 mean, std -->', np.mean( cv_scores), np.std( cv_scores))
return cv_scores
def cv_LinearRegression( xM, yV, n_jobs = -1):
print(xM.shape, yV.shape)
clf = linear_model.LinearRegression()
kf5_c = model_selection.KFold( n_folds=5, shuffle=True)
kf5 = kf5_c.split( xM)
cv_scores = model_selection.cross_val_score( clf, xM, yV, scoring = 'r2', cv = kf5, n_jobs = n_jobs)
print('R^2 mean, std -->', np.mean( cv_scores), np.std( cv_scores))
return cv_scores
def cv_LinearRegression_A( xM, yV, s_l):
lr = linear_model.LinearRegression()
kf5_c = model_selection.KFold( n_folds=5, shuffle=True)
kf5 = kf5_c.split( xM)
r2_l = list()
for train, test in kf5:
xM_shuffle = np.concatenate( (xM[ train, :], xM[ test, :]), axis = 0)
# print xM_shuffle.shape
A_all = j3x.jpyx.calc_tm_sim_M( xM_shuffle)
A = A_all
s_l_shuffle = [s_l[x] for x in train] #train
s_l_shuffle.extend( [s_l[x] for x in test] ) #test
molw_l = jchem.rdkit_molwt( s_l_shuffle)
A_molw = A
A_molw_train = A_molw[:len(train), :]
A_molw_test = A_molw[len(train):, :]
print(A_molw_train.shape, yV[ train, 0].shape)
lr.fit( A_molw_train, yV[ train, 0])
#print A_molw_test.shape, yV[ test, 0].shape
r2_l.append( lr.score( A_molw_test, yV[ test, 0]))
print('R^2 mean, std -->', np.mean( r2_l), np.std( r2_l))
return r2_l
def cv_LinearRegression_Asupervising( xM, yV, s_l):
lr = linear_model.LinearRegression()
kf5_c = model_selection.KFold( n_folds=5, shuffle=True)
kf5 = kf5_c.split( xM)
r2_l = list()
for train, test in kf5:
xM_shuffle = np.concatenate( (xM[ train, :], xM[ test, :]), axis = 0)
#print xM_shuffle.shape
A_all = j3x.jpyx.calc_tm_sim_M( xM_shuffle)
A = A_all[ :, :len(train)]
s_l_shuffle = [s_l[x] for x in train] #train
s_l_shuffle.extend( [s_l[x] for x in test] ) #test
molw_l = jchem.rdkit_molwt( s_l_shuffle)
A_molw = A
A_molw_train = A_molw[:len(train), :]
A_molw_test = A_molw[len(train):, :]
print(A_molw_train.shape, yV[ train, 0].shape)
lr.fit( A_molw_train, yV[ train, 0])
#print A_molw_test.shape, yV[ test, 0].shape
r2_l.append( lr.score( A_molw_test, yV[ test, 0]))
print('R^2 mean, std -->', np.mean( r2_l), np.std( r2_l))
return r2_l
def cv_LinearRegression_Asupervising_molw( xM, yV, s_l):
lr = linear_model.LinearRegression()
kf5_c = model_selection.KFold( n_folds=5, shuffle=True)
kf5 = kf5_c.split( xM)
r2_l = list()
for train, test in kf5:
xM_shuffle = np.concatenate( (xM[ train, :], xM[ test, :]), axis = 0)
# print xM_shuffle.shape
A_all = j3x.jpyx.calc_tm_sim_M( xM_shuffle)
A = A_all[ :, :len(train)]
#print 'A.shape', A.shape
s_l_shuffle = [s_l[x] for x in train] #train
s_l_shuffle.extend( [s_l[x] for x in test] ) #test
molw_l = jchem.rdkit_molwt( s_l_shuffle)
A_molw = jchem.add_new_descriptor( A, molw_l)
A_molw_train = A_molw[:len(train), :]
A_molw_test = A_molw[len(train):, :]
#print A_molw_train.shape, yV[ train, 0].shape
lr.fit( A_molw_train, yV[ train, 0])
#print A_molw_test.shape, yV[ test, 0].shape
r2_l.append( lr.score( A_molw_test, yV[ test, 0]))
print('R^2 mean, std -->', np.mean( r2_l), np.std( r2_l))
return r2_l
def cv_Ridge_Asupervising_molw( xM, yV, s_l, alpha):
lr = linear_model.Ridge( alpha = alpha)
kf5_c = model_selection.KFold( n_folds=5, shuffle=True)
kf5 = kf5_c.split( xM)
r2_l = list()
for train, test in kf5:
xM_shuffle = np.concatenate( (xM[ train, :], xM[ test, :]), axis = 0)
# print xM_shuffle.shape
A_all = j3x.jpyx.calc_tm_sim_M( xM_shuffle)
A = A_all[ :, :len(train)]
#print 'A.shape', A.shape
s_l_shuffle = [s_l[x] for x in train] #train
s_l_shuffle.extend( [s_l[x] for x in test] ) #test
molw_l = jchem.rdkit_molwt( s_l_shuffle)
A_molw = jchem.add_new_descriptor( A, molw_l)
A_molw_train = A_molw[:len(train), :]
A_molw_test = A_molw[len(train):, :]
#print A_molw_train.shape, yV[ train, 0].shape
lr.fit( A_molw_train, yV[ train, 0])
#print A_molw_test.shape, yV[ test, 0].shape
r2_l.append( lr.score( A_molw_test, yV[ test, 0]))
print('R^2 mean, std -->', np.mean( r2_l), np.std( r2_l))
return r2_l
def cv_Ridge_Asupervising_2fp( xM1, xM2, yV, s_l, alpha):
lr = linear_model.Ridge( alpha = alpha)
kf5 = model_selection.KFold( len(s_l), n_folds=5, shuffle=True)
r2_l = list()
for train, test in kf5:
xM1_shuffle = np.concatenate( (xM1[ train, :], xM1[ test, :]), axis = 0)
xM2_shuffle = np.concatenate( (xM2[ train, :], xM2[ test, :]), axis = 0)
# print xM_shuffle.shape
A1_redundant = j3x.jpyx.calc_tm_sim_M( xM1_shuffle)
A1 = A1_redundant[ :, :len(train)]
A2_redundant = j3x.jpyx.calc_tm_sim_M( xM2_shuffle)
A2 = A2_redundant[ :, :len(train)]
#print 'A.shape', A.shape
s_l_shuffle = [s_l[x] for x in train] #train
s_l_shuffle.extend( [s_l[x] for x in test] ) #test
molw_l = jchem.rdkit_molwt( s_l_shuffle)
molwV = np.mat( molw_l).T
#A_molw = jchem.add_new_descriptor( A, molw_l)
print(A1.shape, A2.shape, molwV.shape)
# A_molw = np.concatenate( (A1, A2, molwV), axis = 1)
A_molw = np.concatenate( (A1, A2), axis = 1)
print(A_molw.shape)
A_molw_train = A_molw[:len(train), :]
A_molw_test = A_molw[len(train):, :]
#print A_molw_train.shape, yV[ train, 0].shape
lr.fit( A_molw_train, yV[ train, 0])
#print A_molw_test.shape, yV[ test, 0].shape
r2_l.append( lr.score( A_molw_test, yV[ test, 0]))
print('R^2 mean, std -->', np.mean( r2_l), np.std( r2_l))
return r2_l
def gs_Ridge_Asupervising_2fp( xM1, xM2, yV, s_l, alpha_l):
"""
This 2fp case uses two fingerprints at the same time in order to
combine their preprocessed versions separately.
"""
r2_l2 = list()
for alpha in alpha_l:
print(alpha)
r2_l = cv_Ridge_Asupervising_2fp( xM1, xM2, yV, s_l, alpha)
r2_l2.append( r2_l)
return r2_l2
def cv_Ridge_Asupervising_2fp_molw( xM1, xM2, yV, s_l, alpha):
lr = linear_model.Ridge( alpha = alpha)
kf5 = model_selection.KFold( len(s_l), n_folds=5, shuffle=True)
r2_l = list()
for train, test in kf5:
xM1_shuffle = np.concatenate( (xM1[ train, :], xM1[ test, :]), axis = 0)
xM2_shuffle = np.concatenate( (xM2[ train, :], xM2[ test, :]), axis = 0)
# print xM_shuffle.shape
A1_redundant = j3x.jpyx.calc_tm_sim_M( xM1_shuffle)
A1 = A1_redundant[ :, :len(train)]
A2_redundant = j3x.jpyx.calc_tm_sim_M( xM2_shuffle)
A2 = A2_redundant[ :, :len(train)]
#print 'A.shape', A.shape
s_l_shuffle = [s_l[x] for x in train] #train
s_l_shuffle.extend( [s_l[x] for x in test] ) #test
molw_l = jchem.rdkit_molwt( s_l_shuffle)
molwV = np.mat( molw_l).T
#A_molw = jchem.add_new_descriptor( A, molw_l)
print(A1.shape, A2.shape, molwV.shape)
A_molw = np.concatenate( (A1, A2, molwV), axis = 1)
print(A_molw.shape)
A_molw_train = A_molw[:len(train), :]
A_molw_test = A_molw[len(train):, :]
#print A_molw_train.shape, yV[ train, 0].shape
lr.fit( A_molw_train, yV[ train, 0])
#print A_molw_test.shape, yV[ test, 0].shape
r2_l.append( lr.score( A_molw_test, yV[ test, 0]))
print('R^2 mean, std -->', np.mean( r2_l), np.std( r2_l))
return r2_l
def gs_Ridge_Asupervising_2fp_molw( xM1, xM2, yV, s_l, alpha_l):
"""
This 2fp case uses two fingerprints at the same time in order to
combine their preprocessed versions separately.
"""
r2_l2 = list()
for alpha in alpha_l:
print(alpha)
r2_l = cv_Ridge_Asupervising_2fp_molw( xM1, xM2, yV, s_l, alpha)
r2_l2.append( r2_l)
return r2_l2
def gs_Ridge_Asupervising_molw( xM, yV, s_l, alpha_l):
r2_l2 = list()
for alpha in alpha_l:
print(alpha)
r2_l = cv_Ridge_Asupervising_molw( xM, yV, s_l, alpha)
r2_l2.append( r2_l)
return r2_l2
def gs_Ridge_Asupervising( xM, yV, s_l, alpha_l):
r2_l2 = list()
for alpha in alpha_l:
print(alpha)
r2_l = cv_Ridge_Asupervising( xM, yV, s_l, alpha)
r2_l2.append( r2_l)
return r2_l2
def cv_Ridge_Asupervising( xM, yV, s_l, alpha):
lr = linear_model.Ridge( alpha = alpha)
kf5_c = model_selection.KFold( n_folds=5, shuffle=True)
kf5 = kf5_c.split( xM)
r2_l = list()
for train, test in kf5:
xM_shuffle = np.concatenate( (xM[ train, :], xM[ test, :]), axis = 0)
# print xM_shuffle.shape
A_all = j3x.jpyx.calc_tm_sim_M( xM_shuffle)
A = A_all[ :, :len(train)]
#print 'A.shape', A.shape
s_l_shuffle = [s_l[x] for x in train] #train
s_l_shuffle.extend( [s_l[x] for x in test] ) #test
molw_l = jchem.rdkit_molwt( s_l_shuffle)
A_molw = A
A_molw_train = A_molw[:len(train), :]
A_molw_test = A_molw[len(train):, :]
#print A_molw_train.shape, yV[ train, 0].shape
lr.fit( A_molw_train, yV[ train, 0])
#print A_molw_test.shape, yV[ test, 0].shape
r2_l.append( lr.score( A_molw_test, yV[ test, 0]))
print('R^2 mean, std -->', np.mean( r2_l), np.std( r2_l))
return r2_l
def gs_RidgeByLasso_kf_ext( xM, yV, alphas_log_l):
kf5_ext_c = model_selection.KFold( n_folds=5, shuffle=True)
kf5_ext = kf5_ext_c.split( xM)
score_l = []
for ix, (tr, te) in enumerate( kf5_ext):
print('{}th fold external validation stage ============================'.format( ix + 1))
xM_in = xM[ tr, :]
yV_in = yV[ tr, 0]
print('First Ridge Stage')
gs1 = gs_Lasso( xM_in, yV_in, alphas_log_l[0])
print('Best score:', gs1.best_score_)
print('Best param:', gs1.best_params_)
print(gs1.grid_scores_)
nz_idx = gs1.best_estimator_.sparse_coef_.indices
xM_in_nz = xM_in[ :, nz_idx]
print('Second Lasso Stage')
gs2 = gs_Ridge( xM_in_nz, yV_in, alphas_log_l[1])
print('Best score:', gs2.best_score_)
print('Best param:', gs2.best_params_)
print(gs2.grid_scores_)
print('External Validation Stage')
# Obtain prediction model by whole data including internal validation data
alpha = gs2.best_params_['alpha']
clf = linear_model.Ridge( alpha = alpha)
clf.fit( xM_in_nz, yV_in)
xM_out = xM[ te, :]
yV_out = yV[ te, 0]
xM_out_nz = xM_out[:, nz_idx]
score = clf.score( xM_out_nz, yV_out)
print(score)
score_l.append( score)
print('')
print('all scores:', score_l)
print('average scores:', np.mean( score_l))
return score_l
def gs_SVR( xM, yV, svr_params):
print(xM.shape, yV.shape)
clf = svm.SVR()
#parmas = {'alpha': np.logspace(1, -1, 9)}
kf5_c = model_selection.KFold( n_folds=5, shuffle=True)
kf5 = kf5_c.split( xM)
gs = model_selection.GridSearchCV( clf, svr_params, scoring = 'r2', cv = kf5, n_jobs = -1)
gs.fit( xM, yV.A1)
return gs
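# Illustrative usage (added): svr_params is a plain GridSearchCV parameter grid, so its
# keys must be valid sklearn.svm.SVR arguments. The grid below is only an example; the
# actual ranges should be tuned to the data.
def _example_gs_SVR(xM, yV):
    svr_params = {'C': np.logspace(-1, 2, 4),
                  'gamma': np.logspace(-3, 0, 4),
                  'epsilon': [0.01, 0.1, 1.0]}
    gs = gs_SVR(xM, yV, svr_params)
    return gs.best_params_, gs.best_score_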
def gs_SVRByLasso_kf_ext( xM, yV, alphas_log, svr_params):
kf5_ext_c = model_selection.KFold( n_folds=5, shuffle=True)
kf5_ext = kf5_ext_c.split( xM)
score_l = []
for ix, (tr, te) in enumerate( kf5_ext):
print('{}th fold external validation stage ============================'.format( ix + 1))
xM_in = xM[ tr, :]
yV_in = yV[ tr, 0]
print('First Ridge Stage')
gs1 = gs_Lasso( xM_in, yV_in, alphas_log)
print('Best score:', gs1.best_score_)
print('Best param:', gs1.best_params_)
print(gs1.grid_scores_)
nz_idx = gs1.best_estimator_.sparse_coef_.indices
xM_in_nz = xM_in[ :, nz_idx]
print('Second Lasso Stage')
gs2 = gs_SVR( xM_in_nz, yV_in, svr_params)
print('Best score:', gs2.best_score_)
print('Best param:', gs2.best_params_)
print(gs2.grid_scores_)
print('External Validation Stage')
# Obtain prediction model by whole data including internal validation data
C = gs2.best_params_['C']
gamma = gs2.best_params_['gamma']
epsilon = gs2.best_params_['epsilon']
clf = svm.SVR( C = C, gamma = gamma, epsilon = epsilon)
clf.fit( xM_in_nz, yV_in.A1)
xM_out = xM[ te, :]
yV_out = yV[ te, 0]
xM_out_nz = xM_out[:, nz_idx]
score = clf.score( xM_out_nz, yV_out.A1)
print(score)
score_l.append( score)
print('')
print('all scores:', score_l)
print('average scores:', np.mean( score_l))
return score_l
def gs_SVRByLasso( xM, yV, alphas_log, svr_params):
kf5_ext_c = model_selection.KFold( n_folds=5, shuffle=True)
kf5_ext = kf5_ext_c.split( xM)
score1_l = []
score_l = []
for ix, (tr, te) in enumerate( kf5_ext):
print('{}th fold external validation stage ============================'.format( ix + 1))
xM_in = xM[ tr, :]
yV_in = yV[ tr, 0]
print('First Ridge Stage')
gs1 = gs_Lasso( xM_in, yV_in, alphas_log)
print('Best score:', gs1.best_score_)
print('Best param:', gs1.best_params_)
print(gs1.grid_scores_)
score1_l.append( gs1.best_score_)
nz_idx = gs1.best_estimator_.sparse_coef_.indices
xM_in_nz = xM_in[ :, nz_idx]
print('Second Lasso Stage')
gs2 = gs_SVR( xM_in_nz, yV_in, svr_params)
print('Best score:', gs2.best_score_)
print('Best param:', gs2.best_params_)
print(gs2.grid_scores_)
print('External Validation Stage')
# Obtain prediction model by whole data including internal validation data
C = gs2.best_params_['C']
gamma = gs2.best_params_['gamma']
epsilon = gs2.best_params_['epsilon']
clf = svm.SVR( C = C, gamma = gamma, epsilon = epsilon)
clf.fit( xM_in_nz, yV_in.A1)
xM_out = xM[ te, :]
yV_out = yV[ te, 0]
xM_out_nz = xM_out[:, nz_idx]
score = clf.score( xM_out_nz, yV_out.A1)
print(score)
score_l.append( score)
print('')
print('all scores:', score_l)
print('average scores:', np.mean( score_l))
print('First stage scores', score1_l)
print('Average first stage scores', np.mean( score1_l))
return score_l, score1_l
def gs_ElasticNet( xM, yV, en_params):
print(xM.shape, yV.shape)
clf = linear_model.ElasticNet()
kf5_c = model_selection.KFold( n_folds=5, shuffle=True)
kf5 = kf5_c.split( xM)
gs = model_selection.GridSearchCV( clf, en_params, scoring = 'r2', cv = kf5, n_jobs = -1)
gs.fit( xM, yV)
return gs
def gs_SVRByElasticNet( xM, yV, en_params, svr_params):
kf5_ext_c = model_selection.KFold( n_folds=5, shuffle=True)
kf5_ext = kf5_ext_c.split( xM)
score1_l = []
score_l = []
for ix, (tr, te) in enumerate( kf5_ext):
print('{}th fold external validation stage ============================'.format( ix + 1))
xM_in = xM[ tr, :]
yV_in = yV[ tr, 0]
print('First Ridge Stage')
gs1 = gs_ElasticNet( xM_in, yV_in, en_params)
print('Best score:', gs1.best_score_)
print('Best param:', gs1.best_params_)
print(gs1.grid_scores_)
score1_l.append( gs1.best_score_)
nz_idx = gs1.best_estimator_.sparse_coef_.indices
xM_in_nz = xM_in[ :, nz_idx]
print('Second Lasso Stage')
gs2 = gs_SVR( xM_in_nz, yV_in, svr_params)
print('Best score:', gs2.best_score_)
print('Best param:', gs2.best_params_)
print(gs2.grid_scores_)
print('External Validation Stage')
# Obtain prediction model by whole data including internal validation data
C = gs2.best_params_['C']
gamma = gs2.best_params_['gamma']
epsilon = gs2.best_params_['epsilon']
clf = svm.SVR( C = C, gamma = gamma, epsilon = epsilon)
clf.fit( xM_in_nz, yV_in.A1)
xM_out = xM[ te, :]
yV_out = yV[ te, 0]
xM_out_nz = xM_out[:, nz_idx]
score = clf.score( xM_out_nz, yV_out.A1)
print(score)
score_l.append( score)
print('')
print('all scores:', score_l)
print('average scores:', np.mean( score_l))
print('First stage scores', score1_l)
print('Average first stage scores', np.mean( score1_l))
return score_l, score1_l
def gs_GPByLasso( xM, yV, alphas_log):
kf5_ext_c = model_selection.KFold( n_folds=5, shuffle=True)
kf5_ext = kf5_ext_c.split( xM)
score1_l = []
score_l = []
for ix, (tr, te) in enumerate( kf5_ext):
print('{}th fold external validation stage ============================'.format( ix + 1))
xM_in = xM[ tr, :]
yV_in = yV[ tr, 0]
print('First Ridge Stage')
gs1 = gs_Lasso( xM_in, yV_in, alphas_log)
print('Best score:', gs1.best_score_)
print('Best param:', gs1.best_params_)
print(gs1.grid_scores_)
score1_l.append( gs1.best_score_)
nz_idx = gs1.best_estimator_.sparse_coef_.indices
xM_in_nz = xM_in[ :, nz_idx]
print('Second GP Stage')
Xa_in_nz = np.array( xM_in_nz)
ya_in = np.array( yV_in)
xM_out = xM[ te, :]
yV_out = yV[ te, 0]
xM_out_nz = xM_out[:, nz_idx]
Xa_out_nz = np.array( xM_out_nz)
ya_out = np.array( yV_out)
#jgp = gp.GaussianProcess( Xa_in_nz, ya_in, Xa_out_nz, ya_out)
# the y array should be passed as [:, 0] so that it is sent as a 1-D vector array
jgp = gp.GaussianProcess( Xa_in_nz, ya_in[:,0], Xa_out_nz, ya_out[:,0])
jgp.optimize_noise_and_amp()
jgp.run_gp()
#ya_out_pred = np.mat(jgp.predicted_targets)
ya_out_pred = jgp.predicted_targets
#print ya_out[:,0].shape, jgp.predicted_targets.shape
r2, rmse = regress_show( ya_out[:,0], ya_out_pred)
score = r2
print(score)
score_l.append( score)
print('')
print('all scores:', score_l)
print('average scores:', np.mean( score_l))
print('First stage scores', score1_l)
print('Average first stage scores', np.mean( score1_l))
return score_l, score1_l
def show_gs_alpha( grid_scores):
alphas = np.array([ x[0]['alpha'] for x in grid_scores])
r2_mean = np.array([ x[1] for x in grid_scores])
r2_std = np.array([ np.std(x[2]) for x in grid_scores])
r2_mean_pos = r2_mean + r2_std
r2_mean_neg = r2_mean - r2_std
plt.semilogx( alphas, r2_mean, 'x-', label = 'E[$r^2$]')
plt.semilogx( alphas, r2_mean_pos, ':k', label = 'E[$r^2$]+$\sigma$')
plt.semilogx( alphas, r2_mean_neg, ':k', label = 'E[$r^2$]-$\sigma$')
plt.grid()
plt.legend( loc = 2)
plt.show()
best_idx = np.argmax( r2_mean)
best_r2_mean = r2_mean[ best_idx]
best_r2_std = r2_std[ best_idx]
best_alpha = alphas[ best_idx]
print("Best: r2(alpha = {0}) -> mean:{1}, std:{2}".format( best_alpha, best_r2_mean, best_r2_std))
def count( a_l, a, inverse = False):
"""
It returns the number of elements which are equal to
the target value.
In order to handle the case where the input is an array with more than
one dimension, conversion from array to list is used.
"""
if inverse == False:
x = np.where( np.array( a_l) == a)
else:
x = np.where( np.array( a_l) != a)
return len(x[0].tolist())
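# Illustrative usage (added): count() simply counts matches (or non-matches) of a target
# value in a list or array. The sketch below is not part of the original module.
def _example_count():
    a_l = [1, 2, 2, 3, 2]
    n_equal = count(a_l, 2)                    # -> 3
    n_not_equal = count(a_l, 2, inverse=True)  # -> 2
    return n_equal, n_not_equal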
def show_cdf( data, xlabel_str = None, label_str = ''):
"""
Show the CDF graph of data, which should be a list or array in 1-D form.
xlabel_str is the name of the x-axis.
show() is not called here so that multiple plots can be aggregated and shown later.
"""
data_sorted = np.sort( data)
# calculate the proportional values of samples
p = 1. * np.arange(len(data)) / (len(data) - 1)
plt.plot( data_sorted, p, label = label_str)
if xlabel_str:
plt.xlabel( xlabel_str)
plt.ylabel( 'Cumulative Fraction')
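# Illustrative usage (added): show_cdf() plots an empirical CDF and deliberately leaves
# plt.show() to the caller so that several curves can be overlaid. A minimal sketch:
def _example_show_cdf():
    data_a = np.random.randn(500)
    data_b = np.random.randn(500) + 1.0
    show_cdf(data_a, xlabel_str='value', label_str='baseline')
    show_cdf(data_b, label_str='shifted')
    plt.legend()
    plt.show()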
def mlr_show4_pred( clf, RMv, yEv, disp = True, graph = True):
yEv_calc = clf.predict( RMv)
if len( np.shape(yEv)) == 2 and len( np.shape(yEv_calc)) == 1:
yEv_calc = np.mat( yEv_calc).T
r_sqr, RMSE, MAE, DAE = estimate_accuracy4( yEv, yEv_calc, disp = disp)
if graph:
plt.figure()
ms_sz = max(min( 4000 / yEv.shape[0], 8), 1)
plt.plot( yEv.tolist(), yEv_calc.tolist(), '.', ms = ms_sz)
ax = plt.gca()
lims = [
np.min([ax.get_xlim(), ax.get_ylim()]), # min of both axes
np.max([ax.get_xlim(), ax.get_ylim()]), # max of both axes
]
# now plot both limits against each other
#ax.plot(lims, lims, 'k-', alpha=0.75, zorder=0)
ax.plot(lims, lims, '-', color = 'pink')
plt.xlabel('Experiment')
plt.ylabel('Prediction')
#plt.title( '$r^2$={0:.2e}, RMSE={1:.2e}, AAE={2:.2e}'.format( r_sqr, RMSE, aae))
plt.title( '$r^2$={0:.1e},$\sigma$={1:.1e},MAE={2:.1e},DAE={3:.1e}'.format( r_sqr, RMSE, MAE, DAE))
plt.show()
return (r_sqr, RMSE, MAE, DAE), yEv_calc
def mlr4_coef_pred( RM, yE, disp = True, graph = True):
"""
Return: coef_, intercept_, yEp
"""
clf = linear_model.LinearRegression()
clf.fit( RM, yE)
_, yEp = mlr_show4_pred( clf, RM, yE, disp = disp, graph = graph)
return clf.coef_, clf.intercept_, yEp | mit |
airbnb/caravel | superset/data/birth_names.py | 1 | 18557 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import gzip
import json
import os
import textwrap
import pandas as pd
from sqlalchemy import DateTime, String
from superset import db, security_manager
from superset.connectors.sqla.models import SqlMetric, TableColumn
from superset.utils.core import get_or_create_main_db
from .helpers import (
config,
Dash,
DATA_FOLDER,
get_slice_json,
merge_slice,
Slice,
TBL,
update_slice_ids,
)
def load_birth_names():
"""Loading birth name dataset from a zip file in the repo"""
with gzip.open(os.path.join(DATA_FOLDER, 'birth_names.json.gz')) as f:
pdf = pd.read_json(f)
pdf.ds = pd.to_datetime(pdf.ds, unit='ms')
pdf.to_sql(
'birth_names',
db.engine,
if_exists='replace',
chunksize=500,
dtype={
'ds': DateTime,
'gender': String(16),
'state': String(10),
'name': String(255),
},
index=False)
print('Done loading table!')
print('-' * 80)
print('Creating table [birth_names] reference')
obj = db.session.query(TBL).filter_by(table_name='birth_names').first()
if not obj:
obj = TBL(table_name='birth_names')
obj.main_dttm_col = 'ds'
obj.database = get_or_create_main_db()
obj.filter_select_enabled = True
if not any(col.column_name == 'num_california' for col in obj.columns):
obj.columns.append(TableColumn(
column_name='num_california',
expression="CASE WHEN state = 'CA' THEN num ELSE 0 END",
))
if not any(col.metric_name == 'sum__num' for col in obj.metrics):
obj.metrics.append(SqlMetric(
metric_name='sum__num',
expression='SUM(num)',
))
db.session.merge(obj)
db.session.commit()
obj.fetch_metadata()
tbl = obj
defaults = {
'compare_lag': '10',
'compare_suffix': 'o10Y',
'limit': '25',
'granularity_sqla': 'ds',
'groupby': [],
'metric': 'sum__num',
'metrics': ['sum__num'],
'row_limit': config.get('ROW_LIMIT'),
'since': '100 years ago',
'until': 'now',
'viz_type': 'table',
'where': '',
'markup_type': 'markdown',
}
admin = security_manager.find_user('admin')
print('Creating some slices')
slices = [
Slice(
slice_name='Girls',
viz_type='table',
datasource_type='table',
datasource_id=tbl.id,
params=get_slice_json(
defaults,
groupby=['name'],
filters=[{
'col': 'gender',
'op': 'in',
'val': ['girl'],
}],
row_limit=50,
timeseries_limit_metric='sum__num')),
Slice(
slice_name='Boys',
viz_type='table',
datasource_type='table',
datasource_id=tbl.id,
params=get_slice_json(
defaults,
groupby=['name'],
filters=[{
'col': 'gender',
'op': 'in',
'val': ['boy'],
}],
row_limit=50)),
Slice(
slice_name='Participants',
viz_type='big_number',
datasource_type='table',
datasource_id=tbl.id,
params=get_slice_json(
defaults,
viz_type='big_number', granularity_sqla='ds',
compare_lag='5', compare_suffix='over 5Y')),
Slice(
slice_name='Genders',
viz_type='pie',
datasource_type='table',
datasource_id=tbl.id,
params=get_slice_json(
defaults,
viz_type='pie', groupby=['gender'])),
Slice(
slice_name='Genders by State',
viz_type='dist_bar',
datasource_type='table',
datasource_id=tbl.id,
params=get_slice_json(
defaults,
adhoc_filters=[
{
'clause': 'WHERE',
'expressionType': 'SIMPLE',
'filterOptionName': '2745eae5',
'comparator': ['other'],
'operator': 'not in',
'subject': 'state',
},
],
viz_type='dist_bar',
metrics=[
{
'expressionType': 'SIMPLE',
'column': {
'column_name': 'sum_boys',
'type': 'BIGINT(20)',
},
'aggregate': 'SUM',
'label': 'Boys',
'optionName': 'metric_11',
},
{
'expressionType': 'SIMPLE',
'column': {
'column_name': 'sum_girls',
'type': 'BIGINT(20)',
},
'aggregate': 'SUM',
'label': 'Girls',
'optionName': 'metric_12',
},
],
groupby=['state'])),
Slice(
slice_name='Trends',
viz_type='line',
datasource_type='table',
datasource_id=tbl.id,
params=get_slice_json(
defaults,
viz_type='line', groupby=['name'],
granularity_sqla='ds', rich_tooltip=True, show_legend=True)),
Slice(
slice_name='Average and Sum Trends',
viz_type='dual_line',
datasource_type='table',
datasource_id=tbl.id,
params=get_slice_json(
defaults,
viz_type='dual_line',
metric={
'expressionType': 'SIMPLE',
'column': {
'column_name': 'num',
'type': 'BIGINT(20)',
},
'aggregate': 'AVG',
'label': 'AVG(num)',
'optionName': 'metric_vgops097wej_g8uff99zhk7',
},
metric_2='sum__num',
granularity_sqla='ds')),
Slice(
slice_name='Title',
viz_type='markup',
datasource_type='table',
datasource_id=tbl.id,
params=get_slice_json(
defaults,
viz_type='markup', markup_type='html',
code="""\
<div style='text-align:center'>
<h1>Birth Names Dashboard</h1>
<p>
The source dataset came from
<a href='https://github.com/hadley/babynames' target='_blank'>[here]</a>
</p>
<img src='/static/assets/images/babytux.jpg'>
</div>
""")),
Slice(
slice_name='Name Cloud',
viz_type='word_cloud',
datasource_type='table',
datasource_id=tbl.id,
params=get_slice_json(
defaults,
viz_type='word_cloud', size_from='10',
series='name', size_to='70', rotation='square',
limit='100')),
Slice(
slice_name='Pivot Table',
viz_type='pivot_table',
datasource_type='table',
datasource_id=tbl.id,
params=get_slice_json(
defaults,
viz_type='pivot_table', metrics=['sum__num'],
groupby=['name'], columns=['state'])),
Slice(
slice_name='Number of Girls',
viz_type='big_number_total',
datasource_type='table',
datasource_id=tbl.id,
params=get_slice_json(
defaults,
viz_type='big_number_total', granularity_sqla='ds',
filters=[{
'col': 'gender',
'op': 'in',
'val': ['girl'],
}],
subheader='total female participants')),
Slice(
slice_name='Number of California Births',
viz_type='big_number_total',
datasource_type='table',
datasource_id=tbl.id,
params=get_slice_json(
defaults,
metric={
'expressionType': 'SIMPLE',
'column': {
'column_name': 'num_california',
'expression': "CASE WHEN state = 'CA' THEN num ELSE 0 END",
},
'aggregate': 'SUM',
'label': 'SUM(num_california)',
},
viz_type='big_number_total',
granularity_sqla='ds')),
Slice(
slice_name='Top 10 California Names Timeseries',
viz_type='line',
datasource_type='table',
datasource_id=tbl.id,
params=get_slice_json(
defaults,
metrics=[{
'expressionType': 'SIMPLE',
'column': {
'column_name': 'num_california',
'expression': "CASE WHEN state = 'CA' THEN num ELSE 0 END",
},
'aggregate': 'SUM',
'label': 'SUM(num_california)',
}],
viz_type='line',
granularity_sqla='ds',
groupby=['name'],
timeseries_limit_metric={
'expressionType': 'SIMPLE',
'column': {
'column_name': 'num_california',
'expression': "CASE WHEN state = 'CA' THEN num ELSE 0 END",
},
'aggregate': 'SUM',
'label': 'SUM(num_california)',
},
limit='10')),
Slice(
slice_name='Names Sorted by Num in California',
viz_type='table',
datasource_type='table',
datasource_id=tbl.id,
params=get_slice_json(
defaults,
groupby=['name'],
row_limit=50,
timeseries_limit_metric={
'expressionType': 'SIMPLE',
'column': {
'column_name': 'num_california',
'expression': "CASE WHEN state = 'CA' THEN num ELSE 0 END",
},
'aggregate': 'SUM',
'label': 'SUM(num_california)',
})),
Slice(
slice_name='Num Births Trend',
viz_type='line',
datasource_type='table',
datasource_id=tbl.id,
params=get_slice_json(
defaults,
viz_type='line')),
Slice(
slice_name='Daily Totals',
viz_type='table',
datasource_type='table',
datasource_id=tbl.id,
created_by=admin,
params=get_slice_json(
defaults,
groupby=['ds'],
since='40 years ago',
until='now',
viz_type='table')),
]
for slc in slices:
merge_slice(slc)
print('Creating a dashboard')
dash = db.session.query(Dash).filter_by(dashboard_title='Births').first()
if not dash:
dash = Dash()
js = textwrap.dedent("""\
{
"CHART-0dd270f0": {
"meta": {
"chartId": 51,
"width": 2,
"height": 50
},
"type": "CHART",
"id": "CHART-0dd270f0",
"children": []
},
"CHART-a3c21bcc": {
"meta": {
"chartId": 52,
"width": 2,
"height": 50
},
"type": "CHART",
"id": "CHART-a3c21bcc",
"children": []
},
"CHART-976960a5": {
"meta": {
"chartId": 53,
"width": 2,
"height": 25
},
"type": "CHART",
"id": "CHART-976960a5",
"children": []
},
"CHART-58575537": {
"meta": {
"chartId": 54,
"width": 2,
"height": 25
},
"type": "CHART",
"id": "CHART-58575537",
"children": []
},
"CHART-e9cd8f0b": {
"meta": {
"chartId": 55,
"width": 8,
"height": 38
},
"type": "CHART",
"id": "CHART-e9cd8f0b",
"children": []
},
"CHART-e440d205": {
"meta": {
"chartId": 56,
"width": 8,
"height": 50
},
"type": "CHART",
"id": "CHART-e440d205",
"children": []
},
"CHART-59444e0b": {
"meta": {
"chartId": 57,
"width": 3,
"height": 38
},
"type": "CHART",
"id": "CHART-59444e0b",
"children": []
},
"CHART-e2cb4997": {
"meta": {
"chartId": 59,
"width": 4,
"height": 50
},
"type": "CHART",
"id": "CHART-e2cb4997",
"children": []
},
"CHART-e8774b49": {
"meta": {
"chartId": 60,
"width": 12,
"height": 50
},
"type": "CHART",
"id": "CHART-e8774b49",
"children": []
},
"CHART-985bfd1e": {
"meta": {
"chartId": 61,
"width": 4,
"height": 50
},
"type": "CHART",
"id": "CHART-985bfd1e",
"children": []
},
"CHART-17f13246": {
"meta": {
"chartId": 62,
"width": 4,
"height": 50
},
"type": "CHART",
"id": "CHART-17f13246",
"children": []
},
"CHART-729324f6": {
"meta": {
"chartId": 63,
"width": 4,
"height": 50
},
"type": "CHART",
"id": "CHART-729324f6",
"children": []
},
"COLUMN-25a865d6": {
"meta": {
"width": 4,
"background": "BACKGROUND_TRANSPARENT"
},
"type": "COLUMN",
"id": "COLUMN-25a865d6",
"children": [
"ROW-cc97c6ac",
"CHART-e2cb4997"
]
},
"COLUMN-4557b6ba": {
"meta": {
"width": 8,
"background": "BACKGROUND_TRANSPARENT"
},
"type": "COLUMN",
"id": "COLUMN-4557b6ba",
"children": [
"ROW-d2e78e59",
"CHART-e9cd8f0b"
]
},
"GRID_ID": {
"type": "GRID",
"id": "GRID_ID",
"children": [
"ROW-8515ace3",
"ROW-1890385f",
"ROW-f0b64094",
"ROW-be9526b8"
]
},
"HEADER_ID": {
"meta": {
"text": "Births"
},
"type": "HEADER",
"id": "HEADER_ID"
},
"MARKDOWN-00178c27": {
"meta": {
"width": 5,
"code": "<div style=\\"text-align:center\\">\\n <h1>Birth Names Dashboard</h1>\\n <p>\\n The source dataset came from\\n <a href=\\"https://github.com/hadley/babynames\\" target=\\"_blank\\">[here]</a>\\n </p>\\n <img src=\\"/static/assets/images/babytux.jpg\\">\\n</div>\\n",
"height": 38
},
"type": "MARKDOWN",
"id": "MARKDOWN-00178c27",
"children": []
},
"ROOT_ID": {
"type": "ROOT",
"id": "ROOT_ID",
"children": [
"GRID_ID"
]
},
"ROW-1890385f": {
"meta": {
"background": "BACKGROUND_TRANSPARENT"
},
"type": "ROW",
"id": "ROW-1890385f",
"children": [
"CHART-e440d205",
"CHART-0dd270f0",
"CHART-a3c21bcc"
]
},
"ROW-8515ace3": {
"meta": {
"background": "BACKGROUND_TRANSPARENT"
},
"type": "ROW",
"id": "ROW-8515ace3",
"children": [
"COLUMN-25a865d6",
"COLUMN-4557b6ba"
]
},
"ROW-be9526b8": {
"meta": {
"background": "BACKGROUND_TRANSPARENT"
},
"type": "ROW",
"id": "ROW-be9526b8",
"children": [
"CHART-985bfd1e",
"CHART-17f13246",
"CHART-729324f6"
]
},
"ROW-cc97c6ac": {
"meta": {
"background": "BACKGROUND_TRANSPARENT"
},
"type": "ROW",
"id": "ROW-cc97c6ac",
"children": [
"CHART-976960a5",
"CHART-58575537"
]
},
"ROW-d2e78e59": {
"meta": {
"background": "BACKGROUND_TRANSPARENT"
},
"type": "ROW",
"id": "ROW-d2e78e59",
"children": [
"MARKDOWN-00178c27",
"CHART-59444e0b"
]
},
"ROW-f0b64094": {
"meta": {
"background": "BACKGROUND_TRANSPARENT"
},
"type": "ROW",
"id": "ROW-f0b64094",
"children": [
"CHART-e8774b49"
]
},
"DASHBOARD_VERSION_KEY": "v2"
}
""")
pos = json.loads(js)
# dashboard v2 doesn't allow adding markup slices
dash.slices = [slc for slc in slices if slc.viz_type != 'markup']
update_slice_ids(pos, dash.slices)
dash.dashboard_title = 'Births'
dash.position_json = json.dumps(pos, indent=4)
dash.slug = 'births'
db.session.merge(dash)
db.session.commit()
| apache-2.0 |
ua-snap/downscale | old/bin/sort_files_by_rcp.py | 2 | 1564 | import os, glob, shutil
from pathos import multiprocessing as mp
import pandas as pd
import numpy as np
base_path = '/Data/malindgren/cru_november_final/IEM/ar5'
output_base_path = '/Data/malindgren/cru_november_final/IEM/ar5'
models = [ 'IPSL-CM5A-LR', 'GISS-E2-R', 'MRI-CGCM3', 'CCSM4', 'GFDL-CM3' ]
# variables = ['rsds', 'vap' ]
for model in models:
variables = os.listdir( os.path.join( base_path, model ) )
_ = [ os.makedirs( os.path.join( base_path, model, variable ) ) for variable in variables if not os.path.exists( os.path.join( base_path, model, variable ) ) ]
for variable in variables:
print( ' '.join([model, variable]) )
output_path = os.path.join( output_base_path, model, variable, 'downscaled' )
cur_path = os.path.join( base_path, model, variable, 'downscaled' )
l = pd.Series( glob.glob( os.path.join( cur_path, '*.tif' ) ) )
grouper = [ os.path.basename(i).split( '_' )[ 5 ] for i in l ]
rcp_groups = l.groupby( grouper )
name_group = [ group for group in rcp_groups ]
names = [ i[0] for i in name_group ]
_ = [ os.makedirs( os.path.join( output_path, name ) ) for name in names if not os.path.exists( os.path.join( output_path, name ) ) ]
for count, name in enumerate( names ):
print( count)
group = name_group[ count ]
out_group = [ os.path.join( output_path, name, os.path.basename( i ) ) for i in group[1] ]
def run( x, y ):
import shutil
return shutil.move( x, y )
pool = mp.Pool( 15 )
out = pool.map( lambda x: run(x[0], x[1]), zip( group[1], out_group ) )
pool.close()
| mit |
mikebenfield/scikit-learn | sklearn/feature_extraction/tests/test_feature_hasher.py | 41 | 3668 | from __future__ import unicode_literals
import numpy as np
from numpy.testing import assert_array_equal
from sklearn.feature_extraction import FeatureHasher
from sklearn.utils.testing import assert_raises, assert_true, assert_equal
def test_feature_hasher_dicts():
h = FeatureHasher(n_features=16)
assert_equal("dict", h.input_type)
raw_X = [{"foo": "bar", "dada": 42, "tzara": 37},
{"foo": "baz", "gaga": u"string1"}]
X1 = FeatureHasher(n_features=16).transform(raw_X)
gen = (iter(d.items()) for d in raw_X)
X2 = FeatureHasher(n_features=16, input_type="pair").transform(gen)
assert_array_equal(X1.toarray(), X2.toarray())
def test_feature_hasher_strings():
# mix byte and Unicode strings; note that "foo" is a duplicate in row 0
raw_X = [["foo", "bar", "baz", "foo".encode("ascii")],
["bar".encode("ascii"), "baz", "quux"]]
for lg_n_features in (7, 9, 11, 16, 22):
n_features = 2 ** lg_n_features
it = (x for x in raw_X) # iterable
h = FeatureHasher(n_features, non_negative=True, input_type="string")
X = h.transform(it)
assert_equal(X.shape[0], len(raw_X))
assert_equal(X.shape[1], n_features)
assert_true(np.all(X.data > 0))
assert_equal(X[0].sum(), 4)
assert_equal(X[1].sum(), 3)
assert_equal(X.nnz, 6)
def test_feature_hasher_pairs():
raw_X = (iter(d.items()) for d in [{"foo": 1, "bar": 2},
{"baz": 3, "quux": 4, "foo": -1}])
h = FeatureHasher(n_features=16, input_type="pair")
x1, x2 = h.transform(raw_X).toarray()
x1_nz = sorted(np.abs(x1[x1 != 0]))
x2_nz = sorted(np.abs(x2[x2 != 0]))
assert_equal([1, 2], x1_nz)
assert_equal([1, 3, 4], x2_nz)
def test_feature_hasher_pairs_with_string_values():
raw_X = (iter(d.items()) for d in [{"foo": 1, "bar": "a"},
{"baz": u"abc", "quux": 4, "foo": -1}])
h = FeatureHasher(n_features=16, input_type="pair")
x1, x2 = h.transform(raw_X).toarray()
x1_nz = sorted(np.abs(x1[x1 != 0]))
x2_nz = sorted(np.abs(x2[x2 != 0]))
assert_equal([1, 1], x1_nz)
assert_equal([1, 1, 4], x2_nz)
raw_X = (iter(d.items()) for d in [{"bax": "abc"},
{"bax": "abc"}])
x1, x2 = h.transform(raw_X).toarray()
x1_nz = np.abs(x1[x1 != 0])
x2_nz = np.abs(x2[x2 != 0])
assert_equal([1], x1_nz)
assert_equal([1], x2_nz)
assert_array_equal(x1, x2)
def test_hash_empty_input():
n_features = 16
raw_X = [[], (), iter(range(0))]
h = FeatureHasher(n_features=n_features, input_type="string")
X = h.transform(raw_X)
assert_array_equal(X.A, np.zeros((len(raw_X), n_features)))
def test_hasher_invalid_input():
assert_raises(ValueError, FeatureHasher, input_type="gobbledygook")
assert_raises(ValueError, FeatureHasher, n_features=-1)
assert_raises(ValueError, FeatureHasher, n_features=0)
assert_raises(TypeError, FeatureHasher, n_features='ham')
h = FeatureHasher(n_features=np.uint16(2 ** 6))
assert_raises(ValueError, h.transform, [])
assert_raises(Exception, h.transform, [[5.5]])
assert_raises(Exception, h.transform, [[None]])
def test_hasher_set_params():
# Test delayed input validation in fit (useful for grid search).
hasher = FeatureHasher()
hasher.set_params(n_features=np.inf)
assert_raises(TypeError, hasher.fit)
def test_hasher_zeros():
# Assert that no zeros are materialized in the output.
X = FeatureHasher().transform([{'foo': 0}])
assert_equal(X.data.shape, (0,))
| bsd-3-clause |
cloudmesh/book | examples/physics/number-theory/higgs_classIII.py | 2 | 19448 | '''This file contains code for the unit:Number Theory. '''
import numpy as np
import matplotlib.pyplot as plt
from scipy.special import ndtri
import pylab
'''Method that returns the value of the Gaussian given an input array, a mean, and a standard deviation'''
def Normal(x, mu,sigma):
return np.exp(- (x-mu)**2/(2*sigma**2))/(sigma*np.sqrt(2*np.pi))
plt.show()
'''Counting 25 events 40000 times'''
Events25 = np.random.rand(1000000) #generate 25*40000 = 1,000,000 random numbers
Counters25 = np.zeros(40000) #generate an array with 40000 entries all set to 0
for value in Events25:
Place = int(40000 * value) #Scale the random values to range between 0 to 40000
Counters25[Place] +=1 #Increment counts for the value as per the scaled value
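'''Added note: 1,000,000 uniform draws spread over 40000 counters give each counter an
approximately Poisson-distributed count with mean 25, so the Gaussian used below with
sigma = sqrt(25) = 5 is the standard large-count approximation. The quick check below is
not in the original script; it just compares the observed mean and spread with 25 and 5.'''
print("Sanity check: mean of Counters25 =", np.mean(Counters25),
      ", std =", np.std(Counters25), " (expected ~25 and ~5)")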
####Plot- The result of counting 25 events 40000 times as well as the errors, one sigma, one percent, 99 percent
###See figure - Count 25 Events 40000 times
plt.figure("Count 25 Events 40000 times")
Numcounts25, binedges25, patches = plt.hist(Counters25, bins = 50, range = (0,50), color = "green", alpha = 0.5) #plot histogram with 50 bins. Store Number of counts/bin and bin edges
centers25 = 0.5*(binedges25[1:] + binedges25[:-1]) #Computing bin centers as means of the bin edge values
y25 = 40000 * Normal(centers25, 25, np.sqrt(25)) #Compute the y values(as per the gaussian function)
xbar25 = np.zeros(2)
ybar25 = np.zeros(2)
xbar25[0] = 25 - np.sqrt(25) #Compute the one sigma values as
xbar25[1] = 25 + np.sqrt(25) #mean +-error(on the mean value)
ybar25 = 40000*Normal(xbar25, 25, np.sqrt(25)) #Computing y values as per the gaussian function for the X values
plt.plot(xbar25, ybar25, color= "red", alpha = 1.0, lw =5) #plot the line joining the 2 one sigma points
plt.plot(centers25, y25, alpha = 1.0, color = "red", lw =5) #plot the gaussian function passing through the center of each bin
errors25 = np.sqrt(y25) #Compute the expected error on Y-values
plt.errorbar(centers25, y25, yerr = errors25, linestyle='None', linewidth = 3.0, markeredgewidth = 3.0, marker ='o', color = 'black', markersize= 5.0 ) #Plot the errors on Y values
prob1percent25 = 25 + np.sqrt(25) * ndtri(0.01) #compute the 1% point - x value
prob99percent25 = 25 + np.sqrt(25) * ndtri(0.99) #compute the 99% point - x value
y1percent25 = 40000*Normal(prob1percent25, 25, np.sqrt(25)) #compute the 1% point - y value
y99percent25 = 40000*Normal(prob99percent25, 25, np.sqrt(25)) #compute the 99% point - y value
#Perform labelling operations for the plots
plt.annotate('One percent', xycoords="data", textcoords='offset points', arrowprops=dict(facecolor='black', arrowstyle="->"), xytext =(-75,50), xy = (prob1percent25, y1percent25))
plt.annotate('99 percent', xycoords="data", textcoords='offset points', arrowprops=dict(facecolor='black', arrowstyle="->"), xytext =(30,50), xy = (prob99percent25, y99percent25))
plt.annotate('One Sigma', xycoords="data", textcoords='offset points', xy = (20,ybar25[0]), xytext = (-70,30), arrowprops=dict(facecolor='black', arrowstyle="->"))
plt.annotate('One Sigma', xycoords="data", textcoords='offset points', xy = (30,ybar25[1]), xytext = (30,30), arrowprops=dict(facecolor='black', arrowstyle="->"))
plt.title("25 Events Counted 40000 times", backgroundcolor = "white")
'''A similar experiment to the one above is performed, counting 250 events 40000 times. Refer to the documentation of the section above.'''
Events250 = np.random.rand(10000000)
Counters250 = np.zeros(40000)
for value in Events250:
Place = int(40000 * value)
Counters250[Place] +=1
####Plot- The result of counting 250 events 40000 times as well as the errors, one sigma, one percent, 99 percent. This is identical to the plotting described above. Refer to the documentation of the section above.
###See figure - Count 250 Events 40000 times
plt.figure("Count 250 Events 40000 times")
Numcounts250, binedges250, patches = plt.hist(Counters250, bins = 200, range = (150,350), color = "green", alpha = 0.5)
centers250 = 0.5*(binedges250[1:] + binedges250[:-1])
y250 = 40000 * Normal(centers250, 250, np.sqrt(250))
errors250 = np.sqrt(y250)
xbar250 = np.zeros(2)
ybar250 = np.zeros(2)
xbar250[0] = 250 - np.sqrt(250)
xbar250[1] = 250 + np.sqrt(250)
ybar250 = 40000*Normal(xbar250, 250, np.sqrt(250))
plt.plot(xbar250, ybar250, color= "red", alpha = 1.0, lw =5)
plt.plot(centers250, y250, alpha = 1.0, color = "red", lw =5)
plt.errorbar(centers250, y250, yerr = errors250, linestyle='None', linewidth = 3.0, markeredgewidth = 3.0, marker ='o', color = 'black', markersize= 5.0 )
prob1percent250 = 250 + np.sqrt(250) * ndtri(0.01)
prob99percent250 = 250 + np.sqrt(250) * ndtri(0.99)
y1percent250 = 40000*Normal(prob1percent250, 250, np.sqrt(250)) #compute the 1% point - y value
y99percent250 = 40000*Normal(prob99percent250, 250, np.sqrt(250)) #compute the 99% point - y value
plt.annotate('One percent', xycoords="data", textcoords='offset points', arrowprops=dict(facecolor='black', arrowstyle="->"), xytext =(-75,50), xy = (prob1percent250, y1percent250))
plt.annotate('99 percent', xycoords="data", textcoords='offset points', arrowprops=dict(facecolor='black', arrowstyle="->"), xytext =(30,50), xy = (prob99percent250, y99percent250))
plt.annotate('One Sigma', xycoords="data", textcoords='offset points', xy = (xbar250[0],ybar250[0]), xytext = (-120,30), arrowprops=dict(facecolor='black', arrowstyle="->"))
plt.annotate('One Sigma', xycoords="data", textcoords='offset points', xy = (xbar250[1],ybar250[1]), xytext = (30,30), arrowprops=dict(facecolor='black', arrowstyle="->"))
plt.title("250 Events Counted 40000 times", backgroundcolor = "white")
'''The above experiment is repeated, counting 250 events 400 times. It is performed with 2 different seeds for the random numbers.'''
####First random set
Events250A = np.random.rand(100000)
Counters250A = np.zeros(400)
for value in Events250A:
Place = int(400 * value)
Counters250A[Place] +=1
####Plot- The result of counting 250 events 400 times as well as the errors, one sigma, one percent, 99 percent. This is identical to plotting described above. Refer to the documentation of the above section
###See figure - Count 250 Events 400 times I
plt.figure("Count 250 Events 400 times I")
Numcounts250A, binedges250A, patches = plt.hist(Counters250A, bins = 100, range = (200,300), color = "green", alpha = 0.5)
centers250A = 0.5*(binedges250A[1:] + binedges250A[:-1])
y250A = 400 * Normal(centers250A, 250, np.sqrt(250))
xbar250A = np.zeros(2)
ybar250A = np.zeros(2)
xbar250A[0] = 250 - np.sqrt(250)
xbar250A[1] = 250 + np.sqrt(250)
ybar250A = 400*Normal(xbar250A, 250, np.sqrt(250))
plt.plot(xbar250A, ybar250A, color= "red", alpha = 1.0, lw =5)
plt.plot(centers250A, y250A, alpha = 1.0, color = "red", lw =5)
prob1percent250A = 250 + np.sqrt(250) * ndtri(0.01)
prob99percent250A = 250 + np.sqrt(250) * ndtri(0.99)
y1percent250A = 400*Normal(prob1percent250A, 250, np.sqrt(250))
y99percent250A = 400*Normal(prob99percent250A, 250, np.sqrt(250))
plt.annotate('One percent', xycoords="data", textcoords='offset points', arrowprops=dict(facecolor='black', arrowstyle="->"), xytext =(-50,50), xy = (prob1percent250A, y1percent250A))
plt.annotate('99 percent', xycoords="data", textcoords='offset points', arrowprops=dict(facecolor='black', arrowstyle="->"), xytext =(-10,50), xy = (prob99percent250A, y99percent250A))
plt.annotate('One Sigma', xycoords="data", textcoords='offset points', xy = (xbar250A[0],ybar250A[0]), xytext = (-100,30), arrowprops=dict(facecolor='black', arrowstyle="->"))
plt.annotate('One Sigma', xycoords="data", textcoords='offset points', xy = (xbar250A[1],ybar250A[1]), xytext = (40,30), arrowprops=dict(facecolor='black', arrowstyle="->"))
plt.title("250 Events Counted 400 times. One Seed", backgroundcolor = "white")
### Second random set.
Events250A = np.random.rand(100000)
Counters250A = np.zeros(400)
for value in Events250A:
Place = int(400 * value)
Counters250A[Place] +=1
####Plot- The result of counting 250 events 400 times as well as the errors, one sigma, one percent, 99 percent with separate seed. This is identical to plotting described above. Refer to the documentation of the above section
###See figure - Count 250 Events 400 times II
plt.figure("Count 250 Events 400 times II")
Numcounts250A, binedges250A, patches = plt.hist(Counters250A, bins = 100, range = (200,300), color = "green", alpha = 0.5)
centers250A = 0.5*(binedges250A[1:] + binedges250A[:-1])
y250A = 400 * Normal(centers250A, 250, np.sqrt(250))
xbar250A = np.zeros(2)
ybar250A = np.zeros(2)
xbar250A[0] = 250 - np.sqrt(250)
xbar250A[1] = 250 + np.sqrt(250)
ybar250A = 400*Normal(xbar250A, 250, np.sqrt(250))
plt.plot(xbar250A, ybar250A, color= "red", alpha = 1.0, lw =5)
plt.plot(centers250A, y250A, alpha = 1.0, color = "red", lw =5)
prob1percent250A = 250 + np.sqrt(250) * ndtri(0.01)
prob99percent250A = 250 + np.sqrt(250) * ndtri(0.99)
y1percent250A = 400*Normal(prob1percent250A, 250, np.sqrt(250))
y99percent250A = 400*Normal(prob99percent250A, 250, np.sqrt(250))
plt.annotate('One percent', xycoords="data", textcoords='offset points', arrowprops=dict(facecolor='black', arrowstyle="->"), xytext =(-50,50), xy = (prob1percent250A, y1percent250A))
plt.annotate('99 percent', xycoords="data", textcoords='offset points', arrowprops=dict(facecolor='black', arrowstyle="->"), xytext =(-10,50), xy = (prob99percent250A, y99percent250A))
plt.annotate('One Sigma', xycoords="data", textcoords='offset points', xy = (xbar250A[0],ybar250A[0]), xytext = (-100,30), arrowprops=dict(facecolor='black', arrowstyle="->"))
plt.annotate('One Sigma', xycoords="data", textcoords='offset points', xy = (xbar250A[1],ybar250A[1]), xytext = (40,30), arrowprops=dict(facecolor='black', arrowstyle="->"))
plt.title("250 Events Counted 400 times. Another Seed", backgroundcolor = "white")
'''The above experiment is repeated, counting 250 events 400 times. It is performed with 2 different seeds for the random numbers. The number of bins is decreased to 20.'''
###First set of random numbers
Events250C = np.random.rand(100000)
Counters250C = np.zeros(400)
for value in Events250C:
Place = int(400 * value)
Counters250C[Place] +=1
####Plot- The result of counting 250 events 400 times as well as the errors, one sigma, one percent, 99 percent. The number of bins is decreased to 20. This is identical to plotting described above. Refer to the documentation of the above section
###See figure - Count 250 Events 400 times Larger Bins
plt.figure("Count 250 Events 400 times Larger Bins.")
Numcounts250C, binedges250C, patches = plt.hist(Counters250C, bins = 20, range = (200,300), color = "green", alpha = 0.5)
centers250C = 0.5*(binedges250C[1:] + binedges250C[:-1])
y250C = 2000 * Normal(centers250C, 250, np.sqrt(250))
xbar250C = np.zeros(2)
ybar250C = np.zeros(2)
xbar250C[0] = 250 - np.sqrt(250)
xbar250C[1] = 250 + np.sqrt(250)
ybar250C = 2000*Normal(xbar250C, 250, np.sqrt(250))
plt.plot(xbar250C, ybar250C, color= "red", alpha = 1.0, lw =5)
plt.plot(centers250C, y250C, alpha = 1.0, color = "red", lw =5)
prob1percent250C = 250 + np.sqrt(250) * ndtri(0.01)
prob99percent250C = 250 + np.sqrt(250) * ndtri(0.99)
y1percent250C = 2000*Normal(prob1percent250C, 250, np.sqrt(250))
y99percent250C = 2000*Normal(prob99percent250C, 250, np.sqrt(250))
plt.annotate('One percent', xycoords="data", textcoords='offset points', arrowprops=dict(facecolor='black', arrowstyle="->"), xytext =(-50,50), xy = (prob1percent250C, y1percent250C))
plt.annotate('99 percent', xycoords="data", textcoords='offset points', arrowprops=dict(facecolor='black', arrowstyle="->"), xytext =(-10,50), xy = (prob99percent250C, y99percent250C))
plt.annotate('One Sigma', xycoords="data", textcoords='offset points', xy = (xbar250C[0],ybar250C[0]), xytext = (-120,30), arrowprops=dict(facecolor='black', arrowstyle="->"))
plt.annotate('One Sigma', xycoords="data", textcoords='offset points', xy = (xbar250C[1],ybar250C[1]), xytext = (30,30), arrowprops=dict(facecolor='black', arrowstyle="->"))
plt.title("250 Events Counted 400 times. Larger Bins(5).", backgroundcolor = "white")
#second set of random numbers
Events250C = np.random.rand(100000)
Counters250C = np.zeros(400)
for value in Events250C:
Place = int(400 * value)
Counters250C[Place] +=1
####Plot- The result of counting 250 events 400 times as well as the errors, one sigma, one percent, 99 percent with separate seed. The number of bins is decreased to 20. This is identical to plotting described above. Refer to the documentation of the above section
###See figure - Count 250 Events 400 times Larger Bins. Another Seed
plt.figure("Count 250 Events 400 times Larger Bins. Another Seed")
Numcounts250C, binedges250C, patches = plt.hist(Counters250C, bins = 20, range = (200,300), color = "green", alpha = 0.5)
centers250C = 0.5*(binedges250C[1:] + binedges250C[:-1])
y250C = 2000 * Normal(centers250C, 250, np.sqrt(250))
xbar250C = np.zeros(2)
ybar250C = np.zeros(2)
xbar250C[0] = 250 - np.sqrt(250)
xbar250C[1] = 250 + np.sqrt(250)
ybar250C = 2000*Normal(xbar250C, 250, np.sqrt(250))
plt.plot(xbar250C, ybar250C, color= "red", alpha = 1.0, lw =5)
plt.plot(centers250C, y250C, alpha = 1.0, color = "red", lw =5)
prob1percent250C = 250 + np.sqrt(250) * ndtri(0.01)
prob99percent250C = 250 + np.sqrt(250) * ndtri(0.99)
y1percent250C = 2000*Normal(prob1percent250C, 250, np.sqrt(250))
y99percent250C = 2000*Normal(prob99percent250C, 250, np.sqrt(250))
plt.annotate('One percent', xycoords="data", textcoords='offset points', arrowprops=dict(facecolor='black', arrowstyle="->"), xytext =(-50,50), xy = (prob1percent250C, y1percent250C))
plt.annotate('99 percent', xycoords="data", textcoords='offset points', arrowprops=dict(facecolor='black', arrowstyle="->"), xytext =(-10,50), xy = (prob99percent250C, y99percent250C))
plt.annotate('One Sigma', xycoords="data", textcoords='offset points', xy = (xbar250C[0],ybar250C[0]), xytext = (-120,30), arrowprops=dict(facecolor='black', arrowstyle="->"))
plt.annotate('One Sigma', xycoords="data", textcoords='offset points', xy = (xbar250C[1],ybar250C[1]), xytext = (30,30), arrowprops=dict(facecolor='black', arrowstyle="->"))
plt.title("250 Events Counted 400 times. Larger Bins(5). Another Seed", backgroundcolor = "white")
'''The above experiment is repeated: 250 events are counted 4000 times, using 2 different seeds for the random numbers. The number of bins is 100.'''
###Random set 1
Events250B = np.random.rand(1000000)
Counters250B = np.zeros(4000)
for value in Events250B:
Place = int(4000 * value)
Counters250B[Place] +=1
####Plot- The result of counting 250 events 4000 times as well as the errors, one sigma, one percent, 99 percent with separate seed. This is identical to plotting described above. Refer to the documentation of the above section
###See figure - Count 250 Events 4000 times
plt.figure("Count 250 Events 4000 times")
Numcounts250B, binedges250B, patches = plt.hist(Counters250B, bins = 100, range = (200,300), color = "green", alpha = 0.5)
centers250B = 0.5*(binedges250B[1:] + binedges250B[:-1])
y250B = 4000 * Normal(centers250B, 250, np.sqrt(250))
xbar250B = np.zeros(2)
ybar250B = np.zeros(2)
xbar250B[0] = 250 - np.sqrt(250)
xbar250B[1] = 250 + np.sqrt(250)
ybar250B = 4000*Normal(xbar250B, 250, np.sqrt(250))
plt.plot(xbar250B, ybar250B, color= "red", alpha = 1.0, lw =5)
plt.plot(centers250B, y250B, alpha = 1.0, color = "red", lw =5)
prob1percent250B = 250 + np.sqrt(250) * ndtri(0.01)
prob99percent250B = 250 + np.sqrt(250) * ndtri(0.99)
y1percent250B = 4000*Normal(prob1percent250B, 250, np.sqrt(250))
y99percent250B = 4000*Normal(prob99percent250B, 250, np.sqrt(250))
plt.annotate('One percent', xycoords="data", textcoords='offset points', arrowprops=dict(facecolor='black', arrowstyle="->"), xytext =(-50,50), xy = (prob1percent250B, y1percent250B))
plt.annotate('99 percent', xycoords="data", textcoords='offset points', arrowprops=dict(facecolor='black', arrowstyle="->"), xytext =(-10,50), xy = (prob99percent250B, y99percent250B))
plt.annotate('One Sigma', xycoords="data", textcoords='offset points', xy = (xbar250B[0],ybar250B[0]), xytext = (-120,30), arrowprops=dict(facecolor='black', arrowstyle="->"))
plt.annotate('One Sigma', xycoords="data", textcoords='offset points', xy = (xbar250B[1],ybar250B[1]), xytext = (30,30), arrowprops=dict(facecolor='black', arrowstyle="->"))
plt.title("250 Events Counted 4000 times.", backgroundcolor = "white")
###Second random set
Events250B = np.random.rand(1000000)
Counters250B = np.zeros(4000)
for value in Events250B:
Place = int(4000 * value)
Counters250B[Place] +=1
####Plot- The result of counting 250 events 4000 times as well as the errors, one sigma, one percent, 99 percent with separate seed. This is identical to plotting described above. Refer to the documentation of the above section
###See figure - Count 250 Events 4000 times Another Seed
plt.figure("Count 250 Events 4000 times Another Seed")
Numcounts250B, binedges250B, patches = plt.hist(Counters250B, bins = 100, range = (200,300), color = "green", alpha = 0.5)
centers250B = 0.5*(binedges250B[1:] + binedges250B[:-1])
y250B = 4000 * Normal(centers250B, 250, np.sqrt(250))
xbar250B = np.zeros(2)
ybar250B = np.zeros(2)
xbar250B[0] = 250 - np.sqrt(250)
xbar250B[1] = 250 + np.sqrt(250)
ybar250B = 4000*Normal(xbar250B, 250, np.sqrt(250))
plt.plot(xbar250B, ybar250B, color= "red", alpha = 1.0, lw =5)
plt.plot(centers250B, y250B, alpha = 1.0, color = "red", lw =5)
prob1percent250B = 250 + np.sqrt(250) * ndtri(0.01)
prob99percent250B = 250 + np.sqrt(250) * ndtri(0.99)
y1percent250B = 4000*Normal(prob1percent250B, 250, np.sqrt(250))
y99percent250B = 4000*Normal(prob99percent250B, 250, np.sqrt(250))
plt.annotate('One percent', xycoords="data", textcoords='offset points', arrowprops=dict(facecolor='black', arrowstyle="->"), xytext =(-50,50), xy = (prob1percent250B, y1percent250B))
plt.annotate('99 percent', xycoords="data", textcoords='offset points', arrowprops=dict(facecolor='black', arrowstyle="->"), xytext =(-10,50), xy = (prob99percent250B, y99percent250B))
plt.annotate('One Sigma', xycoords="data", textcoords='offset points', xy = (xbar250B[0],ybar250B[0]), xytext = (-120,30), arrowprops=dict(facecolor='black', arrowstyle="->"))
plt.annotate('One Sigma', xycoords="data", textcoords='offset points', xy = (xbar250B[1],ybar250B[1]), xytext = (30,30), arrowprops=dict(facecolor='black', arrowstyle="->"))
plt.title("250 Events Counted 4000 times. Another Seed", backgroundcolor = "white")
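### Added illustrative check (not part of the original experiments): for a
### Poisson count with mean N, the Gaussian approximation used above gives
### sigma = sqrt(N), so about 98% of the 400 (or 4000) repetitions should fall
### between the 1% and 99% quantiles marked in the figures. The numbers below
### assume N = 250, matching the plots.
from scipy.special import ndtri as _ndtri_check
N_check = 250
sigma_check = np.sqrt(N_check)
print("Expected one-sigma interval:", N_check - sigma_check, "to", N_check + sigma_check)
print("Expected 1%/99% quantiles:", N_check + sigma_check * _ndtri_check(0.01),
      N_check + sigma_check * _ndtri_check(0.99))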
# For Agg backend
pylab.show()
| apache-2.0 |
evgchz/scikit-learn | examples/neighbors/plot_digits_kde_sampling.py | 251 | 2022 | """
=========================
Kernel Density Estimation
=========================
This example shows how kernel density estimation (KDE), a powerful
non-parametric density estimation technique, can be used to learn
a generative model for a dataset. With this generative model in place,
new samples can be drawn. These new samples reflect the underlying model
of the data.
"""
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import load_digits
from sklearn.neighbors import KernelDensity
from sklearn.decomposition import PCA
from sklearn.grid_search import GridSearchCV
# load the data
digits = load_digits()
data = digits.data
# project the 64-dimensional data to a lower dimension
pca = PCA(n_components=15, whiten=False)
data = pca.fit_transform(digits.data)
# use grid search cross-validation to optimize the bandwidth
params = {'bandwidth': np.logspace(-1, 1, 20)}
grid = GridSearchCV(KernelDensity(), params)
grid.fit(data)
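# Note (added): with no explicit `scoring` argument, GridSearchCV ranks each
# candidate bandwidth by KernelDensity.score on the held-out folds, i.e. by the
# total log-likelihood of the validation data under the fitted density.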
print("best bandwidth: {0}".format(grid.best_estimator_.bandwidth))
# use the best estimator to compute the kernel density estimate
kde = grid.best_estimator_
# sample 44 new points from the data
new_data = kde.sample(44, random_state=0)
new_data = pca.inverse_transform(new_data)
# turn data into a 4x11 grid
new_data = new_data.reshape((4, 11, -1))
real_data = digits.data[:44].reshape((4, 11, -1))
# plot real digits and resampled digits
fig, ax = plt.subplots(9, 11, subplot_kw=dict(xticks=[], yticks=[]))
for j in range(11):
ax[4, j].set_visible(False)
for i in range(4):
im = ax[i, j].imshow(real_data[i, j].reshape((8, 8)),
cmap=plt.cm.binary, interpolation='nearest')
im.set_clim(0, 16)
im = ax[i + 5, j].imshow(new_data[i, j].reshape((8, 8)),
cmap=plt.cm.binary, interpolation='nearest')
im.set_clim(0, 16)
ax[0, 5].set_title('Selection from the input data')
ax[5, 5].set_title('"New" digits drawn from the kernel density model')
plt.show()
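# ---------------------------------------------------------------------------
# Added aside (not part of the original example): for a Gaussian kernel,
# drawing from the fitted KDE is equivalent to picking one training point at
# random and adding isotropic Gaussian noise with standard deviation equal to
# the bandwidth. The short sketch below reuses the `kde` and `data` variables
# defined above to draw a few samples "by hand" in the PCA space.
rng = np.random.RandomState(0)
idx = rng.randint(0, data.shape[0], size=5)
manual_samples = data[idx] + rng.normal(scale=kde.bandwidth, size=(5, data.shape[1]))
print("manually drawn KDE samples (PCA space):", manual_samples.shape)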
| bsd-3-clause |
anooptoffy/Masters-Course-Work-Repository | Semester_2/Machine Perception/Assignment2/question_3_Approach2.py | 2 | 6126 | import numpy as np
from sklearn.cluster import KMeans
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
import sys, os
import cv2
# The data is organized as follows:
# data
# ├── test
# │ ├── bike_03.jpg
# │ ├── bike_69.jpg
# │ ├── bike_70.jpg
# │ ├── bike_75.jpg
# │ ├── horse_01.jpg
# │ ├── horse_02.jpg
# │ ├── ...
# │ ├── ...
# └── train
# ├── bike
# │ ├── bike_01.jpg
# │ ├── bike_02.jpg
# │ ├── bike_04.jpg
# │ ├── bike_05.jpg
# │ ├── bike_85.jpg
# │ ├── ...
# │ └── ...
# └── horse
# ├── horse_04.jpg
# ├── horse_05.jpg
# ├── horse_16.jpg
# ├── horse_18.jpg
# ├── ...
# └── ...
# Train folder contains the training organized as folders with each folder representing a class.
# Test folder contains images from all classes.
class BagOfWords(object):
"""
Implements Bag Of Words image classification
Uses:
* SURF : To detect keypoints and extract descriptors
* KMeans : To cluster descriptors and form the vocabulary
* Numpy.bincount : To create feature histogram
* LogisticRegression : To classify the labeled feature historgrams
"""
def __init__(self, n_clusters=15):
"""Initialize class with sane defaults"""
self.train_label = []
self.train_desc = np.array([])
self.train_desc_size = []
self.n_clusters = n_clusters
self.features = np.array([])
self.label_map = {}
self.rev_label_map = {}
self.surf = cv2.xfeatures2d.SURF_create(400)
self.surf.setExtended(True)
self.km = KMeans(n_clusters=self.n_clusters, random_state=0, n_jobs=-1)
self.logistic = LogisticRegression(C=1e5)
def extract_info(self, filepath):
"""Extract keypoints and descriptors using SURF"""
file = os.path.basename(filepath)
print('Info: Running extractor on image {}'.format(file))
label = self.label_map[file.split('_')[0]]
image = cv2.imread(filepath)
kp, desc = self.surf.detectAndCompute(image, None)
return label, desc, desc.shape[0]
def surf_extract(self, train_path):
"""Create list of descriptors for all training images"""
folders = os.listdir(train_path)
for folderpath in folders:
print('Info: Inside {}'.format(folderpath))
files = os.listdir(os.path.join(train_path, folderpath))
for f in files:
print('Info: Process {}'.format(f))
lbl, desc, dnum = self.extract_info(os.path.join(train_path, os.path.join(folderpath, f)))
self.train_label.append(lbl)
self.train_desc_size.append(dnum)
if self.train_desc.size == 0:
self.train_desc = desc
else:
self.train_desc = np.concatenate((self.train_desc, desc), axis=0)
def create_vocabulary(self):
"""Create vocabulary by running K-Means on list of training descriptors"""
print('Info: Running K-Means')
self.km.fit(self.train_desc)
labels = self.km.labels_
print('Info: Generating Feature Histogram')
num = len(self.train_desc_size)
# create feature histogram for each image by separating descriptors list
# into chunks for corresponding images
chunk_end = 0
for i in range(num):
chunk_start = chunk_end
chunk_end = chunk_end + self.train_desc_size[i]
chunk = labels[chunk_start:chunk_end]
feature_hist = np.bincount(chunk)
if self.features.size == 0:
self.features = feature_hist
else:
self.features = np.vstack((self.features, np.array(feature_hist)))
def train(self, root):
"""Reads images, creates vocabulary and trains the classifier"""
path_train = os.path.join(root, 'train')
train_classes = os.listdir(path_train)
print('Info: Starting')
self.label_map = {lbl: idx for idx, lbl in enumerate(train_classes)}
self.rev_label_map = {idx: lbl for idx, lbl in enumerate(train_classes)}
print(self.label_map)
self.surf_extract(path_train)
print('Total training files : {}'.format(len(self.train_label)))
print('Total training features : {}'.format(self.train_desc.shape[0]))
print('Generating Vocabulary')
self.create_vocabulary()
print('Info: Vocabulary size {}'.format(self.features.shape[1]))
print('Info: Training classifier')
self.logistic.fit(self.features, self.train_label)
def predict(self, path):
"""Extract descriptors from test image, create feature historgram and predicts the class"""
print('Info: Starting prediction')
print('Prediction - Actual')
labels_true = []
labels_pred = []
files = os.listdir(path)
for f in files:
true_lbl = self.label_map[f.split('_')[0]]
labels_true.append(true_lbl)
image = cv2.imread(os.path.join(path, f))
kp, desc = self.surf.detectAndCompute(image, None)
labels = self.km.predict(desc)
features_test = np.bincount(labels)
pred_lbl = self.logistic.predict([features_test])
print('{} - {}'.format(self.rev_label_map[pred_lbl[0]], self.rev_label_map[true_lbl]))
labels_pred.append(pred_lbl[0])
accuracy = accuracy_score(labels_true, labels_pred)
print('Accuracy : {}'.format(accuracy))
print('Total sample: {}'.format(len(labels_pred)))
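# ---------------------------------------------------------------------------
# Clarifying note (an addition, not part of the original class): np.bincount
# without a minlength argument only returns as many bins as the largest
# cluster label that actually occurs, so feature histograms built this way can
# differ in length between images. A common remedy -- sketched here as a
# hypothetical helper, not something the class above uses -- is to pass
# minlength=n_clusters so every histogram has a fixed number of bins.
def _fixed_length_histogram(labels, n_clusters=15):
    """Illustrative only: a histogram that always has `n_clusters` bins."""
    return np.bincount(np.asarray(labels), minlength=n_clusters)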
if __name__ == "__main__":
path_root = os.path.join(sys.path[0], 'data')
bow = BagOfWords()
print('Info: Data root - {}'.format(path_root))
bow.train(path_root)
bow.predict(os.path.join(path_root, 'test'))
| mit |
michigraber/scikit-learn | examples/decomposition/plot_ica_blind_source_separation.py | 349 | 2228 | """
=====================================
Blind source separation using FastICA
=====================================
An example of estimating sources from noisy data.
:ref:`ICA` is used to estimate sources given noisy measurements.
Imagine 3 instruments playing simultaneously and 3 microphones
recording the mixed signals. ICA is used to recover the sources
i.e. what is played by each instrument. Importantly, PCA fails
at recovering our `instruments` since the related signals reflect
non-Gaussian processes.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from scipy import signal
from sklearn.decomposition import FastICA, PCA
###############################################################################
# Generate sample data
np.random.seed(0)
n_samples = 2000
time = np.linspace(0, 8, n_samples)
s1 = np.sin(2 * time) # Signal 1 : sinusoidal signal
s2 = np.sign(np.sin(3 * time)) # Signal 2 : square signal
s3 = signal.sawtooth(2 * np.pi * time) # Signal 3: saw tooth signal
S = np.c_[s1, s2, s3]
S += 0.2 * np.random.normal(size=S.shape) # Add noise
S /= S.std(axis=0) # Standardize data
# Mix data
A = np.array([[1, 1, 1], [0.5, 2, 1.0], [1.5, 1.0, 2.0]]) # Mixing matrix
X = np.dot(S, A.T) # Generate observations
# Compute ICA
ica = FastICA(n_components=3)
S_ = ica.fit_transform(X) # Reconstruct signals
A_ = ica.mixing_ # Get estimated mixing matrix
# We can `prove` that the ICA model applies by reverting the unmixing.
assert np.allclose(X, np.dot(S_, A_.T) + ica.mean_)
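# Added illustrative diagnostic (not part of the original example): up to sign
# and permutation, each ICA component should line up with exactly one true
# source, so the absolute correlation matrix below should be close to a
# permutation matrix.
corr_check = np.abs(np.corrcoef(S.T, S_.T)[:3, 3:])
print("abs. correlations, true sources vs. ICA estimates:\n", corr_check.round(2))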
# For comparison, compute PCA
pca = PCA(n_components=3)
H = pca.fit_transform(X) # Reconstruct signals based on orthogonal components
###############################################################################
# Plot results
plt.figure()
models = [X, S, S_, H]
names = ['Observations (mixed signal)',
'True Sources',
'ICA recovered signals',
'PCA recovered signals']
colors = ['red', 'steelblue', 'orange']
for ii, (model, name) in enumerate(zip(models, names), 1):
plt.subplot(4, 1, ii)
plt.title(name)
for sig, color in zip(model.T, colors):
plt.plot(sig, color=color)
plt.subplots_adjust(0.09, 0.04, 0.94, 0.94, 0.26, 0.46)
plt.show()
| bsd-3-clause |
phobson/statsmodels | statsmodels/examples/ex_kernel_regression_censored2.py | 34 | 1175 | # -*- coding: utf-8 -*-
"""script to check KernelCensoredReg based on test file
Created on Thu Jan 03 20:20:47 2013
Author: Josef Perktold
"""
from __future__ import print_function
import numpy as np
import statsmodels.nonparametric.api as nparam
if __name__ == '__main__':
#example from test file
nobs = 200
np.random.seed(1234)
C1 = np.random.normal(size=(nobs, ))
C2 = np.random.normal(2, 1, size=(nobs, ))
noise = 0.1 * np.random.normal(size=(nobs, ))
y = 0.3 +1.2 * C1 - 0.9 * C2 + noise
y[y>0] = 0 # censor the data
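    # Added note: every draw above zero is replaced by zero, so the response is
    # only observed exactly when it is non-positive (it is censored at 0); the
    # censor_val=0 argument passed to KernelCensoredReg below marks that
    # censoring value for the estimator.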
model = nparam.KernelCensoredReg(endog=[y], exog=[C1, C2],
reg_type='ll', var_type='cc',
bw='cv_ls', censor_val=0)
sm_mean, sm_mfx = model.fit()
import matplotlib.pyplot as plt
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
sortidx = np.argsort(y)
ax.plot(y[sortidx], 'o', alpha=0.5)
#ax.plot(x, y_cens, 'o', alpha=0.5)
#ax.plot(x, y_true, lw=2, label='DGP mean')
ax.plot(sm_mean[sortidx], lw=2, label='model 0 mean')
#ax.plot(x, mean2, lw=2, label='model 2 mean')
ax.legend()
plt.show()
| bsd-3-clause |
chugunovyar/factoryForBuild | env/lib/python2.7/site-packages/mpl_toolkits/axes_grid1/axes_grid.py | 10 | 29410 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import matplotlib.cbook as cbook
import matplotlib.axes as maxes
#import matplotlib.colorbar as mcolorbar
from . import colorbar as mcolorbar
import matplotlib as mpl
import matplotlib.patches as mpatches
import matplotlib.lines as mlines
import matplotlib.ticker as ticker
from matplotlib.gridspec import SubplotSpec
from .axes_divider import Size, SubplotDivider, LocatableAxes, Divider
def _extend_axes_pad(value):
# Check whether a list/tuple/array or scalar has been passed
ret = value
if not hasattr(ret, "__getitem__"):
ret = (value, value)
return ret
def _tick_only(ax, bottom_on, left_on):
bottom_off = not bottom_on
left_off = not left_on
# [l.set_visible(bottom_off) for l in ax.get_xticklabels()]
# [l.set_visible(left_off) for l in ax.get_yticklabels()]
# ax.xaxis.label.set_visible(bottom_off)
# ax.yaxis.label.set_visible(left_off)
ax.axis["bottom"].toggle(ticklabels=bottom_off, label=bottom_off)
ax.axis["left"].toggle(ticklabels=left_off, label=left_off)
class Colorbar(mcolorbar.Colorbar):
def _config_axes_deprecated(self, X, Y):
'''
Make an axes patch and outline.
'''
ax = self.ax
ax.set_frame_on(False)
ax.set_navigate(False)
xy = self._outline(X, Y)
ax.update_datalim(xy)
ax.set_xlim(*ax.dataLim.intervalx)
ax.set_ylim(*ax.dataLim.intervaly)
self.outline = mlines.Line2D(xy[:, 0], xy[:, 1],
color=mpl.rcParams['axes.edgecolor'],
linewidth=mpl.rcParams['axes.linewidth'])
ax.add_artist(self.outline)
self.outline.set_clip_box(None)
self.outline.set_clip_path(None)
c = mpl.rcParams['axes.facecolor']
self.patch = mpatches.Polygon(xy, edgecolor=c,
facecolor=c,
linewidth=0.01,
zorder=-1)
ax.add_artist(self.patch)
ticks, ticklabels, offset_string = self._ticker()
if self.orientation == 'vertical':
ax.set_yticks(ticks)
ax.set_yticklabels(ticklabels)
ax.yaxis.get_major_formatter().set_offset_string(offset_string)
else:
ax.set_xticks(ticks)
ax.set_xticklabels(ticklabels)
ax.xaxis.get_major_formatter().set_offset_string(offset_string)
class CbarAxesBase(object):
def colorbar(self, mappable, **kwargs):
locator = kwargs.pop("locator", None)
if locator is None:
if "ticks" not in kwargs:
kwargs["ticks"] = ticker.MaxNLocator(5)
if locator is not None:
if "ticks" in kwargs:
raise ValueError("Either *locator* or *ticks* need" +
" to be given, not both")
else:
kwargs["ticks"] = locator
self._hold = True
if self.orientation in ["top", "bottom"]:
orientation = "horizontal"
else:
orientation = "vertical"
cb = Colorbar(self, mappable, orientation=orientation, **kwargs)
self._config_axes()
def on_changed(m):
#print 'calling on changed', m.get_cmap().name
cb.set_cmap(m.get_cmap())
cb.set_clim(m.get_clim())
cb.update_bruteforce(m)
self.cbid = mappable.callbacksSM.connect('changed', on_changed)
mappable.colorbar = cb
self.locator = cb.cbar_axis.get_major_locator()
return cb
def _config_axes(self):
'''
Make an axes patch and outline.
'''
ax = self
ax.set_navigate(False)
ax.axis[:].toggle(all=False)
b = self._default_label_on
ax.axis[self.orientation].toggle(all=b)
# for axis in ax.axis.values():
# axis.major_ticks.set_visible(False)
# axis.minor_ticks.set_visible(False)
# axis.major_ticklabels.set_visible(False)
# axis.minor_ticklabels.set_visible(False)
# axis.label.set_visible(False)
# axis = ax.axis[self.orientation]
# axis.major_ticks.set_visible(True)
# axis.minor_ticks.set_visible(True)
#axis.major_ticklabels.set_size(
# int(axis.major_ticklabels.get_size()*.9))
#axis.major_tick_pad = 3
# axis.major_ticklabels.set_visible(b)
# axis.minor_ticklabels.set_visible(b)
# axis.label.set_visible(b)
def toggle_label(self, b):
self._default_label_on = b
axis = self.axis[self.orientation]
axis.toggle(ticklabels=b, label=b)
#axis.major_ticklabels.set_visible(b)
#axis.minor_ticklabels.set_visible(b)
#axis.label.set_visible(b)
class CbarAxes(CbarAxesBase, LocatableAxes):
def __init__(self, *kl, **kwargs):
orientation = kwargs.pop("orientation", None)
if orientation is None:
raise ValueError("orientation must be specified")
self.orientation = orientation
self._default_label_on = True
self.locator = None
super(LocatableAxes, self).__init__(*kl, **kwargs)
def cla(self):
super(LocatableAxes, self).cla()
self._config_axes()
class Grid(object):
"""
A class that creates a grid of Axes. In matplotlib, the axes
location (and size) is specified in the normalized figure
coordinates. This may not be ideal for images that need to be
displayed with a given aspect ratio. For example, displaying
images of the same size with some fixed padding between them cannot
be easily done in matplotlib. AxesGrid is used in such a case.
"""
_defaultLocatableAxesClass = LocatableAxes
def __init__(self, fig,
rect,
nrows_ncols,
ngrids=None,
direction="row",
axes_pad=0.02,
add_all=True,
share_all=False,
share_x=True,
share_y=True,
#aspect=True,
label_mode="L",
axes_class=None,
):
"""
Build an :class:`Grid` instance with a grid nrows*ncols
:class:`~matplotlib.axes.Axes` in
:class:`~matplotlib.figure.Figure` *fig* with
*rect=[left, bottom, width, height]* (in
:class:`~matplotlib.figure.Figure` coordinates) or
the subplot position code (e.g., "121").
Optional keyword arguments:
================ ======== =========================================
Keyword Default Description
================ ======== =========================================
direction "row" [ "row" | "column" ]
axes_pad 0.02 float| pad between axes given in inches
or tuple-like of floats,
(horizontal padding, vertical padding)
add_all True [ True | False ]
share_all False [ True | False ]
share_x True [ True | False ]
share_y True [ True | False ]
label_mode "L" [ "L" | "1" | "all" ]
axes_class None a type object which must be a subclass
of :class:`~matplotlib.axes.Axes`
================ ======== =========================================
"""
self._nrows, self._ncols = nrows_ncols
if ngrids is None:
ngrids = self._nrows * self._ncols
else:
if (ngrids > self._nrows * self._ncols) or (ngrids <= 0):
raise Exception("")
self.ngrids = ngrids
self._init_axes_pad(axes_pad)
if direction not in ["column", "row"]:
raise Exception("")
self._direction = direction
if axes_class is None:
axes_class = self._defaultLocatableAxesClass
axes_class_args = {}
else:
if (type(axes_class)) == type and \
issubclass(axes_class,
self._defaultLocatableAxesClass.Axes):
axes_class_args = {}
else:
axes_class, axes_class_args = axes_class
self.axes_all = []
self.axes_column = [[] for _ in range(self._ncols)]
self.axes_row = [[] for _ in range(self._nrows)]
h = []
v = []
if cbook.is_string_like(rect) or cbook.is_numlike(rect):
self._divider = SubplotDivider(fig, rect, horizontal=h, vertical=v,
aspect=False)
elif isinstance(rect, SubplotSpec):
self._divider = SubplotDivider(fig, rect, horizontal=h, vertical=v,
aspect=False)
elif len(rect) == 3:
kw = dict(horizontal=h, vertical=v, aspect=False)
self._divider = SubplotDivider(fig, *rect, **kw)
elif len(rect) == 4:
self._divider = Divider(fig, rect, horizontal=h, vertical=v,
aspect=False)
else:
raise Exception("")
rect = self._divider.get_position()
# reference axes
self._column_refax = [None for _ in range(self._ncols)]
self._row_refax = [None for _ in range(self._nrows)]
self._refax = None
for i in range(self.ngrids):
col, row = self._get_col_row(i)
if share_all:
sharex = self._refax
sharey = self._refax
else:
if share_x:
sharex = self._column_refax[col]
else:
sharex = None
if share_y:
sharey = self._row_refax[row]
else:
sharey = None
ax = axes_class(fig, rect, sharex=sharex, sharey=sharey,
**axes_class_args)
if share_all:
if self._refax is None:
self._refax = ax
else:
if sharex is None:
self._column_refax[col] = ax
if sharey is None:
self._row_refax[row] = ax
self.axes_all.append(ax)
self.axes_column[col].append(ax)
self.axes_row[row].append(ax)
self.axes_llc = self.axes_column[0][-1]
self._update_locators()
if add_all:
for ax in self.axes_all:
fig.add_axes(ax)
self.set_label_mode(label_mode)
def _init_axes_pad(self, axes_pad):
axes_pad = _extend_axes_pad(axes_pad)
self._axes_pad = axes_pad
self._horiz_pad_size = Size.Fixed(axes_pad[0])
self._vert_pad_size = Size.Fixed(axes_pad[1])
def _update_locators(self):
h = []
h_ax_pos = []
for _ in self._column_refax:
#if h: h.append(Size.Fixed(self._axes_pad))
if h:
h.append(self._horiz_pad_size)
h_ax_pos.append(len(h))
sz = Size.Scaled(1)
h.append(sz)
v = []
v_ax_pos = []
for _ in self._row_refax[::-1]:
#if v: v.append(Size.Fixed(self._axes_pad))
if v:
v.append(self._vert_pad_size)
v_ax_pos.append(len(v))
sz = Size.Scaled(1)
v.append(sz)
for i in range(self.ngrids):
col, row = self._get_col_row(i)
locator = self._divider.new_locator(nx=h_ax_pos[col],
ny=v_ax_pos[self._nrows - 1 - row])
self.axes_all[i].set_axes_locator(locator)
self._divider.set_horizontal(h)
self._divider.set_vertical(v)
def _get_col_row(self, n):
if self._direction == "column":
col, row = divmod(n, self._nrows)
else:
row, col = divmod(n, self._ncols)
return col, row
# Good to propagate __len__ if we have __getitem__
def __len__(self):
return len(self.axes_all)
def __getitem__(self, i):
return self.axes_all[i]
def get_geometry(self):
"""
get geometry of the grid. Returns a tuple of two integer,
representing number of rows and number of columns.
"""
return self._nrows, self._ncols
def set_axes_pad(self, axes_pad):
"set axes_pad"
self._axes_pad = axes_pad
# These two lines actually differ from ones in _init_axes_pad
self._horiz_pad_size.fixed_size = axes_pad[0]
self._vert_pad_size.fixed_size = axes_pad[1]
def get_axes_pad(self):
"""
get axes_pad
Returns
-------
tuple
Padding in inches, (horizontal pad, vertical pad)
"""
return self._axes_pad
def set_aspect(self, aspect):
"set aspect"
self._divider.set_aspect(aspect)
def get_aspect(self):
"get aspect"
return self._divider.get_aspect()
def set_label_mode(self, mode):
"set label_mode"
if mode == "all":
for ax in self.axes_all:
_tick_only(ax, False, False)
elif mode == "L":
# left-most axes
for ax in self.axes_column[0][:-1]:
_tick_only(ax, bottom_on=True, left_on=False)
# lower-left axes
ax = self.axes_column[0][-1]
_tick_only(ax, bottom_on=False, left_on=False)
for col in self.axes_column[1:]:
# axes with no labels
for ax in col[:-1]:
_tick_only(ax, bottom_on=True, left_on=True)
# bottom
ax = col[-1]
_tick_only(ax, bottom_on=False, left_on=True)
elif mode == "1":
for ax in self.axes_all:
_tick_only(ax, bottom_on=True, left_on=True)
ax = self.axes_llc
_tick_only(ax, bottom_on=False, left_on=False)
def get_divider(self):
return self._divider
def set_axes_locator(self, locator):
self._divider.set_locator(locator)
def get_axes_locator(self):
return self._divider.get_locator()
def get_vsize_hsize(self):
return self._divider.get_vsize_hsize()
# from axes_size import AddList
# vsize = AddList(self._divider.get_vertical())
# hsize = AddList(self._divider.get_horizontal())
# return vsize, hsize
class ImageGrid(Grid):
"""
A class that creates a grid of Axes. In matplotlib, the axes
location (and size) is specified in the normalized figure
coordinates. This may not be ideal for images that need to be
displayed with a given aspect ratio. For example, displaying
images of the same size with some fixed padding between them cannot
be easily done in matplotlib. ImageGrid is used in such a case.
"""
_defaultCbarAxesClass = CbarAxes
def __init__(self, fig,
rect,
nrows_ncols,
ngrids=None,
direction="row",
axes_pad=0.02,
add_all=True,
share_all=False,
aspect=True,
label_mode="L",
cbar_mode=None,
cbar_location="right",
cbar_pad=None,
cbar_size="5%",
cbar_set_cax=True,
axes_class=None,
):
"""
Build an :class:`ImageGrid` instance with a grid nrows*ncols
:class:`~matplotlib.axes.Axes` in
:class:`~matplotlib.figure.Figure` *fig* with
*rect=[left, bottom, width, height]* (in
:class:`~matplotlib.figure.Figure` coordinates) or
the subplot position code (e.g., "121").
Optional keyword arguments:
================ ======== =========================================
Keyword Default Description
================ ======== =========================================
direction "row" [ "row" | "column" ]
axes_pad 0.02 float| pad between axes given in inches
or tuple-like of floats,
(horizontal padding, vertical padding)
add_all True [ True | False ]
share_all False [ True | False ]
aspect True [ True | False ]
label_mode "L" [ "L" | "1" | "all" ]
cbar_mode None [ "each" | "single" | "edge" ]
cbar_location "right" [ "left" | "right" | "bottom" | "top" ]
cbar_pad None
cbar_size "5%"
cbar_set_cax True [ True | False ]
axes_class None a type object which must be a subclass
of axes_grid's subclass of
:class:`~matplotlib.axes.Axes`
================ ======== =========================================
*cbar_set_cax* : if True, each axes in the grid has a cax
attribute that is bind to associated cbar_axes.
"""
self._nrows, self._ncols = nrows_ncols
if ngrids is None:
ngrids = self._nrows * self._ncols
else:
if (ngrids > self._nrows * self._ncols) or (ngrids <= 0):
raise Exception("")
self.ngrids = ngrids
axes_pad = _extend_axes_pad(axes_pad)
self._axes_pad = axes_pad
self._colorbar_mode = cbar_mode
self._colorbar_location = cbar_location
if cbar_pad is None:
# horizontal or vertical arrangement?
if cbar_location in ("left", "right"):
self._colorbar_pad = axes_pad[0]
else:
self._colorbar_pad = axes_pad[1]
else:
self._colorbar_pad = cbar_pad
self._colorbar_size = cbar_size
self._init_axes_pad(axes_pad)
if direction not in ["column", "row"]:
raise Exception("")
self._direction = direction
if axes_class is None:
axes_class = self._defaultLocatableAxesClass
axes_class_args = {}
else:
if isinstance(axes_class, maxes.Axes):
axes_class_args = {}
else:
axes_class, axes_class_args = axes_class
self.axes_all = []
self.axes_column = [[] for _ in range(self._ncols)]
self.axes_row = [[] for _ in range(self._nrows)]
self.cbar_axes = []
h = []
v = []
if cbook.is_string_like(rect) or cbook.is_numlike(rect):
self._divider = SubplotDivider(fig, rect, horizontal=h, vertical=v,
aspect=aspect)
elif isinstance(rect, SubplotSpec):
self._divider = SubplotDivider(fig, rect, horizontal=h, vertical=v,
aspect=aspect)
elif len(rect) == 3:
kw = dict(horizontal=h, vertical=v, aspect=aspect)
self._divider = SubplotDivider(fig, *rect, **kw)
elif len(rect) == 4:
self._divider = Divider(fig, rect, horizontal=h, vertical=v,
aspect=aspect)
else:
raise Exception("")
rect = self._divider.get_position()
# reference axes
self._column_refax = [None for _ in range(self._ncols)]
self._row_refax = [None for _ in range(self._nrows)]
self._refax = None
for i in range(self.ngrids):
col, row = self._get_col_row(i)
if share_all:
if self.axes_all:
sharex = self.axes_all[0]
sharey = self.axes_all[0]
else:
sharex = None
sharey = None
else:
sharex = self._column_refax[col]
sharey = self._row_refax[row]
ax = axes_class(fig, rect, sharex=sharex, sharey=sharey,
**axes_class_args)
self.axes_all.append(ax)
self.axes_column[col].append(ax)
self.axes_row[row].append(ax)
if share_all:
if self._refax is None:
self._refax = ax
if sharex is None:
self._column_refax[col] = ax
if sharey is None:
self._row_refax[row] = ax
cax = self._defaultCbarAxesClass(fig, rect,
orientation=self._colorbar_location)
self.cbar_axes.append(cax)
self.axes_llc = self.axes_column[0][-1]
self._update_locators()
if add_all:
for ax in self.axes_all+self.cbar_axes:
fig.add_axes(ax)
if cbar_set_cax:
if self._colorbar_mode == "single":
for ax in self.axes_all:
ax.cax = self.cbar_axes[0]
elif self._colorbar_mode == "edge":
for index, ax in enumerate(self.axes_all):
col, row = self._get_col_row(index)
if self._colorbar_location in ("left", "right"):
ax.cax = self.cbar_axes[row]
else:
ax.cax = self.cbar_axes[col]
else:
for ax, cax in zip(self.axes_all, self.cbar_axes):
ax.cax = cax
self.set_label_mode(label_mode)
def _update_locators(self):
h = []
v = []
h_ax_pos = []
h_cb_pos = []
if (self._colorbar_mode == "single" and
self._colorbar_location in ('left', 'bottom')):
if self._colorbar_location == "left":
#sz = Size.Fraction(Size.AxesX(self.axes_llc), self._nrows)
sz = Size.Fraction(self._nrows, Size.AxesX(self.axes_llc))
h.append(Size.from_any(self._colorbar_size, sz))
h.append(Size.from_any(self._colorbar_pad, sz))
locator = self._divider.new_locator(nx=0, ny=0, ny1=-1)
elif self._colorbar_location == "bottom":
#sz = Size.Fraction(Size.AxesY(self.axes_llc), self._ncols)
sz = Size.Fraction(self._ncols, Size.AxesY(self.axes_llc))
v.append(Size.from_any(self._colorbar_size, sz))
v.append(Size.from_any(self._colorbar_pad, sz))
locator = self._divider.new_locator(nx=0, nx1=-1, ny=0)
for i in range(self.ngrids):
self.cbar_axes[i].set_visible(False)
self.cbar_axes[0].set_axes_locator(locator)
self.cbar_axes[0].set_visible(True)
for col, ax in enumerate(self.axes_row[0]):
if h:
h.append(self._horiz_pad_size) # Size.Fixed(self._axes_pad))
if ax:
sz = Size.AxesX(ax, aspect="axes", ref_ax=self.axes_all[0])
else:
sz = Size.AxesX(self.axes_all[0],
aspect="axes", ref_ax=self.axes_all[0])
if (self._colorbar_mode == "each" or
(self._colorbar_mode == 'edge' and
col == 0)) and self._colorbar_location == "left":
h_cb_pos.append(len(h))
h.append(Size.from_any(self._colorbar_size, sz))
h.append(Size.from_any(self._colorbar_pad, sz))
h_ax_pos.append(len(h))
h.append(sz)
if ((self._colorbar_mode == "each" or
(self._colorbar_mode == 'edge' and
col == self._ncols - 1)) and
self._colorbar_location == "right"):
h.append(Size.from_any(self._colorbar_pad, sz))
h_cb_pos.append(len(h))
h.append(Size.from_any(self._colorbar_size, sz))
v_ax_pos = []
v_cb_pos = []
for row, ax in enumerate(self.axes_column[0][::-1]):
if v:
v.append(self._vert_pad_size) # Size.Fixed(self._axes_pad))
if ax:
sz = Size.AxesY(ax, aspect="axes", ref_ax=self.axes_all[0])
else:
sz = Size.AxesY(self.axes_all[0],
aspect="axes", ref_ax=self.axes_all[0])
if (self._colorbar_mode == "each" or
(self._colorbar_mode == 'edge' and
row == 0)) and self._colorbar_location == "bottom":
v_cb_pos.append(len(v))
v.append(Size.from_any(self._colorbar_size, sz))
v.append(Size.from_any(self._colorbar_pad, sz))
v_ax_pos.append(len(v))
v.append(sz)
if ((self._colorbar_mode == "each" or
(self._colorbar_mode == 'edge' and
row == self._nrows - 1)) and
self._colorbar_location == "top"):
v.append(Size.from_any(self._colorbar_pad, sz))
v_cb_pos.append(len(v))
v.append(Size.from_any(self._colorbar_size, sz))
for i in range(self.ngrids):
col, row = self._get_col_row(i)
#locator = self._divider.new_locator(nx=4*col,
# ny=2*(self._nrows - row - 1))
locator = self._divider.new_locator(nx=h_ax_pos[col],
ny=v_ax_pos[self._nrows-1-row])
self.axes_all[i].set_axes_locator(locator)
if self._colorbar_mode == "each":
if self._colorbar_location in ("right", "left"):
locator = self._divider.new_locator(
nx=h_cb_pos[col], ny=v_ax_pos[self._nrows - 1 - row])
elif self._colorbar_location in ("top", "bottom"):
locator = self._divider.new_locator(
nx=h_ax_pos[col], ny=v_cb_pos[self._nrows - 1 - row])
self.cbar_axes[i].set_axes_locator(locator)
elif self._colorbar_mode == 'edge':
if ((self._colorbar_location == 'left' and col == 0) or
(self._colorbar_location == 'right'
and col == self._ncols-1)):
locator = self._divider.new_locator(
nx=h_cb_pos[0], ny=v_ax_pos[self._nrows -1 - row])
self.cbar_axes[row].set_axes_locator(locator)
elif ((self._colorbar_location == 'bottom' and
row == self._nrows - 1) or
(self._colorbar_location == 'top' and row == 0)):
locator = self._divider.new_locator(nx=h_ax_pos[col],
ny=v_cb_pos[0])
self.cbar_axes[col].set_axes_locator(locator)
if self._colorbar_mode == "single":
if self._colorbar_location == "right":
#sz = Size.Fraction(Size.AxesX(self.axes_llc), self._nrows)
sz = Size.Fraction(self._nrows, Size.AxesX(self.axes_llc))
h.append(Size.from_any(self._colorbar_pad, sz))
h.append(Size.from_any(self._colorbar_size, sz))
locator = self._divider.new_locator(nx=-2, ny=0, ny1=-1)
elif self._colorbar_location == "top":
#sz = Size.Fraction(Size.AxesY(self.axes_llc), self._ncols)
sz = Size.Fraction(self._ncols, Size.AxesY(self.axes_llc))
v.append(Size.from_any(self._colorbar_pad, sz))
v.append(Size.from_any(self._colorbar_size, sz))
locator = self._divider.new_locator(nx=0, nx1=-1, ny=-2)
if self._colorbar_location in ("right", "top"):
for i in range(self.ngrids):
self.cbar_axes[i].set_visible(False)
self.cbar_axes[0].set_axes_locator(locator)
self.cbar_axes[0].set_visible(True)
elif self._colorbar_mode == "each":
for i in range(self.ngrids):
self.cbar_axes[i].set_visible(True)
elif self._colorbar_mode == "edge":
if self._colorbar_location in ('right', 'left'):
count = self._nrows
else:
count = self._ncols
for i in range(count):
self.cbar_axes[i].set_visible(True)
for j in range(i + 1, self.ngrids):
self.cbar_axes[j].set_visible(False)
else:
for i in range(self.ngrids):
self.cbar_axes[i].set_visible(False)
self.cbar_axes[i].set_position([1., 1., 0.001, 0.001],
which="active")
self._divider.set_horizontal(h)
self._divider.set_vertical(v)
AxesGrid = ImageGrid
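if __name__ == "__main__":
    # Minimal usage sketch added for illustration; it is not part of the
    # original module and the layout numbers are arbitrary assumptions.
    import numpy as np
    import matplotlib.pyplot as plt
    fig = plt.figure()
    grid = ImageGrid(fig, 111, nrows_ncols=(2, 2), axes_pad=0.1,
                     cbar_mode="single", cbar_location="right")
    for ax in grid:
        im = ax.imshow(np.random.rand(8, 8), interpolation="nearest")
    grid.cbar_axes[0].colorbar(im)
    plt.show()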
| gpl-3.0 |
Neuroglycerin/neukrill-net-tools | neukrill_net/parallel_dataset.py | 1 | 8945 | """
Dataset class that wraps the dataset class found in
image_directory_dataset to support models with branched
input layers; allowing different versions of images as
input to those layers.
Based on dev work in the Interactive Pylearn2 notebook.
"""
__authors__ = "Gavin Gray"
__copyright__ = "Copyright 2015 - University of Edinburgh"
__credits__ = ["Gavin Gray"]
__license__ = "MIT"
__maintainer__ = "Gavin Gray"
__email__ = "[email protected]"
import numpy as np
import sklearn.externals
import neukrill_net.image_directory_dataset
import neukrill_net.utils
class ParallelIterator(neukrill_net.image_directory_dataset.FlyIterator):
"""
A simple version of FlyIterator that is able to deal with multiple
images being returned by the processing function.
"""
def next(self):
# check if we reached the end yet
if self.final_iteration:
raise StopIteration
# allocate array
if len(self.final_shape) == 2:
Xbatch1,Xbatch2 = [np.array(batch).reshape(
self.batch_size, self.final_shape[0], self.final_shape[1], 1)
for batch in zip(*self.result.get(timeout=10.0))]
elif len(self.final_shape) == 3:
Xbatch1,Xbatch2 = [np.array(batch)
for batch in zip(*self.result.get(timeout=10.0))]
# make sure they're float32
Xbatch1 = Xbatch1.astype(np.float32)
Xbatch2 = Xbatch2.astype(np.float32)
# get y if we're training
if self.train_or_predict == "train":
ybatch = self.dataset.y[self.batch_indices,:].astype(np.float32)
# start processing next batch
if len(self.indices) >= self.batch_size:
self.batch_indices = [self.indices.pop(0)
for i in range(self.batch_size)]
self.result = self.dataset.pool.map_async(self.dataset.fn,
[self.dataset.X[i] for i in self.batch_indices])
else:
self.final_iteration += 1
# if training return X and y, otherwise
# we're testing so return just X
if self.train_or_predict == "train":
return Xbatch1,Xbatch2,ybatch
elif self.train_or_predict == "test":
return Xbatch1,Xbatch2
else:
raise ValueError("Invalid option for train_or_predict:"
" {0}".format(self.train_or_predict))
class ParallelDataset(neukrill_net.image_directory_dataset.ListDataset):
"""
The only difference here is that it uses the above ParallelIterator
instead of the existing iterator.
"""
def iterator(self, mode=None, batch_size=None, num_batches=None, rng=None,
data_specs=None, return_tuple=False):
"""
Returns iterator object with standard Pythonic interface; iterates
over the dataset over batches, popping off batches from a shuffled
list of indices.
Inputs:
- mode: 'sequential' or 'shuffled_sequential'.
- batch_size: required, size of the minibatches produced.
- num_batches: supply if you want, the dataset will make as many
as it can if you don't.
- rng: not used, as above.
- data_specs: not used, as above
- return_tuple: not used, as above
Outputs:
- instance of ParallelIterator, see above.
"""
if not num_batches:
# guess that we want to use all of them
num_batches = int(len(self.X)/batch_size)
iterator = ParallelIterator(dataset=self, batch_size=batch_size,
num_batches=num_batches,
final_shape=self.run_settings["final_shape"],
rng=self.rng, mode=mode,
train_or_predict=self.train_or_predict)
return iterator
class PassthroughIterator(neukrill_net.image_directory_dataset.FlyIterator):
def next(self):
# check if we reached the end yet
if self.final_iteration:
raise StopIteration
# allocate array
if len(self.final_shape) == 2:
Xbatch = np.array(self.result.get(timeout=10.0)).reshape(
self.batch_size, self.final_shape[0],
self.final_shape[1], 1)
elif len(self.final_shape) == 3:
Xbatch = np.array(self.result.get(timeout=10.0))
# make sure it's float32
Xbatch = Xbatch.astype(np.float32)
if self.train_or_predict == "train":
# get the batch for y as well
ybatch = self.dataset.y[self.batch_indices,:].astype(np.float32)
# index array for vbatch
vbatch = self.dataset.cached[self.batch_indices,:]
# start processing next batch
if len(self.indices) >= self.batch_size:
self.batch_indices = [self.indices.pop(0)
for i in range(self.batch_size)]
self.result = self.dataset.pool.map_async(self.dataset.fn,
[self.dataset.X[i] for i in self.batch_indices])
else:
self.final_iteration += 1
if self.train_or_predict == "train":
# get the batch for y as well
return Xbatch,vbatch,ybatch
elif self.train_or_predict == "test":
return Xbatch,vbatch
else:
raise ValueError("Invalid option for train_or_predict:"
" {0}".format(self.train_or_predict))
class PassthroughDataset(neukrill_net.image_directory_dataset.ListDataset):
"""
Dataset that can supply arbitrary vectors as well as the Conv2D
spaces required by the convolutional layers.
"""
def __init__(self, transformer, settings_path="settings.json",
run_settings_path="run_settings/alexnet_based.json",
training_set_mode="train",
verbose=False, force=False, prepreprocessing=None,
cached=None):
# runs inherited initialisation, but pulls out the
# supplied cached array for iteration
self.cached = sklearn.externals.joblib.load(cached).squeeze()
# load settings
# We don't save to self because super will do that
settings = neukrill_net.utils.Settings(settings_path)
run_settings = neukrill_net.utils.load_run_settings(run_settings_path,
settings,
force=force)
# get the right split from run settings
li = neukrill_net.utils.train_test_split_bool(settings.image_fnames,
training_set_mode,
train_split=run_settings['train_split'],
classes=settings.classes)
print '-----------'
print training_set_mode
print len(li)
print sum(li)
print '-----------'
# Use boolean indexing
self.cached = self.cached[li,:]
# Make sure our array has the correct
self.cached = self.cached.astype(np.float32)
# may have to remove cached before handing it in...
# ...no errors yet
super(self.__class__,self).__init__(transformer=transformer,
settings_path=settings_path,
run_settings_path=run_settings_path,
training_set_mode=training_set_mode,
verbose=verbose, force=force,
prepreprocessing=prepreprocessing)
def iterator(self, mode=None, batch_size=None, num_batches=None, rng=None,
data_specs=None, return_tuple=False):
"""
Returns iterator object with standard Pythonic interface; iterates
over the dataset over batches, popping off batches from a shuffled
list of indices.
Inputs:
- mode: 'sequential' or 'shuffled_sequential'.
- batch_size: required, size of the minibatches produced.
- num_batches: supply if you want, the dataset will make as many
as it can if you don't.
- rng: not used, as above.
- data_specs: not used, as above
- return_tuple: not used, as above
Outputs:
- instance of PassthroughIterator, see above.
"""
if not num_batches:
# guess that we want to use all of them
num_batches = int(len(self.X)/batch_size)
iterator = PassthroughIterator(dataset=self, batch_size=batch_size,
num_batches=num_batches,
final_shape=self.run_settings["final_shape"],
rng=self.rng, mode=mode)
return iterator
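if __name__ == "__main__":
    # Standalone sketch added for illustration; it does not exercise the
    # neukrill-net classes above. The iterators in this module hide
    # preprocessing latency by requesting the *next* batch with map_async
    # before the current one is consumed; the toy loop below shows that
    # prefetch pattern with a thread pool and a trivial processing function.
    from multiprocessing.pool import ThreadPool

    def _toy_process(x):
        return x * x

    pool = ThreadPool(2)
    indices = list(range(12))
    batch = [indices.pop(0) for _ in range(4)]
    result = pool.map_async(_toy_process, batch)
    while indices:
        current = result.get(timeout=10.0)                # finished batch
        batch = [indices.pop(0) for _ in range(4)]
        result = pool.map_async(_toy_process, batch)      # prefetch next batch
        print("consumed batch: {0}".format(current))
    print("last batch: {0}".format(result.get(timeout=10.0)))
    pool.close()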
| mit |
democratech/LaPrimaire | tools/simulation/graph.py | 1 | 7389 | # graph: confiance = f( N electeurs )
import re
import numpy as np
import matplotlib.pyplot as plt
from math import sqrt
import argparse
import os,sys
import mj
Nmentions = 7
def readData(Ncandidats, Nelecteurs, test, root = "data", relog = False):
folder = mj.getFolderName(Ncandidats, Nelecteurs, test)
fname = folder + "log.txt"
if not os.path.isfile(fname) or relog:
mj.computeLog(Ncandidats, Nelecteurs, test, root)
f = open(fname, "r")
p = re.compile(ur'5 premiers: ([0-9]+),')
a = []
for l in f.readlines():
a = re.findall(p, l)
if a != []:
break
if a == []:
print "Unreadable data for C_%i.E_%i_%i" % (Ncandidats, Nelecteurs, test)
return a[0]
# ------------------------------
# graph
def plotBar(Ncandidats,Nelecteurs, test, Nmentions, root="data"):
import matplotlib.pyplot as plt
fname = mj.getFolderName(Ncandidats, Nelecteurs, test, root)
raw_post = np.genfromtxt(fname + "raw.results.%i.%i.txt" % (Ncandidats, Nelecteurs), dtype=float, delimiter=",").astype(float)
n_priori = np.genfromtxt(fname + "terranova.%i.txt" % Ncandidats, dtype=float, delimiter=",").astype(float)
n_post = mj.normalize(raw_post)
nameMentions = ["Excellent", "Tres bien", "Bien", "Assez bien", "Passable", "Insuffisant", "A rejeter"]
couleurs = ["DarkRed", "Crimson","Tomato","DarkOrange","Yellow","Khaki","DarkKhaki"]
abs_res = range(0,Ncandidats)
abs_prob = np.arange(0,Ncandidats) + 0.46
width = 0.45
#plt.bar(abs_res, results[:,0], width, color=couleurs[0], label=nameMentions[0])
for i in range(Nmentions):
plt.bar(abs_res, n_post[:,i], width,color=couleurs[i], label=nameMentions[i], bottom=np.sum(n_post[:,:i],axis=1), edgecolor='white')
plt.bar(abs_prob, n_priori[:,i], width,color=couleurs[i], bottom=np.sum(n_priori[:,:i],axis=1), edgecolor='white')
plt.ylabel('Mentions')
plt.xlabel('Candidats')
plt.title('Jugement majoritaire avec %i candidats et %i electeurs' % (Ncandidats, Nelecteurs))
#plt.xticks(ind + width/2., ('C1', 'G2', 'G3', 'G4', 'G5'))
plt.yticks(np.arange(0, 1, 0.1))
plt.xticks(np.arange(0, Ncandidats))
plt.legend()
plt.show()
def plotStd(res):
w = np.where(res == 0)[0]
fig = plt.figure()
ax = plt.axes()
if w.size != 0:
threshold = w[0]
ax.axvline(threshold, linestyle='--', color='b',label="Nombre minimal d'electeurs")
plt.plot(electeurs, res, "b+")
plt.plot(electeurs, res, "b-")
plt.xlabel("Nombre d'electeurs")
plt.ylabel("Confiance")
plt.show()
def plotCandidates(Ncandidats, Nelecteurs, test):
""" plot distribution of the candidates"""
folder = root + "/C_%i.E_%i_%i/" % (Ncandidats, Nelecteurs, test)
fname = folder + "terranova." + str(Ncandidats) + ".txt"
p = np.genfromtxt(fname, delimiter = ",", dtype=float)
nameMentions = ["Excellent", "Tres bien", "Bien", "Assez bien", "Passable", "Insuffisant", "A rejeter"]
couleurs = ["DarkRed", "Crimson","Tomato","DarkOrange","Yellow","Khaki","DarkKhaki"]
abs_prob = range(0,Ncandidats)
width = 0.98
#plt.bar(abs_res, results[:,0], width, color=couleurs[0], label=nameMentions[0])
for i in range(Nmentions):
plt.bar(abs_prob, p[:,i], width,color=couleurs[i], label=nameMentions[i], bottom=np.sum(p[:,:i],axis=1), edgecolor='white')
plt.ylabel('Mentions')
plt.xlabel('Candidats')
plt.title('Jugement majoritaire avec %i candidats et %i electeurs' % (Ncandidats, Nelecteurs))
plt.yticks(np.arange(0, 1, 0.1))
plt.xticks(np.arange(0, Ncandidats))
plt.legend()
for i in range(Nmentions):
plt.figure()
plt.bar(abs_prob, np.sort(p[:,i]), width,color=couleurs[i], edgecolor='white')
plt.ylabel(nameMentions[i])
plt.xlabel('Candidats')
plt.yticks(np.arange(0, 1, 0.1))
plt.xticks(np.arange(0, Ncandidats))
plt.show()
def plotMinElecteurs(threshold, candidats, electeurs):
fig = plt.figure()
tes = threshold >= 0
idx = threshold[tes]
print electeurs[idx]
print candidats[tes]
plt.plot(candidats[tes], electeurs[idx], "b+")
plt.plot(candidats[tes], electeurs[idx], "b-")
plt.xlabel("Nombre de candidats")
plt.ylabel("Nombre min d'electeurs")
plt.show()
def computeStdToZero(Nc, electeurs,tests):
Ne = len(electeurs)
res = np.zeros(Ne)
for i in range(Ne):
e = electeurs[i]
acc = 0.0
for t in tests:
acc += float(readData(Nc,e,t))
res[i] = sqrt(acc/(len(tests)))
return res
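# Added note: computeStdToZero() above averages the logged "5 premiers" value
# over the test repetitions and returns its square root; the plotting code
# treats this as a confidence/error measure that is expected to shrink towards
# zero as the number of electors grows.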
def computeAllStd(candidats, electeurs, tests, root = "data"):
Ne = len(electeurs)
Nc = len(candidats)
res = np.zeros((Nc,Ne))
for j in range(Nc):
c = candidats[j]
res[j,:] = computeStdToZero(c, electeurs, tests)
#plotStd(res[j])
#print res
np.savetxt(root + "std_err.txt", res, delimiter = ",")
# compute min Nelecteurs for each Ncandidats
threshold = np.zeros(Nc, dtype=int)
for i in range(Nc):
c = candidats[i]
w = np.where(res[i] < 0.25)[0]
if w.size != 0:
threshold[i] = w[0]
else:
threshold[i] = -1
#print threshold
np.savetxt(root + "threshold.txt", res, delimiter = ",")
return threshold
# ------------------------------
# main
if __name__ == '__main__':
#global root, Nmentions
parser = argparse.ArgumentParser()
parser.add_argument('--ng', type=int, help='Number of grades', default=Nmentions)
parser.add_argument('--ne', type=int, help='Number of electors', default=0)
parser.add_argument('--nc', type=int, help='Number of candidates', default=0)
parser.add_argument('--root', type=str, help='Root for paths', default="data")
parser.add_argument('--priori', action='store_true', help='Plot a priori distribution')
parser.add_argument('--bar', action='store_true', help='Plot bars')
parser.add_argument('--relog', action='store_true')
parser.add_argument('--std', type=int, help='Return std for a certain number of candidate', default=0)
args = parser.parse_args()
Nmentions = args.ng
root = args.root
std = args.std
relog = args.relog
electeurs = set()
candidats = set()
tests = set()
samples = os.listdir("data")
for s in samples:
m = re.findall('C_(\d+).E_(\d+)_(\d+)', s)[0]
electeurs |= set([int(m[1])])
candidats |= set([int(m[0])])
tests |= set([int(m[2])])
electeurs = np.sort(list(electeurs))
candidats = np.sort(list(candidats))
tests = np.sort(list(tests))
print candidats
print electeurs
Nelecteurs = args.ne if args.ne else max(electeurs)
Ncandidats = args.nc if args.nc else max(candidats)
if args.priori:
test = 0
plotCandidates(Ncandidats, Nelecteurs, test)
if args.bar:
test = 0
plotBar(Ncandidats, Nelecteurs, test, Nmentions)
elif std:
print computeStdToZero(std, electeurs)
else:
threshold = computeAllStd(candidats, electeurs, tests)
print threshold
plotMinElecteurs(threshold, candidats, electeurs)
| agpl-3.0 |
florianwittkamp/FD_ACOUSTIC | Python/1D/FD_1D_DX4_DT4_ABS.py | 1 | 4773 | ## FD_1D_DX4_DT4_ABS.py 1-D acoustic Finite-Difference modelling
# GNU General Public License v3.0
#
# Author: Florian Wittkamp 2016
#
# Finite-Difference acoustic seismic wave simulation
# Discretization of the first-order acoustic wave equation
#
# Temporal fourth-order accuracy O(DT^4)
# Spatial fourth-order accuracy O(DX^4)
#
# Temporal discretization is based on the Adams-Basforth method
# Theory is available in:
# Bohlen, T., & Wittkamp, F. (2016).
# Three-dimensional viscoelastic time-domain finite-difference
# seismic modelling using the staggered Adams-Bashforth time integrator.
# Geophysical Journal International, 204(3), 1781-1788.
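#
# Added clarifying notes (not part of the original header):
# - c1 below fixes the number of grid points per dominant wavelength, which
#   controls numerical dispersion, while c2 is the CFL number used to derive
#   dt from dx and the highest velocity.
# - The spatial derivatives inside the time loop use the 4th-order
#   staggered-grid weights 9/8 and -1/24.
# - The temporal update combines the last four spatial derivatives with the
#   weights 13/12, -5/24, 1/6 and -1/24, the staggered Adams-Bashforth
#   weights described in the reference cited above.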
## Initialisation
print(" ")
print("Starting FD_1D_DX4_DT4_ABS")
from numpy import *
import time as tm
import matplotlib.pyplot as plt
## Input Parameter
# Discretization
c1=20 # Number of grid points per dominant wavelength
c2=0.5 # CFL-Number
nx=2000 # Number of grid points
T=10 # Total propagation time
# Source Signal
f0= 10 # Center frequency Ricker-wavelet
q0= 1 # Maximum amplitude Ricker-Wavelet
xscr = 100 # Source position (in grid points)
# Receiver
xrec1=400 # Position Receiver 1 (in grid points)
xrec2=800 # Position Receiver 2 (in grid points)
xrec3=1800 # Position Receiver 3 (in grid points)
# Velocity and density
modell_v = hstack((1000*ones((around(nx/2)),float),1500*ones((around(nx/2)),float)))
rho=hstack((1*ones((around(nx/2)),float),1.5*ones((around(nx/2)),float)))
## Preparation
# Init wavefields
vx=zeros((nx),float)
p=zeros((nx),float)
vx_x=zeros((nx),float)
p_x=zeros((nx),float)
vx_x2=zeros((nx),float)
p_x2=zeros((nx),float)
vx_x3=zeros((nx),float)
p_x3=zeros((nx),float)
vx_x4=zeros((nx),float)
p_x4=zeros((nx),float)
# Calculate first Lame-Paramter
l=rho * modell_v * modell_v
cmin=min(modell_v.flatten()) # Lowest P-wave velocity
cmax=max(modell_v.flatten()) # Highest P-wave velocity
fmax=2*f0 # Maximum frequency
dx=cmin/(fmax*c1) # Spatial discretization (in m)
dt=dx/(cmax)*c2 # Temporal discretization (in s)
lampda_min=cmin/fmax # Smallest wavelength
# Output model parameter:
print("Model size: x:",dx*nx,"in m")
print("Temporal discretization: ",dt," s")
print("Spatial discretization: ",dx," m")
print("Number of gridpoints per minimum wavelength: ",lampda_min/dx)
# Create space and time vector
x=arange(0,dx*nx,dx) # Space vector
t=arange(0,T,dt) # Time vector
nt=size(t) # Number of time steps
# Plotting model
plt.figure(1)
plt.plot(x,modell_v)
plt.ylabel('VP in m/s')
plt.xlabel('Depth in m')
plt.figure(2)
plt.plot(x,rho)
plt.ylabel('Density in g/cm^3')
plt.xlabel('Depth in m')
plt.draw()
plt.pause(0.001)
# Source signal - Ricker-wavelet
tau=pi*f0*(t-1.5/f0)
q=q0*(1-2*tau**2)*exp(-tau**2)
# Plotting source signal
plt.figure(3)
plt.plot(t,q)
plt.title('Source signal Ricker-Wavelet')
plt.ylabel('Amplitude')
plt.xlabel('Time in s')
plt.draw()
plt.pause(0.001)
# Init Seismograms
Seismogramm=zeros((3,nt),float); # Three seismograms
# Calculation of some coefficients
i_dx=1.0/(dx)
## Time stepping
print("Starting time stepping...")
tic=tm.clock()
for n in range(2,nt):
# Inject source wavelet
p[xscr]=p[xscr]+q[n]
# Update velocity
for kx in range(5,nx-4):
# Calculating spatial derivative
p_x[kx]=i_dx*9.0/8.0*(p[kx+1]-p[kx])-i_dx*1.0/24.0*(p[kx+2]-p[kx-1])
# Update velocity
vx[kx]=vx[kx]-dt/rho[kx]*(13.0/12.0*p_x[kx]-5.0/24.0*p_x2[kx]+1.0/6.0*p_x3[kx]-1.0/24.0*p_x4[kx])
# Save old spatial derivatives for the Adams-Bashforth method
copyto(p_x4,p_x3)
copyto(p_x3,p_x2)
copyto(p_x2,p_x)
# Update pressure
for kx in range(5,nx-4):
# Calculating spatial derivative
vx_x[kx]= i_dx*9.0/8.0*(vx[kx]-vx[kx-1])-i_dx*1.0/24.0*(vx[kx+1]-vx[kx-2])
# Update pressure
p[kx]=p[kx]-l[kx]*dt*(13.0/12.0*vx_x[kx]-5.0/24.0*vx_x2[kx]+1.0/6.0*vx_x3[kx]-1.0/24.0*vx_x4[kx])
# Save old spatial derivatives for the Adams-Bashforth method
copyto(vx_x4,vx_x3)
copyto(vx_x3,vx_x2)
copyto(vx_x2,vx_x)
# Save seismograms
Seismogramm[0,n]=p[xrec1]
Seismogramm[1,n]=p[xrec2]
Seismogramm[2,n]=p[xrec3]
toc = tm.clock()
time=toc-tic
## Plot seismograms
plt.figure(4)
plt.plot(t,Seismogramm[0,:])
plt.title('Seismogram 1')
plt.ylabel('Amplitude')
plt.xlabel('Time in s')
plt.figure(5)
plt.plot(t,Seismogramm[1,:])
plt.title('Seismogram 2')
plt.ylabel('Amplitude')
plt.xlabel('Time in s')
plt.figure(6)
plt.plot(t,Seismogramm[2,:])
plt.title('Seismogram 3')
plt.ylabel('Amplitude')
plt.xlabel('Time in s')
plt.draw()
plt.show()
print(" ")
| gpl-3.0 |
jprb-walton/make-temperature-movie | RainHist.py | 1 | 3741 | """
Read in dynamic surface temperature data and contour plot it.
Harry's Version for new data.
"""
import sys
import math
import time as myTime
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
import iris
import iris.plot as iplt
import iris.quickplot as qplt
import iris.coord_categorisation as icat
from iris.experimental.equalise_cubes import equalise_attributes
'''
def create_video():
#creating the video
SpawnCommand("ffmpeg -i image-%04d.png Temporary.mp4")
SpawnCommand('ffmpeg -i Temporary.mp4 -filter:v "setpts=2.5*PTS" 1850_Rainfall.mp4')
print ("Deleting the unneeded images...")
SpawnCommand("rm -f *.png")
SpawnCommand("rm -f TemperatureVideo1.mp4")
'''
def main():
'''
# Delete all the image files in the current directory to ensure that only those
# created in the loop end up in the movie.
print ("\nDeleting all .png files in this directory...")
SpawnCommand("rm -f *.png")
print("Loading the data...")
'''
# Read all the temperature values and create a single cube containing this data
cubeList = iris.cube.CubeList([])
for i in range(1, 11, 3):
QuarterCube = iris.load_cube('tpr_1850/bc179a.pj1850'+ str('{:02}'.format(i))+ '01.nc')
for sub_cube in QuarterCube.slices(['latitude', 'longitude']):
cubeList.append(sub_cube)
mydata = []
equalise_attributes(cubeList)
wholecube = cubeList.merge_cube()
for myint in wholecube.slices_over(['latitude', 'longitude', 'time']):
myint = myint.data
mydata.append(myint)
#for sub_cube in whole_cube.slices('latitude, longitude')
plt.hist(mydata)
    plt.show()
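    # Note: matplotlib.use('Agg') was selected above, so plt.show() will not open a
    # window. A sketch of how the figure could be written to disk instead (the
    # filename is illustrative, not from the original script):
    # plt.savefig('rain_histogram.png', bbox_inches='tight')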
'''
yearData = cubeList.merge_cube()
print(yearData)
print("Data downloaded! Now Processing...")
# Get the range of values.
maxRain = np.amax(yearData.data)
print(maxRain)
#0.013547399
# Add a new coordinate containing the year.
icat.add_day_of_year(yearData, 'time')
days = yearData.coord('time')
# Set the limits for the loop over years.
minTime = 0
maxTime = 360
#print ("Making images from year", days[minTime].points[0], "to", days[maxTime-1].points[0], "...")
for time in range(minTime, maxTime):
iplt.contourf(cubeList[time], vmin = 0.0, vmax = 0.001354799, cmap = 'RdBu_r')
plt.gca().coastlines()
# We need to fix the boundary of the figure (otherwise we get a black border at left & top).
# Cartopy removes matplotlib's axes.patch (which normally defines the boundary) and
# replaces it with outline_patch and background_patch. It's the former which is causing
# the black border. Get the axis object and make its outline patch invisible.
ax = plt.gca()
ax.outline_patch.set_visible(False)
# Extract the year value and display it (coordinates used in locating the text are
# those of the data).
day = days[time].points[0]
plt.text(0, -60, day, horizontalalignment='center')
# Now save the plot in an image file. The files are numbered sequentially, starting
# from 000.png; this is so that the ffmpeg command can grok them.
filename = "image-%04d.png" % time
plt.savefig(filename, bbox_inches='tight', pad_inches=0)
# Discard the figure (otherwise the text will be overwritten
# by the next iteration).
plt.close()
print("images made! Now converting to .mp4...")
create_video()
print("Opening video...")
SpawnCommand('open 1850_Rainfall.mp4')
'''
if __name__ == '__main__':
main()
| lgpl-3.0 |
paladin74/neural-network-animation | matplotlib/backend_bases.py | 10 | 106046 | """
Abstract base classes define the primitives that renderers and
graphics contexts must implement to serve as a matplotlib backend
:class:`RendererBase`
An abstract base class to handle drawing/rendering operations.
:class:`FigureCanvasBase`
The abstraction layer that separates the
:class:`matplotlib.figure.Figure` from the backend specific
details like a user interface drawing area
:class:`GraphicsContextBase`
An abstract base class that provides color, line styles, etc...
:class:`Event`
The base class for all of the matplotlib event
handling. Derived classes such as :class:`KeyEvent` and
:class:`MouseEvent` store the meta data like keys and buttons
pressed, x and y locations in pixel and
:class:`~matplotlib.axes.Axes` coordinates.
:class:`ShowBase`
The base class for the Show class of each interactive backend;
the 'show' callable is then set to Show.__call__, inherited from
ShowBase.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from six.moves import xrange
import os
import sys
import warnings
import time
import io
import numpy as np
import matplotlib.cbook as cbook
import matplotlib.colors as colors
import matplotlib.transforms as transforms
import matplotlib.widgets as widgets
#import matplotlib.path as path
from matplotlib import rcParams
from matplotlib import is_interactive
from matplotlib import get_backend
from matplotlib._pylab_helpers import Gcf
from matplotlib.transforms import Bbox, TransformedBbox, Affine2D
import matplotlib.tight_bbox as tight_bbox
import matplotlib.textpath as textpath
from matplotlib.path import Path
from matplotlib.cbook import mplDeprecation
try:
from importlib import import_module
except:
# simple python 2.6 implementation (no relative imports)
def import_module(name):
__import__(name)
return sys.modules[name]
try:
from PIL import Image
_has_pil = True
except ImportError:
_has_pil = False
_default_filetypes = {
'ps': 'Postscript',
'eps': 'Encapsulated Postscript',
'pdf': 'Portable Document Format',
'pgf': 'PGF code for LaTeX',
'png': 'Portable Network Graphics',
'raw': 'Raw RGBA bitmap',
'rgba': 'Raw RGBA bitmap',
'svg': 'Scalable Vector Graphics',
'svgz': 'Scalable Vector Graphics'
}
_default_backends = {
'ps': 'matplotlib.backends.backend_ps',
'eps': 'matplotlib.backends.backend_ps',
'pdf': 'matplotlib.backends.backend_pdf',
'pgf': 'matplotlib.backends.backend_pgf',
'png': 'matplotlib.backends.backend_agg',
'raw': 'matplotlib.backends.backend_agg',
'rgba': 'matplotlib.backends.backend_agg',
'svg': 'matplotlib.backends.backend_svg',
'svgz': 'matplotlib.backends.backend_svg',
}
def register_backend(format, backend, description=None):
"""
Register a backend for saving to a given file format.
format : str
        File extension
backend : module string or canvas class
Backend for handling file output
description : str, optional
Description of the file type. Defaults to an empty string
"""
if description is None:
description = ''
_default_backends[format] = backend
_default_filetypes[format] = description
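# Example usage (a sketch; the format name and description are illustrative,
# not part of matplotlib's defaults):
#
#     register_backend('xyz', 'matplotlib.backends.backend_agg',
#                      'Hypothetical raster format rendered by the Agg canvas')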
def get_registered_canvas_class(format):
"""
Return the registered default canvas for given file format.
Handles deferred import of required backend.
"""
if format not in _default_backends:
return None
backend_class = _default_backends[format]
if cbook.is_string_like(backend_class):
backend_class = import_module(backend_class).FigureCanvas
_default_backends[format] = backend_class
return backend_class
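# Example (sketch): resolve the canvas class registered for SVG output; the
# backend module is imported lazily on first use.
#
#     SVGCanvas = get_registered_canvas_class('svg')
#     # -> matplotlib.backends.backend_svg.FigureCanvas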
class ShowBase(object):
"""
Simple base class to generate a show() callable in backends.
Subclass must override mainloop() method.
"""
def __call__(self, block=None):
"""
Show all figures. If *block* is not None, then
it is a boolean that overrides all other factors
determining whether show blocks by calling mainloop().
The other factors are:
it does not block if run inside ipython's "%pylab" mode
it does not block in interactive mode.
"""
managers = Gcf.get_all_fig_managers()
if not managers:
return
for manager in managers:
manager.show()
if block is not None:
if block:
self.mainloop()
return
else:
return
# Hack: determine at runtime whether we are
# inside ipython in pylab mode.
from matplotlib import pyplot
try:
ipython_pylab = not pyplot.show._needmain
# IPython versions >= 0.10 tack the _needmain
# attribute onto pyplot.show, and always set
# it to False, when in %pylab mode.
ipython_pylab = ipython_pylab and get_backend() != 'WebAgg'
# TODO: The above is a hack to get the WebAgg backend
# working with ipython's `%pylab` mode until proper
# integration is implemented.
except AttributeError:
ipython_pylab = False
# Leave the following as a separate step in case we
# want to control this behavior with an rcParam.
if ipython_pylab:
return
if not is_interactive() or get_backend() == 'WebAgg':
self.mainloop()
def mainloop(self):
pass
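# A minimal sketch of how a backend module typically uses ShowBase (the event
# loop call below is hypothetical, standing in for a real GUI toolkit):
#
#     class Show(ShowBase):
#         def mainloop(self):
#             my_toolkit.run_event_loop()   # hypothetical toolkit call
#     show = Show()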
class RendererBase(object):
"""An abstract base class to handle drawing/rendering operations.
The following methods must be implemented in the backend for full
functionality (though just implementing :meth:`draw_path` alone would
give a highly capable backend):
* :meth:`draw_path`
* :meth:`draw_image`
* :meth:`draw_gouraud_triangle`
The following methods *should* be implemented in the backend for
optimization reasons:
* :meth:`draw_text`
* :meth:`draw_markers`
* :meth:`draw_path_collection`
* :meth:`draw_quad_mesh`
"""
def __init__(self):
self._texmanager = None
self._text2path = textpath.TextToPath()
def open_group(self, s, gid=None):
"""
Open a grouping element with label *s*. If *gid* is given, use
*gid* as the id of the group. Is only currently used by
:mod:`~matplotlib.backends.backend_svg`.
"""
pass
def close_group(self, s):
"""
Close a grouping element with label *s*
Is only currently used by :mod:`~matplotlib.backends.backend_svg`
"""
pass
def draw_path(self, gc, path, transform, rgbFace=None):
"""
Draws a :class:`~matplotlib.path.Path` instance using the
given affine transform.
"""
raise NotImplementedError
def draw_markers(self, gc, marker_path, marker_trans, path,
trans, rgbFace=None):
"""
Draws a marker at each of the vertices in path. This includes
all vertices, including control points on curves. To avoid
that behavior, those vertices should be removed before calling
this function.
*gc*
the :class:`GraphicsContextBase` instance
*marker_trans*
is an affine transform applied to the marker.
*trans*
is an affine transform applied to the path.
This provides a fallback implementation of draw_markers that
makes multiple calls to :meth:`draw_path`. Some backends may
want to override this method in order to draw the marker only
once and reuse it multiple times.
"""
for vertices, codes in path.iter_segments(trans, simplify=False):
if len(vertices):
x, y = vertices[-2:]
self.draw_path(gc, marker_path,
marker_trans +
transforms.Affine2D().translate(x, y),
rgbFace)
def draw_path_collection(self, gc, master_transform, paths, all_transforms,
offsets, offsetTrans, facecolors, edgecolors,
linewidths, linestyles, antialiaseds, urls,
offset_position):
"""
Draws a collection of paths selecting drawing properties from
the lists *facecolors*, *edgecolors*, *linewidths*,
*linestyles* and *antialiaseds*. *offsets* is a list of
offsets to apply to each of the paths. The offsets in
*offsets* are first transformed by *offsetTrans* before being
applied. *offset_position* may be either "screen" or "data"
depending on the space that the offsets are in.
This provides a fallback implementation of
:meth:`draw_path_collection` that makes multiple calls to
:meth:`draw_path`. Some backends may want to override this in
order to render each set of path data only once, and then
reference that path multiple times with the different offsets,
colors, styles etc. The generator methods
:meth:`_iter_collection_raw_paths` and
:meth:`_iter_collection` are provided to help with (and
standardize) the implementation across backends. It is highly
recommended to use those generators, so that changes to the
behavior of :meth:`draw_path_collection` can be made globally.
"""
path_ids = []
for path, transform in self._iter_collection_raw_paths(
master_transform, paths, all_transforms):
path_ids.append((path, transforms.Affine2D(transform)))
for xo, yo, path_id, gc0, rgbFace in self._iter_collection(
gc, master_transform, all_transforms, path_ids, offsets,
offsetTrans, facecolors, edgecolors, linewidths, linestyles,
antialiaseds, urls, offset_position):
path, transform = path_id
transform = transforms.Affine2D(
transform.get_matrix()).translate(xo, yo)
self.draw_path(gc0, path, transform, rgbFace)
def draw_quad_mesh(self, gc, master_transform, meshWidth, meshHeight,
coordinates, offsets, offsetTrans, facecolors,
antialiased, edgecolors):
"""
This provides a fallback implementation of
:meth:`draw_quad_mesh` that generates paths and then calls
:meth:`draw_path_collection`.
"""
from matplotlib.collections import QuadMesh
paths = QuadMesh.convert_mesh_to_paths(
meshWidth, meshHeight, coordinates)
if edgecolors is None:
edgecolors = facecolors
linewidths = np.array([gc.get_linewidth()], np.float_)
return self.draw_path_collection(
gc, master_transform, paths, [], offsets, offsetTrans, facecolors,
edgecolors, linewidths, [], [antialiased], [None], 'screen')
def draw_gouraud_triangle(self, gc, points, colors, transform):
"""
Draw a Gouraud-shaded triangle.
*points* is a 3x2 array of (x, y) points for the triangle.
*colors* is a 3x4 array of RGBA colors for each point of the
triangle.
*transform* is an affine transform to apply to the points.
"""
raise NotImplementedError
def draw_gouraud_triangles(self, gc, triangles_array, colors_array,
transform):
"""
Draws a series of Gouraud triangles.
        *points* is a Nx3x2 array of (x, y) points for the triangles.
*colors* is a Nx3x4 array of RGBA colors for each point of the
triangles.
*transform* is an affine transform to apply to the points.
"""
transform = transform.frozen()
for tri, col in zip(triangles_array, colors_array):
self.draw_gouraud_triangle(gc, tri, col, transform)
def _iter_collection_raw_paths(self, master_transform, paths,
all_transforms):
"""
This is a helper method (along with :meth:`_iter_collection`) to make
        it easier to write a space-efficient :meth:`draw_path_collection`
implementation in a backend.
This method yields all of the base path/transform
combinations, given a master transform, a list of paths and
list of transforms.
The arguments should be exactly what is passed in to
:meth:`draw_path_collection`.
The backend should take each yielded path and transform and
create an object that can be referenced (reused) later.
"""
Npaths = len(paths)
Ntransforms = len(all_transforms)
N = max(Npaths, Ntransforms)
if Npaths == 0:
return
transform = transforms.IdentityTransform()
for i in xrange(N):
path = paths[i % Npaths]
if Ntransforms:
transform = Affine2D(all_transforms[i % Ntransforms])
yield path, transform + master_transform
def _iter_collection_uses_per_path(self, paths, all_transforms,
offsets, facecolors, edgecolors):
"""
Compute how many times each raw path object returned by
_iter_collection_raw_paths would be used when calling
_iter_collection. This is intended for the backend to decide
on the tradeoff between using the paths in-line and storing
them once and reusing. Rounds up in case the number of uses
is not the same for every path.
"""
Npaths = len(paths)
if Npaths == 0 or (len(facecolors) == 0 and len(edgecolors) == 0):
return 0
Npath_ids = max(Npaths, len(all_transforms))
N = max(Npath_ids, len(offsets))
return (N + Npath_ids - 1) // Npath_ids
def _iter_collection(self, gc, master_transform, all_transforms,
path_ids, offsets, offsetTrans, facecolors,
edgecolors, linewidths, linestyles,
antialiaseds, urls, offset_position):
"""
This is a helper method (along with
:meth:`_iter_collection_raw_paths`) to make it easier to write
        a space-efficient :meth:`draw_path_collection` implementation in a
backend.
This method yields all of the path, offset and graphics
context combinations to draw the path collection. The caller
should already have looped over the results of
:meth:`_iter_collection_raw_paths` to draw this collection.
The arguments should be the same as that passed into
:meth:`draw_path_collection`, with the exception of
*path_ids*, which is a list of arbitrary objects that the
backend will use to reference one of the paths created in the
:meth:`_iter_collection_raw_paths` stage.
Each yielded result is of the form::
xo, yo, path_id, gc, rgbFace
where *xo*, *yo* is an offset; *path_id* is one of the elements of
*path_ids*; *gc* is a graphics context and *rgbFace* is a color to
use for filling the path.
"""
Ntransforms = len(all_transforms)
Npaths = len(path_ids)
Noffsets = len(offsets)
N = max(Npaths, Noffsets)
Nfacecolors = len(facecolors)
Nedgecolors = len(edgecolors)
Nlinewidths = len(linewidths)
Nlinestyles = len(linestyles)
Naa = len(antialiaseds)
Nurls = len(urls)
if (Nfacecolors == 0 and Nedgecolors == 0) or Npaths == 0:
return
if Noffsets:
toffsets = offsetTrans.transform(offsets)
gc0 = self.new_gc()
gc0.copy_properties(gc)
if Nfacecolors == 0:
rgbFace = None
if Nedgecolors == 0:
gc0.set_linewidth(0.0)
xo, yo = 0, 0
for i in xrange(N):
path_id = path_ids[i % Npaths]
if Noffsets:
xo, yo = toffsets[i % Noffsets]
if offset_position == 'data':
if Ntransforms:
transform = (
Affine2D(all_transforms[i % Ntransforms]) +
master_transform)
else:
transform = master_transform
xo, yo = transform.transform_point((xo, yo))
xp, yp = transform.transform_point((0, 0))
xo = -(xp - xo)
yo = -(yp - yo)
if not (np.isfinite(xo) and np.isfinite(yo)):
continue
if Nfacecolors:
rgbFace = facecolors[i % Nfacecolors]
if Nedgecolors:
if Nlinewidths:
gc0.set_linewidth(linewidths[i % Nlinewidths])
if Nlinestyles:
gc0.set_dashes(*linestyles[i % Nlinestyles])
fg = edgecolors[i % Nedgecolors]
if len(fg) == 4:
if fg[3] == 0.0:
gc0.set_linewidth(0)
else:
gc0.set_foreground(fg)
else:
gc0.set_foreground(fg)
if rgbFace is not None and len(rgbFace) == 4:
if rgbFace[3] == 0:
rgbFace = None
gc0.set_antialiased(antialiaseds[i % Naa])
if Nurls:
gc0.set_url(urls[i % Nurls])
yield xo, yo, path_id, gc0, rgbFace
gc0.restore()
def get_image_magnification(self):
"""
Get the factor by which to magnify images passed to :meth:`draw_image`.
Allows a backend to have images at a different resolution to other
artists.
"""
return 1.0
def draw_image(self, gc, x, y, im):
"""
Draw the image instance into the current axes;
*gc*
a GraphicsContext containing clipping information
*x*
is the distance in pixels from the left hand side of the canvas.
*y*
the distance from the origin. That is, if origin is
upper, y is the distance from top. If origin is lower, y
is the distance from bottom
*im*
the :class:`matplotlib._image.Image` instance
"""
raise NotImplementedError
def option_image_nocomposite(self):
"""
override this method for renderers that do not necessarily
want to rescale and composite raster images. (like SVG)
"""
return False
def option_scale_image(self):
"""
override this method for renderers that support arbitrary
        scaling of images (most of the vector backends).
"""
return False
def draw_tex(self, gc, x, y, s, prop, angle, ismath='TeX!', mtext=None):
"""
"""
self._draw_text_as_path(gc, x, y, s, prop, angle, ismath="TeX")
def draw_text(self, gc, x, y, s, prop, angle, ismath=False, mtext=None):
"""
Draw the text instance
*gc*
the :class:`GraphicsContextBase` instance
*x*
the x location of the text in display coords
*y*
the y location of the text baseline in display coords
*s*
the text string
*prop*
a :class:`matplotlib.font_manager.FontProperties` instance
*angle*
the rotation angle in degrees
*mtext*
a :class:`matplotlib.text.Text` instance
**backend implementers note**
When you are trying to determine if you have gotten your bounding box
right (which is what enables the text layout/alignment to work
properly), it helps to change the line in text.py::
if 0: bbox_artist(self, renderer)
to if 1, and then the actual bounding box will be plotted along with
your text.
"""
self._draw_text_as_path(gc, x, y, s, prop, angle, ismath)
def _get_text_path_transform(self, x, y, s, prop, angle, ismath):
"""
return the text path and transform
*prop*
font property
*s*
text to be converted
*usetex*
If True, use matplotlib usetex mode.
*ismath*
If True, use mathtext parser. If "TeX", use *usetex* mode.
"""
text2path = self._text2path
fontsize = self.points_to_pixels(prop.get_size_in_points())
if ismath == "TeX":
verts, codes = text2path.get_text_path(prop, s, ismath=False,
usetex=True)
else:
verts, codes = text2path.get_text_path(prop, s, ismath=ismath,
usetex=False)
path = Path(verts, codes)
angle = angle / 180. * 3.141592
if self.flipy():
transform = Affine2D().scale(fontsize / text2path.FONT_SCALE,
fontsize / text2path.FONT_SCALE)
transform = transform.rotate(angle).translate(x, self.height - y)
else:
transform = Affine2D().scale(fontsize / text2path.FONT_SCALE,
fontsize / text2path.FONT_SCALE)
transform = transform.rotate(angle).translate(x, y)
return path, transform
def _draw_text_as_path(self, gc, x, y, s, prop, angle, ismath):
"""
draw the text by converting them to paths using textpath module.
*prop*
font property
*s*
text to be converted
*usetex*
If True, use matplotlib usetex mode.
*ismath*
If True, use mathtext parser. If "TeX", use *usetex* mode.
"""
path, transform = self._get_text_path_transform(
x, y, s, prop, angle, ismath)
color = gc.get_rgb()
gc.set_linewidth(0.0)
self.draw_path(gc, path, transform, rgbFace=color)
def get_text_width_height_descent(self, s, prop, ismath):
"""
get the width and height, and the offset from the bottom to the
baseline (descent), in display coords of the string s with
:class:`~matplotlib.font_manager.FontProperties` prop
"""
if ismath == 'TeX':
# todo: handle props
size = prop.get_size_in_points()
texmanager = self._text2path.get_texmanager()
fontsize = prop.get_size_in_points()
w, h, d = texmanager.get_text_width_height_descent(s, fontsize,
renderer=self)
return w, h, d
dpi = self.points_to_pixels(72)
if ismath:
dims = self._text2path.mathtext_parser.parse(s, dpi, prop)
return dims[0:3] # return width, height, descent
flags = self._text2path._get_hinting_flag()
font = self._text2path._get_font(prop)
size = prop.get_size_in_points()
font.set_size(size, dpi)
# the width and height of unrotated string
font.set_text(s, 0.0, flags=flags)
w, h = font.get_width_height()
d = font.get_descent()
w /= 64.0 # convert from subpixels
h /= 64.0
d /= 64.0
return w, h, d
def flipy(self):
"""
        Return True if small y values correspond to the top of the canvas for
        this renderer. Used only for drawing text (:mod:`matplotlib.text`) and
        images (:mod:`matplotlib.image`).
"""
return True
def get_canvas_width_height(self):
'return the canvas width and height in display coords'
return 1, 1
def get_texmanager(self):
"""
return the :class:`matplotlib.texmanager.TexManager` instance
"""
if self._texmanager is None:
from matplotlib.texmanager import TexManager
self._texmanager = TexManager()
return self._texmanager
def new_gc(self):
"""
Return an instance of a :class:`GraphicsContextBase`
"""
return GraphicsContextBase()
def points_to_pixels(self, points):
"""
Convert points to display units
*points*
a float or a numpy array of float
return points converted to pixels
You need to override this function (unless your backend
doesn't have a dpi, e.g., postscript or svg). Some imaging
systems assume some value for pixels per inch::
points to pixels = points * pixels_per_inch/72.0 * dpi/72.0
"""
return points
def strip_math(self, s):
return cbook.strip_math(s)
def start_rasterizing(self):
"""
Used in MixedModeRenderer. Switch to the raster renderer.
"""
pass
def stop_rasterizing(self):
"""
Used in MixedModeRenderer. Switch back to the vector renderer
and draw the contents of the raster renderer as an image on
the vector renderer.
"""
pass
def start_filter(self):
"""
Used in AggRenderer. Switch to a temporary renderer for image
filtering effects.
"""
pass
def stop_filter(self, filter_func):
"""
Used in AggRenderer. Switch back to the original renderer.
The contents of the temporary renderer is processed with the
*filter_func* and is drawn on the original renderer as an
image.
"""
pass
class GraphicsContextBase:
"""
An abstract base class that provides color, line styles, etc...
"""
# a mapping from dash styles to suggested offset, dash pairs
dashd = {
'solid': (None, None),
'dashed': (0, (6.0, 6.0)),
'dashdot': (0, (3.0, 5.0, 1.0, 5.0)),
'dotted': (0, (1.0, 3.0)),
}
def __init__(self):
self._alpha = 1.0
self._forced_alpha = False # if True, _alpha overrides A from RGBA
self._antialiased = 1 # use 0,1 not True, False for extension code
self._capstyle = 'butt'
self._cliprect = None
self._clippath = None
self._dashes = None, None
self._joinstyle = 'round'
self._linestyle = 'solid'
self._linewidth = 1
self._rgb = (0.0, 0.0, 0.0, 1.0)
self._orig_color = (0.0, 0.0, 0.0, 1.0)
self._hatch = None
self._url = None
self._gid = None
self._snap = None
self._sketch = None
def copy_properties(self, gc):
'Copy properties from gc to self'
self._alpha = gc._alpha
self._forced_alpha = gc._forced_alpha
self._antialiased = gc._antialiased
self._capstyle = gc._capstyle
self._cliprect = gc._cliprect
self._clippath = gc._clippath
self._dashes = gc._dashes
self._joinstyle = gc._joinstyle
self._linestyle = gc._linestyle
self._linewidth = gc._linewidth
self._rgb = gc._rgb
self._orig_color = gc._orig_color
self._hatch = gc._hatch
self._url = gc._url
self._gid = gc._gid
self._snap = gc._snap
self._sketch = gc._sketch
def restore(self):
"""
Restore the graphics context from the stack - needed only
for backends that save graphics contexts on a stack
"""
pass
def get_alpha(self):
"""
Return the alpha value used for blending - not supported on
all backends
"""
return self._alpha
def get_antialiased(self):
"Return true if the object should try to do antialiased rendering"
return self._antialiased
def get_capstyle(self):
"""
Return the capstyle as a string in ('butt', 'round', 'projecting')
"""
return self._capstyle
def get_clip_rectangle(self):
"""
Return the clip rectangle as a :class:`~matplotlib.transforms.Bbox`
instance
"""
return self._cliprect
def get_clip_path(self):
"""
Return the clip path in the form (path, transform), where path
is a :class:`~matplotlib.path.Path` instance, and transform is
an affine transform to apply to the path before clipping.
"""
if self._clippath is not None:
return self._clippath.get_transformed_path_and_affine()
return None, None
def get_dashes(self):
"""
Return the dash information as an offset dashlist tuple.
        The dash list is an even-length list that gives the ink on, ink
        off in pixels.
        See p. 107 of the PostScript `BLUEBOOK
<http://www-cdf.fnal.gov/offline/PostScript/BLUEBOOK.PDF>`_
for more info.
Default value is None
"""
return self._dashes
def get_forced_alpha(self):
"""
Return whether the value given by get_alpha() should be used to
override any other alpha-channel values.
"""
return self._forced_alpha
def get_joinstyle(self):
"""
Return the line join style as one of ('miter', 'round', 'bevel')
"""
return self._joinstyle
def get_linestyle(self, style):
"""
Return the linestyle: one of ('solid', 'dashed', 'dashdot',
'dotted').
"""
return self._linestyle
def get_linewidth(self):
"""
Return the line width in points as a scalar
"""
return self._linewidth
def get_rgb(self):
"""
returns a tuple of three or four floats from 0-1.
"""
return self._rgb
def get_url(self):
"""
returns a url if one is set, None otherwise
"""
return self._url
def get_gid(self):
"""
Return the object identifier if one is set, None otherwise.
"""
return self._gid
def get_snap(self):
"""
returns the snap setting which may be:
* True: snap vertices to the nearest pixel center
* False: leave vertices as-is
* None: (auto) If the path contains only rectilinear line
segments, round to the nearest pixel center
"""
return self._snap
def set_alpha(self, alpha):
"""
Set the alpha value used for blending - not supported on all backends.
If ``alpha=None`` (the default), the alpha components of the
foreground and fill colors will be used to set their respective
transparencies (where applicable); otherwise, ``alpha`` will override
them.
"""
if alpha is not None:
self._alpha = alpha
self._forced_alpha = True
else:
self._alpha = 1.0
self._forced_alpha = False
self.set_foreground(self._orig_color)
def set_antialiased(self, b):
"""
True if object should be drawn with antialiased rendering
"""
# use 0, 1 to make life easier on extension code trying to read the gc
if b:
self._antialiased = 1
else:
self._antialiased = 0
def set_capstyle(self, cs):
"""
Set the capstyle as a string in ('butt', 'round', 'projecting')
"""
if cs in ('butt', 'round', 'projecting'):
self._capstyle = cs
else:
raise ValueError('Unrecognized cap style. Found %s' % cs)
def set_clip_rectangle(self, rectangle):
"""
Set the clip rectangle with sequence (left, bottom, width, height)
"""
self._cliprect = rectangle
def set_clip_path(self, path):
"""
Set the clip path and transformation. Path should be a
:class:`~matplotlib.transforms.TransformedPath` instance.
"""
assert path is None or isinstance(path, transforms.TransformedPath)
self._clippath = path
def set_dashes(self, dash_offset, dash_list):
"""
Set the dash style for the gc.
*dash_offset*
is the offset (usually 0).
*dash_list*
specifies the on-off sequence as points.
``(None, None)`` specifies a solid line
"""
if dash_list is not None:
dl = np.asarray(dash_list)
if np.any(dl <= 0.0):
raise ValueError("All values in the dash list must be positive")
self._dashes = dash_offset, dash_list
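    # Example (sketch): a simple dashed style with 6 points of ink followed by
    # a 6-point gap and no offset:
    #
    #     gc.set_dashes(0, [6.0, 6.0])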
def set_foreground(self, fg, isRGBA=False):
"""
Set the foreground color. fg can be a MATLAB format string, a
html hex color string, an rgb or rgba unit tuple, or a float between 0
and 1. In the latter case, grayscale is used.
If you know fg is rgba, set ``isRGBA=True`` for efficiency.
"""
self._orig_color = fg
if self._forced_alpha:
self._rgb = colors.colorConverter.to_rgba(fg, self._alpha)
elif isRGBA:
self._rgb = fg
else:
self._rgb = colors.colorConverter.to_rgba(fg)
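    # Example (sketch): two equivalent ways to set a green foreground; the
    # second skips color conversion because the tuple is already RGBA:
    #
    #     gc.set_foreground('#00ff00')
    #     gc.set_foreground((0.0, 1.0, 0.0, 1.0), isRGBA=True)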
def set_graylevel(self, frac):
"""
Set the foreground color to be a gray level with *frac*
"""
self._orig_color = frac
self._rgb = (frac, frac, frac, self._alpha)
def set_joinstyle(self, js):
"""
Set the join style to be one of ('miter', 'round', 'bevel')
"""
if js in ('miter', 'round', 'bevel'):
self._joinstyle = js
else:
raise ValueError('Unrecognized join style. Found %s' % js)
def set_linewidth(self, w):
"""
Set the linewidth in points
"""
self._linewidth = w
def set_linestyle(self, style):
"""
Set the linestyle to be one of ('solid', 'dashed', 'dashdot',
'dotted'). One may specify customized dash styles by providing
        a tuple of (offset, dash pairs). For example, the predefined
        linestyles have the following values:
'dashed' : (0, (6.0, 6.0)),
'dashdot' : (0, (3.0, 5.0, 1.0, 5.0)),
'dotted' : (0, (1.0, 3.0)),
"""
if style in self.dashd:
offset, dashes = self.dashd[style]
elif isinstance(style, tuple):
offset, dashes = style
else:
raise ValueError('Unrecognized linestyle: %s' % str(style))
self._linestyle = style
self.set_dashes(offset, dashes)
def set_url(self, url):
"""
Sets the url for links in compatible backends
"""
self._url = url
def set_gid(self, id):
"""
Sets the id.
"""
self._gid = id
def set_snap(self, snap):
"""
Sets the snap setting which may be:
* True: snap vertices to the nearest pixel center
* False: leave vertices as-is
* None: (auto) If the path contains only rectilinear line
segments, round to the nearest pixel center
"""
self._snap = snap
def set_hatch(self, hatch):
"""
Sets the hatch style for filling
"""
self._hatch = hatch
def get_hatch(self):
"""
Gets the current hatch style
"""
return self._hatch
def get_hatch_path(self, density=6.0):
"""
Returns a Path for the current hatch.
"""
if self._hatch is None:
return None
return Path.hatch(self._hatch, density)
def get_sketch_params(self):
"""
Returns the sketch parameters for the artist.
Returns
-------
sketch_params : tuple or `None`
A 3-tuple with the following elements:
* `scale`: The amplitude of the wiggle perpendicular to the
source line.
* `length`: The length of the wiggle along the line.
* `randomness`: The scale factor by which the length is
shrunken or expanded.
May return `None` if no sketch parameters were set.
"""
return self._sketch
def set_sketch_params(self, scale=None, length=None, randomness=None):
"""
        Sets the sketch parameters.
Parameters
----------
scale : float, optional
The amplitude of the wiggle perpendicular to the source
line, in pixels. If scale is `None`, or not provided, no
            sketch filter will be applied.
length : float, optional
The length of the wiggle along the line, in pixels
(default 128.0)
randomness : float, optional
The scale factor by which the length is shrunken or
expanded (default 16.0)
"""
if scale is None:
self._sketch = None
else:
self._sketch = (scale, length or 128.0, randomness or 16.0)
class TimerBase(object):
'''
    A base class for providing timer events, useful for things like animations.
Backends need to implement a few specific methods in order to use their
own timing mechanisms so that the timer events are integrated into their
event loops.
Mandatory functions that must be implemented:
* `_timer_start`: Contains backend-specific code for starting
the timer
* `_timer_stop`: Contains backend-specific code for stopping
the timer
Optional overrides:
* `_timer_set_single_shot`: Code for setting the timer to
single shot operating mode, if supported by the timer
object. If not, the `Timer` class itself will store the flag
and the `_on_timer` method should be overridden to support
such behavior.
* `_timer_set_interval`: Code for setting the interval on the
timer, if there is a method for doing so on the timer
object.
* `_on_timer`: This is the internal function that any timer
object should call, which will handle the task of running
all callbacks that have been set.
Attributes:
* `interval`: The time between timer events in
milliseconds. Default is 1000 ms.
* `single_shot`: Boolean flag indicating whether this timer
should operate as single shot (run once and then
stop). Defaults to `False`.
* `callbacks`: Stores list of (func, args) tuples that will be
called upon timer events. This list can be manipulated
directly, or the functions `add_callback` and
`remove_callback` can be used.
'''
def __init__(self, interval=None, callbacks=None):
        # Initialize an empty callbacks list and set up default settings if necessary
if callbacks is None:
self.callbacks = []
else:
self.callbacks = callbacks[:] # Create a copy
if interval is None:
self._interval = 1000
else:
self._interval = interval
self._single = False
# Default attribute for holding the GUI-specific timer object
self._timer = None
def __del__(self):
'Need to stop timer and possibly disconnect timer.'
self._timer_stop()
def start(self, interval=None):
'''
Start the timer object. `interval` is optional and will be used
to reset the timer interval first if provided.
'''
if interval is not None:
self._set_interval(interval)
self._timer_start()
def stop(self):
'''
Stop the timer.
'''
self._timer_stop()
def _timer_start(self):
pass
def _timer_stop(self):
pass
def _get_interval(self):
return self._interval
def _set_interval(self, interval):
# Force to int since none of the backends actually support fractional
# milliseconds, and some error or give warnings.
interval = int(interval)
self._interval = interval
self._timer_set_interval()
interval = property(_get_interval, _set_interval)
def _get_single_shot(self):
return self._single
def _set_single_shot(self, ss=True):
self._single = ss
self._timer_set_single_shot()
single_shot = property(_get_single_shot, _set_single_shot)
def add_callback(self, func, *args, **kwargs):
'''
Register `func` to be called by timer when the event fires. Any
additional arguments provided will be passed to `func`.
'''
self.callbacks.append((func, args, kwargs))
def remove_callback(self, func, *args, **kwargs):
'''
Remove `func` from list of callbacks. `args` and `kwargs` are optional
and used to distinguish between copies of the same function registered
to be called with different arguments.
'''
if args or kwargs:
self.callbacks.remove((func, args, kwargs))
else:
funcs = [c[0] for c in self.callbacks]
if func in funcs:
self.callbacks.pop(funcs.index(func))
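    # Usage sketch (assumes a concrete timer obtained from a backend canvas,
    # e.g. ``timer = fig.canvas.new_timer(interval=50)``; ``update`` is a
    # hypothetical user callback):
    #
    #     timer.add_callback(update, frame_data)
    #     timer.single_shot = False
    #     timer.start()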
def _timer_set_interval(self):
'Used to set interval on underlying timer object.'
pass
def _timer_set_single_shot(self):
'Used to set single shot on underlying timer object.'
pass
def _on_timer(self):
'''
        Runs all functions that have been registered as callbacks. Functions
can return False (or 0) if they should not be called any more. If there
are no callbacks, the timer is automatically stopped.
'''
for func, args, kwargs in self.callbacks:
ret = func(*args, **kwargs)
# docstring above explains why we use `if ret == False` here,
# instead of `if not ret`.
if ret == False:
self.callbacks.remove((func, args, kwargs))
if len(self.callbacks) == 0:
self.stop()
class Event:
"""
A matplotlib event. Attach additional attributes as defined in
:meth:`FigureCanvasBase.mpl_connect`. The following attributes
are defined and shown with their default values
*name*
the event name
*canvas*
the FigureCanvas instance generating the event
*guiEvent*
the GUI event that triggered the matplotlib event
"""
def __init__(self, name, canvas, guiEvent=None):
self.name = name
self.canvas = canvas
self.guiEvent = guiEvent
class IdleEvent(Event):
"""
An event triggered by the GUI backend when it is idle -- useful
for passive animation
"""
pass
class DrawEvent(Event):
"""
An event triggered by a draw operation on the canvas
In addition to the :class:`Event` attributes, the following event
attributes are defined:
*renderer*
the :class:`RendererBase` instance for the draw event
"""
def __init__(self, name, canvas, renderer):
Event.__init__(self, name, canvas)
self.renderer = renderer
class ResizeEvent(Event):
"""
An event triggered by a canvas resize
In addition to the :class:`Event` attributes, the following event
attributes are defined:
*width*
width of the canvas in pixels
*height*
height of the canvas in pixels
"""
def __init__(self, name, canvas):
Event.__init__(self, name, canvas)
self.width, self.height = canvas.get_width_height()
class CloseEvent(Event):
"""
An event triggered by a figure being closed
In addition to the :class:`Event` attributes, the following event
attributes are defined:
"""
def __init__(self, name, canvas, guiEvent=None):
Event.__init__(self, name, canvas, guiEvent)
class LocationEvent(Event):
"""
An event that has a screen location
The following additional attributes are defined and shown with
their default values.
In addition to the :class:`Event` attributes, the following
event attributes are defined:
*x*
x position - pixels from left of canvas
*y*
y position - pixels from bottom of canvas
*inaxes*
the :class:`~matplotlib.axes.Axes` instance if mouse is over axes
*xdata*
x coord of mouse in data coords
*ydata*
y coord of mouse in data coords
"""
x = None # x position - pixels from left of canvas
    y = None       # y position - pixels from bottom of canvas
    inaxes = None  # the Axes instance if mouse is over axes
xdata = None # x coord of mouse in data coords
ydata = None # y coord of mouse in data coords
# the last event that was triggered before this one
lastevent = None
def __init__(self, name, canvas, x, y, guiEvent=None):
"""
*x*, *y* in figure coords, 0,0 = bottom, left
"""
Event.__init__(self, name, canvas, guiEvent=guiEvent)
self.x = x
self.y = y
if x is None or y is None:
# cannot check if event was in axes if no x,y info
self.inaxes = None
self._update_enter_leave()
return
# Find all axes containing the mouse
if self.canvas.mouse_grabber is None:
axes_list = [a for a in self.canvas.figure.get_axes()
if a.in_axes(self)]
else:
axes_list = [self.canvas.mouse_grabber]
if len(axes_list) == 0: # None found
self.inaxes = None
self._update_enter_leave()
return
elif (len(axes_list) > 1): # Overlap, get the highest zorder
axes_list.sort(key=lambda x: x.zorder)
self.inaxes = axes_list[-1] # Use the highest zorder
else: # Just found one hit
self.inaxes = axes_list[0]
try:
trans = self.inaxes.transData.inverted()
xdata, ydata = trans.transform_point((x, y))
except ValueError:
self.xdata = None
self.ydata = None
else:
self.xdata = xdata
self.ydata = ydata
self._update_enter_leave()
def _update_enter_leave(self):
'process the figure/axes enter leave events'
if LocationEvent.lastevent is not None:
last = LocationEvent.lastevent
if last.inaxes != self.inaxes:
# process axes enter/leave events
try:
if last.inaxes is not None:
last.canvas.callbacks.process('axes_leave_event', last)
except:
pass
# See ticket 2901582.
# I think this is a valid exception to the rule
# against catching all exceptions; if anything goes
# wrong, we simply want to move on and process the
# current event.
if self.inaxes is not None:
self.canvas.callbacks.process('axes_enter_event', self)
else:
# process a figure enter event
if self.inaxes is not None:
self.canvas.callbacks.process('axes_enter_event', self)
LocationEvent.lastevent = self
class MouseEvent(LocationEvent):
"""
A mouse event ('button_press_event',
'button_release_event',
'scroll_event',
'motion_notify_event').
In addition to the :class:`Event` and :class:`LocationEvent`
attributes, the following attributes are defined:
*button*
button pressed None, 1, 2, 3, 'up', 'down' (up and down are used
for scroll events)
*key*
the key depressed when the mouse event triggered (see
:class:`KeyEvent`)
*step*
number of scroll steps (positive for 'up', negative for 'down')
Example usage::
def on_press(event):
print('you pressed', event.button, event.xdata, event.ydata)
cid = fig.canvas.mpl_connect('button_press_event', on_press)
"""
x = None # x position - pixels from left of canvas
    y = None       # y position - pixels from bottom of canvas
button = None # button pressed None, 1, 2, 3
dblclick = None # whether or not the event is the result of a double click
    inaxes = None  # the Axes instance if mouse is over axes
xdata = None # x coord of mouse in data coords
ydata = None # y coord of mouse in data coords
step = None # scroll steps for scroll events
def __init__(self, name, canvas, x, y, button=None, key=None,
step=0, dblclick=False, guiEvent=None):
"""
x, y in figure coords, 0,0 = bottom, left
button pressed None, 1, 2, 3, 'up', 'down'
"""
LocationEvent.__init__(self, name, canvas, x, y, guiEvent=guiEvent)
self.button = button
self.key = key
self.step = step
self.dblclick = dblclick
def __str__(self):
return ("MPL MouseEvent: xy=(%d,%d) xydata=(%s,%s) button=%s " +
"dblclick=%s inaxes=%s") % (self.x, self.y, self.xdata,
self.ydata, self.button,
self.dblclick, self.inaxes)
class PickEvent(Event):
"""
a pick event, fired when the user picks a location on the canvas
sufficiently close to an artist.
Attrs: all the :class:`Event` attributes plus
*mouseevent*
the :class:`MouseEvent` that generated the pick
*artist*
the :class:`~matplotlib.artist.Artist` picked
other
extra class dependent attrs -- e.g., a
:class:`~matplotlib.lines.Line2D` pick may define different
extra attributes than a
:class:`~matplotlib.collections.PatchCollection` pick event
Example usage::
line, = ax.plot(rand(100), 'o', picker=5) # 5 points tolerance
def on_pick(event):
thisline = event.artist
xdata, ydata = thisline.get_data()
ind = event.ind
print('on pick line:', zip(xdata[ind], ydata[ind]))
cid = fig.canvas.mpl_connect('pick_event', on_pick)
"""
def __init__(self, name, canvas, mouseevent, artist,
guiEvent=None, **kwargs):
Event.__init__(self, name, canvas, guiEvent)
self.mouseevent = mouseevent
self.artist = artist
self.__dict__.update(kwargs)
class KeyEvent(LocationEvent):
"""
A key event (key press, key release).
Attach additional attributes as defined in
:meth:`FigureCanvasBase.mpl_connect`.
In addition to the :class:`Event` and :class:`LocationEvent`
attributes, the following attributes are defined:
*key*
the key(s) pressed. Could be **None**, a single case sensitive ascii
character ("g", "G", "#", etc.), a special key
("control", "shift", "f1", "up", etc.) or a
combination of the above (e.g., "ctrl+alt+g", "ctrl+alt+G").
.. note::
Modifier keys will be prefixed to the pressed key and will be in the
order "ctrl", "alt", "super". The exception to this rule is when the
pressed key is itself a modifier key, therefore "ctrl+alt" and
"alt+control" can both be valid key values.
Example usage::
def on_key(event):
print('you pressed', event.key, event.xdata, event.ydata)
cid = fig.canvas.mpl_connect('key_press_event', on_key)
"""
def __init__(self, name, canvas, key, x=0, y=0, guiEvent=None):
LocationEvent.__init__(self, name, canvas, x, y, guiEvent=guiEvent)
self.key = key
class FigureCanvasBase(object):
"""
The canvas the figure renders into.
Public attributes
*figure*
A :class:`matplotlib.figure.Figure` instance
"""
events = [
'resize_event',
'draw_event',
'key_press_event',
'key_release_event',
'button_press_event',
'button_release_event',
'scroll_event',
'motion_notify_event',
'pick_event',
'idle_event',
'figure_enter_event',
'figure_leave_event',
'axes_enter_event',
'axes_leave_event',
'close_event'
]
supports_blit = True
fixed_dpi = None
filetypes = _default_filetypes
if _has_pil:
# JPEG support
register_backend('jpg', 'matplotlib.backends.backend_agg',
'Joint Photographic Experts Group')
register_backend('jpeg', 'matplotlib.backends.backend_agg',
'Joint Photographic Experts Group')
# TIFF support
register_backend('tif', 'matplotlib.backends.backend_agg',
'Tagged Image File Format')
register_backend('tiff', 'matplotlib.backends.backend_agg',
'Tagged Image File Format')
def __init__(self, figure):
figure.set_canvas(self)
self.figure = figure
# a dictionary from event name to a dictionary that maps cid->func
self.callbacks = cbook.CallbackRegistry()
self.widgetlock = widgets.LockDraw()
self._button = None # the button pressed
self._key = None # the key pressed
self._lastx, self._lasty = None, None
self.button_pick_id = self.mpl_connect('button_press_event', self.pick)
self.scroll_pick_id = self.mpl_connect('scroll_event', self.pick)
self.mouse_grabber = None # the axes currently grabbing mouse
self.toolbar = None # NavigationToolbar2 will set me
self._is_saving = False
if False:
## highlight the artists that are hit
self.mpl_connect('motion_notify_event', self.onHilite)
## delete the artists that are clicked on
#self.mpl_disconnect(self.button_pick_id)
#self.mpl_connect('button_press_event',self.onRemove)
def is_saving(self):
"""
Returns `True` when the renderer is in the process of saving
to a file, rather than rendering for an on-screen buffer.
"""
return self._is_saving
def onRemove(self, ev):
"""
Mouse event processor which removes the top artist
        under the cursor. Connect this to the 'button_press_event'
        using::
            canvas.mpl_connect('button_press_event', canvas.onRemove)
"""
def sort_artists(artists):
# This depends on stable sort and artists returned
# from get_children in z order.
L = [(h.zorder, h) for h in artists]
L.sort()
return [h for zorder, h in L]
# Find the top artist under the cursor
under = sort_artists(self.figure.hitlist(ev))
h = None
if under:
h = under[-1]
# Try deleting that artist, or its parent if you
# can't delete the artist
while h:
if h.remove():
self.draw_idle()
break
parent = None
for p in under:
if h in p.get_children():
parent = p
break
h = parent
def onHilite(self, ev):
"""
Mouse event processor which highlights the artists
under the cursor. Connect this to the 'motion_notify_event'
using::
canvas.mpl_connect('motion_notify_event',canvas.onHilite)
"""
if not hasattr(self, '_active'):
self._active = dict()
under = self.figure.hitlist(ev)
enter = [a for a in under if a not in self._active]
leave = [a for a in self._active if a not in under]
#print "within:"," ".join([str(x) for x in under])
#print "entering:",[str(a) for a in enter]
#print "leaving:",[str(a) for a in leave]
# On leave restore the captured colour
for a in leave:
if hasattr(a, 'get_color'):
a.set_color(self._active[a])
elif hasattr(a, 'get_edgecolor'):
a.set_edgecolor(self._active[a][0])
a.set_facecolor(self._active[a][1])
del self._active[a]
# On enter, capture the color and repaint the artist
# with the highlight colour. Capturing colour has to
# be done first in case the parent recolouring affects
# the child.
for a in enter:
if hasattr(a, 'get_color'):
self._active[a] = a.get_color()
elif hasattr(a, 'get_edgecolor'):
self._active[a] = (a.get_edgecolor(), a.get_facecolor())
else:
self._active[a] = None
for a in enter:
if hasattr(a, 'get_color'):
a.set_color('red')
elif hasattr(a, 'get_edgecolor'):
a.set_edgecolor('red')
a.set_facecolor('lightblue')
else:
self._active[a] = None
self.draw_idle()
def pick(self, mouseevent):
if not self.widgetlock.locked():
self.figure.pick(mouseevent)
def blit(self, bbox=None):
"""
blit the canvas in bbox (default entire canvas)
"""
pass
def resize(self, w, h):
"""
set the canvas size in pixels
"""
pass
def draw_event(self, renderer):
"""
        This method will call all functions connected to the
'draw_event' with a :class:`DrawEvent`
"""
s = 'draw_event'
event = DrawEvent(s, self, renderer)
self.callbacks.process(s, event)
def resize_event(self):
"""
        This method will call all functions connected to the
'resize_event' with a :class:`ResizeEvent`
"""
s = 'resize_event'
event = ResizeEvent(s, self)
self.callbacks.process(s, event)
def close_event(self, guiEvent=None):
"""
        This method will call all functions connected to the
'close_event' with a :class:`CloseEvent`
"""
s = 'close_event'
try:
event = CloseEvent(s, self, guiEvent=guiEvent)
self.callbacks.process(s, event)
except (TypeError, AttributeError):
pass
# Suppress the TypeError when the python session is being killed.
# It may be that a better solution would be a mechanism to
# disconnect all callbacks upon shutdown.
# AttributeError occurs on OSX with qt4agg upon exiting
# with an open window; 'callbacks' attribute no longer exists.
def key_press_event(self, key, guiEvent=None):
"""
        This method will call all functions connected to the
'key_press_event' with a :class:`KeyEvent`
"""
self._key = key
s = 'key_press_event'
event = KeyEvent(
s, self, key, self._lastx, self._lasty, guiEvent=guiEvent)
self.callbacks.process(s, event)
def key_release_event(self, key, guiEvent=None):
"""
        This method will call all functions connected to the
'key_release_event' with a :class:`KeyEvent`
"""
s = 'key_release_event'
event = KeyEvent(
s, self, key, self._lastx, self._lasty, guiEvent=guiEvent)
self.callbacks.process(s, event)
self._key = None
def pick_event(self, mouseevent, artist, **kwargs):
"""
This method will be called by artists who are picked and will
        fire off :class:`PickEvent` callbacks to registered listeners
"""
s = 'pick_event'
event = PickEvent(s, self, mouseevent, artist, **kwargs)
self.callbacks.process(s, event)
def scroll_event(self, x, y, step, guiEvent=None):
"""
Backend derived classes should call this function on any
scroll wheel event. x,y are the canvas coords: 0,0 is lower,
left. button and key are as defined in MouseEvent.
        This method will call all functions connected to the
'scroll_event' with a :class:`MouseEvent` instance.
"""
if step >= 0:
self._button = 'up'
else:
self._button = 'down'
s = 'scroll_event'
mouseevent = MouseEvent(s, self, x, y, self._button, self._key,
step=step, guiEvent=guiEvent)
self.callbacks.process(s, mouseevent)
def button_press_event(self, x, y, button, dblclick=False, guiEvent=None):
"""
Backend derived classes should call this function on any mouse
button press. x,y are the canvas coords: 0,0 is lower, left.
button and key are as defined in :class:`MouseEvent`.
        This method will call all functions connected to the
'button_press_event' with a :class:`MouseEvent` instance.
"""
self._button = button
s = 'button_press_event'
mouseevent = MouseEvent(s, self, x, y, button, self._key,
dblclick=dblclick, guiEvent=guiEvent)
self.callbacks.process(s, mouseevent)
def button_release_event(self, x, y, button, guiEvent=None):
"""
Backend derived classes should call this function on any mouse
button release.
*x*
the canvas coordinates where 0=left
*y*
the canvas coordinates where 0=bottom
*guiEvent*
the native UI event that generated the mpl event
        This method will call all functions connected to the
'button_release_event' with a :class:`MouseEvent` instance.
"""
s = 'button_release_event'
event = MouseEvent(s, self, x, y, button, self._key, guiEvent=guiEvent)
self.callbacks.process(s, event)
self._button = None
def motion_notify_event(self, x, y, guiEvent=None):
"""
Backend derived classes should call this function on any
motion-notify-event.
*x*
the canvas coordinates where 0=left
*y*
the canvas coordinates where 0=bottom
*guiEvent*
the native UI event that generated the mpl event
        This method will call all functions connected to the
'motion_notify_event' with a :class:`MouseEvent` instance.
"""
self._lastx, self._lasty = x, y
s = 'motion_notify_event'
event = MouseEvent(s, self, x, y, self._button, self._key,
guiEvent=guiEvent)
self.callbacks.process(s, event)
def leave_notify_event(self, guiEvent=None):
"""
Backend derived classes should call this function when leaving
canvas
*guiEvent*
the native UI event that generated the mpl event
"""
self.callbacks.process('figure_leave_event', LocationEvent.lastevent)
LocationEvent.lastevent = None
self._lastx, self._lasty = None, None
def enter_notify_event(self, guiEvent=None, xy=None):
"""
Backend derived classes should call this function when entering
canvas
*guiEvent*
the native UI event that generated the mpl event
*xy*
the coordinate location of the pointer when the canvas is
entered
"""
if xy is not None:
x, y = xy
self._lastx, self._lasty = x, y
event = Event('figure_enter_event', self, guiEvent)
self.callbacks.process('figure_enter_event', event)
def idle_event(self, guiEvent=None):
"""Called when GUI is idle."""
s = 'idle_event'
event = IdleEvent(s, self, guiEvent=guiEvent)
self.callbacks.process(s, event)
def grab_mouse(self, ax):
"""
Set the child axes which are currently grabbing the mouse events.
Usually called by the widgets themselves.
It is an error to call this if the mouse is already grabbed by
another axes.
"""
if self.mouse_grabber not in (None, ax):
            raise RuntimeError('two different axes attempted to grab mouse input')
self.mouse_grabber = ax
def release_mouse(self, ax):
"""
Release the mouse grab held by the axes, ax.
Usually called by the widgets.
        It is OK to call this even if *ax* doesn't have the mouse
grab currently.
"""
if self.mouse_grabber is ax:
self.mouse_grabber = None
def draw(self, *args, **kwargs):
"""
Render the :class:`~matplotlib.figure.Figure`
"""
pass
def draw_idle(self, *args, **kwargs):
"""
        :meth:`draw` only if idle; defaults to draw but backends can override
"""
self.draw(*args, **kwargs)
def draw_cursor(self, event):
"""
Draw a cursor in the event.axes if inaxes is not None. Use
native GUI drawing for efficiency if possible
"""
pass
def get_width_height(self):
"""
Return the figure width and height in points or pixels
(depending on the backend), truncated to integers
"""
return int(self.figure.bbox.width), int(self.figure.bbox.height)
@classmethod
def get_supported_filetypes(cls):
"""Return dict of savefig file formats supported by this backend"""
return cls.filetypes
@classmethod
def get_supported_filetypes_grouped(cls):
"""Return a dict of savefig file formats supported by this backend,
where the keys are a file type name, such as 'Joint Photographic
Experts Group', and the values are a list of filename extensions used
for that filetype, such as ['jpg', 'jpeg']."""
groupings = {}
for ext, name in six.iteritems(cls.filetypes):
groupings.setdefault(name, []).append(ext)
groupings[name].sort()
return groupings
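    # Example (sketch): with the default filetypes this returns something like
    #
    #     {'Portable Document Format': ['pdf'],
    #      'Raw RGBA bitmap': ['raw', 'rgba'],
    #      'Scalable Vector Graphics': ['svg', 'svgz'],
    #      ...}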
def _get_output_canvas(self, format):
"""Return a canvas that is suitable for saving figures to a specified
file format. If necessary, this function will switch to a registered
backend that supports the format.
"""
method_name = 'print_%s' % format
# check if this canvas supports the requested format
if hasattr(self, method_name):
return self
# check if there is a default canvas for the requested format
canvas_class = get_registered_canvas_class(format)
if canvas_class:
return self.switch_backends(canvas_class)
# else report error for unsupported format
formats = sorted(self.get_supported_filetypes())
raise ValueError('Format "%s" is not supported.\n'
'Supported formats: '
'%s.' % (format, ', '.join(formats)))
def print_figure(self, filename, dpi=None, facecolor='w', edgecolor='w',
orientation='portrait', format=None, **kwargs):
"""
Render the figure to hardcopy. Set the figure patch face and edge
colors. This is useful because some of the GUIs have a gray figure
face color background and you'll probably want to override this on
hardcopy.
Arguments are:
*filename*
can also be a file object on image backends
*orientation*
only currently applies to PostScript printing.
*dpi*
the dots per inch to save the figure in; if None, use savefig.dpi
*facecolor*
the facecolor of the figure
*edgecolor*
the edgecolor of the figure
*orientation*
            'landscape' | 'portrait' (not supported on all backends)
*format*
when set, forcibly set the file format to save to
*bbox_inches*
Bbox in inches. Only the given portion of the figure is
saved. If 'tight', try to figure out the tight bbox of
the figure. If None, use savefig.bbox
*pad_inches*
Amount of padding around the figure when bbox_inches is
'tight'. If None, use savefig.pad_inches
*bbox_extra_artists*
A list of extra artists that will be considered when the
tight bbox is calculated.
"""
if format is None:
# get format from filename, or from backend's default filetype
if cbook.is_string_like(filename):
format = os.path.splitext(filename)[1][1:]
if format is None or format == '':
format = self.get_default_filetype()
if cbook.is_string_like(filename):
filename = filename.rstrip('.') + '.' + format
format = format.lower()
# get canvas object and print method for format
canvas = self._get_output_canvas(format)
print_method = getattr(canvas, 'print_%s' % format)
if dpi is None:
dpi = rcParams['savefig.dpi']
origDPI = self.figure.dpi
origfacecolor = self.figure.get_facecolor()
origedgecolor = self.figure.get_edgecolor()
self.figure.dpi = dpi
self.figure.set_facecolor(facecolor)
self.figure.set_edgecolor(edgecolor)
bbox_inches = kwargs.pop("bbox_inches", None)
if bbox_inches is None:
bbox_inches = rcParams['savefig.bbox']
if bbox_inches:
# call adjust_bbox to save only the given area
if bbox_inches == "tight":
# when bbox_inches == "tight", it saves the figure
# twice. The first save command is just to estimate
# the bounding box of the figure. A stringIO object is
# used as a temporary file object, but it causes a
# problem for some backends (ps backend with
# usetex=True) if they expect a filename, not a
# file-like object. As I think it is best to change
# the backend to support file-like object, i'm going
# to leave it as it is. However, a better solution
# than stringIO seems to be needed. -JJL
#result = getattr(self, method_name)
result = print_method(
io.BytesIO(),
dpi=dpi,
facecolor=facecolor,
edgecolor=edgecolor,
orientation=orientation,
dryrun=True,
**kwargs)
renderer = self.figure._cachedRenderer
bbox_inches = self.figure.get_tightbbox(renderer)
bbox_artists = kwargs.pop("bbox_extra_artists", None)
if bbox_artists is None:
bbox_artists = self.figure.get_default_bbox_extra_artists()
bbox_filtered = []
for a in bbox_artists:
bbox = a.get_window_extent(renderer)
if a.get_clip_on():
clip_box = a.get_clip_box()
if clip_box is not None:
bbox = Bbox.intersection(bbox, clip_box)
clip_path = a.get_clip_path()
if clip_path is not None and bbox is not None:
clip_path = clip_path.get_fully_transformed_path()
bbox = Bbox.intersection(bbox,
clip_path.get_extents())
if bbox is not None and (bbox.width != 0 or
bbox.height != 0):
bbox_filtered.append(bbox)
if bbox_filtered:
_bbox = Bbox.union(bbox_filtered)
trans = Affine2D().scale(1.0 / self.figure.dpi)
bbox_extra = TransformedBbox(_bbox, trans)
bbox_inches = Bbox.union([bbox_inches, bbox_extra])
pad = kwargs.pop("pad_inches", None)
if pad is None:
pad = rcParams['savefig.pad_inches']
bbox_inches = bbox_inches.padded(pad)
restore_bbox = tight_bbox.adjust_bbox(self.figure, bbox_inches,
canvas.fixed_dpi)
_bbox_inches_restore = (bbox_inches, restore_bbox)
else:
_bbox_inches_restore = None
self._is_saving = True
try:
#result = getattr(self, method_name)(
result = print_method(
filename,
dpi=dpi,
facecolor=facecolor,
edgecolor=edgecolor,
orientation=orientation,
bbox_inches_restore=_bbox_inches_restore,
**kwargs)
finally:
if bbox_inches and restore_bbox:
restore_bbox()
self.figure.dpi = origDPI
self.figure.set_facecolor(origfacecolor)
self.figure.set_edgecolor(origedgecolor)
self.figure.set_canvas(self)
self._is_saving = False
#self.figure.canvas.draw() ## seems superfluous
return result
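# Usage sketch (added for illustration, not part of the original source): saving
# a figure through the canvas directly.  The figure contents and the file name
# 'example.png' are assumptions.
#
#     from matplotlib.figure import Figure
#     from matplotlib.backends.backend_agg import FigureCanvasAgg
#
#     fig = Figure()
#     ax = fig.add_subplot(111)
#     ax.plot([0, 1, 2], [0, 1, 4])
#     canvas = FigureCanvasAgg(fig)
#     # format is inferred from the extension; bbox_inches='tight' crops the
#     # output to the drawn artists plus pad_inches of padding
#     canvas.print_figure('example.png', dpi=150, bbox_inches='tight')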
@classmethod
def get_default_filetype(cls):
"""
Get the default savefig file format as specified in rcParam
``savefig.format``. Returned string excludes period. Overridden
in backends that only support a single file type.
"""
return rcParams['savefig.format']
def get_window_title(self):
"""
Get the title text of the window containing the figure.
Return None if there is no window (e.g., a PS backend).
"""
if hasattr(self, "manager"):
return self.manager.get_window_title()
def set_window_title(self, title):
"""
Set the title text of the window containing the figure. Note that
this has no effect if there is no window (e.g., a PS backend).
"""
if hasattr(self, "manager"):
self.manager.set_window_title(title)
def get_default_filename(self):
"""
Return a string, which includes extension, suitable for use as
a default filename.
"""
default_filename = self.get_window_title() or 'image'
default_filename = default_filename.lower().replace(' ', '_')
return default_filename + '.' + self.get_default_filetype()
def switch_backends(self, FigureCanvasClass):
"""
Instantiate an instance of FigureCanvasClass
This is used for backend switching, e.g., to instantiate a
FigureCanvasPS from a FigureCanvasGTK. Note, deep copying is
not done, so any changes to one of the instances (e.g., setting
figure size or line props), will be reflected in the other
"""
newCanvas = FigureCanvasClass(self.figure)
newCanvas._is_saving = self._is_saving
return newCanvas
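# Usage sketch (added for illustration, not part of the original source):
# producing PostScript output from an interactive canvas by switching to the
# PS canvas class; the file name is an assumption.
#
#     from matplotlib.backends.backend_ps import FigureCanvasPS
#     ps_canvas = canvas.switch_backends(FigureCanvasPS)
#     ps_canvas.print_figure('hardcopy.ps')
#
# Both canvases keep referring to the same Figure instance, so any change made
# through one is visible through the other.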
def mpl_connect(self, s, func):
"""
Connect event with string *s* to *func*. The signature of *func* is::
def func(event)
where event is a :class:`matplotlib.backend_bases.Event`. The
following events are recognized
- 'button_press_event'
- 'button_release_event'
- 'draw_event'
- 'key_press_event'
- 'key_release_event'
- 'motion_notify_event'
- 'pick_event'
- 'resize_event'
- 'scroll_event'
- 'figure_enter_event',
- 'figure_leave_event',
- 'axes_enter_event',
- 'axes_leave_event'
- 'close_event'
For the location events (button and key press/release), if the
mouse is over the axes, the variable ``event.inaxes`` will be
set to the :class:`~matplotlib.axes.Axes` the event occurs is
over, and additionally, the variables ``event.xdata`` and
``event.ydata`` will be defined. This is the mouse location
in data coords. See
:class:`~matplotlib.backend_bases.KeyEvent` and
:class:`~matplotlib.backend_bases.MouseEvent` for more info.
Return value is a connection id that can be used with
:meth:`~matplotlib.backend_bases.Event.mpl_disconnect`.
Example usage::
def on_press(event):
print('you pressed', event.button, event.xdata, event.ydata)
cid = canvas.mpl_connect('button_press_event', on_press)
"""
return self.callbacks.connect(s, func)
def mpl_disconnect(self, cid):
"""
Disconnect callback id cid
Example usage::
cid = canvas.mpl_connect('button_press_event', on_press)
#...later
canvas.mpl_disconnect(cid)
"""
return self.callbacks.disconnect(cid)
def new_timer(self, *args, **kwargs):
"""
Creates a new backend-specific subclass of
:class:`backend_bases.Timer`. This is useful for getting periodic
events through the backend's native event loop. Implemented only for
backends with GUIs.
optional arguments:
*interval*
Timer interval in milliseconds
*callbacks*
Sequence of (func, args, kwargs) where func(*args, **kwargs) will
be executed by the timer every *interval*.
"""
return TimerBase(*args, **kwargs)
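# Usage sketch (added for illustration, not part of the original source): a
# periodic callback driven by the backend's native event loop; ``update`` is a
# hypothetical function and this only works on backends with a GUI event loop.
#
#     def update():
#         print('tick')
#
#     timer = canvas.new_timer(interval=500, callbacks=[(update, [], {})])
#     timer.start()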
def flush_events(self):
"""
Flush the GUI events for the figure. Implemented only for
backends with GUIs.
"""
raise NotImplementedError
def start_event_loop(self, timeout):
"""
Start an event loop. This is used to start a blocking event
loop so that interactive functions, such as ginput and
waitforbuttonpress, can wait for events. This should not be
confused with the main GUI event loop, which is always running
and has nothing to do with this.
This is implemented only for backends with GUIs.
"""
raise NotImplementedError
def stop_event_loop(self):
"""
Stop an event loop. This is used to stop a blocking event
loop so that interactive functions, such as ginput and
waitforbuttonpress, can wait for events.
This is implemented only for backends with GUIs.
"""
raise NotImplementedError
def start_event_loop_default(self, timeout=0):
"""
Start an event loop. This is used to start a blocking event
loop so that interactive functions, such as ginput and
waitforbuttonpress, can wait for events. This should not be
confused with the main GUI event loop, which is always running
and has nothing to do with this.
This function provides default event loop functionality based
on time.sleep that is meant to be used until event loop
functions for each of the GUI backends can be written. As
such, it raises a deprecation warning.
Call signature::
start_event_loop_default(self,timeout=0)
This call blocks until a callback function triggers
stop_event_loop() or *timeout* is reached. If *timeout* is
<=0, never timeout.
"""
str = "Using default event loop until function specific"
str += " to this GUI is implemented"
warnings.warn(str, mplDeprecation)
if timeout <= 0:
timeout = np.inf
timestep = 0.01
counter = 0
self._looping = True
while self._looping and counter * timestep < timeout:
self.flush_events()
time.sleep(timestep)
counter += 1
def stop_event_loop_default(self):
"""
Stop an event loop. This is used to stop a blocking event
loop so that interactive functions, such as ginput and
waitforbuttonpress, can wait for events.
Call signature::
stop_event_loop_default(self)
"""
self._looping = False
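# Usage sketch (added for illustration, not part of the original source):
# blocking until the first mouse click, in the spirit of ginput and
# waitforbuttonpress; assumes a GUI backend where start/stop_event_loop are
# implemented.
#
#     clicks = []
#
#     def on_click(event):
#         clicks.append((event.xdata, event.ydata))
#         canvas.stop_event_loop()
#
#     cid = canvas.mpl_connect('button_press_event', on_click)
#     canvas.start_event_loop(timeout=30)   # returns after a click or 30 s
#     canvas.mpl_disconnect(cid)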
def key_press_handler(event, canvas, toolbar=None):
"""
Implement the default mpl key bindings for the canvas and toolbar
described at :ref:`key-event-handling`
*event*
a :class:`KeyEvent` instance
*canvas*
a :class:`FigureCanvasBase` instance
*toolbar*
a :class:`NavigationToolbar2` instance
"""
# these bindings happen whether you are over an axes or not
if event.key is None:
return
# Load key-mappings from your matplotlibrc file.
fullscreen_keys = rcParams['keymap.fullscreen']
home_keys = rcParams['keymap.home']
back_keys = rcParams['keymap.back']
forward_keys = rcParams['keymap.forward']
pan_keys = rcParams['keymap.pan']
zoom_keys = rcParams['keymap.zoom']
save_keys = rcParams['keymap.save']
quit_keys = rcParams['keymap.quit']
grid_keys = rcParams['keymap.grid']
toggle_yscale_keys = rcParams['keymap.yscale']
toggle_xscale_keys = rcParams['keymap.xscale']
all = rcParams['keymap.all_axes']
# toggle fullscreen mode (default key 'f')
if event.key in fullscreen_keys:
canvas.manager.full_screen_toggle()
# quit the figure (default key 'ctrl+w')
if event.key in quit_keys:
Gcf.destroy_fig(canvas.figure)
if toolbar is not None:
# home or reset mnemonic (default key 'h', 'home' and 'r')
if event.key in home_keys:
toolbar.home()
# forward / backward keys to enable left handed quick navigation
# (default key for backward: 'left', 'backspace' and 'c')
elif event.key in back_keys:
toolbar.back()
# (default key for forward: 'right' and 'v')
elif event.key in forward_keys:
toolbar.forward()
# pan mnemonic (default key 'p')
elif event.key in pan_keys:
toolbar.pan()
toolbar._set_cursor(event)
# zoom mnemonic (default key 'o')
elif event.key in zoom_keys:
toolbar.zoom()
toolbar._set_cursor(event)
# saving current figure (default key 's')
elif event.key in save_keys:
toolbar.save_figure()
if event.inaxes is None:
return
# these bindings require the mouse to be over an axes to trigger
# switching on/off a grid in current axes (default key 'g')
if event.key in grid_keys:
event.inaxes.grid()
canvas.draw()
# toggle scaling of y-axes between 'log' and 'linear' (default key 'l')
elif event.key in toggle_yscale_keys:
ax = event.inaxes
scale = ax.get_yscale()
if scale == 'log':
ax.set_yscale('linear')
ax.figure.canvas.draw()
elif scale == 'linear':
ax.set_yscale('log')
ax.figure.canvas.draw()
# toggle scaling of x-axes between 'log' and 'linear' (default key 'k')
elif event.key in toggle_xscale_keys:
ax = event.inaxes
scalex = ax.get_xscale()
if scalex == 'log':
ax.set_xscale('linear')
ax.figure.canvas.draw()
elif scalex == 'linear':
ax.set_xscale('log')
ax.figure.canvas.draw()
elif (event.key.isdigit() and event.key != '0') or event.key in all:
# keys in list 'all' enables all axes (default key 'a'),
# otherwise if key is a number only enable this particular axes
# if it was the axes, where the event was raised
if not (event.key in all):
n = int(event.key) - 1
for i, a in enumerate(canvas.figure.get_axes()):
# consider axes, in which the event was raised
# FIXME: Why only this axes?
if event.x is not None and event.y is not None \
and a.in_axes(event):
if event.key in all:
a.set_navigate(True)
else:
a.set_navigate(i == n)
class NonGuiException(Exception):
pass
class FigureManagerBase(object):
"""
Helper class for pyplot mode, wraps everything up into a neat bundle
Public attributes:
*canvas*
A :class:`FigureCanvasBase` instance
*num*
The figure number
"""
def __init__(self, canvas, num):
self.canvas = canvas
canvas.manager = self # store a pointer to parent
self.num = num
self.key_press_handler_id = self.canvas.mpl_connect('key_press_event',
self.key_press)
"""
The returned id from connecting the default key handler via
:meth:`FigureCanvasBase.mpl_connect`.
To disable default key press handling::
manager, canvas = figure.canvas.manager, figure.canvas
canvas.mpl_disconnect(manager.key_press_handler_id)
"""
def show(self):
"""
For GUI backends, show the figure window and redraw.
For non-GUI backends, raise an exception to be caught
by :meth:`~matplotlib.figure.Figure.show`, for an
optional warning.
"""
raise NonGuiException()
def destroy(self):
pass
def full_screen_toggle(self):
pass
def resize(self, w, h):
""""For gui backends, resize the window (in pixels)."""
pass
def key_press(self, event):
"""
Implement the default mpl key bindings defined at
:ref:`key-event-handling`
"""
key_press_handler(event, self.canvas, self.canvas.toolbar)
def show_popup(self, msg):
"""
Display message in a popup -- GUI only
"""
pass
def get_window_title(self):
"""
Get the title text of the window containing the figure.
Return None for non-GUI backends (e.g., a PS backend).
"""
return 'image'
def set_window_title(self, title):
"""
Set the title text of the window containing the figure. Note that
this has no effect for non-GUI backends (e.g., a PS backend).
"""
pass
class Cursors:
# this class is only used as a simple namespace
HAND, POINTER, SELECT_REGION, MOVE = list(range(4))
cursors = Cursors()
class NavigationToolbar2(object):
"""
Base class for the navigation cursor, version 2
backends must implement a canvas that handles connections for
'button_press_event' and 'button_release_event'. See
:meth:`FigureCanvasBase.mpl_connect` for more information
They must also define
:meth:`save_figure`
save the current figure
:meth:`set_cursor`
if you want the pointer icon to change
:meth:`_init_toolbar`
create your toolbar widget
:meth:`draw_rubberband` (optional)
draw the zoom to rect "rubberband" rectangle
:meth:`press` (optional)
whenever a mouse button is pressed, you'll be notified with
the event
:meth:`release` (optional)
whenever a mouse button is released, you'll be notified with
the event
:meth:`dynamic_update` (optional)
dynamically update the window while navigating
:meth:`set_message` (optional)
display message
:meth:`set_history_buttons` (optional)
you can change the history back / forward buttons to
indicate disabled / enabled state.
That's it, we'll do the rest!
"""
# list of toolitems to add to the toolbar, format is:
# (
# text, # the text of the button (often not visible to users)
# tooltip_text, # the tooltip shown on hover (where possible)
# image_file, # name of the image for the button (without the extension)
# name_of_method, # name of the method in NavigationToolbar2 to call
# )
toolitems = (
('Home', 'Reset original view', 'home', 'home'),
('Back', 'Back to previous view', 'back', 'back'),
('Forward', 'Forward to next view', 'forward', 'forward'),
(None, None, None, None),
('Pan', 'Pan axes with left mouse, zoom with right', 'move', 'pan'),
('Zoom', 'Zoom to rectangle', 'zoom_to_rect', 'zoom'),
(None, None, None, None),
('Subplots', 'Configure subplots', 'subplots', 'configure_subplots'),
('Save', 'Save the figure', 'filesave', 'save_figure'),
)
def __init__(self, canvas):
self.canvas = canvas
canvas.toolbar = self
# a dict from axes index to a list of view limits
self._views = cbook.Stack()
self._positions = cbook.Stack() # stack of subplot positions
self._xypress = None # the location and axis info at the time
# of the press
self._idPress = None
self._idRelease = None
self._active = None
self._lastCursor = None
self._init_toolbar()
self._idDrag = self.canvas.mpl_connect(
'motion_notify_event', self.mouse_move)
self._ids_zoom = []
self._zoom_mode = None
self._button_pressed = None # determined by the button pressed
# at start
self.mode = '' # a mode string for the status bar
self.set_history_buttons()
def set_message(self, s):
"""Display a message on toolbar or in status bar"""
pass
def back(self, *args):
"""move back up the view lim stack"""
self._views.back()
self._positions.back()
self.set_history_buttons()
self._update_view()
def dynamic_update(self):
pass
def draw_rubberband(self, event, x0, y0, x1, y1):
"""Draw a rectangle rubberband to indicate zoom limits"""
pass
def forward(self, *args):
"""Move forward in the view lim stack"""
self._views.forward()
self._positions.forward()
self.set_history_buttons()
self._update_view()
def home(self, *args):
"""Restore the original view"""
self._views.home()
self._positions.home()
self.set_history_buttons()
self._update_view()
def _init_toolbar(self):
"""
This is where you actually build the GUI widgets (called by
__init__). The icons ``home.xpm``, ``back.xpm``, ``forward.xpm``,
``hand.xpm``, ``zoom_to_rect.xpm`` and ``filesave.xpm`` are standard
across backends (there are ppm versions in CVS also).
You just need to set the callbacks
home : self.home
back : self.back
forward : self.forward
hand : self.pan
zoom_to_rect : self.zoom
filesave : self.save_figure
You only need to define the last one - the others are in the base
class implementation.
"""
raise NotImplementedError
def _set_cursor(self, event):
if not event.inaxes or not self._active:
if self._lastCursor != cursors.POINTER:
self.set_cursor(cursors.POINTER)
self._lastCursor = cursors.POINTER
else:
if self._active == 'ZOOM':
if self._lastCursor != cursors.SELECT_REGION:
self.set_cursor(cursors.SELECT_REGION)
self._lastCursor = cursors.SELECT_REGION
elif (self._active == 'PAN' and
self._lastCursor != cursors.MOVE):
self.set_cursor(cursors.MOVE)
self._lastCursor = cursors.MOVE
def mouse_move(self, event):
self._set_cursor(event)
if event.inaxes and event.inaxes.get_navigate():
try:
s = event.inaxes.format_coord(event.xdata, event.ydata)
except (ValueError, OverflowError):
pass
else:
if len(self.mode):
self.set_message('%s, %s' % (self.mode, s))
else:
self.set_message(s)
else:
self.set_message(self.mode)
def pan(self, *args):
"""Activate the pan/zoom tool. pan with left button, zoom with right"""
# set the pointer icon and button press funcs to the
# appropriate callbacks
if self._active == 'PAN':
self._active = None
else:
self._active = 'PAN'
if self._idPress is not None:
self._idPress = self.canvas.mpl_disconnect(self._idPress)
self.mode = ''
if self._idRelease is not None:
self._idRelease = self.canvas.mpl_disconnect(self._idRelease)
self.mode = ''
if self._active:
self._idPress = self.canvas.mpl_connect(
'button_press_event', self.press_pan)
self._idRelease = self.canvas.mpl_connect(
'button_release_event', self.release_pan)
self.mode = 'pan/zoom'
self.canvas.widgetlock(self)
else:
self.canvas.widgetlock.release(self)
for a in self.canvas.figure.get_axes():
a.set_navigate_mode(self._active)
self.set_message(self.mode)
def press(self, event):
"""Called whenver a mouse button is pressed."""
pass
def press_pan(self, event):
"""the press mouse button in pan/zoom mode callback"""
if event.button == 1:
self._button_pressed = 1
elif event.button == 3:
self._button_pressed = 3
else:
self._button_pressed = None
return
x, y = event.x, event.y
# push the current view to define home if stack is empty
if self._views.empty():
self.push_current()
self._xypress = []
for i, a in enumerate(self.canvas.figure.get_axes()):
if (x is not None and y is not None and a.in_axes(event) and
a.get_navigate() and a.can_pan()):
a.start_pan(x, y, event.button)
self._xypress.append((a, i))
self.canvas.mpl_disconnect(self._idDrag)
self._idDrag = self.canvas.mpl_connect('motion_notify_event',
self.drag_pan)
self.press(event)
def press_zoom(self, event):
"""the press mouse button in zoom to rect mode callback"""
# If we're already in the middle of a zoom, pressing another
# button works to "cancel"
if self._ids_zoom != []:
for zoom_id in self._ids_zoom:
self.canvas.mpl_disconnect(zoom_id)
self.release(event)
self.draw()
self._xypress = None
self._button_pressed = None
self._ids_zoom = []
return
if event.button == 1:
self._button_pressed = 1
elif event.button == 3:
self._button_pressed = 3
else:
self._button_pressed = None
return
x, y = event.x, event.y
# push the current view to define home if stack is empty
if self._views.empty():
self.push_current()
self._xypress = []
for i, a in enumerate(self.canvas.figure.get_axes()):
if (x is not None and y is not None and a.in_axes(event) and
a.get_navigate() and a.can_zoom()):
self._xypress.append((x, y, a, i, a.viewLim.frozen(),
a.transData.frozen()))
id1 = self.canvas.mpl_connect('motion_notify_event', self.drag_zoom)
id2 = self.canvas.mpl_connect('key_press_event',
self._switch_on_zoom_mode)
id3 = self.canvas.mpl_connect('key_release_event',
self._switch_off_zoom_mode)
self._ids_zoom = id1, id2, id3
self._zoom_mode = event.key
self.press(event)
def _switch_on_zoom_mode(self, event):
self._zoom_mode = event.key
self.mouse_move(event)
def _switch_off_zoom_mode(self, event):
self._zoom_mode = None
self.mouse_move(event)
def push_current(self):
"""push the current view limits and position onto the stack"""
lims = []
pos = []
for a in self.canvas.figure.get_axes():
xmin, xmax = a.get_xlim()
ymin, ymax = a.get_ylim()
lims.append((xmin, xmax, ymin, ymax))
# Store both the original and modified positions
pos.append((
a.get_position(True).frozen(),
a.get_position().frozen()))
self._views.push(lims)
self._positions.push(pos)
self.set_history_buttons()
def release(self, event):
"""this will be called whenever mouse button is released"""
pass
def release_pan(self, event):
"""the release mouse button callback in pan/zoom mode"""
if self._button_pressed is None:
return
self.canvas.mpl_disconnect(self._idDrag)
self._idDrag = self.canvas.mpl_connect(
'motion_notify_event', self.mouse_move)
for a, ind in self._xypress:
a.end_pan()
if not self._xypress:
return
self._xypress = []
self._button_pressed = None
self.push_current()
self.release(event)
self.draw()
def drag_pan(self, event):
"""the drag callback in pan/zoom mode"""
for a, ind in self._xypress:
#safer to use the recorded button at the press than current button:
#multiple buttons can get pressed during motion...
a.drag_pan(self._button_pressed, event.key, event.x, event.y)
self.dynamic_update()
def drag_zoom(self, event):
"""the drag callback in zoom mode"""
if self._xypress:
x, y = event.x, event.y
lastx, lasty, a, ind, lim, trans = self._xypress[0]
# adjust x, last, y, last
x1, y1, x2, y2 = a.bbox.extents
x, lastx = max(min(x, lastx), x1), min(max(x, lastx), x2)
y, lasty = max(min(y, lasty), y1), min(max(y, lasty), y2)
if self._zoom_mode == "x":
x1, y1, x2, y2 = a.bbox.extents
y, lasty = y1, y2
elif self._zoom_mode == "y":
x1, y1, x2, y2 = a.bbox.extents
x, lastx = x1, x2
self.draw_rubberband(event, x, y, lastx, lasty)
def release_zoom(self, event):
"""the release mouse button callback in zoom to rect mode"""
for zoom_id in self._ids_zoom:
self.canvas.mpl_disconnect(zoom_id)
self._ids_zoom = []
if not self._xypress:
return
last_a = []
for cur_xypress in self._xypress:
x, y = event.x, event.y
lastx, lasty, a, ind, lim, trans = cur_xypress
# ignore singular clicks - 5 pixels is a threshold
if abs(x - lastx) < 5 or abs(y - lasty) < 5:
self._xypress = None
self.release(event)
self.draw()
return
x0, y0, x1, y1 = lim.extents
# zoom to rect
inverse = a.transData.inverted()
lastx, lasty = inverse.transform_point((lastx, lasty))
x, y = inverse.transform_point((x, y))
Xmin, Xmax = a.get_xlim()
Ymin, Ymax = a.get_ylim()
# detect twinx,y axes and avoid double zooming
twinx, twiny = False, False
if last_a:
for la in last_a:
if a.get_shared_x_axes().joined(a, la):
twinx = True
if a.get_shared_y_axes().joined(a, la):
twiny = True
last_a.append(a)
if twinx:
x0, x1 = Xmin, Xmax
else:
if Xmin < Xmax:
if x < lastx:
x0, x1 = x, lastx
else:
x0, x1 = lastx, x
if x0 < Xmin:
x0 = Xmin
if x1 > Xmax:
x1 = Xmax
else:
if x > lastx:
x0, x1 = x, lastx
else:
x0, x1 = lastx, x
if x0 > Xmin:
x0 = Xmin
if x1 < Xmax:
x1 = Xmax
if twiny:
y0, y1 = Ymin, Ymax
else:
if Ymin < Ymax:
if y < lasty:
y0, y1 = y, lasty
else:
y0, y1 = lasty, y
if y0 < Ymin:
y0 = Ymin
if y1 > Ymax:
y1 = Ymax
else:
if y > lasty:
y0, y1 = y, lasty
else:
y0, y1 = lasty, y
if y0 > Ymin:
y0 = Ymin
if y1 < Ymax:
y1 = Ymax
if self._button_pressed == 1:
if self._zoom_mode == "x":
a.set_xlim((x0, x1))
elif self._zoom_mode == "y":
a.set_ylim((y0, y1))
else:
a.set_xlim((x0, x1))
a.set_ylim((y0, y1))
elif self._button_pressed == 3:
if a.get_xscale() == 'log':
alpha = np.log(Xmax / Xmin) / np.log(x1 / x0)
rx1 = pow(Xmin / x0, alpha) * Xmin
rx2 = pow(Xmax / x0, alpha) * Xmin
else:
alpha = (Xmax - Xmin) / (x1 - x0)
rx1 = alpha * (Xmin - x0) + Xmin
rx2 = alpha * (Xmax - x0) + Xmin
if a.get_yscale() == 'log':
alpha = np.log(Ymax / Ymin) / np.log(y1 / y0)
ry1 = pow(Ymin / y0, alpha) * Ymin
ry2 = pow(Ymax / y0, alpha) * Ymin
else:
alpha = (Ymax - Ymin) / (y1 - y0)
ry1 = alpha * (Ymin - y0) + Ymin
ry2 = alpha * (Ymax - y0) + Ymin
if self._zoom_mode == "x":
a.set_xlim((rx1, rx2))
elif self._zoom_mode == "y":
a.set_ylim((ry1, ry2))
else:
a.set_xlim((rx1, rx2))
a.set_ylim((ry1, ry2))
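# Worked note (added for clarity, not part of the original source): for button 3
# ("zoom out") on a linear x axis, alpha = (Xmax - Xmin) / (x1 - x0) is the
# ratio between the current view width and the rubber-band width.  The new
# limits rx1 = alpha*(Xmin - x0) + Xmin and rx2 = alpha*(Xmax - x0) + Xmin are
# chosen so that the old view [Xmin, Xmax] ends up occupying exactly the
# fraction of the axes that the dragged rectangle [x0, x1] occupied before.
# Example: Xmin, Xmax = 0, 10 and x0, x1 = 2, 4 give alpha = 5 and a new view
# of [-10, 40], in which the old 0..10 range sits in the 20%-40% band, just as
# 2..4 did in the old view.  The log branches apply the same construction to
# the exponents.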
self.draw()
self._xypress = None
self._button_pressed = None
self._zoom_mode = None
self.push_current()
self.release(event)
def draw(self):
"""Redraw the canvases, update the locators"""
for a in self.canvas.figure.get_axes():
xaxis = getattr(a, 'xaxis', None)
yaxis = getattr(a, 'yaxis', None)
locators = []
if xaxis is not None:
locators.append(xaxis.get_major_locator())
locators.append(xaxis.get_minor_locator())
if yaxis is not None:
locators.append(yaxis.get_major_locator())
locators.append(yaxis.get_minor_locator())
for loc in locators:
loc.refresh()
self.canvas.draw_idle()
def _update_view(self):
"""Update the viewlim and position from the view and
position stack for each axes
"""
lims = self._views()
if lims is None:
return
pos = self._positions()
if pos is None:
return
for i, a in enumerate(self.canvas.figure.get_axes()):
xmin, xmax, ymin, ymax = lims[i]
a.set_xlim((xmin, xmax))
a.set_ylim((ymin, ymax))
# Restore both the original and modified positions
a.set_position(pos[i][0], 'original')
a.set_position(pos[i][1], 'active')
self.canvas.draw_idle()
def save_figure(self, *args):
"""Save the current figure"""
raise NotImplementedError
def set_cursor(self, cursor):
"""
Set the current cursor to one of the :class:`Cursors`
enums values
"""
pass
def update(self):
"""Reset the axes stack"""
self._views.clear()
self._positions.clear()
self.set_history_buttons()
def zoom(self, *args):
"""Activate zoom to rect mode"""
if self._active == 'ZOOM':
self._active = None
else:
self._active = 'ZOOM'
if self._idPress is not None:
self._idPress = self.canvas.mpl_disconnect(self._idPress)
self.mode = ''
if self._idRelease is not None:
self._idRelease = self.canvas.mpl_disconnect(self._idRelease)
self.mode = ''
if self._active:
self._idPress = self.canvas.mpl_connect('button_press_event',
self.press_zoom)
self._idRelease = self.canvas.mpl_connect('button_release_event',
self.release_zoom)
self.mode = 'zoom rect'
self.canvas.widgetlock(self)
else:
self.canvas.widgetlock.release(self)
for a in self.canvas.figure.get_axes():
a.set_navigate_mode(self._active)
self.set_message(self.mode)
def set_history_buttons(self):
"""Enable or disable back/forward button"""
pass
| mit |
daviskirk/unitframe | setup.py | 1 | 3399 | """A setuptools based setup module.
See:
https://packaging.python.org/en/latest/distributing.html
https://github.com/pypa/sampleproject
"""
# Always prefer setuptools over distutils
from setuptools import setup, find_packages
# To use a consistent encoding
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
# Get the long description from the relevant file
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='unitframe',
# Versions should comply with PEP440. For a discussion on single-sourcing
# the version across setup.py and the project code, see
# https://packaging.python.org/en/latest/single_source_version.html
version='0.1',
description='Use units in pandas DataFrames',
long_description=long_description,
# The project's main homepage.
url='https://github.com/daviskirk/unitframe',
# Author details
author='Davis Kirkendall',
author_email='[email protected]',
# Choose your license
license='MIT',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 3 - Alpha',
# Indicate who your project is intended for
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering',
# Pick your license as you wish (should match "license" above)
'License :: OSI Approved :: MIT License',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
],
# What does your project relate to?
keywords='pandas pint numpy development scientific engineering units',
# You can just specify the packages manually here if your project is
# simple. Or you can use find_packages().
# packages=find_packages(exclude=['contrib', 'docs', 'tests*']),
packages=['unitframe'],
# List run-time dependencies here. These will be installed by pip when
# your project is installed. For an analysis of "install_requires" vs pip's
# requirements files see:
# https://packaging.python.org/en/latest/requirements.html
install_requires=['pandas', 'numpy', 'pint'],
# List additional groups of dependencies here (e.g. development
# dependencies). You can install these using the following syntax,
# for example:
# $ pip install -e .[dev,test]
extras_require={
'test': ['pytest'],
},
# # If there are data files included in your packages that need to be
# # installed, specify them here. If using Python 2.6 or less, then these
# # have to be included in MANIFEST.in as well.
# package_data={
# 'sample': ['package_data.dat'],
# },
# # Although 'package_data' is the preferred approach, in some case you may
# # need to place data files outside of your packages. See:
# # http://docs.python.org/3.4/distutils/setupscript.html#installing-additional-files # noqa
# # In this case, 'data_file' will be installed into '<sys.prefix>/my_data'
# data_files=[('my_data', ['data/data_file'])],
)
| mit |
johankaito/fufuka | graph-tool/src/graph_tool/draw/graphviz_draw.py | 2 | 24109 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# graph_tool -- a general graph manipulation python module
#
# Copyright (C) 2006-2015 Tiago de Paula Peixoto <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import division, absolute_import, print_function
import sys
import os
import os.path
import time
import warnings
import ctypes
import ctypes.util
import tempfile
from .. import PropertyMap, group_vector_property, ungroup_vector_property
import numpy.random
import copy
from .. draw import arf_layout
try:
import matplotlib.cm
import matplotlib.colors
except ImportError:
msg = "Error importing matplotlib module... graphviz_draw() will not work"
warnings.filterwarnings("always", msg, ImportWarning)
warnings.warn(msg, ImportWarning)
raise
try:
libname = ctypes.util.find_library("c")
libc = ctypes.CDLL(libname)
if hasattr(libc, "open_memstream"):
libc.open_memstream.restype = ctypes.POINTER(ctypes.c_char)
except OSError:
msg = "Error importing C standard library... graphviz_draw() will not work."
warnings.filterwarnings("always", msg, ImportWarning)
warnings.warn(msg, ImportWarning)
pass
try:
libname = ctypes.util.find_library("gvc")
if libname is None:
raise OSError()
libgv = ctypes.CDLL(libname, ctypes.RTLD_GLOBAL)
# properly set the return types of certain functions
ptype = ctypes.POINTER(ctypes.c_char)
libgv.gvContext.restype = ptype
libgv.agopen.restype = ptype
libgv.agnode.restype = ptype
libgv.agedge.restype = ptype
libgv.agget.restype = ptype
libgv.agstrdup_html.restype = ptype
# create a context to use the whole time (if we keep freeing and recreating
# it, we will hit a memory leak in graphviz)
gvc = libgv.gvContext()
try:
gv_new_api = True
libgv_directed = libgv.Agdirected
libgv_undirected = libgv.Agundirected
except AttributeError:
gv_new_api = False
libgv_directed = 1
libgv_undirected = 0
except OSError:
msg = "Error importing graphviz C library (libgvc)... graphviz_draw() will not work."
warnings.filterwarnings("always", msg, ImportWarning)
warnings.warn(msg, ImportWarning)
def htmlize(val):
if len(val) >= 2 and val[0] == "<" and val[-1] == ">":
return ctypes.string_at(libgv.agstrdup_html(val[1:-1]))
return val
def aset(elem, attr, value):
v = htmlize(str(value)).encode("utf8")
libgv.agsafeset(elem, str(attr).encode("utf8"), v, v)
def aget(elem, attr):
s = ctypes.string_at(libgv.agget(elem,
str(attr).encode("utf8")))
return s.decode("utf8")
def graphviz_draw(g, pos=None, size=(15, 15), pin=False, layout=None,
maxiter=None, ratio="fill", overlap=True, sep=None,
splines=False, vsize=0.105, penwidth=1.0, elen=None,
gprops={}, vprops={}, eprops={}, vcolor="#a40000",
ecolor="#2e3436", vcmap=None, vnorm=True, ecmap=None,
enorm=True, vorder=None, eorder=None, output="",
output_format="auto", fork=False, return_string=False):
r"""Draw a graph using graphviz.
Parameters
----------
g : :class:`~graph_tool.Graph`
Graph to be drawn.
pos : :class:`~graph_tool.PropertyMap` or tuple of :class:`~graph_tool.PropertyMap` (optional, default: ``None``)
Vertex property maps containing the x and y coordinates of the vertices.
size : tuple of scalars (optional, default: ``(15,15)``)
Size (in centimeters) of the canvas.
pin : bool or :class:`~graph_tool.PropertyMap` (default: ``False``)
If ``True``, the vertices are not moved from their initial position. If
a :class:`~graph_tool.PropertyMap` is passed, it is used to pin nodes
individually.
layout : string (default: ``"neato" if g.num_vertices() <= 1000 else "sfdp"``)
Layout engine to be used. Possible values are ``"neato"``, ``"fdp"``,
``"dot"``, ``"circo"``, ``"twopi"`` and ``"arf"``.
maxiter : int (default: ``None``)
If specified, limits the maximum number of iterations.
ratio : string or float (default: ``"fill"``)
Sets the aspect ratio (drawing height/drawing width) for the
drawing. Note that this is adjusted before the ``size`` attribute
constraints are enforced.
If ``ratio`` is numeric, it is taken as the desired aspect ratio. Then,
if the actual aspect ratio is less than the desired ratio, the drawing
height is scaled up to achieve the desired ratio; if the actual ratio is
greater than that desired ratio, the drawing width is scaled up.
If ``ratio == "fill"`` and the size attribute is set, node positions are
scaled, separately in both x and y, so that the final drawing exactly
fills the specified size.
If ``ratio == "compress"`` and the size attribute is set, dot attempts
to compress the initial layout to fit in the given size. This achieves a
tighter packing of nodes but reduces the balance and symmetry. This
feature only works in dot.
If ``ratio == "expand"``, the size attribute is set, and both the width
and the height of the graph are less than the value in size, node
positions are scaled uniformly until at least one dimension fits size
exactly. Note that this is distinct from using size as the desired
size, as here the drawing is expanded before edges are generated and all
node and text sizes remain unchanged.
If ``ratio == "auto"``, the page attribute is set and the graph cannot
be drawn on a single page, then size is set to an "ideal" value. In
particular, the size in a given dimension will be the smallest integral
multiple of the page size in that dimension which is at least half the
current size. The two dimensions are then scaled independently to the
new size. This feature only works in dot.
overlap : bool or string (default: ``"prism"``)
Determines if and how node overlaps should be removed. Nodes are first
enlarged using the sep attribute. If ``True``, overlaps are retained. If
the value is ``"scale"``, overlaps are removed by uniformly scaling in x
and y. If the value is ``False``, node overlaps are removed by a
Voronoi-based technique. If the value is ``"scalexy"``, x and y are
separately scaled to remove overlaps.
If sfdp is available, one can set overlap to ``"prism"`` to use a
proximity graph-based algorithm for overlap removal. This is the
preferred technique, though ``"scale"`` and ``False`` can work well with
small graphs. This technique starts with a small scaling up, controlled
by the overlap_scaling attribute, which can remove a significant portion
of the overlap. The prism option also accepts an optional non-negative
integer suffix. This can be used to control the number of attempts made
at overlap removal. By default, ``overlap == "prism"`` is equivalent to
``overlap == "prism1000"``. Setting ``overlap == "prism0"`` causes only
the scaling phase to be run.
If the value is ``"compress"``, the layout will be scaled down as much
as possible without introducing any overlaps, obviously assuming there
are none to begin with.
sep : float (default: ``None``)
Specifies margin to leave around nodes when removing node overlap. This
guarantees a minimal non-zero distance between nodes.
splines : bool (default: ``False``)
If ``True``, the edges are drawn as splines and routed around the
vertices.
vsize : float, :class:`~graph_tool.PropertyMap`, or tuple (default: ``0.105``)
Default vertex size (width and height). If a tuple is specified, the
first value should be a property map, and the second is a scale factor.
penwidth : float, :class:`~graph_tool.PropertyMap` or tuple (default: ``1.0``)
Specifies the width of the pen, in points, used to draw lines and
curves, including the boundaries of edges and clusters. It has no effect
on text. If a tuple is specified, the first value should be a property
map, and the second is a scale factor.
elen : float or :class:`~graph_tool.PropertyMap` (default: ``None``)
Preferred edge length, in inches.
gprops : dict (default: ``{}``)
Additional graph properties, as a dictionary. The keys are the property
names, and the values must be convertible to string.
vprops : dict (default: ``{}``)
Additional vertex properties, as a dictionary. The keys are the property
names, and the values must be convertible to string, or vertex property
maps, with values convertible to strings.
eprops : dict (default: ``{}``)
Additional edge properties, as a dictionary. The keys are the property
names, and the values must be convertible to string, or edge property
maps, with values convertible to strings.
vcolor : string or :class:`~graph_tool.PropertyMap` (default: ``"#a40000"``)
Drawing color for vertices. If the valued supplied is a property map,
the values must be scalar types, whose color values are obtained from
the ``vcmap`` argument.
ecolor : string or :class:`~graph_tool.PropertyMap` (default: ``"#2e3436"``)
Drawing color for edges. If the valued supplied is a property map,
the values must be scalar types, whose color values are obtained from
the ``ecmap`` argument.
vcmap : :class:`matplotlib.colors.Colormap` (default: :class:`matplotlib.cm.jet`)
Vertex color map.
vnorm : bool (default: ``True``)
Normalize vertex color values to the [0,1] range.
ecmap : :class:`matplotlib.colors.Colormap` (default: :class:`matplotlib.cm.jet`)
Edge color map.
enorm : bool (default: ``True``)
Normalize edge color values to the [0,1] range.
vorder : :class:`~graph_tool.PropertyMap` (default: ``None``)
Scalar vertex property map which specifies the order with which vertices
are drawn.
eorder : :class:`~graph_tool.PropertyMap` (default: ``None``)
Scalar edge property map which specifies the order with which edges
are drawn.
output : string (default: ``""``)
Output file name.
output_format : string (default: ``"auto"``)
Output file format. Possible values are ``"auto"``, ``"xlib"``,
``"ps"``, ``"svg"``, ``"svgz"``, ``"fig"``, ``"mif"``, ``"hpgl"``,
``"pcl"``, ``"png"``, ``"gif"``, ``"dia"``, ``"imap"``, ``"cmapx"``. If
the value is ``"auto"``, the format is guessed from the ``output``
parameter, or ``xlib`` if it is empty. If the value is ``None``, no
output is produced.
fork : bool (default: ``False``)
If ``True``, the program is forked before drawing. This is used as a
work-around for a bug in graphviz, where the ``exit()`` function is
called, which would cause the calling program to end. This is always
assumed ``True``, if ``output_format == 'xlib'``.
return_string : bool (default: ``False``)
If ``True``, a string containing the rendered graph as binary data is
returned (defaults to png format).
Returns
-------
pos : :class:`~graph_tool.PropertyMap`
Vector vertex property map with the x and y coordinates of the vertices.
data : bytes (optional, only if ``return_string == True``)
String containing the rendered graph as binary data.
Notes
-----
This function is a wrapper for the [graphviz]_ routines. Extensive additional
documentation for the graph, vertex and edge properties is available at:
http://www.graphviz.org/doc/info/attrs.html.
Examples
--------
.. testcode::
:hide:
np.random.seed(42)
gt.seed_rng(42)
from numpy import sqrt
>>> g = gt.price_network(1500)
>>> deg = g.degree_property_map("in")
>>> deg.a = 2 * (sqrt(deg.a) * 0.5 + 0.4)
>>> ebet = gt.betweenness(g)[1]
>>> gt.graphviz_draw(g, vcolor=deg, vorder=deg, elen=10,
... ecolor=ebet, eorder=ebet, output="graphviz-draw.pdf")
<...>
.. testcode::
:hide:
gt.graphviz_draw(g, vcolor=deg, vorder=deg, elen=10,
ecolor=ebet, eorder=ebet, output="graphviz-draw.png")
.. figure:: graphviz-draw.*
:align: center
Kamada-Kawai force-directed layout of a Price network with 1500
nodes. The vertex size and color indicate the degree, and the edge color
corresponds to the edge betweenness centrality.
References
----------
.. [graphviz] http://www.graphviz.org
"""
if output != "" and output is not None:
output = os.path.expanduser(output)
# check opening file for writing, since graphviz will bork if it is not
# possible to open file
if os.path.dirname(output) != "" and \
not os.access(os.path.dirname(output), os.W_OK):
raise IOError("cannot write to " + os.path.dirname(output))
has_layout = False
try:
if gv_new_api:
gvg = libgv.agopen("G".encode("utf8"),
libgv_directed if g.is_directed() else libgv_undirected,
None)
else:
gvg = libgv.agopen("G".encode("utf8"),
libgv_directed if g.is_directed() else libgv_undirected)
if layout is None:
if pin == False:
layout = "neato" if g.num_vertices() <= 1000 else "sfdp"
else:
layout = "neato"
if layout == "arf":
layout = "neato"
pos = arf_layout(g, pos=pos)
pin = True
if pos is not None:
# copy user-supplied property
if isinstance(pos, PropertyMap):
pos = ungroup_vector_property(pos, [0, 1])
else:
pos = (g.copy_property(pos[0]), g.copy_property(pos[1]))
if type(vsize) == tuple:
s = g.new_vertex_property("double")
g.copy_property(vsize[0], s)
s.a *= vsize[1]
vsize = s
if type(penwidth) == tuple:
s = g.new_edge_property("double")
g.copy_property(penwidth[0], s)
s.a *= penwidth[1]
penwidth = s
# main graph properties
aset(gvg, "outputorder", "edgesfirst")
aset(gvg, "mode", "major")
if type(overlap) is bool:
overlap = "true" if overlap else "false"
else:
overlap = str(overlap)
aset(gvg, "overlap", overlap)
if sep is not None:
aset(gvg, "sep", sep)
if splines:
aset(gvg, "splines", "true")
aset(gvg, "ratio", ratio)
# size is in centimeters... convert to inches
aset(gvg, "size", "%f,%f" % (size[0] / 2.54, size[1] / 2.54))
if maxiter is not None:
aset(gvg, "maxiter", maxiter)
seed = numpy.random.randint(sys.maxsize)
aset(gvg, "start", "%d" % seed)
# apply all user supplied graph properties
for k, val in gprops.items():
if isinstance(val, PropertyMap):
aset(gvg, k, val[g])
else:
aset(gvg, k, val)
# normalize color properties
if (isinstance(vcolor, PropertyMap) and
vcolor.value_type() != "string"):
minmax = [float("inf"), -float("inf")]
for v in g.vertices():
c = vcolor[v]
minmax[0] = min(c, minmax[0])
minmax[1] = max(c, minmax[1])
if minmax[0] == minmax[1]:
minmax[1] += 1
if vnorm:
vnorm = matplotlib.colors.Normalize(vmin=minmax[0], vmax=minmax[1])
else:
vnorm = lambda x: x
if (isinstance(ecolor, PropertyMap) and
ecolor.value_type() != "string"):
minmax = [float("inf"), -float("inf")]
for e in g.edges():
c = ecolor[e]
minmax[0] = min(c, minmax[0])
minmax[1] = max(c, minmax[1])
if minmax[0] == minmax[1]:
minmax[1] += 1
if enorm:
enorm = matplotlib.colors.Normalize(vmin=minmax[0],
vmax=minmax[1])
else:
enorm = lambda x: x
if vcmap is None:
vcmap = matplotlib.cm.jet
if ecmap is None:
ecmap = matplotlib.cm.jet
# add nodes
if vorder is not None:
vertices = sorted(g.vertices(), key = lambda a: vorder[a])
else:
vertices = g.vertices()
for v in vertices:
if gv_new_api:
n = libgv.agnode(gvg, str(int(v)).encode("utf8"))
else:
n = libgv.agnode(gvg, str(int(v)).encode("utf8"), True)
if type(vsize) == PropertyMap:
vw = vh = vsize[v]
else:
vw = vh = vsize
aset(n, "shape", "circle")
aset(n, "width", "%g" % vw)
aset(n, "height", "%g" % vh)
aset(n, "style", "filled")
aset(n, "color", "#2e3436")
# apply color
if isinstance(vcolor, str):
aset(n, "fillcolor", vcolor)
else:
color = vcolor[v]
if isinstance(color, str):
aset(n, "fillcolor", color)
else:
color = tuple([int(c * 255.0) for c in vcmap(vnorm(color))])
aset(n, "fillcolor", "#%.2x%.2x%.2x%.2x" % color)
aset(n, "label", "")
# user supplied position
if pos is not None:
if isinstance(pin, bool):
pin_val = pin
else:
pin_val = pin[v]
aset(n, "pos", "%f,%f%s" % (pos[0][v], pos[1][v],
"!" if pin_val else ""))
aset(n, "pin", pin_val)
# apply all user supplied properties
for k, val in vprops.items():
if isinstance(val, PropertyMap):
aset(n, k, val[v])
else:
aset(n, k, val)
# add edges
if eorder is not None:
edges = sorted(g.edges(), key = lambda a: eorder[a])
else:
edges = g.edges()
for e in edges:
if gv_new_api:
ge = libgv.agedge(gvg,
libgv.agnode(gvg, str(int(e.source())).encode("utf8"), False),
libgv.agnode(gvg, str(int(e.target())).encode("utf8"), False),
str(g.edge_index[e]).encode("utf8"), True)
else:
ge = libgv.agedge(gvg,
libgv.agnode(gvg, str(int(e.source())).encode("utf8")),
libgv.agnode(gvg, str(int(e.target())).encode("utf8")))
aset(ge, "arrowsize", "0.3")
if g.is_directed():
aset(ge, "arrowhead", "vee")
# apply color
if isinstance(ecolor, str):
aset(ge, "color", ecolor)
else:
color = ecolor[e]
if isinstance(color, str):
aset(ge, "color", color)
else:
color = tuple([int(c * 255.0) for c in ecmap(enorm(color))])
aset(ge, "color", "#%.2x%.2x%.2x%.2x" % color)
# apply edge length
if elen is not None:
if isinstance(elen, PropertyMap):
aset(ge, "len", elen[e])
else:
aset(ge, "len", elen)
# apply width
if penwidth is not None:
if isinstance(penwidth, PropertyMap):
aset(ge, "penwidth", penwidth[e])
else:
aset(ge, "penwidth", penwidth)
# apply all user supplied properties
for k, v in eprops.items():
if isinstance(v, PropertyMap):
aset(ge, k, v[e])
else:
aset(ge, k, v)
libgv.gvLayout(gvc, gvg, layout.encode("utf8"))
has_layout = True
retv = libgv.gvRender(gvc, gvg, "dot".encode("utf8"), None) # retrieve positions only
if pos == None:
pos = (g.new_vertex_property("double"),
g.new_vertex_property("double"))
for v in g.vertices():
n = libgv.agnode(gvg, str(int(v)).encode("utf8"))
p = aget(n, "pos")
p = p.split(",")
pos[0][v] = float(p[0])
pos[1][v] = float(p[1])
# I don't get this, but it seems necessary
pos[0].a /= 100
pos[1].a /= 100
pos = group_vector_property(pos)
if return_string:
if output_format == "auto":
output_format = "png"
if hasattr(libc, "open_memstream"):
buf = ctypes.c_char_p()
buf_len = ctypes.c_size_t()
fstream = libc.open_memstream(ctypes.byref(buf),
ctypes.byref(buf_len))
libgv.gvRender(gvc, gvg, output_format.encode("utf8"), fstream)
libc.fclose(fstream)
data = copy.copy(ctypes.string_at(buf, buf_len.value))
libc.free(buf)
else:
# write to temporary file, if open_memstream is not available
output = tempfile.mkstemp()[1]
libgv.gvRenderFilename(gvc, gvg, output_format.encode("utf8"),
output.encode("utf8"))
data = open(output).read()
os.remove(output)
else:
if output_format == "auto":
if output == "":
output_format = "xlib"
elif output is not None:
output_format = output.split(".")[-1]
# if using xlib we need to fork the process, otherwise good ol'
# graphviz will call exit() when the window is closed
if output_format == "xlib" or fork:
pid = os.fork()
if pid == 0:
libgv.gvRenderFilename(gvc, gvg, output_format.encode("utf8"),
output.encode("utf8"))
os._exit(0) # since we forked, it's good to be sure
if output_format != "xlib":
os.wait()
elif output is not None:
libgv.gvRenderFilename(gvc, gvg, output_format.encode("utf8"),
output.encode("utf8"))
ret = [pos]
if return_string:
ret.append(data)
finally:
if has_layout:
libgv.gvFreeLayout(gvc, gvg)
libgv.agclose(gvg)
if len(ret) > 1:
return tuple(ret)
else:
return ret[0]
| apache-2.0 |
tgquintela/ChaosFunctions | ChaosFunctions/chaotic_series.py | 1 | 1312 |
"""
Chaotic series
"""
from plotting import plot_iteration
from generic_iteration import generic_iteration
class Iterator:
"""Iterator object to compute iterative processes or magnitudes.
"""
def __init__(self, iter_f, stop_f):
"""Instantiation of the iteration.
Parameters
----------
iter_f: function
the iteration function.
stop_f: function
the conditions to stop the iteration.
"""
self.iter_f, self.stop_f = iter_f, stop_f
def iterate_sequence(self, p0):
"""Comput the iteration from the initial point given.
Parameters
----------
p0: optional
initial point to start the iteration.
Returns
-------
sequence: np.ndarray
the sequence information.
"""
sequence = generic_iteration(p0, self.iter_f, self.stop_f)
return sequence
def plot_sequence(self, sequence):
"""Plot a 1d sequence.
Parameters
----------
sequence: np.ndarray
the sequence information.
Returns
-------
fig: matplotlib.pyplot.figure
the figure object which contains the plot.
"""
fig = plot_iteration(sequence)
return fig
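# Usage sketch (added for illustration, not part of the original module):
# iterating the logistic map x -> r*x*(1 - x).  The exact signatures expected
# by ``generic_iteration`` are assumptions here -- ``iter_f`` is taken to map a
# point to the next point and ``stop_f`` to decide, from the sequence built so
# far, whether to stop.
#
#     if __name__ == "__main__":
#         r = 3.7
#         logistic_map = lambda x: r * x * (1.0 - x)
#         stop_after_100 = lambda seq: len(seq) >= 100
#         it = Iterator(logistic_map, stop_after_100)
#         sequence = it.iterate_sequence(0.2)
#         fig = it.plot_sequence(sequence)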
| mit |