repo_name (string, 6-112 chars) | path (string, 4-204 chars) | copies (string, 1-3 chars) | size (string, 4-6 chars) | content (string, 714-810k chars) | license (string, 15 classes)
---|---|---|---|---|---|
larsoner/mne-python | examples/visualization/plot_evoked_topomap.py | 14 | 5554 | # -*- coding: utf-8 -*-
"""
.. _ex-evoked-topomap:
========================================
Plotting topographic maps of evoked data
========================================
Load evoked data and plot topomaps for selected time points using multiple
additional options.
"""
# Authors: Christian Brodbeck <[email protected]>
# Tal Linzen <[email protected]>
# Denis A. Engeman <[email protected]>
# Mikołaj Magnuski <[email protected]>
# Eric Larson <[email protected]>
#
# License: BSD (3-clause)
# sphinx_gallery_thumbnail_number = 5
import numpy as np
import matplotlib.pyplot as plt
from mne.datasets import sample
from mne import read_evokeds
print(__doc__)
path = sample.data_path()
fname = path + '/MEG/sample/sample_audvis-ave.fif'
# load evoked corresponding to a specific condition
# from the fif file and subtract baseline
condition = 'Left Auditory'
evoked = read_evokeds(fname, condition=condition, baseline=(None, 0))
###############################################################################
# Basic :func:`~mne.viz.plot_topomap` options
# -------------------------------------------
#
# We plot evoked topographies using :func:`mne.Evoked.plot_topomap`. The first
# argument, ``times``, lets us specify the time instants (in seconds!) for
# which topographies will be shown. We select time points from 50 to 150 ms
# with a step of 20 ms and plot magnetometer data:
times = np.arange(0.05, 0.151, 0.02)
evoked.plot_topomap(times, ch_type='mag', time_unit='s')
###############################################################################
# If ``times`` is set to ``None``, at most 10 regularly spaced topographies
# will be shown:
evoked.plot_topomap(ch_type='mag', time_unit='s')
###############################################################################
# We can use the ``nrows`` and ``ncols`` parameters to create multiline plots
# with more time points.
all_times = np.arange(-0.2, 0.5, 0.03)
evoked.plot_topomap(all_times, ch_type='mag', time_unit='s',
ncols=8, nrows='auto')
###############################################################################
# Instead of showing topographies at specific time points we can compute
# averages of 50 ms bins centered on these time points to reduce the noise in
# the topographies:
evoked.plot_topomap(times, ch_type='mag', average=0.05, time_unit='s')
###############################################################################
# We can plot gradiometer data (plots the RMS for each pair of gradiometers)
evoked.plot_topomap(times, ch_type='grad', time_unit='s')
###############################################################################
# Additional :func:`~mne.viz.plot_topomap` options
# ------------------------------------------------
#
# We can also use a range of various :func:`mne.viz.plot_topomap` arguments
# that control how the topography is drawn. For example:
#
# * ``cmap`` - to specify the color map
# * ``res`` - to control the resolution of the topographies (lower resolution
# means faster plotting)
# * ``outlines='skirt'`` to see the topography stretched beyond the head circle
# * ``contours`` to define how many contour lines should be plotted
evoked.plot_topomap(times, ch_type='mag', cmap='Spectral_r', res=32,
outlines='skirt', contours=4, time_unit='s')
###############################################################################
# If you look at the edges of the head circle of a single topomap you'll see
# the effect of extrapolation. There are three extrapolation modes:
#
# - ``extrapolate='local'`` extrapolates only to points close to the sensors.
# - ``extrapolate='head'`` extrapolates out to the head circle.
# - ``extrapolate='box'`` extrapolates to a large box stretching beyond the
# head circle.
#
# The default value ``extrapolate='auto'`` will use ``'local'`` for MEG sensors
# and ``'head'`` otherwise. Here we show each option:
extrapolations = ['local', 'head', 'box']
fig, axes = plt.subplots(figsize=(7.5, 4.5), nrows=2, ncols=3)
# Here we look at both MEG and EEG channels, and use a custom head sphere to
# get all the sensors to be well within the drawn head surface
for axes_row, ch_type in zip(axes, ('mag', 'eeg')):
for ax, extr in zip(axes_row, extrapolations):
evoked.plot_topomap(0.1, ch_type=ch_type, size=2, extrapolate=extr,
axes=ax, show=False, colorbar=False,
sphere=(0., 0., 0., 0.09))
ax.set_title('%s %s' % (ch_type.upper(), extr), fontsize=14)
fig.tight_layout()
###############################################################################
# More advanced usage
# -------------------
#
# Now we plot magnetometer data as a topomap at a single time point, 100 ms
# post-stimulus, add channel labels and a title, and adjust the plot margins:
evoked.plot_topomap(0.1, ch_type='mag', show_names=True, colorbar=False,
size=6, res=128, title='Auditory response',
time_unit='s')
plt.subplots_adjust(left=0.01, right=0.99, bottom=0.01, top=0.88)
###############################################################################
# Animating the topomap
# ---------------------
#
# Instead of using a still image, we can plot magnetometer data as an
# animation, which animates properly only in matplotlib interactive mode.
# sphinx_gallery_thumbnail_number = 9
times = np.arange(0.05, 0.151, 0.01)
fig, anim = evoked.animate_topomap(
times=times, ch_type='mag', frame_rate=2, time_unit='s', blit=False)
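###############################################################################
# The returned ``anim`` is a standard matplotlib animation object, so it can
# also be saved to disk and viewed outside interactive mode. This is a small
# added sketch (the file name is arbitrary and it assumes a GIF writer such as
# Pillow is installed):
anim.save('topomap_animation.gif', writer='pillow', fps=2)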
| bsd-3-clause |
Achuth17/scikit-learn | examples/linear_model/plot_logistic_l1_l2_sparsity.py | 384 | 2601 | """
==============================================
L1 Penalty and Sparsity in Logistic Regression
==============================================
Comparison of the sparsity (percentage of zero coefficients) of solutions when
L1 and L2 penalties are used for different values of C. We can see that large
values of C give more freedom to the model. Conversely, smaller values of C
constrain the model more. In the L1 penalty case, this leads to sparser
solutions.
We classify 8x8 images of digits into two classes: 0-4 against 5-9.
The visualization shows coefficients of the models for varying C.
"""
print(__doc__)
# Authors: Alexandre Gramfort <[email protected]>
# Mathieu Blondel <[email protected]>
# Andreas Mueller <[email protected]>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LogisticRegression
from sklearn import datasets
from sklearn.preprocessing import StandardScaler
digits = datasets.load_digits()
X, y = digits.data, digits.target
X = StandardScaler().fit_transform(X)
# classify small against large digits
y = (y > 4).astype(int)  # np.int is removed in recent NumPy versions
# Set regularization parameter
for i, C in enumerate((100, 1, 0.01)):
# turn down tolerance for short training time
clf_l1_LR = LogisticRegression(C=C, penalty='l1', tol=0.01)
clf_l2_LR = LogisticRegression(C=C, penalty='l2', tol=0.01)
clf_l1_LR.fit(X, y)
clf_l2_LR.fit(X, y)
coef_l1_LR = clf_l1_LR.coef_.ravel()
coef_l2_LR = clf_l2_LR.coef_.ravel()
# coef_l1_LR contains zeros due to the
# L1 sparsity inducing norm
sparsity_l1_LR = np.mean(coef_l1_LR == 0) * 100
sparsity_l2_LR = np.mean(coef_l2_LR == 0) * 100
print("C=%.2f" % C)
print("Sparsity with L1 penalty: %.2f%%" % sparsity_l1_LR)
print("score with L1 penalty: %.4f" % clf_l1_LR.score(X, y))
print("Sparsity with L2 penalty: %.2f%%" % sparsity_l2_LR)
print("score with L2 penalty: %.4f" % clf_l2_LR.score(X, y))
l1_plot = plt.subplot(3, 2, 2 * i + 1)
l2_plot = plt.subplot(3, 2, 2 * (i + 1))
if i == 0:
l1_plot.set_title("L1 penalty")
l2_plot.set_title("L2 penalty")
l1_plot.imshow(np.abs(coef_l1_LR.reshape(8, 8)), interpolation='nearest',
cmap='binary', vmax=1, vmin=0)
l2_plot.imshow(np.abs(coef_l2_LR.reshape(8, 8)), interpolation='nearest',
cmap='binary', vmax=1, vmin=0)
plt.text(-8, 3, "C = %.2f" % C)
l1_plot.set_xticks(())
l1_plot.set_yticks(())
l2_plot.set_xticks(())
l2_plot.set_yticks(())
plt.show()
| bsd-3-clause |
steven-chengji-yan/voicher | voicher.py | 1 | 1556 | import soundfile as sf
import sounddevice as sd
import matplotlib.pyplot as plt
import os
import numpy as np
def readEditWritePlayWav():
data, samplerate = sf.read("in.wav")
print data.shape
print samplerate
plt.plot(data)
plt.show()
samplerate = int(samplerate * 1.5)
sf.write("out.wav", data, samplerate)
os.system("afplay out.wav")
def recordEditPlay1():
fs = 44100
sd.default.samplerate = fs
sd.default.channels = 2
print "start recording..."
duration = 5 # seconds
myRec = sd.rec(duration * fs)
sd.wait() # wait until finish recording
print "start playing..."
sd.play(myRec)
sd.wait() # wait until finish playing
print "start recording while playing..."
myRec2 = sd.playrec(myRec)
sd.wait()
print "start playing mix..."
sd.play(myRec2)
sd.wait()
def recordEditPlay2():
fs = 44100
sd.default.samplerate = fs
sd.default.channels = 2
print "start recording..."
duration = 10 # seconds
myRec = sd.rec(duration * fs)
sd.wait()
print "start playing..."
sd.play(myRec, int(fs * 1.5))
sd.wait()
# for i, one in enumerate(data):
# one[0] *= 2
# one[1] *= 2
def PSOLA():
data, samplerate = sf.read("in.wav")
print data.shape
print samplerate
newData = np.zeros((data.shape[0] * 2, data.shape[1]))
j = 0
for i in range(data.shape[0]):
newData[j] = data[i]
newData[j+1] = data[i]
j += 2
# sd.play(data, samplerate)
# sd.wait()
sd.play(newData, samplerate * 2)
sd.wait()
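# Added sketch (not in the original script): the sample-doubling loop in
# PSOLA() can be replaced by a single numpy.repeat call, which copies every
# frame twice along the time axis and is equivalent but much faster.
def PSOLA_vectorized():
    data, samplerate = sf.read("in.wav")
    newData = np.repeat(data, 2, axis=0)  # each sample duplicated, as in PSOLA
    sd.play(newData, samplerate * 2)
    sd.wait()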
if __name__ == "__main__":
# readEditWritePlayWav()
# recordEditPlay1()
# recordEditPlay2()
PSOLA()
| mit |
pianomania/scikit-learn | examples/applications/plot_prediction_latency.py | 85 | 11395 | """
==================
Prediction Latency
==================
This is an example showing the prediction latency of various scikit-learn
estimators.
The goal is to measure the latency one can expect when doing predictions
either in bulk or atomic (i.e. one by one) mode.
The plots represent the distribution of the prediction latency as a boxplot.
"""
# Authors: Eustache Diemert <[email protected]>
# License: BSD 3 clause
from __future__ import print_function
from collections import defaultdict
import time
import gc
import numpy as np
import matplotlib.pyplot as plt
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from scipy.stats import scoreatpercentile
from sklearn.datasets.samples_generator import make_regression
from sklearn.ensemble.forest import RandomForestRegressor
from sklearn.linear_model.ridge import Ridge
from sklearn.linear_model.stochastic_gradient import SGDRegressor
from sklearn.svm.classes import SVR
from sklearn.utils import shuffle
def _not_in_sphinx():
# Hack to detect whether we are running by the sphinx builder
return '__file__' in globals()
def atomic_benchmark_estimator(estimator, X_test, verbose=False):
"""Measure runtime prediction of each instance."""
n_instances = X_test.shape[0]
runtimes = np.zeros(n_instances, dtype=np.float)
for i in range(n_instances):
instance = X_test[[i], :]
start = time.time()
estimator.predict(instance)
runtimes[i] = time.time() - start
if verbose:
print("atomic_benchmark runtimes:", min(runtimes), scoreatpercentile(
runtimes, 50), max(runtimes))
return runtimes
def bulk_benchmark_estimator(estimator, X_test, n_bulk_repeats, verbose):
"""Measure runtime prediction of the whole input."""
n_instances = X_test.shape[0]
runtimes = np.zeros(n_bulk_repeats, dtype=np.float)
for i in range(n_bulk_repeats):
start = time.time()
estimator.predict(X_test)
runtimes[i] = time.time() - start
runtimes = np.array(list(map(lambda x: x / float(n_instances), runtimes)))
if verbose:
print("bulk_benchmark runtimes:", min(runtimes), scoreatpercentile(
runtimes, 50), max(runtimes))
return runtimes
def benchmark_estimator(estimator, X_test, n_bulk_repeats=30, verbose=False):
"""
Measure runtimes of prediction in both atomic and bulk mode.
Parameters
----------
estimator : already trained estimator supporting `predict()`
X_test : test input
n_bulk_repeats : how many times to repeat when evaluating bulk mode
Returns
-------
atomic_runtimes, bulk_runtimes : a pair of `np.array` which contain the
runtimes in seconds.
"""
atomic_runtimes = atomic_benchmark_estimator(estimator, X_test, verbose)
bulk_runtimes = bulk_benchmark_estimator(estimator, X_test, n_bulk_repeats,
verbose)
return atomic_runtimes, bulk_runtimes
def generate_dataset(n_train, n_test, n_features, noise=0.1, verbose=False):
"""Generate a regression dataset with the given parameters."""
if verbose:
print("generating dataset...")
X, y, coef = make_regression(n_samples=n_train + n_test,
n_features=n_features, noise=noise, coef=True)
random_seed = 13
X_train, X_test, y_train, y_test = train_test_split(
X, y, train_size=n_train, random_state=random_seed)
X_train, y_train = shuffle(X_train, y_train, random_state=random_seed)
X_scaler = StandardScaler()
X_train = X_scaler.fit_transform(X_train)
X_test = X_scaler.transform(X_test)
y_scaler = StandardScaler()
y_train = y_scaler.fit_transform(y_train[:, None])[:, 0]
y_test = y_scaler.transform(y_test[:, None])[:, 0]
gc.collect()
if verbose:
print("ok")
return X_train, y_train, X_test, y_test
def boxplot_runtimes(runtimes, pred_type, configuration):
"""
Plot a new `Figure` with boxplots of prediction runtimes.
Parameters
----------
runtimes : list of `np.array` of latencies in micro-seconds
pred_type : 'bulk' or 'atomic'
configuration : dict describing the benchmark (estimators, n_features, etc.)
"""
fig, ax1 = plt.subplots(figsize=(10, 6))
bp = plt.boxplot(runtimes, )
cls_infos = ['%s\n(%d %s)' % (estimator_conf['name'],
estimator_conf['complexity_computer'](
estimator_conf['instance']),
estimator_conf['complexity_label']) for
estimator_conf in configuration['estimators']]
plt.setp(ax1, xticklabels=cls_infos)
plt.setp(bp['boxes'], color='black')
plt.setp(bp['whiskers'], color='black')
plt.setp(bp['fliers'], color='red', marker='+')
ax1.yaxis.grid(True, linestyle='-', which='major', color='lightgrey',
alpha=0.5)
ax1.set_axisbelow(True)
ax1.set_title('Prediction Time per Instance - %s, %d feats.' % (
pred_type.capitalize(),
configuration['n_features']))
ax1.set_ylabel('Prediction Time (us)')
plt.show()
def benchmark(configuration):
"""Run the whole benchmark."""
X_train, y_train, X_test, y_test = generate_dataset(
configuration['n_train'], configuration['n_test'],
configuration['n_features'])
stats = {}
for estimator_conf in configuration['estimators']:
print("Benchmarking", estimator_conf['instance'])
estimator_conf['instance'].fit(X_train, y_train)
gc.collect()
a, b = benchmark_estimator(estimator_conf['instance'], X_test)
stats[estimator_conf['name']] = {'atomic': a, 'bulk': b}
cls_names = [estimator_conf['name'] for estimator_conf in configuration[
'estimators']]
runtimes = [1e6 * stats[clf_name]['atomic'] for clf_name in cls_names]
boxplot_runtimes(runtimes, 'atomic', configuration)
runtimes = [1e6 * stats[clf_name]['bulk'] for clf_name in cls_names]
boxplot_runtimes(runtimes, 'bulk (%d)' % configuration['n_test'],
configuration)
def n_feature_influence(estimators, n_train, n_test, n_features, percentile):
"""
Estimate influence of the number of features on prediction time.
Parameters
----------
estimators : dict of (name (str), estimator) to benchmark
n_train : nber of training instances (int)
n_test : nber of testing instances (int)
n_features : list of feature-space dimensionality to test (int)
percentile : percentile at which to measure the speed (int [0-100])
Returns:
--------
percentiles : dict(estimator_name,
dict(n_features, percentile_perf_in_us))
"""
percentiles = defaultdict(defaultdict)
for n in n_features:
print("benchmarking with %d features" % n)
X_train, y_train, X_test, y_test = generate_dataset(n_train, n_test, n)
for cls_name, estimator in estimators.items():
estimator.fit(X_train, y_train)
gc.collect()
runtimes = bulk_benchmark_estimator(estimator, X_test, 30, False)
percentiles[cls_name][n] = 1e6 * scoreatpercentile(runtimes,
percentile)
return percentiles
def plot_n_features_influence(percentiles, percentile):
fig, ax1 = plt.subplots(figsize=(10, 6))
colors = ['r', 'g', 'b']
for i, cls_name in enumerate(percentiles.keys()):
x = np.array(sorted([n for n in percentiles[cls_name].keys()]))
y = np.array([percentiles[cls_name][n] for n in x])
plt.plot(x, y, color=colors[i], )
ax1.yaxis.grid(True, linestyle='-', which='major', color='lightgrey',
alpha=0.5)
ax1.set_axisbelow(True)
ax1.set_title('Evolution of Prediction Time with #Features')
ax1.set_xlabel('#Features')
ax1.set_ylabel('Prediction Time at %d%%-ile (us)' % percentile)
plt.show()
def benchmark_throughputs(configuration, duration_secs=0.1):
"""benchmark throughput for different estimators."""
X_train, y_train, X_test, y_test = generate_dataset(
configuration['n_train'], configuration['n_test'],
configuration['n_features'])
throughputs = dict()
for estimator_config in configuration['estimators']:
estimator_config['instance'].fit(X_train, y_train)
start_time = time.time()
n_predictions = 0
while (time.time() - start_time) < duration_secs:
estimator_config['instance'].predict(X_test[[0]])
n_predictions += 1
throughputs[estimator_config['name']] = n_predictions / duration_secs
return throughputs
def plot_benchmark_throughput(throughputs, configuration):
fig, ax = plt.subplots(figsize=(10, 6))
colors = ['r', 'g', 'b']
cls_infos = ['%s\n(%d %s)' % (estimator_conf['name'],
estimator_conf['complexity_computer'](
estimator_conf['instance']),
estimator_conf['complexity_label']) for
estimator_conf in configuration['estimators']]
cls_values = [throughputs[estimator_conf['name']] for estimator_conf in
configuration['estimators']]
plt.bar(range(len(throughputs)), cls_values, width=0.5, color=colors)
ax.set_xticks(np.linspace(0.25, len(throughputs) - 0.75, len(throughputs)))
ax.set_xticklabels(cls_infos, fontsize=10)
ymax = max(cls_values) * 1.2
ax.set_ylim((0, ymax))
ax.set_ylabel('Throughput (predictions/sec)')
ax.set_title('Prediction Throughput for different estimators (%d '
'features)' % configuration['n_features'])
plt.show()
###############################################################################
# main code
start_time = time.time()
# benchmark bulk/atomic prediction speed for various regressors
configuration = {
'n_train': int(1e3),
'n_test': int(1e2),
'n_features': int(1e2),
'estimators': [
{'name': 'Linear Model',
'instance': SGDRegressor(penalty='elasticnet', alpha=0.01,
l1_ratio=0.25, fit_intercept=True),
'complexity_label': 'non-zero coefficients',
'complexity_computer': lambda clf: np.count_nonzero(clf.coef_)},
{'name': 'RandomForest',
'instance': RandomForestRegressor(),
'complexity_label': 'estimators',
'complexity_computer': lambda clf: clf.n_estimators},
{'name': 'SVR',
'instance': SVR(kernel='rbf'),
'complexity_label': 'support vectors',
'complexity_computer': lambda clf: len(clf.support_vectors_)},
]
}
benchmark(configuration)
# benchmark n_features influence on prediction speed
percentile = 90
percentiles = n_feature_influence({'ridge': Ridge()},
configuration['n_train'],
configuration['n_test'],
[100, 250, 500], percentile)
plot_n_features_influence(percentiles, percentile)
# benchmark throughput
throughputs = benchmark_throughputs(configuration)
plot_benchmark_throughput(throughputs, configuration)
stop_time = time.time()
print("example run in %.2fs" % (stop_time - start_time))
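# Added sketch (not part of the original benchmark): further estimators can be
# benchmarked by appending an entry to ``configuration['estimators']`` with a
# complexity callback evaluated on the fitted model, then rerunning
# ``benchmark``. Wrapped in a helper here so the timings above stay unchanged.
def benchmark_with_decision_tree(configuration):
    from sklearn.tree import DecisionTreeRegressor
    configuration['estimators'].append(
        {'name': 'DecisionTree',
         'instance': DecisionTreeRegressor(),
         'complexity_label': 'nodes',
         'complexity_computer': lambda clf: clf.tree_.node_count})
    benchmark(configuration)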
| bsd-3-clause |
neuroidss/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/table.py | 69 | 16757 | """
Place a table below the x-axis at location loc.
The table consists of a grid of cells.
The grid need not be rectangular and can have holes.
Cells are added by specifying their row and column.
For the purposes of positioning the cell at (0, 0) is
assumed to be at the top left and the cell at (max_row, max_col)
is assumed to be at bottom right.
You can add additional cells outside this range to have convenient
ways of positioning more interesting grids.
Author : John Gill <[email protected]>
Copyright : 2004 John Gill and John Hunter
License : matplotlib license
"""
from __future__ import division
import warnings
import artist
from artist import Artist
from patches import Rectangle
from cbook import is_string_like
from text import Text
from transforms import Bbox
class Cell(Rectangle):
"""
A cell is a Rectangle with some associated text.
"""
PAD = 0.1 # padding between text and rectangle
def __init__(self, xy, width, height,
edgecolor='k', facecolor='w',
fill=True,
text='',
loc=None,
fontproperties=None
):
# Call base
Rectangle.__init__(self, xy, width=width, height=height,
edgecolor=edgecolor, facecolor=facecolor,
)
self.set_clip_on(False)
# Create text object
if loc is None: loc = 'right'
self._loc = loc
self._text = Text(x=xy[0], y=xy[1], text=text,
fontproperties=fontproperties)
self._text.set_clip_on(False)
def set_transform(self, trans):
Rectangle.set_transform(self, trans)
# the text does not get the transform!
def set_figure(self, fig):
Rectangle.set_figure(self, fig)
self._text.set_figure(fig)
def get_text(self):
'Return the cell Text instance'
return self._text
def set_fontsize(self, size):
self._text.set_fontsize(size)
def get_fontsize(self):
'Return the cell fontsize'
return self._text.get_fontsize()
def auto_set_font_size(self, renderer):
""" Shrink font size until text fits. """
fontsize = self.get_fontsize()
required = self.get_required_width(renderer)
while fontsize > 1 and required > self.get_width():
fontsize -= 1
self.set_fontsize(fontsize)
required = self.get_required_width(renderer)
return fontsize
def draw(self, renderer):
if not self.get_visible(): return
# draw the rectangle
Rectangle.draw(self, renderer)
# position the text
self._set_text_position(renderer)
self._text.draw(renderer)
def _set_text_position(self, renderer):
""" Set text up so it draws in the right place.
Currently support 'left', 'center' and 'right'
"""
bbox = self.get_window_extent(renderer)
l, b, w, h = bbox.bounds
# draw in center vertically
self._text.set_verticalalignment('center')
y = b + (h / 2.0)
# now position horizontally
if self._loc == 'center':
self._text.set_horizontalalignment('center')
x = l + (w / 2.0)
elif self._loc == 'left':
self._text.set_horizontalalignment('left')
x = l + (w * self.PAD)
else:
self._text.set_horizontalalignment('right')
x = l + (w * (1.0 - self.PAD))
self._text.set_position((x, y))
def get_text_bounds(self, renderer):
""" Get text bounds in axes co-ordinates. """
bbox = self._text.get_window_extent(renderer)
bboxa = bbox.inverse_transformed(self.get_data_transform())
return bboxa.bounds
def get_required_width(self, renderer):
""" Get width required for this cell. """
l,b,w,h = self.get_text_bounds(renderer)
return w * (1.0 + (2.0 * self.PAD))
def set_text_props(self, **kwargs):
'update the text properties with kwargs'
self._text.update(kwargs)
class Table(Artist):
"""
Create a table of cells.
Table can have (optional) row and column headers.
Each entry in the table can be either text or patches.
Column widths and row heights for the table can be specified.
Return value is a sequence of text, line and patch instances that make
up the table
"""
codes = {'best' : 0,
'upper right' : 1, # default
'upper left' : 2,
'lower left' : 3,
'lower right' : 4,
'center left' : 5,
'center right' : 6,
'lower center' : 7,
'upper center' : 8,
'center' : 9,
'top right' : 10,
'top left' : 11,
'bottom left' : 12,
'bottom right' : 13,
'right' : 14,
'left' : 15,
'top' : 16,
'bottom' : 17,
}
FONTSIZE = 10
AXESPAD = 0.02 # the border between the axes and table edge
def __init__(self, ax, loc=None, bbox=None):
Artist.__init__(self)
if is_string_like(loc) and loc not in self.codes:
warnings.warn('Unrecognized location %s. Falling back on bottom; valid locations are\n%s\t' %(loc, '\n\t'.join(self.codes.keys())))
loc = 'bottom'
if is_string_like(loc): loc = self.codes.get(loc, 1)
self.set_figure(ax.figure)
self._axes = ax
self._loc = loc
self._bbox = bbox
# use axes coords
self.set_transform(ax.transAxes)
self._texts = []
self._cells = {}
self._autoRows = []
self._autoColumns = []
self._autoFontsize = True
self._cachedRenderer = None
def add_cell(self, row, col, *args, **kwargs):
""" Add a cell to the table. """
xy = (0,0)
cell = Cell(xy, *args, **kwargs)
cell.set_figure(self.figure)
cell.set_transform(self.get_transform())
cell.set_clip_on(False)
self._cells[(row, col)] = cell
def _approx_text_height(self):
return self.FONTSIZE/72.0*self.figure.dpi/self._axes.bbox.height * 1.2
def draw(self, renderer):
# Need a renderer to do hit tests on mouseevent; assume the last one will do
if renderer is None:
renderer = self._cachedRenderer
if renderer is None:
raise RuntimeError('No renderer defined')
self._cachedRenderer = renderer
if not self.get_visible(): return
renderer.open_group('table')
self._update_positions(renderer)
keys = self._cells.keys()
keys.sort()
for key in keys:
self._cells[key].draw(renderer)
#for c in self._cells.itervalues():
# c.draw(renderer)
renderer.close_group('table')
def _get_grid_bbox(self, renderer):
"""Get a bbox, in axes co-ordinates for the cells.
Only include those in the range (0,0) to (maxRow, maxCol)"""
boxes = [self._cells[pos].get_window_extent(renderer)
for pos in self._cells.keys()
if pos[0] >= 0 and pos[1] >= 0]
bbox = Bbox.union(boxes)
return bbox.inverse_transformed(self.get_transform())
def contains(self,mouseevent):
"""Test whether the mouse event occurred in the table.
Returns T/F, {}
"""
if callable(self._contains): return self._contains(self,mouseevent)
# TODO: Return index of the cell containing the cursor so that the user
# doesn't have to bind to each one individually.
if self._cachedRenderer is not None:
boxes = [self._cells[pos].get_window_extent(self._cachedRenderer)
for pos in self._cells.keys()
if pos[0] >= 0 and pos[1] >= 0]
bbox = bbox_all(boxes)
return bbox.contains(mouseevent.x,mouseevent.y),{}
else:
return False,{}
def get_children(self):
'Return the Artists contained by the table'
return self._cells.values()
get_child_artists = get_children # backward compatibility
def get_window_extent(self, renderer):
'Return the bounding box of the table in window coords'
boxes = [c.get_window_extent(renderer) for c in self._cells]
return bbox_all(boxes)
def _do_cell_alignment(self):
""" Calculate row heights and column widths.
Position cells accordingly.
"""
# Calculate row/column widths
widths = {}
heights = {}
for (row, col), cell in self._cells.iteritems():
height = heights.setdefault(row, 0.0)
heights[row] = max(height, cell.get_height())
width = widths.setdefault(col, 0.0)
widths[col] = max(width, cell.get_width())
# work out left position for each column
xpos = 0
lefts = {}
cols = widths.keys()
cols.sort()
for col in cols:
lefts[col] = xpos
xpos += widths[col]
ypos = 0
bottoms = {}
rows = heights.keys()
rows.sort()
rows.reverse()
for row in rows:
bottoms[row] = ypos
ypos += heights[row]
# set cell positions
for (row, col), cell in self._cells.iteritems():
cell.set_x(lefts[col])
cell.set_y(bottoms[row])
def auto_set_column_width(self, col):
self._autoColumns.append(col)
def _auto_set_column_width(self, col, renderer):
""" Automagically set width for column.
"""
cells = [key for key in self._cells if key[1] == col]
# find max width
width = 0
for cell in cells:
c = self._cells[cell]
width = max(c.get_required_width(renderer), width)
# Now set the widths
for cell in cells:
self._cells[cell].set_width(width)
def auto_set_font_size(self, value=True):
""" Automatically set font size. """
self._autoFontsize = value
def _auto_set_font_size(self, renderer):
if len(self._cells) == 0:
return
fontsize = self._cells.values()[0].get_fontsize()
cells = []
for key, cell in self._cells.iteritems():
# ignore auto-sized columns
if key[1] in self._autoColumns: continue
size = cell.auto_set_font_size(renderer)
fontsize = min(fontsize, size)
cells.append(cell)
# now set all fontsizes equal
for cell in self._cells.itervalues():
cell.set_fontsize(fontsize)
def scale(self, xscale, yscale):
""" Scale column widths by xscale and row heights by yscale. """
for c in self._cells.itervalues():
c.set_width(c.get_width() * xscale)
c.set_height(c.get_height() * yscale)
def set_fontsize(self, size):
"""
Set the fontsize of the cell text
ACCEPTS: a float in points
"""
for cell in self._cells.itervalues():
cell.set_fontsize(size)
def _offset(self, ox, oy):
'Move all the artists by ox,oy (axes coords)'
for c in self._cells.itervalues():
x, y = c.get_x(), c.get_y()
c.set_x(x+ox)
c.set_y(y+oy)
def _update_positions(self, renderer):
# called from renderer to allow more precise estimates of
# widths and heights with get_window_extent
# Do any auto width setting
for col in self._autoColumns:
self._auto_set_column_width(col, renderer)
if self._autoFontsize:
self._auto_set_font_size(renderer)
# Align all the cells
self._do_cell_alignment()
bbox = self._get_grid_bbox(renderer)
l,b,w,h = bbox.bounds
if self._bbox is not None:
# Position according to bbox
rl, rb, rw, rh = self._bbox
self.scale(rw/w, rh/h)
ox = rl - l
oy = rb - b
self._do_cell_alignment()
else:
# Position using loc
(BEST, UR, UL, LL, LR, CL, CR, LC, UC, C,
TR, TL, BL, BR, R, L, T, B) = range(len(self.codes))
# defaults for center
ox = (0.5-w/2)-l
oy = (0.5-h/2)-b
if self._loc in (UL, LL, CL): # left
ox = self.AXESPAD - l
if self._loc in (BEST, UR, LR, R, CR): # right
ox = 1 - (l + w + self.AXESPAD)
if self._loc in (BEST, UR, UL, UC): # upper
oy = 1 - (b + h + self.AXESPAD)
if self._loc in (LL, LR, LC): # lower
oy = self.AXESPAD - b
if self._loc in (LC, UC, C): # center x
ox = (0.5-w/2)-l
if self._loc in (CL, CR, C): # center y
oy = (0.5-h/2)-b
if self._loc in (TL, BL, L): # out left
ox = - (l + w)
if self._loc in (TR, BR, R): # out right
ox = 1.0 - l
if self._loc in (TR, TL, T): # out top
oy = 1.0 - b
if self._loc in (BL, BR, B): # out bottom
oy = - (b + h)
self._offset(ox, oy)
def get_celld(self):
'return a dict of cells in the table'
return self._cells
def table(ax,
cellText=None, cellColours=None,
cellLoc='right', colWidths=None,
rowLabels=None, rowColours=None, rowLoc='left',
colLabels=None, colColours=None, colLoc='center',
loc='bottom', bbox=None):
"""
TABLE(cellText=None, cellColours=None,
cellLoc='right', colWidths=None,
rowLabels=None, rowColours=None, rowLoc='left',
colLabels=None, colColours=None, colLoc='center',
loc='bottom', bbox=None)
Factory function to generate a Table instance.
Thanks to John Gill for providing the class and table.
"""
# Check we have some cellText
if cellText is None:
# assume just colours are needed
rows = len(cellColours)
cols = len(cellColours[0])
cellText = [[''] * cols] * rows  # one empty string per cell (rows x cols)
rows = len(cellText)
cols = len(cellText[0])
for row in cellText:
assert len(row) == cols
if cellColours is not None:
assert len(cellColours) == rows
for row in cellColours:
assert len(row) == cols
else:
cellColours = ['w' * cols] * rows
# Set colwidths if not given
if colWidths is None:
colWidths = [1.0/cols] * cols
# Check row and column labels
rowLabelWidth = 0
if rowLabels is None:
if rowColours is not None:
rowLabels = [''] * rows
rowLabelWidth = colWidths[0]
elif rowColours is None:
rowColours = 'w' * rows
if rowLabels is not None:
assert len(rowLabels) == rows
offset = 0
if colLabels is None:
if colColours is not None:
colLabels = [''] * cols
offset = 1
elif colColours is None:
colColours = 'w' * cols
offset = 1
if rowLabels is not None:
assert len(rowLabels) == rows
# Set up cell colours if not given
if cellColours is None:
cellColours = ['w' * cols] * rows
# Now create the table
table = Table(ax, loc, bbox)
height = table._approx_text_height()
# Add the cells
for row in xrange(rows):
for col in xrange(cols):
table.add_cell(row+offset, col,
width=colWidths[col], height=height,
text=cellText[row][col],
facecolor=cellColours[row][col],
loc=cellLoc)
# Do column labels
if colLabels is not None:
for col in xrange(cols):
table.add_cell(0, col,
width=colWidths[col], height=height,
text=colLabels[col], facecolor=colColours[col],
loc=colLoc)
# Do row labels
if rowLabels is not None:
for row in xrange(rows):
table.add_cell(row+offset, -1,
width=rowLabelWidth or 1e-15, height=height,
text=rowLabels[row], facecolor=rowColours[row],
loc=rowLoc)
if rowLabelWidth == 0:
table.auto_set_column_width(-1)
ax.add_table(table)
return table
artist.kwdocd['Table'] = artist.kwdoc(Table)
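# Added usage sketch (not part of this module): in user code the ``table``
# factory above is normally reached through the public Axes.table API; the
# guard keeps this demo from running on import, and the cell values are
# arbitrary.
if __name__ == '__main__':
    import matplotlib.pyplot as plt
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.table(cellText=[['1', '2'], ['3', '4']],
             colLabels=['a', 'b'], loc='bottom')
    plt.show()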
| agpl-3.0 |
potash/scikit-learn | examples/text/mlcomp_sparse_document_classification.py | 33 | 4515 | """
========================================================
Classification of text documents: using a MLComp dataset
========================================================
This is an example showing how scikit-learn can be used to classify
documents by topics using a bag-of-words approach. This example uses
a scipy.sparse matrix to store the features instead of standard numpy arrays.
The dataset used in this example is the 20 newsgroups dataset and should be
downloaded from http://mlcomp.org (free registration required):
http://mlcomp.org/datasets/379
Once downloaded unzip the archive somewhere on your filesystem.
For instance in::
% mkdir -p ~/data/mlcomp
% cd ~/data/mlcomp
% unzip /path/to/dataset-379-20news-18828_XXXXX.zip
You should get a folder ``~/data/mlcomp/379`` with a file named ``metadata``
and subfolders ``raw``, ``train`` and ``test`` holding the text documents
organized by newsgroups.
Then set the ``MLCOMP_DATASETS_HOME`` environment variable pointing to
the root folder holding the uncompressed archive::
% export MLCOMP_DATASETS_HOME="~/data/mlcomp"
Then you are ready to run this example using your favorite python shell::
% ipython examples/mlcomp_sparse_document_classification.py
"""
# Author: Olivier Grisel <[email protected]>
# License: BSD 3 clause
from __future__ import print_function
from time import time
import sys
import os
import numpy as np
import scipy.sparse as sp
import matplotlib.pyplot as plt
from sklearn.datasets import load_mlcomp
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import SGDClassifier
from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report
from sklearn.naive_bayes import MultinomialNB
print(__doc__)
if 'MLCOMP_DATASETS_HOME' not in os.environ:
print("MLCOMP_DATASETS_HOME not set; please follow the above instructions")
sys.exit(0)
# Load the training set
print("Loading 20 newsgroups training set... ")
news_train = load_mlcomp('20news-18828', 'train')
print(news_train.DESCR)
print("%d documents" % len(news_train.filenames))
print("%d categories" % len(news_train.target_names))
print("Extracting features from the dataset using a sparse vectorizer")
t0 = time()
vectorizer = TfidfVectorizer(encoding='latin1')
X_train = vectorizer.fit_transform((open(f).read()
for f in news_train.filenames))
print("done in %fs" % (time() - t0))
print("n_samples: %d, n_features: %d" % X_train.shape)
assert sp.issparse(X_train)
y_train = news_train.target
print("Loading 20 newsgroups test set... ")
news_test = load_mlcomp('20news-18828', 'test')
t0 = time()
print("done in %fs" % (time() - t0))
print("Predicting the labels of the test set...")
print("%d documents" % len(news_test.filenames))
print("%d categories" % len(news_test.target_names))
print("Extracting features from the dataset using the same vectorizer")
t0 = time()
X_test = vectorizer.transform((open(f).read() for f in news_test.filenames))
y_test = news_test.target
print("done in %fs" % (time() - t0))
print("n_samples: %d, n_features: %d" % X_test.shape)
###############################################################################
# Benchmark classifiers
def benchmark(clf_class, params, name):
print("parameters:", params)
t0 = time()
clf = clf_class(**params).fit(X_train, y_train)
print("done in %fs" % (time() - t0))
if hasattr(clf, 'coef_'):
print("Percentage of non zeros coef: %f"
% (np.mean(clf.coef_ != 0) * 100))
print("Predicting the outcomes of the testing set")
t0 = time()
pred = clf.predict(X_test)
print("done in %fs" % (time() - t0))
print("Classification report on test set for classifier:")
print(clf)
print()
print(classification_report(y_test, pred,
target_names=news_test.target_names))
cm = confusion_matrix(y_test, pred)
print("Confusion matrix:")
print(cm)
# Show confusion matrix
plt.matshow(cm)
plt.title('Confusion matrix of the %s classifier' % name)
plt.colorbar()
print("Testbenching a linear classifier...")
parameters = {
'loss': 'hinge',
'penalty': 'l2',
'n_iter': 50,
'alpha': 0.00001,
'fit_intercept': True,
}
benchmark(SGDClassifier, parameters, 'SGD')
print("Testbenching a MultinomialNB classifier...")
parameters = {'alpha': 0.01}
benchmark(MultinomialNB, parameters, 'MultinomialNB')
plt.show()
| bsd-3-clause |
JosmanPS/scikit-learn | examples/linear_model/plot_lasso_coordinate_descent_path.py | 254 | 2639 | """
=====================
Lasso and Elastic Net
=====================
Lasso and elastic net (L1 and L2 penalisation) implemented using
coordinate descent.
The coefficients can be forced to be positive.
"""
print(__doc__)
# Author: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import lasso_path, enet_path
from sklearn import datasets
diabetes = datasets.load_diabetes()
X = diabetes.data
y = diabetes.target
X /= X.std(axis=0) # Standardize data (easier to set the l1_ratio parameter)
# Compute paths
eps = 5e-3 # the smaller it is the longer is the path
print("Computing regularization path using the lasso...")
alphas_lasso, coefs_lasso, _ = lasso_path(X, y, eps, fit_intercept=False)
print("Computing regularization path using the positive lasso...")
alphas_positive_lasso, coefs_positive_lasso, _ = lasso_path(
X, y, eps, positive=True, fit_intercept=False)
print("Computing regularization path using the elastic net...")
alphas_enet, coefs_enet, _ = enet_path(
X, y, eps=eps, l1_ratio=0.8, fit_intercept=False)
print("Computing regularization path using the positive elastic net...")
alphas_positive_enet, coefs_positive_enet, _ = enet_path(
X, y, eps=eps, l1_ratio=0.8, positive=True, fit_intercept=False)
# Display results
plt.figure(1)
ax = plt.gca()
ax.set_prop_cycle(color=2 * ['b', 'r', 'g', 'c', 'k'])  # set_color_cycle was removed in newer matplotlib
l1 = plt.plot(-np.log10(alphas_lasso), coefs_lasso.T)
l2 = plt.plot(-np.log10(alphas_enet), coefs_enet.T, linestyle='--')
plt.xlabel('-Log(alpha)')
plt.ylabel('coefficients')
plt.title('Lasso and Elastic-Net Paths')
plt.legend((l1[-1], l2[-1]), ('Lasso', 'Elastic-Net'), loc='lower left')
plt.axis('tight')
plt.figure(2)
ax = plt.gca()
ax.set_prop_cycle(color=2 * ['b', 'r', 'g', 'c', 'k'])
l1 = plt.plot(-np.log10(alphas_lasso), coefs_lasso.T)
l2 = plt.plot(-np.log10(alphas_positive_lasso), coefs_positive_lasso.T,
linestyle='--')
plt.xlabel('-Log(alpha)')
plt.ylabel('coefficients')
plt.title('Lasso and positive Lasso')
plt.legend((l1[-1], l2[-1]), ('Lasso', 'positive Lasso'), loc='lower left')
plt.axis('tight')
plt.figure(3)
ax = plt.gca()
ax.set_prop_cycle(color=2 * ['b', 'r', 'g', 'c', 'k'])
l1 = plt.plot(-np.log10(alphas_enet), coefs_enet.T)
l2 = plt.plot(-np.log10(alphas_positive_enet), coefs_positive_enet.T,
linestyle='--')
plt.xlabel('-Log(alpha)')
plt.ylabel('coefficients')
plt.title('Elastic-Net and positive Elastic-Net')
plt.legend((l1[-1], l2[-1]), ('Elastic-Net', 'positive Elastic-Net'),
loc='lower left')
plt.axis('tight')
plt.show()
| bsd-3-clause |
HPCC-Cloud-Computing/press | prediction/predict/feedforward/main.py | 1 | 1191 | import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from feedforward import FeedForwardNN
window_size = 4
df = pd.read_csv('data-10min_workload.csv', names=['request'])
data = df['request'].values
data_train = data[40 * 144:47 * 144]
data_test = data[46 * 144:48 * 144]
x_train = []
for i in range(len(data_train) - window_size):
x_train.append(data_train[i:i + window_size])
x_train = np.array(x_train)
y_train = data_train[window_size:]
x_test = []
for i in range(len(data_test) - window_size):
x_test.append(data_test[i:i + window_size])
x_test = np.array(x_test)
y_test = data_test[window_size:]
# Preprocessing: min-max scale to [0, 1] using the training-set range
min_value = min(data_train)
max_value = max(data_train)
x_train = (x_train - min_value) / (max_value - min_value)
y_train = (y_train - min_value) / (max_value - min_value)
x_test = (x_test - min_value) / (max_value - min_value)
nn = FeedForwardNN(x_train, y_train, loss_func='mean_absolute_error')
nn.fit()
y_predict = nn.predict(x_test) * (max_value - min_value) + min_value
y_predict = np.array(list(map(int, y_predict)))
plt.figure()
plt.plot(y_test, label='Actual')
plt.plot(y_predict, 'r-', label='Predict')
plt.legend()
plt.show()
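# Added sketch (not in the original script): a quick numeric summary to go with
# the plot, the mean absolute error of the prediction on the held-out window.
mae = np.mean(np.abs(y_predict - y_test))
print('Mean absolute error on the test window: %.2f requests' % mae)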
| mit |
dsullivan7/scikit-learn | sklearn/externals/joblib/__init__.py | 35 | 4382 | """ Joblib is a set of tools to provide **lightweight pipelining in
Python**. In particular, joblib offers:
1. transparent disk-caching of the output values and lazy re-evaluation
(memoize pattern)
2. easy simple parallel computing
3. logging and tracing of the execution
Joblib is optimized to be **fast** and **robust** in particular on large
data and has specific optimizations for `numpy` arrays. It is
**BSD-licensed**.
============================== ============================================
**User documentation**: http://packages.python.org/joblib
**Download packages**: http://pypi.python.org/pypi/joblib#downloads
**Source code**: http://github.com/joblib/joblib
**Report issues**: http://github.com/joblib/joblib/issues
============================== ============================================
Vision
--------
The vision is to provide tools to easily achieve better performance and
reproducibility when working with long running jobs.
* **Avoid computing the same thing twice**: code is rerun over and
over, for instance when prototyping computation-heavy jobs (as in
scientific development), but hand-crafted solutions to alleviate this
issue are error-prone and often lead to unreproducible results
* **Persist to disk transparently**: persisting in an efficient way
arbitrary objects containing large data is hard. Using
joblib's caching mechanism avoids hand-written persistence and
implicitly links the file on disk to the execution context of
the original Python object. As a result, joblib's persistence is
good for resuming an application status or computational job, e.g.
after a crash.
Joblib strives to address these problems while **leaving your code and
your flow control as unmodified as possible** (no framework, no new
paradigms).
Main features
------------------
1) **Transparent and fast disk-caching of output value:** a memoize or
make-like functionality for Python functions that works well for
arbitrary Python objects, including very large numpy arrays. Separate
persistence and flow-execution logic from domain logic or algorithmic
code by writing the operations as a set of steps with well-defined
inputs and outputs: Python functions. Joblib can save their
computation to disk and rerun it only if necessary::
>>> import numpy as np
>>> from sklearn.externals.joblib import Memory
>>> mem = Memory(cachedir='/tmp/joblib')
>>> import numpy as np
>>> a = np.vander(np.arange(3)).astype(np.float)
>>> square = mem.cache(np.square)
>>> b = square(a) # doctest: +ELLIPSIS
________________________________________________________________________________
[Memory] Calling square...
square(array([[ 0., 0., 1.],
[ 1., 1., 1.],
[ 4., 2., 1.]]))
___________________________________________________________square - 0...s, 0.0min
>>> c = square(a)
>>> # The above call did not trigger an evaluation
2) **Embarrassingly parallel helper:** to make it easy to write readable
parallel code and debug it quickly::
>>> from sklearn.externals.joblib import Parallel, delayed
>>> from math import sqrt
>>> Parallel(n_jobs=1)(delayed(sqrt)(i**2) for i in range(10))
[0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0]
3) **Logging/tracing:** The different functionalities will
progressively acquire better logging mechanism to help track what
has been run, and capture I/O easily. In addition, Joblib will
provide a few I/O primitives, to easily define logging and
display streams, and provide a way of compiling a report.
We want to be able to quickly inspect what has been run.
4) **Fast compressed Persistence**: a replacement for pickle to work
efficiently on Python objects containing large data
(*joblib.dump* & *joblib.load*).
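A minimal sketch (added here; exact return values can differ between
versions)::
>>> from sklearn.externals.joblib import dump, load
>>> import numpy as np
>>> _ = dump({'a': np.arange(3)}, '/tmp/joblib_example.pkl')
>>> load('/tmp/joblib_example.pkl')['a']
array([0, 1, 2])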
..
>>> import shutil ; shutil.rmtree('/tmp/joblib/')
"""
__version__ = '0.8.4'
from .memory import Memory, MemorizedResult
from .logger import PrintTime
from .logger import Logger
from .hashing import hash
from .numpy_pickle import dump
from .numpy_pickle import load
from .parallel import Parallel
from .parallel import delayed
from .parallel import cpu_count
| bsd-3-clause |
danijar/layered | layered/plot.py | 1 | 4739 | # pylint: disable=wrong-import-position
import collections
import time
import warnings
import inspect
import threading
import matplotlib
# Don't call the code if Sphinx inspects the file mocking external imports.
if inspect.ismodule(matplotlib): # noqa
# On Mac force backend that works with threading.
if matplotlib.get_backend() == 'MacOSX':
matplotlib.use('TkAgg')
# Hide matplotlib deprecation message.
warnings.filterwarnings('ignore', category=matplotlib.cbook.mplDeprecation)
# Ensure available interactive backend.
if matplotlib.get_backend() not in matplotlib.rcsetup.interactive_bk:
print('No visual backend available. Maybe you are inside a virtualenv '
'that was created without --system-site-packages.')
import matplotlib.pyplot as plt
class Interface:
def __init__(self, title='', xlabel='', ylabel='', style=None):
self._style = style or {}
self._title = title
self._xlabel = xlabel
self._ylabel = ylabel
self.xdata = []
self.ydata = []
self.width = 0
self.height = 0
@property
def style(self):
return self._style
@property
def title(self):
return self._title
@property
def xlabel(self):
return self._xlabel
@property
def ylabel(self):
return self._ylabel
class State:
def __init__(self):
self.running = False
class Window:
def __init__(self, refresh=0.5):
self.refresh = refresh
self.thread = None
self.state = State()
self.figure = plt.figure()
self.interfaces = []
plt.ion()
plt.show()
def register(self, position, interface):
axis = self.figure.add_subplot(
position, title=interface.title,
xlabel=interface.xlabel, ylabel=interface.ylabel)
axis.get_xaxis().set_ticks([])
line, = axis.plot(interface.xdata, interface.ydata, **interface.style)
self.interfaces.append((axis, line, interface))
def start(self, work):
"""
Hand the main thread to the window and continue work in the provided
function. A state is passed as the first argument that contains a
`running` flag. The function is expected to exit if the flag becomes
false. The flag can also be set to false to stop the window event loop
and continue in the main thread after the `start()` call.
"""
assert threading.current_thread() == threading.main_thread()
assert not self.state.running
self.state.running = True
self.thread = threading.Thread(target=work, args=(self.state,))
self.thread.start()
while self.state.running:
try:
before = time.time()
self.update()
duration = time.time() - before
plt.pause(max(0.001, self.refresh - duration))
except KeyboardInterrupt:
self.state.running = False
self.thread.join()
return
def stop(self):
"""
Close the window and stop the worker thread. The main thread will
resume with the next command after the `start()` call.
"""
assert threading.current_thread() == self.thread
assert self.state.running
self.state.running = False
def update(self):
"""
Redraw the figure to show changed data. This is automatically called
after `start()` was run.
"""
assert threading.current_thread() == threading.main_thread()
for axis, line, interface in self.interfaces:
line.set_xdata(interface.xdata)
line.set_ydata(interface.ydata)
axis.set_xlim(0, interface.width or 1, emit=False)
axis.set_ylim(0, interface.height or 1, emit=False)
self.figure.canvas.draw()
class Plot(Interface):
def __init__(self, title, xlabel, ylabel, style=None, fixed=None):
# pylint: disable=too-many-arguments, redefined-variable-type
super().__init__(title, xlabel, ylabel, style or {})
self.max_ = 0
if not fixed:
self.xdata = []
self.ydata = []
else:
self.xdata = list(range(fixed))
self.ydata = collections.deque([None] * fixed, maxlen=fixed)
self.width = fixed
def __call__(self, values):
self.ydata += values
self.max_ = max(self.max_, *values)
self.height = 1.05 * self.max_
while len(self.xdata) < len(self.ydata):
self.xdata.append(len(self.xdata))
self.width = len(self.xdata) - 1
assert len(self.xdata) == len(self.ydata)
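# Added usage sketch (not part of the module): wire a Plot into a Window and
# feed it from the worker thread started by Window.start(); the random values
# just stand in for a real metric producer.
def _demo():
    import random
    window = Window(refresh=0.5)
    plot = Plot('loss', 'batch', 'cost', fixed=100)
    window.register(111, plot)
    def work(state):
        while state.running:
            plot([random.random()])
            time.sleep(0.1)
    window.start(work)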
| mit |
SageMay/Cross_Bridge_Fluid_Sim | Vel_2CB_1A_1M.py | 1 | 8750 | # -*- coding: utf-8 -*-
"""
Created on Wed Dec 7 12:49:05 2016
@author: sagemalingen
"""
import numpy as np
from numpy import linalg
import matplotlib.pyplot as plt
import math
# 25 X 160 nm, on the z = 0 plain
"""Generates a graph of the velocity vector field resultant from two cross
bridges in lanes created by a single myosin fiber and a single actin fiber."""
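# Added commentary (not in the original script): each sphere centre is treated
# as a point singularity. The gM blocks assembled below are Stokeslet
# (Oseen-tensor) terms, I/r + (x x^T)/r**3, the dM blocks are potential-dipole
# terms, -I/r**3 + 3(x x^T)/r**5, and the singularity strengths are recovered
# from the prescribed boundary velocities with a Moore-Penrose pseudo-inverse.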
#BLOCK 1: Geometric parameters ---> USER INPUT NECESSARY
sm = 8 # Spacing between Myosin filament spheres -- point of singularity to point of singularity
sa = 4 # Spacing between actin filament spheres
r1 = 4 # Radius of the myosin spheres #nanometer
r2 = 2 # Radius of the actin spheres #nanometer
d = 16.5 # Radial distance from actin filament to myosin filament
dm = 9 # Distance from myosin cross bridge singularity to myosin filament
nmyo = 18 # Number of myosin filament spheres
# BLOCK 2: Create a list of distances to measure change in force as function of proximity.
dist = []
for i in range(0,1):
dist.append(-i)
# BLOCK 3: Constructing filaments.
# MYOSIN FILAMENT, BEGINNING AT [0,0,0]
mf = []
for i in range(0,nmyo):
e =[i*(sm),0,0]
mf.append(e)
lenmyo = nmyo*sm
# ACTIN FILAMENT
nact = math.ceil(lenmyo/(sa))
af3 = []
for i in range(0,nact):
e3 = [i*sa,d,0]
af3.append(e3)
lenactin = len(af3)
lenmyosin = len(mf)
# Lists where we will store the strengths on just the cross bridges for each iteration where we vary the proximity of the cross bridges
CB1S = []
CB2S = []
# BLOCK 4:
for i in range(0, len(dist)):
# CROSS BRIDGE
# ---> USER INPUT NECESSARY
cb1 = [24, dm, 0]
cb2 = [34-dist[i],dm,0]
crossb = (cb1,cb2)
c = np.concatenate((mf,af3,crossb), axis = 0)
n = nmyo + nact + 2 # Total number of singularities
# BLOCK 4.1: Generating boundary points
bpoints = []
for j in range(0,n):
if j in range(0,nmyo):
mN = c[j]+[r1,0,0]
mE = c[j]+[0,r1,0]
bpoints.append(mN)
bpoints.append(mE)
elif j in range(nmyo,nmyo+3*nact):
aN = c[j]+[r2,0,0]
aE = c[j]+[0,r2,0]
bpoints.append(aN)
bpoints.append(aE)
elif j in range(nmyo+nact,nmyo+nact+2):
cbN = c[j]+[r1,0,0]
cbE = c[j]+[0,r1,0]
bpoints.append(cbN)
bpoints.append(cbE)
# BLOCK 4.2: Creating a list of the fluid velocities at each boundary point.
# ---> USER INPUT NECESSARY
vel = []
for j in range(0,2*n):
if j in range(0,2*nmyo): # Velocities of myosin filament
crazy1 = [0,0,0]
vel.append(crazy1)
elif j in range(2*nmyo,2*nmyo+2*nact): # Velocities of actin filaments
crazy2 = [0,0,0]
vel.append(crazy2)
elif j in range(2*nmyo+2*nact,2*nmyo+2*nact+2*2): # Velocities of cross bridges
if j in range(2*nmyo+2*nact,2*nmyo+2*nact+2*2-2*1):
crazy3 = [10000,0,0]
vel.append(crazy3)
elif j in range(2*nmyo+2*nact+2*2-2*1,2*nmyo+2*nact+2*2):
crazy4 = [0,0,0]
vel.append(crazy4)
bvel = np.concatenate((vel), axis = 0)
#BLOCK 4.3: Compile a list of the centers where each singularity exists.
centers = []
for k in range(0,n):
for j in range(0,2*n):
centers.append(c[k]) # Will have center 1 listed 2*N times, then center 2 listed 2*N times, etc.
# BLOCK 4.4: Create our array of radii which are determined by distance of
# boundary points to the singularities centers. Observe that the ordering
# of bpoints and centers determines that our radii are determined as the
# distance of all of the boundary points to the first singularity, then all
# of the boundary points to the second singularity, etc.
rad = []
for j in range(0,n):
for k in range(0,2*n):
rad.append(linalg.norm(bpoints[k]-centers[k+j*2*n]))
# Structured as bp1c1,bp2c1,...bp2nc1,...bp1c2,....
dk = np.identity(3) # Defining the Kronecker delta matrix
# Instantiating generic lists to be used in for loop operations
l = list()
l1 = list()
l2 = list()
# BLOCK 4.5
for j in range(0,n): # Cycles through the n centers
l3 = list()
l4 = list()
for k in range(0,2*n): # Cycles through the 2n bp
M = np.array([[(bpoints[k][0]-centers[k + (2*n)*j][0])*(bpoints[k][0]-centers[k + (2*n)*j][0]),
(bpoints[k][0]-centers[k + (2*n)*j][0])*(bpoints[k][1]-centers[k + (2*n)*j][1]),
(bpoints[k][0]-centers[k + (2*n)*j][0])*(bpoints[k][2]-centers[k + (2*n)*j][2])],
[(bpoints[k][1]-centers[k + (2*n)*j][1])*(bpoints[k][0]-centers[k + (2*n)*j][0]),
(bpoints[k][1]-centers[k + (2*n)*j][1])*(bpoints[k][1]-centers[k + (2*n)*j][1]),
(bpoints[k][1]-centers[k + (2*n)*j][1])*(bpoints[k][2]-centers[k + (2*n)*j][2])],
[(bpoints[k][2]-centers[k + (2*n)*j][2])*(bpoints[k][0]-centers[k + (2*n)*j][0]),
(bpoints[k][2]-centers[k + (2*n)*j][2])*(bpoints[k][1]-centers[k + (2*n)*j][1]),
(bpoints[k][2]-centers[k + (2*n)*j][2])*(bpoints[k][2]-centers[k + (2*n)*j][2])]])
r = rad[k + 2*(n)*j] #bp1c1, bp2c1,...
gM = (1/r)*dk + (1/(r**3))*M
dM = (-1/(r**3))*dk + (3/(r**5))*M
l3.append(gM) # Column one
l4.append(dM) # Column two
l1.append(np.concatenate(l3, axis = 0)) # Vertical concatenation
l1.append(np.concatenate(l4, axis = 0)) # Vertical concatenation
a = np.concatenate(l1, axis = 1 ) # Horizontal concatenation
ai =linalg.pinv(a) # Moore-Penrose pseudo inverse of matrix a, since a is singular
s = ai.dot(bvel) # List of strengths g1x,g1y,g1z,d1x,d1y,d1z,g2x,g2y...
#BLOCK 5: Velocity graphing
SomeList_g = []
SomeList_d = []
for a in range(0,n):
num_g = np.array([[s[0+a*6]],[s[1+a*6]],[s[2+a*6]]])
num_d = np.array([[s[3+a*6]],[s[4+a*6]],[s[5+a*6]]])
SomeList_g.append(num_g) # List of the g strength vectors
SomeList_d.append(num_d) # List of the d strength vectors
X, Y = np.mgrid[0:144:8, 0:25:2]
preU = []
preV = []
U = [] # x component of velocity
V = [] # y component of velocity
u = [] # List of velocity vectors at each point
mg = [] # List of matrices to multiply by strength g
md = [] # List of matrices to multiply by strength d
for i in range(0,144,8):
for j in range(0,25,2):
for k in range(0,n): # Number of singularities
mat = np.array([[(i-c[k][0])*(i-c[k][0]),
(i-c[k][0])*(j-c[k][1]),
(i-c[k][0])*(0-c[k][2])],
[(j-c[k][1])*(i-c[k][0]),
(j-c[k][1])*(j-c[k][1]),
(j-c[k][1])*(0-c[k][2])],
[(0-c[k][2])*(i-c[k][0]),
(0-c[k][2])*(j-c[k][1]),
(0-c[k][2])*(0-c[k][2])]])
r = (linalg.norm([i,j,0]-c[k]))
if r == 0:
mg.append(np.zeros((3,3)))
md.append(np.zeros((3,3)))
else:
mg.append((dk/r)+(mat/(r**3)))
md.append((-dk/(r**3))+(3*mat/(r**5)))
b = np.array([0,0,0])
for i in range(0,144,8):
im = int((i)/8)
for j in range(0,25,2):
jm = int(j/2)
for k in range(0,n):
a = (mg[k+im*n+jm*n].dot(SomeList_g[k]) + md[k+im*n+jm*n].dot(SomeList_d[k]))
b = np.array([b[0] + a[0], b[1] + a[1], b[2] + a[2]])
u1 = b[0]
v1 = b[1]
N = np.sqrt(u1**2+v1**2) # There may be a faster numpy "normalize" function
U.append(u1/N)
V.append(v1/N)
plt.quiver(X,Y,U,V,facecolor='cornflowerblue',alpha = .6)
Why = list()
WhyNot = list()
for i in range(0,n):
if c[i][2] == 0:
Why.append(c[i][0]) # x location of singularity
WhyNot.append(c[i][1]) # y location of singularity
plt.plot(Why, WhyNot, 'ro')
plt.show
# plt.savefig('Vel_2CB_1A_1M.pdf')
| mit |
basnijholt/holoviews | holoviews/plotting/bokeh/__init__.py | 1 | 11794 | from __future__ import absolute_import, division, unicode_literals
import numpy as np
import bokeh
from bokeh.palettes import all_palettes
from ...core import (Store, Overlay, NdOverlay, Layout, AdjointLayout,
GridSpace, GridMatrix, NdLayout, config)
from ...element import (Curve, Points, Scatter, Image, Raster, Path,
RGB, Histogram, Spread, HeatMap, Contours, Bars,
Box, Bounds, Ellipse, Polygons, BoxWhisker, Arrow,
ErrorBars, Text, HLine, VLine, Spline, Spikes,
Table, ItemTable, Area, HSV, QuadMesh, VectorField,
Graph, Nodes, EdgePaths, Distribution, Bivariate,
TriMesh, Violin, Chord, Div, HexTiles, Labels, Sankey,
Tiles)
from ...core.options import Options, Cycle, Palette
from ...core.util import LooseVersion, VersionError
if LooseVersion(bokeh.__version__) < '0.12.10':
raise VersionError("The bokeh extension requires a bokeh version >=0.12.10, "
"please upgrade from bokeh %s to a more recent version."
% bokeh.__version__, bokeh.__version__, '0.12.10')
try:
from ...interface import DFrame
except:
DFrame = None
from .annotation import (TextPlot, LineAnnotationPlot, SplinePlot,
ArrowPlot, DivPlot, LabelsPlot)
from ..plot import PlotSelector
from .callbacks import Callback # noqa (API import)
from .element import OverlayPlot, ElementPlot
from .chart import (PointPlot, CurvePlot, SpreadPlot, ErrorPlot, HistogramPlot,
SideHistogramPlot, BarPlot, SpikesPlot, SideSpikesPlot,
AreaPlot, VectorFieldPlot)
from .graphs import GraphPlot, NodePlot, TriMeshPlot, ChordPlot
from .heatmap import HeatMapPlot, RadialHeatMapPlot
from .hex_tiles import HexTilesPlot
from .path import PathPlot, PolygonPlot, ContourPlot
from .plot import GridPlot, LayoutPlot, AdjointLayoutPlot
from .raster import RasterPlot, RGBPlot, HSVPlot, QuadMeshPlot
from .renderer import BokehRenderer
from .sankey import SankeyPlot
from .stats import DistributionPlot, BivariatePlot, BoxWhiskerPlot, ViolinPlot
from .tabular import TablePlot
from .tiles import TilePlot
from .util import bokeh_version # noqa (API import)
Store.renderers['bokeh'] = BokehRenderer.instance()
if len(Store.renderers) == 1:
Store.set_current_backend('bokeh')
associations = {Overlay: OverlayPlot,
NdOverlay: OverlayPlot,
GridSpace: GridPlot,
GridMatrix: GridPlot,
AdjointLayout: AdjointLayoutPlot,
Layout: LayoutPlot,
NdLayout: LayoutPlot,
# Charts
Curve: CurvePlot,
Bars: BarPlot,
Points: PointPlot,
Scatter: PointPlot,
ErrorBars: ErrorPlot,
Spread: SpreadPlot,
Spikes: SpikesPlot,
Area: AreaPlot,
VectorField: VectorFieldPlot,
Histogram: HistogramPlot,
# Rasters
Image: RasterPlot,
RGB: RGBPlot,
HSV: HSVPlot,
Raster: RasterPlot,
HeatMap: PlotSelector(HeatMapPlot.is_radial,
{True: RadialHeatMapPlot,
False: HeatMapPlot},
True),
QuadMesh: QuadMeshPlot,
# Paths
Path: PathPlot,
Contours: ContourPlot,
Box: PathPlot,
Bounds: PathPlot,
Ellipse: PathPlot,
Polygons: PolygonPlot,
# Annotations
HLine: LineAnnotationPlot,
VLine: LineAnnotationPlot,
Text: TextPlot,
Labels: LabelsPlot,
Spline: SplinePlot,
Arrow: ArrowPlot,
Div: DivPlot,
Tiles: TilePlot,
# Graph Elements
Graph: GraphPlot,
Chord: ChordPlot,
Nodes: NodePlot,
EdgePaths: PathPlot,
TriMesh: TriMeshPlot,
Sankey: SankeyPlot,
# Tabular
Table: TablePlot,
ItemTable: TablePlot,
# Statistics
Distribution: DistributionPlot,
Bivariate: BivariatePlot,
BoxWhisker: BoxWhiskerPlot,
Violin: ViolinPlot,
HexTiles: HexTilesPlot}
if DFrame is not None:
associations[DFrame] = TablePlot
Store.register(associations, 'bokeh')
if config.style_17:
ElementPlot.show_grid = True
RasterPlot.show_grid = True
ElementPlot.show_frame = True
else:
# Raster types, Path types and VectorField should have frames
for framedcls in [VectorFieldPlot, ContourPlot, PathPlot, PolygonPlot,
RasterPlot, RGBPlot, HSVPlot, QuadMeshPlot, HeatMapPlot]:
framedcls.show_frame = True
AdjointLayoutPlot.registry[Histogram] = SideHistogramPlot
AdjointLayoutPlot.registry[Spikes] = SideSpikesPlot
point_size = np.sqrt(6) # Matches matplotlib default
# Register bokeh.palettes with Palette and Cycle
def colormap_generator(palette):
return lambda value: palette[int(value*(len(palette)-1))]
Palette.colormaps.update({name: colormap_generator(p[max(p.keys())])
for name, p in all_palettes.items()})
Cycle.default_cycles.update({name: p[max(p.keys())] for name, p in all_palettes.items()
if max(p.keys()) < 256})
dflt_cmap = 'hot' if config.style_17 else 'fire'
options = Store.options(backend='bokeh')
# Charts
options.Curve = Options('style', color=Cycle(), line_width=2)
options.BoxWhisker = Options('style', box_fill_color='lightgray', whisker_color='black',
box_line_color='black', outlier_color='black')
options.Scatter = Options('style', color=Cycle(), size=point_size, cmap=dflt_cmap)
options.Points = Options('style', color=Cycle(), size=point_size, cmap=dflt_cmap)
if not config.style_17:
options.Points = Options('plot', show_frame=True)
options.Histogram = Options('style', line_color='black', color=Cycle(), muted_alpha=0.2)
options.ErrorBars = Options('style', color='black')
options.Spread = Options('style', color=Cycle(), alpha=0.6, line_color='black', muted_alpha=0.2)
options.Bars = Options('style', color=Cycle(), line_color='black', bar_width=0.8, muted_alpha=0.2)
options.Spikes = Options('style', color='black', cmap='fire', muted_alpha=0.2)
options.Area = Options('style', color=Cycle(), alpha=1, line_color='black', muted_alpha=0.2)
options.VectorField = Options('style', color='black', muted_alpha=0.2)
# Paths
if not config.style_17:
options.Contours = Options('plot', show_legend=True)
options.Contours = Options('style', color=Cycle(), cmap='viridis')
options.Path = Options('style', color=Cycle(), cmap='viridis')
options.Box = Options('style', color='black')
options.Bounds = Options('style', color='black')
options.Ellipse = Options('style', color='black')
options.Polygons = Options('style', color=Cycle(), line_color='black',
cmap='viridis')
# Rasters
options.Image = Options('style', cmap=dflt_cmap)
options.Raster = Options('style', cmap=dflt_cmap)
options.QuadMesh = Options('style', cmap=dflt_cmap, line_alpha=0)
options.HeatMap = Options('style', cmap='RdYlBu_r', annular_line_alpha=0,
xmarks_line_color="#FFFFFF", xmarks_line_width=3,
ymarks_line_color="#FFFFFF", ymarks_line_width=3)
# Annotations
options.HLine = Options('style', color=Cycle(), line_width=3, alpha=1)
options.VLine = Options('style', color=Cycle(), line_width=3, alpha=1)
options.Arrow = Options('style', arrow_size=10)
options.Labels = Options('style', text_align='center', text_baseline='middle')
# Graphs
options.Graph = Options(
'style', node_size=15, node_color=Cycle(), node_line_color='black',
node_nonselection_fill_color=Cycle(), node_hover_line_color='black',
node_hover_fill_color='limegreen', node_nonselection_alpha=0.2,
edge_nonselection_alpha=0.2, node_nonselection_line_color='black',
edge_color='black', edge_line_width=2, edge_nonselection_line_color='black',
edge_hover_line_color='limegreen'
)
options.TriMesh = Options(
'style', node_size=5, node_line_color='black', node_color='white',
edge_line_color='black', node_hover_fill_color='limegreen',
edge_line_width=1, edge_hover_line_color='limegreen',
edge_nonselection_alpha=0.2, edge_nonselection_line_color='black',
node_nonselection_alpha=0.2,
)
options.TriMesh = Options('plot', tools=[])
options.Chord = Options('style', node_size=15, node_color=Cycle(),
node_line_color='black',
node_selection_fill_color='limegreen',
node_nonselection_fill_color=Cycle(),
node_hover_line_color='black',
node_nonselection_line_color='black',
node_selection_line_color='black',
node_hover_fill_color='limegreen',
node_nonselection_alpha=0.2,
edge_nonselection_alpha=0.1,
edge_line_color='black', edge_line_width=1,
edge_nonselection_line_color='black',
edge_hover_line_color='limegreen',
edge_selection_line_color='limegreen',
label_text_font_size='8pt')
options.Chord = Options('plot', xaxis=None, yaxis=None)
options.Nodes = Options('style', line_color='black', color=Cycle(),
size=20, nonselection_fill_color=Cycle(),
selection_fill_color='limegreen',
hover_fill_color='indianred')
options.Nodes = Options('plot', tools=['hover', 'tap'])
options.EdgePaths = Options('style', color='black', nonselection_alpha=0.2,
line_width=2, selection_color='limegreen',
hover_line_color='indianred')
options.EdgePaths = Options('plot', tools=['hover', 'tap'])
options.Sankey = Options(
'plot', xaxis=None, yaxis=None, inspection_policy='edges',
selection_policy='nodes', width=1000, height=600, show_frame=False
)
options.Sankey = Options(
'style', node_nonselection_alpha=0.2, node_size=10, edge_nonselection_alpha=0.2,
edge_fill_alpha=0.6, label_text_font_size='8pt', cmap='Category20',
node_line_color='black', node_selection_line_color='black', node_hover_alpha=1,
edge_hover_alpha=0.9
)
# Define composite defaults
options.GridMatrix = Options('plot', shared_xaxis=True, shared_yaxis=True,
xaxis=None, yaxis=None)
options.Overlay = Options('style', click_policy='mute')
options.NdOverlay = Options('style', click_policy='mute')
options.Curve = Options('style', muted_alpha=0.2)
options.Path = Options('style', muted_alpha=0.2)
options.Scatter = Options('style', muted_alpha=0.2)
options.Points = Options('style', muted_alpha=0.2)
options.Polygons = Options('style', muted_alpha=0.2)
# Statistics
options.Distribution = Options(
'style', color=Cycle(), line_color='black', fill_alpha=0.5,
muted_alpha=0.2
)
options.Violin = Options(
'style', violin_fill_color='lightgray', violin_line_color='black',
violin_fill_alpha=0.5, stats_color='black', box_color='black',
median_color='white'
)
options.HexTiles = Options('style', muted_alpha=0.2)
| bsd-3-clause |
DreamLiMu/ML_Python | tools/Ch06/EXTRAS/plotSupportVectors.py | 4 | 1405 | '''
Created on Nov 22, 2010
@author: Peter
'''
from numpy import *
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.patches import Circle
xcord0 = []
ycord0 = []
xcord1 = []
ycord1 = []
markers =[]
colors =[]
fr = open('testSet.txt')#this file was generated by 2normalGen.py
for line in fr.readlines():
lineSplit = line.strip().split('\t')
xPt = float(lineSplit[0])
yPt = float(lineSplit[1])
label = int(lineSplit[2])
if (label == -1):
xcord0.append(xPt)
ycord0.append(yPt)
else:
xcord1.append(xPt)
ycord1.append(yPt)
fr.close()
fig = plt.figure()
ax = fig.add_subplot(111)
ax.scatter(xcord0,ycord0, marker='s', s=90)
ax.scatter(xcord1,ycord1, marker='o', s=50, c='red')
plt.title('Support Vectors Circled')
circle = Circle((4.6581910000000004, 3.507396), 0.5, facecolor='none', edgecolor=(0,0.8,0.8), linewidth=3, alpha=0.5)
ax.add_patch(circle)
circle = Circle((3.4570959999999999, -0.082215999999999997), 0.5, facecolor='none', edgecolor=(0,0.8,0.8), linewidth=3, alpha=0.5)
ax.add_patch(circle)
circle = Circle((6.0805730000000002, 0.41888599999999998), 0.5, facecolor='none', edgecolor=(0,0.8,0.8), linewidth=3, alpha=0.5)
ax.add_patch(circle)
#plt.plot([2.3,8.5], [-6,6]) #separating hyperplane
b = -3.75567; w0=0.8065; w1=-0.2761
x = arange(-2.0, 12.0, 0.1)
y = (-w0*x - b)/w1
ax.plot(x,y)
ax.axis([-2,12,-8,6])
plt.show() | gpl-2.0 |
lhogstrom/ThornsInRoses | 18_335/nmf_benchmarks.py | 1 | 8363 | #!/usr/bin/env python
import numpy as np
from lin_nmf import *
import matplotlib.pyplot as plt
import nmf_multiplicative_update as nmflh
from time import time
import os
wkdir = '/Users/hogstrom/Dropbox (Personal)/cources_spring_2015_MIT/18_335_Numeric_Methods/NMF/figures'
n=5
### plot error from matrix of increasing size
a = [ pow(2,i) for i in range(12) ]
meanVec = np.zeros((2,len(a)))
for i,m in enumerate(a):
print m
w1 = np.random.rand(m,n)
h1 = np.random.rand(n,m)
v = np.dot(w1,h1)
#nmf decomposition of v:
#multiplicative update
(wo,ho) = nmflh.nmf_mu(v,np.random.rand(m,n), np.random.rand(n,m),5,100)
# projection gradient
(w_pg,h_pg) = nmf(v, np.random.rand(m,n), np.random.rand(n,m), 0.001, 10, 10)
#backward-like stability measure
diffMtrx = np.dot(w1,h1) - np.dot(wo,ho)
diffMtrx2 = np.dot(w1,h1) - np.dot(w_pg,h_pg)
# meanVec[0,i] = diffMtrx.mean()
# meanVec[1,i] = diffMtrx2.mean()
meanVec[0,i] = np.linalg.norm(diffMtrx, ord=None) #Frobenius norm
meanVec[1,i] = np.linalg.norm(diffMtrx2, ord=None) #Frobenius norm
#plot changes in error
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
plt.plot(a,meanVec[0,:],color='b',label='multiplicative_update')
plt.plot(a,meanVec[1,:],'r--',label='gradient_projection')
ax.set_xscale('log')
ax.set_yscale('log')
plt.title('Error in NMF estimation')
plt.xlabel('input matrix length')
plt.ylabel('error norm')
plt.legend(loc=4)
outFile = os.path.join(wkdir, 'projection_gradient_vs_multiplicative_update_error.png')
plt.savefig(outFile)
plt.close()
### convergence with increased iteration
a = [ pow(2,i) for i in range(13) ]
m = 500
n = 5
meanVec = np.zeros((1,len(a)))
for i,j in enumerate(a):
print m
w1 = np.random.rand(m,n)
h1 = np.random.rand(n,m)
v = np.dot(w1,h1)
#nmf decomposition of v:
#multiplicative update
(wo,ho) = nmflh.nmf_mu(v,np.random.rand(m,n), np.random.rand(n,m),5,j)
# projection gradient
#backward-like stability measure
diffMtrx = np.dot(w1,h1) - np.dot(wo,ho)
meanVec[0,i] = diffMtrx.mean()
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
plt.plot(a,meanVec[0,:],color='b',label='multiplicative_update')
ax.set_xscale('log')
plt.title('NMF entry error')
plt.xlabel('n iterations')
plt.ylabel('mean error of entries')
plt.legend(loc=2)
outFile = os.path.join(wkdir, 'multiplicative_update_iteration_error.png')
plt.savefig(outFile)
plt.close()
### ALS vs multiplicative update - convergence with increased iteration -
a = [ pow(2,i) for i in range(12) ]
m = 500
n = 5
meanVec = np.zeros((2,len(a)))
for i,j in enumerate(a):
print j
w1 = np.random.rand(m,n)
h1 = np.random.rand(n,m)
v = np.dot(w1,h1)
#nmf decomposition of v:
#multiplicative update
(wo,ho) = nmflh.nmf_mu(v,np.random.rand(m,n), np.random.rand(n,m),5,j)
# ALS method
(w_als,h_als) = nmflh.nmf_ALS(v, np.random.rand(m,n), np.random.rand(n,m),5,j)
#backward-like stability measure
diffMtrx = np.dot(w1,h1) - np.dot(wo,ho)
diffMtrx2 = np.dot(w1,h1) - np.dot(w_als,h_als)
# residual norm
meanVec[0,i] = np.linalg.norm(diffMtrx, ord=None) #Frobenius norm
meanVec[1,i] = np.linalg.norm(diffMtrx2, ord=None) #Frobenius norm
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
plt.plot(a,meanVec[0,:],color='b',label='multiplicative update')
plt.plot(a,meanVec[1,:],'r--',label='ALS method')
ax.set_xscale('log')
ax.set_yscale('log')
plt.title('Error in NMF estimation')
plt.xlabel('n iteration')
plt.ylabel('norm residual')
plt.legend(loc=7)
outFile = os.path.join(wkdir, 'ALS_vs_multiplicative_update_iteration_error.png')
plt.savefig(outFile)
plt.close()
### time used by matrix size n
a = [ pow(2,i) for i in range(15) ]
meanVec = np.zeros((1,len(a)))
for i,m in enumerate(a):
print m
w1 = np.random.rand(m,n)
h1 = np.random.rand(n,m)
v = np.dot(w1,h1)
#nmf decomposition of v:
#multiplicative update
initt = time()
(wo,ho) = nmflh.nmf_mu(v,np.random.rand(m,n), np.random.rand(n,m),5,100)
tafter = time() - initt
meanVec[0,i] = tafter
#plot
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
plt.plot(a,meanVec[0,:],color='b',label='multiplicative_update')
ax.set_xscale('log')
ax.set_yscale('log')
plt.title('NMF decomposition into 5 components')
plt.xlabel('matrix size n')
plt.ylabel('seconds')
plt.legend(loc=2)
outFile = os.path.join(wkdir, 'multiplicative_update_time.png')
plt.savefig(outFile)
plt.close()
### ACLS - sparsity constraints - ERROR
a = [ pow(2,i) for i in range(11) ]
meanVec = np.zeros((2,len(a)))
for i,j in enumerate(a):
print j
w1 = np.random.rand(m,n)
h1 = np.random.rand(n,m)
v = np.dot(w1,h1)
#nmf decomposition of v:
# ALS method
Winit = np.random.rand(m,n)
Hinit =np.random.rand(n,m)
lw = 2
lh = 2
(w_acls,h_acls) = nmflh.nmf_ACLS(v, Winit, Hinit,lh,lw,5,j)
# ACLS method
(w_als,h_als) = nmflh.nmf_ALS(v, Winit, Hinit,5,j)
#backward-like stability measure
diffMtrx = np.dot(w1,h1) - np.dot(w_als,h_als)
diffMtrx2 = np.dot(w1,h1) - np.dot(w_acls,h_acls)
# residual norm
meanVec[0,i] = np.linalg.norm(diffMtrx, ord=None) #Frobenius norm
meanVec[1,i] = np.linalg.norm(diffMtrx2, ord=None) #Frobenius norm
# plot
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
plt.plot(a,meanVec[0,:],color='b',label='ALS method')
plt.plot(a,meanVec[1,:],'r--',label='ACLS method')
ax.set_xscale('log')
ax.set_yscale('log')
plt.title('NMF iteration')
plt.xlabel('n iteration')
plt.ylabel('norm residual')
plt.legend(loc=7)
outFile = os.path.join(wkdir, 'ALS_vs_ACLS_error.png')
plt.savefig(outFile)
plt.close()
# sparsity measure
def percent_nearzero(M,thresh):
n_entries = M.shape[0]*M.shape[1]
above_nearzero = M > thresh
pnz = above_nearzero.sum()/ np.float(n_entries) #percent_nearzero
return pnz
### ACLS - sparsity constraints - Sparsity
a = [ pow(2,i) for i in range(15) ]
a = np.array(a)/100.0
meanVec = np.zeros((2,len(a)))
for i,lw in enumerate(a):
print lw
w1 = np.random.rand(m,n)
h1 = np.random.rand(n,m)
v = np.dot(w1,h1)
#nmf decomposition of v:
# ALS method
Winit = np.random.rand(m,n)
Hinit =np.random.rand(n,m)
lh = lw
j = 10 #n_iter
# ACLS method
(w_acls,h_acls) = nmflh.nmf_ACLS(v, Winit, Hinit,lh,lw,5,j)
# AHCLS method
# (w_acls,h_acls) = nmflh.nmf_AHCLS(v, Winit, Hinit,lh,lw,.5,.5,5,j)
# ALS method
(w_als,h_als) = nmflh.nmf_ALS(v, Winit, Hinit,5,j)
#backward-like stability measure
near_zero_thresh = .0001
# percent_nearzero(h_als,near_zero_thresh)
# percent_nearzero(h_acls,near_zero_thresh)
# residual norm
meanVec[0,i] = percent_nearzero(w_als,near_zero_thresh)
meanVec[1,i] = percent_nearzero(w_acls,near_zero_thresh)
# plot
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
plt.plot(a,meanVec[0,:],color='b',label='ALS method')
plt.plot(a,meanVec[1,:],'r--',label='ACLS method')
ax.set_xscale('log')
plt.title('Sparsity of W matrix')
plt.xlabel('lambda')
plt.ylabel('fraction of nearzero entries')
plt.legend(loc=7)
outFile = os.path.join(wkdir, 'ALS_vs_ACLS_sparsity.pdf')
plt.savefig(outFile,format='pdf')
plt.close()
### ACLS - multiplicative update vs gradient - ERROR
a = [ pow(2,i) for i in range(1,11) ]
meanVec = np.zeros((2,len(a)))
for i,j in enumerate(a):
print j
w1 = np.random.rand(m,n)
h1 = np.random.rand(n,m)
v = np.dot(w1,h1)
#nmf decomposition of v:
# ALS method
Winit = np.random.rand(m,n)
Hinit =np.random.rand(n,m)
(wo,ho) = nmflh.nmf_mu(v,Winit, Hinit,5,j)
# projection gradient
(w_pg,h_pg) = nmf(v, Winit, Hinit, 0.000001, 10, j)
#backward-like stability measure
diffMtrx = np.dot(w1,h1) - np.dot(wo,ho)
diffMtrx2 = np.dot(w1,h1) - np.dot(w_pg,h_pg)
# residual norm
meanVec[0,i] = np.linalg.norm(diffMtrx, ord=None) #Frobenius norm
meanVec[1,i] = np.linalg.norm(diffMtrx2, ord=None) #Frobenius norm
# plot
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
plt.plot(a,meanVec[0,:],color='b',label='MU method')
plt.plot(a,meanVec[1,:],'r--',label='PG method')
ax.set_xscale('log')
ax.set_yscale('log')
plt.title('Error in NMF estimation')
plt.xlabel('n iteration')
plt.ylabel('norm residual')
plt.legend(loc=1)
outFile = os.path.join(wkdir, 'MU_vs_PG_error.png')
plt.savefig(outFile)
plt.close()
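# Illustrative helper (added sketch, not used by the benchmarks above): the
# absolute Frobenius norms plotted above scale with the size of V, so a
# relative residual can be easier to compare across matrix sizes. This is an
# assumption about a useful extra diagnostic, not part of the original code.
def relative_frobenius_error(V, W, H):
    """Return ||V - W*H||_F / ||V||_F for a factorization V ~ W*H."""
    return np.linalg.norm(V - W.dot(H)) / np.linalg.norm(V)
# Example (commented out so the benchmark output is unchanged):
# print relative_frobenius_error(v, wo, ho)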
| mit |
icdishb/scikit-learn | sklearn/utils/tests/test_multiclass.py | 14 | 15416 | from __future__ import division
import numpy as np
import scipy.sparse as sp
from itertools import product
from functools import partial
from sklearn.externals.six.moves import xrange
from sklearn.externals.six import iteritems
from scipy.sparse import issparse
from scipy.sparse import csc_matrix
from scipy.sparse import csr_matrix
from scipy.sparse import coo_matrix
from scipy.sparse import dok_matrix
from scipy.sparse import lil_matrix
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.multiclass import unique_labels
from sklearn.utils.multiclass import is_label_indicator_matrix
from sklearn.utils.multiclass import is_multilabel
from sklearn.utils.multiclass import is_sequence_of_sequences
from sklearn.utils.multiclass import type_of_target
from sklearn.utils.multiclass import class_distribution
class NotAnArray(object):
"""An object that is convertable to an array. This is useful to
simulate a Pandas timeseries."""
def __init__(self, data):
self.data = data
def __array__(self):
return self.data
EXAMPLES = {
'multilabel-indicator': [
# valid when the data is formated as sparse or dense, identified
# by CSR format when the testing takes place
csr_matrix(np.random.RandomState(42).randint(2, size=(10, 10))),
csr_matrix(np.array([[0, 1], [1, 0]])),
csr_matrix(np.array([[0, 1], [1, 0]], dtype=np.bool)),
csr_matrix(np.array([[0, 1], [1, 0]], dtype=np.int8)),
csr_matrix(np.array([[0, 1], [1, 0]], dtype=np.uint8)),
csr_matrix(np.array([[0, 1], [1, 0]], dtype=np.float)),
csr_matrix(np.array([[0, 1], [1, 0]], dtype=np.float32)),
csr_matrix(np.array([[0, 0], [0, 0]])),
csr_matrix(np.array([[0, 1]])),
# Only valid when data is dense
np.array([[-1, 1], [1, -1]]),
np.array([[-3, 3], [3, -3]]),
NotAnArray(np.array([[-3, 3], [3, -3]])),
],
'multilabel-sequences': [
[[0, 1]],
[[0], [1]],
[[1, 2, 3]],
[[1, 2, 1]], # duplicate values, why not?
[[1], [2], [0, 1]],
[[1], [2]],
[[]],
[()],
np.array([[], [1, 2]], dtype='object'),
NotAnArray(np.array([[], [1, 2]], dtype='object')),
],
'multiclass': [
[1, 0, 2, 2, 1, 4, 2, 4, 4, 4],
np.array([1, 0, 2]),
np.array([1, 0, 2], dtype=np.int8),
np.array([1, 0, 2], dtype=np.uint8),
np.array([1, 0, 2], dtype=np.float),
np.array([1, 0, 2], dtype=np.float32),
np.array([[1], [0], [2]]),
NotAnArray(np.array([1, 0, 2])),
[0, 1, 2],
['a', 'b', 'c'],
np.array([u'a', u'b', u'c']),
np.array([u'a', u'b', u'c'], dtype=object),
np.array(['a', 'b', 'c'], dtype=object),
],
'multiclass-multioutput': [
np.array([[1, 0, 2, 2], [1, 4, 2, 4]]),
np.array([[1, 0, 2, 2], [1, 4, 2, 4]], dtype=np.int8),
np.array([[1, 0, 2, 2], [1, 4, 2, 4]], dtype=np.uint8),
np.array([[1, 0, 2, 2], [1, 4, 2, 4]], dtype=np.float),
np.array([[1, 0, 2, 2], [1, 4, 2, 4]], dtype=np.float32),
np.array([['a', 'b'], ['c', 'd']]),
np.array([[u'a', u'b'], [u'c', u'd']]),
np.array([[u'a', u'b'], [u'c', u'd']], dtype=object),
np.array([[1, 0, 2]]),
NotAnArray(np.array([[1, 0, 2]])),
],
'binary': [
[0, 1],
[1, 1],
[],
[0],
np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1]),
np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1], dtype=np.bool),
np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1], dtype=np.int8),
np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1], dtype=np.uint8),
np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1], dtype=np.float),
np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1], dtype=np.float32),
np.array([[0], [1]]),
NotAnArray(np.array([[0], [1]])),
[1, -1],
[3, 5],
['a'],
['a', 'b'],
['abc', 'def'],
np.array(['abc', 'def']),
[u'a', u'b'],
np.array(['abc', 'def'], dtype=object),
],
'continuous': [
[1e-5],
[0, .5],
np.array([[0], [.5]]),
np.array([[0], [.5]], dtype=np.float32),
],
'continuous-multioutput': [
np.array([[0, .5], [.5, 0]]),
np.array([[0, .5], [.5, 0]], dtype=np.float32),
np.array([[0, .5]]),
],
'unknown': [
# empty second dimension
np.array([[], []]),
# 3d
np.array([[[0, 1], [2, 3]], [[4, 5], [6, 7]]]),
# not currently supported sequence of sequences
np.array([np.array([]), np.array([1, 2, 3])], dtype=object),
[np.array([]), np.array([1, 2, 3])],
[set([1, 2, 3]), set([1, 2])],
[frozenset([1, 2, 3]), frozenset([1, 2])],
# and also confusable as sequences of sequences
[{0: 'a', 1: 'b'}, {0: 'a'}],
]
}
NON_ARRAY_LIKE_EXAMPLES = [
set([1, 2, 3]),
{0: 'a', 1: 'b'},
{0: [5], 1: [5]},
'abc',
frozenset([1, 2, 3]),
None,
]
def test_unique_labels():
# Empty iterable
assert_raises(ValueError, unique_labels)
# Multiclass problem
assert_array_equal(unique_labels(xrange(10)), np.arange(10))
assert_array_equal(unique_labels(np.arange(10)), np.arange(10))
assert_array_equal(unique_labels([4, 0, 2]), np.array([0, 2, 4]))
# Multilabels
assert_array_equal(assert_warns(DeprecationWarning,
unique_labels,
[(0, 1, 2), (0,), tuple(), (2, 1)]),
np.arange(3))
assert_array_equal(assert_warns(DeprecationWarning,
unique_labels,
[[0, 1, 2], [0], list(), [2, 1]]),
np.arange(3))
assert_array_equal(unique_labels(np.array([[0, 0, 1],
[1, 0, 1],
[0, 0, 0]])),
np.arange(3))
assert_array_equal(unique_labels(np.array([[0, 0, 1],
[0, 0, 0]])),
np.arange(3))
# Several arrays passed
assert_array_equal(unique_labels([4, 0, 2], xrange(5)),
np.arange(5))
assert_array_equal(unique_labels((0, 1, 2), (0,), (2, 1)),
np.arange(3))
# Border line case with binary indicator matrix
assert_raises(ValueError, unique_labels, [4, 0, 2], np.ones((5, 5)))
assert_raises(ValueError, unique_labels, np.ones((5, 4)), np.ones((5, 5)))
assert_array_equal(unique_labels(np.ones((4, 5)), np.ones((5, 5))),
np.arange(5))
# Some tests with strings input
assert_array_equal(unique_labels(["a", "b", "c"], ["d"]),
["a", "b", "c", "d"])
assert_array_equal(assert_warns(DeprecationWarning, unique_labels,
[["a", "b"], ["c"]], [["d"]]),
["a", "b", "c", "d"])
@ignore_warnings
def test_unique_labels_non_specific():
# Test unique_labels with a variety of collected examples
# Smoke test for all supported format
for format in ["binary", "multiclass", "multilabel-sequences",
"multilabel-indicator"]:
for y in EXAMPLES[format]:
unique_labels(y)
# We don't support those format at the moment
for example in NON_ARRAY_LIKE_EXAMPLES:
assert_raises(ValueError, unique_labels, example)
for y_type in ["unknown", "continuous", 'continuous-multioutput',
'multiclass-multioutput']:
for example in EXAMPLES[y_type]:
assert_raises(ValueError, unique_labels, example)
@ignore_warnings
def test_unique_labels_mixed_types():
# Mix of multilabel-indicator and multilabel-sequences
mix_multilabel_format = product(EXAMPLES["multilabel-indicator"],
EXAMPLES["multilabel-sequences"])
for y_multilabel, y_multiclass in mix_multilabel_format:
assert_raises(ValueError, unique_labels, y_multiclass, y_multilabel)
assert_raises(ValueError, unique_labels, y_multilabel, y_multiclass)
# Mix with binary or multiclass and multilabel
mix_clf_format = product(EXAMPLES["multilabel-indicator"] +
EXAMPLES["multilabel-sequences"],
EXAMPLES["multiclass"] +
EXAMPLES["binary"])
for y_multilabel, y_multiclass in mix_clf_format:
assert_raises(ValueError, unique_labels, y_multiclass, y_multilabel)
assert_raises(ValueError, unique_labels, y_multilabel, y_multiclass)
# Mix string and number input type
assert_raises(ValueError, unique_labels, [[1, 2], [3]],
[["a", "d"]])
assert_raises(ValueError, unique_labels, ["1", 2])
assert_raises(ValueError, unique_labels, [["1", 2], [3]])
assert_raises(ValueError, unique_labels, [["1", "2"], [3]])
assert_array_equal(unique_labels([(2,), (0, 2,)], [(), ()]), [0, 2])
assert_array_equal(unique_labels([("2",), ("0", "2",)], [(), ()]),
["0", "2"])
@ignore_warnings
def test_is_multilabel():
for group, group_examples in iteritems(EXAMPLES):
if group.startswith('multilabel'):
assert_, exp = assert_true, 'True'
else:
assert_, exp = assert_false, 'False'
for example in group_examples:
assert_(is_multilabel(example),
msg='is_multilabel(%r) should be %s' % (example, exp))
def test_is_label_indicator_matrix():
for group, group_examples in iteritems(EXAMPLES):
if group in ['multilabel-indicator']:
dense_assert_, dense_exp = assert_true, 'True'
else:
dense_assert_, dense_exp = assert_false, 'False'
for example in group_examples:
# Only mark explicitly defined sparse examples as valid sparse
# multilabel-indicators
if group == 'multilabel-indicator' and issparse(example):
sparse_assert_, sparse_exp = assert_true, 'True'
else:
sparse_assert_, sparse_exp = assert_false, 'False'
if (issparse(example) or
(hasattr(example, '__array__') and
np.asarray(example).ndim == 2 and
np.asarray(example).dtype.kind in 'biuf' and
np.asarray(example).shape[1] > 0)):
examples_sparse = [sparse_matrix(example)
for sparse_matrix in [coo_matrix,
csc_matrix,
csr_matrix,
dok_matrix,
lil_matrix]]
for exmpl_sparse in examples_sparse:
sparse_assert_(is_label_indicator_matrix(exmpl_sparse),
msg=('is_label_indicator_matrix(%r)'
' should be %s')
% (exmpl_sparse, sparse_exp))
# Densify sparse examples before testing
if issparse(example):
example = example.toarray()
dense_assert_(is_label_indicator_matrix(example),
msg='is_label_indicator_matrix(%r) should be %s'
% (example, dense_exp))
def test_is_sequence_of_sequences():
for group, group_examples in iteritems(EXAMPLES):
if group == 'multilabel-sequences':
assert_, exp = assert_true, 'True'
check = partial(assert_warns, DeprecationWarning,
is_sequence_of_sequences)
else:
assert_, exp = assert_false, 'False'
check = is_sequence_of_sequences
for example in group_examples:
assert_(check(example),
msg='is_sequence_of_sequences(%r) should be %s'
% (example, exp))
@ignore_warnings
def test_type_of_target():
for group, group_examples in iteritems(EXAMPLES):
for example in group_examples:
assert_equal(type_of_target(example), group,
msg='type_of_target(%r) should be %r, got %r'
% (example, group, type_of_target(example)))
for example in NON_ARRAY_LIKE_EXAMPLES:
assert_raises(ValueError, type_of_target, example)
def test_class_distribution():
y = np.array([[1, 0, 0, 1],
[2, 2, 0, 1],
[1, 3, 0, 1],
[4, 2, 0, 1],
[2, 0, 0, 1],
[1, 3, 0, 1]])
# Define the sparse matrix with a mix of implicit and explicit zeros
data = np.array([1, 2, 1, 4, 2, 1, 0, 2, 3, 2, 3, 1, 1, 1, 1, 1, 1])
indices = np.array([0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 5, 0, 1, 2, 3, 4, 5])
indptr = np.array([0, 6, 11, 11, 17])
y_sp = sp.csc_matrix((data, indices, indptr), shape=(6, 4))
classes, n_classes, class_prior = class_distribution(y)
classes_sp, n_classes_sp, class_prior_sp = class_distribution(y_sp)
classes_expected = [[1, 2, 4],
[0, 2, 3],
[0],
[1]]
n_classes_expected = [3, 3, 1, 1]
class_prior_expected = [[3/6, 2/6, 1/6],
[1/3, 1/3, 1/3],
[1.0],
[1.0]]
for k in range(y.shape[1]):
assert_array_almost_equal(classes[k], classes_expected[k])
assert_array_almost_equal(n_classes[k], n_classes_expected[k])
assert_array_almost_equal(class_prior[k], class_prior_expected[k])
assert_array_almost_equal(classes_sp[k], classes_expected[k])
assert_array_almost_equal(n_classes_sp[k], n_classes_expected[k])
assert_array_almost_equal(class_prior_sp[k], class_prior_expected[k])
# Test again with explicit sample weights
(classes,
n_classes,
class_prior) = class_distribution(y, [1.0, 2.0, 1.0, 2.0, 1.0, 2.0])
(classes_sp,
n_classes_sp,
class_prior_sp) = class_distribution(y, [1.0, 2.0, 1.0, 2.0, 1.0, 2.0])
class_prior_expected = [[4/9, 3/9, 2/9],
[2/9, 4/9, 3/9],
[1.0],
[1.0]]
for k in range(y.shape[1]):
assert_array_almost_equal(classes[k], classes_expected[k])
assert_array_almost_equal(n_classes[k], n_classes_expected[k])
assert_array_almost_equal(class_prior[k], class_prior_expected[k])
assert_array_almost_equal(classes_sp[k], classes_expected[k])
assert_array_almost_equal(n_classes_sp[k], n_classes_expected[k])
assert_array_almost_equal(class_prior_sp[k], class_prior_expected[k])
if __name__ == "__main__":
import nose
nose.runmodule()
| bsd-3-clause |
alexeyum/scikit-learn | setup.py | 25 | 11732 | #! /usr/bin/env python
#
# Copyright (C) 2007-2009 Cournapeau David <[email protected]>
# 2010 Fabian Pedregosa <[email protected]>
# License: 3-clause BSD
import subprocess
descr = """A set of python modules for machine learning and data mining"""
import sys
import os
import shutil
from distutils.command.clean import clean as Clean
from pkg_resources import parse_version
if sys.version_info[0] < 3:
import __builtin__ as builtins
else:
import builtins
# This is a bit (!) hackish: we are setting a global variable so that the main
# sklearn __init__ can detect if it is being loaded by the setup routine, to
# avoid attempting to load components that aren't built yet:
# the numpy distutils extensions that are used by scikit-learn to recursively
# build the compiled extensions in sub-packages is based on the Python import
# machinery.
builtins.__SKLEARN_SETUP__ = True
DISTNAME = 'scikit-learn'
DESCRIPTION = 'A set of python modules for machine learning and data mining'
with open('README.rst') as f:
LONG_DESCRIPTION = f.read()
MAINTAINER = 'Andreas Mueller'
MAINTAINER_EMAIL = '[email protected]'
URL = 'http://scikit-learn.org'
LICENSE = 'new BSD'
DOWNLOAD_URL = 'http://sourceforge.net/projects/scikit-learn/files/'
# We can actually import a restricted version of sklearn that
# does not need the compiled code
import sklearn
VERSION = sklearn.__version__
# Optional setuptools features
# We need to import setuptools early, if we want setuptools features,
# as it monkey-patches the 'setup' function
# For some commands, use setuptools
SETUPTOOLS_COMMANDS = set([
'develop', 'release', 'bdist_egg', 'bdist_rpm',
'bdist_wininst', 'install_egg_info', 'build_sphinx',
'egg_info', 'easy_install', 'upload', 'bdist_wheel',
'--single-version-externally-managed',
])
if SETUPTOOLS_COMMANDS.intersection(sys.argv):
import setuptools
extra_setuptools_args = dict(
zip_safe=False, # the package can run out of an .egg file
include_package_data=True,
)
else:
extra_setuptools_args = dict()
# Custom clean command to remove build artifacts
class CleanCommand(Clean):
description = "Remove build artifacts from the source tree"
def run(self):
Clean.run(self)
# Remove c files if we are not within a sdist package
cwd = os.path.abspath(os.path.dirname(__file__))
remove_c_files = not os.path.exists(os.path.join(cwd, 'PKG-INFO'))
if remove_c_files:
cython_hash_file = os.path.join(cwd, 'cythonize.dat')
if os.path.exists(cython_hash_file):
os.unlink(cython_hash_file)
print('Will remove generated .c files')
if os.path.exists('build'):
shutil.rmtree('build')
for dirpath, dirnames, filenames in os.walk('sklearn'):
for filename in filenames:
if any(filename.endswith(suffix) for suffix in
(".so", ".pyd", ".dll", ".pyc")):
os.unlink(os.path.join(dirpath, filename))
continue
extension = os.path.splitext(filename)[1]
if remove_c_files and extension in ['.c', '.cpp']:
pyx_file = str.replace(filename, extension, '.pyx')
if os.path.exists(os.path.join(dirpath, pyx_file)):
os.unlink(os.path.join(dirpath, filename))
for dirname in dirnames:
if dirname == '__pycache__':
shutil.rmtree(os.path.join(dirpath, dirname))
cmdclass = {'clean': CleanCommand}
# Optional wheelhouse-uploader features
# To automate release of binary packages for scikit-learn we need a tool
# to download the packages generated by travis and appveyor workers (with
# version number matching the current release) and upload them all at once
# to PyPI at release time.
# The URL of the artifact repositories are configured in the setup.cfg file.
WHEELHOUSE_UPLOADER_COMMANDS = set(['fetch_artifacts', 'upload_all'])
if WHEELHOUSE_UPLOADER_COMMANDS.intersection(sys.argv):
import wheelhouse_uploader.cmd
cmdclass.update(vars(wheelhouse_uploader.cmd))
def configuration(parent_package='', top_path=None):
if os.path.exists('MANIFEST'):
os.remove('MANIFEST')
from numpy.distutils.misc_util import Configuration
config = Configuration(None, parent_package, top_path)
# Avoid non-useful msg:
# "Ignoring attempt to set 'name' (from ... "
config.set_options(ignore_setup_xxx_py=True,
assume_default_configuration=True,
delegate_options_to_subpackages=True,
quiet=True)
config.add_subpackage('sklearn')
return config
scipy_min_version = '0.9'
numpy_min_version = '1.6.1'
def get_scipy_status():
"""
Returns a dictionary containing a boolean specifying whether SciPy
is up-to-date, along with the version string (empty string if
not installed).
"""
scipy_status = {}
try:
import scipy
scipy_version = scipy.__version__
scipy_status['up_to_date'] = parse_version(
scipy_version) >= parse_version(scipy_min_version)
scipy_status['version'] = scipy_version
except ImportError:
scipy_status['up_to_date'] = False
scipy_status['version'] = ""
return scipy_status
def get_numpy_status():
"""
Returns a dictionary containing a boolean specifying whether NumPy
is up-to-date, along with the version string (empty string if
not installed).
"""
numpy_status = {}
try:
import numpy
numpy_version = numpy.__version__
numpy_status['up_to_date'] = parse_version(
numpy_version) >= parse_version(numpy_min_version)
numpy_status['version'] = numpy_version
except ImportError:
numpy_status['up_to_date'] = False
numpy_status['version'] = ""
return numpy_status
def generate_cython():
cwd = os.path.abspath(os.path.dirname(__file__))
print("Cythonizing sources")
p = subprocess.call([sys.executable, os.path.join(cwd,
'build_tools',
'cythonize.py'),
'sklearn'],
cwd=cwd)
if p != 0:
raise RuntimeError("Running cythonize failed!")
def setup_package():
metadata = dict(name=DISTNAME,
maintainer=MAINTAINER,
maintainer_email=MAINTAINER_EMAIL,
description=DESCRIPTION,
license=LICENSE,
url=URL,
version=VERSION,
download_url=DOWNLOAD_URL,
long_description=LONG_DESCRIPTION,
classifiers=['Intended Audience :: Science/Research',
'Intended Audience :: Developers',
'License :: OSI Approved',
'Programming Language :: C',
'Programming Language :: Python',
'Topic :: Software Development',
'Topic :: Scientific/Engineering',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX',
'Operating System :: Unix',
'Operating System :: MacOS',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
],
cmdclass=cmdclass,
**extra_setuptools_args)
if len(sys.argv) == 1 or (
len(sys.argv) >= 2 and ('--help' in sys.argv[1:] or
sys.argv[1] in ('--help-commands',
'egg_info',
'--version',
'clean'))):
# For these actions, NumPy is not required, nor Cythonization
#
# They are required to succeed without Numpy for example when
# pip is used to install Scikit-learn when Numpy is not yet present in
# the system.
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
metadata['version'] = VERSION
else:
numpy_status = get_numpy_status()
numpy_req_str = "scikit-learn requires NumPy >= {0}.\n".format(
numpy_min_version)
scipy_status = get_scipy_status()
scipy_req_str = "scikit-learn requires SciPy >= {0}.\n".format(
scipy_min_version)
instructions = ("Installation instructions are available on the "
"scikit-learn website: "
"http://scikit-learn.org/stable/install.html\n")
if numpy_status['up_to_date'] is False:
if numpy_status['version']:
raise ImportError("Your installation of Numerical Python "
"(NumPy) {0} is out-of-date.\n{1}{2}"
.format(numpy_status['version'],
numpy_req_str, instructions))
else:
raise ImportError("Numerical Python (NumPy) is not "
"installed.\n{0}{1}"
.format(numpy_req_str, instructions))
if scipy_status['up_to_date'] is False:
if scipy_status['version']:
raise ImportError("Your installation of Scientific Python "
"(SciPy) {0} is out-of-date.\n{1}{2}"
.format(scipy_status['version'],
scipy_req_str, instructions))
else:
raise ImportError("Scientific Python (SciPy) is not "
"installed.\n{0}{1}"
.format(scipy_req_str, instructions))
from numpy.distutils.core import setup
metadata['configuration'] = configuration
if len(sys.argv) >= 2 and sys.argv[1] not in 'config':
# Cythonize if needed
print('Generating cython files')
cwd = os.path.abspath(os.path.dirname(__file__))
if not os.path.exists(os.path.join(cwd, 'PKG-INFO')):
# Generate Cython sources, unless building from source release
generate_cython()
# Clean left-over .so file
for dirpath, dirnames, filenames in os.walk(
os.path.join(cwd, 'sklearn')):
for filename in filenames:
extension = os.path.splitext(filename)[1]
if extension in (".so", ".pyd", ".dll"):
pyx_file = str.replace(filename, extension, '.pyx')
print(pyx_file)
if not os.path.exists(os.path.join(dirpath, pyx_file)):
os.unlink(os.path.join(dirpath, filename))
setup(**metadata)
if __name__ == "__main__":
setup_package()
| bsd-3-clause |
OshynSong/scikit-learn | examples/linear_model/plot_lasso_model_selection.py | 311 | 5431 | """
===================================================
Lasso model selection: Cross-Validation / AIC / BIC
===================================================
Use the Akaike information criterion (AIC), the Bayes Information
criterion (BIC) and cross-validation to select an optimal value
of the regularization parameter alpha of the :ref:`lasso` estimator.
Results obtained with LassoLarsIC are based on AIC/BIC criteria.
Information-criterion based model selection is very fast, but it
relies on a proper estimation of degrees of freedom. The criteria are
derived for large samples (asymptotic results) and assume the model
is correct, i.e. that the data are actually generated by this model.
They also tend to break when the problem is badly conditioned
(more features than samples).
For cross-validation, we use 20-fold with 2 algorithms to compute the
Lasso path: coordinate descent, as implemented by the LassoCV class, and
Lars (least angle regression) as implemented by the LassoLarsCV class.
Both algorithms give roughly the same results. They differ with regard
to their execution speed and sources of numerical errors.
Lars computes a path solution only for each kink in the path. As a
result, it is very efficient when there are only a few kinks, which is
the case if there are few features or samples. It is also able to
compute the full path without setting any meta parameter. In contrast,
coordinate descent computes the path points on a pre-specified grid
(here we use the default). Thus it is more efficient if the number of
grid points is smaller than the number of kinks in the path. Such a
strategy can be interesting if the number of features is really large
and there are enough samples to select a large number of them. In terms of
numerical errors, for heavily correlated variables, Lars will accumulate
more errors, while the coordinate descent algorithm will only sample the
path on a grid.
Note how the optimal value of alpha varies for each fold. This
illustrates why nested-cross validation is necessary when trying to
evaluate the performance of a method for which a parameter is chosen by
cross-validation: this choice of parameter may not be optimal for unseen
data.
"""
print(__doc__)
# Author: Olivier Grisel, Gael Varoquaux, Alexandre Gramfort
# License: BSD 3 clause
import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LassoCV, LassoLarsCV, LassoLarsIC
from sklearn import datasets
diabetes = datasets.load_diabetes()
X = diabetes.data
y = diabetes.target
rng = np.random.RandomState(42)
X = np.c_[X, rng.randn(X.shape[0], 14)] # add some bad features
# normalize data as done by Lars to allow for comparison
X /= np.sqrt(np.sum(X ** 2, axis=0))
##############################################################################
# LassoLarsIC: least angle regression with BIC/AIC criterion
model_bic = LassoLarsIC(criterion='bic')
t1 = time.time()
model_bic.fit(X, y)
t_bic = time.time() - t1
alpha_bic_ = model_bic.alpha_
model_aic = LassoLarsIC(criterion='aic')
model_aic.fit(X, y)
alpha_aic_ = model_aic.alpha_
def plot_ic_criterion(model, name, color):
alpha_ = model.alpha_
alphas_ = model.alphas_
criterion_ = model.criterion_
plt.plot(-np.log10(alphas_), criterion_, '--', color=color,
linewidth=3, label='%s criterion' % name)
plt.axvline(-np.log10(alpha_), color=color, linewidth=3,
label='alpha: %s estimate' % name)
plt.xlabel('-log(alpha)')
plt.ylabel('criterion')
plt.figure()
plot_ic_criterion(model_aic, 'AIC', 'b')
plot_ic_criterion(model_bic, 'BIC', 'r')
plt.legend()
plt.title('Information-criterion for model selection (training time %.3fs)'
% t_bic)
##############################################################################
# LassoCV: coordinate descent
# Compute paths
print("Computing regularization path using the coordinate descent lasso...")
t1 = time.time()
model = LassoCV(cv=20).fit(X, y)
t_lasso_cv = time.time() - t1
# Display results
m_log_alphas = -np.log10(model.alphas_)
plt.figure()
ymin, ymax = 2300, 3800
plt.plot(m_log_alphas, model.mse_path_, ':')
plt.plot(m_log_alphas, model.mse_path_.mean(axis=-1), 'k',
label='Average across the folds', linewidth=2)
plt.axvline(-np.log10(model.alpha_), linestyle='--', color='k',
label='alpha: CV estimate')
plt.legend()
plt.xlabel('-log(alpha)')
plt.ylabel('Mean square error')
plt.title('Mean square error on each fold: coordinate descent '
'(train time: %.2fs)' % t_lasso_cv)
plt.axis('tight')
plt.ylim(ymin, ymax)
##############################################################################
# LassoLarsCV: least angle regression
# Compute paths
print("Computing regularization path using the Lars lasso...")
t1 = time.time()
model = LassoLarsCV(cv=20).fit(X, y)
t_lasso_lars_cv = time.time() - t1
# Display results
m_log_alphas = -np.log10(model.cv_alphas_)
plt.figure()
plt.plot(m_log_alphas, model.cv_mse_path_, ':')
plt.plot(m_log_alphas, model.cv_mse_path_.mean(axis=-1), 'k',
label='Average across the folds', linewidth=2)
plt.axvline(-np.log10(model.alpha_), linestyle='--', color='k',
label='alpha CV')
plt.legend()
plt.xlabel('-log(alpha)')
plt.ylabel('Mean square error')
plt.title('Mean square error on each fold: Lars (train time: %.2fs)'
% t_lasso_lars_cv)
plt.axis('tight')
plt.ylim(ymin, ymax)
plt.show()
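##############################################################################
# Added illustration (not part of the original example): the docstring notes
# that nested cross-validation is needed to evaluate a method whose alpha is
# itself chosen by cross-validation. A minimal sketch of such an outer loop
# around LassoCV follows; it assumes an older scikit-learn layout where
# cross_val_score lives in sklearn.cross_validation.
from sklearn.cross_validation import cross_val_score
nested_scores = cross_val_score(LassoCV(cv=5), X, y, cv=5)
print("Nested 5-fold CV R^2 for LassoCV: %0.3f (+/- %0.3f)"
      % (nested_scores.mean(), nested_scores.std()))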
| bsd-3-clause |
4tikhonov/eurogis | maps/usecases/dataapi.py | 3 | 12390 | {
"metadata": {
"name": "",
"signature": "sha256:9509cd5ecf7e3d91bfc019d1efa4a828e592bd352e0a16093905deb34d5021cf"
},
"nbformat": 3,
"nbformat_minor": 0,
"worksheets": [
{
"cells": [
{
"cell_type": "code",
"collapsed": false,
"input": [
"%matplotlib inline\n",
"import urllib2\n",
"import simplejson\n",
"import json\n",
"import sys\n",
"import pandas as pd\n",
"import random\n",
"import vincent\n",
"\n",
"# Global\n",
"apiurl = \"http://node-128.dev.socialhistoryservices.org/api/data\"\n",
"amscodecolumn = 'amsterdam_code'\n",
"yearcolumn = 'year'\n",
"\n",
"def load_api_data(apiurl, code, year):\n",
" amscode = str(code)\n",
" jsondataurl = apiurl + \"?code=\" + str(code)\n",
" \n",
" req = urllib2.Request(jsondataurl)\n",
" opener = urllib2.build_opener()\n",
" f = opener.open(req)\n",
" dataframe = simplejson.load(f)\n",
" return dataframe\n",
"\n",
"def data2frame(dataframe):\n",
" data = dataframe['data']\n",
" years = {}\n",
" debug = 0\n",
" datavalues = {}\n",
" \n",
" for item in data:\n",
" amscode = item[amscodecolumn]\n",
" year = item[yearcolumn]\n",
" datavalues[year] = item\n",
" if debug:\n",
" print str(amscode) + ' ' + str(year)\n",
" print item\n",
" \n",
" for year in datavalues: \n",
" values = datavalues[year]\n",
" for name in values:\n",
" if debug:\n",
" print name + ' ' + str(values[name])\n",
" return datavalues\n",
" \n",
"varcode = \"TXCU\"\n",
"varyear = \"1997\"\n",
"data = load_api_data(apiurl, varcode, varyear)\n",
"# 'indicator': 'TK', 'code': 'TXCU', 'naam': 'ADORP', 'amsterdam_code': '10996', 'value': 89.0, 'year': 1937, 'id': 1, 'cbsnr': '1'\n",
"# Create DataFrame object pf and load data \n",
"df = pd.DataFrame(data['data'])\n",
"#print df['indicator']\n",
"\n",
"# Exploring dataset\n",
"print df.head()\n",
"newframe = df[['year', 'amsterdam_code', 'naam', 'value']]"
],
"language": "python",
"metadata": {},
"outputs": [
{
"output_type": "stream",
"stream": "stdout",
"text": [
" amsterdam_code cbsnr code id indicator naam value year\n",
"0 10996 1 TXCU 1 TK ADORP 89 1937\n",
"1 10999 2 TXCU 209 TK ADUARD 49 1937\n",
"2 10886 3 TXCU 426 TK APPINGEDAM 315 1937\n",
"3 10539 4 TXCU 660 TK BAFLO 260 1937\n",
"4 10425 5 TXCU 877 TK BEDUM 263 1937\n",
"\n",
"[5 rows x 8 columns]\n"
]
}
],
"prompt_number": 137
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Now let's calculate total values for each city and show 10 cities"
]
},
{
"cell_type": "code",
"collapsed": false,
"input": [
"newframe = df[['naam', 'value']][:20]\n",
"print newframe"
],
"language": "python",
"metadata": {},
"outputs": [
{
"output_type": "stream",
"stream": "stdout",
"text": [
" naam value\n",
"0 ADORP 89\n",
"1 ADUARD 49\n",
"2 APPINGEDAM 315\n",
"3 BAFLO 260\n",
"4 BEDUM 263\n",
"5 BEERTA 57\n",
"6 BIERUM 420\n",
"7 TEN BOER 247\n",
"8 DELFZIJL 288\n",
"9 EENRUM 128\n",
"10 EZINGE 42\n",
"11 FINSTERWOLDE 54\n",
"12 GRONINGEN 2331\n",
"13 GROOTEGAST 389\n",
"14 GRIJPSKERK 73\n",
"15 HAREN GR 327\n",
"16 KANTENS 189\n",
"17 KLOOSTERBUREN 78\n",
"18 LEEK 375\n",
"19 LEENS 195\n",
"\n",
"[20 rows x 2 columns]\n"
]
}
],
"prompt_number": 138
},
{
"cell_type": "code",
"collapsed": false,
"input": [
"values = newframe['value'][:20]\n",
"names = newframe['naam'][:20]\n",
"list_data = []\n",
"for value in values:\n",
" list_data.append(value)\n",
"\n",
"bar = vincent.Bar(list_data)\n",
"print list_data\n",
"vincent.core.initialize_notebook()\n",
"\n",
"bar.axis_titles(x='CityID', y='Value')\n",
"bar.display()"
],
"language": "python",
"metadata": {},
"outputs": [
{
"output_type": "stream",
"stream": "stdout",
"text": [
"[89.0, 49.0, 315.0, 260.0, 263.0, 57.0, 420.0, 247.0, 288.0, 128.0, 42.0, 54.0, 2331.0, 389.0, 73.0, 327.0, 189.0, 78.0, 375.0, 195.0]\n"
]
},
{
"html": [
"\n",
" <script>\n",
" \n",
" function vct_load_lib(url, callback){\n",
" if(typeof d3 !== 'undefined' &&\n",
" url === 'http://d3js.org/d3.v3.min.js'){\n",
" callback()\n",
" }\n",
" var s = document.createElement('script');\n",
" s.src = url;\n",
" s.async = true;\n",
" s.onreadystatechange = s.onload = callback;\n",
" s.onerror = function(){\n",
" console.warn(\"failed to load library \" + url);\n",
" };\n",
" document.getElementsByTagName(\"head\")[0].appendChild(s);\n",
" };\n",
" var vincent_event = new CustomEvent(\n",
" \"vincent_libs_loaded\",\n",
" {bubbles: true, cancelable: true}\n",
" );\n",
" \n",
" function load_all_libs(){\n",
" console.log('Loading Vincent libs...')\n",
" vct_load_lib('http://d3js.org/d3.v3.min.js', function(){\n",
" vct_load_lib('http://d3js.org/d3.geo.projection.v0.min.js', function(){\n",
" vct_load_lib('http://wrobstory.github.io/d3-cloud/d3.layout.cloud.js', function(){\n",
" vct_load_lib('http://wrobstory.github.io/vega/vega.v1.3.3.js', function(){\n",
" window.dispatchEvent(vincent_event);\n",
" });\n",
" });\n",
" });\n",
" });\n",
" };\n",
" if(typeof define === \"function\" && define.amd){\n",
" if (window['d3'] === undefined ||\n",
" window['topojson'] === undefined){\n",
" require.config(\n",
" {paths: {\n",
" d3: 'http://d3js.org/d3.v3.min',\n",
" topojson: 'http://d3js.org/topojson.v1.min'\n",
" }\n",
" }\n",
" );\n",
" require([\"d3\"], function(d3){\n",
" console.log('Loading Vincent from require.js...')\n",
" window.d3 = d3;\n",
" require([\"topojson\"], function(topojson){\n",
" window.topojson = topojson;\n",
" load_all_libs();\n",
" });\n",
" });\n",
" } else {\n",
" load_all_libs();\n",
" };\n",
" }else{\n",
" console.log('Require.js not found, loading manually...')\n",
" load_all_libs();\n",
" };\n",
"\n",
" </script>"
],
"metadata": {},
"output_type": "display_data",
"text": [
"<IPython.core.display.HTML at 0xaf76672c>"
]
},
{
"html": [
"<div id=\"vis6875aef783df4a92bc278bf3d63896d9\"></div>\n",
"<script>\n",
" ( function() {\n",
" var _do_plot = function() {\n",
" if (typeof vg === 'undefined') {\n",
" window.addEventListener('vincent_libs_loaded', _do_plot)\n",
" return;\n",
" }\n",
" vg.parse.spec({\"axes\": [{\"scale\": \"x\", \"title\": \"CityID\", \"type\": \"x\"}, {\"scale\": \"y\", \"title\": \"Value\", \"type\": \"y\"}], \"data\": [{\"name\": \"table\", \"values\": [{\"col\": \"data\", \"idx\": 0, \"val\": 89.0}, {\"col\": \"data\", \"idx\": 1, \"val\": 49.0}, {\"col\": \"data\", \"idx\": 2, \"val\": 315.0}, {\"col\": \"data\", \"idx\": 3, \"val\": 260.0}, {\"col\": \"data\", \"idx\": 4, \"val\": 263.0}, {\"col\": \"data\", \"idx\": 5, \"val\": 57.0}, {\"col\": \"data\", \"idx\": 6, \"val\": 420.0}, {\"col\": \"data\", \"idx\": 7, \"val\": 247.0}, {\"col\": \"data\", \"idx\": 8, \"val\": 288.0}, {\"col\": \"data\", \"idx\": 9, \"val\": 128.0}, {\"col\": \"data\", \"idx\": 10, \"val\": 42.0}, {\"col\": \"data\", \"idx\": 11, \"val\": 54.0}, {\"col\": \"data\", \"idx\": 12, \"val\": 2331.0}, {\"col\": \"data\", \"idx\": 13, \"val\": 389.0}, {\"col\": \"data\", \"idx\": 14, \"val\": 73.0}, {\"col\": \"data\", \"idx\": 15, \"val\": 327.0}, {\"col\": \"data\", \"idx\": 16, \"val\": 189.0}, {\"col\": \"data\", \"idx\": 17, \"val\": 78.0}, {\"col\": \"data\", \"idx\": 18, \"val\": 375.0}, {\"col\": \"data\", \"idx\": 19, \"val\": 195.0}]}, {\"name\": \"stats\", \"source\": \"table\", \"transform\": [{\"keys\": [\"data.idx\"], \"type\": \"facet\"}, {\"type\": \"stats\", \"value\": \"data.val\"}]}], \"height\": 500, \"legends\": [], \"marks\": [{\"from\": {\"data\": \"table\", \"transform\": [{\"keys\": [\"data.col\"], \"type\": \"facet\"}, {\"height\": \"data.val\", \"point\": \"data.idx\", \"type\": \"stack\"}]}, \"marks\": [{\"properties\": {\"enter\": {\"fill\": {\"field\": \"data.col\", \"scale\": \"color\"}, \"width\": {\"band\": true, \"offset\": -1, \"scale\": \"x\"}, \"x\": {\"field\": \"data.idx\", \"scale\": \"x\"}, \"y\": {\"field\": \"y\", \"scale\": \"y\"}, \"y2\": {\"field\": \"y2\", \"scale\": \"y\"}}}, \"type\": \"rect\"}], \"type\": \"group\"}], \"padding\": \"auto\", \"scales\": [{\"domain\": {\"data\": \"table\", \"field\": \"data.idx\"}, \"name\": \"x\", \"range\": \"width\", \"type\": \"ordinal\", \"zero\": false}, {\"domain\": {\"data\": \"stats\", \"field\": \"sum\"}, \"name\": \"y\", \"nice\": true, \"range\": \"height\"}, {\"domain\": {\"data\": \"table\", \"field\": \"data.col\"}, \"name\": \"color\", \"range\": \"category20\", \"type\": \"ordinal\"}], \"width\": 960}, function(chart) {\n",
" chart({el: \"#vis6875aef783df4a92bc278bf3d63896d9\"}).update();\n",
" });\n",
" };\n",
" _do_plot();\n",
" })();\n",
"</script>\n",
"<style>.vega canvas {width: 100%;}</style>\n",
" "
],
"metadata": {},
"output_type": "display_data",
"text": [
"<IPython.core.display.HTML at 0xadc5a28c>"
]
}
],
"prompt_number": 142
},
{
"cell_type": "code",
"collapsed": false,
"input": [],
"language": "python",
"metadata": {},
"outputs": [],
"prompt_number": 139
}
],
"metadata": {}
}
]
} | gpl-3.0 |
lthurlow/Network-Grapher | proj/external/matplotlib-1.2.1/lib/matplotlib/widgets.py | 2 | 50989 | """
GUI Neutral widgets
===================
Widgets that are designed to work for any of the GUI backends.
All of these widgets require you to predefine an :class:`matplotlib.axes.Axes`
instance and pass that as the first arg. matplotlib doesn't try to
be too smart with respect to layout -- you will have to figure out how
wide and tall you want your Axes to be to accommodate your widget.
"""
from __future__ import print_function
import numpy as np
from mlab import dist
from patches import Circle, Rectangle
from lines import Line2D
from transforms import blended_transform_factory
from matplotlib import MatplotlibDeprecationWarning as mplDeprecation
class LockDraw:
"""
Some widgets, like the cursor, draw onto the canvas, and this is not
desirable under all circumstances, like when the toolbar is in
zoom-to-rect mode and drawing a rectangle. The module level "lock"
allows someone to grab the lock and prevent other widgets from
drawing. Use ``matplotlib.widgets.lock(someobj)`` to pr
"""
# FIXME: This docstring ends abruptly without...
def __init__(self):
self._owner = None
def __call__(self, o):
'reserve the lock for *o*'
if not self.available(o):
raise ValueError('already locked')
self._owner = o
def release(self, o):
'release the lock'
if not self.available(o):
raise ValueError('you do not own this lock')
self._owner = None
def available(self, o):
'drawing is available to *o*'
return not self.locked() or self.isowner(o)
def isowner(self, o):
'Return True if *o* owns this lock'
return self._owner is o
def locked(self):
'Return True if the lock is currently held by an owner'
return self._owner is not None
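# Illustrative sketch (added, not from the original source): the typical use
# of a LockDraw instance such as ``FigureCanvasBase.widgetlock``, where a tool
# claims exclusive drawing rights and releases them when done. ``_tool`` is a
# hypothetical stand-in for any widget object; the block is guarded so that
# importing this module is unaffected.
if __name__ == '__main__':
    _lock = LockDraw()
    _tool = object()
    if _lock.available(_tool):
        _lock(_tool)              # grab the lock for this tool
    assert _lock.isowner(_tool) and _lock.locked()
    _lock.release(_tool)          # hand it back when finished drawing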
class Widget(object):
"""
Abstract base class for GUI neutral widgets
"""
drawon = True
eventson = True
class AxesWidget(Widget):
"""Widget that is connected to a single :class:`~matplotlib.axes.Axes`.
Attributes:
*ax* : :class:`~matplotlib.axes.Axes`
The parent axes for the widget
*canvas* : :class:`~matplotlib.backend_bases.FigureCanvasBase` subclass
The parent figure canvas for the widget.
*active* : bool
If False, the widget does not respond to events.
"""
def __init__(self, ax):
self.ax = ax
self.canvas = ax.figure.canvas
self.cids = []
self.active = True
def connect_event(self, event, callback):
"""Connect callback with an event.
This should be used in lieu of `figure.canvas.mpl_connect` since this
function stores call back ids for later clean up.
"""
cid = self.canvas.mpl_connect(event, callback)
self.cids.append(cid)
def disconnect_events(self):
"""Disconnect all events created by this widget."""
for c in self.cids:
self.canvas.mpl_disconnect(c)
def ignore(self, event):
"""Return True if event should be ignored.
This method (or a version of it) should be called at the beginning
of any event callback.
"""
return not self.active
class Button(AxesWidget):
"""
A GUI neutral button
The following attributes are accessible
*ax*
The :class:`matplotlib.axes.Axes` the button renders into.
*label*
A :class:`matplotlib.text.Text` instance.
*color*
The color of the button when not hovering.
*hovercolor*
The color of the button when hovering.
Call :meth:`on_clicked` to connect to the button
"""
def __init__(self, ax, label, image=None,
color='0.85', hovercolor='0.95'):
"""
*ax*
The :class:`matplotlib.axes.Axes` instance the button
will be placed into.
*label*
The button text. Accepts string.
*image*
The image to place in the button, if not *None*.
Can be any legal arg to imshow (numpy array,
matplotlib Image instance, or PIL image).
*color*
The color of the button when not activated
*hovercolor*
The color of the button when the mouse is over it
"""
AxesWidget.__init__(self, ax)
if image is not None:
ax.imshow(image)
self.label = ax.text(0.5, 0.5, label,
verticalalignment='center',
horizontalalignment='center',
transform=ax.transAxes)
self.cnt = 0
self.observers = {}
self.connect_event('button_press_event', self._click)
self.connect_event('button_release_event', self._release)
self.connect_event('motion_notify_event', self._motion)
ax.set_navigate(False)
ax.set_axis_bgcolor(color)
ax.set_xticks([])
ax.set_yticks([])
self.color = color
self.hovercolor = hovercolor
self._lastcolor = color
def _click(self, event):
if self.ignore(event):
return
if event.inaxes != self.ax:
return
if not self.eventson:
return
if event.canvas.mouse_grabber != self.ax:
event.canvas.grab_mouse(self.ax)
def _release(self, event):
if self.ignore(event):
return
if event.canvas.mouse_grabber != self.ax:
return
event.canvas.release_mouse(self.ax)
if not self.eventson:
return
if event.inaxes != self.ax:
return
for cid, func in self.observers.iteritems():
func(event)
def _motion(self, event):
if self.ignore(event):
return
if event.inaxes==self.ax:
c = self.hovercolor
else:
c = self.color
if c != self._lastcolor:
self.ax.set_axis_bgcolor(c)
self._lastcolor = c
if self.drawon: self.ax.figure.canvas.draw()
def on_clicked(self, func):
"""
When the button is clicked, call this *func* with event
A connection id is returned which can be used to disconnect
"""
cid = self.cnt
self.observers[cid] = func
self.cnt += 1
return cid
def disconnect(self, cid):
'remove the observer with connection id *cid*'
try: del self.observers[cid]
except KeyError: pass
class Slider(AxesWidget):
"""
A slider representing a floating point range
The following attributes are defined
*ax* : the slider :class:`matplotlib.axes.Axes` instance
*val* : the current slider value
*vline* : a :class:`matplotlib.lines.Line2D` instance
representing the initial value of the slider
*poly* : A :class:`matplotlib.patches.Polygon` instance
which is the slider knob
*valfmt* : the format string for formatting the slider text
*label* : a :class:`matplotlib.text.Text` instance
for the slider label
*closedmin* : whether the slider is closed on the minimum
*closedmax* : whether the slider is closed on the maximum
*slidermin* : another slider - if not *None*, this slider must be
greater than *slidermin*
*slidermax* : another slider - if not *None*, this slider must be
less than *slidermax*
*dragging* : allow for mouse dragging on slider
Call :meth:`on_changed` to connect to the slider event
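    Example usage (an illustrative sketch; the axes rectangle, label and
    callback are arbitrary choices)::
        from matplotlib.widgets import Slider
        from pylab import figure, show
        fig = figure()
        axfreq = fig.add_axes([0.25, 0.1, 0.65, 0.03])
        sfreq = Slider(axfreq, 'Freq', 0.1, 30.0, valinit=3.0)
        def update(val):
            print 'new frequency:', val
        sfreq.on_changed(update)
        show()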
"""
def __init__(self, ax, label, valmin, valmax, valinit=0.5, valfmt='%1.2f',
closedmin=True, closedmax=True, slidermin=None, slidermax=None,
dragging=True, **kwargs):
"""
Create a slider from *valmin* to *valmax* in axes *ax*
*valinit*
The slider initial position
*label*
The slider label
*valfmt*
Used to format the slider value
*closedmin* and *closedmax*
Indicate whether the slider interval is closed
*slidermin* and *slidermax*
Used to constrain the value of this slider to the values
of other sliders.
additional kwargs are passed on to ``self.poly`` which is the
:class:`matplotlib.patches.Rectangle` which draws the slider
knob. See the :class:`matplotlib.patches.Rectangle` documentation
valid property names (e.g., *facecolor*, *edgecolor*, *alpha*, ...)
"""
AxesWidget.__init__(self, ax)
self.valmin = valmin
self.valmax = valmax
self.val = valinit
self.valinit = valinit
self.poly = ax.axvspan(valmin,valinit,0,1, **kwargs)
self.vline = ax.axvline(valinit,0,1, color='r', lw=1)
self.valfmt=valfmt
ax.set_yticks([])
ax.set_xlim((valmin, valmax))
ax.set_xticks([])
ax.set_navigate(False)
self.connect_event('button_press_event', self._update)
self.connect_event('button_release_event', self._update)
if dragging:
self.connect_event('motion_notify_event', self._update)
self.label = ax.text(-0.02, 0.5, label, transform=ax.transAxes,
verticalalignment='center',
horizontalalignment='right')
self.valtext = ax.text(1.02, 0.5, valfmt%valinit,
transform=ax.transAxes,
verticalalignment='center',
horizontalalignment='left')
self.cnt = 0
self.observers = {}
self.closedmin = closedmin
self.closedmax = closedmax
self.slidermin = slidermin
self.slidermax = slidermax
self.drag_active = False
def _update(self, event):
'update the slider position'
if self.ignore(event):
return
if event.button != 1:
return
if event.name == 'button_press_event' and event.inaxes == self.ax:
self.drag_active = True
event.canvas.grab_mouse(self.ax)
if not self.drag_active:
return
elif ((event.name == 'button_release_event')
or (event.name == 'button_press_event' and event.inaxes != self.ax)):
self.drag_active = False
event.canvas.release_mouse(self.ax)
return
val = event.xdata
if val <= self.valmin:
if not self.closedmin:
return
val = self.valmin
elif val >= self.valmax:
if not self.closedmax:
return
val = self.valmax
if self.slidermin is not None and val <= self.slidermin.val:
if not self.closedmin:
return
val = self.slidermin.val
if self.slidermax is not None and val >= self.slidermax.val:
if not self.closedmax:
return
val = self.slidermax.val
self.set_val(val)
def set_val(self, val):
xy = self.poly.xy
xy[2] = val, 1
xy[3] = val, 0
self.poly.xy = xy
self.valtext.set_text(self.valfmt%val)
if self.drawon: self.ax.figure.canvas.draw()
self.val = val
if not self.eventson: return
for cid, func in self.observers.iteritems():
func(val)
def on_changed(self, func):
"""
When the slider value is changed, call *func* with the new
slider position
A connection id is returned which can be used to disconnect
"""
cid = self.cnt
self.observers[cid] = func
self.cnt += 1
return cid
def disconnect(self, cid):
'remove the observer with connection id *cid*'
try: del self.observers[cid]
except KeyError: pass
def reset(self):
"reset the slider to the initial value if needed"
if (self.val != self.valinit):
self.set_val(self.valinit)
class CheckButtons(AxesWidget):
"""
    A GUI neutral set of check buttons
The following attributes are exposed
*ax*
The :class:`matplotlib.axes.Axes` instance the buttons are
located in
*labels*
List of :class:`matplotlib.text.Text` instances
*lines*
List of (line1, line2) tuples for the x's in the check boxes.
These lines exist for each box, but have ``set_visible(False)``
when its box is not checked.
*rectangles*
List of :class:`matplotlib.patches.Rectangle` instances
Connect to the CheckButtons with the :meth:`on_clicked` method
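    Example usage (an illustrative sketch; the labels and callback are
    arbitrary choices)::
        from matplotlib.widgets import CheckButtons
        from pylab import figure, show
        fig = figure()
        rax = fig.add_axes([0.05, 0.4, 0.2, 0.2])
        check = CheckButtons(rax, ('red', 'green', 'blue'),
                             (True, False, True))
        def on_toggle(label):
            print label, 'toggled'
        check.on_clicked(on_toggle)
        show()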
"""
def __init__(self, ax, labels, actives):
"""
Add check buttons to :class:`matplotlib.axes.Axes` instance *ax*
*labels*
A len(buttons) list of labels as strings
*actives*
A len(buttons) list of booleans indicating whether
the button is active
"""
AxesWidget.__init__(self, ax)
ax.set_xticks([])
ax.set_yticks([])
ax.set_navigate(False)
if len(labels)>1:
dy = 1./(len(labels)+1)
ys = np.linspace(1-dy, dy, len(labels))
else:
dy = 0.25
ys = [0.5]
cnt = 0
axcolor = ax.get_axis_bgcolor()
self.labels = []
self.lines = []
self.rectangles = []
lineparams = {'color':'k', 'linewidth':1.25, 'transform':ax.transAxes,
'solid_capstyle':'butt'}
for y, label in zip(ys, labels):
t = ax.text(0.25, y, label, transform=ax.transAxes,
horizontalalignment='left',
verticalalignment='center')
w, h = dy/2., dy/2.
x, y = 0.05, y-h/2.
p = Rectangle(xy=(x,y), width=w, height=h,
facecolor=axcolor,
transform=ax.transAxes)
l1 = Line2D([x, x+w], [y+h, y], **lineparams)
l2 = Line2D([x, x+w], [y, y+h], **lineparams)
l1.set_visible(actives[cnt])
l2.set_visible(actives[cnt])
self.labels.append(t)
self.rectangles.append(p)
self.lines.append((l1,l2))
ax.add_patch(p)
ax.add_line(l1)
ax.add_line(l2)
cnt += 1
self.connect_event('button_press_event', self._clicked)
self.cnt = 0
self.observers = {}
def _clicked(self, event):
if self.ignore(event):
return
if event.button !=1 : return
if event.inaxes != self.ax: return
for p,t,lines in zip(self.rectangles, self.labels, self.lines):
if (t.get_window_extent().contains(event.x, event.y) or
p.get_window_extent().contains(event.x, event.y) ):
l1, l2 = lines
l1.set_visible(not l1.get_visible())
l2.set_visible(not l2.get_visible())
thist = t
break
else:
return
if self.drawon: self.ax.figure.canvas.draw()
if not self.eventson: return
for cid, func in self.observers.iteritems():
func(thist.get_text())
def on_clicked(self, func):
"""
When the button is clicked, call *func* with button label
A connection id is returned which can be used to disconnect
"""
cid = self.cnt
self.observers[cid] = func
self.cnt += 1
return cid
def disconnect(self, cid):
'remove the observer with connection id *cid*'
try: del self.observers[cid]
except KeyError: pass
class RadioButtons(AxesWidget):
"""
A GUI neutral radio button
The following attributes are exposed
*ax*
The :class:`matplotlib.axes.Axes` instance the buttons are in
*activecolor*
The color of the button when clicked
*labels*
A list of :class:`matplotlib.text.Text` instances
*circles*
A list of :class:`matplotlib.patches.Circle` instances
Connect to the RadioButtons with the :meth:`on_clicked` method
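    Example usage (an illustrative sketch; the labels and callback are
    arbitrary choices)::
        from matplotlib.widgets import RadioButtons
        from pylab import figure, show
        fig = figure()
        rax = fig.add_axes([0.05, 0.7, 0.2, 0.2])
        radio = RadioButtons(rax, ('2 Hz', '4 Hz', '8 Hz'), active=0)
        def on_select(label):
            print 'selected', label
        radio.on_clicked(on_select)
        show()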
"""
def __init__(self, ax, labels, active=0, activecolor='blue'):
"""
Add radio buttons to :class:`matplotlib.axes.Axes` instance *ax*
*labels*
A len(buttons) list of labels as strings
*active*
The index into labels for the button that is active
*activecolor*
The color of the button when clicked
"""
AxesWidget.__init__(self, ax)
self.activecolor = activecolor
ax.set_xticks([])
ax.set_yticks([])
ax.set_navigate(False)
dy = 1./(len(labels)+1)
ys = np.linspace(1-dy, dy, len(labels))
cnt = 0
axcolor = ax.get_axis_bgcolor()
self.labels = []
self.circles = []
for y, label in zip(ys, labels):
t = ax.text(0.25, y, label, transform=ax.transAxes,
horizontalalignment='left',
verticalalignment='center')
if cnt==active:
facecolor = activecolor
else:
facecolor = axcolor
p = Circle(xy=(0.15, y), radius=0.05, facecolor=facecolor,
transform=ax.transAxes)
self.labels.append(t)
self.circles.append(p)
ax.add_patch(p)
cnt += 1
self.connect_event('button_press_event', self._clicked)
self.cnt = 0
self.observers = {}
def _clicked(self, event):
if self.ignore(event):
return
if event.button !=1 : return
if event.inaxes != self.ax: return
xy = self.ax.transAxes.inverted().transform_point((event.x, event.y))
pclicked = np.array([xy[0], xy[1]])
def inside(p):
pcirc = np.array([p.center[0], p.center[1]])
return dist(pclicked, pcirc) < p.radius
for p,t in zip(self.circles, self.labels):
if t.get_window_extent().contains(event.x, event.y) or inside(p):
inp = p
thist = t
break
else: return
for p in self.circles:
if p==inp: color = self.activecolor
else: color = self.ax.get_axis_bgcolor()
p.set_facecolor(color)
if self.drawon: self.ax.figure.canvas.draw()
if not self.eventson: return
for cid, func in self.observers.iteritems():
func(thist.get_text())
def on_clicked(self, func):
"""
When the button is clicked, call *func* with button label
A connection id is returned which can be used to disconnect
"""
cid = self.cnt
self.observers[cid] = func
self.cnt += 1
return cid
def disconnect(self, cid):
'remove the observer with connection id *cid*'
try: del self.observers[cid]
except KeyError: pass
class SubplotTool(Widget):
"""
A tool to adjust to subplot params of a :class:`matplotlib.figure.Figure`
"""
def __init__(self, targetfig, toolfig):
"""
*targetfig*
The figure instance to adjust
*toolfig*
The figure instance to embed the subplot tool into. If
            None, a default figure will be created. If you are using
            this from the GUI (e.g. the toolbar's *configure subplots*
            button), the tool figure is created for you.
        """
self.targetfig = targetfig
toolfig.subplots_adjust(left=0.2, right=0.9)
class toolbarfmt:
def __init__(self, slider):
self.slider = slider
def __call__(self, x, y):
fmt = '%s=%s'%(self.slider.label.get_text(), self.slider.valfmt)
return fmt%x
self.axleft = toolfig.add_subplot(711)
self.axleft.set_title('Click on slider to adjust subplot param')
self.axleft.set_navigate(False)
self.sliderleft = Slider(self.axleft, 'left', 0, 1, targetfig.subplotpars.left, closedmax=False)
self.sliderleft.on_changed(self.funcleft)
self.axbottom = toolfig.add_subplot(712)
self.axbottom.set_navigate(False)
self.sliderbottom = Slider(self.axbottom, 'bottom', 0, 1, targetfig.subplotpars.bottom, closedmax=False)
self.sliderbottom.on_changed(self.funcbottom)
self.axright = toolfig.add_subplot(713)
self.axright.set_navigate(False)
self.sliderright = Slider(self.axright, 'right', 0, 1, targetfig.subplotpars.right, closedmin=False)
self.sliderright.on_changed(self.funcright)
self.axtop = toolfig.add_subplot(714)
self.axtop.set_navigate(False)
self.slidertop = Slider(self.axtop, 'top', 0, 1, targetfig.subplotpars.top, closedmin=False)
self.slidertop.on_changed(self.functop)
self.axwspace = toolfig.add_subplot(715)
self.axwspace.set_navigate(False)
self.sliderwspace = Slider(self.axwspace, 'wspace', 0, 1, targetfig.subplotpars.wspace, closedmax=False)
self.sliderwspace.on_changed(self.funcwspace)
self.axhspace = toolfig.add_subplot(716)
self.axhspace.set_navigate(False)
self.sliderhspace = Slider(self.axhspace, 'hspace', 0, 1, targetfig.subplotpars.hspace, closedmax=False)
self.sliderhspace.on_changed(self.funchspace)
# constraints
self.sliderleft.slidermax = self.sliderright
self.sliderright.slidermin = self.sliderleft
self.sliderbottom.slidermax = self.slidertop
self.slidertop.slidermin = self.sliderbottom
bax = toolfig.add_axes([0.8, 0.05, 0.15, 0.075])
self.buttonreset = Button(bax, 'Reset')
sliders = (self.sliderleft, self.sliderbottom, self.sliderright,
self.slidertop, self.sliderwspace, self.sliderhspace, )
def func(event):
thisdrawon = self.drawon
self.drawon = False
# store the drawon state of each slider
bs = []
for slider in sliders:
bs.append(slider.drawon)
slider.drawon = False
# reset the slider to the initial position
for slider in sliders:
slider.reset()
# reset drawon
for slider, b in zip(sliders, bs):
slider.drawon = b
# draw the canvas
self.drawon = thisdrawon
if self.drawon:
toolfig.canvas.draw()
self.targetfig.canvas.draw()
# during reset there can be a temporary invalid state
# depending on the order of the reset so we turn off
# validation for the resetting
validate = toolfig.subplotpars.validate
toolfig.subplotpars.validate = False
self.buttonreset.on_clicked(func)
toolfig.subplotpars.validate = validate
def funcleft(self, val):
self.targetfig.subplots_adjust(left=val)
if self.drawon: self.targetfig.canvas.draw()
def funcright(self, val):
self.targetfig.subplots_adjust(right=val)
if self.drawon: self.targetfig.canvas.draw()
def funcbottom(self, val):
self.targetfig.subplots_adjust(bottom=val)
if self.drawon: self.targetfig.canvas.draw()
def functop(self, val):
self.targetfig.subplots_adjust(top=val)
if self.drawon: self.targetfig.canvas.draw()
def funcwspace(self, val):
self.targetfig.subplots_adjust(wspace=val)
if self.drawon: self.targetfig.canvas.draw()
def funchspace(self, val):
self.targetfig.subplots_adjust(hspace=val)
if self.drawon: self.targetfig.canvas.draw()
class Cursor(AxesWidget):
"""
    A horizontal and vertical line that span the axes and move with
    the pointer.  You can turn off the hline or vline respectively with
    the attributes
    *horizOn*
      Controls the visibility of the horizontal line
    *vertOn*
      Controls the visibility of the vertical line
and the visibility of the cursor itself with the *visible* attribute
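    Example usage (an illustrative sketch; the plotted data and line
    properties are arbitrary choices)::
        from matplotlib.widgets import Cursor
        from pylab import figure, show
        fig = figure()
        ax = fig.add_subplot(111)
        ax.plot(range(10))
        cursor = Cursor(ax, useblit=True, color='red', linewidth=1)
        show()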
"""
def __init__(self, ax, useblit=False, **lineprops):
"""
Add a cursor to *ax*. If ``useblit=True``, use the backend-
dependent blitting features for faster updates (GTKAgg
only for now). *lineprops* is a dictionary of line properties.
.. plot :: mpl_examples/widgets/cursor.py
"""
# TODO: Is the GTKAgg limitation still true?
AxesWidget.__init__(self, ax)
self.connect_event('motion_notify_event', self.onmove)
self.connect_event('draw_event', self.clear)
self.visible = True
self.horizOn = True
self.vertOn = True
self.useblit = useblit
if useblit:
lineprops['animated'] = True
self.lineh = ax.axhline(ax.get_ybound()[0], visible=False, **lineprops)
self.linev = ax.axvline(ax.get_xbound()[0], visible=False, **lineprops)
self.background = None
self.needclear = False
def clear(self, event):
'clear the cursor'
if self.ignore(event):
return
if self.useblit:
self.background = self.canvas.copy_from_bbox(self.ax.bbox)
self.linev.set_visible(False)
self.lineh.set_visible(False)
def onmove(self, event):
'on mouse motion draw the cursor if visible'
if self.ignore(event):
return
if not self.canvas.widgetlock.available(self):
return
if event.inaxes != self.ax:
self.linev.set_visible(False)
self.lineh.set_visible(False)
if self.needclear:
self.canvas.draw()
self.needclear = False
return
self.needclear = True
if not self.visible: return
self.linev.set_xdata((event.xdata, event.xdata))
self.lineh.set_ydata((event.ydata, event.ydata))
self.linev.set_visible(self.visible and self.vertOn)
self.lineh.set_visible(self.visible and self.horizOn)
self._update()
def _update(self):
if self.useblit:
if self.background is not None:
self.canvas.restore_region(self.background)
self.ax.draw_artist(self.linev)
self.ax.draw_artist(self.lineh)
self.canvas.blit(self.ax.bbox)
else:
self.canvas.draw_idle()
return False
class MultiCursor(Widget):
"""
Provide a vertical line cursor shared between multiple axes
Example usage::
from matplotlib.widgets import MultiCursor
from pylab import figure, show, np
t = np.arange(0.0, 2.0, 0.01)
s1 = np.sin(2*np.pi*t)
s2 = np.sin(4*np.pi*t)
fig = figure()
ax1 = fig.add_subplot(211)
ax1.plot(t, s1)
ax2 = fig.add_subplot(212, sharex=ax1)
ax2.plot(t, s2)
multi = MultiCursor(fig.canvas, (ax1, ax2), color='r', lw=1)
show()
"""
def __init__(self, canvas, axes, useblit=True, **lineprops):
self.canvas = canvas
self.axes = axes
xmin, xmax = axes[-1].get_xlim()
xmid = 0.5*(xmin+xmax)
if useblit:
lineprops['animated'] = True
self.lines = [ax.axvline(xmid, visible=False, **lineprops)
for ax in axes]
self.visible = True
self.useblit = useblit
self.background = None
self.needclear = False
self.canvas.mpl_connect('motion_notify_event', self.onmove)
self.canvas.mpl_connect('draw_event', self.clear)
def clear(self, event):
'clear the cursor'
if self.useblit:
self.background = self.canvas.copy_from_bbox(
self.canvas.figure.bbox)
for line in self.lines:
line.set_visible(False)
def onmove(self, event):
if event.inaxes is None:
return
if not self.canvas.widgetlock.available(self):
return
self.needclear = True
if not self.visible:
return
for line in self.lines:
line.set_xdata((event.xdata, event.xdata))
line.set_visible(self.visible)
self._update()
def _update(self):
if self.useblit:
if self.background is not None:
self.canvas.restore_region(self.background)
for ax, line in zip(self.axes, self.lines):
ax.draw_artist(line)
self.canvas.blit(self.canvas.figure.bbox)
else:
self.canvas.draw_idle()
class SpanSelector(AxesWidget):
"""
Select a min/max range of the x or y axes for a matplotlib Axes
Example usage::
ax = subplot(111)
ax.plot(x,y)
def onselect(vmin, vmax):
print vmin, vmax
span = SpanSelector(ax, onselect, 'horizontal')
*onmove_callback* is an optional callback that is called on mouse
move within the span range
"""
def __init__(self, ax, onselect, direction, minspan=None, useblit=False,
rectprops=None, onmove_callback=None):
"""
        Create a span selector in *ax*.  When a selection is made, the
        span is cleared and *onselect* is called with::
            onselect(vmin, vmax)
*direction* must be 'horizontal' or 'vertical'
If *minspan* is not *None*, ignore events smaller than *minspan*
The span rectangle is drawn with *rectprops*; default::
rectprops = dict(facecolor='red', alpha=0.5)
Set the visible attribute to *False* if you want to turn off
the functionality of the span selector
"""
AxesWidget.__init__(self, ax)
if rectprops is None:
rectprops = dict(facecolor='red', alpha=0.5)
assert direction in ['horizontal', 'vertical'], 'Must choose horizontal or vertical for direction'
self.direction = direction
self.visible = True
self.rect = None
self.background = None
self.pressv = None
self.rectprops = rectprops
self.onselect = onselect
self.onmove_callback = onmove_callback
self.useblit = useblit
self.minspan = minspan
# Needed when dragging out of axes
self.buttonDown = False
self.prev = (0, 0)
# Reset canvas so that `new_axes` connects events.
self.canvas = None
self.new_axes(ax)
def new_axes(self,ax):
self.ax = ax
if self.canvas is not ax.figure.canvas:
self.disconnect_events()
self.canvas = ax.figure.canvas
self.connect_event('motion_notify_event', self.onmove)
self.connect_event('button_press_event', self.press)
self.connect_event('button_release_event', self.release)
self.connect_event('draw_event', self.update_background)
if self.direction == 'horizontal':
trans = blended_transform_factory(self.ax.transData, self.ax.transAxes)
w,h = 0,1
else:
trans = blended_transform_factory(self.ax.transAxes, self.ax.transData)
w,h = 1,0
self.rect = Rectangle( (0,0), w, h,
transform=trans,
visible=False,
**self.rectprops
)
if not self.useblit: self.ax.add_patch(self.rect)
def update_background(self, event):
'force an update of the background'
# If you add a call to `ignore` here, you'll want to check edge case:
# `release` can call a draw event even when `ignore` is True.
if self.useblit:
self.background = self.canvas.copy_from_bbox(self.ax.bbox)
def ignore(self, event):
'return *True* if *event* should be ignored'
widget_off = not self.visible or not self.active
non_event = event.inaxes!=self.ax or event.button !=1
return widget_off or non_event
def press(self, event):
'on button press event'
if self.ignore(event):
return
self.buttonDown = True
self.rect.set_visible(self.visible)
if self.direction == 'horizontal':
self.pressv = event.xdata
else:
self.pressv = event.ydata
return False
def release(self, event):
'on button release event'
if self.ignore(event) and not self.buttonDown:
return
if self.pressv is None:
return
self.buttonDown = False
self.rect.set_visible(False)
self.canvas.draw()
vmin = self.pressv
if self.direction == 'horizontal':
vmax = event.xdata or self.prev[0]
else:
vmax = event.ydata or self.prev[1]
if vmin>vmax: vmin, vmax = vmax, vmin
span = vmax - vmin
if self.minspan is not None and span<self.minspan: return
self.onselect(vmin, vmax)
self.pressv = None
return False
def update(self):
"""
Draw using newfangled blit or oldfangled draw depending
on *useblit*
"""
if self.useblit:
if self.background is not None:
self.canvas.restore_region(self.background)
self.ax.draw_artist(self.rect)
self.canvas.blit(self.ax.bbox)
else:
self.canvas.draw_idle()
return False
def onmove(self, event):
'on motion notify event'
if self.pressv is None or self.ignore(event):
return
x, y = event.xdata, event.ydata
self.prev = x, y
if self.direction == 'horizontal':
v = x
else:
v = y
minv, maxv = v, self.pressv
if minv>maxv: minv, maxv = maxv, minv
if self.direction == 'horizontal':
self.rect.set_x(minv)
self.rect.set_width(maxv-minv)
else:
self.rect.set_y(minv)
self.rect.set_height(maxv-minv)
if self.onmove_callback is not None:
vmin = self.pressv
if self.direction == 'horizontal':
vmax = event.xdata or self.prev[0]
else:
vmax = event.ydata or self.prev[1]
if vmin>vmax: vmin, vmax = vmax, vmin
self.onmove_callback(vmin, vmax)
self.update()
return False
# For backwards compatibility only!
class HorizontalSpanSelector(SpanSelector):
def __init__(self, ax, onselect, **kwargs):
import warnings
warnings.warn('Use SpanSelector instead!', mplDeprecation)
SpanSelector.__init__(self, ax, onselect, 'horizontal', **kwargs)
class RectangleSelector(AxesWidget):
"""
    Select a rectangular region of a matplotlib Axes
Example usage::
from matplotlib.widgets import RectangleSelector
from pylab import *
def onselect(eclick, erelease):
'eclick and erelease are matplotlib events at press and release'
print ' startposition : (%f, %f)' % (eclick.xdata, eclick.ydata)
print ' endposition : (%f, %f)' % (erelease.xdata, erelease.ydata)
print ' used button : ', eclick.button
def toggle_selector(event):
print ' Key pressed.'
if event.key in ['Q', 'q'] and toggle_selector.RS.active:
print ' RectangleSelector deactivated.'
toggle_selector.RS.set_active(False)
if event.key in ['A', 'a'] and not toggle_selector.RS.active:
print ' RectangleSelector activated.'
toggle_selector.RS.set_active(True)
x = arange(100)/(99.0)
y = sin(x)
        fig = figure()
ax = subplot(111)
ax.plot(x,y)
toggle_selector.RS = RectangleSelector(ax, onselect, drawtype='line')
connect('key_press_event', toggle_selector)
show()
"""
def __init__(self, ax, onselect, drawtype='box',
minspanx=None, minspany=None, useblit=False,
lineprops=None, rectprops=None, spancoords='data',
button=None):
"""
        Create a selector in *ax*.  When a selection is made, the drawn
        box/line is cleared and *onselect* is called with::
            onselect(eclick, erelease)
        where ``eclick`` and ``erelease`` are the matplotlib mouse events
        at press and release (see the example in the class docstring).
If *minspanx* is not *None* then events smaller than *minspanx*
in x direction are ignored (it's the same for y).
The rectangle is drawn with *rectprops*; default::
rectprops = dict(facecolor='red', edgecolor = 'black',
alpha=0.5, fill=False)
The line is drawn with *lineprops*; default::
lineprops = dict(color='black', linestyle='-',
linewidth = 2, alpha=0.5)
Use *drawtype* if you want the mouse to draw a line,
a box or nothing between click and actual position by setting
``drawtype = 'line'``, ``drawtype='box'`` or ``drawtype = 'none'``.
*spancoords* is one of 'data' or 'pixels'. If 'data', *minspanx*
        and *minspany* will be interpreted in the same coordinates as
the x and y axis. If 'pixels', they are in pixels.
*button* is a list of integers indicating which mouse buttons should
be used for rectangle selection. You can also specify a single
integer if only a single button is desired. Default is *None*,
which does not limit which button can be used.
Note, typically:
1 = left mouse button
2 = center mouse button (scroll wheel)
3 = right mouse button
"""
AxesWidget.__init__(self, ax)
self.visible = True
self.connect_event('motion_notify_event', self.onmove)
self.connect_event('button_press_event', self.press)
self.connect_event('button_release_event', self.release)
self.connect_event('draw_event', self.update_background)
self.active = True # for activation / deactivation
self.to_draw = None
self.background = None
if drawtype == 'none':
drawtype = 'line' # draw a line but make it
self.visible = False # invisible
if drawtype == 'box':
if rectprops is None:
rectprops = dict(facecolor='white', edgecolor = 'black',
alpha=0.5, fill=False)
self.rectprops = rectprops
self.to_draw = Rectangle((0,0), 0, 1,visible=False,**self.rectprops)
self.ax.add_patch(self.to_draw)
if drawtype == 'line':
if lineprops is None:
lineprops = dict(color='black', linestyle='-',
linewidth = 2, alpha=0.5)
self.lineprops = lineprops
self.to_draw = Line2D([0,0],[0,0],visible=False,**self.lineprops)
self.ax.add_line(self.to_draw)
self.onselect = onselect
self.useblit = useblit
self.minspanx = minspanx
self.minspany = minspany
if button is None or isinstance(button, list):
self.validButtons = button
elif isinstance(button, int):
self.validButtons = [button]
assert(spancoords in ('data', 'pixels'))
self.spancoords = spancoords
self.drawtype = drawtype
# will save the data (position at mouseclick)
self.eventpress = None
# will save the data (pos. at mouserelease)
self.eventrelease = None
def update_background(self, event):
'force an update of the background'
if self.useblit:
self.background = self.canvas.copy_from_bbox(self.ax.bbox)
def ignore(self, event):
'return *True* if *event* should be ignored'
if not self.active:
return True
# If canvas was locked
if not self.canvas.widgetlock.available(self):
return True
# Only do rectangle selection if event was triggered
# with a desired button
if self.validButtons is not None:
if not event.button in self.validButtons:
return True
# If no button was pressed yet ignore the event if it was out
# of the axes
if self.eventpress == None:
return event.inaxes!= self.ax
# If a button was pressed, check if the release-button is the
# same. If event is out of axis, limit the data coordinates to axes
# boundaries.
if event.button == self.eventpress.button and event.inaxes != self.ax:
(xdata, ydata) = self.ax.transData.inverted().transform_point((event.x, event.y))
x0, x1 = self.ax.get_xbound()
y0, y1 = self.ax.get_ybound()
xdata = max(x0, xdata)
xdata = min(x1, xdata)
ydata = max(y0, ydata)
ydata = min(y1, ydata)
event.xdata = xdata
event.ydata = ydata
return False
# If a button was pressed, check if the release-button is the
# same.
return (event.inaxes!=self.ax or
event.button != self.eventpress.button)
def press(self, event):
'on button press event'
if self.ignore(event):
return
        # make the drawn box/line visible and store the press event
        # (click coordinates, button, ...)
self.to_draw.set_visible(self.visible)
self.eventpress = event
return False
def release(self, event):
'on button release event'
if self.eventpress is None or self.ignore(event):
return
# make the box/line invisible again
self.to_draw.set_visible(False)
self.canvas.draw()
# release coordinates, button, ...
self.eventrelease = event
if self.spancoords=='data':
xmin, ymin = self.eventpress.xdata, self.eventpress.ydata
xmax, ymax = self.eventrelease.xdata, self.eventrelease.ydata
# calculate dimensions of box or line get values in the right
# order
elif self.spancoords=='pixels':
xmin, ymin = self.eventpress.x, self.eventpress.y
xmax, ymax = self.eventrelease.x, self.eventrelease.y
else:
raise ValueError('spancoords must be "data" or "pixels"')
if xmin>xmax: xmin, xmax = xmax, xmin
if ymin>ymax: ymin, ymax = ymax, ymin
spanx = xmax - xmin
spany = ymax - ymin
xproblems = self.minspanx is not None and spanx<self.minspanx
yproblems = self.minspany is not None and spany<self.minspany
        # check that the drawn span (if any) is not too small: a box is
        # rejected if it is too small in either direction, a line only if
        # it is too small in both directions
        if (self.drawtype == 'box') and (xproblems or yproblems):
            return
        if (self.drawtype == 'line') and (xproblems and yproblems):
            return
self.onselect(self.eventpress, self.eventrelease)
# call desired function
        self.eventpress = None                # reset the variables to their
        self.eventrelease = None              # initial values
return False
def update(self):
'draw using newfangled blit or oldfangled draw depending on useblit'
if self.useblit:
if self.background is not None:
self.canvas.restore_region(self.background)
self.ax.draw_artist(self.to_draw)
self.canvas.blit(self.ax.bbox)
else:
self.canvas.draw_idle()
return False
def onmove(self, event):
'on motion notify event if box/line is wanted'
if self.eventpress is None or self.ignore(event):
return
x,y = event.xdata, event.ydata # actual position (with
# (button still pressed)
if self.drawtype == 'box':
minx, maxx = self.eventpress.xdata, x # click-x and actual mouse-x
miny, maxy = self.eventpress.ydata, y # click-y and actual mouse-y
if minx>maxx: minx, maxx = maxx, minx # get them in the right order
if miny>maxy: miny, maxy = maxy, miny
self.to_draw.set_x(minx) # set lower left of box
self.to_draw.set_y(miny)
self.to_draw.set_width(maxx-minx) # set width and height of box
self.to_draw.set_height(maxy-miny)
self.update()
return False
if self.drawtype == 'line':
self.to_draw.set_data([self.eventpress.xdata, x],
[self.eventpress.ydata, y])
self.update()
return False
def set_active(self, active):
"""
Use this to activate / deactivate the RectangleSelector
from your program with an boolean parameter *active*.
"""
self.active = active
def get_active(self):
""" Get status of active mode (boolean variable)"""
return self.active
class LassoSelector(AxesWidget):
"""Selection curve of an arbitrary shape.
The selected path can be used in conjunction with
:func:`~matplotlib.path.Path.contains_point` to select
data points from an image.
In contrast to :class:`Lasso`, `LassoSelector` is written with an interface
similar to :class:`RectangleSelector` and :class:`SpanSelector` and will
continue to interact with the axes until disconnected.
Parameters:
*ax* : :class:`~matplotlib.axes.Axes`
The parent axes for the widget.
*onselect* : function
Whenever the lasso is released, the `onselect` function is called and
passed the vertices of the selected path.
Example usage::
ax = subplot(111)
ax.plot(x,y)
def onselect(verts):
print verts
lasso = LassoSelector(ax, onselect)
"""
def __init__(self, ax, onselect=None, useblit=True, lineprops=None):
AxesWidget.__init__(self, ax)
self.useblit = useblit
self.onselect = onselect
self.verts = None
if lineprops is None:
lineprops = dict()
self.line = Line2D([], [], **lineprops)
self.line.set_visible(False)
self.ax.add_line(self.line)
self.connect_event('button_press_event', self.onpress)
self.connect_event('button_release_event', self.onrelease)
self.connect_event('motion_notify_event', self.onmove)
self.connect_event('draw_event', self.update_background)
def ignore(self, event):
wrong_button = hasattr(event, 'button') and event.button != 1
return not self.active or wrong_button
def onpress(self, event):
if self.ignore(event) or event.inaxes != self.ax:
return
self.verts = [(event.xdata, event.ydata)]
self.line.set_visible(True)
def onrelease(self, event):
if self.ignore(event):
return
if self.verts is not None:
if event.inaxes == self.ax:
self.verts.append((event.xdata, event.ydata))
self.onselect(self.verts)
self.line.set_data([[], []])
self.line.set_visible(False)
self.verts = None
def onmove(self, event):
if self.ignore(event) or event.inaxes != self.ax:
return
if self.verts is None: return
if event.inaxes != self.ax: return
if event.button!=1: return
self.verts.append((event.xdata, event.ydata))
self.line.set_data(zip(*self.verts))
if self.useblit:
self.canvas.restore_region(self.background)
self.ax.draw_artist(self.line)
self.canvas.blit(self.ax.bbox)
else:
self.canvas.draw_idle()
def update_background(self, event):
if self.ignore(event):
return
if self.useblit:
self.background = self.canvas.copy_from_bbox(self.ax.bbox)
class Lasso(AxesWidget):
"""Selection curve of an arbitrary shape.
The selected path can be used in conjunction with
:func:`~matplotlib.path.Path.contains_point` to select data points
from an image.
Unlike :class:`LassoSelector`, this must be initialized with a starting
point `xy`, and the `Lasso` events are destroyed upon release.
Parameters:
*ax* : :class:`~matplotlib.axes.Axes`
The parent axes for the widget.
*xy* : array
Coordinates of the start of the lasso.
*callback* : function
Whenever the lasso is released, the `callback` function is called and
passed the vertices of the selected path.
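    Example usage (an illustrative sketch modeled on the lasso demo; the
    ``onpress`` handler and the ``fig``/``ax`` variables are assumed to
    exist in the calling code)::
        def callback(verts):
            print verts
        def onpress(event):
            if event.inaxes is None:
                return
            # keep a reference so the lasso is not garbage collected
            ax.lasso = Lasso(event.inaxes, (event.xdata, event.ydata),
                             callback)
        fig.canvas.mpl_connect('button_press_event', onpress)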
"""
def __init__(self, ax, xy, callback=None, useblit=True):
AxesWidget.__init__(self, ax)
self.useblit = useblit
if useblit:
self.background = self.canvas.copy_from_bbox(self.ax.bbox)
x, y = xy
self.verts = [(x,y)]
self.line = Line2D([x], [y], linestyle='-', color='black', lw=2)
self.ax.add_line(self.line)
self.callback = callback
self.connect_event('button_release_event', self.onrelease)
self.connect_event('motion_notify_event', self.onmove)
def onrelease(self, event):
if self.ignore(event):
return
if self.verts is not None:
self.verts.append((event.xdata, event.ydata))
if len(self.verts)>2:
self.callback(self.verts)
self.ax.lines.remove(self.line)
self.verts = None
self.disconnect_events()
def onmove(self, event):
if self.ignore(event):
return
if self.verts is None: return
if event.inaxes != self.ax: return
if event.button!=1: return
self.verts.append((event.xdata, event.ydata))
self.line.set_data(zip(*self.verts))
if self.useblit:
self.canvas.restore_region(self.background)
self.ax.draw_artist(self.line)
self.canvas.blit(self.ax.bbox)
else:
self.canvas.draw_idle()
| mit |
Djabbz/scikit-learn | sklearn/datasets/tests/test_lfw.py | 230 | 7880 | """These tests for the LFW dataset require medium-size data downloading and processing
If the data has not already been downloaded by running the examples,
the tests won't run (skipped).
If the tests are run, the first execution will be long (typically a bit
more than a couple of minutes) but as the dataset loader is leveraging
joblib, successive runs will be fast (less than 200ms).
"""
import random
import os
import shutil
import tempfile
import numpy as np
from sklearn.externals import six
try:
try:
from scipy.misc import imsave
except ImportError:
from scipy.misc.pilutil import imsave
except ImportError:
imsave = None
from sklearn.datasets import load_lfw_pairs
from sklearn.datasets import load_lfw_people
from sklearn.datasets import fetch_lfw_pairs
from sklearn.datasets import fetch_lfw_people
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import raises
SCIKIT_LEARN_DATA = tempfile.mkdtemp(prefix="scikit_learn_lfw_test_")
SCIKIT_LEARN_EMPTY_DATA = tempfile.mkdtemp(prefix="scikit_learn_empty_test_")
LFW_HOME = os.path.join(SCIKIT_LEARN_DATA, 'lfw_home')
FAKE_NAMES = [
'Abdelatif_Smith',
'Abhati_Kepler',
'Camara_Alvaro',
'Chen_Dupont',
'John_Lee',
'Lin_Bauman',
'Onur_Lopez',
]
def setup_module():
"""Test fixture run once and common to all tests of this module"""
if imsave is None:
raise SkipTest("PIL not installed.")
if not os.path.exists(LFW_HOME):
os.makedirs(LFW_HOME)
random_state = random.Random(42)
np_rng = np.random.RandomState(42)
# generate some random jpeg files for each person
counts = {}
for name in FAKE_NAMES:
folder_name = os.path.join(LFW_HOME, 'lfw_funneled', name)
if not os.path.exists(folder_name):
os.makedirs(folder_name)
n_faces = np_rng.randint(1, 5)
counts[name] = n_faces
for i in range(n_faces):
file_path = os.path.join(folder_name, name + '_%04d.jpg' % i)
uniface = np_rng.randint(0, 255, size=(250, 250, 3))
try:
imsave(file_path, uniface)
except ImportError:
raise SkipTest("PIL not installed")
# add some random file pollution to test robustness
with open(os.path.join(LFW_HOME, 'lfw_funneled', '.test.swp'), 'wb') as f:
f.write(six.b('Text file to be ignored by the dataset loader.'))
# generate some pairing metadata files using the same format as LFW
with open(os.path.join(LFW_HOME, 'pairsDevTrain.txt'), 'wb') as f:
f.write(six.b("10\n"))
more_than_two = [name for name, count in six.iteritems(counts)
if count >= 2]
for i in range(5):
name = random_state.choice(more_than_two)
first, second = random_state.sample(range(counts[name]), 2)
f.write(six.b('%s\t%d\t%d\n' % (name, first, second)))
for i in range(5):
first_name, second_name = random_state.sample(FAKE_NAMES, 2)
first_index = random_state.choice(np.arange(counts[first_name]))
second_index = random_state.choice(np.arange(counts[second_name]))
f.write(six.b('%s\t%d\t%s\t%d\n' % (first_name, first_index,
second_name, second_index)))
with open(os.path.join(LFW_HOME, 'pairsDevTest.txt'), 'wb') as f:
f.write(six.b("Fake place holder that won't be tested"))
with open(os.path.join(LFW_HOME, 'pairs.txt'), 'wb') as f:
f.write(six.b("Fake place holder that won't be tested"))
def teardown_module():
"""Test fixture (clean up) run once after all tests of this module"""
if os.path.isdir(SCIKIT_LEARN_DATA):
shutil.rmtree(SCIKIT_LEARN_DATA)
if os.path.isdir(SCIKIT_LEARN_EMPTY_DATA):
shutil.rmtree(SCIKIT_LEARN_EMPTY_DATA)
@raises(IOError)
def test_load_empty_lfw_people():
fetch_lfw_people(data_home=SCIKIT_LEARN_EMPTY_DATA, download_if_missing=False)
def test_load_lfw_people_deprecation():
msg = ("Function 'load_lfw_people' has been deprecated in 0.17 and will be "
"removed in 0.19."
"Use fetch_lfw_people(download_if_missing=False) instead.")
assert_warns_message(DeprecationWarning, msg, load_lfw_people,
data_home=SCIKIT_LEARN_DATA)
def test_load_fake_lfw_people():
lfw_people = fetch_lfw_people(data_home=SCIKIT_LEARN_DATA,
min_faces_per_person=3, download_if_missing=False)
    # The data is cropped around the center as a rectangular bounding box
    # around the face. Colors are converted to gray levels:
assert_equal(lfw_people.images.shape, (10, 62, 47))
assert_equal(lfw_people.data.shape, (10, 2914))
# the target is array of person integer ids
assert_array_equal(lfw_people.target, [2, 0, 1, 0, 2, 0, 2, 1, 1, 2])
# names of the persons can be found using the target_names array
expected_classes = ['Abdelatif Smith', 'Abhati Kepler', 'Onur Lopez']
assert_array_equal(lfw_people.target_names, expected_classes)
    # It is possible to ask for the original data without any cropping or
    # color conversion and no limit on the number of pictures per person
lfw_people = fetch_lfw_people(data_home=SCIKIT_LEARN_DATA,
resize=None, slice_=None, color=True, download_if_missing=False)
assert_equal(lfw_people.images.shape, (17, 250, 250, 3))
# the ids and class names are the same as previously
assert_array_equal(lfw_people.target,
[0, 0, 1, 6, 5, 6, 3, 6, 0, 3, 6, 1, 2, 4, 5, 1, 2])
assert_array_equal(lfw_people.target_names,
['Abdelatif Smith', 'Abhati Kepler', 'Camara Alvaro',
'Chen Dupont', 'John Lee', 'Lin Bauman', 'Onur Lopez'])
@raises(ValueError)
def test_load_fake_lfw_people_too_restrictive():
fetch_lfw_people(data_home=SCIKIT_LEARN_DATA, min_faces_per_person=100, download_if_missing=False)
@raises(IOError)
def test_load_empty_lfw_pairs():
fetch_lfw_pairs(data_home=SCIKIT_LEARN_EMPTY_DATA, download_if_missing=False)
def test_load_lfw_pairs_deprecation():
msg = ("Function 'load_lfw_pairs' has been deprecated in 0.17 and will be "
"removed in 0.19."
"Use fetch_lfw_pairs(download_if_missing=False) instead.")
assert_warns_message(DeprecationWarning, msg, load_lfw_pairs,
data_home=SCIKIT_LEARN_DATA)
def test_load_fake_lfw_pairs():
lfw_pairs_train = fetch_lfw_pairs(data_home=SCIKIT_LEARN_DATA, download_if_missing=False)
    # The data is cropped around the center as a rectangular bounding box
    # around the face. Colors are converted to gray levels:
assert_equal(lfw_pairs_train.pairs.shape, (10, 2, 62, 47))
# the target is whether the person is the same or not
assert_array_equal(lfw_pairs_train.target, [1, 1, 1, 1, 1, 0, 0, 0, 0, 0])
# names of the persons can be found using the target_names array
expected_classes = ['Different persons', 'Same person']
assert_array_equal(lfw_pairs_train.target_names, expected_classes)
    # It is possible to ask for the original data without any cropping or
    # color conversion
lfw_pairs_train = fetch_lfw_pairs(data_home=SCIKIT_LEARN_DATA,
resize=None, slice_=None, color=True, download_if_missing=False)
assert_equal(lfw_pairs_train.pairs.shape, (10, 2, 250, 250, 3))
# the ids and class names are the same as previously
assert_array_equal(lfw_pairs_train.target, [1, 1, 1, 1, 1, 0, 0, 0, 0, 0])
assert_array_equal(lfw_pairs_train.target_names, expected_classes)
| bsd-3-clause |
mjudsp/Tsallis | sklearn/datasets/lfw.py | 31 | 19544 | """Loader for the Labeled Faces in the Wild (LFW) dataset
This dataset is a collection of JPEG pictures of famous people collected
over the internet, all details are available on the official website:
http://vis-www.cs.umass.edu/lfw/
Each picture is centered on a single face. The typical task is called
Face Verification: given a pair of two pictures, a binary classifier
must predict whether the two images are from the same person.
An alternative task, Face Recognition or Face Identification is:
given the picture of the face of an unknown person, identify the name
of the person by referring to a gallery of previously seen pictures of
identified persons.
Both Face Verification and Face Recognition are tasks that are typically
performed on the output of a model trained to perform Face Detection. The
most popular model for Face Detection is called Viola-Jones and is
implemented in the OpenCV library. The LFW faces were extracted by this face
detector from various online websites.
"""
# Copyright (c) 2011 Olivier Grisel <[email protected]>
# License: BSD 3 clause
from os import listdir, makedirs, remove
from os.path import join, exists, isdir
from sklearn.utils import deprecated
import logging
import numpy as np
try:
import urllib.request as urllib # for backwards compatibility
except ImportError:
import urllib
from .base import get_data_home, Bunch
from ..externals.joblib import Memory
from ..externals.six import b
logger = logging.getLogger(__name__)
BASE_URL = "http://vis-www.cs.umass.edu/lfw/"
ARCHIVE_NAME = "lfw.tgz"
FUNNELED_ARCHIVE_NAME = "lfw-funneled.tgz"
TARGET_FILENAMES = [
'pairsDevTrain.txt',
'pairsDevTest.txt',
'pairs.txt',
]
def scale_face(face):
"""Scale back to 0-1 range in case of normalization for plotting"""
scaled = face - face.min()
scaled /= scaled.max()
return scaled
#
# Common private utilities for data fetching from the original LFW website
# local disk caching, and image decoding.
#
def check_fetch_lfw(data_home=None, funneled=True, download_if_missing=True):
"""Helper function to download any missing LFW data"""
data_home = get_data_home(data_home=data_home)
lfw_home = join(data_home, "lfw_home")
if funneled:
archive_path = join(lfw_home, FUNNELED_ARCHIVE_NAME)
data_folder_path = join(lfw_home, "lfw_funneled")
archive_url = BASE_URL + FUNNELED_ARCHIVE_NAME
else:
archive_path = join(lfw_home, ARCHIVE_NAME)
data_folder_path = join(lfw_home, "lfw")
archive_url = BASE_URL + ARCHIVE_NAME
if not exists(lfw_home):
makedirs(lfw_home)
for target_filename in TARGET_FILENAMES:
target_filepath = join(lfw_home, target_filename)
if not exists(target_filepath):
if download_if_missing:
url = BASE_URL + target_filename
logger.warning("Downloading LFW metadata: %s", url)
urllib.urlretrieve(url, target_filepath)
else:
raise IOError("%s is missing" % target_filepath)
if not exists(data_folder_path):
if not exists(archive_path):
if download_if_missing:
logger.warning("Downloading LFW data (~200MB): %s",
archive_url)
urllib.urlretrieve(archive_url, archive_path)
else:
                raise IOError("%s is missing" % archive_path)
import tarfile
logger.info("Decompressing the data archive to %s", data_folder_path)
tarfile.open(archive_path, "r:gz").extractall(path=lfw_home)
remove(archive_path)
return lfw_home, data_folder_path
def _load_imgs(file_paths, slice_, color, resize):
"""Internally used to load images"""
# Try to import imread and imresize from PIL. We do this here to prevent
# the whole sklearn.datasets module from depending on PIL.
try:
try:
from scipy.misc import imread
except ImportError:
from scipy.misc.pilutil import imread
from scipy.misc import imresize
except ImportError:
raise ImportError("The Python Imaging Library (PIL)"
" is required to load data from jpeg files")
# compute the portion of the images to load to respect the slice_ parameter
# given by the caller
default_slice = (slice(0, 250), slice(0, 250))
if slice_ is None:
slice_ = default_slice
else:
slice_ = tuple(s or ds for s, ds in zip(slice_, default_slice))
h_slice, w_slice = slice_
h = (h_slice.stop - h_slice.start) // (h_slice.step or 1)
w = (w_slice.stop - w_slice.start) // (w_slice.step or 1)
if resize is not None:
resize = float(resize)
h = int(resize * h)
w = int(resize * w)
# allocate some contiguous memory to host the decoded image slices
n_faces = len(file_paths)
if not color:
faces = np.zeros((n_faces, h, w), dtype=np.float32)
else:
faces = np.zeros((n_faces, h, w, 3), dtype=np.float32)
# iterate over the collected file path to load the jpeg files as numpy
# arrays
for i, file_path in enumerate(file_paths):
if i % 1000 == 0:
logger.info("Loading face #%05d / %05d", i + 1, n_faces)
# Checks if jpeg reading worked. Refer to issue #3594 for more
# details.
img = imread(file_path)
        if img.ndim == 0:
raise RuntimeError("Failed to read the image file %s, "
"Please make sure that libjpeg is installed"
% file_path)
face = np.asarray(img[slice_], dtype=np.float32)
face /= 255.0 # scale uint8 coded colors to the [0.0, 1.0] floats
if resize is not None:
face = imresize(face, resize)
if not color:
# average the color channels to compute a gray levels
# representation
face = face.mean(axis=2)
faces[i, ...] = face
return faces
#
# Task #1: Face Identification on picture with names
#
def _fetch_lfw_people(data_folder_path, slice_=None, color=False, resize=None,
min_faces_per_person=0):
"""Perform the actual data loading for the lfw people dataset
This operation is meant to be cached by a joblib wrapper.
"""
# scan the data folder content to retain people with more that
# `min_faces_per_person` face pictures
person_names, file_paths = [], []
for person_name in sorted(listdir(data_folder_path)):
folder_path = join(data_folder_path, person_name)
if not isdir(folder_path):
continue
paths = [join(folder_path, f) for f in listdir(folder_path)]
n_pictures = len(paths)
if n_pictures >= min_faces_per_person:
person_name = person_name.replace('_', ' ')
person_names.extend([person_name] * n_pictures)
file_paths.extend(paths)
n_faces = len(file_paths)
if n_faces == 0:
raise ValueError("min_faces_per_person=%d is too restrictive" %
min_faces_per_person)
target_names = np.unique(person_names)
target = np.searchsorted(target_names, person_names)
faces = _load_imgs(file_paths, slice_, color, resize)
# shuffle the faces with a deterministic RNG scheme to avoid having
# all faces of the same person in a row, as it would break some
# cross validation and learning algorithms such as SGD and online
# k-means that make an IID assumption
indices = np.arange(n_faces)
np.random.RandomState(42).shuffle(indices)
faces, target = faces[indices], target[indices]
return faces, target, target_names
def fetch_lfw_people(data_home=None, funneled=True, resize=0.5,
min_faces_per_person=0, color=False,
slice_=(slice(70, 195), slice(78, 172)),
download_if_missing=True):
"""Loader for the Labeled Faces in the Wild (LFW) people dataset
This dataset is a collection of JPEG pictures of famous people
collected on the internet, all details are available on the
official website:
http://vis-www.cs.umass.edu/lfw/
Each picture is centered on a single face. Each pixel of each channel
(color in RGB) is encoded by a float in range 0.0 - 1.0.
The task is called Face Recognition (or Identification): given the
picture of a face, find the name of the person given a training set
(gallery).
The original images are 250 x 250 pixels, but the default slice and resize
    arguments reduce them to 62 x 47 pixels.
Parameters
----------
data_home : optional, default: None
Specify another download and cache folder for the datasets. By default
all scikit learn data is stored in '~/scikit_learn_data' subfolders.
funneled : boolean, optional, default: True
Download and use the funneled variant of the dataset.
resize : float, optional, default 0.5
        Ratio used to resize each face picture.
min_faces_per_person : int, optional, default None
The extracted dataset will only retain pictures of people that have at
least `min_faces_per_person` different pictures.
color : boolean, optional, default False
Keep the 3 RGB channels instead of averaging them to a single
gray level channel. If color is True the shape of the data has
        one more dimension than the shape with color = False.
slice_ : optional
Provide a custom 2D slice (height, width) to extract the
        'interesting' part of the jpeg files and avoid using statistical
        correlation from the background
download_if_missing : optional, True by default
        If False, raise an IOError if the data is not locally available
instead of trying to download the data from the source site.
Returns
-------
dataset : dict-like object with the following attributes:
dataset.data : numpy array of shape (13233, 2914)
Each row corresponds to a ravelled face image of original size 62 x 47
pixels. Changing the ``slice_`` or resize parameters will change the
shape of the output.
dataset.images : numpy array of shape (13233, 62, 47)
Each row is a face image corresponding to one of the 5749 people in
the dataset. Changing the ``slice_`` or resize parameters will change
the shape of the output.
dataset.target : numpy array of shape (13233,)
Labels associated to each face image. Those labels range from 0-5748
and correspond to the person IDs.
dataset.DESCR : string
Description of the Labeled Faces in the Wild (LFW) dataset.
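    Examples
    --------
    A minimal usage sketch (assumes the data can be downloaded or is
    already cached locally; shapes depend on the parameters used)::
        from sklearn.datasets import fetch_lfw_people
        people = fetch_lfw_people(min_faces_per_person=70, resize=0.4)
        print(people.images.shape)   # (n_samples, height, width)
        print(people.target_names)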
"""
lfw_home, data_folder_path = check_fetch_lfw(
data_home=data_home, funneled=funneled,
download_if_missing=download_if_missing)
logger.info('Loading LFW people faces from %s', lfw_home)
# wrap the loader in a memoizing function that will return memmaped data
# arrays for optimal memory usage
m = Memory(cachedir=lfw_home, compress=6, verbose=0)
load_func = m.cache(_fetch_lfw_people)
# load and memoize the pairs as np arrays
faces, target, target_names = load_func(
data_folder_path, resize=resize,
min_faces_per_person=min_faces_per_person, color=color, slice_=slice_)
# pack the results as a Bunch instance
return Bunch(data=faces.reshape(len(faces), -1), images=faces,
target=target, target_names=target_names,
DESCR="LFW faces dataset")
#
# Task #2: Face Verification on pairs of face pictures
#
def _fetch_lfw_pairs(index_file_path, data_folder_path, slice_=None,
color=False, resize=None):
"""Perform the actual data loading for the LFW pairs dataset
This operation is meant to be cached by a joblib wrapper.
"""
# parse the index file to find the number of pairs to be able to allocate
# the right amount of memory before starting to decode the jpeg files
with open(index_file_path, 'rb') as index_file:
split_lines = [ln.strip().split(b('\t')) for ln in index_file]
pair_specs = [sl for sl in split_lines if len(sl) > 2]
n_pairs = len(pair_specs)
# iterating over the metadata lines for each pair to find the filename to
# decode and load in memory
target = np.zeros(n_pairs, dtype=np.int)
file_paths = list()
for i, components in enumerate(pair_specs):
if len(components) == 3:
target[i] = 1
pair = (
(components[0], int(components[1]) - 1),
(components[0], int(components[2]) - 1),
)
elif len(components) == 4:
target[i] = 0
pair = (
(components[0], int(components[1]) - 1),
(components[2], int(components[3]) - 1),
)
else:
raise ValueError("invalid line %d: %r" % (i + 1, components))
for j, (name, idx) in enumerate(pair):
try:
person_folder = join(data_folder_path, name)
except TypeError:
person_folder = join(data_folder_path, str(name, 'UTF-8'))
filenames = list(sorted(listdir(person_folder)))
file_path = join(person_folder, filenames[idx])
file_paths.append(file_path)
pairs = _load_imgs(file_paths, slice_, color, resize)
shape = list(pairs.shape)
n_faces = shape.pop(0)
shape.insert(0, 2)
shape.insert(0, n_faces // 2)
pairs.shape = shape
return pairs, target, np.array(['Different persons', 'Same person'])
@deprecated("Function 'load_lfw_people' has been deprecated in 0.17 and will "
"be removed in 0.19."
"Use fetch_lfw_people(download_if_missing=False) instead.")
def load_lfw_people(download_if_missing=False, **kwargs):
"""Alias for fetch_lfw_people(download_if_missing=False)
Check fetch_lfw_people.__doc__ for the documentation and parameter list.
"""
return fetch_lfw_people(download_if_missing=download_if_missing, **kwargs)
def fetch_lfw_pairs(subset='train', data_home=None, funneled=True, resize=0.5,
color=False, slice_=(slice(70, 195), slice(78, 172)),
download_if_missing=True):
"""Loader for the Labeled Faces in the Wild (LFW) pairs dataset
This dataset is a collection of JPEG pictures of famous people
collected on the internet, all details are available on the
official website:
http://vis-www.cs.umass.edu/lfw/
Each picture is centered on a single face. Each pixel of each channel
(color in RGB) is encoded by a float in range 0.0 - 1.0.
The task is called Face Verification: given a pair of two pictures,
a binary classifier must predict whether the two images are from
the same person.
    In the official `README.txt`_ this task is described as the
    "Restricted" task. As I am not sure how to implement the
    "Unrestricted" variant correctly, I left it as unsupported for now.
.. _`README.txt`: http://vis-www.cs.umass.edu/lfw/README.txt
The original images are 250 x 250 pixels, but the default slice and resize
    arguments reduce them to 62 x 47 pixels.
Read more in the :ref:`User Guide <labeled_faces_in_the_wild>`.
Parameters
----------
subset : optional, default: 'train'
Select the dataset to load: 'train' for the development training
set, 'test' for the development test set, and '10_folds' for the
official evaluation set that is meant to be used with a 10-folds
cross validation.
data_home : optional, default: None
Specify another download and cache folder for the datasets. By
default all scikit learn data is stored in '~/scikit_learn_data'
subfolders.
funneled : boolean, optional, default: True
Download and use the funneled variant of the dataset.
resize : float, optional, default 0.5
        Ratio used to resize each face picture.
color : boolean, optional, default False
Keep the 3 RGB channels instead of averaging them to a single
gray level channel. If color is True the shape of the data has
        one more dimension than the shape with color = False.
slice_ : optional
Provide a custom 2D slice (height, width) to extract the
        'interesting' part of the jpeg files and avoid using statistical
        correlation from the background
download_if_missing : optional, True by default
        If False, raise an IOError if the data is not locally available
instead of trying to download the data from the source site.
Returns
-------
The data is returned as a Bunch object with the following attributes:
data : numpy array of shape (2200, 5828). Shape depends on ``subset``.
Each row corresponds to 2 ravel'd face images of original size 62 x 47
pixels. Changing the ``slice_``, ``resize`` or ``subset`` parameters
will change the shape of the output.
pairs : numpy array of shape (2200, 2, 62, 47). Shape depends on
``subset``.
Each row has 2 face images corresponding to same or different person
from the dataset containing 5749 people. Changing the ``slice_``,
``resize`` or ``subset`` parameters will change the shape of the
output.
target : numpy array of shape (2200,). Shape depends on ``subset``.
        Labels associated with each pair of images. The two label values
        correspond to "Different persons" and "Same person".
DESCR : string
Description of the Labeled Faces in the Wild (LFW) dataset.
"""
lfw_home, data_folder_path = check_fetch_lfw(
data_home=data_home, funneled=funneled,
download_if_missing=download_if_missing)
logger.info('Loading %s LFW pairs from %s', subset, lfw_home)
# wrap the loader in a memoizing function that will return memmaped data
# arrays for optimal memory usage
m = Memory(cachedir=lfw_home, compress=6, verbose=0)
load_func = m.cache(_fetch_lfw_pairs)
# select the right metadata file according to the requested subset
label_filenames = {
'train': 'pairsDevTrain.txt',
'test': 'pairsDevTest.txt',
'10_folds': 'pairs.txt',
}
if subset not in label_filenames:
raise ValueError("subset='%s' is invalid: should be one of %r" % (
subset, list(sorted(label_filenames.keys()))))
index_file_path = join(lfw_home, label_filenames[subset])
# load and memoize the pairs as np arrays
pairs, target, target_names = load_func(
index_file_path, data_folder_path, resize=resize, color=color,
slice_=slice_)
# pack the results as a Bunch instance
return Bunch(data=pairs.reshape(len(pairs), -1), pairs=pairs,
target=target, target_names=target_names,
DESCR="'%s' segment of the LFW pairs dataset" % subset)
@deprecated("Function 'load_lfw_pairs' has been deprecated in 0.17 and will "
"be removed in 0.19."
"Use fetch_lfw_pairs(download_if_missing=False) instead.")
def load_lfw_pairs(download_if_missing=False, **kwargs):
"""Alias for fetch_lfw_pairs(download_if_missing=False)
Check fetch_lfw_pairs.__doc__ for the documentation and parameter list.
"""
return fetch_lfw_pairs(download_if_missing=download_if_missing, **kwargs)
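# A minimal usage sketch (illustrative comment only, not part of this module).
# The shapes below are the defaults quoted in the docstring above and assume
# the 'train' subset with the default ``slice_`` and ``resize`` values:
#
#     >>> from sklearn.datasets import fetch_lfw_pairs
#     >>> lfw_pairs_train = fetch_lfw_pairs(subset='train')
#     >>> lfw_pairs_train.pairs.shape
#     (2200, 2, 62, 47)
#     >>> lfw_pairs_train.data.shape
#     (2200, 5828)
#     >>> lfw_pairs_train.target.shape
#     (2200,)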
| bsd-3-clause |
zfrenchee/pandas | pandas/io/formats/format.py | 1 | 90732 | # -*- coding: utf-8 -*-
"""
Internal module for formatting output data in csv, html,
and latex files. This module also applies to display formatting.
"""
from __future__ import print_function
from distutils.version import LooseVersion
# pylint: disable=W0141
from textwrap import dedent
from pandas.core.dtypes.missing import isna, notna
from pandas.core.dtypes.common import (
is_categorical_dtype,
is_float_dtype,
is_period_arraylike,
is_integer_dtype,
is_interval_dtype,
is_datetimetz,
is_integer,
is_float,
is_scalar,
is_numeric_dtype,
is_datetime64_dtype,
is_timedelta64_dtype,
is_list_like)
from pandas.core.dtypes.generic import ABCSparseArray
from pandas.core.base import PandasObject
from pandas.core.common import _any_not_none, sentinel_factory
from pandas.core.index import Index, MultiIndex, _ensure_index
from pandas import compat
from pandas.compat import (StringIO, lzip, range, map, zip, u,
OrderedDict, unichr)
from pandas.io.formats.terminal import get_terminal_size
from pandas.core.config import get_option, set_option
from pandas.io.common import (_get_handle, UnicodeWriter, _expand_user,
_stringify_path)
from pandas.io.formats.printing import adjoin, justify, pprint_thing
from pandas.io.formats.common import get_level_lengths
from pandas._libs import lib
from pandas._libs.tslib import (iNaT, Timestamp, Timedelta,
format_array_from_datetime)
from pandas.core.indexes.datetimes import DatetimeIndex
from pandas.core.indexes.period import PeriodIndex
import pandas as pd
import numpy as np
import csv
from functools import partial
common_docstring = """
Parameters
----------
buf : StringIO-like, optional
buffer to write to
columns : sequence, optional
the subset of columns to write; default None writes all columns
col_space : int, optional
the minimum width of each column
header : bool, optional
%(header)s
index : bool, optional
whether to print index (row) labels, default True
na_rep : string, optional
        string representation of NaN to use, default 'NaN'
formatters : list or dict of one-parameter functions, optional
formatter functions to apply to columns' elements by position or name,
default None. The result of each function must be a unicode string.
List must be of length equal to the number of columns.
float_format : one-parameter function, optional
formatter function to apply to columns' elements if they are floats,
default None. The result of this function must be a unicode string.
sparsify : bool, optional
Set to False for a DataFrame with a hierarchical index to print every
multiindex key at each row, default True
index_names : bool, optional
Prints the names of the indexes, default True
line_width : int, optional
Width to wrap a line in characters, default no wrap"""
_VALID_JUSTIFY_PARAMETERS = ("left", "right", "center", "justify",
"justify-all", "start", "end", "inherit",
"match-parent", "initial", "unset")
justify_docstring = """
justify : str, default None
How to justify the column labels. If None uses the option from
the print configuration (controlled by set_option), 'right' out
of the box. Valid values are
* left
* right
* center
* justify
* justify-all
* start
* end
* inherit
* match-parent
* initial
* unset
"""
return_docstring = """
Returns
-------
formatted : string (or unicode, depending on data and options)"""
docstring_to_string = common_docstring + justify_docstring + return_docstring
class CategoricalFormatter(object):
def __init__(self, categorical, buf=None, length=True, na_rep='NaN',
footer=True):
self.categorical = categorical
self.buf = buf if buf is not None else StringIO(u(""))
self.na_rep = na_rep
self.length = length
self.footer = footer
def _get_footer(self):
footer = ''
if self.length:
if footer:
footer += ', '
footer += "Length: {length}".format(length=len(self.categorical))
level_info = self.categorical._repr_categories_info()
        # Levels are added on a new line
if footer:
footer += '\n'
footer += level_info
return compat.text_type(footer)
def _get_formatted_values(self):
return format_array(self.categorical.get_values(), None,
float_format=None, na_rep=self.na_rep)
def to_string(self):
categorical = self.categorical
if len(categorical) == 0:
if self.footer:
return self._get_footer()
else:
return u('')
fmt_values = self._get_formatted_values()
result = [u('{i}').format(i=i) for i in fmt_values]
result = [i.strip() for i in result]
result = u(', ').join(result)
result = [u('[') + result + u(']')]
if self.footer:
footer = self._get_footer()
if footer:
result.append(footer)
return compat.text_type(u('\n').join(result))
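# Illustrative note (a hedged sketch, not a doctest): CategoricalFormatter
# backs the repr of Categorical, so the pieces above map onto output roughly
# like
#
#     >>> import pandas as pd
#     >>> pd.Categorical(['a', 'b', 'a'])
#     [a, b, a]
#     Categories (2, object): [a, b]
#
# i.e. the bracketed, comma-joined values from to_string() followed by the
# footer built in _get_footer().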
class SeriesFormatter(object):
def __init__(self, series, buf=None, length=True, header=True, index=True,
na_rep='NaN', name=False, float_format=None, dtype=True,
max_rows=None):
self.series = series
self.buf = buf if buf is not None else StringIO()
self.name = name
self.na_rep = na_rep
self.header = header
self.length = length
self.index = index
self.max_rows = max_rows
if float_format is None:
float_format = get_option("display.float_format")
self.float_format = float_format
self.dtype = dtype
self.adj = _get_adjustment()
self._chk_truncate()
def _chk_truncate(self):
from pandas.core.reshape.concat import concat
max_rows = self.max_rows
truncate_v = max_rows and (len(self.series) > max_rows)
series = self.series
if truncate_v:
if max_rows == 1:
row_num = max_rows
series = series.iloc[:max_rows]
else:
row_num = max_rows // 2
series = concat((series.iloc[:row_num],
series.iloc[-row_num:]))
self.tr_row_num = row_num
self.tr_series = series
self.truncate_v = truncate_v
def _get_footer(self):
name = self.series.name
footer = u('')
if getattr(self.series.index, 'freq', None) is not None:
footer += 'Freq: {freq}'.format(freq=self.series.index.freqstr)
if self.name is not False and name is not None:
if footer:
footer += ', '
series_name = pprint_thing(name,
escape_chars=('\t', '\r', '\n'))
footer += ((u"Name: {sname}".format(sname=series_name))
if name is not None else "")
if (self.length is True or
(self.length == 'truncate' and self.truncate_v)):
if footer:
footer += ', '
footer += 'Length: {length}'.format(length=len(self.series))
if self.dtype is not False and self.dtype is not None:
name = getattr(self.tr_series.dtype, 'name', None)
if name:
if footer:
footer += ', '
footer += u'dtype: {typ}'.format(typ=pprint_thing(name))
        # level info is added to the end, on a new line, as is done
        # for Categoricals
if is_categorical_dtype(self.tr_series.dtype):
level_info = self.tr_series._values._repr_categories_info()
if footer:
footer += "\n"
footer += level_info
return compat.text_type(footer)
def _get_formatted_index(self):
index = self.tr_series.index
is_multi = isinstance(index, MultiIndex)
if is_multi:
have_header = any(name for name in index.names)
fmt_index = index.format(names=True)
else:
have_header = index.name is not None
fmt_index = index.format(name=True)
return fmt_index, have_header
def _get_formatted_values(self):
values_to_format = self.tr_series._formatting_values()
return format_array(values_to_format, None,
float_format=self.float_format, na_rep=self.na_rep)
def to_string(self):
series = self.tr_series
footer = self._get_footer()
if len(series) == 0:
return 'Series([], ' + footer + ')'
fmt_index, have_header = self._get_formatted_index()
fmt_values = self._get_formatted_values()
if self.truncate_v:
n_header_rows = 0
row_num = self.tr_row_num
width = self.adj.len(fmt_values[row_num - 1])
if width > 3:
dot_str = '...'
else:
dot_str = '..'
# Series uses mode=center because it has single value columns
# DataFrame uses mode=left
dot_str = self.adj.justify([dot_str], width, mode='center')[0]
fmt_values.insert(row_num + n_header_rows, dot_str)
fmt_index.insert(row_num + 1, '')
if self.index:
result = self.adj.adjoin(3, *[fmt_index[1:], fmt_values])
else:
result = self.adj.adjoin(3, fmt_values).replace('\n ',
'\n').strip()
if self.header and have_header:
result = fmt_index[0] + '\n' + result
if footer:
result += '\n' + footer
return compat.text_type(u('').join(result))
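# Illustrative note (a hedged sketch): SeriesFormatter is what Series.to_string
# and the Series repr delegate to, so the machinery above produces output
# roughly like
#
#     >>> import pandas as pd
#     >>> print(pd.Series([1.0, 2.0, 3.0], name='x').to_string())
#     0    1.0
#     1    2.0
#     2    3.0
#
# with the name/length/dtype footer appended only when the corresponding
# flags are set (as they are in the repr).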
class TextAdjustment(object):
def __init__(self):
self.encoding = get_option("display.encoding")
def len(self, text):
return compat.strlen(text, encoding=self.encoding)
def justify(self, texts, max_len, mode='right'):
return justify(texts, max_len, mode=mode)
def adjoin(self, space, *lists, **kwargs):
return adjoin(space, *lists, strlen=self.len,
justfunc=self.justify, **kwargs)
class EastAsianTextAdjustment(TextAdjustment):
def __init__(self):
super(EastAsianTextAdjustment, self).__init__()
if get_option("display.unicode.ambiguous_as_wide"):
self.ambiguous_width = 2
else:
self.ambiguous_width = 1
def len(self, text):
return compat.east_asian_len(text, encoding=self.encoding,
ambiguous_width=self.ambiguous_width)
def justify(self, texts, max_len, mode='right'):
# re-calculate padding space per str considering East Asian Width
def _get_pad(t):
return max_len - self.len(t) + len(t)
if mode == 'left':
return [x.ljust(_get_pad(x)) for x in texts]
elif mode == 'center':
return [x.center(_get_pad(x)) for x in texts]
else:
return [x.rjust(_get_pad(x)) for x in texts]
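# Illustrative note (a hedged sketch of the intended effect): after
#
#     >>> import pandas as pd
#     >>> pd.set_option('display.unicode.east_asian_width', True)
#
# _get_adjustment() below returns an EastAsianTextAdjustment, so wide (and,
# depending on 'display.unicode.ambiguous_as_wide', ambiguous) characters
# count as two cells when column widths are computed, which keeps CJK output
# aligned in a terminal.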
def _get_adjustment():
use_east_asian_width = get_option("display.unicode.east_asian_width")
if use_east_asian_width:
return EastAsianTextAdjustment()
else:
return TextAdjustment()
class TableFormatter(object):
is_truncated = False
show_dimensions = None
@property
def should_show_dimensions(self):
return (self.show_dimensions is True or
(self.show_dimensions == 'truncate' and self.is_truncated))
def _get_formatter(self, i):
if isinstance(self.formatters, (list, tuple)):
if is_integer(i):
return self.formatters[i]
else:
return None
else:
if is_integer(i) and i not in self.columns:
i = self.columns[i]
return self.formatters.get(i, None)
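# A hedged usage sketch: _get_formatter() is what lets callers pass
# ``formatters`` either positionally (list/tuple) or keyed by column name
# (dict), e.g.
#
#     >>> import pandas as pd
#     >>> df = pd.DataFrame({'price': [1.5, 2.25]})
#     >>> df.to_string(formatters={'price': '${:,.2f}'.format})  # by name
#     >>> df.to_string(formatters=['${:,.2f}'.format])           # by position
#
# (the list form must supply one formatter per column).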
class DataFrameFormatter(TableFormatter):
"""
Render a DataFrame
self.to_string() : console-friendly tabular output
self.to_html() : html table
self.to_latex() : LaTeX tabular environment table
"""
__doc__ = __doc__ if __doc__ else ''
__doc__ += common_docstring + justify_docstring + return_docstring
def __init__(self, frame, buf=None, columns=None, col_space=None,
header=True, index=True, na_rep='NaN', formatters=None,
justify=None, float_format=None, sparsify=None,
index_names=True, line_width=None, max_rows=None,
max_cols=None, show_dimensions=False, decimal='.', **kwds):
self.frame = frame
if buf is not None:
self.buf = _expand_user(_stringify_path(buf))
else:
self.buf = StringIO()
self.show_index_names = index_names
if sparsify is None:
sparsify = get_option("display.multi_sparse")
self.sparsify = sparsify
self.float_format = float_format
self.formatters = formatters if formatters is not None else {}
self.na_rep = na_rep
self.decimal = decimal
self.col_space = col_space
self.header = header
self.index = index
self.line_width = line_width
self.max_rows = max_rows
self.max_cols = max_cols
self.max_rows_displayed = min(max_rows or len(self.frame),
len(self.frame))
self.show_dimensions = show_dimensions
if justify is None:
self.justify = get_option("display.colheader_justify")
else:
self.justify = justify
self.kwds = kwds
if columns is not None:
self.columns = _ensure_index(columns)
self.frame = self.frame[self.columns]
else:
self.columns = frame.columns
self._chk_truncate()
self.adj = _get_adjustment()
def _chk_truncate(self):
"""
Checks whether the frame should be truncated. If so, slices
the frame up.
"""
from pandas.core.reshape.concat import concat
        # Column whose first element determines the width of a dot col
self.tr_size_col = -1
# Cut the data to the information actually printed
max_cols = self.max_cols
max_rows = self.max_rows
if max_cols == 0 or max_rows == 0: # assume we are in the terminal
# (why else = 0)
(w, h) = get_terminal_size()
self.w = w
self.h = h
if self.max_rows == 0:
dot_row = 1
prompt_row = 1
if self.show_dimensions:
show_dimension_rows = 3
n_add_rows = (self.header + dot_row + show_dimension_rows +
prompt_row)
# rows available to fill with actual data
max_rows_adj = self.h - n_add_rows
self.max_rows_adj = max_rows_adj
# Format only rows and columns that could potentially fit the
# screen
if max_cols == 0 and len(self.frame.columns) > w:
max_cols = w
if max_rows == 0 and len(self.frame) > h:
max_rows = h
if not hasattr(self, 'max_rows_adj'):
self.max_rows_adj = max_rows
if not hasattr(self, 'max_cols_adj'):
self.max_cols_adj = max_cols
max_cols_adj = self.max_cols_adj
max_rows_adj = self.max_rows_adj
truncate_h = max_cols_adj and (len(self.columns) > max_cols_adj)
truncate_v = max_rows_adj and (len(self.frame) > max_rows_adj)
frame = self.frame
if truncate_h:
if max_cols_adj == 0:
col_num = len(frame.columns)
elif max_cols_adj == 1:
frame = frame.iloc[:, :max_cols]
col_num = max_cols
else:
col_num = (max_cols_adj // 2)
frame = concat((frame.iloc[:, :col_num],
frame.iloc[:, -col_num:]), axis=1)
self.tr_col_num = col_num
if truncate_v:
if max_rows_adj == 0:
row_num = len(frame)
if max_rows_adj == 1:
row_num = max_rows
frame = frame.iloc[:max_rows, :]
else:
row_num = max_rows_adj // 2
frame = concat((frame.iloc[:row_num, :],
frame.iloc[-row_num:, :]))
self.tr_row_num = row_num
self.tr_frame = frame
self.truncate_h = truncate_h
self.truncate_v = truncate_v
self.is_truncated = self.truncate_h or self.truncate_v
def _to_str_columns(self):
"""
Render a DataFrame to a list of columns (as lists of strings).
"""
frame = self.tr_frame
# may include levels names also
str_index = self._get_formatted_index(frame)
if not is_list_like(self.header) and not self.header:
stringified = []
for i, c in enumerate(frame):
fmt_values = self._format_col(i)
fmt_values = _make_fixed_width(fmt_values, self.justify,
minimum=(self.col_space or 0),
adj=self.adj)
stringified.append(fmt_values)
else:
if is_list_like(self.header):
if len(self.header) != len(self.columns):
raise ValueError(('Writing {ncols} cols but got {nalias} '
'aliases'
.format(ncols=len(self.columns),
nalias=len(self.header))))
str_columns = [[label] for label in self.header]
else:
str_columns = self._get_formatted_column_labels(frame)
stringified = []
for i, c in enumerate(frame):
cheader = str_columns[i]
header_colwidth = max(self.col_space or 0,
*(self.adj.len(x) for x in cheader))
fmt_values = self._format_col(i)
fmt_values = _make_fixed_width(fmt_values, self.justify,
minimum=header_colwidth,
adj=self.adj)
max_len = max(max(self.adj.len(x) for x in fmt_values),
header_colwidth)
cheader = self.adj.justify(cheader, max_len, mode=self.justify)
stringified.append(cheader + fmt_values)
strcols = stringified
if self.index:
strcols.insert(0, str_index)
# Add ... to signal truncated
truncate_h = self.truncate_h
truncate_v = self.truncate_v
if truncate_h:
col_num = self.tr_col_num
# infer from column header
col_width = self.adj.len(strcols[self.tr_size_col][0])
strcols.insert(self.tr_col_num + 1, ['...'.center(col_width)] *
(len(str_index)))
if truncate_v:
n_header_rows = len(str_index) - len(frame)
row_num = self.tr_row_num
for ix, col in enumerate(strcols):
# infer from above row
cwidth = self.adj.len(strcols[ix][row_num])
is_dot_col = False
if truncate_h:
is_dot_col = ix == col_num + 1
if cwidth > 3 or is_dot_col:
my_str = '...'
else:
my_str = '..'
if ix == 0:
dot_mode = 'left'
elif is_dot_col:
cwidth = self.adj.len(strcols[self.tr_size_col][0])
dot_mode = 'center'
else:
dot_mode = 'right'
dot_str = self.adj.justify([my_str], cwidth, mode=dot_mode)[0]
strcols[ix].insert(row_num + n_header_rows, dot_str)
return strcols
def to_string(self):
"""
Render a DataFrame to a console-friendly tabular output.
"""
from pandas import Series
frame = self.frame
if len(frame.columns) == 0 or len(frame.index) == 0:
info_line = (u('Empty {name}\nColumns: {col}\nIndex: {idx}')
.format(name=type(self.frame).__name__,
col=pprint_thing(frame.columns),
idx=pprint_thing(frame.index)))
text = info_line
else:
strcols = self._to_str_columns()
if self.line_width is None: # no need to wrap around just print
# the whole frame
text = self.adj.adjoin(1, *strcols)
elif (not isinstance(self.max_cols, int) or
self.max_cols > 0): # need to wrap around
text = self._join_multiline(*strcols)
else: # max_cols == 0. Try to fit frame to terminal
text = self.adj.adjoin(1, *strcols).split('\n')
max_len = Series(text).str.len().max()
headers = [ele[0] for ele in strcols]
# Size of last col determines dot col size. See
                # `self._to_str_columns`
size_tr_col = len(headers[self.tr_size_col])
max_len += size_tr_col # Need to make space for largest row
# plus truncate dot col
dif = max_len - self.w
adj_dif = dif
col_lens = Series([Series(ele).apply(len).max()
for ele in strcols])
n_cols = len(col_lens)
counter = 0
while adj_dif > 0 and n_cols > 1:
counter += 1
mid = int(round(n_cols / 2.))
mid_ix = col_lens.index[mid]
col_len = col_lens[mid_ix]
adj_dif -= (col_len + 1) # adjoin adds one
col_lens = col_lens.drop(mid_ix)
n_cols = len(col_lens)
max_cols_adj = n_cols - self.index # subtract index column
self.max_cols_adj = max_cols_adj
# Call again _chk_truncate to cut frame appropriately
# and then generate string representation
self._chk_truncate()
strcols = self._to_str_columns()
text = self.adj.adjoin(1, *strcols)
if not self.index:
text = text.replace('\n ', '\n').strip()
self.buf.writelines(text)
if self.should_show_dimensions:
self.buf.write("\n\n[{nrows} rows x {ncols} columns]"
.format(nrows=len(frame), ncols=len(frame.columns)))
def _join_multiline(self, *strcols):
lwidth = self.line_width
adjoin_width = 1
strcols = list(strcols)
if self.index:
idx = strcols.pop(0)
lwidth -= np.array([self.adj.len(x)
for x in idx]).max() + adjoin_width
col_widths = [np.array([self.adj.len(x) for x in col]).max() if
len(col) > 0 else 0 for col in strcols]
col_bins = _binify(col_widths, lwidth)
nbins = len(col_bins)
if self.truncate_v:
nrows = self.max_rows_adj + 1
else:
nrows = len(self.frame)
str_lst = []
st = 0
for i, ed in enumerate(col_bins):
row = strcols[st:ed]
if self.index:
row.insert(0, idx)
if nbins > 1:
if ed <= len(strcols) and i < nbins - 1:
row.append([' \\'] + [' '] * (nrows - 1))
else:
row.append([' '] * nrows)
str_lst.append(self.adj.adjoin(adjoin_width, *row))
st = ed
return '\n\n'.join(str_lst)
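    # A hedged sketch of the user-facing effect: _join_multiline is reached
    # through DataFrame.to_string(line_width=...). Wide frames are split into
    # column "bins" that fit the requested width; bins are joined with a blank
    # line and every bin except the last ends with a trailing '\' continuation
    # marker, e.g.
    #
    #     >>> import pandas as pd
    #     >>> df = pd.DataFrame({'a': [1, 2], 'b': [3, 4], 'c': [5, 6]})
    #     >>> print(df.to_string(line_width=10))
    #
    # (where the wrapping point depends on the computed column widths).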
def to_latex(self, column_format=None, longtable=False, encoding=None,
multicolumn=False, multicolumn_format=None, multirow=False):
"""
Render a DataFrame to a LaTeX tabular/longtable environment output.
"""
latex_renderer = LatexFormatter(self, column_format=column_format,
longtable=longtable,
multicolumn=multicolumn,
multicolumn_format=multicolumn_format,
multirow=multirow)
if encoding is None:
encoding = 'ascii' if compat.PY2 else 'utf-8'
if hasattr(self.buf, 'write'):
latex_renderer.write_result(self.buf)
elif isinstance(self.buf, compat.string_types):
import codecs
with codecs.open(self.buf, 'w', encoding=encoding) as f:
latex_renderer.write_result(f)
else:
raise TypeError('buf is not a file name and it has no write '
'method')
def _format_col(self, i):
frame = self.tr_frame
formatter = self._get_formatter(i)
values_to_format = frame.iloc[:, i]._formatting_values()
return format_array(values_to_format, formatter,
float_format=self.float_format, na_rep=self.na_rep,
space=self.col_space, decimal=self.decimal)
def to_html(self, classes=None, notebook=False, border=None):
"""
Render a DataFrame to a html table.
Parameters
----------
classes : str or list-like
classes to include in the `class` attribute of the opening
``<table>`` tag, in addition to the default "dataframe".
notebook : {True, False}, optional, default False
Whether the generated HTML is for IPython Notebook.
border : int
A ``border=border`` attribute is included in the opening
``<table>`` tag. Default ``pd.options.html.border``.
.. versionadded:: 0.19.0
"""
html_renderer = HTMLFormatter(self, classes=classes,
max_rows=self.max_rows,
max_cols=self.max_cols,
notebook=notebook,
border=border)
if hasattr(self.buf, 'write'):
html_renderer.write_result(self.buf)
elif isinstance(self.buf, compat.string_types):
with open(self.buf, 'w') as f:
html_renderer.write_result(f)
else:
raise TypeError('buf is not a file name and it has no write '
                            'method')
def _get_formatted_column_labels(self, frame):
from pandas.core.index import _sparsify
columns = frame.columns
if isinstance(columns, MultiIndex):
fmt_columns = columns.format(sparsify=False, adjoin=False)
fmt_columns = lzip(*fmt_columns)
dtypes = self.frame.dtypes._values
# if we have a Float level, they don't use leading space at all
restrict_formatting = any(l.is_floating for l in columns.levels)
need_leadsp = dict(zip(fmt_columns, map(is_numeric_dtype, dtypes)))
def space_format(x, y):
if (y not in self.formatters and
need_leadsp[x] and not restrict_formatting):
return ' ' + y
return y
str_columns = list(zip(*[[space_format(x, y) for y in x]
for x in fmt_columns]))
if self.sparsify:
str_columns = _sparsify(str_columns)
str_columns = [list(x) for x in zip(*str_columns)]
else:
fmt_columns = columns.format()
dtypes = self.frame.dtypes
need_leadsp = dict(zip(fmt_columns, map(is_numeric_dtype, dtypes)))
str_columns = [[' ' + x if not self._get_formatter(i) and
need_leadsp[x] else x]
for i, (col, x) in enumerate(zip(columns,
fmt_columns))]
if self.show_index_names and self.has_index_names:
for x in str_columns:
x.append('')
# self.str_columns = str_columns
return str_columns
@property
def has_index_names(self):
return _has_names(self.frame.index)
@property
def has_column_names(self):
return _has_names(self.frame.columns)
def _get_formatted_index(self, frame):
# Note: this is only used by to_string() and to_latex(), not by
# to_html().
index = frame.index
columns = frame.columns
show_index_names = self.show_index_names and self.has_index_names
show_col_names = (self.show_index_names and self.has_column_names)
fmt = self._get_formatter('__index__')
if isinstance(index, MultiIndex):
fmt_index = index.format(sparsify=self.sparsify, adjoin=False,
names=show_index_names, formatter=fmt)
else:
fmt_index = [index.format(name=show_index_names, formatter=fmt)]
fmt_index = [tuple(_make_fixed_width(list(x), justify='left',
minimum=(self.col_space or 0),
adj=self.adj)) for x in fmt_index]
adjoined = self.adj.adjoin(1, *fmt_index).split('\n')
# empty space for columns
if show_col_names:
col_header = ['{x}'.format(x=x)
for x in self._get_column_name_list()]
else:
col_header = [''] * columns.nlevels
if self.header:
return col_header + adjoined
else:
return adjoined
def _get_column_name_list(self):
names = []
columns = self.frame.columns
if isinstance(columns, MultiIndex):
names.extend('' if name is None else name
for name in columns.names)
else:
names.append('' if columns.name is None else columns.name)
return names
class LatexFormatter(TableFormatter):
""" Used to render a DataFrame to a LaTeX tabular/longtable environment
output.
Parameters
----------
formatter : `DataFrameFormatter`
column_format : str, default None
The columns format as specified in `LaTeX table format
        <https://en.wikibooks.org/wiki/LaTeX/Tables>`__ e.g. 'rcl' for 3 columns
longtable : boolean, default False
Use a longtable environment instead of tabular.
See also
--------
HTMLFormatter
"""
def __init__(self, formatter, column_format=None, longtable=False,
multicolumn=False, multicolumn_format=None, multirow=False):
self.fmt = formatter
self.frame = self.fmt.frame
self.bold_rows = self.fmt.kwds.get('bold_rows', False)
self.column_format = column_format
self.longtable = longtable
self.multicolumn = multicolumn
self.multicolumn_format = multicolumn_format
self.multirow = multirow
def write_result(self, buf):
"""
Render a DataFrame to a LaTeX tabular/longtable environment output.
"""
# string representation of the columns
if len(self.frame.columns) == 0 or len(self.frame.index) == 0:
info_line = (u('Empty {name}\nColumns: {col}\nIndex: {idx}')
.format(name=type(self.frame).__name__,
col=self.frame.columns,
idx=self.frame.index))
strcols = [[info_line]]
else:
strcols = self.fmt._to_str_columns()
def get_col_type(dtype):
if issubclass(dtype.type, np.number):
return 'r'
else:
return 'l'
        # reestablish the MultiIndex that has been joined by _to_str_columns
if self.fmt.index and isinstance(self.frame.index, MultiIndex):
clevels = self.frame.columns.nlevels
strcols.pop(0)
name = any(self.frame.index.names)
cname = any(self.frame.columns.names)
lastcol = self.frame.index.nlevels - 1
previous_lev3 = None
for i, lev in enumerate(self.frame.index.levels):
lev2 = lev.format()
blank = ' ' * len(lev2[0])
# display column names in last index-column
if cname and i == lastcol:
lev3 = [x if x else '{}' for x in self.frame.columns.names]
else:
lev3 = [blank] * clevels
if name:
lev3.append(lev.name)
current_idx_val = None
for level_idx in self.frame.index.labels[i]:
if ((previous_lev3 is None or
previous_lev3[len(lev3)].isspace()) and
lev2[level_idx] == current_idx_val):
# same index as above row and left index was the same
lev3.append(blank)
else:
# different value than above or left index different
lev3.append(lev2[level_idx])
current_idx_val = lev2[level_idx]
strcols.insert(i, lev3)
previous_lev3 = lev3
column_format = self.column_format
if column_format is None:
dtypes = self.frame.dtypes._values
column_format = ''.join(map(get_col_type, dtypes))
if self.fmt.index:
index_format = 'l' * self.frame.index.nlevels
column_format = index_format + column_format
elif not isinstance(column_format,
compat.string_types): # pragma: no cover
raise AssertionError('column_format must be str or unicode, '
'not {typ}'.format(typ=type(column_format)))
if not self.longtable:
buf.write('\\begin{{tabular}}{{{fmt}}}\n'
.format(fmt=column_format))
buf.write('\\toprule\n')
else:
buf.write('\\begin{{longtable}}{{{fmt}}}\n'
.format(fmt=column_format))
buf.write('\\toprule\n')
ilevels = self.frame.index.nlevels
clevels = self.frame.columns.nlevels
nlevels = clevels
if any(self.frame.index.names):
nlevels += 1
strrows = list(zip(*strcols))
self.clinebuf = []
for i, row in enumerate(strrows):
if i == nlevels and self.fmt.header:
buf.write('\\midrule\n') # End of header
if self.longtable:
buf.write('\\endhead\n')
buf.write('\\midrule\n')
buf.write('\\multicolumn{{{n}}}{{r}}{{{{Continued on next '
'page}}}} \\\\\n'.format(n=len(row)))
buf.write('\\midrule\n')
buf.write('\\endfoot\n\n')
buf.write('\\bottomrule\n')
buf.write('\\endlastfoot\n')
if self.fmt.kwds.get('escape', True):
# escape backslashes first
crow = [(x.replace('\\', '\\textbackslash').replace('_', '\\_')
.replace('%', '\\%').replace('$', '\\$')
.replace('#', '\\#').replace('{', '\\{')
.replace('}', '\\}').replace('~', '\\textasciitilde')
.replace('^', '\\textasciicircum').replace('&', '\\&')
if (x and x != '{}') else '{}') for x in row]
else:
crow = [x if x else '{}' for x in row]
if self.bold_rows and self.fmt.index:
# bold row labels
crow = ['\\textbf{{{x}}}'.format(x=x)
if j < ilevels and x.strip() not in ['', '{}'] else x
for j, x in enumerate(crow)]
if i < clevels and self.fmt.header and self.multicolumn:
# sum up columns to multicolumns
crow = self._format_multicolumn(crow, ilevels)
if (i >= nlevels and self.fmt.index and self.multirow and
ilevels > 1):
# sum up rows to multirows
crow = self._format_multirow(crow, ilevels, i, strrows)
buf.write(' & '.join(crow))
buf.write(' \\\\\n')
if self.multirow and i < len(strrows) - 1:
self._print_cline(buf, i, len(strcols))
if not self.longtable:
buf.write('\\bottomrule\n')
buf.write('\\end{tabular}\n')
else:
buf.write('\\end{longtable}\n')
def _format_multicolumn(self, row, ilevels):
r"""
        Combine columns belonging to a group into a single multicolumn entry
according to self.multicolumn_format
e.g.:
a & & & b & c &
will become
\multicolumn{3}{l}{a} & b & \multicolumn{2}{l}{c}
"""
row2 = list(row[:ilevels])
ncol = 1
coltext = ''
def append_col():
# write multicolumn if needed
if ncol > 1:
row2.append('\\multicolumn{{{ncol:d}}}{{{fmt:s}}}{{{txt:s}}}'
.format(ncol=ncol, fmt=self.multicolumn_format,
txt=coltext.strip()))
# don't modify where not needed
else:
row2.append(coltext)
for c in row[ilevels:]:
# if next col has text, write the previous
if c.strip():
if coltext:
append_col()
coltext = c
ncol = 1
# if not, add it to the previous multicolumn
else:
ncol += 1
# write last column name
if coltext:
append_col()
return row2
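    # A hedged usage sketch: this path is exercised through
    # DataFrame.to_latex(multicolumn=True, multicolumn_format='c') on a frame
    # with MultiIndex columns, e.g.
    #
    #     >>> import pandas as pd
    #     >>> cols = pd.MultiIndex.from_product([['a', 'c'], [1, 2]])
    #     >>> df = pd.DataFrame([[1, 2, 3, 4]], columns=cols)
    #     >>> print(df.to_latex(multicolumn=True, multicolumn_format='c'))
    #
    # which renders the top column level as \multicolumn{2}{c}{a} and
    # \multicolumn{2}{c}{c}, each spanning its two sub-columns.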
def _format_multirow(self, row, ilevels, i, rows):
r"""
        Check following rows to determine whether this row should be a multirow
        e.g.:     becomes:
        a & 0 &   \multirow{2}{*}{a} & 0 &
          & 1 &   & 1 &
        b & 0 &   \cline{1-2}
                  b & 0 &
"""
for j in range(ilevels):
if row[j].strip():
nrow = 1
for r in rows[i + 1:]:
if not r[j].strip():
nrow += 1
else:
break
if nrow > 1:
# overwrite non-multirow entry
row[j] = '\\multirow{{{nrow:d}}}{{*}}{{{row:s}}}'.format(
nrow=nrow, row=row[j].strip())
# save when to end the current block with \cline
self.clinebuf.append([i + nrow - 1, j + 1])
return row
def _print_cline(self, buf, i, icol):
"""
Print clines after multirow-blocks are finished
"""
for cl in self.clinebuf:
if cl[0] == i:
buf.write('\\cline{{{cl:d}-{icol:d}}}\n'
.format(cl=cl[1], icol=icol))
# remove entries that have been written to buffer
self.clinebuf = [x for x in self.clinebuf if x[0] != i]
class HTMLFormatter(TableFormatter):
indent_delta = 2
def __init__(self, formatter, classes=None, max_rows=None, max_cols=None,
notebook=False, border=None):
self.fmt = formatter
self.classes = classes
self.frame = self.fmt.frame
self.columns = self.fmt.tr_frame.columns
self.elements = []
self.bold_rows = self.fmt.kwds.get('bold_rows', False)
self.escape = self.fmt.kwds.get('escape', True)
self.max_rows = max_rows or len(self.fmt.frame)
self.max_cols = max_cols or len(self.fmt.columns)
self.show_dimensions = self.fmt.show_dimensions
self.is_truncated = (self.max_rows < len(self.fmt.frame) or
self.max_cols < len(self.fmt.columns))
self.notebook = notebook
if border is None:
border = get_option('display.html.border')
self.border = border
def write(self, s, indent=0):
rs = pprint_thing(s)
self.elements.append(' ' * indent + rs)
def write_th(self, s, indent=0, tags=None):
if self.fmt.col_space is not None and self.fmt.col_space > 0:
tags = (tags or "")
tags += ('style="min-width: {colspace};"'
.format(colspace=self.fmt.col_space))
return self._write_cell(s, kind='th', indent=indent, tags=tags)
def write_td(self, s, indent=0, tags=None):
return self._write_cell(s, kind='td', indent=indent, tags=tags)
def _write_cell(self, s, kind='td', indent=0, tags=None):
if tags is not None:
start_tag = '<{kind} {tags}>'.format(kind=kind, tags=tags)
else:
start_tag = '<{kind}>'.format(kind=kind)
if self.escape:
# escape & first to prevent double escaping of &
            esc = OrderedDict([('&', r'&amp;'), ('<', r'&lt;'),
                               ('>', r'&gt;')])
else:
esc = {}
rs = pprint_thing(s, escape_chars=esc).strip()
self.write(u'{start}{rs}</{kind}>'
.format(start=start_tag, rs=rs, kind=kind), indent)
def write_tr(self, line, indent=0, indent_delta=4, header=False,
align=None, tags=None, nindex_levels=0):
if tags is None:
tags = {}
if align is None:
self.write('<tr>', indent)
else:
self.write('<tr style="text-align: {align};">'
.format(align=align), indent)
indent += indent_delta
for i, s in enumerate(line):
val_tag = tags.get(i, None)
if header or (self.bold_rows and i < nindex_levels):
self.write_th(s, indent, tags=val_tag)
else:
self.write_td(s, indent, tags=val_tag)
indent -= indent_delta
self.write('</tr>', indent)
def write_style(self):
# We use the "scoped" attribute here so that the desired
# style properties for the data frame are not then applied
# throughout the entire notebook.
template_first = """\
<style scoped>"""
template_last = """\
</style>"""
template_select = """\
.dataframe %s {
%s: %s;
}"""
element_props = [('tbody tr th:only-of-type',
'vertical-align',
'middle'),
('tbody tr th',
'vertical-align',
'top')]
if isinstance(self.columns, MultiIndex):
element_props.append(('thead tr th',
'text-align',
'left'))
if all((self.fmt.has_index_names,
self.fmt.index,
self.fmt.show_index_names)):
element_props.append(('thead tr:last-of-type th',
'text-align',
'right'))
else:
element_props.append(('thead th',
'text-align',
'right'))
template_mid = '\n\n'.join(map(lambda t: template_select % t,
element_props))
template = dedent('\n'.join((template_first,
template_mid,
template_last)))
if self.notebook:
self.write(template)
def write_result(self, buf):
indent = 0
frame = self.frame
_classes = ['dataframe'] # Default class.
if self.classes is not None:
if isinstance(self.classes, str):
self.classes = self.classes.split()
if not isinstance(self.classes, (list, tuple)):
raise AssertionError('classes must be list or tuple, not {typ}'
.format(typ=type(self.classes)))
_classes.extend(self.classes)
if self.notebook:
div_style = ''
try:
import IPython
if IPython.__version__ < LooseVersion('3.0.0'):
div_style = ' style="max-width:1500px;overflow:auto;"'
except (ImportError, AttributeError):
pass
self.write('<div{style}>'.format(style=div_style))
self.write_style()
self.write('<table border="{border}" class="{cls}">'
.format(border=self.border, cls=' '.join(_classes)), indent)
indent += self.indent_delta
indent = self._write_header(indent)
indent = self._write_body(indent)
self.write('</table>', indent)
if self.should_show_dimensions:
by = chr(215) if compat.PY3 else unichr(215) # ×
self.write(u('<p>{rows} rows {by} {cols} columns</p>')
.format(rows=len(frame),
by=by,
cols=len(frame.columns)))
if self.notebook:
self.write('</div>')
_put_lines(buf, self.elements)
def _write_header(self, indent):
truncate_h = self.fmt.truncate_h
row_levels = self.frame.index.nlevels
if not self.fmt.header:
# write nothing
return indent
def _column_header():
if self.fmt.index:
row = [''] * (self.frame.index.nlevels - 1)
else:
row = []
if isinstance(self.columns, MultiIndex):
if self.fmt.has_column_names and self.fmt.index:
row.append(single_column_table(self.columns.names))
else:
row.append('')
style = "text-align: {just};".format(just=self.fmt.justify)
row.extend([single_column_table(c, self.fmt.justify, style)
for c in self.columns])
else:
if self.fmt.index:
row.append(self.columns.name or '')
row.extend(self.columns)
return row
self.write('<thead>', indent)
row = []
indent += self.indent_delta
if isinstance(self.columns, MultiIndex):
template = 'colspan="{span:d}" halign="left"'
if self.fmt.sparsify:
# GH3547
sentinel = sentinel_factory()
else:
sentinel = None
levels = self.columns.format(sparsify=sentinel, adjoin=False,
names=False)
level_lengths = get_level_lengths(levels, sentinel)
inner_lvl = len(level_lengths) - 1
for lnum, (records, values) in enumerate(zip(level_lengths,
levels)):
if truncate_h:
# modify the header lines
ins_col = self.fmt.tr_col_num
if self.fmt.sparsify:
recs_new = {}
# Increment tags after ... col.
for tag, span in list(records.items()):
if tag >= ins_col:
recs_new[tag + 1] = span
elif tag + span > ins_col:
recs_new[tag] = span + 1
if lnum == inner_lvl:
values = (values[:ins_col] + (u('...'),) +
values[ins_col:])
else:
# sparse col headers do not receive a ...
values = (values[:ins_col] +
(values[ins_col - 1], ) +
values[ins_col:])
else:
recs_new[tag] = span
# if ins_col lies between tags, all col headers
# get ...
if tag + span == ins_col:
recs_new[ins_col] = 1
values = (values[:ins_col] + (u('...'),) +
values[ins_col:])
records = recs_new
inner_lvl = len(level_lengths) - 1
if lnum == inner_lvl:
records[ins_col] = 1
else:
recs_new = {}
for tag, span in list(records.items()):
if tag >= ins_col:
recs_new[tag + 1] = span
else:
recs_new[tag] = span
recs_new[ins_col] = 1
records = recs_new
values = (values[:ins_col] + [u('...')] +
values[ins_col:])
name = self.columns.names[lnum]
row = [''] * (row_levels - 1) + ['' if name is None else
pprint_thing(name)]
if row == [""] and self.fmt.index is False:
row = []
tags = {}
j = len(row)
for i, v in enumerate(values):
if i in records:
if records[i] > 1:
tags[j] = template.format(span=records[i])
else:
continue
j += 1
row.append(v)
self.write_tr(row, indent, self.indent_delta, tags=tags,
header=True)
else:
col_row = _column_header()
align = self.fmt.justify
if truncate_h:
ins_col = row_levels + self.fmt.tr_col_num
col_row.insert(ins_col, '...')
self.write_tr(col_row, indent, self.indent_delta, header=True,
align=align)
if all((self.fmt.has_index_names,
self.fmt.index,
self.fmt.show_index_names)):
row = ([x if x is not None else ''
for x in self.frame.index.names] +
[''] * min(len(self.columns), self.max_cols))
if truncate_h:
ins_col = row_levels + self.fmt.tr_col_num
row.insert(ins_col, '')
self.write_tr(row, indent, self.indent_delta, header=True)
indent -= self.indent_delta
self.write('</thead>', indent)
return indent
def _write_body(self, indent):
self.write('<tbody>', indent)
indent += self.indent_delta
fmt_values = {}
for i in range(min(len(self.columns), self.max_cols)):
fmt_values[i] = self.fmt._format_col(i)
# write values
if self.fmt.index:
if isinstance(self.frame.index, MultiIndex):
self._write_hierarchical_rows(fmt_values, indent)
else:
self._write_regular_rows(fmt_values, indent)
else:
for i in range(min(len(self.frame), self.max_rows)):
row = [fmt_values[j][i] for j in range(len(self.columns))]
self.write_tr(row, indent, self.indent_delta, tags=None)
indent -= self.indent_delta
self.write('</tbody>', indent)
indent -= self.indent_delta
return indent
def _write_regular_rows(self, fmt_values, indent):
truncate_h = self.fmt.truncate_h
truncate_v = self.fmt.truncate_v
ncols = len(self.fmt.tr_frame.columns)
nrows = len(self.fmt.tr_frame)
fmt = self.fmt._get_formatter('__index__')
if fmt is not None:
index_values = self.fmt.tr_frame.index.map(fmt)
else:
index_values = self.fmt.tr_frame.index.format()
row = []
for i in range(nrows):
if truncate_v and i == (self.fmt.tr_row_num):
str_sep_row = ['...' for ele in row]
self.write_tr(str_sep_row, indent, self.indent_delta,
tags=None, nindex_levels=1)
row = []
row.append(index_values[i])
row.extend(fmt_values[j][i] for j in range(ncols))
if truncate_h:
dot_col_ix = self.fmt.tr_col_num + 1
row.insert(dot_col_ix, '...')
self.write_tr(row, indent, self.indent_delta, tags=None,
nindex_levels=1)
def _write_hierarchical_rows(self, fmt_values, indent):
template = 'rowspan="{span}" valign="top"'
truncate_h = self.fmt.truncate_h
truncate_v = self.fmt.truncate_v
frame = self.fmt.tr_frame
ncols = len(frame.columns)
nrows = len(frame)
row_levels = self.frame.index.nlevels
idx_values = frame.index.format(sparsify=False, adjoin=False,
names=False)
idx_values = lzip(*idx_values)
if self.fmt.sparsify:
# GH3547
sentinel = sentinel_factory()
levels = frame.index.format(sparsify=sentinel, adjoin=False,
names=False)
level_lengths = get_level_lengths(levels, sentinel)
inner_lvl = len(level_lengths) - 1
if truncate_v:
# Insert ... row and adjust idx_values and
# level_lengths to take this into account.
ins_row = self.fmt.tr_row_num
inserted = False
for lnum, records in enumerate(level_lengths):
rec_new = {}
for tag, span in list(records.items()):
if tag >= ins_row:
rec_new[tag + 1] = span
elif tag + span > ins_row:
rec_new[tag] = span + 1
# GH 14882 - Make sure insertion done once
if not inserted:
dot_row = list(idx_values[ins_row - 1])
dot_row[-1] = u('...')
idx_values.insert(ins_row, tuple(dot_row))
inserted = True
else:
dot_row = list(idx_values[ins_row])
dot_row[inner_lvl - lnum] = u('...')
idx_values[ins_row] = tuple(dot_row)
else:
rec_new[tag] = span
                        # If ins_row lies between tags, all index
                        # columns receive ...
if tag + span == ins_row:
rec_new[ins_row] = 1
if lnum == 0:
idx_values.insert(ins_row, tuple(
[u('...')] * len(level_lengths)))
# GH 14882 - Place ... in correct level
elif inserted:
dot_row = list(idx_values[ins_row])
dot_row[inner_lvl - lnum] = u('...')
idx_values[ins_row] = tuple(dot_row)
level_lengths[lnum] = rec_new
level_lengths[inner_lvl][ins_row] = 1
for ix_col in range(len(fmt_values)):
fmt_values[ix_col].insert(ins_row, '...')
nrows += 1
for i in range(nrows):
row = []
tags = {}
sparse_offset = 0
j = 0
for records, v in zip(level_lengths, idx_values[i]):
if i in records:
if records[i] > 1:
tags[j] = template.format(span=records[i])
else:
sparse_offset += 1
continue
j += 1
row.append(v)
row.extend(fmt_values[j][i] for j in range(ncols))
if truncate_h:
row.insert(row_levels - sparse_offset +
self.fmt.tr_col_num, '...')
self.write_tr(row, indent, self.indent_delta, tags=tags,
nindex_levels=len(levels) - sparse_offset)
else:
for i in range(len(frame)):
idx_values = list(zip(*frame.index.format(
sparsify=False, adjoin=False, names=False)))
row = []
row.extend(idx_values[i])
row.extend(fmt_values[j][i] for j in range(ncols))
if truncate_h:
row.insert(row_levels + self.fmt.tr_col_num, '...')
self.write_tr(row, indent, self.indent_delta, tags=None,
nindex_levels=frame.index.nlevels)
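# A brief usage sketch (illustrative): HTMLFormatter is driven by
# DataFrame.to_html(), where the constructor arguments above surface as, e.g.,
#
#     >>> import pandas as pd
#     >>> df = pd.DataFrame({'a': [1, 2]})
#     >>> html = df.to_html(classes='table table-striped', border=0)
#
# which yields a <table border="0" class="dataframe table table-striped">
# element assembled by write_result() above.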
class CSVFormatter(object):
def __init__(self, obj, path_or_buf=None, sep=",", na_rep='',
float_format=None, cols=None, header=True, index=True,
index_label=None, mode='w', nanRep=None, encoding=None,
compression=None, quoting=None, line_terminator='\n',
chunksize=None, tupleize_cols=False, quotechar='"',
date_format=None, doublequote=True, escapechar=None,
decimal='.'):
self.obj = obj
if path_or_buf is None:
path_or_buf = StringIO()
self.path_or_buf = _expand_user(_stringify_path(path_or_buf))
self.sep = sep
self.na_rep = na_rep
self.float_format = float_format
self.decimal = decimal
self.header = header
self.index = index
self.index_label = index_label
self.mode = mode
self.encoding = encoding
self.compression = compression
if quoting is None:
quoting = csv.QUOTE_MINIMAL
self.quoting = quoting
if quoting == csv.QUOTE_NONE:
# prevents crash in _csv
quotechar = None
self.quotechar = quotechar
self.doublequote = doublequote
self.escapechar = escapechar
self.line_terminator = line_terminator
self.date_format = date_format
self.tupleize_cols = tupleize_cols
self.has_mi_columns = (isinstance(obj.columns, MultiIndex) and
not self.tupleize_cols)
# validate mi options
if self.has_mi_columns:
if cols is not None:
raise TypeError("cannot specify cols with a MultiIndex on the "
"columns")
if cols is not None:
if isinstance(cols, Index):
cols = cols.to_native_types(na_rep=na_rep,
float_format=float_format,
date_format=date_format,
quoting=self.quoting)
else:
cols = list(cols)
self.obj = self.obj.loc[:, cols]
# update columns to include possible multiplicity of dupes
        # and make sure cols is just a list of labels
cols = self.obj.columns
if isinstance(cols, Index):
cols = cols.to_native_types(na_rep=na_rep,
float_format=float_format,
date_format=date_format,
quoting=self.quoting)
else:
cols = list(cols)
# save it
self.cols = cols
# preallocate data 2d list
self.blocks = self.obj._data.blocks
ncols = sum(b.shape[0] for b in self.blocks)
self.data = [None] * ncols
if chunksize is None:
chunksize = (100000 // (len(self.cols) or 1)) or 1
self.chunksize = int(chunksize)
self.data_index = obj.index
if (isinstance(self.data_index, (DatetimeIndex, PeriodIndex)) and
date_format is not None):
self.data_index = Index([x.strftime(date_format) if notna(x) else
'' for x in self.data_index])
self.nlevels = getattr(self.data_index, 'nlevels', 1)
if not index:
self.nlevels = 0
def save(self):
# create the writer & save
if self.encoding is None:
if compat.PY2:
encoding = 'ascii'
else:
encoding = 'utf-8'
else:
encoding = self.encoding
if hasattr(self.path_or_buf, 'write'):
f = self.path_or_buf
close = False
else:
f, handles = _get_handle(self.path_or_buf, self.mode,
encoding=encoding,
compression=self.compression)
close = True
try:
writer_kwargs = dict(lineterminator=self.line_terminator,
delimiter=self.sep, quoting=self.quoting,
doublequote=self.doublequote,
escapechar=self.escapechar,
quotechar=self.quotechar)
if encoding == 'ascii':
self.writer = csv.writer(f, **writer_kwargs)
else:
writer_kwargs['encoding'] = encoding
self.writer = UnicodeWriter(f, **writer_kwargs)
self._save()
finally:
if close:
f.close()
def _save_header(self):
writer = self.writer
obj = self.obj
index_label = self.index_label
cols = self.cols
has_mi_columns = self.has_mi_columns
header = self.header
encoded_labels = []
has_aliases = isinstance(header, (tuple, list, np.ndarray, Index))
if not (has_aliases or self.header):
return
if has_aliases:
if len(header) != len(cols):
raise ValueError(('Writing {ncols} cols but got {nalias} '
'aliases'.format(ncols=len(cols),
nalias=len(header))))
else:
write_cols = header
else:
write_cols = cols
if self.index:
# should write something for index label
if index_label is not False:
if index_label is None:
if isinstance(obj.index, MultiIndex):
index_label = []
for i, name in enumerate(obj.index.names):
if name is None:
name = ''
index_label.append(name)
else:
index_label = obj.index.name
if index_label is None:
index_label = ['']
else:
index_label = [index_label]
elif not isinstance(index_label,
(list, tuple, np.ndarray, Index)):
# given a string for a DF with Index
index_label = [index_label]
encoded_labels = list(index_label)
else:
encoded_labels = []
if not has_mi_columns or has_aliases:
encoded_labels += list(write_cols)
writer.writerow(encoded_labels)
else:
# write out the mi
columns = obj.columns
# write out the names for each level, then ALL of the values for
# each level
for i in range(columns.nlevels):
# we need at least 1 index column to write our col names
col_line = []
if self.index:
# name is the first column
col_line.append(columns.names[i])
if isinstance(index_label, list) and len(index_label) > 1:
col_line.extend([''] * (len(index_label) - 1))
col_line.extend(columns._get_level_values(i))
writer.writerow(col_line)
# Write out the index line if it's not empty.
# Otherwise, we will print out an extraneous
# blank line between the mi and the data rows.
if encoded_labels and set(encoded_labels) != set(['']):
encoded_labels.extend([''] * len(columns))
writer.writerow(encoded_labels)
def _save(self):
self._save_header()
nrows = len(self.data_index)
# write in chunksize bites
chunksize = self.chunksize
chunks = int(nrows / chunksize) + 1
for i in range(chunks):
start_i = i * chunksize
end_i = min((i + 1) * chunksize, nrows)
if start_i >= end_i:
break
self._save_chunk(start_i, end_i)
def _save_chunk(self, start_i, end_i):
data_index = self.data_index
# create the data for a chunk
slicer = slice(start_i, end_i)
for i in range(len(self.blocks)):
b = self.blocks[i]
d = b.to_native_types(slicer=slicer, na_rep=self.na_rep,
float_format=self.float_format,
decimal=self.decimal,
date_format=self.date_format,
quoting=self.quoting)
for col_loc, col in zip(b.mgr_locs, d):
# self.data is a preallocated list
self.data[col_loc] = col
ix = data_index.to_native_types(slicer=slicer, na_rep=self.na_rep,
float_format=self.float_format,
decimal=self.decimal,
date_format=self.date_format,
quoting=self.quoting)
lib.write_csv_rows(self.data, ix, self.nlevels, self.cols, self.writer)
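# A short usage sketch (illustrative): CSVFormatter is what DataFrame.to_csv
# instantiates under the hood; rows are written in chunks of ``chunksize``
# (by default roughly 100000 // n_columns, see __init__ above), e.g.
#
#     >>> import pandas as pd
#     >>> df = pd.DataFrame({'a': [1, 2], 'b': ['x', 'y']})
#     >>> df.to_csv('out.csv', index=False, chunksize=1)
#
# ('out.csv' is a made-up example path; chunksize only affects how rows are
# buffered while writing, not the resulting file).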
# ----------------------------------------------------------------------
# Array formatters
def format_array(values, formatter, float_format=None, na_rep='NaN',
digits=None, space=None, justify='right', decimal='.'):
if is_categorical_dtype(values):
fmt_klass = CategoricalArrayFormatter
elif is_interval_dtype(values):
fmt_klass = IntervalArrayFormatter
elif is_float_dtype(values.dtype):
fmt_klass = FloatArrayFormatter
elif is_period_arraylike(values):
fmt_klass = PeriodArrayFormatter
elif is_integer_dtype(values.dtype):
fmt_klass = IntArrayFormatter
elif is_datetimetz(values):
fmt_klass = Datetime64TZFormatter
elif is_datetime64_dtype(values.dtype):
fmt_klass = Datetime64Formatter
elif is_timedelta64_dtype(values.dtype):
fmt_klass = Timedelta64Formatter
else:
fmt_klass = GenericArrayFormatter
if space is None:
space = get_option("display.column_space")
if float_format is None:
float_format = get_option("display.float_format")
if digits is None:
digits = get_option("display.precision")
fmt_obj = fmt_klass(values, digits=digits, na_rep=na_rep,
float_format=float_format, formatter=formatter,
space=space, justify=justify, decimal=decimal)
return fmt_obj.get_result()
class GenericArrayFormatter(object):
def __init__(self, values, digits=7, formatter=None, na_rep='NaN',
space=12, float_format=None, justify='right', decimal='.',
quoting=None, fixed_width=True):
self.values = values
self.digits = digits
self.na_rep = na_rep
self.space = space
self.formatter = formatter
self.float_format = float_format
self.justify = justify
self.decimal = decimal
self.quoting = quoting
self.fixed_width = fixed_width
def get_result(self):
fmt_values = self._format_strings()
return _make_fixed_width(fmt_values, self.justify)
def _format_strings(self):
if self.float_format is None:
float_format = get_option("display.float_format")
if float_format is None:
fmt_str = ('{{x: .{prec:d}g}}'
.format(prec=get_option("display.precision")))
float_format = lambda x: fmt_str.format(x=x)
else:
float_format = self.float_format
formatter = (
self.formatter if self.formatter is not None else
(lambda x: pprint_thing(x, escape_chars=('\t', '\r', '\n'))))
def _format(x):
if self.na_rep is not None and is_scalar(x) and isna(x):
if x is None:
return 'None'
elif x is pd.NaT:
return 'NaT'
return self.na_rep
elif isinstance(x, PandasObject):
return u'{x}'.format(x=x)
else:
# object dtype
return u'{x}'.format(x=formatter(x))
vals = self.values
if isinstance(vals, Index):
vals = vals._values
elif isinstance(vals, ABCSparseArray):
vals = vals.values
is_float_type = lib.map_infer(vals, is_float) & notna(vals)
leading_space = is_float_type.any()
fmt_values = []
for i, v in enumerate(vals):
if not is_float_type[i] and leading_space:
fmt_values.append(u' {v}'.format(v=_format(v)))
elif is_float_type[i]:
fmt_values.append(float_format(v))
else:
fmt_values.append(u' {v}'.format(v=_format(v)))
return fmt_values
class FloatArrayFormatter(GenericArrayFormatter):
"""
"""
def __init__(self, *args, **kwargs):
GenericArrayFormatter.__init__(self, *args, **kwargs)
# float_format is expected to be a string
# formatter should be used to pass a function
if self.float_format is not None and self.formatter is None:
if callable(self.float_format):
self.formatter = self.float_format
self.float_format = None
def _value_formatter(self, float_format=None, threshold=None):
"""Returns a function to be applied on each value to format it
"""
# the float_format parameter supersedes self.float_format
if float_format is None:
float_format = self.float_format
# we are going to compose different functions, to first convert to
# a string, then replace the decimal symbol, and finally chop according
# to the threshold
# when there is no float_format, we use str instead of '%g'
# because str(0.0) = '0.0' while '%g' % 0.0 = '0'
if float_format:
def base_formatter(v):
return float_format(value=v) if notna(v) else self.na_rep
else:
def base_formatter(v):
return str(v) if notna(v) else self.na_rep
if self.decimal != '.':
def decimal_formatter(v):
return base_formatter(v).replace('.', self.decimal, 1)
else:
decimal_formatter = base_formatter
if threshold is None:
return decimal_formatter
def formatter(value):
if notna(value):
if abs(value) > threshold:
return decimal_formatter(value)
else:
return decimal_formatter(0.0)
else:
return self.na_rep
return formatter
def get_result_as_array(self):
"""
Returns the float values converted into strings using
        the parameters given at initialisation, as a numpy array
"""
if self.formatter is not None:
return np.array([self.formatter(x) for x in self.values])
if self.fixed_width:
threshold = get_option("display.chop_threshold")
else:
threshold = None
# if we have a fixed_width, we'll need to try different float_format
def format_values_with(float_format):
formatter = self._value_formatter(float_format, threshold)
# separate the wheat from the chaff
values = self.values
mask = isna(values)
if hasattr(values, 'to_dense'): # sparse numpy ndarray
values = values.to_dense()
values = np.array(values, dtype='object')
values[mask] = self.na_rep
imask = (~mask).ravel()
values.flat[imask] = np.array([formatter(val)
for val in values.ravel()[imask]])
if self.fixed_width:
return _trim_zeros(values, self.na_rep)
return values
# There is a special default string when we are fixed-width
# The default is otherwise to use str instead of a formatting string
if self.float_format is None:
if self.fixed_width:
float_format = partial('{value: .{digits:d}f}'.format,
digits=self.digits)
else:
float_format = self.float_format
else:
float_format = lambda value: self.float_format % value
formatted_values = format_values_with(float_format)
if not self.fixed_width:
return formatted_values
        # we need to convert to engineering format if some values are too small
# and would appear as 0, or if some values are too big and take too
# much space
if len(formatted_values) > 0:
maxlen = max(len(x) for x in formatted_values)
too_long = maxlen > self.digits + 6
else:
too_long = False
with np.errstate(invalid='ignore'):
abs_vals = np.abs(self.values)
# this is pretty arbitrary for now
        # large values: more than 8 characters including decimal symbol
# and first digit, hence > 1e6
has_large_values = (abs_vals > 1e6).any()
has_small_values = ((abs_vals < 10**(-self.digits)) &
(abs_vals > 0)).any()
if has_small_values or (too_long and has_large_values):
float_format = partial('{value: .{digits:d}e}'.format,
digits=self.digits)
formatted_values = format_values_with(float_format)
return formatted_values
def _format_strings(self):
# shortcut
if self.formatter is not None:
return [self.formatter(x) for x in self.values]
return list(self.get_result_as_array())
class IntArrayFormatter(GenericArrayFormatter):
def _format_strings(self):
formatter = self.formatter or (lambda x: '{x: d}'.format(x=x))
fmt_values = [formatter(x) for x in self.values]
return fmt_values
class Datetime64Formatter(GenericArrayFormatter):
def __init__(self, values, nat_rep='NaT', date_format=None, **kwargs):
super(Datetime64Formatter, self).__init__(values, **kwargs)
self.nat_rep = nat_rep
self.date_format = date_format
def _format_strings(self):
""" we by definition have DO NOT have a TZ """
values = self.values
if not isinstance(values, DatetimeIndex):
values = DatetimeIndex(values)
if self.formatter is not None and callable(self.formatter):
return [self.formatter(x) for x in values]
fmt_values = format_array_from_datetime(
values.asi8.ravel(),
format=_get_format_datetime64_from_values(values,
self.date_format),
na_rep=self.nat_rep).reshape(values.shape)
return fmt_values.tolist()
class IntervalArrayFormatter(GenericArrayFormatter):
def __init__(self, values, *args, **kwargs):
GenericArrayFormatter.__init__(self, values, *args, **kwargs)
def _format_strings(self):
formatter = self.formatter or str
fmt_values = np.array([formatter(x) for x in self.values])
return fmt_values
class PeriodArrayFormatter(IntArrayFormatter):
def _format_strings(self):
from pandas.core.indexes.period import IncompatibleFrequency
try:
values = PeriodIndex(self.values).to_native_types()
except IncompatibleFrequency:
# periods may contain different freqs
values = Index(self.values, dtype='object').to_native_types()
formatter = self.formatter or (lambda x: '{x}'.format(x=x))
fmt_values = [formatter(x) for x in values]
return fmt_values
class CategoricalArrayFormatter(GenericArrayFormatter):
def __init__(self, values, *args, **kwargs):
GenericArrayFormatter.__init__(self, values, *args, **kwargs)
def _format_strings(self):
fmt_values = format_array(self.values.get_values(), self.formatter,
float_format=self.float_format,
na_rep=self.na_rep, digits=self.digits,
space=self.space, justify=self.justify)
return fmt_values
def format_percentiles(percentiles):
"""
Outputs rounded and formatted percentiles.
Parameters
----------
percentiles : list-like, containing floats from interval [0,1]
Returns
-------
formatted : list of strings
Notes
-----
Rounding precision is chosen so that: (1) if any two elements of
``percentiles`` differ, they remain different after rounding
(2) no entry is *rounded* to 0% or 100%.
Any non-integer is always rounded to at least 1 decimal place.
Examples
--------
Keeps all entries different after rounding:
>>> format_percentiles([0.01999, 0.02001, 0.5, 0.666666, 0.9999])
['1.999%', '2.001%', '50%', '66.667%', '99.99%']
No element is rounded to 0% or 100% (unless already equal to it).
Duplicates are allowed:
>>> format_percentiles([0, 0.5, 0.02001, 0.5, 0.666666, 0.9999])
['0%', '50%', '2.0%', '50%', '66.67%', '99.99%']
"""
percentiles = np.asarray(percentiles)
# It checks for np.NaN as well
with np.errstate(invalid='ignore'):
if not is_numeric_dtype(percentiles) or not np.all(percentiles >= 0) \
or not np.all(percentiles <= 1):
raise ValueError("percentiles should all be in the interval [0,1]")
percentiles = 100 * percentiles
int_idx = (percentiles.astype(int) == percentiles)
if np.all(int_idx):
out = percentiles.astype(int).astype(str)
return [i + '%' for i in out]
unique_pcts = np.unique(percentiles)
to_begin = unique_pcts[0] if unique_pcts[0] > 0 else None
to_end = 100 - unique_pcts[-1] if unique_pcts[-1] < 100 else None
# Least precision that keeps percentiles unique after rounding
prec = -np.floor(np.log10(np.min(
np.ediff1d(unique_pcts, to_begin=to_begin, to_end=to_end)
))).astype(int)
prec = max(1, prec)
out = np.empty_like(percentiles, dtype=object)
out[int_idx] = percentiles[int_idx].astype(int).astype(str)
out[~int_idx] = percentiles[~int_idx].round(prec).astype(str)
return [i + '%' for i in out]
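# Rough worked example of the precision rule above (values are illustrative):
# for [0.1999, 0.2001] the percent-scale gaps (including the distances to 0%
# and 100%) have a minimum of 0.02, so prec = -floor(log10(0.02)) = 2 and the
# result would be ['19.99%', '20.01%'].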
def _is_dates_only(values):
# return a boolean if we are only dates (and don't have a timezone)
values = DatetimeIndex(values)
if values.tz is not None:
return False
values_int = values.asi8
consider_values = values_int != iNaT
one_day_nanos = (86400 * 1e9)
even_days = np.logical_and(consider_values,
values_int % one_day_nanos != 0).sum() == 0
if even_days:
return True
return False
def _format_datetime64(x, tz=None, nat_rep='NaT'):
if x is None or (is_scalar(x) and isna(x)):
return nat_rep
if tz is not None or not isinstance(x, Timestamp):
x = Timestamp(x, tz=tz)
return str(x)
def _format_datetime64_dateonly(x, nat_rep='NaT', date_format=None):
if x is None or (is_scalar(x) and isna(x)):
return nat_rep
if not isinstance(x, Timestamp):
x = Timestamp(x)
if date_format:
return x.strftime(date_format)
else:
return x._date_repr
def _get_format_datetime64(is_dates_only, nat_rep='NaT', date_format=None):
if is_dates_only:
return lambda x, tz=None: _format_datetime64_dateonly(
x, nat_rep=nat_rep, date_format=date_format)
else:
return lambda x, tz=None: _format_datetime64(x, tz=tz, nat_rep=nat_rep)
def _get_format_datetime64_from_values(values, date_format):
""" given values and a date_format, return a string format """
is_dates_only = _is_dates_only(values)
if is_dates_only:
return date_format or "%Y-%m-%d"
return date_format
class Datetime64TZFormatter(Datetime64Formatter):
def _format_strings(self):
""" we by definition have a TZ """
values = self.values.astype(object)
is_dates_only = _is_dates_only(values)
formatter = (self.formatter or
_get_format_datetime64(is_dates_only,
date_format=self.date_format))
fmt_values = [formatter(x) for x in values]
return fmt_values
class Timedelta64Formatter(GenericArrayFormatter):
def __init__(self, values, nat_rep='NaT', box=False, **kwargs):
super(Timedelta64Formatter, self).__init__(values, **kwargs)
self.nat_rep = nat_rep
self.box = box
def _format_strings(self):
formatter = (self.formatter or
_get_format_timedelta64(self.values, nat_rep=self.nat_rep,
box=self.box))
fmt_values = np.array([formatter(x) for x in self.values])
return fmt_values
def _get_format_timedelta64(values, nat_rep='NaT', box=False):
"""
Return a formatter function for a range of timedeltas.
All values are rendered with the same format (days-only, sub-day or long).
If box is True, the result is wrapped in quotes.
"""
values_int = values.astype(np.int64)
consider_values = values_int != iNaT
one_day_nanos = (86400 * 1e9)
even_days = np.logical_and(consider_values,
values_int % one_day_nanos != 0).sum() == 0
all_sub_day = np.logical_and(
consider_values, np.abs(values_int) >= one_day_nanos).sum() == 0
if even_days:
format = None
elif all_sub_day:
format = 'sub_day'
else:
format = 'long'
def _formatter(x):
if x is None or (is_scalar(x) and isna(x)):
return nat_rep
if not isinstance(x, Timedelta):
x = Timedelta(x)
result = x._repr_base(format=format)
if box:
result = "'{res}'".format(res=result)
return result
return _formatter
def _make_fixed_width(strings, justify='right', minimum=None, adj=None):
if len(strings) == 0 or justify == 'all':
return strings
if adj is None:
adj = _get_adjustment()
max_len = max(adj.len(x) for x in strings)
if minimum is not None:
max_len = max(minimum, max_len)
conf_max = get_option("display.max_colwidth")
if conf_max is not None and max_len > conf_max:
max_len = conf_max
def just(x):
if conf_max is not None:
if (conf_max > 3) & (adj.len(x) > max_len):
x = x[:max_len - 3] + '...'
return x
strings = [just(x) for x in strings]
result = adj.justify(strings, max_len, mode=justify)
return result
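# Minimal sketch of the behaviour (illustrative, default right justification):
# _make_fixed_width(['a', 'bbb']) -> ['  a', 'bbb'], while strings wider than
# display.max_colwidth are truncated with a trailing '...'.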
def _trim_zeros(str_floats, na_rep='NaN'):
"""
Trims trailing zeros, leaving just one after the decimal point if need be.
"""
trimmed = str_floats
def _cond(values):
non_na = [x for x in values if x != na_rep]
return (len(non_na) > 0 and all(x.endswith('0') for x in non_na) and
not (any(('e' in x) or ('E' in x) for x in non_na)))
while _cond(trimmed):
trimmed = [x[:-1] if x != na_rep else x for x in trimmed]
# leave one 0 after the decimal point if need be.
return [x + "0" if x.endswith('.') and x != na_rep else x for x in trimmed]
def single_column_table(column, align=None, style=None):
table = '<table'
if align is not None:
table += (' align="{align}"'.format(align=align))
if style is not None:
table += (' style="{style}"'.format(style=style))
table += '><tbody>'
for i in column:
table += ('<tr><td>{i!s}</td></tr>'.format(i=i))
table += '</tbody></table>'
return table
def single_row_table(row): # pragma: no cover
table = '<table><tbody><tr>'
for i in row:
table += ('<td>{i!s}</td>'.format(i=i))
table += '</tr></tbody></table>'
return table
def _has_names(index):
if isinstance(index, MultiIndex):
return _any_not_none(*index.names)
else:
return index.name is not None
class EngFormatter(object):
"""
Formats float values according to engineering format.
Based on matplotlib.ticker.EngFormatter
"""
# The SI engineering prefixes
ENG_PREFIXES = {
-24: "y",
-21: "z",
-18: "a",
-15: "f",
-12: "p",
-9: "n",
-6: "u",
-3: "m",
0: "",
3: "k",
6: "M",
9: "G",
12: "T",
15: "P",
18: "E",
21: "Z",
24: "Y"
}
def __init__(self, accuracy=None, use_eng_prefix=False):
self.accuracy = accuracy
self.use_eng_prefix = use_eng_prefix
def __call__(self, num):
""" Formats a number in engineering notation, appending a letter
representing the power of 1000 of the original number. Some examples:
>>> format_eng(0) # for self.accuracy = 0
' 0'
>>> format_eng(1000000) # for self.accuracy = 1,
# self.use_eng_prefix = True
' 1.0M'
>>> format_eng("-1e-6") # for self.accuracy = 2
# self.use_eng_prefix = False
'-1.00E-06'
@param num: the value to represent
@type num: either a numeric value or a string that can be converted to
a numeric value (as per decimal.Decimal constructor)
@return: engineering formatted string
"""
import decimal
import math
dnum = decimal.Decimal(str(num))
if decimal.Decimal.is_nan(dnum):
return 'NaN'
if decimal.Decimal.is_infinite(dnum):
return 'inf'
sign = 1
if dnum < 0: # pragma: no cover
sign = -1
dnum = -dnum
if dnum != 0:
pow10 = decimal.Decimal(int(math.floor(dnum.log10() / 3) * 3))
else:
pow10 = decimal.Decimal(0)
pow10 = pow10.min(max(self.ENG_PREFIXES.keys()))
pow10 = pow10.max(min(self.ENG_PREFIXES.keys()))
int_pow10 = int(pow10)
if self.use_eng_prefix:
prefix = self.ENG_PREFIXES[int_pow10]
else:
if int_pow10 < 0:
prefix = 'E-{pow10:02d}'.format(pow10=-int_pow10)
else:
prefix = 'E+{pow10:02d}'.format(pow10=int_pow10)
mant = sign * dnum / (10**pow10)
if self.accuracy is None: # pragma: no cover
format_str = u("{mant: g}{prefix}")
else:
format_str = (u("{{mant: .{acc:d}f}}{{prefix}}")
.format(acc=self.accuracy))
formatted = format_str.format(mant=mant, prefix=prefix)
return formatted # .strip()
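# Rough usage sketch (mirrors the docstring examples above):
# >>> EngFormatter(accuracy=1, use_eng_prefix=True)(1000000)
# ' 1.0M'
# >>> EngFormatter(accuracy=2, use_eng_prefix=False)("-1e-6")
# '-1.00E-06'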
def set_eng_float_format(accuracy=3, use_eng_prefix=False):
"""
Alter default behavior on how float is formatted in DataFrame.
Format float in engineering format. By accuracy, we mean the number of
decimal digits after the decimal point.
See also EngFormatter.
"""
set_option("display.float_format", EngFormatter(accuracy, use_eng_prefix))
set_option("display.column_space", max(12, accuracy + 9))
def _put_lines(buf, lines):
if any(isinstance(x, compat.text_type) for x in lines):
lines = [compat.text_type(x) for x in lines]
buf.write('\n'.join(lines))
def _binify(cols, line_width):
adjoin_width = 1
bins = []
curr_width = 0
i_last_column = len(cols) - 1
for i, w in enumerate(cols):
w_adjoined = w + adjoin_width
curr_width += w_adjoined
if i_last_column == i:
wrap = curr_width + 1 > line_width and i > 0
else:
wrap = curr_width + 2 > line_width and i > 0
if wrap:
bins.append(i)
curr_width = w_adjoined
bins.append(len(cols))
return bins
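# Illustrative trace: _binify([10, 10, 10], line_width=25) -> [2, 3], i.e. the
# first two columns share one chunk and the last column starts a new one.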
| bsd-3-clause |
yiakwy/PyMatrix3 | setup.py | 1 | 1891 | """
PyMatrix is for easier multi-Dimension matrix manipulation iterface.
It provide basic Matrix utilities and vector based operator for easy access and compute elements.
PyMatrix will act as glue between pure mathmatical interface and fast numpy computation core. You deem vector as row or col vector.
With this interface your life will be easier. This is originally designed in 2014 when I am not satisfied with numpy, pandas and so on.
Use them altogether, you will find more about the package.
"""
import os
import sys
if sys.version_info[:2] < (3,4):
raise Exception("Python version >= 3.4 required, we might consider support older version in the future")
CLASSIFIERS = """
Development Status :: 5 - Production/Stable
Intended Audience :: Science/Research
Intended Audience :: Developers
License :: OSI Approved
Programming Language :: C
Programming Language :: Python
Programming Language :: Python :: 3.4
Programming Language :: Python :: 3.5
Programming Language :: Python :: Implementation :: CPython
Topic :: Software Development
Topic :: Scientific/Engineering
Operating System :: Microsoft :: Windows
Operating System :: POSIX
Operating System :: Unix
Operating System :: MacOS
"""
#from distutils.core import setup
from setuptools import setup
meta = dict(name='matrix_array',
version='%s.%s.%s'%(0,1,0),# Major, Minor, Maintenance
description='N-Dimension Matrix Object Container for ubiquitous purposes',
long_description=__doc__,
download_url="https://github.com/yiakwy/PyMatrix3.git",
license="MIT",
classifiers=[_f for _f in CLASSIFIERS.split('\n') if _f],
platforms=["Windows", "Linux", "Solaris", "Mac OS-X", "Unix"],
author='Wang Lei',
author_email="[email protected]",
url="www.yiak.co",
packages=['matrix_array', 'utils'],
)
def setup_package():
setup(**meta)
if __name__ == "__main__":
setup_package()
| mit |
kdebrab/pandas | pandas/tests/indexes/datetimes/test_astype.py | 3 | 13529 | import pytest
import pytz
import dateutil
import numpy as np
from datetime import datetime
from dateutil.tz import tzlocal
import pandas as pd
import pandas.util.testing as tm
from pandas import (DatetimeIndex, date_range, Series, NaT, Index, Timestamp,
Int64Index, Period)
class TestDatetimeIndex(object):
def test_astype(self):
# GH 13149, GH 13209
idx = DatetimeIndex(['2016-05-16', 'NaT', NaT, np.NaN])
result = idx.astype(object)
expected = Index([Timestamp('2016-05-16')] + [NaT] * 3, dtype=object)
tm.assert_index_equal(result, expected)
result = idx.astype(int)
expected = Int64Index([1463356800000000000] +
[-9223372036854775808] * 3, dtype=np.int64)
tm.assert_index_equal(result, expected)
rng = date_range('1/1/2000', periods=10)
result = rng.astype('i8')
tm.assert_index_equal(result, Index(rng.asi8))
tm.assert_numpy_array_equal(result.values, rng.asi8)
def test_astype_with_tz(self):
# with tz
rng = date_range('1/1/2000', periods=10, tz='US/Eastern')
result = rng.astype('datetime64[ns]')
expected = (date_range('1/1/2000', periods=10,
tz='US/Eastern')
.tz_convert('UTC').tz_localize(None))
tm.assert_index_equal(result, expected)
# BUG#10442 : testing astype(str) is correct for Series/DatetimeIndex
result = pd.Series(pd.date_range('2012-01-01', periods=3)).astype(str)
expected = pd.Series(
['2012-01-01', '2012-01-02', '2012-01-03'], dtype=object)
tm.assert_series_equal(result, expected)
result = Series(pd.date_range('2012-01-01', periods=3,
tz='US/Eastern')).astype(str)
expected = Series(['2012-01-01 00:00:00-05:00',
'2012-01-02 00:00:00-05:00',
'2012-01-03 00:00:00-05:00'],
dtype=object)
tm.assert_series_equal(result, expected)
# GH 18951: tz-aware to tz-aware
idx = date_range('20170101', periods=4, tz='US/Pacific')
result = idx.astype('datetime64[ns, US/Eastern]')
expected = date_range('20170101 03:00:00', periods=4, tz='US/Eastern')
tm.assert_index_equal(result, expected)
# GH 18951: tz-naive to tz-aware
idx = date_range('20170101', periods=4)
result = idx.astype('datetime64[ns, US/Eastern]')
expected = date_range('20170101', periods=4, tz='US/Eastern')
tm.assert_index_equal(result, expected)
def test_astype_str_compat(self):
# GH 13149, GH 13209
# verify that we are returning NaT as a string (and not unicode)
idx = DatetimeIndex(['2016-05-16', 'NaT', NaT, np.NaN])
result = idx.astype(str)
expected = Index(['2016-05-16', 'NaT', 'NaT', 'NaT'], dtype=object)
tm.assert_index_equal(result, expected)
def test_astype_str(self):
# test astype string - #10442
result = date_range('2012-01-01', periods=4,
name='test_name').astype(str)
expected = Index(['2012-01-01', '2012-01-02', '2012-01-03',
'2012-01-04'], name='test_name', dtype=object)
tm.assert_index_equal(result, expected)
# test astype string with tz and name
result = date_range('2012-01-01', periods=3, name='test_name',
tz='US/Eastern').astype(str)
expected = Index(['2012-01-01 00:00:00-05:00',
'2012-01-02 00:00:00-05:00',
'2012-01-03 00:00:00-05:00'],
name='test_name', dtype=object)
tm.assert_index_equal(result, expected)
# test astype string with freqH and name
result = date_range('1/1/2011', periods=3, freq='H',
name='test_name').astype(str)
expected = Index(['2011-01-01 00:00:00', '2011-01-01 01:00:00',
'2011-01-01 02:00:00'],
name='test_name', dtype=object)
tm.assert_index_equal(result, expected)
# test astype string with freqH and timezone
result = date_range('3/6/2012 00:00', periods=2, freq='H',
tz='Europe/London', name='test_name').astype(str)
expected = Index(['2012-03-06 00:00:00+00:00',
'2012-03-06 01:00:00+00:00'],
dtype=object, name='test_name')
tm.assert_index_equal(result, expected)
def test_astype_datetime64(self):
# GH 13149, GH 13209
idx = DatetimeIndex(['2016-05-16', 'NaT', NaT, np.NaN])
result = idx.astype('datetime64[ns]')
tm.assert_index_equal(result, idx)
assert result is not idx
result = idx.astype('datetime64[ns]', copy=False)
tm.assert_index_equal(result, idx)
assert result is idx
idx_tz = DatetimeIndex(['2016-05-16', 'NaT', NaT, np.NaN], tz='EST')
result = idx_tz.astype('datetime64[ns]')
expected = DatetimeIndex(['2016-05-16 05:00:00', 'NaT', 'NaT', 'NaT'],
dtype='datetime64[ns]')
tm.assert_index_equal(result, expected)
def test_astype_object(self):
rng = date_range('1/1/2000', periods=20)
casted = rng.astype('O')
exp_values = list(rng)
tm.assert_index_equal(casted, Index(exp_values, dtype=np.object_))
assert casted.tolist() == exp_values
@pytest.mark.parametrize('tz', [None, 'Asia/Tokyo'])
def test_astype_object_tz(self, tz):
idx = pd.date_range(start='2013-01-01', periods=4, freq='M',
name='idx', tz=tz)
expected_list = [Timestamp('2013-01-31', tz=tz),
Timestamp('2013-02-28', tz=tz),
Timestamp('2013-03-31', tz=tz),
Timestamp('2013-04-30', tz=tz)]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.astype(object)
tm.assert_index_equal(result, expected)
assert idx.tolist() == expected_list
def test_astype_object_with_nat(self):
idx = DatetimeIndex([datetime(2013, 1, 1), datetime(2013, 1, 2),
pd.NaT, datetime(2013, 1, 4)], name='idx')
expected_list = [Timestamp('2013-01-01'),
Timestamp('2013-01-02'), pd.NaT,
Timestamp('2013-01-04')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.astype(object)
tm.assert_index_equal(result, expected)
assert idx.tolist() == expected_list
@pytest.mark.parametrize('dtype', [
float, 'timedelta64', 'timedelta64[ns]', 'datetime64',
'datetime64[D]'])
def test_astype_raises(self, dtype):
# GH 13149, GH 13209
idx = DatetimeIndex(['2016-05-16', 'NaT', NaT, np.NaN])
msg = 'Cannot cast DatetimeIndex to dtype'
with tm.assert_raises_regex(TypeError, msg):
idx.astype(dtype)
def test_index_convert_to_datetime_array(self):
def _check_rng(rng):
converted = rng.to_pydatetime()
assert isinstance(converted, np.ndarray)
for x, stamp in zip(converted, rng):
assert isinstance(x, datetime)
assert x == stamp.to_pydatetime()
assert x.tzinfo == stamp.tzinfo
rng = date_range('20090415', '20090519')
rng_eastern = date_range('20090415', '20090519', tz='US/Eastern')
rng_utc = date_range('20090415', '20090519', tz='utc')
_check_rng(rng)
_check_rng(rng_eastern)
_check_rng(rng_utc)
def test_index_convert_to_datetime_array_explicit_pytz(self):
def _check_rng(rng):
converted = rng.to_pydatetime()
assert isinstance(converted, np.ndarray)
for x, stamp in zip(converted, rng):
assert isinstance(x, datetime)
assert x == stamp.to_pydatetime()
assert x.tzinfo == stamp.tzinfo
rng = date_range('20090415', '20090519')
rng_eastern = date_range('20090415', '20090519',
tz=pytz.timezone('US/Eastern'))
rng_utc = date_range('20090415', '20090519', tz=pytz.utc)
_check_rng(rng)
_check_rng(rng_eastern)
_check_rng(rng_utc)
def test_index_convert_to_datetime_array_dateutil(self):
def _check_rng(rng):
converted = rng.to_pydatetime()
assert isinstance(converted, np.ndarray)
for x, stamp in zip(converted, rng):
assert isinstance(x, datetime)
assert x == stamp.to_pydatetime()
assert x.tzinfo == stamp.tzinfo
rng = date_range('20090415', '20090519')
rng_eastern = date_range('20090415', '20090519',
tz='dateutil/US/Eastern')
rng_utc = date_range('20090415', '20090519', tz=dateutil.tz.tzutc())
_check_rng(rng)
_check_rng(rng_eastern)
_check_rng(rng_utc)
@pytest.mark.parametrize('tz, dtype', [
['US/Pacific', 'datetime64[ns, US/Pacific]'],
[None, 'datetime64[ns]']])
def test_integer_index_astype_datetime(self, tz, dtype):
# GH 20997, 20964
val = [pd.Timestamp('2018-01-01', tz=tz).value]
result = pd.Index(val).astype(dtype)
expected = pd.DatetimeIndex(['2018-01-01'], tz=tz)
tm.assert_index_equal(result, expected)
class TestToPeriod(object):
def setup_method(self, method):
data = [Timestamp('2007-01-01 10:11:12.123456Z'),
Timestamp('2007-01-01 10:11:13.789123Z')]
self.index = DatetimeIndex(data)
def test_to_period_millisecond(self):
index = self.index
period = index.to_period(freq='L')
assert 2 == len(period)
assert period[0] == Period('2007-01-01 10:11:12.123Z', 'L')
assert period[1] == Period('2007-01-01 10:11:13.789Z', 'L')
def test_to_period_microsecond(self):
index = self.index
period = index.to_period(freq='U')
assert 2 == len(period)
assert period[0] == Period('2007-01-01 10:11:12.123456Z', 'U')
assert period[1] == Period('2007-01-01 10:11:13.789123Z', 'U')
def test_to_period_tz_pytz(self):
from pytz import utc as UTC
xp = date_range('1/1/2000', '4/1/2000').to_period()
ts = date_range('1/1/2000', '4/1/2000', tz='US/Eastern')
result = ts.to_period()[0]
expected = ts[0].to_period()
assert result == expected
tm.assert_index_equal(ts.to_period(), xp)
ts = date_range('1/1/2000', '4/1/2000', tz=UTC)
result = ts.to_period()[0]
expected = ts[0].to_period()
assert result == expected
tm.assert_index_equal(ts.to_period(), xp)
ts = date_range('1/1/2000', '4/1/2000', tz=tzlocal())
result = ts.to_period()[0]
expected = ts[0].to_period()
assert result == expected
tm.assert_index_equal(ts.to_period(), xp)
def test_to_period_tz_explicit_pytz(self):
xp = date_range('1/1/2000', '4/1/2000').to_period()
ts = date_range('1/1/2000', '4/1/2000', tz=pytz.timezone('US/Eastern'))
result = ts.to_period()[0]
expected = ts[0].to_period()
assert result == expected
tm.assert_index_equal(ts.to_period(), xp)
ts = date_range('1/1/2000', '4/1/2000', tz=pytz.utc)
result = ts.to_period()[0]
expected = ts[0].to_period()
assert result == expected
tm.assert_index_equal(ts.to_period(), xp)
ts = date_range('1/1/2000', '4/1/2000', tz=tzlocal())
result = ts.to_period()[0]
expected = ts[0].to_period()
assert result == expected
tm.assert_index_equal(ts.to_period(), xp)
def test_to_period_tz_dateutil(self):
xp = date_range('1/1/2000', '4/1/2000').to_period()
ts = date_range('1/1/2000', '4/1/2000', tz='dateutil/US/Eastern')
result = ts.to_period()[0]
expected = ts[0].to_period()
assert result == expected
tm.assert_index_equal(ts.to_period(), xp)
ts = date_range('1/1/2000', '4/1/2000', tz=dateutil.tz.tzutc())
result = ts.to_period()[0]
expected = ts[0].to_period()
assert result == expected
tm.assert_index_equal(ts.to_period(), xp)
ts = date_range('1/1/2000', '4/1/2000', tz=tzlocal())
result = ts.to_period()[0]
expected = ts[0].to_period()
assert result == expected
tm.assert_index_equal(ts.to_period(), xp)
def test_to_period_nofreq(self):
idx = DatetimeIndex(['2000-01-01', '2000-01-02', '2000-01-04'])
pytest.raises(ValueError, idx.to_period)
idx = DatetimeIndex(['2000-01-01', '2000-01-02', '2000-01-03'],
freq='infer')
assert idx.freqstr == 'D'
expected = pd.PeriodIndex(['2000-01-01', '2000-01-02',
'2000-01-03'], freq='D')
tm.assert_index_equal(idx.to_period(), expected)
# GH 7606
idx = DatetimeIndex(['2000-01-01', '2000-01-02', '2000-01-03'])
assert idx.freqstr is None
tm.assert_index_equal(idx.to_period(), expected)
| bsd-3-clause |
victorbergelin/scikit-learn | examples/mixture/plot_gmm_pdf.py | 284 | 1528 | """
=============================================
Density Estimation for a mixture of Gaussians
=============================================
Plot the density estimation of a mixture of two Gaussians. Data is
generated from two Gaussians with different centers and covariance
matrices.
"""
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm
from sklearn import mixture
n_samples = 300
# generate random sample, two components
np.random.seed(0)
# generate spherical data centered on (20, 20)
shifted_gaussian = np.random.randn(n_samples, 2) + np.array([20, 20])
# generate zero centered stretched Gaussian data
C = np.array([[0., -0.7], [3.5, .7]])
stretched_gaussian = np.dot(np.random.randn(n_samples, 2), C)
# concatenate the two datasets into the final training set
X_train = np.vstack([shifted_gaussian, stretched_gaussian])
# fit a Gaussian Mixture Model with two components
clf = mixture.GMM(n_components=2, covariance_type='full')
clf.fit(X_train)
# display predicted scores by the model as a contour plot
x = np.linspace(-20.0, 30.0)
y = np.linspace(-20.0, 40.0)
X, Y = np.meshgrid(x, y)
XX = np.array([X.ravel(), Y.ravel()]).T
Z = -clf.score_samples(XX)[0]
Z = Z.reshape(X.shape)
CS = plt.contour(X, Y, Z, norm=LogNorm(vmin=1.0, vmax=1000.0),
levels=np.logspace(0, 3, 10))
CB = plt.colorbar(CS, shrink=0.8, extend='both')
plt.scatter(X_train[:, 0], X_train[:, 1], .8)
plt.title('Negative log-likelihood predicted by a GMM')
plt.axis('tight')
plt.show()
| bsd-3-clause |
sanjanalab/GUIDES | static/data/gtex/gtex_mouse/generate_exons.py | 2 | 2101 | ### Generate exon start and stop sites
### Mouse
import pandas as pd
import pickle
import time
start_time = time.time()
# Create ENSG -> cdsStart, cdsStop mapping
# this is then used inside exon_info generator.
# Find all the CCDS associated with some ENSG.
ccds_ensg_map = {}
with open('ENSMUSG-CCDS.txt', 'r') as ensg_ccds:
for line in ensg_ccds:
comps = line.strip('\n').split('\t')
ensg = comps[0]
ccds = comps[1] + '.'
if len(ccds) > 1:
if ensg not in ccds_ensg_map:
ccds_ensg_map[ensg] = [ccds]
else:
ccds_ensg_map[ensg].append(ccds)
# CCDS -> range mapping (str |-> list)
with open('CCDS_coords.csv', 'r') as ccds_coords_file:
df = pd.read_csv(ccds_coords_file, sep="\t", header=0)
for i, row in df.iterrows():
if i == 10: break  # NOTE: debugging limit, only the first 10 rows are processed
starts_list = [int(num) for num in row['exonStarts'].split(',')[:-1]]
ends_list = [int(num) for num in row['exonEnds'].split(',')[:-1]]
# Expand to include intronic sequences (5 each side)
for k in range(len(starts_list)):
starts_list[k] -= 5
for k in range(len(ends_list)):
ends_list[k] += 5
# recombine into string
starts_list_str = ','.join([str(n) for n in starts_list]) + ','
ends_list_str = ','.join([str(n) for n in ends_list]) + ','
# reassign
df.set_value(i, 'exonStarts', starts_list_str)
df.set_value(i, 'exonEnds', ends_list_str)
# if we have ccds info...
if row['name'] in ccds_ensg_map:
df.set_value(i, 'name', ccds_ensg_map[row['name']])
else:
print "could not find in map", row['name']
# write exon_info
exon_info = df[["name", "chrom", "strand", "exonCount", "exonStarts", "exonEnds"]]
with open("results/exon_info_mus.p", "wb") as f:
pickle.dump(exon_info, f)
# write new refGene
df.to_csv('results/refGene_mus.txt', sep="\t", index=False, header=False)
end_time = time.time()
hours, rem = divmod(end_time-start_time, 3600)
minutes, seconds = divmod(rem, 60)
print "time elapsed"
print("{:0>2}:{:0>2}:{:05.2f}".format(int(hours),int(minutes),seconds))
| bsd-3-clause |
idbedead/RNA-sequence-tools | rsem_to_matrix.py | 2 | 2152 | import os
import fnmatch
import pandas as pd
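# Walk every directory in `paths`, collect the RSEM *.genes.results and
# *.isoforms.results files per sample folder, and merge their TPM / FPKM
# columns into gene- and isoform-level matrices written into the top-level
# input directory.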
paths = ['/Volumes/Drobo/Seq_data/rsem_pdgfra_and_pteng']
gene_dict_tpm = {}
gene_dict_fpkm = {}
iso_dict_tpm = {}
iso_dict_fpkm = {}
sample_list = []
sample_folder = []
for path_to_file in paths:
for root, dirnames, filenames in os.walk(path_to_file):
for f in filenames:
if '.genes.results' in f:
name = os.path.basename(root)
print(name)
gene_df = pd.read_csv(os.path.join(root,f), sep=None, engine='python')
index_gene = gene_df['gene_id']
gene_dict_tpm[name] = gene_df['TPM']
gene_dict_fpkm[name] = gene_df['FPKM']
sample_list.append(os.path.basename(root))
sample_folder.append(root)
if '.isoforms.results' in f:
name = os.path.basename(root)
iso_df = pd.read_csv(os.path.join(root,f), sep=None, engine='python')
index_iso = iso_df['transcript_id']
iso_dict_tpm[name] = iso_df['TPM']
iso_dict_fpkm[name] = iso_df['FPKM']
sample_df = pd.DataFrame.from_dict({'run':sample_list, 'folder':sample_folder})
gene_tpm_df = pd.DataFrame.from_dict(gene_dict_tpm)
gene_fpkm_df = pd.DataFrame.from_dict(gene_dict_fpkm)
iso_tpm_df = pd.DataFrame.from_dict(iso_dict_tpm)
iso_fpkm_df = pd.DataFrame.from_dict(iso_dict_fpkm)
gene_tpm_df.set_index(index_gene, inplace=True)
gene_fpkm_df.set_index(index_gene, inplace=True)
iso_tpm_df.set_index(index_iso, inplace=True)
iso_fpkm_df.set_index(index_iso, inplace=True)
sample_df.to_csv(os.path.join(path_to_file,os.path.basename(path_to_file)+'_samples.txt'), sep='\t')
gene_tpm_df.to_csv(os.path.join(path_to_file,os.path.basename(path_to_file)+'_gene_tpm_matrix.txt'), sep='\t')
gene_fpkm_df.to_csv(os.path.join(path_to_file,os.path.basename(path_to_file)+'_gene_fpkm_matrix.txt'), sep='\t')
iso_tpm_df.to_csv(os.path.join(path_to_file,os.path.basename(path_to_file)+'_isoform_tpm_matrix.txt'), sep='\t')
iso_fpkm_df.to_csv(os.path.join(path_to_file,os.path.basename(path_to_file)+'_isoform_fpkm_matrix.txt'), sep='\t')
| mit |
igormarfin/trading-with-python | lib/interactivebrokers.py | 77 | 18140 | """
Copyright: Jev Kuznetsov
Licence: BSD
Interface to interactive brokers together with gui widgets
"""
import sys
# import os
from time import sleep
from PyQt4.QtCore import (SIGNAL, SLOT)
from PyQt4.QtGui import (QApplication, QFileDialog, QDialog, QVBoxLayout, QHBoxLayout, QDialogButtonBox,
QTableView, QPushButton, QWidget, QLabel, QLineEdit, QGridLayout, QHeaderView)
import ib
from ib.ext.Contract import Contract
from ib.opt import ibConnection, message
from ib.ext.Order import Order
import logger as logger
from qtpandas import DataFrameModel, TableView
from eventSystem import Sender
import numpy as np
import pandas
from pandas import DataFrame, Index
from datetime import datetime
import os
import datetime as dt
import time
priceTicks = {1: 'bid', 2: 'ask', 4: 'last', 6: 'high', 7: 'low', 9: 'close', 14: 'open'}
timeFormat = "%Y%m%d %H:%M:%S"
dateFormat = "%Y%m%d"
def createContract(symbol, secType='STK', exchange='SMART', currency='USD'):
""" contract factory function """
contract = Contract()
contract.m_symbol = symbol
contract.m_secType = secType
contract.m_exchange = exchange
contract.m_currency = currency
return contract
def _str2datetime(s):
""" convert string to datetime """
return datetime.strptime(s, '%Y%m%d')
def readActivityFlex(fName):
"""
parse trade log in a csv file produced by IB 'Activity Flex Query'
the file should contain these columns:
['Symbol','TradeDate','Quantity','TradePrice','IBCommission']
Returns:
A DataFrame with parsed trade data
"""
import csv
rows = []
with open(fName, 'rb') as f:
reader = csv.reader(f)
for row in reader:
rows.append(row)
header = ['TradeDate', 'Symbol', 'Quantity', 'TradePrice', 'IBCommission']
types = dict(zip(header, [_str2datetime, str, int, float, float]))
idx = dict(zip(header, [rows[0].index(h) for h in header]))
data = dict(zip(header, [[] for h in header]))
for row in rows[1:]:
print row
for col in header:
val = types[col](row[idx[col]])
data[col].append(val)
return DataFrame(data)[header].sort(column='TradeDate')
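# Rough usage sketch (the file name below is illustrative):
# trades = readActivityFlex('activity_flex.csv')
# # -> DataFrame with TradeDate/Symbol/Quantity/TradePrice/IBCommission,
# # sorted by TradeDate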
class Subscriptions(DataFrameModel, Sender):
""" a data table containing price & subscription data """
def __init__(self, tws=None):
super(Subscriptions, self).__init__()
self.df = DataFrame() # this property holds the data in a table format
self._nextId = 1
self._id2symbol = {} # id-> symbol lookup dict
self._header = ['id', 'position', 'bid', 'ask', 'last'] # columns of the _data table
# register callbacks
if tws is not None:
tws.register(self.priceHandler, message.TickPrice)
tws.register(self.accountHandler, message.UpdatePortfolio)
def add(self, symbol, subId=None):
"""
Add a subscription to data table
return : subscription id
"""
if subId is None:
subId = self._nextId
data = dict(zip(self._header, [subId, 0, np.nan, np.nan, np.nan]))
row = DataFrame(data, index=Index([symbol]))
self.df = self.df.append(row[self._header]) # append data and set correct column order
self._nextId = subId + 1
self._rebuildIndex()
self.emit(SIGNAL("layoutChanged()"))
return subId
def priceHandler(self, msg):
""" handler function for price updates. register this with ibConnection class """
if priceTicks[msg.field] not in self._header: # do nothing for ticks that are not in _data table
return
self.df[priceTicks[msg.field]][self._id2symbol[msg.tickerId]] = msg.price
#notify viewer
col = self._header.index(priceTicks[msg.field])
row = self.df.index.tolist().index(self._id2symbol[msg.tickerId])
idx = self.createIndex(row, col)
self.emit(SIGNAL("dataChanged(QModelIndex,QModelIndex)"), idx, idx)
def accountHandler(self, msg):
if msg.contract.m_symbol in self.df.index.tolist():
self.df['position'][msg.contract.m_symbol] = msg.position
def _rebuildIndex(self):
""" udate lookup dictionary id-> symbol """
symbols = self.df.index.tolist()
ids = self.df['id'].values.tolist()
self._id2symbol = dict(zip(ids, symbols))
def __repr__(self):
return str(self.df)
class Broker(object):
"""
Broker class acts as a wrapper around ibConnection
from ibPy. It tracks current subscriptions and provides
data models to viewers.
"""
def __init__(self, name='broker'):
""" initialize broker class
"""
self.name = name
self.log = logger.getLogger(self.name)
self.log.debug('Initializing broker. Pandas version={0}'.format(pandas.__version__))
self.contracts = {} # a dict to keep track of subscribed contracts
self.tws = ibConnection() # tws interface
self.nextValidOrderId = None
self.dataModel = Subscriptions(self.tws) # data container
self.tws.registerAll(self.defaultHandler)
#self.tws.register(self.debugHandler,message.TickPrice)
self.tws.register(self.nextValidIdHandler, 'NextValidId')
self.log.debug('Connecting to tws')
self.tws.connect()
self.tws.reqAccountUpdates(True, '')
def subscribeStk(self, symbol, secType='STK', exchange='SMART', currency='USD'):
""" subscribe to stock data """
self.log.debug('Subscribing to ' + symbol)
# if symbol in self.data.symbols:
# print 'Already subscribed to {0}'.format(symbol)
# return
c = Contract()
c.m_symbol = symbol
c.m_secType = secType
c.m_exchange = exchange
c.m_currency = currency
subId = self.dataModel.add(symbol)
self.tws.reqMktData(subId, c, '', False)
self.contracts[symbol] = c
return subId
@property
def data(self):
return self.dataModel.df
def placeOrder(self, symbol, shares, limit=None, exchange='SMART', transmit=0):
""" place an order on already subscribed contract """
if symbol not in self.contracts.keys():
self.log.error("Can't place order, not subscribed to %s" % symbol)
return
action = {-1: 'SELL', 1: 'BUY'}
o = Order()
o.m_orderId = self.getOrderId()
o.m_action = action[cmp(shares, 0)]
o.m_totalQuantity = abs(shares)
o.m_transmit = transmit
if limit is not None:
o.m_orderType = 'LMT'
o.m_lmtPrice = limit
self.log.debug('Placing %s order for %i %s (id=%i)' % (o.m_action, o.m_totalQuantity, symbol, o.m_orderId))
self.tws.placeOrder(o.m_orderId, self.contracts[symbol], o)
def getOrderId(self):
self.nextValidOrderId += 1
return self.nextValidOrderId - 1
def unsubscribeStk(self, symbol):
self.log.debug('Function not implemented')
def disconnect(self):
self.tws.disconnect()
def __del__(self):
"""destructor, clean up """
print 'Broker is cleaning up after itself.'
self.tws.disconnect()
def debugHandler(self, msg):
print msg
def defaultHandler(self, msg):
""" default message handler """
#print msg.typeName
if msg.typeName == 'Error':
self.log.error(msg)
def nextValidIdHandler(self, msg):
self.nextValidOrderId = msg.orderId
self.log.debug('Next valid order id:{0}'.format(self.nextValidOrderId))
def saveData(self, fname):
""" save current dataframe to csv """
self.log.debug("Saving data to {0}".format(fname))
self.dataModel.df.to_csv(fname)
# def __getattr__(self, name):
# """ x.__getattr__('name') <==> x.name
# an easy way to call ibConnection methods
# @return named attribute from instance tws
# """
# return getattr(self.tws, name)
class _HistDataHandler(object):
""" handles incoming messages """
def __init__(self, tws):
self._log = logger.getLogger('DH')
tws.register(self.msgHandler, message.HistoricalData)
self.reset()
def reset(self):
self._log.debug('Resetting data')
self.dataReady = False
self._timestamp = []
self._data = {'open': [], 'high': [], 'low': [], 'close': [], 'volume': [], 'count': [], 'WAP': []}
def msgHandler(self, msg):
#print '[msg]', msg
if msg.date[:8] == 'finished':
self._log.debug('Data received')
self.dataReady = True
return
if len(msg.date) > 8:
self._timestamp.append(dt.datetime.strptime(msg.date, timeFormat))
else:
self._timestamp.append(dt.datetime.strptime(msg.date, dateFormat))
for k in self._data.keys():
self._data[k].append(getattr(msg, k))
@property
def data(self):
""" return downloaded data as a DataFrame """
df = DataFrame(data=self._data, index=Index(self._timestamp))
return df
class Downloader(object):
def __init__(self, debug=False):
self._log = logger.getLogger('DLD')
self._log.debug(
'Initializing data downloader. Pandas version={0}, ibpy version:{1}'.format(pandas.__version__, ib.version))
self.tws = ibConnection()
self._dataHandler = _HistDataHandler(self.tws)
if debug:
self.tws.registerAll(self._debugHandler)
self.tws.unregister(self._debugHandler, message.HistoricalData)
self._log.debug('Connecting to tws')
self.tws.connect()
self._timeKeeper = TimeKeeper() # keep track of past requests
self._reqId = 1 # current request id
def _debugHandler(self, msg):
print '[debug]', msg
def requestData(self, contract, endDateTime, durationStr='1 D', barSizeSetting='30 secs', whatToShow='TRADES',
useRTH=1, formatDate=1):
self._log.debug('Requesting data for %s end time %s.' % (contract.m_symbol, endDateTime))
while self._timeKeeper.nrRequests(timeSpan=600) > 59:
print 'Too many requests done. Waiting... '
time.sleep(10)
self._timeKeeper.addRequest()
self._dataHandler.reset()
self.tws.reqHistoricalData(self._reqId, contract, endDateTime, durationStr, barSizeSetting, whatToShow, useRTH,
formatDate)
self._reqId += 1
#wait for data
startTime = time.time()
timeout = 3
while not self._dataHandler.dataReady and (time.time() - startTime < timeout):
sleep(2)
if not self._dataHandler.dataReady:
self._log.error('Data timeout')
print self._dataHandler.data
return self._dataHandler.data
def getIntradayData(self, contract, dateTuple):
""" get full day data on 1-s interval
date: a tuple of (yyyy,mm,dd)
"""
openTime = dt.datetime(*dateTuple) + dt.timedelta(hours=16)
closeTime = dt.datetime(*dateTuple) + dt.timedelta(hours=22)
timeRange = pandas.date_range(openTime, closeTime, freq='30min')
datasets = []
for t in timeRange:
datasets.append(self.requestData(contract, t.strftime(timeFormat)))
return pandas.concat(datasets)
def disconnect(self):
self.tws.disconnect()
class TimeKeeper(object):
def __init__(self):
self._log = logger.getLogger('TK')
dataDir = os.path.expanduser('~') + '/twpData'
if not os.path.exists(dataDir):
os.mkdir(dataDir)
self._timeFormat = "%Y%m%d %H:%M:%S"
self.dataFile = os.path.normpath(os.path.join(dataDir, 'requests.txt'))
self._log.debug('Data file: {0}'.format(self.dataFile))
def addRequest(self):
""" adds a timestamp of current request"""
with open(self.dataFile, 'a') as f:
f.write(dt.datetime.now().strftime(self._timeFormat) + '\n')
def nrRequests(self, timeSpan=600):
""" return number of requests in past timespan (s) """
delta = dt.timedelta(seconds=timeSpan)
now = dt.datetime.now()
requests = 0
with open(self.dataFile, 'r') as f:
lines = f.readlines()
for line in lines:
if now - dt.datetime.strptime(line.strip(), self._timeFormat) < delta:
requests += 1
if requests == 0: # erase all contents if no requests are relevant
open(self.dataFile, 'w').close()
self._log.debug('past requests: {0}'.format(requests))
return requests
#---------------test functions-----------------
def dummyHandler(msg):
print msg
def testConnection():
""" a simple test to check working of streaming prices etc """
tws = ibConnection()
tws.registerAll(dummyHandler)
tws.connect()
c = createContract('SPY')
tws.reqMktData(1, c, '', False)
sleep(3)
print 'testConnection done.'
def testSubscriptions():
s = Subscriptions()
s.add('SPY')
#s.add('XLE')
print s
def testBroker():
b = Broker()
sleep(2)
b.subscribeStk('SPY')
b.subscribeStk('XLE')
b.subscribeStk('GOOG')
b.placeOrder('ABC', 125, 55.1)
sleep(3)
return b
#---------------------GUI stuff--------------------------------------------
class AddSubscriptionDlg(QDialog):
def __init__(self, parent=None):
super(AddSubscriptionDlg, self).__init__(parent)
symbolLabel = QLabel('Symbol')
self.symbolEdit = QLineEdit()
secTypeLabel = QLabel('secType')
self.secTypeEdit = QLineEdit('STK')
exchangeLabel = QLabel('exchange')
self.exchangeEdit = QLineEdit('SMART')
currencyLabel = QLabel('currency')
self.currencyEdit = QLineEdit('USD')
buttonBox = QDialogButtonBox(QDialogButtonBox.Ok |
QDialogButtonBox.Cancel)
lay = QGridLayout()
lay.addWidget(symbolLabel, 0, 0)
lay.addWidget(self.symbolEdit, 0, 1)
lay.addWidget(secTypeLabel, 1, 0)
lay.addWidget(self.secTypeEdit, 1, 1)
lay.addWidget(exchangeLabel, 2, 0)
lay.addWidget(self.exchangeEdit, 2, 1)
lay.addWidget(currencyLabel, 3, 0)
lay.addWidget(self.currencyEdit, 3, 1)
lay.addWidget(buttonBox, 4, 0, 1, 2)
self.setLayout(lay)
self.connect(buttonBox, SIGNAL("accepted()"),
self, SLOT("accept()"))
self.connect(buttonBox, SIGNAL("rejected()"),
self, SLOT("reject()"))
self.setWindowTitle("Add subscription")
class BrokerWidget(QWidget):
def __init__(self, broker, parent=None):
super(BrokerWidget, self).__init__(parent)
self.broker = broker
self.dataTable = TableView()
self.dataTable.setModel(self.broker.dataModel)
self.dataTable.horizontalHeader().setResizeMode(QHeaderView.Stretch)
#self.dataTable.resizeColumnsToContents()
dataLabel = QLabel('Price Data')
dataLabel.setBuddy(self.dataTable)
dataLayout = QVBoxLayout()
dataLayout.addWidget(dataLabel)
dataLayout.addWidget(self.dataTable)
addButton = QPushButton("&Add Symbol")
saveDataButton = QPushButton("&Save Data")
#deleteButton = QPushButton("&Delete")
buttonLayout = QVBoxLayout()
buttonLayout.addWidget(addButton)
buttonLayout.addWidget(saveDataButton)
buttonLayout.addStretch()
layout = QHBoxLayout()
layout.addLayout(dataLayout)
layout.addLayout(buttonLayout)
self.setLayout(layout)
self.connect(addButton, SIGNAL('clicked()'), self.addSubscription)
self.connect(saveDataButton, SIGNAL('clicked()'), self.saveData)
#self.connect(deleteButton,SIGNAL('clicked()'),self.deleteSubscription)
def addSubscription(self):
dialog = AddSubscriptionDlg(self)
if dialog.exec_():
self.broker.subscribeStk(str(dialog.symbolEdit.text()), str(dialog.secTypeEdit.text()),
str(dialog.exchangeEdit.text()), str(dialog.currencyEdit.text()))
def saveData(self):
""" save data to a .csv file """
fname = unicode(QFileDialog.getSaveFileName(self, caption="Save data to csv", filter='*.csv'))
if fname:
self.broker.saveData(fname)
# def deleteSubscription(self):
# pass
class Form(QDialog):
def __init__(self, parent=None):
super(Form, self).__init__(parent)
self.resize(640, 480)
self.setWindowTitle('Broker test')
self.broker = Broker()
self.broker.subscribeStk('SPY')
self.broker.subscribeStk('XLE')
self.broker.subscribeStk('GOOG')
brokerWidget = BrokerWidget(self.broker, self)
lay = QVBoxLayout()
lay.addWidget(brokerWidget)
self.setLayout(lay)
def startGui():
app = QApplication(sys.argv)
form = Form()
form.show()
app.exec_()
if __name__ == "__main__":
import ib
print 'ibpy version:', ib.version
#testConnection()
#testBroker()
#testSubscriptions()
print message.messageTypeNames()
startGui()
print 'All done'
| bsd-3-clause |
jeremykid/FunAlgorithm | python_practice/linearRegression.py | 1 | 2601 | # Reference https://glowingpython.blogspot.ca/2012/03/linear-regression-with-numpy.html
from numpy import arange,array,ones,linalg
from pylab import plot,show
xi = arange(0,9)
A = array([ xi, ones(9)])
# linearly generated sequence
y = [19, 20, 20.5, 21.5, 22, 23, 23, 25.5, 24]
w = linalg.lstsq(A.T,y)[0] # obtaining the parameters
# plotting the line
line = w[0]*xi+w[1] # regression line
plot(xi,line,'r-',xi,y,'o')
show()
from numpy import arange,array,ones#,random,linalg
from pylab import plot,show
from scipy import stats
xi = arange(0,9)
A = array([ xi, ones(9)])
# linearly generated sequence
y = [19, 20, 20.5, 21.5, 22, 23, 23, 25.5, 24]
slope, intercept, r_value, p_value, std_err = stats.linregress(xi,y)
print 'r value', r_value
print 'p_value', p_value
print 'standard deviation', std_err
line = slope*xi+intercept
plot(xi,line,'r-',xi,y,'o')
show()
# TensorFlow
import tempfile
import urllib
train_file = tempfile.NamedTemporaryFile()
test_file = tempfile.NamedTemporaryFile()
urllib.urlretrieve("https://archive.ics.uci.edu/ml/machine-learning-databases/adult/adult.data", train_file.name)
urllib.urlretrieve("https://archive.ics.uci.edu/ml/machine-learning-databases/adult/adult.test", test_file.name)
import pandas as pd
import tensorflow as tf  # needed below for tf.gfile, tf.estimator and tf.feature_column
CSV_COLUMNS = [
"age", "workclass", "fnlwgt", "education", "education_num",
"marital_status", "occupation", "relationship", "race", "gender",
"capital_gain", "capital_loss", "hours_per_week", "native_country",
"income_bracket"]
df_train = pd.read_csv(train_file.name, names=CSV_COLUMNS, skipinitialspace=True)
df_test = pd.read_csv(test_file.name, names=CSV_COLUMNS, skipinitialspace=True, skiprows=1)
train_labels = (df_train["income_bracket"].apply(lambda x: ">50K" in x)).astype(int)
test_labels = (df_test["income_bracket"].apply(lambda x: ">50K" in x)).astype(int)
def input_fn(data_file, num_epochs, shuffle):
"""Input builder function."""
df_data = pd.read_csv(
tf.gfile.Open(data_file),
names=CSV_COLUMNS,
skipinitialspace=True,
engine="python",
skiprows=1)
# remove NaN elements
df_data = df_data.dropna(how="any", axis=0)
labels = df_data["income_bracket"].apply(lambda x: ">50K" in x).astype(int)
return tf.estimator.inputs.pandas_input_fn(
x=df_data,
y=labels,
batch_size=100,
num_epochs=num_epochs,
shuffle=shuffle,
num_threads=5)
gender = tf.feature_column.categorical_column_with_vocabulary_list(
"gender", ["Female", "Male"])
occupation = tf.feature_column.categorical_column_with_hash_bucket(
"occupation", hash_bucket_size=1000)
| mit |
huobaowangxi/scikit-learn | examples/tree/plot_tree_regression.py | 206 | 1476 | """
===================================================================
Decision Tree Regression
===================================================================
A 1D regression with a decision tree.
A :ref:`decision tree <tree>` is
used to fit a sine curve with additional noisy observations. As a result, it
learns local linear regressions approximating the sine curve.
We can see that if the maximum depth of the tree (controlled by the
`max_depth` parameter) is set too high, the decision tree learns overly fine
details of the training data and learns from the noise, i.e. it overfits.
"""
print(__doc__)
# Import the necessary modules and libraries
import numpy as np
from sklearn.tree import DecisionTreeRegressor
import matplotlib.pyplot as plt
# Create a random dataset
rng = np.random.RandomState(1)
X = np.sort(5 * rng.rand(80, 1), axis=0)
y = np.sin(X).ravel()
y[::5] += 3 * (0.5 - rng.rand(16))
# Fit regression model
regr_1 = DecisionTreeRegressor(max_depth=2)
regr_2 = DecisionTreeRegressor(max_depth=5)
regr_1.fit(X, y)
regr_2.fit(X, y)
# Predict
X_test = np.arange(0.0, 5.0, 0.01)[:, np.newaxis]
y_1 = regr_1.predict(X_test)
y_2 = regr_2.predict(X_test)
# Plot the results
plt.figure()
plt.scatter(X, y, c="k", label="data")
plt.plot(X_test, y_1, c="g", label="max_depth=2", linewidth=2)
plt.plot(X_test, y_2, c="r", label="max_depth=5", linewidth=2)
plt.xlabel("data")
plt.ylabel("target")
plt.title("Decision Tree Regression")
plt.legend()
plt.show()
| bsd-3-clause |
rbalda/neural_ocr | env/lib/python2.7/site-packages/matplotlib/backends/backend_ps.py | 4 | 62609 | """
A PostScript backend, which can produce both PostScript .ps and .eps
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from matplotlib.externals import six
from matplotlib.externals.six.moves import StringIO
import glob, math, os, shutil, sys, time
def _fn_name(): return sys._getframe(1).f_code.co_name
import io
try:
from hashlib import md5
except ImportError:
from md5 import md5 #Deprecated in 2.5
from tempfile import mkstemp
from matplotlib import verbose, __version__, rcParams, checkdep_ghostscript
from matplotlib._pylab_helpers import Gcf
from matplotlib.afm import AFM
from matplotlib.backend_bases import RendererBase, GraphicsContextBase,\
FigureManagerBase, FigureCanvasBase
from matplotlib.cbook import is_string_like, get_realpath_and_stat, \
is_writable_file_like, maxdict, file_requires_unicode
from matplotlib.figure import Figure
from matplotlib.font_manager import findfont, is_opentype_cff_font
from matplotlib.ft2font import FT2Font, KERNING_DEFAULT, LOAD_NO_HINTING
from matplotlib.ttconv import convert_ttf_to_ps
from matplotlib.mathtext import MathTextParser
from matplotlib._mathtext_data import uni2type1
from matplotlib.text import Text
from matplotlib.path import Path
from matplotlib import _path
from matplotlib.transforms import Affine2D
from matplotlib.backends.backend_mixed import MixedModeRenderer
import numpy as np
import binascii
import re
try:
set
except NameError:
from sets import Set as set
if sys.platform.startswith('win'): cmd_split = '&'
else: cmd_split = ';'
backend_version = 'Level II'
debugPS = 0
class PsBackendHelper(object):
def __init__(self):
self._cached = {}
@property
def gs_exe(self):
"""
executable name of ghostscript.
"""
try:
return self._cached["gs_exe"]
except KeyError:
pass
gs_exe, gs_version = checkdep_ghostscript()
if gs_exe is None:
gs_exe = 'gs'
self._cached["gs_exe"] = gs_exe
return gs_exe
@property
def gs_version(self):
"""
version of ghostscript.
"""
try:
return self._cached["gs_version"]
except KeyError:
pass
from matplotlib.compat.subprocess import Popen, PIPE
s = Popen(self.gs_exe + " --version",
shell=True, stdout=PIPE)
pipe, stderr = s.communicate()
if six.PY3:
ver = pipe.decode('ascii')
else:
ver = pipe
try:
gs_version = tuple(map(int, ver.strip().split(".")))
except ValueError:
# if something went wrong parsing return null version number
gs_version = (0, 0)
self._cached["gs_version"] = gs_version
return gs_version
@property
def supports_ps2write(self):
"""
True if the installed ghostscript supports ps2write device.
"""
return self.gs_version[0] >= 9
ps_backend_helper = PsBackendHelper()
papersize = {'letter': (8.5,11),
'legal': (8.5,14),
'ledger': (11,17),
'a0': (33.11,46.81),
'a1': (23.39,33.11),
'a2': (16.54,23.39),
'a3': (11.69,16.54),
'a4': (8.27,11.69),
'a5': (5.83,8.27),
'a6': (4.13,5.83),
'a7': (2.91,4.13),
'a8': (2.07,2.91),
'a9': (1.457,2.05),
'a10': (1.02,1.457),
'b0': (40.55,57.32),
'b1': (28.66,40.55),
'b2': (20.27,28.66),
'b3': (14.33,20.27),
'b4': (10.11,14.33),
'b5': (7.16,10.11),
'b6': (5.04,7.16),
'b7': (3.58,5.04),
'b8': (2.51,3.58),
'b9': (1.76,2.51),
'b10': (1.26,1.76)}
def _get_papertype(w, h):
keys = list(six.iterkeys(papersize))
keys.sort()
keys.reverse()
for key in keys:
if key.startswith('l'): continue
pw, ph = papersize[key]
if (w < pw) and (h < ph): return key
else:
return 'a0'
def _num_to_str(val):
if is_string_like(val): return val
ival = int(val)
if val==ival: return str(ival)
s = "%1.3f"%val
s = s.rstrip("0")
s = s.rstrip(".")
return s
def _nums_to_str(*args):
return ' '.join(map(_num_to_str,args))
def quote_ps_string(s):
"Quote dangerous characters of S for use in a PostScript string constant."
s=s.replace("\\", "\\\\")
s=s.replace("(", "\\(")
s=s.replace(")", "\\)")
s=s.replace("'", "\\251")
s=s.replace("`", "\\301")
s=re.sub(r"[^ -~\n]", lambda x: r"\%03o"%ord(x.group()), s)
return s
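# Illustrative example: quote_ps_string('a(b) 100%') -> 'a\\(b\\) 100%', i.e.
# parentheses are escaped so the text is safe inside a PostScript string
# constant.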
def seq_allequal(seq1, seq2):
"""
seq1 and seq2 are either None or sequences or arrays
Return True if both are None or both are seqs with identical
elements
"""
if seq1 is None:
return seq2 is None
if seq2 is None:
return False
#ok, neither are None:, assuming iterable
if len(seq1) != len(seq2): return False
return np.alltrue(np.equal(seq1, seq2))
class RendererPS(RendererBase):
"""
The renderer handles all the drawing primitives using a graphics
context instance that controls the colors/styles.
"""
fontd = maxdict(50)
afmfontd = maxdict(50)
def __init__(self, width, height, pswriter, imagedpi=72):
"""
Although postscript itself is dpi independent, we need to
inform the image code about a requested dpi to generate high
res images and then scale them before embedding them
"""
RendererBase.__init__(self)
self.width = width
self.height = height
self._pswriter = pswriter
if rcParams['text.usetex']:
self.textcnt = 0
self.psfrag = []
self.imagedpi = imagedpi
# current renderer state (None=uninitialised)
self.color = None
self.linewidth = None
self.linejoin = None
self.linecap = None
self.linedash = None
self.fontname = None
self.fontsize = None
self._hatches = {}
self.image_magnification = imagedpi/72.0
self._clip_paths = {}
self._path_collection_id = 0
self.used_characters = {}
self.mathtext_parser = MathTextParser("PS")
self._afm_font_dir = os.path.join(
rcParams['datapath'], 'fonts', 'afm')
def track_characters(self, font, s):
"""Keeps track of which characters are required from
each font."""
realpath, stat_key = get_realpath_and_stat(font.fname)
used_characters = self.used_characters.setdefault(
stat_key, (realpath, set()))
used_characters[1].update([ord(x) for x in s])
def merge_used_characters(self, other):
for stat_key, (realpath, charset) in six.iteritems(other):
used_characters = self.used_characters.setdefault(
stat_key, (realpath, set()))
used_characters[1].update(charset)
def set_color(self, r, g, b, store=1):
if (r,g,b) != self.color:
if r==g and r==b:
self._pswriter.write("%1.3f setgray\n"%r)
else:
self._pswriter.write("%1.3f %1.3f %1.3f setrgbcolor\n"%(r,g,b))
if store: self.color = (r,g,b)
def set_linewidth(self, linewidth, store=1):
linewidth = float(linewidth)
if linewidth != self.linewidth:
self._pswriter.write("%1.3f setlinewidth\n"%linewidth)
if store: self.linewidth = linewidth
def set_linejoin(self, linejoin, store=1):
if linejoin != self.linejoin:
self._pswriter.write("%d setlinejoin\n"%linejoin)
if store: self.linejoin = linejoin
def set_linecap(self, linecap, store=1):
if linecap != self.linecap:
self._pswriter.write("%d setlinecap\n"%linecap)
if store: self.linecap = linecap
def set_linedash(self, offset, seq, store=1):
if self.linedash is not None:
oldo, oldseq = self.linedash
if seq_allequal(seq, oldseq): return
if seq is not None and len(seq):
s="[%s] %d setdash\n"%(_nums_to_str(*seq), offset)
self._pswriter.write(s)
else:
self._pswriter.write("[] 0 setdash\n")
if store: self.linedash = (offset,seq)
def set_font(self, fontname, fontsize, store=1):
if rcParams['ps.useafm']: return
if (fontname,fontsize) != (self.fontname,self.fontsize):
out = ("/%s findfont\n"
"%1.3f scalefont\n"
"setfont\n" % (fontname, fontsize))
self._pswriter.write(out)
if store: self.fontname = fontname
if store: self.fontsize = fontsize
def create_hatch(self, hatch):
sidelen = 72
if hatch in self._hatches:
return self._hatches[hatch]
name = 'H%d' % len(self._hatches)
self._pswriter.write("""\
<< /PatternType 1
/PaintType 2
/TilingType 2
/BBox[0 0 %(sidelen)d %(sidelen)d]
/XStep %(sidelen)d
/YStep %(sidelen)d
/PaintProc {
pop
0 setlinewidth
""" % locals())
self._pswriter.write(
self._convert_path(Path.hatch(hatch), Affine2D().scale(72.0),
simplify=False))
self._pswriter.write("""\
stroke
} bind
>>
matrix
makepattern
/%(name)s exch def
""" % locals())
self._hatches[hatch] = name
return name
def get_canvas_width_height(self):
'return the canvas width and height in display coords'
return self.width, self.height
def get_text_width_height_descent(self, s, prop, ismath):
"""
get the width and height in display coords of the string s
with FontPropertry prop
"""
if rcParams['text.usetex']:
texmanager = self.get_texmanager()
fontsize = prop.get_size_in_points()
w, h, d = texmanager.get_text_width_height_descent(s, fontsize,
renderer=self)
return w, h, d
if ismath:
width, height, descent, pswriter, used_characters = \
self.mathtext_parser.parse(s, 72, prop)
return width, height, descent
if rcParams['ps.useafm']:
if ismath: s = s[1:-1]
font = self._get_font_afm(prop)
l,b,w,h,d = font.get_str_bbox_and_descent(s)
fontsize = prop.get_size_in_points()
scale = 0.001*fontsize
w *= scale
h *= scale
d *= scale
return w, h, d
font = self._get_font_ttf(prop)
font.set_text(s, 0.0, flags=LOAD_NO_HINTING)
w, h = font.get_width_height()
w /= 64.0 # convert from subpixels
h /= 64.0
d = font.get_descent()
d /= 64.0
#print s, w, h
return w, h, d
def flipy(self):
'return true if small y numbers are top for renderer'
return False
def _get_font_afm(self, prop):
key = hash(prop)
font = self.afmfontd.get(key)
if font is None:
fname = findfont(prop, fontext='afm', directory=self._afm_font_dir)
if fname is None:
fname = findfont(
"Helvetica", fontext='afm', directory=self._afm_font_dir)
font = self.afmfontd.get(fname)
if font is None:
with io.open(fname, 'rb') as fh:
font = AFM(fh)
self.afmfontd[fname] = font
self.afmfontd[key] = font
return font
def _get_font_ttf(self, prop):
key = hash(prop)
font = self.fontd.get(key)
if font is None:
fname = findfont(prop)
font = self.fontd.get(fname)
if font is None:
font = FT2Font(fname)
self.fontd[fname] = font
self.fontd[key] = font
font.clear()
size = prop.get_size_in_points()
font.set_size(size, 72.0)
return font
def _rgba(self, im):
return im.as_rgba_str()
def _rgb(self, im):
h,w,s = im.as_rgba_str()
rgba = np.fromstring(s, np.uint8)
rgba.shape = (h, w, 4)
rgb = rgba[::-1,:,:3]
return h, w, rgb.tostring()
def _gray(self, im, rc=0.3, gc=0.59, bc=0.11):
rgbat = im.as_rgba_str()
rgba = np.fromstring(rgbat[2], np.uint8)
rgba.shape = (rgbat[0], rgbat[1], 4)
rgba = rgba[::-1]
rgba_f = rgba.astype(np.float32)
r = rgba_f[:,:,0]
g = rgba_f[:,:,1]
b = rgba_f[:,:,2]
gray = (r*rc + g*gc + b*bc).astype(np.uint8)
return rgbat[0], rgbat[1], gray.tostring()
def _hex_lines(self, s, chars_per_line=128):
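        # Hex-encode the raw image bytes and split the result into fixed-width
        # lines suitable for embedding after an image/colorimage operator.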
s = binascii.b2a_hex(s)
nhex = len(s)
lines = []
for i in range(0,nhex,chars_per_line):
limit = min(i+chars_per_line, nhex)
lines.append(s[i:limit])
return lines
def get_image_magnification(self):
"""
Get the factor by which to magnify images passed to draw_image.
Allows a backend to have images at a different resolution to other
artists.
"""
return self.image_magnification
def option_scale_image(self):
"""
        ps backend supports arbitrary scaling of images.
"""
return True
def option_image_nocomposite(self):
"""
return whether to generate a composite image from multiple images on
a set of axes
"""
return not rcParams['image.composite_image']
def _get_image_h_w_bits_command(self, im):
if im.is_grayscale:
h, w, bits = self._gray(im)
imagecmd = "image"
else:
h, w, bits = self._rgb(im)
imagecmd = "false 3 colorimage"
return h, w, bits, imagecmd
def draw_image(self, gc, x, y, im, dx=None, dy=None, transform=None):
"""
Draw the Image instance into the current axes; x is the
distance in pixels from the left hand side of the canvas and y
is the distance from bottom
        dx, dy are the width and height of the image. If a transform
(which must be an affine transform) is given, x, y, dx, dy are
interpreted as the coordinate of the transform.
"""
h, w, bits, imagecmd = self._get_image_h_w_bits_command(im)
hexlines = b'\n'.join(self._hex_lines(bits)).decode('ascii')
if dx is None:
xscale = w / self.image_magnification
else:
xscale = dx
if dy is None:
yscale = h/self.image_magnification
else:
yscale = dy
if transform is None:
matrix = "1 0 0 1 0 0"
else:
matrix = " ".join(map(str, transform.to_values()))
figh = self.height*72
#print 'values', origin, flipud, figh, h, y
bbox = gc.get_clip_rectangle()
clippath, clippath_trans = gc.get_clip_path()
clip = []
if bbox is not None:
clipx,clipy,clipw,cliph = bbox.bounds
clip.append('%s clipbox' % _nums_to_str(clipw, cliph, clipx, clipy))
if clippath is not None:
id = self._get_clip_path(clippath, clippath_trans)
clip.append('%s' % id)
clip = '\n'.join(clip)
#y = figh-(y+h)
ps = """gsave
%(clip)s
[%(matrix)s] concat
%(x)s %(y)s translate
%(xscale)s %(yscale)s scale
/DataString %(w)s string def
%(w)s %(h)s 8 [ %(w)s 0 0 -%(h)s 0 %(h)s ]
{
currentfile DataString readhexstring pop
} bind %(imagecmd)s
%(hexlines)s
grestore
""" % locals()
self._pswriter.write(ps)
def _convert_path(self, path, transform, clip=False, simplify=None):
ps = []
last_points = None
if clip:
clip = (0.0, 0.0, self.width * 72.0,
self.height * 72.0)
else:
clip = None
return _path.convert_to_string(
path, transform, clip, simplify, None,
6, [b'm', b'l', b'', b'c', b'cl'], True).decode('ascii')
def _get_clip_path(self, clippath, clippath_transform):
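        # Convert the clip path to PostScript once, define it as a named
        # procedure (c0, c1, ...) in the output, and reuse that name for later
        # draws with the same (path, transform) pair.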
key = (clippath, id(clippath_transform))
pid = self._clip_paths.get(key)
if pid is None:
pid = 'c%x' % len(self._clip_paths)
ps_cmd = ['/%s {' % pid]
ps_cmd.append(self._convert_path(clippath, clippath_transform,
simplify=False))
ps_cmd.extend(['clip', 'newpath', '} bind def\n'])
self._pswriter.write('\n'.join(ps_cmd))
self._clip_paths[key] = pid
return pid
def draw_path(self, gc, path, transform, rgbFace=None):
"""
Draws a Path instance using the given affine transform.
"""
clip = (rgbFace is None and gc.get_hatch_path() is None)
simplify = path.should_simplify and clip
ps = self._convert_path(
path, transform, clip=clip, simplify=simplify)
self._draw_ps(ps, gc, rgbFace)
def draw_markers(self, gc, marker_path, marker_trans, path, trans, rgbFace=None):
"""
Draw the markers defined by path at each of the positions in x
and y. path coordinates are points, x and y coords will be
transformed by the transform
"""
if debugPS: self._pswriter.write('% draw_markers \n')
write = self._pswriter.write
if rgbFace:
if rgbFace[0]==rgbFace[1] and rgbFace[0]==rgbFace[2]:
ps_color = '%1.3f setgray' % rgbFace[0]
else:
ps_color = '%1.3f %1.3f %1.3f setrgbcolor' % rgbFace[:3]
# construct the generic marker command:
ps_cmd = ['/o {', 'gsave', 'newpath', 'translate'] # dont want the translate to be global
lw = gc.get_linewidth()
stroke = lw != 0.0
if stroke:
ps_cmd.append('%.1f setlinewidth' % lw)
jint = gc.get_joinstyle()
ps_cmd.append('%d setlinejoin' % jint)
cint = gc.get_capstyle()
ps_cmd.append('%d setlinecap' % cint)
ps_cmd.append(self._convert_path(marker_path, marker_trans,
simplify=False))
if rgbFace:
if stroke:
ps_cmd.append('gsave')
ps_cmd.extend([ps_color, 'fill'])
if stroke:
ps_cmd.append('grestore')
if stroke:
ps_cmd.append('stroke')
ps_cmd.extend(['grestore', '} bind def'])
for vertices, code in path.iter_segments(
trans,
clip=(0, 0, self.width*72, self.height*72),
simplify=False):
if len(vertices):
x, y = vertices[-2:]
ps_cmd.append("%g %g o" % (x, y))
ps = '\n'.join(ps_cmd)
self._draw_ps(ps, gc, rgbFace, fill=False, stroke=False)
def draw_path_collection(self, gc, master_transform, paths, all_transforms,
offsets, offsetTrans, facecolors, edgecolors,
linewidths, linestyles, antialiaseds, urls,
offset_position):
# Is the optimization worth it? Rough calculation:
# cost of emitting a path in-line is
# (len_path + 2) * uses_per_path
# cost of definition+use is
# (len_path + 3) + 3 * uses_per_path
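        # e.g. with len_path = 10 and uses_per_path = 2, emitting in-line costs
        # (10 + 2) * 2 = 24 while define-then-use costs (10 + 3) + 3 * 2 = 19,
        # so defining the path once is cheaper.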
len_path = len(paths[0].vertices) if len(paths) > 0 else 0
uses_per_path = self._iter_collection_uses_per_path(
paths, all_transforms, offsets, facecolors, edgecolors)
should_do_optimization = \
len_path + 3 * uses_per_path + 3 < (len_path + 2) * uses_per_path
if not should_do_optimization:
return RendererBase.draw_path_collection(
self, gc, master_transform, paths, all_transforms,
offsets, offsetTrans, facecolors, edgecolors,
linewidths, linestyles, antialiaseds, urls,
offset_position)
write = self._pswriter.write
path_codes = []
for i, (path, transform) in enumerate(self._iter_collection_raw_paths(
master_transform, paths, all_transforms)):
name = 'p%x_%x' % (self._path_collection_id, i)
ps_cmd = ['/%s {' % name,
'newpath', 'translate']
ps_cmd.append(self._convert_path(path, transform, simplify=False))
ps_cmd.extend(['} bind def\n'])
write('\n'.join(ps_cmd))
path_codes.append(name)
for xo, yo, path_id, gc0, rgbFace in self._iter_collection(
gc, master_transform, all_transforms, path_codes, offsets,
offsetTrans, facecolors, edgecolors, linewidths, linestyles,
antialiaseds, urls, offset_position):
ps = "%g %g %s" % (xo, yo, path_id)
self._draw_ps(ps, gc0, rgbFace)
self._path_collection_id += 1
def draw_tex(self, gc, x, y, s, prop, angle, ismath='TeX!', mtext=None):
"""
draw a Text instance
"""
w, h, bl = self.get_text_width_height_descent(s, prop, ismath)
fontsize = prop.get_size_in_points()
thetext = 'psmarker%d' % self.textcnt
color = '%1.3f,%1.3f,%1.3f'% gc.get_rgb()[:3]
fontcmd = {'sans-serif' : r'{\sffamily %s}',
'monospace' : r'{\ttfamily %s}'}.get(
rcParams['font.family'][0], r'{\rmfamily %s}')
s = fontcmd % s
tex = r'\color[rgb]{%s} %s' % (color, s)
corr = 0#w/2*(fontsize-10)/10
if rcParams['text.latex.preview']:
# use baseline alignment!
pos = _nums_to_str(x-corr, y)
self.psfrag.append(r'\psfrag{%s}[Bl][Bl][1][%f]{\fontsize{%f}{%f}%s}'%(thetext, angle, fontsize, fontsize*1.25, tex))
else:
# stick to the bottom alignment, but this may give incorrect baseline some times.
pos = _nums_to_str(x-corr, y-bl)
self.psfrag.append(r'\psfrag{%s}[bl][bl][1][%f]{\fontsize{%f}{%f}%s}'%(thetext, angle, fontsize, fontsize*1.25, tex))
ps = """\
gsave
%(pos)s moveto
(%(thetext)s)
show
grestore
""" % locals()
self._pswriter.write(ps)
self.textcnt += 1
def draw_text(self, gc, x, y, s, prop, angle, ismath=False, mtext=None):
"""
draw a Text instance
"""
# local to avoid repeated attribute lookups
write = self._pswriter.write
if debugPS:
write("% text\n")
if ismath=='TeX':
            return self.draw_tex(gc, x, y, s, prop, angle)
elif ismath:
return self.draw_mathtext(gc, x, y, s, prop, angle)
elif rcParams['ps.useafm']:
self.set_color(*gc.get_rgb())
font = self._get_font_afm(prop)
fontname = font.get_fontname()
fontsize = prop.get_size_in_points()
scale = 0.001*fontsize
thisx = 0
thisy = font.get_str_bbox_and_descent(s)[4] * scale
last_name = None
lines = []
for c in s:
name = uni2type1.get(ord(c), 'question')
try:
width = font.get_width_from_char_name(name)
except KeyError:
name = 'question'
width = font.get_width_char('?')
if last_name is not None:
kern = font.get_kern_dist_from_name(last_name, name)
else:
kern = 0
last_name = name
thisx += kern * scale
lines.append('%f %f m /%s glyphshow'%(thisx, thisy, name))
thisx += width * scale
thetext = "\n".join(lines)
ps = """\
gsave
/%(fontname)s findfont
%(fontsize)s scalefont
setfont
%(x)f %(y)f translate
%(angle)f rotate
%(thetext)s
grestore
""" % locals()
self._pswriter.write(ps)
else:
font = self._get_font_ttf(prop)
font.set_text(s, 0, flags=LOAD_NO_HINTING)
self.track_characters(font, s)
self.set_color(*gc.get_rgb())
sfnt = font.get_sfnt()
try:
ps_name = sfnt[(1,0,0,6)].decode('macroman')
except KeyError:
ps_name = sfnt[(3,1,0x0409,6)].decode(
'utf-16be')
ps_name = ps_name.encode('ascii', 'replace').decode('ascii')
self.set_font(ps_name, prop.get_size_in_points())
cmap = font.get_charmap()
lastgind = None
#print 'text', s
lines = []
thisx = 0
thisy = 0
for c in s:
ccode = ord(c)
gind = cmap.get(ccode)
if gind is None:
ccode = ord('?')
name = '.notdef'
gind = 0
else:
name = font.get_glyph_name(gind)
glyph = font.load_char(ccode, flags=LOAD_NO_HINTING)
if lastgind is not None:
kern = font.get_kerning(lastgind, gind, KERNING_DEFAULT)
else:
kern = 0
lastgind = gind
thisx += kern/64.0
lines.append('%f %f m /%s glyphshow'%(thisx, thisy, name))
thisx += glyph.linearHoriAdvance/65536.0
thetext = '\n'.join(lines)
ps = """gsave
%(x)f %(y)f translate
%(angle)f rotate
%(thetext)s
grestore
""" % locals()
self._pswriter.write(ps)
def new_gc(self):
return GraphicsContextPS()
def draw_mathtext(self, gc,
x, y, s, prop, angle):
"""
Draw the math text using matplotlib.mathtext
"""
if debugPS:
self._pswriter.write("% mathtext\n")
width, height, descent, pswriter, used_characters = \
self.mathtext_parser.parse(s, 72, prop)
self.merge_used_characters(used_characters)
self.set_color(*gc.get_rgb())
thetext = pswriter.getvalue()
ps = """gsave
%(x)f %(y)f translate
%(angle)f rotate
%(thetext)s
grestore
""" % locals()
self._pswriter.write(ps)
def draw_gouraud_triangle(self, gc, points, colors, trans):
self.draw_gouraud_triangles(gc, points.reshape((1, 3, 2)),
colors.reshape((1, 3, 4)), trans)
def draw_gouraud_triangles(self, gc, points, colors, trans):
assert len(points) == len(colors)
assert points.ndim == 3
assert points.shape[1] == 3
assert points.shape[2] == 2
assert colors.ndim == 3
assert colors.shape[1] == 3
assert colors.shape[2] == 4
shape = points.shape
flat_points = points.reshape((shape[0] * shape[1], 2))
flat_points = trans.transform(flat_points)
flat_colors = colors.reshape((shape[0] * shape[1], 4))
points_min = np.min(flat_points, axis=0) - (1 << 12)
points_max = np.max(flat_points, axis=0) + (1 << 12)
factor = np.ceil(float(2 ** 32 - 1) / (points_max - points_min))
xmin, ymin = points_min
xmax, ymax = points_max
streamarr = np.empty(
(shape[0] * shape[1],),
dtype=[('flags', 'u1'),
('points', '>u4', (2,)),
('colors', 'u1', (3,))])
streamarr['flags'] = 0
streamarr['points'] = (flat_points - points_min) * factor
streamarr['colors'] = flat_colors[:, :3] * 255.0
stream = quote_ps_string(streamarr.tostring())
self._pswriter.write("""
gsave
<< /ShadingType 4
/ColorSpace [/DeviceRGB]
/BitsPerCoordinate 32
/BitsPerComponent 8
/BitsPerFlag 8
/AntiAlias true
/Decode [ %(xmin)f %(xmax)f %(ymin)f %(ymax)f 0 1 0 1 0 1 ]
/DataSource (%(stream)s)
>>
shfill
grestore
""" % locals())
def _draw_ps(self, ps, gc, rgbFace, fill=True, stroke=True, command=None):
"""
        Emit the PostScript snippet 'ps' with all the attributes from 'gc'
applied. 'ps' must consist of PostScript commands to construct a path.
The fill and/or stroke kwargs can be set to False if the
'ps' string already includes filling and/or stroking, in
which case _draw_ps is just supplying properties and
clipping.
"""
# local variable eliminates all repeated attribute lookups
write = self._pswriter.write
if debugPS and command:
write("% "+command+"\n")
mightstroke = gc.shouldstroke()
stroke = stroke and mightstroke
fill = (fill and rgbFace is not None and
(len(rgbFace) <= 3 or rgbFace[3] != 0.0))
if mightstroke:
self.set_linewidth(gc.get_linewidth())
jint = gc.get_joinstyle()
self.set_linejoin(jint)
cint = gc.get_capstyle()
self.set_linecap(cint)
self.set_linedash(*gc.get_dashes())
self.set_color(*gc.get_rgb()[:3])
write('gsave\n')
cliprect = gc.get_clip_rectangle()
if cliprect:
x,y,w,h=cliprect.bounds
write('%1.4g %1.4g %1.4g %1.4g clipbox\n' % (w,h,x,y))
clippath, clippath_trans = gc.get_clip_path()
if clippath:
id = self._get_clip_path(clippath, clippath_trans)
write('%s\n' % id)
# Jochen, is the strip necessary? - this could be a honking big string
write(ps.strip())
write("\n")
if fill:
if stroke:
write("gsave\n")
self.set_color(store=0, *rgbFace[:3])
write("fill\n")
if stroke:
write("grestore\n")
hatch = gc.get_hatch()
if hatch:
hatch_name = self.create_hatch(hatch)
write("gsave\n")
write("[/Pattern [/DeviceRGB]] setcolorspace %f %f %f " % gc.get_rgb()[:3])
write("%s setcolor fill grestore\n" % hatch_name)
if stroke:
write("stroke\n")
write("grestore\n")
class GraphicsContextPS(GraphicsContextBase):
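    # Translate matplotlib cap/join style names into the integer codes used by
    # the PostScript setlinecap/setlinejoin operators.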
def get_capstyle(self):
return {'butt':0,
'round':1,
'projecting':2}[GraphicsContextBase.get_capstyle(self)]
def get_joinstyle(self):
return {'miter':0,
'round':1,
'bevel':2}[GraphicsContextBase.get_joinstyle(self)]
def shouldstroke(self):
return (self.get_linewidth() > 0.0 and
(len(self.get_rgb()) <= 3 or self.get_rgb()[3] != 0.0))
def new_figure_manager(num, *args, **kwargs):
FigureClass = kwargs.pop('FigureClass', Figure)
thisFig = FigureClass(*args, **kwargs)
return new_figure_manager_given_figure(num, thisFig)
def new_figure_manager_given_figure(num, figure):
"""
Create a new figure manager instance for the given figure.
"""
canvas = FigureCanvasPS(figure)
manager = FigureManagerPS(canvas, num)
return manager
class FigureCanvasPS(FigureCanvasBase):
_renderer_class = RendererPS
fixed_dpi = 72
def draw(self):
pass
filetypes = {'ps' : 'Postscript',
'eps' : 'Encapsulated Postscript'}
def get_default_filetype(self):
return 'ps'
def print_ps(self, outfile, *args, **kwargs):
return self._print_ps(outfile, 'ps', *args, **kwargs)
def print_eps(self, outfile, *args, **kwargs):
return self._print_ps(outfile, 'eps', *args, **kwargs)
def _print_ps(self, outfile, format, *args, **kwargs):
papertype = kwargs.pop("papertype", rcParams['ps.papersize'])
papertype = papertype.lower()
if papertype == 'auto':
pass
elif papertype not in papersize:
raise RuntimeError( '%s is not a valid papertype. Use one \
of %s'% (papertype, ', '.join(six.iterkeys(papersize))))
orientation = kwargs.pop("orientation", "portrait").lower()
if orientation == 'landscape': isLandscape = True
elif orientation == 'portrait': isLandscape = False
else: raise RuntimeError('Orientation must be "portrait" or "landscape"')
self.figure.set_dpi(72) # Override the dpi kwarg
imagedpi = kwargs.pop("dpi", 72)
facecolor = kwargs.pop("facecolor", "w")
edgecolor = kwargs.pop("edgecolor", "w")
if rcParams['text.usetex']:
self._print_figure_tex(outfile, format, imagedpi, facecolor, edgecolor,
orientation, isLandscape, papertype,
**kwargs)
else:
self._print_figure(outfile, format, imagedpi, facecolor, edgecolor,
orientation, isLandscape, papertype,
**kwargs)
def _print_figure(self, outfile, format, dpi=72, facecolor='w', edgecolor='w',
orientation='portrait', isLandscape=False, papertype=None,
**kwargs):
"""
Render the figure to hardcopy. Set the figure patch face and
edge colors. This is useful because some of the GUIs have a
gray figure face color background and you'll probably want to
override this on hardcopy
If outfile is a string, it is interpreted as a file name.
If the extension matches .ep* write encapsulated postscript,
otherwise write a stand-alone PostScript file.
If outfile is a file object, a stand-alone PostScript file is
written into this file object.
"""
isEPSF = format == 'eps'
passed_in_file_object = False
if is_string_like(outfile):
title = outfile
elif is_writable_file_like(outfile):
title = None
passed_in_file_object = True
else:
raise ValueError("outfile must be a path or a file-like object")
# find the appropriate papertype
width, height = self.figure.get_size_inches()
if papertype == 'auto':
if isLandscape: papertype = _get_papertype(height, width)
else: papertype = _get_papertype(width, height)
if isLandscape: paperHeight, paperWidth = papersize[papertype]
else: paperWidth, paperHeight = papersize[papertype]
if rcParams['ps.usedistiller'] and not papertype == 'auto':
# distillers will improperly clip eps files if the pagesize is
# too small
if width>paperWidth or height>paperHeight:
if isLandscape:
papertype = _get_papertype(height, width)
paperHeight, paperWidth = papersize[papertype]
else:
papertype = _get_papertype(width, height)
paperWidth, paperHeight = papersize[papertype]
# center the figure on the paper
xo = 72*0.5*(paperWidth - width)
yo = 72*0.5*(paperHeight - height)
l, b, w, h = self.figure.bbox.bounds
llx = xo
lly = yo
urx = llx + w
ury = lly + h
rotation = 0
if isLandscape:
llx, lly, urx, ury = lly, llx, ury, urx
xo, yo = 72*paperHeight - yo, xo
rotation = 90
bbox = (llx, lly, urx, ury)
# generate PostScript code for the figure and store it in a string
origfacecolor = self.figure.get_facecolor()
origedgecolor = self.figure.get_edgecolor()
self.figure.set_facecolor(facecolor)
self.figure.set_edgecolor(edgecolor)
dryrun = kwargs.get("dryrun", False)
if dryrun:
class NullWriter(object):
def write(self, *kl, **kwargs):
pass
self._pswriter = NullWriter()
else:
self._pswriter = io.StringIO()
# mixed mode rendering
_bbox_inches_restore = kwargs.pop("bbox_inches_restore", None)
ps_renderer = self._renderer_class(width, height, self._pswriter,
imagedpi=dpi)
renderer = MixedModeRenderer(self.figure,
width, height, dpi, ps_renderer,
bbox_inches_restore=_bbox_inches_restore)
self.figure.draw(renderer)
if dryrun: # return immediately if dryrun (tightbbox=True)
return
self.figure.set_facecolor(origfacecolor)
self.figure.set_edgecolor(origedgecolor)
def print_figure_impl():
# write the PostScript headers
if isEPSF: print("%!PS-Adobe-3.0 EPSF-3.0", file=fh)
else: print("%!PS-Adobe-3.0", file=fh)
if title: print("%%Title: "+title, file=fh)
print(("%%Creator: matplotlib version "
+__version__+", http://matplotlib.org/"), file=fh)
print("%%CreationDate: "+time.ctime(time.time()), file=fh)
print("%%Orientation: " + orientation, file=fh)
if not isEPSF: print("%%DocumentPaperSizes: "+papertype, file=fh)
print("%%%%BoundingBox: %d %d %d %d" % bbox, file=fh)
if not isEPSF: print("%%Pages: 1", file=fh)
print("%%EndComments", file=fh)
Ndict = len(psDefs)
print("%%BeginProlog", file=fh)
if not rcParams['ps.useafm']:
Ndict += len(ps_renderer.used_characters)
print("/mpldict %d dict def"%Ndict, file=fh)
print("mpldict begin", file=fh)
for d in psDefs:
d=d.strip()
for l in d.split('\n'):
print(l.strip(), file=fh)
if not rcParams['ps.useafm']:
for font_filename, chars in six.itervalues(ps_renderer.used_characters):
if len(chars):
font = FT2Font(font_filename)
cmap = font.get_charmap()
glyph_ids = []
for c in chars:
gind = cmap.get(c) or 0
glyph_ids.append(gind)
fonttype = rcParams['ps.fonttype']
# Can not use more than 255 characters from a
# single font for Type 3
if len(glyph_ids) > 255:
fonttype = 42
# The ttf to ps (subsetting) support doesn't work for
# OpenType fonts that are Postscript inside (like the
# STIX fonts). This will simply turn that off to avoid
# errors.
if is_opentype_cff_font(font_filename):
raise RuntimeError("OpenType CFF fonts can not be saved using the internal Postscript backend at this time.\nConsider using the Cairo backend.")
else:
fh.flush()
convert_ttf_to_ps(
font_filename.encode(sys.getfilesystemencoding()),
fh, fonttype, glyph_ids)
print("end", file=fh)
print("%%EndProlog", file=fh)
if not isEPSF: print("%%Page: 1 1", file=fh)
print("mpldict begin", file=fh)
#print >>fh, "gsave"
print("%s translate"%_nums_to_str(xo, yo), file=fh)
if rotation: print("%d rotate"%rotation, file=fh)
print("%s clipbox"%_nums_to_str(width*72, height*72, 0, 0), file=fh)
# Disable any sort of miter limit
print("%s setmiterlimit" % 100000, file=fh)
# write the figure
content = self._pswriter.getvalue()
if not isinstance(content, six.text_type):
content = content.decode('ascii')
print(content, file=fh)
# write the trailer
#print >>fh, "grestore"
print("end", file=fh)
print("showpage", file=fh)
if not isEPSF: print("%%EOF", file=fh)
fh.flush()
if rcParams['ps.usedistiller']:
# We are going to use an external program to process the output.
# Write to a temporary file.
fd, tmpfile = mkstemp()
with io.open(fd, 'w', encoding='latin-1') as fh:
print_figure_impl()
else:
# Write directly to outfile.
if passed_in_file_object:
requires_unicode = file_requires_unicode(outfile)
if (not requires_unicode and
(six.PY3 or not isinstance(outfile, StringIO))):
fh = io.TextIOWrapper(outfile, encoding="latin-1")
# Prevent the io.TextIOWrapper from closing the
# underlying file
def do_nothing():
pass
fh.close = do_nothing
else:
fh = outfile
print_figure_impl()
else:
with io.open(outfile, 'w', encoding='latin-1') as fh:
print_figure_impl()
if rcParams['ps.usedistiller']:
if rcParams['ps.usedistiller'] == 'ghostscript':
gs_distill(tmpfile, isEPSF, ptype=papertype, bbox=bbox)
elif rcParams['ps.usedistiller'] == 'xpdf':
xpdf_distill(tmpfile, isEPSF, ptype=papertype, bbox=bbox)
if passed_in_file_object:
if file_requires_unicode(outfile):
with io.open(tmpfile, 'rb') as fh:
outfile.write(fh.read().decode('latin-1'))
else:
with io.open(tmpfile, 'rb') as fh:
outfile.write(fh.read())
else:
with io.open(outfile, 'w') as fh:
pass
mode = os.stat(outfile).st_mode
shutil.move(tmpfile, outfile)
os.chmod(outfile, mode)
def _print_figure_tex(self, outfile, format, dpi, facecolor, edgecolor,
orientation, isLandscape, papertype,
**kwargs):
"""
If text.usetex is True in rc, a temporary pair of tex/eps files
are created to allow tex to manage the text layout via the PSFrags
package. These files are processed to yield the final ps or eps file.
"""
isEPSF = format == 'eps'
if is_string_like(outfile):
title = outfile
elif is_writable_file_like(outfile):
title = None
else:
raise ValueError("outfile must be a path or a file-like object")
self.figure.dpi = 72 # ignore the dpi kwarg
width, height = self.figure.get_size_inches()
xo = 0
yo = 0
l, b, w, h = self.figure.bbox.bounds
llx = xo
lly = yo
urx = llx + w
ury = lly + h
bbox = (llx, lly, urx, ury)
# generate PostScript code for the figure and store it in a string
origfacecolor = self.figure.get_facecolor()
origedgecolor = self.figure.get_edgecolor()
self.figure.set_facecolor(facecolor)
self.figure.set_edgecolor(edgecolor)
dryrun = kwargs.get("dryrun", False)
if dryrun:
class NullWriter(object):
def write(self, *kl, **kwargs):
pass
self._pswriter = NullWriter()
else:
self._pswriter = io.StringIO()
# mixed mode rendering
_bbox_inches_restore = kwargs.pop("bbox_inches_restore", None)
ps_renderer = self._renderer_class(width, height,
self._pswriter, imagedpi=dpi)
renderer = MixedModeRenderer(self.figure,
width, height, dpi, ps_renderer,
bbox_inches_restore=_bbox_inches_restore)
self.figure.draw(renderer)
if dryrun: # return immediately if dryrun (tightbbox=True)
return
self.figure.set_facecolor(origfacecolor)
self.figure.set_edgecolor(origedgecolor)
# write to a temp file, we'll move it to outfile when done
fd, tmpfile = mkstemp()
with io.open(fd, 'w', encoding='latin-1') as fh:
# write the Encapsulated PostScript headers
print("%!PS-Adobe-3.0 EPSF-3.0", file=fh)
if title: print("%%Title: "+title, file=fh)
print(("%%Creator: matplotlib version "
+__version__+", http://matplotlib.org/"), file=fh)
print("%%CreationDate: "+time.ctime(time.time()), file=fh)
print("%%%%BoundingBox: %d %d %d %d" % bbox, file=fh)
print("%%EndComments", file=fh)
Ndict = len(psDefs)
print("%%BeginProlog", file=fh)
print("/mpldict %d dict def"%Ndict, file=fh)
print("mpldict begin", file=fh)
for d in psDefs:
d=d.strip()
for l in d.split('\n'):
print(l.strip(), file=fh)
print("end", file=fh)
print("%%EndProlog", file=fh)
print("mpldict begin", file=fh)
#print >>fh, "gsave"
print("%s translate"%_nums_to_str(xo, yo), file=fh)
print("%s clipbox"%_nums_to_str(width*72, height*72, 0, 0), file=fh)
# Disable any sort of miter limit
print("%d setmiterlimit" % 100000, file=fh)
# write the figure
print(self._pswriter.getvalue(), file=fh)
# write the trailer
#print >>fh, "grestore"
print("end", file=fh)
print("showpage", file=fh)
fh.flush()
if isLandscape: # now we are ready to rotate
isLandscape = True
width, height = height, width
bbox = (lly, llx, ury, urx)
# set the paper size to the figure size if isEPSF. The
# resulting ps file has the given size with correct bounding
# box so that there is no need to call 'pstoeps'
if isEPSF:
paperWidth, paperHeight = self.figure.get_size_inches()
if isLandscape:
paperWidth, paperHeight = paperHeight, paperWidth
else:
temp_papertype = _get_papertype(width, height)
if papertype=='auto':
papertype = temp_papertype
paperWidth, paperHeight = papersize[temp_papertype]
else:
paperWidth, paperHeight = papersize[papertype]
if (width>paperWidth or height>paperHeight) and isEPSF:
paperWidth, paperHeight = papersize[temp_papertype]
verbose.report('Your figure is too big to fit on %s paper. %s \
paper will be used to prevent clipping.'%(papertype, temp_papertype), 'helpful')
texmanager = ps_renderer.get_texmanager()
font_preamble = texmanager.get_font_preamble()
custom_preamble = texmanager.get_custom_preamble()
psfrag_rotated = convert_psfrags(tmpfile, ps_renderer.psfrag,
font_preamble,
custom_preamble, paperWidth, paperHeight,
orientation)
if rcParams['ps.usedistiller'] == 'ghostscript':
gs_distill(tmpfile, isEPSF, ptype=papertype, bbox=bbox,
rotated=psfrag_rotated)
elif rcParams['ps.usedistiller'] == 'xpdf':
xpdf_distill(tmpfile, isEPSF, ptype=papertype, bbox=bbox,
rotated=psfrag_rotated)
elif rcParams['text.usetex']:
if False: pass # for debugging
else: gs_distill(tmpfile, isEPSF, ptype=papertype, bbox=bbox,
rotated=psfrag_rotated)
if is_writable_file_like(outfile):
if file_requires_unicode(outfile):
with io.open(tmpfile, 'rb') as fh:
outfile.write(fh.read().decode('latin-1'))
else:
with io.open(tmpfile, 'rb') as fh:
outfile.write(fh.read())
else:
with io.open(outfile, 'wb') as fh:
pass
mode = os.stat(outfile).st_mode
shutil.move(tmpfile, outfile)
os.chmod(outfile, mode)
def convert_psfrags(tmpfile, psfrags, font_preamble, custom_preamble,
paperWidth, paperHeight, orientation):
"""
When we want to use the LaTeX backend with postscript, we write PSFrag tags
to a temporary postscript file, each one marking a position for LaTeX to
render some text. convert_psfrags generates a LaTeX document containing the
commands to convert those tags to text. LaTeX/dvips produces the postscript
file that includes the actual text.
"""
tmpdir = os.path.split(tmpfile)[0]
epsfile = tmpfile+'.eps'
shutil.move(tmpfile, epsfile)
latexfile = tmpfile+'.tex'
outfile = tmpfile+'.output'
dvifile = tmpfile+'.dvi'
psfile = tmpfile+'.ps'
if orientation=='landscape': angle = 90
else: angle = 0
if rcParams['text.latex.unicode']:
unicode_preamble = """\\usepackage{ucs}
\\usepackage[utf8x]{inputenc}"""
else:
unicode_preamble = ''
s = """\\documentclass{article}
%s
%s
%s
\\usepackage[dvips, papersize={%sin,%sin}, body={%sin,%sin}, margin={0in,0in}]{geometry}
\\usepackage{psfrag}
\\usepackage[dvips]{graphicx}
\\usepackage{color}
\\pagestyle{empty}
\\begin{document}
\\begin{figure}
\\centering
\\leavevmode
%s
\\includegraphics*[angle=%s]{%s}
\\end{figure}
\\end{document}
"""% (font_preamble, unicode_preamble, custom_preamble, paperWidth, paperHeight,
paperWidth, paperHeight,
'\n'.join(psfrags), angle, os.path.split(epsfile)[-1])
with io.open(latexfile, 'wb') as latexh:
if rcParams['text.latex.unicode']:
latexh.write(s.encode('utf8'))
else:
try:
latexh.write(s.encode('ascii'))
except UnicodeEncodeError:
verbose.report("You are using unicode and latex, but have "
"not enabled the matplotlib 'text.latex.unicode' "
"rcParam.", 'helpful')
raise
    # the split drive part of the command is necessary for windows users with
    # multiple drives
if sys.platform == 'win32': precmd = '%s &&'% os.path.splitdrive(tmpdir)[0]
else: precmd = ''
command = '%s cd "%s" && latex -interaction=nonstopmode "%s" > "%s"'\
%(precmd, tmpdir, latexfile, outfile)
verbose.report(command, 'debug')
exit_status = os.system(command)
with io.open(outfile, 'rb') as fh:
if exit_status:
raise RuntimeError('LaTeX was not able to process your file:\
\nHere is the full report generated by LaTeX: \n\n%s'% fh.read())
else:
verbose.report(fh.read(), 'debug')
os.remove(outfile)
command = '%s cd "%s" && dvips -q -R0 -o "%s" "%s" > "%s"'%(precmd, tmpdir,
os.path.split(psfile)[-1], os.path.split(dvifile)[-1], outfile)
verbose.report(command, 'debug')
exit_status = os.system(command)
with io.open(outfile, 'rb') as fh:
if exit_status:
raise RuntimeError('dvips was not able to \
process the following file:\n%s\nHere is the full report generated by dvips: \
\n\n'% dvifile + fh.read())
else:
verbose.report(fh.read(), 'debug')
os.remove(outfile)
os.remove(epsfile)
shutil.move(psfile, tmpfile)
# check if the dvips created a ps in landscape paper. Somehow,
    # above latex+dvips results in a ps file in a landscape mode for
    # certain figure sizes (e.g., 8.3in,5.8in which is a5). And the
    # bounding box of the final output got messed up. We check to see if
# the generated ps file is in landscape and return this
# information. The return value is used in pstoeps step to recover
# the correct bounding box. 2010-06-05 JJL
with io.open(tmpfile) as fh:
if "Landscape" in fh.read(1000):
psfrag_rotated = True
else:
psfrag_rotated = False
if not debugPS:
for fname in glob.glob(tmpfile+'.*'):
os.remove(fname)
return psfrag_rotated
def gs_distill(tmpfile, eps=False, ptype='letter', bbox=None, rotated=False):
"""
Use ghostscript's pswrite or epswrite device to distill a file.
This yields smaller files without illegal encapsulated postscript
operators. The output is low-level, converting text to outlines.
"""
if eps: paper_option = "-dEPSCrop"
else: paper_option = "-sPAPERSIZE=%s" % ptype
psfile = tmpfile + '.ps'
outfile = tmpfile + '.output'
dpi = rcParams['ps.distiller.res']
gs_exe = ps_backend_helper.gs_exe
if ps_backend_helper.supports_ps2write: # gs version >= 9
device_name = "ps2write"
else:
device_name = "pswrite"
command = '%s -dBATCH -dNOPAUSE -r%d -sDEVICE=%s %s -sOutputFile="%s" \
"%s" > "%s"'% (gs_exe, dpi, device_name,
paper_option, psfile, tmpfile, outfile)
verbose.report(command, 'debug')
exit_status = os.system(command)
with io.open(outfile, 'rb') as fh:
if exit_status:
raise RuntimeError('ghostscript was not able to process \
your image.\nHere is the full report generated by ghostscript:\n\n' + fh.read())
else:
verbose.report(fh.read(), 'debug')
os.remove(outfile)
os.remove(tmpfile)
shutil.move(psfile, tmpfile)
# While it is best if above steps preserve the original bounding
# box, there seem to be cases when it is not. For those cases,
# the original bbox can be restored during the pstoeps step.
if eps:
        # For some versions of gs, the above steps result in a ps file
        # where the original bbox is no longer correct. Do not adjust
# bbox for now.
if ps_backend_helper.supports_ps2write:
            # for gs version >= 9 w/ ps2write device
pstoeps(tmpfile, bbox, rotated=rotated)
else:
pstoeps(tmpfile)
def xpdf_distill(tmpfile, eps=False, ptype='letter', bbox=None, rotated=False):
"""
Use ghostscript's ps2pdf and xpdf's/poppler's pdftops to distill a file.
This yields smaller files without illegal encapsulated postscript
operators. This distiller is preferred, generating high-level postscript
output that treats text as text.
"""
pdffile = tmpfile + '.pdf'
psfile = tmpfile + '.ps'
outfile = tmpfile + '.output'
if eps: paper_option = "-dEPSCrop"
else: paper_option = "-sPAPERSIZE=%s" % ptype
command = 'ps2pdf -dAutoFilterColorImages=false \
-dAutoFilterGrayImages=false -sGrayImageFilter=FlateEncode \
-sColorImageFilter=FlateEncode %s "%s" "%s" > "%s"'% \
(paper_option, tmpfile, pdffile, outfile)
if sys.platform == 'win32': command = command.replace('=', '#')
verbose.report(command, 'debug')
exit_status = os.system(command)
with io.open(outfile, 'rb') as fh:
if exit_status:
raise RuntimeError('ps2pdf was not able to process your \
image.\nHere is the report generated by ghostscript:\n\n' + fh.read())
else:
verbose.report(fh.read(), 'debug')
os.remove(outfile)
command = 'pdftops -paper match -level2 "%s" "%s" > "%s"'% \
(pdffile, psfile, outfile)
verbose.report(command, 'debug')
exit_status = os.system(command)
with io.open(outfile, 'rb') as fh:
if exit_status:
raise RuntimeError('pdftops was not able to process your \
image.\nHere is the full report generated by pdftops: \n\n' + fh.read())
else:
verbose.report(fh.read(), 'debug')
os.remove(outfile)
os.remove(tmpfile)
shutil.move(psfile, tmpfile)
if eps:
pstoeps(tmpfile)
for fname in glob.glob(tmpfile+'.*'):
os.remove(fname)
def get_bbox_header(lbrt, rotated=False):
"""
    return a postscript header string for the given bbox lbrt=(l, b, r, t).
Optionally, return rotate command.
"""
l, b, r, t = lbrt
if rotated:
rotate = "%.2f %.2f translate\n90 rotate" % (l+r, 0)
else:
rotate = ""
bbox_info = '%%%%BoundingBox: %d %d %d %d' % (l, b, np.ceil(r), np.ceil(t))
hires_bbox_info = '%%%%HiResBoundingBox: %.6f %.6f %.6f %.6f' % (l, b, r, t)
return '\n'.join([bbox_info, hires_bbox_info]), rotate
# get_bbox is deprecated. I don't see any reason to use ghostscript to
# find the bounding box, as the required bounding box is already known.
def get_bbox(tmpfile, bbox):
"""
Use ghostscript's bbox device to find the center of the bounding box. Return
an appropriately sized bbox centered around that point. A bit of a hack.
"""
outfile = tmpfile + '.output'
gs_exe = ps_backend_helper.gs_exe
command = '%s -dBATCH -dNOPAUSE -sDEVICE=bbox "%s"' %\
(gs_exe, tmpfile)
verbose.report(command, 'debug')
stdin, stdout, stderr = os.popen3(command)
verbose.report(stdout.read(), 'debug-annoying')
bbox_info = stderr.read()
verbose.report(bbox_info, 'helpful')
bbox_found = re.search('%%HiResBoundingBox: .*', bbox_info)
if bbox_found:
bbox_info = bbox_found.group()
else:
raise RuntimeError('Ghostscript was not able to extract a bounding box.\
Here is the Ghostscript output:\n\n%s'% bbox_info)
l, b, r, t = [float(i) for i in bbox_info.split()[-4:]]
# this is a hack to deal with the fact that ghostscript does not return the
# intended bbox, but a tight bbox. For now, we just center the ink in the
# intended bbox. This is not ideal, users may intend the ink to not be
# centered.
if bbox is None:
l, b, r, t = (l-1, b-1, r+1, t+1)
else:
x = (l+r)/2
y = (b+t)/2
dx = (bbox[2]-bbox[0])/2
dy = (bbox[3]-bbox[1])/2
l,b,r,t = (x-dx, y-dy, x+dx, y+dy)
bbox_info = '%%%%BoundingBox: %d %d %d %d' % (l, b, np.ceil(r), np.ceil(t))
hires_bbox_info = '%%%%HiResBoundingBox: %.6f %.6f %.6f %.6f' % (l, b, r, t)
return '\n'.join([bbox_info, hires_bbox_info])
def pstoeps(tmpfile, bbox=None, rotated=False):
"""
Convert the postscript to encapsulated postscript. The bbox of
the eps file will be replaced with the given *bbox* argument. If
None, original bbox will be used.
"""
    # if rotated==True, the output eps file needs to be rotated
if bbox:
bbox_info, rotate = get_bbox_header(bbox, rotated=rotated)
else:
bbox_info, rotate = None, None
epsfile = tmpfile + '.eps'
with io.open(epsfile, 'wb') as epsh:
write = epsh.write
with io.open(tmpfile, 'rb') as tmph:
line = tmph.readline()
# Modify the header:
while line:
if line.startswith(b'%!PS'):
write(b"%!PS-Adobe-3.0 EPSF-3.0\n")
if bbox:
write(bbox_info.encode('ascii') + b'\n')
elif line.startswith(b'%%EndComments'):
write(line)
write(b'%%BeginProlog\n')
write(b'save\n')
write(b'countdictstack\n')
write(b'mark\n')
write(b'newpath\n')
write(b'/showpage {} def\n')
write(b'/setpagedevice {pop} def\n')
write(b'%%EndProlog\n')
write(b'%%Page 1 1\n')
if rotate:
write(rotate.encode('ascii') + b'\n')
break
elif bbox and (line.startswith(b'%%Bound') \
or line.startswith(b'%%HiResBound') \
or line.startswith(b'%%DocumentMedia') \
or line.startswith(b'%%Pages')):
pass
else:
write(line)
line = tmph.readline()
# Now rewrite the rest of the file, and modify the trailer.
# This is done in a second loop such that the header of the embedded
# eps file is not modified.
line = tmph.readline()
while line:
if line.startswith(b'%%EOF'):
write(b'cleartomark\n')
write(b'countdictstack\n')
write(b'exch sub { end } repeat\n')
write(b'restore\n')
write(b'showpage\n')
write(b'%%EOF\n')
elif line.startswith(b'%%PageBoundingBox'):
pass
else:
write(line)
line = tmph.readline()
os.remove(tmpfile)
shutil.move(epsfile, tmpfile)
class FigureManagerPS(FigureManagerBase):
pass
# The following Python dictionary psDefs contains the entries for the
# PostScript dictionary mpldict. This dictionary implements most of
# the matplotlib primitives and some abbreviations.
#
# References:
# http://www.adobe.com/products/postscript/pdfs/PLRM.pdf
# http://www.mactech.com/articles/mactech/Vol.09/09.04/PostscriptTutorial/
# http://www.math.ubc.ca/people/faculty/cass/graphics/text/www/
#
# The usage comments use the notation of the operator summary
# in the PostScript Language reference manual.
psDefs = [
# x y *m* -
"/m { moveto } bind def",
# x y *l* -
"/l { lineto } bind def",
# x y *r* -
"/r { rlineto } bind def",
# x1 y1 x2 y2 x y *c* -
"/c { curveto } bind def",
# *closepath* -
"/cl { closepath } bind def",
# w h x y *box* -
"""/box {
m
1 index 0 r
0 exch r
neg 0 r
cl
} bind def""",
# w h x y *clipbox* -
"""/clipbox {
box
clip
newpath
} bind def""",
]
FigureCanvas = FigureCanvasPS
FigureManager = FigureManagerPS
| mit |
spencerchan/ctabus | flask-bokeh_site/transit_planner.py | 1 | 7334 | import numpy as np
import pandas as pd
from bokeh.io import curdoc
from bokeh.layouts import widgetbox, column, row
from bokeh.models import Band, BoxAnnotation, ColumnDataSource, Range1d
from bokeh.models.widgets import Select, Slider, Paragraph, Button
from bokeh.plotting import figure
import glob
import os
from collections import OrderedDict
import json
# Captures form selection passed as request parameter
# This ensures the data for the correct bus route is loaded
args = curdoc().session_context.request.arguments
args_route = "55"
try:
args_route = args.get('route')[0]
except TypeError:
pass
# Load the Data Set
names = ["tripid", "start", "stop", "day_of_week", "decimal_time", "travel_time", "wait_time"]
all_files = glob.glob(os.path.join("../data/processed/trips_and_waits/" + args_route + "/", "*.csv"))
df_each = (pd.read_csv(f, skiprows=1, names=names) for f in all_files)
df = pd.concat(df_each, ignore_index=True)
# Load the list of bus stops
def load_bus_stops(rt):
with open("../data/processed/stop_lists/" + rt + "_stop_list.json", 'r') as f:
bus_stops = json.load(f, object_pairs_hook=OrderedDict)
return bus_stops
bus_stops = load_bus_stops(args_route)['negative'].keys()
# Create initial graphs
day_of_week = ["All days", "Weekdays", "Saturdays", "Sundays"]
default = {
'start': "MidwayOrange", 'stop': "Ashland",
'day': "All days",
'hour': 17, 'minute': 30
}
# "q" as in query
q = df[(df.start == default['start']) & (df.stop == default['stop'])]
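# Bin departures into 15-minute intervals of the day (in decimal hours); the
# per-bin medians are plotted and the 90th percentiles set the y-axis limits.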
bins = np.arange(0,24.25,0.25)
x_scale = np.arange(0,24,0.25)
grouped = q.groupby([pd.cut(q.decimal_time,bins,labels=x_scale,right=False)])
travel_ninetieth = grouped.travel_time.quantile(0.9)
wait_ninetieth = grouped.wait_time.quantile(0.9)
# Defines travel time plots and sets defaults
title = "{} -> {} ({})".format(default['start'], default['stop'], default['day'])
plot_travel = figure(
title=title,
x_range=Range1d(0, 24, bounds="auto"), y_range=(0,travel_ninetieth.max()+10),
x_axis_label='Time (Decimal Hours)', y_axis_label='Travel Time (Minutes)',
width=600, height=300,
responsive=True
)
line_travel = plot_travel.line(x_scale, grouped.travel_time.median(), color="red")
scatter_travel = plot_travel.circle(q.decimal_time, q.travel_time, color="navy", radius=0.01)
# Defines wait time plots and sets defaults
plot_wait = figure(
x_range=Range1d(0, 24, bounds="auto"), y_range=(0,wait_ninetieth.max()+10),
x_axis_label='Time (Decimal Hours)', y_axis_label='Wait Time (Minutes)',
width=600, height=300,
responsive=True
)
line_wait = plot_wait.line(x_scale, grouped.wait_time.median(), color="red")
scatter_wait = plot_wait.circle(q.decimal_time, q.wait_time, color="navy", radius=0.01)
# Creates paragraph summary of typical travel and wait times at given time of day
time_index = (default['hour'] * 4) + (default['minute'] // 15)
line1 = "At {}:{:02d} the {} bus leaves around every {} minutes from {} going to {}.\n".format(
default['hour'], default['minute'],
args_route,
round(grouped.wait_time.median()[time_index], 1),
default['start'], default['stop']
)
line2 = "The trips take around {} minutes.".format(
round(grouped.travel_time.median()[time_index], 1)
)
paragraph = Paragraph(text=line1+line2, width=800)
# Creates box around selected time band
slider_time = default['hour'] + (default['minute'] // 15) / 4.0
box_travel = BoxAnnotation(
left=slider_time, right=slider_time+0.25,
fill_color='green', fill_alpha=0.1
)
box_wait = BoxAnnotation(
left=slider_time, right=slider_time+0.25,
fill_color='green', fill_alpha=0.1
)
plot_travel.add_layout(box_travel)
plot_wait.add_layout(box_wait)
# Create widgets
select_start = Select(title="Start", value=default['start'], options=bus_stops)
select_stop = Select(title="Stop", value=default['stop'], options=bus_stops)
select_day = Select(title="Days", value=default['day'], options=day_of_week)
slider_hour = Slider(start=0, end=23, value=default['hour'], step=1, title="Hour")
slider_minute = Slider(start=0, end=59, value=default['minute'], step=1, title="Minute")
button = Button(label="Submit", button_type="success")
# Callback functions
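# Note: Bokeh invokes these callbacks as (attr, old, new); the parameters below
# are named (attr, new, old), so the value read from 'old' is in fact the newly
# selected value being stored into the default dict.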
def update_start(attr, new, old):
default['start'] = old
def update_stop(attr, new, old):
default['stop'] = old
def update_day(attr, new, old):
default['day'] = old
def update_hour(attr, new, old):
default['hour'] = old
def update_minute(attr, new, old):
default['minute'] = old
def update():
if (default['day'] == "All days"):
a = 0
b = 6
elif (default['day'] == "Weekdays"):
a = 0
b = 4
elif (default['day'] == "Saturdays"):
a = 5
b = 5
else:
a = 6
b = 6
plot_travel.title.text = "{} -> {} ({})".format(
default['start'], default['stop'], default['day']
)
q = df[(df.start == default['start']) & (df.stop == default['stop']) & (df.day_of_week.between(a,b))]
grouped = q.groupby([pd.cut(q.decimal_time, bins, labels=x_scale, right=False)])
travel_ninetieth = grouped.travel_time.quantile(0.9)
wait_ninetieth = grouped.wait_time.quantile(0.9)
    slider_time = default['hour'] + (default['minute'] // 15) / 4.0
# Updates travel time plots
plot_travel.y_range.end = travel_ninetieth.max() + 10
scatter_travel.data_source.data = dict(x=q.decimal_time, y=q.travel_time)
line_travel.data_source.data = dict(x=x_scale, y=grouped.travel_time.median())
box_travel.left = slider_time
box_travel.right = slider_time + 0.25
# Updates wait time plots
plot_wait.y_range.end = wait_ninetieth.max() + 10
scatter_wait.data_source.data = dict(x=q.decimal_time, y=q.wait_time)
line_wait.data_source.data = dict(x=x_scale, y=grouped.wait_time.median())
box_wait.left = slider_time
box_wait.right = slider_time + 0.25
# Updates paragraph text
    time_index = (default['hour'] * 4) + (default['minute'] // 15)
if np.isnan(grouped.wait_time.median()[time_index]):
        paragraph.text = "At {}:{:02d} there are no {} buses leaving from {} going to {}. ".format(
            default['hour'], default['minute'],
            args_route,
            default['start'], default['stop']
)
else:
        line1 = "At {}:{:02d} the {} bus leaves around every {} minutes from {} going to {}.\n".format(
            default['hour'], default['minute'],
            args_route,
            round(grouped.wait_time.median()[time_index], 1),
default['start'], default['stop']
)
line2 = "The trips take around {} minutes.".format(
round(grouped.travel_time.median()[time_index], 1)
)
paragraph.text = line1 + line2
# Calls update when widget values are changed
widgets = [select_start, select_stop, select_day, slider_hour, slider_minute, button]
select_start.on_change('value', update_start)
select_stop.on_change('value', update_stop)
select_day.on_change('value', update_day)
slider_hour.on_change('value', update_hour)
slider_minute.on_change('value', update_minute)
button.on_click(update)
inputs = widgetbox(widgets)
text_widget = widgetbox(paragraph)
curdoc().add_root(
column([row([inputs, column([plot_travel, plot_wait])]), row(text_widget)],
sizing_mode='scale_width')
) | gpl-3.0 |
Reaktoro/Reaktoro | demos/python/demo-reactive-transport-calcite-dolomite.py | 1 | 11386 | print('============================================================')
print('Make sure you have the following Python packages installed: ')
print(' numpy, matplotlib, joblib')
print('These can be installed with pip:')
print(' pip install numpy matplotlib joblib')
print('============================================================')
# Step 1: importing the required python packages
from reaktoro import *
from numpy import *
import matplotlib.pyplot as plt
from joblib import Parallel, delayed
import os, time
# Step 2: Auxiliary time related constants
second = 1
minute = 60
hour = 60 * minute
day = 24 * hour
year = 365 * day
# Step 3: Parameters for the reactive transport simulation
xl = 0.0 # the x-coordinate of the left boundary
xr = 1.0 # the x-coordinate of the right boundary
nsteps = 100 # the number of steps in the reactive transport simulation
ncells = 100 # the number of cells in the discretization
D = 1.0e-9 # the diffusion coefficient (in units of m2/s)
v = 1.0/day # the fluid pore velocity (in units of m/s)
dt = 10*minute # the time step (in units of s)
T = 60.0 + 273.15 # the temperature (in units of K)
P = 100 * 1e5 # the pressure (in units of Pa)
dirichlet = False # the parameter that determines whether Dirichlet BC must be used
# Step 4: The list of quantities to be output for each mesh cell, each time step
output_quantities = """
pH
speciesMolality(H+)
speciesMolality(Ca++)
speciesMolality(Mg++)
speciesMolality(HCO3-)
speciesMolality(CO2(aq))
phaseVolume(Calcite)
phaseVolume(Dolomite)
""".split()
# Step 7: Perform the reactive transport simulation
def simulate():
# Step 7.1: Construct the chemical system with its phases and species
db = Database('supcrt98.xml')
editor = ChemicalEditor(db)
editor.addAqueousPhase([
'H2O(l)',
'H+',
'OH-',
'Na+',
'Cl-',
'Ca++',
'Mg++',
'HCO3-',
'CO2(aq)',
'CO3--']) # aqueous species are individually selected for performance reasons
editor.addMineralPhase('Quartz')
editor.addMineralPhase('Calcite')
editor.addMineralPhase('Dolomite')
# Step 7.2: Create the ChemicalSystem object using the configured editor
system = ChemicalSystem(editor)
# Step 7.3: Define the initial condition of the reactive transport modeling problem
problem_ic = EquilibriumProblem(system)
problem_ic.setTemperature(T)
problem_ic.setPressure(P)
problem_ic.add('H2O', 1.0, 'kg')
problem_ic.add('NaCl', 0.7, 'mol')
problem_ic.add('CaCO3', 10, 'mol')
problem_ic.add('SiO2', 10, 'mol')
# Step 7.4: Define the boundary condition of the reactive transport modeling problem
problem_bc = EquilibriumProblem(system)
problem_bc.setTemperature(T)
problem_bc.setPressure(P)
problem_bc.add('H2O', 1.0, 'kg')
problem_bc.add('NaCl', 0.90, 'mol')
problem_bc.add('MgCl2', 0.05, 'mol')
problem_bc.add('CaCl2', 0.01, 'mol')
problem_bc.add('CO2', 0.75, 'mol')
# Step 7.5: Calculate the equilibrium states for the initial and boundary conditions
state_ic = equilibrate(problem_ic)
state_bc = equilibrate(problem_bc)
# Step 7.6: Scale the volumes of the phases in the initial condition
state_ic.scalePhaseVolume('Aqueous', 0.1, 'm3')
state_ic.scalePhaseVolume('Quartz', 0.882, 'm3')
state_ic.scalePhaseVolume('Calcite', 0.018, 'm3')
# Scale the boundary condition state to 1 m3
state_bc.scaleVolume(1.0, 'm3')
# Step 7.7: Partitioning fluid and solid species
# The number of elements in the chemical system
nelems = system.numElements()
# The indices of the fluid and solid species
ifluid_species = system.indicesFluidSpecies()
isolid_species = system.indicesSolidSpecies()
# The concentrations of each element in each mesh cell (in the current time step)
b = zeros((ncells, nelems))
# The concentrations (mol/m3) of each element in the fluid partition, in each mesh cell
bfluid = zeros((ncells, nelems))
# The concentrations (mol/m3) of each element in the solid partition, in each mesh cell
bsolid = zeros((ncells, nelems))
# Initialize the concentrations (mol/m3) of the elements in each mesh cell
b[:] = state_ic.elementAmounts()
# Initialize the concentrations (mol/m3) of each element on the boundary
b_bc = state_bc.elementAmounts()
# Step 7.8: Create a list of chemical states for the mesh cells
# The list of chemical states, one for each cell, initialized to state_ic
states = [state_ic.clone() for _ in range(ncells)]
    # Step 7.9: Discretize the 1D domain [xl, xr] into ncells mesh cells
# The interval [xl, xr] split into ncells
x = linspace(xl, xr, ncells + 1)
# The length of each mesh cell (in units of m)
dx = (xr - xl)/ncells
# Step 7.10: Create the equilibrium solver object for the repeated equilibrium calculation
solver = EquilibriumSolver(system)
# Step 7.11: The auxiliary function to create an output file each time step
def outputstate():
# Create the instance of ChemicalOutput class
output = ChemicalOutput(system)
# The number of digits in number of steps (e.g., 100 has 3 digits)
ndigits = len(str(nsteps))
        # Provide the output file name, which will correspond to the current time step
output.filename('results/{}.txt'.format(str(step).zfill(ndigits)))
# We define the columns' tags filled with the name of the quantities
# The first column has a tag 'x' (which corresponds to the center coordinates of the cells )
output.add('tag', 'x') # The value of the center coordinates of the cells
# The rest of the columns correspond to the requested properties
for quantity in output_quantities:
output.add(quantity)
# We update the file with states that correspond to the cells' coordinates stored in x
output.open()
for state, tag in zip(states, x):
output.update(state, tag)
output.close()
# Step 7.12: Running the reactive transport simulation loop
step = 0 # the current step number
t = 0.0 # the current time (in seconds)
while step <= nsteps:
# Print the progress of the simulation
print("Progress: {}/{} steps, {} min".format(step, nsteps, t/minute))
# Output the current state of the reactive transport calculation
outputstate()
# Collect the amounts of elements from fluid and solid partitions
for icell in range(ncells):
bfluid[icell] = states[icell].elementAmountsInSpecies(ifluid_species)
bsolid[icell] = states[icell].elementAmountsInSpecies(isolid_species)
# Transport each element in the fluid phase
for j in range(nelems):
transport(bfluid[:, j], dt, dx, v, D, b_bc[j])
# Update the amounts of elements in both fluid and solid partitions
b[:] = bsolid + bfluid
# Equilibrating all cells with the updated element amounts
for icell in range(ncells):
solver.solve(states[icell], T, P, b[icell])
# Increment time step and number of time steps
t += dt
step += 1
print("Finished!")
# Step 10: Return a string for the title of a figure in the format Time: #h##m
def titlestr(t):
t = t / minute # Convert from seconds to minutes
    h = int(t) // 60  # The number of hours
m = int(t) % 60 # The number of remaining minutes
return 'Time: {:>3}h{:>2}m'.format(h, str(m).zfill(2))
# Step 9: Generate figures for a result file
def plotfile(file):
step = int(file.split('.')[0])
print('Plotting figure', step, '...')
t = step * dt
filearray = loadtxt('results/' + file, skiprows=1)
data = filearray.T
ndigits = len(str(nsteps))
plt.figure()
plt.xlim(left=-0.02, right=0.52)
plt.ylim(bottom=2.5, top=10.5)
plt.title(titlestr(t))
plt.xlabel('Distance [m]')
plt.ylabel('pH')
plt.plot(data[0], data[1])
plt.tight_layout()
plt.savefig('figures/ph/{}.png'.format(str(step).zfill(ndigits)))
plt.figure()
plt.xlim(left=-0.02, right=0.52)
plt.ylim(bottom=-0.1, top=2.1)
plt.ticklabel_format(style='sci', axis='y', scilimits=(0,0))
plt.title(titlestr(t))
plt.xlabel('Distance [m]')
plt.ylabel('Mineral Volume [%$_{\mathsf{vol}}$]')
plt.plot(data[0], data[7] * 100, label='Calcite')
plt.plot(data[0], data[8] * 100, label='Dolomite')
plt.legend(loc='center right')
plt.tight_layout()
plt.savefig('figures/calcite-dolomite/{}.png'.format(str(step).zfill(ndigits)))
plt.figure()
plt.yscale('log')
plt.xlim(left=-0.02, right=0.52)
plt.ylim(bottom=0.5e-5, top=2)
plt.title(titlestr(t))
plt.xlabel('Distance [m]')
plt.ylabel('Concentration [molal]')
plt.plot(data[0], data[3], label='Ca++')
plt.plot(data[0], data[4], label='Mg++')
plt.plot(data[0], data[5], label='HCO3-')
plt.plot(data[0], data[6], label='CO2(aq)')
plt.plot(data[0], data[2], label='H+')
plt.legend(loc='lower right')
plt.tight_layout()
plt.savefig('figures/aqueous-species/{}.png'.format(str(step).zfill(ndigits)))
plt.close('all')
# Step 8: Plot all result files and generate a video
def plot():
# Plot all result files
files = sorted(os.listdir('results'))
Parallel(n_jobs=16)(delayed(plotfile)(file) for file in files)
# Create videos for the figures
ffmpegstr = 'ffmpeg -y -r 30 -i figures/{0}/%03d.png -codec:v mpeg4 -flags:v +qscale -global_quality:v 0 videos/{0}.mp4'
os.system(ffmpegstr.format('calcite-dolomite'))
os.system(ffmpegstr.format('aqueous-species'))
os.system(ffmpegstr.format('ph'))
# Step 7.10.2: Solve a tridiagonal matrix equation using Thomas algorithm (or TriDiagonal Matrix Algorithm (TDMA))
def thomas(a, b, c, d):
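    # Solve a tridiagonal system with sub-diagonal a, diagonal b, super-diagonal
    # c and right-hand side d (all length-n arrays) by forward elimination and
    # back substitution. Note that c and d are modified in place and the
    # returned solution x aliases d.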
n = len(d)
c[0] /= b[0]
for i in range(1, n - 1):
c[i] /= b[i] - a[i]*c[i - 1]
d[0] /= b[0]
for i in range(1, n):
d[i] = (d[i] - a[i]*d[i - 1])/(b[i] - a[i]*c[i - 1])
x = d
for i in reversed(range(0, n - 1)):
x[i] -= c[i]*x[i + 1]
return x
# Step 7.10.1: Perform a transport step
def transport(u, dt, dx, v, D, g):
# Number of DOFs
n = len(u)
alpha = D*dt/dx**2
beta = v*dt/dx
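    # Implicit (backward Euler) discretization with upwind advection and central
    # diffusion; each step amounts to solving one tridiagonal system via thomas().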
a = full(n, -beta - alpha)
b = full(n, 1 + beta + 2*alpha)
c = full(n, -alpha)
# Set the boundary condition
if dirichlet:
# Use Dirichlet BC boundary conditions
b[0] = 1.0
c[0] = 0.0
u[0] = g
else:
# Use flux boundary conditions
u[0] += beta*g
b[0] = 1 + beta + alpha
b[-1] = 1 + beta + alpha
# Solve a tridiagonal matrix equation
thomas(a, b, c, u)
# Step 6: Creating folders for the results' output
def make_results_folders():
os.system('mkdir -p results')
os.system('mkdir -p figures/ph')
os.system('mkdir -p figures/aqueous-species')
os.system('mkdir -p figures/calcite-dolomite')
os.system('mkdir -p videos')
# Step 5: Define the main function
if __name__ == '__main__':
# Create folders for the results
make_results_folders()
# Run the reactive transport simulations
simulate()
# Plotting the result
plot()
| gpl-3.0 |
spennihana/h2o-3 | h2o-py/tests/testdir_misc/pyunit_export_file_multi.py | 6 | 2157 | from __future__ import print_function
from builtins import range
import sys
sys.path.insert(1,"../../../")
import h2o
from tests import pyunit_utils
from h2o.estimators.glm import H2OGeneralizedLinearEstimator
import string
import os
import glob
import random
import pandas as pd
from pandas.util.testing import assert_frame_equal
'''
Export a file with h2o.export_file and compare with the Python counterpart when re-importing the file to check for parity.
This is a variation of the default h2o.export_file test. This test makes sure that support for export to multiple
'part' files is working. It checks that when the user specifies the number of part files, a directory is created
instead of just a single file. It doesn't check the actual number of part files.
'''
def export_file_multipart():
pros_hex = h2o.upload_file(pyunit_utils.locate("smalldata/prostate/prostate.csv"))
pros_hex[1] = pros_hex[1].asfactor()
pros_hex[3] = pros_hex[3].asfactor()
pros_hex[4] = pros_hex[4].asfactor()
pros_hex[5] = pros_hex[5].asfactor()
pros_hex[8] = pros_hex[8].asfactor()
p_sid = pros_hex.runif()
pros_train = pros_hex[p_sid > .2, :]
pros_test = pros_hex[p_sid <= .2, :]
glm = H2OGeneralizedLinearEstimator(family="binomial")
myglm = glm.train(x=list(range(2, pros_hex.ncol)), y=1, training_frame=pros_train)
mypred = glm.predict(pros_test)
def id_generator(size=6, chars=string.ascii_uppercase + string.digits):
return ''.join(random.choice(chars) for _ in range(size))
path = pyunit_utils.locate("results")
dname = os.path.join(path, id_generator() + "_prediction")
h2o.export_file(mypred, dname, parts=-1)
assert os.path.isdir(dname)
part_files = glob.glob(os.path.join(dname, "part-m-?????"))
print(part_files)
py_pred = pd.concat((pd.read_csv(f) for f in part_files))
print(py_pred.head())
h_pred = mypred.as_data_frame(True)
print(h_pred.head())
#Test to check if py_pred & h_pred are identical
assert_frame_equal(py_pred,h_pred)
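# Added commentary (not part of the original test): per the h2o docs,
# parts=-1 lets H2O decide how many part files to write, and any multi-part
# export turns the target path into a directory of part-m-XXXXX files rather
# than a single CSV. That is why the test asserts os.path.isdir(dname), globs
# the parts, and concatenates them with pandas before comparing against
# mypred.as_data_frame(True).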
if __name__ == "__main__":
pyunit_utils.standalone_test(export_file_multipart)
else:
export_file_multipart()
| apache-2.0 |
ammarkhann/FinalSeniorCode | lib/python2.7/site-packages/pandas/core/indexes/base.py | 3 | 138092 | import datetime
import warnings
import operator
import numpy as np
from pandas._libs import (lib, index as libindex, tslib as libts,
algos as libalgos, join as libjoin,
Timestamp, Timedelta, )
from pandas._libs.lib import is_datetime_array
from pandas.compat import range, u
from pandas.compat.numpy import function as nv
from pandas import compat
from pandas.core.dtypes.generic import ABCSeries, ABCMultiIndex, ABCPeriodIndex
from pandas.core.dtypes.missing import isnull, array_equivalent
from pandas.core.dtypes.common import (
_ensure_int64,
_ensure_object,
_ensure_categorical,
_ensure_platform_int,
is_integer,
is_float,
is_dtype_equal,
is_object_dtype,
is_categorical_dtype,
is_interval_dtype,
is_bool_dtype,
is_signed_integer_dtype,
is_unsigned_integer_dtype,
is_integer_dtype, is_float_dtype,
is_datetime64_any_dtype,
is_timedelta64_dtype,
needs_i8_conversion,
is_iterator, is_list_like,
is_scalar)
from pandas.core.common import (is_bool_indexer,
_values_from_object,
_asarray_tuplesafe)
from pandas.core.base import PandasObject, IndexOpsMixin
import pandas.core.base as base
from pandas.util._decorators import (Appender, Substitution, cache_readonly,
deprecate, deprecate_kwarg)
from pandas.core.indexes.frozen import FrozenList
import pandas.core.common as com
import pandas.core.dtypes.concat as _concat
import pandas.core.missing as missing
import pandas.core.algorithms as algos
from pandas.io.formats.printing import pprint_thing
from pandas.core.ops import _comp_method_OBJECT_ARRAY
from pandas.core.strings import StringAccessorMixin
from pandas.core.config import get_option
# simplify
default_pprint = lambda x, max_seq_items=None: \
pprint_thing(x, escape_chars=('\t', '\r', '\n'), quote_strings=True,
max_seq_items=max_seq_items)
__all__ = ['Index']
_unsortable_types = frozenset(('mixed', 'mixed-integer'))
_index_doc_kwargs = dict(klass='Index', inplace='',
target_klass='Index',
unique='Index', duplicated='np.ndarray')
_index_shared_docs = dict()
def _try_get_item(x):
try:
return x.item()
except AttributeError:
return x
class InvalidIndexError(Exception):
pass
_o_dtype = np.dtype(object)
_Identity = object
def _new_Index(cls, d):
""" This is called upon unpickling, rather than the default which doesn't
have arguments and breaks __new__
"""
# required for backward compat, because PI can't be instantiated with
# ordinals through __new__ GH #13277
if issubclass(cls, ABCPeriodIndex):
from pandas.core.indexes.period import _new_PeriodIndex
return _new_PeriodIndex(cls, **d)
return cls.__new__(cls, **d)
class Index(IndexOpsMixin, StringAccessorMixin, PandasObject):
"""
Immutable ndarray implementing an ordered, sliceable set. The basic object
storing axis labels for all pandas objects
Parameters
----------
data : array-like (1-dimensional)
dtype : NumPy dtype (default: object)
copy : bool
Make a copy of input ndarray
name : object
Name to be stored in the index
tupleize_cols : bool (default: True)
When True, attempt to create a MultiIndex if possible
Notes
-----
An Index instance can **only** contain hashable objects
"""
# To hand over control to subclasses
_join_precedence = 1
# Cython methods
_arrmap = libalgos.arrmap_object
_left_indexer_unique = libjoin.left_join_indexer_unique_object
_left_indexer = libjoin.left_join_indexer_object
_inner_indexer = libjoin.inner_join_indexer_object
_outer_indexer = libjoin.outer_join_indexer_object
_box_scalars = False
_typ = 'index'
_data = None
_id = None
name = None
asi8 = None
_comparables = ['name']
_attributes = ['name']
_allow_index_ops = True
_allow_datetime_index_ops = False
_allow_period_index_ops = False
_is_numeric_dtype = False
_can_hold_na = True
# would we like our indexing holder to defer to us
_defer_to_indexing = False
# prioritize current class for _shallow_copy_with_infer,
# used to infer integers as datetime-likes
_infer_as_myclass = False
_engine_type = libindex.ObjectEngine
def __new__(cls, data=None, dtype=None, copy=False, name=None,
fastpath=False, tupleize_cols=True, **kwargs):
if name is None and hasattr(data, 'name'):
name = data.name
if fastpath:
return cls._simple_new(data, name)
from .range import RangeIndex
# range
if isinstance(data, RangeIndex):
return RangeIndex(start=data, copy=copy, dtype=dtype, name=name)
elif isinstance(data, range):
return RangeIndex.from_range(data, copy=copy, dtype=dtype,
name=name)
# categorical
if is_categorical_dtype(data) or is_categorical_dtype(dtype):
from .category import CategoricalIndex
return CategoricalIndex(data, copy=copy, name=name, **kwargs)
# interval
if is_interval_dtype(data):
from .interval import IntervalIndex
return IntervalIndex.from_intervals(data, name=name,
copy=copy)
# index-like
elif isinstance(data, (np.ndarray, Index, ABCSeries)):
if (is_datetime64_any_dtype(data) or
(dtype is not None and is_datetime64_any_dtype(dtype)) or
'tz' in kwargs):
from pandas.core.indexes.datetimes import DatetimeIndex
result = DatetimeIndex(data, copy=copy, name=name,
dtype=dtype, **kwargs)
if dtype is not None and is_dtype_equal(_o_dtype, dtype):
return Index(result.to_pydatetime(), dtype=_o_dtype)
else:
return result
elif (is_timedelta64_dtype(data) or
(dtype is not None and is_timedelta64_dtype(dtype))):
from pandas.core.indexes.timedeltas import TimedeltaIndex
result = TimedeltaIndex(data, copy=copy, name=name, **kwargs)
if dtype is not None and _o_dtype == dtype:
return Index(result.to_pytimedelta(), dtype=_o_dtype)
else:
return result
if dtype is not None:
try:
# we need to avoid having numpy coerce
# things that look like ints/floats to ints unless
# they are actually ints, e.g. '0' and 0.0
# should not be coerced
# GH 11836
if is_integer_dtype(dtype):
inferred = lib.infer_dtype(data)
if inferred == 'integer':
data = np.array(data, copy=copy, dtype=dtype)
elif inferred in ['floating', 'mixed-integer-float']:
if isnull(data).any():
raise ValueError('cannot convert float '
'NaN to integer')
# If we are actually all equal to integers,
# then coerce to integer.
try:
return cls._try_convert_to_int_index(
data, copy, name)
except ValueError:
pass
# Return an actual float index.
from .numeric import Float64Index
return Float64Index(data, copy=copy, dtype=dtype,
name=name)
elif inferred == 'string':
pass
else:
data = data.astype(dtype)
elif is_float_dtype(dtype):
inferred = lib.infer_dtype(data)
if inferred == 'string':
pass
else:
data = data.astype(dtype)
else:
data = np.array(data, dtype=dtype, copy=copy)
except (TypeError, ValueError) as e:
msg = str(e)
if 'cannot convert float' in msg:
raise
# maybe coerce to a sub-class
from pandas.core.indexes.period import (
PeriodIndex, IncompatibleFrequency)
if isinstance(data, PeriodIndex):
return PeriodIndex(data, copy=copy, name=name, **kwargs)
if is_signed_integer_dtype(data.dtype):
from .numeric import Int64Index
return Int64Index(data, copy=copy, dtype=dtype, name=name)
elif is_unsigned_integer_dtype(data.dtype):
from .numeric import UInt64Index
return UInt64Index(data, copy=copy, dtype=dtype, name=name)
elif is_float_dtype(data.dtype):
from .numeric import Float64Index
return Float64Index(data, copy=copy, dtype=dtype, name=name)
elif issubclass(data.dtype.type, np.bool) or is_bool_dtype(data):
subarr = data.astype('object')
else:
subarr = _asarray_tuplesafe(data, dtype=object)
# _asarray_tuplesafe does not always copy underlying data,
# so need to make sure that this happens
if copy:
subarr = subarr.copy()
if dtype is None:
inferred = lib.infer_dtype(subarr)
if inferred == 'integer':
try:
return cls._try_convert_to_int_index(
subarr, copy, name)
except ValueError:
pass
return Index(subarr, copy=copy,
dtype=object, name=name)
elif inferred in ['floating', 'mixed-integer-float']:
from .numeric import Float64Index
return Float64Index(subarr, copy=copy, name=name)
elif inferred == 'interval':
from .interval import IntervalIndex
return IntervalIndex.from_intervals(subarr, name=name,
copy=copy)
elif inferred == 'boolean':
# don't support boolean explicitly ATM
pass
elif inferred != 'string':
if inferred.startswith('datetime'):
if (lib.is_datetime_with_singletz_array(subarr) or
'tz' in kwargs):
# only when subarr has the same tz
from pandas.core.indexes.datetimes import (
DatetimeIndex)
try:
return DatetimeIndex(subarr, copy=copy,
name=name, **kwargs)
except libts.OutOfBoundsDatetime:
pass
elif inferred.startswith('timedelta'):
from pandas.core.indexes.timedeltas import (
TimedeltaIndex)
return TimedeltaIndex(subarr, copy=copy, name=name,
**kwargs)
elif inferred == 'period':
try:
return PeriodIndex(subarr, name=name, **kwargs)
except IncompatibleFrequency:
pass
return cls._simple_new(subarr, name)
elif hasattr(data, '__array__'):
return Index(np.asarray(data), dtype=dtype, copy=copy, name=name,
**kwargs)
elif data is None or is_scalar(data):
cls._scalar_data_error(data)
else:
if (tupleize_cols and isinstance(data, list) and data and
isinstance(data[0], tuple)):
# we must be all tuples, otherwise don't construct
# 10697
if all(isinstance(e, tuple) for e in data):
try:
# must be orderable in py3
if compat.PY3:
sorted(data)
from .multi import MultiIndex
return MultiIndex.from_tuples(
data, names=name or kwargs.get('names'))
except (TypeError, KeyError):
# python2 - MultiIndex fails on mixed types
pass
# other iterable of some kind
subarr = _asarray_tuplesafe(data, dtype=object)
return Index(subarr, dtype=dtype, copy=copy, name=name, **kwargs)
"""
NOTE for new Index creation:
- _simple_new: It returns new Index with the same type as the caller.
All metadata (such as name) must be provided by caller's responsibility.
Using _shallow_copy is recommended because it fills these metadata
otherwise specified.
- _shallow_copy: It returns new Index with the same type (using
_simple_new), but fills caller's metadata otherwise specified. Passed
kwargs will overwrite corresponding metadata.
- _shallow_copy_with_infer: It returns new Index inferring its type
from passed values. It fills caller's metadata otherwise specified as the
same as _shallow_copy.
See each method's docstring.
"""
@classmethod
def _simple_new(cls, values, name=None, dtype=None, **kwargs):
"""
we require that we have a dtype compatible with the values
if we are passed a non-dtype compat, then coerce using the constructor
Must be careful not to recurse.
"""
if not hasattr(values, 'dtype'):
if values is None and dtype is not None:
values = np.empty(0, dtype=dtype)
else:
values = np.array(values, copy=False)
if is_object_dtype(values):
values = cls(values, name=name, dtype=dtype,
**kwargs)._values
result = object.__new__(cls)
result._data = values
result.name = name
for k, v in compat.iteritems(kwargs):
setattr(result, k, v)
return result._reset_identity()
_index_shared_docs['_shallow_copy'] = """
create a new Index with the same class as the caller, don't copy the
data, use the same object attributes with passed in attributes taking
precedence
*this is an internal non-public method*
Parameters
----------
values : the values to create the new Index, optional
kwargs : updates the default attributes for this Index
"""
@Appender(_index_shared_docs['_shallow_copy'])
def _shallow_copy(self, values=None, **kwargs):
if values is None:
values = self.values
attributes = self._get_attributes_dict()
attributes.update(kwargs)
return self._simple_new(values, **attributes)
def _shallow_copy_with_infer(self, values=None, **kwargs):
"""
create a new Index inferring the class with passed value, don't copy
the data, use the same object attributes with passed in attributes
taking precedence
*this is an internal non-public method*
Parameters
----------
values : the values to create the new Index, optional
kwargs : updates the default attributes for this Index
"""
if values is None:
values = self.values
attributes = self._get_attributes_dict()
attributes.update(kwargs)
attributes['copy'] = False
if self._infer_as_myclass:
try:
return self._constructor(values, **attributes)
except (TypeError, ValueError):
pass
return Index(values, **attributes)
def _deepcopy_if_needed(self, orig, copy=False):
"""
.. versionadded:: 0.19.0
Make a copy of self if data coincides (in memory) with orig.
Subclasses should override this if self._base is not an ndarray.
Parameters
----------
orig : ndarray
other ndarray to compare self._data against
copy : boolean, default False
when False, do not run any check, just return self
Returns
-------
A copy of self if needed, otherwise self : Index
"""
if copy:
# Retrieve the "base objects", i.e. the original memory allocations
orig = orig if orig.base is None else orig.base
new = self._data if self._data.base is None else self._data.base
if orig is new:
return self.copy(deep=True)
return self
def _update_inplace(self, result, **kwargs):
# guard when called from IndexOpsMixin
raise TypeError("Index can't be updated inplace")
def _sort_levels_monotonic(self):
""" compat with MultiIndex """
return self
_index_shared_docs['_get_grouper_for_level'] = """
Get index grouper corresponding to an index level
Parameters
----------
mapper: Group mapping function or None
Function mapping index values to groups
level : int or None
Index level
Returns
-------
grouper : Index
Index of values to group on
labels : ndarray of int or None
Array of locations in level_index
uniques : Index or None
Index of unique values for level
"""
@Appender(_index_shared_docs['_get_grouper_for_level'])
def _get_grouper_for_level(self, mapper, level=None):
assert level is None or level == 0
if mapper is None:
grouper = self
else:
grouper = self.map(mapper)
return grouper, None, None
def is_(self, other):
"""
More flexible, faster check like ``is`` but that works through views
Note: this is *not* the same as ``Index.identical()``, which checks
that metadata is also the same.
Parameters
----------
other : object
other object to compare against.
Returns
-------
True if both have same underlying data, False otherwise : bool
"""
# use something other than None to be clearer
return self._id is getattr(
other, '_id', Ellipsis) and self._id is not None
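# Illustrative usage of Index.is_ (added note, not a doctest from the pandas
# source): views share the identity object, copies do not.
#   >>> i1 = pd.Index([1, 2, 3])
#   >>> i2 = i1.view()
#   >>> i1.is_(i2)
#   True
#   >>> i1.is_(i1.copy())
#   False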
def _reset_identity(self):
"""Initializes or resets ``_id`` attribute with new object"""
self._id = _Identity()
return self
# ndarray compat
def __len__(self):
"""
return the length of the Index
"""
return len(self._data)
def __array__(self, dtype=None):
""" the array interface, return my values """
return self._data.view(np.ndarray)
def __array_wrap__(self, result, context=None):
"""
Gets called after a ufunc
"""
if is_bool_dtype(result):
return result
attrs = self._get_attributes_dict()
attrs = self._maybe_update_attributes(attrs)
return Index(result, **attrs)
@cache_readonly
def dtype(self):
""" return the dtype object of the underlying data """
return self._data.dtype
@cache_readonly
def dtype_str(self):
""" return the dtype str of the underlying data """
return str(self.dtype)
@property
def values(self):
""" return the underlying data as an ndarray """
return self._data.view(np.ndarray)
def get_values(self):
""" return the underlying data as an ndarray """
return self.values
@Appender(IndexOpsMixin.memory_usage.__doc__)
def memory_usage(self, deep=False):
result = super(Index, self).memory_usage(deep=deep)
# include our engine hashtable
result += self._engine.sizeof(deep=deep)
return result
# ops compat
def tolist(self):
"""
return a list of the Index values
"""
return list(self.values)
@deprecate_kwarg(old_arg_name='n', new_arg_name='repeats')
def repeat(self, repeats, *args, **kwargs):
"""
Repeat elements of an Index. Refer to `numpy.ndarray.repeat`
for more information about the `repeats` argument.
See also
--------
numpy.ndarray.repeat
"""
nv.validate_repeat(args, kwargs)
return self._shallow_copy(self._values.repeat(repeats))
_index_shared_docs['where'] = """
.. versionadded:: 0.19.0
Return an Index of same shape as self and whose corresponding
entries are from self where cond is True and otherwise are from
other.
Parameters
----------
cond : boolean array-like with the same length as self
other : scalar, or array-like
"""
@Appender(_index_shared_docs['where'])
def where(self, cond, other=None):
if other is None:
other = self._na_value
values = np.where(cond, self.values, other)
dtype = self.dtype
if self._is_numeric_dtype and np.any(isnull(values)):
# We can't coerce to the numeric dtype of "self" (unless
# it's float) if there are NaN values in our output.
dtype = None
return self._shallow_copy_with_infer(values, dtype=dtype)
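# Illustrative usage of Index.where (added note, not from the pandas source):
# positions where cond is False receive the index's NA value, which for an
# integer index forces a float result.
#   >>> idx = pd.Index([1, 2, 3, 4])
#   >>> idx.where(idx > 2)
#   Float64Index([nan, nan, 3.0, 4.0], dtype='float64')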
def ravel(self, order='C'):
"""
return an ndarray of the flattened values of the underlying data
See also
--------
numpy.ndarray.ravel
"""
return self._values.ravel(order=order)
# construction helpers
@classmethod
def _try_convert_to_int_index(cls, data, copy, name):
"""
Attempt to convert an array of data into an integer index.
Parameters
----------
data : The data to convert.
copy : Whether to copy the data or not.
name : The name of the index returned.
Returns
-------
int_index : data converted to either an Int64Index or a
UInt64Index
Raises
------
ValueError if the conversion was not successful.
"""
from .numeric import Int64Index, UInt64Index
try:
res = data.astype('i8', copy=False)
if (res == data).all():
return Int64Index(res, copy=copy, name=name)
except (OverflowError, TypeError, ValueError):
pass
# Conversion to int64 failed (possibly due to
# overflow), so let's try now with uint64.
try:
res = data.astype('u8', copy=False)
if (res == data).all():
return UInt64Index(res, copy=copy, name=name)
except (TypeError, ValueError):
pass
raise ValueError
@classmethod
def _scalar_data_error(cls, data):
raise TypeError('{0}(...) must be called with a collection of some '
'kind, {1} was passed'.format(cls.__name__,
repr(data)))
@classmethod
def _string_data_error(cls, data):
raise TypeError('String dtype not supported, you may need '
'to explicitly cast to a numeric type')
@classmethod
def _coerce_to_ndarray(cls, data):
"""coerces data to ndarray, raises on scalar data. Converts other
iterables to list first and then to array. Does not touch ndarrays.
"""
if not isinstance(data, (np.ndarray, Index)):
if data is None or is_scalar(data):
cls._scalar_data_error(data)
# other iterable of some kind
if not isinstance(data, (ABCSeries, list, tuple)):
data = list(data)
data = np.asarray(data)
return data
def _get_attributes_dict(self):
""" return an attributes dict for my class """
return dict([(k, getattr(self, k, None)) for k in self._attributes])
def view(self, cls=None):
# we need to see if we are subclassing an
# index type here
if cls is not None and not hasattr(cls, '_typ'):
result = self._data.view(cls)
else:
result = self._shallow_copy()
if isinstance(result, Index):
result._id = self._id
return result
def _coerce_scalar_to_index(self, item):
"""
we need to coerce a scalar to a compat for our index type
Parameters
----------
item : scalar item to coerce
"""
dtype = self.dtype
if self._is_numeric_dtype and isnull(item):
# We can't coerce to the numeric dtype of "self" (unless
# it's float) if there are NaN values in our output.
dtype = None
return Index([item], dtype=dtype, **self._get_attributes_dict())
_index_shared_docs['copy'] = """
Make a copy of this object. Name and dtype sets those attributes on
the new object.
Parameters
----------
name : string, optional
deep : boolean, default False
dtype : numpy dtype or pandas type
Returns
-------
copy : Index
Notes
-----
In most cases, there should be no functional difference from using
``deep``, but if ``deep`` is passed it will attempt to deepcopy.
"""
@Appender(_index_shared_docs['copy'])
def copy(self, name=None, deep=False, dtype=None, **kwargs):
if deep:
new_index = self._shallow_copy(self._data.copy())
else:
new_index = self._shallow_copy()
names = kwargs.get('names')
names = self._validate_names(name=name, names=names, deep=deep)
new_index = new_index.set_names(names)
if dtype:
new_index = new_index.astype(dtype)
return new_index
def __copy__(self, **kwargs):
return self.copy(**kwargs)
def __deepcopy__(self, memo=None):
if memo is None:
memo = {}
return self.copy(deep=True)
def _validate_names(self, name=None, names=None, deep=False):
"""
Handles the quirks of having a singular 'name' parameter for general
Index and plural 'names' parameter for MultiIndex.
"""
from copy import deepcopy
if names is not None and name is not None:
raise TypeError("Can only provide one of `names` and `name`")
elif names is None and name is None:
return deepcopy(self.names) if deep else self.names
elif names is not None:
if not is_list_like(names):
raise TypeError("Must pass list-like as `names`.")
return names
else:
if not is_list_like(name):
return [name]
return name
def __unicode__(self):
"""
Return a string representation for this object.
Invoked by unicode(df) in py2 only. Yields a Unicode String in both
py2/py3.
"""
klass = self.__class__.__name__
data = self._format_data()
attrs = self._format_attrs()
space = self._format_space()
prepr = (u(",%s") %
space).join([u("%s=%s") % (k, v) for k, v in attrs])
# no data provided, just attributes
if data is None:
data = ''
res = u("%s(%s%s)") % (klass, data, prepr)
return res
def _format_space(self):
# using space here controls if the attributes
# are line separated or not (the default)
# max_seq_items = get_option('display.max_seq_items')
# if len(self) > max_seq_items:
# space = "\n%s" % (' ' * (len(klass) + 1))
return " "
@property
def _formatter_func(self):
"""
Return the formatted data as a unicode string
"""
return default_pprint
def _format_data(self):
"""
Return the formatted data as a unicode string
"""
from pandas.io.formats.console import get_console_size
from pandas.io.formats.format import _get_adjustment
display_width, _ = get_console_size()
if display_width is None:
display_width = get_option('display.width') or 80
space1 = "\n%s" % (' ' * (len(self.__class__.__name__) + 1))
space2 = "\n%s" % (' ' * (len(self.__class__.__name__) + 2))
n = len(self)
sep = ','
max_seq_items = get_option('display.max_seq_items') or n
formatter = self._formatter_func
# do we want to justify (only do so for non-objects)
is_justify = not (self.inferred_type in ('string', 'unicode') or
(self.inferred_type == 'categorical' and
is_object_dtype(self.categories)))
# are we a truncated display
is_truncated = n > max_seq_items
# adj can optionally handle unicode East Asian width
adj = _get_adjustment()
def _extend_line(s, line, value, display_width, next_line_prefix):
if (adj.len(line.rstrip()) + adj.len(value.rstrip()) >=
display_width):
s += line.rstrip()
line = next_line_prefix
line += value
return s, line
def best_len(values):
if values:
return max([adj.len(x) for x in values])
else:
return 0
if n == 0:
summary = '[], '
elif n == 1:
first = formatter(self[0])
summary = '[%s], ' % first
elif n == 2:
first = formatter(self[0])
last = formatter(self[-1])
summary = '[%s, %s], ' % (first, last)
else:
if n > max_seq_items:
n = min(max_seq_items // 2, 10)
head = [formatter(x) for x in self[:n]]
tail = [formatter(x) for x in self[-n:]]
else:
head = []
tail = [formatter(x) for x in self]
# adjust all values to max length if needed
if is_justify:
# however, if we are not truncated and we are only a single
# line, then don't justify
if (is_truncated or
not (len(', '.join(head)) < display_width and
len(', '.join(tail)) < display_width)):
max_len = max(best_len(head), best_len(tail))
head = [x.rjust(max_len) for x in head]
tail = [x.rjust(max_len) for x in tail]
summary = ""
line = space2
for i in range(len(head)):
word = head[i] + sep + ' '
summary, line = _extend_line(summary, line, word,
display_width, space2)
if is_truncated:
# remove trailing space of last line
summary += line.rstrip() + space2 + '...'
line = space2
for i in range(len(tail) - 1):
word = tail[i] + sep + ' '
summary, line = _extend_line(summary, line, word,
display_width, space2)
# last value: no sep added + 1 space of width used for trailing ','
summary, line = _extend_line(summary, line, tail[-1],
display_width - 2, space2)
summary += line
summary += '],'
if len(summary) > (display_width):
summary += space1
else: # one row
summary += ' '
# remove initial space
summary = '[' + summary[len(space2):]
return summary
def _format_attrs(self):
"""
Return a list of tuples of the (attr,formatted_value)
"""
attrs = []
attrs.append(('dtype', "'%s'" % self.dtype))
if self.name is not None:
attrs.append(('name', default_pprint(self.name)))
max_seq_items = get_option('display.max_seq_items') or len(self)
if len(self) > max_seq_items:
attrs.append(('length', len(self)))
return attrs
def to_series(self, **kwargs):
"""
Create a Series with both index and values equal to the index keys
useful with map for returning an indexer based on an index
Returns
-------
Series : dtype will be based on the type of the Index values.
"""
from pandas import Series
return Series(self._to_embed(),
index=self._shallow_copy(),
name=self.name)
def _to_embed(self, keep_tz=False):
"""
*this is an internal non-public method*
return an array repr of this object, potentially casting to object
"""
return self.values.copy()
_index_shared_docs['astype'] = """
Create an Index with values cast to dtypes. The class of a new Index
is determined by dtype. When conversion is impossible, a ValueError
exception is raised.
Parameters
----------
dtype : numpy dtype or pandas type
copy : bool, default True
By default, astype always returns a newly allocated object.
If copy is set to False and internal requirements on dtype are
satisfied, the original data is used to create a new Index
or the original Index is returned.
.. versionadded:: 0.19.0
"""
@Appender(_index_shared_docs['astype'])
def astype(self, dtype, copy=True):
return Index(self.values.astype(dtype, copy=copy), name=self.name,
dtype=dtype)
def _to_safe_for_reshape(self):
""" convert to object if we are a categorical """
return self
def to_datetime(self, dayfirst=False):
"""
DEPRECATED: use :meth:`pandas.to_datetime` instead.
For an Index containing strings or datetime.datetime objects, attempt
conversion to DatetimeIndex
"""
warnings.warn("to_datetime is deprecated. Use pd.to_datetime(...)",
FutureWarning, stacklevel=2)
from pandas.core.indexes.datetimes import DatetimeIndex
if self.inferred_type == 'string':
from dateutil.parser import parse
parser = lambda x: parse(x, dayfirst=dayfirst)
parsed = lib.try_parse_dates(self.values, parser=parser)
return DatetimeIndex(parsed)
else:
return DatetimeIndex(self.values)
def _assert_can_do_setop(self, other):
if not is_list_like(other):
raise TypeError('Input must be Index or array-like')
return True
def _convert_can_do_setop(self, other):
if not isinstance(other, Index):
other = Index(other, name=self.name)
result_name = self.name
else:
result_name = self.name if self.name == other.name else None
return other, result_name
def _convert_for_op(self, value):
""" Convert value to be insertable to ndarray """
return value
def _assert_can_do_op(self, value):
""" Check value is valid for scalar op """
if not lib.isscalar(value):
msg = "'value' must be a scalar, passed: {0}"
raise TypeError(msg.format(type(value).__name__))
@property
def nlevels(self):
return 1
def _get_names(self):
return FrozenList((self.name, ))
def _set_names(self, values, level=None):
if len(values) != 1:
raise ValueError('Length of new names must be 1, got %d' %
len(values))
self.name = values[0]
names = property(fset=_set_names, fget=_get_names)
def set_names(self, names, level=None, inplace=False):
"""
Set new names on index. Defaults to returning new index.
Parameters
----------
names : str or sequence
name(s) to set
level : int, level name, or sequence of int/level names (default None)
If the index is a MultiIndex (hierarchical), level(s) to set (None
for all levels). Otherwise level must be None
inplace : bool
if True, mutates in place
Returns
-------
new index (of same type and class...etc) [if inplace, returns None]
Examples
--------
>>> Index([1, 2, 3, 4]).set_names('foo')
Int64Index([1, 2, 3, 4], dtype='int64')
>>> Index([1, 2, 3, 4]).set_names(['foo'])
Int64Index([1, 2, 3, 4], dtype='int64')
>>> idx = MultiIndex.from_tuples([(1, u'one'), (1, u'two'),
(2, u'one'), (2, u'two')],
names=['foo', 'bar'])
>>> idx.set_names(['baz', 'quz'])
MultiIndex(levels=[[1, 2], [u'one', u'two']],
labels=[[0, 0, 1, 1], [0, 1, 0, 1]],
names=[u'baz', u'quz'])
>>> idx.set_names('baz', level=0)
MultiIndex(levels=[[1, 2], [u'one', u'two']],
labels=[[0, 0, 1, 1], [0, 1, 0, 1]],
names=[u'baz', u'bar'])
"""
if level is not None and self.nlevels == 1:
raise ValueError('Level must be None for non-MultiIndex')
if level is not None and not is_list_like(level) and is_list_like(
names):
raise TypeError("Names must be a string")
if not is_list_like(names) and level is None and self.nlevels > 1:
raise TypeError("Must pass list-like as `names`.")
if not is_list_like(names):
names = [names]
if level is not None and not is_list_like(level):
level = [level]
if inplace:
idx = self
else:
idx = self._shallow_copy()
idx._set_names(names, level=level)
if not inplace:
return idx
def rename(self, name, inplace=False):
"""
Set new names on index. Defaults to returning new index.
Parameters
----------
name : str or list
name to set
inplace : bool
if True, mutates in place
Returns
-------
new index (of same type and class...etc) [if inplace, returns None]
"""
return self.set_names([name], inplace=inplace)
def reshape(self, *args, **kwargs):
"""
NOT IMPLEMENTED: do not call this method, as reshaping is not
supported for Index objects and will raise an error.
Reshape an Index.
"""
raise NotImplementedError("reshaping is not supported "
"for Index objects")
@property
def _has_complex_internals(self):
# to disable groupby tricks in MultiIndex
return False
def summary(self, name=None):
if len(self) > 0:
head = self[0]
if (hasattr(head, 'format') and
not isinstance(head, compat.string_types)):
head = head.format()
tail = self[-1]
if (hasattr(tail, 'format') and
not isinstance(tail, compat.string_types)):
tail = tail.format()
index_summary = ', %s to %s' % (pprint_thing(head),
pprint_thing(tail))
else:
index_summary = ''
if name is None:
name = type(self).__name__
return '%s: %s entries%s' % (name, len(self), index_summary)
def _mpl_repr(self):
# how to represent ourselves to matplotlib
return self.values
_na_value = np.nan
"""The expected NA value to use with this index."""
# introspection
@property
def is_monotonic(self):
""" alias for is_monotonic_increasing (deprecated) """
return self._engine.is_monotonic_increasing
@property
def is_monotonic_increasing(self):
"""
return True if the index contains monotonically increasing (only equal or
increasing) values.
"""
return self._engine.is_monotonic_increasing
@property
def is_monotonic_decreasing(self):
"""
return True if the index contains monotonically decreasing (only equal or
decreasing) values.
"""
return self._engine.is_monotonic_decreasing
def is_lexsorted_for_tuple(self, tup):
return True
@cache_readonly(allow_setting=True)
def is_unique(self):
""" return if the index has unique values """
return self._engine.is_unique
@property
def has_duplicates(self):
return not self.is_unique
def is_boolean(self):
return self.inferred_type in ['boolean']
def is_integer(self):
return self.inferred_type in ['integer']
def is_floating(self):
return self.inferred_type in ['floating', 'mixed-integer-float']
def is_numeric(self):
return self.inferred_type in ['integer', 'floating']
def is_object(self):
return is_object_dtype(self.dtype)
def is_categorical(self):
return self.inferred_type in ['categorical']
def is_interval(self):
return self.inferred_type in ['interval']
def is_mixed(self):
return self.inferred_type in ['mixed']
def holds_integer(self):
return self.inferred_type in ['integer', 'mixed-integer']
_index_shared_docs['_convert_scalar_indexer'] = """
Convert a scalar indexer.
Parameters
----------
key : label of the slice bound
kind : {'ix', 'loc', 'getitem', 'iloc'} or None
"""
@Appender(_index_shared_docs['_convert_scalar_indexer'])
def _convert_scalar_indexer(self, key, kind=None):
assert kind in ['ix', 'loc', 'getitem', 'iloc', None]
if kind == 'iloc':
return self._validate_indexer('positional', key, kind)
if len(self) and not isinstance(self, ABCMultiIndex,):
# we can raise here if we are definitive that this
# is positional indexing (eg. .ix on with a float)
# or label indexing if we are using a type able
# to be represented in the index
if kind in ['getitem', 'ix'] and is_float(key):
if not self.is_floating():
return self._invalid_indexer('label', key)
elif kind in ['loc'] and is_float(key):
# we want to raise KeyError on string/mixed here
# technically we *could* raise a TypeError
# on anything but mixed though
if self.inferred_type not in ['floating',
'mixed-integer-float',
'string',
'unicode',
'mixed']:
return self._invalid_indexer('label', key)
elif kind in ['loc'] and is_integer(key):
if not self.holds_integer():
return self._invalid_indexer('label', key)
return key
_index_shared_docs['_convert_slice_indexer'] = """
Convert a slice indexer.
By definition, these are labels unless 'iloc' is passed in.
Floats are not allowed as the start, step, or stop of the slice.
Parameters
----------
key : label of the slice bound
kind : {'ix', 'loc', 'getitem', 'iloc'} or None
"""
@Appender(_index_shared_docs['_convert_slice_indexer'])
def _convert_slice_indexer(self, key, kind=None):
assert kind in ['ix', 'loc', 'getitem', 'iloc', None]
# if we are not a slice, then we are done
if not isinstance(key, slice):
return key
# validate iloc
if kind == 'iloc':
return slice(self._validate_indexer('slice', key.start, kind),
self._validate_indexer('slice', key.stop, kind),
self._validate_indexer('slice', key.step, kind))
# potentially cast the bounds to integers
start, stop, step = key.start, key.stop, key.step
# figure out if this is a positional indexer
def is_int(v):
return v is None or is_integer(v)
is_null_slicer = start is None and stop is None
is_index_slice = is_int(start) and is_int(stop)
is_positional = is_index_slice and not self.is_integer()
if kind == 'getitem':
"""
called from the getitem slicers, validate that we are in fact
integers
"""
if self.is_integer() or is_index_slice:
return slice(self._validate_indexer('slice', key.start, kind),
self._validate_indexer('slice', key.stop, kind),
self._validate_indexer('slice', key.step, kind))
# convert the slice to an indexer here
# if we are mixed and have integers
try:
if is_positional and self.is_mixed():
# TODO: i, j are not used anywhere
if start is not None:
i = self.get_loc(start) # noqa
if stop is not None:
j = self.get_loc(stop) # noqa
is_positional = False
except KeyError:
if self.inferred_type == 'mixed-integer-float':
raise
if is_null_slicer:
indexer = key
elif is_positional:
indexer = key
else:
try:
indexer = self.slice_indexer(start, stop, step, kind=kind)
except Exception:
if is_index_slice:
if self.is_integer():
raise
else:
indexer = key
else:
raise
return indexer
def _convert_listlike_indexer(self, keyarr, kind=None):
"""
Parameters
----------
keyarr : list-like
Indexer to convert.
Returns
-------
tuple (indexer, keyarr)
indexer is an ndarray or None if cannot convert
keyarr are tuple-safe keys
"""
if isinstance(keyarr, Index):
keyarr = self._convert_index_indexer(keyarr)
else:
keyarr = self._convert_arr_indexer(keyarr)
indexer = self._convert_list_indexer(keyarr, kind=kind)
return indexer, keyarr
_index_shared_docs['_convert_arr_indexer'] = """
Convert an array-like indexer to the appropriate dtype.
Parameters
----------
keyarr : array-like
Indexer to convert.
Returns
-------
converted_keyarr : array-like
"""
@Appender(_index_shared_docs['_convert_arr_indexer'])
def _convert_arr_indexer(self, keyarr):
keyarr = _asarray_tuplesafe(keyarr)
return keyarr
_index_shared_docs['_convert_index_indexer'] = """
Convert an Index indexer to the appropriate dtype.
Parameters
----------
keyarr : Index (or sub-class)
Indexer to convert.
Returns
-------
converted_keyarr : Index (or sub-class)
"""
@Appender(_index_shared_docs['_convert_index_indexer'])
def _convert_index_indexer(self, keyarr):
return keyarr
_index_shared_docs['_convert_list_indexer'] = """
Convert a list-like indexer to the appropriate dtype.
Parameters
----------
keyarr : Index (or sub-class)
Indexer to convert.
kind : iloc, ix, loc, optional
Returns
-------
positional indexer or None
"""
@Appender(_index_shared_docs['_convert_list_indexer'])
def _convert_list_indexer(self, keyarr, kind=None):
if (kind in [None, 'iloc', 'ix'] and
is_integer_dtype(keyarr) and not self.is_floating() and
not isinstance(keyarr, ABCPeriodIndex)):
if self.inferred_type == 'mixed-integer':
indexer = self.get_indexer(keyarr)
if (indexer >= 0).all():
return indexer
# missing values are flagged as -1 by get_indexer and negative
# indices are already converted to positive indices in the
# above if-statement, so the negative flags are changed to
# values outside the range of indices so as to trigger an
# IndexError in maybe_convert_indices
indexer[indexer < 0] = len(self)
from pandas.core.indexing import maybe_convert_indices
return maybe_convert_indices(indexer, len(self))
elif not self.inferred_type == 'integer':
keyarr = np.where(keyarr < 0, len(self) + keyarr, keyarr)
return keyarr
return None
def _invalid_indexer(self, form, key):
""" consistent invalid indexer message """
raise TypeError("cannot do {form} indexing on {klass} with these "
"indexers [{key}] of {kind}".format(
form=form, klass=type(self), key=key,
kind=type(key)))
def get_duplicates(self):
from collections import defaultdict
counter = defaultdict(lambda: 0)
for k in self.values:
counter[k] += 1
return sorted(k for k, v in compat.iteritems(counter) if v > 1)
_get_duplicates = get_duplicates
def _cleanup(self):
self._engine.clear_mapping()
@cache_readonly
def _constructor(self):
return type(self)
@cache_readonly
def _engine(self):
# property, for now, slow to look up
return self._engine_type(lambda: self._values, len(self))
def _validate_index_level(self, level):
"""
Validate index level.
For single-level Index getting level number is a no-op, but some
verification must be done like in MultiIndex.
"""
if isinstance(level, int):
if level < 0 and level != -1:
raise IndexError("Too many levels: Index has only 1 level,"
" %d is not a valid level number" % (level, ))
elif level > 0:
raise IndexError("Too many levels:"
" Index has only 1 level, not %d" %
(level + 1))
elif level != self.name:
raise KeyError('Level %s must be same as name (%s)' %
(level, self.name))
def _get_level_number(self, level):
self._validate_index_level(level)
return 0
@cache_readonly
def inferred_type(self):
""" return a string of the type inferred from the values """
return lib.infer_dtype(self)
def _is_memory_usage_qualified(self):
""" return a boolean if we need a qualified .info display """
return self.is_object()
def is_type_compatible(self, kind):
return kind == self.inferred_type
@cache_readonly
def is_all_dates(self):
if self._data is None:
return False
return is_datetime_array(_ensure_object(self.values))
def __iter__(self):
return iter(self.values)
def __reduce__(self):
d = dict(data=self._data)
d.update(self._get_attributes_dict())
return _new_Index, (self.__class__, d), None
def __setstate__(self, state):
"""Necessary for making this object picklable"""
if isinstance(state, dict):
self._data = state.pop('data')
for k, v in compat.iteritems(state):
setattr(self, k, v)
elif isinstance(state, tuple):
if len(state) == 2:
nd_state, own_state = state
data = np.empty(nd_state[1], dtype=nd_state[2])
np.ndarray.__setstate__(data, nd_state)
self.name = own_state[0]
else: # pragma: no cover
data = np.empty(state)
np.ndarray.__setstate__(data, state)
self._data = data
self._reset_identity()
else:
raise Exception("invalid pickle state")
_unpickle_compat = __setstate__
def __nonzero__(self):
raise ValueError("The truth value of a {0} is ambiguous. "
"Use a.empty, a.bool(), a.item(), a.any() or a.all()."
.format(self.__class__.__name__))
__bool__ = __nonzero__
_index_shared_docs['__contains__'] = """
return a boolean if this key is IN the index
Parameters
----------
key : object
Returns
-------
boolean
"""
@Appender(_index_shared_docs['__contains__'] % _index_doc_kwargs)
def __contains__(self, key):
hash(key)
try:
return key in self._engine
except TypeError:
return False
_index_shared_docs['contains'] = """
return a boolean if this key is IN the index
Parameters
----------
key : object
Returns
-------
boolean
"""
@Appender(_index_shared_docs['contains'] % _index_doc_kwargs)
def contains(self, key):
hash(key)
try:
return key in self._engine
except TypeError:
return False
def __hash__(self):
raise TypeError("unhashable type: %r" % type(self).__name__)
def __setitem__(self, key, value):
raise TypeError("Index does not support mutable operations")
def __getitem__(self, key):
"""
Override numpy.ndarray's __getitem__ method to work as desired.
This function adds lists and Series as valid boolean indexers
(ndarrays only supports ndarray with dtype=bool).
If resulting ndim != 1, plain ndarray is returned instead of
corresponding `Index` subclass.
"""
# There's no custom logic to be implemented in __getslice__, so it's
# not overloaded intentionally.
getitem = self._data.__getitem__
promote = self._shallow_copy
if is_scalar(key):
return getitem(key)
if isinstance(key, slice):
# This case is separated from the conditional above to avoid
# pessimization of basic indexing.
return promote(getitem(key))
if is_bool_indexer(key):
key = np.asarray(key)
key = _values_from_object(key)
result = getitem(key)
if not is_scalar(result):
return promote(result)
else:
return result
def append(self, other):
"""
Append a collection of Index options together
Parameters
----------
other : Index or list/tuple of indices
Returns
-------
appended : Index
"""
to_concat = [self]
if isinstance(other, (list, tuple)):
to_concat = to_concat + list(other)
else:
to_concat.append(other)
for obj in to_concat:
if not isinstance(obj, Index):
raise TypeError('all inputs must be Index')
names = set([obj.name for obj in to_concat])
name = None if len(names) > 1 else self.name
if self.is_categorical():
# if calling index is category, don't check dtype of others
from pandas.core.indexes.category import CategoricalIndex
return CategoricalIndex._append_same_dtype(self, to_concat, name)
typs = _concat.get_dtype_kinds(to_concat)
if len(typs) == 1:
return self._append_same_dtype(to_concat, name=name)
return _concat._concat_index_asobject(to_concat, name=name)
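# Illustrative usage of Index.append (added note, not from the pandas source):
#   >>> pd.Index([1, 2]).append(pd.Index([3, 4]))
#   Int64Index([1, 2, 3, 4], dtype='int64')
# Appending indexes of different dtypes falls back to an object-dtype Index
# via _concat._concat_index_asobject.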
def _append_same_dtype(self, to_concat, name):
"""
Concatenate to_concat which has the same class
"""
# must be overridden in specific classes
return _concat._concat_index_asobject(to_concat, name)
_index_shared_docs['take'] = """
return a new %(klass)s of the values selected by the indices
For internal compatibility with numpy arrays.
Parameters
----------
indices : list
Indices to be taken
axis : int, optional
The axis over which to select values, always 0.
allow_fill : bool, default True
fill_value : bool, default None
If allow_fill=True and fill_value is not None, indices specified by
-1 is regarded as NA. If Index doesn't hold NA, raise ValueError
See also
--------
numpy.ndarray.take
"""
@Appender(_index_shared_docs['take'] % _index_doc_kwargs)
def take(self, indices, axis=0, allow_fill=True,
fill_value=None, **kwargs):
if kwargs:
nv.validate_take(tuple(), kwargs)
indices = _ensure_platform_int(indices)
if self._can_hold_na:
taken = self._assert_take_fillable(self.values, indices,
allow_fill=allow_fill,
fill_value=fill_value,
na_value=self._na_value)
else:
if allow_fill and fill_value is not None:
msg = 'Unable to fill values because {0} cannot contain NA'
raise ValueError(msg.format(self.__class__.__name__))
taken = self.values.take(indices)
return self._shallow_copy(taken)
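# Illustrative usage of Index.take (added note, not from the pandas source):
#   >>> idx = pd.Index(['a', 'b', 'c'])
#   >>> idx.take([2, 0, 1])
#   Index(['c', 'a', 'b'], dtype='object')
# With allow_fill=True and a fill_value, an index of -1 selects the NA value
# instead of the last element.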
def _assert_take_fillable(self, values, indices, allow_fill=True,
fill_value=None, na_value=np.nan):
""" Internal method to handle NA filling of take """
indices = _ensure_platform_int(indices)
# only fill if we are passing a non-None fill_value
if allow_fill and fill_value is not None:
if (indices < -1).any():
msg = ('When allow_fill=True and fill_value is not None, '
'all indices must be >= -1')
raise ValueError(msg)
taken = values.take(indices)
mask = indices == -1
if mask.any():
taken[mask] = na_value
else:
taken = values.take(indices)
return taken
@cache_readonly
def _isnan(self):
""" return if each value is nan"""
if self._can_hold_na:
return isnull(self)
else:
# shouldn't reach this condition if hasnans is checked beforehand
values = np.empty(len(self), dtype=np.bool_)
values.fill(False)
return values
@cache_readonly
def _nan_idxs(self):
if self._can_hold_na:
w, = self._isnan.nonzero()
return w
else:
return np.array([], dtype=np.int64)
@cache_readonly
def hasnans(self):
""" return if I have any nans; enables various perf speedups """
if self._can_hold_na:
return self._isnan.any()
else:
return False
def isnull(self):
"""
Detect missing values
.. versionadded:: 0.20.0
Returns
-------
a boolean array of whether my values are null
See also
--------
pandas.isnull : pandas version
"""
return self._isnan
def notnull(self):
"""
Reverse of isnull
.. versionadded:: 0.20.0
Returns
-------
a boolean array of whether my values are not null
See also
--------
pandas.notnull : pandas version
"""
return ~self.isnull()
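# Illustrative usage (added note, not from the pandas source): both methods
# return plain boolean ndarrays, e.g.
#   >>> pd.Index([1.0, np.nan]).isnull()    # -> array([False, True])
#   >>> pd.Index([1.0, np.nan]).notnull()   # -> array([True, False])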
def putmask(self, mask, value):
"""
return a new Index of the values set with the mask
See also
--------
numpy.ndarray.putmask
"""
values = self.values.copy()
try:
np.putmask(values, mask, self._convert_for_op(value))
return self._shallow_copy(values)
except (ValueError, TypeError):
# coerces to object
return self.astype(object).putmask(mask, value)
def format(self, name=False, formatter=None, **kwargs):
"""
Render a string representation of the Index
"""
header = []
if name:
header.append(pprint_thing(self.name,
escape_chars=('\t', '\r', '\n')) if
self.name is not None else '')
if formatter is not None:
return header + list(self.map(formatter))
return self._format_with_header(header, **kwargs)
def _format_with_header(self, header, na_rep='NaN', **kwargs):
values = self.values
from pandas.io.formats.format import format_array
if is_categorical_dtype(values.dtype):
values = np.array(values)
elif is_object_dtype(values.dtype):
values = lib.maybe_convert_objects(values, safe=1)
if is_object_dtype(values.dtype):
result = [pprint_thing(x, escape_chars=('\t', '\r', '\n'))
for x in values]
# could have nans
mask = isnull(values)
if mask.any():
result = np.array(result)
result[mask] = na_rep
result = result.tolist()
else:
result = _trim_front(format_array(values, None, justify='left'))
return header + result
def to_native_types(self, slicer=None, **kwargs):
"""
Format specified values of `self` and return them.
Parameters
----------
slicer : int, array-like
An indexer into `self` that specifies which values
are used in the formatting process.
kwargs : dict
Options for specifying how the values should be formatted.
These options include the following:
1) na_rep : str
The value that serves as a placeholder for NULL values
2) quoting : bool or None
Whether or not there are quoted values in `self`
3) date_format : str
The format used to represent date-like values
"""
values = self
if slicer is not None:
values = values[slicer]
return values._format_native_types(**kwargs)
def _format_native_types(self, na_rep='', quoting=None, **kwargs):
""" actually format my specific types """
mask = isnull(self)
if not self.is_object() and not quoting:
values = np.asarray(self).astype(str)
else:
values = np.array(self, dtype=object, copy=True)
values[mask] = na_rep
return values
def equals(self, other):
"""
Determines if two Index objects contain the same elements.
"""
if self.is_(other):
return True
if not isinstance(other, Index):
return False
if is_object_dtype(self) and not is_object_dtype(other):
# if other is not object, use other's logic for coercion
return other.equals(self)
try:
return array_equivalent(_values_from_object(self),
_values_from_object(other))
except:
return False
def identical(self, other):
"""Similar to equals, but check that other comparable attributes are
also equal
"""
return (self.equals(other) and
all((getattr(self, c, None) == getattr(other, c, None)
for c in self._comparables)) and
type(self) == type(other))
def asof(self, label):
"""
For a sorted index, return the most recent label up to and including
the passed label. Return NaN if not found.
See also
--------
get_loc : asof is a thin wrapper around get_loc with method='pad'
"""
try:
loc = self.get_loc(label, method='pad')
except KeyError:
return _get_na_value(self.dtype)
else:
if isinstance(loc, slice):
loc = loc.indices(len(self))[-1]
return self[loc]
def asof_locs(self, where, mask):
"""
where : array of timestamps
mask : array of booleans where data is not NA
"""
locs = self.values[mask].searchsorted(where.values, side='right')
locs = np.where(locs > 0, locs - 1, 0)
result = np.arange(len(self))[mask].take(locs)
first = mask.argmax()
result[(locs == 0) & (where < self.values[first])] = -1
return result
def sort_values(self, return_indexer=False, ascending=True):
"""
Return sorted copy of Index
"""
_as = self.argsort()
if not ascending:
_as = _as[::-1]
sorted_index = self.take(_as)
if return_indexer:
return sorted_index, _as
else:
return sorted_index
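# Illustrative usage of Index.sort_values (added note, not from the pandas
# source):
#   >>> idx = pd.Index([3, 1, 2])
#   >>> idx.sort_values()
#   Int64Index([1, 2, 3], dtype='int64')
#   >>> idx.sort_values(return_indexer=True)
#   (Int64Index([1, 2, 3], dtype='int64'), array([1, 2, 0]))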
def sort(self, *args, **kwargs):
raise TypeError("cannot sort an Index object in-place, use "
"sort_values instead")
def sortlevel(self, level=None, ascending=True, sort_remaining=None):
"""
For internal compatibility with the Index API
Sort the Index. This is for compat with MultiIndex
Parameters
----------
ascending : boolean, default True
False to sort in descending order
level, sort_remaining are compat parameters
Returns
-------
sorted_index : Index
"""
return self.sort_values(return_indexer=True, ascending=ascending)
def shift(self, periods=1, freq=None):
"""
Shift Index containing datetime objects by input number of periods and
DateOffset
Returns
-------
shifted : Index
"""
raise NotImplementedError("Not supported for type %s" %
type(self).__name__)
def argsort(self, *args, **kwargs):
"""
Returns the indices that would sort the index and its
underlying data.
Returns
-------
argsorted : numpy array
See also
--------
numpy.ndarray.argsort
"""
result = self.asi8
if result is None:
result = np.array(self)
return result.argsort(*args, **kwargs)
def __add__(self, other):
return Index(np.array(self) + other)
def __radd__(self, other):
return Index(other + np.array(self))
__iadd__ = __add__
def __sub__(self, other):
raise TypeError("cannot perform __sub__ with this index type: "
"{typ}".format(typ=type(self)))
def __and__(self, other):
return self.intersection(other)
def __or__(self, other):
return self.union(other)
def __xor__(self, other):
return self.symmetric_difference(other)
def _get_consensus_name(self, other):
"""
Given 2 indexes, give a consensus name meaning
we take the not None one, or None if the names differ.
Return a new object if we are resetting the name
"""
if self.name != other.name:
if self.name is None or other.name is None:
name = self.name or other.name
else:
name = None
if self.name != name:
return self._shallow_copy(name=name)
return self
def union(self, other):
"""
Form the union of two Index objects and sorts if possible.
Parameters
----------
other : Index or array-like
Returns
-------
union : Index
Examples
--------
>>> idx1 = pd.Index([1, 2, 3, 4])
>>> idx2 = pd.Index([3, 4, 5, 6])
>>> idx1.union(idx2)
Int64Index([1, 2, 3, 4, 5, 6], dtype='int64')
"""
self._assert_can_do_setop(other)
other = _ensure_index(other)
if len(other) == 0 or self.equals(other):
return self._get_consensus_name(other)
if len(self) == 0:
return other._get_consensus_name(self)
if not is_dtype_equal(self.dtype, other.dtype):
this = self.astype('O')
other = other.astype('O')
return this.union(other)
if self.is_monotonic and other.is_monotonic:
try:
result = self._outer_indexer(self._values, other._values)[0]
except TypeError:
# incomparable objects
result = list(self._values)
# worth making this faster? a very unusual case
value_set = set(self._values)
result.extend([x for x in other._values if x not in value_set])
else:
indexer = self.get_indexer(other)
indexer, = (indexer == -1).nonzero()
if len(indexer) > 0:
other_diff = algos.take_nd(other._values, indexer,
allow_fill=False)
result = _concat._concat_compat((self._values, other_diff))
try:
self._values[0] < other_diff[0]
except TypeError as e:
warnings.warn("%s, sort order is undefined for "
"incomparable objects" % e, RuntimeWarning,
stacklevel=3)
else:
types = frozenset((self.inferred_type,
other.inferred_type))
if not types & _unsortable_types:
result.sort()
else:
result = self._values
try:
result = np.sort(result)
except TypeError as e:
warnings.warn("%s, sort order is undefined for "
"incomparable objects" % e, RuntimeWarning,
stacklevel=3)
# for subclasses
return self._wrap_union_result(other, result)
def _wrap_union_result(self, other, result):
name = self.name if self.name == other.name else None
return self.__class__(result, name=name)
def intersection(self, other):
"""
Form the intersection of two Index objects.
This returns a new Index with elements common to the index and `other`,
preserving the order of the calling index.
Parameters
----------
other : Index or array-like
Returns
-------
intersection : Index
Examples
--------
>>> idx1 = pd.Index([1, 2, 3, 4])
>>> idx2 = pd.Index([3, 4, 5, 6])
>>> idx1.intersection(idx2)
Int64Index([3, 4], dtype='int64')
"""
self._assert_can_do_setop(other)
other = _ensure_index(other)
if self.equals(other):
return self._get_consensus_name(other)
if not is_dtype_equal(self.dtype, other.dtype):
this = self.astype('O')
other = other.astype('O')
return this.intersection(other)
if self.is_monotonic and other.is_monotonic:
try:
result = self._inner_indexer(self._values, other._values)[0]
return self._wrap_union_result(other, result)
except TypeError:
pass
try:
indexer = Index(other._values).get_indexer(self._values)
indexer = indexer.take((indexer != -1).nonzero()[0])
except:
# duplicates
indexer = Index(other._values).get_indexer_non_unique(
self._values)[0].unique()
indexer = indexer[indexer != -1]
taken = other.take(indexer)
if self.name != other.name:
taken.name = None
return taken
def difference(self, other):
"""
Return a new Index with elements from the index that are not in
`other`.
This is the set difference of two Index objects.
It's sorted if sorting is possible.
Parameters
----------
other : Index or array-like
Returns
-------
difference : Index
Examples
--------
>>> idx1 = pd.Index([1, 2, 3, 4])
>>> idx2 = pd.Index([3, 4, 5, 6])
>>> idx1.difference(idx2)
Int64Index([1, 2], dtype='int64')
"""
self._assert_can_do_setop(other)
if self.equals(other):
return Index([], name=self.name)
other, result_name = self._convert_can_do_setop(other)
this = self._get_unique_index()
indexer = this.get_indexer(other)
indexer = indexer.take((indexer != -1).nonzero()[0])
label_diff = np.setdiff1d(np.arange(this.size), indexer,
assume_unique=True)
the_diff = this.values.take(label_diff)
try:
the_diff = algos.safe_sort(the_diff)
except TypeError:
pass
return this._shallow_copy(the_diff, name=result_name, freq=None)
def symmetric_difference(self, other, result_name=None):
"""
Compute the symmetric difference of two Index objects.
It's sorted if sorting is possible.
Parameters
----------
other : Index or array-like
result_name : str
Returns
-------
symmetric_difference : Index
Notes
-----
``symmetric_difference`` contains elements that appear in either
``idx1`` or ``idx2`` but not both. Equivalent to the Index created by
``idx1.difference(idx2) | idx2.difference(idx1)`` with duplicates
dropped.
Examples
--------
>>> idx1 = Index([1, 2, 3, 4])
>>> idx2 = Index([2, 3, 4, 5])
>>> idx1.symmetric_difference(idx2)
Int64Index([1, 5], dtype='int64')
You can also use the ``^`` operator:
>>> idx1 ^ idx2
Int64Index([1, 5], dtype='int64')
"""
self._assert_can_do_setop(other)
other, result_name_update = self._convert_can_do_setop(other)
if result_name is None:
result_name = result_name_update
this = self._get_unique_index()
other = other._get_unique_index()
indexer = this.get_indexer(other)
# {this} minus {other}
common_indexer = indexer.take((indexer != -1).nonzero()[0])
left_indexer = np.setdiff1d(np.arange(this.size), common_indexer,
assume_unique=True)
left_diff = this.values.take(left_indexer)
# {other} minus {this}
right_indexer = (indexer == -1).nonzero()[0]
right_diff = other.values.take(right_indexer)
the_diff = _concat._concat_compat([left_diff, right_diff])
try:
the_diff = algos.safe_sort(the_diff)
except TypeError:
pass
attribs = self._get_attributes_dict()
attribs['name'] = result_name
if 'freq' in attribs:
attribs['freq'] = None
return self._shallow_copy_with_infer(the_diff, **attribs)
sym_diff = deprecate('sym_diff', symmetric_difference)
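    # A minimal usage sketch of ``symmetric_difference``, assuming ``pd`` is
    # pandas; duplicates are dropped from both sides before differencing:
    #
    #     a = pd.Index([1, 2, 2, 3])
    #     b = pd.Index([2, 3, 4])
    #     a.symmetric_difference(b)     # -> 1 and 4
    #     a ^ b                         # operator form, same result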
def _get_unique_index(self, dropna=False):
"""
Returns an index containing unique values.
Parameters
----------
dropna : bool
If True, NaN values are dropped.
Returns
-------
uniques : index
"""
if self.is_unique and not dropna:
return self
values = self.values
if not self.is_unique:
values = self.unique()
if dropna:
try:
if self.hasnans:
values = values[~isnull(values)]
except NotImplementedError:
pass
return self._shallow_copy(values)
_index_shared_docs['get_loc'] = """
Get integer location for requested label.
Parameters
----------
key : label
method : {None, 'pad'/'ffill', 'backfill'/'bfill', 'nearest'}, optional
* default: exact matches only.
* pad / ffill: find the PREVIOUS index value if no exact match.
* backfill / bfill: use NEXT index value if no exact match
* nearest: use the NEAREST index value if no exact match. Tied
distances are broken by preferring the larger index value.
tolerance : optional
Maximum distance from index value for inexact matches. The value of
            the index at the matching location must satisfy the equation
``abs(index[loc] - key) <= tolerance``.
.. versionadded:: 0.17.0
Returns
-------
loc : int if unique index, possibly slice or mask if not
"""
@Appender(_index_shared_docs['get_loc'])
def get_loc(self, key, method=None, tolerance=None):
if method is None:
if tolerance is not None:
raise ValueError('tolerance argument only valid if using pad, '
'backfill or nearest lookups')
key = _values_from_object(key)
try:
return self._engine.get_loc(key)
except KeyError:
return self._engine.get_loc(self._maybe_cast_indexer(key))
indexer = self.get_indexer([key], method=method, tolerance=tolerance)
if indexer.ndim > 1 or indexer.size > 1:
raise TypeError('get_loc requires scalar valued input')
loc = indexer.item()
if loc == -1:
raise KeyError(key)
return loc
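    # A minimal usage sketch of ``get_loc``, assuming ``pd`` is pandas;
    # ``method`` and ``tolerance`` only apply to inexact lookups:
    #
    #     idx = pd.Index([0, 5, 10])
    #     idx.get_loc(5)                                    # exact -> 1
    #     idx.get_loc(7, method='pad')                      # previous -> 1
    #     idx.get_loc(7, method='nearest')                  # closest -> 1
    #     idx.get_loc(7, method='nearest', tolerance=1)     # raises KeyError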
def get_value(self, series, key):
"""
Fast lookup of value from 1-dimensional ndarray. Only use this if you
know what you're doing
"""
# if we have something that is Index-like, then
# use this, e.g. DatetimeIndex
s = getattr(series, '_values', None)
if isinstance(s, Index) and is_scalar(key):
try:
return s[key]
except (IndexError, ValueError):
# invalid type as an indexer
pass
s = _values_from_object(series)
k = _values_from_object(key)
k = self._convert_scalar_indexer(k, kind='getitem')
try:
return self._engine.get_value(s, k,
tz=getattr(series.dtype, 'tz', None))
except KeyError as e1:
if len(self) > 0 and self.inferred_type in ['integer', 'boolean']:
raise
try:
return libts.get_value_box(s, key)
except IndexError:
raise
except TypeError:
# generator/iterator-like
if is_iterator(key):
raise InvalidIndexError(key)
else:
raise e1
except Exception: # pragma: no cover
raise e1
except TypeError:
# python 3
if is_scalar(key): # pragma: no cover
raise IndexError(key)
raise InvalidIndexError(key)
def set_value(self, arr, key, value):
"""
Fast lookup of value from 1-dimensional ndarray. Only use this if you
know what you're doing
"""
self._engine.set_value(_values_from_object(arr),
_values_from_object(key), value)
def _get_level_values(self, level):
"""
Return an Index of values for requested level, equal to the length
of the index
Parameters
----------
level : int
Returns
-------
values : Index
"""
self._validate_index_level(level)
return self
get_level_values = _get_level_values
_index_shared_docs['get_indexer'] = """
Compute indexer and mask for new index given the current index. The
indexer should be then used as an input to ndarray.take to align the
current data to the new index.
Parameters
----------
target : %(target_klass)s
method : {None, 'pad'/'ffill', 'backfill'/'bfill', 'nearest'}, optional
* default: exact matches only.
* pad / ffill: find the PREVIOUS index value if no exact match.
* backfill / bfill: use NEXT index value if no exact match
* nearest: use the NEAREST index value if no exact match. Tied
distances are broken by preferring the larger index value.
limit : int, optional
Maximum number of consecutive labels in ``target`` to match for
inexact matches.
tolerance : optional
Maximum distance between original and new labels for inexact
            matches. The values of the index at the matching locations must
satisfy the equation ``abs(index[indexer] - target) <= tolerance``.
.. versionadded:: 0.17.0
Examples
--------
>>> indexer = index.get_indexer(new_index)
>>> new_values = cur_values.take(indexer)
Returns
-------
indexer : ndarray of int
Integers from 0 to n - 1 indicating that the index at these
positions matches the corresponding target values. Missing values
in the target are marked by -1.
"""
@Appender(_index_shared_docs['get_indexer'] % _index_doc_kwargs)
def get_indexer(self, target, method=None, limit=None, tolerance=None):
method = missing.clean_reindex_fill_method(method)
target = _ensure_index(target)
if tolerance is not None:
tolerance = self._convert_tolerance(tolerance)
pself, ptarget = self._maybe_promote(target)
if pself is not self or ptarget is not target:
return pself.get_indexer(ptarget, method=method, limit=limit,
tolerance=tolerance)
if not is_dtype_equal(self.dtype, target.dtype):
this = self.astype(object)
target = target.astype(object)
return this.get_indexer(target, method=method, limit=limit,
tolerance=tolerance)
if not self.is_unique:
raise InvalidIndexError('Reindexing only valid with uniquely'
' valued Index objects')
if method == 'pad' or method == 'backfill':
indexer = self._get_fill_indexer(target, method, limit, tolerance)
elif method == 'nearest':
indexer = self._get_nearest_indexer(target, limit, tolerance)
else:
if tolerance is not None:
raise ValueError('tolerance argument only valid if doing pad, '
'backfill or nearest reindexing')
if limit is not None:
raise ValueError('limit argument only valid if doing pad, '
'backfill or nearest reindexing')
indexer = self._engine.get_indexer(target._values)
return _ensure_platform_int(indexer)
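    # A minimal usage sketch of ``get_indexer``, assuming ``pd`` is pandas;
    # the returned positions can be fed straight into ``take``:
    #
    #     idx = pd.Index(['a', 'b', 'c'])
    #     idx.get_indexer(['b', 'c', 'z'])    # -> array([ 1,  2, -1])
    #     # -1 marks a target label missing from ``idx``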
def _convert_tolerance(self, tolerance):
# override this method on subclasses
return tolerance
def _get_fill_indexer(self, target, method, limit=None, tolerance=None):
if self.is_monotonic_increasing and target.is_monotonic_increasing:
method = (self._engine.get_pad_indexer if method == 'pad' else
self._engine.get_backfill_indexer)
indexer = method(target._values, limit)
else:
indexer = self._get_fill_indexer_searchsorted(target, method,
limit)
if tolerance is not None:
indexer = self._filter_indexer_tolerance(target._values, indexer,
tolerance)
return indexer
def _get_fill_indexer_searchsorted(self, target, method, limit=None):
"""
Fallback pad/backfill get_indexer that works for monotonic decreasing
indexes and non-monotonic targets
"""
if limit is not None:
raise ValueError('limit argument for %r method only well-defined '
'if index and target are monotonic' % method)
side = 'left' if method == 'pad' else 'right'
# find exact matches first (this simplifies the algorithm)
indexer = self.get_indexer(target)
nonexact = (indexer == -1)
indexer[nonexact] = self._searchsorted_monotonic(target[nonexact],
side)
if side == 'left':
# searchsorted returns "indices into a sorted array such that,
# if the corresponding elements in v were inserted before the
# indices, the order of a would be preserved".
# Thus, we need to subtract 1 to find values to the left.
indexer[nonexact] -= 1
# This also mapped not found values (values of 0 from
# np.searchsorted) to -1, which conveniently is also our
# sentinel for missing values
else:
# Mark indices to the right of the largest value as not found
indexer[indexer == len(self)] = -1
return indexer
def _get_nearest_indexer(self, target, limit, tolerance):
"""
Get the indexer for the nearest index labels; requires an index with
values that can be subtracted from each other (e.g., not strings or
tuples).
"""
left_indexer = self.get_indexer(target, 'pad', limit=limit)
right_indexer = self.get_indexer(target, 'backfill', limit=limit)
target = np.asarray(target)
left_distances = abs(self.values[left_indexer] - target)
right_distances = abs(self.values[right_indexer] - target)
op = operator.lt if self.is_monotonic_increasing else operator.le
indexer = np.where(op(left_distances, right_distances) |
(right_indexer == -1), left_indexer, right_indexer)
if tolerance is not None:
indexer = self._filter_indexer_tolerance(target, indexer,
tolerance)
return indexer
def _filter_indexer_tolerance(self, target, indexer, tolerance):
distance = abs(self.values[indexer] - target)
indexer = np.where(distance <= tolerance, indexer, -1)
return indexer
_index_shared_docs['get_indexer_non_unique'] = """
Compute indexer and mask for new index given the current index. The
indexer should be then used as an input to ndarray.take to align the
current data to the new index.
Parameters
----------
target : %(target_klass)s
Returns
-------
indexer : ndarray of int
Integers from 0 to n - 1 indicating that the index at these
positions matches the corresponding target values. Missing values
in the target are marked by -1.
missing : ndarray of int
An indexer into the target of the values not found.
These correspond to the -1 in the indexer array
"""
@Appender(_index_shared_docs['get_indexer_non_unique'] % _index_doc_kwargs)
def get_indexer_non_unique(self, target):
target = _ensure_index(target)
pself, ptarget = self._maybe_promote(target)
if pself is not self or ptarget is not target:
return pself.get_indexer_non_unique(ptarget)
if self.is_all_dates:
self = Index(self.asi8)
tgt_values = target.asi8
else:
tgt_values = target._values
indexer, missing = self._engine.get_indexer_non_unique(tgt_values)
return Index(indexer), missing
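    # A minimal usage sketch of ``get_indexer_non_unique``, assuming ``pd``
    # is pandas; unlike ``get_indexer`` it tolerates duplicate labels and
    # reports which target positions could not be found:
    #
    #     idx = pd.Index(['a', 'b', 'a'])
    #     indexer, missing = idx.get_indexer_non_unique(['a', 'z'])
    #     # indexer lists every position of 'a' plus -1 for 'z';
    #     # missing holds the target position of 'z'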
def get_indexer_for(self, target, **kwargs):
"""
        Guaranteed return of an indexer even when non-unique.
        This dispatches to get_indexer or get_indexer_non_unique as appropriate
"""
if self.is_unique:
return self.get_indexer(target, **kwargs)
indexer, _ = self.get_indexer_non_unique(target, **kwargs)
return indexer
def _maybe_promote(self, other):
# A hack, but it works
from pandas.core.indexes.datetimes import DatetimeIndex
if self.inferred_type == 'date' and isinstance(other, DatetimeIndex):
return DatetimeIndex(self), other
elif self.inferred_type == 'boolean':
if not is_object_dtype(self.dtype):
return self.astype('object'), other.astype('object')
return self, other
def groupby(self, values):
"""
Group the index labels by a given array of values.
Parameters
----------
values : array
Values used to determine the groups.
Returns
-------
groups : dict
{group name -> group labels}
"""
# TODO: if we are a MultiIndex, we can do better
        # than converting to tuples
from .multi import MultiIndex
if isinstance(values, MultiIndex):
values = values.values
values = _ensure_categorical(values)
result = values._reverse_indexer()
# map to the label
result = {k: self.take(v) for k, v in compat.iteritems(result)}
return result
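    # A minimal usage sketch of ``groupby``, assuming ``pd`` is pandas and
    # ``np`` is numpy; the passed values act as group keys:
    #
    #     idx = pd.Index(['x', 'y', 'z'])
    #     idx.groupby(np.array([1, 2, 1]))
    #     # maps key 1 to Index(['x', 'z']) and key 2 to Index(['y'])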
def map(self, mapper):
"""Apply mapper function to an index.
Parameters
----------
mapper : callable
Function to be applied.
Returns
-------
applied : Union[Index, MultiIndex], inferred
The output of the mapping function applied to the index.
If the function returns a tuple with more than one element
a MultiIndex will be returned.
"""
from .multi import MultiIndex
mapped_values = self._arrmap(self.values, mapper)
attributes = self._get_attributes_dict()
if mapped_values.size and isinstance(mapped_values[0], tuple):
return MultiIndex.from_tuples(mapped_values,
names=attributes.get('name'))
attributes['copy'] = False
return Index(mapped_values, **attributes)
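    # A minimal usage sketch of ``map``, assuming ``pd`` is pandas; a mapper
    # that returns tuples promotes the result to a MultiIndex:
    #
    #     idx = pd.Index([1, 2, 3])
    #     idx.map(lambda x: x * 10)          # -> Index([10, 20, 30])
    #     idx.map(lambda x: (x, x % 2))      # -> MultiIndex of 2-tuples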
def isin(self, values, level=None):
"""
Compute boolean array of whether each index value is found in the
passed set of values.
Parameters
----------
values : set or list-like
Sought values.
.. versionadded:: 0.18.1
Support for values as a set
level : str or int, optional
Name or position of the index level to use (if the index is a
MultiIndex).
Notes
-----
If `level` is specified:
- if it is the name of one *and only one* index level, use that level;
- otherwise it should be a number indicating level position.
Returns
-------
is_contained : ndarray (boolean dtype)
"""
if level is not None:
self._validate_index_level(level)
return algos.isin(np.array(self), values)
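    # A minimal usage sketch of ``isin``, assuming ``pd`` is pandas:
    #
    #     idx = pd.Index(['a', 'b', 'c'])
    #     idx.isin(['b', 'd'])       # -> array([False,  True, False])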
def _can_reindex(self, indexer):
"""
*this is an internal non-public method*
Check if we are allowing reindexing with this particular indexer
Parameters
----------
indexer : an integer indexer
Raises
------
        ValueError if it's a duplicate axis
"""
# trying to reindex on an axis with duplicates
if not self.is_unique and len(indexer):
raise ValueError("cannot reindex from a duplicate axis")
def reindex(self, target, method=None, level=None, limit=None,
tolerance=None):
"""
Create index with target's values (move/add/delete values as necessary)
Parameters
----------
target : an iterable
Returns
-------
new_index : pd.Index
Resulting index
indexer : np.ndarray or None
Indices of output values in original index
"""
# GH6552: preserve names when reindexing to non-named target
# (i.e. neither Index nor Series).
preserve_names = not hasattr(target, 'name')
# GH7774: preserve dtype/tz if target is empty and not an Index.
target = _ensure_has_len(target) # target may be an iterator
if not isinstance(target, Index) and len(target) == 0:
attrs = self._get_attributes_dict()
attrs.pop('freq', None) # don't preserve freq
target = self._simple_new(None, dtype=self.dtype, **attrs)
else:
target = _ensure_index(target)
if level is not None:
if method is not None:
raise TypeError('Fill method not supported if level passed')
_, indexer, _ = self._join_level(target, level, how='right',
return_indexers=True)
else:
if self.equals(target):
indexer = None
else:
if self.is_unique:
indexer = self.get_indexer(target, method=method,
limit=limit,
tolerance=tolerance)
else:
if method is not None or limit is not None:
raise ValueError("cannot reindex a non-unique index "
"with a method or limit")
indexer, missing = self.get_indexer_non_unique(target)
if preserve_names and target.nlevels == 1 and target.name != self.name:
target = target.copy()
target.name = self.name
return target, indexer
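    # A minimal usage sketch of ``reindex``, assuming ``pd`` is pandas; the
    # second return value is the positional indexer into the original index
    # (or None when nothing had to move):
    #
    #     idx = pd.Index(['a', 'b', 'c'])
    #     new_idx, indexer = idx.reindex(['b', 'd'])
    #     # new_idx is Index(['b', 'd']); indexer is array([ 1, -1])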
def _reindex_non_unique(self, target):
"""
*this is an internal non-public method*
        Create a new index with target's values (move/add/delete values as
        necessary); use with a non-unique Index and a possibly non-unique target
Parameters
----------
target : an iterable
Returns
-------
new_index : pd.Index
Resulting index
indexer : np.ndarray or None
Indices of output values in original index
"""
target = _ensure_index(target)
indexer, missing = self.get_indexer_non_unique(target)
check = indexer != -1
new_labels = self.take(indexer[check])
new_indexer = None
if len(missing):
l = np.arange(len(indexer))
missing = _ensure_platform_int(missing)
missing_labels = target.take(missing)
missing_indexer = _ensure_int64(l[~check])
cur_labels = self.take(indexer[check]).values
cur_indexer = _ensure_int64(l[check])
new_labels = np.empty(tuple([len(indexer)]), dtype=object)
new_labels[cur_indexer] = cur_labels
new_labels[missing_indexer] = missing_labels
# a unique indexer
if target.is_unique:
# see GH5553, make sure we use the right indexer
new_indexer = np.arange(len(indexer))
new_indexer[cur_indexer] = np.arange(len(cur_labels))
new_indexer[missing_indexer] = -1
# we have a non_unique selector, need to use the original
# indexer here
else:
# need to retake to have the same size as the indexer
indexer = indexer.values
indexer[~check] = 0
# reset the new indexer to account for the new size
new_indexer = np.arange(len(self.take(indexer)))
new_indexer[~check] = -1
new_index = self._shallow_copy_with_infer(new_labels, freq=None)
return new_index, indexer, new_indexer
_index_shared_docs['join'] = """
*this is an internal non-public method*
Compute join_index and indexers to conform data
structures to the new index.
Parameters
----------
other : Index
how : {'left', 'right', 'inner', 'outer'}
level : int or level name, default None
return_indexers : boolean, default False
sort : boolean, default False
Sort the join keys lexicographically in the result Index. If False,
the order of the join keys depends on the join type (how keyword)
.. versionadded:: 0.20.0
Returns
-------
join_index, (left_indexer, right_indexer)
"""
@Appender(_index_shared_docs['join'])
def join(self, other, how='left', level=None, return_indexers=False,
sort=False):
from .multi import MultiIndex
self_is_mi = isinstance(self, MultiIndex)
other_is_mi = isinstance(other, MultiIndex)
# try to figure out the join level
# GH3662
if level is None and (self_is_mi or other_is_mi):
# have the same levels/names so a simple join
if self.names == other.names:
pass
else:
return self._join_multi(other, how=how,
return_indexers=return_indexers)
# join on the level
if level is not None and (self_is_mi or other_is_mi):
return self._join_level(other, level, how=how,
return_indexers=return_indexers)
other = _ensure_index(other)
if len(other) == 0 and how in ('left', 'outer'):
join_index = self._shallow_copy()
if return_indexers:
rindexer = np.repeat(-1, len(join_index))
return join_index, None, rindexer
else:
return join_index
if len(self) == 0 and how in ('right', 'outer'):
join_index = other._shallow_copy()
if return_indexers:
lindexer = np.repeat(-1, len(join_index))
return join_index, lindexer, None
else:
return join_index
if self._join_precedence < other._join_precedence:
how = {'right': 'left', 'left': 'right'}.get(how, how)
result = other.join(self, how=how, level=level,
return_indexers=return_indexers)
if return_indexers:
x, y, z = result
result = x, z, y
return result
if not is_dtype_equal(self.dtype, other.dtype):
this = self.astype('O')
other = other.astype('O')
return this.join(other, how=how, return_indexers=return_indexers)
_validate_join_method(how)
if not self.is_unique and not other.is_unique:
return self._join_non_unique(other, how=how,
return_indexers=return_indexers)
elif not self.is_unique or not other.is_unique:
if self.is_monotonic and other.is_monotonic:
return self._join_monotonic(other, how=how,
return_indexers=return_indexers)
else:
return self._join_non_unique(other, how=how,
return_indexers=return_indexers)
elif self.is_monotonic and other.is_monotonic:
try:
return self._join_monotonic(other, how=how,
return_indexers=return_indexers)
except TypeError:
pass
if how == 'left':
join_index = self
elif how == 'right':
join_index = other
elif how == 'inner':
join_index = self.intersection(other)
elif how == 'outer':
join_index = self.union(other)
if sort:
join_index = join_index.sort_values()
if return_indexers:
if join_index is self:
lindexer = None
else:
lindexer = self.get_indexer(join_index)
if join_index is other:
rindexer = None
else:
rindexer = other.get_indexer(join_index)
return join_index, lindexer, rindexer
else:
return join_index
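    # A minimal usage sketch of ``join``, assuming ``pd`` is pandas; with
    # ``return_indexers=True`` the left/right indexers align data onto the
    # joined index, with None meaning "already aligned":
    #
    #     left = pd.Index([1, 2, 3])
    #     right = pd.Index([2, 3, 4])
    #     joined, lidx, ridx = left.join(right, how='outer',
    #                                    return_indexers=True)
    #     # joined holds [1, 2, 3, 4]; lidx/ridx use -1 for missing labels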
def _join_multi(self, other, how, return_indexers=True):
from .multi import MultiIndex
self_is_mi = isinstance(self, MultiIndex)
other_is_mi = isinstance(other, MultiIndex)
# figure out join names
self_names = [n for n in self.names if n is not None]
other_names = [n for n in other.names if n is not None]
overlap = list(set(self_names) & set(other_names))
# need at least 1 in common, but not more than 1
if not len(overlap):
raise ValueError("cannot join with no level specified and no "
"overlapping names")
if len(overlap) > 1:
raise NotImplementedError("merging with more than one level "
"overlap on a multi-index is not "
"implemented")
jl = overlap[0]
# make the indices into mi's that match
if not (self_is_mi and other_is_mi):
flip_order = False
if self_is_mi:
self, other = other, self
flip_order = True
# flip if join method is right or left
how = {'right': 'left', 'left': 'right'}.get(how, how)
level = other.names.index(jl)
result = self._join_level(other, level, how=how,
return_indexers=return_indexers)
if flip_order:
if isinstance(result, tuple):
return result[0], result[2], result[1]
return result
# 2 multi-indexes
raise NotImplementedError("merging with both multi-indexes is not "
"implemented")
def _join_non_unique(self, other, how='left', return_indexers=False):
from pandas.core.reshape.merge import _get_join_indexers
left_idx, right_idx = _get_join_indexers([self.values],
[other._values], how=how,
sort=True)
left_idx = _ensure_platform_int(left_idx)
right_idx = _ensure_platform_int(right_idx)
join_index = self.values.take(left_idx)
mask = left_idx == -1
np.putmask(join_index, mask, other._values.take(right_idx))
join_index = self._wrap_joined_index(join_index, other)
if return_indexers:
return join_index, left_idx, right_idx
else:
return join_index
def _join_level(self, other, level, how='left', return_indexers=False,
keep_order=True):
"""
The join method *only* affects the level of the resulting
MultiIndex. Otherwise it just exactly aligns the Index data to the
labels of the level in the MultiIndex. If `keep_order` == True, the
order of the data indexed by the MultiIndex will not be changed;
otherwise, it will tie out with `other`.
"""
from .multi import MultiIndex
def _get_leaf_sorter(labels):
"""
            returns sorter for the innermost level while preserving the
order of higher levels
"""
if labels[0].size == 0:
return np.empty(0, dtype='int64')
if len(labels) == 1:
lab = _ensure_int64(labels[0])
sorter, _ = libalgos.groupsort_indexer(lab, 1 + lab.max())
return sorter
            # find indexers of beginning of each set of
            # same-key labels w.r.t. all but last level
tic = labels[0][:-1] != labels[0][1:]
for lab in labels[1:-1]:
tic |= lab[:-1] != lab[1:]
starts = np.hstack(([True], tic, [True])).nonzero()[0]
lab = _ensure_int64(labels[-1])
return lib.get_level_sorter(lab, _ensure_int64(starts))
if isinstance(self, MultiIndex) and isinstance(other, MultiIndex):
raise TypeError('Join on level between two MultiIndex objects '
'is ambiguous')
left, right = self, other
flip_order = not isinstance(self, MultiIndex)
if flip_order:
left, right = right, left
how = {'right': 'left', 'left': 'right'}.get(how, how)
level = left._get_level_number(level)
old_level = left.levels[level]
if not right.is_unique:
raise NotImplementedError('Index._join_level on non-unique index '
'is not implemented')
new_level, left_lev_indexer, right_lev_indexer = \
old_level.join(right, how=how, return_indexers=True)
if left_lev_indexer is None:
if keep_order or len(left) == 0:
left_indexer = None
join_index = left
else: # sort the leaves
left_indexer = _get_leaf_sorter(left.labels[:level + 1])
join_index = left[left_indexer]
else:
left_lev_indexer = _ensure_int64(left_lev_indexer)
rev_indexer = lib.get_reverse_indexer(left_lev_indexer,
len(old_level))
new_lev_labels = algos.take_nd(rev_indexer, left.labels[level],
allow_fill=False)
new_labels = list(left.labels)
new_labels[level] = new_lev_labels
new_levels = list(left.levels)
new_levels[level] = new_level
if keep_order: # just drop missing values. o.w. keep order
left_indexer = np.arange(len(left), dtype=np.intp)
mask = new_lev_labels != -1
if not mask.all():
new_labels = [lab[mask] for lab in new_labels]
left_indexer = left_indexer[mask]
else: # tie out the order with other
if level == 0: # outer most level, take the fast route
ngroups = 1 + new_lev_labels.max()
left_indexer, counts = libalgos.groupsort_indexer(
new_lev_labels, ngroups)
# missing values are placed first; drop them!
left_indexer = left_indexer[counts[0]:]
new_labels = [lab[left_indexer] for lab in new_labels]
else: # sort the leaves
mask = new_lev_labels != -1
mask_all = mask.all()
if not mask_all:
new_labels = [lab[mask] for lab in new_labels]
left_indexer = _get_leaf_sorter(new_labels[:level + 1])
new_labels = [lab[left_indexer] for lab in new_labels]
# left_indexers are w.r.t masked frame.
# reverse to original frame!
if not mask_all:
left_indexer = mask.nonzero()[0][left_indexer]
join_index = MultiIndex(levels=new_levels, labels=new_labels,
names=left.names, verify_integrity=False)
if right_lev_indexer is not None:
right_indexer = algos.take_nd(right_lev_indexer,
join_index.labels[level],
allow_fill=False)
else:
right_indexer = join_index.labels[level]
if flip_order:
left_indexer, right_indexer = right_indexer, left_indexer
if return_indexers:
left_indexer = (None if left_indexer is None
else _ensure_platform_int(left_indexer))
right_indexer = (None if right_indexer is None
else _ensure_platform_int(right_indexer))
return join_index, left_indexer, right_indexer
else:
return join_index
def _join_monotonic(self, other, how='left', return_indexers=False):
if self.equals(other):
ret_index = other if how == 'right' else self
if return_indexers:
return ret_index, None, None
else:
return ret_index
sv = self._values
ov = other._values
if self.is_unique and other.is_unique:
# We can perform much better than the general case
if how == 'left':
join_index = self
lidx = None
ridx = self._left_indexer_unique(sv, ov)
elif how == 'right':
join_index = other
lidx = self._left_indexer_unique(ov, sv)
ridx = None
elif how == 'inner':
join_index, lidx, ridx = self._inner_indexer(sv, ov)
join_index = self._wrap_joined_index(join_index, other)
elif how == 'outer':
join_index, lidx, ridx = self._outer_indexer(sv, ov)
join_index = self._wrap_joined_index(join_index, other)
else:
if how == 'left':
join_index, lidx, ridx = self._left_indexer(sv, ov)
elif how == 'right':
join_index, ridx, lidx = self._left_indexer(ov, sv)
elif how == 'inner':
join_index, lidx, ridx = self._inner_indexer(sv, ov)
elif how == 'outer':
join_index, lidx, ridx = self._outer_indexer(sv, ov)
join_index = self._wrap_joined_index(join_index, other)
if return_indexers:
lidx = None if lidx is None else _ensure_platform_int(lidx)
ridx = None if ridx is None else _ensure_platform_int(ridx)
return join_index, lidx, ridx
else:
return join_index
def _wrap_joined_index(self, joined, other):
name = self.name if self.name == other.name else None
return Index(joined, name=name)
def _get_string_slice(self, key, use_lhs=True, use_rhs=True):
# this is for partial string indexing,
# overridden in DatetimeIndex, TimedeltaIndex and PeriodIndex
raise NotImplementedError
def slice_indexer(self, start=None, end=None, step=None, kind=None):
"""
For an ordered Index, compute the slice indexer for input labels and
step
Parameters
----------
start : label, default None
If None, defaults to the beginning
end : label, default None
If None, defaults to the end
step : int, default None
kind : string, default None
Returns
-------
indexer : ndarray or slice
Notes
-----
This function assumes that the data is sorted, so use at your own peril
"""
start_slice, end_slice = self.slice_locs(start, end, step=step,
kind=kind)
# return a slice
if not is_scalar(start_slice):
raise AssertionError("Start slice bound is non-scalar")
if not is_scalar(end_slice):
raise AssertionError("End slice bound is non-scalar")
return slice(start_slice, end_slice, step)
def _maybe_cast_indexer(self, key):
"""
If we have a float key and are not a floating index
then try to cast to an int if equivalent
"""
if is_float(key) and not self.is_floating():
try:
ckey = int(key)
if ckey == key:
key = ckey
except (ValueError, TypeError):
pass
return key
def _validate_indexer(self, form, key, kind):
"""
        If we are a positional indexer, validate that we have
        appropriately typed bounds (must be an integer)
"""
assert kind in ['ix', 'loc', 'getitem', 'iloc']
if key is None:
pass
elif is_integer(key):
pass
elif kind in ['iloc', 'getitem']:
self._invalid_indexer(form, key)
return key
_index_shared_docs['_maybe_cast_slice_bound'] = """
This function should be overloaded in subclasses that allow non-trivial
casting on label-slice bounds, e.g. datetime-like indices allowing
strings containing formatted datetimes.
Parameters
----------
label : object
side : {'left', 'right'}
kind : {'ix', 'loc', 'getitem'}
Returns
-------
label : object
Notes
-----
Value of `side` parameter should be validated in caller.
"""
@Appender(_index_shared_docs['_maybe_cast_slice_bound'])
def _maybe_cast_slice_bound(self, label, side, kind):
assert kind in ['ix', 'loc', 'getitem', None]
# We are a plain index here (sub-class override this method if they
# wish to have special treatment for floats/ints, e.g. Float64Index and
# datetimelike Indexes
# reject them
if is_float(label):
if not (kind in ['ix'] and (self.holds_integer() or
self.is_floating())):
self._invalid_indexer('slice', label)
# we are trying to find integer bounds on a non-integer based index
# this is rejected (generally .loc gets you here)
elif is_integer(label):
self._invalid_indexer('slice', label)
return label
def _searchsorted_monotonic(self, label, side='left'):
if self.is_monotonic_increasing:
return self.searchsorted(label, side=side)
elif self.is_monotonic_decreasing:
# np.searchsorted expects ascending sort order, have to reverse
# everything for it to work (element ordering, search side and
# resulting value).
            pos = self[::-1].searchsorted(label, side='right' if side == 'left'
                                          else 'left')
return len(self) - pos
raise ValueError('index must be monotonic increasing or decreasing')
def _get_loc_only_exact_matches(self, key):
"""
        This is overridden in subclasses (namely, IntervalIndex) to control
get_slice_bound.
"""
return self.get_loc(key)
def get_slice_bound(self, label, side, kind):
"""
Calculate slice bound that corresponds to given label.
Returns leftmost (one-past-the-rightmost if ``side=='right'``) position
of given label.
Parameters
----------
label : object
side : {'left', 'right'}
kind : {'ix', 'loc', 'getitem'}
"""
assert kind in ['ix', 'loc', 'getitem', None]
if side not in ('left', 'right'):
raise ValueError("Invalid value for side kwarg,"
" must be either 'left' or 'right': %s" %
(side, ))
original_label = label
# For datetime indices label may be a string that has to be converted
# to datetime boundary according to its resolution.
label = self._maybe_cast_slice_bound(label, side, kind)
# we need to look up the label
try:
slc = self._get_loc_only_exact_matches(label)
except KeyError as err:
try:
return self._searchsorted_monotonic(label, side)
except ValueError:
# raise the original KeyError
raise err
if isinstance(slc, np.ndarray):
# get_loc may return a boolean array or an array of indices, which
# is OK as long as they are representable by a slice.
if is_bool_dtype(slc):
slc = lib.maybe_booleans_to_slice(slc.view('u1'))
else:
slc = lib.maybe_indices_to_slice(slc.astype('i8'), len(self))
if isinstance(slc, np.ndarray):
raise KeyError("Cannot get %s slice bound for non-unique "
"label: %r" % (side, original_label))
if isinstance(slc, slice):
if side == 'left':
return slc.start
else:
return slc.stop
else:
if side == 'right':
return slc + 1
else:
return slc
def slice_locs(self, start=None, end=None, step=None, kind=None):
"""
Compute slice locations for input labels.
Parameters
----------
start : label, default None
If None, defaults to the beginning
end : label, default None
If None, defaults to the end
step : int, defaults None
If None, defaults to 1
kind : {'ix', 'loc', 'getitem'} or None
Returns
-------
start, end : int
"""
inc = (step is None or step >= 0)
if not inc:
# If it's a reverse slice, temporarily swap bounds.
start, end = end, start
start_slice = None
if start is not None:
start_slice = self.get_slice_bound(start, 'left', kind)
if start_slice is None:
start_slice = 0
end_slice = None
if end is not None:
end_slice = self.get_slice_bound(end, 'right', kind)
if end_slice is None:
end_slice = len(self)
if not inc:
# Bounds at this moment are swapped, swap them back and shift by 1.
#
# slice_locs('B', 'A', step=-1): s='B', e='A'
#
# s='A' e='B'
# AFTER SWAP: | |
# v ------------------> V
# -----------------------------------
# | | |A|A|A|A| | | | | |B|B| | | | |
# -----------------------------------
# ^ <------------------ ^
# SHOULD BE: | |
# end=s-1 start=e-1
#
end_slice, start_slice = start_slice - 1, end_slice - 1
# i == -1 triggers ``len(self) + i`` selection that points to the
# last element, not before-the-first one, subtracting len(self)
# compensates that.
if end_slice == -1:
end_slice -= len(self)
if start_slice == -1:
start_slice -= len(self)
return start_slice, end_slice
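    # A minimal usage sketch of ``slice_locs``, assuming ``pd`` is pandas;
    # for a negative step the bounds are computed on swapped labels and then
    # shifted, mirroring the diagram above:
    #
    #     idx = pd.Index(list('abcdef'))
    #     idx.slice_locs('b', 'd')              # -> (1, 4)
    #     idx.slice_locs('d', 'b', step=-1)     # -> (3, 0)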
def delete(self, loc):
"""
Make new Index with passed location(-s) deleted
Returns
-------
new_index : Index
"""
return self._shallow_copy(np.delete(self._data, loc))
def insert(self, loc, item):
"""
Make new Index inserting new item at location. Follows
Python list.append semantics for negative values
Parameters
----------
loc : int
item : object
Returns
-------
new_index : Index
"""
_self = np.asarray(self)
item = self._coerce_scalar_to_index(item)._values
idx = np.concatenate((_self[:loc], item, _self[loc:]))
return self._shallow_copy_with_infer(idx)
def drop(self, labels, errors='raise'):
"""
Make new Index with passed list of labels deleted
Parameters
----------
labels : array-like
errors : {'ignore', 'raise'}, default 'raise'
If 'ignore', suppress error and existing labels are dropped.
Returns
-------
dropped : Index
"""
labels = com._index_labels_to_array(labels)
indexer = self.get_indexer(labels)
mask = indexer == -1
if mask.any():
if errors != 'ignore':
raise ValueError('labels %s not contained in axis' %
labels[mask])
indexer = indexer[~mask]
return self.delete(indexer)
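    # A minimal usage sketch of ``drop``, assuming ``pd`` is pandas; missing
    # labels raise unless ``errors='ignore'``:
    #
    #     idx = pd.Index(['a', 'b', 'c'])
    #     idx.drop(['b'])                       # -> Index(['a', 'c'])
    #     idx.drop(['z'], errors='ignore')      # unchanged, no error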
@Appender(base._shared_docs['unique'] % _index_doc_kwargs)
def unique(self):
result = super(Index, self).unique()
return self._shallow_copy(result)
@Appender(base._shared_docs['drop_duplicates'] % _index_doc_kwargs)
def drop_duplicates(self, keep='first'):
return super(Index, self).drop_duplicates(keep=keep)
@Appender(base._shared_docs['duplicated'] % _index_doc_kwargs)
def duplicated(self, keep='first'):
return super(Index, self).duplicated(keep=keep)
_index_shared_docs['fillna'] = """
Fill NA/NaN values with the specified value
Parameters
----------
value : scalar
Scalar value to use to fill holes (e.g. 0).
            This value cannot be a list-like.
downcast : dict, default is None
a dict of item->dtype of what to downcast if possible,
or the string 'infer' which will try to downcast to an appropriate
equal type (e.g. float64 to int64 if possible)
Returns
-------
filled : %(klass)s
"""
@Appender(_index_shared_docs['fillna'])
def fillna(self, value=None, downcast=None):
self._assert_can_do_op(value)
if self.hasnans:
result = self.putmask(self._isnan, value)
if downcast is None:
                # no need to care about metadata other than name, because
                # the index can't have a freq if it contains NaNs
return Index(result, name=self.name)
return self._shallow_copy()
_index_shared_docs['dropna'] = """
Return Index without NA/NaN values
Parameters
----------
how : {'any', 'all'}, default 'any'
If the Index is a MultiIndex, drop the value when any or all levels
are NaN.
Returns
-------
valid : Index
"""
@Appender(_index_shared_docs['dropna'])
def dropna(self, how='any'):
if how not in ('any', 'all'):
raise ValueError("invalid how option: {0}".format(how))
if self.hasnans:
return self._shallow_copy(self.values[~self._isnan])
return self._shallow_copy()
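    # A minimal usage sketch of ``fillna`` / ``dropna``, assuming ``pd`` is
    # pandas and ``np`` is numpy:
    #
    #     idx = pd.Index([1.0, np.nan, 3.0])
    #     idx.fillna(0)        # -> index of [1.0, 0.0, 3.0]
    #     idx.dropna()         # -> index of [1.0, 3.0]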
def _evaluate_with_timedelta_like(self, other, op, opstr):
raise TypeError("can only perform ops with timedelta like values")
def _evaluate_with_datetime_like(self, other, op, opstr):
raise TypeError("can only perform ops with datetime like values")
    def _evaluate_compare(self, other, op):
        raise base.AbstractMethodError(self)
@classmethod
def _add_comparison_methods(cls):
""" add in comparison methods """
def _make_compare(op):
def _evaluate_compare(self, other):
if isinstance(other, (np.ndarray, Index, ABCSeries)):
if other.ndim > 0 and len(self) != len(other):
raise ValueError('Lengths must match to compare')
# we may need to directly compare underlying
# representations
if needs_i8_conversion(self) and needs_i8_conversion(other):
return self._evaluate_compare(other, op)
if (is_object_dtype(self) and
self.nlevels == 1):
# don't pass MultiIndex
with np.errstate(all='ignore'):
result = _comp_method_OBJECT_ARRAY(
op, self.values, other)
else:
with np.errstate(all='ignore'):
result = op(self.values, np.asarray(other))
# technically we could support bool dtyped Index
# for now just return the indexing array directly
if is_bool_dtype(result):
return result
try:
return Index(result)
except TypeError:
return result
return _evaluate_compare
cls.__eq__ = _make_compare(operator.eq)
cls.__ne__ = _make_compare(operator.ne)
cls.__lt__ = _make_compare(operator.lt)
cls.__gt__ = _make_compare(operator.gt)
cls.__le__ = _make_compare(operator.le)
cls.__ge__ = _make_compare(operator.ge)
@classmethod
def _add_numeric_methods_add_sub_disabled(cls):
""" add in the numeric add/sub methods to disable """
def _make_invalid_op(name):
def invalid_op(self, other=None):
raise TypeError("cannot perform {name} with this index type: "
"{typ}".format(name=name, typ=type(self)))
invalid_op.__name__ = name
return invalid_op
        cls.__add__ = cls.__radd__ = cls.__iadd__ = _make_invalid_op('__add__')
        cls.__sub__ = cls.__isub__ = _make_invalid_op('__sub__')
@classmethod
def _add_numeric_methods_disabled(cls):
""" add in numeric methods to disable other than add/sub """
def _make_invalid_op(name):
def invalid_op(self, other=None):
raise TypeError("cannot perform {name} with this index type: "
"{typ}".format(name=name, typ=type(self)))
invalid_op.__name__ = name
return invalid_op
cls.__pow__ = cls.__rpow__ = _make_invalid_op('__pow__')
cls.__mul__ = cls.__rmul__ = _make_invalid_op('__mul__')
cls.__floordiv__ = cls.__rfloordiv__ = _make_invalid_op('__floordiv__')
cls.__truediv__ = cls.__rtruediv__ = _make_invalid_op('__truediv__')
if not compat.PY3:
cls.__div__ = cls.__rdiv__ = _make_invalid_op('__div__')
cls.__neg__ = _make_invalid_op('__neg__')
cls.__pos__ = _make_invalid_op('__pos__')
cls.__abs__ = _make_invalid_op('__abs__')
cls.__inv__ = _make_invalid_op('__inv__')
def _maybe_update_attributes(self, attrs):
""" Update Index attributes (e.g. freq) depending on op """
return attrs
def _validate_for_numeric_unaryop(self, op, opstr):
""" validate if we can perform a numeric unary operation """
if not self._is_numeric_dtype:
raise TypeError("cannot evaluate a numeric op "
"{opstr} for type: {typ}".format(
opstr=opstr,
typ=type(self))
)
def _validate_for_numeric_binop(self, other, op, opstr):
"""
return valid other, evaluate or raise TypeError
if we are not of the appropriate type
internal method called by ops
"""
from pandas.tseries.offsets import DateOffset
# if we are an inheritor of numeric,
        # but not actually numeric (e.g. DatetimeIndex/PeriodIndex)
if not self._is_numeric_dtype:
raise TypeError("cannot evaluate a numeric op {opstr} "
"for type: {typ}".format(
opstr=opstr,
typ=type(self))
)
if isinstance(other, Index):
if not other._is_numeric_dtype:
raise TypeError("cannot evaluate a numeric op "
"{opstr} with type: {typ}".format(
                                    opstr=opstr,
typ=type(other))
)
elif isinstance(other, np.ndarray) and not other.ndim:
other = other.item()
if isinstance(other, (Index, ABCSeries, np.ndarray)):
if len(self) != len(other):
raise ValueError("cannot evaluate a numeric op with "
"unequal lengths")
other = _values_from_object(other)
if other.dtype.kind not in ['f', 'i', 'u']:
raise TypeError("cannot evaluate a numeric op "
"with a non-numeric dtype")
elif isinstance(other, (DateOffset, np.timedelta64,
Timedelta, datetime.timedelta)):
# higher up to handle
pass
elif isinstance(other, (Timestamp, np.datetime64)):
# higher up to handle
pass
else:
if not (is_float(other) or is_integer(other)):
raise TypeError("can only perform ops with scalar values")
return other
@classmethod
def _add_numeric_methods_binary(cls):
""" add in numeric methods """
def _make_evaluate_binop(op, opstr, reversed=False, constructor=Index):
def _evaluate_numeric_binop(self, other):
from pandas.tseries.offsets import DateOffset
other = self._validate_for_numeric_binop(other, op, opstr)
# handle time-based others
if isinstance(other, (DateOffset, np.timedelta64,
Timedelta, datetime.timedelta)):
return self._evaluate_with_timedelta_like(other, op, opstr)
elif isinstance(other, (Timestamp, np.datetime64)):
return self._evaluate_with_datetime_like(other, op, opstr)
                # if we are a reversed non-commutative op
values = self.values
if reversed:
values, other = other, values
attrs = self._get_attributes_dict()
attrs = self._maybe_update_attributes(attrs)
with np.errstate(all='ignore'):
result = op(values, other)
return constructor(result, **attrs)
return _evaluate_numeric_binop
cls.__add__ = cls.__radd__ = _make_evaluate_binop(
operator.add, '__add__')
cls.__sub__ = _make_evaluate_binop(
operator.sub, '__sub__')
cls.__rsub__ = _make_evaluate_binop(
operator.sub, '__sub__', reversed=True)
cls.__mul__ = cls.__rmul__ = _make_evaluate_binop(
operator.mul, '__mul__')
cls.__rpow__ = _make_evaluate_binop(
operator.pow, '__pow__', reversed=True)
cls.__pow__ = _make_evaluate_binop(
operator.pow, '__pow__')
cls.__mod__ = _make_evaluate_binop(
operator.mod, '__mod__')
cls.__floordiv__ = _make_evaluate_binop(
operator.floordiv, '__floordiv__')
cls.__rfloordiv__ = _make_evaluate_binop(
operator.floordiv, '__floordiv__', reversed=True)
cls.__truediv__ = _make_evaluate_binop(
operator.truediv, '__truediv__')
cls.__rtruediv__ = _make_evaluate_binop(
operator.truediv, '__truediv__', reversed=True)
if not compat.PY3:
cls.__div__ = _make_evaluate_binop(
operator.div, '__div__')
cls.__rdiv__ = _make_evaluate_binop(
operator.div, '__div__', reversed=True)
cls.__divmod__ = _make_evaluate_binop(
divmod,
'__divmod__',
constructor=lambda result, **attrs: (
Index(result[0], **attrs),
Index(result[1], **attrs),
),
)
@classmethod
def _add_numeric_methods_unary(cls):
""" add in numeric unary methods """
def _make_evaluate_unary(op, opstr):
def _evaluate_numeric_unary(self):
self._validate_for_numeric_unaryop(op, opstr)
attrs = self._get_attributes_dict()
attrs = self._maybe_update_attributes(attrs)
return Index(op(self.values), **attrs)
return _evaluate_numeric_unary
cls.__neg__ = _make_evaluate_unary(lambda x: -x, '__neg__')
cls.__pos__ = _make_evaluate_unary(lambda x: x, '__pos__')
cls.__abs__ = _make_evaluate_unary(np.abs, '__abs__')
cls.__inv__ = _make_evaluate_unary(lambda x: -x, '__inv__')
@classmethod
def _add_numeric_methods(cls):
cls._add_numeric_methods_unary()
cls._add_numeric_methods_binary()
@classmethod
def _add_logical_methods(cls):
""" add in logical methods """
_doc = """
%(desc)s
Parameters
----------
All arguments to numpy.%(outname)s are accepted.
Returns
-------
%(outname)s : bool or array_like (if axis is specified)
A single element array_like may be converted to bool."""
def _make_logical_function(name, desc, f):
@Substitution(outname=name, desc=desc)
@Appender(_doc)
def logical_func(self, *args, **kwargs):
result = f(self.values)
if (isinstance(result, (np.ndarray, ABCSeries, Index)) and
result.ndim == 0):
# return NumPy type
return result.dtype.type(result.item())
else: # pragma: no cover
return result
logical_func.__name__ = name
return logical_func
cls.all = _make_logical_function('all', 'Return whether all elements '
'are True',
np.all)
cls.any = _make_logical_function('any',
'Return whether any element is True',
np.any)
@classmethod
def _add_logical_methods_disabled(cls):
""" add in logical methods to disable """
def _make_invalid_op(name):
def invalid_op(self, other=None):
raise TypeError("cannot perform {name} with this index type: "
"{typ}".format(name=name, typ=type(self)))
invalid_op.__name__ = name
return invalid_op
cls.all = _make_invalid_op('all')
cls.any = _make_invalid_op('any')
Index._add_numeric_methods_disabled()
Index._add_logical_methods()
Index._add_comparison_methods()
def _ensure_index(index_like, copy=False):
if isinstance(index_like, Index):
if copy:
index_like = index_like.copy()
return index_like
if hasattr(index_like, 'name'):
return Index(index_like, name=index_like.name, copy=copy)
# must check for exactly list here because of strict type
# check in clean_index_list
if isinstance(index_like, list):
if type(index_like) != list:
index_like = list(index_like)
# 2200 ?
converted, all_arrays = lib.clean_index_list(index_like)
if len(converted) > 0 and all_arrays:
from .multi import MultiIndex
return MultiIndex.from_arrays(converted)
else:
index_like = converted
else:
# clean_index_list does the equivalent of copying
# so only need to do this if not list instance
if copy:
from copy import copy
index_like = copy(index_like)
return Index(index_like)
def _get_na_value(dtype):
if is_datetime64_any_dtype(dtype) or is_timedelta64_dtype(dtype):
return libts.NaT
return {np.datetime64: libts.NaT,
np.timedelta64: libts.NaT}.get(dtype, np.nan)
def _ensure_has_len(seq):
"""If seq is an iterator, put its values into a list."""
try:
len(seq)
except TypeError:
return list(seq)
else:
return seq
def _trim_front(strings):
"""
    Trims leading whitespace that is common to all strings
"""
trimmed = strings
while len(strings) > 0 and all([x[0] == ' ' for x in trimmed]):
trimmed = [x[1:] for x in trimmed]
return trimmed
def _validate_join_method(method):
if method not in ['left', 'right', 'inner', 'outer']:
raise ValueError('do not recognize join method %s' % method)
| mit |
louispotok/pandas | pandas/tests/indexes/datetimes/test_timezones.py | 1 | 41691 | # -*- coding: utf-8 -*-
"""
Tests for DatetimeIndex timezone-related methods
"""
from datetime import datetime, timedelta, tzinfo, date, time
from distutils.version import LooseVersion
import pytest
import pytz
import dateutil
from dateutil.tz import gettz, tzlocal
import numpy as np
import pandas.util.testing as tm
import pandas.util._test_decorators as td
import pandas as pd
from pandas._libs import tslib
from pandas._libs.tslibs import timezones
from pandas.compat import lrange, zip, PY3
from pandas import (DatetimeIndex, date_range, bdate_range,
Timestamp, isna, to_datetime, Index)
class FixedOffset(tzinfo):
"""Fixed offset in minutes east from UTC."""
def __init__(self, offset, name):
self.__offset = timedelta(minutes=offset)
self.__name = name
def utcoffset(self, dt):
return self.__offset
def tzname(self, dt):
return self.__name
def dst(self, dt):
return timedelta(0)
fixed_off = FixedOffset(-420, '-07:00')
fixed_off_no_name = FixedOffset(-330, None)
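# A minimal sketch of how these fixed offsets behave (``Timestamp`` is
# imported above); -420 minutes east of UTC corresponds to UTC-07:00:
#
#     ts = Timestamp('2011-01-01 12:00', tz=fixed_off)
#     ts.utcoffset()        # -> timedelta(minutes=-420), i.e. -7 hours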
class TestDatetimeIndexTimezones(object):
# -------------------------------------------------------------
# DatetimeIndex.tz_convert
def test_tz_convert_nat(self):
# GH#5546
dates = [pd.NaT]
idx = DatetimeIndex(dates)
idx = idx.tz_localize('US/Pacific')
tm.assert_index_equal(idx, DatetimeIndex(dates, tz='US/Pacific'))
idx = idx.tz_convert('US/Eastern')
tm.assert_index_equal(idx, DatetimeIndex(dates, tz='US/Eastern'))
idx = idx.tz_convert('UTC')
tm.assert_index_equal(idx, DatetimeIndex(dates, tz='UTC'))
dates = ['2010-12-01 00:00', '2010-12-02 00:00', pd.NaT]
idx = DatetimeIndex(dates)
idx = idx.tz_localize('US/Pacific')
tm.assert_index_equal(idx, DatetimeIndex(dates, tz='US/Pacific'))
idx = idx.tz_convert('US/Eastern')
expected = ['2010-12-01 03:00', '2010-12-02 03:00', pd.NaT]
tm.assert_index_equal(idx, DatetimeIndex(expected, tz='US/Eastern'))
idx = idx + pd.offsets.Hour(5)
expected = ['2010-12-01 08:00', '2010-12-02 08:00', pd.NaT]
tm.assert_index_equal(idx, DatetimeIndex(expected, tz='US/Eastern'))
idx = idx.tz_convert('US/Pacific')
expected = ['2010-12-01 05:00', '2010-12-02 05:00', pd.NaT]
tm.assert_index_equal(idx, DatetimeIndex(expected, tz='US/Pacific'))
idx = idx + np.timedelta64(3, 'h')
expected = ['2010-12-01 08:00', '2010-12-02 08:00', pd.NaT]
tm.assert_index_equal(idx, DatetimeIndex(expected, tz='US/Pacific'))
idx = idx.tz_convert('US/Eastern')
expected = ['2010-12-01 11:00', '2010-12-02 11:00', pd.NaT]
tm.assert_index_equal(idx, DatetimeIndex(expected, tz='US/Eastern'))
@pytest.mark.parametrize('prefix', ['', 'dateutil/'])
def test_dti_tz_convert_compat_timestamp(self, prefix):
strdates = ['1/1/2012', '3/1/2012', '4/1/2012']
idx = DatetimeIndex(strdates, tz=prefix + 'US/Eastern')
conv = idx[0].tz_convert(prefix + 'US/Pacific')
expected = idx.tz_convert(prefix + 'US/Pacific')[0]
assert conv == expected
def test_dti_tz_convert_hour_overflow_dst(self):
# Regression test for:
# https://github.com/pandas-dev/pandas/issues/13306
# sorted case US/Eastern -> UTC
ts = ['2008-05-12 09:50:00',
'2008-12-12 09:50:35',
'2009-05-12 09:50:32']
tt = DatetimeIndex(ts).tz_localize('US/Eastern')
ut = tt.tz_convert('UTC')
expected = Index([13, 14, 13])
tm.assert_index_equal(ut.hour, expected)
# sorted case UTC -> US/Eastern
ts = ['2008-05-12 13:50:00',
'2008-12-12 14:50:35',
'2009-05-12 13:50:32']
tt = DatetimeIndex(ts).tz_localize('UTC')
ut = tt.tz_convert('US/Eastern')
expected = Index([9, 9, 9])
tm.assert_index_equal(ut.hour, expected)
# unsorted case US/Eastern -> UTC
ts = ['2008-05-12 09:50:00',
'2008-12-12 09:50:35',
'2008-05-12 09:50:32']
tt = DatetimeIndex(ts).tz_localize('US/Eastern')
ut = tt.tz_convert('UTC')
expected = Index([13, 14, 13])
tm.assert_index_equal(ut.hour, expected)
# unsorted case UTC -> US/Eastern
ts = ['2008-05-12 13:50:00',
'2008-12-12 14:50:35',
'2008-05-12 13:50:32']
tt = DatetimeIndex(ts).tz_localize('UTC')
ut = tt.tz_convert('US/Eastern')
expected = Index([9, 9, 9])
tm.assert_index_equal(ut.hour, expected)
@pytest.mark.parametrize('tz', ['US/Eastern', 'dateutil/US/Eastern'])
def test_dti_tz_convert_hour_overflow_dst_timestamps(self, tz):
# Regression test for GH#13306
# sorted case US/Eastern -> UTC
ts = [Timestamp('2008-05-12 09:50:00', tz=tz),
Timestamp('2008-12-12 09:50:35', tz=tz),
Timestamp('2009-05-12 09:50:32', tz=tz)]
tt = DatetimeIndex(ts)
ut = tt.tz_convert('UTC')
expected = Index([13, 14, 13])
tm.assert_index_equal(ut.hour, expected)
# sorted case UTC -> US/Eastern
ts = [Timestamp('2008-05-12 13:50:00', tz='UTC'),
Timestamp('2008-12-12 14:50:35', tz='UTC'),
Timestamp('2009-05-12 13:50:32', tz='UTC')]
tt = DatetimeIndex(ts)
ut = tt.tz_convert('US/Eastern')
expected = Index([9, 9, 9])
tm.assert_index_equal(ut.hour, expected)
# unsorted case US/Eastern -> UTC
ts = [Timestamp('2008-05-12 09:50:00', tz=tz),
Timestamp('2008-12-12 09:50:35', tz=tz),
Timestamp('2008-05-12 09:50:32', tz=tz)]
tt = DatetimeIndex(ts)
ut = tt.tz_convert('UTC')
expected = Index([13, 14, 13])
tm.assert_index_equal(ut.hour, expected)
# unsorted case UTC -> US/Eastern
ts = [Timestamp('2008-05-12 13:50:00', tz='UTC'),
Timestamp('2008-12-12 14:50:35', tz='UTC'),
Timestamp('2008-05-12 13:50:32', tz='UTC')]
tt = DatetimeIndex(ts)
ut = tt.tz_convert('US/Eastern')
expected = Index([9, 9, 9])
tm.assert_index_equal(ut.hour, expected)
@pytest.mark.parametrize('freq, n', [('H', 1), ('T', 60), ('S', 3600)])
def test_dti_tz_convert_trans_pos_plus_1__bug(self, freq, n):
# Regression test for tslib.tz_convert(vals, tz1, tz2).
# See https://github.com/pandas-dev/pandas/issues/4496 for details.
idx = date_range(datetime(2011, 3, 26, 23),
datetime(2011, 3, 27, 1), freq=freq)
idx = idx.tz_localize('UTC')
idx = idx.tz_convert('Europe/Moscow')
expected = np.repeat(np.array([3, 4, 5]), np.array([n, n, 1]))
tm.assert_index_equal(idx.hour, Index(expected))
def test_dti_tz_convert_dst(self):
for freq, n in [('H', 1), ('T', 60), ('S', 3600)]:
# Start DST
idx = date_range('2014-03-08 23:00', '2014-03-09 09:00', freq=freq,
tz='UTC')
idx = idx.tz_convert('US/Eastern')
expected = np.repeat(np.array([18, 19, 20, 21, 22, 23,
0, 1, 3, 4, 5]),
np.array([n, n, n, n, n, n, n, n, n, n, 1]))
tm.assert_index_equal(idx.hour, Index(expected))
idx = date_range('2014-03-08 18:00', '2014-03-09 05:00', freq=freq,
tz='US/Eastern')
idx = idx.tz_convert('UTC')
expected = np.repeat(np.array([23, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9]),
np.array([n, n, n, n, n, n, n, n, n, n, 1]))
tm.assert_index_equal(idx.hour, Index(expected))
# End DST
idx = date_range('2014-11-01 23:00', '2014-11-02 09:00', freq=freq,
tz='UTC')
idx = idx.tz_convert('US/Eastern')
expected = np.repeat(np.array([19, 20, 21, 22, 23,
0, 1, 1, 2, 3, 4]),
np.array([n, n, n, n, n, n, n, n, n, n, 1]))
tm.assert_index_equal(idx.hour, Index(expected))
idx = date_range('2014-11-01 18:00', '2014-11-02 05:00', freq=freq,
tz='US/Eastern')
idx = idx.tz_convert('UTC')
expected = np.repeat(np.array([22, 23, 0, 1, 2, 3, 4, 5, 6,
7, 8, 9, 10]),
np.array([n, n, n, n, n, n, n, n, n,
n, n, n, 1]))
tm.assert_index_equal(idx.hour, Index(expected))
# daily
# Start DST
idx = date_range('2014-03-08 00:00', '2014-03-09 00:00', freq='D',
tz='UTC')
idx = idx.tz_convert('US/Eastern')
tm.assert_index_equal(idx.hour, Index([19, 19]))
idx = date_range('2014-03-08 00:00', '2014-03-09 00:00', freq='D',
tz='US/Eastern')
idx = idx.tz_convert('UTC')
tm.assert_index_equal(idx.hour, Index([5, 5]))
# End DST
idx = date_range('2014-11-01 00:00', '2014-11-02 00:00', freq='D',
tz='UTC')
idx = idx.tz_convert('US/Eastern')
tm.assert_index_equal(idx.hour, Index([20, 20]))
        idx = date_range('2014-11-01 00:00', '2014-11-02 00:00', freq='D',
tz='US/Eastern')
idx = idx.tz_convert('UTC')
tm.assert_index_equal(idx.hour, Index([4, 4]))
def test_tz_convert_roundtrip(self, tz_aware_fixture):
tz = tz_aware_fixture
idx1 = date_range(start='2014-01-01', end='2014-12-31', freq='M',
tz='UTC')
exp1 = date_range(start='2014-01-01', end='2014-12-31', freq='M')
idx2 = date_range(start='2014-01-01', end='2014-12-31', freq='D',
tz='UTC')
exp2 = date_range(start='2014-01-01', end='2014-12-31', freq='D')
idx3 = date_range(start='2014-01-01', end='2014-03-01', freq='H',
tz='UTC')
exp3 = date_range(start='2014-01-01', end='2014-03-01', freq='H')
idx4 = date_range(start='2014-08-01', end='2014-10-31', freq='T',
tz='UTC')
exp4 = date_range(start='2014-08-01', end='2014-10-31', freq='T')
for idx, expected in [(idx1, exp1), (idx2, exp2), (idx3, exp3),
(idx4, exp4)]:
converted = idx.tz_convert(tz)
reset = converted.tz_convert(None)
tm.assert_index_equal(reset, expected)
assert reset.tzinfo is None
expected = converted.tz_convert('UTC').tz_localize(None)
tm.assert_index_equal(reset, expected)
def test_dti_tz_convert_tzlocal(self):
# GH#13583
# tz_convert doesn't affect to internal
dti = date_range(start='2001-01-01', end='2001-03-01', tz='UTC')
dti2 = dti.tz_convert(dateutil.tz.tzlocal())
tm.assert_numpy_array_equal(dti2.asi8, dti.asi8)
dti = date_range(start='2001-01-01', end='2001-03-01',
tz=dateutil.tz.tzlocal())
dti2 = dti.tz_convert(None)
tm.assert_numpy_array_equal(dti2.asi8, dti.asi8)
@pytest.mark.parametrize('tz', ['US/Eastern', 'dateutil/US/Eastern',
pytz.timezone('US/Eastern'),
gettz('US/Eastern')])
def test_dti_tz_convert_utc_to_local_no_modify(self, tz):
rng = date_range('3/11/2012', '3/12/2012', freq='H', tz='utc')
rng_eastern = rng.tz_convert(tz)
# Values are unmodified
tm.assert_numpy_array_equal(rng.asi8, rng_eastern.asi8)
assert timezones.tz_compare(rng_eastern.tz, timezones.maybe_get_tz(tz))
@pytest.mark.parametrize('tzstr', ['US/Eastern', 'dateutil/US/Eastern'])
def test_tz_convert_unsorted(self, tzstr):
dr = date_range('2012-03-09', freq='H', periods=100, tz='utc')
dr = dr.tz_convert(tzstr)
result = dr[::-1].hour
exp = dr.hour[::-1]
tm.assert_almost_equal(result, exp)
# -------------------------------------------------------------
# DatetimeIndex.tz_localize
def test_dti_tz_localize_nonexistent_raise_coerce(self):
# GH#13057
times = ['2015-03-08 01:00', '2015-03-08 02:00', '2015-03-08 03:00']
index = DatetimeIndex(times)
tz = 'US/Eastern'
with pytest.raises(pytz.NonExistentTimeError):
index.tz_localize(tz=tz)
with pytest.raises(pytz.NonExistentTimeError):
index.tz_localize(tz=tz, errors='raise')
result = index.tz_localize(tz=tz, errors='coerce')
test_times = ['2015-03-08 01:00-05:00', 'NaT',
'2015-03-08 03:00-04:00']
dti = DatetimeIndex(test_times)
expected = dti.tz_localize('UTC').tz_convert('US/Eastern')
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize('tz', [pytz.timezone('US/Eastern'),
gettz('US/Eastern')])
def test_dti_tz_localize_ambiguous_infer(self, tz):
# November 6, 2011, fall back, repeat 2 AM hour
# With no repeated hours, we cannot infer the transition
dr = date_range(datetime(2011, 11, 6, 0), periods=5,
freq=pd.offsets.Hour())
with pytest.raises(pytz.AmbiguousTimeError):
dr.tz_localize(tz)
# With repeated hours, we can infer the transition
dr = date_range(datetime(2011, 11, 6, 0), periods=5,
freq=pd.offsets.Hour(), tz=tz)
times = ['11/06/2011 00:00', '11/06/2011 01:00', '11/06/2011 01:00',
'11/06/2011 02:00', '11/06/2011 03:00']
di = DatetimeIndex(times)
localized = di.tz_localize(tz, ambiguous='infer')
tm.assert_index_equal(dr, localized)
tm.assert_index_equal(dr, DatetimeIndex(times, tz=tz,
ambiguous='infer'))
# When there is no dst transition, nothing special happens
dr = date_range(datetime(2011, 6, 1, 0), periods=10,
freq=pd.offsets.Hour())
localized = dr.tz_localize(tz)
localized_infer = dr.tz_localize(tz, ambiguous='infer')
tm.assert_index_equal(localized, localized_infer)
@pytest.mark.parametrize('tz', [pytz.timezone('US/Eastern'),
gettz('US/Eastern')])
def test_dti_tz_localize_ambiguous_times(self, tz):
# March 13, 2011, spring forward, skip from 2 AM to 3 AM
dr = date_range(datetime(2011, 3, 13, 1, 30), periods=3,
freq=pd.offsets.Hour())
with pytest.raises(pytz.NonExistentTimeError):
dr.tz_localize(tz)
# after dst transition, it works
dr = date_range(datetime(2011, 3, 13, 3, 30), periods=3,
freq=pd.offsets.Hour(), tz=tz)
# November 6, 2011, fall back, repeat 2 AM hour
dr = date_range(datetime(2011, 11, 6, 1, 30), periods=3,
freq=pd.offsets.Hour())
with pytest.raises(pytz.AmbiguousTimeError):
dr.tz_localize(tz)
# UTC is OK
dr = date_range(datetime(2011, 3, 13), periods=48,
freq=pd.offsets.Minute(30), tz=pytz.utc)
@pytest.mark.parametrize('tzstr', ['US/Eastern', 'dateutil/US/Eastern'])
def test_dti_tz_localize_pass_dates_to_utc(self, tzstr):
strdates = ['1/1/2012', '3/1/2012', '4/1/2012']
idx = DatetimeIndex(strdates)
conv = idx.tz_localize(tzstr)
fromdates = DatetimeIndex(strdates, tz=tzstr)
assert conv.tz == fromdates.tz
tm.assert_numpy_array_equal(conv.values, fromdates.values)
@pytest.mark.parametrize('prefix', ['', 'dateutil/'])
def test_dti_tz_localize(self, prefix):
tzstr = prefix + 'US/Eastern'
dti = DatetimeIndex(start='1/1/2005', end='1/1/2005 0:00:30.256',
freq='L')
dti2 = dti.tz_localize(tzstr)
dti_utc = DatetimeIndex(start='1/1/2005 05:00',
end='1/1/2005 5:00:30.256', freq='L', tz='utc')
tm.assert_numpy_array_equal(dti2.values, dti_utc.values)
dti3 = dti2.tz_convert(prefix + 'US/Pacific')
tm.assert_numpy_array_equal(dti3.values, dti_utc.values)
dti = DatetimeIndex(start='11/6/2011 1:59', end='11/6/2011 2:00',
freq='L')
with pytest.raises(pytz.AmbiguousTimeError):
dti.tz_localize(tzstr)
dti = DatetimeIndex(start='3/13/2011 1:59', end='3/13/2011 2:00',
freq='L')
with pytest.raises(pytz.NonExistentTimeError):
dti.tz_localize(tzstr)
@pytest.mark.parametrize('tz', ['US/Eastern', 'dateutil/US/Eastern',
pytz.timezone('US/Eastern'),
gettz('US/Eastern')])
def test_dti_tz_localize_utc_conversion(self, tz):
# Localizing to time zone should:
# 1) check for DST ambiguities
# 2) convert to UTC
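        # For example (illustrative): before the spring-forward transition
        # US/Eastern is UTC-5, so localizing '2012-03-10 00:00' gives the same
        # instant as Timestamp('2012-03-10 05:00', tz='UTC'); that offset is
        # exactly the ``pd.offsets.Hour(5)`` applied below.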
rng = date_range('3/10/2012', '3/11/2012', freq='30T')
converted = rng.tz_localize(tz)
expected_naive = rng + pd.offsets.Hour(5)
tm.assert_numpy_array_equal(converted.asi8, expected_naive.asi8)
# DST ambiguity, this should fail
rng = date_range('3/11/2012', '3/12/2012', freq='30T')
# Is this really how it should fail??
with pytest.raises(pytz.NonExistentTimeError):
rng.tz_localize(tz)
def test_dti_tz_localize_roundtrip(self, tz_aware_fixture):
tz = tz_aware_fixture
idx1 = date_range(start='2014-01-01', end='2014-12-31', freq='M')
idx2 = date_range(start='2014-01-01', end='2014-12-31', freq='D')
idx3 = date_range(start='2014-01-01', end='2014-03-01', freq='H')
idx4 = date_range(start='2014-08-01', end='2014-10-31', freq='T')
for idx in [idx1, idx2, idx3, idx4]:
localized = idx.tz_localize(tz)
expected = date_range(start=idx[0], end=idx[-1], freq=idx.freq,
tz=tz)
tm.assert_index_equal(localized, expected)
with pytest.raises(TypeError):
localized.tz_localize(tz)
reset = localized.tz_localize(None)
tm.assert_index_equal(reset, idx)
assert reset.tzinfo is None
def test_dti_tz_localize_naive(self):
rng = date_range('1/1/2011', periods=100, freq='H')
conv = rng.tz_localize('US/Pacific')
exp = date_range('1/1/2011', periods=100, freq='H', tz='US/Pacific')
tm.assert_index_equal(conv, exp)
def test_dti_tz_localize_tzlocal(self):
# GH#13583
offset = dateutil.tz.tzlocal().utcoffset(datetime(2011, 1, 1))
offset = int(offset.total_seconds() * 1000000000)
dti = date_range(start='2001-01-01', end='2001-03-01')
dti2 = dti.tz_localize(dateutil.tz.tzlocal())
tm.assert_numpy_array_equal(dti2.asi8 + offset, dti.asi8)
dti = date_range(start='2001-01-01', end='2001-03-01',
tz=dateutil.tz.tzlocal())
dti2 = dti.tz_localize(None)
tm.assert_numpy_array_equal(dti2.asi8 - offset, dti.asi8)
@pytest.mark.parametrize('tz', [pytz.timezone('US/Eastern'),
gettz('US/Eastern')])
def test_dti_tz_localize_ambiguous_nat(self, tz):
times = ['11/06/2011 00:00', '11/06/2011 01:00', '11/06/2011 01:00',
'11/06/2011 02:00', '11/06/2011 03:00']
di = DatetimeIndex(times)
localized = di.tz_localize(tz, ambiguous='NaT')
times = ['11/06/2011 00:00', np.NaN, np.NaN, '11/06/2011 02:00',
'11/06/2011 03:00']
di_test = DatetimeIndex(times, tz='US/Eastern')
# left dtype is datetime64[ns, US/Eastern]
# right is datetime64[ns, tzfile('/usr/share/zoneinfo/US/Eastern')]
tm.assert_numpy_array_equal(di_test.values, localized.values)
@pytest.mark.parametrize('tz', [pytz.timezone('US/Eastern'),
gettz('US/Eastern')])
def test_dti_tz_localize_ambiguous_flags(self, tz):
# November 6, 2011, fall back, repeat 2 AM hour
# Pass in flags to determine right dst transition
dr = date_range(datetime(2011, 11, 6, 0), periods=5,
freq=pd.offsets.Hour(), tz=tz)
times = ['11/06/2011 00:00', '11/06/2011 01:00', '11/06/2011 01:00',
'11/06/2011 02:00', '11/06/2011 03:00']
# Test tz_localize
di = DatetimeIndex(times)
is_dst = [1, 1, 0, 0, 0]
localized = di.tz_localize(tz, ambiguous=is_dst)
tm.assert_index_equal(dr, localized)
tm.assert_index_equal(dr, DatetimeIndex(times, tz=tz,
ambiguous=is_dst))
localized = di.tz_localize(tz, ambiguous=np.array(is_dst))
tm.assert_index_equal(dr, localized)
localized = di.tz_localize(tz,
ambiguous=np.array(is_dst).astype('bool'))
tm.assert_index_equal(dr, localized)
# Test constructor
localized = DatetimeIndex(times, tz=tz, ambiguous=is_dst)
tm.assert_index_equal(dr, localized)
# Test duplicate times where inferring the dst fails
times += times
di = DatetimeIndex(times)
# When the sizes are incompatible, make sure error is raised
with pytest.raises(Exception):
di.tz_localize(tz, ambiguous=is_dst)
# When sizes are compatible and there are repeats ('infer' won't work)
is_dst = np.hstack((is_dst, is_dst))
localized = di.tz_localize(tz, ambiguous=is_dst)
dr = dr.append(dr)
tm.assert_index_equal(dr, localized)
# When there is no dst transition, nothing special happens
dr = date_range(datetime(2011, 6, 1, 0), periods=10,
freq=pd.offsets.Hour())
is_dst = np.array([1] * 10)
localized = dr.tz_localize(tz)
localized_is_dst = dr.tz_localize(tz, ambiguous=is_dst)
tm.assert_index_equal(localized, localized_is_dst)
# TODO: belongs outside tz_localize tests?
@pytest.mark.parametrize('tz', ['Europe/London', 'dateutil/Europe/London'])
def test_dti_construction_ambiguous_endpoint(self, tz):
# construction with an ambiguous end-point
# GH#11626
# FIXME: This next block fails to raise; it was taken from an older
        # version of this test that had an indentation mistake that caused it
# to not get executed.
# with pytest.raises(pytz.AmbiguousTimeError):
# date_range("2013-10-26 23:00", "2013-10-27 01:00",
# tz="Europe/London", freq="H")
times = date_range("2013-10-26 23:00", "2013-10-27 01:00", freq="H",
tz=tz, ambiguous='infer')
assert times[0] == Timestamp('2013-10-26 23:00', tz=tz, freq="H")
if str(tz).startswith('dateutil'):
if LooseVersion(dateutil.__version__) < LooseVersion('2.6.0'):
# see GH#14621
assert times[-1] == Timestamp('2013-10-27 01:00:00+0000',
tz=tz, freq="H")
elif LooseVersion(dateutil.__version__) > LooseVersion('2.6.0'):
# fixed ambiguous behavior
assert times[-1] == Timestamp('2013-10-27 01:00:00+0100',
tz=tz, freq="H")
else:
assert times[-1] == Timestamp('2013-10-27 01:00:00+0000',
tz=tz, freq="H")
def test_dti_tz_localize_bdate_range(self):
dr = pd.bdate_range('1/1/2009', '1/1/2010')
dr_utc = pd.bdate_range('1/1/2009', '1/1/2010', tz=pytz.utc)
localized = dr.tz_localize(pytz.utc)
tm.assert_index_equal(dr_utc, localized)
# -------------------------------------------------------------
# DatetimeIndex.normalize
def test_normalize_tz(self):
rng = date_range('1/1/2000 9:30', periods=10, freq='D',
tz='US/Eastern')
result = rng.normalize()
expected = date_range('1/1/2000', periods=10, freq='D',
tz='US/Eastern')
tm.assert_index_equal(result, expected)
assert result.is_normalized
assert not rng.is_normalized
rng = date_range('1/1/2000 9:30', periods=10, freq='D', tz='UTC')
result = rng.normalize()
expected = date_range('1/1/2000', periods=10, freq='D', tz='UTC')
tm.assert_index_equal(result, expected)
assert result.is_normalized
assert not rng.is_normalized
rng = date_range('1/1/2000 9:30', periods=10, freq='D', tz=tzlocal())
result = rng.normalize()
expected = date_range('1/1/2000', periods=10, freq='D', tz=tzlocal())
tm.assert_index_equal(result, expected)
assert result.is_normalized
assert not rng.is_normalized
@td.skip_if_windows
@pytest.mark.parametrize('timezone', ['US/Pacific', 'US/Eastern', 'UTC',
'Asia/Kolkata', 'Asia/Shanghai',
'Australia/Canberra'])
def test_normalize_tz_local(self, timezone):
# GH#13459
with tm.set_timezone(timezone):
rng = date_range('1/1/2000 9:30', periods=10, freq='D',
tz=tzlocal())
result = rng.normalize()
expected = date_range('1/1/2000', periods=10, freq='D',
tz=tzlocal())
tm.assert_index_equal(result, expected)
assert result.is_normalized
assert not rng.is_normalized
# ------------------------------------------------------------
# DatetimeIndex.__new__
@pytest.mark.parametrize('prefix', ['', 'dateutil/'])
def test_dti_constructor_static_tzinfo(self, prefix):
# it works!
index = DatetimeIndex([datetime(2012, 1, 1)], tz=prefix + 'EST')
index.hour
index[0]
def test_dti_constructor_with_fixed_tz(self):
off = FixedOffset(420, '+07:00')
start = datetime(2012, 3, 11, 5, 0, 0, tzinfo=off)
end = datetime(2012, 6, 11, 5, 0, 0, tzinfo=off)
rng = date_range(start=start, end=end)
assert off == rng.tz
rng2 = date_range(start, periods=len(rng), tz=off)
tm.assert_index_equal(rng, rng2)
rng3 = date_range('3/11/2012 05:00:00+07:00',
'6/11/2012 05:00:00+07:00')
assert (rng.values == rng3.values).all()
@pytest.mark.parametrize('tzstr', ['US/Eastern', 'dateutil/US/Eastern'])
def test_dti_convert_datetime_list(self, tzstr):
dr = date_range('2012-06-02', periods=10,
tz=tzstr, name='foo')
dr2 = DatetimeIndex(list(dr), name='foo')
tm.assert_index_equal(dr, dr2)
assert dr.tz == dr2.tz
assert dr2.name == 'foo'
def test_dti_construction_univalent(self):
rng = date_range('03/12/2012 00:00', periods=10, freq='W-FRI',
tz='US/Eastern')
rng2 = DatetimeIndex(data=rng, tz='US/Eastern')
tm.assert_index_equal(rng, rng2)
@pytest.mark.parametrize('tz', [pytz.timezone('US/Eastern'),
gettz('US/Eastern')])
def test_dti_from_tzaware_datetime(self, tz):
d = [datetime(2012, 8, 19, tzinfo=tz)]
index = DatetimeIndex(d)
assert timezones.tz_compare(index.tz, tz)
@pytest.mark.parametrize('tzstr', ['US/Eastern', 'dateutil/US/Eastern'])
def test_dti_tz_constructors(self, tzstr):
""" Test different DatetimeIndex constructions with timezone
Follow-up of GH#4229
"""
arr = ['11/10/2005 08:00:00', '11/10/2005 09:00:00']
idx1 = to_datetime(arr).tz_localize(tzstr)
idx2 = DatetimeIndex(start="2005-11-10 08:00:00", freq='H', periods=2,
tz=tzstr)
idx3 = DatetimeIndex(arr, tz=tzstr)
idx4 = DatetimeIndex(np.array(arr), tz=tzstr)
for other in [idx2, idx3, idx4]:
tm.assert_index_equal(idx1, other)
# -------------------------------------------------------------
# Unsorted
def test_join_utc_convert(self, join_type):
rng = date_range('1/1/2011', periods=100, freq='H', tz='utc')
left = rng.tz_convert('US/Eastern')
right = rng.tz_convert('Europe/Berlin')
result = left.join(left[:-5], how=join_type)
assert isinstance(result, DatetimeIndex)
assert result.tz == left.tz
result = left.join(right[:-5], how=join_type)
assert isinstance(result, DatetimeIndex)
assert result.tz.zone == 'UTC'
@pytest.mark.parametrize("dtype", [
None, 'datetime64[ns, CET]',
'datetime64[ns, EST]', 'datetime64[ns, UTC]'
])
def test_date_accessor(self, dtype):
# Regression test for GH#21230
expected = np.array([date(2018, 6, 4), pd.NaT])
index = DatetimeIndex(['2018-06-04 10:00:00', pd.NaT], dtype=dtype)
result = index.date
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize("dtype", [
None, 'datetime64[ns, CET]',
'datetime64[ns, EST]', 'datetime64[ns, UTC]'
])
def test_time_accessor(self, dtype):
# Regression test for GH#21267
expected = np.array([time(10, 20, 30), pd.NaT])
index = DatetimeIndex(['2018-06-04 10:20:30', pd.NaT], dtype=dtype)
result = index.time
tm.assert_numpy_array_equal(result, expected)
def test_dti_drop_dont_lose_tz(self):
# GH#2621
ind = date_range("2012-12-01", periods=10, tz="utc")
ind = ind.drop(ind[-1])
assert ind.tz is not None
def test_date_range_localize(self):
rng = date_range('3/11/2012 03:00', periods=15, freq='H',
tz='US/Eastern')
rng2 = DatetimeIndex(['3/11/2012 03:00', '3/11/2012 04:00'],
tz='US/Eastern')
rng3 = date_range('3/11/2012 03:00', periods=15, freq='H')
rng3 = rng3.tz_localize('US/Eastern')
tm.assert_index_equal(rng, rng3)
# DST transition time
val = rng[0]
exp = Timestamp('3/11/2012 03:00', tz='US/Eastern')
assert val.hour == 3
assert exp.hour == 3
assert val == exp # same UTC value
tm.assert_index_equal(rng[:2], rng2)
# Right before the DST transition
rng = date_range('3/11/2012 00:00', periods=2, freq='H',
tz='US/Eastern')
rng2 = DatetimeIndex(['3/11/2012 00:00', '3/11/2012 01:00'],
tz='US/Eastern')
tm.assert_index_equal(rng, rng2)
exp = Timestamp('3/11/2012 00:00', tz='US/Eastern')
assert exp.hour == 0
assert rng[0] == exp
exp = Timestamp('3/11/2012 01:00', tz='US/Eastern')
assert exp.hour == 1
assert rng[1] == exp
rng = date_range('3/11/2012 00:00', periods=10, freq='H',
tz='US/Eastern')
assert rng[2].hour == 3
def test_timestamp_equality_different_timezones(self):
utc_range = date_range('1/1/2000', periods=20, tz='UTC')
eastern_range = utc_range.tz_convert('US/Eastern')
berlin_range = utc_range.tz_convert('Europe/Berlin')
for a, b, c in zip(utc_range, eastern_range, berlin_range):
assert a == b
assert b == c
assert a == c
assert (utc_range == eastern_range).all()
assert (utc_range == berlin_range).all()
assert (berlin_range == eastern_range).all()
def test_dti_intersection(self):
rng = date_range('1/1/2011', periods=100, freq='H', tz='utc')
left = rng[10:90][::-1]
right = rng[20:80][::-1]
assert left.tz == rng.tz
result = left.intersection(right)
assert result.tz == left.tz
def test_dti_equals_with_tz(self):
left = date_range('1/1/2011', periods=100, freq='H', tz='utc')
right = date_range('1/1/2011', periods=100, freq='H', tz='US/Eastern')
assert not left.equals(right)
@pytest.mark.parametrize('tzstr', ['US/Eastern', 'dateutil/US/Eastern'])
def test_dti_tz_nat(self, tzstr):
idx = DatetimeIndex([Timestamp("2013-1-1", tz=tzstr), pd.NaT])
assert isna(idx[1])
assert idx[0].tzinfo is not None
@pytest.mark.parametrize('tzstr', ['US/Eastern', 'dateutil/US/Eastern'])
def test_dti_astype_asobject_tzinfos(self, tzstr):
# GH#1345
# dates around a dst transition
rng = date_range('2/13/2010', '5/6/2010', tz=tzstr)
objs = rng.astype(object)
for i, x in enumerate(objs):
exval = rng[i]
assert x == exval
assert x.tzinfo == exval.tzinfo
@pytest.mark.parametrize('tzstr', ['US/Eastern', 'dateutil/US/Eastern'])
def test_dti_with_timezone_repr(self, tzstr):
rng = date_range('4/13/2010', '5/6/2010')
rng_eastern = rng.tz_localize(tzstr)
rng_repr = repr(rng_eastern)
assert '2010-04-13 00:00:00' in rng_repr
@pytest.mark.parametrize('tzstr', ['US/Eastern', 'dateutil/US/Eastern'])
def test_dti_take_dont_lose_meta(self, tzstr):
rng = date_range('1/1/2000', periods=20, tz=tzstr)
result = rng.take(lrange(5))
assert result.tz == rng.tz
assert result.freq == rng.freq
@pytest.mark.parametrize('tzstr', ['US/Eastern', 'dateutil/US/Eastern'])
def test_utc_box_timestamp_and_localize(self, tzstr):
tz = timezones.maybe_get_tz(tzstr)
rng = date_range('3/11/2012', '3/12/2012', freq='H', tz='utc')
rng_eastern = rng.tz_convert(tzstr)
expected = rng[-1].astimezone(tz)
stamp = rng_eastern[-1]
assert stamp == expected
assert stamp.tzinfo == expected.tzinfo
# right tzinfo
rng = date_range('3/13/2012', '3/14/2012', freq='H', tz='utc')
rng_eastern = rng.tz_convert(tzstr)
# test not valid for dateutil timezones.
# assert 'EDT' in repr(rng_eastern[0].tzinfo)
assert ('EDT' in repr(rng_eastern[0].tzinfo) or
'tzfile' in repr(rng_eastern[0].tzinfo))
def test_dti_to_pydatetime(self):
dt = dateutil.parser.parse('2012-06-13T01:39:00Z')
dt = dt.replace(tzinfo=tzlocal())
arr = np.array([dt], dtype=object)
result = to_datetime(arr, utc=True)
assert result.tz is pytz.utc
rng = date_range('2012-11-03 03:00', '2012-11-05 03:00', tz=tzlocal())
arr = rng.to_pydatetime()
result = to_datetime(arr, utc=True)
assert result.tz is pytz.utc
    def test_dti_to_pydatetime_fixedtz(self):
dates = np.array([datetime(2000, 1, 1, tzinfo=fixed_off),
datetime(2000, 1, 2, tzinfo=fixed_off),
datetime(2000, 1, 3, tzinfo=fixed_off)])
dti = DatetimeIndex(dates)
result = dti.to_pydatetime()
tm.assert_numpy_array_equal(dates, result)
result = dti._mpl_repr()
tm.assert_numpy_array_equal(dates, result)
@pytest.mark.parametrize('tz', [pytz.timezone('US/Central'),
gettz('US/Central')])
def test_with_tz(self, tz):
# just want it to work
start = datetime(2011, 3, 12, tzinfo=pytz.utc)
dr = bdate_range(start, periods=50, freq=pd.offsets.Hour())
assert dr.tz is pytz.utc
# DateRange with naive datetimes
dr = bdate_range('1/1/2005', '1/1/2009', tz=pytz.utc)
dr = bdate_range('1/1/2005', '1/1/2009', tz=tz)
# normalized
central = dr.tz_convert(tz)
assert central.tz is tz
naive = central[0].to_pydatetime().replace(tzinfo=None)
comp = tslib._localize_pydatetime(naive, tz).tzinfo
assert central[0].tz is comp
# compare vs a localized tz
naive = dr[0].to_pydatetime().replace(tzinfo=None)
comp = tslib._localize_pydatetime(naive, tz).tzinfo
assert central[0].tz is comp
# datetimes with tzinfo set
dr = bdate_range(datetime(2005, 1, 1, tzinfo=pytz.utc),
datetime(2009, 1, 1, tzinfo=pytz.utc))
with pytest.raises(Exception):
bdate_range(datetime(2005, 1, 1, tzinfo=pytz.utc), '1/1/2009',
tz=tz)
@pytest.mark.parametrize('prefix', ['', 'dateutil/'])
def test_field_access_localize(self, prefix):
strdates = ['1/1/2012', '3/1/2012', '4/1/2012']
rng = DatetimeIndex(strdates, tz=prefix + 'US/Eastern')
assert (rng.hour == 0).all()
# a more unusual time zone, #1946
dr = date_range('2011-10-02 00:00', freq='h', periods=10,
tz=prefix + 'America/Atikokan')
expected = Index(np.arange(10, dtype=np.int64))
tm.assert_index_equal(dr.hour, expected)
@pytest.mark.parametrize('tz', [pytz.timezone('US/Eastern'),
gettz('US/Eastern')])
def test_dti_convert_tz_aware_datetime_datetime(self, tz):
# GH#1581
dates = [datetime(2000, 1, 1), datetime(2000, 1, 2),
datetime(2000, 1, 3)]
dates_aware = [tslib._localize_pydatetime(x, tz) for x in dates]
result = DatetimeIndex(dates_aware)
assert timezones.tz_compare(result.tz, tz)
converted = to_datetime(dates_aware, utc=True)
ex_vals = np.array([Timestamp(x).value for x in dates_aware])
tm.assert_numpy_array_equal(converted.asi8, ex_vals)
assert converted.tz is pytz.utc
def test_dti_union_aware(self):
# non-overlapping
rng = date_range("2012-11-15 00:00:00", periods=6, freq="H",
tz="US/Central")
rng2 = date_range("2012-11-15 12:00:00", periods=6, freq="H",
tz="US/Eastern")
result = rng.union(rng2)
assert result.tz.zone == 'UTC'
@pytest.mark.parametrize('tz', [None, 'UTC', "US/Central",
dateutil.tz.tzoffset(None, -28800)])
@pytest.mark.usefixtures("datetime_tz_utc")
@pytest.mark.skipif(not PY3, reason="datetime.timezone not in PY2")
def test_iteration_preserves_nanoseconds(self, tz):
# GH 19603
index = DatetimeIndex(["2018-02-08 15:00:00.168456358",
"2018-02-08 15:00:00.168456359"], tz=tz)
for i, ts in enumerate(index):
assert ts == index[i]
class TestDateRange(object):
"""Tests for date_range with timezones"""
def test_hongkong_tz_convert(self):
# GH#1673 smoke test
dr = date_range('2012-01-01', '2012-01-10', freq='D', tz='Hongkong')
# it works!
dr.hour
@pytest.mark.parametrize('tzstr', ['US/Eastern', 'dateutil/US/Eastern'])
def test_date_range_span_dst_transition(self, tzstr):
# GH#1778
# Standard -> Daylight Savings Time
dr = date_range('03/06/2012 00:00', periods=200, freq='W-FRI',
tz='US/Eastern')
assert (dr.hour == 0).all()
dr = date_range('2012-11-02', periods=10, tz=tzstr)
assert (dr.hour == 0).all()
@pytest.mark.parametrize('tzstr', ['US/Eastern', 'dateutil/US/Eastern'])
def test_date_range_timezone_str_argument(self, tzstr):
tz = timezones.maybe_get_tz(tzstr)
result = date_range('1/1/2000', periods=10, tz=tzstr)
expected = date_range('1/1/2000', periods=10, tz=tz)
tm.assert_index_equal(result, expected)
def test_date_range_with_fixedoffset_noname(self):
off = fixed_off_no_name
start = datetime(2012, 3, 11, 5, 0, 0, tzinfo=off)
end = datetime(2012, 6, 11, 5, 0, 0, tzinfo=off)
rng = date_range(start=start, end=end)
assert off == rng.tz
idx = Index([start, end])
assert off == idx.tz
@pytest.mark.parametrize('tzstr', ['US/Eastern', 'dateutil/US/Eastern'])
def test_date_range_with_tz(self, tzstr):
stamp = Timestamp('3/11/2012 05:00', tz=tzstr)
assert stamp.hour == 5
rng = date_range('3/11/2012 04:00', periods=10, freq='H',
tz=tzstr)
assert stamp == rng[1]
class TestToDatetime(object):
"""Tests for the to_datetime constructor with timezones"""
def test_to_datetime_utc(self):
arr = np.array([dateutil.parser.parse('2012-06-13T01:39:00Z')],
dtype=object)
result = to_datetime(arr, utc=True)
assert result.tz is pytz.utc
def test_to_datetime_fixed_offset(self):
dates = [datetime(2000, 1, 1, tzinfo=fixed_off),
datetime(2000, 1, 2, tzinfo=fixed_off),
datetime(2000, 1, 3, tzinfo=fixed_off)]
result = to_datetime(dates)
assert result.tz == fixed_off
| bsd-3-clause |
oemof/oemof_examples | oemof_examples/oemof.solph/v0.4.x/generic_chp/mchp.py | 1 | 2975 | # -*- coding: utf-8 -*-
"""
General description
-------------------
Example that illustrates how the custom component `GenericCHP` can be used.
In this case it is used to model a motoric (engine-based) CHP unit.
Installation requirements
-------------------------
This example requires the version v0.4.x of oemof. Install by:
pip install 'oemof.solph>=0.4,<0.5'
"""
__copyright__ = "oemof developer group"
__license__ = "GPLv3"
import os
import pandas as pd
from oemof import solph
from oemof.network.network import Node
try:
import matplotlib.pyplot as plt
except ImportError:
plt = None
# read sequence data
full_filename = os.path.join(os.getcwd(), "generic_chp.csv")
data = pd.read_csv(full_filename, sep=",")
# select periods
periods = len(data) - 1
# create an energy system
idx = pd.date_range("1/1/2017", periods=periods, freq="H")
es = solph.EnergySystem(timeindex=idx)
Node.registry = es
# resources
bgas = solph.Bus(label="bgas")
rgas = solph.Source(label="rgas", outputs={bgas: solph.Flow()})
# heat
bth = solph.Bus(label="bth")
# dummy source at high costs that serves the residual load
source_th = solph.Source(
label="source_th", outputs={bth: solph.Flow(variable_costs=1000)}
)
demand_th = solph.Sink(
label="demand_th",
inputs={bth: solph.Flow(fix=data["demand_th"], nominal_value=200)},
)
# power
bel = solph.Bus(label="bel")
demand_el = solph.Sink(
label="demand_el",
inputs={bel: solph.Flow(variable_costs=data["price_el"])},
)
# motoric chp
mchp = solph.components.GenericCHP(
label="motoric_chp",
fuel_input={
bgas: solph.Flow(
H_L_FG_share_max=[0.18 for p in range(0, periods)],
H_L_FG_share_min=[0.41 for p in range(0, periods)],
)
},
electrical_output={
bel: solph.Flow(
P_max_woDH=[200 for p in range(0, periods)],
P_min_woDH=[100 for p in range(0, periods)],
Eta_el_max_woDH=[0.44 for p in range(0, periods)],
Eta_el_min_woDH=[0.40 for p in range(0, periods)],
)
},
heat_output={bth: solph.Flow(Q_CW_min=[0 for p in range(0, periods)])},
Beta=[0 for p in range(0, periods)],
fixed_costs=0,
back_pressure=False,
)
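# Note (added for clarity; see the oemof.solph documentation of GenericCHP for
# the authoritative definitions): the lists above are hourly plant
# characteristics. H_L_FG_share_max/min are the shares of flue gas losses at
# maximal/minimal heat extraction, P_max_woDH/P_min_woDH and
# Eta_el_max_woDH/Eta_el_min_woDH are the electrical power bounds and
# efficiencies without district heating extraction, and Q_CW_min is the
# minimal heat flow to the cooling water.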
# create an optimization problem and solve it
om = solph.Model(es)
# debugging
# om.write('generic_chp.lp', io_options={'symbolic_solver_labels': True})
# solve model
om.solve(solver="cbc", solve_kwargs={"tee": True})
# create result object
results = solph.processing.results(om)
# plot data
if plt is not None:
# plot PQ diagram from component results
data = results[(mchp, None)]["sequences"]
ax = data.plot(kind="scatter", x="Q", y="P", grid=True)
ax.set_xlabel("Q (MW)")
ax.set_ylabel("P (MW)")
plt.show()
# plot thermal bus
data = solph.views.node(results, "bth")["sequences"]
ax = data.plot(kind="line", drawstyle="steps-post", grid=True)
ax.set_xlabel("Time (h)")
ax.set_ylabel("Q (MW)")
plt.show()
| gpl-3.0 |
louispotok/pandas | asv_bench/benchmarks/multiindex_object.py | 3 | 3894 | import string
import numpy as np
import pandas.util.testing as tm
from pandas import date_range, MultiIndex
from .pandas_vb_common import setup # noqa
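# Note (added for clarity, not part of the original file): asv collects every
# ``time_*`` method of the classes below as a benchmark; ``setup`` and
# ``setup_cache`` run before timing, so index construction is excluded from
# the measured times.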
class GetLoc(object):
goal_time = 0.2
def setup(self):
self.mi_large = MultiIndex.from_product(
[np.arange(1000), np.arange(20), list(string.ascii_letters)],
names=['one', 'two', 'three'])
self.mi_med = MultiIndex.from_product(
[np.arange(1000), np.arange(10), list('A')],
names=['one', 'two', 'three'])
self.mi_small = MultiIndex.from_product(
[np.arange(100), list('A'), list('A')],
names=['one', 'two', 'three'])
def time_large_get_loc(self):
self.mi_large.get_loc((999, 19, 'Z'))
def time_large_get_loc_warm(self):
for _ in range(1000):
self.mi_large.get_loc((999, 19, 'Z'))
def time_med_get_loc(self):
self.mi_med.get_loc((999, 9, 'A'))
def time_med_get_loc_warm(self):
for _ in range(1000):
self.mi_med.get_loc((999, 9, 'A'))
def time_string_get_loc(self):
self.mi_small.get_loc((99, 'A', 'A'))
def time_small_get_loc_warm(self):
for _ in range(1000):
self.mi_small.get_loc((99, 'A', 'A'))
class Duplicates(object):
goal_time = 0.2
def setup(self):
size = 65536
arrays = [np.random.randint(0, 8192, size),
np.random.randint(0, 1024, size)]
mask = np.random.rand(size) < 0.1
self.mi_unused_levels = MultiIndex.from_arrays(arrays)
self.mi_unused_levels = self.mi_unused_levels[mask]
def time_remove_unused_levels(self):
self.mi_unused_levels.remove_unused_levels()
class Integer(object):
goal_time = 0.2
def setup(self):
self.mi_int = MultiIndex.from_product([np.arange(1000),
np.arange(1000)],
names=['one', 'two'])
self.obj_index = np.array([(0, 10), (0, 11), (0, 12),
(0, 13), (0, 14), (0, 15),
(0, 16), (0, 17), (0, 18),
(0, 19)], dtype=object)
def time_get_indexer(self):
self.mi_int.get_indexer(self.obj_index)
def time_is_monotonic(self):
self.mi_int.is_monotonic
class Duplicated(object):
goal_time = 0.2
def setup(self):
n, k = 200, 5000
levels = [np.arange(n),
tm.makeStringIndex(n).values,
1000 + np.arange(n)]
labels = [np.random.choice(n, (k * n)) for lev in levels]
self.mi = MultiIndex(levels=levels, labels=labels)
def time_duplicated(self):
self.mi.duplicated()
class Sortlevel(object):
goal_time = 0.2
def setup(self):
n = 1182720
low, high = -4096, 4096
arrs = [np.repeat(np.random.randint(low, high, (n // k)), k)
for k in [11, 7, 5, 3, 1]]
self.mi_int = MultiIndex.from_arrays(arrs)[np.random.permutation(n)]
a = np.repeat(np.arange(100), 1000)
b = np.tile(np.arange(1000), 100)
self.mi = MultiIndex.from_arrays([a, b])
self.mi = self.mi.take(np.random.permutation(np.arange(100000)))
def time_sortlevel_int64(self):
self.mi_int.sortlevel()
def time_sortlevel_zero(self):
self.mi.sortlevel(0)
def time_sortlevel_one(self):
self.mi.sortlevel(1)
class Values(object):
goal_time = 0.2
def setup_cache(self):
level1 = range(1000)
level2 = date_range(start='1/1/2012', periods=100)
mi = MultiIndex.from_product([level1, level2])
return mi
def time_datetime_level_values_copy(self, mi):
mi.copy().values
def time_datetime_level_values_sliced(self, mi):
mi[:10].values
| bsd-3-clause |
aabadie/scikit-learn | sklearn/cluster/spectral.py | 25 | 18535 | # -*- coding: utf-8 -*-
"""Algorithms for spectral clustering"""
# Author: Gael Varoquaux [email protected]
# Brian Cheung
# Wei LI <[email protected]>
# License: BSD 3 clause
import warnings
import numpy as np
from ..base import BaseEstimator, ClusterMixin
from ..utils import check_random_state, as_float_array
from ..utils.validation import check_array
from ..utils.extmath import norm
from ..metrics.pairwise import pairwise_kernels
from ..neighbors import kneighbors_graph
from ..manifold import spectral_embedding
from .k_means_ import k_means
def discretize(vectors, copy=True, max_svd_restarts=30, n_iter_max=20,
random_state=None):
"""Search for a partition matrix (clustering) which is closest to the
eigenvector embedding.
Parameters
----------
vectors : array-like, shape: (n_samples, n_clusters)
The embedding space of the samples.
copy : boolean, optional, default: True
Whether to copy vectors, or perform in-place normalization.
max_svd_restarts : int, optional, default: 30
Maximum number of attempts to restart SVD if convergence fails
    n_iter_max : int, optional, default: 20
Maximum number of iterations to attempt in rotation and partition
matrix search if machine precision convergence is not reached
random_state: int seed, RandomState instance, or None (default)
A pseudo random number generator used for the initialization of the
        rotation matrix
Returns
-------
labels : array of integers, shape: n_samples
The labels of the clusters.
References
----------
- Multiclass spectral clustering, 2003
Stella X. Yu, Jianbo Shi
http://www1.icsi.berkeley.edu/~stellayu/publication/doc/2003kwayICCV.pdf
Notes
-----
The eigenvector embedding is used to iteratively search for the
closest discrete partition. First, the eigenvector embedding is
normalized to the space of partition matrices. An optimal discrete
partition matrix closest to this normalized embedding multiplied by
an initial rotation is calculated. Fixing this discrete partition
matrix, an optimal rotation matrix is calculated. These two
calculations are performed until convergence. The discrete partition
matrix is returned as the clustering solution. Used in spectral
clustering, this method tends to be faster and more robust to random
initialization than k-means.
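    Examples
    --------
    A minimal sketch (added for illustration; the embedding below is random
    toy data rather than a real spectral embedding):
    >>> import numpy as np
    >>> from sklearn.cluster.spectral import discretize
    >>> rng = np.random.RandomState(0)
    >>> embedding = rng.rand(20, 3)      # shape (n_samples, n_clusters)
    >>> labels = discretize(embedding, random_state=rng)
    >>> # ``labels`` is an integer array with one cluster id per sample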
"""
from scipy.sparse import csc_matrix
from scipy.linalg import LinAlgError
random_state = check_random_state(random_state)
vectors = as_float_array(vectors, copy=copy)
eps = np.finfo(float).eps
n_samples, n_components = vectors.shape
# Normalize the eigenvectors to an equal length of a vector of ones.
# Reorient the eigenvectors to point in the negative direction with respect
# to the first element. This may have to do with constraining the
# eigenvectors to lie in a specific quadrant to make the discretization
# search easier.
norm_ones = np.sqrt(n_samples)
for i in range(vectors.shape[1]):
vectors[:, i] = (vectors[:, i] / norm(vectors[:, i])) \
* norm_ones
if vectors[0, i] != 0:
vectors[:, i] = -1 * vectors[:, i] * np.sign(vectors[0, i])
# Normalize the rows of the eigenvectors. Samples should lie on the unit
# hypersphere centered at the origin. This transforms the samples in the
# embedding space to the space of partition matrices.
vectors = vectors / np.sqrt((vectors ** 2).sum(axis=1))[:, np.newaxis]
svd_restarts = 0
has_converged = False
# If there is an exception we try to randomize and rerun SVD again
# do this max_svd_restarts times.
while (svd_restarts < max_svd_restarts) and not has_converged:
# Initialize first column of rotation matrix with a row of the
# eigenvectors
rotation = np.zeros((n_components, n_components))
rotation[:, 0] = vectors[random_state.randint(n_samples), :].T
# To initialize the rest of the rotation matrix, find the rows
# of the eigenvectors that are as orthogonal to each other as
# possible
c = np.zeros(n_samples)
for j in range(1, n_components):
# Accumulate c to ensure row is as orthogonal as possible to
# previous picks as well as current one
c += np.abs(np.dot(vectors, rotation[:, j - 1]))
rotation[:, j] = vectors[c.argmin(), :].T
last_objective_value = 0.0
n_iter = 0
while not has_converged:
n_iter += 1
t_discrete = np.dot(vectors, rotation)
labels = t_discrete.argmax(axis=1)
vectors_discrete = csc_matrix(
(np.ones(len(labels)), (np.arange(0, n_samples), labels)),
shape=(n_samples, n_components))
t_svd = vectors_discrete.T * vectors
try:
U, S, Vh = np.linalg.svd(t_svd)
svd_restarts += 1
except LinAlgError:
print("SVD did not converge, randomizing and trying again")
break
ncut_value = 2.0 * (n_samples - S.sum())
if ((abs(ncut_value - last_objective_value) < eps) or
(n_iter > n_iter_max)):
has_converged = True
else:
# otherwise calculate rotation and continue
last_objective_value = ncut_value
rotation = np.dot(Vh.T, U.T)
if not has_converged:
raise LinAlgError('SVD did not converge')
return labels
def spectral_clustering(affinity, n_clusters=8, n_components=None,
eigen_solver=None, random_state=None, n_init=10,
eigen_tol=0.0, assign_labels='kmeans'):
"""Apply clustering to a projection to the normalized laplacian.
In practice Spectral Clustering is very useful when the structure of
the individual clusters is highly non-convex or more generally when
a measure of the center and spread of the cluster is not a suitable
description of the complete cluster. For instance when clusters are
    nested circles on the 2D plane.
If affinity is the adjacency matrix of a graph, this method can be
used to find normalized graph cuts.
Read more in the :ref:`User Guide <spectral_clustering>`.
Parameters
-----------
affinity : array-like or sparse matrix, shape: (n_samples, n_samples)
The affinity matrix describing the relationship of the samples to
embed. **Must be symmetric**.
Possible examples:
- adjacency matrix of a graph,
- heat kernel of the pairwise distance matrix of the samples,
- symmetric k-nearest neighbours connectivity matrix of the samples.
n_clusters : integer, optional
Number of clusters to extract.
n_components : integer, optional, default is n_clusters
Number of eigen vectors to use for the spectral embedding
eigen_solver : {None, 'arpack', 'lobpcg', or 'amg'}
The eigenvalue decomposition strategy to use. AMG requires pyamg
to be installed. It can be faster on very large, sparse problems,
but may also lead to instabilities
random_state : int seed, RandomState instance, or None (default)
A pseudo random number generator used for the initialization
of the lobpcg eigen vectors decomposition when eigen_solver == 'amg'
and by the K-Means initialization.
n_init : int, optional, default: 10
Number of time the k-means algorithm will be run with different
centroid seeds. The final results will be the best output of
n_init consecutive runs in terms of inertia.
eigen_tol : float, optional, default: 0.0
Stopping criterion for eigendecomposition of the Laplacian matrix
when using arpack eigen_solver.
assign_labels : {'kmeans', 'discretize'}, default: 'kmeans'
The strategy to use to assign labels in the embedding
space. There are two ways to assign labels after the laplacian
embedding. k-means can be applied and is a popular choice. But it can
also be sensitive to initialization. Discretization is another
approach which is less sensitive to random initialization. See
the 'Multiclass spectral clustering' paper referenced below for
more details on the discretization approach.
Returns
-------
labels : array of integers, shape: n_samples
The labels of the clusters.
References
----------
- Normalized cuts and image segmentation, 2000
Jianbo Shi, Jitendra Malik
http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.160.2324
- A Tutorial on Spectral Clustering, 2007
Ulrike von Luxburg
http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.165.9323
- Multiclass spectral clustering, 2003
Stella X. Yu, Jianbo Shi
http://www1.icsi.berkeley.edu/~stellayu/publication/doc/2003kwayICCV.pdf
Notes
------
    The graph should contain only one connected component; otherwise
    the results make little sense.
This algorithm solves the normalized cut for k=2: it is a
normalized spectral clustering.
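    Examples
    --------
    A minimal sketch (added for illustration) using an RBF affinity built
    from two small, well separated groups of arbitrary toy points:
    >>> import numpy as np
    >>> from sklearn.cluster import spectral_clustering
    >>> from sklearn.metrics.pairwise import rbf_kernel
    >>> X = np.array([[0., 0.], [0.1, 0.], [0., 0.1],
    ...               [5., 5.], [5.1, 5.], [5., 5.1]])
    >>> affinity = rbf_kernel(X, gamma=1.0)
    >>> labels = spectral_clustering(affinity, n_clusters=2, random_state=0)
    >>> # ``labels`` separates the first three from the last three samples
    >>> # (up to an arbitrary permutation of the two cluster ids).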
"""
if assign_labels not in ('kmeans', 'discretize'):
raise ValueError("The 'assign_labels' parameter should be "
"'kmeans' or 'discretize', but '%s' was given"
% assign_labels)
random_state = check_random_state(random_state)
n_components = n_clusters if n_components is None else n_components
maps = spectral_embedding(affinity, n_components=n_components,
eigen_solver=eigen_solver,
random_state=random_state,
eigen_tol=eigen_tol, drop_first=False)
if assign_labels == 'kmeans':
_, labels, _ = k_means(maps, n_clusters, random_state=random_state,
n_init=n_init)
else:
labels = discretize(maps, random_state=random_state)
return labels
class SpectralClustering(BaseEstimator, ClusterMixin):
"""Apply clustering to a projection to the normalized laplacian.
In practice Spectral Clustering is very useful when the structure of
the individual clusters is highly non-convex or more generally when
a measure of the center and spread of the cluster is not a suitable
description of the complete cluster. For instance when clusters are
    nested circles on the 2D plane.
If affinity is the adjacency matrix of a graph, this method can be
used to find normalized graph cuts.
    When calling ``fit``, an affinity matrix is constructed using either a
    kernel function such as the Gaussian (aka RBF) kernel of the euclidean
    distance ``d(X, X)``::
np.exp(-gamma * d(X,X) ** 2)
or a k-nearest neighbors connectivity matrix.
Alternatively, using ``precomputed``, a user-provided affinity
matrix can be used.
Read more in the :ref:`User Guide <spectral_clustering>`.
Parameters
-----------
n_clusters : integer, optional
The dimension of the projection subspace.
affinity : string, array-like or callable, default 'rbf'
If a string, this may be one of 'nearest_neighbors', 'precomputed',
'rbf' or one of the kernels supported by
`sklearn.metrics.pairwise_kernels`.
Only kernels that produce similarity scores (non-negative values that
increase with similarity) should be used. This property is not checked
by the clustering algorithm.
gamma : float, default=1.0
Scaling factor of RBF, polynomial, exponential chi^2 and
sigmoid affinity kernel. Ignored for
``affinity='nearest_neighbors'``.
degree : float, default=3
Degree of the polynomial kernel. Ignored by other kernels.
coef0 : float, default=1
Zero coefficient for polynomial and sigmoid kernels.
Ignored by other kernels.
n_neighbors : integer
Number of neighbors to use when constructing the affinity matrix using
the nearest neighbors method. Ignored for ``affinity='rbf'``.
eigen_solver : {None, 'arpack', 'lobpcg', or 'amg'}
The eigenvalue decomposition strategy to use. AMG requires pyamg
to be installed. It can be faster on very large, sparse problems,
but may also lead to instabilities
random_state : int seed, RandomState instance, or None (default)
A pseudo random number generator used for the initialization
of the lobpcg eigen vectors decomposition when eigen_solver == 'amg'
and by the K-Means initialization.
n_init : int, optional, default: 10
Number of time the k-means algorithm will be run with different
centroid seeds. The final results will be the best output of
n_init consecutive runs in terms of inertia.
eigen_tol : float, optional, default: 0.0
Stopping criterion for eigendecomposition of the Laplacian matrix
when using arpack eigen_solver.
assign_labels : {'kmeans', 'discretize'}, default: 'kmeans'
The strategy to use to assign labels in the embedding
space. There are two ways to assign labels after the laplacian
embedding. k-means can be applied and is a popular choice. But it can
also be sensitive to initialization. Discretization is another approach
which is less sensitive to random initialization.
kernel_params : dictionary of string to any, optional
Parameters (keyword arguments) and values for kernel passed as
callable object. Ignored by other kernels.
n_jobs : int, optional (default = 1)
The number of parallel jobs to run.
If ``-1``, then the number of jobs is set to the number of CPU cores.
Attributes
----------
affinity_matrix_ : array-like, shape (n_samples, n_samples)
        Affinity matrix used for clustering. Available only after calling
``fit``.
labels_ :
Labels of each point
Notes
-----
If you have an affinity matrix, such as a distance matrix,
for which 0 means identical elements, and high values means
very dissimilar elements, it can be transformed in a
similarity matrix that is well suited for the algorithm by
applying the Gaussian (RBF, heat) kernel::
np.exp(- dist_matrix ** 2 / (2. * delta ** 2))
Where ``delta`` is a free parameter representing the width of the Gaussian
kernel.
Another alternative is to take a symmetric version of the k
nearest neighbors connectivity matrix of the points.
If the pyamg package is installed, it is used: this greatly
speeds up computation.
References
----------
- Normalized cuts and image segmentation, 2000
Jianbo Shi, Jitendra Malik
http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.160.2324
- A Tutorial on Spectral Clustering, 2007
Ulrike von Luxburg
http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.165.9323
- Multiclass spectral clustering, 2003
Stella X. Yu, Jianbo Shi
http://www1.icsi.berkeley.edu/~stellayu/publication/doc/2003kwayICCV.pdf
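    Examples
    --------
    A minimal usage sketch (added for illustration; the points below are
    arbitrary toy data forming two loose groups):
    >>> import numpy as np
    >>> from sklearn.cluster import SpectralClustering
    >>> X = np.array([[1, 1], [2, 1], [1, 0],
    ...               [4, 7], [3, 5], [3, 6]])
    >>> model = SpectralClustering(n_clusters=2, affinity='rbf', gamma=1.0,
    ...                            random_state=0)
    >>> labels = model.fit(X).labels_
    >>> # ``labels`` holds one integer cluster id per row of ``X``.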
"""
def __init__(self, n_clusters=8, eigen_solver=None, random_state=None,
n_init=10, gamma=1., affinity='rbf', n_neighbors=10,
eigen_tol=0.0, assign_labels='kmeans', degree=3, coef0=1,
kernel_params=None, n_jobs=1):
self.n_clusters = n_clusters
self.eigen_solver = eigen_solver
self.random_state = random_state
self.n_init = n_init
self.gamma = gamma
self.affinity = affinity
self.n_neighbors = n_neighbors
self.eigen_tol = eigen_tol
self.assign_labels = assign_labels
self.degree = degree
self.coef0 = coef0
self.kernel_params = kernel_params
self.n_jobs = n_jobs
def fit(self, X, y=None):
"""Creates an affinity matrix for X using the selected affinity,
then applies spectral clustering to this affinity matrix.
Parameters
----------
X : array-like or sparse matrix, shape (n_samples, n_features)
OR, if affinity==`precomputed`, a precomputed affinity
matrix of shape (n_samples, n_samples)
"""
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'],
dtype=np.float64)
if X.shape[0] == X.shape[1] and self.affinity != "precomputed":
warnings.warn("The spectral clustering API has changed. ``fit``"
"now constructs an affinity matrix from data. To use"
" a custom affinity matrix, "
"set ``affinity=precomputed``.")
if self.affinity == 'nearest_neighbors':
            connectivity = kneighbors_graph(X, n_neighbors=self.n_neighbors,
                                            include_self=True,
                                            n_jobs=self.n_jobs)
self.affinity_matrix_ = 0.5 * (connectivity + connectivity.T)
elif self.affinity == 'precomputed':
self.affinity_matrix_ = X
else:
params = self.kernel_params
if params is None:
params = {}
if not callable(self.affinity):
params['gamma'] = self.gamma
params['degree'] = self.degree
params['coef0'] = self.coef0
self.affinity_matrix_ = pairwise_kernels(X, metric=self.affinity,
filter_params=True,
**params)
random_state = check_random_state(self.random_state)
self.labels_ = spectral_clustering(self.affinity_matrix_,
n_clusters=self.n_clusters,
eigen_solver=self.eigen_solver,
random_state=random_state,
n_init=self.n_init,
eigen_tol=self.eigen_tol,
assign_labels=self.assign_labels)
return self
@property
def _pairwise(self):
return self.affinity == "precomputed"
| bsd-3-clause |
Asimmetric/influxdb-python | influxdb/tests/influxdb08/dataframe_client_test.py | 8 | 12409 | # -*- coding: utf-8 -*-
"""
unit tests for the influxdb08 DataFrameClient
"""
from .client_test import _mocked_session
import unittest
import json
import requests_mock
from nose.tools import raises
from datetime import timedelta
from influxdb.tests import skipIfPYpy, using_pypy
import copy
import warnings
if not using_pypy:
import pandas as pd
from pandas.util.testing import assert_frame_equal
from influxdb.influxdb08 import DataFrameClient
@skipIfPYpy
class TestDataFrameClient(unittest.TestCase):
def setUp(self):
# By default, raise exceptions on warnings
warnings.simplefilter('error', FutureWarning)
def test_write_points_from_dataframe(self):
now = pd.Timestamp('1970-01-01 00:00+00:00')
dataframe = pd.DataFrame(data=[["1", 1, 1.0], ["2", 2, 2.0]],
index=[now, now + timedelta(hours=1)],
columns=["column_one", "column_two",
"column_three"])
points = [
{
"points": [
["1", 1, 1.0, 0],
["2", 2, 2.0, 3600]
],
"name": "foo",
"columns": ["column_one", "column_two", "column_three", "time"]
}
]
with requests_mock.Mocker() as m:
m.register_uri(requests_mock.POST,
"http://localhost:8086/db/db/series")
cli = DataFrameClient(database='db')
cli.write_points({"foo": dataframe})
self.assertListEqual(json.loads(m.last_request.body), points)
def test_write_points_from_dataframe_with_float_nan(self):
now = pd.Timestamp('1970-01-01 00:00+00:00')
dataframe = pd.DataFrame(data=[[1, float("NaN"), 1.0], [2, 2, 2.0]],
index=[now, now + timedelta(hours=1)],
columns=["column_one", "column_two",
"column_three"])
points = [
{
"points": [
[1, None, 1.0, 0],
[2, 2, 2.0, 3600]
],
"name": "foo",
"columns": ["column_one", "column_two", "column_three", "time"]
}
]
with requests_mock.Mocker() as m:
m.register_uri(requests_mock.POST,
"http://localhost:8086/db/db/series")
cli = DataFrameClient(database='db')
cli.write_points({"foo": dataframe})
self.assertListEqual(json.loads(m.last_request.body), points)
def test_write_points_from_dataframe_in_batches(self):
now = pd.Timestamp('1970-01-01 00:00+00:00')
dataframe = pd.DataFrame(data=[["1", 1, 1.0], ["2", 2, 2.0]],
index=[now, now + timedelta(hours=1)],
columns=["column_one", "column_two",
"column_three"])
with requests_mock.Mocker() as m:
m.register_uri(requests_mock.POST,
"http://localhost:8086/db/db/series")
cli = DataFrameClient(database='db')
self.assertTrue(cli.write_points({"foo": dataframe}, batch_size=1))
def test_write_points_from_dataframe_with_numeric_column_names(self):
now = pd.Timestamp('1970-01-01 00:00+00:00')
# df with numeric column names
dataframe = pd.DataFrame(data=[["1", 1, 1.0], ["2", 2, 2.0]],
index=[now, now + timedelta(hours=1)])
points = [
{
"points": [
["1", 1, 1.0, 0],
["2", 2, 2.0, 3600]
],
"name": "foo",
"columns": ['0', '1', '2', "time"]
}
]
with requests_mock.Mocker() as m:
m.register_uri(requests_mock.POST,
"http://localhost:8086/db/db/series")
cli = DataFrameClient(database='db')
cli.write_points({"foo": dataframe})
self.assertListEqual(json.loads(m.last_request.body), points)
def test_write_points_from_dataframe_with_period_index(self):
dataframe = pd.DataFrame(data=[["1", 1, 1.0], ["2", 2, 2.0]],
index=[pd.Period('1970-01-01'),
pd.Period('1970-01-02')],
columns=["column_one", "column_two",
"column_three"])
points = [
{
"points": [
["1", 1, 1.0, 0],
["2", 2, 2.0, 86400]
],
"name": "foo",
"columns": ["column_one", "column_two", "column_three", "time"]
}
]
with requests_mock.Mocker() as m:
m.register_uri(requests_mock.POST,
"http://localhost:8086/db/db/series")
cli = DataFrameClient(database='db')
cli.write_points({"foo": dataframe})
self.assertListEqual(json.loads(m.last_request.body), points)
def test_write_points_from_dataframe_with_time_precision(self):
now = pd.Timestamp('1970-01-01 00:00+00:00')
dataframe = pd.DataFrame(data=[["1", 1, 1.0], ["2", 2, 2.0]],
index=[now, now + timedelta(hours=1)],
columns=["column_one", "column_two",
"column_three"])
points = [
{
"points": [
["1", 1, 1.0, 0],
["2", 2, 2.0, 3600]
],
"name": "foo",
"columns": ["column_one", "column_two", "column_three", "time"]
}
]
points_ms = copy.deepcopy(points)
points_ms[0]["points"][1][-1] = 3600 * 1000
points_us = copy.deepcopy(points)
points_us[0]["points"][1][-1] = 3600 * 1000000
with requests_mock.Mocker() as m:
m.register_uri(requests_mock.POST,
"http://localhost:8086/db/db/series")
cli = DataFrameClient(database='db')
cli.write_points({"foo": dataframe}, time_precision='s')
self.assertListEqual(json.loads(m.last_request.body), points)
cli.write_points({"foo": dataframe}, time_precision='m')
self.assertListEqual(json.loads(m.last_request.body), points_ms)
cli.write_points({"foo": dataframe}, time_precision='u')
self.assertListEqual(json.loads(m.last_request.body), points_us)
@raises(TypeError)
def test_write_points_from_dataframe_fails_without_time_index(self):
dataframe = pd.DataFrame(data=[["1", 1, 1.0], ["2", 2, 2.0]],
columns=["column_one", "column_two",
"column_three"])
with requests_mock.Mocker() as m:
m.register_uri(requests_mock.POST,
"http://localhost:8086/db/db/series")
cli = DataFrameClient(database='db')
cli.write_points({"foo": dataframe})
@raises(TypeError)
def test_write_points_from_dataframe_fails_with_series(self):
now = pd.Timestamp('1970-01-01 00:00+00:00')
dataframe = pd.Series(data=[1.0, 2.0],
index=[now, now + timedelta(hours=1)])
with requests_mock.Mocker() as m:
m.register_uri(requests_mock.POST,
"http://localhost:8086/db/db/series")
cli = DataFrameClient(database='db')
cli.write_points({"foo": dataframe})
def test_query_into_dataframe(self):
data = [
{
"name": "foo",
"columns": ["time", "sequence_number", "column_one"],
"points": [
[3600, 16, 2], [3600, 15, 1],
[0, 14, 2], [0, 13, 1]
]
}
]
# dataframe sorted ascending by time first, then sequence_number
dataframe = pd.DataFrame(data=[[13, 1], [14, 2], [15, 1], [16, 2]],
index=pd.to_datetime([0, 0,
3600, 3600],
unit='s', utc=True),
columns=['sequence_number', 'column_one'])
with _mocked_session('get', 200, data):
cli = DataFrameClient('host', 8086, 'username', 'password', 'db')
result = cli.query('select column_one from foo;')
assert_frame_equal(dataframe, result)
def test_query_multiple_time_series(self):
data = [
{
"name": "series1",
"columns": ["time", "mean", "min", "max", "stddev"],
"points": [[0, 323048, 323048, 323048, 0]]
},
{
"name": "series2",
"columns": ["time", "mean", "min", "max", "stddev"],
"points": [[0, -2.8233, -2.8503, -2.7832, 0.0173]]
},
{
"name": "series3",
"columns": ["time", "mean", "min", "max", "stddev"],
"points": [[0, -0.01220, -0.01220, -0.01220, 0]]
}
]
dataframes = {
'series1': pd.DataFrame(data=[[323048, 323048, 323048, 0]],
index=pd.to_datetime([0], unit='s',
utc=True),
columns=['mean', 'min', 'max', 'stddev']),
'series2': pd.DataFrame(data=[[-2.8233, -2.8503, -2.7832, 0.0173]],
index=pd.to_datetime([0], unit='s',
utc=True),
columns=['mean', 'min', 'max', 'stddev']),
'series3': pd.DataFrame(data=[[-0.01220, -0.01220, -0.01220, 0]],
index=pd.to_datetime([0], unit='s',
utc=True),
columns=['mean', 'min', 'max', 'stddev'])
}
with _mocked_session('get', 200, data):
cli = DataFrameClient('host', 8086, 'username', 'password', 'db')
result = cli.query("""select mean(value), min(value), max(value),
stddev(value) from series1, series2, series3""")
self.assertEqual(dataframes.keys(), result.keys())
for key in dataframes.keys():
assert_frame_equal(dataframes[key], result[key])
def test_query_with_empty_result(self):
with _mocked_session('get', 200, []):
cli = DataFrameClient('host', 8086, 'username', 'password', 'db')
result = cli.query('select column_one from foo;')
self.assertEqual(result, [])
def test_list_series(self):
response = [
{
'columns': ['time', 'name'],
'name': 'list_series_result',
'points': [[0, 'seriesA'], [0, 'seriesB']]
}
]
with _mocked_session('get', 200, response):
cli = DataFrameClient('host', 8086, 'username', 'password', 'db')
series_list = cli.get_list_series()
self.assertEqual(series_list, ['seriesA', 'seriesB'])
def test_datetime_to_epoch(self):
timestamp = pd.Timestamp('2013-01-01 00:00:00.000+00:00')
cli = DataFrameClient('host', 8086, 'username', 'password', 'db')
self.assertEqual(
cli._datetime_to_epoch(timestamp),
1356998400.0
)
self.assertEqual(
cli._datetime_to_epoch(timestamp, time_precision='s'),
1356998400.0
)
self.assertEqual(
cli._datetime_to_epoch(timestamp, time_precision='m'),
1356998400000.0
)
self.assertEqual(
cli._datetime_to_epoch(timestamp, time_precision='ms'),
1356998400000.0
)
self.assertEqual(
cli._datetime_to_epoch(timestamp, time_precision='u'),
1356998400000000.0
)
| mit |
neuroidss/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/mathtext.py | 69 | 101723 | r"""
:mod:`~matplotlib.mathtext` is a module for parsing a subset of the
TeX math syntax and rendering it to a matplotlib backend.
For a tutorial on its usage, see :ref:`mathtext-tutorial`. This
document is primarily concerned with implementation details.
The module uses pyparsing_ to parse the TeX expression.
.. _pyparsing: http://pyparsing.wikispaces.com/
The Bakoma distribution of the TeX Computer Modern fonts and the STIX
fonts are supported. There is experimental support for using
arbitrary fonts, but results may vary without proper tweaking and
metrics for those fonts.
If you find TeX expressions that don't parse or render properly,
please email [email protected], but please check KNOWN ISSUES below first.
"""
from __future__ import division
import os
from cStringIO import StringIO
from math import ceil
try:
set
except NameError:
from sets import Set as set
import unicodedata
from warnings import warn
from numpy import inf, isinf
import numpy as np
from matplotlib.pyparsing import Combine, Group, Optional, Forward, \
Literal, OneOrMore, ZeroOrMore, ParseException, Empty, \
ParseResults, Suppress, oneOf, StringEnd, ParseFatalException, \
FollowedBy, Regex, ParserElement
# Enable packrat parsing
ParserElement.enablePackrat()
from matplotlib.afm import AFM
from matplotlib.cbook import Bunch, get_realpath_and_stat, \
is_string_like, maxdict
from matplotlib.ft2font import FT2Font, FT2Image, KERNING_DEFAULT, LOAD_FORCE_AUTOHINT, LOAD_NO_HINTING
from matplotlib.font_manager import findfont, FontProperties
from matplotlib._mathtext_data import latex_to_bakoma, \
latex_to_standard, tex2uni, latex_to_cmex, stix_virtual_fonts
from matplotlib import get_data_path, rcParams
import matplotlib.colors as mcolors
import matplotlib._png as _png
####################
##############################################################################
# FONTS
def get_unicode_index(symbol):
"""get_unicode_index(symbol) -> integer
Return the integer index (from the Unicode table) of symbol. *symbol*
    can be a single unicode character, a TeX command (e.g. r'\pi'), or a
    Type1 symbol name (e.g. 'phi').
"""
    # From Unicode Technical Report #25: U+2212 minus sign is the preferred
# representation of the unary and binary minus sign rather than
# the ASCII-derived U+002D hyphen-minus, because minus sign is
# unambiguous and because it is rendered with a more desirable
# length, usually longer than a hyphen.
if symbol == '-':
return 0x2212
    try:  # This will succeed if symbol is a single unicode char
return ord(symbol)
except TypeError:
pass
    try:  # Is symbol a TeX symbol (e.g. \alpha)
return tex2uni[symbol.strip("\\")]
except KeyError:
message = """'%(symbol)s' is not a valid Unicode character or
TeX/Type1 symbol"""%locals()
raise ValueError, message
class MathtextBackend(object):
"""
The base class for the mathtext backend-specific code. The
purpose of :class:`MathtextBackend` subclasses is to interface
between mathtext and a specific matplotlib graphics backend.
Subclasses need to override the following:
- :meth:`render_glyph`
      - :meth:`render_rect_filled`
- :meth:`get_results`
And optionally, if you need to use a Freetype hinting style:
- :meth:`get_hinting_type`
"""
def __init__(self):
self.fonts_object = None
def set_canvas_size(self, w, h, d):
'Dimension the drawing canvas'
self.width = w
self.height = h
self.depth = d
def render_glyph(self, ox, oy, info):
"""
Draw a glyph described by *info* to the reference point (*ox*,
*oy*).
"""
raise NotImplementedError()
    def render_rect_filled(self, x1, y1, x2, y2):
"""
Draw a filled black rectangle from (*x1*, *y1*) to (*x2*, *y2*).
"""
raise NotImplementedError()
def get_results(self, box):
"""
Return a backend-specific tuple to return to the backend after
all processing is done.
"""
raise NotImplementedError()
def get_hinting_type(self):
"""
Get the Freetype hinting type to use with this particular
backend.
"""
return LOAD_NO_HINTING
class MathtextBackendBbox(MathtextBackend):
"""
A backend whose only purpose is to get a precise bounding box.
Only required for the Agg backend.
"""
def __init__(self, real_backend):
MathtextBackend.__init__(self)
self.bbox = [0, 0, 0, 0]
self.real_backend = real_backend
def _update_bbox(self, x1, y1, x2, y2):
self.bbox = [min(self.bbox[0], x1),
min(self.bbox[1], y1),
max(self.bbox[2], x2),
max(self.bbox[3], y2)]
def render_glyph(self, ox, oy, info):
self._update_bbox(ox + info.metrics.xmin,
oy - info.metrics.ymax,
ox + info.metrics.xmax,
oy - info.metrics.ymin)
def render_rect_filled(self, x1, y1, x2, y2):
self._update_bbox(x1, y1, x2, y2)
def get_results(self, box):
orig_height = box.height
orig_depth = box.depth
ship(0, 0, box)
bbox = self.bbox
bbox = [bbox[0] - 1, bbox[1] - 1, bbox[2] + 1, bbox[3] + 1]
self._switch_to_real_backend()
self.fonts_object.set_canvas_size(
bbox[2] - bbox[0],
(bbox[3] - bbox[1]) - orig_depth,
(bbox[3] - bbox[1]) - orig_height)
ship(-bbox[0], -bbox[1], box)
return self.fonts_object.get_results(box)
def get_hinting_type(self):
return self.real_backend.get_hinting_type()
def _switch_to_real_backend(self):
self.fonts_object.mathtext_backend = self.real_backend
self.real_backend.fonts_object = self.fonts_object
self.real_backend.ox = self.bbox[0]
self.real_backend.oy = self.bbox[1]
class MathtextBackendAggRender(MathtextBackend):
"""
Render glyphs and rectangles to an FTImage buffer, which is later
transferred to the Agg image by the Agg backend.
"""
def __init__(self):
self.ox = 0
self.oy = 0
self.image = None
MathtextBackend.__init__(self)
def set_canvas_size(self, w, h, d):
MathtextBackend.set_canvas_size(self, w, h, d)
self.image = FT2Image(ceil(w), ceil(h + d))
def render_glyph(self, ox, oy, info):
info.font.draw_glyph_to_bitmap(
self.image, ox, oy - info.metrics.ymax, info.glyph)
def render_rect_filled(self, x1, y1, x2, y2):
height = max(int(y2 - y1) - 1, 0)
if height == 0:
center = (y2 + y1) / 2.0
y = int(center - (height + 1) / 2.0)
else:
y = int(y1)
self.image.draw_rect_filled(int(x1), y, ceil(x2), y + height)
def get_results(self, box):
return (self.ox,
self.oy,
self.width,
self.height + self.depth,
self.depth,
self.image,
self.fonts_object.get_used_characters())
def get_hinting_type(self):
return LOAD_FORCE_AUTOHINT
def MathtextBackendAgg():
return MathtextBackendBbox(MathtextBackendAggRender())
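# Note on the composition above: MathtextBackendAgg() is really a
# MathtextBackendBbox wrapping the Agg renderer.  The first ship() pass in
# MathtextBackendBbox.get_results only measures (its render_glyph and
# render_rect_filled just grow self.bbox); _switch_to_real_backend then
# installs the wrapped MathtextBackendAggRender and the box tree is shipped a
# second time, offset by the measured bounding box, into an FT2Image of the
# right size.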
class MathtextBackendBitmapRender(MathtextBackendAggRender):
def get_results(self, box):
return self.image, self.depth
def MathtextBackendBitmap():
"""
A backend to generate standalone mathtext images. No additional
matplotlib backend is required.
"""
return MathtextBackendBbox(MathtextBackendBitmapRender())
class MathtextBackendPs(MathtextBackend):
"""
Store information to write a mathtext rendering to the PostScript
backend.
"""
def __init__(self):
self.pswriter = StringIO()
self.lastfont = None
def render_glyph(self, ox, oy, info):
oy = self.height - oy + info.offset
postscript_name = info.postscript_name
fontsize = info.fontsize
symbol_name = info.symbol_name
if (postscript_name, fontsize) != self.lastfont:
ps = """/%(postscript_name)s findfont
%(fontsize)s scalefont
setfont
""" % locals()
self.lastfont = postscript_name, fontsize
self.pswriter.write(ps)
ps = """%(ox)f %(oy)f moveto
/%(symbol_name)s glyphshow\n
""" % locals()
self.pswriter.write(ps)
def render_rect_filled(self, x1, y1, x2, y2):
ps = "%f %f %f %f rectfill\n" % (x1, self.height - y2, x2 - x1, y2 - y1)
self.pswriter.write(ps)
def get_results(self, box):
ship(0, -self.depth, box)
return (self.width,
self.height + self.depth,
self.depth,
self.pswriter,
self.fonts_object.get_used_characters())
class MathtextBackendPdf(MathtextBackend):
"""
Store information to write a mathtext rendering to the PDF
backend.
"""
def __init__(self):
self.glyphs = []
self.rects = []
def render_glyph(self, ox, oy, info):
filename = info.font.fname
oy = self.height - oy + info.offset
self.glyphs.append(
(ox, oy, filename, info.fontsize,
info.num, info.symbol_name))
def render_rect_filled(self, x1, y1, x2, y2):
self.rects.append((x1, self.height - y2, x2 - x1, y2 - y1))
def get_results(self, box):
ship(0, -self.depth, box)
return (self.width,
self.height + self.depth,
self.depth,
self.glyphs,
self.rects,
self.fonts_object.get_used_characters())
class MathtextBackendSvg(MathtextBackend):
"""
Store information to write a mathtext rendering to the SVG
backend.
"""
def __init__(self):
self.svg_glyphs = []
self.svg_rects = []
def render_glyph(self, ox, oy, info):
oy = self.height - oy + info.offset
thetext = unichr(info.num)
self.svg_glyphs.append(
(info.font, info.fontsize, thetext, ox, oy, info.metrics))
def render_rect_filled(self, x1, y1, x2, y2):
self.svg_rects.append(
(x1, self.height - y1 + 1, x2 - x1, y2 - y1))
def get_results(self, box):
ship(0, -self.depth, box)
svg_elements = Bunch(svg_glyphs = self.svg_glyphs,
svg_rects = self.svg_rects)
return (self.width,
self.height + self.depth,
self.depth,
svg_elements,
self.fonts_object.get_used_characters())
class MathtextBackendCairo(MathtextBackend):
"""
Store information to write a mathtext rendering to the Cairo
backend.
"""
def __init__(self):
self.glyphs = []
self.rects = []
def render_glyph(self, ox, oy, info):
oy = oy - info.offset - self.height
thetext = unichr(info.num)
self.glyphs.append(
(info.font, info.fontsize, thetext, ox, oy))
def render_rect_filled(self, x1, y1, x2, y2):
self.rects.append(
(x1, y1 - self.height, x2 - x1, y2 - y1))
def get_results(self, box):
ship(0, -self.depth, box)
return (self.width,
self.height + self.depth,
self.depth,
self.glyphs,
self.rects)
class Fonts(object):
"""
An abstract base class for a system of fonts to use for mathtext.
The class must be able to take symbol keys and font file names and
return the character metrics. It also delegates to a backend class
to do the actual drawing.
"""
def __init__(self, default_font_prop, mathtext_backend):
"""
*default_font_prop*: A
:class:`~matplotlib.font_manager.FontProperties` object to use
for the default non-math font, or the base font for Unicode
(generic) font rendering.
*mathtext_backend*: A subclass of :class:`MathTextBackend`
used to delegate the actual rendering.
"""
self.default_font_prop = default_font_prop
self.mathtext_backend = mathtext_backend
# Make these classes doubly-linked
self.mathtext_backend.fonts_object = self
self.used_characters = {}
def destroy(self):
"""
Fix any cyclical references before the object is about
to be destroyed.
"""
self.used_characters = None
def get_kern(self, font1, fontclass1, sym1, fontsize1,
font2, fontclass2, sym2, fontsize2, dpi):
"""
Get the kerning distance for font between *sym1* and *sym2*.
*fontX*: one of the TeX font names::
tt, it, rm, cal, sf, bf or default (non-math)
*fontclassX*: TODO
*symX*: a symbol in raw TeX form. e.g. '1', 'x' or '\sigma'
*fontsizeX*: the fontsize in points
*dpi*: the current dots-per-inch
"""
return 0.
def get_metrics(self, font, font_class, sym, fontsize, dpi):
"""
*font*: one of the TeX font names::
tt, it, rm, cal, sf, bf or default (non-math)
*font_class*: TODO
*sym*: a symbol in raw TeX form. e.g. '1', 'x' or '\sigma'
*fontsize*: font size in points
*dpi*: current dots-per-inch
Returns an object with the following attributes:
- *advance*: The advance distance (in points) of the glyph.
- *height*: The height of the glyph in points.
- *width*: The width of the glyph in points.
- *xmin*, *xmax*, *ymin*, *ymax* - the ink rectangle of the glyph
- *iceberg* - the distance from the baseline to the top of
the glyph. This corresponds to TeX's definition of
"height".
"""
info = self._get_info(font, font_class, sym, fontsize, dpi)
return info.metrics
def set_canvas_size(self, w, h, d):
"""
Set the size of the buffer used to render the math expression.
Only really necessary for the bitmap backends.
"""
self.width, self.height, self.depth = ceil(w), ceil(h), ceil(d)
self.mathtext_backend.set_canvas_size(self.width, self.height, self.depth)
def render_glyph(self, ox, oy, facename, font_class, sym, fontsize, dpi):
"""
Draw a glyph at
- *ox*, *oy*: position
- *facename*: One of the TeX face names
- *font_class*:
- *sym*: TeX symbol name or single character
- *fontsize*: fontsize in points
- *dpi*: The dpi to draw at.
"""
info = self._get_info(facename, font_class, sym, fontsize, dpi)
realpath, stat_key = get_realpath_and_stat(info.font.fname)
used_characters = self.used_characters.setdefault(
stat_key, (realpath, set()))
used_characters[1].add(info.num)
self.mathtext_backend.render_glyph(ox, oy, info)
def render_rect_filled(self, x1, y1, x2, y2):
"""
Draw a filled rectangle from (*x1*, *y1*) to (*x2*, *y2*).
"""
self.mathtext_backend.render_rect_filled(x1, y1, x2, y2)
def get_xheight(self, font, fontsize, dpi):
"""
Get the xheight for the given *font* and *fontsize*.
"""
raise NotImplementedError()
def get_underline_thickness(self, font, fontsize, dpi):
"""
Get the line thickness that matches the given font. Used as a
base unit for drawing lines such as in a fraction or radical.
"""
raise NotImplementedError()
def get_used_characters(self):
"""
Get the set of characters that were used in the math
expression. Used by backends that need to subset fonts so
they know which glyphs to include.
"""
return self.used_characters
def get_results(self, box):
"""
Get the data needed by the backend to render the math
expression. The return value is backend-specific.
"""
return self.mathtext_backend.get_results(box)
def get_sized_alternatives_for_symbol(self, fontname, sym):
"""
Override if your font provides multiple sizes of the same
symbol. Should return a list of symbols matching *sym* in
various sizes. The expression renderer will select the most
appropriate size for a given situation from this list.
"""
return [(fontname, sym)]
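# Rough usage sketch of the Fonts interface defined above (illustrative only;
# the real wiring is done by the higher-level mathtext machinery, and
# BakomaFonts/MathtextBackendAgg are the concrete classes defined in this
# module):
#
#     fonts = BakomaFonts(FontProperties(), MathtextBackendAgg())
#     m = fonts.get_metrics('it', 'it', 'x', fontsize=12, dpi=72)
#     # m.advance, m.iceberg, m.xmin, ... as documented in get_metrics above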
class TruetypeFonts(Fonts):
"""
A generic base class for all font setups that use Truetype fonts
(through FT2Font).
"""
class CachedFont:
def __init__(self, font):
self.font = font
self.charmap = font.get_charmap()
self.glyphmap = dict(
[(glyphind, ccode) for ccode, glyphind in self.charmap.iteritems()])
def __repr__(self):
return repr(self.font)
def __init__(self, default_font_prop, mathtext_backend):
Fonts.__init__(self, default_font_prop, mathtext_backend)
self.glyphd = {}
self._fonts = {}
filename = findfont(default_font_prop)
default_font = self.CachedFont(FT2Font(str(filename)))
self._fonts['default'] = default_font
def destroy(self):
self.glyphd = None
Fonts.destroy(self)
def _get_font(self, font):
if font in self.fontmap:
basename = self.fontmap[font]
else:
basename = font
cached_font = self._fonts.get(basename)
if cached_font is None:
font = FT2Font(basename)
cached_font = self.CachedFont(font)
self._fonts[basename] = cached_font
self._fonts[font.postscript_name] = cached_font
self._fonts[font.postscript_name.lower()] = cached_font
return cached_font
def _get_offset(self, cached_font, glyph, fontsize, dpi):
if cached_font.font.postscript_name == 'Cmex10':
return glyph.height/64.0/2.0 + 256.0/64.0 * dpi/72.0
return 0.
def _get_info(self, fontname, font_class, sym, fontsize, dpi):
key = fontname, font_class, sym, fontsize, dpi
bunch = self.glyphd.get(key)
if bunch is not None:
return bunch
cached_font, num, symbol_name, fontsize, slanted = \
self._get_glyph(fontname, font_class, sym, fontsize)
font = cached_font.font
font.set_size(fontsize, dpi)
glyph = font.load_char(
num,
flags=self.mathtext_backend.get_hinting_type())
xmin, ymin, xmax, ymax = [val/64.0 for val in glyph.bbox]
offset = self._get_offset(cached_font, glyph, fontsize, dpi)
metrics = Bunch(
advance = glyph.linearHoriAdvance/65536.0,
height = glyph.height/64.0,
width = glyph.width/64.0,
xmin = xmin,
xmax = xmax,
ymin = ymin+offset,
ymax = ymax+offset,
# iceberg is the equivalent of TeX's "height"
iceberg = glyph.horiBearingY/64.0 + offset,
slanted = slanted
)
result = self.glyphd[key] = Bunch(
font = font,
fontsize = fontsize,
postscript_name = font.postscript_name,
metrics = metrics,
symbol_name = symbol_name,
num = num,
glyph = glyph,
offset = offset
)
return result
def get_xheight(self, font, fontsize, dpi):
cached_font = self._get_font(font)
cached_font.font.set_size(fontsize, dpi)
pclt = cached_font.font.get_sfnt_table('pclt')
if pclt is None:
# Some fonts don't store the xHeight, so we do a poor man's xHeight
metrics = self.get_metrics(font, 'it', 'x', fontsize, dpi)
return metrics.iceberg
xHeight = (pclt['xHeight'] / 64.0) * (fontsize / 12.0) * (dpi / 100.0)
return xHeight
def get_underline_thickness(self, font, fontsize, dpi):
# This function used to grab underline thickness from the font
        # metrics, but that information is just too unreliable, so it
# is now hardcoded.
return ((0.75 / 12.0) * fontsize * dpi) / 72.0
def get_kern(self, font1, fontclass1, sym1, fontsize1,
font2, fontclass2, sym2, fontsize2, dpi):
if font1 == font2 and fontsize1 == fontsize2:
info1 = self._get_info(font1, fontclass1, sym1, fontsize1, dpi)
info2 = self._get_info(font2, fontclass2, sym2, fontsize2, dpi)
font = info1.font
return font.get_kerning(info1.num, info2.num, KERNING_DEFAULT) / 64.0
return Fonts.get_kern(self, font1, fontclass1, sym1, fontsize1,
font2, fontclass2, sym2, fontsize2, dpi)
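# Note on the units in TruetypeFonts above: FT2Font reports glyph metrics in
# FreeType's 26.6 fixed-point format (1/64 of a point at the current size) and
# linear advances in 16.16 format, which is why _get_info divides by 64.0 and
# 65536.0; kerning values from FT2Font.get_kerning are likewise 26.6, hence
# the "/ 64.0" in get_kern.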
class BakomaFonts(TruetypeFonts):
"""
Use the Bakoma TrueType fonts for rendering.
Symbols are strewn about a number of font files, each of which has
its own proprietary 8-bit encoding.
"""
_fontmap = { 'cal' : 'cmsy10',
'rm' : 'cmr10',
'tt' : 'cmtt10',
'it' : 'cmmi10',
'bf' : 'cmb10',
'sf' : 'cmss10',
'ex' : 'cmex10'
}
fontmap = {}
def __init__(self, *args, **kwargs):
self._stix_fallback = StixFonts(*args, **kwargs)
TruetypeFonts.__init__(self, *args, **kwargs)
if not len(self.fontmap):
for key, val in self._fontmap.iteritems():
fullpath = findfont(val)
self.fontmap[key] = fullpath
self.fontmap[val] = fullpath
_slanted_symbols = set(r"\int \oint".split())
def _get_glyph(self, fontname, font_class, sym, fontsize):
symbol_name = None
if fontname in self.fontmap and sym in latex_to_bakoma:
basename, num = latex_to_bakoma[sym]
slanted = (basename == "cmmi10") or sym in self._slanted_symbols
try:
cached_font = self._get_font(basename)
except RuntimeError:
pass
else:
symbol_name = cached_font.font.get_glyph_name(num)
num = cached_font.glyphmap[num]
elif len(sym) == 1:
slanted = (fontname == "it")
try:
cached_font = self._get_font(fontname)
except RuntimeError:
pass
else:
num = ord(sym)
gid = cached_font.charmap.get(num)
if gid is not None:
symbol_name = cached_font.font.get_glyph_name(
cached_font.charmap[num])
if symbol_name is None:
return self._stix_fallback._get_glyph(
fontname, font_class, sym, fontsize)
return cached_font, num, symbol_name, fontsize, slanted
# The Bakoma fonts contain many pre-sized alternatives for the
# delimiters. The AutoSizedChar class will use these alternatives
# and select the best (closest sized) glyph.
_size_alternatives = {
'(' : [('rm', '('), ('ex', '\xa1'), ('ex', '\xb3'),
('ex', '\xb5'), ('ex', '\xc3')],
')' : [('rm', ')'), ('ex', '\xa2'), ('ex', '\xb4'),
('ex', '\xb6'), ('ex', '\x21')],
'{' : [('cal', '{'), ('ex', '\xa9'), ('ex', '\x6e'),
('ex', '\xbd'), ('ex', '\x28')],
'}' : [('cal', '}'), ('ex', '\xaa'), ('ex', '\x6f'),
('ex', '\xbe'), ('ex', '\x29')],
        # The fourth size of '[' is mysteriously missing from the BaKoMa
        # font, so I've omitted it for both '[' and ']'
'[' : [('rm', '['), ('ex', '\xa3'), ('ex', '\x68'),
('ex', '\x22')],
']' : [('rm', ']'), ('ex', '\xa4'), ('ex', '\x69'),
('ex', '\x23')],
r'\lfloor' : [('ex', '\xa5'), ('ex', '\x6a'),
('ex', '\xb9'), ('ex', '\x24')],
r'\rfloor' : [('ex', '\xa6'), ('ex', '\x6b'),
('ex', '\xba'), ('ex', '\x25')],
r'\lceil' : [('ex', '\xa7'), ('ex', '\x6c'),
('ex', '\xbb'), ('ex', '\x26')],
r'\rceil' : [('ex', '\xa8'), ('ex', '\x6d'),
('ex', '\xbc'), ('ex', '\x27')],
r'\langle' : [('ex', '\xad'), ('ex', '\x44'),
('ex', '\xbf'), ('ex', '\x2a')],
r'\rangle' : [('ex', '\xae'), ('ex', '\x45'),
('ex', '\xc0'), ('ex', '\x2b')],
r'\__sqrt__' : [('ex', '\x70'), ('ex', '\x71'),
('ex', '\x72'), ('ex', '\x73')],
r'\backslash': [('ex', '\xb2'), ('ex', '\x2f'),
('ex', '\xc2'), ('ex', '\x2d')],
r'/' : [('rm', '/'), ('ex', '\xb1'), ('ex', '\x2e'),
('ex', '\xcb'), ('ex', '\x2c')],
r'\widehat' : [('rm', '\x5e'), ('ex', '\x62'), ('ex', '\x63'),
('ex', '\x64')],
r'\widetilde': [('rm', '\x7e'), ('ex', '\x65'), ('ex', '\x66'),
('ex', '\x67')],
r'<' : [('cal', 'h'), ('ex', 'D')],
r'>' : [('cal', 'i'), ('ex', 'E')]
}
    for alias, target in [(r'\leftparen', '('),
                          (r'\rightparen', ')'),
                          (r'\leftbrace', '{'),
                          (r'\rightbrace', '}'),
                          (r'\leftbracket', '['),
                          (r'\rightbracket', ']')]:
_size_alternatives[alias] = _size_alternatives[target]
def get_sized_alternatives_for_symbol(self, fontname, sym):
return self._size_alternatives.get(sym, [(fontname, sym)])
class UnicodeFonts(TruetypeFonts):
"""
An abstract base class for handling Unicode fonts.
While some reasonably complete Unicode fonts (such as DejaVu) may
work in some situations, the only Unicode font I'm aware of with a
complete set of math symbols is STIX.
This class will "fallback" on the Bakoma fonts when a required
symbol can not be found in the font.
"""
fontmap = {}
use_cmex = True
def __init__(self, *args, **kwargs):
# This must come first so the backend's owner is set correctly
if rcParams['mathtext.fallback_to_cm']:
self.cm_fallback = BakomaFonts(*args, **kwargs)
else:
self.cm_fallback = None
TruetypeFonts.__init__(self, *args, **kwargs)
if not len(self.fontmap):
for texfont in "cal rm tt it bf sf".split():
prop = rcParams['mathtext.' + texfont]
font = findfont(prop)
self.fontmap[texfont] = font
prop = FontProperties('cmex10')
font = findfont(prop)
self.fontmap['ex'] = font
_slanted_symbols = set(r"\int \oint".split())
def _map_virtual_font(self, fontname, font_class, uniindex):
return fontname, uniindex
def _get_glyph(self, fontname, font_class, sym, fontsize):
found_symbol = False
if self.use_cmex:
uniindex = latex_to_cmex.get(sym)
if uniindex is not None:
fontname = 'ex'
found_symbol = True
if not found_symbol:
try:
uniindex = get_unicode_index(sym)
found_symbol = True
except ValueError:
uniindex = ord('?')
warn("No TeX to unicode mapping for '%s'" %
sym.encode('ascii', 'backslashreplace'),
MathTextWarning)
fontname, uniindex = self._map_virtual_font(
fontname, font_class, uniindex)
# Only characters in the "Letter" class should be italicized in 'it'
# mode. Greek capital letters should be Roman.
if found_symbol:
new_fontname = fontname
if fontname == 'it':
if uniindex < 0x10000:
unistring = unichr(uniindex)
if (not unicodedata.category(unistring)[0] == "L"
or unicodedata.name(unistring).startswith("GREEK CAPITAL")):
new_fontname = 'rm'
slanted = (new_fontname == 'it') or sym in self._slanted_symbols
found_symbol = False
try:
cached_font = self._get_font(new_fontname)
except RuntimeError:
pass
else:
try:
glyphindex = cached_font.charmap[uniindex]
found_symbol = True
except KeyError:
pass
if not found_symbol:
if self.cm_fallback:
warn("Substituting with a symbol from Computer Modern.",
MathTextWarning)
return self.cm_fallback._get_glyph(
fontname, 'it', sym, fontsize)
else:
if fontname == 'it' and isinstance(self, StixFonts):
return self._get_glyph('rm', font_class, sym, fontsize)
warn("Font '%s' does not have a glyph for '%s'" %
(fontname, sym.encode('ascii', 'backslashreplace')),
MathTextWarning)
warn("Substituting with a dummy symbol.", MathTextWarning)
fontname = 'rm'
new_fontname = fontname
cached_font = self._get_font(fontname)
uniindex = 0xA4 # currency character, for lack of anything better
glyphindex = cached_font.charmap[uniindex]
slanted = False
symbol_name = cached_font.font.get_glyph_name(glyphindex)
return cached_font, uniindex, symbol_name, fontsize, slanted
def get_sized_alternatives_for_symbol(self, fontname, sym):
if self.cm_fallback:
return self.cm_fallback.get_sized_alternatives_for_symbol(
fontname, sym)
return [(fontname, sym)]
class StixFonts(UnicodeFonts):
"""
A font handling class for the STIX fonts.
In addition to what UnicodeFonts provides, this class:
- supports "virtual fonts" which are complete alpha numeric
character sets with different font styles at special Unicode
code points, such as "Blackboard".
- handles sized alternative characters for the STIXSizeX fonts.
"""
_fontmap = { 'rm' : 'STIXGeneral',
'it' : 'STIXGeneral:italic',
'bf' : 'STIXGeneral:weight=bold',
'nonunirm' : 'STIXNonUnicode',
'nonuniit' : 'STIXNonUnicode:italic',
'nonunibf' : 'STIXNonUnicode:weight=bold',
0 : 'STIXGeneral',
1 : 'STIXSize1',
2 : 'STIXSize2',
3 : 'STIXSize3',
4 : 'STIXSize4',
5 : 'STIXSize5'
}
fontmap = {}
use_cmex = False
cm_fallback = False
_sans = False
def __init__(self, *args, **kwargs):
TruetypeFonts.__init__(self, *args, **kwargs)
if not len(self.fontmap):
for key, name in self._fontmap.iteritems():
fullpath = findfont(name)
self.fontmap[key] = fullpath
self.fontmap[name] = fullpath
def _map_virtual_font(self, fontname, font_class, uniindex):
# Handle these "fonts" that are actually embedded in
# other fonts.
mapping = stix_virtual_fonts.get(fontname)
if self._sans and mapping is None:
mapping = stix_virtual_fonts['sf']
doing_sans_conversion = True
else:
doing_sans_conversion = False
if mapping is not None:
if isinstance(mapping, dict):
mapping = mapping[font_class]
# Binary search for the source glyph
lo = 0
hi = len(mapping)
while lo < hi:
mid = (lo+hi)//2
range = mapping[mid]
if uniindex < range[0]:
hi = mid
elif uniindex <= range[1]:
break
else:
lo = mid + 1
if uniindex >= range[0] and uniindex <= range[1]:
uniindex = uniindex - range[0] + range[3]
fontname = range[2]
elif not doing_sans_conversion:
# This will generate a dummy character
uniindex = 0x1
fontname = 'it'
# Handle private use area glyphs
if (fontname in ('it', 'rm', 'bf') and
uniindex >= 0xe000 and uniindex <= 0xf8ff):
fontname = 'nonuni' + fontname
return fontname, uniindex
_size_alternatives = {}
def get_sized_alternatives_for_symbol(self, fontname, sym):
alternatives = self._size_alternatives.get(sym)
if alternatives:
return alternatives
alternatives = []
try:
uniindex = get_unicode_index(sym)
except ValueError:
return [(fontname, sym)]
fix_ups = {
ord('<'): 0x27e8,
ord('>'): 0x27e9 }
uniindex = fix_ups.get(uniindex, uniindex)
for i in range(6):
cached_font = self._get_font(i)
glyphindex = cached_font.charmap.get(uniindex)
if glyphindex is not None:
alternatives.append((i, unichr(uniindex)))
self._size_alternatives[sym] = alternatives
return alternatives
class StixSansFonts(StixFonts):
"""
A font handling class for the STIX fonts (that uses sans-serif
characters by default).
"""
_sans = True
class StandardPsFonts(Fonts):
"""
Use the standard postscript fonts for rendering to backend_ps
Unlike the other font classes, BakomaFont and UnicodeFont, this
one requires the Ps backend.
"""
basepath = os.path.join( get_data_path(), 'fonts', 'afm' )
fontmap = { 'cal' : 'pzcmi8a', # Zapf Chancery
'rm' : 'pncr8a', # New Century Schoolbook
'tt' : 'pcrr8a', # Courier
'it' : 'pncri8a', # New Century Schoolbook Italic
'sf' : 'phvr8a', # Helvetica
'bf' : 'pncb8a', # New Century Schoolbook Bold
None : 'psyr' # Symbol
}
def __init__(self, default_font_prop):
Fonts.__init__(self, default_font_prop, MathtextBackendPs())
self.glyphd = {}
self.fonts = {}
filename = findfont(default_font_prop, fontext='afm')
default_font = AFM(file(filename, 'r'))
default_font.fname = filename
self.fonts['default'] = default_font
self.pswriter = StringIO()
def _get_font(self, font):
if font in self.fontmap:
basename = self.fontmap[font]
else:
basename = font
cached_font = self.fonts.get(basename)
if cached_font is None:
fname = os.path.join(self.basepath, basename + ".afm")
cached_font = AFM(file(fname, 'r'))
cached_font.fname = fname
self.fonts[basename] = cached_font
self.fonts[cached_font.get_fontname()] = cached_font
return cached_font
def _get_info (self, fontname, font_class, sym, fontsize, dpi):
'load the cmfont, metrics and glyph with caching'
key = fontname, sym, fontsize, dpi
tup = self.glyphd.get(key)
if tup is not None:
return tup
# Only characters in the "Letter" class should really be italicized.
# This class includes greek letters, so we're ok
if (fontname == 'it' and
(len(sym) > 1 or
not unicodedata.category(unicode(sym)).startswith("L"))):
fontname = 'rm'
found_symbol = False
if sym in latex_to_standard:
fontname, num = latex_to_standard[sym]
glyph = chr(num)
found_symbol = True
elif len(sym) == 1:
glyph = sym
num = ord(glyph)
found_symbol = True
else:
warn("No TeX to built-in Postscript mapping for '%s'" % sym,
MathTextWarning)
slanted = (fontname == 'it')
font = self._get_font(fontname)
if found_symbol:
try:
symbol_name = font.get_name_char(glyph)
except KeyError:
warn("No glyph in standard Postscript font '%s' for '%s'" %
(font.postscript_name, sym),
MathTextWarning)
found_symbol = False
if not found_symbol:
glyph = sym = '?'
num = ord(glyph)
symbol_name = font.get_name_char(glyph)
offset = 0
scale = 0.001 * fontsize
xmin, ymin, xmax, ymax = [val * scale
for val in font.get_bbox_char(glyph)]
metrics = Bunch(
advance = font.get_width_char(glyph) * scale,
width = font.get_width_char(glyph) * scale,
height = font.get_height_char(glyph) * scale,
xmin = xmin,
xmax = xmax,
ymin = ymin+offset,
ymax = ymax+offset,
# iceberg is the equivalent of TeX's "height"
iceberg = ymax + offset,
slanted = slanted
)
self.glyphd[key] = Bunch(
font = font,
fontsize = fontsize,
postscript_name = font.get_fontname(),
metrics = metrics,
symbol_name = symbol_name,
num = num,
glyph = glyph,
offset = offset
)
return self.glyphd[key]
def get_kern(self, font1, fontclass1, sym1, fontsize1,
font2, fontclass2, sym2, fontsize2, dpi):
if font1 == font2 and fontsize1 == fontsize2:
info1 = self._get_info(font1, fontclass1, sym1, fontsize1, dpi)
info2 = self._get_info(font2, fontclass2, sym2, fontsize2, dpi)
font = info1.font
return (font.get_kern_dist(info1.glyph, info2.glyph)
* 0.001 * fontsize1)
return Fonts.get_kern(self, font1, fontclass1, sym1, fontsize1,
font2, fontclass2, sym2, fontsize2, dpi)
def get_xheight(self, font, fontsize, dpi):
cached_font = self._get_font(font)
return cached_font.get_xheight() * 0.001 * fontsize
def get_underline_thickness(self, font, fontsize, dpi):
cached_font = self._get_font(font)
return cached_font.get_underline_thickness() * 0.001 * fontsize
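# Note on the scaling in StandardPsFonts above: AFM metrics are expressed in
# 1/1000ths of the em square, so widths, heights, bounding boxes and kerning
# values are multiplied by 0.001 * fontsize to convert them to points.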
##############################################################################
# TeX-LIKE BOX MODEL
# The following is based directly on the document 'woven' from the
# TeX82 source code. This information is also available in printed
# form:
#
# Knuth, Donald E.. 1986. Computers and Typesetting, Volume B:
# TeX: The Program. Addison-Wesley Professional.
#
# The most relevant "chapters" are:
# Data structures for boxes and their friends
# Shipping pages out (Ship class)
# Packaging (hpack and vpack)
# Data structures for math mode
# Subroutines for math mode
# Typesetting math formulas
#
# Many of the docstrings below refer to a numbered "node" in that
# book, e.g. node123
#
# Note that (as TeX) y increases downward, unlike many other parts of
# matplotlib.
# How much text shrinks when going to the next-smallest level. GROW_FACTOR
# must be the inverse of SHRINK_FACTOR.
SHRINK_FACTOR = 0.7
GROW_FACTOR = 1.0 / SHRINK_FACTOR
# The number of different sizes of chars to use, beyond which they will not
# get any smaller
NUM_SIZE_LEVELS = 4
# Percentage of x-height of additional horiz. space after sub/superscripts
SCRIPT_SPACE = 0.2
# Percentage of x-height that sub/superscripts drop below the baseline
SUBDROP = 0.3
# Percentage of x-height that superscripts are raised above the baseline
SUP1 = 0.5
# Percentage of x-height that subscripts drop below the baseline
SUB1 = 0.0
# Percentage of x-height that superscripts are offset relative to the subscript
DELTA = 0.18
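# Worked example of the factors above: a 12pt character shrunk one level
# becomes 12 * 0.7 = 8.4pt, two levels 12 * 0.49 = 5.88pt; grow() multiplies
# by 1/0.7 and exactly undoes one shrink.  Once a node's size counter reaches
# NUM_SIZE_LEVELS, further shrink() calls stop scaling its dimensions (see
# Box.shrink below).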
class MathTextWarning(Warning):
pass
class Node(object):
"""
A node in the TeX box model
"""
def __init__(self):
self.size = 0
def __repr__(self):
return self.__internal_repr__()
def __internal_repr__(self):
return self.__class__.__name__
def get_kerning(self, next):
return 0.0
def shrink(self):
"""
Shrinks one level smaller. There are only three levels of
sizes, after which things will no longer get smaller.
"""
self.size += 1
def grow(self):
"""
Grows one level larger. There is no limit to how big
something can get.
"""
self.size -= 1
def render(self, x, y):
pass
class Box(Node):
"""
Represents any node with a physical location.
"""
def __init__(self, width, height, depth):
Node.__init__(self)
self.width = width
self.height = height
self.depth = depth
def shrink(self):
Node.shrink(self)
if self.size < NUM_SIZE_LEVELS:
self.width *= SHRINK_FACTOR
self.height *= SHRINK_FACTOR
self.depth *= SHRINK_FACTOR
def grow(self):
Node.grow(self)
self.width *= GROW_FACTOR
self.height *= GROW_FACTOR
self.depth *= GROW_FACTOR
def render(self, x1, y1, x2, y2):
pass
class Vbox(Box):
"""
A box with only height (zero width).
"""
def __init__(self, height, depth):
Box.__init__(self, 0., height, depth)
class Hbox(Box):
"""
A box with only width (zero height and depth).
"""
def __init__(self, width):
Box.__init__(self, width, 0., 0.)
class Char(Node):
"""
Represents a single character. Unlike TeX, the font information
and metrics are stored with each :class:`Char` to make it easier
to lookup the font metrics when needed. Note that TeX boxes have
a width, height, and depth, unlike Type1 and Truetype which use a
full bounding box and an advance in the x-direction. The metrics
must be converted to the TeX way, and the advance (if different
from width) must be converted into a :class:`Kern` node when the
:class:`Char` is added to its parent :class:`Hlist`.
"""
def __init__(self, c, state):
Node.__init__(self)
self.c = c
self.font_output = state.font_output
assert isinstance(state.font, (str, unicode, int))
self.font = state.font
self.font_class = state.font_class
self.fontsize = state.fontsize
self.dpi = state.dpi
# The real width, height and depth will be set during the
# pack phase, after we know the real fontsize
self._update_metrics()
def __internal_repr__(self):
return '`%s`' % self.c
def _update_metrics(self):
metrics = self._metrics = self.font_output.get_metrics(
self.font, self.font_class, self.c, self.fontsize, self.dpi)
if self.c == ' ':
self.width = metrics.advance
else:
self.width = metrics.width
self.height = metrics.iceberg
self.depth = -(metrics.iceberg - metrics.height)
def is_slanted(self):
return self._metrics.slanted
def get_kerning(self, next):
"""
Return the amount of kerning between this and the given
character. Called when characters are strung together into
:class:`Hlist` to create :class:`Kern` nodes.
"""
advance = self._metrics.advance - self.width
kern = 0.
if isinstance(next, Char):
kern = self.font_output.get_kern(
self.font, self.font_class, self.c, self.fontsize,
next.font, next.font_class, next.c, next.fontsize,
self.dpi)
return advance + kern
def render(self, x, y):
"""
Render the character to the canvas
"""
self.font_output.render_glyph(
x, y,
self.font, self.font_class, self.c, self.fontsize, self.dpi)
def shrink(self):
Node.shrink(self)
if self.size < NUM_SIZE_LEVELS:
self.fontsize *= SHRINK_FACTOR
self.width *= SHRINK_FACTOR
self.height *= SHRINK_FACTOR
self.depth *= SHRINK_FACTOR
def grow(self):
Node.grow(self)
self.fontsize *= GROW_FACTOR
self.width *= GROW_FACTOR
self.height *= GROW_FACTOR
self.depth *= GROW_FACTOR
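# Worked example of Char.get_kerning above: if a glyph's advance is 10.0pt but
# its ink width is 9.5pt, and the font reports a -0.8pt kern against the next
# character, get_kerning returns (10.0 - 9.5) + (-0.8) = -0.3pt, which
# Hlist.kern (below) materializes as a Kern node between the two Chars.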
class Accent(Char):
"""
The font metrics need to be dealt with differently for accents,
since they are already offset correctly from the baseline in
TrueType fonts.
"""
def _update_metrics(self):
metrics = self._metrics = self.font_output.get_metrics(
self.font, self.font_class, self.c, self.fontsize, self.dpi)
self.width = metrics.xmax - metrics.xmin
self.height = metrics.ymax - metrics.ymin
self.depth = 0
def shrink(self):
Char.shrink(self)
self._update_metrics()
def grow(self):
Char.grow(self)
self._update_metrics()
def render(self, x, y):
"""
Render the character to the canvas.
"""
self.font_output.render_glyph(
x - self._metrics.xmin, y + self._metrics.ymin,
self.font, self.font_class, self.c, self.fontsize, self.dpi)
class List(Box):
"""
A list of nodes (either horizontal or vertical).
"""
def __init__(self, elements):
Box.__init__(self, 0., 0., 0.)
self.shift_amount = 0. # An arbitrary offset
self.children = elements # The child nodes of this list
# The following parameters are set in the vpack and hpack functions
self.glue_set = 0. # The glue setting of this list
self.glue_sign = 0 # 0: normal, -1: shrinking, 1: stretching
self.glue_order = 0 # The order of infinity (0 - 3) for the glue
def __repr__(self):
return '[%s <%.02f %.02f %.02f %.02f> %s]' % (
self.__internal_repr__(),
self.width, self.height,
self.depth, self.shift_amount,
' '.join([repr(x) for x in self.children]))
def _determine_order(self, totals):
"""
A helper function to determine the highest order of glue
used by the members of this list. Used by vpack and hpack.
"""
o = 0
for i in range(len(totals) - 1, 0, -1):
if totals[i] != 0.0:
o = i
break
return o
def _set_glue(self, x, sign, totals, error_type):
o = self._determine_order(totals)
self.glue_order = o
self.glue_sign = sign
if totals[o] != 0.:
self.glue_set = x / totals[o]
else:
self.glue_sign = 0
self.glue_ratio = 0.
if o == 0:
if len(self.children):
warn("%s %s: %r" % (error_type, self.__class__.__name__, self),
MathTextWarning)
def shrink(self):
for child in self.children:
child.shrink()
Box.shrink(self)
if self.size < NUM_SIZE_LEVELS:
self.shift_amount *= SHRINK_FACTOR
self.glue_set *= SHRINK_FACTOR
def grow(self):
for child in self.children:
child.grow()
Box.grow(self)
self.shift_amount *= GROW_FACTOR
self.glue_set *= GROW_FACTOR
class Hlist(List):
"""
A horizontal list of boxes.
"""
def __init__(self, elements, w=0., m='additional', do_kern=True):
List.__init__(self, elements)
if do_kern:
self.kern()
self.hpack()
def kern(self):
"""
Insert :class:`Kern` nodes between :class:`Char` nodes to set
kerning. The :class:`Char` nodes themselves determine the
amount of kerning they need (in :meth:`~Char.get_kerning`),
and this function just creates the linked list in the correct
way.
"""
new_children = []
num_children = len(self.children)
if num_children:
for i in range(num_children):
elem = self.children[i]
if i < num_children - 1:
next = self.children[i + 1]
else:
next = None
new_children.append(elem)
kerning_distance = elem.get_kerning(next)
if kerning_distance != 0.:
kern = Kern(kerning_distance)
new_children.append(kern)
self.children = new_children
# This is a failed experiment to fake cross-font kerning.
# def get_kerning(self, next):
# if len(self.children) >= 2 and isinstance(self.children[-2], Char):
# if isinstance(next, Char):
# print "CASE A"
# return self.children[-2].get_kerning(next)
# elif isinstance(next, Hlist) and len(next.children) and isinstance(next.children[0], Char):
# print "CASE B"
# result = self.children[-2].get_kerning(next.children[0])
# print result
# return result
# return 0.0
def hpack(self, w=0., m='additional'):
"""
The main duty of :meth:`hpack` is to compute the dimensions of
the resulting boxes, and to adjust the glue if one of those
dimensions is pre-specified. The computed sizes normally
enclose all of the material inside the new box; but some items
may stick out if negative glue is used, if the box is
overfull, or if a ``\\vbox`` includes other boxes that have
been shifted left.
- *w*: specifies a width
- *m*: is either 'exactly' or 'additional'.
Thus, ``hpack(w, 'exactly')`` produces a box whose width is
exactly *w*, while ``hpack(w, 'additional')`` yields a box
whose width is the natural width plus *w*. The default values
produce a box with the natural width.
"""
# I don't know why these get reset in TeX. Shift_amount is pretty
# much useless if we do.
#self.shift_amount = 0.
h = 0.
d = 0.
x = 0.
total_stretch = [0.] * 4
total_shrink = [0.] * 4
for p in self.children:
if isinstance(p, Char):
x += p.width
h = max(h, p.height)
d = max(d, p.depth)
elif isinstance(p, Box):
x += p.width
if not isinf(p.height) and not isinf(p.depth):
s = getattr(p, 'shift_amount', 0.)
h = max(h, p.height - s)
d = max(d, p.depth + s)
elif isinstance(p, Glue):
glue_spec = p.glue_spec
x += glue_spec.width
total_stretch[glue_spec.stretch_order] += glue_spec.stretch
total_shrink[glue_spec.shrink_order] += glue_spec.shrink
elif isinstance(p, Kern):
x += p.width
self.height = h
self.depth = d
if m == 'additional':
w += x
self.width = w
x = w - x
if x == 0.:
self.glue_sign = 0
self.glue_order = 0
self.glue_ratio = 0.
return
if x > 0.:
self._set_glue(x, 1, total_stretch, "Overfull")
else:
self._set_glue(x, -1, total_shrink, "Underfull")
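# Illustrative hpack semantics: an Hlist whose children have natural widths
# 5.0 and 3.0 gets width 8.0 from hpack(); hpack(10.0, 'exactly') keeps the
# children but sets the width to 10.0 and spreads the extra 2.0 over the
# stretchable glue, warning "Overfull" if the list contains no fil/fill/filll
# glue to absorb the difference.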
class Vlist(List):
"""
A vertical list of boxes.
"""
def __init__(self, elements, h=0., m='additional'):
List.__init__(self, elements)
self.vpack()
def vpack(self, h=0., m='additional', l=float(inf)):
"""
The main duty of :meth:`vpack` is to compute the dimensions of
the resulting boxes, and to adjust the glue if one of those
dimensions is pre-specified.
- *h*: specifies a height
- *m*: is either 'exactly' or 'additional'.
- *l*: a maximum height
Thus, ``vpack(h, 'exactly')`` produces a box whose height is
exactly *h*, while ``vpack(h, 'additional')`` yields a box
whose height is the natural height plus *h*. The default
        values produce a box with the natural height.
"""
# I don't know why these get reset in TeX. Shift_amount is pretty
# much useless if we do.
# self.shift_amount = 0.
w = 0.
d = 0.
x = 0.
total_stretch = [0.] * 4
total_shrink = [0.] * 4
for p in self.children:
if isinstance(p, Box):
x += d + p.height
d = p.depth
if not isinf(p.width):
s = getattr(p, 'shift_amount', 0.)
w = max(w, p.width + s)
elif isinstance(p, Glue):
x += d
d = 0.
glue_spec = p.glue_spec
x += glue_spec.width
total_stretch[glue_spec.stretch_order] += glue_spec.stretch
total_shrink[glue_spec.shrink_order] += glue_spec.shrink
elif isinstance(p, Kern):
x += d + p.width
d = 0.
elif isinstance(p, Char):
raise RuntimeError("Internal mathtext error: Char node found in Vlist.")
self.width = w
if d > l:
x += d - l
self.depth = l
else:
self.depth = d
if m == 'additional':
h += x
self.height = h
x = h - x
if x == 0:
self.glue_sign = 0
self.glue_order = 0
self.glue_ratio = 0.
return
if x > 0.:
self._set_glue(x, 1, total_stretch, "Overfull")
else:
self._set_glue(x, -1, total_shrink, "Underfull")
class Rule(Box):
"""
A :class:`Rule` node stands for a solid black rectangle; it has
*width*, *depth*, and *height* fields just as in an
:class:`Hlist`. However, if any of these dimensions is inf, the
actual value will be determined by running the rule up to the
boundary of the innermost enclosing box. This is called a "running
dimension." The width is never running in an :class:`Hlist`; the
height and depth are never running in a :class:`Vlist`.
"""
def __init__(self, width, height, depth, state):
Box.__init__(self, width, height, depth)
self.font_output = state.font_output
def render(self, x, y, w, h):
self.font_output.render_rect_filled(x, y, x + w, y + h)
class Hrule(Rule):
"""
Convenience class to create a horizontal rule.
"""
def __init__(self, state):
thickness = state.font_output.get_underline_thickness(
state.font, state.fontsize, state.dpi)
height = depth = thickness * 0.5
Rule.__init__(self, inf, height, depth, state)
class Vrule(Rule):
"""
Convenience class to create a vertical rule.
"""
def __init__(self, state):
thickness = state.font_output.get_underline_thickness(
state.font, state.fontsize, state.dpi)
Rule.__init__(self, thickness, inf, inf, state)
class Glue(Node):
"""
Most of the information in this object is stored in the underlying
:class:`GlueSpec` class, which is shared between multiple glue objects. (This
is a memory optimization which probably doesn't matter anymore, but it's
easier to stick to what TeX does.)
"""
def __init__(self, glue_type, copy=False):
Node.__init__(self)
self.glue_subtype = 'normal'
if is_string_like(glue_type):
glue_spec = GlueSpec.factory(glue_type)
elif isinstance(glue_type, GlueSpec):
glue_spec = glue_type
else:
            raise ValueError("glue_type must be a glue spec name or instance.")
if copy:
glue_spec = glue_spec.copy()
self.glue_spec = glue_spec
def shrink(self):
Node.shrink(self)
if self.size < NUM_SIZE_LEVELS:
if self.glue_spec.width != 0.:
self.glue_spec = self.glue_spec.copy()
self.glue_spec.width *= SHRINK_FACTOR
def grow(self):
Node.grow(self)
if self.glue_spec.width != 0.:
self.glue_spec = self.glue_spec.copy()
self.glue_spec.width *= GROW_FACTOR
class GlueSpec(object):
"""
See :class:`Glue`.
"""
def __init__(self, width=0., stretch=0., stretch_order=0, shrink=0., shrink_order=0):
self.width = width
self.stretch = stretch
self.stretch_order = stretch_order
self.shrink = shrink
self.shrink_order = shrink_order
def copy(self):
return GlueSpec(
self.width,
self.stretch,
self.stretch_order,
self.shrink,
self.shrink_order)
def factory(cls, glue_type):
return cls._types[glue_type]
factory = classmethod(factory)
GlueSpec._types = {
'fil': GlueSpec(0., 1., 1, 0., 0),
'fill': GlueSpec(0., 1., 2, 0., 0),
'filll': GlueSpec(0., 1., 3, 0., 0),
'neg_fil': GlueSpec(0., 0., 0, 1., 1),
'neg_fill': GlueSpec(0., 0., 0, 1., 2),
'neg_filll': GlueSpec(0., 0., 0, 1., 3),
'empty': GlueSpec(0., 0., 0, 0., 0),
'ss': GlueSpec(0., 1., 1, -1., 1)
}
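# The named specs above mirror TeX's standard glues: 'fil', 'fill' and 'filll'
# stretch with first-, second- and third-order infinity, the 'neg_*' variants
# shrink instead, and 'ss' supplies the stretch-and-shrink glue that
# HCentered/VCentered (below) place on both sides of their contents so the
# material ends up centered in its box.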
# Some convenient ways to get common kinds of glue
class Fil(Glue):
def __init__(self):
Glue.__init__(self, 'fil')
class Fill(Glue):
def __init__(self):
Glue.__init__(self, 'fill')
class Filll(Glue):
def __init__(self):
Glue.__init__(self, 'filll')
class NegFil(Glue):
def __init__(self):
Glue.__init__(self, 'neg_fil')
class NegFill(Glue):
def __init__(self):
Glue.__init__(self, 'neg_fill')
class NegFilll(Glue):
def __init__(self):
Glue.__init__(self, 'neg_filll')
class SsGlue(Glue):
def __init__(self):
Glue.__init__(self, 'ss')
class HCentered(Hlist):
"""
A convenience class to create an :class:`Hlist` whose contents are
centered within its enclosing box.
"""
def __init__(self, elements):
Hlist.__init__(self, [SsGlue()] + elements + [SsGlue()],
do_kern=False)
class VCentered(Vlist):
"""
A convenience class to create a :class:`Vlist` whose contents are
centered within its enclosing box.
"""
def __init__(self, elements):
Vlist.__init__(self, [SsGlue()] + elements + [SsGlue()])
class Kern(Node):
"""
A :class:`Kern` node has a width field to specify a (normally
negative) amount of spacing. This spacing correction appears in
horizontal lists between letters like A and V when the font
designer said that it looks better to move them closer together or
further apart. A kern node can also appear in a vertical list,
when its *width* denotes additional spacing in the vertical
direction.
"""
def __init__(self, width):
Node.__init__(self)
self.width = width
def __repr__(self):
return "k%.02f" % self.width
def shrink(self):
Node.shrink(self)
if self.size < NUM_SIZE_LEVELS:
self.width *= SHRINK_FACTOR
def grow(self):
Node.grow(self)
self.width *= GROW_FACTOR
class SubSuperCluster(Hlist):
"""
    :class:`SubSuperCluster` is a sort of hack to get around the fact
    that this code does not do a two-pass parse like TeX.  This lets us store
enough information in the hlist itself, namely the nucleus, sub-
and super-script, such that if another script follows that needs
to be attached, it can be reconfigured on the fly.
"""
def __init__(self):
self.nucleus = None
self.sub = None
self.super = None
Hlist.__init__(self, [])
class AutoHeightChar(Hlist):
"""
:class:`AutoHeightChar` will create a character as close to the
given height and depth as possible. When using a font with
multiple height versions of some characters (such as the BaKoMa
fonts), the correct glyph will be selected, otherwise this will
always just return a scaled version of the glyph.
"""
def __init__(self, c, height, depth, state, always=False):
alternatives = state.font_output.get_sized_alternatives_for_symbol(
state.font, c)
state = state.copy()
target_total = height + depth
for fontname, sym in alternatives:
state.font = fontname
char = Char(sym, state)
if char.height + char.depth >= target_total:
break
factor = target_total / (char.height + char.depth)
state.fontsize *= factor
char = Char(sym, state)
shift = (depth - char.depth)
Hlist.__init__(self, [char])
self.shift_amount = shift
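# Example of AutoHeightChar with the Bakoma tables above: a '(' that must span
# a given height+depth is tried as ('rm', '(') and then the successively
# larger cmex10 ('ex') glyphs; the first alternative at least as tall as the
# target is chosen, and that glyph is then rescaled via the fontsize so its
# height+depth roughly matches the target (scaling up the largest alternative
# if none is tall enough).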
class AutoWidthChar(Hlist):
"""
:class:`AutoWidthChar` will create a character as close to the
given width as possible. When using a font with multiple width
versions of some characters (such as the BaKoMa fonts), the
correct glyph will be selected, otherwise this will always just
return a scaled version of the glyph.
"""
def __init__(self, c, width, state, always=False, char_class=Char):
alternatives = state.font_output.get_sized_alternatives_for_symbol(
state.font, c)
state = state.copy()
for fontname, sym in alternatives:
state.font = fontname
char = char_class(sym, state)
if char.width >= width:
break
factor = width / char.width
state.fontsize *= factor
char = char_class(sym, state)
Hlist.__init__(self, [char])
self.width = char.width
class Ship(object):
"""
Once the boxes have been set up, this sends them to output. Since
boxes can be inside of boxes inside of boxes, the main work of
:class:`Ship` is done by two mutually recursive routines,
:meth:`hlist_out` and :meth:`vlist_out`, which traverse the
:class:`Hlist` nodes and :class:`Vlist` nodes inside of horizontal
and vertical boxes. The global variables used in TeX to store
state as it processes have become member variables here.
"""
def __call__(self, ox, oy, box):
self.max_push = 0 # Deepest nesting of push commands so far
self.cur_s = 0
self.cur_v = 0.
self.cur_h = 0.
self.off_h = ox
self.off_v = oy + box.height
self.hlist_out(box)
def clamp(value):
if value < -1000000000.:
return -1000000000.
if value > 1000000000.:
return 1000000000.
return value
clamp = staticmethod(clamp)
def hlist_out(self, box):
cur_g = 0
cur_glue = 0.
glue_order = box.glue_order
glue_sign = box.glue_sign
base_line = self.cur_v
left_edge = self.cur_h
self.cur_s += 1
self.max_push = max(self.cur_s, self.max_push)
clamp = self.clamp
for p in box.children:
if isinstance(p, Char):
p.render(self.cur_h + self.off_h, self.cur_v + self.off_v)
self.cur_h += p.width
elif isinstance(p, Kern):
self.cur_h += p.width
elif isinstance(p, List):
# node623
if len(p.children) == 0:
self.cur_h += p.width
else:
edge = self.cur_h
self.cur_v = base_line + p.shift_amount
if isinstance(p, Hlist):
self.hlist_out(p)
else:
# p.vpack(box.height + box.depth, 'exactly')
self.vlist_out(p)
self.cur_h = edge + p.width
self.cur_v = base_line
elif isinstance(p, Box):
# node624
rule_height = p.height
rule_depth = p.depth
rule_width = p.width
if isinf(rule_height):
rule_height = box.height
if isinf(rule_depth):
rule_depth = box.depth
if rule_height > 0 and rule_width > 0:
                    self.cur_v = base_line + rule_depth
                    p.render(self.cur_h + self.off_h,
                             self.cur_v + self.off_v,
                             rule_width, rule_height)
                    self.cur_v = base_line
self.cur_h += rule_width
elif isinstance(p, Glue):
# node625
glue_spec = p.glue_spec
rule_width = glue_spec.width - cur_g
if glue_sign != 0: # normal
if glue_sign == 1: # stretching
if glue_spec.stretch_order == glue_order:
cur_glue += glue_spec.stretch
cur_g = round(clamp(float(box.glue_set) * cur_glue))
elif glue_spec.shrink_order == glue_order:
cur_glue += glue_spec.shrink
cur_g = round(clamp(float(box.glue_set) * cur_glue))
rule_width += cur_g
self.cur_h += rule_width
self.cur_s -= 1
def vlist_out(self, box):
cur_g = 0
cur_glue = 0.
glue_order = box.glue_order
glue_sign = box.glue_sign
self.cur_s += 1
self.max_push = max(self.max_push, self.cur_s)
left_edge = self.cur_h
self.cur_v -= box.height
top_edge = self.cur_v
clamp = self.clamp
for p in box.children:
if isinstance(p, Kern):
self.cur_v += p.width
elif isinstance(p, List):
if len(p.children) == 0:
self.cur_v += p.height + p.depth
else:
self.cur_v += p.height
self.cur_h = left_edge + p.shift_amount
save_v = self.cur_v
p.width = box.width
if isinstance(p, Hlist):
self.hlist_out(p)
else:
self.vlist_out(p)
self.cur_v = save_v + p.depth
self.cur_h = left_edge
elif isinstance(p, Box):
rule_height = p.height
rule_depth = p.depth
rule_width = p.width
if isinf(rule_width):
rule_width = box.width
rule_height += rule_depth
if rule_height > 0 and rule_depth > 0:
self.cur_v += rule_height
p.render(self.cur_h + self.off_h,
self.cur_v + self.off_v,
rule_width, rule_height)
elif isinstance(p, Glue):
glue_spec = p.glue_spec
rule_height = glue_spec.width - cur_g
if glue_sign != 0: # normal
if glue_sign == 1: # stretching
if glue_spec.stretch_order == glue_order:
cur_glue += glue_spec.stretch
cur_g = round(clamp(float(box.glue_set) * cur_glue))
elif glue_spec.shrink_order == glue_order: # shrinking
cur_glue += glue_spec.shrink
cur_g = round(clamp(float(box.glue_set) * cur_glue))
rule_height += cur_g
self.cur_v += rule_height
elif isinstance(p, Char):
raise RuntimeError("Internal mathtext error: Char node found in vlist")
self.cur_s -= 1
ship = Ship()
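# `ship` is the single module-level instance used throughout this file: calls
# such as ship(0, -self.depth, box) in the backends' get_results methods walk
# the packed box tree and forward every Char and Rule to the active
# Fonts/backend pair via their render() methods, with the accumulated offsets.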
##############################################################################
# PARSER
def Error(msg):
"""
    Helper function to raise parser errors.
"""
def raise_error(s, loc, toks):
raise ParseFatalException(msg + "\n" + s)
empty = Empty()
empty.setParseAction(raise_error)
return empty
class Parser(object):
"""
This is the pyparsing-based parser for math expressions. It
actually parses full strings *containing* math expressions, in
that raw text may also appear outside of pairs of ``$``.
The grammar is based directly on that in TeX, though it cuts a few
corners.
"""
_binary_operators = set(r'''
+ *
\pm \sqcap \rhd
\mp \sqcup \unlhd
\times \vee \unrhd
\div \wedge \oplus
\ast \setminus \ominus
\star \wr \otimes
\circ \diamond \oslash
\bullet \bigtriangleup \odot
\cdot \bigtriangledown \bigcirc
\cap \triangleleft \dagger
\cup \triangleright \ddagger
\uplus \lhd \amalg'''.split())
_relation_symbols = set(r'''
= < > :
\leq \geq \equiv \models
\prec \succ \sim \perp
\preceq \succeq \simeq \mid
\ll \gg \asymp \parallel
\subset \supset \approx \bowtie
\subseteq \supseteq \cong \Join
\sqsubset \sqsupset \neq \smile
\sqsubseteq \sqsupseteq \doteq \frown
\in \ni \propto
\vdash \dashv'''.split())
_arrow_symbols = set(r'''
\leftarrow \longleftarrow \uparrow
\Leftarrow \Longleftarrow \Uparrow
\rightarrow \longrightarrow \downarrow
\Rightarrow \Longrightarrow \Downarrow
\leftrightarrow \longleftrightarrow \updownarrow
\Leftrightarrow \Longleftrightarrow \Updownarrow
\mapsto \longmapsto \nearrow
\hookleftarrow \hookrightarrow \searrow
\leftharpoonup \rightharpoonup \swarrow
\leftharpoondown \rightharpoondown \nwarrow
\rightleftharpoons \leadsto'''.split())
_spaced_symbols = _binary_operators | _relation_symbols | _arrow_symbols
_punctuation_symbols = set(r', ; . ! \ldotp \cdotp'.split())
_overunder_symbols = set(r'''
\sum \prod \coprod \bigcap \bigcup \bigsqcup \bigvee
\bigwedge \bigodot \bigotimes \bigoplus \biguplus
'''.split())
_overunder_functions = set(
r"lim liminf limsup sup max min".split())
_dropsub_symbols = set(r'''\int \oint'''.split())
_fontnames = set("rm cal it tt sf bf default bb frak circled scr".split())
_function_names = set("""
arccos csc ker min arcsin deg lg Pr arctan det lim sec arg dim
liminf sin cos exp limsup sinh cosh gcd ln sup cot hom log tan
coth inf max tanh""".split())
_ambiDelim = set(r"""
| \| / \backslash \uparrow \downarrow \updownarrow \Uparrow
\Downarrow \Updownarrow .""".split())
_leftDelim = set(r"( [ { < \lfloor \langle \lceil".split())
_rightDelim = set(r") ] } > \rfloor \rangle \rceil".split())
def __init__(self):
# All forward declarations are here
font = Forward().setParseAction(self.font).setName("font")
latexfont = Forward()
subsuper = Forward().setParseAction(self.subsuperscript).setName("subsuper")
placeable = Forward().setName("placeable")
simple = Forward().setName("simple")
autoDelim = Forward().setParseAction(self.auto_sized_delimiter)
self._expression = Forward().setParseAction(self.finish).setName("finish")
float = Regex(r"[-+]?([0-9]+\.?[0-9]*|\.[0-9]+)")
lbrace = Literal('{').suppress()
rbrace = Literal('}').suppress()
start_group = (Optional(latexfont) - lbrace)
start_group.setParseAction(self.start_group)
end_group = rbrace.copy()
end_group.setParseAction(self.end_group)
bslash = Literal('\\')
accent = oneOf(self._accent_map.keys() +
list(self._wide_accents))
function = oneOf(list(self._function_names))
fontname = oneOf(list(self._fontnames))
latex2efont = oneOf(['math' + x for x in self._fontnames])
space =(FollowedBy(bslash)
+ oneOf([r'\ ',
r'\/',
r'\,',
r'\;',
r'\quad',
r'\qquad',
r'\!'])
).setParseAction(self.space).setName('space')
customspace =(Literal(r'\hspace')
- (( lbrace
- float
- rbrace
) | Error(r"Expected \hspace{n}"))
).setParseAction(self.customspace).setName('customspace')
unicode_range = u"\U00000080-\U0001ffff"
symbol =(Regex(UR"([a-zA-Z0-9 +\-*/<>=:,.;!'@()\[\]|%s])|(\\[%%${}\[\]_|])" % unicode_range)
| (Combine(
bslash
+ oneOf(tex2uni.keys())
) + FollowedBy(Regex("[^a-zA-Z]")))
).setParseAction(self.symbol).leaveWhitespace()
c_over_c =(Suppress(bslash)
+ oneOf(self._char_over_chars.keys())
).setParseAction(self.char_over_chars)
accent = Group(
Suppress(bslash)
+ accent
- placeable
).setParseAction(self.accent).setName("accent")
function =(Suppress(bslash)
+ function
).setParseAction(self.function).setName("function")
group = Group(
start_group
+ ZeroOrMore(
autoDelim
^ simple)
- end_group
).setParseAction(self.group).setName("group")
font <<(Suppress(bslash)
+ fontname)
latexfont <<(Suppress(bslash)
+ latex2efont)
frac = Group(
Suppress(Literal(r"\frac"))
+ ((group + group)
| Error(r"Expected \frac{num}{den}"))
).setParseAction(self.frac).setName("frac")
sqrt = Group(
Suppress(Literal(r"\sqrt"))
+ Optional(
Suppress(Literal("["))
- Regex("[0-9]+")
- Suppress(Literal("]")),
default = None
)
+ (group | Error("Expected \sqrt{value}"))
).setParseAction(self.sqrt).setName("sqrt")
placeable <<(accent
^ function
^ (c_over_c | symbol)
^ group
^ frac
^ sqrt
)
simple <<(space
| customspace
| font
| subsuper
)
subsuperop = oneOf(["_", "^"])
subsuper << Group(
( Optional(placeable)
+ OneOrMore(
subsuperop
- placeable
)
)
| placeable
)
ambiDelim = oneOf(list(self._ambiDelim))
leftDelim = oneOf(list(self._leftDelim))
rightDelim = oneOf(list(self._rightDelim))
autoDelim <<(Suppress(Literal(r"\left"))
+ ((leftDelim | ambiDelim) | Error("Expected a delimiter"))
+ Group(
autoDelim
^ OneOrMore(simple))
+ Suppress(Literal(r"\right"))
+ ((rightDelim | ambiDelim) | Error("Expected a delimiter"))
)
math = OneOrMore(
autoDelim
^ simple
).setParseAction(self.math).setName("math")
math_delim = ~bslash + Literal('$')
non_math = Regex(r"(?:(?:\\[$])|[^$])*"
).setParseAction(self.non_math).setName("non_math").leaveWhitespace()
self._expression << (
non_math
+ ZeroOrMore(
Suppress(math_delim)
+ Optional(math)
+ (Suppress(math_delim)
| Error("Expected end of math '$'"))
+ non_math
)
) + StringEnd()
self.clear()
def clear(self):
"""
Clear any state before parsing.
"""
self._expr = None
self._state_stack = None
self._em_width_cache = {}
def parse(self, s, fonts_object, fontsize, dpi):
"""
Parse expression *s* using the given *fonts_object* for
output, at the given *fontsize* and *dpi*.
Returns the parse tree of :class:`Node` instances.
"""
self._state_stack = [self.State(fonts_object, 'default', 'rm', fontsize, dpi)]
try:
self._expression.parseString(s)
except ParseException, err:
raise ValueError("\n".join([
"",
err.line,
" " * (err.column - 1) + "^",
str(err)]))
return self._expr
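    # Illustrative use of parse() (a sketch only; the surrounding mathtext
    # machinery normally builds the Fonts object, and BakomaFonts /
    # MathtextBackendAgg are the classes defined earlier in this module):
    #
    #     fonts = BakomaFonts(FontProperties(), MathtextBackendAgg())
    #     box = Parser().parse(r'$x^2 + \alpha$', fonts, 12, 72)
    #     # `box` is the root Hlist of the Node tree described above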
# The state of the parser is maintained in a stack. Upon
# entering and leaving a group { } or math/non-math, the stack
# is pushed and popped accordingly. The current state always
# exists in the top element of the stack.
class State(object):
"""
Stores the state of the parser.
States are pushed and popped from a stack as necessary, and
the "current" state is always at the top of the stack.
"""
def __init__(self, font_output, font, font_class, fontsize, dpi):
self.font_output = font_output
self._font = font
self.font_class = font_class
self.fontsize = fontsize
self.dpi = dpi
def copy(self):
return Parser.State(
self.font_output,
self.font,
self.font_class,
self.fontsize,
self.dpi)
def _get_font(self):
return self._font
def _set_font(self, name):
if name in ('it', 'rm', 'bf'):
self.font_class = name
self._font = name
font = property(_get_font, _set_font)
def get_state(self):
"""
Get the current :class:`State` of the parser.
"""
return self._state_stack[-1]
def pop_state(self):
"""
Pop a :class:`State` off of the stack.
"""
self._state_stack.pop()
def push_state(self):
"""
Push a new :class:`State` onto the stack which is just a copy
of the current state.
"""
self._state_stack.append(self.get_state().copy())
def finish(self, s, loc, toks):
#~ print "finish", toks
self._expr = Hlist(toks)
return [self._expr]
def math(self, s, loc, toks):
#~ print "math", toks
hlist = Hlist(toks)
self.pop_state()
return [hlist]
def non_math(self, s, loc, toks):
#~ print "non_math", toks
s = toks[0].replace(r'\$', '$')
symbols = [Char(c, self.get_state()) for c in s]
hlist = Hlist(symbols)
# We're going into math now, so set font to 'it'
self.push_state()
self.get_state().font = 'it'
return [hlist]
def _make_space(self, percentage):
# All spaces are relative to em width
state = self.get_state()
key = (state.font, state.fontsize, state.dpi)
width = self._em_width_cache.get(key)
if width is None:
metrics = state.font_output.get_metrics(
state.font, 'it', 'm', state.fontsize, state.dpi)
width = metrics.advance
self._em_width_cache[key] = width
return Kern(width * percentage)
_space_widths = { r'\ ' : 0.3,
r'\,' : 0.4,
r'\;' : 0.8,
r'\quad' : 1.6,
r'\qquad' : 3.2,
r'\!' : -0.4,
r'\/' : 0.4 }
def space(self, s, loc, toks):
assert(len(toks)==1)
num = self._space_widths[toks[0]]
box = self._make_space(num)
return [box]
def customspace(self, s, loc, toks):
return [self._make_space(float(toks[1]))]
def symbol(self, s, loc, toks):
# print "symbol", toks
c = toks[0]
try:
char = Char(c, self.get_state())
except ValueError:
raise ParseFatalException("Unknown symbol: %s" % c)
if c in self._spaced_symbols:
return [Hlist( [self._make_space(0.2),
char,
self._make_space(0.2)] ,
do_kern = False)]
elif c in self._punctuation_symbols:
return [Hlist( [char,
self._make_space(0.2)] ,
do_kern = False)]
return [char]
_char_over_chars = {
        # The first 2 entries in the tuple are (font, char, sizescale) for
# the two symbols under and over. The third element is the space
# (in multiples of underline height)
        r'AA' : ( ('rm', 'A', 1.0), (None, r'\circ', 0.5), 0.0),
}
def char_over_chars(self, s, loc, toks):
sym = toks[0]
state = self.get_state()
thickness = state.font_output.get_underline_thickness(
state.font, state.fontsize, state.dpi)
under_desc, over_desc, space = \
self._char_over_chars.get(sym, (None, None, 0.0))
if under_desc is None:
raise ParseFatalException("Error parsing symbol")
over_state = state.copy()
if over_desc[0] is not None:
over_state.font = over_desc[0]
over_state.fontsize *= over_desc[2]
over = Accent(over_desc[1], over_state)
under_state = state.copy()
if under_desc[0] is not None:
under_state.font = under_desc[0]
under_state.fontsize *= under_desc[2]
under = Char(under_desc[1], under_state)
width = max(over.width, under.width)
over_centered = HCentered([over])
over_centered.hpack(width, 'exactly')
under_centered = HCentered([under])
under_centered.hpack(width, 'exactly')
return Vlist([
over_centered,
Vbox(0., thickness * space),
under_centered
])
_accent_map = {
r'hat' : r'\circumflexaccent',
r'breve' : r'\combiningbreve',
r'bar' : r'\combiningoverline',
r'grave' : r'\combininggraveaccent',
r'acute' : r'\combiningacuteaccent',
r'ddot' : r'\combiningdiaeresis',
r'tilde' : r'\combiningtilde',
r'dot' : r'\combiningdotabove',
r'vec' : r'\combiningrightarrowabove',
r'"' : r'\combiningdiaeresis',
r"`" : r'\combininggraveaccent',
r"'" : r'\combiningacuteaccent',
r'~' : r'\combiningtilde',
r'.' : r'\combiningdotabove',
r'^' : r'\circumflexaccent'
}
_wide_accents = set(r"widehat widetilde".split())
def accent(self, s, loc, toks):
assert(len(toks)==1)
state = self.get_state()
thickness = state.font_output.get_underline_thickness(
state.font, state.fontsize, state.dpi)
if len(toks[0]) != 2:
raise ParseFatalException("Error parsing accent")
accent, sym = toks[0]
if accent in self._wide_accents:
accent = AutoWidthChar(
'\\' + accent, sym.width, state, char_class=Accent)
else:
accent = Accent(self._accent_map[accent], state)
centered = HCentered([accent])
centered.hpack(sym.width, 'exactly')
return Vlist([
centered,
Vbox(0., thickness * 2.0),
Hlist([sym])
])
def function(self, s, loc, toks):
#~ print "function", toks
self.push_state()
state = self.get_state()
state.font = 'rm'
hlist = Hlist([Char(c, state) for c in toks[0]])
self.pop_state()
hlist.function_name = toks[0]
return hlist
def start_group(self, s, loc, toks):
self.push_state()
# Deal with LaTeX-style font tokens
if len(toks):
self.get_state().font = toks[0][4:]
return []
def group(self, s, loc, toks):
grp = Hlist(toks[0])
return [grp]
def end_group(self, s, loc, toks):
self.pop_state()
return []
def font(self, s, loc, toks):
assert(len(toks)==1)
name = toks[0]
self.get_state().font = name
return []
def is_overunder(self, nucleus):
if isinstance(nucleus, Char):
return nucleus.c in self._overunder_symbols
elif isinstance(nucleus, Hlist) and hasattr(nucleus, 'function_name'):
return nucleus.function_name in self._overunder_functions
return False
def is_dropsub(self, nucleus):
if isinstance(nucleus, Char):
return nucleus.c in self._dropsub_symbols
return False
def is_slanted(self, nucleus):
if isinstance(nucleus, Char):
return nucleus.is_slanted()
return False
def subsuperscript(self, s, loc, toks):
assert(len(toks)==1)
# print 'subsuperscript', toks
nucleus = None
sub = None
super = None
if len(toks[0]) == 1:
return toks[0].asList()
elif len(toks[0]) == 2:
op, next = toks[0]
nucleus = Hbox(0.0)
if op == '_':
sub = next
else:
super = next
elif len(toks[0]) == 3:
nucleus, op, next = toks[0]
if op == '_':
sub = next
else:
super = next
elif len(toks[0]) == 5:
nucleus, op1, next1, op2, next2 = toks[0]
if op1 == op2:
if op1 == '_':
raise ParseFatalException("Double subscript")
else:
raise ParseFatalException("Double superscript")
if op1 == '_':
sub = next1
super = next2
else:
super = next1
sub = next2
else:
raise ParseFatalException(
"Subscript/superscript sequence is too long. "
"Use braces { } to remove ambiguity.")
state = self.get_state()
rule_thickness = state.font_output.get_underline_thickness(
state.font, state.fontsize, state.dpi)
xHeight = state.font_output.get_xheight(
state.font, state.fontsize, state.dpi)
# Handle over/under symbols, such as sum or integral
if self.is_overunder(nucleus):
vlist = []
shift = 0.
width = nucleus.width
if super is not None:
super.shrink()
width = max(width, super.width)
if sub is not None:
sub.shrink()
width = max(width, sub.width)
if super is not None:
hlist = HCentered([super])
hlist.hpack(width, 'exactly')
vlist.extend([hlist, Kern(rule_thickness * 3.0)])
hlist = HCentered([nucleus])
hlist.hpack(width, 'exactly')
vlist.append(hlist)
if sub is not None:
hlist = HCentered([sub])
hlist.hpack(width, 'exactly')
vlist.extend([Kern(rule_thickness * 3.0), hlist])
shift = hlist.height + hlist.depth + rule_thickness * 2.0
vlist = Vlist(vlist)
vlist.shift_amount = shift + nucleus.depth * 0.5
result = Hlist([vlist])
return [result]
# Handle regular sub/superscripts
shift_up = nucleus.height - SUBDROP * xHeight
if self.is_dropsub(nucleus):
shift_down = nucleus.depth + SUBDROP * xHeight
else:
shift_down = SUBDROP * xHeight
if super is None:
# node757
sub.shrink()
x = Hlist([sub])
# x.width += SCRIPT_SPACE * xHeight
shift_down = max(shift_down, SUB1)
clr = x.height - (abs(xHeight * 4.0) / 5.0)
shift_down = max(shift_down, clr)
x.shift_amount = shift_down
else:
super.shrink()
x = Hlist([super, Kern(SCRIPT_SPACE * xHeight)])
# x.width += SCRIPT_SPACE * xHeight
clr = SUP1 * xHeight
shift_up = max(shift_up, clr)
clr = x.depth + (abs(xHeight) / 4.0)
shift_up = max(shift_up, clr)
if sub is None:
x.shift_amount = -shift_up
else: # Both sub and superscript
sub.shrink()
y = Hlist([sub])
# y.width += SCRIPT_SPACE * xHeight
shift_down = max(shift_down, SUB1 * xHeight)
clr = (2.0 * rule_thickness -
((shift_up - x.depth) - (y.height - shift_down)))
if clr > 0.:
shift_up += clr
shift_down += clr
if self.is_slanted(nucleus):
x.shift_amount = DELTA * (shift_up + shift_down)
x = Vlist([x,
Kern((shift_up - x.depth) - (y.height - shift_down)),
y])
x.shift_amount = shift_down
result = Hlist([nucleus, x])
return [result]
def frac(self, s, loc, toks):
assert(len(toks)==1)
assert(len(toks[0])==2)
state = self.get_state()
thickness = state.font_output.get_underline_thickness(
state.font, state.fontsize, state.dpi)
num, den = toks[0]
num.shrink()
den.shrink()
cnum = HCentered([num])
cden = HCentered([den])
width = max(num.width, den.width) + thickness * 10.
cnum.hpack(width, 'exactly')
cden.hpack(width, 'exactly')
vlist = Vlist([cnum, # numerator
Vbox(0, thickness * 2.0), # space
Hrule(state), # rule
Vbox(0, thickness * 4.0), # space
cden # denominator
])
# Shift so the fraction line sits in the middle of the
# equals sign
metrics = state.font_output.get_metrics(
state.font, 'it', '=', state.fontsize, state.dpi)
shift = (cden.height -
((metrics.ymax + metrics.ymin) / 2 -
thickness * 3.0))
vlist.shift_amount = shift
hlist = Hlist([vlist, Hbox(thickness * 2.)])
return [hlist]
def sqrt(self, s, loc, toks):
#~ print "sqrt", toks
root, body = toks[0]
state = self.get_state()
thickness = state.font_output.get_underline_thickness(
state.font, state.fontsize, state.dpi)
# Determine the height of the body, and add a little extra to
# the height so it doesn't seem cramped
height = body.height - body.shift_amount + thickness * 5.0
depth = body.depth + body.shift_amount
check = AutoHeightChar(r'\__sqrt__', height, depth, state, always=True)
height = check.height - check.shift_amount
depth = check.depth + check.shift_amount
# Put a little extra space to the left and right of the body
padded_body = Hlist([Hbox(thickness * 2.0),
body,
Hbox(thickness * 2.0)])
rightside = Vlist([Hrule(state),
Fill(),
padded_body])
# Stretch the glue between the hrule and the body
rightside.vpack(height + (state.fontsize * state.dpi) / (100.0 * 12.0),
depth, 'exactly')
# Add the root and shift it upward so it is above the tick.
# The value of 0.6 is a hard-coded hack ;)
if root is None:
root = Box(check.width * 0.5, 0., 0.)
else:
root = Hlist([Char(x, state) for x in root])
root.shrink()
root.shrink()
root_vlist = Vlist([Hlist([root])])
root_vlist.shift_amount = -height * 0.6
hlist = Hlist([root_vlist, # Root
# Negative kerning to put root over tick
Kern(-check.width * 0.5),
check, # Check
rightside]) # Body
return [hlist]
def auto_sized_delimiter(self, s, loc, toks):
#~ print "auto_sized_delimiter", toks
front, middle, back = toks
state = self.get_state()
height = max([x.height for x in middle])
depth = max([x.depth for x in middle])
parts = []
# \left. and \right. aren't supposed to produce any symbols
if front != '.':
parts.append(AutoHeightChar(front, height, depth, state))
parts.extend(middle.asList())
if back != '.':
parts.append(AutoHeightChar(back, height, depth, state))
hlist = Hlist(parts)
return hlist
###
##############################################################################
# MAIN
class MathTextParser(object):
_parser = None
_backend_mapping = {
'bitmap': MathtextBackendBitmap,
'agg' : MathtextBackendAgg,
'ps' : MathtextBackendPs,
'pdf' : MathtextBackendPdf,
'svg' : MathtextBackendSvg,
'cairo' : MathtextBackendCairo,
'macosx': MathtextBackendAgg,
}
_font_type_mapping = {
'cm' : BakomaFonts,
'stix' : StixFonts,
'stixsans' : StixSansFonts,
'custom' : UnicodeFonts
}
def __init__(self, output):
"""
Create a MathTextParser for the given backend *output*.
"""
self._output = output.lower()
self._cache = maxdict(50)
def parse(self, s, dpi = 72, prop = None):
"""
Parse the given math expression *s* at the given *dpi*. If
*prop* is provided, it is a
:class:`~matplotlib.font_manager.FontProperties` object
specifying the "default" font to use in the math expression,
used for all non-math text.
The results are cached, so multiple calls to :meth:`parse`
with the same expression should be fast.
"""
if prop is None:
prop = FontProperties()
cacheKey = (s, dpi, hash(prop))
result = self._cache.get(cacheKey)
if result is not None:
return result
if self._output == 'ps' and rcParams['ps.useafm']:
font_output = StandardPsFonts(prop)
else:
backend = self._backend_mapping[self._output]()
fontset = rcParams['mathtext.fontset']
fontset_class = self._font_type_mapping.get(fontset.lower())
if fontset_class is not None:
font_output = fontset_class(prop, backend)
else:
raise ValueError(
"mathtext.fontset must be either 'cm', 'stix', "
"'stixsans', or 'custom'")
fontsize = prop.get_size_in_points()
# This is a class variable so we don't rebuild the parser
# with each request.
if self._parser is None:
self.__class__._parser = Parser()
box = self._parser.parse(s, font_output, fontsize, dpi)
font_output.set_canvas_size(box.width, box.height, box.depth)
result = font_output.get_results(box)
self._cache[cacheKey] = result
# Free up the transient data structures
self._parser.clear()
# Fix cyclical references
font_output.destroy()
font_output.mathtext_backend.fonts_object = None
font_output.mathtext_backend = None
return result
def to_mask(self, texstr, dpi=120, fontsize=14):
"""
*texstr*
A valid mathtext string, eg r'IQ: $\sigma_i=15$'
*dpi*
The dots-per-inch to render the text
*fontsize*
The font size in points
Returns a tuple (*array*, *depth*)
- *array* is an NxM uint8 alpha ubyte mask array of
rasterized tex.
- depth is the offset of the baseline from the bottom of the
image in pixels.
"""
assert(self._output=="bitmap")
prop = FontProperties(size=fontsize)
ftimage, depth = self.parse(texstr, dpi=dpi, prop=prop)
x = ftimage.as_array()
return x, depth
def to_rgba(self, texstr, color='black', dpi=120, fontsize=14):
"""
*texstr*
A valid mathtext string, eg r'IQ: $\sigma_i=15$'
*color*
Any matplotlib color argument
*dpi*
The dots-per-inch to render the text
*fontsize*
The font size in points
Returns a tuple (*array*, *depth*)
          - *array* is an NxMx4 RGBA uint8 array of the rasterized
            tex, with *color* applied.
- depth is the offset of the baseline from the bottom of the
image in pixels.
"""
x, depth = self.to_mask(texstr, dpi=dpi, fontsize=fontsize)
r, g, b = mcolors.colorConverter.to_rgb(color)
RGBA = np.zeros((x.shape[0], x.shape[1], 4), dtype=np.uint8)
RGBA[:,:,0] = int(255*r)
RGBA[:,:,1] = int(255*g)
RGBA[:,:,2] = int(255*b)
RGBA[:,:,3] = x
return RGBA, depth
def to_png(self, filename, texstr, color='black', dpi=120, fontsize=14):
"""
Writes a tex expression to a PNG file.
Returns the offset of the baseline from the bottom of the
image in pixels.
*filename*
A writable filename or fileobject
*texstr*
A valid mathtext string, eg r'IQ: $\sigma_i=15$'
*color*
A valid matplotlib color argument
*dpi*
The dots-per-inch to render the text
*fontsize*
The font size in points
Returns the offset of the baseline from the bottom of the
image in pixels.
"""
rgba, depth = self.to_rgba(texstr, color=color, dpi=dpi, fontsize=fontsize)
numrows, numcols, tmp = rgba.shape
_png.write_png(rgba.tostring(), numcols, numrows, filename)
return depth
def get_depth(self, texstr, dpi=120, fontsize=14):
"""
Returns the offset of the baseline from the bottom of the
image in pixels.
*texstr*
A valid mathtext string, eg r'IQ: $\sigma_i=15$'
*dpi*
The dots-per-inch to render the text
*fontsize*
The font size in points
"""
assert(self._output=="bitmap")
prop = FontProperties(size=fontsize)
ftimage, depth = self.parse(texstr, dpi=dpi, prop=prop)
return depth
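# A minimal usage sketch added for illustration (not part of the original
# module); 'math_output.png' is a made-up output path.
def _example_render_mathtext():
    # The 'bitmap' backend is required by to_mask/to_rgba/to_png above.
    parser = MathTextParser('bitmap')
    rgba, depth = parser.to_rgba(r'$\sqrt{x^2 + 1}$', color='black',
                                 dpi=120, fontsize=14)
    # depth is the baseline offset, in pixels, from the bottom of the image.
    parser.to_png('math_output.png', r'$\sqrt{x^2 + 1}$', dpi=120, fontsize=14)
    return rgba.shape, depth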
| agpl-3.0 |
google-research/tapas | tapas/scripts/calc_metrics_utils.py | 1 | 14541 | # coding=utf-8
# Copyright 2019 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Denotation accuracy calculation for TAPAS predictions over WikiSQL."""
import math
import os
from typing import Any, Dict, List, Optional, Set, Text, Tuple, Mapping
from absl import logging
import dataclasses
import pandas as pd
import sklearn.metrics
from tapas.protos import interaction_pb2
from tapas.scripts import prediction_utils
from tapas.utils import text_utils
import tensorflow.compat.v1 as tf
_Answer = interaction_pb2.Answer
@dataclasses.dataclass
class Example:
"""Represents an example."""
example_id: Text
question: Text
table_id: Text
table: pd.DataFrame
gold_cell_coo: Set[Tuple[int, int]]
gold_agg_function: int
float_answer: float
has_gold_answer: bool
pred_cell_coo: Set[Tuple[int, int]] = dataclasses.field(default_factory=set)
pred_agg_function: int = _Answer.NONE
weight: float = 1.0
gold_class_index: Optional[int] = None
pred_class_index: Optional[int] = None
def write_to_tensorboard(
metrics,
global_step,
logdir,
):
"""Writes metrics to tensorbaord."""
with tf.summary.FileWriter(logdir) as writer:
for label, value in metrics.items():
summary = tf.Summary(
value=[tf.Summary.Value(tag=label, simple_value=value)])
writer.add_summary(summary, global_step)
def read_data_examples_from_interactions(
interactions_path):
"""Reads examples from an interactions file."""
data_examples = {}
for interaction in prediction_utils.iterate_interactions(interactions_path):
for question in interaction.questions:
data_examples[question.id] = example_from_question(interaction, question)
return data_examples
def example_from_question(
interaction,
question,
):
"""Converts question to example."""
ex_id = question.id
question_text = question.original_text
table = prediction_utils.table_to_panda_frame(interaction.table)
table_id = interaction.table.table_id
has_gold_answer = question.answer.is_valid
gold_cell_coo = {
(x.row_index, x.column_index) for x in question.answer.answer_coordinates
}
gold_agg_function = question.answer.aggregation_function
float_value = question.answer.float_value if question.answer.HasField(
'float_value') else None
class_index = question.answer.class_index if question.answer.HasField(
'class_index') else None
ex = Example(
ex_id,
question_text,
table_id,
table,
gold_cell_coo,
gold_agg_function,
float_value,
has_gold_answer,
gold_class_index=class_index,
)
return ex
def read_predictions(predictions_path, examples):
"""Reads predictions from a csv file."""
for row in prediction_utils.iterate_predictions(predictions_path):
pred_id = '{}-{}_{}'.format(row['id'], row['annotator'], row['position'])
example = examples[pred_id]
example.pred_cell_coo = prediction_utils.parse_coordinates(
row['answer_coordinates'])
example.pred_agg_function = int(row.get('pred_aggr', '0'))
example.pred_class_index = int(row.get('pred_cls', '0'))
if 'column_scores' in row:
column_scores = list(filter(None, row['column_scores'][1:-1].split(' ')))
removed_column_scores = [
float(score) for score in column_scores if float(score) < 0.0
]
if column_scores:
example.weight = len(removed_column_scores) / len(column_scores)
def _calc_acc(correct):
"""Returns the fraction of true examples."""
matches = sum(1 for ex in correct if ex)
return matches / float(len(correct))
@dataclasses.dataclass(frozen=True)
class StructuredMetrics:
aggregation_acc: float
cell_acc: float
joint_acc: float
confusion_df: pd.DataFrame
f1_scores_df: pd.DataFrame
def calc_structure_metrics(
examples,
denotation_errors_path = None):
"""Calculates metrics regarding the correctness of the predicted structure."""
examples_to_write = []
for ex_id in examples:
pred_agg_function = examples[ex_id].pred_agg_function
pred_cell_coo = examples[ex_id].pred_cell_coo
gold_agg_function = examples[ex_id].gold_agg_function
gold_cell_coo = examples[ex_id].gold_cell_coo
assert pred_agg_function is not None
assert pred_cell_coo is not None
agg_function_correct = gold_agg_function == pred_agg_function
coo_correct = gold_cell_coo == pred_cell_coo
is_correct = agg_function_correct and coo_correct
examples_to_write.append([
ex_id,
gold_agg_function,
pred_agg_function,
sorted(gold_cell_coo),
sorted(pred_cell_coo),
agg_function_correct,
coo_correct,
is_correct,
])
frame = pd.DataFrame(
examples_to_write,
columns=[
'id',
'gold_agg',
'pred_agg',
'gold_cell_coo',
'pred_cell_coo',
'agg_function_correct',
'coo_correct',
'is_correct',
])
aggregation_acc = frame['agg_function_correct'].mean()
logging.info('aggregation_acc=%f', aggregation_acc)
cell_acc = frame['coo_correct'].mean()
logging.info('cell_acc=%f', cell_acc)
joint_acc = frame['is_correct'].mean()
logging.info('joint_acc=%f', joint_acc)
agg_labels = list(_Answer.AggregationFunction.keys())
gold_agg = frame['gold_agg']
pred_agg = frame['pred_agg']
confusion_mat = sklearn.metrics.confusion_matrix(gold_agg, pred_agg)
confusion_df = pd.DataFrame(
data=confusion_mat,
columns=['pred_{}'.format(l) for l in agg_labels],
index=['gold_{}'.format(l) for l in agg_labels])
logging.info('*** Aggregation confusion matrix ***')
logging.info('\n%s', confusion_df)
f1_scores = [sklearn.metrics.f1_score(gold_agg, pred_agg, average=None)]
f1_scores_df = pd.DataFrame(data=f1_scores, columns=agg_labels)
logging.info('*** Aggregation F1 scores ***')
logging.info('\n%s', f1_scores_df)
if denotation_errors_path:
with tf.io.gfile.GFile(
os.path.join(denotation_errors_path, 'structured_examples.tsv'),
'w') as f:
frame.to_csv(f, sep='\t')
return StructuredMetrics(
aggregation_acc=aggregation_acc,
cell_acc=cell_acc,
joint_acc=joint_acc,
confusion_df=confusion_df,
f1_scores_df=f1_scores_df,
)
def _collect_cells_from_table(cell_coos,
table):
cell_values = []
for cell in cell_coos:
value = str(table.iat[cell[0], cell[1]])
cell_values.append(value)
return cell_values
def _safe_convert_to_float(value):
float_value = text_utils.convert_to_float(value)
if math.isnan(float_value):
raise ValueError('Value is NaN %s' % value)
return float_value
def _parse_value(value):
"""Parses a cell value to a number or lowercased string."""
try:
return _safe_convert_to_float(value)
except ValueError:
try:
return value.lower()
except ValueError:
return value
def _to_float32s(elements):
return tuple(text_utils.to_float32(v) for v in elements)
def execute(aggregation_type, cell_coos,
table):
"""Executes predicted structure against a table to produce the denotation."""
values = _collect_cells_from_table(cell_coos, table)
values_parsed = [_parse_value(value) for value in values]
values_parsed = tuple(values_parsed)
if aggregation_type == _Answer.NONE:
# In this case there is no aggregation
return values_parsed, values
else: # Should perform aggregation.
if not values and (aggregation_type == _Answer.AVERAGE or
aggregation_type == _Answer.SUM):
# Summing or averaging an empty set results in an empty set.
# NB: SQL returns null for sum over an empty set.
return tuple(), values
if aggregation_type == _Answer.COUNT:
denotation = len(values)
else:
# In this case all values must be numbers (to be summed or averaged).
try:
values_num = [text_utils.convert_to_float(value) for value in values]
except ValueError:
return values_parsed, values
if aggregation_type == _Answer.SUM:
denotation = sum(values_num)
elif aggregation_type == _Answer.AVERAGE:
denotation = sum(values_num) / len(values_num)
else:
        raise ValueError('Unknown aggregation type: %s' % aggregation_type)
return tuple([float(denotation)]), values
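# A small worked sketch added for illustration (not part of the original
# module); the table contents and coordinate sets below are made up.
def _example_execute_sketch():
  import pandas as pd
  table = pd.DataFrame({'city': ['Paris', 'Rome'], 'population': ['2.5', '1.5']})
  # No aggregation returns the parsed cell values, e.g. ('paris',) for cell (0, 0).
  no_agg, _ = execute(_Answer.NONE, {(0, 0)}, table)
  # COUNT over the two population cells yields (2.0,); SUM parses the strings
  # to floats and yields (4.0,).
  count, _ = execute(_Answer.COUNT, {(0, 1), (1, 1)}, table)
  total, _ = execute(_Answer.SUM, {(0, 1), (1, 1)}, table)
  return no_agg, count, total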
@dataclasses.dataclass
class DenotationResult:
denotation: Optional[List[Any]]
values: Optional[List[Text]]
agg_function: Optional[int]
cell_coordinates: Optional[Set[Tuple[int, int]]]
@dataclasses.dataclass
class DenotationStats:
"""Represent the denotation evaluation for a single example."""
is_correct: bool
pred_result: DenotationResult
gold_result: Optional[DenotationResult]
weight: float
def _highlight_cells(coordinates, table):
"""Returns a printable version of the table with highlighted cells."""
result = table.copy()[list(table)].astype(str)
for x, y in coordinates:
result.iat[x, y] = '[[' + str(table.iat[x, y]) + ']]'
return result
def _get_debug_row(result, table):
if not result:
return [None, None, None, None]
return [
result.denotation,
result.values,
_Answer.AggregationFunction.Name(result.agg_function),
sorted(result.cell_coordinates) if result.cell_coordinates else None,
_highlight_cells(result.cell_coordinates, table)
if result.cell_coordinates else None,
]
def _get_gold_denotation_result(example):
"""Computes gold denotation of the example."""
if not example.has_gold_answer:
# No gold answer for this example.
return None
agg_function = example.gold_agg_function
cell_coo = example.gold_cell_coo
if example.float_answer is None:
denotation, values = execute(agg_function, cell_coo, example.table)
elif math.isnan(example.float_answer):
denotation = []
values = []
else:
denotation = [(example.float_answer)]
values = []
denotation = _to_float32s(denotation)
denotation = text_utils.normalize_answers(denotation)
return DenotationResult(
denotation=denotation,
values=values,
agg_function=agg_function,
cell_coordinates=cell_coo,
)
def _get_pred_denotation_result(example):
"""Computes predicted denotation."""
if example.pred_agg_function is None:
raise ValueError('pred_agg_function is None')
if example.pred_cell_coo is None:
raise ValueError('pred_cell_coo is None')
agg_function = example.pred_agg_function
cell_coo = example.pred_cell_coo
denotation, values = execute(agg_function, cell_coo, example.table)
denotation = _to_float32s(denotation)
denotation = text_utils.normalize_answers(denotation)
return DenotationResult(
denotation=denotation,
values=values,
agg_function=agg_function,
cell_coordinates=cell_coo,
)
def get_denotation_stats(example):
"""Computes denotation stats for single example."""
pred_result = _get_pred_denotation_result(example)
gold_result = _get_gold_denotation_result(example)
is_correct = False
if gold_result is not None:
is_correct = pred_result.denotation == gold_result.denotation
return DenotationStats(
is_correct=is_correct,
gold_result=gold_result,
pred_result=pred_result,
weight=example.weight * float(is_correct),
)
def calc_weighted_denotation_accuracy(examples,
denotation_errors_path,
predictions_file_name,
add_weights):
"""Calculates the denotation accuracy weighted by the column scores."""
examples_to_write = []
for example_id, example in sorted(examples.items()):
denotation_stats = get_denotation_stats(example)
example_stats = [example_id, example.question, denotation_stats.is_correct]
if add_weights:
example_stats.append(example.weight)
examples_to_write.append(
example_stats +
_get_debug_row(denotation_stats.gold_result, example.table) +
_get_debug_row(denotation_stats.pred_result, example.table))
columns = [
'example_id',
'question',
'is_correct',
'gold denotation',
'gold cell values',
'gold cell coordinates',
'gold aggregation',
'gold table',
'pred denotation',
'pred cell values',
'pred cell coordinates',
'pred aggregation',
'pred table',
]
if add_weights:
weights_columns = columns[:3]
weights_columns.append('weight')
weights_columns.extend(columns[3:])
columns = weights_columns
frame = pd.DataFrame(examples_to_write, columns=columns)
if denotation_errors_path is not None:
examples_file = os.path.join(
denotation_errors_path,
'denotation_examples_{}'.format(predictions_file_name))
with tf.io.gfile.GFile(examples_file, 'w') as f:
frame.to_csv(f, sep='\t')
denotation_acc = frame['is_correct'].mean()
logging.info('denotation_accuracy=%f', denotation_acc)
stats = {'denotation_accuracy': denotation_acc}
if not add_weights:
return stats
weighted_denotation_acc = frame['weight'].mean()
stats['weighted_denotation_accuracy'] = weighted_denotation_acc
logging.info('weighted_denotation_accuracy=%f', weighted_denotation_acc)
logging.info('total_test_examples=%d', len(examples))
return stats
def calc_denotation_accuracy(examples,
denotation_errors_path,
predictions_file_name):
"""Calculates the denotation accuracy."""
return calc_weighted_denotation_accuracy(
examples,
denotation_errors_path,
predictions_file_name,
add_weights=False)['denotation_accuracy']
def calc_classification_accuracy(examples):
"""Calculates the classification accuracy."""
total_correct = sum(1 for example in examples.values()
if example.gold_class_index == example.pred_class_index)
return total_correct / len(examples)
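# End-to-end sketch added for illustration; the two file names are hypothetical
# placeholders, but the call sequence mirrors how the helpers above fit together.
def _example_denotation_eval_sketch():
  examples = read_data_examples_from_interactions('interactions.tfrecord')
  read_predictions('predictions.tsv', examples)  # fills the pred_* fields in place
  calc_structure_metrics(examples)
  return calc_denotation_accuracy(
      examples,
      denotation_errors_path=None,
      predictions_file_name='predictions.tsv')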
| apache-2.0 |
rahuldhote/scikit-learn | examples/mixture/plot_gmm_pdf.py | 284 | 1528 | """
=============================================
Density Estimation for a mixture of Gaussians
=============================================
Plot the density estimation of a mixture of two Gaussians. Data is
generated from two Gaussians with different centers and covariance
matrices.
"""
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm
from sklearn import mixture
n_samples = 300
# generate random sample, two components
np.random.seed(0)
# generate spherical data centered on (20, 20)
shifted_gaussian = np.random.randn(n_samples, 2) + np.array([20, 20])
# generate zero centered stretched Gaussian data
C = np.array([[0., -0.7], [3.5, .7]])
stretched_gaussian = np.dot(np.random.randn(n_samples, 2), C)
# concatenate the two datasets into the final training set
X_train = np.vstack([shifted_gaussian, stretched_gaussian])
# fit a Gaussian Mixture Model with two components
clf = mixture.GMM(n_components=2, covariance_type='full')
clf.fit(X_train)
# display predicted scores by the model as a contour plot
x = np.linspace(-20.0, 30.0)
y = np.linspace(-20.0, 40.0)
X, Y = np.meshgrid(x, y)
XX = np.array([X.ravel(), Y.ravel()]).T
Z = -clf.score_samples(XX)[0]
Z = Z.reshape(X.shape)
CS = plt.contour(X, Y, Z, norm=LogNorm(vmin=1.0, vmax=1000.0),
levels=np.logspace(0, 3, 10))
CB = plt.colorbar(CS, shrink=0.8, extend='both')
plt.scatter(X_train[:, 0], X_train[:, 1], .8)
plt.title('Negative log-likelihood predicted by a GMM')
plt.axis('tight')
plt.show()
| bsd-3-clause |
rednaxelafx/apache-spark | python/pyspark/worker.py | 3 | 27752 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Worker that receives input from Piped RDD.
"""
import os
import sys
import time
from inspect import getfullargspec
import importlib
# 'resource' is a Unix specific module.
has_resource_module = True
try:
import resource
except ImportError:
has_resource_module = False
import traceback
from pyspark.accumulators import _accumulatorRegistry
from pyspark.broadcast import Broadcast, _broadcastRegistry
from pyspark.java_gateway import local_connect_and_auth
from pyspark.taskcontext import BarrierTaskContext, TaskContext
from pyspark.files import SparkFiles
from pyspark.resource import ResourceInformation
from pyspark.rdd import PythonEvalType
from pyspark.serializers import write_with_length, write_int, read_long, read_bool, \
write_long, read_int, SpecialLengths, UTF8Deserializer, PickleSerializer, \
BatchedSerializer
from pyspark.sql.pandas.serializers import ArrowStreamPandasUDFSerializer, CogroupUDFSerializer
from pyspark.sql.pandas.types import to_arrow_type
from pyspark.sql.types import StructType
from pyspark.util import fail_on_stopiteration
from pyspark import shuffle
pickleSer = PickleSerializer()
utf8_deserializer = UTF8Deserializer()
def report_times(outfile, boot, init, finish):
write_int(SpecialLengths.TIMING_DATA, outfile)
write_long(int(1000 * boot), outfile)
write_long(int(1000 * init), outfile)
write_long(int(1000 * finish), outfile)
def add_path(path):
    # worker can be reused, so do not add the path multiple times
if path not in sys.path:
# overwrite system packages
sys.path.insert(1, path)
def read_command(serializer, file):
command = serializer._read_with_length(file)
if isinstance(command, Broadcast):
command = serializer.loads(command.value)
return command
def chain(f, g):
"""chain two functions together """
return lambda *a: g(f(*a))
def wrap_udf(f, return_type):
if return_type.needConversion():
toInternal = return_type.toInternal
return lambda *a: toInternal(f(*a))
else:
return lambda *a: f(*a)
def wrap_scalar_pandas_udf(f, return_type):
arrow_return_type = to_arrow_type(return_type)
def verify_result_type(result):
if not hasattr(result, "__len__"):
pd_type = "Pandas.DataFrame" if type(return_type) == StructType else "Pandas.Series"
raise TypeError("Return type of the user-defined function should be "
"{}, but is {}".format(pd_type, type(result)))
return result
def verify_result_length(result, length):
if len(result) != length:
raise RuntimeError("Result vector from pandas_udf was not the required length: "
"expected %d, got %d" % (length, len(result)))
return result
return lambda *a: (verify_result_length(
verify_result_type(f(*a)), len(a[0])), arrow_return_type)
def wrap_pandas_iter_udf(f, return_type):
arrow_return_type = to_arrow_type(return_type)
def verify_result_type(result):
if not hasattr(result, "__len__"):
pd_type = "Pandas.DataFrame" if type(return_type) == StructType else "Pandas.Series"
raise TypeError("Return type of the user-defined function should be "
"{}, but is {}".format(pd_type, type(result)))
return result
return lambda *iterator: map(lambda res: (res, arrow_return_type),
map(verify_result_type, f(*iterator)))
def wrap_cogrouped_map_pandas_udf(f, return_type, argspec):
def wrapped(left_key_series, left_value_series, right_key_series, right_value_series):
import pandas as pd
left_df = pd.concat(left_value_series, axis=1)
right_df = pd.concat(right_value_series, axis=1)
if len(argspec.args) == 2:
result = f(left_df, right_df)
elif len(argspec.args) == 3:
key_series = left_key_series if not left_df.empty else right_key_series
key = tuple(s[0] for s in key_series)
result = f(key, left_df, right_df)
if not isinstance(result, pd.DataFrame):
raise TypeError("Return type of the user-defined function should be "
"pandas.DataFrame, but is {}".format(type(result)))
if not len(result.columns) == len(return_type):
raise RuntimeError(
"Number of columns of the returned pandas.DataFrame "
"doesn't match specified schema. "
"Expected: {} Actual: {}".format(len(return_type), len(result.columns)))
return result
return lambda kl, vl, kr, vr: [(wrapped(kl, vl, kr, vr), to_arrow_type(return_type))]
def wrap_grouped_map_pandas_udf(f, return_type, argspec):
def wrapped(key_series, value_series):
import pandas as pd
if len(argspec.args) == 1:
result = f(pd.concat(value_series, axis=1))
elif len(argspec.args) == 2:
key = tuple(s[0] for s in key_series)
result = f(key, pd.concat(value_series, axis=1))
if not isinstance(result, pd.DataFrame):
raise TypeError("Return type of the user-defined function should be "
"pandas.DataFrame, but is {}".format(type(result)))
if not len(result.columns) == len(return_type):
raise RuntimeError(
"Number of columns of the returned pandas.DataFrame "
"doesn't match specified schema. "
"Expected: {} Actual: {}".format(len(return_type), len(result.columns)))
return result
return lambda k, v: [(wrapped(k, v), to_arrow_type(return_type))]
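# For orientation (comment-only sketch; the names `subtract_mean` and `pdf` are
# made up): the user-side function wrapped above either takes the whole group as
# one pandas.DataFrame,
#     def subtract_mean(pdf):
#         return pdf.assign(v=pdf.v - pdf.v.mean())
# or additionally takes the grouping key tuple as its first argument,
#     def subtract_mean(key, pdf): ...
# which is what the len(argspec.args) == 1 / == 2 branches distinguish.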
def wrap_grouped_agg_pandas_udf(f, return_type):
arrow_return_type = to_arrow_type(return_type)
def wrapped(*series):
import pandas as pd
result = f(*series)
return pd.Series([result])
return lambda *a: (wrapped(*a), arrow_return_type)
def wrap_window_agg_pandas_udf(f, return_type, runner_conf, udf_index):
window_bound_types_str = runner_conf.get('pandas_window_bound_types')
window_bound_type = [t.strip().lower() for t in window_bound_types_str.split(',')][udf_index]
if window_bound_type == 'bounded':
return wrap_bounded_window_agg_pandas_udf(f, return_type)
elif window_bound_type == 'unbounded':
return wrap_unbounded_window_agg_pandas_udf(f, return_type)
else:
raise RuntimeError("Invalid window bound type: {} ".format(window_bound_type))
def wrap_unbounded_window_agg_pandas_udf(f, return_type):
# This is similar to grouped_agg_pandas_udf, the only difference
# is that window_agg_pandas_udf needs to repeat the return value
# to match window length, where grouped_agg_pandas_udf just returns
# the scalar value.
arrow_return_type = to_arrow_type(return_type)
def wrapped(*series):
import pandas as pd
result = f(*series)
return pd.Series([result]).repeat(len(series[0]))
return lambda *a: (wrapped(*a), arrow_return_type)
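# Descriptive note on the wrapper above: the scalar result is repeated to the
# window length, e.g. a mean of 2.0 over a three-row unbounded window comes
# back as pd.Series([2.0, 2.0, 2.0]).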
def wrap_bounded_window_agg_pandas_udf(f, return_type):
arrow_return_type = to_arrow_type(return_type)
def wrapped(begin_index, end_index, *series):
import pandas as pd
result = []
# Index operation is faster on np.ndarray,
# So we turn the index series into np array
# here for performance
begin_array = begin_index.values
end_array = end_index.values
for i in range(len(begin_array)):
            # Note: Creating a slice from a series for each window is
            # actually pretty expensive. However, there
            # is no easy way to reduce the cost here.
            # Note: s.iloc[i : j] is about 30% faster than s[i: j], with
            # the caveat that the created slices share the same
            # memory with s. Therefore, users are not allowed to
            # change the values of the input series inside the window
            # function. It is rare that a user needs to modify the
            # input series in the window function, and therefore,
            # this is a reasonable restriction.
# Note: Calling reset_index on the slices will increase the cost
# of creating slices by about 100%. Therefore, for performance
# reasons we don't do it here.
series_slices = [s.iloc[begin_array[i]: end_array[i]] for s in series]
result.append(f(*series_slices))
return pd.Series(result)
return lambda *a: (wrapped(*a), arrow_return_type)
def read_single_udf(pickleSer, infile, eval_type, runner_conf, udf_index):
num_arg = read_int(infile)
arg_offsets = [read_int(infile) for i in range(num_arg)]
chained_func = None
for i in range(read_int(infile)):
f, return_type = read_command(pickleSer, infile)
if chained_func is None:
chained_func = f
else:
chained_func = chain(chained_func, f)
if eval_type == PythonEvalType.SQL_SCALAR_PANDAS_ITER_UDF:
func = chained_func
else:
# make sure StopIteration's raised in the user code are not ignored
# when they are processed in a for loop, raise them as RuntimeError's instead
func = fail_on_stopiteration(chained_func)
# the last returnType will be the return type of UDF
if eval_type == PythonEvalType.SQL_SCALAR_PANDAS_UDF:
return arg_offsets, wrap_scalar_pandas_udf(func, return_type)
elif eval_type == PythonEvalType.SQL_SCALAR_PANDAS_ITER_UDF:
return arg_offsets, wrap_pandas_iter_udf(func, return_type)
elif eval_type == PythonEvalType.SQL_MAP_PANDAS_ITER_UDF:
return arg_offsets, wrap_pandas_iter_udf(func, return_type)
elif eval_type == PythonEvalType.SQL_GROUPED_MAP_PANDAS_UDF:
argspec = getfullargspec(chained_func) # signature was lost when wrapping it
return arg_offsets, wrap_grouped_map_pandas_udf(func, return_type, argspec)
elif eval_type == PythonEvalType.SQL_COGROUPED_MAP_PANDAS_UDF:
argspec = getfullargspec(chained_func) # signature was lost when wrapping it
return arg_offsets, wrap_cogrouped_map_pandas_udf(func, return_type, argspec)
elif eval_type == PythonEvalType.SQL_GROUPED_AGG_PANDAS_UDF:
return arg_offsets, wrap_grouped_agg_pandas_udf(func, return_type)
elif eval_type == PythonEvalType.SQL_WINDOW_AGG_PANDAS_UDF:
return arg_offsets, wrap_window_agg_pandas_udf(func, return_type, runner_conf, udf_index)
elif eval_type == PythonEvalType.SQL_BATCHED_UDF:
return arg_offsets, wrap_udf(func, return_type)
else:
raise ValueError("Unknown eval type: {}".format(eval_type))
def read_udfs(pickleSer, infile, eval_type):
runner_conf = {}
if eval_type in (PythonEvalType.SQL_SCALAR_PANDAS_UDF,
PythonEvalType.SQL_COGROUPED_MAP_PANDAS_UDF,
PythonEvalType.SQL_SCALAR_PANDAS_ITER_UDF,
PythonEvalType.SQL_MAP_PANDAS_ITER_UDF,
PythonEvalType.SQL_GROUPED_MAP_PANDAS_UDF,
PythonEvalType.SQL_GROUPED_AGG_PANDAS_UDF,
PythonEvalType.SQL_WINDOW_AGG_PANDAS_UDF):
# Load conf used for pandas_udf evaluation
num_conf = read_int(infile)
for i in range(num_conf):
k = utf8_deserializer.loads(infile)
v = utf8_deserializer.loads(infile)
runner_conf[k] = v
# NOTE: if timezone is set here, that implies respectSessionTimeZone is True
timezone = runner_conf.get("spark.sql.session.timeZone", None)
safecheck = runner_conf.get("spark.sql.execution.pandas.convertToArrowArraySafely",
"false").lower() == 'true'
# Used by SQL_GROUPED_MAP_PANDAS_UDF and SQL_SCALAR_PANDAS_UDF when returning StructType
assign_cols_by_name = runner_conf.get(
"spark.sql.legacy.execution.pandas.groupedMap.assignColumnsByName", "true")\
.lower() == "true"
if eval_type == PythonEvalType.SQL_COGROUPED_MAP_PANDAS_UDF:
ser = CogroupUDFSerializer(timezone, safecheck, assign_cols_by_name)
else:
# Scalar Pandas UDF handles struct type arguments as pandas DataFrames instead of
# pandas Series. See SPARK-27240.
df_for_struct = (eval_type == PythonEvalType.SQL_SCALAR_PANDAS_UDF or
eval_type == PythonEvalType.SQL_SCALAR_PANDAS_ITER_UDF or
eval_type == PythonEvalType.SQL_MAP_PANDAS_ITER_UDF)
ser = ArrowStreamPandasUDFSerializer(timezone, safecheck, assign_cols_by_name,
df_for_struct)
else:
ser = BatchedSerializer(PickleSerializer(), 100)
num_udfs = read_int(infile)
is_scalar_iter = eval_type == PythonEvalType.SQL_SCALAR_PANDAS_ITER_UDF
is_map_iter = eval_type == PythonEvalType.SQL_MAP_PANDAS_ITER_UDF
if is_scalar_iter or is_map_iter:
if is_scalar_iter:
assert num_udfs == 1, "One SCALAR_ITER UDF expected here."
if is_map_iter:
assert num_udfs == 1, "One MAP_ITER UDF expected here."
arg_offsets, udf = read_single_udf(
pickleSer, infile, eval_type, runner_conf, udf_index=0)
def func(_, iterator):
num_input_rows = 0
def map_batch(batch):
nonlocal num_input_rows
udf_args = [batch[offset] for offset in arg_offsets]
num_input_rows += len(udf_args[0])
if len(udf_args) == 1:
return udf_args[0]
else:
return tuple(udf_args)
iterator = map(map_batch, iterator)
result_iter = udf(iterator)
num_output_rows = 0
for result_batch, result_type in result_iter:
num_output_rows += len(result_batch)
# This assert is for Scalar Iterator UDF to fail fast.
# The length of the entire input can only be explicitly known
                # by consuming the input iterator on the user side. Therefore,
# it's very unlikely the output length is higher than
# input length.
assert is_map_iter or num_output_rows <= num_input_rows, \
"Pandas SCALAR_ITER UDF outputted more rows than input rows."
yield (result_batch, result_type)
if is_scalar_iter:
try:
next(iterator)
except StopIteration:
pass
else:
raise RuntimeError("pandas iterator UDF should exhaust the input "
"iterator.")
if num_output_rows != num_input_rows:
raise RuntimeError(
"The length of output in Scalar iterator pandas UDF should be "
"the same with the input's; however, the length of output was %d and the "
"length of input was %d." % (num_output_rows, num_input_rows))
# profiling is not supported for UDF
return func, None, ser, ser
def extract_key_value_indexes(grouped_arg_offsets):
"""
Helper function to extract the key and value indexes from arg_offsets for the grouped and
cogrouped pandas udfs. See BasePandasGroupExec.resolveArgOffsets for equivalent scala code.
:param grouped_arg_offsets: List containing the key and value indexes of columns of the
DataFrames to be passed to the udf. It consists of n repeating groups where n is the
number of DataFrames. Each group has the following format:
group[0]: length of group
group[1]: length of key indexes
                group[2 .. group[1] + 1]: key attributes
                group[group[1] + 2 .. group[0]]: value attributes
"""
parsed = []
idx = 0
while idx < len(grouped_arg_offsets):
offsets_len = grouped_arg_offsets[idx]
idx += 1
offsets = grouped_arg_offsets[idx: idx + offsets_len]
split_index = offsets[0] + 1
offset_keys = offsets[1: split_index]
offset_values = offsets[split_index:]
parsed.append([offset_keys, offset_values])
idx += offsets_len
return parsed
if eval_type == PythonEvalType.SQL_GROUPED_MAP_PANDAS_UDF:
# We assume there is only one UDF here because grouped map doesn't
# support combining multiple UDFs.
assert num_udfs == 1
# See FlatMapGroupsInPandasExec for how arg_offsets are used to
# distinguish between grouping attributes and data attributes
arg_offsets, f = read_single_udf(pickleSer, infile, eval_type, runner_conf, udf_index=0)
parsed_offsets = extract_key_value_indexes(arg_offsets)
# Create function like this:
# mapper a: f([a[0]], [a[0], a[1]])
def mapper(a):
keys = [a[o] for o in parsed_offsets[0][0]]
vals = [a[o] for o in parsed_offsets[0][1]]
return f(keys, vals)
elif eval_type == PythonEvalType.SQL_COGROUPED_MAP_PANDAS_UDF:
# We assume there is only one UDF here because cogrouped map doesn't
# support combining multiple UDFs.
assert num_udfs == 1
arg_offsets, f = read_single_udf(pickleSer, infile, eval_type, runner_conf, udf_index=0)
parsed_offsets = extract_key_value_indexes(arg_offsets)
def mapper(a):
df1_keys = [a[0][o] for o in parsed_offsets[0][0]]
df1_vals = [a[0][o] for o in parsed_offsets[0][1]]
df2_keys = [a[1][o] for o in parsed_offsets[1][0]]
df2_vals = [a[1][o] for o in parsed_offsets[1][1]]
return f(df1_keys, df1_vals, df2_keys, df2_vals)
else:
udfs = []
for i in range(num_udfs):
udfs.append(read_single_udf(pickleSer, infile, eval_type, runner_conf, udf_index=i))
def mapper(a):
result = tuple(f(*[a[o] for o in arg_offsets]) for (arg_offsets, f) in udfs)
# In the special case of a single UDF this will return a single result rather
# than a tuple of results; this is the format that the JVM side expects.
if len(result) == 1:
return result[0]
else:
return result
func = lambda _, it: map(mapper, it)
# profiling is not supported for UDF
return func, None, ser, ser
def main(infile, outfile):
try:
boot_time = time.time()
split_index = read_int(infile)
if split_index == -1: # for unit tests
sys.exit(-1)
version = utf8_deserializer.loads(infile)
if version != "%d.%d" % sys.version_info[:2]:
raise Exception(("Python in worker has different version %s than that in " +
"driver %s, PySpark cannot run with different minor versions. " +
"Please check environment variables PYSPARK_PYTHON and " +
"PYSPARK_DRIVER_PYTHON are correctly set.") %
("%d.%d" % sys.version_info[:2], version))
# read inputs only for a barrier task
isBarrier = read_bool(infile)
boundPort = read_int(infile)
secret = UTF8Deserializer().loads(infile)
# set up memory limits
memory_limit_mb = int(os.environ.get('PYSPARK_EXECUTOR_MEMORY_MB', "-1"))
if memory_limit_mb > 0 and has_resource_module:
total_memory = resource.RLIMIT_AS
try:
(soft_limit, hard_limit) = resource.getrlimit(total_memory)
msg = "Current mem limits: {0} of max {1}\n".format(soft_limit, hard_limit)
print(msg, file=sys.stderr)
# convert to bytes
new_limit = memory_limit_mb * 1024 * 1024
if soft_limit == resource.RLIM_INFINITY or new_limit < soft_limit:
msg = "Setting mem limits to {0} of max {1}\n".format(new_limit, new_limit)
print(msg, file=sys.stderr)
resource.setrlimit(total_memory, (new_limit, new_limit))
except (resource.error, OSError, ValueError) as e:
# not all systems support resource limits, so warn instead of failing
print("WARN: Failed to set memory limit: {0}\n".format(e), file=sys.stderr)
# initialize global state
taskContext = None
if isBarrier:
taskContext = BarrierTaskContext._getOrCreate()
BarrierTaskContext._initialize(boundPort, secret)
# Set the task context instance here, so we can get it by TaskContext.get for
# both TaskContext and BarrierTaskContext
TaskContext._setTaskContext(taskContext)
else:
taskContext = TaskContext._getOrCreate()
# read inputs for TaskContext info
taskContext._stageId = read_int(infile)
taskContext._partitionId = read_int(infile)
taskContext._attemptNumber = read_int(infile)
taskContext._taskAttemptId = read_long(infile)
taskContext._resources = {}
for r in range(read_int(infile)):
key = utf8_deserializer.loads(infile)
name = utf8_deserializer.loads(infile)
addresses = []
for a in range(read_int(infile)):
addresses.append(utf8_deserializer.loads(infile))
taskContext._resources[key] = ResourceInformation(name, addresses)
taskContext._localProperties = dict()
for i in range(read_int(infile)):
k = utf8_deserializer.loads(infile)
v = utf8_deserializer.loads(infile)
taskContext._localProperties[k] = v
shuffle.MemoryBytesSpilled = 0
shuffle.DiskBytesSpilled = 0
_accumulatorRegistry.clear()
# fetch name of workdir
spark_files_dir = utf8_deserializer.loads(infile)
SparkFiles._root_directory = spark_files_dir
SparkFiles._is_running_on_worker = True
# fetch names of includes (*.zip and *.egg files) and construct PYTHONPATH
add_path(spark_files_dir) # *.py files that were added will be copied here
num_python_includes = read_int(infile)
for _ in range(num_python_includes):
filename = utf8_deserializer.loads(infile)
add_path(os.path.join(spark_files_dir, filename))
importlib.invalidate_caches()
# fetch names and values of broadcast variables
needs_broadcast_decryption_server = read_bool(infile)
num_broadcast_variables = read_int(infile)
if needs_broadcast_decryption_server:
# read the decrypted data from a server in the jvm
port = read_int(infile)
auth_secret = utf8_deserializer.loads(infile)
(broadcast_sock_file, _) = local_connect_and_auth(port, auth_secret)
for _ in range(num_broadcast_variables):
bid = read_long(infile)
if bid >= 0:
if needs_broadcast_decryption_server:
read_bid = read_long(broadcast_sock_file)
assert(read_bid == bid)
_broadcastRegistry[bid] = \
Broadcast(sock_file=broadcast_sock_file)
else:
path = utf8_deserializer.loads(infile)
_broadcastRegistry[bid] = Broadcast(path=path)
else:
bid = - bid - 1
_broadcastRegistry.pop(bid)
if needs_broadcast_decryption_server:
broadcast_sock_file.write(b'1')
broadcast_sock_file.close()
_accumulatorRegistry.clear()
eval_type = read_int(infile)
if eval_type == PythonEvalType.NON_UDF:
func, profiler, deserializer, serializer = read_command(pickleSer, infile)
else:
func, profiler, deserializer, serializer = read_udfs(pickleSer, infile, eval_type)
init_time = time.time()
def process():
iterator = deserializer.load_stream(infile)
out_iter = func(split_index, iterator)
try:
serializer.dump_stream(out_iter, outfile)
finally:
if hasattr(out_iter, 'close'):
out_iter.close()
if profiler:
profiler.profile(process)
else:
process()
        # Reset the task context to None. This is guard code to avoid residual context when the
        # worker is reused.
TaskContext._setTaskContext(None)
BarrierTaskContext._setTaskContext(None)
except Exception:
try:
exc_info = traceback.format_exc()
if isinstance(exc_info, bytes):
                # exc_info may contain other encoding bytes; replace the invalid bytes and convert
# it back to utf-8 again
exc_info = exc_info.decode("utf-8", "replace").encode("utf-8")
else:
exc_info = exc_info.encode("utf-8")
write_int(SpecialLengths.PYTHON_EXCEPTION_THROWN, outfile)
write_with_length(exc_info, outfile)
except IOError:
# JVM close the socket
pass
except Exception:
# Write the error to stderr if it happened while serializing
print("PySpark worker failed with exception:", file=sys.stderr)
print(traceback.format_exc(), file=sys.stderr)
sys.exit(-1)
finish_time = time.time()
report_times(outfile, boot_time, init_time, finish_time)
write_long(shuffle.MemoryBytesSpilled, outfile)
write_long(shuffle.DiskBytesSpilled, outfile)
# Mark the beginning of the accumulators section of the output
write_int(SpecialLengths.END_OF_DATA_SECTION, outfile)
write_int(len(_accumulatorRegistry), outfile)
for (aid, accum) in _accumulatorRegistry.items():
pickleSer._write_with_length((aid, accum._value), outfile)
# check end of stream
if read_int(infile) == SpecialLengths.END_OF_STREAM:
write_int(SpecialLengths.END_OF_STREAM, outfile)
else:
# write a different value to tell JVM to not reuse this worker
write_int(SpecialLengths.END_OF_DATA_SECTION, outfile)
sys.exit(-1)
if __name__ == '__main__':
# Read information about how to connect back to the JVM from the environment.
java_port = int(os.environ["PYTHON_WORKER_FACTORY_PORT"])
auth_secret = os.environ["PYTHON_WORKER_FACTORY_SECRET"]
(sock_file, _) = local_connect_and_auth(java_port, auth_secret)
main(sock_file, sock_file)
| apache-2.0 |
glennq/scikit-learn | sklearn/utils/tests/test_multiclass.py | 58 | 14316 |
from __future__ import division
import numpy as np
import scipy.sparse as sp
from itertools import product
from sklearn.externals.six.moves import xrange
from sklearn.externals.six import iteritems
from scipy.sparse import issparse
from scipy.sparse import csc_matrix
from scipy.sparse import csr_matrix
from scipy.sparse import coo_matrix
from scipy.sparse import dok_matrix
from scipy.sparse import lil_matrix
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regex
from sklearn.utils.multiclass import unique_labels
from sklearn.utils.multiclass import is_multilabel
from sklearn.utils.multiclass import type_of_target
from sklearn.utils.multiclass import class_distribution
from sklearn.utils.multiclass import check_classification_targets
from sklearn.utils.metaestimators import _safe_split
from sklearn.model_selection import ShuffleSplit
from sklearn.svm import SVC
from sklearn import datasets
class NotAnArray(object):
"""An object that is convertable to an array. This is useful to
simulate a Pandas timeseries."""
def __init__(self, data):
self.data = data
def __array__(self, dtype=None):
return self.data
EXAMPLES = {
'multilabel-indicator': [
# valid when the data is formatted as sparse or dense, identified
# by CSR format when the testing takes place
csr_matrix(np.random.RandomState(42).randint(2, size=(10, 10))),
csr_matrix(np.array([[0, 1], [1, 0]])),
csr_matrix(np.array([[0, 1], [1, 0]], dtype=np.bool)),
csr_matrix(np.array([[0, 1], [1, 0]], dtype=np.int8)),
csr_matrix(np.array([[0, 1], [1, 0]], dtype=np.uint8)),
csr_matrix(np.array([[0, 1], [1, 0]], dtype=np.float)),
csr_matrix(np.array([[0, 1], [1, 0]], dtype=np.float32)),
csr_matrix(np.array([[0, 0], [0, 0]])),
csr_matrix(np.array([[0, 1]])),
# Only valid when data is dense
np.array([[-1, 1], [1, -1]]),
np.array([[-3, 3], [3, -3]]),
NotAnArray(np.array([[-3, 3], [3, -3]])),
],
'multiclass': [
[1, 0, 2, 2, 1, 4, 2, 4, 4, 4],
np.array([1, 0, 2]),
np.array([1, 0, 2], dtype=np.int8),
np.array([1, 0, 2], dtype=np.uint8),
np.array([1, 0, 2], dtype=np.float),
np.array([1, 0, 2], dtype=np.float32),
np.array([[1], [0], [2]]),
NotAnArray(np.array([1, 0, 2])),
[0, 1, 2],
['a', 'b', 'c'],
np.array([u'a', u'b', u'c']),
np.array([u'a', u'b', u'c'], dtype=object),
np.array(['a', 'b', 'c'], dtype=object),
],
'multiclass-multioutput': [
np.array([[1, 0, 2, 2], [1, 4, 2, 4]]),
np.array([[1, 0, 2, 2], [1, 4, 2, 4]], dtype=np.int8),
np.array([[1, 0, 2, 2], [1, 4, 2, 4]], dtype=np.uint8),
np.array([[1, 0, 2, 2], [1, 4, 2, 4]], dtype=np.float),
np.array([[1, 0, 2, 2], [1, 4, 2, 4]], dtype=np.float32),
np.array([['a', 'b'], ['c', 'd']]),
np.array([[u'a', u'b'], [u'c', u'd']]),
np.array([[u'a', u'b'], [u'c', u'd']], dtype=object),
np.array([[1, 0, 2]]),
NotAnArray(np.array([[1, 0, 2]])),
],
'binary': [
[0, 1],
[1, 1],
[],
[0],
np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1]),
np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1], dtype=np.bool),
np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1], dtype=np.int8),
np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1], dtype=np.uint8),
np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1], dtype=np.float),
np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1], dtype=np.float32),
np.array([[0], [1]]),
NotAnArray(np.array([[0], [1]])),
[1, -1],
[3, 5],
['a'],
['a', 'b'],
['abc', 'def'],
np.array(['abc', 'def']),
[u'a', u'b'],
np.array(['abc', 'def'], dtype=object),
],
'continuous': [
[1e-5],
[0, .5],
np.array([[0], [.5]]),
np.array([[0], [.5]], dtype=np.float32),
],
'continuous-multioutput': [
np.array([[0, .5], [.5, 0]]),
np.array([[0, .5], [.5, 0]], dtype=np.float32),
np.array([[0, .5]]),
],
'unknown': [
[[]],
[()],
# sequence of sequences that weren't supported even before deprecation
np.array([np.array([]), np.array([1, 2, 3])], dtype=object),
[np.array([]), np.array([1, 2, 3])],
[set([1, 2, 3]), set([1, 2])],
[frozenset([1, 2, 3]), frozenset([1, 2])],
# and also confusable as sequences of sequences
[{0: 'a', 1: 'b'}, {0: 'a'}],
# empty second dimension
np.array([[], []]),
# 3d
np.array([[[0, 1], [2, 3]], [[4, 5], [6, 7]]]),
]
}
NON_ARRAY_LIKE_EXAMPLES = [
set([1, 2, 3]),
{0: 'a', 1: 'b'},
{0: [5], 1: [5]},
'abc',
frozenset([1, 2, 3]),
None,
]
MULTILABEL_SEQUENCES = [
[[1], [2], [0, 1]],
[(), (2), (0, 1)],
np.array([[], [1, 2]], dtype='object'),
NotAnArray(np.array([[], [1, 2]], dtype='object'))
]
def test_unique_labels():
# Empty iterable
assert_raises(ValueError, unique_labels)
# Multiclass problem
assert_array_equal(unique_labels(xrange(10)), np.arange(10))
assert_array_equal(unique_labels(np.arange(10)), np.arange(10))
assert_array_equal(unique_labels([4, 0, 2]), np.array([0, 2, 4]))
# Multilabel indicator
assert_array_equal(unique_labels(np.array([[0, 0, 1],
[1, 0, 1],
[0, 0, 0]])),
np.arange(3))
assert_array_equal(unique_labels(np.array([[0, 0, 1],
[0, 0, 0]])),
np.arange(3))
# Several arrays passed
assert_array_equal(unique_labels([4, 0, 2], xrange(5)),
np.arange(5))
assert_array_equal(unique_labels((0, 1, 2), (0,), (2, 1)),
np.arange(3))
# Border line case with binary indicator matrix
assert_raises(ValueError, unique_labels, [4, 0, 2], np.ones((5, 5)))
assert_raises(ValueError, unique_labels, np.ones((5, 4)), np.ones((5, 5)))
assert_array_equal(unique_labels(np.ones((4, 5)), np.ones((5, 5))),
np.arange(5))
def test_unique_labels_non_specific():
# Test unique_labels with a variety of collected examples
# Smoke test for all supported format
for format in ["binary", "multiclass", "multilabel-indicator"]:
for y in EXAMPLES[format]:
unique_labels(y)
    # We don't support those formats at the moment
for example in NON_ARRAY_LIKE_EXAMPLES:
assert_raises(ValueError, unique_labels, example)
for y_type in ["unknown", "continuous", 'continuous-multioutput',
'multiclass-multioutput']:
for example in EXAMPLES[y_type]:
assert_raises(ValueError, unique_labels, example)
def test_unique_labels_mixed_types():
# Mix with binary or multiclass and multilabel
mix_clf_format = product(EXAMPLES["multilabel-indicator"],
EXAMPLES["multiclass"] +
EXAMPLES["binary"])
for y_multilabel, y_multiclass in mix_clf_format:
assert_raises(ValueError, unique_labels, y_multiclass, y_multilabel)
assert_raises(ValueError, unique_labels, y_multilabel, y_multiclass)
assert_raises(ValueError, unique_labels, [[1, 2]], [["a", "d"]])
assert_raises(ValueError, unique_labels, ["1", 2])
assert_raises(ValueError, unique_labels, [["1", 2], [1, 3]])
assert_raises(ValueError, unique_labels, [["1", "2"], [2, 3]])
def test_is_multilabel():
for group, group_examples in iteritems(EXAMPLES):
if group in ['multilabel-indicator']:
dense_assert_, dense_exp = assert_true, 'True'
else:
dense_assert_, dense_exp = assert_false, 'False'
for example in group_examples:
# Only mark explicitly defined sparse examples as valid sparse
# multilabel-indicators
if group == 'multilabel-indicator' and issparse(example):
sparse_assert_, sparse_exp = assert_true, 'True'
else:
sparse_assert_, sparse_exp = assert_false, 'False'
if (issparse(example) or
(hasattr(example, '__array__') and
np.asarray(example).ndim == 2 and
np.asarray(example).dtype.kind in 'biuf' and
np.asarray(example).shape[1] > 0)):
examples_sparse = [sparse_matrix(example)
for sparse_matrix in [coo_matrix,
csc_matrix,
csr_matrix,
dok_matrix,
lil_matrix]]
for exmpl_sparse in examples_sparse:
sparse_assert_(is_multilabel(exmpl_sparse),
msg=('is_multilabel(%r)'
' should be %s')
% (exmpl_sparse, sparse_exp))
# Densify sparse examples before testing
if issparse(example):
example = example.toarray()
dense_assert_(is_multilabel(example),
msg='is_multilabel(%r) should be %s'
% (example, dense_exp))
def test_check_classification_targets():
for y_type in EXAMPLES.keys():
if y_type in ["unknown", "continuous", 'continuous-multioutput']:
for example in EXAMPLES[y_type]:
msg = 'Unknown label type: '
assert_raises_regex(ValueError, msg,
check_classification_targets, example)
else:
for example in EXAMPLES[y_type]:
check_classification_targets(example)
# @ignore_warnings
def test_type_of_target():
for group, group_examples in iteritems(EXAMPLES):
for example in group_examples:
assert_equal(type_of_target(example), group,
msg=('type_of_target(%r) should be %r, got %r'
% (example, group, type_of_target(example))))
for example in NON_ARRAY_LIKE_EXAMPLES:
        msg_regex = r'Expected array-like \(array or non-string sequence\).*'
assert_raises_regex(ValueError, msg_regex, type_of_target, example)
for example in MULTILABEL_SEQUENCES:
msg = ('You appear to be using a legacy multi-label data '
'representation. Sequence of sequences are no longer supported;'
' use a binary array or sparse matrix instead.')
assert_raises_regex(ValueError, msg, type_of_target, example)
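# For orientation, a few of the groupings above map directly onto
# type_of_target results (an illustration derived from the EXAMPLES dict, not
# an exhaustive contract):
#
#     type_of_target([0, 1])  -> 'binary'
#     type_of_target([1e-5])  -> 'continuous'
#     type_of_target([[]])    -> 'unknown'
#
# i.e. every example listed under a key is expected to be typed as that key.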
def test_class_distribution():
y = np.array([[1, 0, 0, 1],
[2, 2, 0, 1],
[1, 3, 0, 1],
[4, 2, 0, 1],
[2, 0, 0, 1],
[1, 3, 0, 1]])
# Define the sparse matrix with a mix of implicit and explicit zeros
data = np.array([1, 2, 1, 4, 2, 1, 0, 2, 3, 2, 3, 1, 1, 1, 1, 1, 1])
indices = np.array([0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 5, 0, 1, 2, 3, 4, 5])
indptr = np.array([0, 6, 11, 11, 17])
y_sp = sp.csc_matrix((data, indices, indptr), shape=(6, 4))
classes, n_classes, class_prior = class_distribution(y)
classes_sp, n_classes_sp, class_prior_sp = class_distribution(y_sp)
classes_expected = [[1, 2, 4],
[0, 2, 3],
[0],
[1]]
n_classes_expected = [3, 3, 1, 1]
class_prior_expected = [[3/6, 2/6, 1/6],
[1/3, 1/3, 1/3],
[1.0],
[1.0]]
for k in range(y.shape[1]):
assert_array_almost_equal(classes[k], classes_expected[k])
assert_array_almost_equal(n_classes[k], n_classes_expected[k])
assert_array_almost_equal(class_prior[k], class_prior_expected[k])
assert_array_almost_equal(classes_sp[k], classes_expected[k])
assert_array_almost_equal(n_classes_sp[k], n_classes_expected[k])
assert_array_almost_equal(class_prior_sp[k], class_prior_expected[k])
# Test again with explicit sample weights
(classes,
n_classes,
class_prior) = class_distribution(y, [1.0, 2.0, 1.0, 2.0, 1.0, 2.0])
(classes_sp,
n_classes_sp,
     class_prior_sp) = class_distribution(y_sp, [1.0, 2.0, 1.0, 2.0, 1.0, 2.0])
class_prior_expected = [[4/9, 3/9, 2/9],
[2/9, 4/9, 3/9],
[1.0],
[1.0]]
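    # Worked check for column 0: y[:, 0] == [1, 2, 1, 4, 2, 1] with sample
    # weights [1, 2, 1, 2, 1, 2] gives weighted counts 1+1+2 = 4 for class 1,
    # 2+1 = 3 for class 2 and 2 for class 4, out of a total of 9 -- hence the
    # expected priors [4/9, 3/9, 2/9] above.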
for k in range(y.shape[1]):
assert_array_almost_equal(classes[k], classes_expected[k])
assert_array_almost_equal(n_classes[k], n_classes_expected[k])
assert_array_almost_equal(class_prior[k], class_prior_expected[k])
assert_array_almost_equal(classes_sp[k], classes_expected[k])
assert_array_almost_equal(n_classes_sp[k], n_classes_expected[k])
assert_array_almost_equal(class_prior_sp[k], class_prior_expected[k])
def test_safe_split_with_precomputed_kernel():
clf = SVC()
clfp = SVC(kernel="precomputed")
iris = datasets.load_iris()
X, y = iris.data, iris.target
K = np.dot(X, X.T)
cv = ShuffleSplit(test_size=0.25, random_state=0)
train, test = list(cv.split(X))[0]
X_train, y_train = _safe_split(clf, X, y, train)
K_train, y_train2 = _safe_split(clfp, K, y, train)
assert_array_almost_equal(K_train, np.dot(X_train, X_train.T))
assert_array_almost_equal(y_train, y_train2)
X_test, y_test = _safe_split(clf, X, y, test, train)
K_test, y_test2 = _safe_split(clfp, K, y, test, train)
assert_array_almost_equal(K_test, np.dot(X_test, X_train.T))
assert_array_almost_equal(y_test, y_test2)
| bsd-3-clause |
chankeypathak/pandas-matplotlib-examples | Lesson 3/pandas_matplot_excel.py | 1 | 4649 | import numpy.random as np
import pandas as pd
import matplotlib.pyplot as plt
# set seed
np.seed(111)
# Function to generate test data
def CreateDataSet(Number=1):
Output = []
for i in range(Number):
# Create a weekly (mondays) date range
rng = pd.date_range(start='1/1/2009', end='12/31/2012', freq='W-MON')
# Create random data
data = np.randint(low=25, high=1000, size=len(rng))
# Status pool
status = [1, 2, 3]
# Make a random list of statuses
random_status = [status[np.randint(low=0, high=len(status))] for i in range(len(rng))]
# State pool
states = ['GA', 'FL', 'fl', 'NY', 'NJ', 'TX']
# Make a random list of states
random_states = [states[np.randint(low=0, high=len(states))] for i in range(len(rng))]
Output.extend(zip(random_states, random_status, data, rng))
return Output
dataset = CreateDataSet(4)
df = pd.DataFrame(data=dataset, columns=['State','Status','CustomerCount','StatusDate'])
# Save results to excel
df.to_excel('Lesson3.xlsx', index=False)
# Location of file
Location = 'Lesson3.xlsx'
# Parse a specific sheet
df = pd.read_excel(Location, 0, index_col='StatusDate')
# Clean State Column, convert to upper case
df['State'] = df.State.apply(lambda x: x.upper())
# Only grab where Status == 1
mask = df['Status'] == 1
df = df[mask]
# Convert NJ to NY
mask = df.State == 'NJ'
df.loc[mask, 'State'] = 'NY'
df['CustomerCount'].plot(figsize=(15,5));
#plt.show()
sortdf = df[df['State']=='NY'].sort_index(axis=0)
#print sortdf.head(10)
# Group by State and StatusDate
Daily = df.reset_index().groupby(['State','StatusDate']).sum()
#print Daily.head()
del Daily['Status']
#print Daily.head()
# What is the index of the dataframe
#print Daily.index
# Select the State index
#print Daily.index.levels[0]
# Select the StatusDate index
#print Daily.index.levels[1]
#Daily.loc['FL'].plot()
#Daily.loc['GA'].plot()
#Daily.loc['NY'].plot()
#Daily.loc['TX'].plot();
#plt.show()
# Calculate Outliers
StateYearMonth = Daily.groupby([Daily.index.get_level_values(0), Daily.index.get_level_values(1).year, Daily.index.get_level_values(1).month])
# Tukey fences: Q1 - 1.5*IQR and Q3 + 1.5*IQR, where IQR = Q3 - Q1
Daily['Lower'] = StateYearMonth['CustomerCount'].transform(lambda x: x.quantile(q=.25) - 1.5*(x.quantile(q=.75) - x.quantile(q=.25)))
Daily['Upper'] = StateYearMonth['CustomerCount'].transform(lambda x: x.quantile(q=.75) + 1.5*(x.quantile(q=.75) - x.quantile(q=.25)))
Daily['Outlier'] = (Daily['CustomerCount'] < Daily['Lower']) | (Daily['CustomerCount'] > Daily['Upper'])
# Remove Outliers
Daily = Daily[Daily['Outlier'] == False]
#print Daily.head()
# Combine all markets
# Get the max customer count by Date
ALL = pd.DataFrame(Daily['CustomerCount'].groupby(Daily.index.get_level_values(1)).sum())
ALL.columns = ['CustomerCount'] # rename column
# Group by Year and Month
YearMonth = ALL.groupby([lambda x: x.year, lambda x: x.month])
# What is the max customer count per Year and Month
ALL['Max'] = YearMonth['CustomerCount'].transform(lambda x: x.max())
#print ALL.head()
# Create the BHAG (Big Hairy Annual Goal) dataframe
data = [1000,2000,3000]
idx = pd.date_range(start='12/31/2011', end='12/31/2013', freq='A')
BHAG = pd.DataFrame(data, index=idx, columns=['BHAG'])
#print BHAG
# Combine the BHAG and the ALL data set
combined = pd.concat([ALL,BHAG], axis=0)
combined = combined.sort_index(axis=0)
#print combined.tail()
fig, axes = plt.subplots(figsize=(12, 7))
combined['BHAG'].fillna(method='pad').plot(color='green', label='BHAG')
combined['Max'].plot(color='blue', label='All Markets')
plt.legend(loc='best')
#plt.show()
# Group by Year and then get the max value per year
Year = combined.groupby(lambda x: x.year).max()
#print Year
# Add a column representing the percent change per year
Year['YR_PCT_Change'] = Year['Max'].pct_change(periods=1)
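# pct_change(periods=1) computes (Max[year] - Max[year - 1]) / Max[year - 1];
# e.g. a move from 2000 to 2500 customers would appear as 0.25 (a 25% gain).
# (Illustrative numbers only -- actual values depend on the random seed above.)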
#print Year
#forecast
#print((1 + Year.loc[2012, 'YR_PCT_Change']) * Year.loc[2012, 'Max'])
# First Graph
ALL['Max'].plot(figsize=(10, 5));
plt.title('ALL Markets')
# Last four Graphs
fig, axes = plt.subplots(nrows=2, ncols=2, figsize=(20, 10))
fig.subplots_adjust(hspace=1.0) ## Create space between plots
Daily.loc['FL']['CustomerCount']['2012':].fillna(method='pad').plot(ax=axes[0,0])
Daily.loc['GA']['CustomerCount']['2012':].fillna(method='pad').plot(ax=axes[0,1])
Daily.loc['TX']['CustomerCount']['2012':].fillna(method='pad').plot(ax=axes[1,0])
Daily.loc['NY']['CustomerCount']['2012':].fillna(method='pad').plot(ax=axes[1,1])
# Add titles
axes[0,0].set_title('Florida')
axes[0,1].set_title('Georgia')
axes[1,0].set_title('Texas')
axes[1,1].set_title('North East');
plt.show()
| mit |
pylayers/pylayers | pylayers/antprop/coeffModel.py | 2 | 7181 | """
.. currentmodule:: pylayers.antprop.coeffModel
.. autosummary::
:members:
"""
from __future__ import print_function
import doctest
import os
import glob
import doctest
import pdb
import numpy as np
import scipy as sp
import scipy.special as special
import matplotlib.pylab as plt
from numpy import zeros
def relative_error(Eth_original, Eph_original,Eth_model, Eph_model,theta, phi, dsf=1,kf=-1):
""" calculate relative error between original and model
Parameters
----------
Eth_original : np.array
Eph_original : np.array
Eth_model : np.array
Eph_model : np.array
theta : np.array
phi : np.phi
dsf : int
down sampling factor
kf : int
"""
st = np.sin(theta).reshape((len(theta), 1))
#
# Construct difference between reference and reconstructed
#
if kf!=-1:
dTh = (Eth_model[kf, :, :] - Eth_original[kf, ::dsf, ::dsf])
dPh = (Eph_model[kf, :, :] - Eph_original[kf, ::dsf, ::dsf])
#
# squaring + Jacobian
#
dTh2 = np.real(dTh * np.conj(dTh)) * st
dPh2 = np.real(dPh * np.conj(dPh)) * st
vTh2 = np.real(Eth_original[kf, ::dsf, ::dsf] \
* np.conj(Eth_original[kf, ::dsf, ::dsf])) * st
vPh2 = np.real(Eph_original[kf, ::dsf, ::dsf] \
* np.conj(Eph_original[kf, ::dsf, ::dsf])) * st
mvTh2 = np.sum(vTh2)
mvPh2 = np.sum(vPh2)
errTh = np.sum(dTh2)
errPh = np.sum(dPh2)
else:
dTh = (Eth_model[:, :, :] - Eth_original[:, ::dsf, ::dsf])
dPh = (Eph_model[:, :, :] - Eph_original[:, ::dsf, ::dsf])
#
# squaring + Jacobian
#
dTh2 = np.real(dTh * np.conj(dTh)) * st
dPh2 = np.real(dPh * np.conj(dPh)) * st
vTh2 = np.real(Eth_original[:, ::dsf, ::dsf] \
* np.conj(Eth_original[:, ::dsf, ::dsf])) * st
vPh2 = np.real(Eph_original[:, ::dsf, ::dsf] \
* np.conj(Eph_original[:, ::dsf, ::dsf])) * st
mvTh2 = np.sum(vTh2)
mvPh2 = np.sum(vPh2)
errTh = np.sum(dTh2)
errPh = np.sum(dPh2)
errelTh = (errTh / mvTh2)
errelPh = (errPh / mvPh2)
errel =( (errTh + errPh) / (mvTh2 + mvPh2))
return(errelTh, errelPh, errel)
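# A minimal usage sketch (the shapes are assumptions inferred from the indexing
# above: each field is an (Nf, Ntheta, Nphi) array sampled on the theta/phi
# grids passed in):
#
#   errelTh, errelPh, errel = relative_error(Eth_ref, Eph_ref,
#                                            Eth_mod, Eph_mod,
#                                            theta, phi, dsf=1, kf=-1)
#
# With kf=-1 the error is accumulated over all frequencies; with kf >= 0 only
# that frequency index is compared.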
def RepAzimuth1 (Ec, theta, phi, th= np.pi/2,typ = 'Gain'):
""" response in azimuth
Parameters
----------
Ec
theta :
phi :
th :
typ : string
'Gain'
"""
pos_th = np.where(theta == th)[0][0]
start = pos_th*len(phi)
stop = start + len(phi)
if typ=='Gain':
V = np.sqrt(np.real(Ec[0,:,start:stop]*
np.conj(Ec[0,:,start:stop]) +
Ec[1,:,start:stop]*np.conj(Ec[1,:,start:stop]) +
Ec[2,:,start:stop]*np.conj(Ec[2,:,start:stop])))
if typ=='Ex':
V = np.abs(Ec[0,:,start:stop])
if typ=='Ey':
V = np.abs(Ec[1,:,start:stop])
if typ=='Ez':
V = np.abs(Ec[2,:,start:stop])
VdB = 20*np.log10(V)
VdBmin = -40
VdB = VdB - VdBmin
V = VdB
#plt.polar(phi,V)
#plt.title('theta = '+str(th))
return V
def mode_energy(C,M,L =20, ifreq = 46):
""" calculates mode energy
Parameters
----------
C :
M :
L : int
ifreq : int
shape C = (dim = 3,Ncoef = (1+L)**2)
"""
Em = []
Lc = (1+L)**2
for m in range(M+1):
im = m*(2*L+3-m)/2
bind = (1+L)*(L+2)/2 + im-L-1
if ifreq > 0:
if m == 0:
em = np.sum(np.abs(C[:,ifreq,im:im+L-m+1])**2)
else:
em = np.sum(np.abs(C[:,ifreq,im:im+L-m+1])**2) + np.sum(np.abs(C[:,ifreq,bind:bind + L-m+1])**2)
Et = np.sum(np.abs(C[:,ifreq,:])**2)
Em.append(em)
return np.array(Em)/Et
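# Shape sketch (an assumption consistent with the indexing above, not from the
# original docs): C is expected as (3, Nf, (1 + L)**2), e.g. (3, 47, 441) for
# L = 20 and 47 frequency points, and the returned array holds one normalized
# energy fraction per azimuthal mode m = 0..M (M + 1 values) at frequency
# index ifreq.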
def mode_energy2(A,m, ifreq=46, L= 20):
""" calculates mode energy (version 2)
Parameters
----------
A :
m :
ifreq
L :
"""
cx = lmreshape(A.S.Cx.s2)
cy = lmreshape(A.S.Cy.s2)
cz = lmreshape(A.S.Cz.s2)
if ifreq >0:
em = np.sum(np.abs(cx[ifreq,:,L+m])**2+np.abs(cy[ifreq,:,L+m])**2+np.abs(cz[ifreq,:,L+m])**2)
Et = np.sum(np.abs(cx[ifreq])**2+np.abs(cy[ifreq])**2+np.abs(cz[ifreq])**2)
return em/Et
def level_energy(A,l, ifreq = 46,L=20):
""" calculates energy of the level l
Parameters
----------
A : Antenna
l : int
level
ifreq
L
"""
cx = lmreshape(A.S.Cx.s2)
cy = lmreshape(A.S.Cy.s2)
cz = lmreshape(A.S.Cz.s2)
if ifreq >0:
el = np.sum(np.abs(cx[ifreq,l,:])**2+np.abs(cy[ifreq,l,:])**2+np.abs(cz[ifreq,l,:])**2)
Et = np.sum(np.abs(cx[ifreq])**2+np.abs(cy[ifreq])**2+np.abs(cz[ifreq])**2)
return el/Et
def modeMax(coeff,L= 20, ifreq = 46):
""" calculates maximal mode
Parameters
----------
coeff :
L : int
maximum level
ifreq : int
"""
Em_dB = 20*np.log10(mode_energy(C = coeff,M = L))
max_mode = np.where(Em_dB <-20 )[0][0]-1
return max_mode
def lmreshape(coeff,L= 20):
""" level and mode reshaping
Parameters
----------
coeff
L : int
maximum level
"""
sh = coeff.shape
coeff_lm = zeros(shape = (sh[0],1+L, 1+2*L), dtype = complex )
for m in range(0,1+L):
im = m*(2*L+3-m)/2
coeff_lm[:,m:L+1,L+m] = coeff[:,im:im +L+1-m]
for m in range(1,L):
im = m*(2*L+3-m)/2
bind = (1+L)*(L+2)/2 + im-L-1
coeff_lm[:,m:L+1,L-m]= coeff[:,bind: bind + L-m+1]
return coeff_lm
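# Layout sketch (an illustration of the loops above, not part of the original
# docs): a flat coefficient block of shape (Nf, (1 + L)**2) is unpacked into
# (Nf, 1 + L, 1 + 2*L); along the last axis, column L + m holds mode +m and
# column L - m holds mode -m, so for L = 20 the result has shape (Nf, 21, 41).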
def sshModel(c,d, L = 20):
""" calculates sshModel
Parameters
----------
c : ssh coeff
free space antenna coeff
d : float
distance (meters)
L : int
Returns
-------
cm : ssh coeff
perturbed antenna coeff
"""
Lc = (1+L)**2
sh = np.shape(c)
cm = np.zeros(shape = sh , dtype = complex)
m0 = modeMax(c, L= 20, ifreq = 46)
im0 = m0*(2*L+3-m0)/2
M = m0 + int(0.06*d) + 4
a0 = 0.002*d+0.55
am = -0.002*d + 1.55
alpha = -0.006*d+1.22
for m in range(0,m0):
im = m*(2*L+3-m)/2
if m == 0:
dephm = 0
cm[:,:,im: im+L+1-m] = a0*c[:,:,im: im+L+1-m]
else:
dephm = (m-m0)*alpha
cm[:,:,im: im+L+1-m] = a0*c[:,:,im: im+L+1-m]*np.exp(1j*dephm)
bind = (1+L)*(L+2)/2 + im-L-1
cm[:,:,bind: bind + L-m+1] = ((-1)**m)*cm[:,:,im: im+L+1-m]
for m in range(m0,M+1):
dephm = (m-m0)*alpha
if m == m0:
im = m*(2*L+3-m)/2
cm[:,:,im: im+L+1-m] = (am/(m-m0+1))*c[:,:,im0 : im0+L-m+1]*np.exp(1j*dephm)
bind = (1+L)*(L+2)/2 + im -L-1
cm[:,:,bind: bind + L-m+1] = ((-1)**m)*(cm[:,:,im: im+L+1-m])
else:
im = m*(2*L+3-m)/2
cm[:,:,im: im+L+1-m] = (am/(m-m0+1))*c[:,:,im0 : im0+L-m+1]*np.exp(1j*dephm)
bind = (1+L)*(L+2)/2 + im -L-1
cm[:,:,bind: bind + L-m+1] = ((-1)**m)*(cm[:,:,im: im+L+1-m])
cm[0:2] = c[0:2]
return cm
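# Worked example for the empirical parameters above (d = 50 m is illustrative,
# not a validated measurement):
#   a0    = 0.002*50 + 0.55  = 0.65
#   am    = -0.002*50 + 1.55 = 1.45
#   alpha = -0.006*50 + 1.22 = 0.92 rad
#   M     = m0 + int(0.06*50) + 4 = m0 + 7
# so low-order modes are attenuated by a0 while modes above m0 are spread up to
# M with a distance-dependent phase ramp alpha per mode.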
if (__name__=="__main__"):
doctest.testmod()
| mit |
Fuchai/Philosophy-Machine | amne_binding/binding_tree_analysis.py | 1 | 2395 | import os
import pickle
from sklearn import tree
import torch
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import norm
from sklearn.neighbors import KernelDensity
if os.path.isfile("god.file"):
outputs,coeffs=pickle.load(open("god.file","rb"))
else:
    raise FileNotFoundError("where is the file?")
# The tensors are really weird.
# I need to think about them later.
# Today I'm done. I implemented binding.py from zero. Enough work.
outputs=[i.data.unsqueeze(2) for i in outputs]
coeffs=[torch.cat(i,1) for i in coeffs]
outputs=torch.cat(outputs,0)
coeffs=torch.cat(coeffs,0)
outputs=outputs.cpu().numpy().squeeze()
coeffs=coeffs.data.cpu().numpy()
clf = tree.DecisionTreeClassifier()
predicted_labels=outputs.argmax(1)
one_hot=np.zeros(outputs.shape)
one_hot[np.arange(outputs.shape[0]),predicted_labels]=1
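# A hedged sketch of what the classifier above could be used for (an assumption
# -- this excerpt never calls fit, so the intent is inferred):
#
#   clf.fit(coeffs, predicted_labels)
#   print(clf.feature_importances_)   # which bifurcation coefficient matters
#
# Kept as a comment so the script's original behaviour is unchanged.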
# I peeked into the coefficients.
# They are not vanilla.
# New hope.
# good practice on scikit also. wow. It's been two years.
# I hate scikit plotting. I would seriously export data to R and plot with ggplot2.
# TODO Which module is playing a bigger role?
f, axarr = plt.subplots(2, 2)
axarr[0, 0].hist(coeffs[0],bins=50)
axarr[0, 1].hist(coeffs[1],bins=50)
axarr[1, 0].hist(coeffs[2],bins=50)
axarr[1, 1].hist(coeffs[3],bins=50)
plt.show()
# what?
# okay.
# the features are not necessarily normalized.
# They must be normalized, because otherwise the coefficients cannot be interpreted.
# If not normalized, the features and coefficients have to figure out with each other
# what the norm is. Well, it's a hard thing to coordinate, and that's why the coeffs
# show multiple modes: modes are more predictable and coordinatable.
# after the batch_norm, all the coefficients hardly matter.
# two staged training might be necessary.
# let's train multiple modules, and fix them at the coefficients training stage?
# anyhow, the divergence of the coefficients is a desirable property
# TTODO Which module interacts with which labels?
# TTODO Which module interacts with other modules?
# questions are not valid.
# the bifur modules hardly differ.
# TODO Hold on, why are there three modes?
# it's okay if all coefficients show the same distribution
# as long as they depend on each other.
# it's possible that the discreteness is a desirable property
# it's very necessary that I use Gaussian kernel to analyze the datapoints. | apache-2.0 |
florian-f/sklearn | sklearn/neighbors/nearest_centroid.py | 4 | 5895 | # -*- coding: utf-8 -*-
"""
Nearest Centroid Classification
"""
# Author: Robert Layton <[email protected]>
# Olivier Grisel <[email protected]>
#
# License: BSD Style.
import numpy as np
from scipy import sparse as sp
from ..base import BaseEstimator, ClassifierMixin
from ..externals.six.moves import xrange
from ..metrics.pairwise import pairwise_distances
from ..utils.validation import check_arrays, atleast2d_or_csr
class NearestCentroid(BaseEstimator, ClassifierMixin):
"""Nearest centroid classifier.
Each class is represented by its centroid, with test samples classified to
the class with the nearest centroid.
Parameters
----------
    metric : string, or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string or callable, it must be one of
the options allowed by metrics.pairwise.pairwise_distances for its
metric parameter.
shrink_threshold : float, optional (default = None)
Threshold for shrinking centroids to remove features.
Attributes
----------
`centroids_` : array-like, shape = [n_classes, n_features]
Centroid of each class
Examples
--------
>>> from sklearn.neighbors.nearest_centroid import NearestCentroid
>>> import numpy as np
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> y = np.array([1, 1, 1, 2, 2, 2])
>>> clf = NearestCentroid()
>>> clf.fit(X, y)
NearestCentroid(metric='euclidean', shrink_threshold=None)
>>> print(clf.predict([[-0.8, -1]]))
[1]
See also
--------
sklearn.neighbors.KNeighborsClassifier: nearest neighbors classifier
Notes
-----
When used for text classification with tf–idf vectors, this classifier is
also known as the Rocchio classifier.
References
----------
Tibshirani, R., Hastie, T., Narasimhan, B., & Chu, G. (2002). Diagnosis of
multiple cancer types by shrunken centroids of gene expression. Proceedings
of the National Academy of Sciences of the United States of America,
99(10), 6567-6572. The National Academy of Sciences.
"""
def __init__(self, metric='euclidean', shrink_threshold=None):
self.metric = metric
self.shrink_threshold = shrink_threshold
def fit(self, X, y):
"""
Fit the NearestCentroid model according to the given training data.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vector, where n_samples in the number of samples and
n_features is the number of features.
Note that centroid shrinking cannot be used with sparse matrices.
y : array, shape = [n_samples]
Target values (integers)
"""
X, y = check_arrays(X, y, sparse_format="csr")
if sp.issparse(X) and self.shrink_threshold:
raise ValueError("threshold shrinking not supported"
" for sparse input")
n_samples, n_features = X.shape
classes = np.unique(y)
self.classes_ = classes
n_classes = classes.size
if n_classes < 2:
raise ValueError('y has less than 2 classes')
# Mask mapping each class to it's members.
self.centroids_ = np.empty((n_classes, n_features), dtype=np.float64)
for i, cur_class in enumerate(classes):
center_mask = y == cur_class
if sp.issparse(X):
center_mask = np.where(center_mask)[0]
self.centroids_[i] = X[center_mask].mean(axis=0)
if self.shrink_threshold:
dataset_centroid_ = np.array(X.mean(axis=0))[0]
# Number of clusters in each class.
nk = np.array([np.sum(classes == cur_class)
for cur_class in classes])
# m parameter for determining deviation
m = np.sqrt((1. / nk) + (1. / n_samples))
# Calculate deviation using the standard deviation of centroids.
variance = np.array(np.power(X - self.centroids_[y], 2))
variance = variance.sum(axis=0)
s = np.sqrt(variance / (n_samples - n_classes))
s += np.median(s) # To deter outliers from affecting the results.
mm = m.reshape(len(m), 1) # Reshape to allow broadcasting.
ms = mm * s
deviation = ((self.centroids_ - dataset_centroid_) / ms)
# Soft thresholding: if the deviation crosses 0 during shrinking,
# it becomes zero.
signs = np.sign(deviation)
deviation = (np.abs(deviation) - self.shrink_threshold)
deviation[deviation < 0] = 0
deviation = np.multiply(deviation, signs)
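            # e.g. with shrink_threshold=0.5 a deviation of 0.8 shrinks to 0.3,
            # while a deviation of -0.2 crosses zero and is clipped to 0.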
# Now adjust the centroids using the deviation
msd = np.multiply(ms, deviation)
self.centroids_ = np.array([dataset_centroid_ + msd[i]
for i in xrange(n_classes)])
return self
def predict(self, X):
"""Perform classification on an array of test vectors X.
The predicted class C for each sample in X is returned.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array, shape = [n_samples]
Notes
-----
If the metric constructor parameter is "precomputed", X is assumed to
be the distance matrix between the data to be predicted and
``self.centroids_``.
"""
X = atleast2d_or_csr(X)
if not hasattr(self, "centroids_"):
raise AttributeError("Model has not been trained yet.")
return self.classes_[pairwise_distances(
X, self.centroids_, metric=self.metric).argmin(axis=1)]
| bsd-3-clause |
windyuuy/opera | chromium/src/ppapi/native_client/tests/breakpad_crash_test/crash_dump_tester.py | 154 | 8545 | #!/usr/bin/python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import subprocess
import sys
import tempfile
import time
script_dir = os.path.dirname(__file__)
sys.path.append(os.path.join(script_dir,
'../../tools/browser_tester'))
import browser_tester
import browsertester.browserlauncher
# This script extends browser_tester to check for the presence of
# Breakpad crash dumps.
# This reads a file of lines containing 'key:value' pairs.
# The file contains entries like the following:
# plat:Win32
# prod:Chromium
# ptype:nacl-loader
# rept:crash svc
def ReadDumpTxtFile(filename):
dump_info = {}
fh = open(filename, 'r')
for line in fh:
if ':' in line:
key, value = line.rstrip().split(':', 1)
dump_info[key] = value
fh.close()
return dump_info
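# For the sample file shown above, ReadDumpTxtFile() returns
# {'plat': 'Win32', 'prod': 'Chromium', 'ptype': 'nacl-loader',
#  'rept': 'crash svc'}.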
def StartCrashService(browser_path, dumps_dir, windows_pipe_name,
cleanup_funcs, crash_service_exe,
skip_if_missing=False):
# Find crash_service.exe relative to chrome.exe. This is a bit icky.
browser_dir = os.path.dirname(browser_path)
crash_service_path = os.path.join(browser_dir, crash_service_exe)
if skip_if_missing and not os.path.exists(crash_service_path):
return
proc = subprocess.Popen([crash_service_path,
'--v=1', # Verbose output for debugging failures
'--dumps-dir=%s' % dumps_dir,
'--pipe-name=%s' % windows_pipe_name])
def Cleanup():
# Note that if the process has already exited, this will raise
# an 'Access is denied' WindowsError exception, but
# crash_service.exe is not supposed to do this and such
# behaviour should make the test fail.
proc.terminate()
status = proc.wait()
sys.stdout.write('crash_dump_tester: %s exited with status %s\n'
% (crash_service_exe, status))
cleanup_funcs.append(Cleanup)
def ListPathsInDir(dir_path):
if os.path.exists(dir_path):
return [os.path.join(dir_path, name)
for name in os.listdir(dir_path)]
else:
return []
def GetDumpFiles(dumps_dirs):
all_files = [filename
for dumps_dir in dumps_dirs
for filename in ListPathsInDir(dumps_dir)]
sys.stdout.write('crash_dump_tester: Found %i files\n' % len(all_files))
for dump_file in all_files:
sys.stdout.write(' %s (size %i)\n'
% (dump_file, os.stat(dump_file).st_size))
return [dump_file for dump_file in all_files
if dump_file.endswith('.dmp')]
def Main(cleanup_funcs):
parser = browser_tester.BuildArgParser()
parser.add_option('--expected_crash_dumps', dest='expected_crash_dumps',
type=int, default=0,
help='The number of crash dumps that we should expect')
parser.add_option('--expected_process_type_for_crash',
dest='expected_process_type_for_crash',
type=str, default='nacl-loader',
help='The type of Chromium process that we expect the '
'crash dump to be for')
# Ideally we would just query the OS here to find out whether we are
# running x86-32 or x86-64 Windows, but Python's win32api module
# does not contain a wrapper for GetNativeSystemInfo(), which is
# what NaCl uses to check this, or for IsWow64Process(), which is
# what Chromium uses. Instead, we just rely on the build system to
# tell us.
parser.add_option('--win64', dest='win64', action='store_true',
help='Pass this if we are running tests for x86-64 Windows')
options, args = parser.parse_args()
temp_dir = tempfile.mkdtemp(prefix='nacl_crash_dump_tester_')
def CleanUpTempDir():
browsertester.browserlauncher.RemoveDirectory(temp_dir)
cleanup_funcs.append(CleanUpTempDir)
# To get a guaranteed unique pipe name, use the base name of the
# directory we just created.
windows_pipe_name = r'\\.\pipe\%s_crash_service' % os.path.basename(temp_dir)
# This environment variable enables Breakpad crash dumping in
# non-official builds of Chromium.
os.environ['CHROME_HEADLESS'] = '1'
if sys.platform == 'win32':
dumps_dir = temp_dir
# Override the default (global) Windows pipe name that Chromium will
# use for out-of-process crash reporting.
os.environ['CHROME_BREAKPAD_PIPE_NAME'] = windows_pipe_name
# Launch the x86-32 crash service so that we can handle crashes in
# the browser process.
StartCrashService(options.browser_path, dumps_dir, windows_pipe_name,
cleanup_funcs, 'crash_service.exe')
if options.win64:
# Launch the x86-64 crash service so that we can handle crashes
# in the NaCl loader process (nacl64.exe).
# Skip if missing, since in win64 builds crash_service.exe is 64-bit
# and crash_service64.exe does not exist.
StartCrashService(options.browser_path, dumps_dir, windows_pipe_name,
cleanup_funcs, 'crash_service64.exe',
skip_if_missing=True)
# We add a delay because there is probably a race condition:
# crash_service.exe might not have finished doing
# CreateNamedPipe() before NaCl does a crash dump and tries to
# connect to that pipe.
# TODO(mseaborn): We could change crash_service.exe to report when
# it has successfully created the named pipe.
time.sleep(1)
elif sys.platform == 'darwin':
dumps_dir = temp_dir
os.environ['BREAKPAD_DUMP_LOCATION'] = dumps_dir
elif sys.platform.startswith('linux'):
# The "--user-data-dir" option is not effective for the Breakpad
# setup in Linux Chromium, because Breakpad is initialized before
# "--user-data-dir" is read. So we set HOME to redirect the crash
# dumps to a temporary directory.
home_dir = temp_dir
os.environ['HOME'] = home_dir
options.enable_crash_reporter = True
result = browser_tester.Run(options.url, options)
# Find crash dump results.
if sys.platform.startswith('linux'):
# Look in "~/.config/*/Crash Reports". This will find crash
# reports under ~/.config/chromium or ~/.config/google-chrome, or
# under other subdirectories in case the branding is changed.
dumps_dirs = [os.path.join(path, 'Crash Reports')
for path in ListPathsInDir(os.path.join(home_dir, '.config'))]
else:
dumps_dirs = [dumps_dir]
dmp_files = GetDumpFiles(dumps_dirs)
failed = False
msg = ('crash_dump_tester: ERROR: Got %i crash dumps but expected %i\n' %
(len(dmp_files), options.expected_crash_dumps))
if len(dmp_files) != options.expected_crash_dumps:
sys.stdout.write(msg)
failed = True
for dump_file in dmp_files:
# Sanity check: Make sure dumping did not fail after opening the file.
msg = 'crash_dump_tester: ERROR: Dump file is empty\n'
if os.stat(dump_file).st_size == 0:
sys.stdout.write(msg)
failed = True
# On Windows, the crash dumps should come in pairs of a .dmp and
# .txt file.
if sys.platform == 'win32':
second_file = dump_file[:-4] + '.txt'
msg = ('crash_dump_tester: ERROR: File %r is missing a corresponding '
'%r file\n' % (dump_file, second_file))
if not os.path.exists(second_file):
sys.stdout.write(msg)
failed = True
continue
# Check that the crash dump comes from the NaCl process.
dump_info = ReadDumpTxtFile(second_file)
if 'ptype' in dump_info:
msg = ('crash_dump_tester: ERROR: Unexpected ptype value: %r != %r\n'
% (dump_info['ptype'], options.expected_process_type_for_crash))
if dump_info['ptype'] != options.expected_process_type_for_crash:
sys.stdout.write(msg)
failed = True
else:
sys.stdout.write('crash_dump_tester: ERROR: Missing ptype field\n')
failed = True
# TODO(mseaborn): Ideally we would also check that a backtrace
# containing an expected function name can be extracted from the
# crash dump.
if failed:
sys.stdout.write('crash_dump_tester: FAILED\n')
result = 1
else:
sys.stdout.write('crash_dump_tester: PASSED\n')
return result
def MainWrapper():
cleanup_funcs = []
try:
return Main(cleanup_funcs)
finally:
for func in cleanup_funcs:
func()
if __name__ == '__main__':
sys.exit(MainWrapper())
| bsd-3-clause |
alephu5/Soundbyte | environment/lib/python3.3/site-packages/matplotlib/streamplot.py | 1 | 19062 | """
Streamline plotting for 2D vector fields.
"""
import numpy as np
import matplotlib
import matplotlib.cm as cm
import matplotlib.colors as mcolors
import matplotlib.collections as mcollections
import matplotlib.patches as patches
__all__ = ['streamplot']
def streamplot(axes, x, y, u, v, density=1, linewidth=None, color=None,
cmap=None, norm=None, arrowsize=1, arrowstyle='-|>',
minlength=0.1, transform=None):
"""Draws streamlines of a vector flow.
*x*, *y* : 1d arrays
an *evenly spaced* grid.
*u*, *v* : 2d arrays
x and y-velocities. Number of rows should match length of y, and
the number of columns should match x.
*density* : float or 2-tuple
Controls the closeness of streamlines. When `density = 1`, the domain
        is divided into a 30x30 grid---*density* linearly scales this grid.
Each cell in the grid can have, at most, one traversing streamline.
For different densities in each direction, use [density_x, density_y].
*linewidth* : numeric or 2d array
vary linewidth when given a 2d array with the same shape as velocities.
*color* : matplotlib color code, or 2d array
Streamline color. When given an array with the same shape as
velocities, *color* values are converted to colors using *cmap*.
*cmap* : :class:`~matplotlib.colors.Colormap`
Colormap used to plot streamlines and arrows. Only necessary when using
an array input for *color*.
*norm* : :class:`~matplotlib.colors.Normalize`
Normalize object used to scale luminance data to 0, 1. If None, stretch
(min, max) to (0, 1). Only necessary when *color* is an array.
*arrowsize* : float
        Scaling factor for the arrow size.
*arrowstyle* : str
Arrow style specification.
See :class:`~matplotlib.patches.FancyArrowPatch`.
*minlength* : float
Minimum length of streamline in axes coordinates.
Returns:
*stream_container* : StreamplotSet
Container object with attributes
- lines: `matplotlib.collections.LineCollection` of streamlines
- arrows: collection of `matplotlib.patches.FancyArrowPatch`
objects representing arrows half-way along stream
lines.
This container will probably change in the future to allow changes
to the colormap, alpha, etc. for both lines and arrows, but these
changes should be backward compatible.
"""
grid = Grid(x, y)
mask = StreamMask(density)
dmap = DomainMap(grid, mask)
# default to data coordinates
if transform is None:
transform = axes.transData
if color is None:
color = next(axes._get_lines.color_cycle)
if linewidth is None:
linewidth = matplotlib.rcParams['lines.linewidth']
line_kw = {}
arrow_kw = dict(arrowstyle=arrowstyle, mutation_scale=10 * arrowsize)
use_multicolor_lines = isinstance(color, np.ndarray)
if use_multicolor_lines:
assert color.shape == grid.shape
line_colors = []
if np.any(np.isnan(color)):
color = np.ma.array(color, mask=np.isnan(color))
else:
line_kw['color'] = color
arrow_kw['color'] = color
if isinstance(linewidth, np.ndarray):
assert linewidth.shape == grid.shape
line_kw['linewidth'] = []
else:
line_kw['linewidth'] = linewidth
arrow_kw['linewidth'] = linewidth
## Sanity checks.
assert u.shape == grid.shape
assert v.shape == grid.shape
if np.any(np.isnan(u)):
u = np.ma.array(u, mask=np.isnan(u))
if np.any(np.isnan(v)):
v = np.ma.array(v, mask=np.isnan(v))
integrate = get_integrator(u, v, dmap, minlength)
trajectories = []
for xm, ym in _gen_starting_points(mask.shape):
if mask[ym, xm] == 0:
xg, yg = dmap.mask2grid(xm, ym)
t = integrate(xg, yg)
if t is not None:
trajectories.append(t)
if use_multicolor_lines:
if norm is None:
norm = mcolors.Normalize(color.min(), color.max())
if cmap is None:
cmap = cm.get_cmap(matplotlib.rcParams['image.cmap'])
else:
cmap = cm.get_cmap(cmap)
streamlines = []
arrows = []
for t in trajectories:
tgx = np.array(t[0])
tgy = np.array(t[1])
# Rescale from grid-coordinates to data-coordinates.
tx = np.array(t[0]) * grid.dx + grid.x_origin
ty = np.array(t[1]) * grid.dy + grid.y_origin
points = np.transpose([tx, ty]).reshape(-1, 1, 2)
streamlines.extend(np.hstack([points[:-1], points[1:]]))
# Add arrows half way along each trajectory.
s = np.cumsum(np.sqrt(np.diff(tx) ** 2 + np.diff(ty) ** 2))
n = np.searchsorted(s, s[-1] / 2.)
arrow_tail = (tx[n], ty[n])
arrow_head = (np.mean(tx[n:n + 2]), np.mean(ty[n:n + 2]))
if isinstance(linewidth, np.ndarray):
line_widths = interpgrid(linewidth, tgx, tgy)[:-1]
line_kw['linewidth'].extend(line_widths)
arrow_kw['linewidth'] = line_widths[n]
if use_multicolor_lines:
color_values = interpgrid(color, tgx, tgy)[:-1]
line_colors.extend(color_values)
arrow_kw['color'] = cmap(norm(color_values[n]))
p = patches.FancyArrowPatch(arrow_tail,
arrow_head,
transform=transform,
**arrow_kw)
axes.add_patch(p)
arrows.append(p)
lc = mcollections.LineCollection(streamlines,
transform=transform,
**line_kw)
if use_multicolor_lines:
lc.set_array(np.asarray(line_colors))
lc.set_cmap(cmap)
lc.set_norm(norm)
axes.add_collection(lc)
axes.update_datalim(((x.min(), y.min()), (x.max(), y.max())))
axes.autoscale_view(tight=True)
ac = matplotlib.collections.PatchCollection(arrows)
stream_container = StreamplotSet(lc, ac)
return stream_container
class StreamplotSet(object):
def __init__(self, lines, arrows, **kwargs):
self.lines = lines
self.arrows = arrows
# Coordinate definitions
#========================
class DomainMap(object):
"""Map representing different coordinate systems.
Coordinate definitions:
* axes-coordinates goes from 0 to 1 in the domain.
* data-coordinates are specified by the input x-y coordinates.
* grid-coordinates goes from 0 to N and 0 to M for an N x M grid,
where N and M match the shape of the input data.
* mask-coordinates goes from 0 to N and 0 to M for an N x M mask,
where N and M are user-specified to control the density of streamlines.
This class also has methods for adding trajectories to the StreamMask.
Before adding a trajectory, run `start_trajectory` to keep track of regions
crossed by a given trajectory. Later, if you decide the trajectory is bad
(e.g., if the trajectory is very short) just call `undo_trajectory`.
"""
def __init__(self, grid, mask):
self.grid = grid
self.mask = mask
## Constants for conversion between grid- and mask-coordinates
self.x_grid2mask = float(mask.nx - 1) / grid.nx
self.y_grid2mask = float(mask.ny - 1) / grid.ny
self.x_mask2grid = 1. / self.x_grid2mask
self.y_mask2grid = 1. / self.y_grid2mask
self.x_data2grid = grid.nx / grid.width
self.y_data2grid = grid.ny / grid.height
def grid2mask(self, xi, yi):
"""Return nearest space in mask-coords from given grid-coords."""
return int((xi * self.x_grid2mask) + 0.5), \
int((yi * self.y_grid2mask) + 0.5)
def mask2grid(self, xm, ym):
return xm * self.x_mask2grid, ym * self.y_mask2grid
def data2grid(self, xd, yd):
return xd * self.x_data2grid, yd * self.y_data2grid
def start_trajectory(self, xg, yg):
xm, ym = self.grid2mask(xg, yg)
self.mask._start_trajectory(xm, ym)
def reset_start_point(self, xg, yg):
xm, ym = self.grid2mask(xg, yg)
self.mask._current_xy = (xm, ym)
def update_trajectory(self, xg, yg):
if not self.grid.within_grid(xg, yg):
raise InvalidIndexError
xm, ym = self.grid2mask(xg, yg)
self.mask._update_trajectory(xm, ym)
def undo_trajectory(self):
self.mask._undo_trajectory()
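# Worked example of the conversions above (assuming a 100x100 data grid and the
# default density=1, i.e. a 30x30 mask): x_grid2mask = 29/100 = 0.29, so grid
# point (10, 10) lands in mask cell (int(10*0.29 + 0.5), ...) == (3, 3), and
# mask2grid(3, 3) maps back to roughly (10.3, 10.3) in grid coordinates.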
class Grid(object):
"""Grid of data."""
def __init__(self, x, y):
if len(x.shape) == 2:
x_row = x[0]
assert np.allclose(x_row, x)
x = x_row
else:
assert len(x.shape) == 1
if len(y.shape) == 2:
y_col = y[:, 0]
assert np.allclose(y_col, y.T)
y = y_col
else:
assert len(y.shape) == 1
self.nx = len(x)
self.ny = len(y)
self.dx = x[1] - x[0]
self.dy = y[1] - y[0]
self.x_origin = x[0]
self.y_origin = y[0]
self.width = x[-1] - x[0]
self.height = y[-1] - y[0]
@property
def shape(self):
return self.ny, self.nx
def within_grid(self, xi, yi):
"""Return True if point is a valid index of grid."""
# Note that xi/yi can be floats; so, for example, we can't simply check
# `xi < self.nx` since `xi` can be `self.nx - 1 < xi < self.nx`
return xi >= 0 and xi <= self.nx - 1 and yi >= 0 and yi <= self.ny - 1
class StreamMask(object):
"""Mask to keep track of discrete regions crossed by streamlines.
The resolution of this grid determines the approximate spacing between
trajectories. Streamlines are only allowed to pass through zeroed cells:
When a streamline enters a cell, that cell is set to 1, and no new
streamlines are allowed to enter.
"""
def __init__(self, density):
if np.isscalar(density):
assert density > 0
self.nx = self.ny = int(30 * density)
else:
assert len(density) == 2
            self.nx = int(30 * density[0])
            self.ny = int(30 * density[1])
self._mask = np.zeros((self.ny, self.nx))
self.shape = self._mask.shape
self._current_xy = None
def __getitem__(self, *args):
return self._mask.__getitem__(*args)
def _start_trajectory(self, xm, ym):
"""Start recording streamline trajectory"""
self._traj = []
self._update_trajectory(xm, ym)
def _undo_trajectory(self):
"""Remove current trajectory from mask"""
for t in self._traj:
self._mask.__setitem__(t, 0)
def _update_trajectory(self, xm, ym):
"""Update current trajectory position in mask.
If the new position has already been filled, raise `InvalidIndexError`.
"""
if self._current_xy != (xm, ym):
if self[ym, xm] == 0:
self._traj.append((ym, xm))
self._mask[ym, xm] = 1
self._current_xy = (xm, ym)
else:
raise InvalidIndexError
class InvalidIndexError(Exception):
pass
class TerminateTrajectory(Exception):
pass
# Integrator definitions
#========================
def get_integrator(u, v, dmap, minlength):
# rescale velocity onto grid-coordinates for integrations.
u, v = dmap.data2grid(u, v)
# speed (path length) will be in axes-coordinates
u_ax = u / dmap.grid.nx
v_ax = v / dmap.grid.ny
speed = np.ma.sqrt(u_ax ** 2 + v_ax ** 2)
def forward_time(xi, yi):
ds_dt = interpgrid(speed, xi, yi)
if ds_dt == 0:
raise TerminateTrajectory()
dt_ds = 1. / ds_dt
ui = interpgrid(u, xi, yi)
vi = interpgrid(v, xi, yi)
return ui * dt_ds, vi * dt_ds
def backward_time(xi, yi):
dxi, dyi = forward_time(xi, yi)
return -dxi, -dyi
def integrate(x0, y0):
"""Return x, y grid-coordinates of trajectory based on starting point.
Integrate both forward and backward in time from starting point in
grid coordinates.
Integration is terminated when a trajectory reaches a domain boundary
or when it crosses into an already occupied cell in the StreamMask. The
resulting trajectory is None if it is shorter than `minlength`.
"""
dmap.start_trajectory(x0, y0)
sf, xf_traj, yf_traj = _integrate_rk12(x0, y0, dmap, forward_time)
dmap.reset_start_point(x0, y0)
sb, xb_traj, yb_traj = _integrate_rk12(x0, y0, dmap, backward_time)
# combine forward and backward trajectories
stotal = sf + sb
x_traj = xb_traj[::-1] + xf_traj[1:]
y_traj = yb_traj[::-1] + yf_traj[1:]
if stotal > minlength:
return x_traj, y_traj
else: # reject short trajectories
dmap.undo_trajectory()
return None
return integrate
def _integrate_rk12(x0, y0, dmap, f):
"""2nd-order Runge-Kutta algorithm with adaptive step size.
This method is also referred to as the improved Euler's method, or Heun's
method. This method is favored over higher-order methods because:
1. To get decent looking trajectories and to sample every mask cell
on the trajectory we need a small timestep, so a lower order
solver doesn't hurt us unless the data is *very* high resolution.
In fact, for cases where the user inputs
data smaller or of similar grid size to the mask grid, the higher
order corrections are negligible because of the very fast linear
interpolation used in `interpgrid`.
2. For high resolution input data (i.e. beyond the mask
resolution), we must reduce the timestep. Therefore, an adaptive
timestep is more suited to the problem as this would be very hard
to judge automatically otherwise.
This integrator is about 1.5 - 2x as fast as both the RK4 and RK45
solvers in most setups on my machine. I would recommend removing the
other two to keep things simple.
"""
## This error is below that needed to match the RK4 integrator. It
## is set for visual reasons -- too low and corners start
## appearing ugly and jagged. Can be tuned.
maxerror = 0.003
## This limit is important (for all integrators) to avoid the
## trajectory skipping some mask cells. We could relax this
## condition if we use the code which is commented out below to
## increment the location gradually. However, due to the efficient
## nature of the interpolation, this doesn't boost speed by much
## for quite a bit of complexity.
maxds = min(1. / dmap.mask.nx, 1. / dmap.mask.ny, 0.1)
ds = maxds
stotal = 0
xi = x0
yi = y0
xf_traj = []
yf_traj = []
while dmap.grid.within_grid(xi, yi):
xf_traj.append(xi)
yf_traj.append(yi)
try:
k1x, k1y = f(xi, yi)
k2x, k2y = f(xi + ds * k1x,
yi + ds * k1y)
except IndexError:
# Out of the domain on one of the intermediate integration steps.
# Take an Euler step to the boundary to improve neatness.
ds, xf_traj, yf_traj = _euler_step(xf_traj, yf_traj, dmap, f)
stotal += ds
break
except TerminateTrajectory:
break
dx1 = ds * k1x
dy1 = ds * k1y
dx2 = ds * 0.5 * (k1x + k2x)
dy2 = ds * 0.5 * (k1y + k2y)
nx, ny = dmap.grid.shape
# Error is normalized to the axes coordinates
error = np.sqrt(((dx2 - dx1) / nx) ** 2 + ((dy2 - dy1) / ny) ** 2)
# Only save step if within error tolerance
if error < maxerror:
xi += dx2
yi += dy2
try:
dmap.update_trajectory(xi, yi)
except InvalidIndexError:
break
if (stotal + ds) > 2:
break
stotal += ds
# recalculate stepsize based on step error
if error == 0:
ds = maxds
else:
ds = min(maxds, 0.85 * ds * (maxerror / error) ** 0.5)
return stotal, xf_traj, yf_traj
def _euler_step(xf_traj, yf_traj, dmap, f):
"""Simple Euler integration step that extends streamline to boundary."""
ny, nx = dmap.grid.shape
xi = xf_traj[-1]
yi = yf_traj[-1]
cx, cy = f(xi, yi)
if cx == 0:
dsx = np.inf
elif cx < 0:
dsx = xi / -cx
else:
dsx = (nx - 1 - xi) / cx
if cy == 0:
dsy = np.inf
elif cy < 0:
dsy = yi / -cy
else:
dsy = (ny - 1 - yi) / cy
ds = min(dsx, dsy)
xf_traj.append(xi + cx * ds)
yf_traj.append(yi + cy * ds)
return ds, xf_traj, yf_traj
# Utility functions
#========================
def interpgrid(a, xi, yi):
"""Fast 2D, linear interpolation on an integer grid"""
Ny, Nx = np.shape(a)
if isinstance(xi, np.ndarray):
x = xi.astype(np.int)
y = yi.astype(np.int)
# Check that xn, yn don't exceed max index
xn = np.clip(x + 1, 0, Nx - 1)
yn = np.clip(y + 1, 0, Ny - 1)
else:
x = np.int(xi)
y = np.int(yi)
# conditional is faster than clipping for integers
if x == (Nx - 2):
xn = x
else:
xn = x + 1
if y == (Ny - 2):
yn = y
else:
yn = y + 1
a00 = a[y, x]
a01 = a[y, xn]
a10 = a[yn, x]
a11 = a[yn, xn]
xt = xi - x
yt = yi - y
a0 = a00 * (1 - xt) + a01 * xt
a1 = a10 * (1 - xt) + a11 * xt
ai = a0 * (1 - yt) + a1 * yt
if not isinstance(xi, np.ndarray):
if np.ma.is_masked(ai):
raise TerminateTrajectory
return ai
def _gen_starting_points(shape):
"""Yield starting points for streamlines.
Trying points on the boundary first gives higher quality streamlines.
This algorithm starts with a point on the mask corner and spirals inward.
This algorithm is inefficient, but fast compared to rest of streamplot.
"""
ny, nx = shape
xfirst = 0
yfirst = 1
xlast = nx - 1
ylast = ny - 1
x, y = 0, 0
i = 0
direction = 'right'
for i in range(nx * ny):
yield x, y
if direction == 'right':
x += 1
if x >= xlast:
xlast -= 1
direction = 'up'
elif direction == 'up':
y += 1
if y >= ylast:
ylast -= 1
direction = 'left'
elif direction == 'left':
x -= 1
if x <= xfirst:
xfirst += 1
direction = 'down'
elif direction == 'down':
y -= 1
if y <= yfirst:
yfirst += 1
direction = 'right'
| gpl-3.0 |
mikebenfield/scikit-learn | benchmarks/bench_mnist.py | 45 | 6977 | """
=======================
MNIST dataset benchmark
=======================
Benchmark on the MNIST dataset. The dataset comprises 70,000 samples
and 784 features. Here, we consider the task of predicting
10 classes - digits from 0 to 9 from their raw images. By contrast to the
covertype dataset, the feature space is homogenous.
Example of output :
[..]
Classification performance:
===========================
Classifier train-time test-time error-rate
------------------------------------------------------------
MLP_adam 53.46s 0.11s 0.0224
Nystroem-SVM 112.97s 0.92s 0.0228
MultilayerPerceptron 24.33s 0.14s 0.0287
ExtraTrees 42.99s 0.57s 0.0294
RandomForest 42.70s 0.49s 0.0318
SampledRBF-SVM 135.81s 0.56s 0.0486
    LogisticRegression-SAG       16.67s       0.06s       0.0824
CART 20.69s 0.02s 0.1219
dummy 0.00s 0.01s 0.8973
"""
from __future__ import division, print_function
# Author: Issam H. Laradji
# Arnaud Joly <[email protected]>
# License: BSD 3 clause
import os
from time import time
import argparse
import numpy as np
from sklearn.datasets import fetch_mldata
from sklearn.datasets import get_data_home
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.dummy import DummyClassifier
from sklearn.externals.joblib import Memory
from sklearn.kernel_approximation import Nystroem
from sklearn.kernel_approximation import RBFSampler
from sklearn.metrics import zero_one_loss
from sklearn.pipeline import make_pipeline
from sklearn.svm import LinearSVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.utils import check_array
from sklearn.linear_model import LogisticRegression
from sklearn.neural_network import MLPClassifier
# Memoize the data extraction and memory map the resulting
# train / test splits in readonly mode
memory = Memory(os.path.join(get_data_home(), 'mnist_benchmark_data'),
mmap_mode='r')
@memory.cache
def load_data(dtype=np.float32, order='F'):
"""Load the data, then cache and memmap the train/test split"""
######################################################################
# Load dataset
print("Loading dataset...")
data = fetch_mldata('MNIST original')
X = check_array(data['data'], dtype=dtype, order=order)
y = data["target"]
# Normalize features
X = X / 255
# Create train-test split (as [Joachims, 2006])
print("Creating train-test split...")
n_train = 60000
X_train = X[:n_train]
y_train = y[:n_train]
X_test = X[n_train:]
y_test = y[n_train:]
return X_train, X_test, y_train, y_test
ESTIMATORS = {
"dummy": DummyClassifier(),
'CART': DecisionTreeClassifier(),
'ExtraTrees': ExtraTreesClassifier(n_estimators=100),
'RandomForest': RandomForestClassifier(n_estimators=100),
'Nystroem-SVM': make_pipeline(
Nystroem(gamma=0.015, n_components=1000), LinearSVC(C=100)),
'SampledRBF-SVM': make_pipeline(
RBFSampler(gamma=0.015, n_components=1000), LinearSVC(C=100)),
'LogisticRegression-SAG': LogisticRegression(solver='sag', tol=1e-1,
C=1e4),
'LogisticRegression-SAGA': LogisticRegression(solver='saga', tol=1e-1,
C=1e4),
'MultilayerPerceptron': MLPClassifier(
hidden_layer_sizes=(100, 100), max_iter=400, alpha=1e-4,
solver='sgd', learning_rate_init=0.2, momentum=0.9, verbose=1,
tol=1e-4, random_state=1),
'MLP-adam': MLPClassifier(
hidden_layer_sizes=(100, 100), max_iter=400, alpha=1e-4,
solver='adam', learning_rate_init=0.001, verbose=1,
tol=1e-4, random_state=1)
}
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--classifiers', nargs="+",
choices=ESTIMATORS, type=str,
default=['ExtraTrees', 'Nystroem-SVM'],
help="list of classifiers to benchmark.")
parser.add_argument('--n-jobs', nargs="?", default=1, type=int,
help="Number of concurrently running workers for "
"models that support parallelism.")
parser.add_argument('--order', nargs="?", default="C", type=str,
choices=["F", "C"],
help="Allow to choose between fortran and C ordered "
"data")
parser.add_argument('--random-seed', nargs="?", default=0, type=int,
help="Common seed used by random number generator.")
args = vars(parser.parse_args())
print(__doc__)
X_train, X_test, y_train, y_test = load_data(order=args["order"])
print("")
print("Dataset statistics:")
print("===================")
print("%s %d" % ("number of features:".ljust(25), X_train.shape[1]))
print("%s %d" % ("number of classes:".ljust(25), np.unique(y_train).size))
print("%s %s" % ("data type:".ljust(25), X_train.dtype))
print("%s %d (size=%dMB)" % ("number of train samples:".ljust(25),
X_train.shape[0], int(X_train.nbytes / 1e6)))
print("%s %d (size=%dMB)" % ("number of test samples:".ljust(25),
X_test.shape[0], int(X_test.nbytes / 1e6)))
print()
print("Training Classifiers")
print("====================")
error, train_time, test_time = {}, {}, {}
for name in sorted(args["classifiers"]):
print("Training %s ... " % name, end="")
estimator = ESTIMATORS[name]
estimator_params = estimator.get_params()
estimator.set_params(**{p: args["random_seed"]
for p in estimator_params
if p.endswith("random_state")})
if "n_jobs" in estimator_params:
estimator.set_params(n_jobs=args["n_jobs"])
time_start = time()
estimator.fit(X_train, y_train)
train_time[name] = time() - time_start
time_start = time()
y_pred = estimator.predict(X_test)
test_time[name] = time() - time_start
error[name] = zero_one_loss(y_test, y_pred)
print("done")
print()
print("Classification performance:")
print("===========================")
print("{0: <24} {1: >10} {2: >11} {3: >12}"
"".format("Classifier ", "train-time", "test-time", "error-rate"))
print("-" * 60)
for name in sorted(args["classifiers"], key=error.get):
print("{0: <23} {1: >10.2f}s {2: >10.2f}s {3: >12.4f}"
"".format(name, train_time[name], test_time[name], error[name]))
print()
| bsd-3-clause |
PrashntS/scikit-learn | sklearn/cluster/setup.py | 263 | 1449 | # Author: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
import os
from os.path import join
import numpy
from sklearn._build_utils import get_blas_info
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration
cblas_libs, blas_info = get_blas_info()
libraries = []
if os.name == 'posix':
cblas_libs.append('m')
libraries.append('m')
config = Configuration('cluster', parent_package, top_path)
config.add_extension('_dbscan_inner',
sources=['_dbscan_inner.cpp'],
include_dirs=[numpy.get_include()],
language="c++")
config.add_extension('_hierarchical',
sources=['_hierarchical.cpp'],
language="c++",
include_dirs=[numpy.get_include()],
libraries=libraries)
config.add_extension(
'_k_means',
libraries=cblas_libs,
sources=['_k_means.c'],
include_dirs=[join('..', 'src', 'cblas'),
numpy.get_include(),
blas_info.pop('include_dirs', [])],
extra_compile_args=blas_info.pop('extra_compile_args', []),
**blas_info
)
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
| bsd-3-clause |
aminert/scikit-learn | examples/text/document_clustering.py | 230 | 8356 | """
=======================================
Clustering text documents using k-means
=======================================
This is an example showing how scikit-learn can be used to cluster
documents by topics using a bag-of-words approach. This example uses
a scipy.sparse matrix to store the features instead of standard numpy arrays.
Two feature extraction methods can be used in this example:
- TfidfVectorizer uses a in-memory vocabulary (a python dict) to map the most
frequent words to features indices and hence compute a word occurrence
frequency (sparse) matrix. The word frequencies are then reweighted using
the Inverse Document Frequency (IDF) vector collected feature-wise over
the corpus.
- HashingVectorizer hashes word occurrences to a fixed dimensional space,
possibly with collisions. The word count vectors are then normalized to
each have l2-norm equal to one (projected to the euclidean unit-ball) which
seems to be important for k-means to work in high dimensional space.
HashingVectorizer does not provide IDF weighting as this is a stateless
model (the fit method does nothing). When IDF weighting is needed it can
be added by pipelining its output to a TfidfTransformer instance.
Two algorithms are demoed: ordinary k-means and its more scalable cousin
minibatch k-means.
Additionally, latent semantic analysis can also be used to reduce dimensionality
and discover latent patterns in the data.
It can be noted that k-means (and minibatch k-means) are very sensitive to
feature scaling and that in this case the IDF weighting helps improve the
quality of the clustering by quite a lot as measured against the "ground truth"
provided by the class label assignments of the 20 newsgroups dataset.
This improvement is not visible in the Silhouette Coefficient which is small
for both as this measure seem to suffer from the phenomenon called
"Concentration of Measure" or "Curse of Dimensionality" for high dimensional
datasets such as text data. Other measures, such as V-measure and Adjusted Rand
Index, are information-theoretic evaluation scores: because they are based only
on cluster assignments rather than distances, they are not affected by the
curse of dimensionality.
Note: as k-means is optimizing a non-convex objective function, it will likely
end up in a local optimum. Several runs with independent random init might be
necessary to get a good convergence.
"""
# Author: Peter Prettenhofer <[email protected]>
# Lars Buitinck <[email protected]>
# License: BSD 3 clause
from __future__ import print_function
from sklearn.datasets import fetch_20newsgroups
from sklearn.decomposition import TruncatedSVD
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import Normalizer
from sklearn import metrics
from sklearn.cluster import KMeans, MiniBatchKMeans
import logging
from optparse import OptionParser
import sys
from time import time
import numpy as np
# Display progress logs on stdout
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(levelname)s %(message)s')
# parse commandline arguments
op = OptionParser()
op.add_option("--lsa",
dest="n_components", type="int",
help="Preprocess documents with latent semantic analysis.")
op.add_option("--no-minibatch",
action="store_false", dest="minibatch", default=True,
help="Use ordinary k-means algorithm (in batch mode).")
op.add_option("--no-idf",
action="store_false", dest="use_idf", default=True,
help="Disable Inverse Document Frequency feature weighting.")
op.add_option("--use-hashing",
action="store_true", default=False,
help="Use a hashing feature vectorizer")
op.add_option("--n-features", type=int, default=10000,
help="Maximum number of features (dimensions)"
" to extract from text.")
op.add_option("--verbose",
action="store_true", dest="verbose", default=False,
help="Print progress reports inside k-means algorithm.")
print(__doc__)
op.print_help()
(opts, args) = op.parse_args()
if len(args) > 0:
op.error("this script takes no arguments.")
sys.exit(1)
###############################################################################
# Load some categories from the training set
categories = [
'alt.atheism',
'talk.religion.misc',
'comp.graphics',
'sci.space',
]
# Uncomment the following to do the analysis on all the categories
#categories = None
print("Loading 20 newsgroups dataset for categories:")
print(categories)
dataset = fetch_20newsgroups(subset='all', categories=categories,
shuffle=True, random_state=42)
print("%d documents" % len(dataset.data))
print("%d categories" % len(dataset.target_names))
print()
labels = dataset.target
true_k = np.unique(labels).shape[0]
print("Extracting features from the training dataset using a sparse vectorizer")
t0 = time()
if opts.use_hashing:
if opts.use_idf:
# Perform an IDF normalization on the output of HashingVectorizer
hasher = HashingVectorizer(n_features=opts.n_features,
stop_words='english', non_negative=True,
norm=None, binary=False)
vectorizer = make_pipeline(hasher, TfidfTransformer())
else:
vectorizer = HashingVectorizer(n_features=opts.n_features,
stop_words='english',
non_negative=False, norm='l2',
binary=False)
else:
vectorizer = TfidfVectorizer(max_df=0.5, max_features=opts.n_features,
min_df=2, stop_words='english',
use_idf=opts.use_idf)
X = vectorizer.fit_transform(dataset.data)
print("done in %fs" % (time() - t0))
print("n_samples: %d, n_features: %d" % X.shape)
print()
if opts.n_components:
print("Performing dimensionality reduction using LSA")
t0 = time()
# Vectorizer results are normalized, which makes KMeans behave as
# spherical k-means for better results. Since LSA/SVD results are
# not normalized, we have to redo the normalization.
svd = TruncatedSVD(opts.n_components)
normalizer = Normalizer(copy=False)
lsa = make_pipeline(svd, normalizer)
X = lsa.fit_transform(X)
print("done in %fs" % (time() - t0))
explained_variance = svd.explained_variance_ratio_.sum()
print("Explained variance of the SVD step: {}%".format(
int(explained_variance * 100)))
print()
###############################################################################
# Do the actual clustering
if opts.minibatch:
km = MiniBatchKMeans(n_clusters=true_k, init='k-means++', n_init=1,
init_size=1000, batch_size=1000, verbose=opts.verbose)
else:
km = KMeans(n_clusters=true_k, init='k-means++', max_iter=100, n_init=1,
verbose=opts.verbose)
print("Clustering sparse data with %s" % km)
t0 = time()
km.fit(X)
print("done in %0.3fs" % (time() - t0))
print()
print("Homogeneity: %0.3f" % metrics.homogeneity_score(labels, km.labels_))
print("Completeness: %0.3f" % metrics.completeness_score(labels, km.labels_))
print("V-measure: %0.3f" % metrics.v_measure_score(labels, km.labels_))
print("Adjusted Rand-Index: %.3f"
% metrics.adjusted_rand_score(labels, km.labels_))
print("Silhouette Coefficient: %0.3f"
% metrics.silhouette_score(X, km.labels_, sample_size=1000))
print()
if not opts.use_hashing:
print("Top terms per cluster:")
if opts.n_components:
original_space_centroids = svd.inverse_transform(km.cluster_centers_)
order_centroids = original_space_centroids.argsort()[:, ::-1]
else:
order_centroids = km.cluster_centers_.argsort()[:, ::-1]
terms = vectorizer.get_feature_names()
for i in range(true_k):
print("Cluster %d:" % i, end='')
for ind in order_centroids[i, :10]:
print(' %s' % terms[ind], end='')
print()
| bsd-3-clause |
MartialD/hyperspy | hyperspy/drawing/marker.py | 4 | 9439 | # -*- coding: utf-8 -*-
# Copyright 2007-2016 The HyperSpy developers
#
# This file is part of HyperSpy.
#
# HyperSpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# HyperSpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HyperSpy. If not, see <http://www.gnu.org/licenses/>.
import numpy as np
import matplotlib.pyplot as plt
from hyperspy.events import Event, Events
import hyperspy.drawing._markers as markers
import logging
_logger = logging.getLogger(__name__)
class MarkerBase(object):
"""Marker that can be added to the signal figure
Attributes
----------
marker_properties : dictionary
        Accepts a dictionary of valid line properties (i.e. properties
        recognized by mpl.plot). In addition it understands the keyword
        `type`, which can take the following values:
{'line', 'text'}
"""
def __init__(self):
# Data attributes
self.data = None
self.axes_manager = None
self.ax = None
self.auto_update = True
# Properties
self.marker = None
self._marker_properties = {}
self.signal = None
self._plot_on_signal = True
self.name = ''
self.plot_marker = True
# Events
self.events = Events()
self.events.closed = Event("""
Event triggered when a marker is closed.
Arguments
---------
marker : Marker
The marker that was closed.
""", arguments=['obj'])
self._closing = False
def __deepcopy__(self, memo):
new_marker = dict2marker(
self._to_dictionary(),
self.name)
return new_marker
@property
def marker_properties(self):
return self._marker_properties
@marker_properties.setter
def marker_properties(self, kwargs):
for key, item in kwargs.items():
if item is None and key in self._marker_properties:
del self._marker_properties[key]
else:
self._marker_properties[key] = item
if self.marker is not None:
plt.setp(self.marker, **self.marker_properties)
self._render_figure()
def _to_dictionary(self):
marker_dict = {
'marker_properties': self.marker_properties,
'marker_type': self.__class__.__name__,
'plot_on_signal': self._plot_on_signal,
'data': {k: self.data[k][()].tolist() for k in (
'x1', 'x2', 'y1', 'y2', 'text', 'size')}
}
return marker_dict
def _get_data_shape(self):
data_shape = None
for key in ('x1', 'x2', 'y1', 'y2'):
ar = self.data[key][()]
if next(ar.flat) is not None:
data_shape = ar.shape
break
if data_shape is None:
raise ValueError("None of the coordinates have value")
else:
return data_shape
def set_marker_properties(self, **kwargs):
"""
        Set the marker_properties attribute using keyword
arguments.
"""
self.marker_properties = kwargs
def set_data(self, x1=None, y1=None,
x2=None, y2=None, text=None, size=None):
"""
Set data to the structured array. Each field of data should have
        the same dimensions as the navigation axes. The other fields are
overwritten.
"""
self.data = np.array((np.array(x1), np.array(y1),
np.array(x2), np.array(y2),
np.array(text), np.array(size)),
dtype=[('x1', object), ('y1', object),
('x2', object), ('y2', object),
('text', object), ('size', object)])
self._is_marker_static()
def add_data(self, **kwargs):
"""
Add data to the structured array. Each field of data should have
        the same dimensions as the navigation axes. The other fields are
not changed.
"""
if self.data is None:
self.set_data(**kwargs)
else:
for key in kwargs.keys():
self.data[key][()] = np.array(kwargs[key])
self._is_marker_static()
def isiterable(self, obj):
return not isinstance(obj, (str, bytes)) and hasattr(obj, '__iter__')
def _is_marker_static(self):
test = [self.isiterable(self.data[key].item()[()]) is False
for key in self.data.dtype.names]
if np.alltrue(test):
self.auto_update = False
else:
self.auto_update = True
def get_data_position(self, ind):
data = self.data
if data[ind].item()[()] is None:
return None
elif self.isiterable(data[ind].item()[()]) and self.auto_update:
if self.axes_manager is None:
return self.data['x1'].item().flatten()[0]
indices = self.axes_manager.indices[::-1]
return data[ind].item()[indices]
else:
return data[ind].item()[()]
def plot(self, render_figure=True):
"""
Plot a marker which has been added to a signal.
Parameters
----------
render_figure : bool, optional, default True
If True, will render the figure after adding the marker.
            If False, the marker will be added to the plot, but the figure
will not be rendered. This is useful when plotting many markers,
since rendering the figure after adding each marker will slow
things down.
"""
if self.ax is None:
            raise AttributeError(
                "To use this method the marker needs to be first added to a " +
                "figure using `s._plot.signal_plot.add_marker(m)` or " +
                "`s._plot.navigator_plot.add_marker(m)`")
self._plot_marker()
self.marker.set_animated(self.ax.figure.canvas.supports_blit)
if render_figure:
self._render_figure()
def _render_figure(self):
if self.ax.figure.canvas.supports_blit:
self.ax.hspy_fig._update_animated()
else:
self.ax.figure.canvas.draw_idle()
def close(self, render_figure=True):
"""Remove and disconnect the marker.
Parameters
----------
render_figure : bool, optional, default True
If True, the figure is rendered after removing the marker.
If False, the figure is not rendered after removing the marker.
This is useful when many markers are removed from a figure,
since rendering the figure after removing each marker will slow
things down.
"""
if self._closing:
return
self._closing = True
self.marker.remove()
self.events.closed.trigger(obj=self)
for f in self.events.closed.connected:
self.events.closed.disconnect(f)
if render_figure:
self._render_figure()
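# Illustrative sketch only (a hypothetical helper, not called anywhere in this
# module): the ``marker_properties`` setter defined above merges keyword
# arguments into the stored dictionary and drops a key again when its value is
# set to ``None``. No figure is touched here because ``self.marker`` is None.
def _marker_properties_sketch():
    m = MarkerBase()
    m.set_marker_properties(color='red', linewidth=2)
    assert m.marker_properties == {'color': 'red', 'linewidth': 2}
    m.set_marker_properties(linewidth=None)  # a value of None removes the key
    assert m.marker_properties == {'color': 'red'}
    return m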
def dict2marker(marker_dict, marker_name):
marker_type = marker_dict['marker_type']
if marker_type == 'Point':
marker = markers.point.Point(0, 0)
elif marker_type == 'HorizontalLine':
marker = markers.horizontal_line.HorizontalLine(0)
elif marker_type == 'HorizontalLineSegment':
marker = markers.horizontal_line_segment.HorizontalLineSegment(0, 0, 0)
elif marker_type == 'LineSegment':
marker = markers.line_segment.LineSegment(0, 0, 0, 0)
elif marker_type == 'Rectangle':
marker = markers.rectangle.Rectangle(0, 0, 0, 0)
elif marker_type == 'Text':
marker = markers.text.Text(0, 0, "")
elif marker_type == 'VerticalLine':
marker = markers.vertical_line.VerticalLine(0)
elif marker_type == 'VerticalLineSegment':
marker = markers.vertical_line_segment.VerticalLineSegment(0, 0, 0)
else:
_log = logging.getLogger(__name__)
_log.warning(
"Marker {} with marker type {} "
"not recognized".format(marker_name, marker_type))
return(False)
marker.set_data(**marker_dict['data'])
marker.set_marker_properties(**marker_dict['marker_properties'])
marker._plot_on_signal = marker_dict['plot_on_signal']
marker.name = marker_name
return(marker)
def markers_metadata_dict_to_markers(metadata_markers_dict, axes_manager):
markers_dict = {}
for marker_name, m_dict in metadata_markers_dict.items():
try:
marker = dict2marker(m_dict, marker_name)
if marker is not False:
marker.axes_manager = axes_manager
markers_dict[marker_name] = marker
except Exception as expt:
_logger.warning(
"Marker {} could not be loaded, skipping it. "
"Error: {}".format(marker_name, expt))
return(markers_dict)
| gpl-3.0 |
xiaoxiamii/scikit-learn | sklearn/decomposition/tests/test_sparse_pca.py | 160 | 6028 | # Author: Vlad Niculae
# License: BSD 3 clause
import sys
import numpy as np
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import if_safe_multiprocessing_with_blas
from sklearn.decomposition import SparsePCA, MiniBatchSparsePCA
from sklearn.utils import check_random_state
def generate_toy_data(n_components, n_samples, image_size, random_state=None):
n_features = image_size[0] * image_size[1]
rng = check_random_state(random_state)
U = rng.randn(n_samples, n_components)
V = rng.randn(n_components, n_features)
centers = [(3, 3), (6, 7), (8, 1)]
sz = [1, 2, 1]
for k in range(n_components):
img = np.zeros(image_size)
xmin, xmax = centers[k][0] - sz[k], centers[k][0] + sz[k]
ymin, ymax = centers[k][1] - sz[k], centers[k][1] + sz[k]
img[xmin:xmax][:, ymin:ymax] = 1.0
V[k, :] = img.ravel()
# Y is defined by : Y = UV + noise
Y = np.dot(U, V)
Y += 0.1 * rng.randn(Y.shape[0], Y.shape[1]) # Add noise
return Y, U, V
# SparsePCA can be a bit slow. To avoid having test times go up, we
# test different aspects of the code in the same test
def test_correct_shapes():
rng = np.random.RandomState(0)
X = rng.randn(12, 10)
spca = SparsePCA(n_components=8, random_state=rng)
U = spca.fit_transform(X)
assert_equal(spca.components_.shape, (8, 10))
assert_equal(U.shape, (12, 8))
# test overcomplete decomposition
spca = SparsePCA(n_components=13, random_state=rng)
U = spca.fit_transform(X)
assert_equal(spca.components_.shape, (13, 10))
assert_equal(U.shape, (12, 13))
def test_fit_transform():
alpha = 1
rng = np.random.RandomState(0)
Y, _, _ = generate_toy_data(3, 10, (8, 8), random_state=rng) # wide array
spca_lars = SparsePCA(n_components=3, method='lars', alpha=alpha,
random_state=0)
spca_lars.fit(Y)
# Test that CD gives similar results
spca_lasso = SparsePCA(n_components=3, method='cd', random_state=0,
alpha=alpha)
spca_lasso.fit(Y)
assert_array_almost_equal(spca_lasso.components_, spca_lars.components_)
@if_safe_multiprocessing_with_blas
def test_fit_transform_parallel():
alpha = 1
rng = np.random.RandomState(0)
Y, _, _ = generate_toy_data(3, 10, (8, 8), random_state=rng) # wide array
spca_lars = SparsePCA(n_components=3, method='lars', alpha=alpha,
random_state=0)
spca_lars.fit(Y)
U1 = spca_lars.transform(Y)
# Test multiple CPUs
spca = SparsePCA(n_components=3, n_jobs=2, method='lars', alpha=alpha,
random_state=0).fit(Y)
U2 = spca.transform(Y)
assert_true(not np.all(spca_lars.components_ == 0))
assert_array_almost_equal(U1, U2)
def test_transform_nan():
# Test that SparsePCA won't return NaN when there is 0 feature in all
# samples.
rng = np.random.RandomState(0)
Y, _, _ = generate_toy_data(3, 10, (8, 8), random_state=rng) # wide array
Y[:, 0] = 0
estimator = SparsePCA(n_components=8)
assert_false(np.any(np.isnan(estimator.fit_transform(Y))))
def test_fit_transform_tall():
rng = np.random.RandomState(0)
Y, _, _ = generate_toy_data(3, 65, (8, 8), random_state=rng) # tall array
spca_lars = SparsePCA(n_components=3, method='lars',
random_state=rng)
U1 = spca_lars.fit_transform(Y)
spca_lasso = SparsePCA(n_components=3, method='cd', random_state=rng)
U2 = spca_lasso.fit(Y).transform(Y)
assert_array_almost_equal(U1, U2)
def test_initialization():
rng = np.random.RandomState(0)
U_init = rng.randn(5, 3)
V_init = rng.randn(3, 4)
model = SparsePCA(n_components=3, U_init=U_init, V_init=V_init, max_iter=0,
random_state=rng)
model.fit(rng.randn(5, 4))
assert_array_equal(model.components_, V_init)
def test_mini_batch_correct_shapes():
rng = np.random.RandomState(0)
X = rng.randn(12, 10)
pca = MiniBatchSparsePCA(n_components=8, random_state=rng)
U = pca.fit_transform(X)
assert_equal(pca.components_.shape, (8, 10))
assert_equal(U.shape, (12, 8))
# test overcomplete decomposition
pca = MiniBatchSparsePCA(n_components=13, random_state=rng)
U = pca.fit_transform(X)
assert_equal(pca.components_.shape, (13, 10))
assert_equal(U.shape, (12, 13))
def test_mini_batch_fit_transform():
raise SkipTest("skipping mini_batch_fit_transform.")
alpha = 1
rng = np.random.RandomState(0)
Y, _, _ = generate_toy_data(3, 10, (8, 8), random_state=rng) # wide array
spca_lars = MiniBatchSparsePCA(n_components=3, random_state=0,
alpha=alpha).fit(Y)
U1 = spca_lars.transform(Y)
# Test multiple CPUs
if sys.platform == 'win32': # fake parallelism for win32
import sklearn.externals.joblib.parallel as joblib_par
_mp = joblib_par.multiprocessing
joblib_par.multiprocessing = None
try:
U2 = MiniBatchSparsePCA(n_components=3, n_jobs=2, alpha=alpha,
random_state=0).fit(Y).transform(Y)
finally:
joblib_par.multiprocessing = _mp
else: # we can efficiently use parallelism
U2 = MiniBatchSparsePCA(n_components=3, n_jobs=2, alpha=alpha,
random_state=0).fit(Y).transform(Y)
assert_true(not np.all(spca_lars.components_ == 0))
assert_array_almost_equal(U1, U2)
# Test that CD gives similar results
spca_lasso = MiniBatchSparsePCA(n_components=3, method='cd', alpha=alpha,
random_state=0).fit(Y)
assert_array_almost_equal(spca_lasso.components_, spca_lars.components_)
| bsd-3-clause |
themrmax/scikit-learn | examples/covariance/plot_covariance_estimation.py | 99 | 5074 | """
=======================================================================
Shrinkage covariance estimation: LedoitWolf vs OAS and max-likelihood
=======================================================================
When working with covariance estimation, the usual approach is to use
a maximum likelihood estimator, such as the
:class:`sklearn.covariance.EmpiricalCovariance`. It is unbiased, i.e. it
converges to the true (population) covariance when given many
observations. However, it can also be beneficial to regularize it, in
order to reduce its variance; this, in turn, introduces some bias. This
example illustrates the simple regularization used in
:ref:`shrunk_covariance` estimators. In particular, it focuses on how to
set the amount of regularization, i.e. how to choose the bias-variance
trade-off.
Here we compare 3 approaches:
* Setting the parameter by cross-validating the likelihood on three folds
according to a grid of potential shrinkage parameters.
* A closed formula proposed by Ledoit and Wolf to compute
the asymptotically optimal regularization parameter (minimizing a MSE
criterion), yielding the :class:`sklearn.covariance.LedoitWolf`
covariance estimate.
* An improvement of the Ledoit-Wolf shrinkage, the
:class:`sklearn.covariance.OAS`, proposed by Chen et al. Its
convergence is significantly better under the assumption that the data
are Gaussian, in particular for small samples.
To quantify estimation error, we plot the likelihood of unseen data for
different values of the shrinkage parameter. We also show the choices by
cross-validation, or with the LedoitWolf and OAS estimates.
Note that the maximum likelihood estimate corresponds to no shrinkage,
and thus performs poorly. The Ledoit-Wolf estimate performs really well,
as it is close to the optimal and is not computationally costly. In this
example, the OAS estimate is a bit further away. Interestingly, both
approaches outperform cross-validation, which is significantly more
computationally costly.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from scipy import linalg
from sklearn.covariance import LedoitWolf, OAS, ShrunkCovariance, \
log_likelihood, empirical_covariance
from sklearn.model_selection import GridSearchCV
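# A minimal sketch of the "shrunk covariance" regularization discussed in the
# docstring above: the empirical covariance is blended with a scaled identity
# matrix, which reduces variance at the cost of some bias. This helper is
# illustrative only (its name is hypothetical and it is not used below); the
# estimators imported above implement the same idea.
def _shrunk_covariance_sketch(emp_cov, shrinkage=0.1):
    n_features = emp_cov.shape[0]
    mu = np.trace(emp_cov) / n_features  # average variance across features
    return (1. - shrinkage) * emp_cov + shrinkage * mu * np.eye(n_features)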
###############################################################################
# Generate sample data
n_features, n_samples = 40, 20
np.random.seed(42)
base_X_train = np.random.normal(size=(n_samples, n_features))
base_X_test = np.random.normal(size=(n_samples, n_features))
# Color samples
coloring_matrix = np.random.normal(size=(n_features, n_features))
X_train = np.dot(base_X_train, coloring_matrix)
X_test = np.dot(base_X_test, coloring_matrix)
###############################################################################
# Compute the likelihood on test data
# spanning a range of possible shrinkage coefficient values
shrinkages = np.logspace(-2, 0, 30)
negative_logliks = [-ShrunkCovariance(shrinkage=s).fit(X_train).score(X_test)
for s in shrinkages]
# under the ground-truth model, which we would not have access to in real
# settings
real_cov = np.dot(coloring_matrix.T, coloring_matrix)
emp_cov = empirical_covariance(X_train)
loglik_real = -log_likelihood(emp_cov, linalg.inv(real_cov))
###############################################################################
# Compare different approaches to setting the parameter
# GridSearch for an optimal shrinkage coefficient
tuned_parameters = [{'shrinkage': shrinkages}]
cv = GridSearchCV(ShrunkCovariance(), tuned_parameters)
cv.fit(X_train)
# Ledoit-Wolf optimal shrinkage coefficient estimate
lw = LedoitWolf()
loglik_lw = lw.fit(X_train).score(X_test)
# OAS coefficient estimate
oa = OAS()
loglik_oa = oa.fit(X_train).score(X_test)
###############################################################################
# Plot results
fig = plt.figure()
plt.title("Regularized covariance: likelihood and shrinkage coefficient")
plt.xlabel('Regularization parameter: shrinkage coefficient')
plt.ylabel('Error: negative log-likelihood on test data')
# range shrinkage curve
plt.loglog(shrinkages, negative_logliks, label="Negative log-likelihood")
plt.plot(plt.xlim(), 2 * [loglik_real], '--r',
label="Real covariance likelihood")
# adjust view
lik_max = np.amax(negative_logliks)
lik_min = np.amin(negative_logliks)
ymin = lik_min - 6. * np.log((plt.ylim()[1] - plt.ylim()[0]))
ymax = lik_max + 10. * np.log(lik_max - lik_min)
xmin = shrinkages[0]
xmax = shrinkages[-1]
# LW likelihood
plt.vlines(lw.shrinkage_, ymin, -loglik_lw, color='magenta',
linewidth=3, label='Ledoit-Wolf estimate')
# OAS likelihood
plt.vlines(oa.shrinkage_, ymin, -loglik_oa, color='purple',
linewidth=3, label='OAS estimate')
# best CV estimator likelihood
plt.vlines(cv.best_estimator_.shrinkage, ymin,
-cv.best_estimator_.score(X_test), color='cyan',
linewidth=3, label='Cross-validation best estimate')
plt.ylim(ymin, ymax)
plt.xlim(xmin, xmax)
plt.legend()
plt.show()
| bsd-3-clause |
billy-inn/scikit-learn | examples/manifold/plot_mds.py | 261 | 2616 | """
=========================
Multi-dimensional scaling
=========================
An illustration of the metric and non-metric MDS on generated noisy data.
The reconstructed points using the metric MDS and non metric MDS are slightly
shifted to avoid overlapping.
"""
# Author: Nelle Varoquaux <[email protected]>
# Licence: BSD
print(__doc__)
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.collections import LineCollection
from sklearn import manifold
from sklearn.metrics import euclidean_distances
from sklearn.decomposition import PCA
n_samples = 20
seed = np.random.RandomState(seed=3)
X_true = seed.randint(0, 20, 2 * n_samples).astype(np.float)
X_true = X_true.reshape((n_samples, 2))
# Center the data
X_true -= X_true.mean()
similarities = euclidean_distances(X_true)
# Add noise to the similarities
noise = np.random.rand(n_samples, n_samples)
noise = noise + noise.T
noise[np.arange(noise.shape[0]), np.arange(noise.shape[0])] = 0
similarities += noise
mds = manifold.MDS(n_components=2, max_iter=3000, eps=1e-9, random_state=seed,
dissimilarity="precomputed", n_jobs=1)
pos = mds.fit(similarities).embedding_
nmds = manifold.MDS(n_components=2, metric=False, max_iter=3000, eps=1e-12,
dissimilarity="precomputed", random_state=seed, n_jobs=1,
n_init=1)
npos = nmds.fit_transform(similarities, init=pos)
# Rescale the data
pos *= np.sqrt((X_true ** 2).sum()) / np.sqrt((pos ** 2).sum())
npos *= np.sqrt((X_true ** 2).sum()) / np.sqrt((npos ** 2).sum())
# Rotate the data
clf = PCA(n_components=2)
X_true = clf.fit_transform(X_true)
pos = clf.fit_transform(pos)
npos = clf.fit_transform(npos)
fig = plt.figure(1)
ax = plt.axes([0., 0., 1., 1.])
plt.scatter(X_true[:, 0], X_true[:, 1], c='r', s=20)
plt.scatter(pos[:, 0], pos[:, 1], s=20, c='g')
plt.scatter(npos[:, 0], npos[:, 1], s=20, c='b')
plt.legend(('True position', 'MDS', 'NMDS'), loc='best')
similarities = similarities.max() / similarities * 100
similarities[np.isinf(similarities)] = 0
# Plot the edges
start_idx, end_idx = np.where(pos)
#a sequence of (*line0*, *line1*, *line2*), where::
# linen = (x0, y0), (x1, y1), ... (xm, ym)
segments = [[X_true[i, :], X_true[j, :]]
for i in range(len(pos)) for j in range(len(pos))]
values = np.abs(similarities)
lc = LineCollection(segments,
zorder=0, cmap=plt.cm.hot_r,
norm=plt.Normalize(0, values.max()))
lc.set_array(similarities.flatten())
lc.set_linewidths(0.5 * np.ones(len(segments)))
ax.add_collection(lc)
plt.show()
| bsd-3-clause |
dsquareindia/scikit-learn | examples/classification/plot_classification_probability.py | 138 | 2871 | """
===============================
Plot classification probability
===============================
Plot the classification probability for different classifiers. We use a 3
class dataset, and we classify it with a Support Vector classifier, L1
and L2 penalized logistic regression with either a One-Vs-Rest or multinomial
setting, and Gaussian process classification.
The logistic regression is not a multiclass classifier out of the box. As
a result it can identify only the first class.
"""
print(__doc__)
# Author: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import RBF
from sklearn import datasets
iris = datasets.load_iris()
X = iris.data[:, 0:2] # we only take the first two features for visualization
y = iris.target
n_features = X.shape[1]
C = 1.0
kernel = 1.0 * RBF([1.0, 1.0]) # for GPC
# Create different classifiers. The logistic regression cannot do
# multiclass out of the box.
classifiers = {'L1 logistic': LogisticRegression(C=C, penalty='l1'),
'L2 logistic (OvR)': LogisticRegression(C=C, penalty='l2'),
'Linear SVC': SVC(kernel='linear', C=C, probability=True,
random_state=0),
'L2 logistic (Multinomial)': LogisticRegression(
C=C, solver='lbfgs', multi_class='multinomial'),
'GPC': GaussianProcessClassifier(kernel)
}
n_classifiers = len(classifiers)
plt.figure(figsize=(3 * 2, n_classifiers * 2))
plt.subplots_adjust(bottom=.2, top=.95)
xx = np.linspace(3, 9, 100)
yy = np.linspace(1, 5, 100).T
xx, yy = np.meshgrid(xx, yy)
Xfull = np.c_[xx.ravel(), yy.ravel()]
for index, (name, classifier) in enumerate(classifiers.items()):
classifier.fit(X, y)
y_pred = classifier.predict(X)
classif_rate = np.mean(y_pred.ravel() == y.ravel()) * 100
print("classif_rate for %s : %f " % (name, classif_rate))
    # View probabilities
probas = classifier.predict_proba(Xfull)
n_classes = np.unique(y_pred).size
for k in range(n_classes):
plt.subplot(n_classifiers, n_classes, index * n_classes + k + 1)
plt.title("Class %d" % k)
if k == 0:
plt.ylabel(name)
imshow_handle = plt.imshow(probas[:, k].reshape((100, 100)),
extent=(3, 9, 1, 5), origin='lower')
plt.xticks(())
plt.yticks(())
idx = (y_pred == k)
if idx.any():
plt.scatter(X[idx, 0], X[idx, 1], marker='o', c='k')
ax = plt.axes([0.15, 0.04, 0.7, 0.05])
plt.title("Probability")
plt.colorbar(imshow_handle, cax=ax, orientation='horizontal')
plt.show()
| bsd-3-clause |
louispotok/pandas | pandas/io/formats/latex.py | 3 | 9390 | # -*- coding: utf-8 -*-
"""
Module for formatting output data in Latex.
"""
from __future__ import print_function
from pandas.core.index import MultiIndex
from pandas import compat
from pandas.compat import range, map, zip, u
from pandas.io.formats.format import TableFormatter
import numpy as np
class LatexFormatter(TableFormatter):
""" Used to render a DataFrame to a LaTeX tabular/longtable environment
output.
Parameters
----------
formatter : `DataFrameFormatter`
column_format : str, default None
        The column format as specified in `LaTeX table format
        <https://en.wikibooks.org/wiki/LaTeX/Tables>`__, e.g. 'rcl' for 3 columns
longtable : boolean, default False
Use a longtable environment instead of tabular.
See Also
--------
HTMLFormatter
"""
def __init__(self, formatter, column_format=None, longtable=False,
multicolumn=False, multicolumn_format=None, multirow=False):
self.fmt = formatter
self.frame = self.fmt.frame
self.bold_rows = self.fmt.kwds.get('bold_rows', False)
self.column_format = column_format
self.longtable = longtable
self.multicolumn = multicolumn
self.multicolumn_format = multicolumn_format
self.multirow = multirow
def write_result(self, buf):
"""
Render a DataFrame to a LaTeX tabular/longtable environment output.
"""
# string representation of the columns
if len(self.frame.columns) == 0 or len(self.frame.index) == 0:
info_line = (u('Empty {name}\nColumns: {col}\nIndex: {idx}')
.format(name=type(self.frame).__name__,
col=self.frame.columns,
idx=self.frame.index))
strcols = [[info_line]]
else:
strcols = self.fmt._to_str_columns()
def get_col_type(dtype):
if issubclass(dtype.type, np.number):
return 'r'
else:
return 'l'
        # reestablish the MultiIndex that has been joined by _to_str_columns
if self.fmt.index and isinstance(self.frame.index, MultiIndex):
out = self.frame.index.format(
adjoin=False, sparsify=self.fmt.sparsify,
names=self.fmt.has_index_names, na_rep=self.fmt.na_rep
)
# index.format will sparsify repeated entries with empty strings
# so pad these with some empty space
def pad_empties(x):
for pad in reversed(x):
if pad:
break
return [x[0]] + [i if i else ' ' * len(pad) for i in x[1:]]
out = (pad_empties(i) for i in out)
# Add empty spaces for each column level
clevels = self.frame.columns.nlevels
out = [[' ' * len(i[-1])] * clevels + i for i in out]
# Add the column names to the last index column
cnames = self.frame.columns.names
if any(cnames):
new_names = [i if i else '{}' for i in cnames]
out[self.frame.index.nlevels - 1][:clevels] = new_names
# Get rid of old multiindex column and add new ones
strcols = out + strcols[1:]
column_format = self.column_format
if column_format is None:
dtypes = self.frame.dtypes._values
column_format = ''.join(map(get_col_type, dtypes))
if self.fmt.index:
index_format = 'l' * self.frame.index.nlevels
column_format = index_format + column_format
elif not isinstance(column_format,
compat.string_types): # pragma: no cover
raise AssertionError('column_format must be str or unicode, '
'not {typ}'.format(typ=type(column_format)))
if not self.longtable:
buf.write('\\begin{{tabular}}{{{fmt}}}\n'
.format(fmt=column_format))
buf.write('\\toprule\n')
else:
buf.write('\\begin{{longtable}}{{{fmt}}}\n'
.format(fmt=column_format))
buf.write('\\toprule\n')
ilevels = self.frame.index.nlevels
clevels = self.frame.columns.nlevels
nlevels = clevels
if self.fmt.has_index_names and self.fmt.show_index_names:
nlevels += 1
strrows = list(zip(*strcols))
self.clinebuf = []
for i, row in enumerate(strrows):
if i == nlevels and self.fmt.header:
buf.write('\\midrule\n') # End of header
if self.longtable:
buf.write('\\endhead\n')
buf.write('\\midrule\n')
buf.write('\\multicolumn{{{n}}}{{r}}{{{{Continued on next '
'page}}}} \\\\\n'.format(n=len(row)))
buf.write('\\midrule\n')
buf.write('\\endfoot\n\n')
buf.write('\\bottomrule\n')
buf.write('\\endlastfoot\n')
if self.fmt.kwds.get('escape', True):
# escape backslashes first
crow = [(x.replace('\\', '\\textbackslash ')
.replace('_', '\\_')
.replace('%', '\\%').replace('$', '\\$')
.replace('#', '\\#').replace('{', '\\{')
.replace('}', '\\}').replace('~', '\\textasciitilde ')
.replace('^', '\\textasciicircum ')
.replace('&', '\\&')
if (x and x != '{}') else '{}') for x in row]
else:
crow = [x if x else '{}' for x in row]
if self.bold_rows and self.fmt.index:
# bold row labels
crow = ['\\textbf{{{x}}}'.format(x=x)
if j < ilevels and x.strip() not in ['', '{}'] else x
for j, x in enumerate(crow)]
if i < clevels and self.fmt.header and self.multicolumn:
# sum up columns to multicolumns
crow = self._format_multicolumn(crow, ilevels)
if (i >= nlevels and self.fmt.index and self.multirow and
ilevels > 1):
# sum up rows to multirows
crow = self._format_multirow(crow, ilevels, i, strrows)
buf.write(' & '.join(crow))
buf.write(' \\\\\n')
if self.multirow and i < len(strrows) - 1:
self._print_cline(buf, i, len(strcols))
if not self.longtable:
buf.write('\\bottomrule\n')
buf.write('\\end{tabular}\n')
else:
buf.write('\\end{longtable}\n')
def _format_multicolumn(self, row, ilevels):
r"""
Combine columns belonging to a group to a single multicolumn entry
according to self.multicolumn_format
e.g.:
a & & & b & c &
will become
\multicolumn{3}{l}{a} & b & \multicolumn{2}{l}{c}
"""
row2 = list(row[:ilevels])
ncol = 1
coltext = ''
def append_col():
# write multicolumn if needed
if ncol > 1:
row2.append('\\multicolumn{{{ncol:d}}}{{{fmt:s}}}{{{txt:s}}}'
.format(ncol=ncol, fmt=self.multicolumn_format,
txt=coltext.strip()))
# don't modify where not needed
else:
row2.append(coltext)
for c in row[ilevels:]:
# if next col has text, write the previous
if c.strip():
if coltext:
append_col()
coltext = c
ncol = 1
# if not, add it to the previous multicolumn
else:
ncol += 1
# write last column name
if coltext:
append_col()
return row2
def _format_multirow(self, row, ilevels, i, rows):
r"""
Check following rows, whether row should be a multirow
        e.g.:     becomes:
        a & 0 &   \multirow{2}{*}{a} & 0 &
          & 1 &   & 1 &
        b & 0 &   \cline{1-2}
                  b & 0 &
"""
for j in range(ilevels):
if row[j].strip():
nrow = 1
for r in rows[i + 1:]:
if not r[j].strip():
nrow += 1
else:
break
if nrow > 1:
# overwrite non-multirow entry
row[j] = '\\multirow{{{nrow:d}}}{{*}}{{{row:s}}}'.format(
nrow=nrow, row=row[j].strip())
# save when to end the current block with \cline
self.clinebuf.append([i + nrow - 1, j + 1])
return row
def _print_cline(self, buf, i, icol):
"""
Print clines after multirow-blocks are finished
"""
for cl in self.clinebuf:
if cl[0] == i:
buf.write('\\cline{{{cl:d}-{icol:d}}}\n'
.format(cl=cl[1], icol=icol))
# remove entries that have been written to buffer
self.clinebuf = [x for x in self.clinebuf if x[0] != i]
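# Illustrative sketch only (a hypothetical helper, not part of the pandas API
# and not used by this module): LatexFormatter is normally driven through
# ``DataFrame.to_latex``, which forwards options such as ``column_format``,
# ``longtable``, ``multicolumn`` and ``multirow`` described in the class
# docstring above.
def _to_latex_sketch():
    import pandas as pd  # local import; this module is part of pandas itself
    df = pd.DataFrame({'a': [1, 2], 'b': [3.0, 4.5]})
    # Returns the LaTeX tabular source as a string.
    return df.to_latex(column_format='lrr', bold_rows=True)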
| bsd-3-clause |
AnasGhrab/scikit-learn | examples/plot_kernel_ridge_regression.py | 230 | 6222 | """
=============================================
Comparison of kernel ridge regression and SVR
=============================================
Both kernel ridge regression (KRR) and SVR learn a non-linear function by
employing the kernel trick, i.e., they learn a linear function in the space
induced by the respective kernel which corresponds to a non-linear function in
the original space. They differ in the loss functions (ridge versus
epsilon-insensitive loss). In contrast to SVR, fitting a KRR can be done in
closed-form and is typically faster for medium-sized datasets. On the other
hand, the learned model is non-sparse and thus slower than SVR at
prediction-time.
This example illustrates both methods on an artificial dataset, which
consists of a sinusoidal target function and strong noise added to every fifth
datapoint. The first figure compares the learned model of KRR and SVR when both
complexity/regularization and bandwidth of the RBF kernel are optimized using
grid-search. The learned functions are very similar; however, fitting KRR is
approx. seven times faster than fitting SVR (both with grid-search). In
contrast, prediction of 100000 target values is more than three times faster
with SVR, since it has learned a sparse model using only approx. 1/3 of the
100 training datapoints as support vectors.
The next figure compares the time for fitting and prediction of KRR and SVR for
different sizes of the training set. Fitting KRR is faster than SVR for medium-
sized training sets (less than 1000 samples); however, for larger training sets
SVR scales better. With regard to prediction time, SVR is faster than
KRR for all sizes of the training set because of the learned sparse
solution. Note that the degree of sparsity and thus the prediction time depends
on the parameters epsilon and C of the SVR.
"""
# Authors: Jan Hendrik Metzen <[email protected]>
# License: BSD 3 clause
from __future__ import division
import time
import numpy as np
from sklearn.svm import SVR
from sklearn.grid_search import GridSearchCV
from sklearn.learning_curve import learning_curve
from sklearn.kernel_ridge import KernelRidge
import matplotlib.pyplot as plt
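# The docstring above states that fitting KRR can be done in closed form. The
# helper below is an illustrative sketch of that statement (a hypothetical
# function, not used by the rest of this example): given a precomputed kernel
# matrix ``K`` over the training data, the dual coefficients come from a
# single regularized linear solve.
def _krr_closed_form_sketch(K, y_train, alpha=1.0):
    n_samples = K.shape[0]
    dual_coef = np.linalg.solve(K + alpha * np.eye(n_samples), y_train)
    # Predictions at new points X_new are K(X_new, X_train).dot(dual_coef).
    return dual_coef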
rng = np.random.RandomState(0)
#############################################################################
# Generate sample data
X = 5 * rng.rand(10000, 1)
y = np.sin(X).ravel()
# Add noise to targets
y[::5] += 3 * (0.5 - rng.rand(X.shape[0] // 5))
X_plot = np.linspace(0, 5, 100000)[:, None]
#############################################################################
# Fit regression model
train_size = 100
svr = GridSearchCV(SVR(kernel='rbf', gamma=0.1), cv=5,
param_grid={"C": [1e0, 1e1, 1e2, 1e3],
"gamma": np.logspace(-2, 2, 5)})
kr = GridSearchCV(KernelRidge(kernel='rbf', gamma=0.1), cv=5,
param_grid={"alpha": [1e0, 0.1, 1e-2, 1e-3],
"gamma": np.logspace(-2, 2, 5)})
t0 = time.time()
svr.fit(X[:train_size], y[:train_size])
svr_fit = time.time() - t0
print("SVR complexity and bandwidth selected and model fitted in %.3f s"
% svr_fit)
t0 = time.time()
kr.fit(X[:train_size], y[:train_size])
kr_fit = time.time() - t0
print("KRR complexity and bandwidth selected and model fitted in %.3f s"
% kr_fit)
sv_ratio = svr.best_estimator_.support_.shape[0] / train_size
print("Support vector ratio: %.3f" % sv_ratio)
t0 = time.time()
y_svr = svr.predict(X_plot)
svr_predict = time.time() - t0
print("SVR prediction for %d inputs in %.3f s"
% (X_plot.shape[0], svr_predict))
t0 = time.time()
y_kr = kr.predict(X_plot)
kr_predict = time.time() - t0
print("KRR prediction for %d inputs in %.3f s"
% (X_plot.shape[0], kr_predict))
#############################################################################
# look at the results
sv_ind = svr.best_estimator_.support_
plt.scatter(X[sv_ind], y[sv_ind], c='r', s=50, label='SVR support vectors')
plt.scatter(X[:100], y[:100], c='k', label='data')
plt.hold('on')
plt.plot(X_plot, y_svr, c='r',
label='SVR (fit: %.3fs, predict: %.3fs)' % (svr_fit, svr_predict))
plt.plot(X_plot, y_kr, c='g',
label='KRR (fit: %.3fs, predict: %.3fs)' % (kr_fit, kr_predict))
plt.xlabel('data')
plt.ylabel('target')
plt.title('SVR versus Kernel Ridge')
plt.legend()
# Visualize training and prediction time
plt.figure()
# Generate sample data
X = 5 * rng.rand(10000, 1)
y = np.sin(X).ravel()
y[::5] += 3 * (0.5 - rng.rand(X.shape[0] // 5))
sizes = np.logspace(1, 4, 7)
for name, estimator in {"KRR": KernelRidge(kernel='rbf', alpha=0.1,
gamma=10),
"SVR": SVR(kernel='rbf', C=1e1, gamma=10)}.items():
train_time = []
test_time = []
for train_test_size in sizes:
t0 = time.time()
estimator.fit(X[:train_test_size], y[:train_test_size])
train_time.append(time.time() - t0)
t0 = time.time()
estimator.predict(X_plot[:1000])
test_time.append(time.time() - t0)
plt.plot(sizes, train_time, 'o-', color="r" if name == "SVR" else "g",
label="%s (train)" % name)
plt.plot(sizes, test_time, 'o--', color="r" if name == "SVR" else "g",
label="%s (test)" % name)
plt.xscale("log")
plt.yscale("log")
plt.xlabel("Train size")
plt.ylabel("Time (seconds)")
plt.title('Execution Time')
plt.legend(loc="best")
# Visualize learning curves
plt.figure()
svr = SVR(kernel='rbf', C=1e1, gamma=0.1)
kr = KernelRidge(kernel='rbf', alpha=0.1, gamma=0.1)
train_sizes, train_scores_svr, test_scores_svr = \
learning_curve(svr, X[:100], y[:100], train_sizes=np.linspace(0.1, 1, 10),
scoring="mean_squared_error", cv=10)
train_sizes_abs, train_scores_kr, test_scores_kr = \
learning_curve(kr, X[:100], y[:100], train_sizes=np.linspace(0.1, 1, 10),
scoring="mean_squared_error", cv=10)
plt.plot(train_sizes, test_scores_svr.mean(1), 'o-', color="r",
label="SVR")
plt.plot(train_sizes, test_scores_kr.mean(1), 'o-', color="g",
label="KRR")
plt.xlabel("Train size")
plt.ylabel("Mean Squared Error")
plt.title('Learning curves')
plt.legend(loc="best")
plt.show()
| bsd-3-clause |
PrashntS/scikit-learn | examples/gaussian_process/plot_gp_regression.py | 253 | 4054 | #!/usr/bin/python
# -*- coding: utf-8 -*-
r"""
=========================================================
Gaussian Processes regression: basic introductory example
=========================================================
A simple one-dimensional regression exercise computed in two different ways:
1. A noise-free case with a cubic correlation model
2. A noisy case with a squared exponential correlation model
In both cases, the model parameters are estimated using the maximum
likelihood principle.
The figures illustrate the interpolating property of the Gaussian Process
model as well as its probabilistic nature in the form of a pointwise 95%
confidence interval.
Note that the parameter ``nugget`` is applied as a Tikhonov regularization
of the assumed covariance between the training points. In the special case
of the squared euclidean correlation model, nugget is mathematically equivalent
to a normalized variance: That is
.. math::
\mathrm{nugget}_i = \left[\frac{\sigma_i}{y_i}\right]^2
"""
print(__doc__)
# Author: Vincent Dubourg <[email protected]>
# Jake Vanderplas <[email protected]>
# Licence: BSD 3 clause
import numpy as np
from sklearn.gaussian_process import GaussianProcess
from matplotlib import pyplot as pl
np.random.seed(1)
def f(x):
"""The function to predict."""
return x * np.sin(x)
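# The docstring's note about the ``nugget`` parameter, sketched as code. This
# is illustrative only (a hypothetical helper, not used below): the squared
# noise-to-signal ratio is added to the diagonal of the assumed correlation
# matrix, i.e. a Tikhonov regularization of the covariance between the
# training points.
def _nugget_sketch(y_obs, dy_obs):
    nugget = (dy_obs / y_obs) ** 2
    R = np.eye(y_obs.size)       # stand-in for the correlation matrix
    return R + np.diag(nugget)   # regularized correlation used when fitting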
#----------------------------------------------------------------------
# First the noiseless case
X = np.atleast_2d([1., 3., 5., 6., 7., 8.]).T
# Observations
y = f(X).ravel()
# Mesh the input space for evaluations of the real function, the prediction and
# its MSE
x = np.atleast_2d(np.linspace(0, 10, 1000)).T
# Instantiate a Gaussian Process model
gp = GaussianProcess(corr='cubic', theta0=1e-2, thetaL=1e-4, thetaU=1e-1,
random_start=100)
# Fit to data using Maximum Likelihood Estimation of the parameters
gp.fit(X, y)
# Make the prediction on the meshed x-axis (ask for MSE as well)
y_pred, MSE = gp.predict(x, eval_MSE=True)
sigma = np.sqrt(MSE)
# Plot the function, the prediction and the 95% confidence interval based on
# the MSE
fig = pl.figure()
pl.plot(x, f(x), 'r:', label=u'$f(x) = x\,\sin(x)$')
pl.plot(X, y, 'r.', markersize=10, label=u'Observations')
pl.plot(x, y_pred, 'b-', label=u'Prediction')
pl.fill(np.concatenate([x, x[::-1]]),
np.concatenate([y_pred - 1.9600 * sigma,
(y_pred + 1.9600 * sigma)[::-1]]),
alpha=.5, fc='b', ec='None', label='95% confidence interval')
pl.xlabel('$x$')
pl.ylabel('$f(x)$')
pl.ylim(-10, 20)
pl.legend(loc='upper left')
#----------------------------------------------------------------------
# now the noisy case
X = np.linspace(0.1, 9.9, 20)
X = np.atleast_2d(X).T
# Observations and noise
y = f(X).ravel()
dy = 0.5 + 1.0 * np.random.random(y.shape)
noise = np.random.normal(0, dy)
y += noise
# Mesh the input space for evaluations of the real function, the prediction and
# its MSE
x = np.atleast_2d(np.linspace(0, 10, 1000)).T
# Instantiate a Gaussian Process model
gp = GaussianProcess(corr='squared_exponential', theta0=1e-1,
thetaL=1e-3, thetaU=1,
nugget=(dy / y) ** 2,
random_start=100)
# Fit to data using Maximum Likelihood Estimation of the parameters
gp.fit(X, y)
# Make the prediction on the meshed x-axis (ask for MSE as well)
y_pred, MSE = gp.predict(x, eval_MSE=True)
sigma = np.sqrt(MSE)
# Plot the function, the prediction and the 95% confidence interval based on
# the MSE
fig = pl.figure()
pl.plot(x, f(x), 'r:', label=u'$f(x) = x\,\sin(x)$')
pl.errorbar(X.ravel(), y, dy, fmt='r.', markersize=10, label=u'Observations')
pl.plot(x, y_pred, 'b-', label=u'Prediction')
pl.fill(np.concatenate([x, x[::-1]]),
np.concatenate([y_pred - 1.9600 * sigma,
(y_pred + 1.9600 * sigma)[::-1]]),
alpha=.5, fc='b', ec='None', label='95% confidence interval')
pl.xlabel('$x$')
pl.ylabel('$f(x)$')
pl.ylim(-10, 20)
pl.legend(loc='upper left')
pl.show()
| bsd-3-clause |
CforED/Machine-Learning | examples/cluster/plot_kmeans_assumptions.py | 270 | 2040 | """
====================================
Demonstration of k-means assumptions
====================================
This example is meant to illustrate situations where k-means will produce
unintuitive and possibly unexpected clusters. In the first three plots, the
input data does not conform to some implicit assumption that k-means makes and
undesirable clusters are produced as a result. In the last plot, k-means
returns intuitive clusters despite unevenly sized blobs.
"""
print(__doc__)
# Author: Phil Roth <[email protected]>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
from sklearn.datasets import make_blobs
plt.figure(figsize=(12, 12))
n_samples = 1500
random_state = 170
X, y = make_blobs(n_samples=n_samples, random_state=random_state)
# Incorrect number of clusters
y_pred = KMeans(n_clusters=2, random_state=random_state).fit_predict(X)
plt.subplot(221)
plt.scatter(X[:, 0], X[:, 1], c=y_pred)
plt.title("Incorrect Number of Blobs")
# Anisotropically distributed data
transformation = [[ 0.60834549, -0.63667341], [-0.40887718, 0.85253229]]
X_aniso = np.dot(X, transformation)
y_pred = KMeans(n_clusters=3, random_state=random_state).fit_predict(X_aniso)
plt.subplot(222)
plt.scatter(X_aniso[:, 0], X_aniso[:, 1], c=y_pred)
plt.title("Anisotropically Distributed Blobs")
# Different variance
X_varied, y_varied = make_blobs(n_samples=n_samples,
cluster_std=[1.0, 2.5, 0.5],
random_state=random_state)
y_pred = KMeans(n_clusters=3, random_state=random_state).fit_predict(X_varied)
plt.subplot(223)
plt.scatter(X_varied[:, 0], X_varied[:, 1], c=y_pred)
plt.title("Unequal Variance")
# Unevenly sized blobs
X_filtered = np.vstack((X[y == 0][:500], X[y == 1][:100], X[y == 2][:10]))
y_pred = KMeans(n_clusters=3, random_state=random_state).fit_predict(X_filtered)
plt.subplot(224)
plt.scatter(X_filtered[:, 0], X_filtered[:, 1], c=y_pred)
plt.title("Unevenly Sized Blobs")
plt.show()
| bsd-3-clause |
percyfal/snakemakelib-core | snakemakelib/tests/test_application.py | 1 | 8625 | # Copyright (C) 2015 by Per Unneberg
# pylint: disable=R0904
import os
import logging
import pytest
import pandas as pd
from blaze import DataFrame, resource
from snakemakelib.application import Application, PlatformUnitApplication, SampleApplication
from snakemakelib.io import IOTarget, IOSampleTarget, IOAggregateTarget
logging.basicConfig(level=logging.DEBUG)
from snakemakelib.odo import pandas
@resource.register('.+\.foo')
@pandas.annotate_by_uri
def resource_foo_to_df(uri, **kwargs):
df = pd.read_csv(uri)
return df
@pytest.fixture(scope="module")
def foo1(tmpdir_factory):
fn = tmpdir_factory.mktemp('data').join('foo1_bar1.foo')
fn.write("""foo,bar\n1,2\n3,4""")
return fn
@pytest.fixture(scope="module")
def foo2(tmpdir_factory):
fn = tmpdir_factory.mktemp('data').join('foo2_bar2.foo')
fn.write("""foo,bar\n5,6\n7,8""")
return fn
@pytest.fixture(scope="module")
def foo3(tmpdir_factory):
fn = tmpdir_factory.mktemp('data').join('foo3_bar3.foo')
fn.write("""foo,bar\n9,10\n11,12""")
return fn
@pytest.fixture(scope="module")
def bar1(tmpdir_factory):
fn = tmpdir_factory.mktemp('data').join('bar1_foo1.foo')
fn.write("""bar,foo\n1,2\n3,4""")
return fn
@pytest.fixture(scope="module")
def bar2(tmpdir_factory):
fn = tmpdir_factory.mktemp('data').join('bar2_foo2.foo')
fn.write("""bar,foo\n5,6\n7,8""")
return fn
@pytest.fixture(scope="module")
def bar3(tmpdir_factory):
fn = tmpdir_factory.mktemp('data').join('bar3_foo3.foo')
fn.write("""bar,foo\n9,10\n11,12""")
return fn
@pytest.fixture(scope="module")
def units():
return [
{'foo': 'foo1', 'bar': 'bar1'},
{'foo': 'foo2', 'bar': 'bar2'},
{'foo': 'foo3', 'bar': 'bar3'}
]
class TestApplication:
"""Test application class"""
iotargets = {'foo':(IOTarget("{foo}_{bar}"), None)}
iotargets_w_suffix = {'foo':(IOTarget("{foo}_{bar}", suffix=".foo"), None)}
iotargets_foo_bar = {
'foo':(IOTarget("{foo}_{bar}", suffix=".foo"), None),
'bar':(IOTarget("{bar}_{foo}", suffix=".bar"), None),
}
iotargets_foo_bar_aggregate = {
'foo':(IOTarget("{foo}_{bar,[^.]+}", suffix=".foo"), IOAggregateTarget("foo_aggregate.txt")),
'bar':(IOTarget("{bar}_{foo,[^.]+}", suffix=".foo"), IOAggregateTarget("bar_aggregate.txt")),
}
def test_init_empty_dict(self):
with pytest.raises(AssertionError):
app = Application(name="foo", iotargets=dict())
def test_init(self):
app = Application(name="foo", iotargets=self.iotargets)
def test_targets(self, units):
app = Application(name="foo", iotargets=self.iotargets_w_suffix, units=units)
assert sorted(app.targets['foo']) == ['foo1_bar1.foo', 'foo2_bar2.foo', 'foo3_bar3.foo']
def test_targets_no_run(self, units):
app = Application(name="foo", iotargets=self.iotargets_w_suffix, units=units, run=False)
assert app.targets == {'foo': []}
def test_foobar_targets(self, units):
app = Application(name="foo", iotargets=self.iotargets_foo_bar, units=units)
assert sorted(app.targets['foo']) == ['foo1_bar1.foo', 'foo2_bar2.foo', 'foo3_bar3.foo']
assert sorted(app.targets['bar']) == ['bar1_foo1.bar', 'bar2_foo2.bar', 'bar3_foo3.bar']
def test_add_annotation_fn(self, units):
app = Application("foo", iotargets=self.iotargets_foo_bar, units=units)
barapp = Application("foo", iotargets=self.iotargets_foo_bar, units=units)
assert app._annotation_funcs == {}
@barapp.register_annotation_fn("foo")
@app.register_annotation_fn("foo")
def _annot_func(df, uri, **kwargs):
return "Added annotation func"
assert 'foo' in app._annotation_funcs
assert app._annotation_funcs['foo'](None, None) == "Added annotation func"
assert barapp._annotation_funcs['foo'](None, None) == "Added annotation func"
def test_aggregate_targets(self, units):
app = Application("foo", iotargets=self.iotargets_foo_bar_aggregate, units=units)
assert app.aggregate_targets == {'bar': 'bar_aggregate.txt', 'foo': 'foo_aggregate.txt'}
def test_aggregate(self, foo1, foo2, foo3, bar1, bar2, bar3, units):
app = Application("foo", iotargets=self.iotargets_foo_bar_aggregate, units=units, annotate=True)
@app.register_annotation_fn("foo")
def _annot_func(df, uri, iotarget=app.iotargets['foo'], **kwargs):
m = iotarget[0].search(os.path.basename(uri))
by = kwargs.get("by", list(iotarget[0].keys()))
for key in by:
df[key] = iotarget[0].groupdict[key]
return df
# Mock app._targets
app._targets = {'foo': [str(foo1), str(foo2), str(foo3)], 'bar': [str(bar1), str(bar2), str(bar3)]}
app.aggregate()
assert 'foo' in app.aggregate_data['foo'].columns
assert 'bar' in app.aggregate_data['bar'].columns
assert set(list(app.aggregate_data['foo']['foo'])) == {'foo1', 'foo2', 'foo3'}
assert set([os.path.basename(x) for x in list(app.aggregate_data['bar']['uri'])]) == {'bar1_foo1.foo', 'bar2_foo2.foo', 'bar3_foo3.foo'}
@pytest.fixture(scope="module")
def SM_PU_iotargets_foo_bar_aggregate():
return {
'foo':(IOTarget("{PATH}/{SM}_{PU,[^.]+}", suffix=".foo"), IOAggregateTarget("foo_aggregate.txt")),
'bar':(IOTarget("{PATH}/{SM}_{PU,[^.]+}", suffix=".foo"), IOAggregateTarget("bar_aggregate.txt")),
}
class TestPlatformUnitApplication:
"""Test PlatformUnitApplication class"""
def test_aggregate(self, foo1, foo2, foo3, bar1, bar2, bar3, SM_PU_iotargets_foo_bar_aggregate, units):
app = PlatformUnitApplication(name="foo", iotargets=SM_PU_iotargets_foo_bar_aggregate, units=units)
app._targets = {'foo': [str(foo1), str(foo2), str(foo3)], 'bar': [str(bar1), str(bar2), str(bar3)]}
app.aggregate()
assert list(app.aggregate_data['foo']['PU']) == ['bar1', 'bar1', 'bar2', 'bar2', 'bar3', 'bar3']
assert list(app.aggregate_data['foo']['SM']) == ['foo1', 'foo1', 'foo2', 'foo2', 'foo3', 'foo3']
assert list(app.aggregate_data['foo']['PlatformUnit']) == ['foo1__bar1', 'foo1__bar1', 'foo2__bar2', 'foo2__bar2', 'foo3__bar3', 'foo3__bar3']
class TestSampleApplication:
"""Test SampleApplication class"""
def test_aggregate(self, foo1, foo2, foo3, bar1, bar2, bar3, SM_PU_iotargets_foo_bar_aggregate, units):
app = SampleApplication(name="foo", iotargets=SM_PU_iotargets_foo_bar_aggregate, units=units)
app._targets = {'foo': [str(foo1), str(foo2), str(foo3)], 'bar': [str(bar1), str(bar2), str(bar3)]}
app.aggregate()
assert list(app.aggregate_data['foo'].reset_index()['SM']) == ['foo1', 'foo1', 'foo2', 'foo2', 'foo3', 'foo3']
# atomic tests
def test_post_processing_hook(foo1, foo2, foo3, bar1, bar2, bar3, SM_PU_iotargets_foo_bar_aggregate, units):
app = PlatformUnitApplication(name="foo", iotargets=SM_PU_iotargets_foo_bar_aggregate, units=units)
@app.register_post_processing_hook('foo')
def _pphook(df, **kwargs):
df['foobar'] = df['foo'] + df['bar']
return df
app._targets = {'foo': [str(foo1), str(foo2), str(foo3)], 'bar': [str(bar1), str(bar2), str(bar3)]}
app.aggregate()
assert 'foobar' in list(app.aggregate_data['foo'].columns)
assert 'foobar' not in list(app.aggregate_data['bar'].columns)
assert list(app.aggregate_data['foo']['foobar']) == [3, 7, 11, 15, 19, 23]
def test_plot_fn(foo1, foo2, foo3, bar1, bar2, bar3, SM_PU_iotargets_foo_bar_aggregate, units):
app = PlatformUnitApplication(name="foo", iotargets=SM_PU_iotargets_foo_bar_aggregate, units=units)
@app.register_plot('foo')
def _plot1(df, **kwargs):
from bokeh.charts import Scatter
return Scatter(df, x="foo", y="bar", **kwargs)
@app.register_plot('bar')
@app.register_plot('foo')
def _plot2(df, **kwargs):
from bokeh.charts import Scatter
return Scatter(df, x="bar", y="foo", **kwargs)
app._targets = {'foo': [str(foo1), str(foo2), str(foo3)], 'bar': [str(bar1), str(bar2), str(bar3)]}
app.aggregate()
# bar
d = app.plot('bar')
assert isinstance(d, list)
assert len(d) == 1
# foo
d = app.plot('foo')
assert len(d) == 2
from bokeh.charts import Chart
assert isinstance(d[0], Chart)
assert d[0].plot_width == 600
d = app.plot(key="foo", plot_width=400)
assert d[0].plot_width == 400
| mit |
bluemonk482/emotionannotate | src/Utilities.py | 1 | 5242 | import numpy as np
import os
import Config
from sklearn.cross_validation import train_test_split
def class_dist(dir):
emotions = ['anger', 'disgust', 'happy', 'surprise', 'sad']
for emo in emotions:
print emo
f = open(dir+'/emo_'+emo+'_raw.txt', 'r')
lines = f.readlines()
p1 = len(lines)
f = open(dir+'/hash_'+emo+'_raw.txt', 'r')
lines = f.readlines()
p2 = len(lines)
f = open(dir+'/hash_non'+emo+'_raw.txt', 'r')
lines = f.readlines()
n2 = len(lines)
f = open(dir+'/emo_non'+emo+'_raw.txt', 'r')
lines = f.readlines()
n1 = len(lines)
print "POS: %f; NEG: %f"%(float((p1+p2))/(p1+p2+n1+n2), float((n1+n2))/(p1+p2+n1+n2))
def load_tweets(emotion_tweet_dict, non_emotion_tweet_dict):
# Load positive tweets from files to memory
tweet_files_positive = Config.get(emotion_tweet_dict)
positive_tweets = {}
for emo in tweet_files_positive.keys():
positive_tweets[emo] = []
for filename in tweet_files_positive[emo]:
file_obj = open(filename, "r")
for tweet in file_obj:
positive_tweets[emo].append(tweet)
# Load negative tweets from files to memory
tweet_files_negative = Config.get(non_emotion_tweet_dict)
negative_tweets = {}
for emo in tweet_files_negative.keys():
negative_tweets[emo] = []
for filename in tweet_files_negative[emo]:
file_obj = open(filename, "r")
for tweet in file_obj:
negative_tweets[emo].append(tweet)
return positive_tweets, negative_tweets
def format_data(emo, pos, neg):
d = []
for tweet in pos[emo]:
d.append((tweet.strip(), 1))
for tweet in neg[emo]:
d.append((tweet.strip(), 0))
X = []
y = []
for tweet, label in d:
if tweet != '':
X.append(tweet)
y.append(label)
X = np.asarray(X)
y = np.asarray(y)
return X, y
def feval(truefile, predfile):
truefile = os.path.abspath(truefile)
predfile = os.path.abspath(predfile)
f1 = open(truefile, 'r')
f2 = open(predfile, 'r')
l1 = f1.readlines()
l2 = f2.readlines()
y_test = []
y_predicted = []
if len(l1) == len(l2):
for i in xrange(len(l1)):
y_test.append(int(l1[i].strip()))
y_predicted.append(int(l2[i].strip()))
else:
raise Exception('ERROR: true and pred file length do not match!')
f1.close()
f2.close()
return y_test, y_predicted
def getlabels(X):
y = []
for i in X:
i = i[0].split(' ')
y.append(int(i[0]))
return y
def writingfile(filepath, X):
with open(filepath, 'w') as f:
for item in X:
f.write("%s\n" % item)
def readfeats(filepath):
f = open(filepath, 'r')
lines = f.readlines()
d = []
for i in lines:
d.append(i.strip())
d = np.asarray(d)
return d
def frange(start, stop, step):
r = start
while r <= stop:
yield r
r *= step
def subset_data(sub_dir='../data/subset/'):
pos, neg = load_tweets("emotion_tweet_files_dict", "non_emotion_tweet_files_dict")
for emo in pos.keys():
d = []
for tweet in pos[emo]:
d.append((tweet.strip(), 1))
for tweet in neg[emo]:
d.append((tweet.strip(), 0))
X = []
y = []
for tweet, label in d:
X.append(tweet)
y.append(label)
X = np.asarray(X)
y = np.asarray(y)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, stratify=y, random_state=0)
pos_test = []
neg_test = []
for i, j in enumerate(X_test):
if y_test[i] == 1:
pos_test.append(j)
elif y_test[i] == 0:
neg_test.append(j)
pos_dir = sub_dir+'emo_'+emo+'.txt'
neg_dir = sub_dir+'emo_non'+emo+'.txt'
writingfile(pos_dir, pos_test)
writingfile(neg_dir, neg_test)
def vocab(emotion_tweet_dict, non_emotion_tweet_dict, emo):
import re
import string
from nltk.corpus import stopwords
from sklearn.feature_extraction.text import CountVectorizer
stopWords = stopwords.words('english')
regex = re.compile('[%s]' % re.escape(string.punctuation))
vectorizer = CountVectorizer(min_df=1, analyzer='word',stop_words=stopWords, binary=True)
pos, neg = load_tweets(emotion_tweet_dict, non_emotion_tweet_dict)
emo_tweets, y = format_data(emo, pos, neg)
tweets=[]
for i in emo_tweets:
        if isinstance(i, bytes):  # tweets read in text mode are already str
            i = i.decode('utf-8')
i = re.sub(r'@USERID', '', i)
i = re.sub(r'URL', '', i)
i = re.sub(r're-tweet', '', i)
i = re.sub(r'\s+', ' ', i)
new = []
for word in i.split():
word = regex.sub(u'', word)
            if word not in stopWords and word != u'':
new.append(word)
new = ' '.join(new)
tweets.append(new)
X = vectorizer.fit_transform(tweets)
vocab_dict = vectorizer.vocabulary_
vocab=[]
    for key in vocab_dict:  # dict.iteritems() does not exist in Python 3
        vocab.append(key)
return vocab
| gpl-3.0 |
Shaswat27/scipy | scipy/signal/filter_design.py | 6 | 127925 | """Filter design.
"""
from __future__ import division, print_function, absolute_import
import warnings
import numpy
from numpy import (atleast_1d, poly, polyval, roots, real, asarray, allclose,
resize, pi, absolute, logspace, r_, sqrt, tan, log10,
arctan, arcsinh, sin, exp, cosh, arccosh, ceil, conjugate,
zeros, sinh, append, concatenate, prod, ones, array)
from numpy import mintypecode
import numpy as np
from scipy import special, optimize
from scipy.special import comb
__all__ = ['findfreqs', 'freqs', 'freqz', 'tf2zpk', 'zpk2tf', 'normalize',
'lp2lp', 'lp2hp', 'lp2bp', 'lp2bs', 'bilinear', 'iirdesign',
'iirfilter', 'butter', 'cheby1', 'cheby2', 'ellip', 'bessel',
'band_stop_obj', 'buttord', 'cheb1ord', 'cheb2ord', 'ellipord',
'buttap', 'cheb1ap', 'cheb2ap', 'ellipap', 'besselap',
'BadCoefficients',
'tf2sos', 'sos2tf', 'zpk2sos', 'sos2zpk', 'group_delay']
class BadCoefficients(UserWarning):
"""Warning about badly conditioned filter coefficients"""
pass
abs = absolute
EPSILON = 2e-16  # near machine epsilon; used by group_delay to flag singular points
def findfreqs(num, den, N):
"""
Find array of frequencies for computing the response of an analog filter.
Parameters
----------
num, den : array_like, 1-D
The polynomial coefficients of the numerator and denominator of the
transfer function of the filter or LTI system. The coefficients are
ordered from highest to lowest degree.
N : int
The length of the array to be computed.
Returns
-------
w : (N,) ndarray
A 1-D array of frequencies, logarithmically spaced.
Examples
--------
Find a set of nine frequencies that span the "interesting part" of the
frequency response for the filter with the transfer function
H(s) = s / (s^2 + 8s + 25)
>>> from scipy import signal
>>> signal.findfreqs([1, 0], [1, 8, 25], N=9)
array([ 1.00000000e-02, 3.16227766e-02, 1.00000000e-01,
3.16227766e-01, 1.00000000e+00, 3.16227766e+00,
1.00000000e+01, 3.16227766e+01, 1.00000000e+02])
"""
ep = atleast_1d(roots(den)) + 0j
tz = atleast_1d(roots(num)) + 0j
if len(ep) == 0:
ep = atleast_1d(-1000) + 0j
ez = r_['-1',
numpy.compress(ep.imag >= 0, ep, axis=-1),
numpy.compress((abs(tz) < 1e5) & (tz.imag >= 0), tz, axis=-1)]
integ = abs(ez) < 1e-10
hfreq = numpy.around(numpy.log10(numpy.max(3 * abs(ez.real + integ) +
1.5 * ez.imag)) + 0.5)
lfreq = numpy.around(numpy.log10(0.1 * numpy.min(abs(real(ez + integ)) +
2 * ez.imag)) - 0.5)
w = logspace(lfreq, hfreq, N)
return w
def freqs(b, a, worN=None, plot=None):
"""
Compute frequency response of analog filter.
Given the numerator `b` and denominator `a` of a filter, compute its
frequency response::
b[0]*(jw)**(nb-1) + b[1]*(jw)**(nb-2) + ... + b[nb-1]
H(w) = -------------------------------------------------------
a[0]*(jw)**(na-1) + a[1]*(jw)**(na-2) + ... + a[na-1]
Parameters
----------
b : array_like
Numerator of a linear filter.
a : array_like
Denominator of a linear filter.
worN : {None, int}, optional
If None, then compute at 200 frequencies around the interesting parts
of the response curve (determined by pole-zero locations). If a single
integer, then compute at that many frequencies. Otherwise, compute the
response at the angular frequencies (e.g. rad/s) given in `worN`.
plot : callable, optional
A callable that takes two arguments. If given, the return parameters
`w` and `h` are passed to plot. Useful for plotting the frequency
response inside `freqs`.
Returns
-------
w : ndarray
The angular frequencies at which `h` was computed.
h : ndarray
The frequency response.
See Also
--------
freqz : Compute the frequency response of a digital filter.
Notes
-----
    Using Matplotlib's "plot" function as the callable for `plot` produces
    unexpected results, because it plots the real part of the complex
    transfer function, not the magnitude. Try ``lambda w, h: plot(w, abs(h))``.
Examples
--------
>>> from scipy.signal import freqs, iirfilter
>>> b, a = iirfilter(4, [1, 10], 1, 60, analog=True, ftype='cheby1')
>>> w, h = freqs(b, a, worN=np.logspace(-1, 2, 1000))
>>> import matplotlib.pyplot as plt
>>> plt.semilogx(w, 20 * np.log10(abs(h)))
>>> plt.xlabel('Frequency')
>>> plt.ylabel('Amplitude response [dB]')
>>> plt.grid()
>>> plt.show()
"""
if worN is None:
w = findfreqs(b, a, 200)
elif isinstance(worN, int):
N = worN
w = findfreqs(b, a, N)
else:
w = worN
w = atleast_1d(w)
s = 1j * w
h = polyval(b, s) / polyval(a, s)
if plot is not None:
plot(w, h)
return w, h
def freqz(b, a=1, worN=None, whole=0, plot=None):
"""
Compute the frequency response of a digital filter.
Given the numerator `b` and denominator `a` of a digital filter,
compute its frequency response::
jw -jw -jmw
jw B(e) b[0] + b[1]e + .... + b[m]e
H(e) = ---- = ------------------------------------
jw -jw -jnw
A(e) a[0] + a[1]e + .... + a[n]e
Parameters
----------
b : array_like
numerator of a linear filter
a : array_like
denominator of a linear filter
worN : {None, int, array_like}, optional
If None (default), then compute at 512 frequencies equally spaced
around the unit circle.
If a single integer, then compute at that many frequencies.
If an array_like, compute the response at the frequencies given (in
radians/sample).
whole : bool, optional
Normally, frequencies are computed from 0 to the Nyquist frequency,
pi radians/sample (upper-half of unit-circle). If `whole` is True,
compute frequencies from 0 to 2*pi radians/sample.
plot : callable
A callable that takes two arguments. If given, the return parameters
`w` and `h` are passed to plot. Useful for plotting the frequency
response inside `freqz`.
Returns
-------
w : ndarray
The normalized frequencies at which `h` was computed, in
radians/sample.
h : ndarray
The frequency response.
Notes
-----
    Using Matplotlib's "plot" function as the callable for `plot` produces
    unexpected results, because it plots the real part of the complex
    transfer function, not the magnitude. Try ``lambda w, h: plot(w, abs(h))``.
Examples
--------
>>> from scipy import signal
>>> b = signal.firwin(80, 0.5, window=('kaiser', 8))
>>> w, h = signal.freqz(b)
>>> import matplotlib.pyplot as plt
>>> fig = plt.figure()
>>> plt.title('Digital filter frequency response')
>>> ax1 = fig.add_subplot(111)
>>> plt.plot(w, 20 * np.log10(abs(h)), 'b')
>>> plt.ylabel('Amplitude [dB]', color='b')
>>> plt.xlabel('Frequency [rad/sample]')
>>> ax2 = ax1.twinx()
>>> angles = np.unwrap(np.angle(h))
>>> plt.plot(w, angles, 'g')
>>> plt.ylabel('Angle (radians)', color='g')
>>> plt.grid()
>>> plt.axis('tight')
>>> plt.show()
"""
b, a = map(atleast_1d, (b, a))
if whole:
lastpoint = 2 * pi
else:
lastpoint = pi
if worN is None:
N = 512
w = numpy.linspace(0, lastpoint, N, endpoint=False)
elif isinstance(worN, int):
N = worN
w = numpy.linspace(0, lastpoint, N, endpoint=False)
else:
w = worN
w = atleast_1d(w)
zm1 = exp(-1j * w)
h = polyval(b[::-1], zm1) / polyval(a[::-1], zm1)
if plot is not None:
plot(w, h)
return w, h
def group_delay(system, w=None, whole=False):
r"""Compute the group delay of a digital filter.
The group delay measures by how many samples amplitude envelopes of
various spectral components of a signal are delayed by a filter.
It is formally defined as the derivative of continuous (unwrapped) phase::
d jw
D(w) = - -- arg H(e)
dw
Parameters
----------
system : tuple of array_like (b, a)
Numerator and denominator coefficients of a filter transfer function.
w : {None, int, array-like}, optional
If None (default), then compute at 512 frequencies equally spaced
around the unit circle.
If a single integer, then compute at that many frequencies.
If array, compute the delay at the frequencies given
(in radians/sample).
whole : bool, optional
Normally, frequencies are computed from 0 to the Nyquist frequency,
pi radians/sample (upper-half of unit-circle). If `whole` is True,
compute frequencies from 0 to ``2*pi`` radians/sample.
Returns
-------
w : ndarray
The normalized frequencies at which the group delay was computed,
in radians/sample.
gd : ndarray
The group delay.
Notes
-----
The similar function in MATLAB is called `grpdelay`.
If the transfer function :math:`H(z)` has zeros or poles on the unit
circle, the group delay at corresponding frequencies is undefined.
    When such a case arises, a warning is raised and the group delay
    is set to 0 at those frequencies.
For the details of numerical computation of the group delay refer to [1]_.
    .. versionadded:: 0.16.0
See Also
--------
freqz : Frequency response of a digital filter
References
----------
.. [1] Richard G. Lyons, "Understanding Digital Signal Processing,
3rd edition", p. 830.
Examples
--------
>>> from scipy import signal
>>> b, a = signal.iirdesign(0.1, 0.3, 5, 50, ftype='cheby1')
>>> w, gd = signal.group_delay((b, a))
>>> import matplotlib.pyplot as plt
>>> plt.title('Digital filter group delay')
>>> plt.plot(w, gd)
>>> plt.ylabel('Group delay [samples]')
>>> plt.xlabel('Frequency [rad/sample]')
>>> plt.show()
"""
if w is None:
w = 512
if isinstance(w, int):
if whole:
w = np.linspace(0, 2 * pi, w, endpoint=False)
else:
w = np.linspace(0, pi, w, endpoint=False)
w = np.atleast_1d(w)
b, a = map(np.atleast_1d, system)
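    # Ramped-coefficient method: for C(w) = sum_k c[k]*exp(-j*w*k), the group
    # delay of C is Re( sum_k k*c[k]*exp(-j*w*k) / C(w) ). With c = b conv
    # reversed(a), arg C = arg B - arg A - w*(a.size - 1), so the filter's
    # group delay is that ratio minus (a.size - 1), applied below.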
c = np.convolve(b, a[::-1])
cr = c * np.arange(c.size)
z = np.exp(-1j * w)
num = np.polyval(cr[::-1], z)
den = np.polyval(c[::-1], z)
singular = np.absolute(den) < 10 * EPSILON
if np.any(singular):
warnings.warn(
"The group delay is singular at frequencies [{0}], setting to 0".
format(", ".join("{0:.3f}".format(ws) for ws in w[singular]))
)
gd = np.zeros_like(w)
gd[~singular] = np.real(num[~singular] / den[~singular]) - a.size + 1
return w, gd
def _cplxreal(z, tol=None):
"""
Split into complex and real parts, combining conjugate pairs.
The 1D input vector `z` is split up into its complex (`zc`) and real (`zr`)
elements. Every complex element must be part of a complex-conjugate pair,
which are combined into a single number (with positive imaginary part) in
the output. Two complex numbers are considered a conjugate pair if their
real and imaginary parts differ in magnitude by less than ``tol * abs(z)``.
Parameters
----------
z : array_like
Vector of complex numbers to be sorted and split
tol : float, optional
Relative tolerance for testing realness and conjugate equality.
Default is ``100 * spacing(1)`` of `z`'s data type (i.e. 2e-14 for
float64)
Returns
-------
zc : ndarray
Complex elements of `z`, with each pair represented by a single value
having positive imaginary part, sorted first by real part, and then
by magnitude of imaginary part. The pairs are averaged when combined
to reduce error.
zr : ndarray
Real elements of `z` (those having imaginary part less than
`tol` times their magnitude), sorted by value.
Raises
------
ValueError
If there are any complex numbers in `z` for which a conjugate
cannot be found.
See Also
--------
_cplxpair
Examples
--------
>>> a = [4, 3, 1, 2-2j, 2+2j, 2-1j, 2+1j, 2-1j, 2+1j, 1+1j, 1-1j]
>>> zc, zr = _cplxreal(a)
    >>> print(zc)
[ 1.+1.j 2.+1.j 2.+1.j 2.+2.j]
    >>> print(zr)
[ 1. 3. 4.]
"""
z = atleast_1d(z)
if z.size == 0:
return z, z
elif z.ndim != 1:
raise ValueError('_cplxreal only accepts 1D input')
if tol is None:
# Get tolerance from dtype of input
tol = 100 * np.finfo((1.0 * z).dtype).eps
# Sort by real part, magnitude of imaginary part (speed up further sorting)
z = z[np.lexsort((abs(z.imag), z.real))]
# Split reals from conjugate pairs
real_indices = abs(z.imag) <= tol * abs(z)
zr = z[real_indices].real
if len(zr) == len(z):
# Input is entirely real
return array([]), zr
# Split positive and negative halves of conjugates
z = z[~real_indices]
zp = z[z.imag > 0]
zn = z[z.imag < 0]
if len(zp) != len(zn):
raise ValueError('Array contains complex value with no matching '
'conjugate.')
# Find runs of (approximately) the same real part
same_real = np.diff(zp.real) <= tol * abs(zp[:-1])
diffs = numpy.diff(concatenate(([0], same_real, [0])))
run_starts = numpy.where(diffs > 0)[0]
run_stops = numpy.where(diffs < 0)[0]
# Sort each run by their imaginary parts
for i in range(len(run_starts)):
start = run_starts[i]
stop = run_stops[i] + 1
for chunk in (zp[start:stop], zn[start:stop]):
chunk[...] = chunk[np.lexsort([abs(chunk.imag)])]
# Check that negatives match positives
if any(abs(zp - zn.conj()) > tol * abs(zn)):
raise ValueError('Array contains complex value with no matching '
'conjugate.')
# Average out numerical inaccuracy in real vs imag parts of pairs
zc = (zp + zn.conj()) / 2
return zc, zr
def _cplxpair(z, tol=None):
"""
Sort into pairs of complex conjugates.
Complex conjugates in `z` are sorted by increasing real part. In each
pair, the number with negative imaginary part appears first.
If pairs have identical real parts, they are sorted by increasing
imaginary magnitude.
Two complex numbers are considered a conjugate pair if their real and
imaginary parts differ in magnitude by less than ``tol * abs(z)``. The
pairs are forced to be exact complex conjugates by averaging the positive
and negative values.
Purely real numbers are also sorted, but placed after the complex
conjugate pairs. A number is considered real if its imaginary part is
smaller than `tol` times the magnitude of the number.
Parameters
----------
z : array_like
1-dimensional input array to be sorted.
tol : float, optional
Relative tolerance for testing realness and conjugate equality.
Default is ``100 * spacing(1)`` of `z`'s data type (i.e. 2e-14 for
float64)
Returns
-------
y : ndarray
Complex conjugate pairs followed by real numbers.
Raises
------
ValueError
If there are any complex numbers in `z` for which a conjugate
cannot be found.
See Also
--------
_cplxreal
Examples
--------
>>> a = [4, 3, 1, 2-2j, 2+2j, 2-1j, 2+1j, 2-1j, 2+1j, 1+1j, 1-1j]
>>> z = _cplxpair(a)
>>> print(z)
[ 1.-1.j 1.+1.j 2.-1.j 2.+1.j 2.-1.j 2.+1.j 2.-2.j 2.+2.j 1.+0.j
3.+0.j 4.+0.j]
"""
z = atleast_1d(z)
if z.size == 0 or np.isrealobj(z):
return np.sort(z)
if z.ndim != 1:
raise ValueError('z must be 1-dimensional')
zc, zr = _cplxreal(z, tol)
# Interleave complex values and their conjugates, with negative imaginary
# parts first in each pair
zc = np.dstack((zc.conj(), zc)).flatten()
z = np.append(zc, zr)
return z
def tf2zpk(b, a):
r"""Return zero, pole, gain (z, p, k) representation from a numerator,
denominator representation of a linear filter.
Parameters
----------
b : array_like
Numerator polynomial coefficients.
a : array_like
Denominator polynomial coefficients.
Returns
-------
z : ndarray
Zeros of the transfer function.
p : ndarray
Poles of the transfer function.
k : float
System gain.
Notes
-----
If some values of `b` are too close to 0, they are removed. In that case,
a BadCoefficients warning is emitted.
The `b` and `a` arrays are interpreted as coefficients for positive,
descending powers of the transfer function variable. So the inputs
:math:`b = [b_0, b_1, ..., b_M]` and :math:`a =[a_0, a_1, ..., a_N]`
can represent an analog filter of the form:
.. math::
H(s) = \frac
{b_0 s^M + b_1 s^{(M-1)} + \cdots + b_M}
{a_0 s^N + a_1 s^{(N-1)} + \cdots + a_N}
or a discrete-time filter of the form:
.. math::
H(z) = \frac
{b_0 z^M + b_1 z^{(M-1)} + \cdots + b_M}
{a_0 z^N + a_1 z^{(N-1)} + \cdots + a_N}
This "positive powers" form is found more commonly in controls
engineering. If `M` and `N` are equal (which is true for all filters
generated by the bilinear transform), then this happens to be equivalent
to the "negative powers" discrete-time form preferred in DSP:
.. math::
H(z) = \frac
{b_0 + b_1 z^{-1} + \cdots + b_M z^{-M}}
{a_0 + a_1 z^{-1} + \cdots + a_N z^{-N}}
Although this is true for common filters, remember that this is not true
in the general case. If `M` and `N` are not equal, the discrete-time
transfer function coefficients must first be converted to the "positive
powers" form before finding the poles and zeros.
"""
b, a = normalize(b, a)
b = (b + 0.0) / a[0]
a = (a + 0.0) / a[0]
k = b[0]
b /= b[0]
z = roots(b)
p = roots(a)
return z, p, k
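# Quick worked example for the conversion above (easy to check by hand):
# H(s) = (s + 2) / (s**2 + 3*s + 2) = (s + 2) / ((s + 1)*(s + 2)), so
# tf2zpk([1, 2], [1, 3, 2]) gives one zero at -2, poles at -1 and -2, and k = 1.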
def zpk2tf(z, p, k):
"""
Return polynomial transfer function representation from zeros and poles
Parameters
----------
z : array_like
Zeros of the transfer function.
p : array_like
Poles of the transfer function.
k : float
System gain.
Returns
-------
b : ndarray
Numerator polynomial coefficients.
a : ndarray
Denominator polynomial coefficients.
"""
z = atleast_1d(z)
k = atleast_1d(k)
if len(z.shape) > 1:
temp = poly(z[0])
b = zeros((z.shape[0], z.shape[1] + 1), temp.dtype.char)
if len(k) == 1:
k = [k[0]] * z.shape[0]
for i in range(z.shape[0]):
b[i] = k[i] * poly(z[i])
else:
b = k * poly(z)
a = atleast_1d(poly(p))
# Use real output if possible. Copied from numpy.poly, since
# we can't depend on a specific version of numpy.
if issubclass(b.dtype.type, numpy.complexfloating):
# if complex roots are all complex conjugates, the roots are real.
roots = numpy.asarray(z, complex)
pos_roots = numpy.compress(roots.imag > 0, roots)
neg_roots = numpy.conjugate(numpy.compress(roots.imag < 0, roots))
if len(pos_roots) == len(neg_roots):
if numpy.all(numpy.sort_complex(neg_roots) ==
numpy.sort_complex(pos_roots)):
b = b.real.copy()
if issubclass(a.dtype.type, numpy.complexfloating):
# if complex roots are all complex conjugates, the roots are real.
roots = numpy.asarray(p, complex)
pos_roots = numpy.compress(roots.imag > 0, roots)
neg_roots = numpy.conjugate(numpy.compress(roots.imag < 0, roots))
if len(pos_roots) == len(neg_roots):
if numpy.all(numpy.sort_complex(neg_roots) ==
numpy.sort_complex(pos_roots)):
a = a.real.copy()
return b, a
def tf2sos(b, a, pairing='nearest'):
"""
Return second-order sections from transfer function representation
Parameters
----------
b : array_like
Numerator polynomial coefficients.
a : array_like
Denominator polynomial coefficients.
pairing : {'nearest', 'keep_odd'}, optional
The method to use to combine pairs of poles and zeros into sections.
See `zpk2sos`.
Returns
-------
sos : ndarray
Array of second-order filter coefficients, with shape
``(n_sections, 6)``. See `sosfilt` for the SOS filter format
specification.
See Also
--------
zpk2sos, sosfilt
Notes
-----
It is generally discouraged to convert from TF to SOS format, since doing
so usually will not improve numerical precision errors. Instead, consider
designing filters in ZPK format and converting directly to SOS. TF is
converted to SOS by first converting to ZPK format, then converting
ZPK to SOS.
.. versionadded:: 0.16.0
"""
return zpk2sos(*tf2zpk(b, a), pairing=pairing)
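# Sketch of the two design routes discussed in the Notes above (order and
# cutoff are arbitrary). Both reach an SOS array, but the TF route recovers
# the poles and zeros from polynomial coefficients and can lose precision as
# the order grows:
#
#     >>> from scipy import signal
#     >>> sos_direct = signal.butter(8, 0.25, output='sos')    # preferred
#     >>> sos_via_tf = signal.tf2sos(*signal.butter(8, 0.25))  # discouraged route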
def sos2tf(sos):
"""
Return a single transfer function from a series of second-order sections
Parameters
----------
sos : array_like
Array of second-order filter coefficients, must have shape
``(n_sections, 6)``. See `sosfilt` for the SOS filter format
specification.
Returns
-------
b : ndarray
Numerator polynomial coefficients.
a : ndarray
Denominator polynomial coefficients.
Notes
-----
.. versionadded:: 0.16.0
"""
sos = np.asarray(sos)
b = [1.]
a = [1.]
n_sections = sos.shape[0]
for section in range(n_sections):
b = np.polymul(b, sos[section, :3])
a = np.polymul(a, sos[section, 3:])
return b, a
def sos2zpk(sos):
"""
Return zeros, poles, and gain of a series of second-order sections
Parameters
----------
sos : array_like
Array of second-order filter coefficients, must have shape
``(n_sections, 6)``. See `sosfilt` for the SOS filter format
specification.
Returns
-------
z : ndarray
Zeros of the transfer function.
p : ndarray
Poles of the transfer function.
k : float
System gain.
Notes
-----
.. versionadded:: 0.16.0
"""
sos = np.asarray(sos)
n_sections = sos.shape[0]
z = np.empty(n_sections*2, np.complex128)
p = np.empty(n_sections*2, np.complex128)
k = 1.
for section in range(n_sections):
zpk = tf2zpk(sos[section, :3], sos[section, 3:])
z[2*section:2*(section+1)] = zpk[0]
p[2*section:2*(section+1)] = zpk[1]
k *= zpk[2]
return z, p, k
def _nearest_real_complex_idx(fro, to, which):
"""Get the next closest real or complex element based on distance"""
assert which in ('real', 'complex')
order = np.argsort(np.abs(fro - to))
mask = np.isreal(fro[order])
if which == 'complex':
mask = ~mask
return order[np.where(mask)[0][0]]
def zpk2sos(z, p, k, pairing='nearest'):
"""
Return second-order sections from zeros, poles, and gain of a system
Parameters
----------
z : array_like
Zeros of the transfer function.
p : array_like
Poles of the transfer function.
k : float
System gain.
pairing : {'nearest', 'keep_odd'}, optional
The method to use to combine pairs of poles and zeros into sections.
See Notes below.
Returns
-------
sos : ndarray
Array of second-order filter coefficients, with shape
``(n_sections, 6)``. See `sosfilt` for the SOS filter format
specification.
See Also
--------
sosfilt
Notes
-----
The algorithm used to convert ZPK to SOS format is designed to
minimize errors due to numerical precision issues. The pairing
algorithm attempts to minimize the peak gain of each biquadratic
section. This is done by pairing poles with the nearest zeros, starting
with the poles closest to the unit circle.
*Algorithms*
The current algorithms are designed specifically for use with digital
    filters. (The output coefficients are not correct for analog filters.)
The steps in the ``pairing='nearest'`` and ``pairing='keep_odd'``
algorithms are mostly shared. The ``nearest`` algorithm attempts to
minimize the peak gain, while ``'keep_odd'`` minimizes peak gain under
the constraint that odd-order systems should retain one section
    as first order. The algorithm steps are as follows:
As a pre-processing step, add poles or zeros to the origin as
necessary to obtain the same number of poles and zeros for pairing.
If ``pairing == 'nearest'`` and there are an odd number of poles,
add an additional pole and a zero at the origin.
The following steps are then iterated over until no more poles or
zeros remain:
1. Take the (next remaining) pole (complex or real) closest to the
unit circle to begin a new filter section.
2. If the pole is real and there are no other remaining real poles [#]_,
add the closest real zero to the section and leave it as a first
order section. Note that after this step we are guaranteed to be
left with an even number of real poles, complex poles, real zeros,
and complex zeros for subsequent pairing iterations.
3. Else:
        1. If the pole is complex and the zero is the *only* remaining real
           zero, then pair the pole with the *next* closest zero
(guaranteed to be complex). This is necessary to ensure that
there will be a real zero remaining to eventually create a
first-order section (thus keeping the odd order).
2. Else pair the pole with the closest remaining zero (complex or
real).
3. Proceed to complete the second-order section by adding another
pole and zero to the current pole and zero in the section:
1. If the current pole and zero are both complex, add their
conjugates.
2. Else if the pole is complex and the zero is real, add the
conjugate pole and the next closest real zero.
3. Else if the pole is real and the zero is complex, add the
conjugate zero and the real pole closest to those zeros.
4. Else (we must have a real pole and real zero) add the next
real pole closest to the unit circle, and then add the real
zero closest to that pole.
.. [#] This conditional can only be met for specific odd-order inputs
with the ``pairing == 'keep_odd'`` method.
.. versionadded:: 0.16.0
Examples
--------
Design a 6th order low-pass elliptic digital filter for a system with a
sampling rate of 8000 Hz that has a pass-band corner frequency of
1000 Hz. The ripple in the pass-band should not exceed 0.087 dB, and
the attenuation in the stop-band should be at least 90 dB.
In the following call to `signal.ellip`, we could use ``output='sos'``,
but for this example, we'll use ``output='zpk'``, and then convert to SOS
format with `zpk2sos`:
>>> from scipy import signal
>>> z, p, k = signal.ellip(6, 0.087, 90, 1000/(0.5*8000), output='zpk')
Now convert to SOS format.
>>> sos = signal.zpk2sos(z, p, k)
The coefficients of the numerators of the sections:
>>> sos[:, :3]
array([[ 0.0014154 , 0.00248707, 0.0014154 ],
[ 1. , 0.72965193, 1. ],
[ 1. , 0.17594966, 1. ]])
The symmetry in the coefficients occurs because all the zeros are on the
unit circle.
The coefficients of the denominators of the sections:
>>> sos[:, 3:]
array([[ 1. , -1.32543251, 0.46989499],
[ 1. , -1.26117915, 0.6262586 ],
[ 1. , -1.25707217, 0.86199667]])
The next example shows the effect of the `pairing` option. We have a
system with three poles and three zeros, so the SOS array will have
    shape (2, 6). That means there is, in effect, an extra pole and an extra
zero at the origin in the SOS representation.
>>> z1 = np.array([-1, -0.5-0.5j, -0.5+0.5j])
>>> p1 = np.array([0.75, 0.8+0.1j, 0.8-0.1j])
With ``pairing='nearest'`` (the default), we obtain
>>> signal.zpk2sos(z1, p1, 1)
array([[ 1. , 1. , 0.5 , 1. , -0.75, 0. ],
[ 1. , 1. , 0. , 1. , -1.6 , 0.65]])
    The first section has the zeros {-0.5-0.5j, -0.5+0.5j} and the poles
{0, 0.75}, and the second section has the zeros {-1, 0} and poles
{0.8+0.1j, 0.8-0.1j}. Note that the extra pole and zero at the origin
have been assigned to different sections.
With ``pairing='keep_odd'``, we obtain:
>>> signal.zpk2sos(z1, p1, 1, pairing='keep_odd')
array([[ 1. , 1. , 0. , 1. , -0.75, 0. ],
[ 1. , 1. , 0.5 , 1. , -1.6 , 0.65]])
The extra pole and zero at the origin are in the same section.
The first section is, in effect, a first-order section.
"""
# TODO in the near future:
# 1. Add SOS capability to `filtfilt`, `freqz`, etc. somehow (#3259).
# 2. Make `decimate` use `sosfilt` instead of `lfilter`.
# 3. Make sosfilt automatically simplify sections to first order
# when possible. Note this might make `sosfiltfilt` a bit harder (ICs).
# 4. Further optimizations of the section ordering / pole-zero pairing.
# See the wiki for other potential issues.
valid_pairings = ['nearest', 'keep_odd']
if pairing not in valid_pairings:
raise ValueError('pairing must be one of %s, not %s'
% (valid_pairings, pairing))
if len(z) == len(p) == 0:
return array([[k, 0., 0., 1., 0., 0.]])
# ensure we have the same number of poles and zeros, and make copies
p = np.concatenate((p, np.zeros(max(len(z) - len(p), 0))))
z = np.concatenate((z, np.zeros(max(len(p) - len(z), 0))))
n_sections = (max(len(p), len(z)) + 1) // 2
sos = zeros((n_sections, 6))
if len(p) % 2 == 1 and pairing == 'nearest':
p = np.concatenate((p, [0.]))
z = np.concatenate((z, [0.]))
assert len(p) == len(z)
# Ensure we have complex conjugate pairs
# (note that _cplxreal only gives us one element of each complex pair):
z = np.concatenate(_cplxreal(z))
p = np.concatenate(_cplxreal(p))
p_sos = np.zeros((n_sections, 2), np.complex128)
z_sos = np.zeros_like(p_sos)
for si in range(n_sections):
# Select the next "worst" pole
p1_idx = np.argmin(np.abs(1 - np.abs(p)))
p1 = p[p1_idx]
p = np.delete(p, p1_idx)
# Pair that pole with a zero
if np.isreal(p1) and np.isreal(p).sum() == 0:
# Special case to set a first-order section
z1_idx = _nearest_real_complex_idx(z, p1, 'real')
z1 = z[z1_idx]
z = np.delete(z, z1_idx)
p2 = z2 = 0
else:
if not np.isreal(p1) and np.isreal(z).sum() == 1:
# Special case to ensure we choose a complex zero to pair
# with so later (setting up a first-order section)
z1_idx = _nearest_real_complex_idx(z, p1, 'complex')
assert not np.isreal(z[z1_idx])
else:
# Pair the pole with the closest zero (real or complex)
z1_idx = np.argmin(np.abs(p1 - z))
z1 = z[z1_idx]
z = np.delete(z, z1_idx)
# Now that we have p1 and z1, figure out what p2 and z2 need to be
if not np.isreal(p1):
if not np.isreal(z1): # complex pole, complex zero
p2 = p1.conj()
z2 = z1.conj()
else: # complex pole, real zero
p2 = p1.conj()
z2_idx = _nearest_real_complex_idx(z, p1, 'real')
z2 = z[z2_idx]
assert np.isreal(z2)
z = np.delete(z, z2_idx)
else:
if not np.isreal(z1): # real pole, complex zero
z2 = z1.conj()
p2_idx = _nearest_real_complex_idx(p, z1, 'real')
p2 = p[p2_idx]
assert np.isreal(p2)
else: # real pole, real zero
# pick the next "worst" pole to use
idx = np.where(np.isreal(p))[0]
assert len(idx) > 0
p2_idx = idx[np.argmin(np.abs(np.abs(p[idx]) - 1))]
p2 = p[p2_idx]
# find a real zero to match the added pole
assert np.isreal(p2)
z2_idx = _nearest_real_complex_idx(z, p2, 'real')
z2 = z[z2_idx]
assert np.isreal(z2)
z = np.delete(z, z2_idx)
p = np.delete(p, p2_idx)
p_sos[si] = [p1, p2]
z_sos[si] = [z1, z2]
assert len(p) == len(z) == 0 # we've consumed all poles and zeros
del p, z
# Construct the system, reversing order so the "worst" are last
p_sos = np.reshape(p_sos[::-1], (n_sections, 2))
z_sos = np.reshape(z_sos[::-1], (n_sections, 2))
gains = np.ones(n_sections)
gains[0] = k
for si in range(n_sections):
x = zpk2tf(z_sos[si], p_sos[si], gains[si])
sos[si] = np.concatenate(x)
return sos
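# Typical downstream use of the SOS array (order and cutoff are arbitrary):
#
#     >>> from scipy import signal
#     >>> import numpy as np
#     >>> z, p, k = signal.butter(6, 0.125, output='zpk')
#     >>> sos = signal.zpk2sos(z, p, k)
#     >>> x = np.random.randn(1000)      # some test signal
#     >>> y = signal.sosfilt(sos, x)     # apply the filter section by section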
def normalize(b, a):
"""Normalize polynomial representation of a transfer function.
If values of `b` are too close to 0, they are removed. In that case, a
BadCoefficients warning is emitted.
"""
b, a = map(atleast_1d, (b, a))
if len(a.shape) != 1:
raise ValueError("Denominator polynomial must be rank-1 array.")
if len(b.shape) > 2:
raise ValueError("Numerator polynomial must be rank-1 or"
" rank-2 array.")
if len(b.shape) == 1:
b = asarray([b], b.dtype.char)
while a[0] == 0.0 and len(a) > 1:
a = a[1:]
outb = b * (1.0) / a[0]
outa = a * (1.0) / a[0]
if allclose(0, outb[:, 0], atol=1e-14):
warnings.warn("Badly conditioned filter coefficients (numerator): the "
"results may be meaningless", BadCoefficients)
while allclose(0, outb[:, 0], atol=1e-14) and (outb.shape[-1] > 1):
outb = outb[:, 1:]
if outb.shape[0] == 1:
outb = outb[0]
return outb, outa
def lp2lp(b, a, wo=1.0):
"""
Transform a lowpass filter prototype to a different frequency.
Return an analog low-pass filter with cutoff frequency `wo`
from an analog low-pass filter prototype with unity cutoff frequency, in
transfer function ('ba') representation.
"""
a, b = map(atleast_1d, (a, b))
try:
wo = float(wo)
except TypeError:
wo = float(wo[0])
d = len(a)
n = len(b)
M = max((d, n))
pwo = pow(wo, numpy.arange(M - 1, -1, -1))
start1 = max((n - d, 0))
start2 = max((d - n, 0))
b = b * pwo[start1] / pwo[start2:]
a = a * pwo[start1] / pwo[start1:]
return normalize(b, a)
def lp2hp(b, a, wo=1.0):
"""
Transform a lowpass filter prototype to a highpass filter.
Return an analog high-pass filter with cutoff frequency `wo`
from an analog low-pass filter prototype with unity cutoff frequency, in
transfer function ('ba') representation.
"""
a, b = map(atleast_1d, (a, b))
try:
wo = float(wo)
except TypeError:
wo = float(wo[0])
d = len(a)
n = len(b)
if wo != 1:
pwo = pow(wo, numpy.arange(max((d, n))))
else:
pwo = numpy.ones(max((d, n)), b.dtype.char)
if d >= n:
outa = a[::-1] * pwo
outb = resize(b, (d,))
outb[n:] = 0.0
outb[:n] = b[::-1] * pwo[:n]
else:
outb = b[::-1] * pwo
outa = resize(a, (n,))
outa[d:] = 0.0
outa[:d] = a[::-1] * pwo[:d]
return normalize(outb, outa)
def lp2bp(b, a, wo=1.0, bw=1.0):
"""
Transform a lowpass filter prototype to a bandpass filter.
Return an analog band-pass filter with center frequency `wo` and
bandwidth `bw` from an analog low-pass filter prototype with unity
cutoff frequency, in transfer function ('ba') representation.
"""
a, b = map(atleast_1d, (a, b))
D = len(a) - 1
N = len(b) - 1
artype = mintypecode((a, b))
ma = max([N, D])
Np = N + ma
Dp = D + ma
bprime = numpy.zeros(Np + 1, artype)
aprime = numpy.zeros(Dp + 1, artype)
wosq = wo * wo
for j in range(Np + 1):
val = 0.0
for i in range(0, N + 1):
for k in range(0, i + 1):
if ma - i + 2 * k == j:
val += comb(i, k) * b[N - i] * (wosq) ** (i - k) / bw ** i
bprime[Np - j] = val
for j in range(Dp + 1):
val = 0.0
for i in range(0, D + 1):
for k in range(0, i + 1):
if ma - i + 2 * k == j:
val += comb(i, k) * a[D - i] * (wosq) ** (i - k) / bw ** i
aprime[Dp - j] = val
return normalize(bprime, aprime)
def lp2bs(b, a, wo=1.0, bw=1.0):
"""
Transform a lowpass filter prototype to a bandstop filter.
Return an analog band-stop filter with center frequency `wo` and
bandwidth `bw` from an analog low-pass filter prototype with unity
cutoff frequency, in transfer function ('ba') representation.
"""
a, b = map(atleast_1d, (a, b))
D = len(a) - 1
N = len(b) - 1
artype = mintypecode((a, b))
M = max([N, D])
Np = M + M
Dp = M + M
bprime = numpy.zeros(Np + 1, artype)
aprime = numpy.zeros(Dp + 1, artype)
wosq = wo * wo
for j in range(Np + 1):
val = 0.0
for i in range(0, N + 1):
for k in range(0, M - i + 1):
if i + 2 * k == j:
val += (comb(M - i, k) * b[N - i] *
(wosq) ** (M - i - k) * bw ** i)
bprime[Np - j] = val
for j in range(Dp + 1):
val = 0.0
for i in range(0, D + 1):
for k in range(0, M - i + 1):
if i + 2 * k == j:
val += (comb(M - i, k) * a[D - i] *
(wosq) ** (M - i - k) * bw ** i)
aprime[Dp - j] = val
return normalize(bprime, aprime)
def bilinear(b, a, fs=1.0):
"""Return a digital filter from an analog one using a bilinear transform.
The bilinear transform substitutes ``(z-1) / (z+1)`` for ``s``.
"""
fs = float(fs)
a, b = map(atleast_1d, (a, b))
D = len(a) - 1
N = len(b) - 1
artype = float
M = max([N, D])
Np = M
Dp = M
bprime = numpy.zeros(Np + 1, artype)
aprime = numpy.zeros(Dp + 1, artype)
for j in range(Np + 1):
val = 0.0
for i in range(N + 1):
for k in range(i + 1):
for l in range(M - i + 1):
if k + l == j:
val += (comb(i, k) * comb(M - i, l) * b[N - i] *
pow(2 * fs, i) * (-1) ** k)
bprime[j] = real(val)
for j in range(Dp + 1):
val = 0.0
for i in range(D + 1):
for k in range(i + 1):
for l in range(M - i + 1):
if k + l == j:
val += (comb(i, k) * comb(M - i, l) * a[D - i] *
pow(2 * fs, i) * (-1) ** k)
aprime[j] = real(val)
return normalize(bprime, aprime)
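# Sketch of discretizing a first-order analog lowpass with the transform above
# (corner frequency and sample rate are arbitrary choices):
#
#     >>> from scipy import signal
#     >>> import numpy as np
#     >>> wc = 2 * np.pi * 10.0                        # 10 Hz corner, in rad/s
#     >>> bz, az = signal.bilinear([wc], [1.0, wc], fs=100.0)
#
# Because the transform warps the frequency axis, a desired digital corner at
# f hertz is usually targeted by pre-warping the analog design frequency to
# 2*fs*tan(pi*f/fs), which is what iirfilter() does further below.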
def iirdesign(wp, ws, gpass, gstop, analog=False, ftype='ellip', output='ba'):
"""Complete IIR digital and analog filter design.
Given passband and stopband frequencies and gains, construct an analog or
digital IIR filter of minimum order for a given basic type. Return the
output in numerator, denominator ('ba'), pole-zero ('zpk') or second order
sections ('sos') form.
Parameters
----------
wp, ws : float
Passband and stopband edge frequencies.
For digital filters, these are normalized from 0 to 1, where 1 is the
Nyquist frequency, pi radians/sample. (`wp` and `ws` are thus in
half-cycles / sample.) For example:
- Lowpass: wp = 0.2, ws = 0.3
- Highpass: wp = 0.3, ws = 0.2
- Bandpass: wp = [0.2, 0.5], ws = [0.1, 0.6]
- Bandstop: wp = [0.1, 0.6], ws = [0.2, 0.5]
For analog filters, `wp` and `ws` are angular frequencies (e.g. rad/s).
gpass : float
The maximum loss in the passband (dB).
gstop : float
The minimum attenuation in the stopband (dB).
analog : bool, optional
When True, return an analog filter, otherwise a digital filter is
returned.
ftype : str, optional
The type of IIR filter to design:
- Butterworth : 'butter'
- Chebyshev I : 'cheby1'
- Chebyshev II : 'cheby2'
- Cauer/elliptic: 'ellip'
- Bessel/Thomson: 'bessel'
output : {'ba', 'zpk', 'sos'}, optional
Type of output: numerator/denominator ('ba'), pole-zero ('zpk'), or
second-order sections ('sos'). Default is 'ba'.
Returns
-------
b, a : ndarray, ndarray
Numerator (`b`) and denominator (`a`) polynomials of the IIR filter.
Only returned if ``output='ba'``.
z, p, k : ndarray, ndarray, float
Zeros, poles, and system gain of the IIR filter transfer
function. Only returned if ``output='zpk'``.
sos : ndarray
Second-order sections representation of the IIR filter.
Only returned if ``output=='sos'``.
See Also
--------
butter : Filter design using order and critical points
cheby1, cheby2, ellip, bessel
buttord : Find order and critical points from passband and stopband spec
cheb1ord, cheb2ord, ellipord
iirfilter : General filter design using order and critical frequencies
Notes
-----
The ``'sos'`` output parameter was added in 0.16.0.
"""
try:
ordfunc = filter_dict[ftype][1]
except KeyError:
raise ValueError("Invalid IIR filter type: %s" % ftype)
except IndexError:
raise ValueError(("%s does not have order selection. Use "
"iirfilter function.") % ftype)
wp = atleast_1d(wp)
ws = atleast_1d(ws)
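    # Infer the response type from the band edges: a single wp/ws pair means
    # lowpass (wp < ws) or highpass (wp >= ws); length-2 edges mean bandstop
    # or bandpass (bandpass when the passband sits inside the stopband edges).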
band_type = 2 * (len(wp) - 1)
band_type += 1
if wp[0] >= ws[0]:
band_type += 1
btype = {1: 'lowpass', 2: 'highpass',
3: 'bandstop', 4: 'bandpass'}[band_type]
N, Wn = ordfunc(wp, ws, gpass, gstop, analog=analog)
return iirfilter(N, Wn, rp=gpass, rs=gstop, analog=analog, btype=btype,
ftype=ftype, output=output)
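# Minimal usage sketch for iirdesign (the spec numbers are arbitrary): a digital
# lowpass with passband edge 0.2 and stopband edge 0.3 (half-cycles/sample),
# at most 1 dB passband loss and at least 40 dB stopband attenuation:
#
#     >>> from scipy import signal
#     >>> b, a = signal.iirdesign(wp=0.2, ws=0.3, gpass=1, gstop=40, ftype='ellip')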
def iirfilter(N, Wn, rp=None, rs=None, btype='band', analog=False,
ftype='butter', output='ba'):
"""
IIR digital and analog filter design given order and critical points.
Design an Nth order digital or analog filter and return the filter
coefficients.
Parameters
----------
N : int
The order of the filter.
Wn : array_like
A scalar or length-2 sequence giving the critical frequencies.
For digital filters, `Wn` is normalized from 0 to 1, where 1 is the
Nyquist frequency, pi radians/sample. (`Wn` is thus in
half-cycles / sample.)
For analog filters, `Wn` is an angular frequency (e.g. rad/s).
rp : float, optional
For Chebyshev and elliptic filters, provides the maximum ripple
in the passband. (dB)
rs : float, optional
For Chebyshev and elliptic filters, provides the minimum attenuation
in the stop band. (dB)
btype : {'bandpass', 'lowpass', 'highpass', 'bandstop'}, optional
The type of filter. Default is 'bandpass'.
analog : bool, optional
When True, return an analog filter, otherwise a digital filter is
returned.
ftype : str, optional
The type of IIR filter to design:
- Butterworth : 'butter'
- Chebyshev I : 'cheby1'
- Chebyshev II : 'cheby2'
- Cauer/elliptic: 'ellip'
- Bessel/Thomson: 'bessel'
output : {'ba', 'zpk', 'sos'}, optional
Type of output: numerator/denominator ('ba'), pole-zero ('zpk'), or
second-order sections ('sos'). Default is 'ba'.
Returns
-------
b, a : ndarray, ndarray
Numerator (`b`) and denominator (`a`) polynomials of the IIR filter.
Only returned if ``output='ba'``.
z, p, k : ndarray, ndarray, float
Zeros, poles, and system gain of the IIR filter transfer
function. Only returned if ``output='zpk'``.
sos : ndarray
Second-order sections representation of the IIR filter.
Only returned if ``output=='sos'``.
See Also
--------
butter : Filter design using order and critical points
cheby1, cheby2, ellip, bessel
buttord : Find order and critical points from passband and stopband spec
cheb1ord, cheb2ord, ellipord
iirdesign : General filter design using passband and stopband spec
Notes
-----
The ``'sos'`` output parameter was added in 0.16.0.
Examples
--------
Generate a 17th-order Chebyshev II bandpass filter and plot the frequency
response:
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> b, a = signal.iirfilter(17, [50, 200], rs=60, btype='band',
... analog=True, ftype='cheby2')
>>> w, h = signal.freqs(b, a, 1000)
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> ax.semilogx(w, 20 * np.log10(abs(h)))
>>> ax.set_title('Chebyshev Type II bandpass frequency response')
>>> ax.set_xlabel('Frequency [radians / second]')
>>> ax.set_ylabel('Amplitude [dB]')
>>> ax.axis((10, 1000, -100, 10))
>>> ax.grid(which='both', axis='both')
>>> plt.show()
"""
ftype, btype, output = [x.lower() for x in (ftype, btype, output)]
Wn = asarray(Wn)
try:
btype = band_dict[btype]
except KeyError:
raise ValueError("'%s' is an invalid bandtype for filter." % btype)
try:
typefunc = filter_dict[ftype][0]
except KeyError:
raise ValueError("'%s' is not a valid basic IIR filter." % ftype)
if output not in ['ba', 'zpk', 'sos']:
raise ValueError("'%s' is not a valid output form." % output)
if rp is not None and rp < 0:
raise ValueError("passband ripple (rp) must be positive")
if rs is not None and rs < 0:
raise ValueError("stopband attenuation (rs) must be positive")
# Get analog lowpass prototype
if typefunc in [buttap, besselap]:
z, p, k = typefunc(N)
elif typefunc == cheb1ap:
if rp is None:
raise ValueError("passband ripple (rp) must be provided to "
"design a Chebyshev I filter.")
z, p, k = typefunc(N, rp)
elif typefunc == cheb2ap:
if rs is None:
raise ValueError("stopband attenuation (rs) must be provided to "
"design an Chebyshev II filter.")
z, p, k = typefunc(N, rs)
elif typefunc == ellipap:
if rs is None or rp is None:
raise ValueError("Both rp and rs must be provided to design an "
"elliptic filter.")
z, p, k = typefunc(N, rp, rs)
else:
raise NotImplementedError("'%s' not implemented in iirfilter." % ftype)
# Pre-warp frequencies for digital filter design
if not analog:
if numpy.any(Wn < 0) or numpy.any(Wn > 1):
raise ValueError("Digital filter critical frequencies "
"must be 0 <= Wn <= 1")
fs = 2.0
warped = 2 * fs * tan(pi * Wn / fs)
else:
warped = Wn
# transform to lowpass, bandpass, highpass, or bandstop
if btype in ('lowpass', 'highpass'):
if numpy.size(Wn) != 1:
raise ValueError('Must specify a single critical frequency Wn')
if btype == 'lowpass':
z, p, k = _zpklp2lp(z, p, k, wo=warped)
elif btype == 'highpass':
z, p, k = _zpklp2hp(z, p, k, wo=warped)
elif btype in ('bandpass', 'bandstop'):
try:
bw = warped[1] - warped[0]
wo = sqrt(warped[0] * warped[1])
except IndexError:
raise ValueError('Wn must specify start and stop frequencies')
if btype == 'bandpass':
z, p, k = _zpklp2bp(z, p, k, wo=wo, bw=bw)
elif btype == 'bandstop':
z, p, k = _zpklp2bs(z, p, k, wo=wo, bw=bw)
else:
raise NotImplementedError("'%s' not implemented in iirfilter." % btype)
# Find discrete equivalent if necessary
if not analog:
z, p, k = _zpkbilinear(z, p, k, fs=fs)
# Transform to proper out type (pole-zero, state-space, numer-denom)
if output == 'zpk':
return z, p, k
elif output == 'ba':
return zpk2tf(z, p, k)
elif output == 'sos':
return zpk2sos(z, p, k)
def _relative_degree(z, p):
"""
Return relative degree of transfer function from zeros and poles
"""
degree = len(p) - len(z)
if degree < 0:
raise ValueError("Improper transfer function. "
"Must have at least as many poles as zeros.")
else:
return degree
# TODO: merge these into existing functions or make public versions
def _zpkbilinear(z, p, k, fs):
"""
Return a digital filter from an analog one using a bilinear transform.
Transform a set of poles and zeros from the analog s-plane to the digital
z-plane using Tustin's method, which substitutes ``(z-1) / (z+1)`` for
``s``, maintaining the shape of the frequency response.
Parameters
----------
z : array_like
Zeros of the analog IIR filter transfer function.
p : array_like
Poles of the analog IIR filter transfer function.
k : float
System gain of the analog IIR filter transfer function.
fs : float
Sample rate, as ordinary frequency (e.g. hertz). No prewarping is
done in this function.
Returns
-------
z : ndarray
Zeros of the transformed digital filter transfer function.
p : ndarray
Poles of the transformed digital filter transfer function.
k : float
System gain of the transformed digital filter.
"""
z = atleast_1d(z)
p = atleast_1d(p)
degree = _relative_degree(z, p)
fs2 = 2*fs
# Bilinear transform the poles and zeros
z_z = (fs2 + z) / (fs2 - z)
p_z = (fs2 + p) / (fs2 - p)
# Any zeros that were at infinity get moved to the Nyquist frequency
z_z = append(z_z, -ones(degree))
# Compensate for gain change
k_z = k * real(prod(fs2 - z) / prod(fs2 - p))
return z_z, p_z, k_z
def _zpklp2lp(z, p, k, wo=1.0):
r"""
Transform a lowpass filter prototype to a different frequency.
Return an analog low-pass filter with cutoff frequency `wo`
from an analog low-pass filter prototype with unity cutoff frequency,
using zeros, poles, and gain ('zpk') representation.
Parameters
----------
z : array_like
Zeros of the analog IIR filter transfer function.
p : array_like
Poles of the analog IIR filter transfer function.
k : float
System gain of the analog IIR filter transfer function.
wo : float
Desired cutoff, as angular frequency (e.g. rad/s).
Defaults to no change.
Returns
-------
z : ndarray
Zeros of the transformed low-pass filter transfer function.
p : ndarray
Poles of the transformed low-pass filter transfer function.
k : float
System gain of the transformed low-pass filter.
Notes
-----
This is derived from the s-plane substitution
.. math:: s \rightarrow \frac{s}{\omega_0}
"""
z = atleast_1d(z)
p = atleast_1d(p)
wo = float(wo) # Avoid int wraparound
degree = _relative_degree(z, p)
# Scale all points radially from origin to shift cutoff frequency
z_lp = wo * z
p_lp = wo * p
# Each shifted pole decreases gain by wo, each shifted zero increases it.
# Cancel out the net change to keep overall gain the same
k_lp = k * wo**degree
return z_lp, p_lp, k_lp
def _zpklp2hp(z, p, k, wo=1.0):
r"""
Transform a lowpass filter prototype to a highpass filter.
Return an analog high-pass filter with cutoff frequency `wo`
from an analog low-pass filter prototype with unity cutoff frequency,
using zeros, poles, and gain ('zpk') representation.
Parameters
----------
z : array_like
Zeros of the analog IIR filter transfer function.
p : array_like
Poles of the analog IIR filter transfer function.
k : float
System gain of the analog IIR filter transfer function.
wo : float
Desired cutoff, as angular frequency (e.g. rad/s).
Defaults to no change.
Returns
-------
z : ndarray
Zeros of the transformed high-pass filter transfer function.
p : ndarray
Poles of the transformed high-pass filter transfer function.
k : float
System gain of the transformed high-pass filter.
Notes
-----
This is derived from the s-plane substitution
.. math:: s \rightarrow \frac{\omega_0}{s}
This maintains symmetry of the lowpass and highpass responses on a
logarithmic scale.
"""
z = atleast_1d(z)
p = atleast_1d(p)
wo = float(wo)
degree = _relative_degree(z, p)
# Invert positions radially about unit circle to convert LPF to HPF
# Scale all points radially from origin to shift cutoff frequency
z_hp = wo / z
p_hp = wo / p
# If lowpass had zeros at infinity, inverting moves them to origin.
z_hp = append(z_hp, zeros(degree))
# Cancel out gain change caused by inversion
k_hp = k * real(prod(-z) / prod(-p))
return z_hp, p_hp, k_hp
def _zpklp2bp(z, p, k, wo=1.0, bw=1.0):
r"""
Transform a lowpass filter prototype to a bandpass filter.
Return an analog band-pass filter with center frequency `wo` and
bandwidth `bw` from an analog low-pass filter prototype with unity
cutoff frequency, using zeros, poles, and gain ('zpk') representation.
Parameters
----------
z : array_like
Zeros of the analog IIR filter transfer function.
p : array_like
Poles of the analog IIR filter transfer function.
k : float
System gain of the analog IIR filter transfer function.
wo : float
Desired passband center, as angular frequency (e.g. rad/s).
Defaults to no change.
bw : float
Desired passband width, as angular frequency (e.g. rad/s).
Defaults to 1.
Returns
-------
z : ndarray
Zeros of the transformed band-pass filter transfer function.
p : ndarray
Poles of the transformed band-pass filter transfer function.
k : float
System gain of the transformed band-pass filter.
Notes
-----
This is derived from the s-plane substitution
.. math:: s \rightarrow \frac{s^2 + {\omega_0}^2}{s \cdot \mathrm{BW}}
This is the "wideband" transformation, producing a passband with
geometric (log frequency) symmetry about `wo`.
"""
z = atleast_1d(z)
p = atleast_1d(p)
wo = float(wo)
bw = float(bw)
degree = _relative_degree(z, p)
# Scale poles and zeros to desired bandwidth
z_lp = z * bw/2
p_lp = p * bw/2
# Square root needs to produce complex result, not NaN
z_lp = z_lp.astype(complex)
p_lp = p_lp.astype(complex)
# Duplicate poles and zeros and shift from baseband to +wo and -wo
z_bp = concatenate((z_lp + sqrt(z_lp**2 - wo**2),
z_lp - sqrt(z_lp**2 - wo**2)))
p_bp = concatenate((p_lp + sqrt(p_lp**2 - wo**2),
p_lp - sqrt(p_lp**2 - wo**2)))
# Move degree zeros to origin, leaving degree zeros at infinity for BPF
z_bp = append(z_bp, zeros(degree))
# Cancel out gain change from frequency scaling
k_bp = k * bw**degree
return z_bp, p_bp, k_bp
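# The substitution above doubles the order: every prototype root is mapped to a
# pair of roots near s = +1j*wo and -1j*wo, and `degree` extra zeros land at the
# origin. Sketch with a 2nd-order Butterworth prototype (wo, bw are arbitrary):
#
#     >>> from scipy.signal import buttap
#     >>> z, p, k = buttap(2)                           # no zeros, two poles
#     >>> z_bp, p_bp, k_bp = _zpklp2bp(z, p, k, wo=1.0, bw=0.5)
#     >>> len(p_bp), len(z_bp)
#     (4, 2)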
def _zpklp2bs(z, p, k, wo=1.0, bw=1.0):
r"""
Transform a lowpass filter prototype to a bandstop filter.
Return an analog band-stop filter with center frequency `wo` and
stopband width `bw` from an analog low-pass filter prototype with unity
cutoff frequency, using zeros, poles, and gain ('zpk') representation.
Parameters
----------
z : array_like
Zeros of the analog IIR filter transfer function.
p : array_like
Poles of the analog IIR filter transfer function.
k : float
System gain of the analog IIR filter transfer function.
wo : float
Desired stopband center, as angular frequency (e.g. rad/s).
Defaults to no change.
bw : float
Desired stopband width, as angular frequency (e.g. rad/s).
Defaults to 1.
Returns
-------
z : ndarray
Zeros of the transformed band-stop filter transfer function.
p : ndarray
Poles of the transformed band-stop filter transfer function.
k : float
System gain of the transformed band-stop filter.
Notes
-----
This is derived from the s-plane substitution
.. math:: s \rightarrow \frac{s \cdot \mathrm{BW}}{s^2 + {\omega_0}^2}
This is the "wideband" transformation, producing a stopband with
geometric (log frequency) symmetry about `wo`.
"""
z = atleast_1d(z)
p = atleast_1d(p)
wo = float(wo)
bw = float(bw)
degree = _relative_degree(z, p)
# Invert to a highpass filter with desired bandwidth
z_hp = (bw/2) / z
p_hp = (bw/2) / p
# Square root needs to produce complex result, not NaN
z_hp = z_hp.astype(complex)
p_hp = p_hp.astype(complex)
# Duplicate poles and zeros and shift from baseband to +wo and -wo
z_bs = concatenate((z_hp + sqrt(z_hp**2 - wo**2),
z_hp - sqrt(z_hp**2 - wo**2)))
p_bs = concatenate((p_hp + sqrt(p_hp**2 - wo**2),
p_hp - sqrt(p_hp**2 - wo**2)))
# Move any zeros that were at infinity to the center of the stopband
z_bs = append(z_bs, +1j*wo * ones(degree))
z_bs = append(z_bs, -1j*wo * ones(degree))
# Cancel out gain change caused by inversion
k_bs = k * real(prod(-z) / prod(-p))
return z_bs, p_bs, k_bs
def butter(N, Wn, btype='low', analog=False, output='ba'):
"""
Butterworth digital and analog filter design.
Design an Nth order digital or analog Butterworth filter and return
the filter coefficients.
Parameters
----------
N : int
The order of the filter.
Wn : array_like
A scalar or length-2 sequence giving the critical frequencies.
For a Butterworth filter, this is the point at which the gain
drops to 1/sqrt(2) that of the passband (the "-3 dB point").
For digital filters, `Wn` is normalized from 0 to 1, where 1 is the
Nyquist frequency, pi radians/sample. (`Wn` is thus in
half-cycles / sample.)
For analog filters, `Wn` is an angular frequency (e.g. rad/s).
btype : {'lowpass', 'highpass', 'bandpass', 'bandstop'}, optional
The type of filter. Default is 'lowpass'.
analog : bool, optional
When True, return an analog filter, otherwise a digital filter is
returned.
output : {'ba', 'zpk', 'sos'}, optional
Type of output: numerator/denominator ('ba'), pole-zero ('zpk'), or
second-order sections ('sos'). Default is 'ba'.
Returns
-------
b, a : ndarray, ndarray
Numerator (`b`) and denominator (`a`) polynomials of the IIR filter.
Only returned if ``output='ba'``.
z, p, k : ndarray, ndarray, float
Zeros, poles, and system gain of the IIR filter transfer
function. Only returned if ``output='zpk'``.
sos : ndarray
Second-order sections representation of the IIR filter.
Only returned if ``output=='sos'``.
See Also
--------
buttord
Notes
-----
The Butterworth filter has maximally flat frequency response in the
passband.
The ``'sos'`` output parameter was added in 0.16.0.
Examples
--------
Plot the filter's frequency response, showing the critical points:
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> b, a = signal.butter(4, 100, 'low', analog=True)
>>> w, h = signal.freqs(b, a)
>>> plt.semilogx(w, 20 * np.log10(abs(h)))
>>> plt.title('Butterworth filter frequency response')
>>> plt.xlabel('Frequency [radians / second]')
>>> plt.ylabel('Amplitude [dB]')
>>> plt.margins(0, 0.1)
>>> plt.grid(which='both', axis='both')
>>> plt.axvline(100, color='green') # cutoff frequency
>>> plt.show()
"""
return iirfilter(N, Wn, btype=btype, analog=analog,
output=output, ftype='butter')
def cheby1(N, rp, Wn, btype='low', analog=False, output='ba'):
"""
Chebyshev type I digital and analog filter design.
Design an Nth order digital or analog Chebyshev type I filter and
return the filter coefficients.
Parameters
----------
N : int
The order of the filter.
rp : float
The maximum ripple allowed below unity gain in the passband.
Specified in decibels, as a positive number.
Wn : array_like
A scalar or length-2 sequence giving the critical frequencies.
For Type I filters, this is the point in the transition band at which
the gain first drops below -`rp`.
For digital filters, `Wn` is normalized from 0 to 1, where 1 is the
Nyquist frequency, pi radians/sample. (`Wn` is thus in
half-cycles / sample.)
For analog filters, `Wn` is an angular frequency (e.g. rad/s).
btype : {'lowpass', 'highpass', 'bandpass', 'bandstop'}, optional
The type of filter. Default is 'lowpass'.
analog : bool, optional
When True, return an analog filter, otherwise a digital filter is
returned.
output : {'ba', 'zpk', 'sos'}, optional
Type of output: numerator/denominator ('ba'), pole-zero ('zpk'), or
second-order sections ('sos'). Default is 'ba'.
Returns
-------
b, a : ndarray, ndarray
Numerator (`b`) and denominator (`a`) polynomials of the IIR filter.
Only returned if ``output='ba'``.
z, p, k : ndarray, ndarray, float
Zeros, poles, and system gain of the IIR filter transfer
function. Only returned if ``output='zpk'``.
sos : ndarray
Second-order sections representation of the IIR filter.
Only returned if ``output=='sos'``.
See Also
--------
cheb1ord
Notes
-----
The Chebyshev type I filter maximizes the rate of cutoff between the
frequency response's passband and stopband, at the expense of ripple in
the passband and increased ringing in the step response.
Type I filters roll off faster than Type II (`cheby2`), but Type II
filters do not have any ripple in the passband.
The equiripple passband has N maxima or minima (for example, a
5th-order filter has 3 maxima and 2 minima). Consequently, the DC gain is
unity for odd-order filters, or -rp dB for even-order filters.
The ``'sos'`` output parameter was added in 0.16.0.
Examples
--------
Plot the filter's frequency response, showing the critical points:
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> b, a = signal.cheby1(4, 5, 100, 'low', analog=True)
>>> w, h = signal.freqs(b, a)
>>> plt.semilogx(w, 20 * np.log10(abs(h)))
>>> plt.title('Chebyshev Type I frequency response (rp=5)')
>>> plt.xlabel('Frequency [radians / second]')
>>> plt.ylabel('Amplitude [dB]')
>>> plt.margins(0, 0.1)
>>> plt.grid(which='both', axis='both')
>>> plt.axvline(100, color='green') # cutoff frequency
>>> plt.axhline(-5, color='green') # rp
>>> plt.show()
"""
return iirfilter(N, Wn, rp=rp, btype=btype, analog=analog,
output=output, ftype='cheby1')
def cheby2(N, rs, Wn, btype='low', analog=False, output='ba'):
"""
Chebyshev type II digital and analog filter design.
Design an Nth order digital or analog Chebyshev type II filter and
return the filter coefficients.
Parameters
----------
N : int
The order of the filter.
rs : float
The minimum attenuation required in the stop band.
Specified in decibels, as a positive number.
Wn : array_like
A scalar or length-2 sequence giving the critical frequencies.
For Type II filters, this is the point in the transition band at which
the gain first reaches -`rs`.
For digital filters, `Wn` is normalized from 0 to 1, where 1 is the
Nyquist frequency, pi radians/sample. (`Wn` is thus in
half-cycles / sample.)
For analog filters, `Wn` is an angular frequency (e.g. rad/s).
btype : {'lowpass', 'highpass', 'bandpass', 'bandstop'}, optional
The type of filter. Default is 'lowpass'.
analog : bool, optional
When True, return an analog filter, otherwise a digital filter is
returned.
output : {'ba', 'zpk', 'sos'}, optional
Type of output: numerator/denominator ('ba'), pole-zero ('zpk'), or
second-order sections ('sos'). Default is 'ba'.
Returns
-------
b, a : ndarray, ndarray
Numerator (`b`) and denominator (`a`) polynomials of the IIR filter.
Only returned if ``output='ba'``.
z, p, k : ndarray, ndarray, float
Zeros, poles, and system gain of the IIR filter transfer
function. Only returned if ``output='zpk'``.
sos : ndarray
Second-order sections representation of the IIR filter.
Only returned if ``output=='sos'``.
See Also
--------
cheb2ord
Notes
-----
The Chebyshev type II filter maximizes the rate of cutoff between the
frequency response's passband and stopband, at the expense of ripple in
the stopband and increased ringing in the step response.
Type II filters do not roll off as fast as Type I (`cheby1`).
The ``'sos'`` output parameter was added in 0.16.0.
Examples
--------
Plot the filter's frequency response, showing the critical points:
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> b, a = signal.cheby2(4, 40, 100, 'low', analog=True)
>>> w, h = signal.freqs(b, a)
>>> plt.semilogx(w, 20 * np.log10(abs(h)))
>>> plt.title('Chebyshev Type II frequency response (rs=40)')
>>> plt.xlabel('Frequency [radians / second]')
>>> plt.ylabel('Amplitude [dB]')
>>> plt.margins(0, 0.1)
>>> plt.grid(which='both', axis='both')
>>> plt.axvline(100, color='green') # cutoff frequency
>>> plt.axhline(-40, color='green') # rs
>>> plt.show()
"""
return iirfilter(N, Wn, rs=rs, btype=btype, analog=analog,
output=output, ftype='cheby2')
def ellip(N, rp, rs, Wn, btype='low', analog=False, output='ba'):
"""
Elliptic (Cauer) digital and analog filter design.
Design an Nth order digital or analog elliptic filter and return
the filter coefficients.
Parameters
----------
N : int
The order of the filter.
rp : float
The maximum ripple allowed below unity gain in the passband.
Specified in decibels, as a positive number.
rs : float
The minimum attenuation required in the stop band.
Specified in decibels, as a positive number.
Wn : array_like
A scalar or length-2 sequence giving the critical frequencies.
For elliptic filters, this is the point in the transition band at
which the gain first drops below -`rp`.
For digital filters, `Wn` is normalized from 0 to 1, where 1 is the
Nyquist frequency, pi radians/sample. (`Wn` is thus in
half-cycles / sample.)
For analog filters, `Wn` is an angular frequency (e.g. rad/s).
btype : {'lowpass', 'highpass', 'bandpass', 'bandstop'}, optional
The type of filter. Default is 'lowpass'.
analog : bool, optional
When True, return an analog filter, otherwise a digital filter is
returned.
output : {'ba', 'zpk', 'sos'}, optional
Type of output: numerator/denominator ('ba'), pole-zero ('zpk'), or
second-order sections ('sos'). Default is 'ba'.
Returns
-------
b, a : ndarray, ndarray
Numerator (`b`) and denominator (`a`) polynomials of the IIR filter.
Only returned if ``output='ba'``.
z, p, k : ndarray, ndarray, float
Zeros, poles, and system gain of the IIR filter transfer
function. Only returned if ``output='zpk'``.
sos : ndarray
Second-order sections representation of the IIR filter.
        Only returned if ``output='sos'``.
See Also
--------
ellipord
Notes
-----
Also known as Cauer or Zolotarev filters, the elliptical filter maximizes
the rate of transition between the frequency response's passband and
stopband, at the expense of ripple in both, and increased ringing in the
step response.
As `rp` approaches 0, the elliptical filter becomes a Chebyshev
type II filter (`cheby2`). As `rs` approaches 0, it becomes a Chebyshev
type I filter (`cheby1`). As both approach 0, it becomes a Butterworth
filter (`butter`).
The equiripple passband has N maxima or minima (for example, a
5th-order filter has 3 maxima and 2 minima). Consequently, the DC gain is
unity for odd-order filters, or -rp dB for even-order filters.
The ``'sos'`` output parameter was added in 0.16.0.
Examples
--------
Plot the filter's frequency response, showing the critical points:
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> b, a = signal.ellip(4, 5, 40, 100, 'low', analog=True)
>>> w, h = signal.freqs(b, a)
>>> plt.semilogx(w, 20 * np.log10(abs(h)))
>>> plt.title('Elliptic filter frequency response (rp=5, rs=40)')
>>> plt.xlabel('Frequency [radians / second]')
>>> plt.ylabel('Amplitude [dB]')
>>> plt.margins(0, 0.1)
>>> plt.grid(which='both', axis='both')
>>> plt.axvline(100, color='green') # cutoff frequency
>>> plt.axhline(-40, color='green') # rs
>>> plt.axhline(-5, color='green') # rp
>>> plt.show()
"""
return iirfilter(N, Wn, rs=rs, rp=rp, btype=btype, analog=analog,
output=output, ftype='elliptic')
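# Illustrative digital band-pass sketch (assumes SciPy's public API; ``fs``,
# ``x`` and the band edges are placeholder values chosen here). Elliptic
# designs meet a given ripple/attenuation spec with the lowest order, which is
# why they are a common choice when phase linearity is not required:
#
#     import numpy as np
#     from scipy import signal
#     fs = 250.0                                   # assumed sample rate [Hz]
#     x = np.random.randn(5000)                    # placeholder input signal
#     wn = [8.0 / (fs / 2), 30.0 / (fs / 2)]       # 8-30 Hz band, normalized
#     sos = signal.ellip(6, 0.5, 60, wn, btype='bandpass', output='sos')
#     y = signal.sosfilt(sos, x)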
def bessel(N, Wn, btype='low', analog=False, output='ba'):
"""Bessel/Thomson digital and analog filter design.
Design an Nth order digital or analog Bessel filter and return the
filter coefficients.
Parameters
----------
N : int
The order of the filter.
Wn : array_like
A scalar or length-2 sequence giving the critical frequencies.
For a Bessel filter, this is defined as the point at which the
asymptotes of the response are the same as a Butterworth filter of
the same order.
For digital filters, `Wn` is normalized from 0 to 1, where 1 is the
Nyquist frequency, pi radians/sample. (`Wn` is thus in
half-cycles / sample.)
For analog filters, `Wn` is an angular frequency (e.g. rad/s).
btype : {'lowpass', 'highpass', 'bandpass', 'bandstop'}, optional
The type of filter. Default is 'lowpass'.
analog : bool, optional
When True, return an analog filter, otherwise a digital filter is
returned.
output : {'ba', 'zpk', 'sos'}, optional
Type of output: numerator/denominator ('ba'), pole-zero ('zpk'), or
second-order sections ('sos'). Default is 'ba'.
Returns
-------
b, a : ndarray, ndarray
Numerator (`b`) and denominator (`a`) polynomials of the IIR filter.
Only returned if ``output='ba'``.
z, p, k : ndarray, ndarray, float
Zeros, poles, and system gain of the IIR filter transfer
function. Only returned if ``output='zpk'``.
sos : ndarray
Second-order sections representation of the IIR filter.
        Only returned if ``output='sos'``.
Notes
-----
Also known as a Thomson filter, the analog Bessel filter has maximally
flat group delay and maximally linear phase response, with very little
ringing in the step response.
As order increases, the Bessel filter approaches a Gaussian filter.
The digital Bessel filter is generated using the bilinear
transform, which does not preserve the phase response of the analog
filter. As such, it is only approximately correct at frequencies
below about fs/4. To get maximally flat group delay at higher
frequencies, the analog Bessel filter must be transformed using
phase-preserving techniques.
For a given `Wn`, the lowpass and highpass filter have the same phase vs
frequency curves; they are "phase-matched".
The ``'sos'`` output parameter was added in 0.16.0.
Examples
--------
Plot the filter's frequency response, showing the flat group delay and
the relationship to the Butterworth's cutoff frequency:
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> b, a = signal.butter(4, 100, 'low', analog=True)
>>> w, h = signal.freqs(b, a)
>>> plt.plot(w, 20 * np.log10(np.abs(h)), color='silver', ls='dashed')
>>> b, a = signal.bessel(4, 100, 'low', analog=True)
>>> w, h = signal.freqs(b, a)
>>> plt.semilogx(w, 20 * np.log10(np.abs(h)))
>>> plt.title('Bessel filter frequency response (with Butterworth)')
>>> plt.xlabel('Frequency [radians / second]')
>>> plt.ylabel('Amplitude [dB]')
>>> plt.margins(0, 0.1)
>>> plt.grid(which='both', axis='both')
>>> plt.axvline(100, color='green') # cutoff frequency
>>> plt.show()
>>> plt.figure()
>>> plt.semilogx(w[1:], -np.diff(np.unwrap(np.angle(h)))/np.diff(w))
>>> plt.title('Bessel filter group delay')
>>> plt.xlabel('Frequency [radians / second]')
>>> plt.ylabel('Group delay [seconds]')
>>> plt.margins(0, 0.1)
>>> plt.grid(which='both', axis='both')
>>> plt.show()
"""
return iirfilter(N, Wn, btype=btype, analog=analog,
output=output, ftype='bessel')
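# Sketch of a typical Bessel use case: smoothing while preserving wave shape.
# Illustrative only; ``fs`` and ``x`` are assumed inputs, and ``filtfilt`` is
# used to cancel the (already small) phase distortion of the digital design:
#
#     import numpy as np
#     from scipy import signal
#     fs = 500.0                                   # assumed sample rate [Hz]
#     x = np.random.randn(4000)                    # placeholder input signal
#     b, a = signal.bessel(4, 20.0 / (fs / 2), 'low')
#     y = signal.filtfilt(b, a, x)                 # zero-phase smoothing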
def maxflat():
    # Placeholder; not implemented in this module.
    pass
def yulewalk():
    # Placeholder; not implemented in this module.
    pass
def band_stop_obj(wp, ind, passb, stopb, gpass, gstop, type):
"""
Band Stop Objective Function for order minimization.
Returns the non-integer order for an analog band stop filter.
Parameters
----------
wp : scalar
Edge of passband `passb`.
ind : int, {0, 1}
Index specifying which `passb` edge to vary (0 or 1).
passb : ndarray
Two element sequence of fixed passband edges.
stopb : ndarray
Two element sequence of fixed stopband edges.
gstop : float
Amount of attenuation in stopband in dB.
gpass : float
Amount of ripple in the passband in dB.
type : {'butter', 'cheby', 'ellip'}
Type of filter.
Returns
-------
n : scalar
Filter order (possibly non-integer).
"""
passbC = passb.copy()
passbC[ind] = wp
nat = (stopb * (passbC[0] - passbC[1]) /
(stopb ** 2 - passbC[0] * passbC[1]))
nat = min(abs(nat))
if type == 'butter':
GSTOP = 10 ** (0.1 * abs(gstop))
GPASS = 10 ** (0.1 * abs(gpass))
n = (log10((GSTOP - 1.0) / (GPASS - 1.0)) / (2 * log10(nat)))
elif type == 'cheby':
GSTOP = 10 ** (0.1 * abs(gstop))
GPASS = 10 ** (0.1 * abs(gpass))
n = arccosh(sqrt((GSTOP - 1.0) / (GPASS - 1.0))) / arccosh(nat)
elif type == 'ellip':
GSTOP = 10 ** (0.1 * gstop)
GPASS = 10 ** (0.1 * gpass)
arg1 = sqrt((GPASS - 1.0) / (GSTOP - 1.0))
arg0 = 1.0 / nat
d0 = special.ellipk([arg0 ** 2, 1 - arg0 ** 2])
d1 = special.ellipk([arg1 ** 2, 1 - arg1 ** 2])
n = (d0[0] * d1[1] / (d0[1] * d1[0]))
else:
raise ValueError("Incorrect type: %s" % type)
return n
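# Usage note: this objective is not meant to be called directly. The *ord
# helpers below minimize it with `optimize.fminbound` to place the band-stop
# passband edges, e.g. (mirroring the call made in `buttord`):
#
#     wp0 = optimize.fminbound(band_stop_obj, passb[0], stopb[0] - 1e-12,
#                              args=(0, passb, stopb, gpass, gstop, 'butter'),
#                              disp=0)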
def buttord(wp, ws, gpass, gstop, analog=False):
"""Butterworth filter order selection.
Return the order of the lowest order digital or analog Butterworth filter
that loses no more than `gpass` dB in the passband and has at least
`gstop` dB attenuation in the stopband.
Parameters
----------
wp, ws : float
Passband and stopband edge frequencies.
For digital filters, these are normalized from 0 to 1, where 1 is the
Nyquist frequency, pi radians/sample. (`wp` and `ws` are thus in
half-cycles / sample.) For example:
- Lowpass: wp = 0.2, ws = 0.3
- Highpass: wp = 0.3, ws = 0.2
- Bandpass: wp = [0.2, 0.5], ws = [0.1, 0.6]
- Bandstop: wp = [0.1, 0.6], ws = [0.2, 0.5]
For analog filters, `wp` and `ws` are angular frequencies (e.g. rad/s).
gpass : float
The maximum loss in the passband (dB).
gstop : float
The minimum attenuation in the stopband (dB).
analog : bool, optional
When True, return an analog filter, otherwise a digital filter is
returned.
Returns
-------
ord : int
The lowest order for a Butterworth filter which meets specs.
wn : ndarray or float
The Butterworth natural frequency (i.e. the "3dB frequency"). Should
be used with `butter` to give filter results.
See Also
--------
butter : Filter design using order and critical points
cheb1ord : Find order and critical points from passband and stopband spec
cheb2ord, ellipord
iirfilter : General filter design using order and critical frequencies
iirdesign : General filter design using passband and stopband spec
Examples
--------
Design an analog bandpass filter with passband within 3 dB from 20 to
    50 rad/s, while rejecting at least 40 dB below 14 and above 60 rad/s.
Plot its frequency response, showing the passband and stopband
constraints in gray.
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> N, Wn = signal.buttord([20, 50], [14, 60], 3, 40, True)
>>> b, a = signal.butter(N, Wn, 'band', True)
>>> w, h = signal.freqs(b, a, np.logspace(1, 2, 500))
>>> plt.semilogx(w, 20 * np.log10(abs(h)))
>>> plt.title('Butterworth bandpass filter fit to constraints')
>>> plt.xlabel('Frequency [radians / second]')
>>> plt.ylabel('Amplitude [dB]')
>>> plt.grid(which='both', axis='both')
>>> plt.fill([1, 14, 14, 1], [-40, -40, 99, 99], '0.9', lw=0) # stop
>>> plt.fill([20, 20, 50, 50], [-99, -3, -3, -99], '0.9', lw=0) # pass
>>> plt.fill([60, 60, 1e9, 1e9], [99, -40, -40, 99], '0.9', lw=0) # stop
>>> plt.axis([10, 100, -60, 3])
>>> plt.show()
"""
wp = atleast_1d(wp)
ws = atleast_1d(ws)
filter_type = 2 * (len(wp) - 1)
filter_type += 1
if wp[0] >= ws[0]:
filter_type += 1
# Pre-warp frequencies for digital filter design
if not analog:
passb = tan(pi * wp / 2.0)
stopb = tan(pi * ws / 2.0)
else:
passb = wp * 1.0
stopb = ws * 1.0
if filter_type == 1: # low
nat = stopb / passb
elif filter_type == 2: # high
nat = passb / stopb
elif filter_type == 3: # stop
wp0 = optimize.fminbound(band_stop_obj, passb[0], stopb[0] - 1e-12,
args=(0, passb, stopb, gpass, gstop,
'butter'),
disp=0)
passb[0] = wp0
wp1 = optimize.fminbound(band_stop_obj, stopb[1] + 1e-12, passb[1],
args=(1, passb, stopb, gpass, gstop,
'butter'),
disp=0)
passb[1] = wp1
nat = ((stopb * (passb[0] - passb[1])) /
(stopb ** 2 - passb[0] * passb[1]))
elif filter_type == 4: # pass
nat = ((stopb ** 2 - passb[0] * passb[1]) /
(stopb * (passb[0] - passb[1])))
nat = min(abs(nat))
GSTOP = 10 ** (0.1 * abs(gstop))
GPASS = 10 ** (0.1 * abs(gpass))
ord = int(ceil(log10((GSTOP - 1.0) / (GPASS - 1.0)) / (2 * log10(nat))))
    # Find the Butterworth natural frequency WN (or the "3dB frequency")
# to give exactly gpass at passb.
try:
W0 = (GPASS - 1.0) ** (-1.0 / (2.0 * ord))
except ZeroDivisionError:
W0 = 1.0
print("Warning, order is zero...check input parameters.")
# now convert this frequency back from lowpass prototype
# to the original analog filter
if filter_type == 1: # low
WN = W0 * passb
elif filter_type == 2: # high
WN = passb / W0
elif filter_type == 3: # stop
WN = numpy.zeros(2, float)
discr = sqrt((passb[1] - passb[0]) ** 2 +
4 * W0 ** 2 * passb[0] * passb[1])
WN[0] = ((passb[1] - passb[0]) + discr) / (2 * W0)
WN[1] = ((passb[1] - passb[0]) - discr) / (2 * W0)
WN = numpy.sort(abs(WN))
elif filter_type == 4: # pass
W0 = numpy.array([-W0, W0], float)
WN = (-W0 * (passb[1] - passb[0]) / 2.0 +
sqrt(W0 ** 2 / 4.0 * (passb[1] - passb[0]) ** 2 +
passb[0] * passb[1]))
WN = numpy.sort(abs(WN))
else:
raise ValueError("Bad type: %s" % filter_type)
if not analog:
wn = (2.0 / pi) * arctan(WN)
else:
wn = WN
if len(wn) == 1:
wn = wn[0]
return ord, wn
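# End-to-end sketch: pick the order from a spec, then design and apply the
# filter. Illustrative only; ``x`` is an assumed 1-D input signal:
#
#     import numpy as np
#     from scipy import signal
#     x = np.random.randn(1000)                    # placeholder input signal
#     N, Wn = signal.buttord(wp=0.2, ws=0.3, gpass=3, gstop=40)
#     b, a = signal.butter(N, Wn, 'low')
#     y = signal.lfilter(b, a, x)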
def cheb1ord(wp, ws, gpass, gstop, analog=False):
"""Chebyshev type I filter order selection.
Return the order of the lowest order digital or analog Chebyshev Type I
filter that loses no more than `gpass` dB in the passband and has at
least `gstop` dB attenuation in the stopband.
Parameters
----------
wp, ws : float
Passband and stopband edge frequencies.
For digital filters, these are normalized from 0 to 1, where 1 is the
Nyquist frequency, pi radians/sample. (`wp` and `ws` are thus in
half-cycles / sample.) For example:
- Lowpass: wp = 0.2, ws = 0.3
- Highpass: wp = 0.3, ws = 0.2
- Bandpass: wp = [0.2, 0.5], ws = [0.1, 0.6]
- Bandstop: wp = [0.1, 0.6], ws = [0.2, 0.5]
For analog filters, `wp` and `ws` are angular frequencies (e.g. rad/s).
gpass : float
The maximum loss in the passband (dB).
gstop : float
The minimum attenuation in the stopband (dB).
analog : bool, optional
When True, return an analog filter, otherwise a digital filter is
returned.
Returns
-------
ord : int
The lowest order for a Chebyshev type I filter that meets specs.
wn : ndarray or float
The Chebyshev natural frequency (the "3dB frequency") for use with
`cheby1` to give filter results.
See Also
--------
cheby1 : Filter design using order and critical points
buttord : Find order and critical points from passband and stopband spec
cheb2ord, ellipord
iirfilter : General filter design using order and critical frequencies
iirdesign : General filter design using passband and stopband spec
Examples
--------
Design a digital lowpass filter such that the passband is within 3 dB up
to 0.2*(fs/2), while rejecting at least -40 dB above 0.3*(fs/2). Plot its
frequency response, showing the passband and stopband constraints in gray.
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> N, Wn = signal.cheb1ord(0.2, 0.3, 3, 40)
>>> b, a = signal.cheby1(N, 3, Wn, 'low')
>>> w, h = signal.freqz(b, a)
>>> plt.semilogx(w / np.pi, 20 * np.log10(abs(h)))
>>> plt.title('Chebyshev I lowpass filter fit to constraints')
>>> plt.xlabel('Normalized frequency')
>>> plt.ylabel('Amplitude [dB]')
>>> plt.grid(which='both', axis='both')
    >>> plt.fill([.01, 0.2, 0.2, .01], [-3, -3, -99, -99], '0.9', lw=0) # pass
    >>> plt.fill([0.3, 0.3, 2, 2], [ 9, -40, -40, 9], '0.9', lw=0) # stop
>>> plt.axis([0.08, 1, -60, 3])
>>> plt.show()
"""
wp = atleast_1d(wp)
ws = atleast_1d(ws)
filter_type = 2 * (len(wp) - 1)
if wp[0] < ws[0]:
filter_type += 1
else:
filter_type += 2
# Pre-warp frequencies for digital filter design
if not analog:
passb = tan(pi * wp / 2.0)
stopb = tan(pi * ws / 2.0)
else:
passb = wp * 1.0
stopb = ws * 1.0
if filter_type == 1: # low
nat = stopb / passb
elif filter_type == 2: # high
nat = passb / stopb
elif filter_type == 3: # stop
wp0 = optimize.fminbound(band_stop_obj, passb[0], stopb[0] - 1e-12,
args=(0, passb, stopb, gpass, gstop, 'cheby'),
disp=0)
passb[0] = wp0
wp1 = optimize.fminbound(band_stop_obj, stopb[1] + 1e-12, passb[1],
args=(1, passb, stopb, gpass, gstop, 'cheby'),
disp=0)
passb[1] = wp1
nat = ((stopb * (passb[0] - passb[1])) /
(stopb ** 2 - passb[0] * passb[1]))
elif filter_type == 4: # pass
nat = ((stopb ** 2 - passb[0] * passb[1]) /
(stopb * (passb[0] - passb[1])))
nat = min(abs(nat))
GSTOP = 10 ** (0.1 * abs(gstop))
GPASS = 10 ** (0.1 * abs(gpass))
ord = int(ceil(arccosh(sqrt((GSTOP - 1.0) / (GPASS - 1.0))) /
arccosh(nat)))
# Natural frequencies are just the passband edges
if not analog:
wn = (2.0 / pi) * arctan(passb)
else:
wn = passb
if len(wn) == 1:
wn = wn[0]
return ord, wn
def cheb2ord(wp, ws, gpass, gstop, analog=False):
"""Chebyshev type II filter order selection.
Return the order of the lowest order digital or analog Chebyshev Type II
filter that loses no more than `gpass` dB in the passband and has at least
`gstop` dB attenuation in the stopband.
Parameters
----------
wp, ws : float
Passband and stopband edge frequencies.
For digital filters, these are normalized from 0 to 1, where 1 is the
Nyquist frequency, pi radians/sample. (`wp` and `ws` are thus in
half-cycles / sample.) For example:
- Lowpass: wp = 0.2, ws = 0.3
- Highpass: wp = 0.3, ws = 0.2
- Bandpass: wp = [0.2, 0.5], ws = [0.1, 0.6]
- Bandstop: wp = [0.1, 0.6], ws = [0.2, 0.5]
For analog filters, `wp` and `ws` are angular frequencies (e.g. rad/s).
gpass : float
The maximum loss in the passband (dB).
gstop : float
The minimum attenuation in the stopband (dB).
analog : bool, optional
When True, return an analog filter, otherwise a digital filter is
returned.
Returns
-------
ord : int
The lowest order for a Chebyshev type II filter that meets specs.
wn : ndarray or float
The Chebyshev natural frequency (the "3dB frequency") for use with
`cheby2` to give filter results.
See Also
--------
cheby2 : Filter design using order and critical points
buttord : Find order and critical points from passband and stopband spec
cheb1ord, ellipord
iirfilter : General filter design using order and critical frequencies
iirdesign : General filter design using passband and stopband spec
Examples
--------
    Design a digital bandstop filter which attenuates by at least 60 dB from 0.2*(fs/2) to
0.5*(fs/2), while staying within 3 dB below 0.1*(fs/2) or above
0.6*(fs/2). Plot its frequency response, showing the passband and
stopband constraints in gray.
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> N, Wn = signal.cheb2ord([0.1, 0.6], [0.2, 0.5], 3, 60)
>>> b, a = signal.cheby2(N, 60, Wn, 'stop')
>>> w, h = signal.freqz(b, a)
>>> plt.semilogx(w / np.pi, 20 * np.log10(abs(h)))
>>> plt.title('Chebyshev II bandstop filter fit to constraints')
>>> plt.xlabel('Normalized frequency')
>>> plt.ylabel('Amplitude [dB]')
>>> plt.grid(which='both', axis='both')
    >>> plt.fill([.01, .1, .1, .01], [-3, -3, -99, -99], '0.9', lw=0) # pass
    >>> plt.fill([.2, .2, .5, .5], [ 9, -60, -60, 9], '0.9', lw=0) # stop
    >>> plt.fill([.6, .6, 2, 2], [-99, -3, -3, -99], '0.9', lw=0) # pass
>>> plt.axis([0.06, 1, -80, 3])
>>> plt.show()
"""
wp = atleast_1d(wp)
ws = atleast_1d(ws)
filter_type = 2 * (len(wp) - 1)
if wp[0] < ws[0]:
filter_type += 1
else:
filter_type += 2
# Pre-warp frequencies for digital filter design
if not analog:
passb = tan(pi * wp / 2.0)
stopb = tan(pi * ws / 2.0)
else:
passb = wp * 1.0
stopb = ws * 1.0
if filter_type == 1: # low
nat = stopb / passb
elif filter_type == 2: # high
nat = passb / stopb
elif filter_type == 3: # stop
wp0 = optimize.fminbound(band_stop_obj, passb[0], stopb[0] - 1e-12,
args=(0, passb, stopb, gpass, gstop, 'cheby'),
disp=0)
passb[0] = wp0
wp1 = optimize.fminbound(band_stop_obj, stopb[1] + 1e-12, passb[1],
args=(1, passb, stopb, gpass, gstop, 'cheby'),
disp=0)
passb[1] = wp1
nat = ((stopb * (passb[0] - passb[1])) /
(stopb ** 2 - passb[0] * passb[1]))
elif filter_type == 4: # pass
nat = ((stopb ** 2 - passb[0] * passb[1]) /
(stopb * (passb[0] - passb[1])))
nat = min(abs(nat))
GSTOP = 10 ** (0.1 * abs(gstop))
GPASS = 10 ** (0.1 * abs(gpass))
ord = int(ceil(arccosh(sqrt((GSTOP - 1.0) / (GPASS - 1.0))) /
arccosh(nat)))
# Find frequency where analog response is -gpass dB.
# Then convert back from low-pass prototype to the original filter.
new_freq = cosh(1.0 / ord * arccosh(sqrt((GSTOP - 1.0) / (GPASS - 1.0))))
new_freq = 1.0 / new_freq
if filter_type == 1:
nat = passb / new_freq
elif filter_type == 2:
nat = passb * new_freq
elif filter_type == 3:
nat = numpy.zeros(2, float)
nat[0] = (new_freq / 2.0 * (passb[0] - passb[1]) +
sqrt(new_freq ** 2 * (passb[1] - passb[0]) ** 2 / 4.0 +
passb[1] * passb[0]))
nat[1] = passb[1] * passb[0] / nat[0]
elif filter_type == 4:
nat = numpy.zeros(2, float)
nat[0] = (1.0 / (2.0 * new_freq) * (passb[0] - passb[1]) +
sqrt((passb[1] - passb[0]) ** 2 / (4.0 * new_freq ** 2) +
passb[1] * passb[0]))
nat[1] = passb[0] * passb[1] / nat[0]
if not analog:
wn = (2.0 / pi) * arctan(nat)
else:
wn = nat
if len(wn) == 1:
wn = wn[0]
return ord, wn
def ellipord(wp, ws, gpass, gstop, analog=False):
"""Elliptic (Cauer) filter order selection.
Return the order of the lowest order digital or analog elliptic filter
that loses no more than `gpass` dB in the passband and has at least
`gstop` dB attenuation in the stopband.
Parameters
----------
wp, ws : float
Passband and stopband edge frequencies.
For digital filters, these are normalized from 0 to 1, where 1 is the
Nyquist frequency, pi radians/sample. (`wp` and `ws` are thus in
half-cycles / sample.) For example:
- Lowpass: wp = 0.2, ws = 0.3
- Highpass: wp = 0.3, ws = 0.2
- Bandpass: wp = [0.2, 0.5], ws = [0.1, 0.6]
- Bandstop: wp = [0.1, 0.6], ws = [0.2, 0.5]
For analog filters, `wp` and `ws` are angular frequencies (e.g. rad/s).
gpass : float
The maximum loss in the passband (dB).
gstop : float
The minimum attenuation in the stopband (dB).
analog : bool, optional
When True, return an analog filter, otherwise a digital filter is
returned.
Returns
-------
ord : int
The lowest order for an Elliptic (Cauer) filter that meets specs.
wn : ndarray or float
        The elliptic natural frequency (the "3dB frequency") for use with
`ellip` to give filter results.
See Also
--------
ellip : Filter design using order and critical points
buttord : Find order and critical points from passband and stopband spec
cheb1ord, cheb2ord
iirfilter : General filter design using order and critical frequencies
iirdesign : General filter design using passband and stopband spec
Examples
--------
Design an analog highpass filter such that the passband is within 3 dB
    above 30 rad/s, while attenuating by at least 60 dB at 10 rad/s. Plot its
frequency response, showing the passband and stopband constraints in gray.
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> N, Wn = signal.ellipord(30, 10, 3, 60, True)
>>> b, a = signal.ellip(N, 3, 60, Wn, 'high', True)
>>> w, h = signal.freqs(b, a, np.logspace(0, 3, 500))
>>> plt.semilogx(w, 20 * np.log10(abs(h)))
>>> plt.title('Elliptical highpass filter fit to constraints')
>>> plt.xlabel('Frequency [radians / second]')
>>> plt.ylabel('Amplitude [dB]')
>>> plt.grid(which='both', axis='both')
>>> plt.fill([.1, 10, 10, .1], [1e4, 1e4, -60, -60], '0.9', lw=0) # stop
>>> plt.fill([30, 30, 1e9, 1e9], [-99, -3, -3, -99], '0.9', lw=0) # pass
>>> plt.axis([1, 300, -80, 3])
>>> plt.show()
"""
wp = atleast_1d(wp)
ws = atleast_1d(ws)
filter_type = 2 * (len(wp) - 1)
filter_type += 1
if wp[0] >= ws[0]:
filter_type += 1
# Pre-warp frequencies for digital filter design
if not analog:
passb = tan(pi * wp / 2.0)
stopb = tan(pi * ws / 2.0)
else:
passb = wp * 1.0
stopb = ws * 1.0
if filter_type == 1: # low
nat = stopb / passb
elif filter_type == 2: # high
nat = passb / stopb
elif filter_type == 3: # stop
wp0 = optimize.fminbound(band_stop_obj, passb[0], stopb[0] - 1e-12,
args=(0, passb, stopb, gpass, gstop, 'ellip'),
disp=0)
passb[0] = wp0
wp1 = optimize.fminbound(band_stop_obj, stopb[1] + 1e-12, passb[1],
args=(1, passb, stopb, gpass, gstop, 'ellip'),
disp=0)
passb[1] = wp1
nat = ((stopb * (passb[0] - passb[1])) /
(stopb ** 2 - passb[0] * passb[1]))
elif filter_type == 4: # pass
nat = ((stopb ** 2 - passb[0] * passb[1]) /
(stopb * (passb[0] - passb[1])))
nat = min(abs(nat))
GSTOP = 10 ** (0.1 * gstop)
GPASS = 10 ** (0.1 * gpass)
arg1 = sqrt((GPASS - 1.0) / (GSTOP - 1.0))
arg0 = 1.0 / nat
d0 = special.ellipk([arg0 ** 2, 1 - arg0 ** 2])
d1 = special.ellipk([arg1 ** 2, 1 - arg1 ** 2])
ord = int(ceil(d0[0] * d1[1] / (d0[1] * d1[0])))
if not analog:
wn = arctan(passb) * 2.0 / pi
else:
wn = passb
if len(wn) == 1:
wn = wn[0]
return ord, wn
def buttap(N):
"""Return (z,p,k) for analog prototype of Nth order Butterworth filter.
The filter will have an angular (e.g. rad/s) cutoff frequency of 1.
"""
if abs(int(N)) != N:
raise ValueError("Filter order must be a nonnegative integer")
z = numpy.array([])
m = numpy.arange(-N+1, N, 2)
# Middle value is 0 to ensure an exactly real pole
p = -numpy.exp(1j * pi * m / (2 * N))
k = 1
return z, p, k
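# Sketch of how an analog prototype becomes a usable filter. This roughly
# mirrors the path taken inside `iirfilter` for ``output='ba'`` (the frequency
# pre-warping step is omitted here for brevity; the 10 Hz cutoff and 100 Hz
# sample rate are arbitrary illustrative values):
#
#     import numpy as np
#     from scipy import signal
#     z, p, k = signal.buttap(4)                       # prototype, cutoff 1 rad/s
#     b, a = signal.zpk2tf(z, p, k)
#     b, a = signal.lp2lp(b, a, wo=2 * np.pi * 10)     # scale to 10 Hz (in rad/s)
#     bz, az = signal.bilinear(b, a, fs=100.0)         # discretize at 100 Hz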
def cheb1ap(N, rp):
"""
Return (z,p,k) for Nth order Chebyshev type I analog lowpass filter.
The returned filter prototype has `rp` decibels of ripple in the passband.
The filter's angular (e.g. rad/s) cutoff frequency is normalized to 1,
defined as the point at which the gain first drops below ``-rp``.
"""
if abs(int(N)) != N:
raise ValueError("Filter order must be a nonnegative integer")
elif N == 0:
# Avoid divide-by-zero error
# Even order filters have DC gain of -rp dB
return numpy.array([]), numpy.array([]), 10**(-rp/20)
z = numpy.array([])
# Ripple factor (epsilon)
eps = numpy.sqrt(10 ** (0.1 * rp) - 1.0)
mu = 1.0 / N * arcsinh(1 / eps)
# Arrange poles in an ellipse on the left half of the S-plane
m = numpy.arange(-N+1, N, 2)
theta = pi * m / (2*N)
p = -sinh(mu + 1j*theta)
k = numpy.prod(-p, axis=0).real
if N % 2 == 0:
k = k / sqrt((1 + eps * eps))
return z, p, k
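# Quick self-check sketch for this prototype (illustrative only): the analog
# passband gain should stay within ``rp`` dB of unity up to the normalized
# cutoff of 1 rad/s; the tolerance below is loose to absorb numerical error:
#
#     import numpy as np
#     from scipy import signal
#     z, p, k = signal.cheb1ap(5, rp=1)
#     b, a = signal.zpk2tf(z, p, k)
#     w, h = signal.freqs(b, a, worN=np.linspace(1e-3, 1.0, 500))
#     assert 20 * np.log10(np.abs(h)).min() > -1.01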
def cheb2ap(N, rs):
"""
    Return (z,p,k) for Nth order Chebyshev type II analog lowpass filter.
The returned filter prototype has `rs` decibels of ripple in the stopband.
The filter's angular (e.g. rad/s) cutoff frequency is normalized to 1,
defined as the point at which the gain first reaches ``-rs``.
"""
if abs(int(N)) != N:
raise ValueError("Filter order must be a nonnegative integer")
elif N == 0:
# Avoid divide-by-zero warning
return numpy.array([]), numpy.array([]), 1
# Ripple factor (epsilon)
de = 1.0 / sqrt(10 ** (0.1 * rs) - 1)
mu = arcsinh(1.0 / de) / N
if N % 2:
m = numpy.concatenate((numpy.arange(-N+1, 0, 2),
numpy.arange(2, N, 2)))
else:
m = numpy.arange(-N+1, N, 2)
z = -conjugate(1j / sin(m * pi / (2.0 * N)))
# Poles around the unit circle like Butterworth
p = -exp(1j * pi * numpy.arange(-N+1, N, 2) / (2 * N))
# Warp into Chebyshev II
p = sinh(mu) * p.real + 1j * cosh(mu) * p.imag
p = 1.0 / p
k = (numpy.prod(-p, axis=0) / numpy.prod(-z, axis=0)).real
return z, p, k
EPSILON = 2e-16
def _vratio(u, ineps, mp):
    # Objective used by `ellipap`: distance between the target 1/eps and
    # sn(u)/cn(u) for Jacobi elliptic functions with parameter `mp`.
    [s, c, d, phi] = special.ellipj(u, mp)
    ret = abs(ineps - s / c)
    return ret
def _kratio(m, k_ratio):
    # Objective used by `ellipap`: difference between K(m)/K(1-m) and the
    # requested ratio, with `m` clipped to the valid interval [0, 1].
    m = float(m)
    if m < 0:
        m = 0.0
    if m > 1:
        m = 1.0
    if abs(m) > EPSILON and (abs(m) + EPSILON) < 1:
        k = special.ellipk([m, 1 - m])
        r = k[0] / k[1] - k_ratio
    elif abs(m) > EPSILON:
        r = -k_ratio
    else:
        r = 1e20
    return abs(r)
def ellipap(N, rp, rs):
"""Return (z,p,k) of Nth order elliptic analog lowpass filter.
The filter is a normalized prototype that has `rp` decibels of ripple
in the passband and a stopband `rs` decibels down.
The filter's angular (e.g. rad/s) cutoff frequency is normalized to 1,
defined as the point at which the gain first drops below ``-rp``.
References
----------
Lutova, Tosic, and Evans, "Filter Design for Signal Processing", Chapters 5
and 12.
"""
if abs(int(N)) != N:
raise ValueError("Filter order must be a nonnegative integer")
elif N == 0:
# Avoid divide-by-zero warning
# Even order filters have DC gain of -rp dB
return numpy.array([]), numpy.array([]), 10**(-rp/20)
elif N == 1:
p = -sqrt(1.0 / (10 ** (0.1 * rp) - 1.0))
k = -p
z = []
return asarray(z), asarray(p), k
eps = numpy.sqrt(10 ** (0.1 * rp) - 1)
ck1 = eps / numpy.sqrt(10 ** (0.1 * rs) - 1)
ck1p = numpy.sqrt(1 - ck1 * ck1)
if ck1p == 1:
raise ValueError("Cannot design a filter with given rp and rs"
" specifications.")
val = special.ellipk([ck1 * ck1, ck1p * ck1p])
if abs(1 - ck1p * ck1p) < EPSILON:
krat = 0
else:
krat = N * val[0] / val[1]
m = optimize.fmin(_kratio, [0.5], args=(krat,), maxfun=250, maxiter=250,
disp=0)
if m < 0 or m > 1:
m = optimize.fminbound(_kratio, 0, 1, args=(krat,), maxfun=250,
maxiter=250, disp=0)
capk = special.ellipk(m)
j = numpy.arange(1 - N % 2, N, 2)
jj = len(j)
[s, c, d, phi] = special.ellipj(j * capk / N, m * numpy.ones(jj))
snew = numpy.compress(abs(s) > EPSILON, s, axis=-1)
z = 1.0 / (sqrt(m) * snew)
z = 1j * z
z = numpy.concatenate((z, conjugate(z)))
r = optimize.fmin(_vratio, special.ellipk(m), args=(1. / eps, ck1p * ck1p),
maxfun=250, maxiter=250, disp=0)
v0 = capk * r / (N * val[0])
[sv, cv, dv, phi] = special.ellipj(v0, 1 - m)
p = -(c * d * sv * cv + 1j * s * dv) / (1 - (d * sv) ** 2.0)
if N % 2:
newp = numpy.compress(abs(p.imag) > EPSILON *
numpy.sqrt(numpy.sum(p * numpy.conjugate(p),
axis=0).real),
p, axis=-1)
p = numpy.concatenate((p, conjugate(newp)))
else:
p = numpy.concatenate((p, conjugate(p)))
k = (numpy.prod(-p, axis=0) / numpy.prod(-z, axis=0)).real
if N % 2 == 0:
k = k / numpy.sqrt((1 + eps * eps))
return z, p, k
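# Spec-check sketch for the elliptic prototype (illustrative only; the chosen
# order/ripple values and the frequency grid are arbitrary assumptions):
#
#     import numpy as np
#     from scipy import signal
#     z, p, k = signal.ellipap(4, rp=1, rs=40)
#     b, a = signal.zpk2tf(z, p, k)
#     w, h = signal.freqs(b, a, worN=np.logspace(-2, 2, 2000))
#     gain_db = 20 * np.log10(np.abs(h))
#     print(gain_db[w <= 1.0].min())    # close to -1 dB (passband ripple)
#     print(gain_db[w >= 10.0].max())   # roughly -40 dB or below (stopband)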
def besselap(N):
"""Return (z,p,k) for analog prototype of an Nth order Bessel filter.
The filter is normalized such that the filter asymptotes are the same as
a Butterworth filter of the same order with an angular (e.g. rad/s)
cutoff frequency of 1.
Parameters
----------
N : int
The order of the Bessel filter to return zeros, poles and gain for.
Values in the range 0-25 are supported.
Returns
-------
z : ndarray
Zeros. Is always an empty array.
p : ndarray
Poles.
k : scalar
Gain. Always 1.
"""
z = []
k = 1
if N == 0:
p = []
elif N == 1:
p = [-1]
elif N == 2:
p = [-.8660254037844386467637229 + .4999999999999999999999996j,
-.8660254037844386467637229 - .4999999999999999999999996j]
elif N == 3:
p = [-.9416000265332067855971980,
-.7456403858480766441810907 - .7113666249728352680992154j,
-.7456403858480766441810907 + .7113666249728352680992154j]
elif N == 4:
p = [-.6572111716718829545787781 - .8301614350048733772399715j,
-.6572111716718829545787788 + .8301614350048733772399715j,
-.9047587967882449459642637 - .2709187330038746636700923j,
-.9047587967882449459642624 + .2709187330038746636700926j]
elif N == 5:
p = [-.9264420773877602247196260,
-.8515536193688395541722677 - .4427174639443327209850002j,
-.8515536193688395541722677 + .4427174639443327209850002j,
-.5905759446119191779319432 - .9072067564574549539291747j,
-.5905759446119191779319432 + .9072067564574549539291747j]
elif N == 6:
p = [-.9093906830472271808050953 - .1856964396793046769246397j,
-.9093906830472271808050953 + .1856964396793046769246397j,
-.7996541858328288520243325 - .5621717346937317988594118j,
-.7996541858328288520243325 + .5621717346937317988594118j,
-.5385526816693109683073792 - .9616876881954277199245657j,
-.5385526816693109683073792 + .9616876881954277199245657j]
elif N == 7:
p = [-.9194871556490290014311619,
-.8800029341523374639772340 - .3216652762307739398381830j,
-.8800029341523374639772340 + .3216652762307739398381830j,
-.7527355434093214462291616 - .6504696305522550699212995j,
-.7527355434093214462291616 + .6504696305522550699212995j,
-.4966917256672316755024763 - 1.002508508454420401230220j,
-.4966917256672316755024763 + 1.002508508454420401230220j]
elif N == 8:
p = [-.9096831546652910216327629 - .1412437976671422927888150j,
-.9096831546652910216327629 + .1412437976671422927888150j,
-.8473250802359334320103023 - .4259017538272934994996429j,
-.8473250802359334320103023 + .4259017538272934994996429j,
-.7111381808485399250796172 - .7186517314108401705762571j,
-.7111381808485399250796172 + .7186517314108401705762571j,
-.4621740412532122027072175 - 1.034388681126901058116589j,
-.4621740412532122027072175 + 1.034388681126901058116589j]
elif N == 9:
p = [-.9154957797499037686769223,
-.8911217017079759323183848 - .2526580934582164192308115j,
-.8911217017079759323183848 + .2526580934582164192308115j,
-.8148021112269012975514135 - .5085815689631499483745341j,
-.8148021112269012975514135 + .5085815689631499483745341j,
-.6743622686854761980403401 - .7730546212691183706919682j,
-.6743622686854761980403401 + .7730546212691183706919682j,
-.4331415561553618854685942 - 1.060073670135929666774323j,
-.4331415561553618854685942 + 1.060073670135929666774323j]
elif N == 10:
p = [-.9091347320900502436826431 - .1139583137335511169927714j,
-.9091347320900502436826431 + .1139583137335511169927714j,
-.8688459641284764527921864 - .3430008233766309973110589j,
-.8688459641284764527921864 + .3430008233766309973110589j,
-.7837694413101441082655890 - .5759147538499947070009852j,
-.7837694413101441082655890 + .5759147538499947070009852j,
-.6417513866988316136190854 - .8175836167191017226233947j,
-.6417513866988316136190854 + .8175836167191017226233947j,
-.4083220732868861566219785 - 1.081274842819124562037210j,
-.4083220732868861566219785 + 1.081274842819124562037210j]
elif N == 11:
p = [-.9129067244518981934637318,
-.8963656705721166099815744 - .2080480375071031919692341j,
-.8963656705721166099815744 + .2080480375071031919692341j,
-.8453044014712962954184557 - .4178696917801248292797448j,
-.8453044014712962954184557 + .4178696917801248292797448j,
-.7546938934722303128102142 - .6319150050721846494520941j,
-.7546938934722303128102142 + .6319150050721846494520941j,
-.6126871554915194054182909 - .8547813893314764631518509j,
-.6126871554915194054182909 + .8547813893314764631518509j,
-.3868149510055090879155425 - 1.099117466763120928733632j,
-.3868149510055090879155425 + 1.099117466763120928733632j]
elif N == 12:
p = [-.9084478234140682638817772 - 95506365213450398415258360.0e-27j,
-.9084478234140682638817772 + 95506365213450398415258360.0e-27j,
-.8802534342016826507901575 - .2871779503524226723615457j,
-.8802534342016826507901575 + .2871779503524226723615457j,
-.8217296939939077285792834 - .4810212115100676440620548j,
-.8217296939939077285792834 + .4810212115100676440620548j,
-.7276681615395159454547013 - .6792961178764694160048987j,
-.7276681615395159454547013 + .6792961178764694160048987j,
-.5866369321861477207528215 - .8863772751320727026622149j,
-.5866369321861477207528215 + .8863772751320727026622149j,
-.3679640085526312839425808 - 1.114373575641546257595657j,
-.3679640085526312839425808 + 1.114373575641546257595657j]
elif N == 13:
p = [-.9110914665984182781070663,
-.8991314665475196220910718 - .1768342956161043620980863j,
-.8991314665475196220910718 + .1768342956161043620980863j,
-.8625094198260548711573628 - .3547413731172988997754038j,
-.8625094198260548711573628 + .3547413731172988997754038j,
-.7987460692470972510394686 - .5350752120696801938272504j,
-.7987460692470972510394686 + .5350752120696801938272504j,
-.7026234675721275653944062 - .7199611890171304131266374j,
-.7026234675721275653944062 + .7199611890171304131266374j,
-.5631559842430199266325818 - .9135900338325109684927731j,
-.5631559842430199266325818 + .9135900338325109684927731j,
-.3512792323389821669401925 - 1.127591548317705678613239j,
-.3512792323389821669401925 + 1.127591548317705678613239j]
elif N == 14:
p = [-.9077932138396487614720659 - 82196399419401501888968130.0e-27j,
-.9077932138396487614720659 + 82196399419401501888968130.0e-27j,
-.8869506674916445312089167 - .2470079178765333183201435j,
-.8869506674916445312089167 + .2470079178765333183201435j,
-.8441199160909851197897667 - .4131653825102692595237260j,
-.8441199160909851197897667 + .4131653825102692595237260j,
-.7766591387063623897344648 - .5819170677377608590492434j,
-.7766591387063623897344648 + .5819170677377608590492434j,
-.6794256425119233117869491 - .7552857305042033418417492j,
-.6794256425119233117869491 + .7552857305042033418417492j,
-.5418766775112297376541293 - .9373043683516919569183099j,
-.5418766775112297376541293 + .9373043683516919569183099j,
-.3363868224902037330610040 - 1.139172297839859991370924j,
-.3363868224902037330610040 + 1.139172297839859991370924j]
elif N == 15:
p = [-.9097482363849064167228581,
-.9006981694176978324932918 - .1537681197278439351298882j,
-.9006981694176978324932918 + .1537681197278439351298882j,
-.8731264620834984978337843 - .3082352470564267657715883j,
-.8731264620834984978337843 + .3082352470564267657715883j,
-.8256631452587146506294553 - .4642348752734325631275134j,
-.8256631452587146506294553 + .4642348752734325631275134j,
-.7556027168970728127850416 - .6229396358758267198938604j,
-.7556027168970728127850416 + .6229396358758267198938604j,
-.6579196593110998676999362 - .7862895503722515897065645j,
-.6579196593110998676999362 + .7862895503722515897065645j,
-.5224954069658330616875186 - .9581787261092526478889345j,
-.5224954069658330616875186 + .9581787261092526478889345j,
-.3229963059766444287113517 - 1.149416154583629539665297j,
-.3229963059766444287113517 + 1.149416154583629539665297j]
elif N == 16:
p = [-.9072099595087001356491337 - 72142113041117326028823950.0e-27j,
-.9072099595087001356491337 + 72142113041117326028823950.0e-27j,
-.8911723070323647674780132 - .2167089659900576449410059j,
-.8911723070323647674780132 + .2167089659900576449410059j,
-.8584264231521330481755780 - .3621697271802065647661080j,
-.8584264231521330481755780 + .3621697271802065647661080j,
-.8074790293236003885306146 - .5092933751171800179676218j,
-.8074790293236003885306146 + .5092933751171800179676218j,
-.7356166304713115980927279 - .6591950877860393745845254j,
-.7356166304713115980927279 + .6591950877860393745845254j,
-.6379502514039066715773828 - .8137453537108761895522580j,
-.6379502514039066715773828 + .8137453537108761895522580j,
-.5047606444424766743309967 - .9767137477799090692947061j,
-.5047606444424766743309967 + .9767137477799090692947061j,
-.3108782755645387813283867 - 1.158552841199330479412225j,
-.3108782755645387813283867 + 1.158552841199330479412225j]
elif N == 17:
p = [-.9087141161336397432860029,
-.9016273850787285964692844 - .1360267995173024591237303j,
-.9016273850787285964692844 + .1360267995173024591237303j,
-.8801100704438627158492165 - .2725347156478803885651973j,
-.8801100704438627158492165 + .2725347156478803885651973j,
-.8433414495836129204455491 - .4100759282910021624185986j,
-.8433414495836129204455491 + .4100759282910021624185986j,
-.7897644147799708220288138 - .5493724405281088674296232j,
-.7897644147799708220288138 + .5493724405281088674296232j,
-.7166893842372349049842743 - .6914936286393609433305754j,
-.7166893842372349049842743 + .6914936286393609433305754j,
-.6193710717342144521602448 - .8382497252826992979368621j,
-.6193710717342144521602448 + .8382497252826992979368621j,
-.4884629337672704194973683 - .9932971956316781632345466j,
-.4884629337672704194973683 + .9932971956316781632345466j,
-.2998489459990082015466971 - 1.166761272925668786676672j,
-.2998489459990082015466971 + 1.166761272925668786676672j]
elif N == 18:
p = [-.9067004324162775554189031 - 64279241063930693839360680.0e-27j,
-.9067004324162775554189031 + 64279241063930693839360680.0e-27j,
-.8939764278132455733032155 - .1930374640894758606940586j,
-.8939764278132455733032155 + .1930374640894758606940586j,
-.8681095503628830078317207 - .3224204925163257604931634j,
-.8681095503628830078317207 + .3224204925163257604931634j,
-.8281885016242836608829018 - .4529385697815916950149364j,
-.8281885016242836608829018 + .4529385697815916950149364j,
-.7726285030739558780127746 - .5852778162086640620016316j,
-.7726285030739558780127746 + .5852778162086640620016316j,
-.6987821445005273020051878 - .7204696509726630531663123j,
-.6987821445005273020051878 + .7204696509726630531663123j,
-.6020482668090644386627299 - .8602708961893664447167418j,
-.6020482668090644386627299 + .8602708961893664447167418j,
-.4734268069916151511140032 - 1.008234300314801077034158j,
-.4734268069916151511140032 + 1.008234300314801077034158j,
-.2897592029880489845789953 - 1.174183010600059128532230j,
-.2897592029880489845789953 + 1.174183010600059128532230j]
elif N == 19:
p = [-.9078934217899404528985092,
-.9021937639390660668922536 - .1219568381872026517578164j,
-.9021937639390660668922536 + .1219568381872026517578164j,
-.8849290585034385274001112 - .2442590757549818229026280j,
-.8849290585034385274001112 + .2442590757549818229026280j,
-.8555768765618421591093993 - .3672925896399872304734923j,
-.8555768765618421591093993 + .3672925896399872304734923j,
-.8131725551578197705476160 - .4915365035562459055630005j,
-.8131725551578197705476160 + .4915365035562459055630005j,
-.7561260971541629355231897 - .6176483917970178919174173j,
-.7561260971541629355231897 + .6176483917970178919174173j,
-.6818424412912442033411634 - .7466272357947761283262338j,
-.6818424412912442033411634 + .7466272357947761283262338j,
-.5858613321217832644813602 - .8801817131014566284786759j,
-.5858613321217832644813602 + .8801817131014566284786759j,
-.4595043449730988600785456 - 1.021768776912671221830298j,
-.4595043449730988600785456 + 1.021768776912671221830298j,
-.2804866851439370027628724 - 1.180931628453291873626003j,
-.2804866851439370027628724 + 1.180931628453291873626003j]
elif N == 20:
p = [-.9062570115576771146523497 - 57961780277849516990208850.0e-27j,
-.9062570115576771146523497 + 57961780277849516990208850.0e-27j,
-.8959150941925768608568248 - .1740317175918705058595844j,
-.8959150941925768608568248 + .1740317175918705058595844j,
-.8749560316673332850673214 - .2905559296567908031706902j,
-.8749560316673332850673214 + .2905559296567908031706902j,
-.8427907479956670633544106 - .4078917326291934082132821j,
-.8427907479956670633544106 + .4078917326291934082132821j,
-.7984251191290606875799876 - .5264942388817132427317659j,
-.7984251191290606875799876 + .5264942388817132427317659j,
-.7402780309646768991232610 - .6469975237605228320268752j,
-.7402780309646768991232610 + .6469975237605228320268752j,
-.6658120544829934193890626 - .7703721701100763015154510j,
-.6658120544829934193890626 + .7703721701100763015154510j,
-.5707026806915714094398061 - .8982829066468255593407161j,
-.5707026806915714094398061 + .8982829066468255593407161j,
-.4465700698205149555701841 - 1.034097702560842962315411j,
-.4465700698205149555701841 + 1.034097702560842962315411j,
-.2719299580251652601727704 - 1.187099379810885886139638j,
-.2719299580251652601727704 + 1.187099379810885886139638j]
elif N == 21:
p = [-.9072262653142957028884077,
-.9025428073192696303995083 - .1105252572789856480992275j,
-.9025428073192696303995083 + .1105252572789856480992275j,
-.8883808106664449854431605 - .2213069215084350419975358j,
-.8883808106664449854431605 + .2213069215084350419975358j,
-.8643915813643204553970169 - .3326258512522187083009453j,
-.8643915813643204553970169 + .3326258512522187083009453j,
-.8299435470674444100273463 - .4448177739407956609694059j,
-.8299435470674444100273463 + .4448177739407956609694059j,
-.7840287980408341576100581 - .5583186348022854707564856j,
-.7840287980408341576100581 + .5583186348022854707564856j,
-.7250839687106612822281339 - .6737426063024382240549898j,
-.7250839687106612822281339 + .6737426063024382240549898j,
-.6506315378609463397807996 - .7920349342629491368548074j,
-.6506315378609463397807996 + .7920349342629491368548074j,
-.5564766488918562465935297 - .9148198405846724121600860j,
-.5564766488918562465935297 + .9148198405846724121600860j,
-.4345168906815271799687308 - 1.045382255856986531461592j,
-.4345168906815271799687308 + 1.045382255856986531461592j,
-.2640041595834031147954813 - 1.192762031948052470183960j,
-.2640041595834031147954813 + 1.192762031948052470183960j]
elif N == 22:
p = [-.9058702269930872551848625 - 52774908289999045189007100.0e-27j,
-.9058702269930872551848625 + 52774908289999045189007100.0e-27j,
-.8972983138153530955952835 - .1584351912289865608659759j,
-.8972983138153530955952835 + .1584351912289865608659759j,
-.8799661455640176154025352 - .2644363039201535049656450j,
-.8799661455640176154025352 + .2644363039201535049656450j,
-.8534754036851687233084587 - .3710389319482319823405321j,
-.8534754036851687233084587 + .3710389319482319823405321j,
-.8171682088462720394344996 - .4785619492202780899653575j,
-.8171682088462720394344996 + .4785619492202780899653575j,
-.7700332930556816872932937 - .5874255426351153211965601j,
-.7700332930556816872932937 + .5874255426351153211965601j,
-.7105305456418785989070935 - .6982266265924524000098548j,
-.7105305456418785989070935 + .6982266265924524000098548j,
-.6362427683267827226840153 - .8118875040246347267248508j,
-.6362427683267827226840153 + .8118875040246347267248508j,
-.5430983056306302779658129 - .9299947824439872998916657j,
-.5430983056306302779658129 + .9299947824439872998916657j,
-.4232528745642628461715044 - 1.055755605227545931204656j,
-.4232528745642628461715044 + 1.055755605227545931204656j,
-.2566376987939318038016012 - 1.197982433555213008346532j,
-.2566376987939318038016012 + 1.197982433555213008346532j]
elif N == 23:
p = [-.9066732476324988168207439,
-.9027564979912504609412993 - .1010534335314045013252480j,
-.9027564979912504609412993 + .1010534335314045013252480j,
-.8909283242471251458653994 - .2023024699381223418195228j,
-.8909283242471251458653994 + .2023024699381223418195228j,
-.8709469395587416239596874 - .3039581993950041588888925j,
-.8709469395587416239596874 + .3039581993950041588888925j,
-.8423805948021127057054288 - .4062657948237602726779246j,
-.8423805948021127057054288 + .4062657948237602726779246j,
-.8045561642053176205623187 - .5095305912227258268309528j,
-.8045561642053176205623187 + .5095305912227258268309528j,
-.7564660146829880581478138 - .6141594859476032127216463j,
-.7564660146829880581478138 + .6141594859476032127216463j,
-.6965966033912705387505040 - .7207341374753046970247055j,
-.6965966033912705387505040 + .7207341374753046970247055j,
-.6225903228771341778273152 - .8301558302812980678845563j,
-.6225903228771341778273152 + .8301558302812980678845563j,
-.5304922463810191698502226 - .9439760364018300083750242j,
-.5304922463810191698502226 + .9439760364018300083750242j,
-.4126986617510148836149955 - 1.065328794475513585531053j,
-.4126986617510148836149955 + 1.065328794475513585531053j,
-.2497697202208956030229911 - 1.202813187870697831365338j,
-.2497697202208956030229911 + 1.202813187870697831365338j]
elif N == 24:
p = [-.9055312363372773709269407 - 48440066540478700874836350.0e-27j,
-.9055312363372773709269407 + 48440066540478700874836350.0e-27j,
-.8983105104397872954053307 - .1454056133873610120105857j,
-.8983105104397872954053307 + .1454056133873610120105857j,
-.8837358034555706623131950 - .2426335234401383076544239j,
-.8837358034555706623131950 + .2426335234401383076544239j,
-.8615278304016353651120610 - .3403202112618624773397257j,
-.8615278304016353651120610 + .3403202112618624773397257j,
-.8312326466813240652679563 - .4386985933597305434577492j,
-.8312326466813240652679563 + .4386985933597305434577492j,
-.7921695462343492518845446 - .5380628490968016700338001j,
-.7921695462343492518845446 + .5380628490968016700338001j,
-.7433392285088529449175873 - .6388084216222567930378296j,
-.7433392285088529449175873 + .6388084216222567930378296j,
-.6832565803536521302816011 - .7415032695091650806797753j,
-.6832565803536521302816011 + .7415032695091650806797753j,
-.6096221567378335562589532 - .8470292433077202380020454j,
-.6096221567378335562589532 + .8470292433077202380020454j,
-.5185914574820317343536707 - .9569048385259054576937721j,
-.5185914574820317343536707 + .9569048385259054576937721j,
-.4027853855197518014786978 - 1.074195196518674765143729j,
-.4027853855197518014786978 + 1.074195196518674765143729j,
-.2433481337524869675825448 - 1.207298683731972524975429j,
-.2433481337524869675825448 + 1.207298683731972524975429j]
elif N == 25:
p = [-.9062073871811708652496104,
-.9028833390228020537142561 - 93077131185102967450643820.0e-27j,
-.9028833390228020537142561 + 93077131185102967450643820.0e-27j,
-.8928551459883548836774529 - .1863068969804300712287138j,
-.8928551459883548836774529 + .1863068969804300712287138j,
-.8759497989677857803656239 - .2798521321771408719327250j,
-.8759497989677857803656239 + .2798521321771408719327250j,
-.8518616886554019782346493 - .3738977875907595009446142j,
-.8518616886554019782346493 + .3738977875907595009446142j,
-.8201226043936880253962552 - .4686668574656966589020580j,
-.8201226043936880253962552 + .4686668574656966589020580j,
-.7800496278186497225905443 - .5644441210349710332887354j,
-.7800496278186497225905443 + .5644441210349710332887354j,
-.7306549271849967721596735 - .6616149647357748681460822j,
-.7306549271849967721596735 + .6616149647357748681460822j,
-.6704827128029559528610523 - .7607348858167839877987008j,
-.6704827128029559528610523 + .7607348858167839877987008j,
-.5972898661335557242320528 - .8626676330388028512598538j,
-.5972898661335557242320528 + .8626676330388028512598538j,
-.5073362861078468845461362 - .9689006305344868494672405j,
-.5073362861078468845461362 + .9689006305344868494672405j,
-.3934529878191079606023847 - 1.082433927173831581956863j,
-.3934529878191079606023847 + 1.082433927173831581956863j,
-.2373280669322028974199184 - 1.211476658382565356579418j,
-.2373280669322028974199184 + 1.211476658382565356579418j]
else:
raise ValueError("Bessel Filter not supported for order %s" % N)
return asarray(z), asarray(p), k
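# Sketch: turning the tabulated Bessel prototype into a transfer function
# (illustrative only). The result is the analog prototype whose asymptotes
# match those of a Butterworth filter with a 1 rad/s cutoff:
#
#     from scipy import signal
#     z, p, k = signal.besselap(5)
#     b, a = signal.zpk2tf(z, p, k)     # analog H(s) = b(s) / a(s)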
filter_dict = {'butter': [buttap, buttord],
'butterworth': [buttap, buttord],
'cauer': [ellipap, ellipord],
'elliptic': [ellipap, ellipord],
'ellip': [ellipap, ellipord],
'bessel': [besselap],
'cheby1': [cheb1ap, cheb1ord],
'chebyshev1': [cheb1ap, cheb1ord],
'chebyshevi': [cheb1ap, cheb1ord],
'cheby2': [cheb2ap, cheb2ord],
'chebyshev2': [cheb2ap, cheb2ord],
'chebyshevii': [cheb2ap, cheb2ord],
}
band_dict = {'band': 'bandpass',
'bandpass': 'bandpass',
'pass': 'bandpass',
'bp': 'bandpass',
'bs': 'bandstop',
'bandstop': 'bandstop',
'bands': 'bandstop',
'stop': 'bandstop',
'l': 'lowpass',
'low': 'lowpass',
'lowpass': 'lowpass',
'lp': 'lowpass',
'high': 'highpass',
'highpass': 'highpass',
'h': 'highpass',
'hp': 'highpass',
}
| bsd-3-clause |
Philippe12/external_chromium_org | chrome/test/nacl_test_injection/buildbot_chrome_nacl_stage.py | 26 | 11131 | #!/usr/bin/python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Do all the steps required to build and test against nacl."""
import optparse
import os.path
import re
import shutil
import subprocess
import sys
import find_chrome
# Copied from buildbot/buildbot_lib.py
def TryToCleanContents(path, file_name_filter=lambda fn: True):
"""
Remove the contents of a directory without touching the directory itself.
Ignores all failures.
"""
if os.path.exists(path):
for fn in os.listdir(path):
TryToCleanPath(os.path.join(path, fn), file_name_filter)
# Copied from buildbot/buildbot_lib.py
def TryToCleanPath(path, file_name_filter=lambda fn: True):
"""
Removes a file or directory.
Ignores all failures.
"""
if os.path.exists(path):
if file_name_filter(path):
print 'Trying to remove %s' % path
if os.path.isdir(path):
shutil.rmtree(path, ignore_errors=True)
else:
try:
os.remove(path)
except Exception:
pass
else:
print 'Skipping %s' % path
# TODO(ncbray): this is somewhat unsafe. We should fix the underlying problem.
def CleanTempDir():
# Only delete files and directories like:
# a) C:\temp\83C4.tmp
# b) /tmp/.org.chromium.Chromium.EQrEzl
file_name_re = re.compile(
r'[\\/]([0-9a-fA-F]+\.tmp|\.org\.chrom\w+\.Chrom\w+\..+)$')
file_name_filter = lambda fn: file_name_re.search(fn) is not None
path = os.environ.get('TMP', os.environ.get('TEMP', '/tmp'))
if len(path) >= 4 and os.path.isdir(path):
print
print "Cleaning out the temp directory."
print
TryToCleanContents(path, file_name_filter)
else:
print
print "Cannot find temp directory, not cleaning it."
print
def RunCommand(cmd, cwd, env):
sys.stdout.write('\nRunning %s\n\n' % ' '.join(cmd))
sys.stdout.flush()
retcode = subprocess.call(cmd, cwd=cwd, env=env)
if retcode != 0:
sys.stdout.write('\nFailed: %s\n\n' % ' '.join(cmd))
sys.exit(retcode)
def RunTests(name, cmd, nacl_dir, env):
sys.stdout.write('\n\nBuilding files needed for %s testing...\n\n' % name)
RunCommand(cmd + ['do_not_run_tests=1', '-j8'], nacl_dir, env)
sys.stdout.write('\n\nRunning %s tests...\n\n' % name)
RunCommand(cmd, nacl_dir, env)
def BuildAndTest(options):
# Refuse to run under cygwin.
if sys.platform == 'cygwin':
raise Exception('I do not work under cygwin, sorry.')
  # By default, use the version of Python that is being used to run this script.
python = sys.executable
if sys.platform == 'darwin':
    # Mac 10.5 bots tend to use a particularly old version of Python, look for
# a newer version.
macpython27 = '/Library/Frameworks/Python.framework/Versions/2.7/bin/python'
if os.path.exists(macpython27):
python = macpython27
script_dir = os.path.dirname(os.path.abspath(__file__))
src_dir = os.path.dirname(os.path.dirname(os.path.dirname(script_dir)))
nacl_dir = os.path.join(src_dir, 'native_client')
# Decide platform specifics.
if options.browser_path:
chrome_filename = options.browser_path
else:
chrome_filename = find_chrome.FindChrome(src_dir, [options.mode])
if chrome_filename is None:
      raise Exception('Cannot find a chrome binary - specify one with '
'--browser_path?')
env = dict(os.environ)
if sys.platform in ['win32', 'cygwin']:
if options.bits == 64:
bits = 64
elif options.bits == 32:
bits = 32
elif '64' in os.environ.get('PROCESSOR_ARCHITECTURE', '') or \
'64' in os.environ.get('PROCESSOR_ARCHITEW6432', ''):
bits = 64
else:
bits = 32
msvs_path = ';'.join([
r'c:\Program Files\Microsoft Visual Studio 9.0\VC',
r'c:\Program Files (x86)\Microsoft Visual Studio 9.0\VC',
r'c:\Program Files\Microsoft Visual Studio 9.0\Common7\Tools',
r'c:\Program Files (x86)\Microsoft Visual Studio 9.0\Common7\Tools',
r'c:\Program Files\Microsoft Visual Studio 8\VC',
r'c:\Program Files (x86)\Microsoft Visual Studio 8\VC',
r'c:\Program Files\Microsoft Visual Studio 8\Common7\Tools',
r'c:\Program Files (x86)\Microsoft Visual Studio 8\Common7\Tools',
])
env['PATH'] += ';' + msvs_path
scons = [python, 'scons.py']
elif sys.platform == 'darwin':
if options.bits == 64:
bits = 64
elif options.bits == 32:
bits = 32
else:
p = subprocess.Popen(['file', chrome_filename], stdout=subprocess.PIPE)
(p_stdout, _) = p.communicate()
assert p.returncode == 0
if p_stdout.find('executable x86_64') >= 0:
bits = 64
else:
bits = 32
scons = [python, 'scons.py']
else:
p = subprocess.Popen(
'uname -m | '
'sed -e "s/i.86/ia32/;s/x86_64/x64/;s/amd64/x64/;s/arm.*/arm/"',
shell=True, stdout=subprocess.PIPE)
(p_stdout, _) = p.communicate()
assert p.returncode == 0
if options.bits == 64:
bits = 64
elif options.bits == 32:
bits = 32
elif p_stdout.find('64') >= 0:
bits = 64
else:
bits = 32
# xvfb-run has a 2-second overhead per invocation, so it is cheaper to wrap
# the entire build step rather than each test (browser_headless=1).
# We also need to make sure that there are at least 24 bits per pixel.
# https://code.google.com/p/chromium/issues/detail?id=316687
scons = [
'xvfb-run',
'--auto-servernum',
'--server-args', '-screen 0 1024x768x24',
python, 'scons.py',
]
if options.jobs > 1:
scons.append('-j%d' % options.jobs)
scons.append('disable_tests=%s' % options.disable_tests)
if options.buildbot is not None:
scons.append('buildbot=%s' % (options.buildbot,))
# Clean the output of the previous build.
# Incremental builds can get wedged in weird ways, so we're trading speed
# for reliability.
shutil.rmtree(os.path.join(nacl_dir, 'scons-out'), True)
  # Check that the HOST (not the target) is 64-bit; this emulates what
  # msvs_env.bat does.
if '64' in os.environ.get('PROCESSOR_ARCHITECTURE', '') or \
'64' in os.environ.get('PROCESSOR_ARCHITEW6432', ''):
# 64bit HOST
env['VS90COMNTOOLS'] = ('c:\\Program Files (x86)\\'
'Microsoft Visual Studio 9.0\\Common7\\Tools\\')
env['VS80COMNTOOLS'] = ('c:\\Program Files (x86)\\'
'Microsoft Visual Studio 8.0\\Common7\\Tools\\')
else:
# 32bit HOST
env['VS90COMNTOOLS'] = ('c:\\Program Files\\Microsoft Visual Studio 9.0\\'
'Common7\\Tools\\')
env['VS80COMNTOOLS'] = ('c:\\Program Files\\Microsoft Visual Studio 8.0\\'
'Common7\\Tools\\')
# Run nacl/chrome integration tests.
# Note that we have to add nacl_irt_test to --mode in order to get
# inbrowser_test_runner to run.
# TODO(mseaborn): Change it so that inbrowser_test_runner is not a
# special case.
cmd = scons + ['--verbose', '-k', 'platform=x86-%d' % bits,
'--mode=opt-host,nacl,nacl_irt_test',
'chrome_browser_path=%s' % chrome_filename,
]
if not options.integration_bot and not options.morenacl_bot:
cmd.append('disable_flaky_tests=1')
cmd.append('chrome_browser_tests')
# Propagate path to JSON output if present.
# Note that RunCommand calls sys.exit on errors, so potential errors
# from one command won't be overwritten by another one. Overwriting
# a successful results file with either success or failure is fine.
if options.json_build_results_output_file:
cmd.append('json_build_results_output_file=%s' %
options.json_build_results_output_file)
# Download the toolchain(s).
RunCommand([python,
os.path.join(nacl_dir, 'build', 'download_toolchains.py'),
'--no-arm-trusted', '--no-pnacl', 'TOOL_REVISIONS'],
nacl_dir, os.environ)
CleanTempDir()
if options.enable_newlib:
RunTests('nacl-newlib', cmd, nacl_dir, env)
if options.enable_glibc:
RunTests('nacl-glibc', cmd + ['--nacl_glibc'], nacl_dir, env)
def MakeCommandLineParser():
parser = optparse.OptionParser()
parser.add_option('-m', '--mode', dest='mode', default='Debug',
help='Debug/Release mode')
parser.add_option('-j', dest='jobs', default=1, type='int',
help='Number of parallel jobs')
parser.add_option('--enable_newlib', dest='enable_newlib', default=-1,
type='int', help='Run newlib tests?')
parser.add_option('--enable_glibc', dest='enable_glibc', default=-1,
type='int', help='Run glibc tests?')
parser.add_option('--json_build_results_output_file',
help='Path to a JSON file for machine-readable output.')
# Deprecated, but passed to us by a script in the Chrome repo.
# Replaced by --enable_glibc=0
parser.add_option('--disable_glibc', dest='disable_glibc',
action='store_true', default=False,
help='Do not test using glibc.')
parser.add_option('--disable_tests', dest='disable_tests',
type='string', default='',
help='Comma-separated list of tests to omit')
builder_name = os.environ.get('BUILDBOT_BUILDERNAME', '')
is_integration_bot = 'nacl-chrome' in builder_name
parser.add_option('--integration_bot', dest='integration_bot',
type='int', default=int(is_integration_bot),
help='Is this an integration bot?')
is_morenacl_bot = (
'More NaCl' in builder_name or
'naclmore' in builder_name)
parser.add_option('--morenacl_bot', dest='morenacl_bot',
type='int', default=int(is_morenacl_bot),
help='Is this a morenacl bot?')
# Not used on the bots, but handy for running the script manually.
parser.add_option('--bits', dest='bits', action='store',
type='int', default=None,
help='32/64')
parser.add_option('--browser_path', dest='browser_path', action='store',
type='string', default=None,
help='Path to the chrome browser.')
parser.add_option('--buildbot', dest='buildbot', action='store',
type='string', default=None,
help='Value passed to scons as buildbot= option.')
return parser
def Main():
parser = MakeCommandLineParser()
options, args = parser.parse_args()
if options.integration_bot and options.morenacl_bot:
parser.error('ERROR: cannot be both an integration bot and a morenacl bot')
# Set defaults for enabling newlib.
if options.enable_newlib == -1:
options.enable_newlib = 1
# Set defaults for enabling glibc.
if options.enable_glibc == -1:
if options.integration_bot or options.morenacl_bot:
options.enable_glibc = 1
else:
options.enable_glibc = 0
if args:
parser.error('ERROR: invalid argument')
BuildAndTest(options)
if __name__ == '__main__':
Main()
| bsd-3-clause |
xodus7/tensorflow | tensorflow/contrib/learn/python/learn/learn_io/io_test.py | 137 | 5063 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""tf.learn IO operation tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
# pylint: disable=wildcard-import
from tensorflow.contrib.learn.python import learn
from tensorflow.contrib.learn.python.learn import datasets
from tensorflow.contrib.learn.python.learn.estimators._sklearn import accuracy_score
from tensorflow.contrib.learn.python.learn.learn_io import *
from tensorflow.python.platform import test
# pylint: enable=wildcard-import
class IOTest(test.TestCase):
# pylint: disable=undefined-variable
"""tf.learn IO operation tests."""
def test_pandas_dataframe(self):
if HAS_PANDAS:
import pandas as pd # pylint: disable=g-import-not-at-top
random.seed(42)
iris = datasets.load_iris()
data = pd.DataFrame(iris.data)
labels = pd.DataFrame(iris.target)
classifier = learn.LinearClassifier(
feature_columns=learn.infer_real_valued_columns_from_input(data),
n_classes=3)
classifier.fit(data, labels, steps=100)
score = accuracy_score(labels[0], list(classifier.predict_classes(data)))
self.assertGreater(score, 0.5, "Failed with score = {0}".format(score))
else:
print("No pandas installed. pandas-related tests are skipped.")
def test_pandas_series(self):
if HAS_PANDAS:
import pandas as pd # pylint: disable=g-import-not-at-top
random.seed(42)
iris = datasets.load_iris()
data = pd.DataFrame(iris.data)
labels = pd.Series(iris.target)
classifier = learn.LinearClassifier(
feature_columns=learn.infer_real_valued_columns_from_input(data),
n_classes=3)
classifier.fit(data, labels, steps=100)
score = accuracy_score(labels, list(classifier.predict_classes(data)))
self.assertGreater(score, 0.5, "Failed with score = {0}".format(score))
def test_string_data_formats(self):
if HAS_PANDAS:
import pandas as pd # pylint: disable=g-import-not-at-top
with self.assertRaises(ValueError):
learn.io.extract_pandas_data(pd.DataFrame({"Test": ["A", "B"]}))
with self.assertRaises(ValueError):
learn.io.extract_pandas_labels(pd.DataFrame({"Test": ["A", "B"]}))
def test_dask_io(self):
if HAS_DASK and HAS_PANDAS:
import pandas as pd # pylint: disable=g-import-not-at-top
import dask.dataframe as dd # pylint: disable=g-import-not-at-top
# test dask.dataframe
df = pd.DataFrame(
dict(
a=list("aabbcc"), b=list(range(6))),
index=pd.date_range(
start="20100101", periods=6))
ddf = dd.from_pandas(df, npartitions=3)
extracted_ddf = extract_dask_data(ddf)
self.assertEqual(
extracted_ddf.divisions, (0, 2, 4, 6),
"Failed with divisions = {0}".format(extracted_ddf.divisions))
self.assertEqual(
extracted_ddf.columns.tolist(), ["a", "b"],
"Failed with columns = {0}".format(extracted_ddf.columns))
# test dask.series
labels = ddf["a"]
extracted_labels = extract_dask_labels(labels)
self.assertEqual(
extracted_labels.divisions, (0, 2, 4, 6),
"Failed with divisions = {0}".format(extracted_labels.divisions))
# labels should only have one column
with self.assertRaises(ValueError):
extract_dask_labels(ddf)
else:
print("No dask installed. dask-related tests are skipped.")
def test_dask_iris_classification(self):
if HAS_DASK and HAS_PANDAS:
import pandas as pd # pylint: disable=g-import-not-at-top
import dask.dataframe as dd # pylint: disable=g-import-not-at-top
random.seed(42)
iris = datasets.load_iris()
data = pd.DataFrame(iris.data)
data = dd.from_pandas(data, npartitions=2)
labels = pd.DataFrame(iris.target)
labels = dd.from_pandas(labels, npartitions=2)
classifier = learn.LinearClassifier(
feature_columns=learn.infer_real_valued_columns_from_input(data),
n_classes=3)
classifier.fit(data, labels, steps=100)
predictions = data.map_partitions(classifier.predict).compute()
score = accuracy_score(labels.compute(), predictions)
self.assertGreater(score, 0.5, "Failed with score = {0}".format(score))
if __name__ == "__main__":
test.main()
| apache-2.0 |
rajat1994/scikit-learn | examples/plot_kernel_approximation.py | 262 | 8004 | """
==================================================
Explicit feature map approximation for RBF kernels
==================================================
An example illustrating the approximation of the feature map
of an RBF kernel.
.. currentmodule:: sklearn.kernel_approximation
It shows how to use :class:`RBFSampler` and :class:`Nystroem` to
approximate the feature map of an RBF kernel for classification with an SVM on
the digits dataset. Results using a linear SVM in the original space, a linear
SVM using the approximate mappings and using a kernelized SVM are compared.
Timings and accuracy for varying amounts of Monte Carlo samplings (in the case
of :class:`RBFSampler`, which uses random Fourier features) and different sized
subsets of the training set (for :class:`Nystroem`) for the approximate mapping
are shown.
Please note that the dataset here is not large enough to show the benefits
of kernel approximation, as the exact SVM is still reasonably fast.
Sampling more dimensions clearly leads to better classification results, but
comes at a greater cost. This means there is a tradeoff between runtime and
accuracy, given by the parameter n_components. Note that solving the Linear
SVM and also the approximate kernel SVM could be greatly accelerated by using
stochastic gradient descent via :class:`sklearn.linear_model.SGDClassifier`.
This is not easily possible for the case of the kernelized SVM.
The second plot visualizes the decision surfaces of the RBF kernel SVM and
the linear SVM with approximate kernel maps.
The plot shows decision surfaces of the classifiers projected onto
the first two principal components of the data. This visualization should
be taken with a grain of salt since it is just an interesting slice through
the decision surface in 64 dimensions. In particular note that
a datapoint (represented as a dot) is not necessarily classified
into the region it is lying in, since it does not lie on the plane
that the first two principal components span.
The usage of :class:`RBFSampler` and :class:`Nystroem` is described in detail
in :ref:`kernel_approximation`.
"""
print(__doc__)
# Author: Gael Varoquaux <gael dot varoquaux at normalesup dot org>
# Andreas Mueller <[email protected]>
# License: BSD 3 clause
# Standard scientific Python imports
import matplotlib.pyplot as plt
import numpy as np
from time import time
# Import datasets, classifiers and performance metrics
from sklearn import datasets, svm, pipeline
from sklearn.kernel_approximation import (RBFSampler,
Nystroem)
from sklearn.decomposition import PCA
# The digits dataset
digits = datasets.load_digits(n_class=9)
# To apply a classifier on this data, we need to flatten the images, to
# turn the data into a (samples, features) matrix:
n_samples = len(digits.data)
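# Scale the 0-16 pixel values down to [0, 1], then center each feature at zero mean.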
data = digits.data / 16.
data -= data.mean(axis=0)
# We learn the digits on the first half of the digits
data_train, targets_train = data[:n_samples // 2], digits.target[:n_samples // 2]
# Now predict the value of the digit on the second half:
data_test, targets_test = data[n_samples // 2:], digits.target[n_samples // 2:]
#data_test = scaler.transform(data_test)
# Create a classifier: a support vector classifier
kernel_svm = svm.SVC(gamma=.2)
linear_svm = svm.LinearSVC()
# create pipeline from kernel approximation
# and linear svm
feature_map_fourier = RBFSampler(gamma=.2, random_state=1)
feature_map_nystroem = Nystroem(gamma=.2, random_state=1)
fourier_approx_svm = pipeline.Pipeline([("feature_map", feature_map_fourier),
("svm", svm.LinearSVC())])
nystroem_approx_svm = pipeline.Pipeline([("feature_map", feature_map_nystroem),
("svm", svm.LinearSVC())])
# fit and predict using linear and kernel svm:
kernel_svm_time = time()
kernel_svm.fit(data_train, targets_train)
kernel_svm_score = kernel_svm.score(data_test, targets_test)
kernel_svm_time = time() - kernel_svm_time
linear_svm_time = time()
linear_svm.fit(data_train, targets_train)
linear_svm_score = linear_svm.score(data_test, targets_test)
linear_svm_time = time() - linear_svm_time
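# Feature-map dimensionalities to evaluate (Monte Carlo features for RBFSampler,
# training-subset sizes for Nystroem): 30, 60, ..., 270.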
sample_sizes = 30 * np.arange(1, 10)
fourier_scores = []
nystroem_scores = []
fourier_times = []
nystroem_times = []
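# For each dimensionality, time the fit of both approximate-kernel pipelines and
# record their accuracy on the test set.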
for D in sample_sizes:
fourier_approx_svm.set_params(feature_map__n_components=D)
nystroem_approx_svm.set_params(feature_map__n_components=D)
start = time()
nystroem_approx_svm.fit(data_train, targets_train)
nystroem_times.append(time() - start)
start = time()
fourier_approx_svm.fit(data_train, targets_train)
fourier_times.append(time() - start)
fourier_score = fourier_approx_svm.score(data_test, targets_test)
nystroem_score = nystroem_approx_svm.score(data_test, targets_test)
nystroem_scores.append(nystroem_score)
fourier_scores.append(fourier_score)
# plot the results:
plt.figure(figsize=(8, 8))
accuracy = plt.subplot(211)
# second subplot for timings
timescale = plt.subplot(212)
accuracy.plot(sample_sizes, nystroem_scores, label="Nystroem approx. kernel")
timescale.plot(sample_sizes, nystroem_times, '--',
label='Nystroem approx. kernel')
accuracy.plot(sample_sizes, fourier_scores, label="Fourier approx. kernel")
timescale.plot(sample_sizes, fourier_times, '--',
label='Fourier approx. kernel')
# horizontal lines for exact rbf and linear kernels:
accuracy.plot([sample_sizes[0], sample_sizes[-1]],
[linear_svm_score, linear_svm_score], label="linear svm")
timescale.plot([sample_sizes[0], sample_sizes[-1]],
[linear_svm_time, linear_svm_time], '--', label='linear svm')
accuracy.plot([sample_sizes[0], sample_sizes[-1]],
[kernel_svm_score, kernel_svm_score], label="rbf svm")
timescale.plot([sample_sizes[0], sample_sizes[-1]],
[kernel_svm_time, kernel_svm_time], '--', label='rbf svm')
# vertical line for dataset dimensionality = 64
accuracy.plot([64, 64], [0.7, 1], label="n_features")
# legends and labels
accuracy.set_title("Classification accuracy")
timescale.set_title("Training times")
accuracy.set_xlim(sample_sizes[0], sample_sizes[-1])
accuracy.set_xticks(())
accuracy.set_ylim(np.min(fourier_scores), 1)
timescale.set_xlabel("Sampling steps = transformed feature dimension")
accuracy.set_ylabel("Classification accuracy")
timescale.set_ylabel("Training time in seconds")
accuracy.legend(loc='best')
timescale.legend(loc='best')
# visualize the decision surface, projected down to the first
# two principal components of the dataset
pca = PCA(n_components=8).fit(data_train)
X = pca.transform(data_train)
# Generate grid along first two principal components
multiples = np.arange(-2, 2, 0.1)
# steps along first component
first = multiples[:, np.newaxis] * pca.components_[0, :]
# steps along second component
second = multiples[:, np.newaxis] * pca.components_[1, :]
# combine
grid = first[np.newaxis, :, :] + second[:, np.newaxis, :]
flat_grid = grid.reshape(-1, data.shape[1])
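# flat_grid has one row per grid point, expressed in the original 64-dimensional pixel space.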
# title for the plots
titles = ['SVC with rbf kernel',
'SVC (linear kernel)\n with Fourier rbf feature map\n'
'n_components=100',
'SVC (linear kernel)\n with Nystroem rbf feature map\n'
'n_components=100']
plt.tight_layout()
plt.figure(figsize=(12, 5))
# predict and plot
for i, clf in enumerate((kernel_svm, nystroem_approx_svm,
fourier_approx_svm)):
# Plot the decision boundary. For that, we will assign a color to each
    # point in the mesh [x_min, x_max] x [y_min, y_max].
plt.subplot(1, 3, i + 1)
Z = clf.predict(flat_grid)
# Put the result into a color plot
Z = Z.reshape(grid.shape[:-1])
plt.contourf(multiples, multiples, Z, cmap=plt.cm.Paired)
plt.axis('off')
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=targets_train, cmap=plt.cm.Paired)
plt.title(titles[i])
plt.tight_layout()
plt.show()
| bsd-3-clause |
duncan-brown/pycbc | examples/distributions/spin_examples.py | 14 | 1894 | import matplotlib.pyplot as plt
import numpy
import pycbc.coordinates as co
from pycbc import distributions
# We can choose any polar-angle bounds between 0 and pi for this distribution,
# but they are given in units of pi, so we use 0 to 1.
theta_low = 0.
theta_high = 1.
# The azimuthal angle runs from 0 to 2 pi, so in units of pi its bounds are 0 and 2.
phi_low = 0.
phi_high = 2.
# Create a distribution object from distributions.py. Here we use the
# UniformSolidAngle distribution, which takes
# polar_bounds=(theta_lower_bound, theta_upper_bound) and
# azimuthal_bounds=(phi_lower_bound, phi_upper_bound).
uniform_solid_angle_distribution = distributions.UniformSolidAngle(
polar_bounds=(theta_low,theta_high),
azimuthal_bounds=(phi_low,phi_high))
# Now we can draw random samples from that distribution. In this
# case we want 500,000 samples.
solid_angle_samples = uniform_solid_angle_distribution.rvs(size=500000)
# Make spins with unit length for coordinate transformation below.
spin_mag = numpy.ndarray(shape=(500000), dtype=float)
for i in range(0,500000):
spin_mag[i] = 1.
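# (A vectorized equivalent, assuming the same sample count, would be
#  spin_mag = numpy.ones(500000), which avoids the explicit Python loop.)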
# Use the pycbc.coordinates as co spherical_to_cartesian function to convert
# from spherical polar coordinates to cartesian coordinates
spinx, spiny, spinz = co.spherical_to_cartesian(spin_mag,
solid_angle_samples['phi'],
solid_angle_samples['theta'])
# Choose 50 bins for the histograms.
n_bins = 50
plt.figure(figsize=(10,10))
plt.subplot(2, 2, 1)
plt.hist(spinx, bins = n_bins)
plt.title('Spin x samples')
plt.subplot(2, 2, 2)
plt.hist(spiny, bins = n_bins)
plt.title('Spin y samples')
plt.subplot(2, 2, 3)
plt.hist(spinz, bins = n_bins)
plt.title('Spin z samples')
plt.tight_layout()
plt.show()
| gpl-3.0 |
Eric89GXL/mne-python | mne/time_frequency/tests/test_tfr.py | 2 | 32209 | from itertools import product
import datetime
import os.path as op
import numpy as np
from numpy.testing import (assert_array_equal, assert_equal, assert_allclose)
import pytest
import matplotlib.pyplot as plt
import mne
from mne import (Epochs, read_events, pick_types, create_info, EpochsArray,
Info, Transform)
from mne.io import read_raw_fif
from mne.utils import (_TempDir, run_tests_if_main, requires_h5py,
requires_pandas, grand_average, catch_logging)
from mne.time_frequency.tfr import (morlet, tfr_morlet, _make_dpss,
tfr_multitaper, AverageTFR, read_tfrs,
write_tfrs, combine_tfr, cwt, _compute_tfr,
EpochsTFR)
from mne.time_frequency import tfr_array_multitaper, tfr_array_morlet
from mne.viz.utils import _fake_click
from mne.tests.test_epochs import assert_metadata_equal
data_path = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data')
raw_fname = op.join(data_path, 'test_raw.fif')
event_fname = op.join(data_path, 'test-eve.fif')
raw_ctf_fname = op.join(data_path, 'test_ctf_raw.fif')
def test_tfr_ctf():
"""Test that TFRs can be calculated on CTF data."""
raw = read_raw_fif(raw_ctf_fname).crop(0, 1)
raw.apply_gradient_compensation(3)
events = mne.make_fixed_length_events(raw, duration=0.5)
epochs = mne.Epochs(raw, events)
for method in (tfr_multitaper, tfr_morlet):
method(epochs, [10], 1) # smoke test
def test_morlet():
"""Test morlet with and without zero mean."""
Wz = morlet(1000, [10], 2., zero_mean=True)
W = morlet(1000, [10], 2., zero_mean=False)
assert (np.abs(np.mean(np.real(Wz[0]))) < 1e-5)
assert (np.abs(np.mean(np.real(W[0]))) > 1e-3)
def test_time_frequency():
"""Test time-frequency transform (PSD and ITC)."""
# Set parameters
event_id = 1
tmin = -0.2
tmax = 0.498 # Allows exhaustive decimation testing
# Setup for reading the raw data
raw = read_raw_fif(raw_fname)
events = read_events(event_fname)
include = []
exclude = raw.info['bads'] + ['MEG 2443', 'EEG 053'] # bads + 2 more
# picks MEG gradiometers
picks = pick_types(raw.info, meg='grad', eeg=False,
stim=False, include=include, exclude=exclude)
picks = picks[:2]
epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks)
data = epochs.get_data()
times = epochs.times
nave = len(data)
epochs_nopicks = Epochs(raw, events, event_id, tmin, tmax)
freqs = np.arange(6, 20, 5) # define frequencies of interest
n_cycles = freqs / 4.
# Test first with a single epoch
power, itc = tfr_morlet(epochs[0], freqs=freqs, n_cycles=n_cycles,
use_fft=True, return_itc=True)
# Now compute evoked
evoked = epochs.average()
pytest.raises(ValueError, tfr_morlet, evoked, freqs, 1., return_itc=True)
power, itc = tfr_morlet(epochs, freqs=freqs, n_cycles=n_cycles,
use_fft=True, return_itc=True)
power_, itc_ = tfr_morlet(epochs, freqs=freqs, n_cycles=n_cycles,
use_fft=True, return_itc=True, decim=slice(0, 2))
# Test picks argument and average parameter
pytest.raises(ValueError, tfr_morlet, epochs, freqs=freqs,
n_cycles=n_cycles, return_itc=True, average=False)
power_picks, itc_picks = \
tfr_morlet(epochs_nopicks,
freqs=freqs, n_cycles=n_cycles, use_fft=True,
return_itc=True, picks=picks, average=True)
epochs_power_picks = \
tfr_morlet(epochs_nopicks,
freqs=freqs, n_cycles=n_cycles, use_fft=True,
return_itc=False, picks=picks, average=False)
power_picks_avg = epochs_power_picks.average()
# the actual data arrays here are equivalent, too...
assert_allclose(power.data, power_picks.data)
assert_allclose(power.data, power_picks_avg.data)
assert_allclose(itc.data, itc_picks.data)
# test on evoked
power_evoked = tfr_morlet(evoked, freqs, n_cycles, use_fft=True,
return_itc=False)
# one is squared magnitude of the average (evoked) and
# the other is average of the squared magnitudes (epochs PSD)
# so values shouldn't match, but shapes should
assert_array_equal(power.data.shape, power_evoked.data.shape)
pytest.raises(AssertionError, assert_allclose,
power.data, power_evoked.data)
# complex output
pytest.raises(ValueError, tfr_morlet, epochs, freqs, n_cycles,
return_itc=False, average=True, output="complex")
pytest.raises(ValueError, tfr_morlet, epochs, freqs, n_cycles,
output="complex", average=False, return_itc=True)
epochs_power_complex = tfr_morlet(epochs, freqs, n_cycles,
output="complex", average=False,
return_itc=False)
epochs_amplitude_2 = abs(epochs_power_complex)
epochs_amplitude_3 = epochs_amplitude_2.copy()
epochs_amplitude_3.data[:] = np.inf # test that it's actually copied
# test that the power computed via `complex` is equivalent to power
# computed within the method.
assert_allclose(epochs_amplitude_2.data**2, epochs_power_picks.data)
print(itc) # test repr
print(itc.ch_names) # test property
itc += power # test add
itc -= power # test sub
ret = itc * 23 # test mult
    itc = ret / 23  # test div
power = power.apply_baseline(baseline=(-0.1, 0), mode='logratio')
assert 'meg' in power
assert 'grad' in power
assert 'mag' not in power
assert 'eeg' not in power
assert power.nave == nave
assert itc.nave == nave
assert (power.data.shape == (len(picks), len(freqs), len(times)))
assert (power.data.shape == itc.data.shape)
assert (power_.data.shape == (len(picks), len(freqs), 2))
assert (power_.data.shape == itc_.data.shape)
assert (np.sum(itc.data >= 1) == 0)
assert (np.sum(itc.data <= 0) == 0)
# grand average
itc2 = itc.copy()
itc2.info['bads'] = [itc2.ch_names[0]] # test channel drop
gave = grand_average([itc2, itc])
assert gave.data.shape == (itc2.data.shape[0] - 1,
itc2.data.shape[1],
itc2.data.shape[2])
assert itc2.ch_names[1:] == gave.ch_names
assert gave.nave == 2
itc2.drop_channels(itc2.info["bads"])
assert_allclose(gave.data, itc2.data)
itc2.data = np.ones(itc2.data.shape)
itc.data = np.zeros(itc.data.shape)
itc2.nave = 2
itc.nave = 1
itc.drop_channels([itc.ch_names[0]])
combined_itc = combine_tfr([itc2, itc])
assert_allclose(combined_itc.data,
np.ones(combined_itc.data.shape) * 2 / 3)
# more tests
power, itc = tfr_morlet(epochs, freqs=freqs, n_cycles=2, use_fft=False,
return_itc=True)
assert (power.data.shape == (len(picks), len(freqs), len(times)))
assert (power.data.shape == itc.data.shape)
assert (np.sum(itc.data >= 1) == 0)
assert (np.sum(itc.data <= 0) == 0)
tfr = tfr_morlet(epochs[0], freqs, use_fft=True, n_cycles=2, average=False,
return_itc=False)
tfr_data = tfr.data[0]
assert (tfr_data.shape == (len(picks), len(freqs), len(times)))
tfr2 = tfr_morlet(epochs[0], freqs, use_fft=True, n_cycles=2,
decim=slice(0, 2), average=False,
return_itc=False).data[0]
assert (tfr2.shape == (len(picks), len(freqs), 2))
single_power = tfr_morlet(epochs, freqs, 2, average=False,
return_itc=False).data
single_power2 = tfr_morlet(epochs, freqs, 2, decim=slice(0, 2),
average=False, return_itc=False).data
single_power3 = tfr_morlet(epochs, freqs, 2, decim=slice(1, 3),
average=False, return_itc=False).data
single_power4 = tfr_morlet(epochs, freqs, 2, decim=slice(2, 4),
average=False, return_itc=False).data
assert_allclose(np.mean(single_power, axis=0), power.data)
assert_allclose(np.mean(single_power2, axis=0), power.data[:, :, :2])
assert_allclose(np.mean(single_power3, axis=0), power.data[:, :, 1:3])
assert_allclose(np.mean(single_power4, axis=0), power.data[:, :, 2:4])
power_pick = power.pick_channels(power.ch_names[:10:2])
assert_equal(len(power_pick.ch_names), len(power.ch_names[:10:2]))
assert_equal(power_pick.data.shape[0], len(power.ch_names[:10:2]))
power_drop = power.drop_channels(power.ch_names[1:10:2])
assert_equal(power_drop.ch_names, power_pick.ch_names)
assert_equal(power_pick.data.shape[0], len(power_drop.ch_names))
power_pick, power_drop = mne.equalize_channels([power_pick, power_drop])
assert_equal(power_pick.ch_names, power_drop.ch_names)
assert_equal(power_pick.data.shape, power_drop.data.shape)
# Test decimation:
# 2: multiple of len(times) even
# 3: multiple odd
# 8: not multiple, even
# 9: not multiple, odd
for decim in [2, 3, 8, 9]:
for use_fft in [True, False]:
power, itc = tfr_morlet(epochs, freqs=freqs, n_cycles=2,
use_fft=use_fft, return_itc=True,
decim=decim)
assert_equal(power.data.shape[2],
np.ceil(float(len(times)) / decim))
freqs = list(range(50, 55))
decim = 2
_, n_chan, n_time = data.shape
tfr = tfr_morlet(epochs[0], freqs, 2., decim=decim, average=False,
return_itc=False).data[0]
assert_equal(tfr.shape, (n_chan, len(freqs), n_time // decim))
# Test cwt modes
Ws = morlet(512, [10, 20], n_cycles=2)
pytest.raises(ValueError, cwt, data[0, :, :], Ws, mode='foo')
for use_fft in [True, False]:
for mode in ['same', 'valid', 'full']:
cwt(data[0], Ws, use_fft=use_fft, mode=mode)
# Test invalid frequency arguments
with pytest.raises(ValueError, match=" 'freqs' must be greater than 0"):
tfr_morlet(epochs, freqs=np.arange(0, 3), n_cycles=7)
with pytest.raises(ValueError, match=" 'freqs' must be greater than 0"):
tfr_morlet(epochs, freqs=np.arange(-4, -1), n_cycles=7)
# Test decim parameter checks
pytest.raises(TypeError, tfr_morlet, epochs, freqs=freqs,
n_cycles=n_cycles, use_fft=True, return_itc=True,
decim='decim')
# When convolving in time, wavelets must not be longer than the data
pytest.raises(ValueError, cwt, data[0, :, :Ws[0].size - 1], Ws,
use_fft=False)
with pytest.warns(UserWarning, match='one of the wavelets.*is longer'):
cwt(data[0, :, :Ws[0].size - 1], Ws, use_fft=True)
# Check for off-by-one errors when using wavelets with an even number of
# samples
psd = cwt(data[0], [Ws[0][:-1]], use_fft=False, mode='full')
assert_equal(psd.shape, (2, 1, 420))
def test_dpsswavelet():
"""Test DPSS tapers."""
freqs = np.arange(5, 25, 3)
Ws = _make_dpss(1000, freqs=freqs, n_cycles=freqs / 2., time_bandwidth=4.0,
zero_mean=True)
assert (len(Ws) == 3) # 3 tapers expected
# Check that zero mean is true
assert (np.abs(np.mean(np.real(Ws[0][0]))) < 1e-5)
assert (len(Ws[0]) == len(freqs)) # As many wavelets as asked for
@pytest.mark.slowtest
def test_tfr_multitaper():
"""Test tfr_multitaper."""
sfreq = 200.0
ch_names = ['SIM0001', 'SIM0002']
ch_types = ['grad', 'grad']
info = create_info(ch_names=ch_names, sfreq=sfreq, ch_types=ch_types)
n_times = int(sfreq) # Second long epochs
n_epochs = 3
seed = 42
rng = np.random.RandomState(seed)
noise = 0.1 * rng.randn(n_epochs, len(ch_names), n_times)
t = np.arange(n_times, dtype=np.float64) / sfreq
signal = np.sin(np.pi * 2. * 50. * t) # 50 Hz sinusoid signal
signal[np.logical_or(t < 0.45, t > 0.55)] = 0. # Hard windowing
on_time = np.logical_and(t >= 0.45, t <= 0.55)
signal[on_time] *= np.hanning(on_time.sum()) # Ramping
dat = noise + signal
reject = dict(grad=4000.)
events = np.empty((n_epochs, 3), int)
first_event_sample = 100
event_id = dict(sin50hz=1)
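    # Build one synthetic event per epoch: [onset sample, 0, event code].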
for k in range(n_epochs):
events[k, :] = first_event_sample + k * n_times, 0, event_id['sin50hz']
epochs = EpochsArray(data=dat, info=info, events=events, event_id=event_id,
reject=reject)
freqs = np.arange(35, 70, 5, dtype=np.float64)
power, itc = tfr_multitaper(epochs, freqs=freqs, n_cycles=freqs / 2.,
time_bandwidth=4.0)
power2, itc2 = tfr_multitaper(epochs, freqs=freqs, n_cycles=freqs / 2.,
time_bandwidth=4.0, decim=slice(0, 2))
picks = np.arange(len(ch_names))
power_picks, itc_picks = tfr_multitaper(epochs, freqs=freqs,
n_cycles=freqs / 2.,
time_bandwidth=4.0, picks=picks)
power_epochs = tfr_multitaper(epochs, freqs=freqs,
n_cycles=freqs / 2., time_bandwidth=4.0,
return_itc=False, average=False)
power_averaged = power_epochs.average()
power_evoked = tfr_multitaper(epochs.average(), freqs=freqs,
n_cycles=freqs / 2., time_bandwidth=4.0,
return_itc=False, average=False).average()
print(power_evoked) # test repr for EpochsTFR
# Test channel picking
power_epochs_picked = power_epochs.copy().drop_channels(['SIM0002'])
assert_equal(power_epochs_picked.data.shape, (3, 1, 7, 200))
assert_equal(power_epochs_picked.ch_names, ['SIM0001'])
pytest.raises(ValueError, tfr_multitaper, epochs,
freqs=freqs, n_cycles=freqs / 2.,
return_itc=True, average=False)
# test picks argument
assert_allclose(power.data, power_picks.data)
assert_allclose(power.data, power_averaged.data)
assert_allclose(power.times, power_epochs.times)
assert_allclose(power.times, power_averaged.times)
assert_equal(power.nave, power_averaged.nave)
assert_equal(power_epochs.data.shape, (3, 2, 7, 200))
assert_allclose(itc.data, itc_picks.data)
# one is squared magnitude of the average (evoked) and
# the other is average of the squared magnitudes (epochs PSD)
# so values shouldn't match, but shapes should
assert_array_equal(power.data.shape, power_evoked.data.shape)
pytest.raises(AssertionError, assert_allclose,
power.data, power_evoked.data)
tmax = t[np.argmax(itc.data[0, freqs == 50, :])]
fmax = freqs[np.argmax(power.data[1, :, t == 0.5])]
assert (tmax > 0.3 and tmax < 0.7)
assert not np.any(itc.data < 0.)
assert (fmax > 40 and fmax < 60)
assert (power2.data.shape == (len(picks), len(freqs), 2))
assert (power2.data.shape == itc2.data.shape)
# Test decim parameter checks and compatibility between wavelets length
# and instance length in the time dimension.
pytest.raises(TypeError, tfr_multitaper, epochs, freqs=freqs,
n_cycles=freqs / 2., time_bandwidth=4.0, decim=(1,))
pytest.raises(ValueError, tfr_multitaper, epochs, freqs=freqs,
n_cycles=1000, time_bandwidth=4.0)
# Test invalid frequency arguments
with pytest.raises(ValueError, match=" 'freqs' must be greater than 0"):
tfr_multitaper(epochs, freqs=np.arange(0, 3), n_cycles=7)
with pytest.raises(ValueError, match=" 'freqs' must be greater than 0"):
tfr_multitaper(epochs, freqs=np.arange(-4, -1), n_cycles=7)
def test_crop():
"""Test TFR cropping."""
data = np.zeros((3, 4, 5))
times = np.array([.1, .2, .3, .4, .5])
freqs = np.array([.10, .20, .30, .40])
info = mne.create_info(['MEG 001', 'MEG 002', 'MEG 003'], 1000.,
['mag', 'mag', 'mag'])
tfr = AverageTFR(info, data=data, times=times, freqs=freqs,
nave=20, comment='test', method='crazy-tfr')
tfr.crop(tmin=0.2)
assert_array_equal(tfr.times, [0.2, 0.3, 0.4, 0.5])
assert tfr.data.ndim == 3
assert tfr.data.shape[-1] == 4
tfr.crop(fmax=0.3)
assert_array_equal(tfr.freqs, [0.1, 0.2, 0.3])
assert tfr.data.ndim == 3
assert tfr.data.shape[-2] == 3
tfr.crop(tmin=0.3, tmax=0.4, fmin=0.1, fmax=0.2)
assert_array_equal(tfr.times, [0.3, 0.4])
assert tfr.data.ndim == 3
assert tfr.data.shape[-1] == 2
assert_array_equal(tfr.freqs, [0.1, 0.2])
assert tfr.data.shape[-2] == 2
@requires_h5py
@requires_pandas
def test_io():
"""Test TFR IO capacities."""
from pandas import DataFrame
tempdir = _TempDir()
fname = op.join(tempdir, 'test-tfr.h5')
data = np.zeros((3, 2, 3))
times = np.array([.1, .2, .3])
freqs = np.array([.10, .20])
info = mne.create_info(['MEG 001', 'MEG 002', 'MEG 003'], 1000.,
['mag', 'mag', 'mag'])
info['meas_date'] = datetime.datetime(year=2020, month=2, day=5,
tzinfo=datetime.timezone.utc)
info._check_consistency()
tfr = AverageTFR(info, data=data, times=times, freqs=freqs,
nave=20, comment='test', method='crazy-tfr')
tfr.save(fname)
tfr2 = read_tfrs(fname, condition='test')
assert isinstance(tfr2.info, Info)
assert isinstance(tfr2.info['dev_head_t'], Transform)
assert_array_equal(tfr.data, tfr2.data)
assert_array_equal(tfr.times, tfr2.times)
assert_array_equal(tfr.freqs, tfr2.freqs)
assert_equal(tfr.comment, tfr2.comment)
assert_equal(tfr.nave, tfr2.nave)
pytest.raises(IOError, tfr.save, fname)
tfr.comment = None
# test old meas_date
info['meas_date'] = (1, 2)
tfr.save(fname, overwrite=True)
assert_equal(read_tfrs(fname, condition=0).comment, tfr.comment)
tfr.comment = 'test-A'
tfr2.comment = 'test-B'
fname = op.join(tempdir, 'test2-tfr.h5')
write_tfrs(fname, [tfr, tfr2])
tfr3 = read_tfrs(fname, condition='test-A')
assert_equal(tfr.comment, tfr3.comment)
assert (isinstance(tfr.info, mne.Info))
tfrs = read_tfrs(fname, condition=None)
assert_equal(len(tfrs), 2)
tfr4 = tfrs[1]
assert_equal(tfr2.comment, tfr4.comment)
pytest.raises(ValueError, read_tfrs, fname, condition='nonono')
# Test save of EpochsTFR.
n_events = 5
data = np.zeros((n_events, 3, 2, 3))
# create fake metadata
rng = np.random.RandomState(42)
rt = np.round(rng.uniform(size=(n_events,)), 3)
trialtypes = np.array(['face', 'place'])
trial = trialtypes[(rng.uniform(size=(n_events,)) > .5).astype(int)]
meta = DataFrame(dict(RT=rt, Trial=trial))
# fake events and event_id
events = np.zeros([n_events, 3])
events[:, 0] = np.arange(n_events)
events[:, 2] = np.ones(n_events)
event_id = {'a/b': 1}
tfr = EpochsTFR(info, data=data, times=times, freqs=freqs,
comment='test', method='crazy-tfr', events=events,
event_id=event_id, metadata=meta)
tfr.save(fname, True)
read_tfr = read_tfrs(fname)[0]
assert_array_equal(tfr.data, read_tfr.data)
assert_metadata_equal(tfr.metadata, read_tfr.metadata)
assert_array_equal(tfr.events, read_tfr.events)
assert tfr.event_id == read_tfr.event_id
def test_plot():
"""Test TFR plotting."""
data = np.zeros((3, 2, 3))
times = np.array([.1, .2, .3])
freqs = np.array([.10, .20])
info = mne.create_info(['MEG 001', 'MEG 002', 'MEG 003'], 1000.,
['mag', 'mag', 'mag'])
tfr = AverageTFR(info, data=data, times=times, freqs=freqs,
nave=20, comment='test', method='crazy-tfr')
tfr.plot([1, 2], title='title', colorbar=False,
mask=np.ones(tfr.data.shape[1:], bool))
plt.close('all')
ax = plt.subplot2grid((2, 2), (0, 0))
ax2 = plt.subplot2grid((2, 2), (1, 1))
ax3 = plt.subplot2grid((2, 2), (0, 1))
tfr.plot(picks=[0, 1, 2], axes=[ax, ax2, ax3])
plt.close('all')
tfr.plot([1, 2], title='title', colorbar=False, exclude='bads')
plt.close('all')
tfr.plot_topo(picks=[1, 2])
plt.close('all')
fig = tfr.plot(picks=[1], cmap='RdBu_r') # interactive mode on by default
fig.canvas.key_press_event('up')
fig.canvas.key_press_event(' ')
fig.canvas.key_press_event('down')
fig.canvas.key_press_event(' ')
fig.canvas.key_press_event('+')
fig.canvas.key_press_event(' ')
fig.canvas.key_press_event('-')
fig.canvas.key_press_event(' ')
fig.canvas.key_press_event('pageup')
fig.canvas.key_press_event(' ')
fig.canvas.key_press_event('pagedown')
cbar = fig.get_axes()[0].CB # Fake dragging with mouse.
ax = cbar.cbar.ax
_fake_click(fig, ax, (0.1, 0.1))
_fake_click(fig, ax, (0.1, 0.2), kind='motion')
_fake_click(fig, ax, (0.1, 0.3), kind='release')
_fake_click(fig, ax, (0.1, 0.1), button=3)
_fake_click(fig, ax, (0.1, 0.2), button=3, kind='motion')
_fake_click(fig, ax, (0.1, 0.3), kind='release')
fig.canvas.scroll_event(0.5, 0.5, -0.5) # scroll down
fig.canvas.scroll_event(0.5, 0.5, 0.5) # scroll up
plt.close('all')
def test_plot_joint():
"""Test TFR joint plotting."""
raw = read_raw_fif(raw_fname)
times = np.linspace(-0.1, 0.1, 200)
n_freqs = 3
nave = 1
rng = np.random.RandomState(42)
data = rng.randn(len(raw.ch_names), n_freqs, len(times))
tfr = AverageTFR(raw.info, data, times, np.arange(n_freqs), nave)
topomap_args = {'res': 8, 'contours': 0, 'sensors': False}
for combine in ('mean', 'rms', None):
with catch_logging() as log:
tfr.plot_joint(title='auto', colorbar=True,
combine=combine, topomap_args=topomap_args,
verbose='debug')
plt.close('all')
log = log.getvalue()
assert 'Plotting topomap for grad data' in log
# check various timefreqs
for timefreqs in (
{(tfr.times[0], tfr.freqs[1]): (0.1, 0.5),
(tfr.times[-1], tfr.freqs[-1]): (0.2, 0.6)},
[(tfr.times[1], tfr.freqs[1])]):
tfr.plot_joint(timefreqs=timefreqs, topomap_args=topomap_args)
plt.close('all')
# test bad timefreqs
timefreqs = ([(-100, 1)], tfr.times[1], [1],
[(tfr.times[1], tfr.freqs[1], tfr.freqs[1])])
for these_timefreqs in timefreqs:
pytest.raises(ValueError, tfr.plot_joint, these_timefreqs)
# test that the object is not internally modified
tfr_orig = tfr.copy()
tfr.plot_joint(baseline=(0, None), exclude=[tfr.ch_names[0]],
topomap_args=topomap_args)
plt.close('all')
assert_array_equal(tfr.data, tfr_orig.data)
assert set(tfr.ch_names) == set(tfr_orig.ch_names)
assert set(tfr.times) == set(tfr_orig.times)
# test tfr with picked channels
tfr.pick_channels(tfr.ch_names[:-1])
tfr.plot_joint(title='auto', colorbar=True, topomap_args=topomap_args)
def test_add_channels():
"""Test tfr splitting / re-appending channel types."""
data = np.zeros((6, 2, 3))
times = np.array([.1, .2, .3])
freqs = np.array([.10, .20])
info = mne.create_info(
['MEG 001', 'MEG 002', 'MEG 003', 'EEG 001', 'EEG 002', 'STIM 001'],
1000., ['mag', 'mag', 'mag', 'eeg', 'eeg', 'stim'])
tfr = AverageTFR(info, data=data, times=times, freqs=freqs,
nave=20, comment='test', method='crazy-tfr')
tfr_eeg = tfr.copy().pick_types(meg=False, eeg=True)
tfr_meg = tfr.copy().pick_types(meg=True)
tfr_stim = tfr.copy().pick_types(meg=False, stim=True)
tfr_eeg_meg = tfr.copy().pick_types(meg=True, eeg=True)
tfr_new = tfr_meg.copy().add_channels([tfr_eeg, tfr_stim])
assert all(ch in tfr_new.ch_names
for ch in tfr_stim.ch_names + tfr_meg.ch_names)
tfr_new = tfr_meg.copy().add_channels([tfr_eeg])
have_all = all(ch in tfr_new.ch_names
for ch in tfr.ch_names if ch != 'STIM 001')
assert have_all
assert_array_equal(tfr_new.data, tfr_eeg_meg.data)
assert all(ch not in tfr_new.ch_names for ch in tfr_stim.ch_names)
# Now test errors
tfr_badsf = tfr_eeg.copy()
tfr_badsf.info['sfreq'] = 3.1415927
tfr_eeg = tfr_eeg.crop(-.1, .1)
pytest.raises(RuntimeError, tfr_meg.add_channels, [tfr_badsf])
pytest.raises(AssertionError, tfr_meg.add_channels, [tfr_eeg])
pytest.raises(ValueError, tfr_meg.add_channels, [tfr_meg])
pytest.raises(TypeError, tfr_meg.add_channels, tfr_badsf)
def test_compute_tfr():
"""Test _compute_tfr function."""
# Set parameters
event_id = 1
tmin = -0.2
tmax = 0.498 # Allows exhaustive decimation testing
# Setup for reading the raw data
raw = read_raw_fif(raw_fname)
events = read_events(event_fname)
exclude = raw.info['bads'] + ['MEG 2443', 'EEG 053'] # bads + 2 more
# picks MEG gradiometers
picks = pick_types(raw.info, meg='grad', eeg=False,
stim=False, include=[], exclude=exclude)
picks = picks[:2]
epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks)
data = epochs.get_data()
sfreq = epochs.info['sfreq']
freqs = np.arange(10, 20, 3).astype(float)
# Check all combination of options
for func, use_fft, zero_mean, output in product(
(tfr_array_multitaper, tfr_array_morlet), (False, True), (False, True),
('complex', 'power', 'phase',
'avg_power_itc', 'avg_power', 'itc')):
# Check exception
if (func == tfr_array_multitaper) and (output == 'phase'):
pytest.raises(NotImplementedError, func, data, sfreq=sfreq,
freqs=freqs, output=output)
continue
# Check runs
out = func(data, sfreq=sfreq, freqs=freqs, use_fft=use_fft,
zero_mean=zero_mean, n_cycles=2., output=output)
# Check shapes
shape = np.r_[data.shape[:2], len(freqs), data.shape[2]]
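        # Single-trial outputs are (n_epochs, n_channels, n_freqs, n_times);
        # averaged and ITC outputs drop the leading epochs axis.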
if ('avg' in output) or ('itc' in output):
assert_array_equal(shape[1:], out.shape)
else:
assert_array_equal(shape, out.shape)
# Check types
if output in ('complex', 'avg_power_itc'):
assert_equal(np.complex128, out.dtype)
else:
assert_equal(np.float64, out.dtype)
assert (np.all(np.isfinite(out)))
# Check errors params
for _data in (None, 'foo', data[0]):
pytest.raises(ValueError, _compute_tfr, _data, freqs, sfreq)
for _freqs in (None, 'foo', [[0]]):
pytest.raises(ValueError, _compute_tfr, data, _freqs, sfreq)
for _sfreq in (None, 'foo'):
pytest.raises(ValueError, _compute_tfr, data, freqs, _sfreq)
for key in ('output', 'method', 'use_fft', 'decim', 'n_jobs'):
for value in (None, 'foo'):
kwargs = {key: value} # FIXME pep8
pytest.raises(ValueError, _compute_tfr, data, freqs, sfreq,
**kwargs)
with pytest.raises(ValueError, match='above Nyquist'):
_compute_tfr(data, [sfreq], sfreq)
# No time_bandwidth param in morlet
pytest.raises(ValueError, _compute_tfr, data, freqs, sfreq,
method='morlet', time_bandwidth=1)
# No phase in multitaper XXX Check ?
pytest.raises(NotImplementedError, _compute_tfr, data, freqs, sfreq,
method='multitaper', output='phase')
# Inter-trial coherence tests
out = _compute_tfr(data, freqs, sfreq, output='itc', n_cycles=2.)
assert np.sum(out >= 1) == 0
assert np.sum(out <= 0) == 0
# Check decim shapes
# 2: multiple of len(times) even
# 3: multiple odd
# 8: not multiple, even
# 9: not multiple, odd
for decim in (2, 3, 8, 9, slice(0, 2), slice(1, 3), slice(2, 4)):
_decim = slice(None, None, decim) if isinstance(decim, int) else decim
n_time = len(np.arange(data.shape[2])[_decim])
shape = np.r_[data.shape[:2], len(freqs), n_time]
for method in ('multitaper', 'morlet'):
# Single trials
out = _compute_tfr(data, freqs, sfreq, method=method, decim=decim,
n_cycles=2.)
assert_array_equal(shape, out.shape)
# Averages
out = _compute_tfr(data, freqs, sfreq, method=method, decim=decim,
output='avg_power', n_cycles=2.)
assert_array_equal(shape[1:], out.shape)
@pytest.mark.parametrize('method', ('multitaper', 'morlet'))
@pytest.mark.parametrize('decim', (1, slice(1, None, 2), 3))
def test_compute_tfr_correct(method, decim):
"""Test that TFR actually gets us our freq back."""
sfreq = 1000.
t = np.arange(1000) / sfreq
f = 50.
data = np.sin(2 * np.pi * 50. * t)
data *= np.hanning(data.size)
data = data[np.newaxis, np.newaxis]
freqs = np.arange(10, 111, 10)
assert f in freqs
tfr = _compute_tfr(data, freqs, sfreq, method=method, decim=decim,
n_cycles=2)[0, 0]
assert freqs[np.argmax(np.abs(tfr).mean(-1))] == f
@requires_pandas
def test_getitem_epochsTFR():
"""Test GetEpochsMixin in the context of EpochsTFR."""
from pandas import DataFrame
# Setup for reading the raw data and select a few trials
raw = read_raw_fif(raw_fname)
events = read_events(event_fname)
n_events = 10
# create fake metadata
rng = np.random.RandomState(42)
rt = rng.uniform(size=(n_events,))
trialtypes = np.array(['face', 'place'])
trial = trialtypes[(rng.uniform(size=(n_events,)) > .5).astype(int)]
meta = DataFrame(dict(RT=rt, Trial=trial))
event_id = dict(a=1, b=2, c=3, d=4)
epochs = Epochs(raw, events[:n_events], event_id=event_id, metadata=meta,
decim=1)
freqs = np.arange(12., 17., 2.) # define frequencies of interest
n_cycles = freqs / 2. # 0.5 second time windows for all frequencies
# Choose time x (full) bandwidth product
time_bandwidth = 4.0 # With 0.5 s time windows, this gives 8 Hz smoothing
kwargs = dict(freqs=freqs, n_cycles=n_cycles, use_fft=True,
time_bandwidth=time_bandwidth, return_itc=False,
average=False, n_jobs=1)
power = tfr_multitaper(epochs, **kwargs)
# Check decim affects sfreq
power_decim = tfr_multitaper(epochs, decim=2, **kwargs)
assert power.info['sfreq'] / 2. == power_decim.info['sfreq']
# Check that power and epochs metadata is the same
assert_metadata_equal(epochs.metadata, power.metadata)
assert_metadata_equal(epochs[::2].metadata, power[::2].metadata)
assert_metadata_equal(epochs['RT < .5'].metadata,
power['RT < .5'].metadata)
# Check that get power is functioning
assert_array_equal(power[3:6].data, power.data[3:6])
assert_array_equal(power[3:6].events, power.events[3:6])
indx_check = (power.metadata['Trial'] == 'face')
try:
indx_check = indx_check.to_numpy()
except Exception:
pass # older Pandas
indx_check = indx_check.nonzero()
assert_array_equal(power['Trial == "face"'].events,
power.events[indx_check])
assert_array_equal(power['Trial == "face"'].data,
power.data[indx_check])
# Check that the wrong Key generates a Key Error for Metadata search
with pytest.raises(KeyError):
power['Trialz == "place"']
# Test length function
assert len(power) == n_events
assert len(power[3:6]) == 3
# Test iteration function
for ind, power_ep in enumerate(power):
assert_array_equal(power_ep, power.data[ind])
if ind == 5:
break
# Test that current state is maintained
assert_array_equal(power.next(), power.data[ind + 1])
run_tests_if_main()
| bsd-3-clause |
iismd17/scikit-learn | examples/mixture/plot_gmm_sin.py | 248 | 2747 | """
=================================
Gaussian Mixture Model Sine Curve
=================================
This example highlights the advantages of the Dirichlet Process:
complexity control and dealing with sparse data. The dataset is formed
by 100 points loosely spaced following a noisy sine curve. The fit by
the GMM class, using the expectation-maximization algorithm to fit a
mixture of 10 Gaussian components, finds too-small components and very
little structure. The fits by the Dirichlet process, however, show
that the model can either learn a global structure for the data (small
alpha) or easily interpolate to finding relevant local structure
(large alpha), never falling into the problems shown by the GMM class.
"""
import itertools
import numpy as np
from scipy import linalg
import matplotlib.pyplot as plt
import matplotlib as mpl
from sklearn import mixture
from sklearn.externals.six.moves import xrange
# Number of samples per component
n_samples = 100
# Generate random sample following a sine curve
np.random.seed(0)
X = np.zeros((n_samples, 2))
step = 4 * np.pi / n_samples
for i in xrange(X.shape[0]):
x = i * step - 6
X[i, 0] = x + np.random.normal(0, 0.1)
X[i, 1] = 3 * (np.sin(x) + np.random.normal(0, .2))
color_iter = itertools.cycle(['r', 'g', 'b', 'c', 'm'])
for i, (clf, title) in enumerate([
(mixture.GMM(n_components=10, covariance_type='full', n_iter=100),
"Expectation-maximization"),
(mixture.DPGMM(n_components=10, covariance_type='full', alpha=0.01,
n_iter=100),
"Dirichlet Process,alpha=0.01"),
(mixture.DPGMM(n_components=10, covariance_type='diag', alpha=100.,
n_iter=100),
"Dirichlet Process,alpha=100.")]):
clf.fit(X)
splot = plt.subplot(3, 1, 1 + i)
Y_ = clf.predict(X)
for i, (mean, covar, color) in enumerate(zip(
clf.means_, clf._get_covars(), color_iter)):
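        # The eigendecomposition of the covariance gives the ellipse axis
        # lengths (v) and its orientation (w) for plotting each component.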
v, w = linalg.eigh(covar)
u = w[0] / linalg.norm(w[0])
# as the DP will not use every component it has access to
# unless it needs it, we shouldn't plot the redundant
# components.
if not np.any(Y_ == i):
continue
plt.scatter(X[Y_ == i, 0], X[Y_ == i, 1], .8, color=color)
# Plot an ellipse to show the Gaussian component
angle = np.arctan(u[1] / u[0])
angle = 180 * angle / np.pi # convert to degrees
ell = mpl.patches.Ellipse(mean, v[0], v[1], 180 + angle, color=color)
ell.set_clip_box(splot.bbox)
ell.set_alpha(0.5)
splot.add_artist(ell)
plt.xlim(-6, 4 * np.pi - 6)
plt.ylim(-5, 5)
plt.title(title)
plt.xticks(())
plt.yticks(())
plt.show()
| bsd-3-clause |
pchmieli/h2o-3 | py2/testdir_single_jvm/test_GLM_hastie_shuffle.py | 20 | 5925 | import unittest, time, sys, random, copy
sys.path.extend(['.','..','../..','py'])
import h2o2 as h2o
import h2o_cmd, h2o_import as h2i, h2o_jobs, h2o_glm, h2o_util
from h2o_test import verboseprint, dump_json, OutputObj
# Dataset created from this:
# Elements of Statistical Learning 2nd Ed.; Hastie, Tibshirani, Friedman; Feb 2011
# example 10.2 page 357
# Ten features, standard independent Gaussian. Target y is:
# y[i] = 1 if sum(X[i]**2) > 9.34 else -1
# 9.34 is the median of a chi-squared random variable with 10 degrees of freedom
# (sum of squares of 10 standard Gaussians)
# http://www.stanford.edu/~hastie/local.ftp/Springer/ESLII_print5.pdf
# from sklearn.datasets import make_hastie_10_2
# import numpy as np
# i = 1000000
# f = 10
# (X,y) = make_hastie_10_2(n_samples=i,random_state=None)
# y.shape = (i,1)
# Y = np.hstack((X,y))
# np.savetxt('./1mx' + str(f) + '_hastie_10_2.data', Y, delimiter=',', fmt='%.2f');
def glm_doit(self, csvFilename, bucket, csvPathname, timeoutSecs=30):
print "\nStarting GLM of", csvFilename
# we can force a col type to enum now? with param columnTypes
# "Numeric"
# make the last column enum
# Instead of string for parse, make this a dictionary, with column index, value
# that's used for updating the ColumnTypes array before making it a string for parse
columnTypeDict = {10: 'Enum'}
parseResult = h2i.import_parse(bucket=bucket, path=csvPathname, columnTypeDict=columnTypeDict,
hex_key=csvFilename + ".hex", schema='put', timeoutSecs=30)
pA = h2o_cmd.ParseObj(parseResult)
iA = h2o_cmd.InspectObj(pA.parse_key)
parse_key = pA.parse_key
numRows = iA.numRows
numCols = iA.numCols
labelList = iA.labelList
for i in range(10):
print "Summary on column", i
# FIX! how come only 0 works here for column
co = h2o_cmd.runSummary(key=parse_key, column=i)
for k,v in co:
print k, v
expected = []
allowedDelta = 0
labelListUsed = list(labelList)
labelListUsed.remove('C11')
numColsUsed = numCols - 1
parameters = {
'validation_frame': parse_key,
'ignored_columns': None,
# FIX! for now just use a column that's binomial
'response_column': 'C11',
# FIX! when is this needed? redundant for binomial?
'balance_classes': False,
'max_after_balance_size': None,
'standardize': False,
'family': 'binomial',
'link': None,
'alpha': '[1e-4]',
'lambda': '[0.5,0.25, 0.1]',
'lambda_search': None,
'nlambdas': None,
'lambda_min_ratio': None,
# 'use_all_factor_levels': False,
}
start = time.time()
model_key = 'hastie_glm.hex'
bmResult = h2o.n0.build_model(
algo='glm',
model_id=model_key,
training_frame=parse_key,
parameters=parameters,
timeoutSecs=60)
bm = OutputObj(bmResult, 'bm')
modelResult = h2o.n0.models(key=model_key)
model = OutputObj(modelResult['models'][0]['output'], 'model')
h2o_glm.simpleCheckGLM(self, model, parameters, labelList, labelListUsed)
cmmResult = h2o.n0.compute_model_metrics(model=model_key, frame=parse_key, timeoutSecs=60)
cmm = OutputObj(cmmResult, 'cmm')
mmResult = h2o.n0.model_metrics(model=model_key, frame=parse_key, timeoutSecs=60)
mm = OutputObj(mmResult, 'mm')
prResult = h2o.n0.predict(model=model_key, frame=parse_key, timeoutSecs=60)
pr = OutputObj(prResult['model_metrics'][0]['predictions'], 'pr')
# compare this glm to the first one. since the files are replications, the results
# should be similar?
if self.validation1:
h2o_glm.compareToFirstGlm(self, 'AUC', validation, self.validation1)
else:
# self.validation1 = copy.deepcopy(validation)
self.validation1 = None
class Basic(unittest.TestCase):
def tearDown(self):
h2o.check_sandbox_for_errors()
@classmethod
def setUpClass(cls):
global SEED
SEED = h2o.setup_random_seed()
h2o.init(1)
global SYNDATASETS_DIR
SYNDATASETS_DIR = h2o.make_syn_dir()
@classmethod
def tearDownClass(cls):
h2o.tear_down_cloud()
validation1 = {}
def test_GLM_hastie_shuffle(self):
# gunzip it and cat it to create 2x and 4x replications in SYNDATASETS_DIR
# FIX! eventually we'll compare the 1x, 2x and 4x results like we do
# in other tests. (catdata?)
# This test also adds file shuffling, to see that row order doesn't matter
csvFilename = "1mx10_hastie_10_2.data.gz"
bucket = 'home-0xdiag-datasets'
csvPathname = 'standard' + '/' + csvFilename
fullPathname = h2i.find_folder_and_filename(bucket, csvPathname, returnFullPath=True)
glm_doit(self, csvFilename, bucket, csvPathname, timeoutSecs=30)
filename1x = "hastie_1x.data"
pathname1x = SYNDATASETS_DIR + '/' + filename1x
h2o_util.file_gunzip(fullPathname, pathname1x)
filename1xShuf = "hastie_1x.data_shuf"
pathname1xShuf = SYNDATASETS_DIR + '/' + filename1xShuf
h2o_util.file_shuffle(pathname1x, pathname1xShuf)
filename2x = "hastie_2x.data"
pathname2x = SYNDATASETS_DIR + '/' + filename2x
h2o_util.file_cat(pathname1xShuf, pathname1xShuf, pathname2x)
filename2xShuf = "hastie_2x.data_shuf"
pathname2xShuf = SYNDATASETS_DIR + '/' + filename2xShuf
h2o_util.file_shuffle(pathname2x, pathname2xShuf)
glm_doit(self, filename2xShuf, None, pathname2xShuf, timeoutSecs=45)
# too big to shuffle?
filename4x = "hastie_4x.data"
pathname4x = SYNDATASETS_DIR + '/' + filename4x
h2o_util.file_cat(pathname2xShuf,pathname2xShuf,pathname4x)
glm_doit(self,filename4x, None, pathname4x, timeoutSecs=120)
if __name__ == '__main__':
h2o.unit_main()
| apache-2.0 |
drpngx/tensorflow | tensorflow/python/estimator/inputs/pandas_io_test.py | 7 | 11057 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for pandas_io."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.estimator.inputs import pandas_io
from tensorflow.python.framework import errors
from tensorflow.python.platform import test
from tensorflow.python.training import coordinator
from tensorflow.python.training import queue_runner_impl
try:
# pylint: disable=g-import-not-at-top
import pandas as pd
HAS_PANDAS = True
except IOError:
# Pandas writes a temporary file during import. If it fails, don't use pandas.
HAS_PANDAS = False
except ImportError:
HAS_PANDAS = False
class PandasIoTest(test.TestCase):
def makeTestDataFrame(self):
index = np.arange(100, 104)
a = np.arange(4)
b = np.arange(32, 36)
x = pd.DataFrame({'a': a, 'b': b}, index=index)
y = pd.Series(np.arange(-32, -28), index=index)
return x, y
def makeTestDataFrameWithYAsDataFrame(self):
index = np.arange(100, 104)
a = np.arange(4)
b = np.arange(32, 36)
a_label = np.arange(10, 14)
b_label = np.arange(50, 54)
x = pd.DataFrame({'a': a, 'b': b}, index=index)
y = pd.DataFrame({'a_target': a_label, 'b_target': b_label}, index=index)
return x, y
def callInputFnOnce(self, input_fn, session):
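    # Build the input_fn graph, run it once under queue runners, and return the fetched batch.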
results = input_fn()
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(session, coord=coord)
result_values = session.run(results)
coord.request_stop()
coord.join(threads)
return result_values
def testPandasInputFn_IndexMismatch(self):
if not HAS_PANDAS:
return
x, _ = self.makeTestDataFrame()
y_noindex = pd.Series(np.arange(-32, -28))
with self.assertRaises(ValueError):
pandas_io.pandas_input_fn(
x, y_noindex, batch_size=2, shuffle=False, num_epochs=1)
def testPandasInputFn_RaisesWhenTargetColumnIsAList(self):
if not HAS_PANDAS:
return
x, y = self.makeTestDataFrame()
with self.assertRaisesRegexp(TypeError,
'target_column must be a string type'):
pandas_io.pandas_input_fn(x, y, batch_size=2,
shuffle=False,
num_epochs=1,
target_column=['one', 'two'])
def testPandasInputFn_NonBoolShuffle(self):
if not HAS_PANDAS:
return
x, _ = self.makeTestDataFrame()
y_noindex = pd.Series(np.arange(-32, -28))
with self.assertRaisesRegexp(ValueError,
'shuffle must be provided and explicitly '
'set as boolean'):
# Default shuffle is None
pandas_io.pandas_input_fn(x, y_noindex)
def testPandasInputFn_ProducesExpectedOutputs(self):
if not HAS_PANDAS:
return
with self.test_session() as session:
x, y = self.makeTestDataFrame()
input_fn = pandas_io.pandas_input_fn(
x, y, batch_size=2, shuffle=False, num_epochs=1)
features, target = self.callInputFnOnce(input_fn, session)
self.assertAllEqual(features['a'], [0, 1])
self.assertAllEqual(features['b'], [32, 33])
self.assertAllEqual(target, [-32, -31])
def testPandasInputFnWhenYIsDataFrame_ProducesExpectedOutput(self):
if not HAS_PANDAS:
return
with self.test_session() as session:
x, y = self.makeTestDataFrameWithYAsDataFrame()
input_fn = pandas_io.pandas_input_fn(
x, y, batch_size=2, shuffle=False, num_epochs=1)
features, targets = self.callInputFnOnce(input_fn, session)
self.assertAllEqual(features['a'], [0, 1])
self.assertAllEqual(features['b'], [32, 33])
self.assertAllEqual(targets['a_target'], [10, 11])
self.assertAllEqual(targets['b_target'], [50, 51])
def testPandasInputFnYIsDataFrame_HandlesOverlappingColumns(self):
if not HAS_PANDAS:
return
with self.test_session() as session:
x, y = self.makeTestDataFrameWithYAsDataFrame()
y = y.rename(columns={'a_target': 'a', 'b_target': 'b'})
input_fn = pandas_io.pandas_input_fn(
x, y, batch_size=2, shuffle=False, num_epochs=1)
features, targets = self.callInputFnOnce(input_fn, session)
self.assertAllEqual(features['a'], [0, 1])
self.assertAllEqual(features['b'], [32, 33])
self.assertAllEqual(targets['a'], [10, 11])
self.assertAllEqual(targets['b'], [50, 51])
def testPandasInputFnYIsDataFrame_HandlesOverlappingColumnsInTargets(self):
if not HAS_PANDAS:
return
with self.test_session() as session:
x, y = self.makeTestDataFrameWithYAsDataFrame()
y = y.rename(columns={'a_target': 'a', 'b_target': 'a_n'})
input_fn = pandas_io.pandas_input_fn(
x, y, batch_size=2, shuffle=False, num_epochs=1)
features, targets = self.callInputFnOnce(input_fn, session)
self.assertAllEqual(features['a'], [0, 1])
self.assertAllEqual(features['b'], [32, 33])
self.assertAllEqual(targets['a'], [10, 11])
self.assertAllEqual(targets['a_n'], [50, 51])
def testPandasInputFn_ProducesOutputsForLargeBatchAndMultipleEpochs(self):
if not HAS_PANDAS:
return
with self.test_session() as session:
index = np.arange(100, 102)
a = np.arange(2)
b = np.arange(32, 34)
x = pd.DataFrame({'a': a, 'b': b}, index=index)
y = pd.Series(np.arange(-32, -30), index=index)
input_fn = pandas_io.pandas_input_fn(
x, y, batch_size=128, shuffle=False, num_epochs=2)
results = input_fn()
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(session, coord=coord)
features, target = session.run(results)
self.assertAllEqual(features['a'], [0, 1, 0, 1])
self.assertAllEqual(features['b'], [32, 33, 32, 33])
self.assertAllEqual(target, [-32, -31, -32, -31])
with self.assertRaises(errors.OutOfRangeError):
session.run(results)
coord.request_stop()
coord.join(threads)
def testPandasInputFn_ProducesOutputsWhenDataSizeNotDividedByBatchSize(self):
if not HAS_PANDAS:
return
with self.test_session() as session:
index = np.arange(100, 105)
a = np.arange(5)
b = np.arange(32, 37)
x = pd.DataFrame({'a': a, 'b': b}, index=index)
y = pd.Series(np.arange(-32, -27), index=index)
input_fn = pandas_io.pandas_input_fn(
x, y, batch_size=2, shuffle=False, num_epochs=1)
results = input_fn()
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(session, coord=coord)
features, target = session.run(results)
self.assertAllEqual(features['a'], [0, 1])
self.assertAllEqual(features['b'], [32, 33])
self.assertAllEqual(target, [-32, -31])
features, target = session.run(results)
self.assertAllEqual(features['a'], [2, 3])
self.assertAllEqual(features['b'], [34, 35])
self.assertAllEqual(target, [-30, -29])
features, target = session.run(results)
self.assertAllEqual(features['a'], [4])
self.assertAllEqual(features['b'], [36])
self.assertAllEqual(target, [-28])
with self.assertRaises(errors.OutOfRangeError):
session.run(results)
coord.request_stop()
coord.join(threads)
def testPandasInputFn_OnlyX(self):
if not HAS_PANDAS:
return
with self.test_session() as session:
x, _ = self.makeTestDataFrame()
input_fn = pandas_io.pandas_input_fn(
x, y=None, batch_size=2, shuffle=False, num_epochs=1)
features = self.callInputFnOnce(input_fn, session)
self.assertAllEqual(features['a'], [0, 1])
self.assertAllEqual(features['b'], [32, 33])
def testPandasInputFn_ExcludesIndex(self):
if not HAS_PANDAS:
return
with self.test_session() as session:
x, y = self.makeTestDataFrame()
input_fn = pandas_io.pandas_input_fn(
x, y, batch_size=2, shuffle=False, num_epochs=1)
features, _ = self.callInputFnOnce(input_fn, session)
self.assertFalse('index' in features)
def assertInputsCallableNTimes(self, input_fn, session, n):
inputs = input_fn()
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(session, coord=coord)
for _ in range(n):
session.run(inputs)
with self.assertRaises(errors.OutOfRangeError):
session.run(inputs)
coord.request_stop()
coord.join(threads)
def testPandasInputFn_RespectsEpoch_NoShuffle(self):
if not HAS_PANDAS:
return
with self.test_session() as session:
x, y = self.makeTestDataFrame()
input_fn = pandas_io.pandas_input_fn(
x, y, batch_size=4, shuffle=False, num_epochs=1)
self.assertInputsCallableNTimes(input_fn, session, 1)
def testPandasInputFn_RespectsEpoch_WithShuffle(self):
if not HAS_PANDAS:
return
with self.test_session() as session:
x, y = self.makeTestDataFrame()
input_fn = pandas_io.pandas_input_fn(
x, y, batch_size=4, shuffle=True, num_epochs=1)
self.assertInputsCallableNTimes(input_fn, session, 1)
def testPandasInputFn_RespectsEpoch_WithShuffleAutosize(self):
if not HAS_PANDAS:
return
with self.test_session() as session:
x, y = self.makeTestDataFrame()
input_fn = pandas_io.pandas_input_fn(
x, y, batch_size=2, shuffle=True, queue_capacity=None, num_epochs=2)
self.assertInputsCallableNTimes(input_fn, session, 4)
def testPandasInputFn_RespectsEpochUnevenBatches(self):
if not HAS_PANDAS:
return
x, y = self.makeTestDataFrame()
with self.test_session() as session:
input_fn = pandas_io.pandas_input_fn(
x, y, batch_size=3, shuffle=False, num_epochs=1)
# Before the last batch, only one element of the epoch should remain.
self.assertInputsCallableNTimes(input_fn, session, 2)
def testPandasInputFn_Idempotent(self):
if not HAS_PANDAS:
return
x, y = self.makeTestDataFrame()
for _ in range(2):
pandas_io.pandas_input_fn(
x, y, batch_size=2, shuffle=False, num_epochs=1)()
for _ in range(2):
pandas_io.pandas_input_fn(
x, y, batch_size=2, shuffle=True, num_epochs=1)()
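# Editorial sketch (not part of the original test file): the helpers above all
# follow the same pattern -- build an input_fn from a pandas DataFrame/Series
# pair, then drain it with queue runners. Assuming pandas is installed, a
# minimal standalone usage looks roughly like:
#
#   x = pd.DataFrame({'a': np.arange(4), 'b': np.arange(32, 36)},
#                    index=np.arange(100, 104))
#   y = pd.Series(np.arange(-32, -28), index=np.arange(100, 104))
#   input_fn = pandas_io.pandas_input_fn(
#       x, y, batch_size=2, shuffle=False, num_epochs=1)
#   features, target = input_fn()  # dict of feature tensors, target tensor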
if __name__ == '__main__':
test.main()
| apache-2.0 |
xiaoxiamii/scikit-learn | examples/cluster/plot_lena_segmentation.py | 271 | 2444 | """
=========================================
Segmenting the picture of Lena in regions
=========================================
This example uses :ref:`spectral_clustering` on a graph created from
voxel-to-voxel difference on an image to break this image into multiple
partly-homogeneous regions.
This procedure (spectral clustering on an image) is an efficient
approximate solution for finding normalized graph cuts.
There are two options to assign labels:
* with 'kmeans' spectral clustering will cluster samples in the embedding space
using a kmeans algorithm
* whereas 'discretize' will iteratively search for the closest partition
space to the embedding space.
"""
print(__doc__)
# Author: Gael Varoquaux <[email protected]>, Brian Cheung
# License: BSD 3 clause
import time
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
from sklearn.feature_extraction import image
from sklearn.cluster import spectral_clustering
lena = sp.misc.lena()
# Downsample the image by a factor of 4
lena = lena[::2, ::2] + lena[1::2, ::2] + lena[::2, 1::2] + lena[1::2, 1::2]
lena = lena[::2, ::2] + lena[1::2, ::2] + lena[::2, 1::2] + lena[1::2, 1::2]
# Convert the image into a graph with the value of the gradient on the
# edges.
graph = image.img_to_graph(lena)
# Take a decreasing function of the gradient: an exponential
# The smaller beta is, the more independent the segmentation is of the
# actual image. For beta=1, the segmentation is close to a Voronoi tessellation.
beta = 5
eps = 1e-6
graph.data = np.exp(-beta * graph.data / lena.std()) + eps
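# For a concrete sense of scale (editorial note, not part of the original
# example): with beta=5 an edge whose gradient equals one standard deviation
# of the image gets weight exp(-5) + eps ~ 0.007, so only very similar
# neighboring pixels remain strongly connected in the graph.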
# Apply spectral clustering (this step goes much faster if you have pyamg
# installed)
N_REGIONS = 11
###############################################################################
# Visualize the resulting regions
for assign_labels in ('kmeans', 'discretize'):
t0 = time.time()
labels = spectral_clustering(graph, n_clusters=N_REGIONS,
assign_labels=assign_labels,
random_state=1)
t1 = time.time()
labels = labels.reshape(lena.shape)
plt.figure(figsize=(5, 5))
plt.imshow(lena, cmap=plt.cm.gray)
for l in range(N_REGIONS):
plt.contour(labels == l, contours=1,
colors=[plt.cm.spectral(l / float(N_REGIONS)), ])
plt.xticks(())
plt.yticks(())
plt.title('Spectral clustering: %s, %.2fs' % (assign_labels, (t1 - t0)))
plt.show()
| bsd-3-clause |
rneher/FitnessInference | flu/figure_scripts/flu_figures_inference.py | 1 | 12931 |
#########################################################################################
#
# author: Richard Neher
# email: [email protected]
#
# Reference: Richard A. Neher, Colin A Russell, Boris I Shraiman.
# "Predicting evolution from the shape of genealogical trees"
#
##################################################
#!/ebio/ag-neher/share/programs/bin/python2.7
#
#script that reads in precomputed repeated predictions of influenza
#and plots the average predictions using external, internal nodes for each year
#in addition, it compares this to predictions rewarding Koel et al mutations
#and to predictions using explicit temporal information (frequency dynamics within
#clades)
#
import glob,argparse,sys
sys.path.append('/ebio/ag-neher/share/users/rneher/FluPrediction_code/flu/src')
import test_flu_prediction as test_flu
import analysis_utils as AU
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import scoreatpercentile
file_formats = [] #['.pdf', '.svg']
# set matplotlib plotting parameters
plt.rcParams.update(test_flu.mpl_params)
figure_folder = '../figures_ms/'
# set flutype, prediction regions, and basic parameters
parser = test_flu.make_flu_parser()
params=parser.parse_args()
params.year='????'
params.sample_size = 100
D = params.diffusion = 0.5
gamma = params.gamma = 3.0
omega = params.omega = 0.001
params.collapse = False
metric = 'nuc'
# make file identifiers
base_name, name_mod = test_flu.get_fname(params)
#remove year
base_name = '_'.join(base_name.split('_')[:1]+base_name.split('_')[2:])
base_name = base_name.replace('_????','')
# load data (with Koel boost and without), save in dictionary
prediction_distances={}
normed_distances={}
for boost in [0.0,0.5,1.0]:
params.boost = boost
years,tmp_pred, tmp_normed = AU.load_prediction_data(params, metric)
prediction_distances.update(tmp_pred)
normed_distances.update(tmp_normed)
##################################################################################
## main figure 3c
##################################################################################
# make figure
plt.figure(figsize = (12,6))
# plot line for random expectation
plt.plot([min(years)-0.5,max(years)+0.5], [1,1], lw=2, c='k')
# add shaded boxes and optimal and L&L predictions
for yi,year in enumerate(years):
plt.gca().add_patch(plt.Rectangle([year-0.5, 0.2], 1.0, 1.8, color='k', alpha=0.05*(1+np.mod(year,2))))
plt.plot([year-0.5, year+0.5], [prediction_distances[('minimal',boost,'minimal')][yi],
prediction_distances[('minimal',boost,'minimal')][yi]],
lw=2, c='k', ls = '--')
for method, sym, col, shift, label in [[('fitness,terminal nodes',0.0,'pred(T)'), 's', 'k', -0.25, 'top ranked terminal nodes'],
[('fitness,internal nodes',0.0,'pred(I)'), 'd', 'r', 0.25, 'top ranked internal nodes ']]:
plt.plot(years+shift, prediction_distances[method], sym, c= col, ms=8,
label=label) #+r' $\bar{d}='+str(np.round(normed_distances[method][0],2))+'$')
# set limits, ticks, legends
plt.ylim([0.2, 1.7])
plt.yticks([0.5, 1, 1.5])
plt.xlim([min(years)-0.5,max(years)+0.5])
plt.xticks(years[::2])
plt.ylabel(r'$\Delta(\mathrm{prediction})$ to next season')
plt.xlabel('year')
plt.legend(loc=9, ncol=1,numpoints=1)
#add panel label
plt.text(-0.06,0.95,'C', transform = plt.gca().transAxes, fontsize = 36)
#save figure
plt.tight_layout()
for ff in file_formats:
plt.savefig(figure_folder+'Fig4C_'+base_name+'_'+name_mod+'_internal_external_revised'+ff)
##################################################################################
## Fig 4: compare bootstrap distributions of prediction results
## Bootstrapping is over years
##
##################################################################################
#sorted_methods = [a for a in sorted(normed_distances.items(), key=lambda x:x[1]) if a[0][0]
# not in ['ladder rank', 'date', 'expansion, internal nodes', 'L&L'] or a[0][1]==0.0]
tick_labels = { ('fitness,internal nodes', 0.0, 'pred(I)'):'internal',
('fitness,terminal nodes', 0.0, 'pred(T)'):'terminal',
('expansion, internal nodes', 0.0, 'growth'):'growth',
('L&L', 0.0, r'L\&L'):r'L\&L',
('ladder rank',0.0, 'ladder rank'):'ladder rank'}
sorted_methods = [a for a in sorted(normed_distances.items(), key=lambda x:x[1][0]) if a[0][:2] in
[#('internal and expansion', 0.5),
#('internal and expansion', 0.0),
('fitness,internal nodes', 0.0),
('fitness,terminal nodes', 0.0),
('expansion, internal nodes', 0.0),
('L&L', 0.0),
('ladder rank',0.0)] ]
plt.figure(figsize = (8,5))
plt.boxplot([a[1][1][-1] for a in sorted_methods],positions = range(len(sorted_methods)))
#plt.xticks(range(len(sorted_methods)), [a[0][-1] for a in sorted_methods], rotation=30, horizontalalignment='right')
plt.xticks(range(len(sorted_methods)), [tick_labels[a[0]] for a in sorted_methods], rotation=30, horizontalalignment='right')
plt.ylabel(r'distance $\bar{d}$ to next season')
plt.xlim([-0.5, len(sorted_methods)-0.5])
plt.grid()
plt.tight_layout()
for ff in file_formats:
plt.savefig(figure_folder+'Fig5_'+base_name+'_'+name_mod+'_method_comparison'+ff)
##################################################################################
## Fig 3c-1 Comparison to L&L
##################################################################################
# make figure
plt.figure(figsize = (12,6))
# plot line for random expectation
plt.plot([min(years)-0.5,max(years)+0.5], [1,1], lw=2, c='k')
# add shaded boxes and optimal
for yi,year in enumerate(years):
plt.gca().add_patch(plt.Rectangle([year-0.5, 0.2], 1.0, 1.8, color='k', alpha=0.05*(1+np.mod(year,2))))
plt.plot([year-0.5, year+0.5], [prediction_distances[('minimal',boost,'minimal')][yi],
prediction_distances[('minimal',boost,'minimal')][yi]],
lw=2, c='k', ls = '--')
method, sym, col, shift, label = ('fitness,terminal nodes',0.0,'pred(T)'), 's', 'k', -0.25, 'top ranked terminal nodes '
plt.plot(years+shift, prediction_distances[method], sym, c= col, ms=8, label=label+r' $\bar{d}='+str(np.round(normed_distances[method][0],2))+'$')
method, sym, col, shift, label = ('L&L',0.0,'L\&L'), 'o', 'r', 0.25, r'prediction by \L{}uksza and L\"assig'
plt.plot(years[AU.laessig_years(years)]+shift, prediction_distances[method][AU.laessig_years(years)],
sym, c= col, ms=8, label=label+r' $\bar{d}='+str(np.round(normed_distances[method][0],2))+'$')
# set limits, ticks, legends
plt.ylim([0.2, 1.7])
plt.yticks([0.5, 1, 1.5])
plt.xlim([min(years)-0.5,max(years)+0.5])
plt.xticks(years[::2])
plt.ylabel(r'$\Delta(\mathrm{prediction})$ to next season')
#plt.ylabel('nucleotide distance to next season\n(relative to average)')
plt.xlabel('year')
plt.legend(loc=9, ncol=1,numpoints=1)
#add panel label
plt.text(0.02,0.9,'Fig.~3-S1', transform = plt.gca().transAxes, fontsize = 20)
#save figure
plt.tight_layout()
for ff in file_formats:
plt.savefig(figure_folder+'Fig4C_s1_'+base_name+'_'+name_mod+'_LL_external_revised'+ff)
##################################################################################
## Fig 3c-2 inclusion of Koel boost -- no temporal component
##################################################################################
# make figure
plt.figure(figsize = (12,6))
plt.title(r'Rewarding Koel mutations -- w/o clade growth estimate: $\bar{d}='
+', '.join(map(str,[np.round(normed_distances[('fitness,internal nodes',boost,'pred(I)')][0],2)
for boost in [0.0, 0.5, 1.0]]))+'$ for $\delta = 0, 0.5, 1$', fontsize = 16)
# plot line for random expectation
plt.plot([min(years)-0.5,max(years)+0.5], [1,1], lw=2, c='k')
# add shaded boxes and optimal
method, sym, col, shift, label = ('fitness,internal nodes',0.0,'pred(I)'), 's', 'k', -0.25, 'pred(I)+Koel boost'
for yi,year in enumerate(years):
plt.gca().add_patch(plt.Rectangle([year-0.5, 0.2], 1.0, 1.8, color='k', alpha=0.05*(1+np.mod(year,2))))
plt.plot([year-0.5, year+0.5], [prediction_distances[('minimal',boost,'minimal')][yi],
prediction_distances[('minimal',boost,'minimal')][yi]],
lw=2, c='k', ls = '--')
plt.plot(year+np.linspace(-0.5, 0.5,7)[1:-1:2], [prediction_distances[(method[0], koel, method[-1])][yi] for koel in [0.0, 0.5, 1.0]],
sym, c= col, ms=8,ls='-', label=label+r' $\bar{d}='+str(np.round(normed_distances[method][0],2))+'$')
# set limits, ticks, legends
plt.ylim([0.2, 1.7])
plt.yticks([0.5, 1, 1.5])
plt.xlim([min(years)-0.5,max(years)+0.5])
plt.xticks(years[::2])
plt.ylabel(r'$\Delta(\mathrm{prediction})$ to next season')
#plt.ylabel('nucleotide distance to next season\n(relative to average)')
plt.xlabel('year')
#plt.legend(loc=9, ncol=1,numpoints=1)
#add panel label
plt.text(0.02,0.93,'Fig.~3-S2', transform = plt.gca().transAxes, fontsize = 20)
#save figure
plt.tight_layout()
for ff in file_formats:
plt.savefig(figure_folder+'Fig4C_s2_'+base_name+'_'+name_mod+'_koel_boost_revised'+ff)
##################################################################################
## Fig 3c-3 inclusion of Koel boost -- with temporal component
##################################################################################
# make figure
plt.figure(figsize = (12,6))
plt.title(r'Rewarding Koel mutations -- with clade growth estimate: $\bar{d}='
+', '.join(map(str,[np.round(normed_distances[('internal and expansion',boost,'pred(I)+growth')][0],2)
for boost in [0.0, 0.5, 1.0]]))+'$ for $\delta = 0, 0.5, 1$', fontsize = 16)
# plot line for random expectation
plt.plot([min(years)-0.5,max(years)+0.5], [1,1], lw=2, c='k')
# add shaded boxes and optimal
method, sym, col, shift, label = ('internal and expansion',0.0,'pred(I)+growth'), 's', 'k', -0.25, 'pred(I)+Koel boost+ growth'
for yi,year in enumerate(years):
plt.gca().add_patch(plt.Rectangle([year-0.5, 0.2], 1.0, 1.8, color='k', alpha=0.05*(1+np.mod(year,2))))
plt.plot([year-0.5, year+0.5], [prediction_distances[('minimal',boost,'minimal')][yi],
prediction_distances[('minimal',boost,'minimal')][yi]],
lw=2, c='k', ls = '--')
plt.plot(year+np.linspace(-0.5, 0.5,7)[1:-1:2], [prediction_distances[(method[0], koel, method[-1])][yi] for koel in [0.0, 0.5, 1.0]],
sym, c= col, ms=8,ls='-', label=label+r' $\bar{d}='+str(np.round(normed_distances[method][0],2))+'$')
# set limits, ticks, legends
plt.ylim([0.2, 1.7])
plt.yticks([0.5, 1, 1.5])
plt.xlim([min(years)-0.5,max(years)+0.5])
plt.xticks(years[::2])
plt.ylabel(r'$\Delta(\mathrm{prediction})$ to next season')
#plt.ylabel('nucleotide distance to next season\n(relative to average)')
plt.xlabel('year')
#plt.legend(loc=9, ncol=1,numpoints=1)
#add panel label
plt.text(0.02,0.93,'Fig.~3-S3', transform = plt.gca().transAxes, fontsize = 20)
#save figure
plt.tight_layout()
for ff in file_formats:
plt.savefig(figure_folder+'Fig4C_s3_'+base_name+'_'+name_mod+'_koel_boost_growth_revised'+ff)
##################################################################################
## Fig 3c-4 Temporal distribution of top strains
##################################################################################
# make figure
plt.figure(figsize = (10,6))
bins=np.cumsum([0,31,28,31,30,31,30,31,31,30,31,30,31,31,28,31,30,31])
bc = (bins[1:]+bins[:-1])*0.5
bin_label = ['Jan', 'Feb', 'Mar', 'Apr', 'May','Jun','Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec', 'Jan', 'Feb', 'Mar', 'Apr', 'May']
params.boost = 0.0
sampling_dates = AU.load_date_distribution(params, 'mean_fitness')
for year in sorted(sampling_dates.keys()):
y,x = np.histogram(sampling_dates[year], bins=bins)
plt.plot(bc, y, 'o', label = str(year), ls='-')
# set limits, ticks, legends
plt.ylabel('distribution of predicted strains')
plt.xticks(bc, bin_label)
plt.xlabel('sampling date')
plt.legend(loc=1, ncol=2,numpoints=1)
#add panel label
plt.text(0.02,0.9,'Fig.~3-S4', transform = plt.gca().transAxes, fontsize = 20)
#save figure
plt.tight_layout()
for ff in file_formats:
plt.savefig(figure_folder+'Fig3C_s4_'+base_name+'_'+name_mod+'_sampling_dates_by_year'+ff)
plt.figure()
all_dates = []
for d in sampling_dates.values(): all_dates.extend(d)
plt.hist(all_dates, bins=bins)
#add panel label
plt.text(0.02,0.9,'Fig.~3-S5', transform = plt.gca().transAxes, fontsize = 20)
plt.ylabel('distribution of predicted strains')
plt.xlabel('sampling date')
plt.xticks(bc, bin_label)
plt.tight_layout()
for ff in file_formats:
plt.savefig(figure_folder+'Fig4C_s5_'+base_name+'_'+name_mod+'_sampling_dates'+ff)
| mit |
pdamodaran/yellowbrick | docs/conf.py | 1 | 12991 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# conf
# Yellowbrick documentation build config file, created by sphinx-quickstart
#
# Created: Tue Jul 05 19:45:43 2016 -0400
# Copyright (C) 2016-2019 The scikit-yb developers
# For license information, see LICENSE.txt
#
# ID: conf.py [] [email protected] $
"""
Yellowbrick documentation build config file, created by sphinx-quickstart.
This file is executed with the current directory set to its containing dir
by ``execfile()``, i.e. the working directory will be yellowbrick/docs.
Ensure that all specified paths relative to the docs directory are made
absolute by using ``os.path.abspath``.
Note that not all possible configuration values are present in this
autogenerated file.
All configuration values have a default; values that are commented out
serve to show the default.
See: https://www.sphinx-doc.org/en/master/usage/configuration.html
for more details on configuring the documentation build.
"""
##########################################################################
## Imports
##########################################################################
import os
import sys
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('..'))
# Set the backend of matplotlib to prevent build errors.
import matplotlib
matplotlib.use('agg')
# Import yellowbrick information.
import yellowbrick as yb
##########################################################################
## General configuration
##########################################################################
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.8'
# General information about the project.
project = 'Yellowbrick'
copyright = '2016-2019, The scikit-yb developers.'
author = 'The scikit-yb developers'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
# The short X.Y version.
version = yb.get_version(short=True)
# The full version, including alpha/beta/rc tags.
release = "v" + yb.get_version(short=False)
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or custom ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.intersphinx',
'sphinx.ext.coverage',
'sphinx.ext.mathjax',
'sphinx.ext.viewcode',
'sphinx.ext.todo',
'numpydoc',
'matplotlib.sphinxext.plot_directive',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The reST default role (used for this markup: `text`) for all docs.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
##########################################################################
## Extension Configuration
##########################################################################
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# Auto-plot settings either as extension or (file format, dpi)
plot_formats = [
'png',
'pdf',
# ('hires.png', 350),
]
# By default, include the source code generating plots in documentation
plot_include_source = True
# Whether to show a link to the source in HTML.
plot_html_show_source_link = True
# Code that should be executed before each plot.
plot_pre_code = (
"import numpy as np\n"
"import matplotlib.pyplot as plt\n"
"from yellowbrick.datasets import *\n"
)
# Whether to show links to the files in HTML.
plot_html_show_formats = True
# A dictionary containing any non-standard rcParams that should be applied before each plot.
plot_rcparams = {
"figure.figsize": (9,6),
"figure.dpi": 128,
}
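# Illustrative note (an assumption about usage, not part of the original
# config): with the settings above, an ``.. plot::`` directive in any .rst page
# is rendered at 9x6 inches / 128 dpi, and the snippet can rely on
# ``plot_pre_code`` having already imported numpy, matplotlib.pyplot and the
# yellowbrick datasets, e.g.
#
#   .. plot::
#       :include-source:
#
#       X = np.random.rand(100, 2)
#       plt.scatter(X[:, 0], X[:, 1])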
# Autodoc requires numpy to skip class members otherwise we get an exception:
# toctree contains reference to nonexisting document
# See: https://github.com/phn/pytpm/issues/3#issuecomment-12133978
numpydoc_show_class_members = False
# Locations of objects.inv files for intersphinx extension that auto-links
# to external api docs.
intersphinx_mapping = {
'python': ('https://docs.python.org/3', None),
'matplotlib': ('http://matplotlib.org/', None),
'scipy': ('http://docs.scipy.org/doc/scipy/reference', None),
'numpy': ('https://docs.scipy.org/doc/numpy/', None),
'cycler': ('http://matplotlib.org/cycler/', None),
'sklearn': ('http://scikit-learn.org/stable/', None)
}
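# For example (editorial note): with the mapping above, a reference such as
# :class:`sklearn.base.BaseEstimator` or :func:`numpy.mean` in the docs is
# resolved against the external objects.inv files and linked automatically.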
##########################################################################
## Options for HTML output
##########################################################################
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents.
# "<project> v<release> documentation" by default.
#
# html_title = 'yellowbrick v0.1'
# A shorter title for the navigation bar. Default is the same as html_title.
#
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#
# html_logo = None
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#
html_favicon = "images/favicon.ico"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
def setup(app):
app.add_stylesheet("theme_overrides.css")
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#
# html_extra_path = []
# If not None, a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
# The empty string is equivalent to '%b %d, %Y'.
#
# html_last_updated_fmt = None
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#
# html_additional_pages = {}
# If false, no module index is generated.
#
# html_domain_indices = True
# If false, no index is generated.
#
# html_use_index = True
# If true, the index is split into individual pages for each letter.
#
# html_split_index = False
# If true, links to the reST sources are added to the pages.
#
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh'
#
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# 'ja' uses this config value.
# 'zh' user can custom change `jieba` dictionary path.
#
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'yellowbrickdoc'
##########################################################################
## Options for LaTeX output
##########################################################################
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples.
latex_documents = [
(
master_doc, # source start file
'yellowbrick.tex', # target name
'{} Documentation'.format(project), # title
author, # author
'manual' # documentclass [howto,manual, or own class]
),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#
# latex_use_parts = False
# If true, show page references after internal links.
#
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
#
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
#
# latex_appendices = []
# If false, no module index is generated.
#
# latex_domain_indices = True
##########################################################################
## Options for manual page output
##########################################################################
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(
master_doc,
project,
'{} Documentation'.format(project),
[author],
1
)
]
# If true, show URL addresses after external links.
#
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
master_doc,
'yellowbrick',
'{} Documentation'.format(project),
author,
'yellowbrick',
'machine learning visualization',
'scientific visualization',
),
]
# Documents to append as an appendix to all manuals.
#
# texinfo_appendices = []
# If false, no module index is generated.
#
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#
# texinfo_no_detailmenu = False | apache-2.0 |
aetilley/scikit-learn | sklearn/grid_search.py | 103 | 36232 | """
The :mod:`sklearn.grid_search` includes utilities to fine-tune the parameters
of an estimator.
"""
from __future__ import print_function
# Author: Alexandre Gramfort <[email protected]>,
# Gael Varoquaux <[email protected]>
# Andreas Mueller <[email protected]>
# Olivier Grisel <[email protected]>
# License: BSD 3 clause
from abc import ABCMeta, abstractmethod
from collections import Mapping, namedtuple, Sized
from functools import partial, reduce
from itertools import product
import operator
import warnings
import numpy as np
from .base import BaseEstimator, is_classifier, clone
from .base import MetaEstimatorMixin, ChangedBehaviorWarning
from .cross_validation import check_cv
from .cross_validation import _fit_and_score
from .externals.joblib import Parallel, delayed
from .externals import six
from .utils import check_random_state
from .utils.random import sample_without_replacement
from .utils.validation import _num_samples, indexable
from .utils.metaestimators import if_delegate_has_method
from .metrics.scorer import check_scoring
__all__ = ['GridSearchCV', 'ParameterGrid', 'fit_grid_point',
'ParameterSampler', 'RandomizedSearchCV']
class ParameterGrid(object):
"""Grid of parameters with a discrete number of values for each.
Can be used to iterate over parameter value combinations with the
Python built-in function iter.
Read more in the :ref:`User Guide <grid_search>`.
Parameters
----------
param_grid : dict of string to sequence, or sequence of such
The parameter grid to explore, as a dictionary mapping estimator
parameters to sequences of allowed values.
An empty dict signifies default parameters.
A sequence of dicts signifies a sequence of grids to search, and is
useful to avoid exploring parameter combinations that make no sense
or have no effect. See the examples below.
Examples
--------
>>> from sklearn.grid_search import ParameterGrid
>>> param_grid = {'a': [1, 2], 'b': [True, False]}
>>> list(ParameterGrid(param_grid)) == (
... [{'a': 1, 'b': True}, {'a': 1, 'b': False},
... {'a': 2, 'b': True}, {'a': 2, 'b': False}])
True
>>> grid = [{'kernel': ['linear']}, {'kernel': ['rbf'], 'gamma': [1, 10]}]
>>> list(ParameterGrid(grid)) == [{'kernel': 'linear'},
... {'kernel': 'rbf', 'gamma': 1},
... {'kernel': 'rbf', 'gamma': 10}]
True
>>> ParameterGrid(grid)[1] == {'kernel': 'rbf', 'gamma': 1}
True
See also
--------
:class:`GridSearchCV`:
uses ``ParameterGrid`` to perform a full parallelized parameter search.
"""
def __init__(self, param_grid):
if isinstance(param_grid, Mapping):
# wrap dictionary in a singleton list to support either dict
# or list of dicts
param_grid = [param_grid]
self.param_grid = param_grid
def __iter__(self):
"""Iterate over the points in the grid.
Returns
-------
params : iterator over dict of string to any
Yields dictionaries mapping each estimator parameter to one of its
allowed values.
"""
for p in self.param_grid:
# Always sort the keys of a dictionary, for reproducibility
items = sorted(p.items())
if not items:
yield {}
else:
keys, values = zip(*items)
for v in product(*values):
params = dict(zip(keys, v))
yield params
def __len__(self):
"""Number of points on the grid."""
# Product function that can handle iterables (np.product can't).
product = partial(reduce, operator.mul)
return sum(product(len(v) for v in p.values()) if p else 1
for p in self.param_grid)
def __getitem__(self, ind):
"""Get the parameters that would be ``ind``th in iteration
Parameters
----------
ind : int
The iteration index
Returns
-------
params : dict of string to any
Equal to list(self)[ind]
"""
# This is used to make discrete sampling without replacement memory
# efficient.
for sub_grid in self.param_grid:
# XXX: could memoize information used here
if not sub_grid:
if ind == 0:
return {}
else:
ind -= 1
continue
# Reverse so most frequent cycling parameter comes first
keys, values_lists = zip(*sorted(sub_grid.items())[::-1])
sizes = [len(v_list) for v_list in values_lists]
total = np.product(sizes)
if ind >= total:
# Try the next grid
ind -= total
else:
out = {}
for key, v_list, n in zip(keys, values_lists, sizes):
ind, offset = divmod(ind, n)
out[key] = v_list[offset]
return out
raise IndexError('ParameterGrid index out of range')
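# Worked example of the index decoding above (editorial comment, not part of
# the original source): for param_grid = {'a': [1, 2], 'b': [True, False]} the
# reversed sort gives keys ('b', 'a') with sizes [2, 2]; ind = 3 decodes via
# divmod(3, 2) -> b = False, then divmod(1, 2) -> a = 2, so grid[3] equals
# {'a': 2, 'b': False}, matching the fourth element of the iteration order.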
class ParameterSampler(object):
"""Generator on parameters sampled from given distributions.
Non-deterministic iterable over random candidate combinations for hyper-
parameter search. If all parameters are presented as a list,
sampling without replacement is performed. If at least one parameter
is given as a distribution, sampling with replacement is used.
It is highly recommended to use continuous distributions for continuous
parameters.
Note that as of SciPy 0.12, the ``scipy.stats.distributions`` do not accept
a custom RNG instance and always use the singleton RNG from
``numpy.random``. Hence setting ``random_state`` will not guarantee a
deterministic iteration whenever ``scipy.stats`` distributions are used to
define the parameter search space.
Read more in the :ref:`User Guide <grid_search>`.
Parameters
----------
param_distributions : dict
Dictionary where the keys are parameters and values
are distributions from which a parameter is to be sampled.
Distributions either have to provide a ``rvs`` function
to sample from them, or can be given as a list of values,
where a uniform distribution is assumed.
n_iter : integer
Number of parameter settings that are produced.
random_state : int or RandomState
Pseudo random number generator state used for random uniform sampling
from lists of possible values instead of scipy.stats distributions.
Returns
-------
params : dict of string to any
**Yields** dictionaries mapping each estimator parameter to
        a sampled value.
Examples
--------
>>> from sklearn.grid_search import ParameterSampler
>>> from scipy.stats.distributions import expon
>>> import numpy as np
>>> np.random.seed(0)
>>> param_grid = {'a':[1, 2], 'b': expon()}
>>> param_list = list(ParameterSampler(param_grid, n_iter=4))
>>> rounded_list = [dict((k, round(v, 6)) for (k, v) in d.items())
... for d in param_list]
>>> rounded_list == [{'b': 0.89856, 'a': 1},
... {'b': 0.923223, 'a': 1},
... {'b': 1.878964, 'a': 2},
... {'b': 1.038159, 'a': 2}]
True
"""
def __init__(self, param_distributions, n_iter, random_state=None):
self.param_distributions = param_distributions
self.n_iter = n_iter
self.random_state = random_state
def __iter__(self):
# check if all distributions are given as lists
# in this case we want to sample without replacement
all_lists = np.all([not hasattr(v, "rvs")
for v in self.param_distributions.values()])
rnd = check_random_state(self.random_state)
if all_lists:
# look up sampled parameter settings in parameter grid
param_grid = ParameterGrid(self.param_distributions)
grid_size = len(param_grid)
if grid_size < self.n_iter:
raise ValueError(
"The total space of parameters %d is smaller "
"than n_iter=%d." % (grid_size, self.n_iter)
+ " For exhaustive searches, use GridSearchCV.")
for i in sample_without_replacement(grid_size, self.n_iter,
random_state=rnd):
yield param_grid[i]
else:
# Always sort the keys of a dictionary, for reproducibility
items = sorted(self.param_distributions.items())
for _ in six.moves.range(self.n_iter):
params = dict()
for k, v in items:
if hasattr(v, "rvs"):
params[k] = v.rvs()
else:
params[k] = v[rnd.randint(len(v))]
yield params
def __len__(self):
"""Number of points that will be sampled."""
return self.n_iter
def fit_grid_point(X, y, estimator, parameters, train, test, scorer,
verbose, error_score='raise', **fit_params):
"""Run fit on one set of parameters.
Parameters
----------
X : array-like, sparse matrix or list
Input data.
y : array-like or None
Targets for input data.
estimator : estimator object
This estimator will be cloned and then fitted.
parameters : dict
Parameters to be set on estimator for this grid point.
train : ndarray, dtype int or bool
Boolean mask or indices for training set.
test : ndarray, dtype int or bool
Boolean mask or indices for test set.
scorer : callable or None.
If provided must be a scorer callable object / function with signature
``scorer(estimator, X, y)``.
verbose : int
Verbosity level.
**fit_params : kwargs
Additional parameter passed to the fit function of the estimator.
error_score : 'raise' (default) or numeric
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised. If a numeric value is given,
FitFailedWarning is raised. This parameter does not affect the refit
step, which will always raise the error.
Returns
-------
score : float
Score of this parameter setting on given training / test split.
parameters : dict
The parameters that have been evaluated.
n_samples_test : int
Number of test samples in this split.
"""
score, n_samples_test, _ = _fit_and_score(estimator, X, y, scorer, train,
test, verbose, parameters,
fit_params, error_score)
return score, parameters, n_samples_test
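# Illustrative usage sketch (editorial comment; the split and the parameter
# values are hypothetical, not taken from the library):
#
#   scorer = check_scoring(estimator)
#   score, params, n_test = fit_grid_point(
#       X, y, clone(estimator), {'C': 1.0}, train_idx, test_idx, scorer,
#       verbose=0)
#
# where train_idx/test_idx come from a single fold of a cross-validation
# generator such as check_cv(3, X, y).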
def _check_param_grid(param_grid):
if hasattr(param_grid, 'items'):
param_grid = [param_grid]
for p in param_grid:
for v in p.values():
if isinstance(v, np.ndarray) and v.ndim > 1:
raise ValueError("Parameter array should be one-dimensional.")
check = [isinstance(v, k) for k in (list, tuple, np.ndarray)]
if True not in check:
raise ValueError("Parameter values should be a list.")
if len(v) == 0:
raise ValueError("Parameter values should be a non-empty "
"list.")
class _CVScoreTuple (namedtuple('_CVScoreTuple',
('parameters',
'mean_validation_score',
'cv_validation_scores'))):
    # A raw namedtuple is very memory efficient as it packs the attributes
    # in a struct, getting rid of the per-instance __dict__; in particular it
    # does not copy the string for the keys on each instance.
    # Deriving a namedtuple subclass just to introduce the __repr__ method
    # would also reintroduce the __dict__ on the instance, so we tell the
    # Python interpreter that this subclass uses static __slots__ instead of
    # dynamic attributes. Furthermore, we don't need any additional slot in
    # the subclass, so we set __slots__ to the empty tuple.
__slots__ = ()
def __repr__(self):
"""Simple custom repr to summarize the main info"""
return "mean: {0:.5f}, std: {1:.5f}, params: {2}".format(
self.mean_validation_score,
np.std(self.cv_validation_scores),
self.parameters)
class BaseSearchCV(six.with_metaclass(ABCMeta, BaseEstimator,
MetaEstimatorMixin)):
"""Base class for hyper parameter search with cross-validation."""
@abstractmethod
def __init__(self, estimator, scoring=None,
fit_params=None, n_jobs=1, iid=True,
refit=True, cv=None, verbose=0, pre_dispatch='2*n_jobs',
error_score='raise'):
self.scoring = scoring
self.estimator = estimator
self.n_jobs = n_jobs
self.fit_params = fit_params if fit_params is not None else {}
self.iid = iid
self.refit = refit
self.cv = cv
self.verbose = verbose
self.pre_dispatch = pre_dispatch
self.error_score = error_score
@property
def _estimator_type(self):
return self.estimator._estimator_type
def score(self, X, y=None):
"""Returns the score on the given data, if the estimator has been refit
This uses the score defined by ``scoring`` where provided, and the
``best_estimator_.score`` method otherwise.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Input data, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples] or [n_samples, n_output], optional
Target relative to X for classification or regression;
None for unsupervised learning.
Returns
-------
score : float
Notes
-----
* The long-standing behavior of this method changed in version 0.16.
* It no longer uses the metric provided by ``estimator.score`` if the
``scoring`` parameter was set when fitting.
"""
if self.scorer_ is None:
raise ValueError("No score function explicitly defined, "
"and the estimator doesn't provide one %s"
% self.best_estimator_)
if self.scoring is not None and hasattr(self.best_estimator_, 'score'):
warnings.warn("The long-standing behavior to use the estimator's "
"score function in {0}.score has changed. The "
"scoring parameter is now used."
"".format(self.__class__.__name__),
ChangedBehaviorWarning)
return self.scorer_(self.best_estimator_, X, y)
@if_delegate_has_method(delegate='estimator')
def predict(self, X):
"""Call predict on the estimator with the best found parameters.
Only available if ``refit=True`` and the underlying estimator supports
``predict``.
Parameters
-----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
return self.best_estimator_.predict(X)
@if_delegate_has_method(delegate='estimator')
def predict_proba(self, X):
"""Call predict_proba on the estimator with the best found parameters.
Only available if ``refit=True`` and the underlying estimator supports
``predict_proba``.
Parameters
-----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
return self.best_estimator_.predict_proba(X)
@if_delegate_has_method(delegate='estimator')
def predict_log_proba(self, X):
"""Call predict_log_proba on the estimator with the best found parameters.
Only available if ``refit=True`` and the underlying estimator supports
``predict_log_proba``.
Parameters
-----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
return self.best_estimator_.predict_log_proba(X)
@if_delegate_has_method(delegate='estimator')
def decision_function(self, X):
"""Call decision_function on the estimator with the best found parameters.
Only available if ``refit=True`` and the underlying estimator supports
``decision_function``.
Parameters
-----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
return self.best_estimator_.decision_function(X)
@if_delegate_has_method(delegate='estimator')
def transform(self, X):
"""Call transform on the estimator with the best found parameters.
Only available if the underlying estimator supports ``transform`` and
``refit=True``.
Parameters
-----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
return self.best_estimator_.transform(X)
@if_delegate_has_method(delegate='estimator')
def inverse_transform(self, Xt):
"""Call inverse_transform on the estimator with the best found parameters.
Only available if the underlying estimator implements ``inverse_transform`` and
``refit=True``.
Parameters
-----------
Xt : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
        return self.best_estimator_.inverse_transform(Xt)
def _fit(self, X, y, parameter_iterable):
"""Actual fitting, performing the search over parameters."""
estimator = self.estimator
cv = self.cv
self.scorer_ = check_scoring(self.estimator, scoring=self.scoring)
n_samples = _num_samples(X)
X, y = indexable(X, y)
if y is not None:
if len(y) != n_samples:
raise ValueError('Target variable (y) has a different number '
'of samples (%i) than data (X: %i samples)'
% (len(y), n_samples))
cv = check_cv(cv, X, y, classifier=is_classifier(estimator))
if self.verbose > 0:
if isinstance(parameter_iterable, Sized):
n_candidates = len(parameter_iterable)
print("Fitting {0} folds for each of {1} candidates, totalling"
" {2} fits".format(len(cv), n_candidates,
n_candidates * len(cv)))
base_estimator = clone(self.estimator)
pre_dispatch = self.pre_dispatch
out = Parallel(
n_jobs=self.n_jobs, verbose=self.verbose,
pre_dispatch=pre_dispatch
)(
delayed(_fit_and_score)(clone(base_estimator), X, y, self.scorer_,
train, test, self.verbose, parameters,
self.fit_params, return_parameters=True,
error_score=self.error_score)
for parameters in parameter_iterable
for train, test in cv)
        # out is a list of 4-tuples: (score, n_test_samples, scoring_time, parameters)
n_fits = len(out)
n_folds = len(cv)
scores = list()
grid_scores = list()
for grid_start in range(0, n_fits, n_folds):
n_test_samples = 0
score = 0
all_scores = []
for this_score, this_n_test_samples, _, parameters in \
out[grid_start:grid_start + n_folds]:
all_scores.append(this_score)
if self.iid:
this_score *= this_n_test_samples
n_test_samples += this_n_test_samples
score += this_score
if self.iid:
score /= float(n_test_samples)
else:
score /= float(n_folds)
scores.append((score, parameters))
# TODO: shall we also store the test_fold_sizes?
grid_scores.append(_CVScoreTuple(
parameters,
score,
np.array(all_scores)))
# Store the computed scores
self.grid_scores_ = grid_scores
# Find the best parameters by comparing on the mean validation score:
# note that `sorted` is deterministic in the way it breaks ties
best = sorted(grid_scores, key=lambda x: x.mean_validation_score,
reverse=True)[0]
self.best_params_ = best.parameters
self.best_score_ = best.mean_validation_score
if self.refit:
# fit the best estimator using the entire dataset
# clone first to work around broken estimators
best_estimator = clone(base_estimator).set_params(
**best.parameters)
if y is not None:
best_estimator.fit(X, y, **self.fit_params)
else:
best_estimator.fit(X, **self.fit_params)
self.best_estimator_ = best_estimator
return self
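# Editorial note on the fold aggregation in BaseSearchCV._fit above (not part
# of the original source): with iid=True a candidate's score is the
# test-sample-weighted average
#     score = sum_k(score_k * n_test_k) / sum_k(n_test_k)
# whereas with iid=False it is the unweighted mean over folds,
#     score = sum_k(score_k) / n_folds.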
class GridSearchCV(BaseSearchCV):
"""Exhaustive search over specified parameter values for an estimator.
Important members are fit, predict.
GridSearchCV implements a "fit" method and a "predict" method like
any classifier except that the parameters of the classifier
used to predict is optimized by cross-validation.
Read more in the :ref:`User Guide <grid_search>`.
Parameters
----------
estimator : object type that implements the "fit" and "predict" methods
A object of that type is instantiated for each grid point.
param_grid : dict or list of dictionaries
Dictionary with parameters names (string) as keys and lists of
parameter settings to try as values, or a list of such
dictionaries, in which case the grids spanned by each dictionary
in the list are explored. This enables searching over any sequence
of parameter settings.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
fit_params : dict, optional
Parameters to pass to the fit method.
n_jobs : int, default 1
Number of jobs to run in parallel.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
iid : boolean, default=True
If True, the data is assumed to be identically distributed across
the folds, and the loss minimized is the total loss per sample,
and not the mean loss across the folds.
cv : integer or cross-validation generator, default=3
If an integer is passed, it is the number of folds.
Specific cross-validation objects can be passed, see
sklearn.cross_validation module for the list of possible objects
refit : boolean, default=True
Refit the best estimator with the entire dataset.
If "False", it is impossible to make predictions using
this GridSearchCV instance after fitting.
verbose : integer
Controls the verbosity: the higher, the more messages.
error_score : 'raise' (default) or numeric
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised. If a numeric value is given,
FitFailedWarning is raised. This parameter does not affect the refit
step, which will always raise the error.
Examples
--------
>>> from sklearn import svm, grid_search, datasets
>>> iris = datasets.load_iris()
>>> parameters = {'kernel':('linear', 'rbf'), 'C':[1, 10]}
>>> svr = svm.SVC()
>>> clf = grid_search.GridSearchCV(svr, parameters)
>>> clf.fit(iris.data, iris.target)
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
GridSearchCV(cv=None, error_score=...,
estimator=SVC(C=1.0, cache_size=..., class_weight=..., coef0=...,
decision_function_shape=None, degree=..., gamma=...,
kernel='rbf', max_iter=-1, probability=False,
random_state=None, shrinking=True, tol=...,
verbose=False),
fit_params={}, iid=..., n_jobs=1,
param_grid=..., pre_dispatch=..., refit=...,
scoring=..., verbose=...)
Attributes
----------
grid_scores_ : list of named tuples
Contains scores for all parameter combinations in param_grid.
Each entry corresponds to one parameter setting.
Each named tuple has the attributes:
* ``parameters``, a dict of parameter settings
* ``mean_validation_score``, the mean score over the
cross-validation folds
* ``cv_validation_scores``, the list of scores for each fold
best_estimator_ : estimator
Estimator that was chosen by the search, i.e. estimator
which gave highest score (or smallest loss if specified)
on the left out data. Not available if refit=False.
best_score_ : float
Score of best_estimator on the left out data.
best_params_ : dict
Parameter setting that gave the best results on the hold out data.
scorer_ : function
Scorer function used on the held out data to choose the best
parameters for the model.
Notes
------
The parameters selected are those that maximize the score of the left out
data, unless an explicit score is passed in which case it is used instead.
If `n_jobs` was set to a value higher than one, the data is copied for each
point in the grid (and not `n_jobs` times). This is done for efficiency
reasons if individual jobs take very little time, but may raise errors if
the dataset is large and not enough memory is available. A workaround in
this case is to set `pre_dispatch`. Then, the memory is copied only
`pre_dispatch` many times. A reasonable value for `pre_dispatch` is `2 *
n_jobs`.
See Also
---------
:class:`ParameterGrid`:
        generates all the combinations of a hyperparameter grid.
:func:`sklearn.cross_validation.train_test_split`:
utility function to split the data into a development set usable
for fitting a GridSearchCV instance and an evaluation set for
its final evaluation.
:func:`sklearn.metrics.make_scorer`:
Make a scorer from a performance metric or loss function.
"""
def __init__(self, estimator, param_grid, scoring=None, fit_params=None,
n_jobs=1, iid=True, refit=True, cv=None, verbose=0,
pre_dispatch='2*n_jobs', error_score='raise'):
super(GridSearchCV, self).__init__(
estimator, scoring, fit_params, n_jobs, iid,
refit, cv, verbose, pre_dispatch, error_score)
self.param_grid = param_grid
_check_param_grid(param_grid)
def fit(self, X, y=None):
"""Run fit with all sets of parameters.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples] or [n_samples, n_output], optional
Target relative to X for classification or regression;
None for unsupervised learning.
"""
return self._fit(X, y, ParameterGrid(self.param_grid))
class RandomizedSearchCV(BaseSearchCV):
"""Randomized search on hyper parameters.
RandomizedSearchCV implements a "fit" method and a "predict" method like
any classifier except that the parameters of the classifier
used to predict is optimized by cross-validation.
In contrast to GridSearchCV, not all parameter values are tried out, but
rather a fixed number of parameter settings is sampled from the specified
distributions. The number of parameter settings that are tried is
given by n_iter.
If all parameters are presented as a list,
sampling without replacement is performed. If at least one parameter
is given as a distribution, sampling with replacement is used.
It is highly recommended to use continuous distributions for continuous
parameters.
Read more in the :ref:`User Guide <randomized_parameter_search>`.
Parameters
----------
estimator : object type that implements the "fit" and "predict" methods
        An object of that type is instantiated for each parameter setting.
param_distributions : dict
Dictionary with parameters names (string) as keys and distributions
or lists of parameters to try. Distributions must provide a ``rvs``
method for sampling (such as those from scipy.stats.distributions).
If a list is given, it is sampled uniformly.
n_iter : int, default=10
Number of parameter settings that are sampled. n_iter trades
off runtime vs quality of the solution.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
fit_params : dict, optional
Parameters to pass to the fit method.
n_jobs : int, default=1
Number of jobs to run in parallel.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
iid : boolean, default=True
If True, the data is assumed to be identically distributed across
the folds, and the loss minimized is the total loss per sample,
and not the mean loss across the folds.
cv : integer or cross-validation generator, optional
If an integer is passed, it is the number of folds (default 3).
Specific cross-validation objects can be passed, see
sklearn.cross_validation module for the list of possible objects
refit : boolean, default=True
Refit the best estimator with the entire dataset.
If "False", it is impossible to make predictions using
this RandomizedSearchCV instance after fitting.
verbose : integer
Controls the verbosity: the higher, the more messages.
random_state : int or RandomState
Pseudo random number generator state used for random uniform sampling
from lists of possible values instead of scipy.stats distributions.
error_score : 'raise' (default) or numeric
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised. If a numeric value is given,
FitFailedWarning is raised. This parameter does not affect the refit
step, which will always raise the error.
Attributes
----------
grid_scores_ : list of named tuples
Contains scores for all parameter combinations in param_grid.
Each entry corresponds to one parameter setting.
Each named tuple has the attributes:
* ``parameters``, a dict of parameter settings
* ``mean_validation_score``, the mean score over the
cross-validation folds
* ``cv_validation_scores``, the list of scores for each fold
best_estimator_ : estimator
Estimator that was chosen by the search, i.e. estimator
which gave highest score (or smallest loss if specified)
on the left out data. Not available if refit=False.
best_score_ : float
Score of best_estimator on the left out data.
best_params_ : dict
Parameter setting that gave the best results on the hold out data.
Notes
-----
The parameters selected are those that maximize the score of the held-out
data, according to the scoring parameter.
If `n_jobs` was set to a value higher than one, the data is copied for each
    parameter setting (and not `n_jobs` times). This is done for efficiency
reasons if individual jobs take very little time, but may raise errors if
the dataset is large and not enough memory is available. A workaround in
this case is to set `pre_dispatch`. Then, the memory is copied only
`pre_dispatch` many times. A reasonable value for `pre_dispatch` is `2 *
n_jobs`.
See Also
--------
:class:`GridSearchCV`:
Does exhaustive search over a grid of parameters.
:class:`ParameterSampler`:
        A generator over parameter settings, constructed from
param_distributions.
"""
def __init__(self, estimator, param_distributions, n_iter=10, scoring=None,
fit_params=None, n_jobs=1, iid=True, refit=True, cv=None,
verbose=0, pre_dispatch='2*n_jobs', random_state=None,
error_score='raise'):
self.param_distributions = param_distributions
self.n_iter = n_iter
self.random_state = random_state
super(RandomizedSearchCV, self).__init__(
estimator=estimator, scoring=scoring, fit_params=fit_params,
n_jobs=n_jobs, iid=iid, refit=refit, cv=cv, verbose=verbose,
pre_dispatch=pre_dispatch, error_score=error_score)
def fit(self, X, y=None):
"""Run fit on the estimator with randomly drawn parameters.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
            Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples] or [n_samples, n_output], optional
Target relative to X for classification or regression;
None for unsupervised learning.
"""
sampled_params = ParameterSampler(self.param_distributions,
self.n_iter,
random_state=self.random_state)
return self._fit(X, y, sampled_params)
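    # A minimal usage sketch, not part of the original module (SVC and the
    # X_train/y_train arrays are assumed to exist); continuous parameters are
    # drawn through the distribution's ``rvs`` method, lists are sampled
    # uniformly:
    #
    #     import scipy.stats
    #     from sklearn.svm import SVC
    #     param_distributions = {"C": scipy.stats.expon(scale=10),
    #                            "kernel": ["linear", "rbf"]}
    #     search = RandomizedSearchCV(SVC(), param_distributions, n_iter=20,
    #                                 random_state=0)
    #     search.fit(X_train, y_train)
    #     print(search.best_params_)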
| bsd-3-clause |
ran5515/DeepDecision | tensorflow/python/estimator/inputs/queues/feeding_functions_test.py | 58 | 9375 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests feeding functions using arrays and `DataFrames`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import numpy as np
from tensorflow.python.estimator.inputs.queues import feeding_functions as ff
from tensorflow.python.platform import test
try:
# pylint: disable=g-import-not-at-top
import pandas as pd
HAS_PANDAS = True
except IOError:
# Pandas writes a temporary file during import. If it fails, don't use pandas.
HAS_PANDAS = False
except ImportError:
HAS_PANDAS = False
def vals_to_list(a):
return {
key: val.tolist() if isinstance(val, np.ndarray) else val
for key, val in a.items()
}
class _FeedingFunctionsTestCase(test.TestCase):
"""Tests for feeding functions."""
def testArrayFeedFnBatchOne(self):
array = np.arange(32).reshape([16, 2])
placeholders = ["index_placeholder", "value_placeholder"]
aff = ff._ArrayFeedFn(placeholders, array, 1)
# cycle around a couple times
for x in range(0, 100):
i = x % 16
expected = {
"index_placeholder": [i],
"value_placeholder": [[2 * i, 2 * i + 1]]
}
actual = aff()
self.assertEqual(expected, vals_to_list(actual))
def testArrayFeedFnBatchFive(self):
array = np.arange(32).reshape([16, 2])
placeholders = ["index_placeholder", "value_placeholder"]
aff = ff._ArrayFeedFn(placeholders, array, 5)
# cycle around a couple times
for _ in range(0, 101, 2):
aff()
expected = {
"index_placeholder": [15, 0, 1, 2, 3],
"value_placeholder": [[30, 31], [0, 1], [2, 3], [4, 5], [6, 7]]
}
actual = aff()
self.assertEqual(expected, vals_to_list(actual))
def testArrayFeedFnBatchTwoWithOneEpoch(self):
array = np.arange(5) + 10
placeholders = ["index_placeholder", "value_placeholder"]
aff = ff._ArrayFeedFn(placeholders, array, batch_size=2, num_epochs=1)
expected = {
"index_placeholder": [0, 1],
"value_placeholder": [10, 11]
}
actual = aff()
self.assertEqual(expected, vals_to_list(actual))
expected = {
"index_placeholder": [2, 3],
"value_placeholder": [12, 13]
}
actual = aff()
self.assertEqual(expected, vals_to_list(actual))
expected = {
"index_placeholder": [4],
"value_placeholder": [14]
}
actual = aff()
self.assertEqual(expected, vals_to_list(actual))
def testArrayFeedFnBatchOneHundred(self):
array = np.arange(32).reshape([16, 2])
placeholders = ["index_placeholder", "value_placeholder"]
aff = ff._ArrayFeedFn(placeholders, array, 100)
expected = {
"index_placeholder":
list(range(0, 16)) * 6 + list(range(0, 4)),
"value_placeholder":
np.arange(32).reshape([16, 2]).tolist() * 6 +
[[0, 1], [2, 3], [4, 5], [6, 7]]
}
actual = aff()
self.assertEqual(expected, vals_to_list(actual))
def testArrayFeedFnBatchOneHundredWithSmallerArrayAndMultipleEpochs(self):
array = np.arange(2) + 10
placeholders = ["index_placeholder", "value_placeholder"]
aff = ff._ArrayFeedFn(placeholders, array, batch_size=100, num_epochs=2)
expected = {
"index_placeholder": [0, 1, 0, 1],
"value_placeholder": [10, 11, 10, 11],
}
actual = aff()
self.assertEqual(expected, vals_to_list(actual))
def testPandasFeedFnBatchOne(self):
if not HAS_PANDAS:
return
array1 = np.arange(32, 64)
array2 = np.arange(64, 96)
df = pd.DataFrame({"a": array1, "b": array2}, index=np.arange(96, 128))
placeholders = ["index_placeholder", "a_placeholder", "b_placeholder"]
aff = ff._PandasFeedFn(placeholders, df, 1)
# cycle around a couple times
for x in range(0, 100):
i = x % 32
expected = {
"index_placeholder": [i + 96],
"a_placeholder": [32 + i],
"b_placeholder": [64 + i]
}
actual = aff()
self.assertEqual(expected, vals_to_list(actual))
def testPandasFeedFnBatchFive(self):
if not HAS_PANDAS:
return
array1 = np.arange(32, 64)
array2 = np.arange(64, 96)
df = pd.DataFrame({"a": array1, "b": array2}, index=np.arange(96, 128))
placeholders = ["index_placeholder", "a_placeholder", "b_placeholder"]
aff = ff._PandasFeedFn(placeholders, df, 5)
# cycle around a couple times
for _ in range(0, 101, 2):
aff()
expected = {
"index_placeholder": [127, 96, 97, 98, 99],
"a_placeholder": [63, 32, 33, 34, 35],
"b_placeholder": [95, 64, 65, 66, 67]
}
actual = aff()
self.assertEqual(expected, vals_to_list(actual))
def testPandasFeedFnBatchTwoWithOneEpoch(self):
if not HAS_PANDAS:
return
array1 = np.arange(32, 37)
array2 = np.arange(64, 69)
df = pd.DataFrame({"a": array1, "b": array2}, index=np.arange(96, 101))
placeholders = ["index_placeholder", "a_placeholder", "b_placeholder"]
aff = ff._PandasFeedFn(placeholders, df, batch_size=2, num_epochs=1)
expected = {
"index_placeholder": [96, 97],
"a_placeholder": [32, 33],
"b_placeholder": [64, 65]
}
actual = aff()
self.assertEqual(expected, vals_to_list(actual))
expected = {
"index_placeholder": [98, 99],
"a_placeholder": [34, 35],
"b_placeholder": [66, 67]
}
actual = aff()
self.assertEqual(expected, vals_to_list(actual))
expected = {
"index_placeholder": [100],
"a_placeholder": [36],
"b_placeholder": [68]
}
actual = aff()
self.assertEqual(expected, vals_to_list(actual))
def testPandasFeedFnBatchOneHundred(self):
if not HAS_PANDAS:
return
array1 = np.arange(32, 64)
array2 = np.arange(64, 96)
df = pd.DataFrame({"a": array1, "b": array2}, index=np.arange(96, 128))
placeholders = ["index_placeholder", "a_placeholder", "b_placeholder"]
aff = ff._PandasFeedFn(placeholders, df, 100)
expected = {
"index_placeholder": list(range(96, 128)) * 3 + list(range(96, 100)),
"a_placeholder": list(range(32, 64)) * 3 + list(range(32, 36)),
"b_placeholder": list(range(64, 96)) * 3 + list(range(64, 68))
}
actual = aff()
self.assertEqual(expected, vals_to_list(actual))
def testPandasFeedFnBatchOneHundredWithSmallDataArrayAndMultipleEpochs(self):
if not HAS_PANDAS:
return
array1 = np.arange(32, 34)
array2 = np.arange(64, 66)
df = pd.DataFrame({"a": array1, "b": array2}, index=np.arange(96, 98))
placeholders = ["index_placeholder", "a_placeholder", "b_placeholder"]
aff = ff._PandasFeedFn(placeholders, df, batch_size=100, num_epochs=2)
expected = {
"index_placeholder": [96, 97, 96, 97],
"a_placeholder": [32, 33, 32, 33],
"b_placeholder": [64, 65, 64, 65]
}
actual = aff()
self.assertEqual(expected, vals_to_list(actual))
def testOrderedDictNumpyFeedFnBatchTwoWithOneEpoch(self):
a = np.arange(32, 37)
b = np.arange(64, 69)
x = {"a": a, "b": b}
ordered_dict_x = collections.OrderedDict(
sorted(x.items(), key=lambda t: t[0]))
placeholders = ["index_placeholder", "a_placeholder", "b_placeholder"]
aff = ff._OrderedDictNumpyFeedFn(
placeholders, ordered_dict_x, batch_size=2, num_epochs=1)
expected = {
"index_placeholder": [0, 1],
"a_placeholder": [32, 33],
"b_placeholder": [64, 65]
}
actual = aff()
self.assertEqual(expected, vals_to_list(actual))
expected = {
"index_placeholder": [2, 3],
"a_placeholder": [34, 35],
"b_placeholder": [66, 67]
}
actual = aff()
self.assertEqual(expected, vals_to_list(actual))
expected = {
"index_placeholder": [4],
"a_placeholder": [36],
"b_placeholder": [68]
}
actual = aff()
self.assertEqual(expected, vals_to_list(actual))
def testOrderedDictNumpyFeedFnLargeBatchWithSmallArrayAndMultipleEpochs(self):
a = np.arange(32, 34)
b = np.arange(64, 66)
x = {"a": a, "b": b}
ordered_dict_x = collections.OrderedDict(
sorted(x.items(), key=lambda t: t[0]))
placeholders = ["index_placeholder", "a_placeholder", "b_placeholder"]
aff = ff._OrderedDictNumpyFeedFn(
placeholders, ordered_dict_x, batch_size=100, num_epochs=2)
expected = {
"index_placeholder": [0, 1, 0, 1],
"a_placeholder": [32, 33, 32, 33],
"b_placeholder": [64, 65, 64, 65]
}
actual = aff()
self.assertEqual(expected, vals_to_list(actual))
if __name__ == "__main__":
test.main()
| apache-2.0 |
booya-at/paraBEM | examples/plots/panel_doublet_far.py | 2 | 1682 | import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import parabem
from parabem.pan3d import doublet_3_0_n0
from parabem.utils import check_path
pnt1 = parabem.PanelVector3(-0.5, -0.5, 0)
pnt2 = parabem.PanelVector3(0.5, -0.5, 0)
pnt3 = parabem.PanelVector3(0.5, 0.5, 0)
pnt4 = parabem.PanelVector3(-0.5, 0.5, 0)
source = parabem.Panel3([pnt1, pnt2, pnt3, pnt4])
fig = plt.figure()
x = np.arange(-4, 4, 0.01)
y = []
for xi in x:
target1 = parabem.PanelVector3(xi, 0, 0.0001)
target2 = parabem.PanelVector3(xi, 0, 0.5)
target3 = parabem.PanelVector3(xi, 0, 1.)
val1 = doublet_3_0_n0(target1, source)
val2 = doublet_3_0_n0(target2, source)
val3 = doublet_3_0_n0(target3, source)
y.append([val1, val2, val3])
ax1 = fig.add_subplot(131)
axes = fig.gca().set_ylim([-2, 5])
ax1.plot(x, y)
y = []
for xi in x:
target1 = parabem.Vector3(0, xi, 0.0001)
target2 = parabem.Vector3(0, xi, 0.5)
target3 = parabem.Vector3(0, xi, 1)
val1 = doublet_3_0_n0(target1, source)
val2 = doublet_3_0_n0(target2, source)
val3 = doublet_3_0_n0(target3, source)
y.append([val1, val2, val3])
ax2 = fig.add_subplot(132)
axes = fig.gca().set_ylim([-2, 5])
ax2.plot(x, y)
y = []
for xi in x:
target1 = parabem.Vector3(0, 0, xi)
target2 = parabem.Vector3(0.5, 0, xi)
target3 = parabem.Vector3(1, 0, xi)
val1 = doublet_3_0_n0(target1, source)
val2 = doublet_3_0_n0(target2, source)
val3 = doublet_3_0_n0(target3, source)
y.append([val1, val2, val3])
ax3 = fig.add_subplot(133)
axes = fig.gca().set_ylim([-2, 5])
ax3.plot(x, y)
plt.savefig(check_path("results/3d/doublet_far.png"))
| gpl-3.0 |
jenhantao/nuclearReceptorOverlap | makeGradientPositionHeatMaps.py | 1 | 3840 | # given a group summary file creates a heatmap showing the strength of each factors peak score in each merged region
### imports ###
import sys
import math
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
from matplotlib import rcParams
import matplotlib.cm as cm
rcParams.update({'figure.autolayout': True})
# inputs: path to groups summary file, path to a peaks file, and base path for output files
# outputs: creates plot image files in outPath
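# hypothetical invocation (paths are placeholders):
#   python makeGradientPositionHeatMaps.py group_summary.tsv peaks.txt results/run1
# which writes results/run1_positionHeatmap_<factor>.png and
# results/run1_sorted_summary_<factor>.tsv for every factor column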
def plotScores(summaryPath, inputPath, outPath):
with open(inputPath) as f:
data = f.readlines()
start = 0
for line in data:
if line[0] == "#":
start += 1
ids = set()
for line in data[start:]:
tokens = line.strip().split("\t")
id = tokens[0]
ids.add(id)
with open(summaryPath) as f:
data = f.readlines()
factors = data[0].strip().split("\t")[4:]
mergedRegions = []
lineDict = {}
otherIDs = set()
for line in data[1:]:
tokens = line.strip().split("\t")
if not "random" in tokens[3]:
peakScores = []
for peakScore in tokens[4:]:
if "," in peakScore:
avgScore = np.sum(map(float,peakScore.split(",")))
peakScores.append(avgScore)
else:
peakScores.append(float(peakScore))
chromosome = tokens[3][3:tokens[3].index(":")]
if chromosome.lower() == "x":
chromosome = 20
elif chromosome.lower() == "y":
chromosome = 21
elif chromosome.lower() == "mt":
chromosome = 22
else:
chromosome = int(chromosome)
start = int(tokens[3][tokens[3].index(":")+1:tokens[3].index("-")])
end = int(tokens[3][tokens[3].index("-")+1:])
id =tokens[2]
lineDict[id] = line
if id in ids:
mergedRegions.append((id,chromosome, start, end, peakScores))
# sort by each factor and produce one heatmap for each
for plotNumber in range(len(factors)):
mergedRegions = sorted(mergedRegions, key=lambda x: x[4][plotNumber])
chromBreaks = [] # marks the breaks between chromosomes
chromosomes = [] # chromosome labels
scoreMatrix = np.zeros((len(factors),len(mergedRegions)))
scoreArray = []
sortedFile = open(outPath+"_sorted_summary_"+factors[plotNumber]+".tsv", "w")
sortedFile.write(data[0])
for i in range(len(mergedRegions)):
reg = mergedRegions[i]
peakScores = reg[-1]
id = reg[0]
chrom = reg[1]
if not chrom in chromosomes:
chromosomes.append(chrom)
chromBreaks.append(i)
sortedFile.write(lineDict[id])
for factorNumber in range(len(peakScores)):
if True:
if peakScores[factorNumber] != 0.0:
scoreMatrix[factorNumber][i] = math.log(peakScores[factorNumber])
scoreArray.append(math.log(peakScores[factorNumber]))
else:
scoreMatrix[factorNumber][i] = peakScores[factorNumber]
scoreArray.append(peakScores[factorNumber])
else:
scoreMatrix[factorNumber][i] = peakScores[factorNumber]
scoreArray.append(peakScores[factorNumber])
# remove zero columns
toPlotFactors = []
toPlotScores =[]
for i in range(len(factors)):
if np.sum(scoreMatrix[i]) > 0.0:
toPlotFactors.append(factors[i])
toPlotScores.append(scoreMatrix[i])
toPlotScores = np.array(toPlotScores)
fig, ax = plt.subplots()
plt.pcolor(toPlotScores, cmap=cm.Blues)
plt.colorbar()
# fix ticks and labels
ax.set_yticks(np.arange(len(toPlotFactors))+0.5, minor=False)
ax.set_xticks(chromBreaks)
ax.set_xticklabels([])
ax.set_yticklabels(toPlotFactors, minor=False)
plt.title("Log Peak Scores per Merged Region Per Factor")
# save files
plt.savefig(outPath+ "_positionHeatmap_"+factors[plotNumber]+".png", dpi=400)
plt.close()
sortedFile.close()
if __name__ == "__main__":
summaryPath = sys.argv[1]
inputPath = sys.argv[2]
outPath = sys.argv[3]
plotScores(summaryPath, inputPath, outPath)
| mit |
lcnature/brainiak | examples/fcma/voxel_selection.py | 5 | 4504 | # Copyright 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from brainiak.fcma.voxelselector import VoxelSelector
from brainiak.fcma.preprocessing import prepare_fcma_data
from brainiak import io
from sklearn import svm
import sys
from mpi4py import MPI
import logging
import numpy as np
import nibabel as nib
format = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
# if want to output log to a file instead of outputting log to the console,
# replace "stream=sys.stdout" with "filename='fcma.log'"
logging.basicConfig(level=logging.INFO, format=format, stream=sys.stdout)
logger = logging.getLogger(__name__)
"""
example running command in run_voxel_selection.sh
"""
if __name__ == '__main__':
if MPI.COMM_WORLD.Get_rank()==0:
logger.info(
            'program starts in %d process(es)' %
MPI.COMM_WORLD.Get_size()
)
if len(sys.argv) != 7:
        logger.error('the number of input arguments is not correct')
sys.exit(1)
data_dir = sys.argv[1]
suffix = sys.argv[2]
mask_file = sys.argv[3]
epoch_file = sys.argv[4]
images = io.load_images_from_dir(data_dir, suffix=suffix)
mask = io.load_boolean_mask(mask_file)
conditions = io.load_labels(epoch_file)
raw_data, _, labels = prepare_fcma_data(images, conditions, mask)
# setting the random argument produces random voxel selection results
# for non-parametric statistical analysis.
# There are three random options:
# RandomType.NORANDOM is the default
# RandomType.REPRODUCIBLE permutes the voxels in the same way every run
# RandomType.UNREPRODUCIBLE permutes the voxels differently across runs
# example:
# from brainiak.fcma.preprocessing import RandomType
# raw_data, _, labels = prepare_fcma_data(images, conditions, mask,
# random=RandomType.REPRODUCIBLE)
# if providing two masks, just append the second mask as the last input argument
# and specify raw_data2
# example:
# images = io.load_images_from_dir(data_dir, extension)
# mask2 = io.load_boolean_mask('face_scene/mask.nii.gz')
# raw_data, raw_data2, labels = prepare_fcma_data(images, conditions, mask,
# mask2)
epochs_per_subj = int(sys.argv[5])
num_subjs = int(sys.argv[6])
# the following line is an example to leaving a subject out
#vs = VoxelSelector(labels[0:204], epochs_per_subj, num_subjs-1, raw_data[0:204])
# if using all subjects
vs = VoxelSelector(labels, epochs_per_subj, num_subjs, raw_data)
# if providing two masks, just append raw_data2 as the last input argument
#vs = VoxelSelector(labels, epochs_per_subj, num_subjs, raw_data, raw_data2=raw_data2)
# for cross validation, use SVM with precomputed kernel
clf = svm.SVC(kernel='precomputed', shrinking=False, C=10)
results = vs.run(clf)
# this output is just for result checking
if MPI.COMM_WORLD.Get_rank()==0:
logger.info(
'correlation-based voxel selection is done'
)
#print(results[0:100])
mask_img = nib.load(mask_file)
mask = mask_img.get_data().astype(np.bool)
score_volume = np.zeros(mask.shape, dtype=np.float32)
score = np.zeros(len(results), dtype=np.float32)
seq_volume = np.zeros(mask.shape, dtype=np.int)
seq = np.zeros(len(results), dtype=np.int)
with open('result_list.txt', 'w') as fp:
for idx, tuple in enumerate(results):
fp.write(str(tuple[0]) + ' ' + str(tuple[1]) + '\n')
score[tuple[0]] = tuple[1]
seq[tuple[0]] = idx
score_volume[mask] = score
seq_volume[mask] = seq
io.save_as_nifti_file(score_volume, mask_img.affine,
'result_score.nii.gz')
io.save_as_nifti_file(seq_volume, mask_img.affine,
'result_seq.nii.gz')
| apache-2.0 |
chubbymaggie/datasketch | benchmark/lshensemble_benchmark.py | 2 | 10558 | """
Benchmark dataset from:
https://github.com/ekzhu/set-similarity-search-benchmark.
Use "Canada US and UK Open Data":
Indexed sets: canada_us_uk_opendata.inp.gz
Query sets (10 stratified samples from 10 percentile intervals):
Size from 10 - 1k: canada_us_uk_opendata_queries_1k.inp.gz
Size from 10 - 10k: canada_us_uk_opendata_queries_10k.inp.gz
Size from 10 - 100k: canada_us_uk_opendata_queries_100k.inp.gz
"""
import time, argparse, sys, json
import numpy as np
import scipy.stats
import random
import collections
import gzip
import os
import pickle
import pandas as pd
from SetSimilaritySearch import SearchIndex
import farmhash
from datasketch import MinHashLSHEnsemble, MinHash
def _hash_32(d):
return farmhash.hash32(d)
def bootstrap_sets(sets_file, sample_ratio, num_perms, skip=1,
pad_for_asym=False):
print("Creating sets...")
sets = collections.deque([])
random.seed(41)
with gzip.open(sets_file, "rt") as f:
for i, line in enumerate(f):
if i < skip:
# Skip lines
continue
if random.random() > sample_ratio:
continue
s = np.array([int(d) for d in \
line.strip().split("\t")[1].split(",")])
sets.append(s)
sys.stdout.write("\rRead {} sets".format(len(sets)))
sys.stdout.write("\n")
sets = list(sets)
keys = list(range(len(sets)))
# Generate paddings for asym.
max_size = max(len(s) for s in sets)
paddings = dict()
if pad_for_asym:
padding_sizes = sorted(list(set([max_size-len(s) for s in sets])))
for num_perm in num_perms:
paddings[num_perm] = dict()
for i, padding_size in enumerate(padding_sizes):
if i == 0:
prev_size = 0
pad = MinHash(num_perm, hashfunc=_hash_32)
else:
prev_size = padding_sizes[i-1]
pad = paddings[num_perm][prev_size].copy()
for w in range(prev_size, padding_size):
pad.update(str(w)+"_tmZZRe8DE23s")
paddings[num_perm][padding_size] = pad
# Generate minhash
print("Creating MinHash...")
minhashes = dict()
for num_perm in num_perms:
print("Using num_parm = {}".format(num_perm))
ms = []
for s in sets:
m = MinHash(num_perm, hashfunc=_hash_32)
for word in s:
m.update(str(word))
if pad_for_asym:
# Add padding to the minhash
m.merge(paddings[num_perm][max_size-len(s)])
ms.append(m)
sys.stdout.write("\rMinhashed {} sets".format(len(ms)))
sys.stdout.write("\n")
minhashes[num_perm] = ms
return (minhashes, sets, keys)
def benchmark_lshensemble(threshold, num_perm, num_part, m, storage_config,
index_data, query_data):
print("Building LSH Ensemble index")
(minhashes, indexed_sets, keys) = index_data
lsh = MinHashLSHEnsemble(threshold=threshold, num_perm=num_perm,
num_part=num_part, m=m, storage_config=storage_config)
lsh.index((key, minhash, len(s))
for key, minhash, s in \
zip(keys, minhashes[num_perm], indexed_sets))
print("Querying")
(minhashes, sets, keys) = query_data
probe_times = []
process_times = []
results = []
for qs, minhash in zip(sets, minhashes[num_perm]):
# Record probing time
start = time.perf_counter()
result = list(lsh.query(minhash, len(qs)))
probe_times.append(time.perf_counter() - start)
# Record post processing time.
start = time.perf_counter()
[_compute_containment(qs, indexed_sets[key]) for key in result]
process_times.append(time.perf_counter() - start)
results.append(result)
sys.stdout.write("\rQueried {} sets".format(len(results)))
sys.stdout.write("\n")
return results, probe_times, process_times
def benchmark_ground_truth(threshold, index, query_data):
(_, query_sets, _) = query_data
times = []
results = []
for q in query_sets:
start = time.perf_counter()
result = [key for key, _ in index.query(q)]
duration = time.perf_counter() - start
times.append(duration)
results.append(result)
sys.stdout.write("\rQueried {} sets".format(len(results)))
sys.stdout.write("\n")
return results, times
def _compute_containment(x, y):
if len(x) == 0 or len(y) == 0:
return 0.0
intersection = len(np.intersect1d(x, y, assume_unique=True))
return float(intersection) / float(len(x))
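# For example, the containment of x = [1, 2, 3] in y = [2, 3, 4, 5] is
# |{2, 3}| / |{1, 2, 3}| = 2/3, so _compute_containment(np.array([1, 2, 3]),
# np.array([2, 3, 4, 5])) returns ~0.667 (inputs are assumed unique, as in the
# indexed and query sets above).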
levels = {
"test": {
"thresholds": [1.0,],
"num_parts": [4,],
"num_perms": [32,],
"m": 2,
},
"lite": {
"thresholds": [0.5, 0.75, 1.0],
"num_parts": [8, 16],
"num_perms": [32, 64],
"m": 8,
},
"medium": {
"thresholds": [0.5, 0.6, 0.7, 0.8, 0.9, 1.0],
"num_parts": [8, 16, 32],
"num_perms": [32, 128, 224],
"m": 8,
},
"complete": {
"thresholds": [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0],
"num_parts": [8, 16, 32],
"num_perms": [32, 64, 96, 128, 160, 192, 224, 256],
"m": 8,
},
}
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Run LSH Ensemble benchmark using data sets obtained "
"from https://github.com/ekzhu/set-similarity-search-benchmarks.")
parser.add_argument("--indexed-sets", type=str, required=True,
help="Input indexed set file (gzipped), each line is a set: "
"<set_size> <1>,<2>,<3>..., where each <?> is an element.")
parser.add_argument("--query-sets", type=str, required=True,
help="Input query set file (gzipped), each line is a set: "
"<set_size> <1>,<2>,<3>..., where each <?> is an element.")
parser.add_argument("--query-results", type=str,
default="lshensemble_benchmark_query_results.csv")
parser.add_argument("--ground-truth-results", type=str,
default="lshensemble_benchmark_ground_truth_results.csv")
parser.add_argument("--indexed-sets-sample-ratio", type=float, default=0.1)
parser.add_argument("--level", type=str, choices=levels.keys(),
default="complete")
parser.add_argument("--skip-ground-truth", action="store_true")
parser.add_argument("--use-asym-minhash", action="store_true")
parser.add_argument("--use-redis", action="store_true")
parser.add_argument("--redis-host", type=str, default="localhost")
parser.add_argument("--redis-port", type=int, default=6379)
args = parser.parse_args(sys.argv[1:])
level = levels[args.level]
index_data, query_data = None, None
index_data_cache = "{}.pickle".format(args.indexed_sets)
query_data_cache = "{}.pickle".format(args.query_sets)
if os.path.exists(index_data_cache):
print("Using cached indexed sets {}".format(index_data_cache))
with open(index_data_cache, "rb") as d:
index_data = pickle.load(d)
else:
print("Using indexed sets {}".format(args.indexed_sets))
index_data = bootstrap_sets(args.indexed_sets,
args.indexed_sets_sample_ratio, num_perms=level["num_perms"],
pad_for_asym=args.use_asym_minhash)
with open(index_data_cache, "wb") as d:
pickle.dump(index_data, d)
if os.path.exists(query_data_cache):
print("Using cached query sets {}".format(query_data_cache))
with open(query_data_cache, "rb") as d:
query_data = pickle.load(d)
else:
print("Using query sets {}".format(args.query_sets))
query_data = bootstrap_sets(args.query_sets, 1.0,
num_perms=level["num_perms"], skip=0)
with open(query_data_cache, "wb") as d:
pickle.dump(query_data, d)
if not args.skip_ground_truth:
rows = []
# Build search index separately, only works for containment.
print("Building search index...")
index = SearchIndex(index_data[1], similarity_func_name="containment",
similarity_threshold=0.1)
for threshold in level["thresholds"]:
index.similarity_threshold = threshold
print("Running ground truth benchmark threshold = {}".format(threshold))
ground_truth_results, ground_truth_times = \
benchmark_ground_truth(threshold, index, query_data)
for t, r, query_set, query_key in zip(ground_truth_times,
ground_truth_results, query_data[1], query_data[2]):
rows.append((query_key, len(query_set), threshold, t,
",".join(str(k) for k in r)))
df_groundtruth = pd.DataFrame.from_records(rows,
columns=["query_key", "query_size", "threshold",
"query_time", "results"])
df_groundtruth.to_csv(args.ground_truth_results)
storage_config = {"type": "dict"}
if args.use_redis:
storage_config = {
"type": "redis",
"redis": {
"host": args.redis_host,
"port": args.redis_port,
},
}
rows = []
for threshold in level["thresholds"]:
for num_part in level["num_parts"]:
for num_perm in level["num_perms"]:
print("Running LSH Ensemble benchmark "
"threshold = {}, num_part = {}, num_perm = {}".format(
threshold, num_part, num_perm))
results, probe_times, process_times = benchmark_lshensemble(
threshold, num_perm, num_part, level["m"], storage_config,
index_data, query_data)
for probe_time, process_time, result, query_set, query_key in zip(\
probe_times, process_times, results, \
query_data[1], query_data[2]):
rows.append((query_key, len(query_set), threshold,
num_part, num_perm, probe_time, process_time,
",".join(str(k) for k in result)))
df = pd.DataFrame.from_records(rows,
columns=["query_key", "query_size", "threshold", "num_part",
"num_perm", "probe_time", "process_time", "results"])
df.to_csv(args.query_results)
| mit |
chrsrds/scikit-learn | sklearn/__check_build/__init__.py | 57 | 1681 | """ Module to give helpful messages to the user that did not
compile scikit-learn properly.
"""
import os
INPLACE_MSG = """
It appears that you are importing a local scikit-learn source tree. For
this, you need to have an inplace install. Maybe you are in the source
directory and you need to try from another location."""
STANDARD_MSG = """
If you have used an installer, please check that it is suited for your
Python version, your operating system and your platform."""
def raise_build_error(e):
# Raise a comprehensible error and list the contents of the
# directory to help debugging on the mailing list.
local_dir = os.path.split(__file__)[0]
msg = STANDARD_MSG
if local_dir == "sklearn/__check_build":
# Picking up the local install: this will work only if the
# install is an 'inplace build'
msg = INPLACE_MSG
dir_content = list()
for i, filename in enumerate(os.listdir(local_dir)):
if ((i + 1) % 3):
dir_content.append(filename.ljust(26))
else:
dir_content.append(filename + '\n')
raise ImportError("""%s
___________________________________________________________________________
Contents of %s:
%s
___________________________________________________________________________
It seems that scikit-learn has not been built correctly.
If you have installed scikit-learn from source, please do not forget
to build the package before using it: run `python setup.py install` or
`make` in the source directory.
%s""" % (e, local_dir, ''.join(dir_content).strip(), msg))
try:
from ._check_build import check_build # noqa
except ImportError as e:
raise_build_error(e)
| bsd-3-clause |
vince8290/dana | dAna_v3.py | 1 | 67678 | # -*- coding: utf-8 -*-
"""
Created on Wed Jul 29 16:59:00 2015
@author: Vincent Chochois / u5040252
"""
from ui_files.main3 import *
from ui_files.events import *
from ui_files.samples import *
from PyQt4.QtGui import *
from PyQt4.QtCore import * # includes QTimer, etc.
import pyqtgraph as pg # for access to some pyqtgraph constants, widgets, etc.
import re
# from scipy import stats
import pandas as pd
import numpy as np
# from functools import *
from collections import *
from matplotlib import pyplot as plt
from matplotlib import style
import threading
from win32com.shell import shell, shellcon
import sys, glob, os, time
import yaml
class pyMS(QMainWindow, Ui_MainWindow):
def __init__(self, parent=None):
QMainWindow.__init__(self, parent)
        self.setupUi(parent) # Required
# configuration
self.cfgfolder = os.path.join(os.environ['USERPROFILE'], 'dAna')
self.cfgfile = os.path.join(self.cfgfolder, 'default.yaml')
self.cfg_load()
# initialize variables
self.today = time.strftime('%Y%m%d')
self.selectedfiles = []
self.folderlist = defaultdict(dict)
self.filelist = defaultdict(dict)
self.previous_cols = []
self.datafolder = os.path.join(r"C:\Users\\U5040252\Cloudstation\Projects\Mass_Spec_Data", time.strftime("%Y"))
# connect actions
self.actionOpen_datafile.triggered.connect(self.select_folder)
self.action_events.triggered.connect(self.events_manager)
self.action_samples.triggered.connect(self.samples_manager)
self.treeWidget.itemChanged.connect(self.treeWidget_changed)
self.treeWidget.itemClicked.connect(self.treeWidget_select)
self.action_update_samplelist.triggered.connect(self.chlorophylls)
self.action_getpeaks.triggered.connect(self.export_results)
# connect button clicks
self.update_btn.clicked.connect(self.update)
self.export_btn.clicked.connect(self.export)
self.roi_btn.clicked.connect(self.add_roi)
self.addfolder_btn.clicked.connect(self.select_folder)
self.removefolder_btn.clicked.connect(self.remove_folder)
# clear lists
self.treeWidget.clear()
self.columnlist.clear()
# get list of selected files
self.selectedfiles = []
# get list of selected columns
self.selectedcolumns = list(map(lambda x: x.text(), self.columnlist.selectedItems()))
self.graph_rawdata.addLegend(size=None, offset=(50, 50))
self.legend = self.graph_rawdata.plotItem.legend
# if self.datafolder == None or not os.path.isdir(self.datafolder):
self.select_folder()
self.__init__graphs()
def __init__graphs(self):
# general graph config
        aa = self.aa.currentText() == "ON"
pg.setConfigOptions(antialias=aa)
self.legendLabelStyle = {'color': '#000', 'size': self.legendfontsize.currentText() + 'pt', 'bold': False, 'italic': False}
self.titleStyle = {'color': '#000', 'size': self.titlefontsize.currentText()+ 'pt', 'bold': True, 'italic': False}
self.labelStyle = {'color': '#000', 'font-size': self.axislabelsize.currentText() + 'pt', 'bold': False, 'italic': False}
self.tickLabelStyle = {'color': '#000', 'size': self.ticklabelfontsize.currentText() + 'pt', 'bold': False, 'italic': False}
self.gridColors = [120, 120, 120]
# set fonts
font = 'Arial'
self.axlab_font = {'family': font, 'weight': 'normal', 'size': self.axislabelsize.currentText()}
self.ticklab_font = {'family': font, 'weight': 'normal', 'size': self.ticklabelfontsize.currentText() }
self.title_font = {'family': font, 'weight': 'bold', 'size': self.titlefontsize.currentText()}
self.legend_font = {'family': font, 'weight': 'bold', 'size': self.legendfontsize.currentText() }
self.annotation_font = {'family': font, 'weight': 'bold', 'size': self.annotationfontsize.currentText()}
self.tickfont = QFont(font, int(self.titlefontsize.currentText()))
# clear graphs
self.graph_rawdata.clear()
self.graph_rawdata.plotItem.setTitle("Empty")
self.roi = None
self.remove_roi()
# Create empty curves
self.courbes={}
# RAWDATA GRAPH
self.graph_rawdata.setBackgroundBrush(QBrush(QColor(Qt.white)))
        self.graph_rawdata.showGrid(x=True, y=True) # show the grid
        self.graph_rawdata.getAxis('bottom').setPen(pg.mkPen(*self.gridColors)) # axis + grid color
        self.graph_rawdata.getAxis('left').setPen(pg.mkPen(*self.gridColors)) # axis + grid color
        self.graph_rawdata.getAxis('bottom').setLabel('Time', units='sec', **self.labelStyle) # axis label
        self.graph_rawdata.getAxis('left').setLabel('', units='', **self.labelStyle) # axis label
self.graph_rawdata.setMouseEnabled(x=True, y=True)
self.graph_rawdata.getAxis('left').setStyle(tickFont=self.tickfont)
self.graph_rawdata.getAxis('bottom').setStyle(tickFont=self.tickfont)
if len(self.selectedfiles) == 1:
self.init_graphs_onefile()
else:
self.init_graphs_multifile()
def cfg_generate(self, f):
# check if config folder and config file exist
if not os.path.isdir(self.cfgfolder):
os.makedirs(os.path.join(self.cfgfolder))
# generate default values
config = dict(
encoding="ISO-8859-1",
datafolder=self.datafolder,
eventcolors={"CUSTOM": [0, 0, 0],
"BIC": [0, 0, 0],
"CELLS": [0, 170, 0],
"LIGHT ON": [220, 220, 0],
"LIGHT OFF": [220, 220, 0],
"AZ": [170, 0, 0],
"EZ": [0, 0, 170]},
figWidth=20,
figHeight=12,
title=True,
xlabel=True,
ylabel=True,
style='vc',
outputfolder=shell.SHGetFolderPath(0, shellcon.CSIDL_MYPICTURES, None, 0),
color_palette=[(0, 0, 0, 255),
(240, 130, 0, 255),
(66, 160, 255, 255),
(0, 170, 115, 255),
(250, 240, 60, 255),
(0, 110, 190, 255),
(220, 100, 0, 255),
(210, 110, 160, 255),
(70, 120, 200, 255),
(220, 60, 60, 255),
(70, 220, 110, 255),
(255, 190, 70, 255),
(100, 0, 150, 255),
(230, 80, 40, 255),
(166, 206, 227, 255),
(178, 223, 138, 255),
(51, 154, 153, 255),
(253, 191, 111, 255),
(202, 178, 214, 255),
(00, 200, 100, 255),
(30, 120, 210, 255),
(50, 160, 50, 255),
(10, 40, 30, 255),
(255, 127, 0, 255),
(106, 61, 154, 255),
(177, 89, 40, 255),
(166, 206, 227, 255),
(178, 223, 138, 255),
(251, 154, 153, 255),
(253, 191, 111, 255),
(202, 178, 214, 255),
(200, 200, 100, 255),
(70, 120, 230, 255),
(220, 60, 60, 255),
(70, 220, 110, 255),
(255, 190, 70, 255),
(100, 0, 150, 255),
(230, 80, 40, 255),
(166, 206, 227, 255),
(178, 223, 138, 255),
(51, 154, 153, 255),
(253, 191, 111, 255),
(202, 178, 214, 255),
(00, 200, 100, 255),
(30, 120, 210, 255),
(50, 160, 50, 255),
(10, 40, 30, 255),
(255, 127, 0, 255),
(106, 61, 154, 255),
(177, 89, 40, 255),
(166, 206, 227, 255),
(178, 223, 138, 255),
(251, 154, 153, 255),
(253, 191, 111, 255),
(202, 178, 214, 255),
(200, 200, 100, 255),
(70, 120, 230, 255),
(220, 60, 60, 255),
(70, 220, 110, 255),
(255, 190, 70, 255),
(100, 0, 150, 255),
(230, 80, 40, 255),
(166, 206, 227, 255),
(178, 223, 138, 255),
(51, 154, 153, 255),
(253, 191, 111, 255),
(202, 178, 214, 255),
(00, 200, 100, 255),
(30, 120, 210, 255),
(50, 160, 50, 255),
(10, 40, 30, 255),
(255, 127, 0, 255),
(106, 61, 154, 255),
(177, 89, 40, 255),
(166, 206, 227, 255),
(178, 223, 138, 255),
(251, 154, 153, 255),
(253, 191, 111, 255),
(202, 178, 214, 255),
(200, 200, 100, 255)],
color_columns={"Mass32": (250, 0, 0, 255),
"Mass40": (230, 100, 0, 255),
"Mass44": (0, 250, 0, 255),
"Mass45": (50, 200, 250, 255),
"Mass46": (250, 150, 250, 255),
"Mass47": (0, 100, 250, 255),
"Mass49": (100, 0, 250, 255),
"totalCO2": (0, 0, 0, 255),
"logE49": (100, 100, 100, 255),
"O2evol": (250, 0, 0, 255),
"d32dt": (250, 150, 100, 255),
"d32dt_d": (250, 100, 50, 255),
"d32dt_cd": (250, 0, 0, 255),
"d40dt": (230, 180, 70, 255),
"d44dt": (70, 250, 70, 255),
"d45dt": (120, 240, 250, 255),
"d46dt": (250, 170, 250, 255),
"d47dt": (70, 150, 250, 255),
"d49dt": (140, 70, 250, 255),
"d40dt_d": (200, 150, 0, 255),
"d44dt_d": (0, 250, 0, 255),
"d45dt_d": (50, 200, 250, 255),
"d46dt_d": (250, 150, 250, 255),
"d47dt_d": (0, 100, 250, 255),
"d49dt_d": (100, 0, 250, 255),
"dtotalCO2dt": (120, 120, 120, 255),
"dtotalCO2dt_d": (0, 0, 0, 255),
"logE47": (100, 100, 100, 255),
"enrichrate47": (0, 100, 250, 255),
"enrichrate49": (100, 0, 250),
"d32dt_chl": (250, 0, 0, 255),
"d40dt_chl": (200, 150, 0, 255),
"d44dt_chl": (0, 250, 0, 255),
"d45dt_chl": (50, 200, 250, 255),
"d46dt_chl": (250, 150, 250, 255),
"d47dt_chl": (0, 100, 250, 255),
"d49dt_chl": (100, 0, 250, 255),
"dtotalCO2dt_chl": (120, 120, 120, 255),
"AF": (227, 48, 215, 255),
"dAFdt": (227, 48, 215, 255)},
units={'Mass32': 'µmol/L',
'Mass40': 'V',
'Mass44': 'µmol/L',
'Mass45': 'µmol/L',
'Mass46': 'µmol/L',
'Mass47': 'µmol/L',
'Mass49': 'µmol/L',
'totalCO2': 'µmol/L',
'd40dt': 'V/s',
'd44dt': 'µmol/L/s',
'd45dt': 'µmol/L/s',
'd46dt': 'µmol/L/s',
'd47dt': 'µmol/L/s',
'd49dt': 'µmol/L/s',
'dtotalCO2dt': 'µmol/L/s',
'd32dt': 'µmol/L/s',
'd32dt_c': 'µmol/L/s',
'd32dt_cd': 'µmol/L/s',
'enrichrate47': 's-1',
'enrichrate49': 's-1',
'dAFdt': 's-1',
'rel_d49dt': 's-1',
'rel_d45dt': 's-1',
'logE47': '',
'logE49': '',
'AF': '',
'd40dt_d': 'V/s',
'd44dt_d': 'µmol/L/s',
'd45dt_d': 'µmol/L/s',
'd46dt_d': 'µmol/L/s',
'd47dt_d': 'µmol/L/s',
'd49dt_d': 'µmol/L/s',
'dtotalCO2dt_d': 'µmol/L/s',
'd32dt_d': 'µmol/L/s',
'd44dt_chl': 'µmol/s/mg chl',
'd45dt_chl': 'µmol/s/mg chl',
'd46dt_chl': 'µmol/s/mg chl',
'd47dt_chl': 'µmol/s/mg chl',
'd49dt_chl': 'µmol/s/mg chl',
'dtotalCO2dt_chl': 'µmol/s/mg chl',
'd32dt_chl': 'µmol/s/mg chl',
'd32dt_d_chl': 'µmol/s/mg chl',
'd32dt_cd_chl': 'µmol/s/mg chl',
'enrichrate47_chl': '/s/mg chl',
'enrichrate49_chl': '/s/mg chl'},
ev_align="CELLS",
linewidth='2.5',
annlinewidth='4.0',
titlefontsize='24',
legendfontsize='16',
axislabelsize='18',
ticklabelfontsize='18',
annotationfontsize='18',
aa="ON",
figdpi='600',
figformatlist="PNG+SVG",
defaultname="untitled"
)
# save config in cfgfile
with open(os.path.join(self.cfgfolder, str(f) + '.yaml'), 'w+') as outfile:
outfile.write(yaml.dump(config))
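        # The config is persisted as a flat YAML mapping of the keys above
        # (e.g. encoding, datafolder, eventcolors, color_palette, units, ...);
        # cfg_load below reads the same keys back with yaml.load.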
def cfg_load(self):
if not os.path.isdir(self.cfgfolder) or not os.path.isfile(self.cfgfile):
self.cfg_generate("default")
if os.path.isfile(os.path.join(self.cfgfolder, 'last.yaml')):
self.cfgfile = os.path.join(self.cfgfolder, 'last.yaml')
with open(self.cfgfile, 'r') as f:
try:
cfg = yaml.load(f)
# general
self.encoding = cfg['encoding']
self.datafolder = cfg['datafolder']
self.outputfolder = cfg['outputfolder']
# colors
self.eventcolors = cfg['eventcolors']
self.color_palette = cfg['color_palette']
self.color_columns = cfg['color_columns']
# graphs
self.figWidth = cfg['figWidth']
self.figHeight = cfg['figHeight']
self.title = cfg['title']
self.xlabel = cfg['xlabel']
self.ylabel = cfg['ylabel']
self.style = cfg['style']
self.units = cfg['units']
self.ev_align.setCurrentIndex(self.ev_align.findText(cfg['ev_align']))
self.linewidth.setCurrentIndex(self.linewidth.findText(cfg['linewidth']))
self.annlinewidth.setCurrentIndex(self.annlinewidth.findText(cfg['annlinewidth']))
self.titlefontsize.setCurrentIndex(self.titlefontsize.findText(cfg['titlefontsize']))
self.legendfontsize.setCurrentIndex(self.legendfontsize.findText(cfg['legendfontsize']))
self.axislabelsize.setCurrentIndex(self.axislabelsize.findText(cfg['axislabelsize']))
self.ticklabelfontsize.setCurrentIndex(self.ticklabelfontsize.findText(cfg['ticklabelfontsize']))
self.annotationfontsize.setCurrentIndex(self.annotationfontsize.findText(cfg['annotationfontsize']))
self.aa.setCurrentIndex(self.aa.findText(cfg['aa']))
self.figdpi.setCurrentIndex(self.figdpi.findText(cfg['figdpi']))
self.figformatlist.setCurrentIndex(self.figformatlist.findText(cfg['figformatlist']))
self.defaultname.setText(cfg['defaultname'])
except yaml.YAMLError as exc:
print(exc)
def cfg_update(self):
if not os.path.isdir(self.cfgfolder):
self.cfg_generate("default")
self.cfgfile=os.path.join(self.cfgfolder, "last.yaml")
config = dict(
encoding=self.encoding,
datafolder=self.datafolder,
eventcolors=self.eventcolors,
figWidth=str(self.figWidth),
figHeight=str(self.figHeight),
title=self.title,
xlabel=self.xlabel,
ylabel=self.ylabel,
style=self.style,
outputfolder=self.outputfolder,
color_palette=self.color_palette,
color_columns=self.color_columns,
units=self.units,
ev_align=self.ev_align.currentText(),
linewidth=str(self.linewidth.currentText()),
annlinewidth=str(self.annlinewidth.currentText()),
titlefontsize=str(self.titlefontsize.currentText()),
legendfontsize=str(self.legendfontsize.currentText()),
axislabelsize=str(self.axislabelsize.currentText()),
ticklabelfontsize=str(self.ticklabelfontsize.currentText()),
annotationfontsize=str(self.annotationfontsize.currentText()),
aa=self.aa.currentText(),
figdpi=str(self.figdpi.currentText()),
figformatlist=str(self.figformatlist.currentText()),
defaultname=str(self.defaultname.text())
)
# save config in last.yaml
with open(os.path.join(self.cfgfile), 'w+') as outfile:
outfile.write(yaml.dump(config))
def init_graphs_onefile(self):
# title
self.graph_rawdata.plotItem.setTitle(self.selectedfiles[0].split(".csv")[0], **self.titleStyle)
# set colors
self.color=self.color_columns
# Create empty curves
for i, col in enumerate(self.selectedcolumns):
self.courbes[col] = self.graph_rawdata.plot(pen=pg.mkPen(self.color[col], units='',
width=float(self.linewidth.currentText())),
name=self.selectedcolumns[i])
def init_graphs_multifile(self):
# set title
if len(self.selectedcolumns) > 0:
self.graph_rawdata.plotItem.setTitle(self.selectedcolumns[0], **self.titleStyle)
# Create empty curves
for i, f in enumerate(self.selectedfiles):
self.color[self.filelist[f]['path']] = self.color_palette[i]
self.courbes[self.filelist[f]['path']] = self.graph_rawdata.plot(pen=pg.mkPen(self.color[self.filelist[f]['path']],
width=float(self.linewidth.currentText())),
name=f)
def events_manager(self):
Ui_EventsDialog(selectedfiles=self.selectedfiles, filelist=self.filelist)
def samples_manager(self):
Ui_SamplesDialog(datafolder=list(self.folderlist.values())[0]['path'])
def append_to_df(self, filepath):
tmp = pd.read_csv(filepath, encoding=self.encoding)
tmp['filepath'] = filepath
self.df = self.df.append(tmp)
del tmp
def update(self):
# save current config as last.yaml
self.cfg_update()
#create progress dialog
self.update_progress = QtGui.QProgressDialog("Updating plots", "Cancel", 0, 0, self)
self.update_progress.setWindowTitle('Loading files ...')
self.update_progress.setWindowModality(QtCore.Qt.WindowModal)
self.update_progress.canceled.connect(self.update_progress.close)
self.update_progress.show()
self.update_progress.setRange(0, 10)
# search for selected files
self.remove_roi()
old_filelist = self.filelist
old_folderlist = self.folderlist
for folder_short, folder in old_folderlist.items():
folder_long = folder['path']
# remove folder from list
foitem = self.treeWidget.findItems(folder_short, QtCore.Qt.MatchExactly, 0)[0]
self.treeWidget.invisibleRootItem().removeChild(foitem)
del self.folderlist[folder_short]
# add folder to list, again...
self.add_folder(folder_long)
# create a new dataframe if not already exists
try:
isinstance(self.df, pd.DataFrame)
except:
self.df = pd.DataFrame(columns=['filepath'])
self.update_progress.setLabelText('Updating dataframes ...')
self.update_progress.setValue(1)
# remove non selected files
self.df = self.df.loc[self.df.filepath.isin([self.filelist[x]['path'] for x in self.selectedfiles]), :]
# update dataframe with newly selected files or files that have been updated
for f in self.selectedfiles:
if os.path.isfile(self.filelist[f]['path']):
# file was not selected before
if self.filelist[f]['path'] not in set(self.df.filepath):
self.append_to_df(self.filelist[f]['path'])
else: # file was selected before. let's check if it has been updated
oldsize = self.filelist[f]['size']
newsize = os.path.getsize(self.filelist[f]['path'])
if oldsize != newsize: # file size has changed = we have to reload it
# remove old data from df
self.df = self.df.loc[self.df.filepath != self.filelist[f]['path'], :]
# add new file to df
self.append_to_df(self.filelist[f]['path'])
# align all plots to same event
self.update_progress.setLabelText('Aligning plots...')
self.update_progress.setValue(2)
self.align_events()
# populate new columns list
self.previous_cols = list(map(lambda x: x.text(), self.columnlist.selectedItems()))
self.columnlist.clear()
self.columnlist.addItems([col for col in self.df.columns if
col not in ["time", "time2", "time3", "file", "eventdetails", "eventtype", "avgdt", "filepath"]])
# select same column(s) as before if there were some
for c in self.previous_cols:
self.columnlist.findItems(c, QtCore.Qt.MatchExactly)[0].setSelected(True)
# initialize colors
self.color = {}
# remove all legends
for a in self.courbes:
self.legend.items = []
while self.legend.layout.count() > 0:
self.legend.removeAt(0)
# get list of selected columns
self.selectedcolumns = list(map(lambda x: x.text(), self.columnlist.selectedItems()))
# generate plots
self.update_progress.setLabelText('Generating plots ...')
self.update_progress.setValue(3)
# only one file selected
if len(self.selectedfiles) == 1:
# allow selection of several columns
self.columnlist.setSelectionMode(QtGui.QAbstractItemView.MultiSelection)
# define colors
self.color = self.color_columns
# initialize graphs
self.__init__graphs()
self.update_graph_onefile()
elif len(self.selectedfiles) > 1:
# set graph title
# allow only 1 graph at a time
self.columnlist.setSelectionMode(QtGui.QAbstractItemView.SingleSelection)
# define colors in files list
for i, f in enumerate(self.selectedfiles):
self.color[self.filelist[f]['path']] = self.color_palette[i]
# update graphs
self.__init__graphs()
self.update_graph_multifile()
# change default name
# self.defaultname.setText(f)
# add events / annotations
self.update_progress.setLabelText('Annotate plots ...')
self.update_progress.setValue(4)
self.annotate()
#close progress dialog
self.update_progress.setValue(10)
self.update_progress.close()
def align_events(self, timecol="time2"):
ev = self.ev_align.currentText()
# clean events
self.df.loc[~self.df.eventtype.isin(self.eventcolors), 'eventtype'] = ""
# set column to use for time
# if timecol not in self.df.columns:
# timecol = 'time'
timecol = 'time2' if 'time2' in self.df.columns else 'time3'
# create time3 column
self.df['time3'] = self.df.time2 if 'time2' in self.df.columns else self.df.time
# do not align
if ev == 'do not align':
return
#set an anchor time at which we want to align the events
anchortime = { 'CELLS': 200,
'LIGHT ON': 350,
'LIGHT OFF': 600,
'BIC': 10,
'AZ': 20,
'EZ': 400}
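        # Worked example: if the chosen event (say 'CELLS') was logged at
        # t = 350 s in a file, shift = anchortime['CELLS'] - 350 = -150, so that
        # file's time3 axis is moved back by 150 s and all traces line up on the
        # same anchor time.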
# align...
for r in self.df.loc[self.df.eventtype == ev, ['filepath', timecol]].iterrows():
sample, t_old = r[1]['filepath'], r[1][timecol]
shift = anchortime[ev] - t_old
self.df.loc[self.df.filepath == sample, 'time3'] = self.df.loc[self.df.filepath == sample, timecol] + shift
# print(sample, t_old, shift)
def update_graph_onefile(self):
file_short = self.selectedfiles[0]
tmp = self.df.loc[self.df.filepath == self.filelist[file_short]['path'], :]
# for a in self.courbes:
# later on, clear all items and re-add
self.legend.items = []
while self.legend.layout.count() > 0:
self.legend.removeAt(0)
for i, col in enumerate(self.selectedcolumns):
self.courbes[col].setData(np.array(tmp['time3']), np.array(tmp[col]))
self.legend.addItem(self.courbes[col], col)
if isinstance(self.legend.items[i][1], pg.graphicsItems.LabelItem.LabelItem):
self.legend.items[i][1].setText(self.legend.items[i][1].text, **self.legendLabelStyle)
# set y label and units
ylab={}
for c in self.selectedcolumns:
if c in self.units:
if self.units[c] in ylab.keys():
ylab[self.units[c]].append(c.strip("Mass"))
else:
ylab[self.units[c]] = [c.strip("Mass")]
self.graph_rawdata.getAxis('left').setLabel(text=" ; ".join(["{0} ({1})".format(", ".join(v), k) for k, v in ylab.items()]),
units='',
                                                    **self.labelStyle) # axis label
def update_graph_multifile(self):
for _ in self.courbes:
# later on, clear all items and re-add
self.legend.items = []
while self.legend.layout.count() > 0:
self.legend.removeAt(0)
try:
for i, f in enumerate(self.selectedfiles):
tmp = self.df.loc[self.df.filepath == self.filelist[f]['path'], :]
self.courbes[self.filelist[f]['path']].setData(np.array(tmp['time3']), np.array(tmp[self.selectedcolumns[0]]))
self.legend.addItem(self.courbes[self.filelist[f]['path']], "/".join([self.filelist[f]['folder'], f]))
if isinstance(self.legend.items[i][1], pg.graphicsItems.LabelItem.LabelItem):
self.legend.items[i][1].setText(self.legend.items[i][1].text, **self.legendLabelStyle)
if self.selectedcolumns[0] in self.units:
self.graph_rawdata.getAxis('left').setLabel(text=self.selectedcolumns[0],
units=self.units[self.selectedcolumns[0]],
unitPrefix=None,
                                                         **self.labelStyle) # axis label
except IndexError:
# we were loading the files for the first time. no columns selected...
self.update_progress.close()
def annotate(self):
try:
# exit if no file is selected for annotation
if self.eventslist.currentIndex() == 0:
return
# get range
xrange = self.graph_rawdata.viewRange()[0]
yrange = self.graph_rawdata.viewRange()[1]
ann = self.df.loc[(self.df.eventtype.isin(self.eventcolors)) &
(self.df.filepath == self.filelist[self.eventslist.currentText()]['path']),
['time3', 'eventtype', 'eventdetails']]
# print(ann)
for event in ann.time3:
# print(event)
try:
clr = self.eventcolors[ann.loc[ann.time3 == event, 'eventtype'].values[0]]
except:
clr=(0,0,0,255)
self.graph_rawdata.addItem(pg.InfiniteLine(pos=event,
angle=90,
pen=pg.mkPen(clr,
width=float(self.annlinewidth.currentText())),
movable=False,
bounds=None))
annotation = pg.TextItem(text=str(ann.loc[ann.time3 == event, 'eventdetails'].values[0]),
color=self.eventcolors[ann.loc[ann.time3 == event, 'eventtype'].values[0]],
angle=-90)
annotation.setPos(event - np.diff(xrange)[0]/100, yrange[0])
self.graph_rawdata.addItem(annotation)
except Exception as e:
print("problem with annotation")
print(str(e))
def export(self):
self.export_progress = QtGui.QProgressDialog("Exporting image files", "Cancel", 0, 0, self)
self.export_progress.setWindowTitle('Please wait...')
self.export_progress.setWindowModality(QtCore.Qt.WindowModal)
self.export_progress.canceled.connect(self.export_progress.close)
self.export_progress.show()
self.export_progress.setRange(0, 3 + len(self.figformatlist.currentText().split("+")))
if len(self.selectedfiles) == 1:
self.export_individual_graphs()
else:
self.export_compiled_graphs()
def export_individual_graphs(self):
# get limits
xrange = self.graph_rawdata.viewRange()[0]
yrange = self.graph_rawdata.viewRange()[1]
self.export_progress.setValue(1)
# set style
style.use(self.style)
# set outputfolder
# if self.destination_folder.currentText() == os.path.split(shell.SHGetFolderPath(0, shellcon.CSIDL_MYPICTURES, None, 0))[-1]:
# outputfolder = os.path.join(os.path.split(shell.SHGetFolderPath(0, shellcon.CSIDL_MYPICTURES, None, 0)),
# time.strftime('%Y%m%d') + "_exports",
# self.selectedfiles[0])
# else:
# outputfolder = os.path.join(self.folderlist[self.destination_folder.text()],
# time.strftime('%Y%m%d') + "_figures",
# self.selectedfiles[0])
# set and check existence of outputfolder
outputfolder = os.path.join(self.outputfolder,
time.strftime('%Y%m%d') + "_exports",
self.selectedfiles[0])
# create folders if necessary
if not os.path.isdir(outputfolder):
os.makedirs(outputfolder)
# subset the dataframe to only the files we are considering
tmp = self.df.loc[self.df.filepath == self.filelist[self.selectedfiles[0]]['path'], ['time3', 'eventtype', 'eventdetails', 'echant', *self.selectedcolumns]]
self.export_progress.setValue(2)
# create figure
plt.figure(figsize=(float(self.figWidth), float(self.figHeight)))
ax = defaultdict(object)
self.export_progress.setValue(3)
for i, col in enumerate(self.selectedcolumns):
# add annotations
ax[col] = tmp.set_index('time3')[col].plot(lw=float(self.linewidth.currentText()),
c=[x/255 for x in self.color_columns[col]],
ls='-',
alpha=1.0,
label=str(col))
if self.eventslist.currentText() == self.selectedfiles[0]:
if i == len(self.selectedcolumns) - 1:
t = tmp.loc[(tmp.eventtype.isin(self.eventcolors.keys())) &
(tmp['time3'] > xrange[0]) &
(tmp['time3'] < xrange[1]),
['time3', 'eventtype', 'eventdetails', 'echant']]
# highlight light event
if 'LIGHT ON' in t.eventtype.tolist() and 'LIGHT OFF' in t.eventtype.tolist():
plt.axvspan(t.loc[t.eventtype == 'LIGHT ON', ['time3']].values[0],
t.loc[t.eventtype == 'LIGHT OFF', ['time3']].values[0],
color='yellow',
alpha=0.050)
for e in t['time3']:
etime, etype, edetails = t.loc[t['time3'] == e, :]['time3'].values[0], \
t.loc[t['time3'] == e, :].eventtype.values[0], \
t.loc[t['time3'] == e, :].eventdetails.values[0]
plt.axvline(etime, color=[x/255 for x in self.eventcolors[etype]], lw=float(self.annlinewidth.currentText()))
ax[col].text(x=etime - np.diff(xrange)[0]/100,
y=yrange[0],
s=edetails,
rotation=90,
color=[x/255 for x in self.eventcolors[etype]],
alpha=1.0,
verticalalignment='bottom',
horizontalalignment='left',
fontdict=self.annotation_font
)
# set limits
plt.xlim(*xrange)
plt.ylim(*yrange)
# set title
if self.title:
plt.title(str(self.selectedfiles[0]), fontdict=self.title_font)
# tick labels font size
plt.tick_params(labelsize=self.ticklab_font['size'])
# labels
if self.xlabel:
plt.xlabel('Time (s)', fontdict=self.axlab_font)
if self.ylabel:
ylab = []
for c in self.selectedcolumns:
if c in self.units:
ylab.append("{0} ({1})".format(c.strip("Mass"), self.units[c]))
else:
ylab.append(c)
plt.ylabel(", ".join(ylab), fontdict=self.axlab_font)
# set different options (legend, lines, etc...)
plt.legend(loc=0, prop=self.legend_font) # 1=bot left, 2=top left, 3=top right, 4=bot right
plt.axhline(0, color='k')
plt.grid(True)
# save file
self.export_savefile(outputfolder)
def export_compiled_graphs(self, ext=None):
# get limits
xrange = self.graph_rawdata.viewRange()[0]
yrange = self.graph_rawdata.viewRange()[1]
# set style
style.use(self.style)
# create figure
plt.figure(figsize=(float(self.figWidth), float(self.figHeight)))
ax = defaultdict(object)
self.export_progress.setValue(1)
for i, f in enumerate(self.selectedfiles):
# determine dataframe to use for plot
            tmp = self.df.loc[self.df.filepath == self.filelist[f]['path'], :]
ax[f] = tmp.set_index('time3')[self.selectedcolumns[0]].plot(lw=float(self.linewidth.currentText()),
c=[x/255 for x in self.color[self.filelist[f]['path']]],
ls='-',
label=str(f),
alpha=1.0)
# add annotations
if self.eventslist.currentText() == f:
t = tmp.loc[(tmp.eventtype.isin(self.eventcolors.keys())) &
(tmp['time3'] > xrange[0]) &
(tmp['time3'] < xrange[1]),
['time3', 'eventtype', 'eventdetails', 'echant']]
# highlight light event
if 'LIGHT ON' in t.eventtype.tolist() and 'LIGHT OFF' in t.eventtype.tolist():
plt.axvspan(t.loc[t.eventtype == 'LIGHT ON', ['time3']].values[0],
t.loc[t.eventtype == 'LIGHT OFF', ['time3']].values[0],
color='yellow',
alpha=0.050)
for e in t['time3']:
etime, etype, edetails = t.loc[t['time3'] == e, :]['time3'].values[0], \
t.loc[t['time3'] == e, :].eventtype.values[0], \
t.loc[t['time3'] == e, :].eventdetails.values[0]
plt.axvline(etime, color=[x/255 for x in self.eventcolors[etype]], lw=float(self.annlinewidth.currentText()))
ax[f].text(x=etime - np.diff(xrange)[0]/100,
y=yrange[0],
s=edetails,
rotation=90,
color=[x/255 for x in self.eventcolors[etype]],
alpha=1.0,
verticalalignment='bottom',
horizontalalignment='left',
fontdict=self.annotation_font
)
# set limits
plt.xlim(*xrange)
plt.ylim(*yrange)
# set title
if self.title:
            plt.title(str(self.selectedcolumns[0]), fontdict=self.title_font)
# tick labels fontsize
plt.tick_params(labelsize=self.ticklab_font['size'])
# labels
if self.xlabel:
plt.xlabel('Time (s)', fontdict=self.axlab_font)
if self.ylabel:
ylab = []
for c in self.selectedcolumns:
if c in self.units:
ylab.append("{0} ({1})".format(c.strip("Mass"), self.units[c]))
else:
ylab.append(c)
plt.ylabel(", ".join(ylab), fontdict=self.axlab_font)
# set different options (legend, lines, etc...)
plt.legend(loc=0, prop=self.legend_font) # 1=bot left, 2=top left, 3=top right, 4=bot right
plt.axhline(0, color='k')
plt.grid(True)
# plt.show()
self.export_progress.setValue(3)
# set and check existence of outputfolder
outputfolder = os.path.join(self.outputfolder,
time.strftime('%Y%m%d') + "_exports",
self.selectedcolumns[0])
# save file
self.export_savefile(outputfolder)
def export_savefile(self, outputfolder):
# check if outputfolder exists, if not create it
if not os.path.isdir(outputfolder):
os.makedirs(outputfolder)
# extension(s)
if '+' in self.figformatlist.currentText():
exts = self.figformatlist.currentText().split("+")
else:
exts = [self.figformatlist.currentText()]
fname = ""
for ext in exts:
# increment filename if already exists
i = 1
fname = os.path.join(outputfolder, self.defaultname.text() + "_" + str(i) + "." + ext)
while os.path.isfile(fname):
i += 1
fname = os.path.join(outputfolder, self.defaultname.text() + "_" + str(i) + "." + ext)
# save file(s)
plt.savefig(fname, dpi=int(self.figdpi.currentText()), bbox_inches='tight')
self.export_progress.setValue(4)
if os.path.isfile(fname):
print(time.strftime("%Y%m%d %H:%M:%S - saved: "), fname)
else:
print("Failed to save:", fname)
# close figure
plt.close("all")
self.export_progress.close()
# FILES and FOLDERS management
# def load_df(self, filepath):
# samplestmp = pd.read_csv(glob.glob(os.path.join(folder, "*_SAMPLES.csv"))[0], encoding='ISO-8859-1)
def select_folder(self):
        # select start folder (first existing candidate wins)
        initial_folders = [self.datafolder,
                           'C:\\Mass_Spec_Data\\',
                           'C:\\Users\\u5040252.RSB0001280\\CloudStation\\Dropbox\\data Vincent\\',
                           'C:\\',
                           '/Users/u5475569/Desktop/',
                           '/']
        for f in initial_folders:
            if os.path.isdir(f):
                startfolder = f
                break
        else:
            # fallback if none of the candidate folders exist
            startfolder = os.path.expanduser('~')
# prompt for folder
folder = QtGui.QFileDialog.getExistingDirectory(self, 'Select folder', startfolder)
# add folder to list
self.add_folder(folder)
def add_folder(self, folder, update=False):
# check if folder is a folder
if not os.path.isdir(folder):
QtGui.QMessageBox.warning(self, 'Not a folder', "{} is not a folder".format(folder))
return
# check if folder is already in list
folder_short = os.path.split(folder)[-1]
if folder_short in self.folderlist.keys():
QtGui.QMessageBox.warning(self, 'Folder already added', "{} has already been added to the list".format(folder_short))
return
# check if sample list is present
filelist = glob.glob(os.path.join(folder, "*_SAMPLES.csv"))
if len(filelist) == 0:
msgbox = QtGui.QMessageBox(self)
msgbox.setIcon(QMessageBox.Warning)
            msgbox.setText("Sample file not found")
msgbox.setDetailedText("The folder you specify here must contain a sample list file which contains the list of all the samples of that day. \n"
"Typically, the name follows this pattern: \"YYYYMMDD_SAMPLES.csv\" where YYYYMMDD is the date of that experiment.")
msgbox.setWindowTitle("Sample file not found")
msgbox.setInformativeText("The folder specified:\n({})\ndid not contain any valid samples file".format(folder))
msgbox.setStandardButtons(QMessageBox.Ok)
msgbox.exec_()
return
# add parent to list
parent_folder = self.add_parent(self.treeWidget.invisibleRootItem(), 0, folder_short)
# add children
samplelist_file = glob.glob(os.path.join(folder, "*_SAMPLES.csv"))[0]
if os.path.isdir(os.path.join(folder)) and os.path.isfile(samplelist_file):
samples = pd.read_csv(samplelist_file, encoding='ISO-8859-1')
samples.sort_values(by=['date', 'time'], ascending=[False, False], inplace=True)
for i, file_long in enumerate([x for x in samples.filename if os.path.isfile(os.path.join(folder, x))]):
#add entry to self.filelist
file_short = file_long.split(".csv")[0]
if file_short not in self.filelist.keys():
self.filelist[file_short] = {'folder': folder_short,
'path': os.path.join(folder, file_long),
'size': os.path.getsize(os.path.join(folder, file_long)),
'cuvette': int(samples.loc[samples.filename == file_long, 'cuvette'])}
checked = QtCore.Qt.Checked if file_short in self.selectedfiles else QtCore.Qt.Unchecked
self.add_child(parent_folder, 0, file_short, checked)
self.folderlist[folder_short] = {'path':folder, 'nfiles': i}
# self.datafolder = folder
# load dataframe to masterdf
print(glob.glob(os.path.join(folder, "*_SAMPLES.csv"))[0])
# samplestmp = pd.read_csv(glob.glob(os.path.join(folder, "*_SAMPLES.csv"))[0], encoding='ISO-8859-1)
self.update_destination_folder()
def remove_folder(self):
# items to be removed
removed_items = [item.text(0) for item in self.treeWidget.selectedItems()]
# remove from folder list
self.folderlist = {k: v for k, v in self.folderlist.items() if k not in removed_items}
# remove from selected
to_be_removed = {k for k, v in self.filelist.items() if v['folder'] in removed_items}
self.selectedfiles = list(set(self.selectedfiles) - to_be_removed)
# remove from filelist
self.filelist = {k: v for k, v in self.filelist.items() if v['folder'] not in removed_items}
# remove item from treeview
for i in self.treeWidget.selectedItems():
self.treeWidget.invisibleRootItem().removeChild(i)
self.update_destination_folder()
if len(self.selectedfiles) > 0:
self.update()
def add_parent(self, parent, column, title):
item = QtGui.QTreeWidgetItem(parent, [title])
# item.setData(column, QtCore.Qt.UserRole, data)
item.setChildIndicatorPolicy(QtGui.QTreeWidgetItem.ShowIndicator)
item.setExpanded(True)
return item
def add_child(self, parent, column, title, checked=QtCore.Qt.Unchecked):
item = QtGui.QTreeWidgetItem(parent, [title])
# item.setData(column, QtCore.Qt.UserRole, data)
item.setCheckState(column, checked)
return item
def treeWidget_select(self, item=None, column=0):
# we clicked on a file
if item.parent() is not None:
f = item.text(column)
if item in self.treeWidget.selectedItems():
if item.checkState(column) == QtCore.Qt.Checked: # it was checked, we will uncheck it
if f in self.selectedfiles:
self.selectedfiles.remove(f)
item.setCheckState(0, QtCore.Qt.Unchecked)
self.treeWidget.setItemSelected(item, False)
else: # it was unchecked, we will check it
if f not in self.selectedfiles:
self.selectedfiles.append(f)
item.setCheckState(0, QtCore.Qt.Checked)
self.treeWidget.setItemSelected(item, False)
# else we clicked on a folder.
else:
if item in self.treeWidget.selectedItems(): # folder is selected. we have to check all of its children
for i in range(item.childCount()):
if item.child(i).text(column) not in self.selectedfiles:
self.selectedfiles.append(item.child(i).text(column))
item.child(i).setCheckState(0, QtCore.Qt.Checked)
self.treeWidget.setItemSelected(item.child(i), False)
else: # then we uncheck all the children
for i in range(item.childCount()):
if item.child(i).text(column) in self.selectedfiles:
self.selectedfiles.remove(item.child(i).text(column))
item.child(i).setCheckState(0, QtCore.Qt.Unchecked)
self.treeWidget.setItemSelected(item.child(i), False)
# empty list of items in eventstreewidget and update
self.update_eventslist()
def update_eventslist(self):
# Add a folder in eventslist (graphs options) and destination folder (export options)
self.eventslist.clear()
self.eventslist.addItem('None')
self.eventslist.addItems(self.selectedfiles)
self.eventslist.setCurrentIndex(1)
def update_destination_folder(self):
self.destination_folder.clear()
self.destination_folder.addItem(os.path.split(shell.SHGetFolderPath(0, shellcon.CSIDL_MYPICTURES, None, 0))[-1])
self.destination_folder.addItems(list(self.folderlist.keys()))
self.destination_folder.setCurrentIndex(1)
def treeWidget_changed(self, item, column=0):
f = item.text(column)
if item.checkState(column) == QtCore.Qt.Checked:
if f not in self.selectedfiles:
self.selectedfiles.append(f)
if item.checkState(column) == QtCore.Qt.Unchecked:
if f in self.selectedfiles:
self.selectedfiles.remove(f)
# ROI management
def add_roi(self):
if self.roi is not None:
self.remove_roi()
self.roi_btn.setText("Add ROI")
else:
self.roi = pg.LinearRegionItem([320, 340])
self.roi.setZValue(-1)
self.graph_rawdata.addItem(self.roi)
self.roi.sigRegionChanged.connect(self.updateTable)
# self.roi_btn.setEnabled(False)
self.roi_btn.setText("Remove ROI")
self.updateTable()
def remove_roi(self):
if self.roi is not None:
self.graph_rawdata.removeItem(self.roi)
# self.roi_btn.setEnabled(True)
self.roi_btn.setText("Add ROI")
self.roi = None
else:
pass
def updateTable(self):
# 1 file: columns go to rows
if len(self.selectedfiles) == 1:
data = [('Column', self.selectedfiles[0])]
for c in self.selectedcolumns:
mean = self.df.loc[(self.df.time3 >= self.roi.getRegion()[0]) &
(self.df.time3 <= self.roi.getRegion()[1]), c].mean()
data.append((c, mean))
# many files: files go to rows
elif len(self.selectedfiles) > 1:
data = [('File', self.selectedcolumns[0])]
for f in self.selectedfiles:
mean = round(self.df.loc[(self.df.time3 >= self.roi.getRegion()[0]) &
(self.df.time3 <= self.roi.getRegion()[1]) &
(self.df.filepath == self.filelist[f]['path']), self.selectedcolumns[0]].mean(), 6)
data.append((f, mean))
data = np.array(data)
self.dataTable.setData(data)
self.dataTable.show()
def chlorophylls(self):
"""
:param pattern: ex:""
:param outputfile: ex:"chlorophyll.csv"
:return:
"""
pattern = "2"
outputfile = "chlorophylls.csv"
outputfile = os.path.join(self.datafolder, outputfile)
print(outputfile)
# if chlorophyll file already exists, then load csv and update it
if os.path.isfile(outputfile): # updating
# backup existing file
backupfile = os.path.join(self.datafolder, os.path.splitext(outputfile)[0] + "_" + time.strftime(
"%Y-%m-%d_%H-%M") + ".backup")
chl = pd.read_csv(outputfile, encoding='ISO-8859-1')[['date', 'time', 'samplename', 'chlorophyll']]
chl.to_csv(backupfile, index=False)
print("saved back-up file: ", backupfile)
else: # creating
chl = pd.DataFrame(columns=['date', 'time', 'samplename', 'chlorophyll'])
# main loop
try:
i = 0
for d in glob.glob(os.path.join(self.datafolder, pattern + "*")):
if os.path.isdir(d):
day = os.path.split(d)[1]
samplefile = os.path.join(d, day + "_SAMPLES.csv")
# print(day, samplefile, os.path.isfile(samplefile))
if os.path.isfile(samplefile):
tmp = pd.read_csv(samplefile, encoding='ISO-8859-1')[['date', 'time', 'samplename']]
# add time column
if day not in set(chl.date.apply(lambda x: str(x))):
chl = chl.append(tmp, ignore_index=True)
i += 1
# sort and save
chl = chl.sort_values(by=['date', 'time'], ascending=[False, False])
chl.to_csv(outputfile, index=False)
# success
if i > 0:
QtGui.QMessageBox.information(self, 'Chlorophyll file update',
"{} samples were added to {}".format(i, outputfile))
else:
QtGui.QMessageBox.information(self, 'Chlorophyll file update',
"{} was already up-to-date. No sample added".format(outputfile))
except Exception as e:
QtGui.QMessageBox.critical(self, 'Error',
"The following error was encoutered while updating {outputfile}: \n" + str(e))
def export_results(self):
        t = {}  # worker threads, keyed by data folder
# create progress dialog
self.getpeaks_progress = QtGui.QProgressDialog("Extracting data from selected files. Please wait...", "Cancel", 0,
0, self)
self.getpeaks_progress.setWindowTitle('Extracting data...')
self.getpeaks_progress.setWindowModality(QtCore.Qt.WindowModal)
self.getpeaks_progress.canceled.connect(self.getpeaks_progress.close)
self.getpeaks_progress.show()
self.getpeaks_progress.setRange(0, len(
[self.folderlist[self.filelist[x]['folder']]['path'] for x in self.selectedfiles]))
max_threads = 0
# chlorophylls
chlfile = os.path.join(self.datafolder, "chlorophylls.csv")
chldf = pd.read_csv(chlfile, encoding='ISO-8859-1')
for fo in set([self.folderlist[self.filelist[x]['folder']]['path'] for x in self.selectedfiles]):
# do each folder in a different thread
t[fo] = threading.Thread(target=self.getpeaks, args=(fo, chldf.copy(), ))
t[fo].start()
            max_threads += 1
        # wait for all worker threads to finish
        for thread in t.values():
            thread.join()
print(fo, " - results exported")
self.getpeaks_progress.close()
def getpeaks(self, fo, chldf):
"""
        Extract means from the raw data and export them to a molten (long-format) csv.
        The non-catalytic baseline is subtracted from each value (for enrichment rate and atomic fraction rate).
        Values are normalized to chlorophyll whenever 1) it is relevant and 2) chlorophyll data is available.
:return:
"""
resultfiles = []
col1, col2 = 'enrichrate49', 'logE49'
# list of files from that folder
files = [x for x in self.selectedfiles if self.folderlist[self.filelist[x]['folder']]['path'] == fo]
# select columns to keep for analysis
cols = [col1, col2]
cols.extend(['time', 'eventtype', 'eventdetails', 'd32dt_cd', 'totalCO2'])
output_molten = pd.DataFrame(columns=['exp', 'sample', 'variable', 'value_norm', 'value', 'meas_start', 'meas_end', 'unit', 'comments'])
# check if "results" folder exists
resultsfolder = os.path.join(fo, "results")
if not os.path.isdir(resultsfolder):
os.makedirs(resultsfolder)
for i, f in enumerate(set(files)):
# prepare dataframe
df = self.df.loc[self.df.filepath == self.filelist[f]['path'], cols]
# extract data
data = self.extract_data2(df, col1, col2)
# add chlorophyll
print(os.path.split(fo)[-1], "_".join(f.split("_")[1:]))
data['chl'] = (chldf.loc[(chldf.date == int(os.path.split(fo)[-1])) & (chldf.samplename == "_".join(f.split("_")[1:])), 'chlorophyll'].values[0],
0, 0, 'µg/mL', 'Chlorophyll concentration in stock solution.')
# find final chl concentration
cellvol = 10
if 'CELLS' in df.eventtype.tolist():
cellvol = int(re.search(r"^Added ([0-9]{1,}) µL",
df.loc[df.eventtype == 'CELLS', :].eventdetails.values[0]).groups()[0])
final_chl = (data['chl'][0] * cellvol) / self.filelist[f]['cuvette']
data['chl_final'] = (final_chl, 0, 0, 'µg/mL', 'Final chlorophyll concentration in cuvette.')
data['cellvol'] = (cellvol, 0, 0, 'µL', 'Volume of cells injected in cuvette.')
data['cuvette'] = (self.filelist[f]['cuvette'], 0, 0, 'µL', 'Volume of cuvette')
exc = ['cons32', col1 + '_nc', 'chl', 'chl_final', 'cellvol', 'cuvette']
if final_chl > 0:
data2 = {k: (v[0] / final_chl, v[0], v[1], v[2], v[3], v[4]) for k, v in data.items() if k not in exc}
else:
data2 = {k: (v[0], v[0], v[1], v[2], v[3], v[4]) for k, v in data.items() if k not in exc}
# finally add excluded columns
for x in exc:
data2[x] = (data[x][0], data[x][0], data[x][1], data[x][2], data[x][3], data[x][4])
data = data2.copy()
del data2
# fill in output dataframe
for k, v in data.items():
# print([os.path.split(fo)[-1], f, k, *v])
output_molten.loc[len(output_molten), :] = [os.path.split(fo)[-1], f, k, *v]
# save csv
resultsfile_molten = os.path.join(resultsfolder, os.path.split(fo)[-1] + "_processed_data_molten.csv")
output_molten.to_csv(resultsfile_molten, index=False)
def extract_data2(self, df, col1='enrichrate49', col2='logE49'):
data = {}
# initialize all events to avoid errors
        df.loc[df.time == df.loc[df[col2].notnull(), 'time'].max(), 'eventtype'] = 'END'
        events_types = ['CELLS', 'LIGHT ON', 'LIGHT OFF', 'AZ', 'BIC', 'EZ', 'CUSTOM', 'END']
        events = {x: df.loc[df[col2].notnull(), 'time'].max() for x in events_types}
for _, x in df.loc[df.eventtype.isin(events), :].iterrows():
if x['eventtype'] in events:
events[x['eventtype']] = x['time']
# non-catalytic
start, end = events['CELLS'] - 10, events['CELLS'] - 3
data[col1 + '_nc'] = (
df.loc[(df.time >= start) & (df.time < end), col1].mean(), start, end, 's-1', 'Non-catalytic ' + col1)
# in the dark, after cells injection and before Light on
# peak_postinjection
start, end = events['CELLS'], events['LIGHT ON']
data[col1 + '_peak_postinjection'] = (
df.loc[(df.time >= start) & (df.time < end), col1].min() - data[col1 + '_nc'][0], start, end, 's-1',
'Minimal ' + col1 + ', in the dark, after cell injection (= maximal peak of activity)')
# stable_postinjection
start, end = events['LIGHT ON'] - 10, events['LIGHT ON'] - 1
data[col1 + '_stable_postinjection'] = (
df.loc[(df.time >= start) & (df.time < end), col1].mean() - data[col1 + '_nc'][0], start, end, 's-1',
'Steady ' + col1 + ', in the dark, after cell injection')
# in the light
start, end = events['LIGHT ON'], events['LIGHT OFF']
# Rate, light_high : Max value in the light period
data[col1 + '_light_high'] = (
df.loc[(df.time >= start) & (df.time < end), col1].max() - data[col1 + '_nc'][0], start, end, 's-1',
'Maximal ' + col1 + ' during the light period (= maximal peak of activity)')
# Rate, light_low : Min value in the light period
data[col1 + '_light_low'] = (
df.loc[(df.time >= start) & (df.time < end), col1].min() - data[col1 + '_nc'][0], start, end, 's-1',
'Minimal ' + col1 + ' during the light period')
# Rate, light_steady
start, end = events['LIGHT OFF'] - 10, events['LIGHT OFF'] - 1
data[col1 + '_light_steady'] = (
df.loc[(df.time >= start) & (df.time < end), col1].mean() - data[col1 + '_nc'][0], start, end, 's-1',
'Steady ' + col1 + ' during the light period')
# in the dark
# Rate, in the dark after light period
start, end = events['LIGHT OFF'], events['END']
# Rate, dark2_high : Max value in the light period
data[col1 + '_dark2_high'] = (
df.loc[(df.time >= start) & (df.time < end), col1].max() - data[col1 + '_nc'][0], start, end, 's-1',
'Maximal ' + col1 + ' measured in second dark phase, after light is turned off')
# Rate, dark2_low : Min value in the light period
data[col1 + '_dark2_low'] = (
df.loc[(df.time >= start) & (df.time < end), col1].min() - data[col1 + '_nc'][0], start, end, 's-1',
'Minimal ' + col1 + ' measured in second dark phase, after light is turned off')
# Rate, dark2_steady : Min value in the light period
start, end = events['END'] - 10, events['END']
data[col1 + '_dark2_steady'] = (
df.loc[(df.time >= start) & (df.time < end), col1].mean() - data[col1 + '_nc'][0], start, end, 's-1',
'Steady ' + col1 + ' measured in second dark phase, just before the end of the experiment')
# photosynthesis and respiration
# oxygen consumption
start, end = events['CELLS'] - 30, events['CELLS']
data['cons32'] = (df.loc[(df.time >= start) & (df.time < end), 'd32dt_cd'].mean(), start, end, 'µmol/L/s',
'Membrane oxygen consumption measured at steady rate just before cells are injected')
# respiration
start, end = events['LIGHT ON'] - 30, events['LIGHT ON']
data['respiration'] = (
df.loc[(df.time >= start) & (df.time < end), 'd32dt_cd'].mean() - data['cons32'][0], start, end, 'µmol/L/s',
            'Respiration measured at steady oxygen consumption rate just before light is turned on')
# photosynthesis
start, end = events['LIGHT OFF'] - 30, events['LIGHT OFF']
data['photosynthesis'] = (
df.loc[(df.time >= start) & (df.time < end), 'd32dt_cd'].mean() - data['respiration'][0], start, end, 'µmol/L/s',
            'Photosynthesis measured at steady oxygen evolution rate just before light is turned off')
# calculate regression to extrapolate non catalytic on log enrichment
start, end = events['CELLS'] - 15, events['CELLS'] + 2
x = np.array(df.loc[(df.time >= start) & (df.time < end), 'time'])
y = np.array(df.loc[(df.time >= start) & (df.time < end), col2])
# calculate coefficients for linear regression (y = ax + b)
A = np.vstack([x, np.ones(len(x))]).T
a, b = np.linalg.lstsq(A, y)[0]
        # non-catalytic log enrichment extrapolation
nc = {x: (a * events[x] + b) for x in events} # non-catalytic
actual = {x: df.loc[df.time == events[x], col2].values[0] for x in events} # actual values
cat = {x: (nc[x] - actual[x]) for x in events} # catalytic values
# calculate activity in diverse intervals
data[col2 + '_total_activity'] = (cat['END'] - cat['CELLS'], events['CELLS'], events['END'], '',
'Total amount of conversion due to catalytic activity')
data[col2 + '_dark1_total_activity'] = (cat['LIGHT ON'] - cat['CELLS'], events['CELLS'], events['LIGHT ON'], '',
'Total amount of conversion due to catalytic activity in the first dark phase, before light')
        data[col2 + '_light_total_activity'] = (cat['LIGHT OFF'] - cat['LIGHT ON'], events['LIGHT ON'], events['LIGHT OFF'], '',
                                       'Total amount of conversion due to catalytic activity in the light phase')
        data[col2 + '_dark2_total_activity'] = (cat['END'] - cat['LIGHT OFF'], events['LIGHT OFF'], events['END'], '',
                                       'Total amount of conversion due to catalytic activity in the second dark phase, after light')
# return
return data
# -- Main class (application launch) --
def main(args):
    a = QApplication(args)  # create the application object
    f = QMainWindow()  # create the root widget (main window)
    c = pyMS(f)  # instantiate the class that contains the application code
    f.showMaximized()
    f.activateWindow()
    r = a.exec_()  # start the application event loop
    return r
if __name__ == "__main__":
    main(sys.argv)  # call the main function
| gpl-3.0 |
JeanKossaifi/scikit-learn | examples/ensemble/plot_forest_iris.py | 335 | 6271 | """
====================================================================
Plot the decision surfaces of ensembles of trees on the iris dataset
====================================================================
Plot the decision surfaces of forests of randomized trees trained on pairs of
features of the iris dataset.
This plot compares the decision surfaces learned by a decision tree classifier
(first column), by a random forest classifier (second column), by an extra-
trees classifier (third column) and by an AdaBoost classifier (fourth column).
In the first row, the classifiers are built using the sepal width and the sepal
length features only, on the second row using the petal length and sepal length
only, and on the third row using the petal width and the petal length only.
In descending order of quality, when trained (outside of this example) on all
4 features using 30 estimators and scored using 10 fold cross validation, we see::
ExtraTreesClassifier() # 0.95 score
RandomForestClassifier() # 0.94 score
AdaBoost(DecisionTree(max_depth=3)) # 0.94 score
DecisionTree(max_depth=None) # 0.94 score
Increasing `max_depth` for AdaBoost lowers the standard deviation of the scores (but
the average score does not improve).
See the console's output for further details about each model.
In this example you might try to:
1) vary the ``max_depth`` for the ``DecisionTreeClassifier`` and
``AdaBoostClassifier``, perhaps try ``max_depth=3`` for the
``DecisionTreeClassifier`` or ``max_depth=None`` for ``AdaBoostClassifier``
2) vary ``n_estimators``
It is worth noting that RandomForests and ExtraTrees can be fitted in parallel
on many cores as each tree is built independently of the others. AdaBoost's
samples are built sequentially and so do not use multiple cores.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import clone
from sklearn.datasets import load_iris
from sklearn.ensemble import (RandomForestClassifier, ExtraTreesClassifier,
AdaBoostClassifier)
from sklearn.externals.six.moves import xrange
from sklearn.tree import DecisionTreeClassifier
# Parameters
n_classes = 3
n_estimators = 30
plot_colors = "ryb"
cmap = plt.cm.RdYlBu
plot_step = 0.02 # fine step width for decision surface contours
plot_step_coarser = 0.5 # step widths for coarse classifier guesses
RANDOM_SEED = 13 # fix the seed on each iteration
# Load data
iris = load_iris()
plot_idx = 1
models = [DecisionTreeClassifier(max_depth=None),
RandomForestClassifier(n_estimators=n_estimators),
ExtraTreesClassifier(n_estimators=n_estimators),
AdaBoostClassifier(DecisionTreeClassifier(max_depth=3),
n_estimators=n_estimators)]
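# Purely as an illustration of the variations suggested in the docstring above
# (hypothetical settings, not the configuration used by this example), one could
# instead build the models like:
#
#   models = [DecisionTreeClassifier(max_depth=3),
#             RandomForestClassifier(n_estimators=100),
#             ExtraTreesClassifier(n_estimators=100),
#             AdaBoostClassifier(DecisionTreeClassifier(max_depth=None),
#                                n_estimators=100)]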
for pair in ([0, 1], [0, 2], [2, 3]):
for model in models:
# We only take the two corresponding features
X = iris.data[:, pair]
y = iris.target
# Shuffle
idx = np.arange(X.shape[0])
np.random.seed(RANDOM_SEED)
np.random.shuffle(idx)
X = X[idx]
y = y[idx]
# Standardize
mean = X.mean(axis=0)
std = X.std(axis=0)
X = (X - mean) / std
# Train
clf = clone(model)
clf = model.fit(X, y)
scores = clf.score(X, y)
# Create a title for each column and the console by using str() and
# slicing away useless parts of the string
model_title = str(type(model)).split(".")[-1][:-2][:-len("Classifier")]
model_details = model_title
if hasattr(model, "estimators_"):
model_details += " with {} estimators".format(len(model.estimators_))
        print(model_details + " with features", pair, "has a score of", scores)
plt.subplot(3, 4, plot_idx)
if plot_idx <= len(models):
# Add a title at the top of each column
plt.title(model_title)
# Now plot the decision boundary using a fine mesh as input to a
# filled contour plot
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, plot_step),
np.arange(y_min, y_max, plot_step))
# Plot either a single DecisionTreeClassifier or alpha blend the
# decision surfaces of the ensemble of classifiers
if isinstance(model, DecisionTreeClassifier):
Z = model.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
cs = plt.contourf(xx, yy, Z, cmap=cmap)
else:
# Choose alpha blend level with respect to the number of estimators
# that are in use (noting that AdaBoost can use fewer estimators
# than its maximum if it achieves a good enough fit early on)
estimator_alpha = 1.0 / len(model.estimators_)
for tree in model.estimators_:
Z = tree.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
cs = plt.contourf(xx, yy, Z, alpha=estimator_alpha, cmap=cmap)
# Build a coarser grid to plot a set of ensemble classifications
# to show how these are different to what we see in the decision
# surfaces. These points are regularly space and do not have a black outline
xx_coarser, yy_coarser = np.meshgrid(np.arange(x_min, x_max, plot_step_coarser),
np.arange(y_min, y_max, plot_step_coarser))
Z_points_coarser = model.predict(np.c_[xx_coarser.ravel(), yy_coarser.ravel()]).reshape(xx_coarser.shape)
cs_points = plt.scatter(xx_coarser, yy_coarser, s=15, c=Z_points_coarser, cmap=cmap, edgecolors="none")
# Plot the training points, these are clustered together and have a
# black outline
for i, c in zip(xrange(n_classes), plot_colors):
idx = np.where(y == i)
plt.scatter(X[idx, 0], X[idx, 1], c=c, label=iris.target_names[i],
cmap=cmap)
plot_idx += 1 # move on to the next plot in sequence
plt.suptitle("Classifiers on feature subsets of the Iris dataset")
plt.axis("tight")
plt.show()
| bsd-3-clause |
AliShahin/AliShahin.github.io | markdown_generator/talks.py | 199 | 4000 |
# coding: utf-8
# # Talks markdown generator for academicpages
#
# Takes a TSV of talks with metadata and converts them for use with [academicpages.github.io](academicpages.github.io). This is an interactive Jupyter notebook ([see more info here](http://jupyter-notebook-beginner-guide.readthedocs.io/en/latest/what_is_jupyter.html)). The core python code is also in `talks.py`. Run either from the `markdown_generator` folder after replacing `talks.tsv` with one containing your data.
#
# TODO: Make this work with BibTex and other databases, rather than Stuart's non-standard TSV format and citation style.
# In[1]:
import pandas as pd
import os
# ## Data format
#
# The TSV needs to have the following columns: title, type, url_slug, venue, date, location, talk_url, description, with a header at the top. Many of these fields can be blank, but the columns must be in the TSV.
#
# - Fields that cannot be blank: `title`, `url_slug`, `date`. All else can be blank. `type` defaults to "Talk"
# - `date` must be formatted as YYYY-MM-DD.
# - `url_slug` will be the descriptive part of the .md file and the permalink URL for the page about the paper.
# - The .md file will be `YYYY-MM-DD-[url_slug].md` and the permalink will be `https://[yourdomain]/talks/YYYY-MM-DD-[url_slug]`
# - The combination of `url_slug` and `date` must be unique, as it will be the basis for your filenames
#
# ## Import TSV
#
# Pandas makes this easy with the read_csv function. We are using a TSV, so we specify the separator as a tab, or `\t`.
#
# I found it important to put this data in a tab-separated values format, because there are a lot of commas in this kind of data and comma-separated values can get messed up. However, you can modify the import statement, as pandas also has read_excel(), read_json(), and others.
# In[3]:
talks = pd.read_csv("talks.tsv", sep="\t", header=0)
talks
# ## Escape special characters
#
# YAML is very picky about how it takes a valid string, so we are replacing single and double quotes (and ampersands) with their HTML encoded equivilents. This makes them look not so readable in raw format, but they are parsed and rendered nicely.
# In[4]:
html_escape_table = {
"&": "&",
'"': """,
"'": "'"
}
def html_escape(text):
if type(text) is str:
return "".join(html_escape_table.get(c,c) for c in text)
else:
return "False"
# ## Creating the markdown files
#
# This is where the heavy lifting is done. This loops through all the rows in the TSV dataframe, then starts to concatentate a big string (```md```) that contains the markdown for each type. It does the YAML metadata first, then does the description for the individual page.
# In[5]:
loc_dict = {}
for row, item in talks.iterrows():
md_filename = str(item.date) + "-" + item.url_slug + ".md"
html_filename = str(item.date) + "-" + item.url_slug
year = item.date[:4]
md = "---\ntitle: \"" + item.title + '"\n'
md += "collection: talks" + "\n"
if len(str(item.type)) > 3:
md += 'type: "' + item.type + '"\n'
else:
md += 'type: "Talk"\n'
md += "permalink: /talks/" + html_filename + "\n"
if len(str(item.venue)) > 3:
md += 'venue: "' + item.venue + '"\n'
    if len(str(item.date)) > 3:
        md += "date: " + str(item.date) + "\n"
if len(str(item.location)) > 3:
md += 'location: "' + str(item.location) + '"\n'
md += "---\n"
if len(str(item.talk_url)) > 3:
md += "\n[More information here](" + item.talk_url + ")\n"
if len(str(item.description)) > 3:
md += "\n" + html_escape(item.description) + "\n"
md_filename = os.path.basename(md_filename)
#print(md)
with open("../_talks/" + md_filename, 'w') as f:
f.write(md)
# These files are in the talks directory, one directory below where we're working from.
| mit |
zfrenchee/pandas | pandas/tests/reshape/test_melt.py | 1 | 28100 | # -*- coding: utf-8 -*-
# pylint: disable-msg=W0612,E1101
import pytest
from pandas import DataFrame
import pandas as pd
from numpy import nan
import numpy as np
from pandas import melt, lreshape, wide_to_long
import pandas.util.testing as tm
from pandas.compat import range
class TestMelt(object):
def setup_method(self, method):
self.df = tm.makeTimeDataFrame()[:10]
self.df['id1'] = (self.df['A'] > 0).astype(np.int64)
self.df['id2'] = (self.df['B'] > 0).astype(np.int64)
self.var_name = 'var'
self.value_name = 'val'
self.df1 = pd.DataFrame([[1.067683, -1.110463, 0.20867
], [-1.321405, 0.368915, -1.055342],
[-0.807333, 0.08298, -0.873361]])
self.df1.columns = [list('ABC'), list('abc')]
self.df1.columns.names = ['CAP', 'low']
def test_top_level_method(self):
result = melt(self.df)
assert result.columns.tolist() == ['variable', 'value']
def test_method_signatures(self):
tm.assert_frame_equal(self.df.melt(),
melt(self.df))
tm.assert_frame_equal(self.df.melt(id_vars=['id1', 'id2'],
value_vars=['A', 'B']),
melt(self.df,
id_vars=['id1', 'id2'],
value_vars=['A', 'B']))
tm.assert_frame_equal(self.df.melt(var_name=self.var_name,
value_name=self.value_name),
melt(self.df,
var_name=self.var_name,
value_name=self.value_name))
tm.assert_frame_equal(self.df1.melt(col_level=0),
melt(self.df1, col_level=0))
def test_default_col_names(self):
result = self.df.melt()
assert result.columns.tolist() == ['variable', 'value']
result1 = self.df.melt(id_vars=['id1'])
assert result1.columns.tolist() == ['id1', 'variable', 'value']
result2 = self.df.melt(id_vars=['id1', 'id2'])
assert result2.columns.tolist() == ['id1', 'id2', 'variable', 'value']
def test_value_vars(self):
result3 = self.df.melt(id_vars=['id1', 'id2'], value_vars='A')
assert len(result3) == 10
result4 = self.df.melt(id_vars=['id1', 'id2'], value_vars=['A', 'B'])
expected4 = DataFrame({'id1': self.df['id1'].tolist() * 2,
'id2': self.df['id2'].tolist() * 2,
'variable': ['A'] * 10 + ['B'] * 10,
'value': (self.df['A'].tolist() +
self.df['B'].tolist())},
columns=['id1', 'id2', 'variable', 'value'])
tm.assert_frame_equal(result4, expected4)
def test_value_vars_types(self):
# GH 15348
expected = DataFrame({'id1': self.df['id1'].tolist() * 2,
'id2': self.df['id2'].tolist() * 2,
'variable': ['A'] * 10 + ['B'] * 10,
'value': (self.df['A'].tolist() +
self.df['B'].tolist())},
columns=['id1', 'id2', 'variable', 'value'])
for type_ in (tuple, list, np.array):
result = self.df.melt(id_vars=['id1', 'id2'],
value_vars=type_(('A', 'B')))
tm.assert_frame_equal(result, expected)
def test_vars_work_with_multiindex(self):
expected = DataFrame({
('A', 'a'): self.df1[('A', 'a')],
'CAP': ['B'] * len(self.df1),
'low': ['b'] * len(self.df1),
'value': self.df1[('B', 'b')],
}, columns=[('A', 'a'), 'CAP', 'low', 'value'])
result = self.df1.melt(id_vars=[('A', 'a')], value_vars=[('B', 'b')])
tm.assert_frame_equal(result, expected)
def test_tuple_vars_fail_with_multiindex(self):
# melt should fail with an informative error message if
# the columns have a MultiIndex and a tuple is passed
# for id_vars or value_vars.
tuple_a = ('A', 'a')
list_a = [tuple_a]
tuple_b = ('B', 'b')
list_b = [tuple_b]
for id_vars, value_vars in ((tuple_a, list_b), (list_a, tuple_b),
(tuple_a, tuple_b)):
with tm.assert_raises_regex(ValueError, r'MultiIndex'):
self.df1.melt(id_vars=id_vars, value_vars=value_vars)
def test_custom_var_name(self):
result5 = self.df.melt(var_name=self.var_name)
assert result5.columns.tolist() == ['var', 'value']
result6 = self.df.melt(id_vars=['id1'], var_name=self.var_name)
assert result6.columns.tolist() == ['id1', 'var', 'value']
result7 = self.df.melt(id_vars=['id1', 'id2'], var_name=self.var_name)
assert result7.columns.tolist() == ['id1', 'id2', 'var', 'value']
result8 = self.df.melt(id_vars=['id1', 'id2'], value_vars='A',
var_name=self.var_name)
assert result8.columns.tolist() == ['id1', 'id2', 'var', 'value']
result9 = self.df.melt(id_vars=['id1', 'id2'], value_vars=['A', 'B'],
var_name=self.var_name)
expected9 = DataFrame({'id1': self.df['id1'].tolist() * 2,
'id2': self.df['id2'].tolist() * 2,
self.var_name: ['A'] * 10 + ['B'] * 10,
'value': (self.df['A'].tolist() +
self.df['B'].tolist())},
columns=['id1', 'id2', self.var_name, 'value'])
tm.assert_frame_equal(result9, expected9)
def test_custom_value_name(self):
result10 = self.df.melt(value_name=self.value_name)
assert result10.columns.tolist() == ['variable', 'val']
result11 = self.df.melt(id_vars=['id1'], value_name=self.value_name)
assert result11.columns.tolist() == ['id1', 'variable', 'val']
result12 = self.df.melt(id_vars=['id1', 'id2'],
value_name=self.value_name)
assert result12.columns.tolist() == ['id1', 'id2', 'variable', 'val']
result13 = self.df.melt(id_vars=['id1', 'id2'], value_vars='A',
value_name=self.value_name)
assert result13.columns.tolist() == ['id1', 'id2', 'variable', 'val']
result14 = self.df.melt(id_vars=['id1', 'id2'], value_vars=['A', 'B'],
value_name=self.value_name)
expected14 = DataFrame({'id1': self.df['id1'].tolist() * 2,
'id2': self.df['id2'].tolist() * 2,
'variable': ['A'] * 10 + ['B'] * 10,
self.value_name: (self.df['A'].tolist() +
self.df['B'].tolist())},
columns=['id1', 'id2', 'variable',
self.value_name])
tm.assert_frame_equal(result14, expected14)
def test_custom_var_and_value_name(self):
result15 = self.df.melt(var_name=self.var_name,
value_name=self.value_name)
assert result15.columns.tolist() == ['var', 'val']
result16 = self.df.melt(id_vars=['id1'], var_name=self.var_name,
value_name=self.value_name)
assert result16.columns.tolist() == ['id1', 'var', 'val']
result17 = self.df.melt(id_vars=['id1', 'id2'],
var_name=self.var_name,
value_name=self.value_name)
assert result17.columns.tolist() == ['id1', 'id2', 'var', 'val']
result18 = self.df.melt(id_vars=['id1', 'id2'], value_vars='A',
var_name=self.var_name,
value_name=self.value_name)
assert result18.columns.tolist() == ['id1', 'id2', 'var', 'val']
result19 = self.df.melt(id_vars=['id1', 'id2'], value_vars=['A', 'B'],
var_name=self.var_name,
value_name=self.value_name)
expected19 = DataFrame({'id1': self.df['id1'].tolist() * 2,
'id2': self.df['id2'].tolist() * 2,
self.var_name: ['A'] * 10 + ['B'] * 10,
self.value_name: (self.df['A'].tolist() +
self.df['B'].tolist())},
columns=['id1', 'id2', self.var_name,
self.value_name])
tm.assert_frame_equal(result19, expected19)
df20 = self.df.copy()
df20.columns.name = 'foo'
result20 = df20.melt()
assert result20.columns.tolist() == ['foo', 'value']
def test_col_level(self):
res1 = self.df1.melt(col_level=0)
res2 = self.df1.melt(col_level='CAP')
assert res1.columns.tolist() == ['CAP', 'value']
assert res2.columns.tolist() == ['CAP', 'value']
def test_multiindex(self):
res = self.df1.melt()
assert res.columns.tolist() == ['CAP', 'low', 'value']
class TestLreshape(object):
def test_pairs(self):
data = {'birthdt': ['08jan2009', '20dec2008', '30dec2008', '21dec2008',
'11jan2009'],
'birthwt': [1766, 3301, 1454, 3139, 4133],
'id': [101, 102, 103, 104, 105],
'sex': ['Male', 'Female', 'Female', 'Female', 'Female'],
'visitdt1': ['11jan2009', '22dec2008', '04jan2009',
'29dec2008', '20jan2009'],
'visitdt2':
['21jan2009', nan, '22jan2009', '31dec2008', '03feb2009'],
'visitdt3': ['05feb2009', nan, nan, '02jan2009', '15feb2009'],
'wt1': [1823, 3338, 1549, 3298, 4306],
'wt2': [2011.0, nan, 1892.0, 3338.0, 4575.0],
'wt3': [2293.0, nan, nan, 3377.0, 4805.0]}
df = DataFrame(data)
spec = {'visitdt': ['visitdt%d' % i for i in range(1, 4)],
'wt': ['wt%d' % i for i in range(1, 4)]}
result = lreshape(df, spec)
exp_data = {'birthdt':
['08jan2009', '20dec2008', '30dec2008', '21dec2008',
'11jan2009', '08jan2009', '30dec2008', '21dec2008',
'11jan2009', '08jan2009', '21dec2008', '11jan2009'],
'birthwt': [1766, 3301, 1454, 3139, 4133, 1766, 1454, 3139,
4133, 1766, 3139, 4133],
'id': [101, 102, 103, 104, 105, 101, 103, 104, 105, 101,
104, 105],
'sex': ['Male', 'Female', 'Female', 'Female', 'Female',
'Male', 'Female', 'Female', 'Female', 'Male',
'Female', 'Female'],
'visitdt': ['11jan2009', '22dec2008', '04jan2009',
'29dec2008', '20jan2009', '21jan2009',
'22jan2009', '31dec2008', '03feb2009',
'05feb2009', '02jan2009', '15feb2009'],
'wt': [1823.0, 3338.0, 1549.0, 3298.0, 4306.0, 2011.0,
1892.0, 3338.0, 4575.0, 2293.0, 3377.0, 4805.0]}
exp = DataFrame(exp_data, columns=result.columns)
tm.assert_frame_equal(result, exp)
result = lreshape(df, spec, dropna=False)
exp_data = {'birthdt':
['08jan2009', '20dec2008', '30dec2008', '21dec2008',
'11jan2009', '08jan2009', '20dec2008', '30dec2008',
'21dec2008', '11jan2009', '08jan2009', '20dec2008',
'30dec2008', '21dec2008', '11jan2009'],
'birthwt': [1766, 3301, 1454, 3139, 4133, 1766, 3301, 1454,
3139, 4133, 1766, 3301, 1454, 3139, 4133],
'id': [101, 102, 103, 104, 105, 101, 102, 103, 104, 105,
101, 102, 103, 104, 105],
'sex': ['Male', 'Female', 'Female', 'Female', 'Female',
'Male', 'Female', 'Female', 'Female', 'Female',
'Male', 'Female', 'Female', 'Female', 'Female'],
'visitdt': ['11jan2009', '22dec2008', '04jan2009',
'29dec2008', '20jan2009', '21jan2009', nan,
'22jan2009', '31dec2008', '03feb2009',
'05feb2009', nan, nan, '02jan2009',
'15feb2009'],
'wt': [1823.0, 3338.0, 1549.0, 3298.0, 4306.0, 2011.0, nan,
1892.0, 3338.0, 4575.0, 2293.0, nan, nan, 3377.0,
4805.0]}
exp = DataFrame(exp_data, columns=result.columns)
tm.assert_frame_equal(result, exp)
spec = {'visitdt': ['visitdt%d' % i for i in range(1, 3)],
'wt': ['wt%d' % i for i in range(1, 4)]}
pytest.raises(ValueError, lreshape, df, spec)
class TestWideToLong(object):
def test_simple(self):
np.random.seed(123)
x = np.random.randn(3)
df = pd.DataFrame({"A1970": {0: "a",
1: "b",
2: "c"},
"A1980": {0: "d",
1: "e",
2: "f"},
"B1970": {0: 2.5,
1: 1.2,
2: .7},
"B1980": {0: 3.2,
1: 1.3,
2: .1},
"X": dict(zip(
range(3), x))})
df["id"] = df.index
exp_data = {"X": x.tolist() + x.tolist(),
"A": ['a', 'b', 'c', 'd', 'e', 'f'],
"B": [2.5, 1.2, 0.7, 3.2, 1.3, 0.1],
"year": [1970, 1970, 1970, 1980, 1980, 1980],
"id": [0, 1, 2, 0, 1, 2]}
expected = DataFrame(exp_data)
expected = expected.set_index(['id', 'year'])[["X", "A", "B"]]
result = wide_to_long(df, ["A", "B"], i="id", j="year")
tm.assert_frame_equal(result, expected)
def test_stubs(self):
# GH9204
df = pd.DataFrame([[0, 1, 2, 3, 8], [4, 5, 6, 7, 9]])
df.columns = ['id', 'inc1', 'inc2', 'edu1', 'edu2']
stubs = ['inc', 'edu']
# TODO: unused?
df_long = pd.wide_to_long(df, stubs, i='id', j='age') # noqa
assert stubs == ['inc', 'edu']
def test_separating_character(self):
# GH14779
np.random.seed(123)
x = np.random.randn(3)
df = pd.DataFrame({"A.1970": {0: "a",
1: "b",
2: "c"},
"A.1980": {0: "d",
1: "e",
2: "f"},
"B.1970": {0: 2.5,
1: 1.2,
2: .7},
"B.1980": {0: 3.2,
1: 1.3,
2: .1},
"X": dict(zip(
range(3), x))})
df["id"] = df.index
exp_data = {"X": x.tolist() + x.tolist(),
"A": ['a', 'b', 'c', 'd', 'e', 'f'],
"B": [2.5, 1.2, 0.7, 3.2, 1.3, 0.1],
"year": [1970, 1970, 1970, 1980, 1980, 1980],
"id": [0, 1, 2, 0, 1, 2]}
expected = DataFrame(exp_data)
expected = expected.set_index(['id', 'year'])[["X", "A", "B"]]
result = wide_to_long(df, ["A", "B"], i="id", j="year", sep=".")
tm.assert_frame_equal(result, expected)
def test_escapable_characters(self):
np.random.seed(123)
x = np.random.randn(3)
df = pd.DataFrame({"A(quarterly)1970": {0: "a",
1: "b",
2: "c"},
"A(quarterly)1980": {0: "d",
1: "e",
2: "f"},
"B(quarterly)1970": {0: 2.5,
1: 1.2,
2: .7},
"B(quarterly)1980": {0: 3.2,
1: 1.3,
2: .1},
"X": dict(zip(
range(3), x))})
df["id"] = df.index
exp_data = {"X": x.tolist() + x.tolist(),
"A(quarterly)": ['a', 'b', 'c', 'd', 'e', 'f'],
"B(quarterly)": [2.5, 1.2, 0.7, 3.2, 1.3, 0.1],
"year": [1970, 1970, 1970, 1980, 1980, 1980],
"id": [0, 1, 2, 0, 1, 2]}
expected = DataFrame(exp_data)
expected = expected.set_index(
['id', 'year'])[["X", "A(quarterly)", "B(quarterly)"]]
result = wide_to_long(df, ["A(quarterly)", "B(quarterly)"],
i="id", j="year")
tm.assert_frame_equal(result, expected)
def test_unbalanced(self):
# test that we can have a varying amount of time variables
df = pd.DataFrame({'A2010': [1.0, 2.0],
'A2011': [3.0, 4.0],
'B2010': [5.0, 6.0],
'X': ['X1', 'X2']})
df['id'] = df.index
exp_data = {'X': ['X1', 'X1', 'X2', 'X2'],
'A': [1.0, 3.0, 2.0, 4.0],
'B': [5.0, np.nan, 6.0, np.nan],
'id': [0, 0, 1, 1],
'year': [2010, 2011, 2010, 2011]}
expected = pd.DataFrame(exp_data)
expected = expected.set_index(['id', 'year'])[["X", "A", "B"]]
result = wide_to_long(df, ['A', 'B'], i='id', j='year')
tm.assert_frame_equal(result, expected)
def test_character_overlap(self):
# Test we handle overlapping characters in both id_vars and value_vars
df = pd.DataFrame({
'A11': ['a11', 'a22', 'a33'],
'A12': ['a21', 'a22', 'a23'],
'B11': ['b11', 'b12', 'b13'],
'B12': ['b21', 'b22', 'b23'],
'BB11': [1, 2, 3],
'BB12': [4, 5, 6],
'BBBX': [91, 92, 93],
'BBBZ': [91, 92, 93]
})
df['id'] = df.index
expected = pd.DataFrame({
'BBBX': [91, 92, 93, 91, 92, 93],
'BBBZ': [91, 92, 93, 91, 92, 93],
'A': ['a11', 'a22', 'a33', 'a21', 'a22', 'a23'],
'B': ['b11', 'b12', 'b13', 'b21', 'b22', 'b23'],
'BB': [1, 2, 3, 4, 5, 6],
'id': [0, 1, 2, 0, 1, 2],
'year': [11, 11, 11, 12, 12, 12]})
expected = expected.set_index(['id', 'year'])[
['BBBX', 'BBBZ', 'A', 'B', 'BB']]
result = wide_to_long(df, ['A', 'B', 'BB'], i='id', j='year')
tm.assert_frame_equal(result.sort_index(axis=1),
expected.sort_index(axis=1))
def test_invalid_separator(self):
# if an invalid separator is supplied a empty data frame is returned
sep = 'nope!'
df = pd.DataFrame({'A2010': [1.0, 2.0],
'A2011': [3.0, 4.0],
'B2010': [5.0, 6.0],
'X': ['X1', 'X2']})
df['id'] = df.index
exp_data = {'X': '',
'A2010': [],
'A2011': [],
'B2010': [],
'id': [],
'year': [],
'A': [],
'B': []}
expected = pd.DataFrame(exp_data).astype({'year': 'int'})
expected = expected.set_index(['id', 'year'])[[
'X', 'A2010', 'A2011', 'B2010', 'A', 'B']]
expected.index.set_levels([0, 1], level=0, inplace=True)
result = wide_to_long(df, ['A', 'B'], i='id', j='year', sep=sep)
tm.assert_frame_equal(result.sort_index(axis=1),
expected.sort_index(axis=1))
def test_num_string_disambiguation(self):
# Test that we can disambiguate number value_vars from
# string value_vars
df = pd.DataFrame({
'A11': ['a11', 'a22', 'a33'],
'A12': ['a21', 'a22', 'a23'],
'B11': ['b11', 'b12', 'b13'],
'B12': ['b21', 'b22', 'b23'],
'BB11': [1, 2, 3],
'BB12': [4, 5, 6],
'Arating': [91, 92, 93],
'Arating_old': [91, 92, 93]
})
df['id'] = df.index
expected = pd.DataFrame({
'Arating': [91, 92, 93, 91, 92, 93],
'Arating_old': [91, 92, 93, 91, 92, 93],
'A': ['a11', 'a22', 'a33', 'a21', 'a22', 'a23'],
'B': ['b11', 'b12', 'b13', 'b21', 'b22', 'b23'],
'BB': [1, 2, 3, 4, 5, 6],
'id': [0, 1, 2, 0, 1, 2],
'year': [11, 11, 11, 12, 12, 12]})
expected = expected.set_index(['id', 'year'])[
['Arating', 'Arating_old', 'A', 'B', 'BB']]
result = wide_to_long(df, ['A', 'B', 'BB'], i='id', j='year')
tm.assert_frame_equal(result.sort_index(axis=1),
expected.sort_index(axis=1))
def test_invalid_suffixtype(self):
# If all stubs names end with a string, but a numeric suffix is
# assumed, an empty data frame is returned
df = pd.DataFrame({'Aone': [1.0, 2.0],
'Atwo': [3.0, 4.0],
'Bone': [5.0, 6.0],
'X': ['X1', 'X2']})
df['id'] = df.index
exp_data = {'X': '',
'Aone': [],
'Atwo': [],
'Bone': [],
'id': [],
'year': [],
'A': [],
'B': []}
expected = pd.DataFrame(exp_data).astype({'year': 'int'})
expected = expected.set_index(['id', 'year'])
expected.index.set_levels([0, 1], level=0, inplace=True)
result = wide_to_long(df, ['A', 'B'], i='id', j='year')
tm.assert_frame_equal(result.sort_index(axis=1),
expected.sort_index(axis=1))
def test_multiple_id_columns(self):
# Taken from http://www.ats.ucla.edu/stat/stata/modules/reshapel.htm
df = pd.DataFrame({
'famid': [1, 1, 1, 2, 2, 2, 3, 3, 3],
'birth': [1, 2, 3, 1, 2, 3, 1, 2, 3],
'ht1': [2.8, 2.9, 2.2, 2, 1.8, 1.9, 2.2, 2.3, 2.1],
'ht2': [3.4, 3.8, 2.9, 3.2, 2.8, 2.4, 3.3, 3.4, 2.9]
})
expected = pd.DataFrame({
'ht': [2.8, 3.4, 2.9, 3.8, 2.2, 2.9, 2.0, 3.2, 1.8,
2.8, 1.9, 2.4, 2.2, 3.3, 2.3, 3.4, 2.1, 2.9],
'famid': [1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3],
'birth': [1, 1, 2, 2, 3, 3, 1, 1, 2, 2, 3, 3, 1, 1, 2, 2, 3, 3],
'age': [1, 2, 1, 2, 1, 2, 1, 2, 1,
2, 1, 2, 1, 2, 1, 2, 1, 2]
})
expected = expected.set_index(['famid', 'birth', 'age'])[['ht']]
result = wide_to_long(df, 'ht', i=['famid', 'birth'], j='age')
tm.assert_frame_equal(result, expected)
def test_non_unique_idvars(self):
# GH16382
# Raise an error message if non unique id vars (i) are passed
df = pd.DataFrame({
'A_A1': [1, 2, 3, 4, 5],
'B_B1': [1, 2, 3, 4, 5],
'x': [1, 1, 1, 1, 1]
})
with pytest.raises(ValueError):
wide_to_long(df, ['A_A', 'B_B'], i='x', j='colname')
def test_cast_j_int(self):
df = pd.DataFrame({
'actor_1': ['CCH Pounder', 'Johnny Depp', 'Christoph Waltz'],
'actor_2': ['Joel David Moore', 'Orlando Bloom', 'Rory Kinnear'],
'actor_fb_likes_1': [1000.0, 40000.0, 11000.0],
'actor_fb_likes_2': [936.0, 5000.0, 393.0],
'title': ['Avatar', "Pirates of the Caribbean", 'Spectre']})
expected = pd.DataFrame({
'actor': ['CCH Pounder',
'Johnny Depp',
'Christoph Waltz',
'Joel David Moore',
'Orlando Bloom',
'Rory Kinnear'],
'actor_fb_likes': [1000.0, 40000.0, 11000.0, 936.0, 5000.0, 393.0],
'num': [1, 1, 1, 2, 2, 2],
'title': ['Avatar',
'Pirates of the Caribbean',
'Spectre',
'Avatar',
'Pirates of the Caribbean',
'Spectre']}).set_index(['title', 'num'])
result = wide_to_long(df, ['actor', 'actor_fb_likes'],
i='title', j='num', sep='_')
tm.assert_frame_equal(result, expected)
def test_identical_stubnames(self):
df = pd.DataFrame({'A2010': [1.0, 2.0],
'A2011': [3.0, 4.0],
'B2010': [5.0, 6.0],
'A': ['X1', 'X2']})
with pytest.raises(ValueError):
wide_to_long(df, ['A', 'B'], i='A', j='colname')
def test_nonnumeric_suffix(self):
df = pd.DataFrame({'treatment_placebo': [1.0, 2.0],
'treatment_test': [3.0, 4.0],
'result_placebo': [5.0, 6.0],
'A': ['X1', 'X2']})
expected = pd.DataFrame({
'A': ['X1', 'X1', 'X2', 'X2'],
'colname': ['placebo', 'test', 'placebo', 'test'],
'result': [5.0, np.nan, 6.0, np.nan],
'treatment': [1.0, 3.0, 2.0, 4.0]})
expected = expected.set_index(['A', 'colname'])
result = wide_to_long(df, ['result', 'treatment'],
i='A', j='colname', suffix='[a-z]+', sep='_')
tm.assert_frame_equal(result, expected)
def test_mixed_type_suffix(self):
df = pd.DataFrame({
'treatment_1': [1.0, 2.0],
'treatment_foo': [3.0, 4.0],
'result_foo': [5.0, 6.0],
'result_1': [0, 9],
'A': ['X1', 'X2']})
expected = pd.DataFrame({
'A': ['X1', 'X2', 'X1', 'X2'],
'colname': ['1', '1', 'foo', 'foo'],
'result': [0.0, 9.0, 5.0, 6.0],
'treatment': [1.0, 2.0, 3.0, 4.0]}).set_index(['A', 'colname'])
result = wide_to_long(df, ['result', 'treatment'],
i='A', j='colname', suffix='.+', sep='_')
tm.assert_frame_equal(result, expected)
def test_float_suffix(self):
df = pd.DataFrame({
'treatment_1.1': [1.0, 2.0],
'treatment_2.1': [3.0, 4.0],
'result_1.2': [5.0, 6.0],
'result_1': [0, 9],
'A': ['X1', 'X2']})
expected = pd.DataFrame({
'A': ['X1', 'X1', 'X1', 'X1', 'X2', 'X2', 'X2', 'X2'],
'colname': [1, 1.1, 1.2, 2.1, 1, 1.1, 1.2, 2.1],
'result': [0.0, np.nan, 5.0, np.nan, 9.0, np.nan, 6.0, np.nan],
'treatment': [np.nan, 1.0, np.nan, 3.0, np.nan, 2.0, np.nan, 4.0]})
expected = expected.set_index(['A', 'colname'])
result = wide_to_long(df, ['result', 'treatment'],
i='A', j='colname', suffix='[0-9.]+', sep='_')
tm.assert_frame_equal(result, expected)
| bsd-3-clause |
Clyde-fare/scikit-learn | examples/ensemble/plot_forest_importances_faces.py | 403 | 1519 | """
=================================================
Pixel importances with a parallel forest of trees
=================================================
This example shows the use of forests of trees to evaluate the importance
of the pixels in an image classification task (faces). The hotter the pixel,
the more important.
The code below also illustrates how the construction and the computation
of the predictions can be parallelized within multiple jobs.
"""
print(__doc__)
from time import time
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_olivetti_faces
from sklearn.ensemble import ExtraTreesClassifier
# Number of cores to use to perform parallel fitting of the forest model
n_jobs = 1
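# (Setting n_jobs = -1 would use all available CPU cores instead.)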
# Load the faces dataset
data = fetch_olivetti_faces()
X = data.images.reshape((len(data.images), -1))
y = data.target
mask = y < 5 # Limit to 5 classes
X = X[mask]
y = y[mask]
# Build a forest and compute the pixel importances
print("Fitting ExtraTreesClassifier on faces data with %d cores..." % n_jobs)
t0 = time()
forest = ExtraTreesClassifier(n_estimators=1000,
max_features=128,
n_jobs=n_jobs,
random_state=0)
forest.fit(X, y)
print("done in %0.3fs" % (time() - t0))
importances = forest.feature_importances_
importances = importances.reshape(data.images[0].shape)
# Plot pixel importances
plt.matshow(importances, cmap=plt.cm.hot)
plt.title("Pixel importances with forests of trees")
plt.show()
| bsd-3-clause |
flaviovdf/prme | mrr.py | 1 | 1332 | #-*- coding: utf8
from __future__ import division, print_function
from prme import mrr
import pandas as pd
import plac
import numpy as np
def main(model, out_fpath):
store = pd.HDFStore(model)
from_ = store['from_'][0][0]
to = store['to'][0][0]
assert from_ == 0
trace_fpath = store['trace_fpath'][0][0]
XP_hk = store['XP_hk'].values
XP_ok = store['XP_ok'].values
XG_ok = store['XG_ok'].values
alpha = store['alpha'].values[0][0]
tau = store['tau'].values[0][0]
hyper2id = dict(store['hyper2id'].values)
obj2id = dict(store['obj2id'].values)
HSDs = []
dts = []
with open(trace_fpath) as trace_file:
for i, l in enumerate(trace_file):
if i < to:
continue
dt, h, s, d = l.strip().split('\t')
if h in hyper2id and s in obj2id and d in obj2id:
dts.append(float(dt))
HSDs.append([hyper2id[h], obj2id[s], obj2id[d]])
num_queries = min(10000, len(HSDs))
queries = np.random.choice(len(HSDs), size=num_queries)
dts = np.array(dts, order='C', dtype='d')
HSDs = np.array(HSDs, order='C', dtype='i4')
rrs = mrr.compute(dts, HSDs, XP_hk, XP_ok, XG_ok, alpha, tau)
np.savetxt(out_fpath, rrs)
store.close()
plac.call(main)
| bsd-3-clause |
chrsrds/scikit-learn | sklearn/cluster/tests/test_mean_shift.py | 2 | 5380 | """
Testing for mean shift clustering methods
"""
import numpy as np
import warnings
import pytest
from scipy import sparse
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raise_message
from sklearn.cluster import MeanShift
from sklearn.cluster import mean_shift
from sklearn.cluster import estimate_bandwidth
from sklearn.cluster import get_bin_seeds
from sklearn.datasets.samples_generator import make_blobs
n_clusters = 3
centers = np.array([[1, 1], [-1, -1], [1, -1]]) + 10
X, _ = make_blobs(n_samples=300, n_features=2, centers=centers,
cluster_std=0.4, shuffle=True, random_state=11)
def test_estimate_bandwidth():
# Test estimate_bandwidth
bandwidth = estimate_bandwidth(X, n_samples=200)
assert 0.9 <= bandwidth <= 1.5
def test_estimate_bandwidth_1sample():
# Test estimate_bandwidth when n_samples=1 and quantile<1, so that
# n_neighbors is set to 1.
bandwidth = estimate_bandwidth(X, n_samples=1, quantile=0.3)
assert bandwidth == pytest.approx(0., abs=1e-5)
@pytest.mark.parametrize("bandwidth, cluster_all, expected, "
"first_cluster_label",
[(1.2, True, 3, 0), (1.2, False, 4, -1)])
def test_mean_shift(bandwidth, cluster_all, expected, first_cluster_label):
# Test MeanShift algorithm
ms = MeanShift(bandwidth=bandwidth, cluster_all=cluster_all)
labels = ms.fit(X).labels_
labels_unique = np.unique(labels)
n_clusters_ = len(labels_unique)
assert n_clusters_ == expected
assert labels_unique[0] == first_cluster_label
cluster_centers, labels_mean_shift = mean_shift(X, cluster_all=cluster_all)
labels_mean_shift_unique = np.unique(labels_mean_shift)
n_clusters_mean_shift = len(labels_mean_shift_unique)
assert n_clusters_mean_shift == expected
assert labels_mean_shift_unique[0] == first_cluster_label
def test_mean_shift_negative_bandwidth():
bandwidth = -1
ms = MeanShift(bandwidth=bandwidth)
msg = (r"bandwidth needs to be greater than zero or None,"
r" got -1\.000000")
with pytest.raises(ValueError, match=msg):
ms.fit(X)
def test_estimate_bandwidth_with_sparse_matrix():
# Test estimate_bandwidth with sparse matrix
X = sparse.lil_matrix((1000, 1000))
msg = "A sparse matrix was passed, but dense data is required."
assert_raise_message(TypeError, msg, estimate_bandwidth, X, 200)
def test_parallel():
centers = np.array([[1, 1], [-1, -1], [1, -1]]) + 10
X, _ = make_blobs(n_samples=50, n_features=2, centers=centers,
cluster_std=0.4, shuffle=True, random_state=11)
ms1 = MeanShift(n_jobs=2)
ms1.fit(X)
ms2 = MeanShift()
ms2.fit(X)
assert_array_almost_equal(ms1.cluster_centers_, ms2.cluster_centers_)
assert_array_equal(ms1.labels_, ms2.labels_)
def test_meanshift_predict():
# Test MeanShift.predict
ms = MeanShift(bandwidth=1.2)
labels = ms.fit_predict(X)
labels2 = ms.predict(X)
assert_array_equal(labels, labels2)
def test_meanshift_all_orphans():
# init away from the data, crash with a sensible warning
ms = MeanShift(bandwidth=0.1, seeds=[[-9, -9], [-10, -10]])
msg = "No point was within bandwidth=0.1"
assert_raise_message(ValueError, msg, ms.fit, X,)
def test_unfitted():
# Non-regression: before fit, there should be not fitted attributes.
ms = MeanShift()
assert not hasattr(ms, "cluster_centers_")
assert not hasattr(ms, "labels_")
def test_cluster_intensity_tie():
X = np.array([[1, 1], [2, 1], [1, 0],
[4, 7], [3, 5], [3, 6]])
c1 = MeanShift(bandwidth=2).fit(X)
X = np.array([[4, 7], [3, 5], [3, 6],
[1, 1], [2, 1], [1, 0]])
c2 = MeanShift(bandwidth=2).fit(X)
assert_array_equal(c1.labels_, [1, 1, 1, 0, 0, 0])
assert_array_equal(c2.labels_, [0, 0, 0, 1, 1, 1])
def test_bin_seeds():
# Test the bin seeding technique which can be used in the mean shift
# algorithm
# Data is just 6 points in the plane
X = np.array([[1., 1.], [1.4, 1.4], [1.8, 1.2],
[2., 1.], [2.1, 1.1], [0., 0.]])
# With a bin coarseness of 1.0 and min_bin_freq of 1, 3 bins should be
# found
ground_truth = {(1., 1.), (2., 1.), (0., 0.)}
test_bins = get_bin_seeds(X, 1, 1)
test_result = set(tuple(p) for p in test_bins)
assert len(ground_truth.symmetric_difference(test_result)) == 0
# With a bin coarseness of 1.0 and min_bin_freq of 2, 2 bins should be
# found
ground_truth = {(1., 1.), (2., 1.)}
test_bins = get_bin_seeds(X, 1, 2)
test_result = set(tuple(p) for p in test_bins)
assert len(ground_truth.symmetric_difference(test_result)) == 0
# With a bin size of 0.01 and min_bin_freq of 1, 6 bins should be found
# we bail and use the whole data here.
with warnings.catch_warnings(record=True):
test_bins = get_bin_seeds(X, 0.01, 1)
assert_array_almost_equal(test_bins, X)
# tight clusters around [0, 0] and [1, 1], only get two bins
X, _ = make_blobs(n_samples=100, n_features=2, centers=[[0, 0], [1, 1]],
cluster_std=0.1, random_state=0)
test_bins = get_bin_seeds(X, 1)
assert_array_equal(test_bins, [[0, 0], [1, 1]])
| bsd-3-clause |
vibhorag/scikit-learn | sklearn/metrics/scorer.py | 211 | 13141 | """
The :mod:`sklearn.metrics.scorer` submodule implements a flexible
interface for model selection and evaluation using
arbitrary score functions.
A scorer object is a callable that can be passed to
:class:`sklearn.grid_search.GridSearchCV` or
:func:`sklearn.cross_validation.cross_val_score` as the ``scoring`` parameter,
to specify how a model should be evaluated.
The signature of the call is ``(estimator, X, y)`` where ``estimator``
is the model to be evaluated, ``X`` is the test data and ``y`` is the
ground truth labeling (or ``None`` in the case of unsupervised models).
"""
# Authors: Andreas Mueller <[email protected]>
# Lars Buitinck <[email protected]>
# Arnaud Joly <[email protected]>
# License: Simplified BSD
from abc import ABCMeta, abstractmethod
from functools import partial
import numpy as np
from . import (r2_score, median_absolute_error, mean_absolute_error,
mean_squared_error, accuracy_score, f1_score,
roc_auc_score, average_precision_score,
precision_score, recall_score, log_loss)
from .cluster import adjusted_rand_score
from ..utils.multiclass import type_of_target
from ..externals import six
from ..base import is_regressor
class _BaseScorer(six.with_metaclass(ABCMeta, object)):
def __init__(self, score_func, sign, kwargs):
self._kwargs = kwargs
self._score_func = score_func
self._sign = sign
@abstractmethod
def __call__(self, estimator, X, y, sample_weight=None):
pass
def __repr__(self):
kwargs_string = "".join([", %s=%s" % (str(k), str(v))
for k, v in self._kwargs.items()])
return ("make_scorer(%s%s%s%s)"
% (self._score_func.__name__,
"" if self._sign > 0 else ", greater_is_better=False",
self._factory_args(), kwargs_string))
def _factory_args(self):
"""Return non-default make_scorer arguments for repr."""
return ""
class _PredictScorer(_BaseScorer):
def __call__(self, estimator, X, y_true, sample_weight=None):
"""Evaluate predicted target values for X relative to y_true.
Parameters
----------
estimator : object
            Trained estimator to use for scoring. Must have a predict
            method; the output of that is used to compute the score.
X : array-like or sparse matrix
Test data that will be fed to estimator.predict.
y_true : array-like
Gold standard target values for X.
sample_weight : array-like, optional (default=None)
Sample weights.
Returns
-------
score : float
Score function applied to prediction of estimator on X.
"""
y_pred = estimator.predict(X)
if sample_weight is not None:
return self._sign * self._score_func(y_true, y_pred,
sample_weight=sample_weight,
**self._kwargs)
else:
return self._sign * self._score_func(y_true, y_pred,
**self._kwargs)
class _ProbaScorer(_BaseScorer):
def __call__(self, clf, X, y, sample_weight=None):
"""Evaluate predicted probabilities for X relative to y_true.
Parameters
----------
clf : object
Trained classifier to use for scoring. Must have a predict_proba
method; the output of that is used to compute the score.
X : array-like or sparse matrix
Test data that will be fed to clf.predict_proba.
y : array-like
Gold standard target values for X. These must be class labels,
not probabilities.
sample_weight : array-like, optional (default=None)
Sample weights.
Returns
-------
score : float
Score function applied to prediction of estimator on X.
"""
y_pred = clf.predict_proba(X)
if sample_weight is not None:
return self._sign * self._score_func(y, y_pred,
sample_weight=sample_weight,
**self._kwargs)
else:
return self._sign * self._score_func(y, y_pred, **self._kwargs)
def _factory_args(self):
return ", needs_proba=True"
class _ThresholdScorer(_BaseScorer):
def __call__(self, clf, X, y, sample_weight=None):
"""Evaluate decision function output for X relative to y_true.
Parameters
----------
clf : object
Trained classifier to use for scoring. Must have either a
decision_function method or a predict_proba method; the output of
that is used to compute the score.
X : array-like or sparse matrix
Test data that will be fed to clf.decision_function or
clf.predict_proba.
y : array-like
Gold standard target values for X. These must be class labels,
not decision function values.
sample_weight : array-like, optional (default=None)
Sample weights.
Returns
-------
score : float
Score function applied to prediction of estimator on X.
"""
y_type = type_of_target(y)
if y_type not in ("binary", "multilabel-indicator"):
raise ValueError("{0} format is not supported".format(y_type))
if is_regressor(clf):
y_pred = clf.predict(X)
else:
try:
y_pred = clf.decision_function(X)
# For multi-output multi-class estimator
if isinstance(y_pred, list):
y_pred = np.vstack(p for p in y_pred).T
except (NotImplementedError, AttributeError):
y_pred = clf.predict_proba(X)
if y_type == "binary":
y_pred = y_pred[:, 1]
elif isinstance(y_pred, list):
y_pred = np.vstack([p[:, -1] for p in y_pred]).T
if sample_weight is not None:
return self._sign * self._score_func(y, y_pred,
sample_weight=sample_weight,
**self._kwargs)
else:
return self._sign * self._score_func(y, y_pred, **self._kwargs)
def _factory_args(self):
return ", needs_threshold=True"
def get_scorer(scoring):
if isinstance(scoring, six.string_types):
try:
scorer = SCORERS[scoring]
except KeyError:
raise ValueError('%r is not a valid scoring value. '
'Valid options are %s'
% (scoring, sorted(SCORERS.keys())))
else:
scorer = scoring
return scorer
def _passthrough_scorer(estimator, *args, **kwargs):
"""Function that wraps estimator.score"""
return estimator.score(*args, **kwargs)
def check_scoring(estimator, scoring=None, allow_none=False):
"""Determine scorer from user options.
A TypeError will be thrown if the estimator cannot be scored.
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
allow_none : boolean, optional, default: False
If no scoring is specified and the estimator has no score function, we
can either return None or raise an exception.
Returns
-------
scoring : callable
A scorer callable object / function with signature
``scorer(estimator, X, y)``.
"""
has_scoring = scoring is not None
if not hasattr(estimator, 'fit'):
raise TypeError("estimator should a be an estimator implementing "
"'fit' method, %r was passed" % estimator)
elif has_scoring:
return get_scorer(scoring)
elif hasattr(estimator, 'score'):
return _passthrough_scorer
elif allow_none:
return None
else:
raise TypeError(
"If no scoring is specified, the estimator passed should "
"have a 'score' method. The estimator %r does not." % estimator)
def make_scorer(score_func, greater_is_better=True, needs_proba=False,
needs_threshold=False, **kwargs):
"""Make a scorer from a performance metric or loss function.
This factory function wraps scoring functions for use in GridSearchCV
and cross_val_score. It takes a score function, such as ``accuracy_score``,
    ``mean_squared_error``, ``adjusted_rand_score`` or ``average_precision``
and returns a callable that scores an estimator's output.
Read more in the :ref:`User Guide <scoring>`.
Parameters
----------
score_func : callable,
Score function (or loss function) with signature
``score_func(y, y_pred, **kwargs)``.
greater_is_better : boolean, default=True
Whether score_func is a score function (default), meaning high is good,
or a loss function, meaning low is good. In the latter case, the
scorer object will sign-flip the outcome of the score_func.
needs_proba : boolean, default=False
Whether score_func requires predict_proba to get probability estimates
out of a classifier.
needs_threshold : boolean, default=False
Whether score_func takes a continuous decision certainty.
This only works for binary classification using estimators that
have either a decision_function or predict_proba method.
For example ``average_precision`` or the area under the roc curve
can not be computed using discrete predictions alone.
**kwargs : additional arguments
Additional parameters to be passed to score_func.
Returns
-------
scorer : callable
Callable object that returns a scalar score; greater is better.
Examples
--------
>>> from sklearn.metrics import fbeta_score, make_scorer
>>> ftwo_scorer = make_scorer(fbeta_score, beta=2)
>>> ftwo_scorer
make_scorer(fbeta_score, beta=2)
>>> from sklearn.grid_search import GridSearchCV
>>> from sklearn.svm import LinearSVC
>>> grid = GridSearchCV(LinearSVC(), param_grid={'C': [1, 10]},
... scoring=ftwo_scorer)
"""
sign = 1 if greater_is_better else -1
if needs_proba and needs_threshold:
raise ValueError("Set either needs_proba or needs_threshold to True,"
" but not both.")
if needs_proba:
cls = _ProbaScorer
elif needs_threshold:
cls = _ThresholdScorer
else:
cls = _PredictScorer
return cls(score_func, sign, kwargs)
# Standard regression scores
r2_scorer = make_scorer(r2_score)
mean_squared_error_scorer = make_scorer(mean_squared_error,
greater_is_better=False)
mean_absolute_error_scorer = make_scorer(mean_absolute_error,
greater_is_better=False)
median_absolute_error_scorer = make_scorer(median_absolute_error,
greater_is_better=False)
# Standard Classification Scores
accuracy_scorer = make_scorer(accuracy_score)
f1_scorer = make_scorer(f1_score)
# Score functions that need decision values
roc_auc_scorer = make_scorer(roc_auc_score, greater_is_better=True,
needs_threshold=True)
average_precision_scorer = make_scorer(average_precision_score,
needs_threshold=True)
precision_scorer = make_scorer(precision_score)
recall_scorer = make_scorer(recall_score)
# Score function for probabilistic classification
log_loss_scorer = make_scorer(log_loss, greater_is_better=False,
needs_proba=True)
# Clustering scores
adjusted_rand_scorer = make_scorer(adjusted_rand_score)
SCORERS = dict(r2=r2_scorer,
median_absolute_error=median_absolute_error_scorer,
mean_absolute_error=mean_absolute_error_scorer,
mean_squared_error=mean_squared_error_scorer,
accuracy=accuracy_scorer, roc_auc=roc_auc_scorer,
average_precision=average_precision_scorer,
log_loss=log_loss_scorer,
adjusted_rand_score=adjusted_rand_scorer)
for name, metric in [('precision', precision_score),
('recall', recall_score), ('f1', f1_score)]:
SCORERS[name] = make_scorer(metric)
for average in ['macro', 'micro', 'samples', 'weighted']:
qualified_name = '{0}_{1}'.format(name, average)
SCORERS[qualified_name] = make_scorer(partial(metric, pos_label=None,
average=average))
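# Illustrative usage of the objects defined above (a minimal sketch, not part
# of the library; assumes a fitted classifier `clf` and held-out data
# `X_test`, `y_test`):
#
#     scorer = get_scorer('f1_macro')       # look up a named scorer
#     score = scorer(clf, X_test, y_test)   # calls clf.predict internally
#
#     # or wrap a custom metric, as done for log_loss_scorer above:
#     neg_log_loss = make_scorer(log_loss, greater_is_better=False,
#                                needs_proba=True)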
| bsd-3-clause |
NelisVerhoef/scikit-learn | examples/cluster/plot_kmeans_silhouette_analysis.py | 242 | 5885 | """
===============================================================================
Selecting the number of clusters with silhouette analysis on KMeans clustering
===============================================================================
Silhouette analysis can be used to study the separation distance between the
resulting clusters. The silhouette plot displays a measure of how close each
point in one cluster is to points in the neighboring clusters and thus provides
a way to assess parameters like number of clusters visually. This measure has a
range of [-1, 1].
Silhouette coefficients (as these values are referred to) near +1 indicate
that the sample is far away from the neighboring clusters. A value of 0
indicates that the sample is on or very close to the decision boundary between
two neighboring clusters and negative values indicate that those samples might
have been assigned to the wrong cluster.
In this example the silhouette analysis is used to choose an optimal value for
``n_clusters``. The silhouette plot shows that ``n_clusters`` values of 3, 5
and 6 are bad picks for the given data due to the presence of clusters with
below average silhouette scores and also due to wide fluctuations in the size
of the silhouette plots. Silhouette analysis is more ambivalent in deciding
between 2 and 4.
Also from the thickness of the silhouette plot the cluster size can be
visualized. The silhouette plot for cluster 0 when ``n_clusters`` is equal to
2, is bigger in size owing to the grouping of the 3 sub clusters into one big
cluster. However when the ``n_clusters`` is equal to 4, all the plots are more
or less of similar thickness and hence are of similar sizes as can be also
verified from the labelled scatter plot on the right.
"""
from __future__ import print_function
from sklearn.datasets import make_blobs
from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_samples, silhouette_score
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import numpy as np
print(__doc__)
# Generating the sample data from make_blobs
# This particular setting has one distinct cluster and 3 clusters placed close
# together.
X, y = make_blobs(n_samples=500,
n_features=2,
centers=4,
cluster_std=1,
center_box=(-10.0, 10.0),
shuffle=True,
random_state=1) # For reproducibility
range_n_clusters = [2, 3, 4, 5, 6]
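# A compact alternative that keeps only the average score per candidate and
# picks the best one (a minimal sketch; the plots below tell the fuller story,
# since a high average can hide poorly shaped individual clusters):
#
#     avg = {k: silhouette_score(X, KMeans(n_clusters=k,
#                                          random_state=10).fit_predict(X))
#            for k in range_n_clusters}
#     best_k = max(avg, key=avg.get)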
for n_clusters in range_n_clusters:
# Create a subplot with 1 row and 2 columns
fig, (ax1, ax2) = plt.subplots(1, 2)
fig.set_size_inches(18, 7)
# The 1st subplot is the silhouette plot
# The silhouette coefficient can range from -1, 1 but in this example all
# lie within [-0.1, 1]
ax1.set_xlim([-0.1, 1])
# The (n_clusters+1)*10 is for inserting blank space between silhouette
# plots of individual clusters, to demarcate them clearly.
ax1.set_ylim([0, len(X) + (n_clusters + 1) * 10])
# Initialize the clusterer with n_clusters value and a random generator
# seed of 10 for reproducibility.
clusterer = KMeans(n_clusters=n_clusters, random_state=10)
cluster_labels = clusterer.fit_predict(X)
# The silhouette_score gives the average value for all the samples.
# This gives a perspective into the density and separation of the formed
# clusters
silhouette_avg = silhouette_score(X, cluster_labels)
print("For n_clusters =", n_clusters,
"The average silhouette_score is :", silhouette_avg)
# Compute the silhouette scores for each sample
sample_silhouette_values = silhouette_samples(X, cluster_labels)
y_lower = 10
for i in range(n_clusters):
# Aggregate the silhouette scores for samples belonging to
# cluster i, and sort them
ith_cluster_silhouette_values = \
sample_silhouette_values[cluster_labels == i]
ith_cluster_silhouette_values.sort()
size_cluster_i = ith_cluster_silhouette_values.shape[0]
y_upper = y_lower + size_cluster_i
color = cm.spectral(float(i) / n_clusters)
ax1.fill_betweenx(np.arange(y_lower, y_upper),
0, ith_cluster_silhouette_values,
facecolor=color, edgecolor=color, alpha=0.7)
# Label the silhouette plots with their cluster numbers at the middle
ax1.text(-0.05, y_lower + 0.5 * size_cluster_i, str(i))
# Compute the new y_lower for next plot
y_lower = y_upper + 10 # 10 for the 0 samples
ax1.set_title("The silhouette plot for the various clusters.")
ax1.set_xlabel("The silhouette coefficient values")
ax1.set_ylabel("Cluster label")
    # The vertical line for the average silhouette score of all the values
ax1.axvline(x=silhouette_avg, color="red", linestyle="--")
ax1.set_yticks([]) # Clear the yaxis labels / ticks
ax1.set_xticks([-0.1, 0, 0.2, 0.4, 0.6, 0.8, 1])
# 2nd Plot showing the actual clusters formed
colors = cm.spectral(cluster_labels.astype(float) / n_clusters)
ax2.scatter(X[:, 0], X[:, 1], marker='.', s=30, lw=0, alpha=0.7,
c=colors)
# Labeling the clusters
centers = clusterer.cluster_centers_
# Draw white circles at cluster centers
ax2.scatter(centers[:, 0], centers[:, 1],
marker='o', c="white", alpha=1, s=200)
for i, c in enumerate(centers):
ax2.scatter(c[0], c[1], marker='$%d$' % i, alpha=1, s=50)
ax2.set_title("The visualization of the clustered data.")
ax2.set_xlabel("Feature space for the 1st feature")
ax2.set_ylabel("Feature space for the 2nd feature")
plt.suptitle(("Silhouette analysis for KMeans clustering on sample data "
"with n_clusters = %d" % n_clusters),
fontsize=14, fontweight='bold')
plt.show()
| bsd-3-clause |
UCBerkeleySETI/breakthrough | ML/CNNFRB/research_code/image_reader.py | 1 | 10610 | from __future__ import print_function
import fnmatch
import os
import re
import threading
from scipy import ndimage
from skimage import measure
import pandas
import numpy as np
import tensorflow as tf
import skimage
import skimage.io
import skimage.transform
import os
import zipfile
LABEL_FILE = "./labels.csv"
#G
def get_corpus_size(directory, pattern='*.png'):
    '''Recursively counts all files matching the pattern.'''
files = []
for root, dirnames, filenames in os.walk(directory):
for filename in fnmatch.filter(filenames, pattern):
files.append(os.path.join(root, filename))
return len(files)
#def get_imgid(fpath):
# return int(fpath.split('/')[-1].split('.')[0])
def get_imgid(filename, pref):
if pref:
tsplits = filename.split('/')
img_id = '_'.join([tsplits[-2], tsplits[-1]]).split('.')[0]
else:
img_id = '.'.join(filename.split('/')[-1].split('.')[:-1])
return img_id
def find_files(directory, pattern='*.png', sortby="shuffle"):
'''Recursively finds all files matching the pattern.'''
files = []
for root, dirnames, filenames in os.walk(directory):
for filename in fnmatch.filter(filenames, pattern):
files.append(os.path.join(root, filename))
if sortby == 'auto':
files = np.sort(files)
elif sortby == 'shuffle':
np.random.shuffle(files)
return files
def _augment_img(img):
rands = np.random.random(3)
if rands[0] < 0.5:
img = np.fliplr(img)
if rands[1] < 0.5:
img = np.flipud(img)
return img
def load_image(directory, pattern='*.png', train=False, pref=False, select=0.99, dtype=np.float32):
    '''Generator that yields image arrays from the dataset, together
    with the ID of the corresponding file.'''
if train:
sort_by = 'shuffle'
else:
sort_by = 'auto'
files = find_files(directory, pattern=pattern, sortby=sort_by)
csize = int(select*len(files))
files = np.random.choice(files, size=csize)
for filename in files:
if pattern == '*.png':
img = skimage.io.imread(filename).astype(dtype)
img /= 256.
elif pattern == '*.npy':
img = np.load(filename).astype(dtype)
elif pattern == '*.npz':
img = np.load(filename)['frame'].astype(dtype)
if False:
img *= np.load(filename)['mask'].astype(dtype)
if train:
img = img.T[...,np.newaxis]
img_id = get_imgid(filename, pref=pref)
#print(filename, img.shape)
yield img, img_id
def load_data_to_memory(directory, pattern='*.npy', train=True, pref=False, limit=100000, dtype=np.float16, dshape=None):
if train:
sort_by = 'shuffle'
else:
sort_by = 'auto'
files = find_files(directory, pattern=pattern, sortby=sort_by)
print("Loading {} files into memory".format(min(len(files), limit)))
if limit < len(files):
files = files[:limit]
Y_true = []
X = np.zeros((len(files),)+dshape, dtype=dtype)
for i, filename in enumerate(files):
if i % 1000 == 0:
print(i)
if pattern == '*.png':
img = skimage.io.imread(filename)
img /= 256.
elif pattern == '*.npy':
img = np.load(filename)
elif pattern == '*.npz':
img = np.load(filename)['frame']
if False:
img *= np.load(filename)['mask']
if train:
img = img.T[...,np.newaxis]
if img.dtype is not X.dtype:
img = img.astype(X.dtype)
X[i] = img
#img_id = get_imgid(filename, pref=pref)
if "signa" not in filename:
Y_true.append(0)
elif "signa" in filename:
Y_true.append(1)
else:
print(filename + " not understood")
#X = np.stack(X, axis=0)
return X, np.asarray(Y_true)
def convert_to_chunck(directory, outdir, batch_size=512, pattern='*.npy', train=True, pref=False, limit=1000000, dtype=np.float16, dshape=None):
if train:
sort_by = 'shuffle'
else:
sort_by = 'auto'
files = find_files(directory, pattern=pattern, sortby=sort_by)
print("Loading {} files into memory".format(min(len(files), limit)))
if limit < len(files):
files = files[:limit]
Y_true = []
X = np.zeros((batch_size,)+dshape, dtype=dtype)
for i, filename in enumerate(files):
if len(files) - i < batch_size:
X = X[:len(files) - i]
if i % batch_size == 0:
print(i)
np.savez(outdir)
if pattern == '*.png':
img = skimage.io.imread(filename)
img /= 256.
elif pattern == '*.npy':
img = np.load(filename)
elif pattern == '*.npz':
img = np.load(filename)['frame']
if False:
img *= np.load(filename)['mask']
if train:
img = img.T[...,np.newaxis]
if img.dtype is not X.dtype:
img = img.astype(X.dtype)
X[i] = img
#img_id = get_imgid(filename, pref=pref)
if "clean" in filename:
Y_true.append(0)
elif "signa" in filename:
Y_true.append(1)
else:
print(filename + " not understood")
#X = np.stack(X, axis=0)
return X, np.asarray(Y_true)
def load_label_df(filename):
df_train = pandas.DataFrame.from_csv(filename, index_col='fname')
#import IPython; IPython.embed()
#df_train['label'] = df_train['DM']#.apply(lambda row: get_weather(row))
print(df_train.columns)
return df_train
# def get_loss_weights(dframe=None, label_file=LABEL_FILE):
# if dframe is None:
# dframe = load_label_df(label_file)
# keys, counts = np.unique(dframe['label'], return_counts=True)
# weights = counts.astype(np.float32)/np.sum(counts)
# return dict(zip(keys, weights))
class Reader(object):
'''Generic background reader that preprocesses files
and enqueues them into a TensorFlow queue.'''
def __init__(self,
data_dir,
coord,
train=True,
threshold=None,
queue_size=16,
min_after_dequeue=4,
q_shape=None,
pattern='*.npy',
n_threads=1,
multi=True,
label_file=LABEL_FILE,
label_type=tf.int32,
pref=False,
dtype=np.float32):
self.data_dir = data_dir
self.coord = coord
self.n_threads = n_threads
self.threshold = threshold
self.ftype = pattern
self.corpus_size = get_corpus_size(self.data_dir, pattern=self.ftype)
self.threads = []
self.q_shape = q_shape
self.multi = multi
self.train = train
self.npdtype = dtype
self.tfdtype = tf.as_dtype(self.npdtype)
self.sample_placeholder = tf.placeholder(dtype=self.tfdtype, shape=None)
self.label_shape = []
self.label_type = label_type
self.pattern = pattern
self.pref = pref
self.labels_df = None
self.label_shape = []
if self.train:
if label_file is not None:
self.labels_df = load_label_df(label_file)
self.label_shape = [len(self.labels_df.columns)]
self.label_placeholder = tf.placeholder(dtype=self.label_type, shape=self.label_shape, name='label') #!!!
if self.q_shape:
#self.queue = tf.FIFOQueue(queue_size,[tf.float32,tf.int32], shapes=[q_shape,[]])
self.queue = tf.RandomShuffleQueue(queue_size, min_after_dequeue,
[self.tfdtype,label_type], shapes=[q_shape, self.label_shape])
else:
self.q_shape = [(1, None, None, 1)]
self.queue = tf.PaddingFIFOQueue(queue_size,
[self.tfdtype, label_type],
shapes=[self.q_shape,self.label_shape])
else:
self.label_placeholder = tf.placeholder(dtype=tf.string, shape=[], name='label') #!!!
self.queue = tf.FIFOQueue(queue_size,[self.tfdtype,tf.string], shapes=[q_shape,[]])
self.enqueue = self.queue.enqueue([self.sample_placeholder, self.label_placeholder])
def dequeue(self, num_elements):
images, labels = self.queue.dequeue_many(num_elements)
#print(labels[:4])
return images, labels
def thread_main(self, sess):
buffer_ = np.array([])
stop = False
# Go through the dataset multiple times
while not stop:
iterator = load_image(self.data_dir, train=self.train, pattern=self.pattern, pref=self.pref, dtype=self.npdtype)
for img, img_id in iterator:
#print(filename)
if self.train:
if self.labels_df is not None:
try:
label = [self.labels_df[c][img_id] for c in self.labels_df.columns]
except(KeyError):
print('No match for ', img_id)
continue
else:
if img_id.startswith('clean'):
label = 0
elif img_id.startswith('signa'):
label = 1
else:
print(img_id)
raise Exception("labels not understood")
else:
label = img_id
if self.coord.should_stop():
stop = True
break
if self.threshold is not None:
#TODO: Perform quality check if needed
pass
#print(img.shape, self.q_shape)
if img.shape != self.q_shape:
img = img.T[...,np.newaxis]
sess.run(self.enqueue,
feed_dict={self.sample_placeholder: img, self.label_placeholder: label})
def start_threads(self, sess):
for _ in xrange(self.n_threads):
thread = threading.Thread(target=self.thread_main, args=(sess,))
thread.daemon = True # Thread will close when parent quits.
thread.start()
self.threads.append(thread)
return self.threads
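# Illustrative usage (a minimal sketch, not part of the original script; the
# directory, queue shape and batch size are assumptions, and file names are
# expected to start with 'clean' or 'signa' as in thread_main above):
#
#     coord = tf.train.Coordinator()
#     reader = Reader('./train_frames', coord, q_shape=(32, 256, 1),
#                     label_file=None, pattern='*.npy')
#     images, labels = reader.dequeue(8)
#     with tf.Session() as sess:
#         reader.start_threads(sess)
#         batch_x, batch_y = sess.run([images, labels])
#         coord.request_stop()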
| gpl-3.0 |
Tong-Chen/scikit-learn | examples/manifold/plot_compare_methods.py | 8 | 3592 | """
=========================================
Comparison of Manifold Learning methods
=========================================
An illustration of dimensionality reduction on the S-curve dataset
with various manifold learning methods.
For a discussion and comparison of these algorithms, see the
:ref:`manifold module page <manifold>`
For a similar example, where the methods are applied to a
sphere dataset, see :ref:`example_manifold_plot_manifold_sphere.py`
Note that the purpose of the MDS is to find a low-dimensional
representation of the data (here 2D) in which the distances respect well
the distances in the original high-dimensional space; unlike other
manifold-learning algorithms, it does not seek an isotropic
representation of the data in the low-dimensional space.
"""
# Author: Jake Vanderplas -- <[email protected]>
print(__doc__)
from time import time
import pylab as pl
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.ticker import NullFormatter
from sklearn import manifold, datasets
# Next line to silence pyflakes. This import is needed.
Axes3D
n_points = 1000
X, color = datasets.samples_generator.make_s_curve(n_points, random_state=0)
n_neighbors = 10
n_components = 2
fig = pl.figure(figsize=(15, 8))
pl.suptitle("Manifold Learning with %i points, %i neighbors"
% (1000, n_neighbors), fontsize=14)
try:
# compatibility matplotlib < 1.0
ax = fig.add_subplot(241, projection='3d')
ax.scatter(X[:, 0], X[:, 1], X[:, 2], c=color, cmap=pl.cm.Spectral)
ax.view_init(4, -72)
except:
ax = fig.add_subplot(241, projection='3d')
pl.scatter(X[:, 0], X[:, 2], c=color, cmap=pl.cm.Spectral)
methods = ['standard', 'ltsa', 'hessian', 'modified']
labels = ['LLE', 'LTSA', 'Hessian LLE', 'Modified LLE']
for i, method in enumerate(methods):
t0 = time()
Y = manifold.LocallyLinearEmbedding(n_neighbors, n_components,
eigen_solver='auto',
method=method).fit_transform(X)
t1 = time()
print("%s: %.2g sec" % (methods[i], t1 - t0))
ax = fig.add_subplot(242 + i)
pl.scatter(Y[:, 0], Y[:, 1], c=color, cmap=pl.cm.Spectral)
pl.title("%s (%.2g sec)" % (labels[i], t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
pl.axis('tight')
t0 = time()
Y = manifold.Isomap(n_neighbors, n_components).fit_transform(X)
t1 = time()
print("Isomap: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(246)
pl.scatter(Y[:, 0], Y[:, 1], c=color, cmap=pl.cm.Spectral)
pl.title("Isomap (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
pl.axis('tight')
t0 = time()
mds = manifold.MDS(n_components, max_iter=100, n_init=1)
Y = mds.fit_transform(X)
t1 = time()
print("MDS: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(247)
pl.scatter(Y[:, 0], Y[:, 1], c=color, cmap=pl.cm.Spectral)
pl.title("MDS (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
pl.axis('tight')
t0 = time()
se = manifold.SpectralEmbedding(n_components=n_components,
n_neighbors=n_neighbors)
Y = se.fit_transform(X)
t1 = time()
print("SpectralEmbedding: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(248)
pl.scatter(Y[:, 0], Y[:, 1], c=color, cmap=pl.cm.Spectral)
pl.title("SpectralEmbedding (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
pl.axis('tight')
pl.show()
| bsd-3-clause |
l0k1/naev | utils/heatsim/heatsim.py | 20 | 7285 | #!/usr/bin/env python
#######################################################
#
# SIM CODE
#
#######################################################
# Imports
from frange import *
import math
import matplotlib.pyplot as plt
def clamp( a, b, x ):
return min( b, max( a, x ) )
class heatsim:
def __init__( self, shipname = "llama", weapname = "laser", simulation = [ 60., 120. ] ):
# Sim parameters
self.STEFAN_BOLZMANN = 5.67e-8
self.SPACE_TEMP = 250.
self.STEEL_COND = 54.
self.STEEL_CAP = 0.49
self.STEEL_DENS = 7.88e3
self.ACCURACY_LIMIT = 500
self.FIRERATE_LIMIT = 800
self.shipname = shipname
self.weapname = weapname
# Sim info
self.sim_dt = 1./50. # Delta tick
self.setSimulation( simulation )
# Load some data
self.ship_mass, self.ship_weaps = self.loadship( shipname )
self.weap_mass, self.weap_delay, self.weap_energy = self.loadweap( weapname )
def setSimulation( self, simulation ):
self.simulation = simulation
self.sim_total = simulation[-1]
def loadship( self, shipname ):
"Returns mass, number of weaps."
if shipname == "llama":
return 80., 2
elif shipname == "lancelot":
return 180., 4
elif shipname == "pacifier":
return 730., 5
elif shipname == "hawking":
return 3750., 7
elif shipname == "peacemaker":
return 6200., 8
else:
raise ValueError
def loadweap( self, weapname ):
"Returns mass, delay, energy."
if weapname == "laser":
return 2., 0.9, 4.25
elif weapname == "plasma":
return 4., 0.675, 3.75
elif weapname == "ion":
return 6., 1.440, 15.
elif weapname == "laser turret":
return 16., 0.540, 6.12
elif weapname == "ion turret":
return 42., 0.765, 25.
elif weapname == "railgun turret":
return 60., 1.102, 66.
else:
raise ValueError
def prepare( self ):
# Time stuff
self.time_data = []
# Calculate ship parameters
ship_kg = self.ship_mass * 1000.
self.ship_emis = 0.8
self.ship_cond = self.STEEL_COND
self.ship_C = self.STEEL_CAP * ship_kg
#self.ship_area = pow( ship_kg / self.STEEL_DENS, 2./3. )
self.ship_area = 4.*math.pi*pow( 3./4.*ship_kg/self.STEEL_DENS/math.pi, 2./3. )
self.ship_T = self.SPACE_TEMP
self.ship_data = []
# Calculate weapon parameters
weap_kg = self.weap_mass * 1000.
self.weap_C = self.STEEL_CAP * weap_kg
#self.weap_area = pow( weap_kg / self.STEEL_DENS, 2./3. )
self.weap_area = 2.*math.pi*pow( 3./4.*weap_kg/self.STEEL_DENS/math.pi, 2./3. )
self.weap_list = []
self.weap_T = []
self.weap_data = []
for i in range(self.ship_weaps):
self.weap_list.append( i*self.weap_delay / self.ship_weaps )
self.weap_T.append( self.SPACE_TEMP )
self.weap_data.append( [] )
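    # Temperature-dependent modifiers below, both linear ramps clamped to
    # [0, 1]: __accMod rises from 0 at 500 K to 1 at 1100 K (accuracy
    # degradation), __frMod falls from 1 at 800 K to 0 at 1100 K (fire-rate
    # throttling).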
def __accMod( self, T ):
return clamp( 0., 1., (T-500.)/600. )
def __frMod( self, T ):
return clamp( 0., 1., (1100.-T)/300. )
def simulate( self ):
"Begins the simulation."
# Prepare it
self.prepare()
# Run simulation
weap_on = True
sim_index = 0
dt = self.sim_dt
sim_elapsed = 0.
while sim_elapsed < self.sim_total:
Q_cond = 0.
# Check weapons
for i in range(len(self.weap_list)):
# Check if we should start/stop shooting
if self.simulation[ sim_index ] < sim_elapsed:
weap_on = not weap_on
sim_index += 1
# Check if shot
if weap_on:
self.weap_list[i] -= dt * self.__frMod( self.weap_T[i] )
if self.weap_list[i] < 0.:
self.weap_T[i] += 1e4 * self.weap_energy / self.weap_C
self.weap_list[i] += self.weap_delay
# Do heat movement (conduction)
Q = -self.ship_cond * (self.weap_T[i] - self.ship_T) * self.weap_area * dt
self.weap_T[i] += Q / self.weap_C
Q_cond += Q
self.weap_data[i].append( self.weap_T[i] )
# Do ship heat (radiation)
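            # Q_rad = sigma_SB * emissivity * area * (T_space^4 - T_ship^4) * dt,
            # i.e. net radiative exchange with the 250 K background; negative
            # (cooling) whenever the hull is hotter than space.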
Q_rad = self.STEFAN_BOLZMANN * self.ship_area * self.ship_emis * (pow(self.SPACE_TEMP,4.) - pow(self.ship_T,4.)) * dt
Q = Q_rad - Q_cond
self.ship_T += Q / self.ship_C
self.time_data.append( sim_elapsed )
self.ship_data.append( self.ship_T )
# Elapsed time
sim_elapsed += dt;
def save( self, filename ):
"Saves the results to a file."
        f = open( filename, 'w' )
        for i in range(len(self.time_data)):
            f.write( str(self.time_data[i])+' '+str(self.ship_data[i]))
            for j in range(len(self.weap_data)):
                f.write( ' '+str(self.weap_data[j][i]) )
f.write( '\n' )
f.close()
def display( self ):
print("Ship Temp: "+str(hs.ship_T)+" K")
for i in range(len(hs.weap_list)):
print("Outfit["+str(i)+"] Temp: "+str(hs.weap_T[i])+" K")
def plot( self, filename=None ):
plt.hold(False)
plt.figure(1)
# Plot 1 Data
plt.subplot(211)
plt.plot( self.time_data, self.ship_data, '-' )
# Plot 1 Info
plt.axis( [0, self.sim_total, 0, 1100] )
plt.title( 'NAEV Heat Simulation ('+self.shipname+' with '+self.weapname+')' )
plt.legend( ('Ship', 'Accuracy Limit', 'Fire Rate Limit'), loc='upper left')
plt.ylabel( 'Temperature [K]' )
plt.grid( True )
# Plot 1 Data
plt.subplot(212)
plt.plot( self.time_data, self.weap_data[0], '-' )
plt.hold(True)
plt_data = []
for i in range(len(self.weap_data[0])):
plt_data.append( self.ACCURACY_LIMIT )
plt.plot( self.time_data, plt_data, '--' )
plt_data = []
for i in range(len(self.weap_data[0])):
plt_data.append( self.FIRERATE_LIMIT )
plt.plot( self.time_data, plt_data, '-.' )
plt.hold(False)
# Plot 2 Info
plt.axis( [0, self.sim_total, 0, 1100] )
plt.legend( ('Weapon', 'Accuracy Limit', 'Fire Rate Limit'), loc='upper right')
plt.ylabel( 'Temperature [K]' )
plt.xlabel( 'Time [s]' )
plt.grid( True )
if filename == None:
plt.show()
else:
plt.savefig( filename )
if __name__ == "__main__":
print("NAEV HeatSim\n")
shp_lst = { 'llama' : 'laser',
'lancelot' : 'ion',
'pacifier' : 'laser turret',
'hawking' : 'ion turret',
'peacemaker' : 'railgun turret' }
for shp,wpn in shp_lst.items():
hs = heatsim( shp, wpn, (60., 120.) )
#hs = heatsim( shp, wpn, frange( 30., 600., 30. ) )
hs.simulate()
hs.plot( shp+'_'+wpn+'_60_60.png' )
hs.setSimulation( (30., 90.) )
hs.simulate()
hs.plot( shp+'_'+wpn+'_30_60.png' )
hs.setSimulation( (30., 90., 120., 180.) )
hs.simulate()
hs.plot( shp+'_'+wpn+'_30_60_30_60.png' )
print( ' '+shp+' with '+wpn+' done!' )
| gpl-3.0 |
yavalvas/yav_com | build/matplotlib/lib/mpl_examples/pylab_examples/font_table_ttf.py | 3 | 1771 | #!/usr/bin/env python
# -*- noplot -*-
"""
matplotlib has support for freetype fonts. Here's a little example
using the 'table' command to build a font table that shows the glyphs
by character code.
Usage python font_table_ttf.py somefile.ttf
"""
import sys
import os
import matplotlib
from matplotlib.ft2font import FT2Font
from matplotlib.font_manager import FontProperties
from pylab import figure, table, show, axis, title
import six
from six import unichr
# the font table grid
labelc = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9',
'A', 'B', 'C', 'D', 'E', 'F']
labelr = ['00', '10', '20', '30', '40', '50', '60', '70', '80', '90',
'A0', 'B0', 'C0', 'D0', 'E0', 'F0']
if len(sys.argv) > 1:
fontname = sys.argv[1]
else:
fontname = os.path.join(matplotlib.get_data_path(),
'fonts', 'ttf', 'Vera.ttf')
font = FT2Font(fontname)
codes = list(font.get_charmap().items())
codes.sort()
# a 16,16 array of character strings
chars = [['' for c in range(16)] for r in range(16)]
colors = [[(0.95, 0.95, 0.95) for c in range(16)] for r in range(16)]
figure(figsize=(8, 4), dpi=120)
for ccode, glyphind in codes:
if ccode >= 256:
continue
r, c = divmod(ccode, 16)
s = unichr(ccode)
chars[r][c] = s
lightgrn = (0.5, 0.8, 0.5)
title(fontname)
tab = table(cellText=chars,
rowLabels=labelr,
colLabels=labelc,
rowColours=[lightgrn]*16,
colColours=[lightgrn]*16,
cellColours=colors,
cellLoc='center',
loc='upper left')
for key, cell in tab.get_celld().items():
row, col = key
if row > 0 and col > 0:
cell.set_text_props(fontproperties=FontProperties(fname=fontname))
axis('off')
show()
| mit |
nesterione/scikit-learn | examples/cluster/plot_kmeans_assumptions.py | 270 | 2040 | """
====================================
Demonstration of k-means assumptions
====================================
This example is meant to illustrate situations where k-means will produce
unintuitive and possibly unexpected clusters. In the first three plots, the
input data does not conform to some implicit assumption that k-means makes and
undesirable clusters are produced as a result. In the last plot, k-means
returns intuitive clusters despite unevenly sized blobs.
"""
print(__doc__)
# Author: Phil Roth <[email protected]>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
from sklearn.datasets import make_blobs
plt.figure(figsize=(12, 12))
n_samples = 1500
random_state = 170
X, y = make_blobs(n_samples=n_samples, random_state=random_state)
# Incorrect number of clusters
y_pred = KMeans(n_clusters=2, random_state=random_state).fit_predict(X)
plt.subplot(221)
plt.scatter(X[:, 0], X[:, 1], c=y_pred)
plt.title("Incorrect Number of Blobs")
# Anisotropically distributed data
transformation = [[ 0.60834549, -0.63667341], [-0.40887718, 0.85253229]]
X_aniso = np.dot(X, transformation)
y_pred = KMeans(n_clusters=3, random_state=random_state).fit_predict(X_aniso)
plt.subplot(222)
plt.scatter(X_aniso[:, 0], X_aniso[:, 1], c=y_pred)
plt.title("Anisotropicly Distributed Blobs")
# Different variance
X_varied, y_varied = make_blobs(n_samples=n_samples,
cluster_std=[1.0, 2.5, 0.5],
random_state=random_state)
y_pred = KMeans(n_clusters=3, random_state=random_state).fit_predict(X_varied)
plt.subplot(223)
plt.scatter(X_varied[:, 0], X_varied[:, 1], c=y_pred)
plt.title("Unequal Variance")
# Unevenly sized blobs
X_filtered = np.vstack((X[y == 0][:500], X[y == 1][:100], X[y == 2][:10]))
y_pred = KMeans(n_clusters=3, random_state=random_state).fit_predict(X_filtered)
plt.subplot(224)
plt.scatter(X_filtered[:, 0], X_filtered[:, 1], c=y_pred)
plt.title("Unevenly Sized Blobs")
plt.show()
| bsd-3-clause |
velezj/ml-chats | time_series/code/time_series/periodicity.py | 1 | 16504 | #####
## This file is subject to the terms and conditions defined in
## file 'LICENSE', which is part of this source code package.
####
import logging
logger = logging.getLogger( __name__ )
import copy
import math
import numpy as np
from sklearn import gaussian_process
import autocorrelation
import categorical_distribution
##=======================================================================
##
# Estimate the period of a sequence using the autocorrelation.
# Returns a distribution over the period (cat_distibution)
def period_distribution( x,
prior = categorical_distribution.cat_distibution({}) ):
# first, calculate autocorrelation
ac = autocorrelation.autocorrelation_estimate( x )
    # Ok, now ignore the zero-lag point and keep only autocorrelation peaks
    # above half of the maximum height
n = len(x)
height_thresh = 2 * np.std( ac[1:n/2] )
height_thresh = 0.5 * max( ac[1:n/2] )
# now find continuous ranges with value above the threhold
ranges = [] # [start,end) intervals
current_start = None
for i, a in enumerate(ac[1:]):
idx = i + 1
if a >= height_thresh:
# add to range if we have one
if current_start is not None:
pass
elif current_start is None:
current_start = idx
else:
# stop range if we were accruing one
if current_start is not None:
ranges.append( (current_start, idx ) )
current_start = None
# Ok, if we have no ranges then there is no period
if len(ranges) < 2:
return categorical_distribution.cat_distibution( { 0: 1.0 } )
# ok, grab the midpoint of each range
mids = map(lambda (a,b): a + (b-a) / 2 , ranges )
# Ok, calculate probability for different period lengths
counts = {}
for a,b in zip(mids,mids[1:]):
k = b - a
if k not in counts:
counts[ k ] = 0.0
counts[ k ] += 1.0
# Ok, add prior counts
for k,c in prior.counts.iteritems():
counts[ k ] += c
# return the distribution
return categorical_distribution.cat_distibution( counts )
##=======================================================================
##
# Returns an estimate of the period along with a value with how likely
# the signal is actually periodic with the given period
def estimate_period( x ):
n = len(x)
# Ok, build up a prior over the period given the signal length
prior = categorical_distribution.cat_distibution( {} )
# compute the period distribution
period_dist = period_distribution( x, prior=prior )
# ok, judge how well we think the signal is actually periodic
period_interval, mass = period_dist.credible_interval( 0.5 )
# too large an interval means no
if period_interval[1] - period_interval[0] > 3:
return None, 0.0
# If the average period < 2 return none
avg_period = period_interval[0] + ( period_interval[1] - period_interval[0] ) / 2.0
if avg_period < 2:
return None, 0.0
# widen the period interval by one to either side if it is a point interval
if period_interval[0] == period_interval[1]:
period_interval = ( period_interval[0] - 1.0,
period_interval[1] + 1.0 )
    # Ok, we have a peak but did we find enough of the peaks according to
# the raw counts
num_expected_peaks = int( math.floor( n / avg_period ) )
num_found_peaks = 0
for k, c in period_dist.counts.iteritems():
if k >= period_interval[0] and k <= period_interval[1]:
num_found_peaks += c
found_peak_thresh = 0.75
if float(num_found_peaks) / num_expected_peaks < found_peak_thresh:
return None, 0.0
    # Ok, we have found enough of the peaks, so let's calculate the probability
max_periods = set([])
max_count = None
for k, c in period_dist.counts.iteritems():
if k >= period_interval[0] and k <= period_interval[1]:
if max_count is None or c > max_count:
max_count = c
max_periods = set( [k] )
elif c == max_count:
max_periods.add( k )
period_est = sorted(max_periods)[ len(max_periods) / 2 ]
return ( period_est, period_dist.pmf( period_est ) )
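# Illustrative usage (a minimal sketch, not part of the original module):
#
#     t = np.arange(240)
#     x = np.sin(2.0 * np.pi * t / 12.0) + 0.1 * np.random.randn(len(t))
#     period, prob = estimate_period(x)   # expect a period close to 12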
##=======================================================================
##
# Given a sequence x and a period distribution, renormalizes the time
# to get a set of sequences for each period. This will infer the
# best points in time for the start of each period.
# The return will be the start-of-period indices for the sequence
#
# @param x : A sequence
# @param period_dist : a cat_distibution with the period distribution
# @param noise_sigma : noise level in the sequence x values.
# If None this is estimated from the signal itself
def start_of_period_fit( x, period_dist, noise_sigma = None ):
# Initialize the start-of-periods (sop) by finding first peak in
# autocorrelation and using hte period distribution
n = len(x)
ac = autocorrelation.autocorrelation_estimate( x )
height_thresh = 0.5 * max( ac[1:n/2] )
init_start = 0
for i,a in enumerate(ac):
if a >= height_thresh:
init_start = i
break
# build up the rest of the period starts from the initial and
# the given period distribution
sops = [ init_start ]
mode = list(period_dist.mode())
mode_period = mode[ len(mode)/2]
if mode_period <= 0:
raise RuntimeError( "Period Distribution has mode which include non-positive values! {0}".format( period_dist ) )
while True:
new_sop = sops[-1] + mode_period
if new_sop >= n:
break
sops.append( new_sop )
logger.info( "Initial SOPs: {0}".format( sops ) )
    # estimate the noise sigma if wanted
if noise_sigma is None:
noise_sigma = estimate_periodic_signal_noise_sigma( x, period_dist )
logger.info( "Noise Sigma: {0}".format( noise_sigma ) )
# the score function for a particular set of sops
# higher is better
def _score( sops ):
# having a single sop is an edge case
# treat it as if the last or first is a sop
if len(sops) == 1:
if sops[0] < n/2:
sops = sops + [n]
else:
sops = [0] + sops
# everything before first sop is discarded for now
# ok, renormalize time based on sops
y_data = []
y_time = []
steps = []
for sop0, sop1 in zip( sops, sops[1:] ):
y_slice = list( x[ sop0 : sop1 ] )
y_data += y_slice
y_time += list(np.linspace( 0.0, 1.0, len(y_slice) ))
steps.append( len(y_slice ) )
# ok, add in things before sops[0] and after sops[-1]
# where we will do time step adjustment according to the
# mean time steps in the slices above
mean_step = int(np.mean( steps ))
step_lookup = np.linspace( 0.0, 1.0, max( mean_step, sops[0]) )
for i,y in enumerate( x[:sops[0]] ):
time = 1.0 - step_lookup[ sops[0] - i - 1 ]
y_data.insert( 0, y )
y_time.insert( 0, time )
step_lookup = np.linspace( 0.0, 1.0, max( mean_step, n - sops[-1]) )
for i,y in enumerate( x[sops[-1] : ] ):
time = step_lookup[ i ]
y_data.append( y )
y_time.append( time )
# jitter time to make sure they are unique :-)
y_time = map(lambda t: t + np.random.random() * 1.0e-5, y_time )
        # Ok, now that we have the renormalized time, treat data as
# 2D data and fit a GP to it :-)
nugget = map(lambda y: ( noise_sigma / y ) ** 2, y_data )
gp = gaussian_process.GaussianProcess(nugget=nugget)
gp.fit( np.array(y_time).reshape(-1,1), y_data )
# Ok compute the likelihood of the fit
# p( y | X, w ) = N( X_T * w , sigma^2 * I )
# where X is training data and w is learned weights of GP
# and sigma is the kernel sigma
#return gp.reduced_likelihood_function_value_
return gp.score( np.array(y_time).reshape(-1,1), y_data)
    # Ok, we will do a gradient descent algorithm to find the best sops
max_lik_sops, max_lik = _gradient_descent_sops(
_score,
sops,
n,
max_iters = 10 * len(sops),
num_restarts = 2 * len(sops))
return max_lik_sops, max_lik, _score
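# Illustrative usage (a minimal sketch; assumes `x` is a roughly periodic
# sequence and reuses the distribution returned by period_distribution above):
#
#     pd = period_distribution(x)
#     sops, lik, score_fn = start_of_period_fit(x, pd)
#     plot_sops(x, sops)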
##=======================================================================
##
# Perform random 1-step changes to the SOPs with a given likelihood/score
# function (higher is better) to find hte maximum scored set of SOPs
#
# Returns the max SOPs and the max score found.
#
# Will restart with the *same* initial SOP num_restarts times,
# and each restart will try at most max_iters single-step changes to the
# SOP. Each generation/restart ends when we hit a local maxima
#
# @param lik : a function f( sops ) returning the likelihood or score for a
# SOP. Higher is better
# @param init_sops : the initial SOP for all restarts.
# @param n : the maximum data size to cap SOPs at.
# @param max_iters : the maximum number of 1-step changes to try per restart
# @param num_restarts : the number of restarts to run
def _gradient_descent_sops(
lik,
init_sops,
n,
max_iters = 100,
num_restarts = 10 ):
generation_sops = []
generation_liks = []
generation_stats = []
# look over each restart
for r in xrange(num_restarts):
# start at initial sops always
sops = init_sops
max_lik = lik( sops )
logger.info( "[{0}] SOPs GD: init lik = {1}".format( r, max_lik ) )
# Ok, iterate to find max lik sop
for i in xrange(max_iters):
# pick a random sop to shift
sop_to_explore_idx = np.random.choice(len(sops))
logger.info( "[{0}] SOPs GD: explore index = {1}".format( r, sop_to_explore_idx ) )
# ok, step to either side
a_sops = copy.deepcopy(sops)
b_sops = copy.deepcopy(sops)
a_sops[ sop_to_explore_idx ] -= 1
if a_sops[ sop_to_explore_idx ] < 0:
a_sops[ sop_to_explore_idx ] = 0
b_sops[ sop_to_explore_idx ] += 1
if b_sops[ sop_to_explore_idx ] >= n:
b_sops[ sop_to_explore_idx ] = n-1
# Renormalize by removing redundant sops
if sop_to_explore_idx > 0 and a_sops[ sop_to_explore_idx ] == a_sops[ sop_to_explore_idx -1 ]:
del a_sops[ sop_to_explore_idx ]
if sop_to_explore_idx < len(sops) - 1 and b_sops[ sop_to_explore_idx ] == b_sops[ sop_to_explore_idx + 1 ]:
del b_sops[ sop_to_explore_idx ]
# calculate new likelihoods
a_lik = lik( a_sops )
b_lik = lik( b_sops )
# keep highest between current a and b likelihoods
if a_lik >= max_lik and a_lik >= b_lik:
max_lik = a_lik
sops = a_sops
elif b_lik >= max_lik and b_lik > a_lik:
max_lik = b_lik
sops = b_sops
else:
# we are done with this generation
break
# add the best found to generation
generation_liks.append( max_lik )
generation_sops.append( sops )
generation_stats.append( { 'iter' : i } )
logger.info( "[{0}] SOPs GD: generation max in {1} iterations = {2} {3}".format(
r,
i,
max_lik,
sops ) )
    # ok, now do a last ordered pass to tune the sops
sops = generation_sops[0]
max_lik = generation_liks[0]
for s,l in zip( generation_sops, generation_liks ):
if l >max_lik:
max_lik = l
sops = s
gen_lik = max_lik
for i in xrange(max_iters):
for sop_idx in xrange(len(sops)):
# ok, step to either side
a_sops = copy.deepcopy(sops)
b_sops = copy.deepcopy(sops)
a_sops[ sop_idx ] -= 1
if a_sops[ sop_idx ] < 0:
a_sops[ sop_idx ] = 0
b_sops[ sop_idx ] += 1
if b_sops[ sop_idx ] >= n:
b_sops[ sop_idx ] = n-1
# Renormalize by removing redundant sops
if sop_idx > 0 and a_sops[ sop_idx ] == a_sops[ sop_idx -1 ]:
del a_sops[ sop_idx ]
if sop_idx < len(sops) - 1 and b_sops[ sop_idx ] == b_sops[ sop_idx + 1 ]:
del b_sops[ sop_idx ]
# calculate new likelihoods
a_lik = lik( a_sops )
b_lik = lik( b_sops )
# keep highest between current a and b likelihoods
if a_lik >= max_lik and a_lik >= b_lik:
max_lik = a_lik
sops = a_sops
elif b_lik >= max_lik and b_lik > a_lik:
max_lik = b_lik
sops = b_sops
new_gen_lik = lik( sops )
logger.info( "Generation {0} tuned likelihood = {1}".format(i,new_gen_lik))
if gen_lik >= new_gen_lik:
break
else:
gen_lik = new_gen_lik
# ok, return best generation
return sops, max_lik
##=======================================================================
##
# Estimate the noise sigma floor fo a periodic signal
# given the period distribution for the period
def estimate_periodic_signal_noise_sigma( x, period_dist ):
n = len(x)
# we will calcualte a sigma for each period
sigmas = {}
# iterate over known period in domain of distribution
for period in period_dist.counts:
        # iterate over possible period start points (start-of-period)
sop_variances = []
for sop in xrange(period):
            # calculate noise sigma for this sop and period
variances = []
for i in xrange(period):
data = []
cur_index = i
while cur_index < n:
data.append( x[cur_index] )
cur_index += period
variances.append( np.var( data ) )
# calculate the average variance
mean_var = np.mean( variances )
# store for this sop
sop_variances.append( mean_var )
# Ok, grab the *smallest* mean variance for any SOP as the
        # mean variance for that period
min_var = np.min( sop_variances )
# store sigma for this period
sigmas[ period ] = np.sqrt( min_var )
    # Ok, compute the expected sigma by using the period distribution
e_sigma = 0.0
for period,s in sigmas.iteritems():
p = period_dist.pmf( period )
e_sigma += ( p * s )
# return the expeted sigma
return e_sigma
##=======================================================================
def plot_sops( x, sops ):
import matplotlib.pyplot as plt
plt.figure()
plt.plot( x, 'b-' )
plt.plot( sops, np.array(x)[ sops ], 'rx', ms=10 )
##=======================================================================
##=======================================================================
##=======================================================================
##=======================================================================
##=======================================================================
##=======================================================================
##=======================================================================
##=======================================================================
##=======================================================================
##=======================================================================
##=======================================================================
##=======================================================================
##=======================================================================
##=======================================================================
##=======================================================================
##=======================================================================
##=======================================================================
##=======================================================================
| apache-2.0 |
sunny94/temp | sympy/plotting/plot.py | 14 | 64512 | """Plotting module for Sympy.
A plot is represented by the ``Plot`` class that contains a reference to the
backend and a list of the data series to be plotted. The data series are
instances of classes meant to simplify getting points and meshes from sympy
expressions. ``plot_backends`` is a dictionary with all the backends.
This module gives only the essential. For all the fancy stuff use directly
the backend. You can get the backend wrapper for every plot from the
``_backend`` attribute. Moreover the data series classes have various useful
methods like ``get_points``, ``get_segments``, ``get_meshes``, etc, that may
be useful if you wish to use another plotting library.
Especially if you need publication ready graphs and this module is not enough
for you - just get the ``_backend`` attribute and add whatever you want
directly to it. In the case of matplotlib (the common way to graph data in
python) just copy ``_backend.fig`` which is the figure and ``_backend.ax``
which is the axis and work on them as you would on any other matplotlib object.
Simplicity of code takes much greater importance than performance. Don't use it
if you care at all about performance. A new backend instance is initialized
every time you call ``show()`` and the old one is left to the garbage collector.
"""
from __future__ import print_function, division
from inspect import getargspec
from itertools import chain
from collections import Callable
import warnings
from sympy import sympify, Expr, Tuple, Dummy, Symbol
from sympy.external import import_module
from sympy.utilities.decorator import doctest_depends_on
from sympy.utilities.iterables import is_sequence
from .experimental_lambdify import (vectorized_lambdify, lambdify)
# N.B.
# When changing the minimum module version for matplotlib, please change
# the same in the `SymPyDocTestFinder`` in `sympy/utilities/runtests.py`
# Backend specific imports - textplot
from sympy.plotting.textplot import textplot
# Global variable
# Set to False when running tests / doctests so that the plots don't show.
_show = True
def unset_show():
global _show
_show = False
##############################################################################
# The public interface
##############################################################################
class Plot(object):
"""The central class of the plotting module.
For interactive work the function ``plot`` is better suited.
This class permits the plotting of sympy expressions using numerous
backends (matplotlib, textplot, the old pyglet module for sympy, Google
charts api, etc).
The figure can contain an arbitrary number of plots of sympy expressions,
lists of coordinates of points, etc. Plot has a private attribute _series that
contains all data series to be plotted (expressions for lines or surfaces,
lists of points, etc (all subclasses of BaseSeries)). Those data series are
instances of classes not imported by ``from sympy import *``.
The customization of the figure is on two levels. Global options that
concern the figure as a whole (eg title, xlabel, scale, etc) and
per-data series options (eg name) and aesthetics (eg. color, point shape,
line type, etc.).
The difference between options and aesthetics is that an aesthetic can be
a function of the coordinates (or parameters in a parametric plot). The
supported values for an aesthetic are:
- None (the backend uses default values)
- a constant
- a function of one variable (the first coordinate or parameter)
- a function of two variables (the first and second coordinate or
parameters)
- a function of three variables (only in nonparametric 3D plots)
Their implementation depends on the backend so they may not work in some
backends.
If the plot is parametric and the arity of the aesthetic function permits
it the aesthetic is calculated over parameters and not over coordinates.
If the arity does not permit calculation over parameters the calculation is
done over coordinates.
Only cartesian coordinates are supported for the moment, but you can use
the parametric plots to plot in polar, spherical and cylindrical
coordinates.
The arguments for the constructor Plot must be subclasses of BaseSeries.
Any global option can be specified as a keyword argument.
The global options for a figure are:
- title : str
- xlabel : str
- ylabel : str
- legend : bool
- xscale : {'linear', 'log'}
- yscale : {'linear', 'log'}
- axis : bool
- axis_center : tuple of two floats or {'center', 'auto'}
- xlim : tuple of two floats
- ylim : tuple of two floats
- aspect_ratio : tuple of two floats or {'auto'}
- autoscale : bool
- margin : float in [0, 1]
The per data series options and aesthetics are:
There are none in the base series. See below for options for subclasses.
Some data series support additional aesthetics or options:
ListSeries, LineOver1DRangeSeries, Parametric2DLineSeries,
Parametric3DLineSeries support the following:
Aesthetics:
- line_color : function which returns a float.
options:
- label : str
- steps : bool
- integers_only : bool
SurfaceOver2DRangeSeries, ParametricSurfaceSeries support the following:
aesthetics:
- surface_color : function which returns a float.
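    A small illustrative sketch of a per-point aesthetic (the exact rendering
    depends on the backend; ``adaptive=False`` keeps the colour array aligned
    with the uniformly sampled segments, and ``show=False`` only defers
    display)::

        from sympy import symbols, sin
        from sympy.plotting import plot

        x = symbols('x')
        # colour each line segment by the value of its x coordinate
        p = plot(sin(x), (x, 0, 10), line_color=lambda a: a,
                 adaptive=False, show=False)
        p.show()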
"""
def __init__(self, *args, **kwargs):
super(Plot, self).__init__()
# Options for the graph as a whole.
# The possible values for each option are described in the docstring of
# Plot. They are based purely on convention, no checking is done.
self.title = None
self.xlabel = None
self.ylabel = None
self.aspect_ratio = 'auto'
self.xlim = None
self.ylim = None
self.axis_center = 'auto'
self.axis = True
self.xscale = 'linear'
self.yscale = 'linear'
self.legend = False
self.autoscale = True
self.margin = 0
# Contains the data objects to be plotted. The backend should be smart
# enough to iterate over this list.
self._series = []
self._series.extend(args)
# The backend type. On every show() a new backend instance is created
# in self._backend which is tightly coupled to the Plot instance
# (thanks to the parent attribute of the backend).
self.backend = DefaultBackend
# The keyword arguments should only contain options for the plot.
for key, val in kwargs.items():
if hasattr(self, key):
setattr(self, key, val)
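        # Illustrative note: only keywords that match an existing attribute
        # are applied, so e.g. Plot(series, title='sin(x)', xlim=(-5, 5),
        # bogus=1) sets ``title`` and ``xlim`` and silently ignores ``bogus``.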
def show(self):
# TODO move this to the backend (also for save)
if hasattr(self, '_backend'):
self._backend.close()
self._backend = self.backend(self)
self._backend.show()
def save(self, path):
if hasattr(self, '_backend'):
self._backend.close()
self._backend = self.backend(self)
self._backend.save(path)
def __str__(self):
series_strs = [('[%d]: ' % i) + str(s)
for i, s in enumerate(self._series)]
return 'Plot object containing:\n' + '\n'.join(series_strs)
def __getitem__(self, index):
return self._series[index]
def __setitem__(self, index, *args):
if len(args) == 1 and isinstance(args[0], BaseSeries):
            self._series[index] = args[0]
def __delitem__(self, index):
del self._series[index]
@doctest_depends_on(modules=('numpy', 'matplotlib',))
def append(self, arg):
"""Adds an element from a plot's series to an existing plot.
Examples
========
Consider two ``Plot`` objects, ``p1`` and ``p2``. To add the
second plot's first series object to the first, use the
``append`` method, like so:
>>> from sympy import symbols
>>> from sympy.plotting import plot
>>> x = symbols('x')
>>> p1 = plot(x*x)
>>> p2 = plot(x)
>>> p1.append(p2[0])
>>> p1
Plot object containing:
[0]: cartesian line: x**2 for x over (-10.0, 10.0)
[1]: cartesian line: x for x over (-10.0, 10.0)
See Also
========
extend
"""
if isinstance(arg, BaseSeries):
self._series.append(arg)
else:
raise TypeError('Must specify element of plot to append.')
@doctest_depends_on(modules=('numpy', 'matplotlib',))
def extend(self, arg):
"""Adds all series from another plot.
Examples
========
Consider two ``Plot`` objects, ``p1`` and ``p2``. To add the
second plot to the first, use the ``extend`` method, like so:
>>> from sympy import symbols
>>> from sympy.plotting import plot
>>> x = symbols('x')
>>> p1 = plot(x*x)
>>> p2 = plot(x)
>>> p1.extend(p2)
>>> p1
Plot object containing:
[0]: cartesian line: x**2 for x over (-10.0, 10.0)
[1]: cartesian line: x for x over (-10.0, 10.0)
"""
if isinstance(arg, Plot):
self._series.extend(arg._series)
elif is_sequence(arg):
self._series.extend(arg)
else:
raise TypeError('Expecting Plot or sequence of BaseSeries')
##############################################################################
# Data Series
##############################################################################
#TODO more general way to calculate aesthetics (see get_color_array)
### The base class for all series
class BaseSeries(object):
"""Base class for the data objects containing stuff to be plotted.
The backend should check if it supports the data series that it's given.
    (eg TextBackend supports only LineOver1DRangeSeries).
It's the backend responsibility to know how to use the class of
data series that it's given.
Some data series classes are grouped (using a class attribute like is_2Dline)
according to the api they present (based only on convention). The backend is
    not obliged to use that api (eg. The LineOver1DRangeSeries belongs to the
is_2Dline group and presents the get_points method, but the
TextBackend does not use the get_points method).
"""
# Some flags follow. The rationale for using flags instead of checking base
# classes is that setting multiple flags is simpler than multiple
# inheritance.
is_2Dline = False
# Some of the backends expect:
# - get_points returning 1D np.arrays list_x, list_y
# - get_segments returning np.array (done in Line2DBaseSeries)
# - get_color_array returning 1D np.array (done in Line2DBaseSeries)
# with the colors calculated at the points from get_points
is_3Dline = False
# Some of the backends expect:
    # - get_points returning 1D np.arrays list_x, list_y, list_z
# - get_segments returning np.array (done in Line2DBaseSeries)
# - get_color_array returning 1D np.array (done in Line2DBaseSeries)
# with the colors calculated at the points from get_points
is_3Dsurface = False
# Some of the backends expect:
# - get_meshes returning mesh_x, mesh_y, mesh_z (2D np.arrays)
# - get_points an alias for get_meshes
is_contour = False
# Some of the backends expect:
# - get_meshes returning mesh_x, mesh_y, mesh_z (2D np.arrays)
# - get_points an alias for get_meshes
is_implicit = False
# Some of the backends expect:
    # - get_meshes returning mesh_x (1D array), mesh_y (1D array),
# mesh_z (2D np.arrays)
# - get_points an alias for get_meshes
#Different from is_contour as the colormap in backend will be
#different
is_parametric = False
# The calculation of aesthetics expects:
# - get_parameter_points returning one or two np.arrays (1D or 2D)
# used for calculation aesthetics
def __init__(self):
super(BaseSeries, self).__init__()
@property
def is_3D(self):
flags3D = [
self.is_3Dline,
self.is_3Dsurface
]
return any(flags3D)
@property
def is_line(self):
flagslines = [
self.is_2Dline,
self.is_3Dline
]
return any(flagslines)
### 2D lines
class Line2DBaseSeries(BaseSeries):
"""A base class for 2D lines.
- adding the label, steps and only_integers options
- making is_2Dline true
- defining get_segments and get_color_array
"""
is_2Dline = True
_dim = 2
def __init__(self):
super(Line2DBaseSeries, self).__init__()
self.label = None
self.steps = False
self.only_integers = False
self.line_color = None
def get_segments(self):
np = import_module('numpy')
points = self.get_points()
if self.steps is True:
x = np.array((points[0], points[0])).T.flatten()[1:]
y = np.array((points[1], points[1])).T.flatten()[:-1]
points = (x, y)
points = np.ma.array(points).T.reshape(-1, 1, self._dim)
return np.ma.concatenate([points[:-1], points[1:]], axis=1)
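    # Illustrative example (not executed): for points ([0, 1, 2], [0, 1, 4])
    # the default (non-step) path above yields a masked array of shape
    # (2, 2, 2) holding the segments [[(0, 0), (1, 1)], [(1, 1), (2, 4)]].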
def get_color_array(self):
np = import_module('numpy')
c = self.line_color
if hasattr(c, '__call__'):
f = np.vectorize(c)
arity = len(getargspec(c)[0])
if arity == 1 and self.is_parametric:
x = self.get_parameter_points()
return f(centers_of_segments(x))
else:
variables = list(map(centers_of_segments, self.get_points()))
if arity == 1:
return f(variables[0])
elif arity == 2:
return f(*variables[:2])
else: # only if the line is 3D (otherwise raises an error)
return f(*variables)
else:
return c*np.ones(self.nb_of_points)
class List2DSeries(Line2DBaseSeries):
"""Representation for a line consisting of list of points."""
def __init__(self, list_x, list_y):
np = import_module('numpy')
super(List2DSeries, self).__init__()
self.list_x = np.array(list_x)
self.list_y = np.array(list_y)
self.label = 'list'
def __str__(self):
return 'list plot'
def get_points(self):
return (self.list_x, self.list_y)
class LineOver1DRangeSeries(Line2DBaseSeries):
"""Representation for a line consisting of a SymPy expression over a range."""
def __init__(self, expr, var_start_end, **kwargs):
super(LineOver1DRangeSeries, self).__init__()
self.expr = sympify(expr)
self.label = str(self.expr)
self.var = sympify(var_start_end[0])
self.start = float(var_start_end[1])
self.end = float(var_start_end[2])
self.nb_of_points = kwargs.get('nb_of_points', 300)
self.adaptive = kwargs.get('adaptive', True)
self.depth = kwargs.get('depth', 12)
self.line_color = kwargs.get('line_color', None)
def __str__(self):
return 'cartesian line: %s for %s over %s' % (
str(self.expr), str(self.var), str((self.start, self.end)))
def get_segments(self):
"""
Adaptively gets segments for plotting.
The adaptive sampling is done by recursively checking if three
points are almost collinear. If they are not collinear, then more
points are added between those points.
References
==========
[1] Adaptive polygonal approximation of parametric curves,
Luiz Henrique de Figueiredo.
"""
if self.only_integers or not self.adaptive:
return super(LineOver1DRangeSeries, self).get_segments()
else:
f = lambdify([self.var], self.expr)
list_segments = []
def sample(p, q, depth):
""" Samples recursively if three points are almost collinear.
For depth < 6, points are added irrespective of whether they
satisfy the collinearity condition or not. The maximum depth
allowed is 12.
"""
np = import_module('numpy')
#Randomly sample to avoid aliasing.
random = 0.45 + np.random.rand() * 0.1
xnew = p[0] + random * (q[0] - p[0])
ynew = f(xnew)
new_point = np.array([xnew, ynew])
#Maximum depth
if depth > self.depth:
list_segments.append([p, q])
#Sample irrespective of whether the line is flat till the
#depth of 6. We are not using linspace to avoid aliasing.
elif depth < 6:
sample(p, new_point, depth + 1)
sample(new_point, q, depth + 1)
#Sample ten points if complex values are encountered
#at both ends. If there is a real value in between, then
#sample those points further.
elif p[1] is None and q[1] is None:
xarray = np.linspace(p[0], q[0], 10)
yarray = list(map(f, xarray))
if any(y is not None for y in yarray):
for i in range(len(yarray) - 1):
if yarray[i] is not None or yarray[i + 1] is not None:
sample([xarray[i], yarray[i]],
[xarray[i + 1], yarray[i + 1]], depth + 1)
                #Sample further if one of the end points is None (i.e. a complex
                #value) or the three points are not almost collinear.
elif (p[1] is None or q[1] is None or new_point[1] is None
or not flat(p, new_point, q)):
sample(p, new_point, depth + 1)
sample(new_point, q, depth + 1)
else:
list_segments.append([p, q])
f_start = f(self.start)
f_end = f(self.end)
sample([self.start, f_start], [self.end, f_end], 0)
return list_segments
def get_points(self):
np = import_module('numpy')
if self.only_integers is True:
list_x = np.linspace(int(self.start), int(self.end),
num=int(self.end) - int(self.start) + 1)
else:
list_x = np.linspace(self.start, self.end, num=self.nb_of_points)
f = vectorized_lambdify([self.var], self.expr)
list_y = f(list_x)
return (list_x, list_y)
class Parametric2DLineSeries(Line2DBaseSeries):
"""Representation for a line consisting of two parametric sympy expressions
over a range."""
is_parametric = True
def __init__(self, expr_x, expr_y, var_start_end, **kwargs):
super(Parametric2DLineSeries, self).__init__()
self.expr_x = sympify(expr_x)
self.expr_y = sympify(expr_y)
self.label = "(%s, %s)" % (str(self.expr_x), str(self.expr_y))
self.var = sympify(var_start_end[0])
self.start = float(var_start_end[1])
self.end = float(var_start_end[2])
self.nb_of_points = kwargs.get('nb_of_points', 300)
self.adaptive = kwargs.get('adaptive', True)
self.depth = kwargs.get('depth', 12)
self.line_color = kwargs.get('line_color', None)
def __str__(self):
return 'parametric cartesian line: (%s, %s) for %s over %s' % (
str(self.expr_x), str(self.expr_y), str(self.var),
str((self.start, self.end)))
def get_parameter_points(self):
np = import_module('numpy')
return np.linspace(self.start, self.end, num=self.nb_of_points)
def get_points(self):
param = self.get_parameter_points()
fx = vectorized_lambdify([self.var], self.expr_x)
fy = vectorized_lambdify([self.var], self.expr_y)
list_x = fx(param)
list_y = fy(param)
return (list_x, list_y)
def get_segments(self):
"""
Adaptively gets segments for plotting.
The adaptive sampling is done by recursively checking if three
points are almost collinear. If they are not collinear, then more
points are added between those points.
References
==========
[1] Adaptive polygonal approximation of parametric curves,
Luiz Henrique de Figueiredo.
"""
if not self.adaptive:
return super(Parametric2DLineSeries, self).get_segments()
f_x = lambdify([self.var], self.expr_x)
f_y = lambdify([self.var], self.expr_y)
list_segments = []
def sample(param_p, param_q, p, q, depth):
""" Samples recursively if three points are almost collinear.
For depth < 6, points are added irrespective of whether they
satisfy the collinearity condition or not. The maximum depth
allowed is 12.
"""
#Randomly sample to avoid aliasing.
np = import_module('numpy')
random = 0.45 + np.random.rand() * 0.1
param_new = param_p + random * (param_q - param_p)
xnew = f_x(param_new)
ynew = f_y(param_new)
new_point = np.array([xnew, ynew])
#Maximum depth
if depth > self.depth:
list_segments.append([p, q])
#Sample irrespective of whether the line is flat till the
#depth of 6. We are not using linspace to avoid aliasing.
elif depth < 6:
sample(param_p, param_new, p, new_point, depth + 1)
sample(param_new, param_q, new_point, q, depth + 1)
#Sample ten points if complex values are encountered
#at both ends. If there is a real value in between, then
#sample those points further.
            elif ((p[0] is None and q[0] is None) or
(p[1] is None and q[1] is None)):
param_array = np.linspace(param_p, param_q, 10)
x_array = list(map(f_x, param_array))
y_array = list(map(f_y, param_array))
if any(x is not None and y is not None
for x, y in zip(x_array, y_array)):
for i in range(len(y_array) - 1):
if ((x_array[i] is not None and y_array[i] is not None) or
(x_array[i + 1] is not None and y_array[i + 1] is not None)):
point_a = [x_array[i], y_array[i]]
point_b = [x_array[i + 1], y_array[i + 1]]
                            sample(param_array[i], param_array[i + 1], point_a,
point_b, depth + 1)
            #Sample further if one of the end points is None (i.e. a complex
            #value) or the three points are not almost collinear.
elif (p[0] is None or p[1] is None
or q[1] is None or q[0] is None
or not flat(p, new_point, q)):
sample(param_p, param_new, p, new_point, depth + 1)
sample(param_new, param_q, new_point, q, depth + 1)
else:
list_segments.append([p, q])
f_start_x = f_x(self.start)
f_start_y = f_y(self.start)
start = [f_start_x, f_start_y]
f_end_x = f_x(self.end)
f_end_y = f_y(self.end)
end = [f_end_x, f_end_y]
sample(self.start, self.end, start, end, 0)
return list_segments
### 3D lines
class Line3DBaseSeries(Line2DBaseSeries):
"""A base class for 3D lines.
Most of the stuff is derived from Line2DBaseSeries."""
is_2Dline = False
is_3Dline = True
_dim = 3
def __init__(self):
super(Line3DBaseSeries, self).__init__()
class Parametric3DLineSeries(Line3DBaseSeries):
"""Representation for a 3D line consisting of two parametric sympy
expressions and a range."""
def __init__(self, expr_x, expr_y, expr_z, var_start_end, **kwargs):
super(Parametric3DLineSeries, self).__init__()
self.expr_x = sympify(expr_x)
self.expr_y = sympify(expr_y)
self.expr_z = sympify(expr_z)
self.label = "(%s, %s)" % (str(self.expr_x), str(self.expr_y))
self.var = sympify(var_start_end[0])
self.start = float(var_start_end[1])
self.end = float(var_start_end[2])
self.nb_of_points = kwargs.get('nb_of_points', 300)
self.line_color = kwargs.get('line_color', None)
def __str__(self):
return '3D parametric cartesian line: (%s, %s, %s) for %s over %s' % (
str(self.expr_x), str(self.expr_y), str(self.expr_z),
str(self.var), str((self.start, self.end)))
def get_parameter_points(self):
np = import_module('numpy')
return np.linspace(self.start, self.end, num=self.nb_of_points)
def get_points(self):
param = self.get_parameter_points()
fx = vectorized_lambdify([self.var], self.expr_x)
fy = vectorized_lambdify([self.var], self.expr_y)
fz = vectorized_lambdify([self.var], self.expr_z)
list_x = fx(param)
list_y = fy(param)
list_z = fz(param)
return (list_x, list_y, list_z)
### Surfaces
class SurfaceBaseSeries(BaseSeries):
"""A base class for 3D surfaces."""
is_3Dsurface = True
def __init__(self):
super(SurfaceBaseSeries, self).__init__()
self.surface_color = None
def get_color_array(self):
np = import_module('numpy')
c = self.surface_color
if isinstance(c, Callable):
f = np.vectorize(c)
arity = len(getargspec(c)[0])
if self.is_parametric:
variables = list(map(centers_of_faces, self.get_parameter_meshes()))
if arity == 1:
return f(variables[0])
elif arity == 2:
return f(*variables)
variables = list(map(centers_of_faces, self.get_meshes()))
if arity == 1:
return f(variables[0])
elif arity == 2:
return f(*variables[:2])
else:
return f(*variables)
else:
return c*np.ones(self.nb_of_points)
class SurfaceOver2DRangeSeries(SurfaceBaseSeries):
"""Representation for a 3D surface consisting of a sympy expression and 2D
range."""
def __init__(self, expr, var_start_end_x, var_start_end_y, **kwargs):
super(SurfaceOver2DRangeSeries, self).__init__()
self.expr = sympify(expr)
self.var_x = sympify(var_start_end_x[0])
self.start_x = float(var_start_end_x[1])
self.end_x = float(var_start_end_x[2])
self.var_y = sympify(var_start_end_y[0])
self.start_y = float(var_start_end_y[1])
self.end_y = float(var_start_end_y[2])
self.nb_of_points_x = kwargs.get('nb_of_points_x', 50)
self.nb_of_points_y = kwargs.get('nb_of_points_y', 50)
self.surface_color = kwargs.get('surface_color', None)
def __str__(self):
return ('cartesian surface: %s for'
' %s over %s and %s over %s') % (
str(self.expr),
str(self.var_x),
str((self.start_x, self.end_x)),
str(self.var_y),
str((self.start_y, self.end_y)))
def get_meshes(self):
np = import_module('numpy')
mesh_x, mesh_y = np.meshgrid(np.linspace(self.start_x, self.end_x,
num=self.nb_of_points_x),
np.linspace(self.start_y, self.end_y,
num=self.nb_of_points_y))
f = vectorized_lambdify((self.var_x, self.var_y), self.expr)
return (mesh_x, mesh_y, f(mesh_x, mesh_y))
class ParametricSurfaceSeries(SurfaceBaseSeries):
"""Representation for a 3D surface consisting of three parametric sympy
expressions and a range."""
is_parametric = True
def __init__(
self, expr_x, expr_y, expr_z, var_start_end_u, var_start_end_v,
**kwargs):
super(ParametricSurfaceSeries, self).__init__()
self.expr_x = sympify(expr_x)
self.expr_y = sympify(expr_y)
self.expr_z = sympify(expr_z)
self.var_u = sympify(var_start_end_u[0])
self.start_u = float(var_start_end_u[1])
self.end_u = float(var_start_end_u[2])
self.var_v = sympify(var_start_end_v[0])
self.start_v = float(var_start_end_v[1])
self.end_v = float(var_start_end_v[2])
self.nb_of_points_u = kwargs.get('nb_of_points_u', 50)
self.nb_of_points_v = kwargs.get('nb_of_points_v', 50)
self.surface_color = kwargs.get('surface_color', None)
def __str__(self):
return ('parametric cartesian surface: (%s, %s, %s) for'
' %s over %s and %s over %s') % (
str(self.expr_x),
str(self.expr_y),
str(self.expr_z),
str(self.var_u),
str((self.start_u, self.end_u)),
str(self.var_v),
str((self.start_v, self.end_v)))
def get_parameter_meshes(self):
np = import_module('numpy')
return np.meshgrid(np.linspace(self.start_u, self.end_u,
num=self.nb_of_points_u),
np.linspace(self.start_v, self.end_v,
num=self.nb_of_points_v))
def get_meshes(self):
mesh_u, mesh_v = self.get_parameter_meshes()
fx = vectorized_lambdify((self.var_u, self.var_v), self.expr_x)
fy = vectorized_lambdify((self.var_u, self.var_v), self.expr_y)
fz = vectorized_lambdify((self.var_u, self.var_v), self.expr_z)
return (fx(mesh_u, mesh_v), fy(mesh_u, mesh_v), fz(mesh_u, mesh_v))
### Contours
class ContourSeries(BaseSeries):
"""Representation for a contour plot."""
#The code is mostly repetition of SurfaceOver2DRange.
#XXX: Presently not used in any of those functions.
    #XXX: Add contour plot and use this series.
is_contour = True
def __init__(self, expr, var_start_end_x, var_start_end_y):
super(ContourSeries, self).__init__()
self.nb_of_points_x = 50
self.nb_of_points_y = 50
self.expr = sympify(expr)
self.var_x = sympify(var_start_end_x[0])
self.start_x = float(var_start_end_x[1])
self.end_x = float(var_start_end_x[2])
self.var_y = sympify(var_start_end_y[0])
self.start_y = float(var_start_end_y[1])
self.end_y = float(var_start_end_y[2])
self.get_points = self.get_meshes
def __str__(self):
return ('contour: %s for '
'%s over %s and %s over %s') % (
str(self.expr),
str(self.var_x),
str((self.start_x, self.end_x)),
str(self.var_y),
str((self.start_y, self.end_y)))
def get_meshes(self):
np = import_module('numpy')
mesh_x, mesh_y = np.meshgrid(np.linspace(self.start_x, self.end_x,
num=self.nb_of_points_x),
np.linspace(self.start_y, self.end_y,
num=self.nb_of_points_y))
f = vectorized_lambdify((self.var_x, self.var_y), self.expr)
return (mesh_x, mesh_y, f(mesh_x, mesh_y))
##############################################################################
# Backends
##############################################################################
class BaseBackend(object):
def __init__(self, parent):
super(BaseBackend, self).__init__()
self.parent = parent
## don't have to check for the success of importing matplotlib in each case;
## we will only be using this backend if we can successfully import matploblib
class MatplotlibBackend(BaseBackend):
def __init__(self, parent):
super(MatplotlibBackend, self).__init__(parent)
are_3D = [s.is_3D for s in self.parent._series]
self.matplotlib = import_module('matplotlib',
__import__kwargs={'fromlist': ['pyplot', 'cm', 'collections']},
min_module_version='1.1.0', catch=(RuntimeError,))
self.plt = self.matplotlib.pyplot
self.cm = self.matplotlib.cm
self.LineCollection = self.matplotlib.collections.LineCollection
if any(are_3D) and not all(are_3D):
raise ValueError('The matplotlib backend can not mix 2D and 3D.')
elif not any(are_3D):
self.fig = self.plt.figure()
self.ax = self.fig.add_subplot(111)
self.ax.spines['left'].set_position('zero')
self.ax.spines['right'].set_color('none')
self.ax.spines['bottom'].set_position('zero')
self.ax.spines['top'].set_color('none')
self.ax.spines['left'].set_smart_bounds(True)
self.ax.spines['bottom'].set_smart_bounds(False)
self.ax.xaxis.set_ticks_position('bottom')
self.ax.yaxis.set_ticks_position('left')
elif all(are_3D):
## mpl_toolkits.mplot3d is necessary for
## projection='3d'
mpl_toolkits = import_module('mpl_toolkits',
__import__kwargs={'fromlist': ['mplot3d']})
self.fig = self.plt.figure()
self.ax = self.fig.add_subplot(111, projection='3d')
def process_series(self):
parent = self.parent
for s in self.parent._series:
# Create the collections
if s.is_2Dline:
collection = self.LineCollection(s.get_segments())
self.ax.add_collection(collection)
elif s.is_contour:
self.ax.contour(*s.get_meshes())
elif s.is_3Dline:
# TODO too complicated, I blame matplotlib
mpl_toolkits = import_module('mpl_toolkits',
__import__kwargs={'fromlist': ['mplot3d']})
art3d = mpl_toolkits.mplot3d.art3d
collection = art3d.Line3DCollection(s.get_segments())
self.ax.add_collection(collection)
x, y, z = s.get_points()
self.ax.set_xlim((min(x), max(x)))
self.ax.set_ylim((min(y), max(y)))
self.ax.set_zlim((min(z), max(z)))
elif s.is_3Dsurface:
x, y, z = s.get_meshes()
collection = self.ax.plot_surface(x, y, z, cmap=self.cm.jet,
rstride=1, cstride=1,
linewidth=0.1)
elif s.is_implicit:
#Smart bounds have to be set to False for implicit plots.
self.ax.spines['left'].set_smart_bounds(False)
self.ax.spines['bottom'].set_smart_bounds(False)
points = s.get_raster()
if len(points) == 2:
#interval math plotting
x, y = _matplotlib_list(points[0])
self.ax.fill(x, y, facecolor='b', edgecolor='None' )
else:
# use contourf or contour depending on whether it is
# an inequality or equality.
#XXX: ``contour`` plots multiple lines. Should be fixed.
ListedColormap = self.matplotlib.colors.ListedColormap
colormap = ListedColormap(["white", "blue"])
xarray, yarray, zarray, plot_type = points
if plot_type == 'contour':
self.ax.contour(xarray, yarray, zarray,
contours=(0, 0), fill=False, cmap=colormap)
else:
self.ax.contourf(xarray, yarray, zarray, cmap=colormap)
else:
raise ValueError('The matplotlib backend supports only '
'is_2Dline, is_3Dline, is_3Dsurface and '
'is_contour objects.')
# Customise the collections with the corresponding per-series
# options.
if hasattr(s, 'label'):
collection.set_label(s.label)
if s.is_line and s.line_color:
if isinstance(s.line_color, (float, int)) or isinstance(s.line_color, Callable):
color_array = s.get_color_array()
collection.set_array(color_array)
else:
collection.set_color(s.line_color)
if s.is_3Dsurface and s.surface_color:
if self.matplotlib.__version__ < "1.2.0": # TODO in the distant future remove this check
warnings.warn('The version of matplotlib is too old to use surface coloring.')
elif isinstance(s.surface_color, (float, int)) or isinstance(s.surface_color, Callable):
color_array = s.get_color_array()
color_array = color_array.reshape(color_array.size)
collection.set_array(color_array)
else:
collection.set_color(s.surface_color)
# Set global options.
# TODO The 3D stuff
# XXX The order of those is important.
mpl_toolkits = import_module('mpl_toolkits',
__import__kwargs={'fromlist': ['mplot3d']})
Axes3D = mpl_toolkits.mplot3d.Axes3D
if parent.xscale and not isinstance(self.ax, Axes3D):
self.ax.set_xscale(parent.xscale)
if parent.yscale and not isinstance(self.ax, Axes3D):
self.ax.set_yscale(parent.yscale)
if parent.xlim:
self.ax.set_xlim(parent.xlim)
else:
if all(isinstance(s, LineOver1DRangeSeries) for s in parent._series):
starts = [s.start for s in parent._series]
ends = [s.end for s in parent._series]
self.ax.set_xlim(min(starts), max(ends))
if parent.ylim:
self.ax.set_ylim(parent.ylim)
if not isinstance(self.ax, Axes3D) or self.matplotlib.__version__ >= '1.2.0': # XXX in the distant future remove this check
self.ax.set_autoscale_on(parent.autoscale)
if parent.axis_center:
val = parent.axis_center
if isinstance(self.ax, Axes3D):
pass
elif val == 'center':
self.ax.spines['left'].set_position('center')
self.ax.spines['bottom'].set_position('center')
elif val == 'auto':
xl, xh = self.ax.get_xlim()
yl, yh = self.ax.get_ylim()
pos_left = ('data', 0) if xl*xh <= 0 else 'center'
pos_bottom = ('data', 0) if yl*yh <= 0 else 'center'
self.ax.spines['left'].set_position(pos_left)
self.ax.spines['bottom'].set_position(pos_bottom)
else:
self.ax.spines['left'].set_position(('data', val[0]))
self.ax.spines['bottom'].set_position(('data', val[1]))
if not parent.axis:
self.ax.set_axis_off()
if parent.legend:
if self.ax.legend():
self.ax.legend_.set_visible(parent.legend)
if parent.margin:
self.ax.set_xmargin(parent.margin)
self.ax.set_ymargin(parent.margin)
if parent.title:
self.ax.set_title(parent.title)
if parent.xlabel:
self.ax.set_xlabel(parent.xlabel, position=(1, 0))
if parent.ylabel:
self.ax.set_ylabel(parent.ylabel, position=(0, 1))
def show(self):
self.process_series()
#TODO after fixing https://github.com/ipython/ipython/issues/1255
# you can uncomment the next line and remove the pyplot.show() call
#self.fig.show()
if _show:
self.plt.show()
def save(self, path):
self.process_series()
self.fig.savefig(path)
def close(self):
self.plt.close(self.fig)
class TextBackend(BaseBackend):
def __init__(self, parent):
super(TextBackend, self).__init__(parent)
def show(self):
if len(self.parent._series) != 1:
raise ValueError(
'The TextBackend supports only one graph per Plot.')
elif not isinstance(self.parent._series[0], LineOver1DRangeSeries):
raise ValueError(
'The TextBackend supports only expressions over a 1D range')
else:
ser = self.parent._series[0]
textplot(ser.expr, ser.start, ser.end)
def close(self):
pass
class DefaultBackend(BaseBackend):
def __new__(cls, parent):
matplotlib = import_module('matplotlib', min_module_version='1.1.0', catch=(RuntimeError,))
if matplotlib:
return MatplotlibBackend(parent)
else:
return TextBackend(parent)
plot_backends = {
'matplotlib': MatplotlibBackend,
'text': TextBackend,
'default': DefaultBackend
}
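# Illustrative sketch (not part of the module): the backend used by a Plot can
# be swapped per instance, e.g. forcing the text backend even when matplotlib
# is importable::
#
#     from sympy import symbols, sin
#     from sympy.plotting.plot import plot, plot_backends
#
#     x = symbols('x')
#     p = plot(sin(x), show=False)
#     p.backend = plot_backends['text']
#     p.show()                      # renders with textplot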
##############################################################################
# Finding the centers of line segments or mesh faces
##############################################################################
def centers_of_segments(array):
np = import_module('numpy')
return np.average(np.vstack((array[:-1], array[1:])), 0)
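# For example (illustrative): centers_of_segments(np.array([0., 1., 2.]))
# returns array([0.5, 1.5]) -- the midpoints of consecutive samples, which is
# where the per-segment colors computed in get_color_array are evaluated.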
def centers_of_faces(array):
np = import_module('numpy')
return np.average(np.dstack((array[:-1, :-1],
array[1:, :-1],
                                 array[:-1, 1:],
                                 array[1:, 1:],
)), 2)
def flat(x, y, z, eps=1e-3):
"""Checks whether three points are almost collinear"""
np = import_module('numpy')
vector_a = x - y
vector_b = z - y
dot_product = np.dot(vector_a, vector_b)
vector_a_norm = np.linalg.norm(vector_a)
vector_b_norm = np.linalg.norm(vector_b)
cos_theta = dot_product / (vector_a_norm * vector_b_norm)
return abs(cos_theta + 1) < eps
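# A quick illustrative check of ``flat`` (not executed here): three collinear
# points give cos(theta) == -1, while a right angle gives cos(theta) == 0::
#
#     import numpy as np
#     flat(np.array([0, 0]), np.array([1, 1]), np.array([2, 2]))   # True
#     flat(np.array([0, 0]), np.array([1, 1]), np.array([2, 0]))   # False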
def _matplotlib_list(interval_list):
"""
Returns lists for matplotlib ``fill`` command from a list of bounding
rectangular intervals
"""
xlist = []
ylist = []
if len(interval_list):
for intervals in interval_list:
intervalx = intervals[0]
intervaly = intervals[1]
xlist.extend([intervalx.start, intervalx.start,
intervalx.end, intervalx.end, None])
ylist.extend([intervaly.start, intervaly.end,
intervaly.end, intervaly.start, None])
else:
#XXX Ugly hack. Matplotlib does not accept empty lists for ``fill``
xlist.extend([None, None, None, None])
ylist.extend([None, None, None, None])
return xlist, ylist
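# Illustrative sketch (the namedtuple below is only a stand-in for the
# interval-math objects produced by implicit plotting)::
#
#     from collections import namedtuple
#     I = namedtuple('I', 'start end')
#     xs, ys = _matplotlib_list([(I(0, 1), I(0, 2))])
#     # xs == [0, 0, 1, 1, None], ys == [0, 2, 2, 0, None]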
####New API for plotting module ####
# TODO: Add color arrays for plots.
# TODO: Add more plotting options for 3d plots.
# TODO: Adaptive sampling for 3D plots.
@doctest_depends_on(modules=('numpy', 'matplotlib',))
def plot(*args, **kwargs):
"""
Plots a function of a single variable and returns an instance of
the ``Plot`` class (also, see the description of the
``show`` keyword argument below).
The plotting uses an adaptive algorithm which samples recursively to
    accurately plot the function. The adaptive algorithm uses a random point near
the midpoint of two points that has to be further sampled. Hence the same
plots can appear slightly different.
Usage
=====
Single Plot
``plot(expr, range, **kwargs)``
If the range is not specified, then a default range of (-10, 10) is used.
Multiple plots with same range.
``plot(expr1, expr2, ..., range, **kwargs)``
If the range is not specified, then a default range of (-10, 10) is used.
Multiple plots with different ranges.
``plot((expr1, range), (expr2, range), ..., **kwargs)``
Range has to be specified for every expression.
Default range may change in the future if a more advanced default range
detection algorithm is implemented.
Arguments
=========
``expr`` : Expression representing the function of single variable
``range``: (x, 0, 5), A 3-tuple denoting the range of the free variable.
Keyword Arguments
=================
Arguments for ``plot`` function:
``show``: Boolean. The default value is set to ``True``. Set show to
``False`` and the function will not display the plot. The returned
instance of the ``Plot`` class can then be used to save or display
the plot by calling the ``save()`` and ``show()`` methods
respectively.
Arguments for ``LineOver1DRangeSeries`` class:
``adaptive``: Boolean. The default value is set to True. Set adaptive to False and
specify ``nb_of_points`` if uniform sampling is required.
``depth``: int Recursion depth of the adaptive algorithm. A depth of value ``n``
samples a maximum of `2^{n}` points.
``nb_of_points``: int. Used when the ``adaptive`` is set to False. The function
is uniformly sampled at ``nb_of_points`` number of points.
Aesthetics options:
``line_color``: float. Specifies the color for the plot.
See ``Plot`` to see how to set color for the plots.
    If there are multiple plots, then the same series arguments are applied to
all the plots. If you want to set these options separately, you can index
the ``Plot`` object returned and set it.
Arguments for ``Plot`` class:
``title`` : str. Title of the plot. It is set to the latex representation of
the expression, if the plot has only one expression.
``xlabel`` : str. Label for the x-axis.
``ylabel`` : str. Label for the y-axis.
``xscale``: {'linear', 'log'} Sets the scaling of the x-axis.
    ``yscale``: {'linear', 'log'} Sets the scaling of the y-axis.
``axis_center``: tuple of two floats denoting the coordinates of the center or
{'center', 'auto'}
``xlim`` : tuple of two floats, denoting the x-axis limits.
``ylim`` : tuple of two floats, denoting the y-axis limits.
Examples
========
>>> from sympy import symbols
>>> from sympy.plotting import plot
>>> x = symbols('x')
Single Plot
>>> plot(x**2, (x, -5, 5))
Plot object containing:
[0]: cartesian line: x**2 for x over (-5.0, 5.0)
Multiple plots with single range.
>>> plot(x, x**2, x**3, (x, -5, 5))
Plot object containing:
[0]: cartesian line: x for x over (-5.0, 5.0)
[1]: cartesian line: x**2 for x over (-5.0, 5.0)
[2]: cartesian line: x**3 for x over (-5.0, 5.0)
Multiple plots with different ranges.
>>> plot((x**2, (x, -6, 6)), (x, (x, -5, 5)))
Plot object containing:
[0]: cartesian line: x**2 for x over (-6.0, 6.0)
[1]: cartesian line: x for x over (-5.0, 5.0)
No adaptive sampling.
>>> plot(x**2, adaptive=False, nb_of_points=400)
Plot object containing:
[0]: cartesian line: x**2 for x over (-10.0, 10.0)
See Also
========
Plot, LineOver1DRangeSeries.
"""
args = list(map(sympify, args))
free = set()
for a in args:
if isinstance(a, Expr):
free |= a.free_symbols
if len(free) > 1:
raise ValueError(
'The same variable should be used in all '
'univariate expressions being plotted.')
x = free.pop() if free else Symbol('x')
kwargs.setdefault('xlabel', x.name)
kwargs.setdefault('ylabel', 'f(%s)' % x.name)
show = kwargs.pop('show', True)
series = []
plot_expr = check_arguments(args, 1, 1)
series = [LineOver1DRangeSeries(*arg, **kwargs) for arg in plot_expr]
plots = Plot(*series, **kwargs)
if show:
plots.show()
return plots
@doctest_depends_on(modules=('numpy', 'matplotlib',))
def plot_parametric(*args, **kwargs):
"""
Plots a 2D parametric plot.
The plotting uses an adaptive algorithm which samples recursively to
    accurately plot the function. The adaptive algorithm uses a random point near
the midpoint of two points that has to be further sampled. Hence the same
plots can appear slightly different.
Usage
=====
Single plot.
``plot_parametric(expr_x, expr_y, range, **kwargs)``
If the range is not specified, then a default range of (-10, 10) is used.
Multiple plots with same range.
``plot_parametric((expr1_x, expr1_y), (expr2_x, expr2_y), range, **kwargs)``
If the range is not specified, then a default range of (-10, 10) is used.
Multiple plots with different ranges.
``plot_parametric((expr_x, expr_y, range), ..., **kwargs)``
Range has to be specified for every expression.
Default range may change in the future if a more advanced default range
detection algorithm is implemented.
Arguments
=========
``expr_x`` : Expression representing the function along x.
``expr_y`` : Expression representing the function along y.
``range``: (u, 0, 5), A 3-tuple denoting the range of the parameter
variable.
Keyword Arguments
=================
Arguments for ``Parametric2DLineSeries`` class:
``adaptive``: Boolean. The default value is set to True. Set adaptive to
False and specify ``nb_of_points`` if uniform sampling is required.
``depth``: int Recursion depth of the adaptive algorithm. A depth of
value ``n`` samples a maximum of `2^{n}` points.
``nb_of_points``: int. Used when the ``adaptive`` is set to False. The
function is uniformly sampled at ``nb_of_points`` number of points.
Aesthetics
----------
``line_color``: function which returns a float. Specifies the color for the
plot. See ``sympy.plotting.Plot`` for more details.
If there are multiple plots, then the same Series arguments are applied to
all the plots. If you want to set these options separately, you can index
the returned ``Plot`` object and set it.
Arguments for ``Plot`` class:
``xlabel`` : str. Label for the x-axis.
``ylabel`` : str. Label for the y-axis.
``xscale``: {'linear', 'log'} Sets the scaling of the x-axis.
    ``yscale``: {'linear', 'log'} Sets the scaling of the y-axis.
``axis_center``: tuple of two floats denoting the coordinates of the center
or {'center', 'auto'}
``xlim`` : tuple of two floats, denoting the x-axis limits.
``ylim`` : tuple of two floats, denoting the y-axis limits.
Examples
========
>>> from sympy import symbols, cos, sin
>>> from sympy.plotting import plot_parametric
>>> u = symbols('u')
Single Parametric plot
>>> plot_parametric(cos(u), sin(u), (u, -5, 5))
Plot object containing:
[0]: parametric cartesian line: (cos(u), sin(u)) for u over (-5.0, 5.0)
Multiple parametric plot with single range.
>>> plot_parametric((cos(u), sin(u)), (u, cos(u)))
Plot object containing:
[0]: parametric cartesian line: (cos(u), sin(u)) for u over (-10.0, 10.0)
[1]: parametric cartesian line: (u, cos(u)) for u over (-10.0, 10.0)
Multiple parametric plots.
>>> plot_parametric((cos(u), sin(u), (u, -5, 5)),
... (cos(u), u, (u, -5, 5)))
Plot object containing:
[0]: parametric cartesian line: (cos(u), sin(u)) for u over (-5.0, 5.0)
[1]: parametric cartesian line: (cos(u), u) for u over (-5.0, 5.0)
See Also
========
Plot, Parametric2DLineSeries
"""
args = list(map(sympify, args))
show = kwargs.pop('show', True)
series = []
plot_expr = check_arguments(args, 2, 1)
series = [Parametric2DLineSeries(*arg, **kwargs) for arg in plot_expr]
plots = Plot(*series, **kwargs)
if show:
plots.show()
return plots
@doctest_depends_on(modules=('numpy', 'matplotlib',))
def plot3d_parametric_line(*args, **kwargs):
"""
Plots a 3D parametric line plot.
Usage
=====
Single plot:
``plot3d_parametric_line(expr_x, expr_y, expr_z, range, **kwargs)``
If the range is not specified, then a default range of (-10, 10) is used.
Multiple plots.
``plot3d_parametric_line((expr_x, expr_y, expr_z, range), ..., **kwargs)``
Ranges have to be specified for every expression.
Default range may change in the future if a more advanced default range
detection algorithm is implemented.
Arguments
=========
``expr_x`` : Expression representing the function along x.
``expr_y`` : Expression representing the function along y.
``expr_z`` : Expression representing the function along z.
``range``: ``(u, 0, 5)``, A 3-tuple denoting the range of the parameter
variable.
Keyword Arguments
=================
Arguments for ``Parametric3DLineSeries`` class.
``nb_of_points``: The range is uniformly sampled at ``nb_of_points``
number of points.
Aesthetics:
``line_color``: function which returns a float. Specifies the color for the
plot. See ``sympy.plotting.Plot`` for more details.
If there are multiple plots, then the same series arguments are applied to
all the plots. If you want to set these options separately, you can index
the returned ``Plot`` object and set it.
Arguments for ``Plot`` class.
``title`` : str. Title of the plot.
Examples
========
>>> from sympy import symbols, cos, sin
>>> from sympy.plotting import plot3d_parametric_line
>>> u = symbols('u')
Single plot.
>>> plot3d_parametric_line(cos(u), sin(u), u, (u, -5, 5))
Plot object containing:
[0]: 3D parametric cartesian line: (cos(u), sin(u), u) for u over (-5.0, 5.0)
Multiple plots.
>>> plot3d_parametric_line((cos(u), sin(u), u, (u, -5, 5)),
... (sin(u), u**2, u, (u, -5, 5)))
Plot object containing:
[0]: 3D parametric cartesian line: (cos(u), sin(u), u) for u over (-5.0, 5.0)
[1]: 3D parametric cartesian line: (sin(u), u**2, u) for u over (-5.0, 5.0)
See Also
========
Plot, Parametric3DLineSeries
"""
args = list(map(sympify, args))
show = kwargs.pop('show', True)
series = []
plot_expr = check_arguments(args, 3, 1)
series = [Parametric3DLineSeries(*arg, **kwargs) for arg in plot_expr]
plots = Plot(*series, **kwargs)
if show:
plots.show()
return plots
@doctest_depends_on(modules=('numpy', 'matplotlib',))
def plot3d(*args, **kwargs):
"""
Plots a 3D surface plot.
Usage
=====
Single plot
``plot3d(expr, range_x, range_y, **kwargs)``
If the ranges are not specified, then a default range of (-10, 10) is used.
Multiple plot with the same range.
``plot3d(expr1, expr2, range_x, range_y, **kwargs)``
If the ranges are not specified, then a default range of (-10, 10) is used.
Multiple plots with different ranges.
``plot3d((expr1, range_x, range_y), (expr2, range_x, range_y), ..., **kwargs)``
Ranges have to be specified for every expression.
Default range may change in the future if a more advanced default range
detection algorithm is implemented.
Arguments
=========
``expr`` : Expression representing the function along x.
``range_x``: (x, 0, 5), A 3-tuple denoting the range of the x
variable.
``range_y``: (y, 0, 5), A 3-tuple denoting the range of the y
variable.
Keyword Arguments
=================
Arguments for ``SurfaceOver2DRangeSeries`` class:
``nb_of_points_x``: int. The x range is sampled uniformly at
``nb_of_points_x`` of points.
``nb_of_points_y``: int. The y range is sampled uniformly at
``nb_of_points_y`` of points.
Aesthetics:
``surface_color``: Function which returns a float. Specifies the color for
the surface of the plot. See ``sympy.plotting.Plot`` for more details.
If there are multiple plots, then the same series arguments are applied to
all the plots. If you want to set these options separately, you can index
the returned ``Plot`` object and set it.
Arguments for ``Plot`` class:
``title`` : str. Title of the plot.
Examples
========
>>> from sympy import symbols
>>> from sympy.plotting import plot3d
>>> x, y = symbols('x y')
Single plot
>>> plot3d(x*y, (x, -5, 5), (y, -5, 5))
Plot object containing:
[0]: cartesian surface: x*y for x over (-5.0, 5.0) and y over (-5.0, 5.0)
Multiple plots with same range
>>> plot3d(x*y, -x*y, (x, -5, 5), (y, -5, 5))
Plot object containing:
[0]: cartesian surface: x*y for x over (-5.0, 5.0) and y over (-5.0, 5.0)
[1]: cartesian surface: -x*y for x over (-5.0, 5.0) and y over (-5.0, 5.0)
Multiple plots with different ranges.
>>> plot3d((x**2 + y**2, (x, -5, 5), (y, -5, 5)),
... (x*y, (x, -3, 3), (y, -3, 3)))
Plot object containing:
[0]: cartesian surface: x**2 + y**2 for x over (-5.0, 5.0) and y over (-5.0, 5.0)
[1]: cartesian surface: x*y for x over (-3.0, 3.0) and y over (-3.0, 3.0)
See Also
========
Plot, SurfaceOver2DRangeSeries
"""
args = list(map(sympify, args))
show = kwargs.pop('show', True)
series = []
plot_expr = check_arguments(args, 1, 2)
series = [SurfaceOver2DRangeSeries(*arg, **kwargs) for arg in plot_expr]
plots = Plot(*series, **kwargs)
if show:
plots.show()
return plots
@doctest_depends_on(modules=('numpy', 'matplotlib',))
def plot3d_parametric_surface(*args, **kwargs):
"""
Plots a 3D parametric surface plot.
Usage
=====
Single plot.
``plot3d_parametric_surface(expr_x, expr_y, expr_z, range_u, range_v, **kwargs)``
    If the ranges are not specified, then a default range of (-10, 10) is used.
Multiple plots.
``plot3d_parametric_surface((expr_x, expr_y, expr_z, range_u, range_v), ..., **kwargs)``
Ranges have to be specified for every expression.
Default range may change in the future if a more advanced default range
detection algorithm is implemented.
Arguments
=========
``expr_x``: Expression representing the function along ``x``.
``expr_y``: Expression representing the function along ``y``.
``expr_z``: Expression representing the function along ``z``.
``range_u``: ``(u, 0, 5)``, A 3-tuple denoting the range of the ``u``
variable.
``range_v``: ``(v, 0, 5)``, A 3-tuple denoting the range of the v
variable.
Keyword Arguments
=================
Arguments for ``ParametricSurfaceSeries`` class:
    ``nb_of_points_u``: int. The ``u`` range is sampled uniformly at
    ``nb_of_points_u`` of points
    ``nb_of_points_v``: int. The ``v`` range is sampled uniformly at
    ``nb_of_points_v`` of points
Aesthetics:
``surface_color``: Function which returns a float. Specifies the color for
the surface of the plot. See ``sympy.plotting.Plot`` for more details.
If there are multiple plots, then the same series arguments are applied for
all the plots. If you want to set these options separately, you can index
the returned ``Plot`` object and set it.
Arguments for ``Plot`` class:
``title`` : str. Title of the plot.
Examples
========
>>> from sympy import symbols, cos, sin
>>> from sympy.plotting import plot3d_parametric_surface
>>> u, v = symbols('u v')
Single plot.
>>> plot3d_parametric_surface(cos(u + v), sin(u - v), u - v,
... (u, -5, 5), (v, -5, 5))
Plot object containing:
[0]: parametric cartesian surface: (cos(u + v), sin(u - v), u - v) for u over (-5.0, 5.0) and v over (-5.0, 5.0)
See Also
========
Plot, ParametricSurfaceSeries
"""
args = list(map(sympify, args))
show = kwargs.pop('show', True)
series = []
plot_expr = check_arguments(args, 3, 2)
series = [ParametricSurfaceSeries(*arg, **kwargs) for arg in plot_expr]
plots = Plot(*series, **kwargs)
if show:
plots.show()
return plots
def check_arguments(args, expr_len, nb_of_free_symbols):
"""
Checks the arguments and converts into tuples of the
form (exprs, ranges)
Examples
========
>>> from sympy import plot, cos, sin, symbols
>>> from sympy.plotting.plot import check_arguments
>>> x = symbols('x')
>>> check_arguments([cos(x), sin(x)], 2, 1)
[(cos(x), sin(x), (x, -10, 10))]
>>> check_arguments([x, x**2], 1, 1)
[(x, (x, -10, 10)), (x**2, (x, -10, 10))]
"""
if expr_len > 1 and isinstance(args[0], Expr):
# Multiple expressions same range.
# The arguments are tuples when the expression length is
# greater than 1.
if len(args) < expr_len:
raise ValueError("len(args) should not be less than expr_len")
for i in range(len(args)):
if isinstance(args[i], Tuple):
break
else:
i = len(args) + 1
exprs = Tuple(*args[:i])
free_symbols = list(set.union(*[e.free_symbols for e in exprs]))
if len(args) == expr_len + nb_of_free_symbols:
#Ranges given
plots = [exprs + Tuple(*args[expr_len:])]
else:
default_range = Tuple(-10, 10)
ranges = []
for symbol in free_symbols:
ranges.append(Tuple(symbol) + default_range)
for i in range(len(free_symbols) - nb_of_free_symbols):
ranges.append(Tuple(Dummy()) + default_range)
plots = [exprs + Tuple(*ranges)]
return plots
if isinstance(args[0], Expr) or (isinstance(args[0], Tuple) and
len(args[0]) == expr_len and
expr_len != 3):
# Cannot handle expressions with number of expression = 3. It is
# not possible to differentiate between expressions and ranges.
#Series of plots with same range
for i in range(len(args)):
if isinstance(args[i], Tuple) and len(args[i]) != expr_len:
break
if not isinstance(args[i], Tuple):
args[i] = Tuple(args[i])
else:
i = len(args) + 1
exprs = args[:i]
assert all(isinstance(e, Expr) for expr in exprs for e in expr)
free_symbols = list(set.union(*[e.free_symbols for expr in exprs
for e in expr]))
if len(free_symbols) > nb_of_free_symbols:
raise ValueError("The number of free_symbols in the expression "
"is greater than %d" % nb_of_free_symbols)
if len(args) == i + nb_of_free_symbols and isinstance(args[i], Tuple):
ranges = Tuple(*[range_expr for range_expr in args[
i:i + nb_of_free_symbols]])
plots = [expr + ranges for expr in exprs]
return plots
else:
#Use default ranges.
default_range = Tuple(-10, 10)
ranges = []
for symbol in free_symbols:
ranges.append(Tuple(symbol) + default_range)
for i in range(len(free_symbols) - nb_of_free_symbols):
ranges.append(Tuple(Dummy()) + default_range)
ranges = Tuple(*ranges)
plots = [expr + ranges for expr in exprs]
return plots
elif isinstance(args[0], Tuple) and len(args[0]) == expr_len + nb_of_free_symbols:
#Multiple plots with different ranges.
for arg in args:
for i in range(expr_len):
if not isinstance(arg[i], Expr):
raise ValueError("Expected an expression, given %s" %
str(arg[i]))
for i in range(nb_of_free_symbols):
if not len(arg[i + expr_len]) == 3:
raise ValueError("The ranges should be a tuple of "
"length 3, got %s" % str(arg[i + expr_len]))
return args
| bsd-3-clause |