repo_name stringlengths 6-92 | path stringlengths 4-191 | copies stringclasses 322 values | size stringlengths 4-6 | content stringlengths 821-753k | license stringclasses 15 values |
---|---|---|---|---|---|
ioam/holoviews | holoviews/tests/plotting/bokeh/testcurveplot.py | 2 | 18479 | import datetime as dt
from unittest import skipIf
import numpy as np
from holoviews.core import NdOverlay, HoloMap, DynamicMap
from holoviews.core.options import Cycle, Palette
from holoviews.core.util import pd, basestring
from holoviews.element import Curve
from holoviews.plotting.util import rgb2hex
from holoviews.streams import PointerX
from .testplot import TestBokehPlot, bokeh_renderer
try:
from bokeh.models import FactorRange, FixedTicker
from holoviews.plotting.bokeh.callbacks import Callback, PointerXCallback
except:
pass
pd_skip = skipIf(pd is None, 'Pandas not available')
class TestCurvePlot(TestBokehPlot):
def test_batched_curve_subscribers_correctly_attached(self):
posx = PointerX()
opts = {'NdOverlay': dict(plot=dict(legend_limit=0)),
'Curve': dict(style=dict(line_color=Cycle(values=['red', 'blue'])))}
overlay = DynamicMap(lambda x: NdOverlay({i: Curve([(i, j) for j in range(2)])
for i in range(2)}).opts(opts), kdims=[],
streams=[posx])
plot = bokeh_renderer.get_plot(overlay)
self.assertIn(plot.refresh, posx.subscribers)
self.assertNotIn(list(plot.subplots.values())[0].refresh, posx.subscribers)
def test_batched_curve_subscribers_correctly_linked(self):
# Checks if a stream callback is created to link batched plot
# to the stream
posx = PointerX()
opts = {'NdOverlay': dict(plot=dict(legend_limit=0)),
'Curve': dict(style=dict(line_color=Cycle(values=['red', 'blue'])))}
overlay = DynamicMap(lambda x: NdOverlay({i: Curve([(i, j) for j in range(2)])
for i in range(2)}).opts(opts), kdims=[],
streams=[posx])
plot = bokeh_renderer.get_plot(overlay)
self.assertEqual(len(Callback._callbacks), 1)
key = list(Callback._callbacks.keys())[0]
self.assertEqual(key, (id(plot.handles['plot']), id(PointerXCallback)))
def test_cyclic_palette_curves(self):
palette = Palette('Set1')
opts = dict(color=palette)
hmap = HoloMap({i: NdOverlay({j: Curve(np.random.rand(3)).opts(style=opts)
for j in range(3)})
for i in range(3)})
colors = palette[3].values
plot = bokeh_renderer.get_plot(hmap)
for subp, color in zip(plot.subplots.values(), colors):
color = color if isinstance(color, basestring) else rgb2hex(color)
self.assertEqual(subp.handles['glyph'].line_color, color)
def test_batched_curve_line_color_and_color(self):
opts = {'NdOverlay': dict(plot=dict(legend_limit=0)),
'Curve': dict(style=dict(line_color=Cycle(values=['red', 'blue'])))}
overlay = NdOverlay({i: Curve([(i, j) for j in range(2)])
for i in range(2)}).opts(opts)
plot = bokeh_renderer.get_plot(overlay).subplots[()]
line_color = ['red', 'blue']
self.assertEqual(plot.handles['source'].data['line_color'], line_color)
def test_batched_curve_alpha_and_color(self):
opts = {'NdOverlay': dict(plot=dict(legend_limit=0)),
'Curve': dict(style=dict(alpha=Cycle(values=[0.5, 1])))}
overlay = NdOverlay({i: Curve([(i, j) for j in range(2)])
for i in range(2)}).opts(opts)
plot = bokeh_renderer.get_plot(overlay).subplots[()]
alpha = [0.5, 1.]
color = ['#30a2da', '#fc4f30']
self.assertEqual(plot.handles['source'].data['alpha'], alpha)
self.assertEqual(plot.handles['source'].data['color'], color)
def test_batched_curve_line_width_and_color(self):
opts = {'NdOverlay': dict(plot=dict(legend_limit=0)),
'Curve': dict(style=dict(line_width=Cycle(values=[0.5, 1])))}
overlay = NdOverlay({i: Curve([(i, j) for j in range(2)])
for i in range(2)}).opts(opts)
plot = bokeh_renderer.get_plot(overlay).subplots[()]
line_width = [0.5, 1.]
color = ['#30a2da', '#fc4f30']
self.assertEqual(plot.handles['source'].data['line_width'], line_width)
self.assertEqual(plot.handles['source'].data['color'], color)
def test_curve_overlay_datetime_hover(self):
obj = NdOverlay({i: Curve([(dt.datetime(2016, 1, j+1), j) for j in range(31)]) for i in range(5)},
kdims=['Test'])
opts = {'Curve': {'tools': ['hover']}}
obj = obj(plot=opts)
self._test_hover_info(obj, [('Test', '@{Test}'), ('x', '@{x_dt_strings}'), ('y', '@{y}')])
def test_curve_overlay_hover_batched(self):
obj = NdOverlay({i: Curve(np.random.rand(10,2)) for i in range(5)},
kdims=['Test'])
opts = {'Curve': {'tools': ['hover']},
'NdOverlay': {'legend_limit': 0}}
obj = obj(plot=opts)
self._test_hover_info(obj, [('Test', '@{Test}')], 'prev')
def test_curve_overlay_hover(self):
obj = NdOverlay({i: Curve(np.random.rand(10,2)) for i in range(5)},
kdims=['Test'])
opts = {'Curve': {'tools': ['hover']}}
obj = obj(plot=opts)
self._test_hover_info(obj, [('Test', '@{Test}'), ('x', '@{x}'), ('y', '@{y}')], 'nearest')
def test_curve_categorical_xaxis(self):
curve = Curve((['A', 'B', 'C'], [1,2,3]))
plot = bokeh_renderer.get_plot(curve)
x_range = plot.handles['x_range']
self.assertIsInstance(x_range, FactorRange)
self.assertEqual(x_range.factors, ['A', 'B', 'C'])
def test_curve_categorical_xaxis_invert_axes(self):
curve = Curve((['A', 'B', 'C'], (1,2,3))).opts(plot=dict(invert_axes=True))
plot = bokeh_renderer.get_plot(curve)
y_range = plot.handles['y_range']
self.assertIsInstance(y_range, FactorRange)
self.assertEqual(y_range.factors, ['A', 'B', 'C'])
def test_curve_datetime64(self):
dates = [np.datetime64(dt.datetime(2016,1,i)) for i in range(1, 11)]
curve = Curve((dates, np.random.rand(10)))
plot = bokeh_renderer.get_plot(curve)
self.assertEqual(plot.handles['x_range'].start, np.datetime64(dt.datetime(2016, 1, 1)))
self.assertEqual(plot.handles['x_range'].end, np.datetime64(dt.datetime(2016, 1, 10)))
@pd_skip
def test_curve_pandas_timestamps(self):
dates = pd.date_range('2016-01-01', '2016-01-10', freq='D')
curve = Curve((dates, np.random.rand(10)))
plot = bokeh_renderer.get_plot(curve)
self.assertEqual(plot.handles['x_range'].start, np.datetime64(dt.datetime(2016, 1, 1)))
self.assertEqual(plot.handles['x_range'].end, np.datetime64(dt.datetime(2016, 1, 10)))
def test_curve_dt_datetime(self):
dates = [dt.datetime(2016,1,i) for i in range(1, 11)]
curve = Curve((dates, np.random.rand(10)))
plot = bokeh_renderer.get_plot(curve)
self.assertEqual(plot.handles['x_range'].start, np.datetime64(dt.datetime(2016, 1, 1)))
self.assertEqual(plot.handles['x_range'].end, np.datetime64(dt.datetime(2016, 1, 10)))
def test_curve_heterogeneous_datetime_types_overlay(self):
dates64 = [np.datetime64(dt.datetime(2016,1,i)) for i in range(1, 11)]
dates = [dt.datetime(2016,1,i) for i in range(2, 12)]
curve_dt64 = Curve((dates64, np.random.rand(10)))
curve_dt = Curve((dates, np.random.rand(10)))
plot = bokeh_renderer.get_plot(curve_dt*curve_dt64)
self.assertEqual(plot.handles['x_range'].start, np.datetime64(dt.datetime(2016, 1, 1)))
self.assertEqual(plot.handles['x_range'].end, np.datetime64(dt.datetime(2016, 1, 11)))
@pd_skip
def test_curve_heterogeneous_datetime_types_with_pd_overlay(self):
dates_pd = pd.date_range('2016-01-04', '2016-01-13', freq='D')
dates64 = [np.datetime64(dt.datetime(2016,1,i)) for i in range(1, 11)]
dates = [dt.datetime(2016,1,i) for i in range(2, 12)]
curve_dt64 = Curve((dates64, np.random.rand(10)))
curve_dt = Curve((dates, np.random.rand(10)))
curve_pd = Curve((dates_pd, np.random.rand(10)))
plot = bokeh_renderer.get_plot(curve_dt*curve_dt64*curve_pd)
self.assertEqual(plot.handles['x_range'].start, np.datetime64(dt.datetime(2016, 1, 1)))
self.assertEqual(plot.handles['x_range'].end, np.datetime64(dt.datetime(2016, 1, 13)))
def test_curve_fontsize_xlabel(self):
curve = Curve(range(10)).opts(plot=dict(fontsize={'xlabel': '14pt'}))
plot = bokeh_renderer.get_plot(curve)
self.assertEqual(plot.handles['xaxis'].axis_label_text_font_size,
'14pt')
def test_curve_fontsize_ylabel(self):
curve = Curve(range(10)).opts(plot=dict(fontsize={'ylabel': '14pt'}))
plot = bokeh_renderer.get_plot(curve)
self.assertEqual(plot.handles['yaxis'].axis_label_text_font_size,
'14pt')
def test_curve_fontsize_both_labels(self):
curve = Curve(range(10)).opts(plot=dict(fontsize={'labels': '14pt'}))
plot = bokeh_renderer.get_plot(curve)
self.assertEqual(plot.handles['xaxis'].axis_label_text_font_size,
'14pt')
self.assertEqual(plot.handles['yaxis'].axis_label_text_font_size,
'14pt')
def test_curve_fontsize_xticks(self):
curve = Curve(range(10)).opts(plot=dict(fontsize={'xticks': '14pt'}))
plot = bokeh_renderer.get_plot(curve)
self.assertEqual(plot.handles['xaxis'].major_label_text_font_size,
{'value': '14pt'})
def test_curve_fontsize_yticks(self):
curve = Curve(range(10)).opts(plot=dict(fontsize={'yticks': '14pt'}))
plot = bokeh_renderer.get_plot(curve)
self.assertEqual(plot.handles['yaxis'].major_label_text_font_size,
{'value': '14pt'})
def test_curve_fontsize_both_ticks(self):
curve = Curve(range(10)).opts(plot=dict(fontsize={'ticks': '14pt'}))
plot = bokeh_renderer.get_plot(curve)
self.assertEqual(plot.handles['xaxis'].major_label_text_font_size,
{'value': '14pt'})
self.assertEqual(plot.handles['yaxis'].major_label_text_font_size,
{'value': '14pt'})
def test_curve_fontsize_xticks_and_both_ticks(self):
curve = Curve(range(10)).opts(plot=dict(fontsize={'xticks': '18pt', 'ticks': '14pt'}))
plot = bokeh_renderer.get_plot(curve)
self.assertEqual(plot.handles['xaxis'].major_label_text_font_size,
{'value': '18pt'})
self.assertEqual(plot.handles['yaxis'].major_label_text_font_size,
{'value': '14pt'})
def test_curve_xticks_list(self):
curve = Curve(range(10)).opts(plot=dict(xticks=[0, 5, 10]))
plot = bokeh_renderer.get_plot(curve).state
self.assertIsInstance(plot.xaxis[0].ticker, FixedTicker)
self.assertEqual(plot.xaxis[0].ticker.ticks, [0, 5, 10])
def test_curve_xticks_list_of_tuples_xaxis(self):
ticks = [(0, 'zero'), (5, 'five'), (10, 'ten')]
curve = Curve(range(10)).opts(plot=dict(xticks=ticks))
plot = bokeh_renderer.get_plot(curve).state
self.assertIsInstance(plot.xaxis[0].ticker, FixedTicker)
self.assertEqual(plot.xaxis[0].major_label_overrides, dict(ticks))
def test_curve_yticks_list(self):
curve = Curve(range(10)).opts(plot=dict(yticks=[0, 5, 10]))
plot = bokeh_renderer.get_plot(curve).state
self.assertIsInstance(plot.yaxis[0].ticker, FixedTicker)
self.assertEqual(plot.yaxis[0].ticker.ticks, [0, 5, 10])
def test_curve_xticks_list_of_tuples_yaxis(self):
ticks = [(0, 'zero'), (5, 'five'), (10, 'ten')]
curve = Curve(range(10)).opts(plot=dict(yticks=ticks))
plot = bokeh_renderer.get_plot(curve).state
self.assertIsInstance(plot.yaxis[0].ticker, FixedTicker)
self.assertEqual(plot.yaxis[0].major_label_overrides, dict(ticks))
def test_curve_padding_square(self):
curve = Curve([1, 2, 3]).options(padding=0.1)
plot = bokeh_renderer.get_plot(curve)
x_range, y_range = plot.handles['x_range'], plot.handles['y_range']
self.assertEqual(x_range.start, -0.2)
self.assertEqual(x_range.end, 2.2)
self.assertEqual(y_range.start, 0.8)
self.assertEqual(y_range.end, 3.2)
def test_curve_padding_square_per_axis(self):
curve = Curve([1, 2, 3]).options(padding=((0, 0.1), (0.1, 0.2)))
plot = bokeh_renderer.get_plot(curve)
x_range, y_range = plot.handles['x_range'], plot.handles['y_range']
self.assertEqual(x_range.start, 0)
self.assertEqual(x_range.end, 2.2)
self.assertEqual(y_range.start, 0.8)
self.assertEqual(y_range.end, 3.4)
def test_curve_padding_hard_xrange(self):
curve = Curve([1, 2, 3]).redim.range(x=(0, 3)).options(padding=0.1)
plot = bokeh_renderer.get_plot(curve)
x_range, y_range = plot.handles['x_range'], plot.handles['y_range']
self.assertEqual(x_range.start, 0)
self.assertEqual(x_range.end, 3)
self.assertEqual(y_range.start, 0.8)
self.assertEqual(y_range.end, 3.2)
def test_curve_padding_soft_xrange(self):
curve = Curve([1, 2, 3]).redim.soft_range(x=(0, 3)).options(padding=0.1)
plot = bokeh_renderer.get_plot(curve)
x_range, y_range = plot.handles['x_range'], plot.handles['y_range']
self.assertEqual(x_range.start, -0.2)
self.assertEqual(x_range.end, 3)
self.assertEqual(y_range.start, 0.8)
self.assertEqual(y_range.end, 3.2)
def test_curve_padding_unequal(self):
curve = Curve([1, 2, 3]).options(padding=(0.05, 0.1))
plot = bokeh_renderer.get_plot(curve)
x_range, y_range = plot.handles['x_range'], plot.handles['y_range']
self.assertEqual(x_range.start, -0.1)
self.assertEqual(x_range.end, 2.1)
self.assertEqual(y_range.start, 0.8)
self.assertEqual(y_range.end, 3.2)
def test_curve_padding_nonsquare(self):
curve = Curve([1, 2, 3]).options(padding=0.1, width=600)
plot = bokeh_renderer.get_plot(curve)
x_range, y_range = plot.handles['x_range'], plot.handles['y_range']
self.assertEqual(x_range.start, -0.1)
self.assertEqual(x_range.end, 2.1)
self.assertEqual(y_range.start, 0.8)
self.assertEqual(y_range.end, 3.2)
def test_curve_padding_logx(self):
curve = Curve([(1, 1), (2, 2), (3,3)]).options(padding=0.1, logx=True)
plot = bokeh_renderer.get_plot(curve)
x_range, y_range = plot.handles['x_range'], plot.handles['y_range']
self.assertEqual(x_range.start, 0.89595845984076228)
self.assertEqual(x_range.end, 3.3483695221017129)
self.assertEqual(y_range.start, 0.8)
self.assertEqual(y_range.end, 3.2)
def test_curve_padding_logy(self):
curve = Curve([1, 2, 3]).options(padding=0.1, logy=True)
plot = bokeh_renderer.get_plot(curve)
x_range, y_range = plot.handles['x_range'], plot.handles['y_range']
self.assertEqual(x_range.start, -0.2)
self.assertEqual(x_range.end, 2.2)
self.assertEqual(y_range.start, 0.89595845984076228)
self.assertEqual(y_range.end, 3.3483695221017129)
def test_curve_padding_datetime_square(self):
curve = Curve([(np.datetime64('2016-04-0%d' % i), i) for i in range(1, 4)]).options(
padding=0.1
)
plot = bokeh_renderer.get_plot(curve)
x_range, y_range = plot.handles['x_range'], plot.handles['y_range']
self.assertEqual(x_range.start, np.datetime64('2016-03-31T19:12:00.000000000'))
self.assertEqual(x_range.end, np.datetime64('2016-04-03T04:48:00.000000000'))
self.assertEqual(y_range.start, 0.8)
self.assertEqual(y_range.end, 3.2)
def test_curve_padding_datetime_nonsquare(self):
curve = Curve([(np.datetime64('2016-04-0%d' % i), i) for i in range(1, 4)]).options(
padding=0.1, width=600
)
plot = bokeh_renderer.get_plot(curve)
x_range, y_range = plot.handles['x_range'], plot.handles['y_range']
self.assertEqual(x_range.start, np.datetime64('2016-03-31T21:36:00.000000000'))
self.assertEqual(x_range.end, np.datetime64('2016-04-03T02:24:00.000000000'))
self.assertEqual(y_range.start, 0.8)
self.assertEqual(y_range.end, 3.2)
###########################
# Styling mapping #
###########################
def test_curve_scalar_color_op(self):
curve = Curve([(0, 0, 'red'), (0, 1, 'red'), (0, 2, 'red')],
vdims=['y', 'color']).options(color='color')
plot = bokeh_renderer.get_plot(curve)
glyph = plot.handles['glyph']
self.assertEqual(glyph.line_color, 'red')
def test_op_ndoverlay_color_value(self):
colors = ['blue', 'red']
overlay = NdOverlay({color: Curve(np.arange(i))
for i, color in enumerate(colors)},
'color').options('Curve', color='color')
plot = bokeh_renderer.get_plot(overlay)
for subplot, color in zip(plot.subplots.values(), colors):
style = dict(subplot.style[subplot.cyclic_index])
style = subplot._apply_transforms(subplot.current_frame, {}, {}, style)
self.assertEqual(style['color'], color)
def test_curve_color_op(self):
curve = Curve([(0, 0, 'red'), (0, 1, 'blue'), (0, 2, 'red')],
vdims=['y', 'color']).options(color='color')
with self.assertRaises(Exception):
bokeh_renderer.get_plot(curve)
def test_curve_alpha_op(self):
curve = Curve([(0, 0, 0.1), (0, 1, 0.3), (0, 2, 1)],
vdims=['y', 'alpha']).options(alpha='alpha')
with self.assertRaises(Exception):
bokeh_renderer.get_plot(curve)
def test_curve_line_width_op(self):
curve = Curve([(0, 0, 0.1), (0, 1, 0.3), (0, 2, 1)],
vdims=['y', 'linewidth']).options(line_width='linewidth')
with self.assertRaises(Exception):
bokeh_renderer.get_plot(curve)
| bsd-3-clause |
returnandrisk/meucci-python | projection_pricing_aggregation.py | 1 | 4119 | """
Python code for blog post "mini-Meucci : Applying The Checklist - Steps 3-5"
http://www.returnandrisk.com/2016/06/mini-meucci-applying-checklist-steps-3-5.html
Copyright (c) 2016 Peter Chan (peter-at-return-and-risk-dot-com)
"""
#%matplotlib inline
from pandas_datareader import data
import numpy as np
import datetime
import math
import matplotlib.pyplot as plt
import seaborn
# Get Yahoo data on 30 DJIA stocks and a few ETFs
tickers = ['MMM','AXP','AAPL','BA','CAT','CVX','CSCO','KO','DD','XOM','GE','GS',
'HD','INTC','IBM','JNJ','JPM','MCD','MRK','MSFT','NKE','PFE','PG',
'TRV','UNH','UTX','VZ','V','WMT','DIS','SPY','DIA','TLT','SHY']
start = datetime.datetime(2008, 4, 1)
end = datetime.datetime(2016, 5, 31)
rawdata = data.DataReader(tickers, 'yahoo', start, end)
prices = rawdata.to_frame().unstack(level=1)['Adj Close']
###############################################################################
# Quest for Invariance (random walk model) and Estimation (historical approach)
###############################################################################
risk_drivers = np.log(prices)
# Set estimation interval = investment horizon (tau)
tau = 21 # investment horizon in days
invariants = risk_drivers.diff(tau).drop(risk_drivers.index[0:tau])
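# Note (clarifying comment, not in the original post): since the risk drivers are
# log-prices, each row of `invariants` holds the tau-day (21-day) compounded
# log-return of every ticker; the random walk model treats these as the invariants.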
###############################################################################
# Projection to the Investment Horizon
###############################################################################
# Using the historical simulation approach and setting estimation interval =
# investment horizon, means that projected invariants = invariants
# Recover the projected scenarios for the risk drivers at the tau-day horizon
risk_drivers_prjn = risk_drivers.loc[end,:] + invariants
###############################################################################
# Pricing at the Investment Horizon
###############################################################################
# Compute the projected $ P&L per unit of each stock for all scenarios
prices_prjn = np.exp(risk_drivers_prjn)
pnl = prices_prjn - prices.loc[end,:]
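# Note (clarifying comment, not in the original post): exponentiating the projected
# log-prices recovers the price scenarios at the horizon, so `pnl` holds the
# per-share dollar P&L of each stock in every scenario.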
###############################################################################
# Aggregation at the Investment Horizon
###############################################################################
# Aggregate the individual stock P&Ls into projected portfolio P&L for all scenarios
# Assume equally weighted portfolio at beginning of investment period
capital = 1e6
n_asset = 30
asset_tickers = tickers[0:30]
asset_weights = np.ones(n_asset) / n_asset
# initial holdings, i.e. number of shares
h0 = capital * asset_weights / prices.loc[end, asset_tickers]
pnl_portfolio = np.dot(pnl.loc[:, asset_tickers], h0)
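# Note (clarifying comment, not in the original post): pnl_portfolio[s] is the dollar
# P&L of the buy-and-hold holdings h0 over the horizon in scenario s, i.e. the
# scenario-s per-share P&Ls weighted by the number of shares held.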
# Apply flexible probabilities to portfolio P&L scenarios
n_scenarios = len(pnl_portfolio)
# Equal probs
equal_probs = np.ones(n_scenarios) / n_scenarios
# Time-conditioned flexible probs with exponential decay
half_life = 252 * 2 # half life of 2 years
es_lambda = math.log(2) / half_life
exp_probs = np.exp(-es_lambda * (np.arange(0, n_scenarios)[::-1]))
exp_probs = exp_probs / sum(exp_probs)
# effective number of scenarios
ens_exp_probs = np.exp(sum(-exp_probs * np.log(exp_probs)))
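# Illustrative sanity check (not in the original post): with equal probabilities the
# effective number of scenarios reduces to n_scenarios, since
# exp(-sum(p * log(p))) = exp(log(n)) = n when every p = 1/n.
ens_equal_probs = np.exp(sum(-equal_probs * np.log(equal_probs)))  # ~= n_scenarios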
# Projected Distribution of Portfolio P&L at Horizon with flexible probabilities
import rnr_meucci_functions as rnr
mu_port, sigma2_port = rnr.fp_mean_cov(pnl_portfolio.T, equal_probs)
mu_port_e, sigma2_port_e = rnr.fp_mean_cov(pnl_portfolio.T, exp_probs)
print('Ex-ante portfolio $P&L mean over horizon (equal probs) : {:,.0f}'.format(mu_port))
print('Ex-ante portfolio $P&L volatility over horizon (equal probs) : {:,.0f}'.format(np.sqrt(sigma2_port)))
print('')
print('Ex-ante portfolio $P&L mean over horizon (flex probs) : {:,.0f}'.format(mu_port_e))
print('Ex-ante portfolio $P&L volatility over horizon (flex probs) : {:,.0f}'.format(np.sqrt(sigma2_port_e)))
fig = plt.figure(figsize=(9, 8))
ax = fig.add_subplot(111)
ax.hist(pnl_portfolio, 50, weights=exp_probs)
ax.set_title('Ex-ante Distribution of Portfolio P&L (flexible probabilities with exponential decay)')
plt.show()
| mit |
rrohan/scikit-learn | sklearn/linear_model/bayes.py | 220 | 15248 | """
Various bayesian regression
"""
from __future__ import print_function
# Authors: V. Michel, F. Pedregosa, A. Gramfort
# License: BSD 3 clause
from math import log
import numpy as np
from scipy import linalg
from .base import LinearModel
from ..base import RegressorMixin
from ..utils.extmath import fast_logdet, pinvh
from ..utils import check_X_y
###############################################################################
# BayesianRidge regression
class BayesianRidge(LinearModel, RegressorMixin):
"""Bayesian ridge regression
Fit a Bayesian ridge model and optimize the regularization parameters
lambda (precision of the weights) and alpha (precision of the noise).
Read more in the :ref:`User Guide <bayesian_regression>`.
Parameters
----------
n_iter : int, optional
Maximum number of iterations. Default is 300.
tol : float, optional
Stop the algorithm if w has converged. Default is 1.e-3.
alpha_1 : float, optional
Hyper-parameter : shape parameter for the Gamma distribution prior
over the alpha parameter. Default is 1.e-6
alpha_2 : float, optional
Hyper-parameter : inverse scale parameter (rate parameter) for the
Gamma distribution prior over the alpha parameter.
Default is 1.e-6.
lambda_1 : float, optional
Hyper-parameter : shape parameter for the Gamma distribution prior
over the lambda parameter. Default is 1.e-6.
lambda_2 : float, optional
Hyper-parameter : inverse scale parameter (rate parameter) for the
Gamma distribution prior over the lambda parameter.
Default is 1.e-6
compute_score : boolean, optional
If True, compute the objective function at each step of the model.
Default is False
fit_intercept : boolean, optional
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
Default is True.
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If True, X will be copied; else, it may be overwritten.
verbose : boolean, optional, default False
Verbose mode when fitting the model.
Attributes
----------
coef_ : array, shape = (n_features)
Coefficients of the regression model (mean of distribution)
alpha_ : float
estimated precision of the noise.
lambda_ : array, shape = (n_features)
estimated precisions of the weights.
scores_ : float
if computed, value of the objective function (to be maximized)
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.BayesianRidge()
>>> clf.fit([[0,0], [1, 1], [2, 2]], [0, 1, 2])
... # doctest: +NORMALIZE_WHITESPACE
BayesianRidge(alpha_1=1e-06, alpha_2=1e-06, compute_score=False,
copy_X=True, fit_intercept=True, lambda_1=1e-06, lambda_2=1e-06,
n_iter=300, normalize=False, tol=0.001, verbose=False)
>>> clf.predict([[1, 1]])
array([ 1.])
Notes
-----
See examples/linear_model/plot_bayesian_ridge.py for an example.
"""
def __init__(self, n_iter=300, tol=1.e-3, alpha_1=1.e-6, alpha_2=1.e-6,
lambda_1=1.e-6, lambda_2=1.e-6, compute_score=False,
fit_intercept=True, normalize=False, copy_X=True,
verbose=False):
self.n_iter = n_iter
self.tol = tol
self.alpha_1 = alpha_1
self.alpha_2 = alpha_2
self.lambda_1 = lambda_1
self.lambda_2 = lambda_2
self.compute_score = compute_score
self.fit_intercept = fit_intercept
self.normalize = normalize
self.copy_X = copy_X
self.verbose = verbose
def fit(self, X, y):
"""Fit the model
Parameters
----------
X : numpy array of shape [n_samples,n_features]
Training data
y : numpy array of shape [n_samples]
Target values
Returns
-------
self : returns an instance of self.
"""
X, y = check_X_y(X, y, dtype=np.float64, y_numeric=True)
X, y, X_mean, y_mean, X_std = self._center_data(
X, y, self.fit_intercept, self.normalize, self.copy_X)
n_samples, n_features = X.shape
### Initialization of the values of the parameters
alpha_ = 1. / np.var(y)
lambda_ = 1.
verbose = self.verbose
lambda_1 = self.lambda_1
lambda_2 = self.lambda_2
alpha_1 = self.alpha_1
alpha_2 = self.alpha_2
self.scores_ = list()
coef_old_ = None
XT_y = np.dot(X.T, y)
U, S, Vh = linalg.svd(X, full_matrices=False)
eigen_vals_ = S ** 2
### Convergence loop of the bayesian ridge regression
for iter_ in range(self.n_iter):
### Compute mu and sigma
# sigma_ = lambda_ / alpha_ * np.eye(n_features) + np.dot(X.T, X)
# coef_ = sigma_^-1 * XT * y
if n_samples > n_features:
coef_ = np.dot(Vh.T,
Vh / (eigen_vals_ + lambda_ / alpha_)[:, None])
coef_ = np.dot(coef_, XT_y)
if self.compute_score:
logdet_sigma_ = - np.sum(
np.log(lambda_ + alpha_ * eigen_vals_))
else:
coef_ = np.dot(X.T, np.dot(
U / (eigen_vals_ + lambda_ / alpha_)[None, :], U.T))
coef_ = np.dot(coef_, y)
if self.compute_score:
logdet_sigma_ = lambda_ * np.ones(n_features)
logdet_sigma_[:n_samples] += alpha_ * eigen_vals_
logdet_sigma_ = - np.sum(np.log(logdet_sigma_))
### Update alpha and lambda
rmse_ = np.sum((y - np.dot(X, coef_)) ** 2)
gamma_ = (np.sum((alpha_ * eigen_vals_)
/ (lambda_ + alpha_ * eigen_vals_)))
lambda_ = ((gamma_ + 2 * lambda_1)
/ (np.sum(coef_ ** 2) + 2 * lambda_2))
alpha_ = ((n_samples - gamma_ + 2 * alpha_1)
/ (rmse_ + 2 * alpha_2))
### Compute the objective function
if self.compute_score:
s = lambda_1 * log(lambda_) - lambda_2 * lambda_
s += alpha_1 * log(alpha_) - alpha_2 * alpha_
s += 0.5 * (n_features * log(lambda_)
+ n_samples * log(alpha_)
- alpha_ * rmse_
- (lambda_ * np.sum(coef_ ** 2))
- logdet_sigma_
- n_samples * log(2 * np.pi))
self.scores_.append(s)
### Check for convergence
if iter_ != 0 and np.sum(np.abs(coef_old_ - coef_)) < self.tol:
if verbose:
print("Convergence after ", str(iter_), " iterations")
break
coef_old_ = np.copy(coef_)
self.alpha_ = alpha_
self.lambda_ = lambda_
self.coef_ = coef_
self._set_intercept(X_mean, y_mean, X_std)
return self
###############################################################################
# ARD (Automatic Relevance Determination) regression
class ARDRegression(LinearModel, RegressorMixin):
"""Bayesian ARD regression.
Fit the weights of a regression model, using an ARD prior. The weights of
the regression model are assumed to follow Gaussian distributions.
Also estimate the parameters lambda (precisions of the distributions of the
weights) and alpha (precision of the distribution of the noise).
The estimation is done by an iterative procedure (Evidence Maximization)
Read more in the :ref:`User Guide <bayesian_regression>`.
Parameters
----------
n_iter : int, optional
Maximum number of iterations. Default is 300
tol : float, optional
Stop the algorithm if w has converged. Default is 1.e-3.
alpha_1 : float, optional
Hyper-parameter : shape parameter for the Gamma distribution prior
over the alpha parameter. Default is 1.e-6.
alpha_2 : float, optional
Hyper-parameter : inverse scale parameter (rate parameter) for the
Gamma distribution prior over the alpha parameter. Default is 1.e-6.
lambda_1 : float, optional
Hyper-parameter : shape parameter for the Gamma distribution prior
over the lambda parameter. Default is 1.e-6.
lambda_2 : float, optional
Hyper-parameter : inverse scale parameter (rate parameter) for the
Gamma distribution prior over the lambda parameter. Default is 1.e-6.
compute_score : boolean, optional
If True, compute the objective function at each step of the model.
Default is False.
threshold_lambda : float, optional
threshold for removing (pruning) weights with high precision from
the computation. Default is 1.e+4.
fit_intercept : boolean, optional
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
Default is True.
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True.
If True, X will be copied; else, it may be overwritten.
verbose : boolean, optional, default False
Verbose mode when fitting the model.
Attributes
----------
coef_ : array, shape = (n_features)
Coefficients of the regression model (mean of distribution)
alpha_ : float
estimated precision of the noise.
lambda_ : array, shape = (n_features)
estimated precisions of the weights.
sigma_ : array, shape = (n_features, n_features)
estimated variance-covariance matrix of the weights
scores_ : float
if computed, value of the objective function (to be maximized)
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.ARDRegression()
>>> clf.fit([[0,0], [1, 1], [2, 2]], [0, 1, 2])
... # doctest: +NORMALIZE_WHITESPACE
ARDRegression(alpha_1=1e-06, alpha_2=1e-06, compute_score=False,
copy_X=True, fit_intercept=True, lambda_1=1e-06, lambda_2=1e-06,
n_iter=300, normalize=False, threshold_lambda=10000.0, tol=0.001,
verbose=False)
>>> clf.predict([[1, 1]])
array([ 1.])
Notes
--------
See examples/linear_model/plot_ard.py for an example.
"""
def __init__(self, n_iter=300, tol=1.e-3, alpha_1=1.e-6, alpha_2=1.e-6,
lambda_1=1.e-6, lambda_2=1.e-6, compute_score=False,
threshold_lambda=1.e+4, fit_intercept=True, normalize=False,
copy_X=True, verbose=False):
self.n_iter = n_iter
self.tol = tol
self.fit_intercept = fit_intercept
self.normalize = normalize
self.alpha_1 = alpha_1
self.alpha_2 = alpha_2
self.lambda_1 = lambda_1
self.lambda_2 = lambda_2
self.compute_score = compute_score
self.threshold_lambda = threshold_lambda
self.copy_X = copy_X
self.verbose = verbose
def fit(self, X, y):
"""Fit the ARDRegression model according to the given training data
and parameters.
Iterative procedure to maximize the evidence
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array, shape = [n_samples]
Target values (integers)
Returns
-------
self : returns an instance of self.
"""
X, y = check_X_y(X, y, dtype=np.float64, y_numeric=True)
n_samples, n_features = X.shape
coef_ = np.zeros(n_features)
X, y, X_mean, y_mean, X_std = self._center_data(
X, y, self.fit_intercept, self.normalize, self.copy_X)
### Launch the convergence loop
keep_lambda = np.ones(n_features, dtype=bool)
lambda_1 = self.lambda_1
lambda_2 = self.lambda_2
alpha_1 = self.alpha_1
alpha_2 = self.alpha_2
verbose = self.verbose
### Initialization of the values of the parameters
alpha_ = 1. / np.var(y)
lambda_ = np.ones(n_features)
self.scores_ = list()
coef_old_ = None
### Iterative procedure of ARDRegression
for iter_ in range(self.n_iter):
### Compute mu and sigma (using Woodbury matrix identity)
sigma_ = pinvh(np.eye(n_samples) / alpha_ +
np.dot(X[:, keep_lambda] *
np.reshape(1. / lambda_[keep_lambda], [1, -1]),
X[:, keep_lambda].T))
sigma_ = np.dot(sigma_, X[:, keep_lambda]
* np.reshape(1. / lambda_[keep_lambda], [1, -1]))
sigma_ = - np.dot(np.reshape(1. / lambda_[keep_lambda], [-1, 1])
* X[:, keep_lambda].T, sigma_)
sigma_.flat[::(sigma_.shape[1] + 1)] += 1. / lambda_[keep_lambda]
coef_[keep_lambda] = alpha_ * np.dot(
sigma_, np.dot(X[:, keep_lambda].T, y))
### Update alpha and lambda
rmse_ = np.sum((y - np.dot(X, coef_)) ** 2)
gamma_ = 1. - lambda_[keep_lambda] * np.diag(sigma_)
lambda_[keep_lambda] = ((gamma_ + 2. * lambda_1)
/ ((coef_[keep_lambda]) ** 2
+ 2. * lambda_2))
alpha_ = ((n_samples - gamma_.sum() + 2. * alpha_1)
/ (rmse_ + 2. * alpha_2))
### Prune the weights with a precision over a threshold
keep_lambda = lambda_ < self.threshold_lambda
coef_[~keep_lambda] = 0
### Compute the objective function
if self.compute_score:
s = (lambda_1 * np.log(lambda_) - lambda_2 * lambda_).sum()
s += alpha_1 * log(alpha_) - alpha_2 * alpha_
s += 0.5 * (fast_logdet(sigma_) + n_samples * log(alpha_)
+ np.sum(np.log(lambda_)))
s -= 0.5 * (alpha_ * rmse_ + (lambda_ * coef_ ** 2).sum())
self.scores_.append(s)
### Check for convergence
if iter_ > 0 and np.sum(np.abs(coef_old_ - coef_)) < self.tol:
if verbose:
print("Converged after %s iterations" % iter_)
break
coef_old_ = np.copy(coef_)
self.coef_ = coef_
self.alpha_ = alpha_
self.sigma_ = sigma_
self.lambda_ = lambda_
self._set_intercept(X_mean, y_mean, X_std)
return self
| bsd-3-clause |
devs1991/test_edx_docmode | venv/lib/python2.7/site-packages/sklearn/utils/testing.py | 2 | 4641 | """Testing utilities."""
# Copyright (c) 2011 Pietro Berkes
# License: Simplified BSD
import inspect
import pkgutil
import urllib2
from StringIO import StringIO
import scipy as sp
import sklearn
from sklearn.base import BaseEstimator
from .fixes import savemat
try:
from nose.tools import assert_in, assert_not_in
except ImportError:
# Nose < 1.0.0
from nose.tools import assert_true, assert_false
def assert_in(x, container):
assert_true(x in container, msg="%r in %r" % (x, container))
def assert_not_in(x, container):
assert_false(x in container, msg="%r in %r" % (x, container))
def _assert_less(a, b, msg=None):
message = "%r is not lower than %r" % (a, b)
if msg is not None:
message += ": " + msg
assert a < b, message
def _assert_greater(a, b, msg=None):
message = "%r is not greater than %r" % (a, b)
if msg is not None:
message += ": " + msg
assert a > b, message
try:
from nose.tools import assert_less
except ImportError:
assert_less = _assert_less
try:
from nose.tools import assert_greater
except ImportError:
assert_greater = _assert_greater
def fake_mldata_cache(columns_dict, dataname, matfile, ordering=None):
"""Create a fake mldata data set in the cache_path.
Parameters
----------
columns_dict: contains data as
columns_dict[column_name] = array of data
dataname: name of data set
matfile: file-like object or file name
ordering: list of column_names, determines the ordering in the data set
Note: this function transposes all arrays, while fetch_mldata only
transposes 'data'; take that into account in the tests.
"""
datasets = dict(columns_dict)
# transpose all variables
for name in datasets:
datasets[name] = datasets[name].T
if ordering is None:
ordering = sorted(list(datasets.keys()))
# NOTE: setting up this array is tricky, because of the way Matlab
# re-packages 1D arrays
datasets['mldata_descr_ordering'] = sp.empty((1, len(ordering)),
dtype='object')
for i, name in enumerate(ordering):
datasets['mldata_descr_ordering'][0, i] = name
savemat(matfile, datasets, oned_as='column')
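# Illustrative usage sketch (assumption, not part of the original module): write a
# small fake data set into an in-memory .mat buffer for fetch_mldata tests, e.g.
#     matfile = StringIO()
#     fake_mldata_cache({'data': sp.ones((3, 2)), 'label': sp.arange(3)},
#                       'some-dataset', matfile, ordering=['label', 'data'])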
class mock_urllib2(object):
def __init__(self, mock_datasets):
"""Object that mocks the urllib2 module to fake requests to mldata.
`mock_datasets` is a dictionary of {dataset_name: data_dict}, or
{dataset_name: (data_dict, ordering).
`data_dict` itself is a dictionary of {column_name: data_array},
and `ordering` is a list of column_names to determine the ordering
in the data set (see `fake_mldata_cache` for details).
When requesting a dataset with a name that is in mock_datasets,
this object creates a fake dataset in a StringIO object and
returns it. Otherwise, it raises an URLError.
"""
self.mock_datasets = mock_datasets
class HTTPError(urllib2.URLError):
code = 404
def urlopen(self, urlname):
dataset_name = urlname.split('/')[-1]
if dataset_name in self.mock_datasets:
resource_name = '_' + dataset_name
matfile = StringIO()
dataset = self.mock_datasets[dataset_name]
ordering = None
if isinstance(dataset, tuple):
dataset, ordering = dataset
fake_mldata_cache(dataset, resource_name, matfile, ordering)
matfile.seek(0)
return matfile
else:
raise mock_urllib2.HTTPError('%s not found.' % urlname)
def quote(self, string, safe='/'):
return urllib2.quote(string, safe)
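# Illustrative usage sketch (assumption, not part of the original module): a test
# would construct the mock with the fake data sets it should serve, e.g.
#     fake = mock_urllib2({'some-dataset': {'data': sp.ones((2, 3))}})
# and substitute it for the urllib2 module used by the mldata fetcher; requesting
# any other dataset name then raises mock_urllib2.HTTPError.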
def all_estimators():
def is_abstract(c):
if not(hasattr(c, '__abstractmethods__')):
return False
if not len(c.__abstractmethods__):
return False
return True
all_classes = []
# get parent folder
path = sklearn.__path__
for importer, modname, ispkg in pkgutil.walk_packages(path=path,
prefix='sklearn.', onerror=lambda x: None):
module = __import__(modname, fromlist="dummy")
classes = inspect.getmembers(module, inspect.isclass)
# get rid of abstract base classes
all_classes.extend(classes)
all_classes = set(all_classes)
estimators = [c for c in all_classes if issubclass(c[1], BaseEstimator)]
estimators = [c for c in estimators if not is_abstract(c[1])]
# We sort in order to have reproducible test failures
return sorted(estimators)
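# Illustrative usage sketch (assumption, not part of the original module): the result
# is a sorted list of (name, class) pairs, e.g.
#     estimator_names = [name for name, Est in all_estimators()]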
| agpl-3.0 |
gef756/scipy | scipy/cluster/tests/test_hierarchy.py | 26 | 35153 | #! /usr/bin/env python
#
# Author: Damian Eads
# Date: April 17, 2008
#
# Copyright (C) 2008 Damian Eads
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# 3. The name of the author may not be used to endorse or promote
# products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
# GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import division, print_function, absolute_import
import numpy as np
from numpy.testing import (TestCase, run_module_suite, dec, assert_raises,
assert_allclose, assert_equal, assert_)
from scipy._lib.six import xrange, u
import scipy.cluster.hierarchy
from scipy.cluster.hierarchy import (
linkage, from_mlab_linkage, to_mlab_linkage, num_obs_linkage, inconsistent,
cophenet, fclusterdata, fcluster, is_isomorphic, single, leaders,
correspond, is_monotonic, maxdists, maxinconsts, maxRstat,
is_valid_linkage, is_valid_im, to_tree, leaves_list, dendrogram,
set_link_color_palette)
from scipy.spatial.distance import pdist
import hierarchy_test_data
# Matplotlib is not a scipy dependency but is optionally used in dendrogram, so
# check if it's available
try:
import matplotlib
# and set the backend to be Agg (no gui)
matplotlib.use('Agg')
# before importing pyplot
import matplotlib.pyplot as plt
have_matplotlib = True
except:
have_matplotlib = False
class TestLinkage(object):
def test_linkage_empty_distance_matrix(self):
# Tests linkage(Y) where Y is a 0x4 linkage matrix. Exception expected.
y = np.zeros((0,))
assert_raises(ValueError, linkage, y)
def test_linkage_tdist(self):
for method in ['single', 'complete', 'average', 'weighted', u('single')]:
yield self.check_linkage_tdist, method
def check_linkage_tdist(self, method):
# Tests linkage(Y, method) on the tdist data set.
Z = linkage(hierarchy_test_data.ytdist, method)
expectedZ = getattr(hierarchy_test_data, 'linkage_ytdist_' + method)
assert_allclose(Z, expectedZ, atol=1e-10)
def test_linkage_X(self):
for method in ['centroid', 'median', 'ward']:
yield self.check_linkage_q, method
def check_linkage_q(self, method):
# Tests linkage(Y, method) on the Q data set.
Z = linkage(hierarchy_test_data.X, method)
expectedZ = getattr(hierarchy_test_data, 'linkage_X_' + method)
assert_allclose(Z, expectedZ, atol=1e-06)
class TestInconsistent(object):
def test_inconsistent_tdist(self):
for depth in hierarchy_test_data.inconsistent_ytdist:
yield self.check_inconsistent_tdist, depth
def check_inconsistent_tdist(self, depth):
Z = hierarchy_test_data.linkage_ytdist_single
assert_allclose(inconsistent(Z, depth),
hierarchy_test_data.inconsistent_ytdist[depth])
class TestCopheneticDistance(object):
def test_linkage_cophenet_tdist_Z(self):
# Tests cophenet(Z) on tdist data set.
expectedM = np.array([268, 295, 255, 255, 295, 295, 268, 268, 295, 295,
295, 138, 219, 295, 295])
Z = hierarchy_test_data.linkage_ytdist_single
M = cophenet(Z)
assert_allclose(M, expectedM, atol=1e-10)
def test_linkage_cophenet_tdist_Z_Y(self):
# Tests cophenet(Z, Y) on tdist data set.
Z = hierarchy_test_data.linkage_ytdist_single
(c, M) = cophenet(Z, hierarchy_test_data.ytdist)
expectedM = np.array([268, 295, 255, 255, 295, 295, 268, 268, 295, 295,
295, 138, 219, 295, 295])
expectedc = 0.639931296433393415057366837573
assert_allclose(c, expectedc, atol=1e-10)
assert_allclose(M, expectedM, atol=1e-10)
class TestMLabLinkageConversion(object):
def test_mlab_linkage_conversion_empty(self):
# Tests from/to_mlab_linkage on empty linkage array.
X = np.asarray([])
assert_equal(from_mlab_linkage([]), X)
assert_equal(to_mlab_linkage([]), X)
def test_mlab_linkage_conversion_single_row(self):
# Tests from/to_mlab_linkage on linkage array with single row.
Z = np.asarray([[0., 1., 3., 2.]])
Zm = [[1, 2, 3]]
assert_equal(from_mlab_linkage(Zm), Z)
assert_equal(to_mlab_linkage(Z), Zm)
def test_mlab_linkage_conversion_multiple_rows(self):
# Tests from/to_mlab_linkage on linkage array with multiple rows.
Zm = np.asarray([[3, 6, 138], [4, 5, 219],
[1, 8, 255], [2, 9, 268], [7, 10, 295]])
Z = np.array([[2., 5., 138., 2.],
[3., 4., 219., 2.],
[0., 7., 255., 3.],
[1., 8., 268., 4.],
[6., 9., 295., 6.]],
dtype=np.double)
assert_equal(from_mlab_linkage(Zm), Z)
assert_equal(to_mlab_linkage(Z), Zm)
class TestFcluster(object):
def test_fclusterdata(self):
for t in hierarchy_test_data.fcluster_inconsistent:
yield self.check_fclusterdata, t, 'inconsistent'
for t in hierarchy_test_data.fcluster_distance:
yield self.check_fclusterdata, t, 'distance'
for t in hierarchy_test_data.fcluster_maxclust:
yield self.check_fclusterdata, t, 'maxclust'
def check_fclusterdata(self, t, criterion):
# Tests fclusterdata(X, criterion=criterion, t=t) on a random 3-cluster data set.
expectedT = getattr(hierarchy_test_data, 'fcluster_' + criterion)[t]
X = hierarchy_test_data.Q_X
T = fclusterdata(X, criterion=criterion, t=t)
assert_(is_isomorphic(T, expectedT))
def test_fcluster(self):
for t in hierarchy_test_data.fcluster_inconsistent:
yield self.check_fcluster, t, 'inconsistent'
for t in hierarchy_test_data.fcluster_distance:
yield self.check_fcluster, t, 'distance'
for t in hierarchy_test_data.fcluster_maxclust:
yield self.check_fcluster, t, 'maxclust'
def check_fcluster(self, t, criterion):
# Tests fcluster(Z, criterion=criterion, t=t) on a random 3-cluster data set.
expectedT = getattr(hierarchy_test_data, 'fcluster_' + criterion)[t]
Z = single(hierarchy_test_data.Q_X)
T = fcluster(Z, criterion=criterion, t=t)
assert_(is_isomorphic(T, expectedT))
def test_fcluster_monocrit(self):
for t in hierarchy_test_data.fcluster_distance:
yield self.check_fcluster_monocrit, t
for t in hierarchy_test_data.fcluster_maxclust:
yield self.check_fcluster_maxclust_monocrit, t
def check_fcluster_monocrit(self, t):
expectedT = hierarchy_test_data.fcluster_distance[t]
Z = single(hierarchy_test_data.Q_X)
T = fcluster(Z, t, criterion='monocrit', monocrit=maxdists(Z))
assert_(is_isomorphic(T, expectedT))
def check_fcluster_maxclust_monocrit(self, t):
expectedT = hierarchy_test_data.fcluster_maxclust[t]
Z = single(hierarchy_test_data.Q_X)
T = fcluster(Z, t, criterion='maxclust_monocrit', monocrit=maxdists(Z))
assert_(is_isomorphic(T, expectedT))
class TestLeaders(object):
def test_leaders_single(self):
# Tests leaders using a flat clustering generated by single linkage.
X = hierarchy_test_data.Q_X
Y = pdist(X)
Z = linkage(Y)
T = fcluster(Z, criterion='maxclust', t=3)
Lright = (np.array([53, 55, 56]), np.array([2, 3, 1]))
L = leaders(Z, T)
assert_equal(L, Lright)
class TestIsIsomorphic(object):
def test_is_isomorphic_1(self):
# Tests is_isomorphic on test case #1 (one flat cluster, different labellings)
a = [1, 1, 1]
b = [2, 2, 2]
assert_(is_isomorphic(a, b))
assert_(is_isomorphic(b, a))
def test_is_isomorphic_2(self):
# Tests is_isomorphic on test case #2 (two flat clusters, different labelings)
a = [1, 7, 1]
b = [2, 3, 2]
assert_(is_isomorphic(a, b))
assert_(is_isomorphic(b, a))
def test_is_isomorphic_3(self):
# Tests is_isomorphic on test case #3 (no flat clusters)
a = []
b = []
assert_(is_isomorphic(a, b))
def test_is_isomorphic_4A(self):
# Tests is_isomorphic on test case #4A (3 flat clusters, different labelings, isomorphic)
a = [1, 2, 3]
b = [1, 3, 2]
assert_(is_isomorphic(a, b))
assert_(is_isomorphic(b, a))
def test_is_isomorphic_4B(self):
# Tests is_isomorphic on test case #4B (3 flat clusters, different labelings, nonisomorphic)
a = [1, 2, 3, 3]
b = [1, 3, 2, 3]
assert_(is_isomorphic(a, b) == False)
assert_(is_isomorphic(b, a) == False)
def test_is_isomorphic_4C(self):
# Tests is_isomorphic on test case #4C (3 flat clusters, different labelings, isomorphic)
a = [7, 2, 3]
b = [6, 3, 2]
assert_(is_isomorphic(a, b))
assert_(is_isomorphic(b, a))
def test_is_isomorphic_5(self):
# Tests is_isomorphic on test case #5 (1000 observations, 2/3/5 random
# clusters, random permutation of the labeling).
for nc in [2, 3, 5]:
yield self.help_is_isomorphic_randperm, 1000, nc
def test_is_isomorphic_6(self):
# Tests is_isomorphic on test case #5A (1000 observations, 2/3/5 random
# clusters, random permutation of the labeling, slightly
# nonisomorphic.)
for nc in [2, 3, 5]:
yield self.help_is_isomorphic_randperm, 1000, nc, True, 5
def help_is_isomorphic_randperm(self, nobs, nclusters, noniso=False, nerrors=0):
for k in range(3):
a = np.int_(np.random.rand(nobs) * nclusters)
b = np.zeros(a.size, dtype=np.int_)
P = np.random.permutation(nclusters)
for i in xrange(0, a.shape[0]):
b[i] = P[a[i]]
if noniso:
Q = np.random.permutation(nobs)
b[Q[0:nerrors]] += 1
b[Q[0:nerrors]] %= nclusters
assert_(is_isomorphic(a, b) == (not noniso))
assert_(is_isomorphic(b, a) == (not noniso))
class TestIsValidLinkage(object):
def test_is_valid_linkage_various_size(self):
for nrow, ncol, valid in [(2, 5, False), (2, 3, False),
(1, 4, True), (2, 4, True)]:
yield self.check_is_valid_linkage_various_size, nrow, ncol, valid
def check_is_valid_linkage_various_size(self, nrow, ncol, valid):
# Tests is_valid_linkage(Z) with linkage matrics of various sizes
Z = np.asarray([[0, 1, 3.0, 2, 5],
[3, 2, 4.0, 3, 3]], dtype=np.double)
Z = Z[:nrow, :ncol]
assert_(is_valid_linkage(Z) == valid)
if not valid:
assert_raises(ValueError, is_valid_linkage, Z, throw=True)
def test_is_valid_linkage_int_type(self):
# Tests is_valid_linkage(Z) with integer type.
Z = np.asarray([[0, 1, 3.0, 2],
[3, 2, 4.0, 3]], dtype=int)
assert_(is_valid_linkage(Z) == False)
assert_raises(TypeError, is_valid_linkage, Z, throw=True)
def test_is_valid_linkage_empty(self):
# Tests is_valid_linkage(Z) with empty linkage.
Z = np.zeros((0, 4), dtype=np.double)
assert_(is_valid_linkage(Z) == False)
assert_raises(ValueError, is_valid_linkage, Z, throw=True)
def test_is_valid_linkage_4_and_up(self):
# Tests is_valid_linkage(Z) on linkage on observation sets between
# sizes 4 and 15 (step size 3).
for i in xrange(4, 15, 3):
y = np.random.rand(i*(i-1)//2)
Z = linkage(y)
assert_(is_valid_linkage(Z) == True)
def test_is_valid_linkage_4_and_up_neg_index_left(self):
# Tests is_valid_linkage(Z) on linkage on observation sets between
# sizes 4 and 15 (step size 3) with negative indices (left).
for i in xrange(4, 15, 3):
y = np.random.rand(i*(i-1)//2)
Z = linkage(y)
Z[i//2,0] = -2
assert_(is_valid_linkage(Z) == False)
assert_raises(ValueError, is_valid_linkage, Z, throw=True)
def test_is_valid_linkage_4_and_up_neg_index_right(self):
# Tests is_valid_linkage(Z) on linkage on observation sets between
# sizes 4 and 15 (step size 3) with negative indices (right).
for i in xrange(4, 15, 3):
y = np.random.rand(i*(i-1)//2)
Z = linkage(y)
Z[i//2,1] = -2
assert_(is_valid_linkage(Z) == False)
assert_raises(ValueError, is_valid_linkage, Z, throw=True)
def test_is_valid_linkage_4_and_up_neg_dist(self):
# Tests is_valid_linkage(Z) on linkage on observation sets between
# sizes 4 and 15 (step size 3) with negative distances.
for i in xrange(4, 15, 3):
y = np.random.rand(i*(i-1)//2)
Z = linkage(y)
Z[i//2,2] = -0.5
assert_(is_valid_linkage(Z) == False)
assert_raises(ValueError, is_valid_linkage, Z, throw=True)
def test_is_valid_linkage_4_and_up_neg_counts(self):
# Tests is_valid_linkage(Z) on linkage on observation sets between
# sizes 4 and 15 (step size 3) with negative counts.
for i in xrange(4, 15, 3):
y = np.random.rand(i*(i-1)//2)
Z = linkage(y)
Z[i//2,3] = -2
assert_(is_valid_linkage(Z) == False)
assert_raises(ValueError, is_valid_linkage, Z, throw=True)
class TestIsValidInconsistent(object):
def test_is_valid_im_int_type(self):
# Tests is_valid_im(R) with integer type.
R = np.asarray([[0, 1, 3.0, 2],
[3, 2, 4.0, 3]], dtype=int)
assert_(is_valid_im(R) == False)
assert_raises(TypeError, is_valid_im, R, throw=True)
def test_is_valid_im_various_size(self):
for nrow, ncol, valid in [(2, 5, False), (2, 3, False),
(1, 4, True), (2, 4, True)]:
yield self.check_is_valid_im_various_size, nrow, ncol, valid
def check_is_valid_im_various_size(self, nrow, ncol, valid):
# Tests is_valid_im(R) with linkage matrics of various sizes
R = np.asarray([[0, 1, 3.0, 2, 5],
[3, 2, 4.0, 3, 3]], dtype=np.double)
R = R[:nrow, :ncol]
assert_(is_valid_im(R) == valid)
if not valid:
assert_raises(ValueError, is_valid_im, R, throw=True)
def test_is_valid_im_empty(self):
# Tests is_valid_im(R) with empty inconsistency matrix.
R = np.zeros((0, 4), dtype=np.double)
assert_(is_valid_im(R) == False)
assert_raises(ValueError, is_valid_im, R, throw=True)
def test_is_valid_im_4_and_up(self):
# Tests is_valid_im(R) on im on observation sets between sizes 4 and 15
# (step size 3).
for i in xrange(4, 15, 3):
y = np.random.rand(i*(i-1)//2)
Z = linkage(y)
R = inconsistent(Z)
assert_(is_valid_im(R) == True)
def test_is_valid_im_4_and_up_neg_index_left(self):
# Tests is_valid_im(R) on im on observation sets between sizes 4 and 15
# (step size 3) with negative link height means.
for i in xrange(4, 15, 3):
y = np.random.rand(i*(i-1)//2)
Z = linkage(y)
R = inconsistent(Z)
R[i//2,0] = -2.0
assert_(is_valid_im(R) == False)
assert_raises(ValueError, is_valid_im, R, throw=True)
def test_is_valid_im_4_and_up_neg_index_right(self):
# Tests is_valid_im(R) on im on observation sets between sizes 4 and 15
# (step size 3) with negative link height standard deviations.
for i in xrange(4, 15, 3):
y = np.random.rand(i*(i-1)//2)
Z = linkage(y)
R = inconsistent(Z)
R[i//2,1] = -2.0
assert_(is_valid_im(R) == False)
assert_raises(ValueError, is_valid_im, R, throw=True)
def test_is_valid_im_4_and_up_neg_dist(self):
# Tests is_valid_im(R) on im on observation sets between sizes 4 and 15
# (step size 3) with negative link counts.
for i in xrange(4, 15, 3):
y = np.random.rand(i*(i-1)//2)
Z = linkage(y)
R = inconsistent(Z)
R[i//2,2] = -0.5
assert_(is_valid_im(R) == False)
assert_raises(ValueError, is_valid_im, R, throw=True)
class TestNumObsLinkage(TestCase):
def test_num_obs_linkage_empty(self):
# Tests num_obs_linkage(Z) with empty linkage.
Z = np.zeros((0, 4), dtype=np.double)
assert_raises(ValueError, num_obs_linkage, Z)
def test_num_obs_linkage_1x4(self):
# Tests num_obs_linkage(Z) on linkage over 2 observations.
Z = np.asarray([[0, 1, 3.0, 2]], dtype=np.double)
assert_equal(num_obs_linkage(Z), 2)
def test_num_obs_linkage_2x4(self):
# Tests num_obs_linkage(Z) on linkage over 3 observations.
Z = np.asarray([[0, 1, 3.0, 2],
[3, 2, 4.0, 3]], dtype=np.double)
assert_equal(num_obs_linkage(Z), 3)
def test_num_obs_linkage_4_and_up(self):
# Tests num_obs_linkage(Z) on linkage on observation sets between sizes
# 4 and 15 (step size 3).
for i in xrange(4, 15, 3):
y = np.random.rand(i*(i-1)//2)
Z = linkage(y)
assert_equal(num_obs_linkage(Z), i)
class TestLeavesList(object):
def test_leaves_list_1x4(self):
# Tests leaves_list(Z) on a 1x4 linkage.
Z = np.asarray([[0, 1, 3.0, 2]], dtype=np.double)
to_tree(Z)
assert_equal(leaves_list(Z), [0, 1])
def test_leaves_list_2x4(self):
# Tests leaves_list(Z) on a 2x4 linkage.
Z = np.asarray([[0, 1, 3.0, 2],
[3, 2, 4.0, 3]], dtype=np.double)
to_tree(Z)
assert_equal(leaves_list(Z), [0, 1, 2])
def test_leaves_list_Q(self):
for method in ['single', 'complete', 'average', 'weighted', 'centroid',
'median', 'ward']:
yield self.check_leaves_list_Q, method
def check_leaves_list_Q(self, method):
# Tests leaves_list(Z) on the Q data set
X = hierarchy_test_data.Q_X
Z = linkage(X, method)
node = to_tree(Z)
assert_equal(node.pre_order(), leaves_list(Z))
def test_Q_subtree_pre_order(self):
# Tests that pre_order() works when called on sub-trees.
X = hierarchy_test_data.Q_X
Z = linkage(X, 'single')
node = to_tree(Z)
assert_equal(node.pre_order(), (node.get_left().pre_order()
+ node.get_right().pre_order()))
class TestCorrespond(TestCase):
def test_correspond_empty(self):
# Tests correspond(Z, y) with empty linkage and condensed distance matrix.
y = np.zeros((0,))
Z = np.zeros((0,4))
assert_raises(ValueError, correspond, Z, y)
def test_correspond_2_and_up(self):
# Tests correspond(Z, y) on linkage and CDMs over observation sets of
# different sizes.
for i in xrange(2, 4):
y = np.random.rand(i*(i-1)//2)
Z = linkage(y)
assert_(correspond(Z, y))
for i in xrange(4, 15, 3):
y = np.random.rand(i*(i-1)//2)
Z = linkage(y)
assert_(correspond(Z, y))
def test_correspond_4_and_up(self):
# Tests correspond(Z, y) on linkage and CDMs over observation sets of
# different sizes. Correspondence should be false.
for (i, j) in (list(zip(list(range(2, 4)), list(range(3, 5)))) +
list(zip(list(range(3, 5)), list(range(2, 4))))):
y = np.random.rand(i*(i-1)//2)
y2 = np.random.rand(j*(j-1)//2)
Z = linkage(y)
Z2 = linkage(y2)
assert_equal(correspond(Z, y2), False)
assert_equal(correspond(Z2, y), False)
def test_correspond_4_and_up_2(self):
# Tests correspond(Z, y) on linkage and CDMs over observation sets of
# different sizes. Correspondence should be false.
for (i, j) in (list(zip(list(range(2, 7)), list(range(16, 21)))) +
list(zip(list(range(2, 7)), list(range(16, 21))))):
y = np.random.rand(i*(i-1)//2)
y2 = np.random.rand(j*(j-1)//2)
Z = linkage(y)
Z2 = linkage(y2)
assert_equal(correspond(Z, y2), False)
assert_equal(correspond(Z2, y), False)
def test_num_obs_linkage_multi_matrix(self):
# Tests num_obs_linkage with observation matrices of multiple sizes.
for n in xrange(2, 10):
X = np.random.rand(n, 4)
Y = pdist(X)
Z = linkage(Y)
assert_equal(num_obs_linkage(Z), n)
class TestIsMonotonic(TestCase):
def test_is_monotonic_empty(self):
# Tests is_monotonic(Z) on an empty linkage.
Z = np.zeros((0, 4))
assert_raises(ValueError, is_monotonic, Z)
def test_is_monotonic_1x4(self):
# Tests is_monotonic(Z) on 1x4 linkage. Expecting True.
Z = np.asarray([[0, 1, 0.3, 2]], dtype=np.double)
assert_equal(is_monotonic(Z), True)
def test_is_monotonic_2x4_T(self):
# Tests is_monotonic(Z) on 2x4 linkage. Expecting True.
Z = np.asarray([[0, 1, 0.3, 2],
[2, 3, 0.4, 3]], dtype=np.double)
assert_equal(is_monotonic(Z), True)
def test_is_monotonic_2x4_F(self):
# Tests is_monotonic(Z) on 2x4 linkage. Expecting False.
Z = np.asarray([[0, 1, 0.4, 2],
[2, 3, 0.3, 3]], dtype=np.double)
assert_equal(is_monotonic(Z), False)
def test_is_monotonic_3x4_T(self):
# Tests is_monotonic(Z) on 3x4 linkage. Expecting True.
Z = np.asarray([[0, 1, 0.3, 2],
[2, 3, 0.4, 2],
[4, 5, 0.6, 4]], dtype=np.double)
assert_equal(is_monotonic(Z), True)
def test_is_monotonic_3x4_F1(self):
# Tests is_monotonic(Z) on 3x4 linkage (case 1). Expecting False.
Z = np.asarray([[0, 1, 0.3, 2],
[2, 3, 0.2, 2],
[4, 5, 0.6, 4]], dtype=np.double)
assert_equal(is_monotonic(Z), False)
def test_is_monotonic_3x4_F2(self):
# Tests is_monotonic(Z) on 3x4 linkage (case 2). Expecting False.
Z = np.asarray([[0, 1, 0.8, 2],
[2, 3, 0.4, 2],
[4, 5, 0.6, 4]], dtype=np.double)
assert_equal(is_monotonic(Z), False)
def test_is_monotonic_3x4_F3(self):
# Tests is_monotonic(Z) on 3x4 linkage (case 3). Expecting False
Z = np.asarray([[0, 1, 0.3, 2],
[2, 3, 0.4, 2],
[4, 5, 0.2, 4]], dtype=np.double)
assert_equal(is_monotonic(Z), False)
def test_is_monotonic_tdist_linkage1(self):
# Tests is_monotonic(Z) on clustering generated by single linkage on
# tdist data set. Expecting True.
Z = linkage(hierarchy_test_data.ytdist, 'single')
assert_equal(is_monotonic(Z), True)
def test_is_monotonic_tdist_linkage2(self):
# Tests is_monotonic(Z) on clustering generated by single linkage on
# tdist data set. Perturbing. Expecting False.
Z = linkage(hierarchy_test_data.ytdist, 'single')
Z[2,2] = 0.0
assert_equal(is_monotonic(Z), False)
def test_is_monotonic_Q_linkage(self):
# Tests is_monotonic(Z) on clustering generated by single linkage on
# Q data set. Expecting True.
X = hierarchy_test_data.Q_X
Z = linkage(X, 'single')
assert_equal(is_monotonic(Z), True)
class TestMaxDists(object):
def test_maxdists_empty_linkage(self):
# Tests maxdists(Z) on empty linkage. Expecting exception.
Z = np.zeros((0, 4), dtype=np.double)
assert_raises(ValueError, maxdists, Z)
def test_maxdists_one_cluster_linkage(self):
# Tests maxdists(Z) on linkage with one cluster.
Z = np.asarray([[0, 1, 0.3, 4]], dtype=np.double)
MD = maxdists(Z)
expectedMD = calculate_maximum_distances(Z)
assert_allclose(MD, expectedMD, atol=1e-15)
def test_maxdists_Q_linkage(self):
for method in ['single', 'complete', 'ward', 'centroid', 'median']:
yield self.check_maxdists_Q_linkage, method
def check_maxdists_Q_linkage(self, method):
# Tests maxdists(Z) on the Q data set
X = hierarchy_test_data.Q_X
Z = linkage(X, method)
MD = maxdists(Z)
expectedMD = calculate_maximum_distances(Z)
assert_allclose(MD, expectedMD, atol=1e-15)
class TestMaxInconsts(object):
def test_maxinconsts_empty_linkage(self):
# Tests maxinconsts(Z, R) on empty linkage. Expecting exception.
Z = np.zeros((0, 4), dtype=np.double)
R = np.zeros((0, 4), dtype=np.double)
assert_raises(ValueError, maxinconsts, Z, R)
def test_maxinconsts_difrow_linkage(self):
# Tests maxinconsts(Z, R) on linkage and inconsistency matrices with
# different numbers of clusters. Expecting exception.
Z = np.asarray([[0, 1, 0.3, 4]], dtype=np.double)
R = np.random.rand(2, 4)
assert_raises(ValueError, maxinconsts, Z, R)
def test_maxinconsts_one_cluster_linkage(self):
# Tests maxinconsts(Z, R) on linkage with one cluster.
Z = np.asarray([[0, 1, 0.3, 4]], dtype=np.double)
R = np.asarray([[0, 0, 0, 0.3]], dtype=np.double)
MD = maxinconsts(Z, R)
expectedMD = calculate_maximum_inconsistencies(Z, R)
assert_allclose(MD, expectedMD, atol=1e-15)
def test_maxinconsts_Q_linkage(self):
for method in ['single', 'complete', 'ward', 'centroid', 'median']:
yield self.check_maxinconsts_Q_linkage, method
def check_maxinconsts_Q_linkage(self, method):
# Tests maxinconsts(Z, R) on the Q data set
X = hierarchy_test_data.Q_X
Z = linkage(X, method)
R = inconsistent(Z)
MD = maxinconsts(Z, R)
expectedMD = calculate_maximum_inconsistencies(Z, R)
assert_allclose(MD, expectedMD, atol=1e-15)
class TestMaxRStat(object):
def test_maxRstat_invalid_index(self):
for i in [3.3, -1, 4]:
yield self.check_maxRstat_invalid_index, i
def check_maxRstat_invalid_index(self, i):
# Tests maxRstat(Z, R, i). Expecting exception.
Z = np.asarray([[0, 1, 0.3, 4]], dtype=np.double)
R = np.asarray([[0, 0, 0, 0.3]], dtype=np.double)
if isinstance(i, int):
assert_raises(ValueError, maxRstat, Z, R, i)
else:
assert_raises(TypeError, maxRstat, Z, R, i)
def test_maxRstat_empty_linkage(self):
for i in range(4):
yield self.check_maxRstat_empty_linkage, i
def check_maxRstat_empty_linkage(self, i):
# Tests maxRstat(Z, R, i) on empty linkage. Expecting exception.
Z = np.zeros((0, 4), dtype=np.double)
R = np.zeros((0, 4), dtype=np.double)
assert_raises(ValueError, maxRstat, Z, R, i)
def test_maxRstat_difrow_linkage(self):
for i in range(4):
yield self.check_maxRstat_difrow_linkage, i
def check_maxRstat_difrow_linkage(self, i):
# Tests maxRstat(Z, R, i) on linkage and inconsistency matrices with
# different numbers of clusters. Expecting exception.
Z = np.asarray([[0, 1, 0.3, 4]], dtype=np.double)
R = np.random.rand(2, 4)
assert_raises(ValueError, maxRstat, Z, R, i)
def test_maxRstat_one_cluster_linkage(self):
for i in range(4):
yield self.check_maxRstat_one_cluster_linkage, i
def check_maxRstat_one_cluster_linkage(self, i):
# Tests maxRstat(Z, R, i) on linkage with one cluster.
Z = np.asarray([[0, 1, 0.3, 4]], dtype=np.double)
R = np.asarray([[0, 0, 0, 0.3]], dtype=np.double)
MD = maxRstat(Z, R, 1)
expectedMD = calculate_maximum_inconsistencies(Z, R, 1)
assert_allclose(MD, expectedMD, atol=1e-15)
def test_maxRstat_Q_linkage(self):
for method in ['single', 'complete', 'ward', 'centroid', 'median']:
for i in range(4):
yield self.check_maxRstat_Q_linkage, method, i
def check_maxRstat_Q_linkage(self, method, i):
# Tests maxRstat(Z, R, i) on the Q data set
X = hierarchy_test_data.Q_X
Z = linkage(X, method)
R = inconsistent(Z)
MD = maxRstat(Z, R, 1)
expectedMD = calculate_maximum_inconsistencies(Z, R, 1)
assert_allclose(MD, expectedMD, atol=1e-15)
class TestDendrogram(object):
def test_dendrogram_single_linkage_tdist(self):
# Tests dendrogram calculation on single linkage of the tdist data set.
Z = linkage(hierarchy_test_data.ytdist, 'single')
R = dendrogram(Z, no_plot=True)
leaves = R["leaves"]
assert_equal(leaves, [2, 5, 1, 0, 3, 4])
def test_valid_orientation(self):
Z = linkage(hierarchy_test_data.ytdist, 'single')
assert_raises(ValueError, dendrogram, Z, orientation="foo")
@dec.skipif(not have_matplotlib)
def test_dendrogram_plot(self):
for orientation in ['top', 'bottom', 'left', 'right']:
yield self.check_dendrogram_plot, orientation
def check_dendrogram_plot(self, orientation):
# Tests dendrogram plotting.
Z = linkage(hierarchy_test_data.ytdist, 'single')
expected = {'color_list': ['g', 'b', 'b', 'b', 'b'],
'dcoord': [[0.0, 138.0, 138.0, 0.0],
[0.0, 219.0, 219.0, 0.0],
[0.0, 255.0, 255.0, 219.0],
[0.0, 268.0, 268.0, 255.0],
[138.0, 295.0, 295.0, 268.0]],
'icoord': [[5.0, 5.0, 15.0, 15.0],
[45.0, 45.0, 55.0, 55.0],
[35.0, 35.0, 50.0, 50.0],
[25.0, 25.0, 42.5, 42.5],
[10.0, 10.0, 33.75, 33.75]],
'ivl': ['2', '5', '1', '0', '3', '4'],
'leaves': [2, 5, 1, 0, 3, 4]}
fig = plt.figure()
ax = fig.add_subplot(111)
# test that dendrogram accepts ax keyword
R1 = dendrogram(Z, ax=ax, orientation=orientation)
plt.close()
assert_equal(R1, expected)
# test plotting to gca (will import pylab)
R2 = dendrogram(Z, orientation=orientation)
plt.close()
assert_equal(R2, expected)
@dec.skipif(not have_matplotlib)
def test_dendrogram_truncate_mode(self):
Z = linkage(hierarchy_test_data.ytdist, 'single')
R = dendrogram(Z, 2, 'lastp', show_contracted=True)
plt.close()
assert_equal(R, {'color_list': ['b'],
'dcoord': [[0.0, 295.0, 295.0, 0.0]],
'icoord': [[5.0, 5.0, 15.0, 15.0]],
'ivl': ['(2)', '(4)'],
'leaves': [6, 9]})
R = dendrogram(Z, 2, 'mtica', show_contracted=True)
plt.close()
assert_equal(R, {'color_list': ['g', 'b', 'b', 'b'],
'dcoord': [[0.0, 138.0, 138.0, 0.0],
[0.0, 255.0, 255.0, 0.0],
[0.0, 268.0, 268.0, 255.0],
[138.0, 295.0, 295.0, 268.0]],
'icoord': [[5.0, 5.0, 15.0, 15.0],
[35.0, 35.0, 45.0, 45.0],
[25.0, 25.0, 40.0, 40.0],
[10.0, 10.0, 32.5, 32.5]],
'ivl': ['2', '5', '1', '0', '(2)'],
'leaves': [2, 5, 1, 0, 7]})
def test_dendrogram_colors(self):
# Tests dendrogram plots with alternate colors
Z = linkage(hierarchy_test_data.ytdist, 'single')
set_link_color_palette(['c', 'm', 'y', 'k'])
R = dendrogram(Z, no_plot=True,
above_threshold_color='g', color_threshold=250)
set_link_color_palette(['g', 'r', 'c', 'm', 'y', 'k'])
color_list = R['color_list']
assert_equal(color_list, ['c', 'm', 'g', 'g', 'g'])
def calculate_maximum_distances(Z):
# Used for testing correctness of maxdists.
n = Z.shape[0] + 1
B = np.zeros((n-1,))
q = np.zeros((3,))
for i in xrange(0, n - 1):
q[:] = 0.0
left = Z[i, 0]
right = Z[i, 1]
if left >= n:
q[0] = B[int(left) - n]
if right >= n:
q[1] = B[int(right) - n]
q[2] = Z[i, 2]
B[i] = q.max()
return B
def calculate_maximum_inconsistencies(Z, R, k=3):
# Used for testing correctness of maxinconsts.
n = Z.shape[0] + 1
B = np.zeros((n-1,))
q = np.zeros((3,))
for i in xrange(0, n - 1):
q[:] = 0.0
left = Z[i, 0]
right = Z[i, 1]
if left >= n:
q[0] = B[int(left) - n]
if right >= n:
q[1] = B[int(right) - n]
q[2] = R[i, k]
B[i] = q.max()
return B
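# Illustrative sketch (not part of the original test suite): the two reference
# helpers above walk the merges bottom-up and record, for each new cluster i,
# the maximum of its own value (height or inconsistency) and the values already
# recorded for its two child clusters.  A minimal, hedged check that this
# brute-force recurrence agrees with scipy's maxdists on an arbitrary toy set:
def _example_maxdists_matches_reference():
    X = np.random.rand(5, 3)                  # assumed toy data; any size works
    Z = linkage(pdist(X), 'single')
    assert_allclose(maxdists(Z), calculate_maximum_distances(Z), atol=1e-15)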
def test_euclidean_linkage_value_error():
for method in scipy.cluster.hierarchy._cpy_euclid_methods:
assert_raises(ValueError,
linkage, [[1, 1], [1, 1]], method=method, metric='cityblock')
def test_2x2_linkage():
Z1 = linkage([1], method='single', metric='euclidean')
Z2 = linkage([[0, 1], [0, 0]], method='single', metric='euclidean')
assert_allclose(Z1, Z2)
if __name__ == "__main__":
run_module_suite()
| bsd-3-clause |
bsipocz/AstroHackWeek2015 | day3-machine-learning/plots/plot_2d_separator.py | 41 | 1513 | import numpy as np
import matplotlib.pyplot as plt
def plot_2d_separator(classifier, X, fill=False, ax=None, eps=None):
if eps is None:
eps = X.std() / 2.
x_min, x_max = X[:, 0].min() - eps, X[:, 0].max() + eps
y_min, y_max = X[:, 1].min() - eps, X[:, 1].max() + eps
xx = np.linspace(x_min, x_max, 100)
yy = np.linspace(y_min, y_max, 100)
X1, X2 = np.meshgrid(xx, yy)
X_grid = np.c_[X1.ravel(), X2.ravel()]
try:
decision_values = classifier.decision_function(X_grid)
levels = [0]
fill_levels = [decision_values.min(), 0, decision_values.max()]
except AttributeError:
# no decision_function
decision_values = classifier.predict_proba(X_grid)[:, 1]
levels = [.5]
fill_levels = [0, .5, 1]
if ax is None:
ax = plt.gca()
if fill:
ax.contourf(X1, X2, decision_values.reshape(X1.shape),
levels=fill_levels, colors=['blue', 'red'])
else:
ax.contour(X1, X2, decision_values.reshape(X1.shape), levels=levels,
colors="black")
ax.set_xlim(x_min, x_max)
ax.set_ylim(y_min, y_max)
ax.set_xticks(())
ax.set_yticks(())
if __name__ == '__main__':
from sklearn.datasets import make_blobs
from sklearn.linear_model import LogisticRegression
X, y = make_blobs(centers=2, random_state=42)
clf = LogisticRegression().fit(X, y)
plot_2d_separator(clf, X, fill=True)
plt.scatter(X[:, 0], X[:, 1], c=y)
plt.show()
| gpl-2.0 |
fengzhyuan/scikit-learn | sklearn/datasets/lfw.py | 141 | 19372 | """Loader for the Labeled Faces in the Wild (LFW) dataset
This dataset is a collection of JPEG pictures of famous people collected
over the internet, all details are available on the official website:
http://vis-www.cs.umass.edu/lfw/
Each picture is centered on a single face. The typical task is called
Face Verification: given a pair of two pictures, a binary classifier
must predict whether the two images are from the same person.
An alternative task, Face Recognition or Face Identification is:
given the picture of the face of an unknown person, identify the name
of the person by referring to a gallery of previously seen pictures of
identified persons.
Both Face Verification and Face Recognition are tasks that are typically
performed on the output of a model trained to perform Face Detection. The
most popular model for Face Detection is called Viola-Jones and is
implemented in the OpenCV library. The LFW faces were extracted by this face
detector from various online websites.
"""
# Copyright (c) 2011 Olivier Grisel <[email protected]>
# License: BSD 3 clause
from os import listdir, makedirs, remove
from os.path import join, exists, isdir
from sklearn.utils import deprecated
import logging
import numpy as np
try:
import urllib.request as urllib # for backwards compatibility
except ImportError:
import urllib
from .base import get_data_home, Bunch
from ..externals.joblib import Memory
from ..externals.six import b
logger = logging.getLogger(__name__)
BASE_URL = "http://vis-www.cs.umass.edu/lfw/"
ARCHIVE_NAME = "lfw.tgz"
FUNNELED_ARCHIVE_NAME = "lfw-funneled.tgz"
TARGET_FILENAMES = [
'pairsDevTrain.txt',
'pairsDevTest.txt',
'pairs.txt',
]
def scale_face(face):
"""Scale back to 0-1 range in case of normalization for plotting"""
scaled = face - face.min()
scaled /= scaled.max()
return scaled
#
# Common private utilities for data fetching from the original LFW website
# local disk caching, and image decoding.
#
def check_fetch_lfw(data_home=None, funneled=True, download_if_missing=True):
"""Helper function to download any missing LFW data"""
data_home = get_data_home(data_home=data_home)
lfw_home = join(data_home, "lfw_home")
if funneled:
archive_path = join(lfw_home, FUNNELED_ARCHIVE_NAME)
data_folder_path = join(lfw_home, "lfw_funneled")
archive_url = BASE_URL + FUNNELED_ARCHIVE_NAME
else:
archive_path = join(lfw_home, ARCHIVE_NAME)
data_folder_path = join(lfw_home, "lfw")
archive_url = BASE_URL + ARCHIVE_NAME
if not exists(lfw_home):
makedirs(lfw_home)
for target_filename in TARGET_FILENAMES:
target_filepath = join(lfw_home, target_filename)
if not exists(target_filepath):
if download_if_missing:
url = BASE_URL + target_filename
logger.warning("Downloading LFW metadata: %s", url)
urllib.urlretrieve(url, target_filepath)
else:
raise IOError("%s is missing" % target_filepath)
if not exists(data_folder_path):
if not exists(archive_path):
if download_if_missing:
logger.warning("Downloading LFW data (~200MB): %s", archive_url)
urllib.urlretrieve(archive_url, archive_path)
else:
raise IOError("%s is missing" % target_filepath)
import tarfile
logger.info("Decompressing the data archive to %s", data_folder_path)
tarfile.open(archive_path, "r:gz").extractall(path=lfw_home)
remove(archive_path)
return lfw_home, data_folder_path
def _load_imgs(file_paths, slice_, color, resize):
"""Internally used to load images"""
# Try to import imread and imresize from PIL. We do this here to prevent
# the whole sklearn.datasets module from depending on PIL.
try:
try:
from scipy.misc import imread
except ImportError:
from scipy.misc.pilutil import imread
from scipy.misc import imresize
except ImportError:
raise ImportError("The Python Imaging Library (PIL)"
" is required to load data from jpeg files")
# compute the portion of the images to load to respect the slice_ parameter
# given by the caller
default_slice = (slice(0, 250), slice(0, 250))
if slice_ is None:
slice_ = default_slice
else:
slice_ = tuple(s or ds for s, ds in zip(slice_, default_slice))
h_slice, w_slice = slice_
h = (h_slice.stop - h_slice.start) // (h_slice.step or 1)
w = (w_slice.stop - w_slice.start) // (w_slice.step or 1)
if resize is not None:
resize = float(resize)
h = int(resize * h)
w = int(resize * w)
# allocate some contiguous memory to host the decoded image slices
n_faces = len(file_paths)
if not color:
faces = np.zeros((n_faces, h, w), dtype=np.float32)
else:
faces = np.zeros((n_faces, h, w, 3), dtype=np.float32)
# iterate over the collected file path to load the jpeg files as numpy
# arrays
for i, file_path in enumerate(file_paths):
if i % 1000 == 0:
logger.info("Loading face #%05d / %05d", i + 1, n_faces)
# Checks if jpeg reading worked. Refer to issue #3594 for more
# details.
img = imread(file_path)
        if img.ndim == 0:
raise RuntimeError("Failed to read the image file %s, "
"Please make sure that libjpeg is installed"
% file_path)
face = np.asarray(img[slice_], dtype=np.float32)
face /= 255.0 # scale uint8 coded colors to the [0.0, 1.0] floats
if resize is not None:
face = imresize(face, resize)
if not color:
# average the color channels to compute a gray levels
            # representation
face = face.mean(axis=2)
faces[i, ...] = face
return faces
#
# Task #1: Face Identification on picture with names
#
def _fetch_lfw_people(data_folder_path, slice_=None, color=False, resize=None,
min_faces_per_person=0):
"""Perform the actual data loading for the lfw people dataset
This operation is meant to be cached by a joblib wrapper.
"""
    # scan the data folder content to retain people with more than
# `min_faces_per_person` face pictures
person_names, file_paths = [], []
for person_name in sorted(listdir(data_folder_path)):
folder_path = join(data_folder_path, person_name)
if not isdir(folder_path):
continue
paths = [join(folder_path, f) for f in listdir(folder_path)]
n_pictures = len(paths)
if n_pictures >= min_faces_per_person:
person_name = person_name.replace('_', ' ')
person_names.extend([person_name] * n_pictures)
file_paths.extend(paths)
n_faces = len(file_paths)
if n_faces == 0:
raise ValueError("min_faces_per_person=%d is too restrictive" %
min_faces_per_person)
target_names = np.unique(person_names)
target = np.searchsorted(target_names, person_names)
faces = _load_imgs(file_paths, slice_, color, resize)
# shuffle the faces with a deterministic RNG scheme to avoid having
# all faces of the same person in a row, as it would break some
# cross validation and learning algorithms such as SGD and online
# k-means that make an IID assumption
indices = np.arange(n_faces)
np.random.RandomState(42).shuffle(indices)
faces, target = faces[indices], target[indices]
return faces, target, target_names
def fetch_lfw_people(data_home=None, funneled=True, resize=0.5,
min_faces_per_person=0, color=False,
slice_=(slice(70, 195), slice(78, 172)),
download_if_missing=True):
"""Loader for the Labeled Faces in the Wild (LFW) people dataset
This dataset is a collection of JPEG pictures of famous people
collected on the internet, all details are available on the
official website:
http://vis-www.cs.umass.edu/lfw/
Each picture is centered on a single face. Each pixel of each channel
(color in RGB) is encoded by a float in range 0.0 - 1.0.
The task is called Face Recognition (or Identification): given the
picture of a face, find the name of the person given a training set
(gallery).
The original images are 250 x 250 pixels, but the default slice and resize
    arguments reduce them to 62 x 47.
Parameters
----------
data_home : optional, default: None
Specify another download and cache folder for the datasets. By default
all scikit learn data is stored in '~/scikit_learn_data' subfolders.
funneled : boolean, optional, default: True
Download and use the funneled variant of the dataset.
resize : float, optional, default 0.5
        Ratio used to resize each face picture.
    min_faces_per_person : int, optional, default 0
The extracted dataset will only retain pictures of people that have at
least `min_faces_per_person` different pictures.
color : boolean, optional, default False
Keep the 3 RGB channels instead of averaging them to a single
gray level channel. If color is True the shape of the data has
        one more dimension than the shape with color = False.
slice_ : optional
Provide a custom 2D slice (height, width) to extract the
        'interesting' part of the jpeg files and avoid using statistical
        correlation from the background.
download_if_missing : optional, True by default
        If False, raise an IOError if the data is not locally available
instead of trying to download the data from the source site.
Returns
-------
dataset : dict-like object with the following attributes:
dataset.data : numpy array of shape (13233, 2914)
Each row corresponds to a ravelled face image of original size 62 x 47
pixels. Changing the ``slice_`` or resize parameters will change the shape
of the output.
dataset.images : numpy array of shape (13233, 62, 47)
Each row is a face image corresponding to one of the 5749 people in
the dataset. Changing the ``slice_`` or resize parameters will change the shape
of the output.
dataset.target : numpy array of shape (13233,)
Labels associated to each face image. Those labels range from 0-5748
and correspond to the person IDs.
dataset.DESCR : string
Description of the Labeled Faces in the Wild (LFW) dataset.
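    Examples
    --------
    A minimal usage sketch (illustrative only: it assumes the LFW archive can
    be downloaded or is already cached locally, and the exact shapes depend on
    the chosen ``min_faces_per_person`` and ``resize`` values):
    >>> from sklearn.datasets import fetch_lfw_people
    >>> lfw_people = fetch_lfw_people(min_faces_per_person=70,
    ...                               resize=0.4)  # doctest: +SKIP
    >>> lfw_people.images.shape  # doctest: +SKIP
    (1288, 50, 37)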
"""
lfw_home, data_folder_path = check_fetch_lfw(
data_home=data_home, funneled=funneled,
download_if_missing=download_if_missing)
logger.info('Loading LFW people faces from %s', lfw_home)
# wrap the loader in a memoizing function that will return memmaped data
# arrays for optimal memory usage
m = Memory(cachedir=lfw_home, compress=6, verbose=0)
load_func = m.cache(_fetch_lfw_people)
# load and memoize the pairs as np arrays
faces, target, target_names = load_func(
data_folder_path, resize=resize,
min_faces_per_person=min_faces_per_person, color=color, slice_=slice_)
# pack the results as a Bunch instance
return Bunch(data=faces.reshape(len(faces), -1), images=faces,
target=target, target_names=target_names,
DESCR="LFW faces dataset")
#
# Task #2: Face Verification on pairs of face pictures
#
def _fetch_lfw_pairs(index_file_path, data_folder_path, slice_=None,
color=False, resize=None):
"""Perform the actual data loading for the LFW pairs dataset
This operation is meant to be cached by a joblib wrapper.
"""
# parse the index file to find the number of pairs to be able to allocate
# the right amount of memory before starting to decode the jpeg files
with open(index_file_path, 'rb') as index_file:
split_lines = [ln.strip().split(b('\t')) for ln in index_file]
pair_specs = [sl for sl in split_lines if len(sl) > 2]
n_pairs = len(pair_specs)
    # iterating over the metadata lines for each pair to find the filename to
# decode and load in memory
target = np.zeros(n_pairs, dtype=np.int)
file_paths = list()
for i, components in enumerate(pair_specs):
if len(components) == 3:
target[i] = 1
pair = (
(components[0], int(components[1]) - 1),
(components[0], int(components[2]) - 1),
)
elif len(components) == 4:
target[i] = 0
pair = (
(components[0], int(components[1]) - 1),
(components[2], int(components[3]) - 1),
)
else:
raise ValueError("invalid line %d: %r" % (i + 1, components))
for j, (name, idx) in enumerate(pair):
try:
person_folder = join(data_folder_path, name)
except TypeError:
person_folder = join(data_folder_path, str(name, 'UTF-8'))
filenames = list(sorted(listdir(person_folder)))
file_path = join(person_folder, filenames[idx])
file_paths.append(file_path)
pairs = _load_imgs(file_paths, slice_, color, resize)
shape = list(pairs.shape)
n_faces = shape.pop(0)
shape.insert(0, 2)
shape.insert(0, n_faces // 2)
pairs.shape = shape
return pairs, target, np.array(['Different persons', 'Same person'])
@deprecated("Function 'load_lfw_people' has been deprecated in 0.17 and will be "
"removed in 0.19."
"Use fetch_lfw_people(download_if_missing=False) instead.")
def load_lfw_people(download_if_missing=False, **kwargs):
"""Alias for fetch_lfw_people(download_if_missing=False)
Check fetch_lfw_people.__doc__ for the documentation and parameter list.
"""
return fetch_lfw_people(download_if_missing=download_if_missing, **kwargs)
def fetch_lfw_pairs(subset='train', data_home=None, funneled=True, resize=0.5,
color=False, slice_=(slice(70, 195), slice(78, 172)),
download_if_missing=True):
"""Loader for the Labeled Faces in the Wild (LFW) pairs dataset
This dataset is a collection of JPEG pictures of famous people
collected on the internet, all details are available on the
official website:
http://vis-www.cs.umass.edu/lfw/
Each picture is centered on a single face. Each pixel of each channel
(color in RGB) is encoded by a float in range 0.0 - 1.0.
The task is called Face Verification: given a pair of two pictures,
a binary classifier must predict whether the two images are from
the same person.
In the official `README.txt`_ this task is described as the
"Restricted" task. As I am not sure as to implement the
"Unrestricted" variant correctly, I left it as unsupported for now.
.. _`README.txt`: http://vis-www.cs.umass.edu/lfw/README.txt
The original images are 250 x 250 pixels, but the default slice and resize
    arguments reduce them to 62 x 47.
Read more in the :ref:`User Guide <labeled_faces_in_the_wild>`.
Parameters
----------
subset : optional, default: 'train'
Select the dataset to load: 'train' for the development training
set, 'test' for the development test set, and '10_folds' for the
official evaluation set that is meant to be used with a 10-folds
cross validation.
data_home : optional, default: None
Specify another download and cache folder for the datasets. By
default all scikit learn data is stored in '~/scikit_learn_data'
subfolders.
funneled : boolean, optional, default: True
Download and use the funneled variant of the dataset.
resize : float, optional, default 0.5
        Ratio used to resize each face picture.
color : boolean, optional, default False
Keep the 3 RGB channels instead of averaging them to a single
gray level channel. If color is True the shape of the data has
        one more dimension than the shape with color = False.
slice_ : optional
Provide a custom 2D slice (height, width) to extract the
        'interesting' part of the jpeg files and avoid using statistical
        correlation from the background.
download_if_missing : optional, True by default
        If False, raise an IOError if the data is not locally available
instead of trying to download the data from the source site.
Returns
-------
The data is returned as a Bunch object with the following attributes:
data : numpy array of shape (2200, 5828)
Each row corresponds to 2 ravel'd face images of original size 62 x 47
pixels. Changing the ``slice_`` or resize parameters will change the shape
of the output.
pairs : numpy array of shape (2200, 2, 62, 47)
Each row has 2 face images corresponding to same or different person
from the dataset containing 5749 people. Changing the ``slice_`` or resize
parameters will change the shape of the output.
    target : numpy array of shape (2200,)
Labels associated to each pair of images. The two label values being
different persons or the same person.
DESCR : string
Description of the Labeled Faces in the Wild (LFW) dataset.
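    Examples
    --------
    A minimal usage sketch (illustrative only: it assumes the LFW metadata and
    archive can be downloaded or are already cached locally):
    >>> from sklearn.datasets import fetch_lfw_pairs
    >>> lfw_pairs_train = fetch_lfw_pairs(subset='train')  # doctest: +SKIP
    >>> lfw_pairs_train.pairs.shape  # doctest: +SKIP
    (2200, 2, 62, 47)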
"""
lfw_home, data_folder_path = check_fetch_lfw(
data_home=data_home, funneled=funneled,
download_if_missing=download_if_missing)
logger.info('Loading %s LFW pairs from %s', subset, lfw_home)
# wrap the loader in a memoizing function that will return memmaped data
# arrays for optimal memory usage
m = Memory(cachedir=lfw_home, compress=6, verbose=0)
load_func = m.cache(_fetch_lfw_pairs)
# select the right metadata file according to the requested subset
label_filenames = {
'train': 'pairsDevTrain.txt',
'test': 'pairsDevTest.txt',
'10_folds': 'pairs.txt',
}
if subset not in label_filenames:
raise ValueError("subset='%s' is invalid: should be one of %r" % (
subset, list(sorted(label_filenames.keys()))))
index_file_path = join(lfw_home, label_filenames[subset])
# load and memoize the pairs as np arrays
pairs, target, target_names = load_func(
index_file_path, data_folder_path, resize=resize, color=color,
slice_=slice_)
# pack the results as a Bunch instance
return Bunch(data=pairs.reshape(len(pairs), -1), pairs=pairs,
target=target, target_names=target_names,
DESCR="'%s' segment of the LFW pairs dataset" % subset)
@deprecated("Function 'load_lfw_pairs' has been deprecated in 0.17 and will be "
"removed in 0.19."
"Use fetch_lfw_pairs(download_if_missing=False) instead.")
def load_lfw_pairs(download_if_missing=False, **kwargs):
"""Alias for fetch_lfw_pairs(download_if_missing=False)
Check fetch_lfw_pairs.__doc__ for the documentation and parameter list.
"""
return fetch_lfw_pairs(download_if_missing=download_if_missing, **kwargs)
| bsd-3-clause |
MohammedWasim/scikit-learn | examples/cluster/plot_kmeans_digits.py | 230 | 4524 | """
===========================================================
A demo of K-Means clustering on the handwritten digits data
===========================================================
In this example we compare the various initialization strategies for
K-means in terms of runtime and quality of the results.
As the ground truth is known here, we also apply different cluster
quality metrics to judge the goodness of fit of the cluster labels to the
ground truth.
Cluster quality metrics evaluated (see :ref:`clustering_evaluation` for
definitions and discussions of the metrics):
=========== ========================================================
Shorthand full name
=========== ========================================================
homo homogeneity score
compl completeness score
v-meas V measure
ARI adjusted Rand index
AMI adjusted mutual information
silhouette silhouette coefficient
=========== ========================================================
"""
print(__doc__)
from time import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn import metrics
from sklearn.cluster import KMeans
from sklearn.datasets import load_digits
from sklearn.decomposition import PCA
from sklearn.preprocessing import scale
np.random.seed(42)
digits = load_digits()
data = scale(digits.data)
n_samples, n_features = data.shape
n_digits = len(np.unique(digits.target))
labels = digits.target
sample_size = 300
print("n_digits: %d, \t n_samples %d, \t n_features %d"
% (n_digits, n_samples, n_features))
print(79 * '_')
print('% 9s' % 'init'
' time inertia homo compl v-meas ARI AMI silhouette')
def bench_k_means(estimator, name, data):
t0 = time()
estimator.fit(data)
print('% 9s %.2fs %i %.3f %.3f %.3f %.3f %.3f %.3f'
% (name, (time() - t0), estimator.inertia_,
metrics.homogeneity_score(labels, estimator.labels_),
metrics.completeness_score(labels, estimator.labels_),
metrics.v_measure_score(labels, estimator.labels_),
metrics.adjusted_rand_score(labels, estimator.labels_),
metrics.adjusted_mutual_info_score(labels, estimator.labels_),
metrics.silhouette_score(data, estimator.labels_,
metric='euclidean',
sample_size=sample_size)))
bench_k_means(KMeans(init='k-means++', n_clusters=n_digits, n_init=10),
name="k-means++", data=data)
bench_k_means(KMeans(init='random', n_clusters=n_digits, n_init=10),
name="random", data=data)
# in this case the seeding of the centers is deterministic, hence we run the
# kmeans algorithm only once with n_init=1
pca = PCA(n_components=n_digits).fit(data)
bench_k_means(KMeans(init=pca.components_, n_clusters=n_digits, n_init=1),
name="PCA-based",
data=data)
print(79 * '_')
###############################################################################
# Visualize the results on PCA-reduced data
reduced_data = PCA(n_components=2).fit_transform(data)
kmeans = KMeans(init='k-means++', n_clusters=n_digits, n_init=10)
kmeans.fit(reduced_data)
# Step size of the mesh. Decrease to increase the quality of the VQ.
h = .02     # point in the mesh [x_min, x_max]x[y_min, y_max].
# Plot the decision boundary. For that, we will assign a color to each mesh point.
x_min, x_max = reduced_data[:, 0].min() - 1, reduced_data[:, 0].max() + 1
y_min, y_max = reduced_data[:, 1].min() - 1, reduced_data[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
# Obtain labels for each point in mesh. Use last trained model.
Z = kmeans.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.figure(1)
plt.clf()
plt.imshow(Z, interpolation='nearest',
extent=(xx.min(), xx.max(), yy.min(), yy.max()),
cmap=plt.cm.Paired,
aspect='auto', origin='lower')
plt.plot(reduced_data[:, 0], reduced_data[:, 1], 'k.', markersize=2)
# Plot the centroids as a white X
centroids = kmeans.cluster_centers_
plt.scatter(centroids[:, 0], centroids[:, 1],
marker='x', s=169, linewidths=3,
color='w', zorder=10)
plt.title('K-means clustering on the digits dataset (PCA-reduced data)\n'
'Centroids are marked with white cross')
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.xticks(())
plt.yticks(())
plt.show()
| bsd-3-clause |
dingocuster/scikit-learn | examples/cross_decomposition/plot_compare_cross_decomposition.py | 128 | 4761 | """
===================================
Compare cross decomposition methods
===================================
Simple usage of various cross decomposition algorithms:
- PLSCanonical
- PLSRegression, with multivariate response, a.k.a. PLS2
- PLSRegression, with univariate response, a.k.a. PLS1
- CCA
Given 2 multivariate covarying two-dimensional datasets, X, and Y,
PLS extracts the 'directions of covariance', i.e. the components of each
datasets that explain the most shared variance between both datasets.
This is apparent on the **scatterplot matrix** display: components 1 in
dataset X and dataset Y are maximally correlated (points lie around the
first diagonal). This is also true for components 2 in both datasets;
however, the correlation across datasets for different components is
weak: the point cloud is very spherical.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cross_decomposition import PLSCanonical, PLSRegression, CCA
###############################################################################
# Dataset based latent variables model
n = 500
# 2 latent variables:
l1 = np.random.normal(size=n)
l2 = np.random.normal(size=n)
latents = np.array([l1, l1, l2, l2]).T
X = latents + np.random.normal(size=4 * n).reshape((n, 4))
Y = latents + np.random.normal(size=4 * n).reshape((n, 4))
X_train = X[:n // 2]
Y_train = Y[:n // 2]
X_test = X[n // 2:]
Y_test = Y[n // 2:]
print("Corr(X)")
print(np.round(np.corrcoef(X.T), 2))
print("Corr(Y)")
print(np.round(np.corrcoef(Y.T), 2))
###############################################################################
# Canonical (symmetric) PLS
# Transform data
# ~~~~~~~~~~~~~~
plsca = PLSCanonical(n_components=2)
plsca.fit(X_train, Y_train)
X_train_r, Y_train_r = plsca.transform(X_train, Y_train)
X_test_r, Y_test_r = plsca.transform(X_test, Y_test)
# Scatter plot of scores
# ~~~~~~~~~~~~~~~~~~~~~~
# 1) On diagonal plot X vs Y scores on each components
plt.figure(figsize=(12, 8))
plt.subplot(221)
plt.plot(X_train_r[:, 0], Y_train_r[:, 0], "ob", label="train")
plt.plot(X_test_r[:, 0], Y_test_r[:, 0], "or", label="test")
plt.xlabel("x scores")
plt.ylabel("y scores")
plt.title('Comp. 1: X vs Y (test corr = %.2f)' %
np.corrcoef(X_test_r[:, 0], Y_test_r[:, 0])[0, 1])
plt.xticks(())
plt.yticks(())
plt.legend(loc="best")
plt.subplot(224)
plt.plot(X_train_r[:, 1], Y_train_r[:, 1], "ob", label="train")
plt.plot(X_test_r[:, 1], Y_test_r[:, 1], "or", label="test")
plt.xlabel("x scores")
plt.ylabel("y scores")
plt.title('Comp. 2: X vs Y (test corr = %.2f)' %
np.corrcoef(X_test_r[:, 1], Y_test_r[:, 1])[0, 1])
plt.xticks(())
plt.yticks(())
plt.legend(loc="best")
# 2) Off diagonal plot components 1 vs 2 for X and Y
plt.subplot(222)
plt.plot(X_train_r[:, 0], X_train_r[:, 1], "*b", label="train")
plt.plot(X_test_r[:, 0], X_test_r[:, 1], "*r", label="test")
plt.xlabel("X comp. 1")
plt.ylabel("X comp. 2")
plt.title('X comp. 1 vs X comp. 2 (test corr = %.2f)'
% np.corrcoef(X_test_r[:, 0], X_test_r[:, 1])[0, 1])
plt.legend(loc="best")
plt.xticks(())
plt.yticks(())
plt.subplot(223)
plt.plot(Y_train_r[:, 0], Y_train_r[:, 1], "*b", label="train")
plt.plot(Y_test_r[:, 0], Y_test_r[:, 1], "*r", label="test")
plt.xlabel("Y comp. 1")
plt.ylabel("Y comp. 2")
plt.title('Y comp. 1 vs Y comp. 2 , (test corr = %.2f)'
% np.corrcoef(Y_test_r[:, 0], Y_test_r[:, 1])[0, 1])
plt.legend(loc="best")
plt.xticks(())
plt.yticks(())
plt.show()
###############################################################################
# PLS regression, with multivariate response, a.k.a. PLS2
n = 1000
q = 3
p = 10
X = np.random.normal(size=n * p).reshape((n, p))
B = np.array([[1, 2] + [0] * (p - 2)] * q).T
# each Yj = 1*X1 + 2*X2 + noise
Y = np.dot(X, B) + np.random.normal(size=n * q).reshape((n, q)) + 5
pls2 = PLSRegression(n_components=3)
pls2.fit(X, Y)
print("True B (such that: Y = XB + Err)")
print(B)
# compare pls2.coef_ with B
print("Estimated B")
print(np.round(pls2.coef_, 1))
pls2.predict(X)
###############################################################################
# PLS regression, with univariate response, a.k.a. PLS1
n = 1000
p = 10
X = np.random.normal(size=n * p).reshape((n, p))
y = X[:, 0] + 2 * X[:, 1] + np.random.normal(size=n * 1) + 5
pls1 = PLSRegression(n_components=3)
pls1.fit(X, y)
# note that the number of components exceeds 1 (the dimension of y)
print("Estimated betas")
print(np.round(pls1.coef_, 1))
###############################################################################
# CCA (PLS mode B with symmetric deflation)
cca = CCA(n_components=2)
cca.fit(X_train, Y_train)
X_train_r, Y_train_r = cca.transform(X_train, Y_train)
X_test_r, Y_test_r = cca.transform(X_test, Y_test)
| bsd-3-clause |
EliHar/Pattern_recognition | openface1/evaluation/lfw.py | 10 | 10846 | #!/usr/bin/env python2
#
# Copyright 2015-2016 Carnegie Mellon University
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This implements the standard LFW verification experiment.
import math
import numpy as np
import pandas as pd
from scipy.interpolate import interp1d
from sklearn.cross_validation import KFold
from sklearn.metrics import accuracy_score
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
plt.style.use('bmh')
import os
import sys
import argparse
from scipy import arange
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
'tag', type=str, help='The label/tag to put on the ROC curve.')
parser.add_argument('workDir', type=str,
help='The work directory with labels.csv and reps.csv.')
pairsDefault = os.path.expanduser("~/openface/data/lfw/pairs.txt")
parser.add_argument('--lfwPairs', type=str,
default=os.path.expanduser(
"~/openface/data/lfw/pairs.txt"),
help='Location of the LFW pairs file from http://vis-www.cs.umass.edu/lfw/pairs.txt')
args = parser.parse_args()
if not os.path.isfile(args.lfwPairs):
print("""
Error in LFW evaluation code. (Source: <openface>/evaluation/lfw.py)
The LFW evaluation requires a file containing pairs of faces to evaluate.
Download this file from http://vis-www.cs.umass.edu/lfw/pairs.txt
and place it in the default location ({})
or pass it as --lfwPairs.
""".format(pairsDefault))
sys.exit(-1)
print("Loading embeddings.")
fname = "{}/labels.csv".format(args.workDir)
paths = pd.read_csv(fname, header=None).as_matrix()[:, 1]
paths = map(os.path.basename, paths) # Get the filename.
# Remove the extension.
paths = map(lambda path: os.path.splitext(path)[0], paths)
fname = "{}/reps.csv".format(args.workDir)
rawEmbeddings = pd.read_csv(fname, header=None).as_matrix()
embeddings = dict(zip(*[paths, rawEmbeddings]))
pairs = loadPairs(args.lfwPairs)
verifyExp(args.workDir, pairs, embeddings)
plotVerifyExp(args.workDir, args.tag)
def loadPairs(pairsFname):
print(" + Reading pairs.")
pairs = []
with open(pairsFname, 'r') as f:
for line in f.readlines()[1:]:
pair = line.strip().split()
pairs.append(pair)
assert(len(pairs) == 6000)
return np.array(pairs)
def getEmbeddings(pair, embeddings):
if len(pair) == 3:
name1 = "{}_{}".format(pair[0], pair[1].zfill(4))
name2 = "{}_{}".format(pair[0], pair[2].zfill(4))
actual_same = True
elif len(pair) == 4:
name1 = "{}_{}".format(pair[0], pair[1].zfill(4))
name2 = "{}_{}".format(pair[2], pair[3].zfill(4))
actual_same = False
else:
raise Exception(
"Unexpected pair length: {}".format(len(pair)))
(x1, x2) = (embeddings[name1], embeddings[name2])
return (x1, x2, actual_same)
def writeROC(fname, thresholds, embeddings, pairsTest):
with open(fname, "w") as f:
f.write("threshold,tp,tn,fp,fn,tpr,fpr\n")
tp = tn = fp = fn = 0
for threshold in thresholds:
tp = tn = fp = fn = 0
for pair in pairsTest:
(x1, x2, actual_same) = getEmbeddings(pair, embeddings)
diff = x1 - x2
dist = np.dot(diff.T, diff)
predict_same = dist < threshold
if predict_same and actual_same:
tp += 1
elif predict_same and not actual_same:
fp += 1
elif not predict_same and not actual_same:
tn += 1
elif not predict_same and actual_same:
fn += 1
if tp + fn == 0:
tpr = 0
else:
tpr = float(tp) / float(tp + fn)
if fp + tn == 0:
fpr = 0
else:
fpr = float(fp) / float(fp + tn)
f.write(",".join([str(x)
for x in [threshold, tp, tn, fp, fn, tpr, fpr]]))
f.write("\n")
if tpr == 1.0 and fpr == 1.0:
# No further improvements.
f.write(",".join([str(x)
for x in [4.0, tp, tn, fp, fn, tpr, fpr]]))
return
def getDistances(embeddings, pairsTrain):
list_dist = []
y_true = []
for pair in pairsTrain:
(x1, x2, actual_same) = getEmbeddings(pair, embeddings)
diff = x1 - x2
dist = np.dot(diff.T, diff)
list_dist.append(dist)
y_true.append(actual_same)
return np.asarray(list_dist), np.array(y_true)
def evalThresholdAccuracy(embeddings, pairs, threshold):
distances, y_true = getDistances(embeddings, pairs)
y_predict = np.zeros(y_true.shape)
y_predict[np.where(distances < threshold)] = 1
y_true = np.array(y_true)
accuracy = accuracy_score(y_true, y_predict)
return accuracy, pairs[np.where(y_true != y_predict)]
def findBestThreshold(thresholds, embeddings, pairsTrain):
bestThresh = bestThreshAcc = 0
distances, y_true = getDistances(embeddings, pairsTrain)
for threshold in thresholds:
y_predlabels = np.zeros(y_true.shape)
y_predlabels[np.where(distances < threshold)] = 1
accuracy = accuracy_score(y_true, y_predlabels)
if accuracy >= bestThreshAcc:
bestThreshAcc = accuracy
bestThresh = threshold
else:
# No further improvements.
return bestThresh
return bestThresh
def verifyExp(workDir, pairs, embeddings):
print(" + Computing accuracy.")
folds = KFold(n=6000, n_folds=10, shuffle=False)
thresholds = arange(0, 4, 0.01)
if os.path.exists("{}/accuracies.txt".format(workDir)):
print("{}/accuracies.txt already exists. Skipping processing.".format(workDir))
else:
accuracies = []
with open("{}/accuracies.txt".format(workDir), "w") as f:
f.write('fold, threshold, accuracy\n')
for idx, (train, test) in enumerate(folds):
fname = "{}/l2-roc.fold-{}.csv".format(workDir, idx)
writeROC(fname, thresholds, embeddings, pairs[test])
bestThresh = findBestThreshold(
thresholds, embeddings, pairs[train])
accuracy, pairs_bad = evalThresholdAccuracy(
embeddings, pairs[test], bestThresh)
accuracies.append(accuracy)
f.write('{}, {:0.2f}, {:0.2f}\n'.format(
idx, bestThresh, accuracy))
avg = np.mean(accuracies)
std = np.std(accuracies)
f.write('\navg, {:0.4f} +/- {:0.4f}\n'.format(avg, std))
print(' + {:0.4f}'.format(avg))
def getAUC(fprs, tprs):
sortedFprs, sortedTprs = zip(*sorted(zip(*(fprs, tprs))))
sortedFprs = list(sortedFprs)
sortedTprs = list(sortedTprs)
if sortedFprs[-1] != 1.0:
sortedFprs.append(1.0)
sortedTprs.append(sortedTprs[-1])
return np.trapz(sortedTprs, sortedFprs)
def plotOpenFaceROC(workDir, plotFolds=True, color=None):
fs = []
for i in range(10):
rocData = pd.read_csv("{}/l2-roc.fold-{}.csv".format(workDir, i))
fs.append(interp1d(rocData['fpr'], rocData['tpr']))
x = np.linspace(0, 1, 1000)
if plotFolds:
foldPlot, = plt.plot(x, fs[-1](x), color='grey', alpha=0.5)
else:
foldPlot = None
fprs = []
tprs = []
for fpr in np.linspace(0, 1, 1000):
tpr = 0.0
for f in fs:
v = f(fpr)
if math.isnan(v):
v = 0.0
tpr += v
tpr /= 10.0
fprs.append(fpr)
tprs.append(tpr)
if color:
meanPlot, = plt.plot(fprs, tprs, color=color)
else:
meanPlot, = plt.plot(fprs, tprs)
AUC = getAUC(fprs, tprs)
return foldPlot, meanPlot, AUC
def plotVerifyExp(workDir, tag):
print("Plotting.")
fig, ax = plt.subplots(1, 1)
openbrData = pd.read_csv("comparisons/openbr.v1.1.0.DET.csv")
openbrData['Y'] = 1 - openbrData['Y']
# brPlot = openbrData.plot(x='X', y='Y', legend=True, ax=ax)
brPlot, = plt.plot(openbrData['X'], openbrData['Y'])
brAUC = getAUC(openbrData['X'], openbrData['Y'])
foldPlot, meanPlot, AUC = plotOpenFaceROC(workDir, color='k')
humanData = pd.read_table(
"comparisons/kumar_human_crop.txt", header=None, sep=' ')
humanPlot, = plt.plot(humanData[1], humanData[0])
humanAUC = getAUC(humanData[1], humanData[0])
deepfaceData = pd.read_table(
"comparisons/deepface_ensemble.txt", header=None, sep=' ')
dfPlot, = plt.plot(deepfaceData[1], deepfaceData[0], '--',
alpha=0.75)
deepfaceAUC = getAUC(deepfaceData[1], deepfaceData[0])
# baiduData = pd.read_table(
# "comparisons/BaiduIDLFinal.TPFP", header=None, sep=' ')
# bPlot, = plt.plot(baiduData[1], baiduData[0])
# baiduAUC = getAUC(baiduData[1], baiduData[0])
eigData = pd.read_table(
"comparisons/eigenfaces-original-roc.txt", header=None, sep=' ')
eigPlot, = plt.plot(eigData[1], eigData[0])
eigAUC = getAUC(eigData[1], eigData[0])
ax.legend([humanPlot, dfPlot, brPlot, eigPlot,
meanPlot, foldPlot],
['Human, Cropped [AUC={:.3f}]'.format(humanAUC),
# 'Baidu [{:.3f}]'.format(baiduAUC),
'DeepFace Ensemble [{:.3f}]'.format(deepfaceAUC),
'OpenBR v1.1.0 [{:.3f}]'.format(brAUC),
'Eigenfaces [{:.3f}]'.format(eigAUC),
'OpenFace {} [{:.3f}]'.format(tag, AUC),
'OpenFace {} folds'.format(tag)],
loc='lower right')
plt.plot([0, 1], color='k', linestyle=':')
plt.xlabel("False Positive Rate")
plt.ylabel("True Positive Rate")
# plt.ylim(ymin=0,ymax=1)
plt.xlim(xmin=0, xmax=1)
plt.grid(b=True, which='major', color='k', linestyle='-')
plt.grid(b=True, which='minor', color='k', linestyle='-', alpha=0.2)
plt.minorticks_on()
# fig.savefig(os.path.join(workDir, "roc.pdf"))
fig.savefig(os.path.join(workDir, "roc.png"))
if __name__ == '__main__':
main()
| mit |
Weihonghao/ECM | Vpy34/lib/python3.5/site-packages/pandas/core/computation/scope.py | 7 | 9040 | """
Module for scope operations
"""
import sys
import struct
import inspect
import datetime
import itertools
import pprint
import numpy as np
import pandas
import pandas as pd # noqa
from pandas.compat import DeepChainMap, map, StringIO
from pandas.core.base import StringMixin
import pandas.core.computation as compu
def _ensure_scope(level, global_dict=None, local_dict=None, resolvers=(),
target=None, **kwargs):
"""Ensure that we are grabbing the correct scope."""
return Scope(level + 1, global_dict=global_dict, local_dict=local_dict,
resolvers=resolvers, target=target)
def _replacer(x):
"""Replace a number with its hexadecimal representation. Used to tag
temporary variables with their calling scope's id.
"""
# get the hex repr of the binary char and remove 0x and pad by pad_size
# zeros
try:
hexin = ord(x)
except TypeError:
# bytes literals masquerade as ints when iterating in py3
hexin = x
return hex(hexin)
def _raw_hex_id(obj):
"""Return the padded hexadecimal id of ``obj``."""
    # interpret as a pointer since that's really what id returns
packed = struct.pack('@P', id(obj))
return ''.join(map(_replacer, packed))
_DEFAULT_GLOBALS = {
'Timestamp': pandas._libs.lib.Timestamp,
'datetime': datetime.datetime,
'True': True,
'False': False,
'list': list,
'tuple': tuple,
'inf': np.inf,
'Inf': np.inf,
}
def _get_pretty_string(obj):
"""Return a prettier version of obj
Parameters
----------
obj : object
Object to pretty print
Returns
-------
s : str
Pretty print object repr
"""
sio = StringIO()
pprint.pprint(obj, stream=sio)
return sio.getvalue()
class Scope(StringMixin):
"""Object to hold scope, with a few bells to deal with some custom syntax
and contexts added by pandas.
Parameters
----------
level : int
global_dict : dict or None, optional, default None
local_dict : dict or Scope or None, optional, default None
resolvers : list-like or None, optional, default None
target : object
Attributes
----------
level : int
scope : DeepChainMap
target : object
temps : dict
"""
__slots__ = 'level', 'scope', 'target', 'temps'
def __init__(self, level, global_dict=None, local_dict=None, resolvers=(),
target=None):
self.level = level + 1
# shallow copy because we don't want to keep filling this up with what
# was there before if there are multiple calls to Scope/_ensure_scope
self.scope = DeepChainMap(_DEFAULT_GLOBALS.copy())
self.target = target
if isinstance(local_dict, Scope):
self.scope.update(local_dict.scope)
if local_dict.target is not None:
self.target = local_dict.target
self.update(local_dict.level)
frame = sys._getframe(self.level)
try:
# shallow copy here because we don't want to replace what's in
# scope when we align terms (alignment accesses the underlying
# numpy array of pandas objects)
self.scope = self.scope.new_child((global_dict or
frame.f_globals).copy())
if not isinstance(local_dict, Scope):
self.scope = self.scope.new_child((local_dict or
frame.f_locals).copy())
finally:
del frame
# assumes that resolvers are going from outermost scope to inner
if isinstance(local_dict, Scope):
resolvers += tuple(local_dict.resolvers.maps)
self.resolvers = DeepChainMap(*resolvers)
self.temps = {}
def __unicode__(self):
scope_keys = _get_pretty_string(list(self.scope.keys()))
res_keys = _get_pretty_string(list(self.resolvers.keys()))
return '%s(scope=%s, resolvers=%s)' % (type(self).__name__, scope_keys,
res_keys)
@property
def has_resolvers(self):
"""Return whether we have any extra scope.
        For example, DataFrames pass their columns as resolvers during calls to
``DataFrame.eval()`` and ``DataFrame.query()``.
Returns
-------
hr : bool
"""
return bool(len(self.resolvers))
def resolve(self, key, is_local):
"""Resolve a variable name in a possibly local context
Parameters
----------
key : text_type
A variable name
is_local : bool
Flag indicating whether the variable is local or not (prefixed with
the '@' symbol)
Returns
-------
value : object
The value of a particular variable
"""
try:
# only look for locals in outer scope
if is_local:
return self.scope[key]
# not a local variable so check in resolvers if we have them
if self.has_resolvers:
return self.resolvers[key]
# if we're here that means that we have no locals and we also have
# no resolvers
assert not is_local and not self.has_resolvers
return self.scope[key]
except KeyError:
try:
# last ditch effort we look in temporaries
# these are created when parsing indexing expressions
# e.g., df[df > 0]
return self.temps[key]
except KeyError:
raise compu.ops.UndefinedVariableError(key, is_local)
def swapkey(self, old_key, new_key, new_value=None):
"""Replace a variable name, with a potentially new value.
Parameters
----------
old_key : str
Current variable name to replace
new_key : str
New variable name to replace `old_key` with
new_value : object
Value to be replaced along with the possible renaming
"""
if self.has_resolvers:
maps = self.resolvers.maps + self.scope.maps
else:
maps = self.scope.maps
maps.append(self.temps)
for mapping in maps:
if old_key in mapping:
mapping[new_key] = new_value
return
def _get_vars(self, stack, scopes):
"""Get specifically scoped variables from a list of stack frames.
Parameters
----------
stack : list
A list of stack frames as returned by ``inspect.stack()``
scopes : sequence of strings
A sequence containing valid stack frame attribute names that
evaluate to a dictionary. For example, ('locals', 'globals')
"""
variables = itertools.product(scopes, stack)
for scope, (frame, _, _, _, _, _) in variables:
try:
d = getattr(frame, 'f_' + scope)
self.scope = self.scope.new_child(d)
finally:
# won't remove it, but DECREF it
# in Py3 this probably isn't necessary since frame won't be
# scope after the loop
del frame
def update(self, level):
"""Update the current scope by going back `level` levels.
Parameters
----------
level : int or None, optional, default None
"""
sl = level + 1
# add sl frames to the scope starting with the
# most distant and overwriting with more current
# makes sure that we can capture variable scope
stack = inspect.stack()
try:
self._get_vars(stack[:sl], scopes=['locals'])
finally:
del stack[:], stack
def add_tmp(self, value):
"""Add a temporary variable to the scope.
Parameters
----------
value : object
An arbitrary object to be assigned to a temporary variable.
Returns
-------
name : basestring
The name of the temporary variable created.
"""
name = '{0}_{1}_{2}'.format(type(value).__name__, self.ntemps,
_raw_hex_id(self))
# add to inner most scope
assert name not in self.temps
self.temps[name] = value
assert name in self.temps
# only increment if the variable gets put in the scope
return name
@property
def ntemps(self):
"""The number of temporary variables in this scope"""
return len(self.temps)
@property
def full_scope(self):
"""Return the full scope for use with passing to engines transparently
as a mapping.
Returns
-------
vars : DeepChainMap
All variables in this scope.
"""
maps = [self.temps] + self.resolvers.maps + self.scope.maps
return DeepChainMap(*maps)
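# Illustrative sketch (not part of pandas itself): how a Scope built from the
# caller's frame resolves names.  Local variables are found first, then any
# extra resolvers (e.g. DataFrame columns), and finally temporaries added with
# ``add_tmp``.  The values below are arbitrary and purely for illustration.
def _example_scope_resolution():
    a = 10  # noqa: F841 -- picked up from this frame's locals
    scope = _ensure_scope(level=0, resolvers=({'b': 20},))
    assert scope.resolve('a', is_local=True) == 10    # local variable
    assert scope.resolve('b', is_local=False) == 20   # resolver mapping
    name = scope.add_tmp([1, 2, 3])                   # temporary value
    assert scope.resolve(name, is_local=False) == [1, 2, 3]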
| agpl-3.0 |
IndraVikas/scikit-learn | sklearn/linear_model/randomized_l1.py | 95 | 23365 | """
Randomized Lasso/Logistic: feature selection based on Lasso and
sparse Logistic Regression
"""
# Author: Gael Varoquaux, Alexandre Gramfort
#
# License: BSD 3 clause
import itertools
from abc import ABCMeta, abstractmethod
import warnings
import numpy as np
from scipy.sparse import issparse
from scipy import sparse
from scipy.interpolate import interp1d
from .base import center_data
from ..base import BaseEstimator, TransformerMixin
from ..externals import six
from ..externals.joblib import Memory, Parallel, delayed
from ..utils import (as_float_array, check_random_state, check_X_y,
check_array, safe_mask, ConvergenceWarning)
from ..utils.validation import check_is_fitted
from .least_angle import lars_path, LassoLarsIC
from .logistic import LogisticRegression
###############################################################################
# Randomized linear model: feature selection
def _resample_model(estimator_func, X, y, scaling=.5, n_resampling=200,
n_jobs=1, verbose=False, pre_dispatch='3*n_jobs',
random_state=None, sample_fraction=.75, **params):
random_state = check_random_state(random_state)
# We are generating 1 - weights, and not weights
n_samples, n_features = X.shape
if not (0 < scaling < 1):
raise ValueError(
"'scaling' should be between 0 and 1. Got %r instead." % scaling)
scaling = 1. - scaling
scores_ = 0.0
for active_set in Parallel(n_jobs=n_jobs, verbose=verbose,
pre_dispatch=pre_dispatch)(
delayed(estimator_func)(
X, y, weights=scaling * random_state.random_integers(
0, 1, size=(n_features,)),
mask=(random_state.rand(n_samples) < sample_fraction),
verbose=max(0, verbose - 1),
**params)
for _ in range(n_resampling)):
scores_ += active_set
scores_ /= n_resampling
return scores_
class BaseRandomizedLinearModel(six.with_metaclass(ABCMeta, BaseEstimator,
TransformerMixin)):
"""Base class to implement randomized linear models for feature selection
    This implements the strategy by Meinshausen and Buhlmann:
stability selection with randomized sampling, and random re-weighting of
the penalty.
"""
@abstractmethod
def __init__(self):
pass
_center_data = staticmethod(center_data)
def fit(self, X, y):
"""Fit the model using X, y as training data.
Parameters
----------
X : array-like, sparse matrix shape = [n_samples, n_features]
Training data.
y : array-like, shape = [n_samples]
Target values.
Returns
-------
self : object
Returns an instance of self.
"""
X, y = check_X_y(X, y, ['csr', 'csc', 'coo'], y_numeric=True)
X = as_float_array(X, copy=False)
n_samples, n_features = X.shape
X, y, X_mean, y_mean, X_std = self._center_data(X, y,
self.fit_intercept,
self.normalize)
estimator_func, params = self._make_estimator_and_params(X, y)
memory = self.memory
if isinstance(memory, six.string_types):
memory = Memory(cachedir=memory)
scores_ = memory.cache(
_resample_model, ignore=['verbose', 'n_jobs', 'pre_dispatch']
)(
estimator_func, X, y,
scaling=self.scaling, n_resampling=self.n_resampling,
n_jobs=self.n_jobs, verbose=self.verbose,
pre_dispatch=self.pre_dispatch, random_state=self.random_state,
sample_fraction=self.sample_fraction, **params)
if scores_.ndim == 1:
scores_ = scores_[:, np.newaxis]
self.all_scores_ = scores_
self.scores_ = np.max(self.all_scores_, axis=1)
return self
def _make_estimator_and_params(self, X, y):
"""Return the parameters passed to the estimator"""
raise NotImplementedError
def get_support(self, indices=False):
"""Return a mask, or list, of the features/indices selected."""
check_is_fitted(self, 'scores_')
mask = self.scores_ > self.selection_threshold
return mask if not indices else np.where(mask)[0]
    # XXX: the two functions below are copy/pasted from feature_selection,
# Should we add an intermediate base class?
def transform(self, X):
"""Transform a new matrix using the selected features"""
mask = self.get_support()
X = check_array(X)
if len(mask) != X.shape[1]:
raise ValueError("X has a different shape than during fitting.")
return check_array(X)[:, safe_mask(X, mask)]
def inverse_transform(self, X):
"""Transform a new matrix using the selected features"""
support = self.get_support()
if X.ndim == 1:
X = X[None, :]
Xt = np.zeros((X.shape[0], support.size))
Xt[:, support] = X
return Xt
###############################################################################
# Randomized lasso: regression settings
def _randomized_lasso(X, y, weights, mask, alpha=1., verbose=False,
precompute=False, eps=np.finfo(np.float).eps,
max_iter=500):
X = X[safe_mask(X, mask)]
y = y[mask]
# Center X and y to avoid fit the intercept
X -= X.mean(axis=0)
y -= y.mean()
alpha = np.atleast_1d(np.asarray(alpha, dtype=np.float))
X = (1 - weights) * X
with warnings.catch_warnings():
warnings.simplefilter('ignore', ConvergenceWarning)
alphas_, _, coef_ = lars_path(X, y,
Gram=precompute, copy_X=False,
copy_Gram=False, alpha_min=np.min(alpha),
method='lasso', verbose=verbose,
max_iter=max_iter, eps=eps)
if len(alpha) > 1:
if len(alphas_) > 1: # np.min(alpha) < alpha_min
interpolator = interp1d(alphas_[::-1], coef_[:, ::-1],
bounds_error=False, fill_value=0.)
scores = (interpolator(alpha) != 0.0)
else:
scores = np.zeros((X.shape[1], len(alpha)), dtype=np.bool)
else:
scores = coef_[:, -1] != 0.0
return scores
class RandomizedLasso(BaseRandomizedLinearModel):
"""Randomized Lasso.
Randomized Lasso works by resampling the train data and computing
a Lasso on each resampling. In short, the features selected more
often are good features. It is also known as stability selection.
Read more in the :ref:`User Guide <randomized_l1>`.
Parameters
----------
alpha : float, 'aic', or 'bic', optional
        The regularization parameter alpha in the Lasso.
Warning: this is not the alpha parameter in the stability selection
article which is scaling.
scaling : float, optional
The alpha parameter in the stability selection article used to
randomly scale the features. Should be between 0 and 1.
sample_fraction : float, optional
The fraction of samples to be used in each randomized design.
Should be between 0 and 1. If 1, all samples are used.
n_resampling : int, optional
Number of randomized models.
selection_threshold: float, optional
The score above which features should be selected.
fit_intercept : boolean, optional
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
verbose : boolean or integer, optional
Sets the verbosity amount
normalize : boolean, optional, default True
If True, the regressors X will be normalized before regression.
precompute : True | False | 'auto'
Whether to use a precomputed Gram matrix to speed up
calculations. If set to 'auto' let us decide. The Gram
matrix can also be passed as argument.
max_iter : integer, optional
Maximum number of iterations to perform in the Lars algorithm.
eps : float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems. Unlike the 'tol' parameter in some iterative
optimization-based algorithms, this parameter does not control
the tolerance of the optimization.
n_jobs : integer, optional
Number of CPUs to use during the resampling. If '-1', use
all the CPUs
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
memory : Instance of joblib.Memory or string
Used for internal caching. By default, no caching is done.
If a string is given, it is the path to the caching directory.
Attributes
----------
scores_ : array, shape = [n_features]
Feature scores between 0 and 1.
all_scores_ : array, shape = [n_features, n_reg_parameter]
Feature scores between 0 and 1 for all values of the regularization \
parameter. The reference article suggests ``scores_`` is the max of \
``all_scores_``.
Examples
--------
>>> from sklearn.linear_model import RandomizedLasso
>>> randomized_lasso = RandomizedLasso()
Notes
-----
See examples/linear_model/plot_sparse_recovery.py for an example.
References
----------
Stability selection
Nicolai Meinshausen, Peter Buhlmann
Journal of the Royal Statistical Society: Series B
Volume 72, Issue 4, pages 417-473, September 2010
DOI: 10.1111/j.1467-9868.2010.00740.x
See also
--------
RandomizedLogisticRegression, LogisticRegression
"""
def __init__(self, alpha='aic', scaling=.5, sample_fraction=.75,
n_resampling=200, selection_threshold=.25,
fit_intercept=True, verbose=False,
normalize=True, precompute='auto',
max_iter=500,
eps=np.finfo(np.float).eps, random_state=None,
n_jobs=1, pre_dispatch='3*n_jobs',
memory=Memory(cachedir=None, verbose=0)):
self.alpha = alpha
self.scaling = scaling
self.sample_fraction = sample_fraction
self.n_resampling = n_resampling
self.fit_intercept = fit_intercept
self.max_iter = max_iter
self.verbose = verbose
self.normalize = normalize
self.precompute = precompute
self.eps = eps
self.random_state = random_state
self.n_jobs = n_jobs
self.selection_threshold = selection_threshold
self.pre_dispatch = pre_dispatch
self.memory = memory
def _make_estimator_and_params(self, X, y):
assert self.precompute in (True, False, None, 'auto')
alpha = self.alpha
if alpha in ('aic', 'bic'):
model = LassoLarsIC(precompute=self.precompute,
criterion=self.alpha,
max_iter=self.max_iter,
eps=self.eps)
model.fit(X, y)
self.alpha_ = alpha = model.alpha_
return _randomized_lasso, dict(alpha=alpha, max_iter=self.max_iter,
eps=self.eps,
precompute=self.precompute)
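# Illustrative usage sketch (not part of the original module): stability
# selection with RandomizedLasso as described in the docstring above. The
# dataset and parameter values are arbitrary and only meant to show the API.
def _demo_randomized_lasso():
    from sklearn.datasets import make_regression
    X, y = make_regression(n_samples=100, n_features=20, n_informative=5,
                           random_state=0)
    clf = RandomizedLasso(alpha='aic', n_resampling=50, random_state=0)
    clf.fit(X, y)
    # Features whose selection frequency exceeds selection_threshold
    selected = clf.get_support(indices=True)
    # Reduce X to the selected columns
    X_reduced = clf.transform(X)
    return selected, X_reduced.shape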
###############################################################################
# Randomized logistic: classification settings
def _randomized_logistic(X, y, weights, mask, C=1., verbose=False,
fit_intercept=True, tol=1e-3):
X = X[safe_mask(X, mask)]
y = y[mask]
if issparse(X):
size = len(weights)
weight_dia = sparse.dia_matrix((1 - weights, 0), (size, size))
X = X * weight_dia
else:
X *= (1 - weights)
C = np.atleast_1d(np.asarray(C, dtype=np.float))
scores = np.zeros((X.shape[1], len(C)), dtype=np.bool)
for this_C, this_scores in zip(C, scores.T):
# XXX : would be great to do it with a warm_start ...
clf = LogisticRegression(C=this_C, tol=tol, penalty='l1', dual=False,
fit_intercept=fit_intercept)
clf.fit(X, y)
this_scores[:] = np.any(
np.abs(clf.coef_) > 10 * np.finfo(np.float).eps, axis=0)
return scores
class RandomizedLogisticRegression(BaseRandomizedLinearModel):
"""Randomized Logistic Regression
Randomized Logistic Regression works by resampling the training data and computing
a LogisticRegression on each resampling. In short, the features selected
more often are good features. It is also known as stability selection.
Read more in the :ref:`User Guide <randomized_l1>`.
Parameters
----------
C : float, optional, default=1
The regularization parameter C in the LogisticRegression.
scaling : float, optional, default=0.5
The alpha parameter in the stability selection article used to
randomly scale the features. Should be between 0 and 1.
sample_fraction : float, optional, default=0.75
The fraction of samples to be used in each randomized design.
Should be between 0 and 1. If 1, all samples are used.
n_resampling : int, optional, default=200
Number of randomized models.
selection_threshold : float, optional, default=0.25
The score above which features should be selected.
fit_intercept : boolean, optional, default=True
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
verbose : boolean or integer, optional
Sets the verbosity amount
normalize : boolean, optional, default=True
If True, the regressors X will be normalized before regression.
tol : float, optional, default=1e-3
tolerance for stopping criteria of LogisticRegression
n_jobs : integer, optional
Number of CPUs to use during the resampling. If '-1', use
all the CPUs
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
memory : Instance of joblib.Memory or string
Used for internal caching. By default, no caching is done.
If a string is given, it is the path to the caching directory.
Attributes
----------
scores_ : array, shape = [n_features]
Feature scores between 0 and 1.
all_scores_ : array, shape = [n_features, n_reg_parameter]
Feature scores between 0 and 1 for all values of the regularization \
parameter. The reference article suggests ``scores_`` is the max \
of ``all_scores_``.
Examples
--------
>>> from sklearn.linear_model import RandomizedLogisticRegression
>>> randomized_logistic = RandomizedLogisticRegression()
Notes
-----
See examples/linear_model/plot_sparse_recovery.py for an example.
References
----------
Stability selection
Nicolai Meinshausen, Peter Buhlmann
Journal of the Royal Statistical Society: Series B
Volume 72, Issue 4, pages 417-473, September 2010
DOI: 10.1111/j.1467-9868.2010.00740.x
See also
--------
RandomizedLasso, Lasso, ElasticNet
"""
def __init__(self, C=1, scaling=.5, sample_fraction=.75,
n_resampling=200,
selection_threshold=.25, tol=1e-3,
fit_intercept=True, verbose=False,
normalize=True,
random_state=None,
n_jobs=1, pre_dispatch='3*n_jobs',
memory=Memory(cachedir=None, verbose=0)):
self.C = C
self.scaling = scaling
self.sample_fraction = sample_fraction
self.n_resampling = n_resampling
self.fit_intercept = fit_intercept
self.verbose = verbose
self.normalize = normalize
self.tol = tol
self.random_state = random_state
self.n_jobs = n_jobs
self.selection_threshold = selection_threshold
self.pre_dispatch = pre_dispatch
self.memory = memory
def _make_estimator_and_params(self, X, y):
params = dict(C=self.C, tol=self.tol,
fit_intercept=self.fit_intercept)
return _randomized_logistic, params
def _center_data(self, X, y, fit_intercept, normalize=False):
"""Center the data in X but not in y"""
X, _, Xmean, _, X_std = center_data(X, y, fit_intercept,
normalize=normalize)
return X, y, Xmean, y, X_std
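# Illustrative usage sketch (not part of the original module): stability
# selection for classification with RandomizedLogisticRegression. Data and
# parameters are arbitrary and only meant to show the API.
def _demo_randomized_logistic():
    from sklearn.datasets import make_classification
    X, y = make_classification(n_samples=200, n_features=20, n_informative=4,
                               random_state=0)
    clf = RandomizedLogisticRegression(C=1., n_resampling=50, random_state=0)
    clf.fit(X, y)
    # scores_ holds the selection frequency of each feature over resamplings
    return clf.scores_, clf.get_support(indices=True)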
###############################################################################
# Stability paths
def _lasso_stability_path(X, y, mask, weights, eps):
"Inner loop of lasso_stability_path"
X = X * weights[np.newaxis, :]
X = X[safe_mask(X, mask), :]
y = y[mask]
alpha_max = np.max(np.abs(np.dot(X.T, y))) / X.shape[0]
alpha_min = eps * alpha_max # set for early stopping in path
with warnings.catch_warnings():
warnings.simplefilter('ignore', ConvergenceWarning)
alphas, _, coefs = lars_path(X, y, method='lasso', verbose=False,
alpha_min=alpha_min)
# Scale alpha by alpha_max
alphas /= alphas[0]
# Sort alphas in ascending order
alphas = alphas[::-1]
coefs = coefs[:, ::-1]
# Get rid of the alphas that are too small
mask = alphas >= eps
# We also want to keep the first one: it should be close to the OLS
# solution
mask[0] = True
alphas = alphas[mask]
coefs = coefs[:, mask]
return alphas, coefs
def lasso_stability_path(X, y, scaling=0.5, random_state=None,
n_resampling=200, n_grid=100,
sample_fraction=0.75,
eps=4 * np.finfo(np.float).eps, n_jobs=1,
verbose=False):
"""Stabiliy path based on randomized Lasso estimates
Read more in the :ref:`User Guide <randomized_l1>`.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
training data.
y : array-like, shape = [n_samples]
target values.
scaling : float, optional, default=0.5
The alpha parameter in the stability selection article used to
randomly scale the features. Should be between 0 and 1.
random_state : integer or numpy.random.RandomState, optional
The generator used to randomize the design.
n_resampling : int, optional, default=200
Number of randomized models.
n_grid : int, optional, default=100
Number of grid points. The path is linearly reinterpolated
on a grid between 0 and 1 before computing the scores.
sample_fraction : float, optional, default=0.75
The fraction of samples to be used in each randomized design.
Should be between 0 and 1. If 1, all samples are used.
eps : float, optional
Smallest value of alpha / alpha_max considered
n_jobs : integer, optional
Number of CPUs to use during the resampling. If '-1', use
all the CPUs
verbose : boolean or integer, optional
Sets the verbosity amount
Returns
-------
alphas_grid : array, shape ~ [n_grid]
The grid points between 0 and 1: alpha/alpha_max
scores_path : array, shape = [n_features, n_grid]
The scores for each feature along the path.
Notes
-----
See examples/linear_model/plot_sparse_recovery.py for an example.
"""
rng = check_random_state(random_state)
if not (0 < scaling < 1):
raise ValueError("Parameter 'scaling' should be between 0 and 1."
" Got %r instead." % scaling)
n_samples, n_features = X.shape
paths = Parallel(n_jobs=n_jobs, verbose=verbose)(
delayed(_lasso_stability_path)(
X, y, mask=rng.rand(n_samples) < sample_fraction,
weights=1. - scaling * rng.random_integers(0, 1,
size=(n_features,)),
eps=eps)
for k in range(n_resampling))
all_alphas = sorted(list(set(itertools.chain(*[p[0] for p in paths]))))
# Take approximately n_grid values
stride = int(max(1, int(len(all_alphas) / float(n_grid))))
all_alphas = all_alphas[::stride]
if not all_alphas[-1] == 1:
all_alphas.append(1.)
all_alphas = np.array(all_alphas)
scores_path = np.zeros((n_features, len(all_alphas)))
for alphas, coefs in paths:
if alphas[0] != 0:
alphas = np.r_[0, alphas]
coefs = np.c_[np.ones((n_features, 1)), coefs]
if alphas[-1] != all_alphas[-1]:
alphas = np.r_[alphas, all_alphas[-1]]
coefs = np.c_[coefs, np.zeros((n_features, 1))]
scores_path += (interp1d(alphas, coefs,
kind='nearest', bounds_error=False,
fill_value=0, axis=-1)(all_alphas) != 0)
scores_path /= n_resampling
return all_alphas, scores_path
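# Illustrative usage sketch (not part of the original module): computing a
# stability path and keeping the features that are selected often enough
# somewhere along the path. The data and the 0.75 threshold are arbitrary.
def _demo_lasso_stability_path():
    from sklearn.datasets import make_regression
    X, y = make_regression(n_samples=100, n_features=20, n_informative=5,
                           random_state=0)
    alpha_grid, scores_path = lasso_stability_path(X, y, random_state=0,
                                                   n_resampling=50)
    # scores_path has shape (n_features, len(alpha_grid)); take each feature's
    # maximum selection frequency over the path and threshold it.
    stable = np.where(scores_path.max(axis=1) > 0.75)[0]
    return alpha_grid, stable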
| bsd-3-clause |
hickerson/bbn | fable/fable_sources/libtbx/auto_build/package_defs.py | 1 | 1935 |
"""
Listing of current dependencies for CCTBX and related applications (including
LABELIT, xia2, DIALS, and Phenix with GUI). Not all of these can be downloaded
via the web (yet).
"""
from __future__ import division
BASE_CCI_PKG_URL = "http://cci.lbl.gov/third_party"
BASE_XIA_PKG_URL = "http://www.ccp4.ac.uk/xia"
# from CCI
PYTHON_PKG = "Python-2.7.6_cci.tar.gz"
# XXX we maintain a patched copy to avoid an ICE with gcc 3.4
NUMPY_PKG = "numpy-1.6.2.tar.gz" # used many places
IMAGING_PKG = "Imaging-1.1.7.tar.gz" # for labelit, gltbx
REPORTLAB_PKG = "reportlab-2.6.tar.gz" # for labelit
ZLIB_PKG = "zlib-1.2.7.tar.gz"
SCIPY_PKG = "scipy-0.11.0.tar.gz" # not used by default
PYRTF_PKG = "PyRTF-0.45.tar.gz" # for phenix.table_one, etc.
BIOPYTHON_PKG = "biopython-1.58.tar.gz" # used in iotbx
# from xia2 page
HDF5_PKG = "hdf5-1.8.8.tar.bz2" # dxtbx
H5PY_PKG = "h5py-2.0.1-edit.tar.gz" # dxtbx
# GUI dependencies
LIBPNG_PKG = "libpng-1.2.32.tar.gz"
FREETYPE_PKG = "freetype-2.4.2.tar.gz"
# Linux-only
GETTEXT_PKG = "gettext-0.18.2.tar.gz"
GLIB_PKG = "glib-2.12.11.tar.gz"
EXPAT_PKG = "expat-1.95.8.tar.gz"
FONTCONFIG_PKG = "fontconfig-2.3.95.tar.gz"
RENDER_PKG = "render-0.8.tar.gz"
XRENDER_PKG = "xrender-0.8.3.tar.gz"
XFT_PKG = "xft-2.1.2.tar.gz"
PIXMAN_PKG = "pixman-0.19.2.tar.gz"
CAIRO_PKG = "cairo-1.8.10.tar.gz"
PANGO_PKG = "pango-1.16.1.tar.gz"
ATK_PKG = "atk-1.9.1.tar.gz"
TIFF_PKG = "tiff-v3.6.1.tar.gz"
GTK_PKG = "gtk+-2.10.11.tar.gz"
GTK_ENGINE_PKG = "clearlooks-0.5.tar.gz"
GTK_THEME_PKG = "gtk_themes.tar.gz"
# end Linux-only
FONT_PKG = "fonts.tar.gz"
WXPYTHON_DEV_PKG = "wxPython-src-3.0.0.0_cci.tar.gz" # Mac 64-bit
WXPYTHON_PKG = "wxPython-src-2.8.12.1.tar.gz" # Linux, Mac 32-bit
WEBKIT_PKG = "wxwebkit.tar.gz" # not currently used
MATPLOTLIB_PKG = "matplotlib-1.3.0.tar.gz"
PY2APP_PKG = "py2app-0.7.3.tar.gz" # Mac only
| mit |
harshaneelhg/scikit-learn | benchmarks/bench_plot_ward.py | 290 | 1260 | """
Benchmark scikit-learn's Ward implementation compared to SciPy's
"""
import time
import numpy as np
from scipy.cluster import hierarchy
import pylab as pl
from sklearn.cluster import AgglomerativeClustering
ward = AgglomerativeClustering(n_clusters=3, linkage='ward')
n_samples = np.logspace(.5, 3, 9)
n_features = np.logspace(1, 3.5, 7)
N_samples, N_features = np.meshgrid(n_samples,
n_features)
scikits_time = np.zeros(N_samples.shape)
scipy_time = np.zeros(N_samples.shape)
for i, n in enumerate(n_samples):
for j, p in enumerate(n_features):
X = np.random.normal(size=(n, p))
t0 = time.time()
ward.fit(X)
scikits_time[j, i] = time.time() - t0
t0 = time.time()
hierarchy.ward(X)
scipy_time[j, i] = time.time() - t0
ratio = scikits_time / scipy_time
pl.figure("scikit-learn Ward's method benchmark results")
pl.imshow(np.log(ratio), aspect='auto', origin="lower")
pl.colorbar()
pl.contour(ratio, levels=[1, ], colors='k')
pl.yticks(range(len(n_features)), n_features.astype(np.int))
pl.ylabel('N features')
pl.xticks(range(len(n_samples)), n_samples.astype(np.int))
pl.xlabel('N samples')
pl.title("Scikit's time, in units of scipy time (log)")
pl.show()
| bsd-3-clause |
Sentient07/scikit-learn | examples/neural_networks/plot_rbm_logistic_classification.py | 99 | 4608 | """
==============================================================
Restricted Boltzmann Machine features for digit classification
==============================================================
For greyscale image data where pixel values can be interpreted as degrees of
blackness on a white background, like handwritten digit recognition, the
Bernoulli Restricted Boltzmann machine model (:class:`BernoulliRBM
<sklearn.neural_network.BernoulliRBM>`) can perform effective non-linear
feature extraction.
In order to learn good latent representations from a small dataset, we
artificially generate more labeled data by perturbing the training data with
linear shifts of 1 pixel in each direction.
This example shows how to build a classification pipeline with a BernoulliRBM
feature extractor and a :class:`LogisticRegression
<sklearn.linear_model.LogisticRegression>` classifier. The hyperparameters
of the entire model (learning rate, hidden layer size, regularization)
were optimized by grid search, but the search is not reproduced here because
of runtime constraints.
Logistic regression on raw pixel values is presented for comparison. The
example shows that the features extracted by the BernoulliRBM help improve the
classification accuracy.
"""
from __future__ import print_function
print(__doc__)
# Authors: Yann N. Dauphin, Vlad Niculae, Gabriel Synnaeve
# License: BSD
import numpy as np
import matplotlib.pyplot as plt
from scipy.ndimage import convolve
from sklearn import linear_model, datasets, metrics
from sklearn.model_selection import train_test_split
from sklearn.neural_network import BernoulliRBM
from sklearn.pipeline import Pipeline
###############################################################################
# Setting up
def nudge_dataset(X, Y):
"""
This produces a dataset 5 times bigger than the original one,
by moving the 8x8 images in X around by 1px to left, right, down, up
"""
direction_vectors = [
[[0, 1, 0],
[0, 0, 0],
[0, 0, 0]],
[[0, 0, 0],
[1, 0, 0],
[0, 0, 0]],
[[0, 0, 0],
[0, 0, 1],
[0, 0, 0]],
[[0, 0, 0],
[0, 0, 0],
[0, 1, 0]]]
shift = lambda x, w: convolve(x.reshape((8, 8)), mode='constant',
weights=w).ravel()
X = np.concatenate([X] +
[np.apply_along_axis(shift, 1, X, vector)
for vector in direction_vectors])
Y = np.concatenate([Y for _ in range(5)], axis=0)
return X, Y
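# Quick illustrative check of nudge_dataset on dummy data (shapes assumed for
# demonstration, not taken from the original example): each 8x8 image yields
# itself plus four 1-pixel-shifted copies, so the sample count grows by a
# factor of 5 and the labels are tiled to match.
_X_demo = np.zeros((10, 64))
_Y_demo = np.arange(10)
_X_big, _Y_big = nudge_dataset(_X_demo, _Y_demo)
assert _X_big.shape == (50, 64) and _Y_big.shape == (50,)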
# Load Data
digits = datasets.load_digits()
X = np.asarray(digits.data, 'float32')
X, Y = nudge_dataset(X, digits.target)
X = (X - np.min(X, 0)) / (np.max(X, 0) + 0.0001) # 0-1 scaling
X_train, X_test, Y_train, Y_test = train_test_split(X, Y,
test_size=0.2,
random_state=0)
# Models we will use
logistic = linear_model.LogisticRegression()
rbm = BernoulliRBM(random_state=0, verbose=True)
classifier = Pipeline(steps=[('rbm', rbm), ('logistic', logistic)])
###############################################################################
# Training
# Hyper-parameters. These were set by cross-validation,
# using a GridSearchCV. Here we are not performing cross-validation to
# save time.
rbm.learning_rate = 0.06
rbm.n_iter = 20
# More components tend to give better prediction performance, but larger
# fitting time
rbm.n_components = 100
logistic.C = 6000.0
# Training RBM-Logistic Pipeline
classifier.fit(X_train, Y_train)
# Training Logistic regression
logistic_classifier = linear_model.LogisticRegression(C=100.0)
logistic_classifier.fit(X_train, Y_train)
###############################################################################
# Evaluation
print()
print("Logistic regression using RBM features:\n%s\n" % (
metrics.classification_report(
Y_test,
classifier.predict(X_test))))
print("Logistic regression using raw pixel features:\n%s\n" % (
metrics.classification_report(
Y_test,
logistic_classifier.predict(X_test))))
###############################################################################
# Plotting
plt.figure(figsize=(4.2, 4))
for i, comp in enumerate(rbm.components_):
plt.subplot(10, 10, i + 1)
plt.imshow(comp.reshape((8, 8)), cmap=plt.cm.gray_r,
interpolation='nearest')
plt.xticks(())
plt.yticks(())
plt.suptitle('100 components extracted by RBM', fontsize=16)
plt.subplots_adjust(0.08, 0.02, 0.92, 0.85, 0.08, 0.23)
plt.show()
| bsd-3-clause |
marcsans/cnn-physics-perception | phy/lib/python2.7/site-packages/sklearn/decomposition/pca.py | 5 | 27151 | """ Principal Component Analysis
"""
# Author: Alexandre Gramfort <[email protected]>
# Olivier Grisel <[email protected]>
# Mathieu Blondel <[email protected]>
# Denis A. Engemann <[email protected]>
# Michael Eickenberg <[email protected]>
# Giorgio Patrini <[email protected]>
#
# License: BSD 3 clause
from math import log, sqrt
import numpy as np
from scipy import linalg
from scipy.special import gammaln
from scipy.sparse import issparse
from ..externals import six
from .base import _BasePCA
from ..base import BaseEstimator, TransformerMixin
from ..utils import deprecated
from ..utils import check_random_state, as_float_array
from ..utils import check_array
from ..utils.extmath import fast_dot, fast_logdet, randomized_svd, svd_flip
from ..utils.validation import check_is_fitted
from ..utils.arpack import svds
def _assess_dimension_(spectrum, rank, n_samples, n_features):
"""Compute the likelihood of a rank ``rank`` dataset
The dataset is assumed to be embedded in Gaussian noise of shape (n,
dimf) having spectrum ``spectrum``.
Parameters
----------
spectrum: array of shape (n)
Data spectrum.
rank: int
Tested rank value.
n_samples: int
Number of samples.
n_features: int
Number of features.
Returns
-------
ll: float,
The log-likelihood
Notes
-----
This implements the method of `Thomas P. Minka:
Automatic Choice of Dimensionality for PCA. NIPS 2000: 598-604`
"""
if rank > len(spectrum):
raise ValueError("The tested rank cannot exceed the rank of the"
" dataset")
pu = -rank * log(2.)
for i in range(rank):
pu += (gammaln((n_features - i) / 2.) -
log(np.pi) * (n_features - i) / 2.)
pl = np.sum(np.log(spectrum[:rank]))
pl = -pl * n_samples / 2.
if rank == n_features:
pv = 0
v = 1
else:
v = np.sum(spectrum[rank:]) / (n_features - rank)
pv = -np.log(v) * n_samples * (n_features - rank) / 2.
m = n_features * rank - rank * (rank + 1.) / 2.
pp = log(2. * np.pi) * (m + rank + 1.) / 2.
pa = 0.
spectrum_ = spectrum.copy()
spectrum_[rank:n_features] = v
for i in range(rank):
for j in range(i + 1, len(spectrum)):
pa += log((spectrum[i] - spectrum[j]) *
(1. / spectrum_[j] - 1. / spectrum_[i])) + log(n_samples)
ll = pu + pl + pv + pp - pa / 2. - rank * log(n_samples) / 2.
return ll
def _infer_dimension_(spectrum, n_samples, n_features):
"""Infers the dimension of a dataset of shape (n_samples, n_features)
The dataset is described by its spectrum `spectrum`.
"""
n_spectrum = len(spectrum)
ll = np.empty(n_spectrum)
for rank in range(n_spectrum):
ll[rank] = _assess_dimension_(spectrum, rank, n_samples, n_features)
return ll.argmax()
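# Illustrative sketch (not part of the original module): how the MLE rank
# inference above behaves on a synthetic spectrum. The data is made up; with
# a strong 2-dimensional signal plus weak isotropic noise the argmax of the
# per-rank log-likelihood is expected (though not guaranteed) to be 2.
def _demo_infer_dimension():
    rng = np.random.RandomState(0)
    X = np.dot(rng.randn(200, 2), rng.randn(2, 10)) + .1 * rng.randn(200, 10)
    spectrum = np.sort(np.linalg.eigvalsh(np.cov(X.T)))[::-1]
    return _infer_dimension_(spectrum, n_samples=200, n_features=10)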
class PCA(_BasePCA):
"""Principal component analysis (PCA)
Linear dimensionality reduction using Singular Value Decomposition of the
data to project it to a lower dimensional space.
It uses the LAPACK implementation of the full SVD or a randomized truncated
SVD by the method of Halko et al. 2009, depending on the shape of the input
data and the number of components to extract.
It can also use the scipy.sparse.linalg ARPACK implementation of the
truncated SVD.
Notice that this class does not support sparse input. See
:class:`TruncatedSVD` for an alternative with sparse data.
Read more in the :ref:`User Guide <PCA>`.
Parameters
----------
n_components : int, float, None or string
Number of components to keep.
if n_components is not set all components are kept::
n_components == min(n_samples, n_features)
if n_components == 'mle' and svd_solver == 'full', Minka\'s MLE is used
to guess the dimension
if ``0 < n_components < 1`` and svd_solver == 'full', select the number
of components such that the amount of variance that needs to be
explained is greater than the percentage specified by n_components
n_components cannot be equal to n_features for svd_solver == 'arpack'.
copy : bool (default True)
If False, data passed to fit are overwritten and running
fit(X).transform(X) will not yield the expected results,
use fit_transform(X) instead.
whiten : bool, optional (default False)
When True (False by default) the `components_` vectors are multiplied
by the square root of n_samples and then divided by the singular values
to ensure uncorrelated outputs with unit component-wise variances.
Whitening will remove some information from the transformed signal
(the relative variance scales of the components) but can sometime
improve the predictive accuracy of the downstream estimators by
making their data respect some hard-wired assumptions.
svd_solver : string {'auto', 'full', 'arpack', 'randomized'}
auto :
the solver is selected by a default policy based on `X.shape` and
`n_components`: if the input data is larger than 500x500 and the
number of components to extract is lower than 80% of the smallest
dimension of the data, then the more efficient 'randomized'
method is enabled. Otherwise the exact full SVD is computed and
optionally truncated afterwards.
full :
run exact full SVD calling the standard LAPACK solver via
`scipy.linalg.svd` and select the components by postprocessing
arpack :
run SVD truncated to n_components calling ARPACK solver via
`scipy.sparse.linalg.svds`. It requires strictly
0 < n_components < X.shape[1]
randomized :
run randomized SVD by the method of Halko et al.
.. versionadded:: 0.18.0
tol : float >= 0, optional (default .0)
Tolerance for singular values computed by svd_solver == 'arpack'.
.. versionadded:: 0.18.0
iterated_power : int >= 0, or 'auto', (default 'auto')
Number of iterations for the power method computed by
svd_solver == 'randomized'.
.. versionadded:: 0.18.0
random_state : int or RandomState instance or None (default None)
Pseudo Random Number generator seed control. If None, use the
numpy.random singleton. Used by svd_solver == 'arpack' or 'randomized'.
.. versionadded:: 0.18.0
Attributes
----------
components_ : array, [n_components, n_features]
Principal axes in feature space, representing the directions of
maximum variance in the data. The components are sorted by
``explained_variance_``.
explained_variance_ : array, [n_components]
The amount of variance explained by each of the selected components.
.. versionadded:: 0.18
explained_variance_ratio_ : array, [n_components]
Percentage of variance explained by each of the selected components.
If ``n_components`` is not set then all components are stored and the
sum of explained variances is equal to 1.0.
mean_ : array, [n_features]
Per-feature empirical mean, estimated from the training set.
Equal to `X.mean(axis=0)`.
n_components_ : int
The estimated number of components. When n_components is set
to 'mle' or a number between 0 and 1 (with svd_solver == 'full') this
number is estimated from input data. Otherwise it equals the parameter
n_components, or n_features if n_components is None.
noise_variance_ : float
The estimated noise covariance following the Probabilistic PCA model
from Tipping and Bishop 1999. See "Pattern Recognition and
Machine Learning" by C. Bishop, 12.2.1 p. 574 or
http://www.miketipping.com/papers/met-mppca.pdf. It is required to
compute the estimated data covariance and score samples.
References
----------
For n_components == 'mle', this class uses the method of `Thomas P. Minka:
Automatic Choice of Dimensionality for PCA. NIPS 2000: 598-604`
Implements the probabilistic PCA model from:
M. Tipping and C. Bishop, Probabilistic Principal Component Analysis,
Journal of the Royal Statistical Society, Series B, 61, Part 3, pp. 611-622
via the score and score_samples methods.
See http://www.miketipping.com/papers/met-mppca.pdf
For svd_solver == 'arpack', refer to `scipy.sparse.linalg.svds`.
For svd_solver == 'randomized', see:
`Finding structure with randomness: Stochastic algorithms
for constructing approximate matrix decompositions Halko, et al., 2009
(arXiv:0909.4061)`
`A randomized algorithm for the decomposition of matrices
Per-Gunnar Martinsson, Vladimir Rokhlin and Mark Tygert`
Examples
--------
>>> import numpy as np
>>> from sklearn.decomposition import PCA
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> pca = PCA(n_components=2)
>>> pca.fit(X)
PCA(copy=True, iterated_power='auto', n_components=2, random_state=None,
svd_solver='auto', tol=0.0, whiten=False)
>>> print(pca.explained_variance_ratio_) # doctest: +ELLIPSIS
[ 0.99244... 0.00755...]
>>> pca = PCA(n_components=2, svd_solver='full')
>>> pca.fit(X) # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
PCA(copy=True, iterated_power='auto', n_components=2, random_state=None,
svd_solver='full', tol=0.0, whiten=False)
>>> print(pca.explained_variance_ratio_) # doctest: +ELLIPSIS
[ 0.99244... 0.00755...]
>>> pca = PCA(n_components=1, svd_solver='arpack')
>>> pca.fit(X)
PCA(copy=True, iterated_power='auto', n_components=1, random_state=None,
svd_solver='arpack', tol=0.0, whiten=False)
>>> print(pca.explained_variance_ratio_) # doctest: +ELLIPSIS
[ 0.99244...]
See also
--------
KernelPCA
SparsePCA
TruncatedSVD
IncrementalPCA
"""
def __init__(self, n_components=None, copy=True, whiten=False,
svd_solver='auto', tol=0.0, iterated_power='auto',
random_state=None):
self.n_components = n_components
self.copy = copy
self.whiten = whiten
self.svd_solver = svd_solver
self.tol = tol
self.iterated_power = iterated_power
self.random_state = random_state
def fit(self, X, y=None):
"""Fit the model with X.
Parameters
----------
X: array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
self : object
Returns the instance itself.
"""
self._fit(X)
return self
def fit_transform(self, X, y=None):
"""Fit the model with X and apply the dimensionality reduction on X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
U, S, V = self._fit(X)
U = U[:, :self.n_components_]
if self.whiten:
# X_new = X * V / S * sqrt(n_samples) = U * sqrt(n_samples)
U *= sqrt(X.shape[0])
else:
# X_new = X * V = U * S * V^T * V = U * S
U *= S[:self.n_components_]
return U
def _fit(self, X):
"""Dispatch to the right submethod depending on the chosen solver."""
# Raise an error for sparse input.
# This is more informative than the generic one raised by check_array.
if issparse(X):
raise TypeError('PCA does not support sparse input. See '
'TruncatedSVD for a possible alternative.')
X = check_array(X, dtype=[np.float64], ensure_2d=True,
copy=self.copy)
# Handle n_components==None
if self.n_components is None:
n_components = X.shape[1]
else:
n_components = self.n_components
# Handle svd_solver
svd_solver = self.svd_solver
if svd_solver == 'auto':
# Small problem, just call full PCA
if max(X.shape) <= 500:
svd_solver = 'full'
elif n_components >= 1 and n_components < .8 * min(X.shape):
svd_solver = 'randomized'
# This is also the case of n_components in (0,1)
else:
svd_solver = 'full'
# Call different fits for either full or truncated SVD
if svd_solver == 'full':
return self._fit_full(X, n_components)
elif svd_solver in ['arpack', 'randomized']:
return self._fit_truncated(X, n_components, svd_solver)
def _fit_full(self, X, n_components):
"""Fit the model by computing full SVD on X"""
n_samples, n_features = X.shape
if n_components == 'mle':
if n_samples < n_features:
raise ValueError("n_components='mle' is only supported "
"if n_samples >= n_features")
elif not 0 <= n_components <= n_features:
raise ValueError("n_components=%r must be between 0 and "
"n_features=%r with svd_solver='full'"
% (n_components, n_features))
# Center data
self.mean_ = np.mean(X, axis=0)
X -= self.mean_
U, S, V = linalg.svd(X, full_matrices=False)
# flip eigenvectors' sign to enforce deterministic output
U, V = svd_flip(U, V)
components_ = V
# Get variance explained by singular values
explained_variance_ = (S ** 2) / n_samples
total_var = explained_variance_.sum()
explained_variance_ratio_ = explained_variance_ / total_var
# Postprocess the number of components required
if n_components == 'mle':
n_components = \
_infer_dimension_(explained_variance_, n_samples, n_features)
elif 0 < n_components < 1.0:
# number of components for which the cumulated explained
# variance percentage is superior to the desired threshold
ratio_cumsum = explained_variance_ratio_.cumsum()
n_components = np.searchsorted(ratio_cumsum, n_components) + 1
# Compute noise covariance using Probabilistic PCA model
# The sigma2 maximum likelihood (cf. eq. 12.46)
if n_components < min(n_features, n_samples):
self.noise_variance_ = explained_variance_[n_components:].mean()
else:
self.noise_variance_ = 0.
self.n_samples_, self.n_features_ = n_samples, n_features
self.components_ = components_[:n_components]
self.n_components_ = n_components
self.explained_variance_ = explained_variance_[:n_components]
self.explained_variance_ratio_ = \
explained_variance_ratio_[:n_components]
return U, S, V
def _fit_truncated(self, X, n_components, svd_solver):
"""Fit the model by computing truncated SVD (by ARPACK or randomized)
on X
"""
n_samples, n_features = X.shape
if isinstance(n_components, six.string_types):
raise ValueError("n_components=%r cannot be a string "
"with svd_solver='%s'"
% (n_components, svd_solver))
elif not 1 <= n_components <= n_features:
raise ValueError("n_components=%r must be between 1 and "
"n_features=%r with svd_solver='%s'"
% (n_components, n_features, svd_solver))
elif svd_solver == 'arpack' and n_components == n_features:
raise ValueError("n_components=%r must be stricly less than "
"n_features=%r with svd_solver='%s'"
% (n_components, n_features, svd_solver))
random_state = check_random_state(self.random_state)
# Center data
self.mean_ = np.mean(X, axis=0)
X -= self.mean_
if svd_solver == 'arpack':
# random init solution, as ARPACK does it internally
v0 = random_state.uniform(-1, 1, size=min(X.shape))
U, S, V = svds(X, k=n_components, tol=self.tol, v0=v0)
# svds doesn't abide by scipy.linalg.svd/randomized_svd
# conventions, so reverse its outputs.
S = S[::-1]
# flip eigenvectors' sign to enforce deterministic output
U, V = svd_flip(U[:, ::-1], V[::-1])
elif svd_solver == 'randomized':
# sign flipping is done inside
U, S, V = randomized_svd(X, n_components=n_components,
n_iter=self.iterated_power,
flip_sign=True,
random_state=random_state)
self.n_samples_, self.n_features_ = n_samples, n_features
self.components_ = V
self.n_components_ = n_components
# Get variance explained by singular values
self.explained_variance_ = (S ** 2) / n_samples
total_var = np.var(X, axis=0)
self.explained_variance_ratio_ = \
self.explained_variance_ / total_var.sum()
if self.n_components_ < n_features:
self.noise_variance_ = (total_var.sum() -
self.explained_variance_.sum())
else:
self.noise_variance_ = 0.
return U, S, V
def score_samples(self, X):
"""Return the log-likelihood of each sample.
See. "Pattern Recognition and Machine Learning"
by C. Bishop, 12.2.1 p. 574
or http://www.miketipping.com/papers/met-mppca.pdf
Parameters
----------
X: array, shape(n_samples, n_features)
The data.
Returns
-------
ll: array, shape (n_samples,)
Log-likelihood of each sample under the current model
"""
check_is_fitted(self, 'mean_')
X = check_array(X)
Xr = X - self.mean_
n_features = X.shape[1]
log_like = np.zeros(X.shape[0])
precision = self.get_precision()
log_like = -.5 * (Xr * (np.dot(Xr, precision))).sum(axis=1)
log_like -= .5 * (n_features * log(2. * np.pi) -
fast_logdet(precision))
return log_like
def score(self, X, y=None):
"""Return the average log-likelihood of all samples.
See. "Pattern Recognition and Machine Learning"
by C. Bishop, 12.2.1 p. 574
or http://www.miketipping.com/papers/met-mppca.pdf
Parameters
----------
X: array, shape(n_samples, n_features)
The data.
Returns
-------
ll: float
Average log-likelihood of the samples under the current model
"""
return np.mean(self.score_samples(X))
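# Illustrative sketch (not part of the original module): the three ways of
# choosing n_components documented above -- a fixed integer, a fraction of
# explained variance (requires svd_solver='full') and Minka's 'mle'. The
# numbers are arbitrary.
def _demo_pca_component_selection():
    rng = np.random.RandomState(0)
    X = rng.randn(100, 10)
    exact = PCA(n_components=3).fit(X)
    by_variance = PCA(n_components=0.95, svd_solver='full').fit(X)
    by_mle = PCA(n_components='mle', svd_solver='full').fit(X)
    return (exact.explained_variance_ratio_,
            by_variance.n_components_,
            by_mle.n_components_)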
@deprecated("RandomizedPCA was deprecated in 0.18 and will be removed in 0.20. "
"Use PCA(svd_solver='randomized') instead. The new implementation "
"DOES NOT store whiten ``components_``. Apply transform to get them.")
class RandomizedPCA(BaseEstimator, TransformerMixin):
"""Principal component analysis (PCA) using randomized SVD
.. deprecated:: 0.18
This class will be removed in 0.20.
Use :class:`PCA` with parameter svd_solver 'randomized' instead.
The new implementation DOES NOT store whiten ``components_``.
Apply transform to get them.
Linear dimensionality reduction using approximated Singular Value
Decomposition of the data and keeping only the most significant
singular vectors to project the data to a lower dimensional space.
Read more in the :ref:`User Guide <RandomizedPCA>`.
Parameters
----------
n_components : int, optional
Maximum number of components to keep. When not given or None, this
is set to n_features (the second dimension of the training data).
copy : bool
If False, data passed to fit are overwritten and running
fit(X).transform(X) will not yield the expected results,
use fit_transform(X) instead.
iterated_power : int, default=2
Number of iterations for the power method.
.. versionchanged:: 0.18
whiten : bool, optional
When True (False by default) the `components_` vectors are multiplied by
the square root of (n_samples) and divided by the singular values to
ensure uncorrelated outputs with unit component-wise variances.
Whitening will remove some information from the transformed signal
(the relative variance scales of the components) but can sometime
improve the predictive accuracy of the downstream estimators by
making their data respect some hard-wired assumptions.
random_state : int or RandomState instance or None (default)
Pseudo Random Number generator seed control. If None, use the
numpy.random singleton.
Attributes
----------
components_ : array, [n_components, n_features]
Components with maximum variance.
explained_variance_ratio_ : array, [n_components]
Percentage of variance explained by each of the selected components.
If ``n_components`` is not set then all components are stored and the sum
of explained variances is equal to 1.0.
mean_ : array, [n_features]
Per-feature empirical mean, estimated from the training set.
Examples
--------
>>> import numpy as np
>>> from sklearn.decomposition import RandomizedPCA
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> pca = RandomizedPCA(n_components=2)
>>> pca.fit(X) # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
RandomizedPCA(copy=True, iterated_power=2, n_components=2,
random_state=None, whiten=False)
>>> print(pca.explained_variance_ratio_) # doctest: +ELLIPSIS
[ 0.99244... 0.00755...]
See also
--------
PCA
TruncatedSVD
References
----------
.. [Halko2009] `Finding structure with randomness: Stochastic algorithms
for constructing approximate matrix decompositions Halko, et al., 2009
(arXiv:0909.4061)`
.. [MRT] `A randomized algorithm for the decomposition of matrices
Per-Gunnar Martinsson, Vladimir Rokhlin and Mark Tygert`
"""
def __init__(self, n_components=None, copy=True, iterated_power=2,
whiten=False, random_state=None):
self.n_components = n_components
self.copy = copy
self.iterated_power = iterated_power
self.whiten = whiten
self.random_state = random_state
def fit(self, X, y=None):
"""Fit the model with X by extracting the first principal components.
Parameters
----------
X: array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
self : object
Returns the instance itself.
"""
self._fit(check_array(X))
return self
def _fit(self, X):
"""Fit the model to the data X.
Parameters
----------
X: array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
Returns
-------
X : ndarray, shape (n_samples, n_features)
The input data, copied, centered and whitened when requested.
"""
random_state = check_random_state(self.random_state)
X = np.atleast_2d(as_float_array(X, copy=self.copy))
n_samples = X.shape[0]
# Center data
self.mean_ = np.mean(X, axis=0)
X -= self.mean_
if self.n_components is None:
n_components = X.shape[1]
else:
n_components = self.n_components
U, S, V = randomized_svd(X, n_components,
n_iter=self.iterated_power,
random_state=random_state)
self.explained_variance_ = exp_var = (S ** 2) / n_samples
full_var = np.var(X, axis=0).sum()
self.explained_variance_ratio_ = exp_var / full_var
if self.whiten:
self.components_ = V / S[:, np.newaxis] * sqrt(n_samples)
else:
self.components_ = V
return X
def transform(self, X, y=None):
"""Apply dimensionality reduction on X.
X is projected on the first principal components previous extracted
from a training set.
Parameters
----------
X : array-like, shape (n_samples, n_features)
New data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
check_is_fitted(self, 'mean_')
X = check_array(X)
if self.mean_ is not None:
X = X - self.mean_
X = fast_dot(X, self.components_.T)
return X
def fit_transform(self, X, y=None):
"""Fit the model with X and apply the dimensionality reduction on X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
New data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
X = check_array(X)
X = self._fit(X)
return fast_dot(X, self.components_.T)
def inverse_transform(self, X, y=None):
"""Transform data back to its original space.
Returns an array X_original whose transform would be X.
Parameters
----------
X : array-like, shape (n_samples, n_components)
New data, where n_samples is the number of samples
and n_components is the number of components.
Returns
-------
X_original array-like, shape (n_samples, n_features)
Notes
-----
If whitening is enabled, inverse_transform does not compute the
exact inverse operation of transform.
"""
check_is_fitted(self, 'mean_')
X_original = fast_dot(X, self.components_)
if self.mean_ is not None:
X_original = X_original + self.mean_
return X_original
| mit |
pandas-ml/pandas-ml | pandas_ml/skaccessors/preprocessing.py | 2 | 3152 | #!/usr/bin/env python
import numpy as np
from pandas_ml.core.accessor import _AccessorMethods, _attach_methods
from pandas_ml.compat import (_SKLEARN_INSTALLED, is_integer_dtype)
if _SKLEARN_INSTALLED:
import sklearn.preprocessing as pp
_keep_col_classes = [pp.Binarizer,
pp.FunctionTransformer,
pp.Imputer,
pp.KernelCenterer,
pp.LabelEncoder,
pp.MaxAbsScaler,
pp.MinMaxScaler,
pp.Normalizer,
pp.RobustScaler,
pp.StandardScaler]
else:
_keep_col_classes = []
class PreprocessingMethods(_AccessorMethods):
"""
Accessor to ``sklearn.preprocessing``.
"""
_module_name = 'sklearn.preprocessing'
def _keep_existing_columns(self, estimator):
"""
Check whether estimator should preserve existing column names
"""
return estimator.__class__ in _keep_col_classes
def add_dummy_feature(self, value=1.0):
"""
Call ``sklearn.preprocessing.add_dummy_feature`` using automatic mapping.
- ``X``: ``ModelFrame.data``
"""
from pandas_ml.core.series import ModelSeries
from pandas_ml.core.frame import ModelFrame
func = self._module.add_dummy_feature
if isinstance(self._df, ModelSeries):
data = self._df.to_frame()
constructor = ModelFrame
else:
data = self._data
constructor = self._constructor
result = func(data.values, value=value)
result = constructor(result, index=data.index)
columns = result.columns[:-len(data.columns)].append(data.columns)
result.columns = columns
return result
_preprocessing_methods = ['binarize', 'normalize', 'scale']
def _wrap_func(func, func_name):
def f(self, *args, **kwargs):
from pandas_ml.core.frame import ModelFrame
if isinstance(self._df, ModelFrame):
values = self._data.values
if is_integer_dtype(values):
# integer raises an error in normalize
values = values.astype(np.float)
result = func(values, *args, **kwargs)
result = self._constructor(result, index=self._data.index,
columns=self._data.columns)
else:
# ModelSeries
values = np.atleast_2d(self._df.values)
if is_integer_dtype(values):
values = values.astype(np.float)
result = func(values, *args, **kwargs)
result = self._constructor(result[0], index=self._df.index,
name=self._df.name)
return result
f.__doc__ = (
"""
Call ``%s`` using automatic mapping.
- ``X``: ``ModelFrame.data``
""" % func_name)
return f
_attach_methods(PreprocessingMethods, _wrap_func, _preprocessing_methods)
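# Illustrative sketch (not part of the original module): once attached, the
# wrapped functions are reachable through the ModelFrame accessor. The
# accessor name ``preprocessing`` and the toy frame below are assumptions
# based on the pandas-ml documentation, not guaranteed by this file alone.
def _demo_preprocessing_accessor():
    import pandas_ml as pdml
    df = pdml.ModelFrame({'a': [1., 2., 3.], 'b': [4., 5., 6.]})
    # Calls sklearn.preprocessing.scale on df.data and rewraps the result
    return df.preprocessing.scale()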
| bsd-3-clause |
tridge/jsbsim | tests/TestAeroFuncOutput.py | 3 | 1701 | # TestAeroFuncOutput.py
#
# Check that the aerodynamic forces are consistent with the aerodynamics
# functions output
#
# Copyright (c) 2016 Bertrand Coconnier
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 3 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, see <http://www.gnu.org/licenses/>
#
import pandas as pd
from JSBSim_utils import JSBSimTestCase, RunTest, CreateFDM
class TestAeroFuncOutput(JSBSimTestCase):
def testDragFunctions(self):
fdm = CreateFDM(self.sandbox)
self.script_path = self.sandbox.path_to_jsbsim_file('scripts',
'x153.xml')
fdm.load_script(self.script_path)
fdm.set_output_directive(self.sandbox.path_to_jsbsim_file('tests',
'output.xml'))
fdm.run_ic()
while fdm.run():
pass
results = pd.read_csv('output.csv', index_col=0)
Fdrag = results['F_{Drag} (lbs)']
CDmin = results['aero/coefficient/CDmin']
CDi = results['aero/coefficient/CDi']
self.assertAlmostEqual(abs(Fdrag/(CDmin+CDi)).max(), 1.0, delta=1E-5)
RunTest(TestAeroFuncOutput)
| lgpl-2.1 |
edgarcosta92/ns3-dev | scripts/thomas/parse_size.py | 1 | 1465 | import csv
import numpy as np
import matplotlib.pyplot as plt
import random
#Here we will merge, parse, and filter netflow data.
file_format = {"packets":0, "duration": 1, "bytes": 2}
BytesThreshold = 80
class MergeAndParse(object):
def __init__(self):
pass
def load_file(self, file_name):
lines = []
with open(file_name, 'rb') as f:
for line in f:
flow = line.split()
#accept flow.
if int(flow[2])/int(flow[0]) > BytesThreshold:
lines.append(flow)
return lines
def merge_files(self, fileA, fileB):
#load files
parsedA = self.load_file(fileA)
parsedB = self.load_file(fileB)
# shuffle data sets
random.shuffle(parsedA)
random.shuffle(parsedB)
#get minimum size so we only take MIN ELEMENTS from both sets
min_size = min(len(parsedA), len(parsedB))
data_set = parsedA[:min_size] + parsedB[:min_size]
#shuffle again
random.shuffle(data_set)
return data_set
def run(self, fileA, fileB, fileOut):
data_set = self.merge_files(fileA, fileB)
with open(fileOut, "w") as f:
for data_point in data_set:
f.write(" ".join(data_point) + "\n")
a = MergeAndParse()
a.run("../../swift_datasets/netflowA.flows", "../../swift_datasets/netflowB.flows", "../../swift_datasets/netflow.flows") | gpl-2.0 |
hrjn/scikit-learn | examples/covariance/plot_sparse_cov.py | 29 | 5079 | """
======================================
Sparse inverse covariance estimation
======================================
Using the GraphLasso estimator to learn a covariance and sparse precision
from a small number of samples.
To estimate a probabilistic model (e.g. a Gaussian model), estimating the
precision matrix, that is the inverse covariance matrix, is as important
as estimating the covariance matrix. Indeed a Gaussian model is
parametrized by the precision matrix.
To be in favorable recovery conditions, we sample the data from a model
with a sparse inverse covariance matrix. In addition, we ensure that the
data is not too much correlated (limiting the largest coefficient of the
precision matrix) and that there a no small coefficients in the
precision matrix that cannot be recovered. In addition, with a small
number of observations, it is easier to recover a correlation matrix
rather than a covariance, thus we scale the time series.
Here, the number of samples is slightly larger than the number of
dimensions, thus the empirical covariance is still invertible. However,
as the observations are strongly correlated, the empirical covariance
matrix is ill-conditioned and as a result its inverse --the empirical
precision matrix-- is very far from the ground truth.
If we use l2 shrinkage, as with the Ledoit-Wolf estimator, as the number
of samples is small, we need to shrink a lot. As a result, the
Ledoit-Wolf precision is fairly close to the ground truth precision, that
is not far from being diagonal, but the off-diagonal structure is lost.
The l1-penalized estimator can recover part of this off-diagonal
structure. It learns a sparse precision. It is not able to
recover the exact sparsity pattern: it detects too many non-zero
coefficients. However, the highest non-zero coefficients of the l1
estimated correspond to the non-zero coefficients in the ground truth.
Finally, the coefficients of the l1 precision estimate are biased toward
zero: because of the penalty, they are all smaller than the corresponding
ground truth value, as can be seen on the figure.
Note that, the color range of the precision matrices is tweaked to
improve readability of the figure. The full range of values of the
empirical precision is not displayed.
The alpha parameter of the GraphLasso setting the sparsity of the model is
set by internal cross-validation in the GraphLassoCV. As can be
seen on figure 2, the grid to compute the cross-validation score is
iteratively refined in the neighborhood of the maximum.
"""
print(__doc__)
# author: Gael Varoquaux <[email protected]>
# License: BSD 3 clause
# Copyright: INRIA
import numpy as np
from scipy import linalg
from sklearn.datasets import make_sparse_spd_matrix
from sklearn.covariance import GraphLassoCV, ledoit_wolf
import matplotlib.pyplot as plt
##############################################################################
# Generate the data
n_samples = 60
n_features = 20
prng = np.random.RandomState(1)
prec = make_sparse_spd_matrix(n_features, alpha=.98,
smallest_coef=.4,
largest_coef=.7,
random_state=prng)
cov = linalg.inv(prec)
d = np.sqrt(np.diag(cov))
cov /= d
cov /= d[:, np.newaxis]
prec *= d
prec *= d[:, np.newaxis]
X = prng.multivariate_normal(np.zeros(n_features), cov, size=n_samples)
X -= X.mean(axis=0)
X /= X.std(axis=0)
##############################################################################
# Estimate the covariance
emp_cov = np.dot(X.T, X) / n_samples
model = GraphLassoCV()
model.fit(X)
cov_ = model.covariance_
prec_ = model.precision_
lw_cov_, _ = ledoit_wolf(X)
lw_prec_ = linalg.inv(lw_cov_)
##############################################################################
# Plot the results
plt.figure(figsize=(10, 6))
plt.subplots_adjust(left=0.02, right=0.98)
# plot the covariances
covs = [('Empirical', emp_cov), ('Ledoit-Wolf', lw_cov_),
('GraphLasso', cov_), ('True', cov)]
vmax = cov_.max()
for i, (name, this_cov) in enumerate(covs):
plt.subplot(2, 4, i + 1)
plt.imshow(this_cov, interpolation='nearest', vmin=-vmax, vmax=vmax,
cmap=plt.cm.RdBu_r)
plt.xticks(())
plt.yticks(())
plt.title('%s covariance' % name)
# plot the precisions
precs = [('Empirical', linalg.inv(emp_cov)), ('Ledoit-Wolf', lw_prec_),
('GraphLasso', prec_), ('True', prec)]
vmax = .9 * prec_.max()
for i, (name, this_prec) in enumerate(precs):
ax = plt.subplot(2, 4, i + 5)
plt.imshow(np.ma.masked_equal(this_prec, 0),
interpolation='nearest', vmin=-vmax, vmax=vmax,
cmap=plt.cm.RdBu_r)
plt.xticks(())
plt.yticks(())
plt.title('%s precision' % name)
ax.set_axis_bgcolor('.7')
# plot the model selection metric
plt.figure(figsize=(4, 3))
plt.axes([.2, .15, .75, .7])
plt.plot(model.cv_alphas_, np.mean(model.grid_scores_, axis=1), 'o-')
plt.axvline(model.alpha_, color='.5')
plt.title('Model selection')
plt.ylabel('Cross-validation score')
plt.xlabel('alpha')
plt.show()
| bsd-3-clause |
markneville/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/backends/backend_ps.py | 69 | 50262 | """
A PostScript backend, which can produce both PostScript .ps and .eps
"""
from __future__ import division
import glob, math, os, shutil, sys, time
def _fn_name(): return sys._getframe(1).f_code.co_name
try:
from hashlib import md5
except ImportError:
from md5 import md5 #Deprecated in 2.5
from tempfile import gettempdir
from cStringIO import StringIO
from matplotlib import verbose, __version__, rcParams
from matplotlib._pylab_helpers import Gcf
from matplotlib.afm import AFM
from matplotlib.backend_bases import RendererBase, GraphicsContextBase,\
FigureManagerBase, FigureCanvasBase
from matplotlib.cbook import is_string_like, get_realpath_and_stat, \
is_writable_file_like, maxdict
from matplotlib.mlab import quad2cubic
from matplotlib.figure import Figure
from matplotlib.font_manager import findfont, is_opentype_cff_font
from matplotlib.ft2font import FT2Font, KERNING_DEFAULT, LOAD_NO_HINTING
from matplotlib.ttconv import convert_ttf_to_ps
from matplotlib.mathtext import MathTextParser
from matplotlib._mathtext_data import uni2type1
from matplotlib.text import Text
from matplotlib.path import Path
from matplotlib.transforms import IdentityTransform
import numpy as npy
import binascii
import re
try:
set
except NameError:
from sets import Set as set
if sys.platform.startswith('win'): cmd_split = '&'
else: cmd_split = ';'
backend_version = 'Level II'
debugPS = 0
papersize = {'letter': (8.5,11),
'legal': (8.5,14),
'ledger': (11,17),
'a0': (33.11,46.81),
'a1': (23.39,33.11),
'a2': (16.54,23.39),
'a3': (11.69,16.54),
'a4': (8.27,11.69),
'a5': (5.83,8.27),
'a6': (4.13,5.83),
'a7': (2.91,4.13),
'a8': (2.07,2.91),
'a9': (1.457,2.05),
'a10': (1.02,1.457),
'b0': (40.55,57.32),
'b1': (28.66,40.55),
'b2': (20.27,28.66),
'b3': (14.33,20.27),
'b4': (10.11,14.33),
'b5': (7.16,10.11),
'b6': (5.04,7.16),
'b7': (3.58,5.04),
'b8': (2.51,3.58),
'b9': (1.76,2.51),
'b10': (1.26,1.76)}
def _get_papertype(w, h):
keys = papersize.keys()
keys.sort()
keys.reverse()
for key in keys:
if key.startswith('l'): continue
pw, ph = papersize[key]
if (w < pw) and (h < ph): return key
else:
return 'a0'
def _num_to_str(val):
if is_string_like(val): return val
ival = int(val)
if val==ival: return str(ival)
s = "%1.3f"%val
s = s.rstrip("0")
s = s.rstrip(".")
return s
def _nums_to_str(*args):
return ' '.join(map(_num_to_str,args))
def quote_ps_string(s):
"Quote dangerous characters of S for use in a PostScript string constant."
s=s.replace("\\", "\\\\")
s=s.replace("(", "\\(")
s=s.replace(")", "\\)")
s=s.replace("'", "\\251")
s=s.replace("`", "\\301")
s=re.sub(r"[^ -~\n]", lambda x: r"\%03o"%ord(x.group()), s)
return s
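# Example (illustrative): parentheses, backslashes and quotes are escaped so
# the result can be embedded safely inside a PostScript string literal, e.g.
# quote_ps_string('50% (approx)') -> '50% \(approx\)'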
def seq_allequal(seq1, seq2):
"""
seq1 and seq2 are either None or sequences or numerix arrays
Return True if both are None or both are seqs with identical
elements
"""
if seq1 is None:
return seq2 is None
if seq2 is None:
return False
#ok, neither are None:, assuming iterable
if len(seq1) != len(seq2): return False
return npy.alltrue(npy.equal(seq1, seq2))
class RendererPS(RendererBase):
"""
The renderer handles all the drawing primitives using a graphics
context instance that controls the colors/styles.
"""
fontd = maxdict(50)
afmfontd = maxdict(50)
def __init__(self, width, height, pswriter, imagedpi=72):
"""
Although postscript itself is dpi independent, we need to
imform the image code about a requested dpi to generate high
res images and them scale them before embeddin them
"""
RendererBase.__init__(self)
self.width = width
self.height = height
self._pswriter = pswriter
if rcParams['text.usetex']:
self.textcnt = 0
self.psfrag = []
self.imagedpi = imagedpi
if rcParams['path.simplify']:
self.simplify = (width * imagedpi, height * imagedpi)
else:
self.simplify = None
# current renderer state (None=uninitialised)
self.color = None
self.linewidth = None
self.linejoin = None
self.linecap = None
self.linedash = None
self.fontname = None
self.fontsize = None
self.hatch = None
self.image_magnification = imagedpi/72.0
self._clip_paths = {}
self._path_collection_id = 0
self.used_characters = {}
self.mathtext_parser = MathTextParser("PS")
def track_characters(self, font, s):
"""Keeps track of which characters are required from
each font."""
realpath, stat_key = get_realpath_and_stat(font.fname)
used_characters = self.used_characters.setdefault(
stat_key, (realpath, set()))
used_characters[1].update([ord(x) for x in s])
def merge_used_characters(self, other):
for stat_key, (realpath, charset) in other.items():
used_characters = self.used_characters.setdefault(
stat_key, (realpath, set()))
used_characters[1].update(charset)
def set_color(self, r, g, b, store=1):
if (r,g,b) != self.color:
if r==g and r==b:
self._pswriter.write("%1.3f setgray\n"%r)
else:
self._pswriter.write("%1.3f %1.3f %1.3f setrgbcolor\n"%(r,g,b))
if store: self.color = (r,g,b)
def set_linewidth(self, linewidth, store=1):
if linewidth != self.linewidth:
self._pswriter.write("%1.3f setlinewidth\n"%linewidth)
if store: self.linewidth = linewidth
def set_linejoin(self, linejoin, store=1):
if linejoin != self.linejoin:
self._pswriter.write("%d setlinejoin\n"%linejoin)
if store: self.linejoin = linejoin
def set_linecap(self, linecap, store=1):
if linecap != self.linecap:
self._pswriter.write("%d setlinecap\n"%linecap)
if store: self.linecap = linecap
def set_linedash(self, offset, seq, store=1):
if self.linedash is not None:
oldo, oldseq = self.linedash
if seq_allequal(seq, oldseq): return
if seq is not None and len(seq):
s="[%s] %d setdash\n"%(_nums_to_str(*seq), offset)
self._pswriter.write(s)
else:
self._pswriter.write("[] 0 setdash\n")
if store: self.linedash = (offset,seq)
def set_font(self, fontname, fontsize, store=1):
if rcParams['ps.useafm']: return
if (fontname,fontsize) != (self.fontname,self.fontsize):
out = ("/%s findfont\n"
"%1.3f scalefont\n"
"setfont\n" % (fontname,fontsize))
self._pswriter.write(out)
if store: self.fontname = fontname
if store: self.fontsize = fontsize
def set_hatch(self, hatch):
"""
hatch can be one of:
/ - diagonal hatching
\ - back diagonal
| - vertical
- - horizontal
+ - crossed
X - crossed diagonal
        Letters can be combined, in which case all the specified
        hatchings are done.
        If the same letter repeats, it increases the density of hatching
        in that direction.
"""
hatches = {'horiz':0, 'vert':0, 'diag1':0, 'diag2':0}
for letter in hatch:
if (letter == '/'): hatches['diag2'] += 1
elif (letter == '\\'): hatches['diag1'] += 1
elif (letter == '|'): hatches['vert'] += 1
elif (letter == '-'): hatches['horiz'] += 1
elif (letter == '+'):
hatches['horiz'] += 1
hatches['vert'] += 1
elif (letter.lower() == 'x'):
hatches['diag1'] += 1
hatches['diag2'] += 1
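        # do_hatch emits a PostScript snippet that clips to the current path
        # (eoclip), rotates by `angle`, and strokes black lines spaced
        # 12/density units apart across the path's bounding box.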
def do_hatch(angle, density):
if (density == 0): return ""
return """\
gsave
eoclip %s rotate 0.0 0.0 0.0 0.0 setrgbcolor 0 setlinewidth
/hatchgap %d def
pathbbox /hatchb exch def /hatchr exch def /hatcht exch def /hatchl exch def
hatchl cvi hatchgap idiv hatchgap mul
hatchgap
hatchr cvi hatchgap idiv hatchgap mul
{hatcht m 0 hatchb hatcht sub r }
for
stroke
grestore
""" % (angle, 12/density)
self._pswriter.write("gsave\n")
self._pswriter.write(do_hatch(90, hatches['horiz']))
self._pswriter.write(do_hatch(0, hatches['vert']))
self._pswriter.write(do_hatch(45, hatches['diag1']))
self._pswriter.write(do_hatch(-45, hatches['diag2']))
self._pswriter.write("grestore\n")
def get_canvas_width_height(self):
'return the canvas width and height in display coords'
return self.width, self.height
def get_text_width_height_descent(self, s, prop, ismath):
"""
        Get the width and height in display coords of the string s
        with FontProperty prop.
"""
if rcParams['text.usetex']:
texmanager = self.get_texmanager()
fontsize = prop.get_size_in_points()
l,b,r,t = texmanager.get_ps_bbox(s, fontsize)
w = (r-l)
h = (t-b)
# TODO: We need a way to get a good baseline from
# text.usetex
return w, h, 0
if ismath:
width, height, descent, pswriter, used_characters = \
self.mathtext_parser.parse(s, 72, prop)
return width, height, descent
if rcParams['ps.useafm']:
if ismath: s = s[1:-1]
font = self._get_font_afm(prop)
l,b,w,h,d = font.get_str_bbox_and_descent(s)
fontsize = prop.get_size_in_points()
scale = 0.001*fontsize
w *= scale
h *= scale
d *= scale
return w, h, d
font = self._get_font_ttf(prop)
font.set_text(s, 0.0, flags=LOAD_NO_HINTING)
w, h = font.get_width_height()
w /= 64.0 # convert from subpixels
h /= 64.0
d = font.get_descent()
d /= 64.0
#print s, w, h
return w, h, d
def flipy(self):
'return true if small y numbers are top for renderer'
return False
def _get_font_afm(self, prop):
key = hash(prop)
font = self.afmfontd.get(key)
if font is None:
fname = findfont(prop, fontext='afm')
font = self.afmfontd.get(fname)
if font is None:
font = AFM(file(findfont(prop, fontext='afm')))
self.afmfontd[fname] = font
self.afmfontd[key] = font
return font
def _get_font_ttf(self, prop):
key = hash(prop)
font = self.fontd.get(key)
if font is None:
fname = findfont(prop)
font = self.fontd.get(fname)
if font is None:
font = FT2Font(str(fname))
self.fontd[fname] = font
self.fontd[key] = font
font.clear()
size = prop.get_size_in_points()
font.set_size(size, 72.0)
return font
def _rgba(self, im):
return im.as_rgba_str()
def _rgb(self, im):
h,w,s = im.as_rgba_str()
rgba = npy.fromstring(s, npy.uint8)
rgba.shape = (h, w, 4)
rgb = rgba[:,:,:3]
return h, w, rgb.tostring()
def _gray(self, im, rc=0.3, gc=0.59, bc=0.11):
rgbat = im.as_rgba_str()
rgba = npy.fromstring(rgbat[2], npy.uint8)
rgba.shape = (rgbat[0], rgbat[1], 4)
rgba_f = rgba.astype(npy.float32)
r = rgba_f[:,:,0]
g = rgba_f[:,:,1]
b = rgba_f[:,:,2]
gray = (r*rc + g*gc + b*bc).astype(npy.uint8)
return rgbat[0], rgbat[1], gray.tostring()
def _hex_lines(self, s, chars_per_line=128):
s = binascii.b2a_hex(s)
nhex = len(s)
lines = []
for i in range(0,nhex,chars_per_line):
limit = min(i+chars_per_line, nhex)
lines.append(s[i:limit])
return lines
def get_image_magnification(self):
"""
Get the factor by which to magnify images passed to draw_image.
Allows a backend to have images at a different resolution to other
artists.
"""
return self.image_magnification
def draw_image(self, x, y, im, bbox, clippath=None, clippath_trans=None):
"""
Draw the Image instance into the current axes; x is the
distance in pixels from the left hand side of the canvas and y
        is the distance from the bottom.
        bbox is a matplotlib.transforms.Bbox instance for clipping, or
        None.
"""
im.flipud_out()
if im.is_grayscale:
h, w, bits = self._gray(im)
imagecmd = "image"
else:
h, w, bits = self._rgb(im)
imagecmd = "false 3 colorimage"
hexlines = '\n'.join(self._hex_lines(bits))
xscale, yscale = (
w/self.image_magnification, h/self.image_magnification)
figh = self.height*72
#print 'values', origin, flipud, figh, h, y
clip = []
if bbox is not None:
clipx,clipy,clipw,cliph = bbox.bounds
clip.append('%s clipbox' % _nums_to_str(clipw, cliph, clipx, clipy))
if clippath is not None:
id = self._get_clip_path(clippath, clippath_trans)
clip.append('%s' % id)
clip = '\n'.join(clip)
#y = figh-(y+h)
ps = """gsave
%(clip)s
%(x)s %(y)s translate
%(xscale)s %(yscale)s scale
/DataString %(w)s string def
%(w)s %(h)s 8 [ %(w)s 0 0 -%(h)s 0 %(h)s ]
{
currentfile DataString readhexstring pop
} bind %(imagecmd)s
%(hexlines)s
grestore
""" % locals()
self._pswriter.write(ps)
# unflip
im.flipud_out()
def _convert_path(self, path, transform, simplify=None):
path = transform.transform_path(path)
ps = []
last_points = None
for points, code in path.iter_segments(simplify):
if code == Path.MOVETO:
ps.append("%g %g m" % tuple(points))
elif code == Path.LINETO:
ps.append("%g %g l" % tuple(points))
elif code == Path.CURVE3:
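                # PostScript curveto is cubic-only, so promote quadratic
                # (CURVE3) segments to an equivalent cubic Bezier first.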
points = quad2cubic(*(list(last_points[-2:]) + list(points)))
ps.append("%g %g %g %g %g %g c" %
tuple(points[2:]))
elif code == Path.CURVE4:
ps.append("%g %g %g %g %g %g c" % tuple(points))
elif code == Path.CLOSEPOLY:
ps.append("cl")
last_points = points
ps = "\n".join(ps)
return ps
def _get_clip_path(self, clippath, clippath_transform):
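        # Each clip path is written once as a named PostScript procedure
        # ('c0', 'c1', ...) and referenced by that name on later uses.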
id = self._clip_paths.get((clippath, clippath_transform))
if id is None:
id = 'c%x' % len(self._clip_paths)
ps_cmd = ['/%s {' % id]
ps_cmd.append(self._convert_path(clippath, clippath_transform))
ps_cmd.extend(['clip', 'newpath', '} bind def\n'])
self._pswriter.write('\n'.join(ps_cmd))
self._clip_paths[(clippath, clippath_transform)] = id
return id
def draw_path(self, gc, path, transform, rgbFace=None):
"""
Draws a Path instance using the given affine transform.
"""
ps = self._convert_path(path, transform, self.simplify)
self._draw_ps(ps, gc, rgbFace)
def draw_markers(self, gc, marker_path, marker_trans, path, trans, rgbFace=None):
"""
        Draw the marker defined by marker_path at each vertex of path.
        Marker coordinates are in points; the vertices of path are
        transformed by trans before the markers are placed.
"""
if debugPS: self._pswriter.write('% draw_markers \n')
write = self._pswriter.write
if rgbFace:
if rgbFace[0]==rgbFace[1] and rgbFace[0]==rgbFace[2]:
ps_color = '%1.3f setgray' % rgbFace[0]
else:
ps_color = '%1.3f %1.3f %1.3f setrgbcolor' % rgbFace
# construct the generic marker command:
ps_cmd = ['/o {', 'gsave', 'newpath', 'translate'] # dont want the translate to be global
ps_cmd.append(self._convert_path(marker_path, marker_trans))
if rgbFace:
ps_cmd.extend(['gsave', ps_color, 'fill', 'grestore'])
ps_cmd.extend(['stroke', 'grestore', '} bind def'])
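        # Invoke the marker procedure once per vertex of the transformed
        # path, emitting "x y o" for each position.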
tpath = trans.transform_path(path)
for vertices, code in tpath.iter_segments():
if len(vertices):
x, y = vertices[-2:]
ps_cmd.append("%g %g o" % (x, y))
ps = '\n'.join(ps_cmd)
self._draw_ps(ps, gc, rgbFace, fill=False, stroke=False)
def draw_path_collection(self, master_transform, cliprect, clippath,
clippath_trans, paths, all_transforms, offsets,
offsetTrans, facecolors, edgecolors, linewidths,
linestyles, antialiaseds, urls):
write = self._pswriter.write
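        # Define each unique path of the collection once as a named
        # procedure, then stamp it at every (xo, yo) offset below.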
path_codes = []
for i, (path, transform) in enumerate(self._iter_collection_raw_paths(
master_transform, paths, all_transforms)):
name = 'p%x_%x' % (self._path_collection_id, i)
ps_cmd = ['/%s {' % name,
'newpath', 'translate']
ps_cmd.append(self._convert_path(path, transform))
ps_cmd.extend(['} bind def\n'])
write('\n'.join(ps_cmd))
path_codes.append(name)
for xo, yo, path_id, gc, rgbFace in self._iter_collection(
path_codes, cliprect, clippath, clippath_trans,
offsets, offsetTrans, facecolors, edgecolors,
linewidths, linestyles, antialiaseds, urls):
ps = "%g %g %s" % (xo, yo, path_id)
self._draw_ps(ps, gc, rgbFace)
self._path_collection_id += 1
def draw_tex(self, gc, x, y, s, prop, angle, ismath='TeX!'):
"""
draw a Text instance
"""
w, h, bl = self.get_text_width_height_descent(s, prop, ismath)
fontsize = prop.get_size_in_points()
        corr = 0  # w/2*(fontsize-10)/10
pos = _nums_to_str(x-corr, y)
thetext = 'psmarker%d' % self.textcnt
color = '%1.3f,%1.3f,%1.3f'% gc.get_rgb()[:3]
fontcmd = {'sans-serif' : r'{\sffamily %s}',
'monospace' : r'{\ttfamily %s}'}.get(
rcParams['font.family'], r'{\rmfamily %s}')
s = fontcmd % s
tex = r'\color[rgb]{%s} %s' % (color, s)
self.psfrag.append(r'\psfrag{%s}[bl][bl][1][%f]{\fontsize{%f}{%f}%s}'%(thetext, angle, fontsize, fontsize*1.25, tex))
ps = """\
gsave
%(pos)s moveto
(%(thetext)s)
show
grestore
""" % locals()
self._pswriter.write(ps)
self.textcnt += 1
def draw_text(self, gc, x, y, s, prop, angle, ismath):
"""
draw a Text instance
"""
# local to avoid repeated attribute lookups
write = self._pswriter.write
if debugPS:
write("% text\n")
        if ismath == 'TeX':
            return self.draw_tex(gc, x, y, s, prop, angle)
elif ismath:
return self.draw_mathtext(gc, x, y, s, prop, angle)
elif isinstance(s, unicode):
return self.draw_unicode(gc, x, y, s, prop, angle)
elif rcParams['ps.useafm']:
font = self._get_font_afm(prop)
l,b,w,h = font.get_str_bbox(s)
fontsize = prop.get_size_in_points()
l *= 0.001*fontsize
b *= 0.001*fontsize
w *= 0.001*fontsize
h *= 0.001*fontsize
if angle==90: l,b = -b, l # todo generalize for arb rotations
pos = _nums_to_str(x-l, y-b)
thetext = '(%s)' % s
fontname = font.get_fontname()
fontsize = prop.get_size_in_points()
rotate = '%1.1f rotate' % angle
setcolor = '%1.3f %1.3f %1.3f setrgbcolor' % gc.get_rgb()[:3]
#h = 0
ps = """\
gsave
/%(fontname)s findfont
%(fontsize)s scalefont
setfont
%(pos)s moveto
%(rotate)s
%(thetext)s
%(setcolor)s
show
grestore
""" % locals()
self._draw_ps(ps, gc, None)
else:
font = self._get_font_ttf(prop)
font.set_text(s, 0, flags=LOAD_NO_HINTING)
self.track_characters(font, s)
self.set_color(*gc.get_rgb())
self.set_font(font.get_sfnt()[(1,0,0,6)], prop.get_size_in_points())
write("%s m\n"%_nums_to_str(x,y))
if angle:
write("gsave\n")
write("%s rotate\n"%_num_to_str(angle))
descent = font.get_descent() / 64.0
if descent:
write("0 %s rmoveto\n"%_num_to_str(descent))
write("(%s) show\n"%quote_ps_string(s))
if angle:
write("grestore\n")
def new_gc(self):
return GraphicsContextPS()
def draw_unicode(self, gc, x, y, s, prop, angle):
"""draw a unicode string. ps doesn't have unicode support, so
we have to do this the hard way
"""
if rcParams['ps.useafm']:
self.set_color(*gc.get_rgb())
font = self._get_font_afm(prop)
fontname = font.get_fontname()
fontsize = prop.get_size_in_points()
scale = 0.001*fontsize
thisx = 0
thisy = font.get_str_bbox_and_descent(s)[4] * scale
last_name = None
lines = []
for c in s:
name = uni2type1.get(ord(c), 'question')
try:
width = font.get_width_from_char_name(name)
except KeyError:
name = 'question'
width = font.get_width_char('?')
if last_name is not None:
kern = font.get_kern_dist_from_name(last_name, name)
else:
kern = 0
last_name = name
thisx += kern * scale
lines.append('%f %f m /%s glyphshow'%(thisx, thisy, name))
thisx += width * scale
thetext = "\n".join(lines)
ps = """\
gsave
/%(fontname)s findfont
%(fontsize)s scalefont
setfont
%(x)f %(y)f translate
%(angle)f rotate
%(thetext)s
grestore
""" % locals()
self._pswriter.write(ps)
else:
font = self._get_font_ttf(prop)
font.set_text(s, 0, flags=LOAD_NO_HINTING)
self.track_characters(font, s)
self.set_color(*gc.get_rgb())
self.set_font(font.get_sfnt()[(1,0,0,6)], prop.get_size_in_points())
cmap = font.get_charmap()
lastgind = None
#print 'text', s
lines = []
thisx = 0
thisy = font.get_descent() / 64.0
for c in s:
ccode = ord(c)
gind = cmap.get(ccode)
if gind is None:
ccode = ord('?')
name = '.notdef'
gind = 0
else:
name = font.get_glyph_name(gind)
glyph = font.load_char(ccode, flags=LOAD_NO_HINTING)
if lastgind is not None:
kern = font.get_kerning(lastgind, gind, KERNING_DEFAULT)
else:
kern = 0
lastgind = gind
thisx += kern/64.0
lines.append('%f %f m /%s glyphshow'%(thisx, thisy, name))
thisx += glyph.linearHoriAdvance/65536.0
thetext = '\n'.join(lines)
ps = """gsave
%(x)f %(y)f translate
%(angle)f rotate
%(thetext)s
grestore
""" % locals()
self._pswriter.write(ps)
def draw_mathtext(self, gc,
x, y, s, prop, angle):
"""
Draw the math text using matplotlib.mathtext
"""
if debugPS:
self._pswriter.write("% mathtext\n")
width, height, descent, pswriter, used_characters = \
self.mathtext_parser.parse(s, 72, prop)
self.merge_used_characters(used_characters)
self.set_color(*gc.get_rgb())
thetext = pswriter.getvalue()
ps = """gsave
%(x)f %(y)f translate
%(angle)f rotate
%(thetext)s
grestore
""" % locals()
self._pswriter.write(ps)
def _draw_ps(self, ps, gc, rgbFace, fill=True, stroke=True, command=None):
"""
        Emit the PostScript snippet 'ps' with all the attributes from 'gc'
applied. 'ps' must consist of PostScript commands to construct a path.
The fill and/or stroke kwargs can be set to False if the
'ps' string already includes filling and/or stroking, in
which case _draw_ps is just supplying properties and
clipping.
"""
# local variable eliminates all repeated attribute lookups
write = self._pswriter.write
if debugPS and command:
write("% "+command+"\n")
mightstroke = (gc.get_linewidth() > 0.0 and
(len(gc.get_rgb()) <= 3 or gc.get_rgb()[3] != 0.0))
stroke = stroke and mightstroke
fill = (fill and rgbFace is not None and
(len(rgbFace) <= 3 or rgbFace[3] != 0.0))
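        # Stroke only when the line width is nonzero and the stroke color is
        # not fully transparent; fill only when a face color was supplied
        # with nonzero alpha.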
if mightstroke:
self.set_linewidth(gc.get_linewidth())
jint = gc.get_joinstyle()
self.set_linejoin(jint)
cint = gc.get_capstyle()
self.set_linecap(cint)
self.set_linedash(*gc.get_dashes())
self.set_color(*gc.get_rgb()[:3])
write('gsave\n')
cliprect = gc.get_clip_rectangle()
if cliprect:
x,y,w,h=cliprect.bounds
write('%1.4g %1.4g %1.4g %1.4g clipbox\n' % (w,h,x,y))
clippath, clippath_trans = gc.get_clip_path()
if clippath:
id = self._get_clip_path(clippath, clippath_trans)
write('%s\n' % id)
# Jochen, is the strip necessary? - this could be a honking big string
write(ps.strip())
write("\n")
if fill:
if stroke:
write("gsave\n")
self.set_color(store=0, *rgbFace[:3])
write("fill\ngrestore\n")
else:
self.set_color(store=0, *rgbFace[:3])
write("fill\n")
hatch = gc.get_hatch()
if hatch:
self.set_hatch(hatch)
if stroke:
write("stroke\n")
write("grestore\n")
class GraphicsContextPS(GraphicsContextBase):
def get_capstyle(self):
return {'butt':0,
'round':1,
'projecting':2}[GraphicsContextBase.get_capstyle(self)]
def get_joinstyle(self):
return {'miter':0,
'round':1,
'bevel':2}[GraphicsContextBase.get_joinstyle(self)]
def new_figure_manager(num, *args, **kwargs):
FigureClass = kwargs.pop('FigureClass', Figure)
thisFig = FigureClass(*args, **kwargs)
canvas = FigureCanvasPS(thisFig)
manager = FigureManagerPS(canvas, num)
return manager
class FigureCanvasPS(FigureCanvasBase):
def draw(self):
pass
filetypes = {'ps' : 'Postscript',
'eps' : 'Encapsulated Postscript'}
def get_default_filetype(self):
return 'ps'
def print_ps(self, outfile, *args, **kwargs):
return self._print_ps(outfile, 'ps', *args, **kwargs)
def print_eps(self, outfile, *args, **kwargs):
return self._print_ps(outfile, 'eps', *args, **kwargs)
def _print_ps(self, outfile, format, *args, **kwargs):
papertype = kwargs.get("papertype", rcParams['ps.papersize'])
papertype = papertype.lower()
if papertype == 'auto':
pass
elif papertype not in papersize:
raise RuntimeError( '%s is not a valid papertype. Use one \
of %s'% (papertype, ', '.join( papersize.keys() )) )
orientation = kwargs.get("orientation", "portrait").lower()
if orientation == 'landscape': isLandscape = True
elif orientation == 'portrait': isLandscape = False
else: raise RuntimeError('Orientation must be "portrait" or "landscape"')
self.figure.set_dpi(72) # Override the dpi kwarg
imagedpi = kwargs.get("dpi", 72)
facecolor = kwargs.get("facecolor", "w")
edgecolor = kwargs.get("edgecolor", "w")
if rcParams['text.usetex']:
self._print_figure_tex(outfile, format, imagedpi, facecolor, edgecolor,
orientation, isLandscape, papertype)
else:
self._print_figure(outfile, format, imagedpi, facecolor, edgecolor,
orientation, isLandscape, papertype)
def _print_figure(self, outfile, format, dpi=72, facecolor='w', edgecolor='w',
orientation='portrait', isLandscape=False, papertype=None):
"""
Render the figure to hardcopy. Set the figure patch face and
edge colors. This is useful because some of the GUIs have a
gray figure face color background and you'll probably want to
override this on hardcopy
If outfile is a string, it is interpreted as a file name.
If the extension matches .ep* write encapsulated postscript,
otherwise write a stand-alone PostScript file.
If outfile is a file object, a stand-alone PostScript file is
written into this file object.
"""
isEPSF = format == 'eps'
passed_in_file_object = False
if is_string_like(outfile):
title = outfile
tmpfile = os.path.join(gettempdir(), md5(outfile).hexdigest())
elif is_writable_file_like(outfile):
title = None
tmpfile = os.path.join(gettempdir(), md5(str(hash(outfile))).hexdigest())
passed_in_file_object = True
else:
raise ValueError("outfile must be a path or a file-like object")
fh = file(tmpfile, 'w')
# find the appropriate papertype
width, height = self.figure.get_size_inches()
if papertype == 'auto':
if isLandscape: papertype = _get_papertype(height, width)
else: papertype = _get_papertype(width, height)
if isLandscape: paperHeight, paperWidth = papersize[papertype]
else: paperWidth, paperHeight = papersize[papertype]
if rcParams['ps.usedistiller'] and not papertype == 'auto':
# distillers will improperly clip eps files if the pagesize is
# too small
if width>paperWidth or height>paperHeight:
if isLandscape:
papertype = _get_papertype(height, width)
paperHeight, paperWidth = papersize[papertype]
else:
papertype = _get_papertype(width, height)
paperWidth, paperHeight = papersize[papertype]
# center the figure on the paper
xo = 72*0.5*(paperWidth - width)
yo = 72*0.5*(paperHeight - height)
l, b, w, h = self.figure.bbox.bounds
llx = xo
lly = yo
urx = llx + w
ury = lly + h
rotation = 0
if isLandscape:
llx, lly, urx, ury = lly, llx, ury, urx
xo, yo = 72*paperHeight - yo, xo
rotation = 90
bbox = (llx, lly, urx, ury)
# generate PostScript code for the figure and store it in a string
origfacecolor = self.figure.get_facecolor()
origedgecolor = self.figure.get_edgecolor()
self.figure.set_facecolor(facecolor)
self.figure.set_edgecolor(edgecolor)
self._pswriter = StringIO()
renderer = RendererPS(width, height, self._pswriter, imagedpi=dpi)
self.figure.draw(renderer)
self.figure.set_facecolor(origfacecolor)
self.figure.set_edgecolor(origedgecolor)
# write the PostScript headers
if isEPSF: print >>fh, "%!PS-Adobe-3.0 EPSF-3.0"
else: print >>fh, "%!PS-Adobe-3.0"
if title: print >>fh, "%%Title: "+title
print >>fh, ("%%Creator: matplotlib version "
+__version__+", http://matplotlib.sourceforge.net/")
print >>fh, "%%CreationDate: "+time.ctime(time.time())
print >>fh, "%%Orientation: " + orientation
if not isEPSF: print >>fh, "%%DocumentPaperSizes: "+papertype
print >>fh, "%%%%BoundingBox: %d %d %d %d" % bbox
if not isEPSF: print >>fh, "%%Pages: 1"
print >>fh, "%%EndComments"
Ndict = len(psDefs)
print >>fh, "%%BeginProlog"
if not rcParams['ps.useafm']:
Ndict += len(renderer.used_characters)
print >>fh, "/mpldict %d dict def"%Ndict
print >>fh, "mpldict begin"
for d in psDefs:
d=d.strip()
for l in d.split('\n'):
print >>fh, l.strip()
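        # Embed a subsetted copy of every font actually used in the figure
        # into the prolog (skipped when AFM/Type 1 fonts are in use).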
if not rcParams['ps.useafm']:
for font_filename, chars in renderer.used_characters.values():
if len(chars):
font = FT2Font(font_filename)
cmap = font.get_charmap()
glyph_ids = []
for c in chars:
gind = cmap.get(c) or 0
glyph_ids.append(gind)
# The ttf to ps (subsetting) support doesn't work for
# OpenType fonts that are Postscript inside (like the
# STIX fonts). This will simply turn that off to avoid
# errors.
if is_opentype_cff_font(font_filename):
raise RuntimeError("OpenType CFF fonts can not be saved using the internal Postscript backend at this time.\nConsider using the Cairo backend.")
else:
fonttype = rcParams['ps.fonttype']
convert_ttf_to_ps(font_filename, fh, rcParams['ps.fonttype'], glyph_ids)
print >>fh, "end"
print >>fh, "%%EndProlog"
if not isEPSF: print >>fh, "%%Page: 1 1"
print >>fh, "mpldict begin"
#print >>fh, "gsave"
print >>fh, "%s translate"%_nums_to_str(xo, yo)
if rotation: print >>fh, "%d rotate"%rotation
print >>fh, "%s clipbox"%_nums_to_str(width*72, height*72, 0, 0)
# write the figure
print >>fh, self._pswriter.getvalue()
# write the trailer
#print >>fh, "grestore"
print >>fh, "end"
print >>fh, "showpage"
if not isEPSF: print >>fh, "%%EOF"
fh.close()
if rcParams['ps.usedistiller'] == 'ghostscript':
gs_distill(tmpfile, isEPSF, ptype=papertype, bbox=bbox)
elif rcParams['ps.usedistiller'] == 'xpdf':
xpdf_distill(tmpfile, isEPSF, ptype=papertype, bbox=bbox)
if passed_in_file_object:
fh = file(tmpfile)
print >>outfile, fh.read()
else:
shutil.move(tmpfile, outfile)
def _print_figure_tex(self, outfile, format, dpi, facecolor, edgecolor,
orientation, isLandscape, papertype):
"""
        If text.usetex is True in rc, a temporary pair of tex/eps files
        is created to allow tex to manage the text layout via the PSFrag
        package. These files are processed to yield the final ps or eps file.
"""
isEPSF = format == 'eps'
title = outfile
# write to a temp file, we'll move it to outfile when done
tmpfile = os.path.join(gettempdir(), md5(outfile).hexdigest())
fh = file(tmpfile, 'w')
self.figure.dpi = 72 # ignore the dpi kwarg
width, height = self.figure.get_size_inches()
xo = 0
yo = 0
l, b, w, h = self.figure.bbox.bounds
llx = xo
lly = yo
urx = llx + w
ury = lly + h
bbox = (llx, lly, urx, ury)
# generate PostScript code for the figure and store it in a string
origfacecolor = self.figure.get_facecolor()
origedgecolor = self.figure.get_edgecolor()
self.figure.set_facecolor(facecolor)
self.figure.set_edgecolor(edgecolor)
self._pswriter = StringIO()
renderer = RendererPS(width, height, self._pswriter, imagedpi=dpi)
self.figure.draw(renderer)
self.figure.set_facecolor(origfacecolor)
self.figure.set_edgecolor(origedgecolor)
# write the Encapsulated PostScript headers
print >>fh, "%!PS-Adobe-3.0 EPSF-3.0"
if title: print >>fh, "%%Title: "+title
print >>fh, ("%%Creator: matplotlib version "
+__version__+", http://matplotlib.sourceforge.net/")
print >>fh, "%%CreationDate: "+time.ctime(time.time())
print >>fh, "%%%%BoundingBox: %d %d %d %d" % bbox
print >>fh, "%%EndComments"
Ndict = len(psDefs)
print >>fh, "%%BeginProlog"
print >>fh, "/mpldict %d dict def"%Ndict
print >>fh, "mpldict begin"
for d in psDefs:
d=d.strip()
for l in d.split('\n'):
print >>fh, l.strip()
print >>fh, "end"
print >>fh, "%%EndProlog"
print >>fh, "mpldict begin"
#print >>fh, "gsave"
print >>fh, "%s translate"%_nums_to_str(xo, yo)
print >>fh, "%s clipbox"%_nums_to_str(width*72, height*72, 0, 0)
# write the figure
print >>fh, self._pswriter.getvalue()
# write the trailer
#print >>fh, "grestore"
print >>fh, "end"
print >>fh, "showpage"
fh.close()
if isLandscape: # now we are ready to rotate
isLandscape = True
width, height = height, width
bbox = (lly, llx, ury, urx)
temp_papertype = _get_papertype(width, height)
if papertype=='auto':
papertype = temp_papertype
paperWidth, paperHeight = papersize[temp_papertype]
else:
paperWidth, paperHeight = papersize[papertype]
if (width>paperWidth or height>paperHeight) and isEPSF:
paperWidth, paperHeight = papersize[temp_papertype]
verbose.report('Your figure is too big to fit on %s paper. %s \
paper will be used to prevent clipping.'%(papertype, temp_papertype), 'helpful')
texmanager = renderer.get_texmanager()
font_preamble = texmanager.get_font_preamble()
custom_preamble = texmanager.get_custom_preamble()
convert_psfrags(tmpfile, renderer.psfrag, font_preamble,
custom_preamble, paperWidth, paperHeight,
orientation)
if rcParams['ps.usedistiller'] == 'ghostscript':
gs_distill(tmpfile, isEPSF, ptype=papertype, bbox=bbox)
elif rcParams['ps.usedistiller'] == 'xpdf':
xpdf_distill(tmpfile, isEPSF, ptype=papertype, bbox=bbox)
elif rcParams['text.usetex']:
if False: pass # for debugging
else: gs_distill(tmpfile, isEPSF, ptype=papertype, bbox=bbox)
if isinstance(outfile, file):
fh = file(tmpfile)
print >>outfile, fh.read()
else: shutil.move(tmpfile, outfile)
def convert_psfrags(tmpfile, psfrags, font_preamble, custom_preamble,
paperWidth, paperHeight, orientation):
"""
When we want to use the LaTeX backend with postscript, we write PSFrag tags
to a temporary postscript file, each one marking a position for LaTeX to
render some text. convert_psfrags generates a LaTeX document containing the
commands to convert those tags to text. LaTeX/dvips produces the postscript
file that includes the actual text.
"""
tmpdir = os.path.split(tmpfile)[0]
epsfile = tmpfile+'.eps'
shutil.move(tmpfile, epsfile)
latexfile = tmpfile+'.tex'
outfile = tmpfile+'.output'
latexh = file(latexfile, 'w')
dvifile = tmpfile+'.dvi'
psfile = tmpfile+'.ps'
if orientation=='landscape': angle = 90
else: angle = 0
if rcParams['text.latex.unicode']:
unicode_preamble = """\usepackage{ucs}
\usepackage[utf8x]{inputenc}"""
else:
unicode_preamble = ''
s = r"""\documentclass{article}
%s
%s
%s
\usepackage[dvips, papersize={%sin,%sin}, body={%sin,%sin}, margin={0in,0in}]{geometry}
\usepackage{psfrag}
\usepackage[dvips]{graphicx}
\usepackage{color}
\pagestyle{empty}
\begin{document}
\begin{figure}
\centering
\leavevmode
%s
\includegraphics*[angle=%s]{%s}
\end{figure}
\end{document}
"""% (font_preamble, unicode_preamble, custom_preamble, paperWidth, paperHeight,
paperWidth, paperHeight,
'\n'.join(psfrags), angle, os.path.split(epsfile)[-1])
if rcParams['text.latex.unicode']:
latexh.write(s.encode('utf8'))
else:
try:
latexh.write(s)
except UnicodeEncodeError, err:
verbose.report("You are using unicode and latex, but have "
"not enabled the matplotlib 'text.latex.unicode' "
"rcParam.", 'helpful')
raise
latexh.close()
    # the split drive part of the command is necessary for windows users with
    # multiple drives
if sys.platform == 'win32': precmd = '%s &&'% os.path.splitdrive(tmpdir)[0]
else: precmd = ''
command = '%s cd "%s" && latex -interaction=nonstopmode "%s" > "%s"'\
%(precmd, tmpdir, latexfile, outfile)
verbose.report(command, 'debug')
exit_status = os.system(command)
fh = file(outfile)
if exit_status:
raise RuntimeError('LaTeX was not able to process your file:\
\nHere is the full report generated by LaTeX: \n\n%s'% fh.read())
else: verbose.report(fh.read(), 'debug')
fh.close()
os.remove(outfile)
command = '%s cd "%s" && dvips -q -R0 -o "%s" "%s" > "%s"'%(precmd, tmpdir,
os.path.split(psfile)[-1], os.path.split(dvifile)[-1], outfile)
verbose.report(command, 'debug')
exit_status = os.system(command)
fh = file(outfile)
if exit_status: raise RuntimeError('dvips was not able to \
process the following file:\n%s\nHere is the full report generated by dvips: \
\n\n'% dvifile + fh.read())
else: verbose.report(fh.read(), 'debug')
fh.close()
os.remove(outfile)
os.remove(epsfile)
shutil.move(psfile, tmpfile)
if not debugPS:
for fname in glob.glob(tmpfile+'.*'):
os.remove(fname)
def gs_distill(tmpfile, eps=False, ptype='letter', bbox=None):
"""
Use ghostscript's pswrite or epswrite device to distill a file.
This yields smaller files without illegal encapsulated postscript
operators. The output is low-level, converting text to outlines.
"""
paper = '-sPAPERSIZE=%s'% ptype
psfile = tmpfile + '.ps'
outfile = tmpfile + '.output'
dpi = rcParams['ps.distiller.res']
if sys.platform == 'win32': gs_exe = 'gswin32c'
else: gs_exe = 'gs'
command = '%s -dBATCH -dNOPAUSE -r%d -sDEVICE=pswrite %s -sOutputFile="%s" \
"%s" > "%s"'% (gs_exe, dpi, paper, psfile, tmpfile, outfile)
verbose.report(command, 'debug')
exit_status = os.system(command)
fh = file(outfile)
if exit_status: raise RuntimeError('ghostscript was not able to process \
your image.\nHere is the full report generated by ghostscript:\n\n' + fh.read())
else: verbose.report(fh.read(), 'debug')
fh.close()
os.remove(outfile)
os.remove(tmpfile)
shutil.move(psfile, tmpfile)
if eps:
pstoeps(tmpfile, bbox)
def xpdf_distill(tmpfile, eps=False, ptype='letter', bbox=None):
"""
Use ghostscript's ps2pdf and xpdf's/poppler's pdftops to distill a file.
This yields smaller files without illegal encapsulated postscript
operators. This distiller is preferred, generating high-level postscript
output that treats text as text.
"""
pdffile = tmpfile + '.pdf'
psfile = tmpfile + '.ps'
outfile = tmpfile + '.output'
command = 'ps2pdf -dAutoFilterColorImages=false \
-sColorImageFilter=FlateEncode -sPAPERSIZE=%s "%s" "%s" > "%s"'% \
(ptype, tmpfile, pdffile, outfile)
if sys.platform == 'win32': command = command.replace('=', '#')
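    # The Windows ps2pdf.bat wrapper expects '#' instead of '=' as the
    # option separator, hence the substitution above.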
verbose.report(command, 'debug')
exit_status = os.system(command)
fh = file(outfile)
if exit_status: raise RuntimeError('ps2pdf was not able to process your \
image.\nHere is the report generated by ghostscript:\n\n' + fh.read())
else: verbose.report(fh.read(), 'debug')
fh.close()
os.remove(outfile)
command = 'pdftops -paper match -level2 "%s" "%s" > "%s"'% \
(pdffile, psfile, outfile)
verbose.report(command, 'debug')
exit_status = os.system(command)
fh = file(outfile)
if exit_status: raise RuntimeError('pdftops was not able to process your \
image.\nHere is the full report generated by pdftops: \n\n' + fh.read())
else: verbose.report(fh.read(), 'debug')
fh.close()
os.remove(outfile)
os.remove(tmpfile)
shutil.move(psfile, tmpfile)
if eps:
pstoeps(tmpfile, bbox)
for fname in glob.glob(tmpfile+'.*'):
os.remove(fname)
def get_bbox(tmpfile, bbox):
"""
Use ghostscript's bbox device to find the center of the bounding box. Return
an appropriately sized bbox centered around that point. A bit of a hack.
"""
outfile = tmpfile + '.output'
if sys.platform == 'win32': gs_exe = 'gswin32c'
else: gs_exe = 'gs'
command = '%s -dBATCH -dNOPAUSE -sDEVICE=bbox "%s"' %\
(gs_exe, tmpfile)
verbose.report(command, 'debug')
stdin, stdout, stderr = os.popen3(command)
verbose.report(stdout.read(), 'debug-annoying')
bbox_info = stderr.read()
verbose.report(bbox_info, 'helpful')
bbox_found = re.search('%%HiResBoundingBox: .*', bbox_info)
if bbox_found:
bbox_info = bbox_found.group()
else:
raise RuntimeError('Ghostscript was not able to extract a bounding box.\
Here is the Ghostscript output:\n\n%s'% bbox_info)
l, b, r, t = [float(i) for i in bbox_info.split()[-4:]]
# this is a hack to deal with the fact that ghostscript does not return the
# intended bbox, but a tight bbox. For now, we just center the ink in the
# intended bbox. This is not ideal, users may intend the ink to not be
# centered.
if bbox is None:
l, b, r, t = (l-1, b-1, r+1, t+1)
else:
x = (l+r)/2
y = (b+t)/2
dx = (bbox[2]-bbox[0])/2
dy = (bbox[3]-bbox[1])/2
l,b,r,t = (x-dx, y-dy, x+dx, y+dy)
bbox_info = '%%%%BoundingBox: %d %d %d %d' % (l, b, npy.ceil(r), npy.ceil(t))
hires_bbox_info = '%%%%HiResBoundingBox: %.6f %.6f %.6f %.6f' % (l, b, r, t)
return '\n'.join([bbox_info, hires_bbox_info])
def pstoeps(tmpfile, bbox):
"""
Convert the postscript to encapsulated postscript.
"""
bbox_info = get_bbox(tmpfile, bbox)
epsfile = tmpfile + '.eps'
epsh = file(epsfile, 'w')
tmph = file(tmpfile)
line = tmph.readline()
# Modify the header:
while line:
if line.startswith('%!PS'):
print >>epsh, "%!PS-Adobe-3.0 EPSF-3.0"
print >>epsh, bbox_info
elif line.startswith('%%EndComments'):
epsh.write(line)
print >>epsh, '%%BeginProlog'
print >>epsh, 'save'
print >>epsh, 'countdictstack'
print >>epsh, 'mark'
print >>epsh, 'newpath'
print >>epsh, '/showpage {} def'
print >>epsh, '/setpagedevice {pop} def'
print >>epsh, '%%EndProlog'
            print >>epsh, '%%Page: 1 1'
break
elif line.startswith('%%Bound') \
or line.startswith('%%HiResBound') \
or line.startswith('%%Pages'):
pass
else:
epsh.write(line)
line = tmph.readline()
# Now rewrite the rest of the file, and modify the trailer.
# This is done in a second loop such that the header of the embedded
# eps file is not modified.
line = tmph.readline()
while line:
if line.startswith('%%Trailer'):
print >>epsh, '%%Trailer'
print >>epsh, 'cleartomark'
print >>epsh, 'countdictstack'
print >>epsh, 'exch sub { end } repeat'
print >>epsh, 'restore'
if rcParams['ps.usedistiller'] == 'xpdf':
# remove extraneous "end" operator:
line = tmph.readline()
else:
epsh.write(line)
line = tmph.readline()
tmph.close()
epsh.close()
os.remove(tmpfile)
shutil.move(epsfile, tmpfile)
class FigureManagerPS(FigureManagerBase):
pass
FigureManager = FigureManagerPS
# The following Python dictionary psDefs contains the entries for the
# PostScript dictionary mpldict. This dictionary implements most of
# the matplotlib primitives and some abbreviations.
#
# References:
# http://www.adobe.com/products/postscript/pdfs/PLRM.pdf
# http://www.mactech.com/articles/mactech/Vol.09/09.04/PostscriptTutorial/
# http://www.math.ubc.ca/people/faculty/cass/graphics/text/www/
#
# The usage comments use the notation of the operator summary
# in the PostScript Language reference manual.
psDefs = [
# x y *m* -
"/m { moveto } bind def",
# x y *l* -
"/l { lineto } bind def",
# x y *r* -
"/r { rlineto } bind def",
# x1 y1 x2 y2 x y *c* -
"/c { curveto } bind def",
# *closepath* -
"/cl { closepath } bind def",
# w h x y *box* -
"""/box {
m
1 index 0 r
0 exch r
neg 0 r
cl
} bind def""",
# w h x y *clipbox* -
"""/clipbox {
box
clip
newpath
} bind def""",
]
| agpl-3.0 |
louispotok/pandas | pandas/tests/test_base.py | 2 | 46174 | # -*- coding: utf-8 -*-
from __future__ import print_function
import re
import sys
from datetime import datetime, timedelta
import pytest
import numpy as np
import pandas as pd
import pandas.compat as compat
from pandas.core.dtypes.common import (
is_object_dtype, is_datetimetz, is_datetime64_dtype,
needs_i8_conversion)
import pandas.util.testing as tm
from pandas import (Series, Index, DatetimeIndex, TimedeltaIndex,
PeriodIndex, Timedelta, IntervalIndex, Interval,
CategoricalIndex, Timestamp)
from pandas.compat import StringIO, PYPY, long
from pandas.compat.numpy import np_array_datetime64_compat
from pandas.core.accessor import PandasDelegate
from pandas.core.base import PandasObject, NoNewAttributesMixin
from pandas.core.indexes.datetimelike import DatetimeIndexOpsMixin
from pandas._libs.tslib import iNaT
class CheckStringMixin(object):
def test_string_methods_dont_fail(self):
repr(self.container)
str(self.container)
bytes(self.container)
if not compat.PY3:
unicode(self.container) # noqa
def test_tricky_container(self):
if not hasattr(self, 'unicode_container'):
pytest.skip('Need unicode_container to test with this')
repr(self.unicode_container)
str(self.unicode_container)
bytes(self.unicode_container)
if not compat.PY3:
unicode(self.unicode_container) # noqa
class CheckImmutable(object):
mutable_regex = re.compile('does not support mutable operations')
def check_mutable_error(self, *args, **kwargs):
# Pass whatever function you normally would to assert_raises_regex
# (after the Exception kind).
tm.assert_raises_regex(
TypeError, self.mutable_regex, *args, **kwargs)
def test_no_mutable_funcs(self):
def setitem():
self.container[0] = 5
self.check_mutable_error(setitem)
def setslice():
self.container[1:2] = 3
self.check_mutable_error(setslice)
def delitem():
del self.container[0]
self.check_mutable_error(delitem)
def delslice():
del self.container[0:3]
self.check_mutable_error(delslice)
mutable_methods = getattr(self, "mutable_methods", [])
for meth in mutable_methods:
self.check_mutable_error(getattr(self.container, meth))
def test_slicing_maintains_type(self):
result = self.container[1:2]
expected = self.lst[1:2]
self.check_result(result, expected)
def check_result(self, result, expected, klass=None):
klass = klass or self.klass
assert isinstance(result, klass)
assert result == expected
class TestPandasDelegate(object):
class Delegator(object):
_properties = ['foo']
_methods = ['bar']
def _set_foo(self, value):
self.foo = value
def _get_foo(self):
return self.foo
foo = property(_get_foo, _set_foo, doc="foo property")
def bar(self, *args, **kwargs):
""" a test bar method """
pass
class Delegate(PandasDelegate, PandasObject):
def __init__(self, obj):
self.obj = obj
def setup_method(self, method):
pass
def test_invalid_delegation(self):
# these show that in order for the delegation to work
# the _delegate_* methods need to be overridden to not raise
# a TypeError
self.Delegate._add_delegate_accessors(
delegate=self.Delegator,
accessors=self.Delegator._properties,
typ='property'
)
self.Delegate._add_delegate_accessors(
delegate=self.Delegator,
accessors=self.Delegator._methods,
typ='method'
)
delegate = self.Delegate(self.Delegator())
def f():
delegate.foo
pytest.raises(TypeError, f)
def f():
delegate.foo = 5
pytest.raises(TypeError, f)
def f():
delegate.foo()
pytest.raises(TypeError, f)
@pytest.mark.skipif(PYPY, reason="not relevant for PyPy")
def test_memory_usage(self):
# Delegate does not implement memory_usage.
# Check that we fall back to in-built `__sizeof__`
# GH 12924
delegate = self.Delegate(self.Delegator())
sys.getsizeof(delegate)
class Ops(object):
def _allow_na_ops(self, obj):
"""Whether to skip test cases including NaN"""
if (isinstance(obj, Index) and
(obj.is_boolean() or not obj._can_hold_na)):
# don't test boolean / int64 index
return False
return True
def setup_method(self, method):
self.bool_index = tm.makeBoolIndex(10, name='a')
self.int_index = tm.makeIntIndex(10, name='a')
self.float_index = tm.makeFloatIndex(10, name='a')
self.dt_index = tm.makeDateIndex(10, name='a')
self.dt_tz_index = tm.makeDateIndex(10, name='a').tz_localize(
tz='US/Eastern')
self.period_index = tm.makePeriodIndex(10, name='a')
self.string_index = tm.makeStringIndex(10, name='a')
self.unicode_index = tm.makeUnicodeIndex(10, name='a')
arr = np.random.randn(10)
self.int_series = Series(arr, index=self.int_index, name='a')
self.float_series = Series(arr, index=self.float_index, name='a')
self.dt_series = Series(arr, index=self.dt_index, name='a')
self.dt_tz_series = self.dt_tz_index.to_series(keep_tz=True)
self.period_series = Series(arr, index=self.period_index, name='a')
self.string_series = Series(arr, index=self.string_index, name='a')
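        # Collect every "<type>_index" / "<type>_series" attribute defined
        # above that actually exists; these drive most checks below.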
types = ['bool', 'int', 'float', 'dt', 'dt_tz', 'period', 'string',
'unicode']
fmts = ["{0}_{1}".format(t, f)
for t in types for f in ['index', 'series']]
self.objs = [getattr(self, f)
for f in fmts if getattr(self, f, None) is not None]
def check_ops_properties(self, props, filter=None, ignore_failures=False):
for op in props:
for o in self.is_valid_objs:
# if a filter, skip if it doesn't match
if filter is not None:
filt = o.index if isinstance(o, Series) else o
if not filter(filt):
continue
try:
if isinstance(o, Series):
expected = Series(
getattr(o.index, op), index=o.index, name='a')
else:
expected = getattr(o, op)
except (AttributeError):
if ignore_failures:
continue
result = getattr(o, op)
                # these could be Series, arrays or scalars
if isinstance(result, Series) and isinstance(expected, Series):
tm.assert_series_equal(result, expected)
elif isinstance(result, Index) and isinstance(expected, Index):
tm.assert_index_equal(result, expected)
elif isinstance(result, np.ndarray) and isinstance(expected,
np.ndarray):
tm.assert_numpy_array_equal(result, expected)
else:
assert result == expected
# freq raises AttributeError on an Int64Index because its not
# defined we mostly care about Series here anyhow
if not ignore_failures:
for o in self.not_valid_objs:
# an object that is datetimelike will raise a TypeError,
# otherwise an AttributeError
if issubclass(type(o), DatetimeIndexOpsMixin):
pytest.raises(TypeError, lambda: getattr(o, op))
else:
pytest.raises(AttributeError,
lambda: getattr(o, op))
def test_binary_ops_docs(self):
from pandas import DataFrame, Panel
op_map = {'add': '+',
'sub': '-',
'mul': '*',
'mod': '%',
'pow': '**',
'truediv': '/',
'floordiv': '//'}
for op_name in ['add', 'sub', 'mul', 'mod', 'pow', 'truediv',
'floordiv']:
for klass in [Series, DataFrame, Panel]:
operand1 = klass.__name__.lower()
operand2 = 'other'
op = op_map[op_name]
expected_str = ' '.join([operand1, op, operand2])
assert expected_str in getattr(klass, op_name).__doc__
# reverse version of the binary ops
expected_str = ' '.join([operand2, op, operand1])
assert expected_str in getattr(klass, 'r' + op_name).__doc__
class TestIndexOps(Ops):
def setup_method(self, method):
super(TestIndexOps, self).setup_method(method)
self.is_valid_objs = self.objs
self.not_valid_objs = []
def test_none_comparison(self):
# bug brought up by #1079
# changed from TypeError in 0.17.0
for o in self.is_valid_objs:
if isinstance(o, Series):
o[0] = np.nan
# noinspection PyComparisonWithNone
result = o == None # noqa
assert not result.iat[0]
assert not result.iat[1]
# noinspection PyComparisonWithNone
result = o != None # noqa
assert result.iat[0]
assert result.iat[1]
result = None == o # noqa
assert not result.iat[0]
assert not result.iat[1]
# this fails for numpy < 1.9
# and oddly for *some* platforms
# result = None != o # noqa
# assert result.iat[0]
# assert result.iat[1]
if (is_datetime64_dtype(o) or is_datetimetz(o)):
# Following DatetimeIndex (and Timestamp) convention,
# inequality comparisons with Series[datetime64] raise
with pytest.raises(TypeError):
None > o
with pytest.raises(TypeError):
o > None
else:
result = None > o
assert not result.iat[0]
assert not result.iat[1]
result = o < None
assert not result.iat[0]
assert not result.iat[1]
def test_ndarray_compat_properties(self):
for o in self.objs:
            # Check that the ndarray-compatible properties exist on each object.
for p in ['shape', 'dtype', 'T', 'nbytes']:
assert getattr(o, p, None) is not None
# deprecated properties
for p in ['flags', 'strides', 'itemsize']:
with tm.assert_produces_warning(FutureWarning):
assert getattr(o, p, None) is not None
with tm.assert_produces_warning(FutureWarning):
assert hasattr(o, 'base')
# If we have a datetime-like dtype then needs a view to work
# but the user is responsible for that
try:
with tm.assert_produces_warning(FutureWarning):
assert o.data is not None
except ValueError:
pass
with pytest.raises(ValueError):
o.item() # len > 1
assert o.ndim == 1
assert o.size == len(o)
assert Index([1]).item() == 1
assert Series([1]).item() == 1
def test_ops(self):
for op in ['max', 'min']:
for o in self.objs:
result = getattr(o, op)()
if not isinstance(o, PeriodIndex):
expected = getattr(o.values, op)()
else:
expected = pd.Period(
ordinal=getattr(o._ndarray_values, op)(),
freq=o.freq)
try:
assert result == expected
except TypeError:
# comparing tz-aware series with np.array results in
# TypeError
expected = expected.astype('M8[ns]').astype('int64')
assert result.value == expected
def test_nanops(self):
# GH 7261
for op in ['max', 'min']:
for klass in [Index, Series]:
obj = klass([np.nan, 2.0])
assert getattr(obj, op)() == 2.0
obj = klass([np.nan])
assert pd.isna(getattr(obj, op)())
obj = klass([])
assert pd.isna(getattr(obj, op)())
obj = klass([pd.NaT, datetime(2011, 11, 1)])
# check DatetimeIndex monotonic path
assert getattr(obj, op)() == datetime(2011, 11, 1)
obj = klass([pd.NaT, datetime(2011, 11, 1), pd.NaT])
# check DatetimeIndex non-monotonic path
                assert getattr(obj, op)() == datetime(2011, 11, 1)
# argmin/max
obj = Index(np.arange(5, dtype='int64'))
assert obj.argmin() == 0
assert obj.argmax() == 4
obj = Index([np.nan, 1, np.nan, 2])
assert obj.argmin() == 1
assert obj.argmax() == 3
obj = Index([np.nan])
assert obj.argmin() == -1
assert obj.argmax() == -1
obj = Index([pd.NaT, datetime(2011, 11, 1), datetime(2011, 11, 2),
pd.NaT])
assert obj.argmin() == 1
assert obj.argmax() == 2
obj = Index([pd.NaT])
assert obj.argmin() == -1
assert obj.argmax() == -1
def test_value_counts_unique_nunique(self):
for orig in self.objs:
o = orig.copy()
klass = type(o)
values = o._values
if isinstance(values, Index):
# reset name not to affect latter process
values.name = None
# create repeated values, 'n'th element is repeated by n+1 times
# skip boolean, because it only has 2 values at most
if isinstance(o, Index) and o.is_boolean():
continue
elif isinstance(o, Index):
expected_index = Index(o[::-1])
expected_index.name = None
o = o.repeat(range(1, len(o) + 1))
o.name = 'a'
else:
expected_index = Index(values[::-1])
idx = o.index.repeat(range(1, len(o) + 1))
rep = np.repeat(values, range(1, len(o) + 1))
o = klass(rep, index=idx, name='a')
# check values has the same dtype as the original
assert o.dtype == orig.dtype
expected_s = Series(range(10, 0, -1), index=expected_index,
dtype='int64', name='a')
result = o.value_counts()
tm.assert_series_equal(result, expected_s)
assert result.index.name is None
assert result.name == 'a'
result = o.unique()
if isinstance(o, Index):
assert isinstance(result, o.__class__)
tm.assert_index_equal(result, orig)
elif is_datetimetz(o):
# datetimetz Series returns array of Timestamp
assert result[0] == orig[0]
for r in result:
assert isinstance(r, Timestamp)
tm.assert_numpy_array_equal(result,
orig._values.astype(object).values)
else:
tm.assert_numpy_array_equal(result, orig.values)
assert o.nunique() == len(np.unique(o.values))
def test_value_counts_unique_nunique_null(self):
for null_obj in [np.nan, None]:
for orig in self.objs:
o = orig.copy()
klass = type(o)
values = o._ndarray_values
if not self._allow_na_ops(o):
continue
# special assign to the numpy array
if is_datetimetz(o):
if isinstance(o, DatetimeIndex):
v = o.asi8
v[0:2] = iNaT
values = o._shallow_copy(v)
else:
o = o.copy()
o[0:2] = iNaT
values = o._values
elif needs_i8_conversion(o):
values[0:2] = iNaT
values = o._shallow_copy(values)
else:
values[0:2] = null_obj
# check values has the same dtype as the original
assert values.dtype == o.dtype
# create repeated values, 'n'th element is repeated by n+1
# times
if isinstance(o, (DatetimeIndex, PeriodIndex)):
expected_index = o.copy()
expected_index.name = None
# attach name to klass
o = klass(values.repeat(range(1, len(o) + 1)))
o.name = 'a'
else:
if is_datetimetz(o):
expected_index = orig._values._shallow_copy(values)
else:
expected_index = Index(values)
expected_index.name = None
o = o.repeat(range(1, len(o) + 1))
o.name = 'a'
# check values has the same dtype as the original
assert o.dtype == orig.dtype
# check values correctly have NaN
nanloc = np.zeros(len(o), dtype=np.bool)
nanloc[:3] = True
if isinstance(o, Index):
tm.assert_numpy_array_equal(pd.isna(o), nanloc)
else:
exp = Series(nanloc, o.index, name='a')
tm.assert_series_equal(pd.isna(o), exp)
expected_s_na = Series(list(range(10, 2, -1)) + [3],
index=expected_index[9:0:-1],
dtype='int64', name='a')
expected_s = Series(list(range(10, 2, -1)),
index=expected_index[9:1:-1],
dtype='int64', name='a')
result_s_na = o.value_counts(dropna=False)
tm.assert_series_equal(result_s_na, expected_s_na)
assert result_s_na.index.name is None
assert result_s_na.name == 'a'
result_s = o.value_counts()
tm.assert_series_equal(o.value_counts(), expected_s)
assert result_s.index.name is None
assert result_s.name == 'a'
result = o.unique()
if isinstance(o, Index):
tm.assert_index_equal(result,
Index(values[1:], name='a'))
elif is_datetimetz(o):
# unable to compare NaT / nan
vals = values[2:].astype(object).values
tm.assert_numpy_array_equal(result[1:], vals)
assert result[0] is pd.NaT
else:
tm.assert_numpy_array_equal(result[1:], values[2:])
assert pd.isna(result[0])
assert result.dtype == orig.dtype
assert o.nunique() == 8
assert o.nunique(dropna=False) == 9
def test_value_counts_inferred(self):
klasses = [Index, Series]
for klass in klasses:
s_values = ['a', 'b', 'b', 'b', 'b', 'c', 'd', 'd', 'a', 'a']
s = klass(s_values)
expected = Series([4, 3, 2, 1], index=['b', 'a', 'd', 'c'])
tm.assert_series_equal(s.value_counts(), expected)
if isinstance(s, Index):
exp = Index(np.unique(np.array(s_values, dtype=np.object_)))
tm.assert_index_equal(s.unique(), exp)
else:
exp = np.unique(np.array(s_values, dtype=np.object_))
tm.assert_numpy_array_equal(s.unique(), exp)
assert s.nunique() == 4
# don't sort, have to sort after the fact as not sorting is
# platform-dep
hist = s.value_counts(sort=False).sort_values()
expected = Series([3, 1, 4, 2], index=list('acbd')).sort_values()
tm.assert_series_equal(hist, expected)
# sort ascending
hist = s.value_counts(ascending=True)
expected = Series([1, 2, 3, 4], index=list('cdab'))
tm.assert_series_equal(hist, expected)
# relative histogram.
hist = s.value_counts(normalize=True)
expected = Series([.4, .3, .2, .1], index=['b', 'a', 'd', 'c'])
tm.assert_series_equal(hist, expected)
def test_value_counts_bins(self):
klasses = [Index, Series]
for klass in klasses:
s_values = ['a', 'b', 'b', 'b', 'b', 'c', 'd', 'd', 'a', 'a']
s = klass(s_values)
# bins
pytest.raises(TypeError, lambda bins: s.value_counts(bins=bins), 1)
s1 = Series([1, 1, 2, 3])
res1 = s1.value_counts(bins=1)
exp1 = Series({Interval(0.997, 3.0): 4})
tm.assert_series_equal(res1, exp1)
res1n = s1.value_counts(bins=1, normalize=True)
exp1n = Series({Interval(0.997, 3.0): 1.0})
tm.assert_series_equal(res1n, exp1n)
if isinstance(s1, Index):
tm.assert_index_equal(s1.unique(), Index([1, 2, 3]))
else:
exp = np.array([1, 2, 3], dtype=np.int64)
tm.assert_numpy_array_equal(s1.unique(), exp)
assert s1.nunique() == 3
            # these return the same result whether dropna is True or False
res4 = s1.value_counts(bins=4, dropna=True)
intervals = IntervalIndex.from_breaks([0.997, 1.5, 2.0, 2.5, 3.0])
exp4 = Series([2, 1, 1, 0], index=intervals.take([0, 3, 1, 2]))
tm.assert_series_equal(res4, exp4)
res4 = s1.value_counts(bins=4, dropna=False)
intervals = IntervalIndex.from_breaks([0.997, 1.5, 2.0, 2.5, 3.0])
exp4 = Series([2, 1, 1, 0], index=intervals.take([0, 3, 1, 2]))
tm.assert_series_equal(res4, exp4)
res4n = s1.value_counts(bins=4, normalize=True)
exp4n = Series([0.5, 0.25, 0.25, 0],
index=intervals.take([0, 3, 1, 2]))
tm.assert_series_equal(res4n, exp4n)
# handle NA's properly
s_values = ['a', 'b', 'b', 'b', np.nan, np.nan,
'd', 'd', 'a', 'a', 'b']
s = klass(s_values)
expected = Series([4, 3, 2], index=['b', 'a', 'd'])
tm.assert_series_equal(s.value_counts(), expected)
if isinstance(s, Index):
exp = Index(['a', 'b', np.nan, 'd'])
tm.assert_index_equal(s.unique(), exp)
else:
exp = np.array(['a', 'b', np.nan, 'd'], dtype=object)
tm.assert_numpy_array_equal(s.unique(), exp)
assert s.nunique() == 3
s = klass({})
expected = Series([], dtype=np.int64)
tm.assert_series_equal(s.value_counts(), expected,
check_index_type=False)
# returned dtype differs depending on original
if isinstance(s, Index):
tm.assert_index_equal(s.unique(), Index([]), exact=False)
else:
tm.assert_numpy_array_equal(s.unique(), np.array([]),
check_dtype=False)
assert s.nunique() == 0
@pytest.mark.parametrize('klass', [Index, Series])
def test_value_counts_datetime64(self, klass):
# GH 3002, datetime64[ns]
# don't test names though
txt = "\n".join(['xxyyzz20100101PIE', 'xxyyzz20100101GUM',
'xxyyzz20100101EGG', 'xxyyww20090101EGG',
'foofoo20080909PIE', 'foofoo20080909GUM'])
f = StringIO(txt)
df = pd.read_fwf(f, widths=[6, 8, 3],
names=["person_id", "dt", "food"],
parse_dates=["dt"])
s = klass(df['dt'].copy())
s.name = None
idx = pd.to_datetime(['2010-01-01 00:00:00Z',
'2008-09-09 00:00:00Z',
'2009-01-01 00:00:00Z'])
expected_s = Series([3, 2, 1], index=idx)
tm.assert_series_equal(s.value_counts(), expected_s)
expected = np_array_datetime64_compat(['2010-01-01 00:00:00Z',
'2009-01-01 00:00:00Z',
'2008-09-09 00:00:00Z'],
dtype='datetime64[ns]')
if isinstance(s, Index):
tm.assert_index_equal(s.unique(), DatetimeIndex(expected))
else:
tm.assert_numpy_array_equal(s.unique(), expected)
assert s.nunique() == 3
# with NaT
s = df['dt'].copy()
s = klass([v for v in s.values] + [pd.NaT])
result = s.value_counts()
assert result.index.dtype == 'datetime64[ns]'
tm.assert_series_equal(result, expected_s)
result = s.value_counts(dropna=False)
expected_s[pd.NaT] = 1
tm.assert_series_equal(result, expected_s)
unique = s.unique()
assert unique.dtype == 'datetime64[ns]'
# numpy_array_equal cannot compare pd.NaT
if isinstance(s, Index):
exp_idx = DatetimeIndex(expected.tolist() + [pd.NaT])
tm.assert_index_equal(unique, exp_idx)
else:
tm.assert_numpy_array_equal(unique[:3], expected)
assert pd.isna(unique[3])
assert s.nunique() == 3
assert s.nunique(dropna=False) == 4
# timedelta64[ns]
td = df.dt - df.dt + timedelta(1)
td = klass(td, name='dt')
result = td.value_counts()
expected_s = Series([6], index=[Timedelta('1day')], name='dt')
tm.assert_series_equal(result, expected_s)
expected = TimedeltaIndex(['1 days'], name='dt')
if isinstance(td, Index):
tm.assert_index_equal(td.unique(), expected)
else:
tm.assert_numpy_array_equal(td.unique(), expected.values)
td2 = timedelta(1) + (df.dt - df.dt)
td2 = klass(td2, name='dt')
result2 = td2.value_counts()
tm.assert_series_equal(result2, expected_s)
def test_factorize(self):
for orig in self.objs:
o = orig.copy()
if isinstance(o, Index) and o.is_boolean():
                exp_arr = np.array([0, 1] + [0] * 8, dtype=np.intp)
                exp_uniques = Index([False, True])
else:
exp_arr = np.array(range(len(o)), dtype=np.intp)
exp_uniques = o
labels, uniques = o.factorize()
tm.assert_numpy_array_equal(labels, exp_arr)
if isinstance(o, Series):
tm.assert_index_equal(uniques, Index(orig),
check_names=False)
else:
# factorize explicitly resets name
tm.assert_index_equal(uniques, exp_uniques,
check_names=False)
def test_factorize_repeated(self):
for orig in self.objs:
o = orig.copy()
# don't test boolean
if isinstance(o, Index) and o.is_boolean():
continue
# sort by value, and create duplicates
if isinstance(o, Series):
o = o.sort_values()
n = o.iloc[5:].append(o)
else:
indexer = o.argsort()
o = o.take(indexer)
n = o[5:].append(o)
exp_arr = np.array([5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9],
dtype=np.intp)
labels, uniques = n.factorize(sort=True)
tm.assert_numpy_array_equal(labels, exp_arr)
if isinstance(o, Series):
tm.assert_index_equal(uniques, Index(orig).sort_values(),
check_names=False)
else:
tm.assert_index_equal(uniques, o, check_names=False)
exp_arr = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4],
np.intp)
labels, uniques = n.factorize(sort=False)
tm.assert_numpy_array_equal(labels, exp_arr)
if isinstance(o, Series):
expected = Index(o.iloc[5:10].append(o.iloc[:5]))
tm.assert_index_equal(uniques, expected, check_names=False)
else:
expected = o[5:10].append(o[:5])
tm.assert_index_equal(uniques, expected, check_names=False)
def test_duplicated_drop_duplicates_index(self):
# GH 4060
for original in self.objs:
if isinstance(original, Index):
# special case
if original.is_boolean():
result = original.drop_duplicates()
expected = Index([False, True], name='a')
tm.assert_index_equal(result, expected)
continue
# original doesn't have duplicates
expected = np.array([False] * len(original), dtype=bool)
duplicated = original.duplicated()
tm.assert_numpy_array_equal(duplicated, expected)
assert duplicated.dtype == bool
result = original.drop_duplicates()
tm.assert_index_equal(result, original)
assert result is not original
# has_duplicates
assert not original.has_duplicates
# create repeated values, 3rd and 5th values are duplicated
idx = original[list(range(len(original))) + [5, 3]]
expected = np.array([False] * len(original) + [True, True],
dtype=bool)
duplicated = idx.duplicated()
tm.assert_numpy_array_equal(duplicated, expected)
assert duplicated.dtype == bool
tm.assert_index_equal(idx.drop_duplicates(), original)
base = [False] * len(idx)
base[3] = True
base[5] = True
expected = np.array(base)
duplicated = idx.duplicated(keep='last')
tm.assert_numpy_array_equal(duplicated, expected)
assert duplicated.dtype == bool
result = idx.drop_duplicates(keep='last')
tm.assert_index_equal(result, idx[~expected])
base = [False] * len(original) + [True, True]
base[3] = True
base[5] = True
expected = np.array(base)
duplicated = idx.duplicated(keep=False)
tm.assert_numpy_array_equal(duplicated, expected)
assert duplicated.dtype == bool
result = idx.drop_duplicates(keep=False)
tm.assert_index_equal(result, idx[~expected])
with tm.assert_raises_regex(
TypeError, r"drop_duplicates\(\) got an unexpected "
"keyword argument"):
idx.drop_duplicates(inplace=True)
else:
expected = Series([False] * len(original),
index=original.index, name='a')
tm.assert_series_equal(original.duplicated(), expected)
result = original.drop_duplicates()
tm.assert_series_equal(result, original)
assert result is not original
idx = original.index[list(range(len(original))) + [5, 3]]
values = original._values[list(range(len(original))) + [5, 3]]
s = Series(values, index=idx, name='a')
expected = Series([False] * len(original) + [True, True],
index=idx, name='a')
tm.assert_series_equal(s.duplicated(), expected)
tm.assert_series_equal(s.drop_duplicates(), original)
base = [False] * len(idx)
base[3] = True
base[5] = True
expected = Series(base, index=idx, name='a')
tm.assert_series_equal(s.duplicated(keep='last'), expected)
tm.assert_series_equal(s.drop_duplicates(keep='last'),
s[~np.array(base)])
base = [False] * len(original) + [True, True]
base[3] = True
base[5] = True
expected = Series(base, index=idx, name='a')
tm.assert_series_equal(s.duplicated(keep=False), expected)
tm.assert_series_equal(s.drop_duplicates(keep=False),
s[~np.array(base)])
s.drop_duplicates(inplace=True)
tm.assert_series_equal(s, original)
def test_drop_duplicates_series_vs_dataframe(self):
# GH 14192
df = pd.DataFrame({'a': [1, 1, 1, 'one', 'one'],
'b': [2, 2, np.nan, np.nan, np.nan],
'c': [3, 3, np.nan, np.nan, 'three'],
'd': [1, 2, 3, 4, 4],
'e': [datetime(2015, 1, 1), datetime(2015, 1, 1),
datetime(2015, 2, 1), pd.NaT, pd.NaT]
})
for column in df.columns:
for keep in ['first', 'last', False]:
dropped_frame = df[[column]].drop_duplicates(keep=keep)
dropped_series = df[column].drop_duplicates(keep=keep)
tm.assert_frame_equal(dropped_frame, dropped_series.to_frame())
def test_fillna(self):
# GH 11343
# though Index.fillna and Series.fillna have separate implementations,
# test here to confirm they work the same way
for orig in self.objs:
o = orig.copy()
values = o.values
# values will not be changed
result = o.fillna(o.astype(object).values[0])
if isinstance(o, Index):
tm.assert_index_equal(o, result)
else:
tm.assert_series_equal(o, result)
# check shallow_copied
assert o is not result
for null_obj in [np.nan, None]:
for orig in self.objs:
o = orig.copy()
klass = type(o)
if not self._allow_na_ops(o):
continue
if needs_i8_conversion(o):
values = o.astype(object).values
fill_value = values[0]
values[0:2] = pd.NaT
else:
values = o.values.copy()
fill_value = o.values[0]
values[0:2] = null_obj
expected = [fill_value] * 2 + list(values[2:])
expected = klass(expected)
o = klass(values)
# check values has the same dtype as the original
assert o.dtype == orig.dtype
result = o.fillna(fill_value)
if isinstance(o, Index):
tm.assert_index_equal(result, expected)
else:
tm.assert_series_equal(result, expected)
# check shallow_copied
assert o is not result
@pytest.mark.skipif(PYPY, reason="not relevant for PyPy")
def test_memory_usage(self):
for o in self.objs:
res = o.memory_usage()
res_deep = o.memory_usage(deep=True)
if (is_object_dtype(o) or (isinstance(o, Series) and
is_object_dtype(o.index))):
# if there are objects, only deep will pick them up
assert res_deep > res
else:
assert res == res_deep
if isinstance(o, Series):
assert ((o.memory_usage(index=False) +
o.index.memory_usage()) ==
o.memory_usage(index=True))
# sys.getsizeof will call the .memory_usage with
# deep=True, and add on some GC overhead
diff = res_deep - sys.getsizeof(o)
assert abs(diff) < 100
def test_searchsorted(self):
# See gh-12238
for o in self.objs:
index = np.searchsorted(o, max(o))
assert 0 <= index <= len(o)
index = np.searchsorted(o, max(o), sorter=range(len(o)))
assert 0 <= index <= len(o)
def test_validate_bool_args(self):
invalid_values = [1, "True", [1, 2, 3], 5.0]
for value in invalid_values:
with pytest.raises(ValueError):
self.int_series.drop_duplicates(inplace=value)
class TestTranspose(Ops):
errmsg = "the 'axes' parameter is not supported"
def test_transpose(self):
for obj in self.objs:
if isinstance(obj, Index):
tm.assert_index_equal(obj.transpose(), obj)
else:
tm.assert_series_equal(obj.transpose(), obj)
def test_transpose_non_default_axes(self):
for obj in self.objs:
tm.assert_raises_regex(ValueError, self.errmsg,
obj.transpose, 1)
tm.assert_raises_regex(ValueError, self.errmsg,
obj.transpose, axes=1)
def test_numpy_transpose(self):
for obj in self.objs:
if isinstance(obj, Index):
tm.assert_index_equal(np.transpose(obj), obj)
else:
tm.assert_series_equal(np.transpose(obj), obj)
tm.assert_raises_regex(ValueError, self.errmsg,
np.transpose, obj, axes=1)
class TestNoNewAttributesMixin(object):
def test_mixin(self):
class T(NoNewAttributesMixin):
pass
t = T()
assert not hasattr(t, "__frozen")
t.a = "test"
assert t.a == "test"
t._freeze()
assert "__frozen" in dir(t)
assert getattr(t, "__frozen")
def f():
t.b = "test"
pytest.raises(AttributeError, f)
assert not hasattr(t, "b")
class TestToIterable(object):
# test that we convert an iterable to python types
dtypes = [
('int8', (int, long)),
('int16', (int, long)),
('int32', (int, long)),
('int64', (int, long)),
('uint8', (int, long)),
('uint16', (int, long)),
('uint32', (int, long)),
('uint64', (int, long)),
('float16', float),
('float32', float),
('float64', float),
('datetime64[ns]', Timestamp),
('datetime64[ns, US/Eastern]', Timestamp),
('timedelta64[ns]', Timedelta)]
@pytest.mark.parametrize(
'dtype, rdtype', dtypes)
@pytest.mark.parametrize(
'method',
[
lambda x: x.tolist(),
lambda x: list(x),
lambda x: list(x.__iter__()),
], ids=['tolist', 'list', 'iter'])
@pytest.mark.parametrize('typ', [Series, Index])
def test_iterable(self, typ, method, dtype, rdtype):
# gh-10904
# gh-13258
# coerce iteration to underlying python / pandas types
s = typ([1], dtype=dtype)
result = method(s)[0]
assert isinstance(result, rdtype)
@pytest.mark.parametrize(
'dtype, rdtype, obj',
[
('object', object, 'a'),
('object', (int, long), 1),
('category', object, 'a'),
('category', (int, long), 1)])
@pytest.mark.parametrize(
'method',
[
lambda x: x.tolist(),
lambda x: list(x),
lambda x: list(x.__iter__()),
], ids=['tolist', 'list', 'iter'])
@pytest.mark.parametrize('typ', [Series, Index])
def test_iterable_object_and_category(self, typ, method,
dtype, rdtype, obj):
# gh-10904
# gh-13258
# coerce iteration to underlying python / pandas types
s = typ([obj], dtype=dtype)
result = method(s)[0]
assert isinstance(result, rdtype)
@pytest.mark.parametrize(
'dtype, rdtype', dtypes)
def test_iterable_items(self, dtype, rdtype):
# gh-13258
# test items / iteritems yields the correct boxed scalars
# this only applies to series
s = Series([1], dtype=dtype)
_, result = list(s.items())[0]
assert isinstance(result, rdtype)
_, result = list(s.iteritems())[0]
assert isinstance(result, rdtype)
@pytest.mark.parametrize(
'dtype, rdtype',
dtypes + [
('object', (int, long)),
('category', (int, long))])
@pytest.mark.parametrize('typ', [Series, Index])
def test_iterable_map(self, typ, dtype, rdtype):
# gh-13236
# coerce iteration to underlying python / pandas types
s = typ([1], dtype=dtype)
result = s.map(type)[0]
if not isinstance(rdtype, tuple):
rdtype = tuple([rdtype])
assert result in rdtype
@pytest.mark.parametrize(
'method',
[
lambda x: x.tolist(),
lambda x: list(x),
lambda x: list(x.__iter__()),
], ids=['tolist', 'list', 'iter'])
def test_categorial_datetimelike(self, method):
i = CategoricalIndex([Timestamp('1999-12-31'),
Timestamp('2000-12-31')])
result = method(i)[0]
assert isinstance(result, Timestamp)
def test_iter_box(self):
vals = [Timestamp('2011-01-01'), Timestamp('2011-01-02')]
s = Series(vals)
assert s.dtype == 'datetime64[ns]'
for res, exp in zip(s, vals):
assert isinstance(res, Timestamp)
assert res.tz is None
assert res == exp
vals = [Timestamp('2011-01-01', tz='US/Eastern'),
Timestamp('2011-01-02', tz='US/Eastern')]
s = Series(vals)
assert s.dtype == 'datetime64[ns, US/Eastern]'
for res, exp in zip(s, vals):
assert isinstance(res, Timestamp)
assert res.tz == exp.tz
assert res == exp
# timedelta
vals = [Timedelta('1 days'), Timedelta('2 days')]
s = Series(vals)
assert s.dtype == 'timedelta64[ns]'
for res, exp in zip(s, vals):
assert isinstance(res, Timedelta)
assert res == exp
# period (object dtype, not boxed)
vals = [pd.Period('2011-01-01', freq='M'),
pd.Period('2011-01-02', freq='M')]
s = Series(vals)
assert s.dtype == 'object'
for res, exp in zip(s, vals):
assert isinstance(res, pd.Period)
assert res.freq == 'M'
assert res == exp
@pytest.mark.parametrize('array, expected_type, dtype', [
(np.array([0, 1], dtype=np.int64), np.ndarray, 'int64'),
(np.array(['a', 'b']), np.ndarray, 'object'),
(pd.Categorical(['a', 'b']), pd.Categorical, 'category'),
(pd.DatetimeIndex(['2017', '2018']), np.ndarray, 'datetime64[ns]'),
(pd.DatetimeIndex(['2017', '2018'], tz="US/Central"), pd.DatetimeIndex,
'datetime64[ns, US/Central]'),
(pd.TimedeltaIndex([10**10]), np.ndarray, 'm8[ns]'),
(pd.PeriodIndex([2018, 2019], freq='A'), np.ndarray, 'object'),
(pd.IntervalIndex.from_breaks([0, 1, 2]), np.ndarray, 'object'),
])
def test_values_consistent(array, expected_type, dtype):
l_values = pd.Series(array)._values
r_values = pd.Index(array)._values
assert type(l_values) is expected_type
assert type(l_values) is type(r_values)
if isinstance(l_values, np.ndarray):
tm.assert_numpy_array_equal(l_values, r_values)
elif isinstance(l_values, pd.Index):
tm.assert_index_equal(l_values, r_values)
elif pd.api.types.is_categorical(l_values):
tm.assert_categorical_equal(l_values, r_values)
else:
raise TypeError("Unexpected type {}".format(type(l_values)))
assert l_values.dtype == dtype
assert r_values.dtype == dtype
@pytest.mark.parametrize('array, expected', [
(np.array([0, 1], dtype=np.int64), np.array([0, 1], dtype=np.int64)),
(np.array(['0', '1']), np.array(['0', '1'], dtype=object)),
(pd.Categorical(['a', 'a']), np.array([0, 0], dtype='int8')),
(pd.DatetimeIndex(['2017-01-01T00:00:00']),
np.array(['2017-01-01T00:00:00'], dtype='M8[ns]')),
(pd.DatetimeIndex(['2017-01-01T00:00:00'], tz="US/Eastern"),
np.array(['2017-01-01T05:00:00'], dtype='M8[ns]')),
(pd.TimedeltaIndex([10**10]), np.array([10**10], dtype='m8[ns]')),
pytest.param(
pd.PeriodIndex(['2017', '2018'], freq='D'),
np.array([17167, 17532]),
marks=pytest.mark.xfail(reason="PeriodArray Not implemented")
),
])
def test_ndarray_values(array, expected):
l_values = pd.Series(array)._ndarray_values
r_values = pd.Index(array)._ndarray_values
tm.assert_numpy_array_equal(l_values, r_values)
tm.assert_numpy_array_equal(l_values, expected)
| bsd-3-clause |
mehdidc/scikit-learn | sklearn/svm/tests/test_bounds.py | 280 | 2541 | import nose
from nose.tools import assert_equal, assert_true
from sklearn.utils.testing import clean_warning_registry
import warnings
import numpy as np
from scipy import sparse as sp
from sklearn.svm.bounds import l1_min_c
from sklearn.svm import LinearSVC
from sklearn.linear_model.logistic import LogisticRegression
dense_X = [[-1, 0], [0, 1], [1, 1], [1, 1]]
sparse_X = sp.csr_matrix(dense_X)
Y1 = [0, 1, 1, 1]
Y2 = [2, 1, 0, 0]
def test_l1_min_c():
losses = ['squared_hinge', 'log']
Xs = {'sparse': sparse_X, 'dense': dense_X}
Ys = {'two-classes': Y1, 'multi-class': Y2}
intercepts = {'no-intercept': {'fit_intercept': False},
'fit-intercept': {'fit_intercept': True,
'intercept_scaling': 10}}
for loss in losses:
for X_label, X in Xs.items():
for Y_label, Y in Ys.items():
for intercept_label, intercept_params in intercepts.items():
check = lambda: check_l1_min_c(X, Y, loss,
**intercept_params)
check.description = ('Test l1_min_c loss=%r %s %s %s' %
(loss, X_label, Y_label,
intercept_label))
yield check
def test_l2_deprecation():
clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
assert_equal(l1_min_c(dense_X, Y1, "l2"),
l1_min_c(dense_X, Y1, "squared_hinge"))
assert_equal(w[0].category, DeprecationWarning)
def check_l1_min_c(X, y, loss, fit_intercept=True, intercept_scaling=None):
min_c = l1_min_c(X, y, loss, fit_intercept, intercept_scaling)
clf = {
'log': LogisticRegression(penalty='l1'),
'squared_hinge': LinearSVC(loss='squared_hinge',
penalty='l1', dual=False),
}[loss]
clf.fit_intercept = fit_intercept
clf.intercept_scaling = intercept_scaling
clf.C = min_c
clf.fit(X, y)
assert_true((np.asarray(clf.coef_) == 0).all())
assert_true((np.asarray(clf.intercept_) == 0).all())
clf.C = min_c * 1.01
clf.fit(X, y)
assert_true((np.asarray(clf.coef_) != 0).any() or
(np.asarray(clf.intercept_) != 0).any())
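# Illustrative sketch (not part of the original test module): l1_min_c can also
# be called directly. It returns the threshold value of C at or below which the
# L1-penalized model keeps all coefficients at zero, as exercised by the check
# above. The helper name below is made up for this example.
def _l1_min_c_direct_example():
    c_hinge = l1_min_c(dense_X, Y1, loss='squared_hinge')
    c_log = l1_min_c(dense_X, Y1, loss='log')
    # Both thresholds are strictly positive for this small separable problem.
    assert c_hinge > 0 and c_log > 0
    return c_hinge, c_log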
@nose.tools.raises(ValueError)
def test_ill_posed_min_c():
X = [[0, 0], [0, 0]]
y = [0, 1]
l1_min_c(X, y)
@nose.tools.raises(ValueError)
def test_unsupported_loss():
l1_min_c(dense_X, Y1, 'l1')
| bsd-3-clause |
jyt109/stylize | example.py | 2 | 2048 | import sys
import os
from matplotlib import pyplot as plt
from scipy.misc import imread,imsave
from stylize import render
def show_img(img,title):
plt.clf()
plt.imshow(img)
plt.xticks([])
plt.yticks([])
plt.title(title)
plt.show()
if __name__ == "__main__":
try:
path = sys.argv[1]
except IndexError:
path = 'resources/iggy.jpg'
print "Going to go through a few examples using the stylize.render"
# Load an image into a numpy format and see it
img = imread(path)
plt.imshow(img)
plt.xticks([])
plt.yticks([])
plt.title("Our source image, close to continue")
plt.show()
print "Please wait, rendering..."
defaults = render(img,verbose=True)
show_img(defaults,"Default stylization - polygonal")
print "Please wait, rendering..."
landmarks = render(img,features='landmarks',verbose=True)
show_img(landmarks,"Landmark features for curved stylization")
print "Please wait, rendering..."
abstract = render(img,depth=4,verbose=True)
show_img(abstract,"A depth of 4 results in an abstract representation")
print "Please wait, rendering..."
more_detail = render(img,ratio=0.00005,verbose=True)
show_img(more_detail,"Ratio 0.00005 results greater detail")
print "Please wait, rendering..."
less_detail = render(img,ratio=0.001,verbose=True)
show_img(less_detail,"Ratio 0.001 results in less detail")
print "Please wait, rendering... this one's going to take a minute or so"
smoother = render(img,iterations=25,verbose=True)
show_img(smoother,"Averaging over 25 iterations to make it smoother")
print "Please wait, rendering..."
aa = render(img,anti_aliasing=True,verbose=True)
show_img(aa,"Anti-aliasing to fight jaggies")
print "Saved results are in the examples directory!"
imsave('example_images/defaults.png',defaults)
imsave('example_images/landmarks.png',landmarks)
imsave('example_images/abstract.png',abstract)
imsave('example_images/more_detail.png',more_detail)
imsave('example_images/less_detail.png',less_detail)
imsave('example_images/smoother.png',smoother)
imsave('example_images/aa.png',aa)
| mit |
MKridler/pyxley | examples/datatables/project/app.py | 11 | 3137 | from flask import Flask
from flask import request, jsonify, render_template, make_response
import pandas as pd
import json
import sys
import glob
from react import jsx
import numpy as np
import re
import argparse
from pyxley.charts.datatables import DataTable
from pyxley import SimpleComponent
from pyxley.filters import SelectButton
from collections import OrderedDict
parser = argparse.ArgumentParser(description="Flask Template")
parser.add_argument("--env", help="production or local", default="local")
args = parser.parse_args()
TITLE = "Pyxley"
scripts = [
"./bower_components/jquery/dist/jquery.min.js",
"./bower_components/datatables/media/js/jquery.dataTables.js",
"./dataTables.fixedColumns.js",
"./bower_components/d3/d3.min.js",
"./bower_components/require/build/require.min.js",
"./bower_components/react/react.js",
"./bower_components/react-bootstrap/react-bootstrap.min.js",
"./conf_int.js",
"./bower_components/pyxley/build/pyxley.js"
]
css = [
"./bower_components/bootstrap/dist/css/bootstrap.min.css",
"./bower_components/datatables/media/css/jquery.dataTables.min.css",
"./css/main.css"
]
df = pd.DataFrame(json.load(open("./static/data.json", "r")))
df = df.dropna()
df["salary"] = df["salary"].apply(lambda x: float(re.sub("[^\d\.]", "", x)))
df["lower"] = ( 1. - (0.03*np.random.randn(df.shape[0]) + 0.15))
df["upper"] = ( 1. + (0.03*np.random.randn(df.shape[0]) + 0.15))
df["salary_upper"] = df["upper"]*df["salary"]
df["salary_lower"] = df["lower"]*df["salary"]
cols = OrderedDict([
("position", {"label": "Position"}),
("office", {"label": "Office"}),
("start_date", {"label": "Start Date"}),
("salary_lower", {"label": "Salary Range",
"confidence": {
"lower": "salary_lower",
"upper": "salary_upper"
}
})
])
addfunc = """
new $.fn.dataTable.FixedColumns(this, {
leftColumns: 1,
rightColumns: 0
});
confidence_interval(this.api().column(3, {"page":"current"}).data(), "mytable");
"""
drawfunc = """
confidence_interval(this.api().column(3, {"page":"current"}).data(), "mytable");
"""
tb = DataTable("mytable", "/mytable/", df,
columns=cols,
paging=True,
pageLength=9,
scrollX=True,
columnDefs=[{
"render": """<svg width="156" height="20"><g></g></svg>""",
"orderable": False,
"targets": 3
}],
sDom='<"top">rt<"bottom"lp><"clear">',
deferRender=True,
initComplete=addfunc,
drawCallback=drawfunc)
app = Flask(__name__)
tb.register_route(app)
ui = SimpleComponent(
"Table",
"./static/bower_components/pyxley/build/pyxley.js",
"component_id",
tb.params
)
sb = ui.render("./static/layout.js")
@app.route('/test', methods=["GET"])
def testtest():
return jsonify(jsfunc)
@app.route('/', methods=["GET"])
@app.route('/index', methods=["GET"])
def index():
_scripts = [
"./layout.js"
]
return render_template('index.html',
title=TITLE,
base_scripts=scripts,
page_scripts=_scripts,
css=css)
if __name__ == "__main__":
app.run(debug=True) | mit |
ucsd-ccbb/jupyter-genomics | src/crispr/ccbbucsd/malicrispr/count_plots.py | 1 | 4021 | # third-party libraries
import matplotlib.pyplot
import numpy
import pandas
# ccbb libraries
from ccbbucsd.utilities.analysis_run_prefixes import strip_run_prefix
from ccbbucsd.utilities.files_and_paths import build_multipart_fp, get_file_name_pieces, get_filepaths_by_prefix_and_suffix
# project-specific libraries
from ccbbucsd.malicrispr.count_files_and_dataframes import get_counts_df
__author__ = "Amanda Birmingham"
__maintainer__ = "Amanda Birmingham"
__email__ = "[email protected]"
__status__ = "prototype"
DEFAULT_PSEUDOCOUNT = 1
def get_boxplot_suffix():
return "boxplots.png"
def make_log2_series(input_series, pseudocount_val):
revised_series = input_series + pseudocount_val
log2_series = revised_series.apply(numpy.log2)
nan_log2_series = log2_series.replace([numpy.inf, -numpy.inf], numpy.nan)
return nan_log2_series.dropna().reset_index(drop=True)
# note that .reset_index(drop=True) is necessary as matplotlib boxplot function (perhaps among others)
# throws an error if the input series doesn't include an item with index 0--which can be the case if
# that first item was NaN and was dropped, and series wasn't reindexed.
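# Illustrative sketch (added, not in the original module): with the default
# pseudocount of 1, a raw count of 0 maps to log2(1) = 0.0, 3 maps to
# log2(4) = 2.0 and 7 maps to log2(8) = 3.0. The helper name is made up.
def _make_log2_series_example():
    counts = pandas.Series([0, 3, 7])
    log2_counts = make_log2_series(counts, DEFAULT_PSEUDOCOUNT)
    # Expected result: a float series equal to [0.0, 2.0, 3.0]
    return log2_counts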
def show_and_save_histogram(output_fp, title, count_data):
matplotlib.pyplot.figure(figsize=(20,20))
matplotlib.pyplot.hist(count_data)
matplotlib.pyplot.title(title)
matplotlib.pyplot.xlabel("log2(raw counts)")
matplotlib.pyplot.ylabel("Frequency")
matplotlib.pyplot.savefig(output_fp)
matplotlib.pyplot.show()
def show_and_save_boxplot(output_fp, title, samples_names, samples_data, rotation_val=0):
fig = matplotlib.pyplot.figure(1, figsize=(20,20))
ax = fig.add_subplot(111)
bp = ax.boxplot(samples_data)
ax.set_xticklabels(samples_names, rotation=rotation_val)
ax.set_xlabel("samples")
ax.set_ylabel("log2(raw counts)")
matplotlib.pyplot.title(title)
fig.savefig(output_fp, bbox_inches='tight')
matplotlib.pyplot.show()
def plot_raw_counts(input_dir, input_run_prefix, counts_suffix, output_dir, output_run_prefix, boxplot_suffix):
counts_fps_for_run = get_filepaths_by_prefix_and_suffix(input_dir, input_run_prefix, counts_suffix)
for curr_counts_fp in counts_fps_for_run:
_, curr_sample, _ = get_file_name_pieces(curr_counts_fp)
stripped_sample = strip_run_prefix(curr_sample, input_run_prefix)
count_header, curr_counts_df = get_counts_df(curr_counts_fp, input_run_prefix)
curr_counts_df.rename(columns={count_header:stripped_sample}, inplace=True)
count_header = stripped_sample
log2_series = make_log2_series(curr_counts_df[count_header], DEFAULT_PSEUDOCOUNT)
title = " ".join([input_run_prefix, count_header, "with pseudocount", str(DEFAULT_PSEUDOCOUNT)])
output_fp_prefix = build_multipart_fp(output_dir, [count_header, input_run_prefix])
boxplot_fp = output_fp_prefix + "_" + boxplot_suffix
show_and_save_boxplot(boxplot_fp, title, [count_header], log2_series)
hist_fp = output_fp_prefix + "_" + "hist.png"
show_and_save_histogram(hist_fp, title, log2_series)
def plot_combined_raw_counts(input_dir, input_run_prefix, combined_suffix, output_dir, output_run_prefix, boxplot_suffix):
output_fp = build_multipart_fp(output_dir, [output_run_prefix, boxplot_suffix])
combined_counts_fp = build_multipart_fp(input_dir, [input_run_prefix, combined_suffix])
combined_counts_df = pandas.read_table(combined_counts_fp)
samples_names = combined_counts_df.columns.values[1:] # TODO: remove hardcode
samples_data = []
for curr_name in samples_names:
log2_series = make_log2_series(combined_counts_df[curr_name], DEFAULT_PSEUDOCOUNT)
samples_data.append(log2_series.tolist())
title = " ".join([input_run_prefix, "all samples", "with pseudocount", str(DEFAULT_PSEUDOCOUNT)])
show_and_save_boxplot(output_fp, title, samples_names, samples_data, 90)
| mit |
deeplook/notebooks | freemium/utils.py | 1 | 9144 | """
Utilities, ...
Requirements (not strictly tested from scratch, sorry!)
conda install -y rasterio # pulls in many other things
conda install -c conda-forge ipyleaflet
# jupyter labextension install jupyter-leaflet # for jupyterlab
conda install -c conda-forge ipywidgets
pip install requests
pip install folium
pip install pillow
pip install mercantile
pip install contextily
pip install geographiclib
pip install geopy>=1.15.0
It is recommended to start with an Anaconda distribution.
"""
import re
import os
import sys
import random
from itertools import tee
import requests
import contextily as ctx
from geographiclib.geodesic import Geodesic
from geopy.geocoders import Here
from geopy.distance import geodesic
from ipyleaflet import Marker, CircleMarker, Polyline
from ipywidgets import HTML
from pyproj import Proj, transform
app_id = os.getenv('HEREMAPS_APP_ID')
app_code = os.getenv('HEREMAPS_APP_CODE')
if not app_id or not app_code:
try:
from here_credentials import app_id, app_code
except ImportError:
raise ValueError('Cannot find value for APP_ID and/or APP_CODE...')
geocoder = Here(app_id=app_id, app_code=app_code)
def mask_app_id(text):
"Mask out credentials in given string for presentations."
masked = re.sub(r'app_id=[\-\w]+', 'app_id=******', text)
masked = re.sub(r'app_code=[\-\w]+', 'app_code=******', masked)
return masked
# Conversion between lat/lon in degrees (and zoom) to x/y/zoom as used in tile sets,
# from http://wiki.openstreetmap.org/wiki/Slippy_map_tilenames#Python
from math import radians, degrees, log, cos, tan, pi, atan, sinh
def deg2tile(lat_deg, lon_deg, zoom):
lat_rad = radians(lat_deg)
n = 2.0 ** zoom
xtile = int((lon_deg + 180.0) / 360.0 * n)
ytile = int((1.0 - log(tan(lat_rad) + (1 / cos(lat_rad))) / pi) / 2.0 * n)
return (xtile, ytile)
# not used here
def tile2deg(xtile, ytile, zoom):
n = 2.0 ** zoom
lon_deg = xtile / n * 360.0 - 180.0
lat_rad = atan(sinh(pi * (1 - 2 * ytile / n)))
lat_deg = degrees(lat_rad)
return (lat_deg, lon_deg)
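# Illustrative sketch (added): deg2tile maps a WGS84 coordinate to the x/y
# indices of the slippy-map tile that contains it, while tile2deg returns the
# lat/lon of that tile's north-west corner. The coordinates below are arbitrary
# example values (central Berlin) and the helper name is made up.
def _tile_roundtrip_example(lat=52.52, lon=13.405, zoom=12):
    xtile, ytile = deg2tile(lat, lon, zoom)
    corner_lat, corner_lon = tile2deg(xtile, ytile, zoom)
    # The returned corner lies north-west of (or exactly on) the query point.
    assert corner_lat >= lat and corner_lon <= lon
    return (xtile, ytile), (corner_lat, corner_lon)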
def pairwise(iterable):
"s -> (s0,s1), (s1,s2), (s2, s3), ..."
a, b = tee(iterable)
next(b, None)
return zip(a, b)
def chunks(seq, n):
"Yield successive n-sized chunks from l."
# for item in zip(*(iter(seq),) * n): yield item
for i in range(0, len(seq), n):
yield seq[i:i + n]
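# Illustrative sketch (added): both helpers are small itertools-style utilities.
def _iteration_helpers_example():
    assert list(pairwise([1, 2, 3, 4])) == [(1, 2), (2, 3), (3, 4)]
    assert list(chunks([1, 2, 3, 4, 5], 2)) == [[1, 2], [3, 4], [5]]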
# not used here
def latlon_for_address(address):
"Return a lat/lon tuple for the given address by geocoding it."
res = geocoder.geocode(address)
return res.latitude, res.longitude
def build_here_tiles_url(**kwdict):
"""
Return a HERE map tiles URL, based on default values that can be
overwritten by kwdict...
To be used for map building services like leaflet, folium, and
geopandas (with additional fields inside a dict)...
"""
params = dict(
app_id = app_id,
app_code = app_code,
maptype = 'traffic',
tiletype = 'traffictile',
scheme = 'normal.day',
tilesize = '256',
tileformat = 'png8',
lg = 'eng',
x = '{x}',
y = '{y}',
z = '{z}',
server = random.choice('1234')
)
params.update(kwdict)
url = (
'https://{server}.{maptype}.maps.api.here.com'
'/maptile/2.1/{tiletype}/newest/{scheme}/{z}/{x}/{y}/{tilesize}/{tileformat}'
'?lg={lg}&app_id={app_id}&app_code={app_code}'
).format(**params)
return url
def build_here_basemap(**kwdict):
"""
Return a dict describing a HERE map tile layer, based on default values that can be
overwritten with params in kwdict...
To be used for ipyleaflet.
"""
params = dict(
url = build_here_tiles_url(**kwdict),
min_zoom = 1,
max_zoom = 18,
attribution = 'Tiles © HERE.com',
name = 'HERE'
)
params.update(kwdict)
return params
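# Illustrative sketch (an assumption, not from the original notebook code): the
# dict returned by build_here_basemap mirrors the keys of the basemap entries
# bundled with ipyleaflet, so it can be turned into a tile layer with
# basemap_to_tiles. The center/zoom values below are arbitrary examples.
def _here_map_example(center=(52.5, 13.4), zoom=10):
    from ipyleaflet import Map, basemap_to_tiles
    m = Map(center=center, zoom=zoom)
    m.add_layer(basemap_to_tiles(build_here_basemap(scheme='normal.day')))
    return m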
def get_route_positions(start, end, **kwargs):
"""
Get routing data.
"""
lat0, lon0 = start
lat1, lon1 = end
params = dict(
language='en',
mode='fastest;car;traffic:disabled'
)
params.update(kwargs)
url = (
f'https://route.cit.api.here.com'
f'/routing/7.2/calculateroute.json'
f'?app_id={app_id}&app_code={app_code}'
# f'&waypoint0=street!{addr}'
f'&waypoint0=geo!{lat0},{lon0}'
f'&waypoint1=geo!{lat1},{lon1}'
# f'&language={language}'
# f'&mode=fastest;car;traffic:disabled'
f'&metricsystem=metric'
f'&jsonattributes=41' # ?
# f'maneuverattributes=po,ti,pt,ac,di,fj,ix' # ?
f'&routeattributes=sh,gr'
f'&instructionFormat=text' # or html
# f'&mode=fastest;publicTransport&combineChange=true&departure=now'
)
for key in params:
val = params[key]
url += f'&{key}={val}'
obj = requests.get(url).json()
return obj['response']['route']
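    # NOTE: the early return above makes everything below unreachable; the
    # maneuver-extraction code is kept only as a reference for the structure
    # of the routing response.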
leg = obj['response']['route'][0]['leg']
res = []
for man in leg[0]['maneuver']:
pos = man['position']
lat, lon = pos['latitude'], pos['longitude']
inst = man['instruction']
res.append(dict(lat=lat, lon=lon, maneuver=inst))
return res
def add_route_to_map(route, some_map, color='blue'):
"""
Add a route from the HERE REST API to the given map.
This includes markers for all points where a maneuver is needed, like 'turn left'.
And it includes a path with lat/lons from start to end and little circle markers
around them.
"""
path_positions = list(chunks(route[0]['shape'], 2))
maneuvers = {
(man['position']['latitude'], man['position']['longitude']): man['instruction']
for man in route[0]['leg'][0]['maneuver']}
polyline = Polyline(
locations=path_positions,
color=color,
fill=False
)
some_map += polyline
for lat, lon in path_positions:
if (lat, lon) in maneuvers:
some_map += CircleMarker(location=(lat, lon), radius=2)
marker = Marker(location=(lat, lon), draggable=False)
message1 = HTML()
message1.value = maneuvers[(lat, lon)]
marker.popup = message1
some_map += marker
else:
some_map += CircleMarker(location=(lat, lon), radius=3)
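# Illustrative sketch (an assumption, not from the original notebook code):
# request a route between two lat/lon pairs and draw it on an ipyleaflet map.
# The Berlin coordinates below are arbitrary example values.
def _route_example():
    from ipyleaflet import Map
    start, end = (52.5159, 13.3777), (52.5206, 13.4095)
    some_map = Map(center=start, zoom=13)
    route = get_route_positions(start, end)
    add_route_to_map(route, some_map, color='green')
    return some_map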
def geo_distance(p, q):
"Return the geodesic distance from point p to q (both lat/lon pairs) in meters."
(lat0, lon0), (lat1, lon1) = p, q
g = Geodesic.WGS84.Inverse(lat0, lon0, lat1, lon1)
return g['s12']
def mid_point(loc1, loc2):
"""
Calculate the midpoint along the geodesic between two given points.
"""
geod = Geodesic.WGS84
inv_line = geod.InverseLine(*(loc1 + loc2))
distance_m = geod.Inverse(*(loc1 + loc2))["s12"]
loc = inv_line.Position(distance_m / 2, Geodesic.STANDARD | Geodesic.LONG_UNROLL)
lat, lon = loc['lat2'], loc['lon2']
return lat, lon
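# Illustrative sketch (added): geo_distance and mid_point work on plain
# (lat, lon) tuples; by construction the midpoint lies at half the geodesic
# distance from either endpoint, up to numerical error. The default
# coordinates are arbitrary example values (roughly Berlin and Hamburg).
def _geodesic_example(p=(52.5200, 13.4050), q=(53.5511, 9.9937)):
    total = geo_distance(p, q)
    middle = mid_point(p, q)
    assert abs(geo_distance(p, middle) - total / 2) < 1.0  # metres
    return total, middle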
class Isoline(object):
def __init__(self, the_map, **kwdict):
self.the_map = the_map
self.isoline = None
self.url = (
'https://isoline.route.api.here.com'
'/routing/7.2/calculateisoline.json'
'?app_id={app_id}&app_code={app_code}'
'&start=geo!{lat},{lon}'
'&mode=fastest;car;traffic:disabled'
'&range={{meters}}' # seconds/meters
'&rangetype=distance' # time/distance
#'&departure=now' # 2013-07-04T17:00:00+02
#'&resolution=20' # meters
).format(**kwdict)
self.cache = {}
def __call__(self, meters=1000):
if meters not in self.cache:
print('loading', meters)
url = self.url.format(meters=meters)
obj = requests.get(url).json()
self.cache[meters] = obj
obj = self.cache[meters]
isoline = obj['response']['isoline'][0]
shape = isoline['component'][0]['shape']
path = [tuple(map(float, pos.split(','))) for pos in shape]
if self.isoline:
self.the_map -= self.isoline
self.isoline = Polyline(locations=path, color='red', weight=2, fill=True)
self.the_map += self.isoline
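# Illustrative sketch (an assumption, not from the original notebook code): an
# Isoline instance is bound to a map and a start position once, then called
# repeatedly with different ranges, e.g. from an ipywidgets slider callback.
def _isoline_example(some_map, lat=52.52, lon=13.405):
    iso = Isoline(some_map, app_id=app_id, app_code=app_code, lat=lat, lon=lon)
    iso(meters=2000)  # draws the 2 km isoline and caches the response
    return iso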
def Mercator2WGS84(x, y):
return transform(Proj(init='epsg:3857'), Proj(init='epsg:4326'), x, y)
def add_basemap(ax, zoom, url='http://tile.stamen.com/terrain/tileZ/tileX/tileY.png'):
# Special thanks to Prof. Martin Christen at FHNW.ch in Basel for
# his GIS-Hack to make the output scales show proper lat/lon values!
xmin, xmax, ymin, ymax = ax.axis()
basemap, extent = ctx.bounds2img(xmin, ymin, xmax, ymax, zoom=zoom, ll=True, url=url)
# calculate extent from WebMercator to WGS84
xmin84, ymin84 = Mercator2WGS84(extent[0], extent[2])
xmax84, ymax84 = Mercator2WGS84(extent[1], extent[3])
extentwgs84 = (xmin84, xmax84, ymin84, ymax84)
ax.imshow(basemap, extent=extentwgs84, interpolation='bilinear')
# restore original x/y limits
ax.axis((xmin, xmax, ymin, ymax)) | mit |
samzhang111/scikit-learn | sklearn/manifold/t_sne.py | 52 | 34602 | # Author: Alexander Fabisch -- <[email protected]>
# Author: Christopher Moody <[email protected]>
# Author: Nick Travers <[email protected]>
# License: BSD 3 clause (C) 2014
# This is the exact and Barnes-Hut t-SNE implementation. There are other
# modifications of the algorithm:
# * Fast Optimization for t-SNE:
# http://cseweb.ucsd.edu/~lvdmaaten/workshops/nips2010/papers/vandermaaten.pdf
import numpy as np
from scipy import linalg
import scipy.sparse as sp
from scipy.spatial.distance import pdist
from scipy.spatial.distance import squareform
from ..neighbors import BallTree
from ..base import BaseEstimator
from ..utils import check_array
from ..utils import check_random_state
from ..utils.extmath import _ravel
from ..decomposition import RandomizedPCA
from ..metrics.pairwise import pairwise_distances
from . import _utils
from . import _barnes_hut_tsne
from ..utils.fixes import astype
MACHINE_EPSILON = np.finfo(np.double).eps
def _joint_probabilities(distances, desired_perplexity, verbose):
"""Compute joint probabilities p_ij from distances.
Parameters
----------
distances : array, shape (n_samples * (n_samples-1) / 2,)
Distances of samples are stored as condensed matrices, i.e.
we omit the diagonal and duplicate entries and store everything
in a one-dimensional array.
desired_perplexity : float
Desired perplexity of the joint probability distributions.
verbose : int
Verbosity level.
Returns
-------
P : array, shape (n_samples * (n_samples-1) / 2,)
Condensed joint probability matrix.
"""
# Compute conditional probabilities such that they approximately match
# the desired perplexity
distances = astype(distances, np.float32, copy=False)
conditional_P = _utils._binary_search_perplexity(
distances, None, desired_perplexity, verbose)
P = conditional_P + conditional_P.T
sum_P = np.maximum(np.sum(P), MACHINE_EPSILON)
P = np.maximum(squareform(P) / sum_P, MACHINE_EPSILON)
return P
def _joint_probabilities_nn(distances, neighbors, desired_perplexity, verbose):
"""Compute joint probabilities p_ij from distances using just nearest
neighbors.
This method is approximately equal to _joint_probabilities. The latter
is O(N^2), but limiting the joint probability to nearest neighbors improves
this substantially to O(uN).
Parameters
----------
distances : array, shape (n_samples * (n_samples-1) / 2,)
Distances of samples are stored as condensed matrices, i.e.
we omit the diagonal and duplicate entries and store everything
in a one-dimensional array.
desired_perplexity : float
Desired perplexity of the joint probability distributions.
verbose : int
Verbosity level.
Returns
-------
P : array, shape (n_samples * (n_samples-1) / 2,)
Condensed joint probability matrix.
"""
# Compute conditional probabilities such that they approximately match
# the desired perplexity
distances = astype(distances, np.float32, copy=False)
neighbors = astype(neighbors, np.int64, copy=False)
conditional_P = _utils._binary_search_perplexity(
distances, neighbors, desired_perplexity, verbose)
m = "All probabilities should be finite"
assert np.all(np.isfinite(conditional_P)), m
P = conditional_P + conditional_P.T
sum_P = np.maximum(np.sum(P), MACHINE_EPSILON)
P = np.maximum(squareform(P) / sum_P, MACHINE_EPSILON)
assert np.all(np.abs(P) <= 1.0)
return P
def _kl_divergence(params, P, degrees_of_freedom, n_samples, n_components,
skip_num_points=0):
"""t-SNE objective function: gradient of the KL divergence
of p_ijs and q_ijs and the absolute error.
Parameters
----------
params : array, shape (n_params,)
Unraveled embedding.
P : array, shape (n_samples * (n_samples-1) / 2,)
Condensed joint probability matrix.
degrees_of_freedom : float
Degrees of freedom of the Student's-t distribution.
n_samples : int
Number of samples.
n_components : int
Dimension of the embedded space.
skip_num_points : int (optional, default:0)
This does not compute the gradient for points with indices below
`skip_num_points`. This is useful when computing transforms of new
data where you'd like to keep the old data fixed.
Returns
-------
kl_divergence : float
Kullback-Leibler divergence of p_ij and q_ij.
grad : array, shape (n_params,)
Unraveled gradient of the Kullback-Leibler divergence with respect to
the embedding.
"""
X_embedded = params.reshape(n_samples, n_components)
# Q is a heavy-tailed distribution: Student's t-distribution
n = pdist(X_embedded, "sqeuclidean")
n += 1.
n /= degrees_of_freedom
n **= (degrees_of_freedom + 1.0) / -2.0
Q = np.maximum(n / (2.0 * np.sum(n)), MACHINE_EPSILON)
# Optimization trick below: np.dot(x, y) is faster than
# np.sum(x * y) because it calls BLAS
# Objective: C (Kullback-Leibler divergence of P and Q)
kl_divergence = 2.0 * np.dot(P, np.log(P / Q))
# Gradient: dC/dY
grad = np.ndarray((n_samples, n_components))
PQd = squareform((P - Q) * n)
for i in range(skip_num_points, n_samples):
np.dot(_ravel(PQd[i]), X_embedded[i] - X_embedded, out=grad[i])
grad = grad.ravel()
c = 2.0 * (degrees_of_freedom + 1.0) / degrees_of_freedom
grad *= c
return kl_divergence, grad
def _kl_divergence_error(params, P, neighbors, degrees_of_freedom, n_samples,
n_components):
"""t-SNE objective function: the absolute error of the
KL divergence of p_ijs and q_ijs.
Parameters
----------
params : array, shape (n_params,)
Unraveled embedding.
P : array, shape (n_samples * (n_samples-1) / 2,)
Condensed joint probability matrix.
neighbors : array (n_samples, K)
The neighbors is not actually required to calculate the
divergence, but is here to match the signature of the
gradient function
degrees_of_freedom : float
Degrees of freedom of the Student's-t distribution.
n_samples : int
Number of samples.
n_components : int
Dimension of the embedded space.
Returns
-------
kl_divergence : float
Kullback-Leibler divergence of p_ij and q_ij.
grad : array, shape (n_params,)
Unraveled gradient of the Kullback-Leibler divergence with respect to
the embedding.
"""
X_embedded = params.reshape(n_samples, n_components)
# Q is a heavy-tailed distribution: Student's t-distribution
n = pdist(X_embedded, "sqeuclidean")
n += 1.
n /= degrees_of_freedom
n **= (degrees_of_freedom + 1.0) / -2.0
Q = np.maximum(n / (2.0 * np.sum(n)), MACHINE_EPSILON)
# Optimization trick below: np.dot(x, y) is faster than
# np.sum(x * y) because it calls BLAS
# Objective: C (Kullback-Leibler divergence of P and Q)
if len(P.shape) == 2:
P = squareform(P)
kl_divergence = 2.0 * np.dot(P, np.log(P / Q))
return kl_divergence
def _kl_divergence_bh(params, P, neighbors, degrees_of_freedom, n_samples,
n_components, angle=0.5, skip_num_points=0,
verbose=False):
"""t-SNE objective function: KL divergence of p_ijs and q_ijs.
Uses Barnes-Hut tree methods to calculate the gradient that
runs in O(NlogN) instead of O(N^2)
Parameters
----------
params : array, shape (n_params,)
Unraveled embedding.
P : array, shape (n_samples * (n_samples-1) / 2,)
Condensed joint probability matrix.
neighbors: int64 array, shape (n_samples, K)
Array with element [i, j] giving the index for the jth
closest neighbor to point i.
degrees_of_freedom : float
Degrees of freedom of the Student's-t distribution.
n_samples : int
Number of samples.
n_components : int
Dimension of the embedded space.
angle : float (default: 0.5)
This is the trade-off between speed and accuracy for Barnes-Hut T-SNE.
'angle' is the angular size (referred to as theta in [3]) of a distant
node as measured from a point. If this size is below 'angle' then it is
used as a summary node of all points contained within it.
This method is not very sensitive to changes in this parameter
in the range of 0.2 - 0.8. Angle less than 0.2 has quickly increasing
computation time and angle greater 0.8 has quickly increasing error.
skip_num_points : int (optional, default:0)
This does not compute the gradient for points with indices below
`skip_num_points`. This is useful when computing transforms of new
data where you'd like to keep the old data fixed.
verbose : int
Verbosity level.
Returns
-------
kl_divergence : float
Kullback-Leibler divergence of p_ij and q_ij.
grad : array, shape (n_params,)
Unraveled gradient of the Kullback-Leibler divergence with respect to
the embedding.
"""
params = astype(params, np.float32, copy=False)
X_embedded = params.reshape(n_samples, n_components)
neighbors = astype(neighbors, np.int64, copy=False)
if len(P.shape) == 1:
sP = squareform(P).astype(np.float32)
else:
sP = P.astype(np.float32)
grad = np.zeros(X_embedded.shape, dtype=np.float32)
error = _barnes_hut_tsne.gradient(sP, X_embedded, neighbors,
grad, angle, n_components, verbose,
dof=degrees_of_freedom)
c = 2.0 * (degrees_of_freedom + 1.0) / degrees_of_freedom
grad = grad.ravel()
grad *= c
return error, grad
def _gradient_descent(objective, p0, it, n_iter, objective_error=None,
n_iter_check=1, n_iter_without_progress=50,
momentum=0.5, learning_rate=1000.0, min_gain=0.01,
min_grad_norm=1e-7, min_error_diff=1e-7, verbose=0,
args=None, kwargs=None):
"""Batch gradient descent with momentum and individual gains.
Parameters
----------
objective : function or callable
Should return a tuple of cost and gradient for a given parameter
vector. When expensive to compute, the cost can optionally
be None and can be computed every n_iter_check steps using
the objective_error function.
p0 : array-like, shape (n_params,)
Initial parameter vector.
it : int
Current number of iterations (this function will be called more than
once during the optimization).
n_iter : int
Maximum number of gradient descent iterations.
n_iter_check : int
Number of iterations before evaluating the global error. If the error
is sufficiently low, we abort the optimization.
objective_error : function or callable
Should return a tuple of cost and gradient for a given parameter
vector.
n_iter_without_progress : int, optional (default: 50)
Maximum number of iterations without progress before we abort the
optimization.
momentum : float, within (0.0, 1.0), optional (default: 0.5)
The momentum generates a weight for previous gradients that decays
exponentially.
learning_rate : float, optional (default: 1000.0)
The learning rate should be extremely high for t-SNE! Values in the
range [100.0, 1000.0] are common.
min_gain : float, optional (default: 0.01)
Minimum individual gain for each parameter.
min_grad_norm : float, optional (default: 1e-7)
If the gradient norm is below this threshold, the optimization will
be aborted.
min_error_diff : float, optional (default: 1e-7)
If the absolute difference of two successive cost function values
is below this threshold, the optimization will be aborted.
verbose : int, optional (default: 0)
Verbosity level.
args : sequence
Arguments to pass to objective function.
kwargs : dict
Keyword arguments to pass to objective function.
Returns
-------
p : array, shape (n_params,)
Optimum parameters.
error : float
Optimum.
i : int
Last iteration.
"""
if args is None:
args = []
if kwargs is None:
kwargs = {}
p = p0.copy().ravel()
update = np.zeros_like(p)
gains = np.ones_like(p)
error = np.finfo(np.float).max
best_error = np.finfo(np.float).max
best_iter = 0
for i in range(it, n_iter):
new_error, grad = objective(p, *args, **kwargs)
grad_norm = linalg.norm(grad)
inc = update * grad >= 0.0
dec = np.invert(inc)
gains[inc] += 0.05
gains[dec] *= 0.95
np.clip(gains, min_gain, np.inf)
grad *= gains
update = momentum * update - learning_rate * grad
p += update
if (i + 1) % n_iter_check == 0:
if new_error is None:
new_error = objective_error(p, *args)
error_diff = np.abs(new_error - error)
error = new_error
if verbose >= 2:
m = "[t-SNE] Iteration %d: error = %.7f, gradient norm = %.7f"
print(m % (i + 1, error, grad_norm))
if error < best_error:
best_error = error
best_iter = i
elif i - best_iter > n_iter_without_progress:
if verbose >= 2:
print("[t-SNE] Iteration %d: did not make any progress "
"during the last %d episodes. Finished."
% (i + 1, n_iter_without_progress))
break
if grad_norm <= min_grad_norm:
if verbose >= 2:
print("[t-SNE] Iteration %d: gradient norm %f. Finished."
% (i + 1, grad_norm))
break
if error_diff <= min_error_diff:
if verbose >= 2:
m = "[t-SNE] Iteration %d: error difference %f. Finished."
print(m % (i + 1, error_diff))
break
if new_error is not None:
error = new_error
return p, error, i
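# Minimal sketch (not part of scikit-learn): the optimizer above only needs a
# callable returning (cost, gradient). Minimizing f(p) = ||p||^2 illustrates
# the calling convention; the helper name and parameter values are made up.
def _gradient_descent_example():
    def quadratic(p):
        return np.sum(p ** 2), 2.0 * p
    p0 = np.array([1.0, -2.0, 3.0])
    p, error, it = _gradient_descent(quadratic, p0, it=0, n_iter=100,
                                     learning_rate=0.1, momentum=0.5)
    # p should end up close to the origin and error close to 0.0
    return p, error, it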
def trustworthiness(X, X_embedded, n_neighbors=5, precomputed=False):
"""Expresses to what extent the local structure is retained.
The trustworthiness is within [0, 1]. It is defined as
.. math::
T(k) = 1 - \frac{2}{nk (2n - 3k - 1)} \sum^n_{i=1}
\sum_{j \in U^{(k)}_i} (r(i, j) - k)
where :math:`r(i, j)` is the rank of the embedded datapoint j
according to the pairwise distances between the embedded datapoints,
:math:`U^{(k)}_i` is the set of points that are in the k nearest
neighbors in the embedded space but not in the original space.
* "Neighborhood Preservation in Nonlinear Projection Methods: An
Experimental Study"
J. Venna, S. Kaski
* "Learning a Parametric Embedding by Preserving Local Structure"
L.J.P. van der Maaten
Parameters
----------
X : array, shape (n_samples, n_features) or (n_samples, n_samples)
If the metric is 'precomputed' X must be a square distance
matrix. Otherwise it contains a sample per row.
X_embedded : array, shape (n_samples, n_components)
Embedding of the training data in low-dimensional space.
n_neighbors : int, optional (default: 5)
Number of neighbors k that will be considered.
precomputed : bool, optional (default: False)
Set this flag if X is a precomputed square distance matrix.
Returns
-------
trustworthiness : float
Trustworthiness of the low-dimensional embedding.
"""
if precomputed:
dist_X = X
else:
dist_X = pairwise_distances(X, squared=True)
dist_X_embedded = pairwise_distances(X_embedded, squared=True)
ind_X = np.argsort(dist_X, axis=1)
ind_X_embedded = np.argsort(dist_X_embedded, axis=1)[:, 1:n_neighbors + 1]
n_samples = X.shape[0]
t = 0.0
ranks = np.zeros(n_neighbors)
for i in range(n_samples):
for j in range(n_neighbors):
ranks[j] = np.where(ind_X[i] == ind_X_embedded[i, j])[0][0]
ranks -= n_neighbors
t += np.sum(ranks[ranks > 0])
t = 1.0 - t * (2.0 / (n_samples * n_neighbors *
(2.0 * n_samples - 3.0 * n_neighbors - 1.0)))
return t
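# Minimal sketch (not part of scikit-learn): an embedding that preserves every
# neighbourhood, such as the identity mapping below, scores a trustworthiness
# of exactly 1.0. The helper name is made up for this illustration.
def _trustworthiness_example():
    X = np.array([[0.0, 0.0], [0.0, 1.0], [1.0, 0.0], [1.0, 1.0]])
    return trustworthiness(X, X, n_neighbors=1)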
class TSNE(BaseEstimator):
"""t-distributed Stochastic Neighbor Embedding.
t-SNE [1] is a tool to visualize high-dimensional data. It converts
similarities between data points to joint probabilities and tries
to minimize the Kullback-Leibler divergence between the joint
probabilities of the low-dimensional embedding and the
high-dimensional data. t-SNE has a cost function that is not convex,
i.e. with different initializations we can get different results.
It is highly recommended to use another dimensionality reduction
method (e.g. PCA for dense data or TruncatedSVD for sparse data)
to reduce the number of dimensions to a reasonable amount (e.g. 50)
if the number of features is very high. This will suppress some
noise and speed up the computation of pairwise distances between
samples. For more tips see Laurens van der Maaten's FAQ [2].
Read more in the :ref:`User Guide <t_sne>`.
Parameters
----------
n_components : int, optional (default: 2)
Dimension of the embedded space.
perplexity : float, optional (default: 30)
The perplexity is related to the number of nearest neighbors that
is used in other manifold learning algorithms. Larger datasets
usually require a larger perplexity. Consider selecting a value
between 5 and 50. The choice is not extremely critical since t-SNE
is quite insensitive to this parameter.
early_exaggeration : float, optional (default: 4.0)
Controls how tight natural clusters in the original space are in
the embedded space and how much space will be between them. For
larger values, the space between natural clusters will be larger
in the embedded space. Again, the choice of this parameter is not
very critical. If the cost function increases during initial
optimization, the early exaggeration factor or the learning rate
might be too high.
learning_rate : float, optional (default: 1000)
The learning rate can be a critical parameter. It should be
between 100 and 1000. If the cost function increases during initial
optimization, the early exaggeration factor or the learning rate
might be too high. If the cost function gets stuck in a bad local
minimum increasing the learning rate helps sometimes.
n_iter : int, optional (default: 1000)
Maximum number of iterations for the optimization. Should be at
least 200.
n_iter_without_progress : int, optional (default: 30)
Maximum number of iterations without progress before we abort the
optimization.
min_grad_norm : float, optional (default: 1E-7)
If the gradient norm is below this threshold, the optimization will
be aborted.
metric : string or callable, optional
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
allowed by scipy.spatial.distance.pdist for its metric parameter, or
a metric listed in pairwise.PAIRWISE_DISTANCE_FUNCTIONS.
If metric is "precomputed", X is assumed to be a distance matrix.
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them. The default is "euclidean" which is
interpreted as squared euclidean distance.
init : string, optional (default: "random")
Initialization of embedding. Possible options are 'random' and 'pca'.
PCA initialization cannot be used with precomputed distances and is
usually more globally stable than random initialization.
verbose : int, optional (default: 0)
Verbosity level.
random_state : int or RandomState instance or None (default)
Pseudo Random Number generator seed control. If None, use the
numpy.random singleton. Note that different initializations
might result in different local minima of the cost function.
method : string (default: 'barnes_hut')
By default the gradient calculation algorithm uses Barnes-Hut
approximation running in O(NlogN) time. method='exact'
will run on the slower, but exact, algorithm in O(N^2) time. The
exact algorithm should be used when nearest-neighbor errors need
to be better than 3%. However, the exact method cannot scale to
millions of examples.
angle : float (default: 0.5)
Only used if method='barnes_hut'
This is the trade-off between speed and accuracy for Barnes-Hut T-SNE.
'angle' is the angular size (referred to as theta in [3]) of a distant
node as measured from a point. If this size is below 'angle' then it is
used as a summary node of all points contained within it.
This method is not very sensitive to changes in this parameter
in the range of 0.2 - 0.8. Angle less than 0.2 has quickly increasing
computation time and angle greater 0.8 has quickly increasing error.
Attributes
----------
embedding_ : array-like, shape (n_samples, n_components)
Stores the embedding vectors.
Examples
--------
>>> import numpy as np
>>> from sklearn.manifold import TSNE
>>> X = np.array([[0, 0, 0], [0, 1, 1], [1, 0, 1], [1, 1, 1]])
>>> model = TSNE(n_components=2, random_state=0)
>>> np.set_printoptions(suppress=True)
>>> model.fit_transform(X) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
array([[ 0.00017599, 0.00003993],
[ 0.00009891, 0.00021913],
[ 0.00018554, -0.00009357],
[ 0.00009528, -0.00001407]])
References
----------
[1] van der Maaten, L.J.P.; Hinton, G.E. Visualizing High-Dimensional Data
Using t-SNE. Journal of Machine Learning Research 9:2579-2605, 2008.
[2] van der Maaten, L.J.P. t-Distributed Stochastic Neighbor Embedding
http://homepage.tudelft.nl/19j49/t-SNE.html
[3] L.J.P. van der Maaten. Accelerating t-SNE using Tree-Based Algorithms.
Journal of Machine Learning Research 15(Oct):3221-3245, 2014.
http://lvdmaaten.github.io/publications/papers/JMLR_2014.pdf
"""
def __init__(self, n_components=2, perplexity=30.0,
early_exaggeration=4.0, learning_rate=1000.0, n_iter=1000,
n_iter_without_progress=30, min_grad_norm=1e-7,
metric="euclidean", init="random", verbose=0,
random_state=None, method='barnes_hut', angle=0.5):
if init not in ["pca", "random"] or isinstance(init, np.ndarray):
msg = "'init' must be 'pca', 'random' or a NumPy array"
raise ValueError(msg)
self.n_components = n_components
self.perplexity = perplexity
self.early_exaggeration = early_exaggeration
self.learning_rate = learning_rate
self.n_iter = n_iter
self.n_iter_without_progress = n_iter_without_progress
self.min_grad_norm = min_grad_norm
self.metric = metric
self.init = init
self.verbose = verbose
self.random_state = random_state
self.method = method
self.angle = angle
self.embedding_ = None
def _fit(self, X, skip_num_points=0):
"""Fit the model using X as training data.
Note that sparse arrays can only be handled by method='exact'.
It is recommended that you convert your sparse array to dense
(e.g. `X.toarray()`) if it fits in memory, or otherwise using a
dimensionality reduction technique (e.g. TruncatedSVD).
Parameters
----------
X : array, shape (n_samples, n_features) or (n_samples, n_samples)
If the metric is 'precomputed' X must be a square distance
matrix. Otherwise it contains a sample per row. Note that this
when method='barnes_hut', X cannot be a sparse array and if need be
will be converted to a 32 bit float array. Method='exact' allows
sparse arrays and 64bit floating point inputs.
skip_num_points : int (optional, default:0)
This does not compute the gradient for points with indices below
`skip_num_points`. This is useful when computing transforms of new
data where you'd like to keep the old data fixed.
"""
if self.method not in ['barnes_hut', 'exact']:
raise ValueError("'method' must be 'barnes_hut' or 'exact'")
if self.angle < 0.0 or self.angle > 1.0:
raise ValueError("'angle' must be between 0.0 - 1.0")
if self.method == 'barnes_hut' and sp.issparse(X):
raise TypeError('A sparse matrix was passed, but dense '
'data is required for method="barnes_hut". Use '
'X.toarray() to convert to a dense numpy array if '
'the array is small enough for it to fit in '
'memory. Otherwise consider dimensionality '
'reduction techniques (e.g. TruncatedSVD)')
X = check_array(X, dtype=np.float32)
else:
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'], dtype=np.float64)
random_state = check_random_state(self.random_state)
if self.early_exaggeration < 1.0:
raise ValueError("early_exaggeration must be at least 1, but is "
"%f" % self.early_exaggeration)
if self.n_iter < 200:
raise ValueError("n_iter should be at least 200")
if self.metric == "precomputed":
if self.init == 'pca':
raise ValueError("The parameter init=\"pca\" cannot be used "
"with metric=\"precomputed\".")
if X.shape[0] != X.shape[1]:
raise ValueError("X should be a square distance matrix")
distances = X
else:
if self.verbose:
print("[t-SNE] Computing pairwise distances...")
if self.metric == "euclidean":
distances = pairwise_distances(X, metric=self.metric,
squared=True)
else:
distances = pairwise_distances(X, metric=self.metric)
if not np.all(distances >= 0):
raise ValueError("All distances should be positive, either "
"the metric or precomputed distances given "
"as X are not correct")
# Degrees of freedom of the Student's t-distribution. The suggestion
# degrees_of_freedom = n_components - 1 comes from
# "Learning a Parametric Embedding by Preserving Local Structure"
# Laurens van der Maaten, 2009.
degrees_of_freedom = max(self.n_components - 1.0, 1)
n_samples = X.shape[0]
# the number of nearest neighbors to find
k = min(n_samples - 1, int(3. * self.perplexity + 1))
neighbors_nn = None
if self.method == 'barnes_hut':
if self.verbose:
print("[t-SNE] Computing %i nearest neighbors..." % k)
if self.metric == 'precomputed':
# Use the precomputed distances to find
# the k nearest neighbors and their distances
neighbors_nn = np.argsort(distances, axis=1)[:, :k]
else:
# Find the nearest neighbors for every point
bt = BallTree(X)
# LvdM uses 3 * perplexity as the number of neighbors
# And we add one to not count the data point itself
# In the event that we have very small # of points
# set the neighbors to n - 1
distances_nn, neighbors_nn = bt.query(X, k=k + 1)
neighbors_nn = neighbors_nn[:, 1:]
P = _joint_probabilities_nn(distances, neighbors_nn,
self.perplexity, self.verbose)
else:
P = _joint_probabilities(distances, self.perplexity, self.verbose)
assert np.all(np.isfinite(P)), "All probabilities should be finite"
assert np.all(P >= 0), "All probabilities should be zero or positive"
assert np.all(P <= 1), ("All probabilities should be less "
"or then equal to one")
if self.init == 'pca':
pca = RandomizedPCA(n_components=self.n_components,
random_state=random_state)
X_embedded = pca.fit_transform(X)
elif isinstance(self.init, np.ndarray):
X_embedded = self.init
elif self.init == 'random':
X_embedded = None
else:
raise ValueError("Unsupported initialization scheme: %s"
% self.init)
return self._tsne(P, degrees_of_freedom, n_samples, random_state,
X_embedded=X_embedded,
neighbors=neighbors_nn,
skip_num_points=skip_num_points)
def _tsne(self, P, degrees_of_freedom, n_samples, random_state,
X_embedded=None, neighbors=None, skip_num_points=0):
"""Runs t-SNE."""
# t-SNE minimizes the Kullback-Leiber divergence of the Gaussians P
# and the Student's t-distributions Q. The optimization algorithm that
# we use is batch gradient descent with three stages:
# * early exaggeration with momentum 0.5
# * early exaggeration with momentum 0.8
# * final optimization with momentum 0.8
# The embedding is initialized with iid samples from Gaussians with
# standard deviation 1e-4.
if X_embedded is None:
# Initialize embedding randomly
X_embedded = 1e-4 * random_state.randn(n_samples,
self.n_components)
params = X_embedded.ravel()
opt_args = {"n_iter": 50, "momentum": 0.5, "it": 0,
"learning_rate": self.learning_rate,
"verbose": self.verbose, "n_iter_check": 25,
"kwargs": dict(skip_num_points=skip_num_points)}
if self.method == 'barnes_hut':
m = "Must provide an array of neighbors to use Barnes-Hut"
assert neighbors is not None, m
obj_func = _kl_divergence_bh
objective_error = _kl_divergence_error
sP = squareform(P).astype(np.float32)
neighbors = neighbors.astype(np.int64)
args = [sP, neighbors, degrees_of_freedom, n_samples,
self.n_components]
opt_args['args'] = args
opt_args['min_grad_norm'] = 1e-3
opt_args['n_iter_without_progress'] = 30
# Don't always calculate the cost since that calculation
# can be nearly as expensive as the gradient
opt_args['objective_error'] = objective_error
opt_args['kwargs']['angle'] = self.angle
opt_args['kwargs']['verbose'] = self.verbose
else:
obj_func = _kl_divergence
opt_args['args'] = [P, degrees_of_freedom, n_samples,
self.n_components]
opt_args['min_error_diff'] = 0.0
opt_args['min_grad_norm'] = 0.0
# Early exaggeration
P *= self.early_exaggeration
params, error, it = _gradient_descent(obj_func, params, **opt_args)
opt_args['n_iter'] = 100
opt_args['momentum'] = 0.8
opt_args['it'] = it + 1
params, error, it = _gradient_descent(obj_func, params, **opt_args)
if self.verbose:
print("[t-SNE] Error after %d iterations with early "
"exaggeration: %f" % (it + 1, error))
# Save the final number of iterations
self.n_iter_final = it
# Final optimization
P /= self.early_exaggeration
opt_args['n_iter'] = self.n_iter
opt_args['it'] = it + 1
params, error, it = _gradient_descent(obj_func, params, **opt_args)
if self.verbose:
print("[t-SNE] Error after %d iterations: %f" % (it + 1, error))
X_embedded = params.reshape(n_samples, self.n_components)
return X_embedded
def fit_transform(self, X, y=None):
"""Fit X into an embedded space and return that transformed
output.
Parameters
----------
X : array, shape (n_samples, n_features) or (n_samples, n_samples)
If the metric is 'precomputed' X must be a square distance
matrix. Otherwise it contains a sample per row.
Returns
-------
X_new : array, shape (n_samples, n_components)
Embedding of the training data in low-dimensional space.
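Examples
--------
A minimal sketch, assuming this estimator is the class exposed as
``sklearn.manifold.TSNE`` (the import path is not defined in this file):
>>> import numpy as np
>>> from sklearn.manifold import TSNE
>>> X = np.random.RandomState(0).rand(20, 5)
>>> X_embedded = TSNE(n_components=2, perplexity=5,
...                   random_state=0).fit_transform(X)
>>> X_embedded.shape
(20, 2)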
"""
embedding = self._fit(X)
self.embedding_ = embedding
return self.embedding_
def fit(self, X, y=None):
"""Fit X into an embedded space.
Parameters
----------
X : array, shape (n_samples, n_features) or (n_samples, n_samples)
If the metric is 'precomputed' X must be a square distance
matrix. Otherwise it contains a sample per row. If the method
is 'exact', X may be a sparse matrix of type 'csr', 'csc'
or 'coo'.
"""
self.fit_transform(X)
return self
def _check_fitted(self):
if self.embedding_ is None:
raise ValueError("Cannot call `transform` unless `fit` has"
"already been called")
| bsd-3-clause |
andrewnc/scikit-learn | examples/datasets/plot_random_dataset.py | 348 | 2254 | """
==============================================
Plot randomly generated classification dataset
==============================================
Plot several randomly generated 2D classification datasets.
This example illustrates the :func:`datasets.make_classification`,
:func:`datasets.make_blobs` and :func:`datasets.make_gaussian_quantiles`
functions.
For ``make_classification``, three binary datasets and one multi-class
dataset are generated, with different numbers of informative features and
clusters per class. """
print(__doc__)
import matplotlib.pyplot as plt
from sklearn.datasets import make_classification
from sklearn.datasets import make_blobs
from sklearn.datasets import make_gaussian_quantiles
plt.figure(figsize=(8, 8))
plt.subplots_adjust(bottom=.05, top=.9, left=.05, right=.95)
plt.subplot(321)
plt.title("One informative feature, one cluster per class", fontsize='small')
X1, Y1 = make_classification(n_features=2, n_redundant=0, n_informative=1,
n_clusters_per_class=1)
plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)
plt.subplot(322)
plt.title("Two informative features, one cluster per class", fontsize='small')
X1, Y1 = make_classification(n_features=2, n_redundant=0, n_informative=2,
n_clusters_per_class=1)
plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)
plt.subplot(323)
plt.title("Two informative features, two clusters per class", fontsize='small')
X2, Y2 = make_classification(n_features=2, n_redundant=0, n_informative=2)
plt.scatter(X2[:, 0], X2[:, 1], marker='o', c=Y2)
plt.subplot(324)
plt.title("Multi-class, two informative features, one cluster",
fontsize='small')
X1, Y1 = make_classification(n_features=2, n_redundant=0, n_informative=2,
n_clusters_per_class=1, n_classes=3)
plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)
plt.subplot(325)
plt.title("Three blobs", fontsize='small')
X1, Y1 = make_blobs(n_features=2, centers=3)
plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)
plt.subplot(326)
plt.title("Gaussian divided into three quantiles", fontsize='small')
X1, Y1 = make_gaussian_quantiles(n_features=2, n_classes=3)
plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)
plt.show()
| bsd-3-clause |
mlperf/training_results_v0.7 | Fujitsu/benchmarks/resnet/implementations/implementation_open/mxnet/example/multivariate_time_series/src/lstnet.py | 17 | 11583 | # !/usr/bin/env python
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# -*- coding: utf-8 -*-
#Todo: Ensure skip connection implementation is correct
import os
import math
import numpy as np
import pandas as pd
import mxnet as mx
import argparse
import logging
import metrics
logging.basicConfig(level=logging.DEBUG)
parser = argparse.ArgumentParser(description="Deep neural network for multivariate time series forecasting",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--data-dir', type=str, default='../data',
help='relative path to input data')
parser.add_argument('--max-records', type=int, default=None,
help='total records before data split')
parser.add_argument('--q', type=int, default=24*7,
help='number of historical measurements included in each training example')
parser.add_argument('--horizon', type=int, default=3,
help='number of measurements ahead to predict')
parser.add_argument('--splits', type=str, default="0.6,0.2",
help='fraction of data to use for train & validation. remainder used for test.')
parser.add_argument('--batch-size', type=int, default=128,
help='the batch size.')
parser.add_argument('--filter-list', type=str, default="6,12,18",
help='unique filter sizes')
parser.add_argument('--num-filters', type=int, default=100,
help='number of each filter size')
parser.add_argument('--recurrent-state-size', type=int, default=100,
help='number of hidden units in each unrolled recurrent cell')
parser.add_argument('--seasonal-period', type=int, default=24,
help='time between seasonal measurements')
parser.add_argument('--time-interval', type=int, default=1,
help='time between each measurement')
parser.add_argument('--gpus', type=str, default='',
help='list of gpus to run, e.g. 0 or 0,2,5. empty means using cpu. ')
parser.add_argument('--optimizer', type=str, default='adam',
help='the optimizer type')
parser.add_argument('--lr', type=float, default=0.001,
help='initial learning rate')
parser.add_argument('--dropout', type=float, default=0.2,
help='dropout rate for network')
parser.add_argument('--num-epochs', type=int, default=100,
help='max num of epochs')
parser.add_argument('--save-period', type=int, default=20,
help='save checkpoint for every n epochs')
parser.add_argument('--model_prefix', type=str, default='electricity_model',
help='prefix for saving model params')
def build_iters(data_dir, max_records, q, horizon, splits, batch_size):
"""
Load & generate training examples from multivariate time series data
:return: data iters & variables required to define network architecture
"""
# Read in data as numpy array
df = pd.read_csv(os.path.join(data_dir, "electricity.txt"), sep=",", header=None)
feature_df = df.iloc[:, :].astype(float)
x = feature_df.as_matrix()
x = x[:max_records] if max_records else x
# Construct training examples based on horizon and window
x_ts = np.zeros((x.shape[0] - q, q, x.shape[1]))
y_ts = np.zeros((x.shape[0] - q, x.shape[1]))
for n in range(x.shape[0]):
if n + 1 < q:
continue
elif n + 1 + horizon > x.shape[0]:
continue
else:
y_n = x[n + horizon, :]
x_n = x[n + 1 - q:n + 1, :]
x_ts[n-q] = x_n
y_ts[n-q] = y_n
# Split into training and testing data
training_examples = int(x_ts.shape[0] * splits[0])
valid_examples = int(x_ts.shape[0] * splits[1])
x_train, y_train = x_ts[:training_examples], \
y_ts[:training_examples]
x_valid, y_valid = x_ts[training_examples:training_examples + valid_examples], \
y_ts[training_examples:training_examples + valid_examples]
x_test, y_test = x_ts[training_examples + valid_examples:], \
y_ts[training_examples + valid_examples:]
#build iterators to feed batches to network
train_iter = mx.io.NDArrayIter(data=x_train,
label=y_train,
batch_size=batch_size)
val_iter = mx.io.NDArrayIter(data=x_valid,
label=y_valid,
batch_size=batch_size)
test_iter = mx.io.NDArrayIter(data=x_test,
label=y_test,
batch_size=batch_size)
return train_iter, val_iter, test_iter
def sym_gen(train_iter, q, filter_list, num_filter, dropout, rcells, skiprcells, seasonal_period, time_interval):
input_feature_shape = train_iter.provide_data[0][1]
X = mx.symbol.Variable(train_iter.provide_data[0].name)
Y = mx.sym.Variable(train_iter.provide_label[0].name)
# reshape data before applying the convolutional layer (Convolution expects a 4D shape, as used for images)
conv_input = mx.sym.reshape(data=X, shape=(0, 1, q, -1))
###############
# CNN Component
###############
outputs = []
for i, filter_size in enumerate(filter_list):
# pad input array to ensure number output rows = number input rows after applying kernel
padi = mx.sym.pad(data=conv_input, mode="constant", constant_value=0,
pad_width=(0, 0, 0, 0, filter_size - 1, 0, 0, 0))
convi = mx.sym.Convolution(data=padi, kernel=(filter_size, input_feature_shape[2]), num_filter=num_filter)
acti = mx.sym.Activation(data=convi, act_type='relu')
trans = mx.sym.reshape(mx.sym.transpose(data=acti, axes=(0, 2, 1, 3)), shape=(0, 0, 0))
outputs.append(trans)
cnn_features = mx.sym.Concat(*outputs, dim=2)
cnn_reg_features = mx.sym.Dropout(cnn_features, p=dropout)
###############
# RNN Component
###############
stacked_rnn_cells = mx.rnn.SequentialRNNCell()
for i, recurrent_cell in enumerate(rcells):
stacked_rnn_cells.add(recurrent_cell)
stacked_rnn_cells.add(mx.rnn.DropoutCell(dropout))
outputs, states = stacked_rnn_cells.unroll(length=q, inputs=cnn_reg_features, merge_outputs=False)
rnn_features = outputs[-1] #only take value from final unrolled cell for use later
####################
# Skip-RNN Component
####################
stacked_rnn_cells = mx.rnn.SequentialRNNCell()
for i, recurrent_cell in enumerate(skiprcells):
stacked_rnn_cells.add(recurrent_cell)
stacked_rnn_cells.add(mx.rnn.DropoutCell(dropout))
outputs, states = stacked_rnn_cells.unroll(length=q, inputs=cnn_reg_features, merge_outputs=False)
# Take output from cells p steps apart
p = int(seasonal_period / time_interval)
output_indices = list(range(0, q, p))
outputs.reverse()
skip_outputs = [outputs[i] for i in output_indices]
skip_rnn_features = mx.sym.concat(*skip_outputs, dim=1)
##########################
# Autoregressive Component
##########################
auto_list = []
for i in list(range(input_feature_shape[2])):
time_series = mx.sym.slice_axis(data=X, axis=2, begin=i, end=i+1)
fc_ts = mx.sym.FullyConnected(data=time_series, num_hidden=1)
auto_list.append(fc_ts)
ar_output = mx.sym.concat(*auto_list, dim=1)
######################
# Prediction Component
######################
neural_components = mx.sym.concat(*[rnn_features, skip_rnn_features], dim=1)
neural_output = mx.sym.FullyConnected(data=neural_components, num_hidden=input_feature_shape[2])
model_output = neural_output + ar_output
loss_grad = mx.sym.LinearRegressionOutput(data=model_output, label=Y)
return loss_grad, [v.name for v in train_iter.provide_data], [v.name for v in train_iter.provide_label]
def train(symbol, train_iter, valid_iter, data_names, label_names):
devs = mx.cpu() if args.gpus is None or args.gpus == '' else [mx.gpu(int(i)) for i in args.gpus.split(',')]
module = mx.mod.Module(symbol, data_names=data_names, label_names=label_names, context=devs)
module.bind(data_shapes=train_iter.provide_data, label_shapes=train_iter.provide_label)
module.init_params(mx.initializer.Uniform(0.1))
module.init_optimizer(optimizer=args.optimizer, optimizer_params={'learning_rate': args.lr})
for epoch in range(1, args.num_epochs+1):
train_iter.reset()
val_iter.reset()
for batch in train_iter:
module.forward(batch, is_train=True) # compute predictions
module.backward() # compute gradients
module.update() # update parameters
train_pred = module.predict(train_iter).asnumpy()
train_label = train_iter.label[0][1].asnumpy()
print('\nMetrics: Epoch %d, Training %s' % (epoch, metrics.evaluate(train_pred, train_label)))
val_pred = module.predict(val_iter).asnumpy()
val_label = val_iter.label[0][1].asnumpy()
print('Metrics: Epoch %d, Validation %s' % (epoch, metrics.evaluate(val_pred, val_label)))
if epoch % args.save_period == 0 and epoch > 1:
module.save_checkpoint(prefix=os.path.join("../models/", args.model_prefix), epoch=epoch, save_optimizer_states=False)
if epoch == args.num_epochs:
module.save_checkpoint(prefix=os.path.join("../models/", args.model_prefix), epoch=epoch, save_optimizer_states=False)
if __name__ == '__main__':
# parse args
args = parser.parse_args()
args.splits = list(map(float, args.splits.split(',')))
args.filter_list = list(map(int, args.filter_list.split(',')))
# Check valid args
if not max(args.filter_list) <= args.q:
raise AssertionError("no filter can be larger than q")
if not args.q >= math.ceil(args.seasonal_period / args.time_interval):
raise AssertionError("size of skip connections cannot exceed q")
# Build data iterators
train_iter, val_iter, test_iter = build_iters(args.data_dir, args.max_records, args.q, args.horizon, args.splits, args.batch_size)
# Choose cells for recurrent layers: each cell will take the output of the previous cell in the list
rcells = [mx.rnn.GRUCell(num_hidden=args.recurrent_state_size)]
skiprcells = [mx.rnn.LSTMCell(num_hidden=args.recurrent_state_size)]
# Define network symbol
symbol, data_names, label_names = sym_gen(train_iter, args.q, args.filter_list, args.num_filters,
args.dropout, rcells, skiprcells, args.seasonal_period, args.time_interval)
# train cnn model
train(symbol, train_iter, val_iter, data_names, label_names)
| apache-2.0 |
jerryyjr/cosc445_project | scripts/plot-synchronization.py | 4 | 3866 | """
Plots the synchronization between cells
Copyright (C) 2012 Ahmet Ay, Jack Holland, Adriana Sperlea
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import matplotlib.pyplot as plt
import pylab
import sys
import shared
import math
def main():
# check the given arguments
if len(sys.argv) < 6:
usage()
else:
folder = sys.argv[1]
parsets = shared.toInt(sys.argv[2])
ofolder = sys.argv[3]
image_name = sys.argv[4]
excel_name = sys.argv[5]
mutants = ["wildtype", "delta", "her1", "her7", "her7her13", "her13"]
markers = ['o', '^', 's', '*', 'h', 'D']
colors = ['k', 'b', 'g', 'r', 'c', 'm']
num_mutants = 6
# Create excel file in which the data used to create the plots will be stored
excel_file = shared.openFile(ofolder + "/" + excel_name + "-sync.csv", "w")
for index in range(num_mutants):
mutant = mutants[index]
marker = markers[index]
color = colors[index]
# open the first file to get the height, width and interval
f = shared.openFile(folder + "/" + mutant + "/set_0_sync_mh1.feats", "r")
# split the lines to get data
data = [line.split(",") for line in f]
# calculate the tissue size
height = shared.toInt(data[0][0])
interval = shared.toFlo(data[0][1])
#split_time = shared.toFlo(data[0][2])
width = len(data[1]) - 1
indexes = [0 for i in range(width)]
averages = [0 for i in range(width)]
stderr = [0 for i in range(width)]
for parset in range(parsets):
f = shared.openFile(folder + "/" + mutant + "/set_" + str(parset) + "_sync_mh1.feats", "r")
# split the lines to get data
data = [line.split(",") for line in f]
for col in range(width):
for line in range(1, height + 1):
averages[col] += shared.toFlo(data[line][col])
f.close()
for col in range(width):
indexes[col] = (((interval / 2) * col + (interval / 2) * col + interval) / 2) / 6
averages[col] /= height * parsets
if mutant == "wildtype":
excel_file.write("mutant,")
for index in indexes:
excel_file.write(str(index) + ",")
excel_file.write("\n")
for parset in range(parsets):
f = shared.openFile(folder + "/" + mutant + "/set_" + str(parset) + "_sync_mh1.feats", "r")
data = [line.split(",") for line in f]
# std error = std deviation / sqrt(num data points)
for col in range(width):
for line in range(1, height + 1):
stderr[col] += (shared.toFlo(data[line][col]) - averages[col]) ** 2
stderr[col] = math.sqrt(stderr[col] / (height * parsets))
stderr[col] /= math.sqrt(height * parsets)
# Print the means and standard deviations to the excel_file
excel_file.write(mutant + ",")
for average in averages:
excel_file.write(str(average) + ",")
excel_file.write("\n,")
for stder in stderr:
excel_file.write(str(stder) + ",")
excel_file.write("\n")
plt.errorbar(indexes, averages, stderr, fmt='ro', linestyle='-', marker=marker, color=color, label=mutant)
plt.legend(prop={'size':8}, loc=3)
pylab.xlim([0, (width + 1) * (interval / 2) / 6])
plt.savefig(ofolder + "/" + image_name + ".png", format = "png")
plt.close()
def usage():
print "Usage: python plot-synchronization.py <directory with synchronization files> <number of parameter sets> <directory to store the image> <name of image> <name of excel file>"
exit(0)
main()
| gpl-3.0 |
amitgroup/parts-net | pnet/bernoulli_mm.py | 1 | 19546 | from __future__ import division, print_function, absolute_import
from sklearn.utils.extmath import logsumexp
from sklearn.utils import check_random_state
from sklearn.base import BaseEstimator
import numpy as np
import random, collections
import scipy.sparse
# Author: Gustav Larsson
# Mark Stoehr <[email protected]>
#
EPS = np.finfo(float).eps
def log_product_of_bernoullis_mixture_likelihood(X, log_odds, log_inv_mean_sums):
"""Log likelihood function for the product of Bernoullis
mixture distribution
"""
return np.dot(X,log_odds.T) + log_inv_mean_sums
def sample_bernoulli(mean, n_samples=1,
random_state=None,
data_type=np.uint8):
"""Generate random samples from a Bernoulli distribution.
Parameters
----------
mean : array_like, shape (n_features,)
Mean of the distribution.
n_samples : int, optional
Number of samples to generate. Defaults to 1.
Returns
-------
X : array, shape (n_features, n_samples)
Randomly generated sample
"""
rng = check_random_state(random_state)
n_dim = len(mean)
rand = rng.rand(n_dim, n_samples)
if n_samples == 1:
rand.shape = (n_dim,)
return (rand.T < mean).T.astype(data_type)
class BernoulliMM(BaseEstimator):
"""
Bernoulli Mixture model with an EM solver.
Parameters
----------
n_components : int, optional
Number of mixture components. Defaults to 1.
random_state : RandomState or an int seed (0 by default)
A random number generator instance
min_prob : float, optional
Floor for the minimum probability
thresh : float, optional
Convergence threshold.
n_iter : float, optional
Number of EM iterations to perform
n_init : int, optional
Number of random initializations to perform with
the best kept.
params : string, optional
Controls which parameters are updated during training.
If 'w' is in the string then the weights are updated,
and if 'm' is in the string then the means are updated.
The default is 'wm'.
init_params : string, optional
Controls which parameters are updated during initialization.
If 'w' is in the string then the weights are updated,
and if 'm' is in the string then the means are updated.
The default is 'wm'.
float_type : numpy type, optional
What float type to use for the parameter arrays.
Attributes
----------
`weights_` : array, shape (`n_components`,)
Stores the mixing weights for each component
`means_` : array, shape (`n_components`, `n_features`)
Mean parameters for each mixture component.
`converged_` : bool
True when convergence was reached in fit(), False otherwise.
Examples
--------
Create a mixture model with 2 mixture components and fit it to binary data.
>>> import numpy as np
>>> from pnet.bernoulli_mm import BernoulliMM
>>> data = np.array([[1, 1, 0], [0, 0, 1], [1, 1, 1]])
>>> mixture = BernoulliMM(n_components=2, random_state=0).fit(data)
Inspect the component means and the corresponding mixing weights.
>>> mixture.means_ # doctest: +SKIP
>>> mixture.weights_ # doctest: +SKIP
Display the posterior distribution over components (the responsibilities),
describing which mixture component each data point likely came from:
>>> mixture.predict_proba(data) # doctest: +SKIP
"""
def __init__(self, n_components=1,
random_state=None, thresh=1e-6, min_prob=1e-2, min_num=30,
n_iter=100,tol=1e-6, n_init=1, params='wm', init_params='wm',blocksize=0,
float_type=np.float64,
binary_type=np.uint8, verbose=False):
self.n_components = n_components
self.thresh = thresh
self.random_state = random_state
self.n_iter = n_iter
self.n_init = n_init
self.params = params
self.init_params = init_params
self.float_type = float_type
self.binary_type = binary_type
self.min_prob = min_prob
self.min_num = min_num
self.verbose=verbose
# blocksize controls whether we do the likelihood computation in blocks to prevent memory blowup
self.blocksize = blocksize
if self.n_init < 1:
raise ValueError('BernoulliMM estimation requires at least one run')
self.weights_ = np.ones(self.n_components,dtype=self.float_type)/ self.n_components
self.converged_ = False
def score_samples(self, X):
"""Evaluate the model on data
Compute the log probability of X under the model and
return the posterior distribution (responsibilities) of each
mixture component for each element of X.
Parameters
----------
X: array_like, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
logprob: array_like, shape (n_samples,)
Log probabilities of each data point in X
responsibilities: array_like, shape (n_samples, n_components)
Posterior probabilities of each mixture component for each
observation
"""
X = np.asarray(X)
if X.ndim == 1:
X = X[:, np.newaxis]
if X.size == 0:
return np.array([]), np.empty((0, self.n_components))
if X.shape[1] != self.means_.shape[1]:
raise ValueError('the shape of X is not compatible with self')
if self.blocksize > 0:
logprob = np.zeros(X.shape[0],dtype=self.float_type)
responsibilities = np.zeros((X.shape[0],self.n_components),dtype=self.float_type)
block_id = 0
for block_id in range(0,X.shape[0],self.blocksize):
blockend = min(X.shape[0],block_id+self.blocksize)
lpr = (log_product_of_bernoullis_mixture_likelihood(X[block_id:blockend], self.log_odds_,
self.log_inv_mean_sums_)
+ np.log(self.weights_))
logprob[block_id:blockend] = logsumexp(lpr, axis=1)
responsibilities[block_id:blockend] = np.exp(lpr - (logprob[block_id:blockend])[:, np.newaxis])
else:
lpr = (log_product_of_bernoullis_mixture_likelihood(X, self.log_odds_,
self.log_inv_mean_sums_)
+ np.log(self.weights_))
logprob = logsumexp(lpr, axis=1)
responsibilities = np.exp(lpr - logprob[:, np.newaxis])
return logprob, responsibilities
def score(self, X):
"""Compute the log probability under the model.
Parameters
----------
X : array_like, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
logprob : array_like, shape (n_samples,)
Log probabilities of each data point in X
"""
logprob, _ = self.score_samples(X)
return logprob
def predict(self, X):
"""Predict label for data.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array, shape = (n_samples,)
"""
logprob, responsibilities = self.score_samples(X)
return responsibilities.argmax(axis=1)
def predict_proba(self, X):
"""Predict posterior probability of data under each Gaussian
in the model.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
responsibilities : array-like, shape = (n_samples, n_components)
Returns the probability of the sample for each Bernoulli
component in the model.
"""
data_shape = X.shape[1:]
# flatten data to just be binary vectors
data_length = np.prod(data_shape)
if len(data_shape) > 1:
logprob, responsibilities = self.score_samples(X.reshape(X.shape[0], data_length))
else:
logprob, responsibilities = self.score_samples(X)
return responsibilities
def sample(self, n_samples=1, random_state=None):
"""Generate random samples from the model.
Parameters
----------
n_samples : int, optional
Number of samples to generate. Defaults to 1.
Returns
-------
X : array_like, shape (n_samples, n_features)
List of samples
"""
if random_state is None:
random_state = self.random_state
random_state = check_random_state(random_state)
weight_cdf = np.cumsum(self.weights_)
X = np.empty((n_samples, self.means_.shape[1]))
rand = random_state.rand(n_samples)
# decide which component to use for each sample
comps = weight_cdf.searchsorted(rand)
# for each component, generate all needed samples
for comp in range(self.n_components):
# occurrences of current component in X
comp_in_X = (comp == comps)
# number of those occurrences
num_comp_in_X = comp_in_X.sum()
if num_comp_in_X > 0:
X[comp_in_X] = sample_bernoulli(
self.means_[comp],
num_comp_in_X, random_state=random_state,
data_type=self.binary_type).T
return X
def set_means_weights(self,means,weights):
"""
Set the means and the weights of the model so that one can
load a model from being saved.
"""
self.means_ = means
self.weights_ = weights
self.log_odds_, self.log_inv_mean_sums_ = _compute_log_odds_inv_means_sums(self.means_)
def fit(self, X):
"""
Run the EM algorithm to specified convergence.
Parameters
----------
X : array_like, shape (n,) + d
List of data points assumed that the dimensions are such that
`np.prod(X.shape[1:])==n_features`
"""
random_state = check_random_state(self.random_state)
X = np.asarray(X, dtype=self.binary_type)
if X.ndim == 1:
X = X[:, np.newaxis]
if self.verbose:
print('Starting EM with {} samples, {} dimensions and {} classes'.format(X.shape[0], X.shape[1], self.n_components))
data_shape = X.shape[1:]
# flatten data to just be binary vectors
data_length = np.prod(data_shape)
if len(data_shape) > 1:
X = X.reshape(X.shape[0], data_length)
if X.shape[0] < self.n_components:
raise ValueError(
'BernoulliMM estimation with %s components, but got only %s samples' %
(self.n_components, X.shape[0]))
inv_X = 1 - X
max_log_prob = -np.infty
# if debug_plot:
# plw = ag.plot.PlottingWindow(subplots=(1, self.num_mix), figsize=(self.num_mix*3, 3))
for cur_init in range(self.n_init):
if self.verbose:
print("Current parameter initialization: {0}".format(cur_init))
if 'm' in self.init_params or not hasattr(self,'means_'):
if self.verbose:
print("Initializing means")
if 0:
indices = np.arange(X.shape[0])
random_state.shuffle(indices)
self.means_ = np.array(tuple(
np.clip(X[indices[i::self.n_components]].mean(0),
self.min_prob,
1-self.min_prob)
for i in range(self.n_components)))
repr_samples = X[random_state.choice(X.shape[0], self.n_components, replace=False)]
#self.means_ = repr_samples.clip(self.min_prob, 1 - self.min_prob)
self.means_ = repr_samples.clip(0.2, 0.8)
self.log_odds_, self.log_inv_mean_sums_ = _compute_log_odds_inv_means_sums(self.means_)
if 'w' in self.init_params or not hasattr(self,'weights_'):
if self.verbose:
print("Initializing weights")
self.weights_ = np.tile(1.0 / self.n_components,
self.n_components)
log_likelihood = []
self.iterations = 0
self.converged_ = False
for i in range(self.n_iter):
# Expectation Step
curr_log_likelihood, responsibilities = self.score_samples(X)
log_likelihood.append(curr_log_likelihood.sum())
if self.verbose:
print("Iteration {0}: loglikelihood {1}".format(i, log_likelihood[-1]))
# check for convergence
if i > 0 and abs(log_likelihood[-1] - log_likelihood[-2])/abs(log_likelihood[-2]) < \
self.thresh:
self.converged_ = True
break
# ag.info("Iteration {0}: loglikelihood {1}".format(self.iterations, loglikelihood))
# maximization step
self._do_mstep(X,
responsibilities,
self.params,
self.min_prob)
if self.n_iter:
if log_likelihood[-1] > max_log_prob:
if self.verbose:
print("updated best params for {0}".format(self.score(X).sum()))
max_log_prob = log_likelihood[-1]
best_params = {'weights': self.weights_,
'means' : self.means_}
# check the existence of an init param that was not subject to
# likelihood computation issue.
if np.isneginf(max_log_prob) and self.n_iter:
raise RuntimeError(
"EM algorithm was never able to compute a valid likelihood " +
"given initial parameters. Try different init parameters " +
"(or increasing n_init) or check for degenerate data.")
if len(data_shape) > 1:
X = X.reshape(*( (X.shape[0],) + data_shape))
if self.n_iter:
self.means_ = best_params['means']
self.log_odds_, self.log_inv_mean_sums_ = _compute_log_odds_inv_means_sums(self.means_)
self.weights_ = best_params['weights']
return self
def _do_mstep(self, X, responsibilities, params, min_prob=1e-7):
""" Perform the Mstep of the EM algorithm and return the class weights
"""
weights = responsibilities.sum(axis=0)
if self.blocksize > 0:
weighted_X_sum=np.zeros((weights.shape[0],X.shape[1]),dtype=self.float_type)
for blockstart in range(0,X.shape[0],self.blocksize):
blockend=min(X.shape[0],blockstart+self.blocksize)
res = responsibilities[blockstart:blockend].T
weighted_X_sum += np.dot(res,X[blockstart:blockend])
else:
weighted_X_sum = np.dot(responsibilities.T, X)
inverse_weights = 1.0 / (weights[:, np.newaxis] + 10 * EPS)
if 'w' in params:
self.weights_ = (weights / (weights.sum() + 10 * EPS) + EPS)
if 'm' in params:
self.means_ = np.clip(weighted_X_sum * inverse_weights,min_prob,1-min_prob)
self.log_odds_, self.log_inv_mean_sums_ = _compute_log_odds_inv_means_sums(self.means_)
return weights
def _n_parameters(self):
"""Return the number of free parameters in the model"""
ndim = self.means_.shape[1]
mean_params = ndim * self.n_components
return int(mean_params + self.n_components - 1)
def bic(self, X):
"""Bayesian information criterion for the current model fit
and the proposed data
Parameters
----------
X : array of shape(n_samples, n_dimensions)
Returns
-------
bic: float (the lower the better)
"""
return (-2 * self.score(X).sum() +
self._n_parameters() * np.log(X.shape[0]))
def aic(self, X):
"""Akaike information criterion for the current model fit
and the proposed data
Parameters
----------
X : array of shape(n_samples, n_dimensions)
Returns
-------
aic: float (the lower the better)
"""
return - 2 * self.score(X).sum() + 2 * self._n_parameters()
def cluster_underlying_data(self,Z,X=None,
responsibilities=None):
"""In cases where binary codes for underlying data are clustered
and we wish to recover clusters from the underlying data based
on the clustering of binary codes
Parameters
----------
Z : array of shape(n_samples, k)
List of k-dimensional data points. Each row
corresponds to a single data point. X is the binary codes for these
data, or the responsibilities are a posterior distribution over the classes
that generated the data.
X : array of shape(n_samples, n_dimensions), optional
List of n_dimensions-dimensional data points. Each row
corresponds to a single data point. Should be defined
if responsibilities is None. Should be binary data
responsibilities : array of shape(n_samples, n_components)
Should be defined if X is None, posterior distribution over
the n_components for each data point.
Returns
"""
# check that the number of data points matches the number
# of data estimated
if X is None:
if responsibilities is None:
raise RuntimeError("no binary data provided")
else:
responsibilities = self.predict_proba(X)
responsibilities = responsibilities.T
underlying_clusters = np.dot(responsibilities,
Z.reshape(Z.shape[0],np.prod(Z.shape[1:]))) / np.lib.stride_tricks.as_strided(responsibilities.sum(1),
shape=(responsibilities.shape[0],
np.prod(Z.shape[1:])),
strides=(responsibilities.strides[0],0))
return underlying_clusters
def _compute_log_odds_inv_means_sums(means):
"""Compute the log odds, and the sums over the log inverse means
to enable fast likelihood computation
"""
log_inv_means = np.log(1-means)
return np.log(means) - log_inv_means, log_inv_means.sum(-1)
| bsd-3-clause |
dchilds7/Deysha-Star-Formation | vispy/visuals/isocurve.py | 18 | 7809 | # -*- coding: utf-8 -*-
# Copyright (c) 2015, Vispy Development Team.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
from __future__ import division
import numpy as np
from .line import LineVisual
from ..color import ColorArray
from ..color.colormap import _normalize, get_colormap
from ..geometry.isocurve import isocurve
from ..testing import has_matplotlib
# checking for matplotlib
_HAS_MPL = has_matplotlib()
if _HAS_MPL:
from matplotlib import _cntr as cntr
class IsocurveVisual(LineVisual):
"""Displays an isocurve of a 2D scalar array.
Parameters
----------
data : ndarray | None
2D scalar array.
levels : ndarray, shape (Nlev,) | None
The levels at which the isocurve is constructed from "*data*".
color_lev : Color, colormap name, tuple, list or array
The color to use when drawing the line. If a list is given, it
must be of shape (Nlev), if an array is given, it must be of
shape (Nlev, ...). and provide one color per level (rgba, colorname).
clim : tuple
(min, max) limits to apply when mapping level values through a
colormap.
**kwargs : dict
Keyword arguments to pass to `LineVisual`.
Notes
-----
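Examples
--------
A minimal sketch, assuming the auto-generated scene-graph wrapper in
``vispy.scene.visuals`` (not defined in this file):
>>> import numpy as np
>>> from vispy import scene
>>> canvas = scene.SceneCanvas(keys='interactive', show=True)
>>> view = canvas.central_widget.add_view()
>>> view.camera = 'panzoom'
>>> data = np.random.rand(100, 100)
>>> curve = scene.visuals.Isocurve(data, levels=[0.25, 0.5, 0.75],
...                                color_lev=['red', 'green', 'blue'],
...                                parent=view.scene)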
"""
def __init__(self, data=None, levels=None, color_lev=None, clim=None,
**kwargs):
self._data = None
self._levels = levels
self._color_lev = color_lev
self._clim = clim
self._need_color_update = True
self._need_level_update = True
self._need_recompute = True
self._X = None
self._Y = None
self._iso = None
self._level_min = None
self._data_is_uniform = False
self._lc = None
self._cl = None
self._li = None
self._connect = None
self._verts = None
kwargs['method'] = 'gl'
kwargs['antialias'] = False
LineVisual.__init__(self, **kwargs)
if data is not None:
self.set_data(data)
@property
def levels(self):
""" The threshold at which the isocurve is constructed from the
2D data.
"""
return self._levels
@levels.setter
def levels(self, levels):
self._levels = levels
self._need_level_update = True
self._need_recompute = True
self.update()
@property
def color(self):
return self._color_lev
@color.setter
def color(self, color):
self._color_lev = color
self._need_level_update = True
self._need_color_update = True
self.update()
def set_data(self, data):
""" Set the scalar array data
Parameters
----------
data : ndarray
A 2D array of scalar values. The isocurve is constructed to show
all locations in the scalar field equal to ``self.levels``.
"""
self._data = data
# if using matplotlib isoline algorithm we have to check for meshgrid
# and we can setup the tracer object here
if _HAS_MPL:
if self._X is None or self._X.T.shape != data.shape:
self._X, self._Y = np.meshgrid(np.arange(data.shape[0]),
np.arange(data.shape[1]))
self._iso = cntr.Cntr(self._X, self._Y, self._data.astype(float))
if self._clim is None:
self._clim = (data.min(), data.max())
# sanity check,
# should we raise an error here, since no isolines can be drawn?
# for now, _prepare_draw returns False if no isoline can be drawn
if self._data.min() != self._data.max():
self._data_is_uniform = False
else:
self._data_is_uniform = True
self._need_recompute = True
self.update()
def _get_verts_and_connect(self, paths):
""" retrieve vertices and connects from given paths-list
"""
verts = np.vstack(paths)
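# `gaps` holds the index of the last vertex of each path in the stacked
# array; clearing `connect` at those positions keeps separate isoline
# paths from being joined when the line segments are drawn.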
gaps = np.add.accumulate(np.array([len(x) for x in paths])) - 1
connect = np.ones(gaps[-1], dtype=bool)
connect[gaps[:-1]] = False
return verts, connect
def _compute_iso_line(self):
""" compute LineVisual vertices, connects and color-index
"""
level_index = []
connects = []
verts = []
# calculate which level are within data range
# this works for now and the existing examples, but should be tested
# thoroughly also with the data-sanity check in set_data-function
choice = np.nonzero((self.levels > self._data.min()) &
(self._levels < self._data.max()))
levels_to_calc = np.array(self.levels)[choice]
# save minimum level index
self._level_min = choice[0][0]
for level in levels_to_calc:
# if we use matplotlib isoline algorithm we need to add half a
# pixel in both (x,y) dimensions because isolines are aligned to
# pixel centers
if _HAS_MPL:
nlist = self._iso.trace(level, level, 0)
paths = nlist[:len(nlist)//2]
v, c = self._get_verts_and_connect(paths)
v += np.array([0.5, 0.5])
else:
paths = isocurve(self._data.astype(float).T, level,
extend_to_edge=True, connected=True)
v, c = self._get_verts_and_connect(paths)
level_index.append(v.shape[0])
connects.append(np.hstack((c, [False])))
verts.append(v)
self._li = np.hstack(level_index)
self._connect = np.hstack(connects)
self._verts = np.vstack(verts)
def _compute_iso_color(self):
""" compute LineVisual color from level index and corresponding color
"""
level_color = []
colors = self._lc
for i, index in enumerate(self._li):
level_color.append(np.zeros((index, 4)) +
colors[i+self._level_min])
self._cl = np.vstack(level_color)
def _levels_to_colors(self):
# computes ColorArrays for given levels
# try to interpret _color_lev as a colormap; otherwise fall back to a ColorArray
try:
f_color_levs = get_colormap(self._color_lev)
except Exception:
colors = ColorArray(self._color_lev).rgba
else:
lev = _normalize(self._levels, self._clim[0], self._clim[1])
# map function expects (Nlev,1)!
colors = f_color_levs.map(lev[:, np.newaxis])
# broadcast to (nlev, 4) array
if len(colors) == 1:
colors = colors * np.ones((len(self._levels), 1))
# detect color_lev/levels mismatch and raise error
if (len(colors) != len(self._levels)):
raise TypeError("Color/level mismatch. Color must be of shape "
"(Nlev, ...) and provide one color per level")
self._lc = colors
def _prepare_draw(self, view):
if (self._data is None or self._levels is None or
self._color_lev is None or self._data_is_uniform):
return False
if self._need_level_update:
self._levels_to_colors()
self._need_level_update = False
if self._need_recompute:
self._compute_iso_line()
self._compute_iso_color()
LineVisual.set_data(self, pos=self._verts, connect=self._connect,
color=self._cl)
self._need_recompute = False
if self._need_color_update:
self._compute_iso_color()
LineVisual.set_data(self, color=self._cl)
self._need_color_update = False
return LineVisual._prepare_draw(self, view)
| bsd-3-clause |
petricm/DIRAC | Core/Utilities/Graphs/PieGraph.py | 4 | 5568 | ########################################################################
# $HeadURL$
########################################################################
""" PieGraph represents a pie graph
The DIRAC Graphs package is derived from the GraphTool plotting package of the
CMS/Phedex Project by ... <to be added>
"""
from __future__ import print_function
__RCSID__ = "$Id$"
import numpy, math, time
from matplotlib.patches import Wedge, Shadow
from matplotlib.cbook import is_string_like
from DIRAC.Core.Utilities.Graphs.PlotBase import PlotBase
from DIRAC.Core.Utilities.Graphs.GraphData import GraphData
from DIRAC.Core.Utilities.Graphs.GraphUtilities import *
class PieGraph( PlotBase ):
def __init__( self, data, ax, prefs, *args, **kw ):
PlotBase.__init__( self, data, ax, prefs, *args, **kw )
self.pdata = data
def pie( self, explode = None,
colors = None,
autopct = None,
pctdistance = 0.6,
shadow = False
):
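# explode: per-wedge radial offsets; autopct: format string or callable used
# to label each wedge with its percentage; pctdistance: radial position of
# those labels; shadow: draw a drop shadow behind each wedge.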
start = time.time()
labels = self.pdata.getLabels()
if labels[0][0] == "NoLabels":
try:
self.pdata.initialize(key_type='string')
self.pdata.sortLabels()
labels = self.pdata.getLabels()
nLabels = self.pdata.getNumberOfLabels()
explode = [0.] * nLabels
if nLabels > 0:
explode[0] = 0.1
except Exception as x:
print("PieGraph Error: can not interpret data for the plot")
#labels.reverse()
values = [l[1] for l in labels]
x = numpy.array( values, numpy.float64 )
self.legendData = labels
sx = float( numpy.sum( x ) )
if sx > 1: x = numpy.divide( x, sx )
labels = [l[0] for l in labels]
if explode is None: explode = [0] * len( x )
assert( len( x ) == len( labels ) )
assert( len( x ) == len( explode ) )
plot_axis_labels = self.prefs.get( 'plot_axis_labels', True )
center = 0, 0
radius = 1.1
theta1 = 0
i = 0
texts = []
slices = []
autotexts = []
for frac, label, expl in zip( x, labels, explode ):
x, y = center
theta2 = theta1 + frac
thetam = 2 * math.pi * 0.5 * ( theta1 + theta2 )
x += expl * math.cos( thetam )
y += expl * math.sin( thetam )
color = self.palette.getColor( label )
w = Wedge( ( x, y ), radius, 360. * theta1, 360. * theta2,
facecolor = color,
lw = pixelToPoint( 0.5, self.dpi ),
edgecolor = '#999999' )
slices.append( w )
self.ax.add_patch( w )
w.set_label( label )
if shadow:
# make sure to add a shadow after the call to
# add_patch so the figure and transform props will be
# set
shad = Shadow( w, -0.02, -0.02,
#props={'facecolor':w.get_facecolor()}
)
shad.set_zorder( 0.9 * w.get_zorder() )
self.ax.add_patch( shad )
if plot_axis_labels:
if frac > 0.03:
xt = x + 1.05 * radius * math.cos( thetam )
yt = y + 1.05 * radius * math.sin( thetam )
thetam %= 2 * math.pi
if 0 < thetam and thetam < math.pi:
valign = 'bottom'
elif thetam == 0 or thetam == math.pi:
valign = 'center'
else:
valign = 'top'
if thetam > math.pi / 2.0 and thetam < 3.0 * math.pi / 2.0:
halign = 'right'
elif thetam == math.pi / 2.0 or thetam == 3.0 * math.pi / 2.0:
halign = 'center'
else:
halign = 'left'
t = self.ax.text( xt, yt, label,
size = pixelToPoint( self.prefs['subtitle_size'], self.dpi ),
horizontalalignment = halign,
verticalalignment = valign )
t.set_family( self.prefs['font_family'] )
t.set_fontname( self.prefs['font'] )
t.set_size( pixelToPoint( self.prefs['text_size'], self.dpi ) )
texts.append( t )
if autopct is not None:
xt = x + pctdistance * radius * math.cos( thetam )
yt = y + pctdistance * radius * math.sin( thetam )
if is_string_like( autopct ):
s = autopct % ( 100. * frac )
elif callable( autopct ):
s = autopct( 100. * frac )
else:
raise TypeError( 'autopct must be callable or a format string' )
t = self.ax.text( xt, yt, s,
horizontalalignment = 'center',
verticalalignment = 'center' )
t.set_family( self.prefs['font_family'] )
t.set_fontname( self.prefs['font'] )
t.set_size( pixelToPoint( self.prefs['text_size'], self.dpi ) )
autotexts.append( t )
theta1 = theta2
i += 1
self.legendData.reverse()
self.ax.set_xlim( ( -1.25, 1.25 ) )
self.ax.set_ylim( ( -1.25, 1.25 ) )
self.ax.set_axis_off()
if autopct is None: return slices, texts
else: return slices, texts, autotexts
min_amount = .1
def getLegendData( self ):
return self.legendData
def draw( self ):
self.ylabel = ''
self.prefs['square_axis'] = True
PlotBase.draw( self )
def my_display( x ):
if x > 100 * self.min_amount:
return '%.1f' % x + '%'
else:
return ""
nLabels = self.pdata.getNumberOfLabels()
explode = [0.] * nLabels
if nLabels > 0:
explode[0] = 0.1
self.wedges, text_labels, percent = self.pie( explode = explode, autopct = my_display )
| gpl-3.0 |
yafeunteun/wikipedia-spam-classifier | revscoring/revscoring/utilities/tune.py | 1 | 9729 | """
Tunes a set of models against a training set to identify the best
model/configuration.
Usage:
tune <params-config> <features> <label>
[--observations=<path>]
[--scoring=<type>]
[--test-prop=<prop>]
[--folds=<num>]
[--report=<path>]
[--label-type=<type>]
[--processes=<num>]
[--cv-timeout=<mins>]
[--scale-features]
[--verbose] [--debug]
Options:
<params-config> The path to a YAML configuration file containing the
models and parameter values to search when tuning
<features> The classpath to a feature_list to use when
interpreting the feature values of the observations
<label> The name of the field to be predicted
--observations=<path> The path to a file containing observations to train
and test against. [default: <stdin>]
--scoring=<type> The type of scoring strategy to optimize for when
choosing parameter sets [default: roc_auc]
--folds=<num> The number of cross-validation folds to try
[default: 5]
--report=<path> Path to a file to write the tuning report to
[default: <stdout>]
--processes=<num> The number of parallel processes to start for
model building [default: <cpu-count>]
--cv-timeout=<mins> The number of minutes to wait for a model to
cross-validate before timing out
[default: <forever>]
--scale-features Scales the feature values before tuning
--verbose Print progress information to stderr
--debug Print debug information to stderr
"""
import datetime
import json
import logging
import multiprocessing
import sys
import time
import traceback
from collections import defaultdict
import docopt
import numpy
import yamlconf
from sklearn import cross_validation, grid_search, preprocessing
from tabulate import tabulate
from . import metrics
from .. import __version__
from ..dependencies import solve
from .util import Timeout, read_observations
logger = logging.getLogger(__name__)
def main(argv=None):
args = docopt.docopt(__doc__, argv=argv)
logging.basicConfig(
level=logging.INFO if not args['--debug'] else logging.DEBUG,
format='%(asctime)s %(levelname)s:%(name)s -- %(message)s'
)
params_config = yamlconf.load(open(args['<params-config>']))
features_path = args['<features>']
features = yamlconf.import_path(features_path)
if args['--observations'] == "<stdin>":
observations = read_observations(sys.stdin)
else:
observations = read_observations(open(args['--observations']))
logger.info("Reading feature values & labels...")
label_name = args['<label>']
value_labels = \
[(list(solve(features, cache=ob['cache'])), ob[label_name])
for ob in observations]
# Get a specialized scorer if we have one
scoring = metrics.SCORERS.get(args['--scoring'], args['--scoring'])
folds = int(args['--folds'])
if args['--report'] == "<stdout>":
report = sys.stdout
else:
report = open(args['--report'], "w")
if args['--processes'] == "<cpu-count>":
processes = multiprocessing.cpu_count()
else:
processes = int(args['--processes'])
if args['--cv-timeout'] == "<forever>":
cv_timeout = None
else:
cv_timeout = float(args['--cv-timeout']) * 60 # Convert to seconds
scale_features = args['--scale-features']
verbose = args['--verbose']
run(params_config, features_path, value_labels, scoring, folds,
report, processes, cv_timeout, scale_features, verbose)
def run(params_config, features_path, value_labels, scoring, folds,
report, processes, cv_timeout, scale_features, verbose):
if scale_features:
logger.debug("Scaling features...")
ss = preprocessing.StandardScaler()
feature_values, labels = (list(vect) for vect in zip(*value_labels))
scaled_feature_values = ss.fit_transform(feature_values)
value_labels = list(zip(scaled_feature_values, labels))
# Prepare the worker pool
logger.debug("Starting up multiprocessing pool (processes={0})"
.format(processes))
pool = multiprocessing.Pool(processes=processes)
# Start writing the model tuning report
possible_labels = set(label for _, label in value_labels)
report.write("# Model tuning report\n")
report.write("- Revscoring version: {0}\n".format(__version__))
report.write("- Features: {0}\n".format(features_path))
report.write("- Date: {0}\n".format(datetime.datetime.now().isoformat()))
report.write("- Observations: {0}\n".format(len(value_labels)))
report.write("- Labels: {0}\n".format(json.dumps(list(possible_labels))))
report.write("- Scoring: {0}\n".format(scoring))
report.write("- Folds: {0}\n".format(folds))
report.write("\n")
# For each estimator and paramset, submit the job.
cv_result_sets = defaultdict(lambda: [])
for name, estimator, param_grid in _estimator_param_grid(params_config):
logger.debug("Submitting jobs for {0}:".format(name))
for params in param_grid:
logger.debug("\tsubmitting {0}..."
.format(format_params(params)))
result = pool.apply_async(_cross_validate,
[value_labels, estimator, params],
{'cv_timeout': cv_timeout,
'scoring': scoring, 'folds': folds})
cv_result_sets[name].append((params, result))
# Barrier synchronization
logger.info("Running gridsearch for {0} model/params pairs ..."
.format(sum(len(p_r) for p_r in cv_result_sets)))
grid_scores = []
for name, param_results in cv_result_sets.items():
for params, result in param_results:
scores = result.get() # This is a line that blocks
grid_scores.append((name, params, scores.mean(), scores.std()))
# Write the rest of the report! First, print the top 10 combinations
report.write("# Top scoring configurations\n")
grid_scores.sort(key=lambda gs: gs[2], reverse=True)
table = tabulate(
((name, round(mean_score, 3), round(std_score, 3),
format_params(params))
for name, params, mean_score, std_score in
grid_scores[:10]),
headers=["model", "mean(scores)", "std(scores)", "params"],
tablefmt="pipe"
)
report.write(table + "\n")
report.write("\n")
# Now print out scores for each model.
report.write("# Models\n")
for name, param_results in cv_result_sets.items():
report.write("## {0}\n".format(name))
param_scores = ((p, r.get()) for p, r in param_results)
param_stats = [(p, s.mean(), s.std()) for p, s in param_scores]
param_stats.sort(key=lambda v: v[1], reverse=True)
table = tabulate(
((round(mean_score, 3), round(std_score, 3),
format_params(params))
for params, mean_score, std_score in
param_stats),
headers=["mean(scores)", "std(scores)", "params"],
tablefmt="pipe"
)
report.write(table + "\n")
report.write("\n")
report.close()
def format_params(doc):
return ", ".join("{0}={1}".format(k, json.dumps(v))
for k, v in doc.items())
def _estimator_param_grid(params_config):
for name, config in params_config.items():
try:
EstimatorClass = yamlconf.import_module(config['class'])
estimator = EstimatorClass()
except Exception:
logger.warn("Could not load estimator {0}"
.format(config['class']))
logger.warn("Exception:\n" + traceback.format_exc())
continue
if not hasattr(estimator, "fit"):
logger.warn("Estimator {0} does not have a fit() method."
.format(config['class']))
continue
param_grid = grid_search.ParameterGrid(config['params'])
yield name, estimator, param_grid
def _cross_validate(value_labels, estimator, params, scoring="roc_auc",
folds=5, cv_timeout=None, verbose=False):
start = time.time()
feature_values, labels = (list(vect) for vect in zip(*value_labels))
estimator.set_params(**params)
try:
logger.debug("Running cross-validation for " +
"{0} with timeout of {1} seconds"
.format(estimator.__class__.__name__, cv_timeout))
with Timeout(cv_timeout):
scores = cross_validation.cross_val_score(
estimator, feature_values,
labels, scoring=scoring,
cv=folds)
duration = time.time() - start
logger.debug("Cross-validated {0} with {1} in {2} minutes: {3} ({4})"
.format(estimator.__class__.__name__,
format_params(params),
round(duration / 60, 3),
round(scores.mean(), 3),
round(scores.std(), 3)))
return scores
except Exception:
logger.warn("Could not cross-validate estimator {0}"
.format(estimator.__class__.__name__))
logger.warn("Exception:\n" + traceback.format_exc())
return numpy.array([0] * folds)
| mit |
hlin117/statsmodels | statsmodels/examples/try_polytrend.py | 33 | 1477 |
from __future__ import print_function
import numpy as np
#import statsmodels.linear_model.regression as smreg
from scipy import special
import statsmodels.api as sm
from statsmodels.datasets.macrodata import data
dta = data.load()
gdp = np.log(dta.data['realgdp'])
from numpy import polynomial
from scipy import special
maxorder = 20
polybase = special.chebyt
polybase = special.legendre
t = np.linspace(-1,1,len(gdp))
exog = np.column_stack([polybase(i)(t) for i in range(maxorder)])
fitted = [sm.OLS(gdp, exog[:, :maxr]).fit().fittedvalues for maxr in
range(2,maxorder)]
print((np.corrcoef(exog[:,1:6], rowvar=0)*10000).astype(int))
import matplotlib.pyplot as plt
plt.figure()
plt.plot(gdp, 'o')
for i in range(maxorder-2):
plt.plot(fitted[i])
plt.figure()
#plt.plot(gdp, 'o')
for i in range(maxorder-4, maxorder-2):
#plt.figure()
plt.plot(gdp - fitted[i])
plt.title(str(i+2))
plt.figure()
plt.plot(gdp, '.')
plt.plot(fitted[-1], lw=2, color='r')
plt.plot(fitted[0], lw=2, color='g')
plt.title('GDP and Polynomial Trend')
plt.figure()
plt.plot(gdp - fitted[-1], lw=2, color='r')
plt.plot(gdp - fitted[0], lw=2, color='g')
plt.title('Residual GDP minus Polynomial Trend (green: linear, red: legendre(20))')
#orthonormalize an exog using QR
ex2 = t[:,None]**np.arange(6) #np.vander has columns reversed
q2,r2 = np.linalg.qr(ex2, mode='full')
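# Sanity check: Q'Q should be numerically the identity, so the expression
# below should be on the order of machine precision; it is only inspected
# interactively and not stored.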
np.max(np.abs(np.dot(q2.T, q2)-np.eye(6)))
plt.figure()
plt.plot(q2, lw=2)
plt.show()
| bsd-3-clause |
Debaq/Triada | FullAxis_GUI/DB/BASE DE DATOS EXPERIMENTO/experimento 3/barbara hicks/medidor3.py | 27 | 3052 | import argparse
import sys
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.gridspec import GridSpec
import numpy as np
import json
parser = argparse.ArgumentParser(description="Does some awesome things.")
parser.add_argument('message', type=str, help="pass a message into the script")
args = parser.parse_args(sys.argv[1:])
data = []
New_data=[]
dt=[]
with open(args.message) as json_file:
data = json.load(json_file)
def graph(grid,d_tiempo):
plt.switch_backend('TkAgg') #default on my system
f = plt.figure(num=args.message, figsize=(20,15))
mng = plt._pylab_helpers.Gcf.figs.get(f.number, None)
print(New_data)
mng = plt.get_current_fig_manager()
mng.resize(*mng.window.maxsize())
plt.title(args.message)
if grid == 1:
tempo = d_tiempo
tempo_init = tempo[0]
tempo_end = tempo[-1]
gs1 = GridSpec(4, 1)
gs1.update(left=0.05, right=0.95, wspace=0.5, hspace=0.3, bottom=0.08)
ax1 = plt.subplot(gs1[0, :])
ax1.grid()
ax1.set_ylabel('Pitch',fontsize=8)
if grid ==1:
L1 = ax1.plot(d_tiempo,New_data['pitch'])
else:
L1 = ax1.plot(d_tiempo,data['pitch'])
ax2 = plt.subplot(gs1[1, :])
ax2.grid()
ax2.set_ylabel('Roll',fontsize=8)
if grid ==1:
L1 = ax2.plot(d_tiempo,New_data['roll'])
else:
L1 = ax2.plot(d_tiempo,data['roll'])
ax3 = plt.subplot(gs1[2, :])
ax3.grid()
ax3.set_ylabel('Yaw',fontsize=8)
if grid ==1:
L1 = ax3.plot(d_tiempo,New_data['yaw'])
else:
L1 = ax3.plot(d_tiempo,data['yaw'])
ax4 = plt.subplot(gs1[3, :])
ax4.grid()
ax4.set_ylabel('LEDs',fontsize=8)
if grid ==1:
L1 = ax4.plot(d_tiempo,New_data['ledblue'])
L2 = ax4.plot(d_tiempo,New_data['ledred'])
else:
L1 = ax4.plot(d_tiempo,data['ledblue'])
L2 = ax4.plot(d_tiempo,data['ledred'])
plt.show()
def find_nearest(array,values):
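# np.subtract.outer builds the full |array[i] - values| difference table;
# argmin over axis 0 returns, for each requested value (or a single scalar),
# the index of the closest timestamp in `array`.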
idx = np.abs(np.subtract.outer(array, values)).argmin(0)
return idx
def corte(init_cut,end_cut,a,b,c,d,e,f,g,h,i):
a=a[init_cut:end_cut]
b=b[init_cut:end_cut]
c=c[init_cut:end_cut]
d=d[init_cut:end_cut]
e=e[init_cut:end_cut]
f=f[init_cut:end_cut]
g=g[init_cut:end_cut]
h=h[init_cut:end_cut]
i=i[init_cut:end_cut]
datos={'roll':a,'pitch':b,'yaw':c, 'X':d, 'Y':e, 'Z':f,'time':g, 'ledblue':h, 'ledred':i}
return datos
def reset_tempo(var_in,var_out):
uni = var_in[0]
for t in range(0,len(var_in)):
var_out.append(round((var_in[t]-uni),3))
return var_out
graph(0,data['time'])
init_cut = float(input("tiempo inicial: "))
init_cuty = find_nearest(data['time'],init_cut)
end_cut = float(input("tiempo final: "))
end_cuty = find_nearest(data['time'],end_cut)
New_data=corte(init_cuty,end_cuty,data['pitch'],data['roll'],data['yaw'],data['X'],data['Y'],data['Z'],data['time'],data['ledblue'],data['ledred'])
data = []
print(data)
data = New_data
print(data)
dt = reset_tempo(New_data['time'],dt)
graph(0,dt)
| gpl-3.0 |
mehdidc/scikit-learn | sklearn/semi_supervised/tests/test_label_propagation.py | 307 | 1974 | """ test the label propagation module """
import nose
import numpy as np
from sklearn.semi_supervised import label_propagation
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_array_equal
ESTIMATORS = [
(label_propagation.LabelPropagation, {'kernel': 'rbf'}),
(label_propagation.LabelPropagation, {'kernel': 'knn', 'n_neighbors': 2}),
(label_propagation.LabelSpreading, {'kernel': 'rbf'}),
(label_propagation.LabelSpreading, {'kernel': 'knn', 'n_neighbors': 2})
]
def test_fit_transduction():
samples = [[1., 0.], [0., 2.], [1., 3.]]
labels = [0, 1, -1]
for estimator, parameters in ESTIMATORS:
clf = estimator(**parameters).fit(samples, labels)
nose.tools.assert_equal(clf.transduction_[2], 1)
def test_distribution():
samples = [[1., 0.], [0., 1.], [1., 1.]]
labels = [0, 1, -1]
for estimator, parameters in ESTIMATORS:
clf = estimator(**parameters).fit(samples, labels)
if parameters['kernel'] == 'knn':
continue # unstable test; changes in k-NN ordering break it
assert_array_almost_equal(clf.predict_proba([[1., 0.0]]),
np.array([[1., 0.]]), 2)
else:
assert_array_almost_equal(np.asarray(clf.label_distributions_[2]),
np.array([.5, .5]), 2)
def test_predict():
samples = [[1., 0.], [0., 2.], [1., 3.]]
labels = [0, 1, -1]
for estimator, parameters in ESTIMATORS:
clf = estimator(**parameters).fit(samples, labels)
assert_array_equal(clf.predict([[0.5, 2.5]]), np.array([1]))
def test_predict_proba():
samples = [[1., 0.], [0., 1.], [1., 2.5]]
labels = [0, 1, -1]
for estimator, parameters in ESTIMATORS:
clf = estimator(**parameters).fit(samples, labels)
assert_array_almost_equal(clf.predict_proba([[1., 1.]]),
np.array([[0.5, 0.5]]))
| bsd-3-clause |
wchan/tensorflow | tensorflow/examples/skflow/boston.py | 1 | 1609 | # Copyright 2015-present The Scikit Flow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from sklearn import datasets, cross_validation, metrics
from sklearn import preprocessing
from tensorflow.contrib import skflow
# Load dataset
boston = datasets.load_boston()
X, y = boston.data, boston.target
# Split dataset into train / test
X_train, X_test, y_train, y_test = cross_validation.train_test_split(X, y,
test_size=0.2, random_state=42)
# Scale data (training set) to 0 mean and unit standard deviation.
scaler = preprocessing.StandardScaler()
X_train = scaler.fit_transform(X_train)
# Build 2 layer fully connected DNN with 10, 10 units respectively.
regressor = skflow.TensorFlowDNNRegressor(hidden_units=[10, 10],
steps=5000, learning_rate=0.1, batch_size=1)
# Fit
regressor.fit(X_train, y_train)
# Predict and score
score = metrics.mean_squared_error(regressor.predict(scaler.transform(X_test)), y_test)  # reuse the scaler fitted on the training data; refitting it on X_test would leak test statistics
print('MSE: {0:f}'.format(score))
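# A possible follow-up (sketch, not part of the original example): predictions
# for new samples would use the already-fitted scaler, e.g.
#   y_new = regressor.predict(scaler.transform(X_test[:5]))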
| apache-2.0 |
rew4332/tensorflow | tensorflow/contrib/learn/python/learn/tests/dataframe/feeding_queue_runner_test.py | 30 | 4727 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests `FeedingQueueRunner` using arrays and `DataFrames`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
import tensorflow.contrib.learn.python.learn.dataframe.queues.feeding_functions as ff
# pylint: disable=g-import-not-at-top
try:
import pandas as pd
HAS_PANDAS = True
except ImportError:
HAS_PANDAS = False
def get_rows(array, row_indices):
rows = [array[i] for i in row_indices]
return np.vstack(rows)
class FeedingQueueRunnerTestCase(tf.test.TestCase):
"""Tests for `FeedingQueueRunner`."""
def testArrayFeeding(self):
with tf.Graph().as_default():
array = np.arange(32).reshape([16, 2])
q = ff.enqueue_data(array, capacity=100)
batch_size = 3
dq_op = q.dequeue_many(batch_size)
with tf.Session() as sess:
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess=sess, coord=coord)
for i in range(100):
indices = [j % array.shape[0]
for j in range(batch_size * i, batch_size * (i + 1))]
expected_dq = get_rows(array, indices)
dq = sess.run(dq_op)
np.testing.assert_array_equal(indices, dq[0])
np.testing.assert_array_equal(expected_dq, dq[1])
coord.request_stop()
coord.join(threads)
def testArrayFeedingMultiThread(self):
with tf.Graph().as_default():
array = np.arange(256).reshape([128, 2])
q = ff.enqueue_data(array, capacity=128, num_threads=8, shuffle=True)
batch_size = 3
dq_op = q.dequeue_many(batch_size)
with tf.Session() as sess:
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess=sess, coord=coord)
for _ in range(100):
dq = sess.run(dq_op)
indices = dq[0]
expected_dq = get_rows(array, indices)
np.testing.assert_array_equal(expected_dq, dq[1])
coord.request_stop()
coord.join(threads)
def testPandasFeeding(self):
if not HAS_PANDAS:
return
with tf.Graph().as_default():
array1 = np.arange(32)
array2 = np.arange(32, 64)
df = pd.DataFrame({"a": array1, "b": array2}, index=np.arange(64, 96))
q = ff.enqueue_data(df, capacity=100)
batch_size = 5
dq_op = q.dequeue_many(5)
with tf.Session() as sess:
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess=sess, coord=coord)
for i in range(100):
indices = [j % array1.shape[0]
for j in range(batch_size * i, batch_size * (i + 1))]
expected_df_indices = df.index[indices]
expected_rows = df.iloc[indices]
dq = sess.run(dq_op)
np.testing.assert_array_equal(expected_df_indices, dq[0])
for col_num, col in enumerate(df.columns):
np.testing.assert_array_equal(expected_rows[col].values,
dq[col_num + 1])
coord.request_stop()
coord.join(threads)
def testPandasFeedingMultiThread(self):
if not HAS_PANDAS:
return
with tf.Graph().as_default():
array1 = np.arange(128, 256)
array2 = 2 * array1
df = pd.DataFrame({"a": array1, "b": array2}, index=np.arange(128))
q = ff.enqueue_data(df, capacity=128, num_threads=8, shuffle=True)
batch_size = 5
dq_op = q.dequeue_many(batch_size)
with tf.Session() as sess:
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess=sess, coord=coord)
for _ in range(100):
dq = sess.run(dq_op)
indices = dq[0]
expected_rows = df.iloc[indices]
for col_num, col in enumerate(df.columns):
np.testing.assert_array_equal(expected_rows[col].values,
dq[col_num + 1])
coord.request_stop()
coord.join(threads)
if __name__ == "__main__":
tf.test.main()
| apache-2.0 |
mne-tools/mne-tools.github.io | dev/_downloads/cae85de1d2b532e063fb12463baa0fca/publication_figure.py | 10 | 11215 | """
.. _ex-publication-figure:
===================================
Make figures more publication ready
===================================
In this example, we show several use cases to take MNE plots and
customize them for a more publication-ready look.
"""
# Authors: Eric Larson <[email protected]>
# Daniel McCloy <[email protected]>
# Stefan Appelhoff <[email protected]>
#
# License: BSD (3-clause)
###############################################################################
# Imports
# -------
# We are importing everything we need for this example:
import os.path as op
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import (make_axes_locatable, ImageGrid,
inset_locator)
import mne
###############################################################################
# Evoked plot with brain activation
# ---------------------------------
#
# Suppose we want a figure with an evoked plot on top, and the brain activation
# below, with the brain subplot slightly bigger than the evoked plot. Let's
# start by loading some :ref:`example data <sample-dataset>`.
data_path = mne.datasets.sample.data_path()
subjects_dir = op.join(data_path, 'subjects')
fname_stc = op.join(data_path, 'MEG', 'sample', 'sample_audvis-meg-eeg-lh.stc')
fname_evoked = op.join(data_path, 'MEG', 'sample', 'sample_audvis-ave.fif')
evoked = mne.read_evokeds(fname_evoked, 'Left Auditory')
evoked.pick_types(meg='grad').apply_baseline((None, 0.))
max_t = evoked.get_peak()[1]
stc = mne.read_source_estimate(fname_stc)
###############################################################################
# During interactive plotting, we might see figures like this:
evoked.plot()
stc.plot(views='lat', hemi='split', size=(800, 400), subject='sample',
subjects_dir=subjects_dir, initial_time=max_t,
time_viewer=False, show_traces=False)
###############################################################################
# To make a publication-ready figure, first we'll re-plot the brain on a white
# background, take a screenshot of it, and then crop out the white margins.
# While we're at it, let's change the colormap, set custom colormap limits and
# remove the default colorbar (so we can add a smaller, vertical one later):
colormap = 'viridis'
clim = dict(kind='value', lims=[4, 8, 12])
# Plot the STC, get the brain image, crop it:
brain = stc.plot(views='lat', hemi='split', size=(800, 400), subject='sample',
subjects_dir=subjects_dir, initial_time=max_t, background='w',
colorbar=False, clim=clim, colormap=colormap,
time_viewer=False, show_traces=False)
screenshot = brain.screenshot()
brain.close()
###############################################################################
# Now let's crop out the white margins and the white gap between hemispheres.
# The screenshot has dimensions ``(h, w, 3)``, with the last axis being R, G, B
# values for each pixel, encoded as integers between ``0`` and ``255``. ``(255,
# 255, 255)`` encodes a white pixel, so we'll detect any pixels that differ
# from that:
nonwhite_pix = (screenshot != 255).any(-1)
nonwhite_row = nonwhite_pix.any(1)
nonwhite_col = nonwhite_pix.any(0)
cropped_screenshot = screenshot[nonwhite_row][:, nonwhite_col]
# before/after results
fig = plt.figure(figsize=(4, 4))
axes = ImageGrid(fig, 111, nrows_ncols=(2, 1), axes_pad=0.5)
for ax, image, title in zip(axes, [screenshot, cropped_screenshot],
['Before', 'After']):
ax.imshow(image)
ax.set_title('{} cropping'.format(title))
###############################################################################
# A lot of figure settings can be adjusted after the figure is created, but
# many can also be adjusted in advance by updating the
# :data:`~matplotlib.rcParams` dictionary. This is especially useful when your
# script generates several figures that you want to all have the same style:
# Tweak the figure style
plt.rcParams.update({
'ytick.labelsize': 'small',
'xtick.labelsize': 'small',
'axes.labelsize': 'small',
'axes.titlesize': 'medium',
'grid.color': '0.75',
'grid.linestyle': ':',
})
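# A possible alternative (not used in this example): the same settings can be
# scoped with a context manager so the global style is left untouched, e.g.
# with plt.rc_context({'grid.linestyle': ':', 'grid.color': '0.75'}):
#     fig, ax = plt.subplots()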
###############################################################################
# Now let's create our custom figure. There are lots of ways to do this step.
# Here we'll create the figure and the subplot axes in one step, specifying
# overall figure size, number and arrangement of subplots, and the ratio of
# subplot heights for each row using :mod:`GridSpec keywords
# <matplotlib.gridspec>`. Other approaches (using
# :func:`~matplotlib.pyplot.subplot2grid`, or adding each axes manually) are
# shown commented out, for reference.
# sphinx_gallery_thumbnail_number = 4
# figsize unit is inches
fig, axes = plt.subplots(nrows=2, ncols=1, figsize=(4.5, 3.),
gridspec_kw=dict(height_ratios=[3, 4]))
# alternate way #1: using subplot2grid
# fig = plt.figure(figsize=(4.5, 3.))
# axes = [plt.subplot2grid((7, 1), (0, 0), rowspan=3),
# plt.subplot2grid((7, 1), (3, 0), rowspan=4)]
# alternate way #2: using figure-relative coordinates
# fig = plt.figure(figsize=(4.5, 3.))
# axes = [fig.add_axes([0.125, 0.58, 0.775, 0.3]), # left, bot., width, height
# fig.add_axes([0.125, 0.11, 0.775, 0.4])]
# we'll put the evoked plot in the upper axes, and the brain below
evoked_idx = 0
brain_idx = 1
# plot the evoked in the desired subplot, and add a line at peak activation
evoked.plot(axes=axes[evoked_idx])
peak_line = axes[evoked_idx].axvline(max_t, color='#66CCEE', ls='--')
# custom legend
axes[evoked_idx].legend(
[axes[evoked_idx].lines[0], peak_line], ['MEG data', 'Peak time'],
frameon=True, columnspacing=0.1, labelspacing=0.1,
fontsize=8, fancybox=True, handlelength=1.8)
# remove the "N_ave" annotation
axes[evoked_idx].texts = []
# Remove spines and add grid
axes[evoked_idx].grid(True)
axes[evoked_idx].set_axisbelow(True)
for key in ('top', 'right'):
axes[evoked_idx].spines[key].set(visible=False)
# Tweak the ticks and limits
axes[evoked_idx].set(
yticks=np.arange(-200, 201, 100), xticks=np.arange(-0.2, 0.51, 0.1))
axes[evoked_idx].set(
ylim=[-225, 225], xlim=[-0.2, 0.5])
# now add the brain to the lower axes
axes[brain_idx].imshow(cropped_screenshot)
axes[brain_idx].axis('off')
# add a vertical colorbar with the same properties as the 3D one
divider = make_axes_locatable(axes[brain_idx])
cax = divider.append_axes('right', size='5%', pad=0.2)
cbar = mne.viz.plot_brain_colorbar(cax, clim, colormap, label='Activation (F)')
# tweak margins and spacing
fig.subplots_adjust(
left=0.15, right=0.9, bottom=0.01, top=0.9, wspace=0.1, hspace=0.5)
# add subplot labels
for ax, label in zip(axes, 'AB'):
ax.text(0.03, ax.get_position().ymax, label, transform=fig.transFigure,
fontsize=12, fontweight='bold', va='top', ha='left')
###############################################################################
# Custom timecourse with montage inset
# ------------------------------------
#
# Suppose we want a figure with some mean timecourse extracted from a number of
# sensors, and we want a smaller panel within the figure to show a head outline
# with the positions of those sensors clearly marked.
# If you are familiar with MNE, you know that this is something that
# :func:`mne.viz.plot_compare_evokeds` does, see an example output in
# :ref:`ex-hf-sef-data` at the bottom.
#
# In this part of the example, we will show you how to achieve this result on
# your own figure, without having to use :func:`mne.viz.plot_compare_evokeds`!
#
# Let's start by loading some :ref:`example data <sample-dataset>`.
data_path = mne.datasets.sample.data_path()
fname_raw = op.join(data_path, "MEG", "sample", "sample_audvis_raw.fif")
raw = mne.io.read_raw_fif(fname_raw)
# For the sake of the example, we focus on EEG data
raw.pick_types(meg=False, eeg=True)
###############################################################################
# Let's make a plot.
# channels to plot:
to_plot = [f"EEG {i:03}" for i in range(1, 5)]
# get the data for plotting in a short time interval from 10 to 20 seconds
start = int(raw.info['sfreq'] * 10)
stop = int(raw.info['sfreq'] * 20)
data, times = raw.get_data(picks=to_plot,
start=start, stop=stop, return_times=True)
# Scale the data from the MNE internal unit V to µV
data *= 1e6
# Take the mean of the channels
mean = np.mean(data, axis=0)
# make a figure
fig, ax = plt.subplots(figsize=(4.5, 3))
# plot some EEG data
ax.plot(times, mean)
###############################################################################
# So far so good. Now let's add the smaller figure within the figure to show
# exactly, which sensors we used to make the timecourse.
# For that, we use an "inset_axes" that we plot into our existing axes.
# The head outline with the sensor positions can be plotted using the
# `~mne.io.Raw` object that is the source of our data.
# Specifically, that object already contains all the sensor positions,
# and we can plot them using the ``plot_sensors`` method.
# recreate the figure (only necessary for our documentation server)
fig, ax = plt.subplots(figsize=(4.5, 3))
ax.plot(times, mean)
axins = inset_locator.inset_axes(ax, width="30%", height="30%", loc=2)
# pick_channels() edits the raw object in place, so we'll make a copy here
# so that our raw object stays intact for potential later analysis
raw.copy().pick_channels(to_plot).plot_sensors(title="", axes=axins)
###############################################################################
# That looks nice. But the sensor dots are way too big for our taste. Luckily,
# all MNE-Python plots use Matplotlib under the hood and we can customize
# each and every facet of them.
# To make the sensor dots smaller, we need to first get a handle on them to
# then apply a ``*.set_*`` method on them.
# If we inspect our axes we find the objects contained in our plot:
print(axins.get_children())
###############################################################################
# That's quite a lot of objects, but we know that we want to change the
# sensor dots, and those are most certainly a "PathCollection" object.
# So let's have a look at how many "collections" we have in the axes.
print(axins.collections)
###############################################################################
# There is only one! Those must be the sensor dots we were looking for.
# We finally found exactly what we needed. Sometimes this can take a bit of
# experimentation.
sensor_dots = axins.collections[0]
# Recreate the figure once more; shrink the sensor dots; add axis labels
fig, ax = plt.subplots(figsize=(4.5, 3))
ax.plot(times, mean)
axins = inset_locator.inset_axes(ax, width="30%", height="30%", loc=2)
raw.copy().pick_channels(to_plot).plot_sensors(title="", axes=axins)
sensor_dots = axins.collections[0]
sensor_dots.set_sizes([1])
# add axis labels, and adjust bottom figure margin to make room for them
ax.set(xlabel="Time (s)", ylabel="Amplitude (µV)")
fig.subplots_adjust(bottom=0.2)
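# A possible final step (not in the original example): save the finished figure
# at print resolution, e.g.
# fig.savefig('timecourse_with_inset.png', dpi=300, bbox_inches='tight')
# where the file name is purely illustrative.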
| bsd-3-clause |
idoerg/BOA | src/classify/mnbayes.py | 1 | 9467 | """
NOTE: OUT OF DATE
Multinomial Naive Bayes classifier
"""
import nltk
from nltk.metrics import ConfusionMatrix  # used by confusionMatrix() below
from nltk.classify.scikitlearn import SklearnClassifier
from sklearn.naive_bayes import MultinomialNB
import pylab as pl
from sklearn.utils import shuffle
from sklearn.metrics import roc_curve, auc
import os,sys
import itertools
import argparse
from collections import defaultdict
import random
import Bio
import re
from Bio import SeqIO, SeqFeature
from Bio.SeqRecord import SeqRecord
from Bio import Entrez
import training
import genbank
import cPickle
import gzip
import copy
import text_classifier
word_reg = re.compile("[a-z]+")
class MNBayes(text_classifier.TextClassifier):
def __init__(self,trainDir,labelFile,numTrees=10):
self.classifier = None
self.labelFile = labelFile
self.trainingDir = trainDir
self.labels = None
self.all_words = None
self.numTrees = numTrees
self.classifier = SklearnClassifier(MultinomialNB())
#self.labels = training.setup(labelFile)
#self.train()
def train(self):
feature_sets = self.getFeatures()
self.classifier.train(feature_sets)
""" Determines training error"""
def trainingError(self):
feature_sets = self.getFeatures()
p = nltk.classify.accuracy(self.classifier,feature_sets)
return p
""" Make sure that the algorithm works on training data using a k fold
cross validation scheme """
    def kfoldCrossValidation(self,k):
        feature_sets = self.getFeatures()
        error = 0
        n = len(feature_sets)/k
        for i in range(k):
            self.classifier = SklearnClassifier(MultinomialNB())
            # hold out fold i for testing and train on the remaining folds
            test_set = feature_sets[n*i:n*(i+1)]
            train_set = feature_sets[:n*i]+feature_sets[n*(i+1):]
            self.classifier.train(train_set)
            p = nltk.classify.accuracy(self.classifier,test_set)
            error += p
        return error/k
""" Make sure that the algorithm works on training data using a leave one out
cross validation scheme """
def leave1OutCrossValidation(self):
error = 0
feature_sets = self.getFeatures()
N = len(feature_sets)
for i in range(N):
self.classifier = SklearnClassifier(MultinomialNB())
train_set1,test_set,train_set2 = feature_sets[:i],feature_sets[i],feature_sets[i+1:]
train_set = train_set1+train_set2
test_set = [test_set]
            self.classifier.train(train_set)
p = nltk.classify.accuracy(self.classifier,test_set)
error+=p
return error/N
""" Construct a learning curve to see if there is overfitting"""
def learningCurve(self,numTrials=4):
accuracies = []
feature_sets = self.getFeatures()
for k in xrange(1,len(feature_sets)-1):
total = 0
for i in xrange(numTrials):
self.classifier = SklearnClassifier(MultinomialNB())
random.shuffle(feature_sets)
train_set,test_set = feature_sets[:k],feature_sets[k:]
                self.classifier.train(train_set)
p = nltk.classify.accuracy(self.classifier,test_set)
total+=p
accuracies.append(total/numTrials)
return accuracies
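    # Usage sketch (hypothetical instance name): given nb = MNBayes(trainDir, labelFile),
    # the returned accuracies could be plotted with the pylab import above, e.g.
    #   accs = nb.learningCurve()
    #   pl.plot(accs); pl.xlabel('training set size'); pl.ylabel('accuracy')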
""" Train on only k features and return training labels and predicted labels """
def testClassify(self,k):
feature_sets = self.getFeatures()
random.shuffle(feature_sets)
self.classifier = SklearnClassifier(MultinomialNB())
self.classifier.train(feature_sets[k:])
features,ref_labels = zip(*feature_sets[:k])
pred_labels = self.classifier.classify_many(features)
return ref_labels,pred_labels
""" nltk confusion matrix """
def confusionMatrix(self,ref,test):
ref.sort(key=lambda x: x[0])
test.sort(key=lambda x: x[0])
_,ref_labels = zip(*ref)
_,test_labels = zip(*test)
cm = ConfusionMatrix(ref_labels, test_labels)
return cm
""" Classifies proteins based on its text """
def classify(self,db,fastain):
proIDs,features,labels = [],[],[]
prevFeatureset = ''
prevText = ''
for seq_record in SeqIO.parse(fastain, "fasta"):
title = seq_record.id
toks = title.split("|")
proteinID = toks[5]
query_rows = genbank.proteinQuery(proteinID,db)
ids,text = zip(*query_rows)
text = ''.join(map(str,text))
if text=='':
label = ['na']
else:
text = word_reg.findall(text)
featureset = self.gene_features(text)
assert text!=prevText
assert featureset!=prevFeatureset
prevFeatureset = featureset
prevText = text
label = self.classifier.batch_classify([featureset])
proIDs.append(proteinID)
labels+=label
return zip(proIDs,labels)
def go():
pass
if __name__=="__main__":
parser = argparse.ArgumentParser(description=\
'A naive bayes classifier that attempts to categorize context genes')
parser.add_argument(\
'--training-labels', type=str, required=False,
help='A training data set to serve as a template for categorizing context genes')
parser.add_argument(\
'--genbank-files', type=str, nargs="+", required=False,
help='Genbank files containing annotations of bacterial genes')
parser.add_argument(\
'--test', action='store_const', const=True, default=False,
help='Run unittests')
args = parser.parse_args()
if not args.test:
go()
else:
del sys.argv[1:]
import unittest
class TestTraining1(unittest.TestCase):
def setUp(self):
self.root = os.environ['BACFINDER_HOME']
self.genbankDir = "%s/example/Streptococcus_pyogenes"%self.root
self.trainDir = "%s/data/training/protein"%self.root
self.genbankFile = "%s/example/Streptococcus_pyogenes/NC_011375.gbk"%self.root
self.test_file = "test_labels.txt"
string = "\n".join(["#Organism: Y12234.1 (as-48A-D1) and AJ438950.1 (as-48E - H), Enterococcus faecalis subsp. liquefaciens plasmid submitted as separate sequences)",
"#Reference: http://jb.asm.org/content/190/1/240.full, http://aem.asm.org/content/69/2/1229.full.pdf",
"#locus_tag label name",
"CAA72917.1 toxin",
"CAA72918.1 modifier",
"CAA72919.1 transport",
'#Organism: AF061787.1, Escherichia coli plasmid pTUC100',
'#Reference: http://www.ncbi.nlm.nih.gov/pmc/articles/PMC93700',
'#locus_tag label name',
'AAD28494.1 toxin',
'AAD28495.1 modifier',
'AAD28496.1 modifier',
'AAD28497.1 transport'])
open(self.test_file,'w').write("%s\n"%string)
def tearDown(self):
os.remove(self.test_file)
def testText(self):
nb = MNBayes(self.trainDir,self.test_file)
nb.train()
p = nb.trainingError()
print "Accuracy:",p
def test1(self):
#Labs = training.setup(self.genbankDir,self.labelFile)
nb = MNBayes(self.trainDir,self.test_file)
nb.train()
#nb.classifier.show_most_informative_features()
class TestTraining2(unittest.TestCase):
def setUp(self):
#self.genbankDir = "../example/Streptococcus_pyogenes"
#self.genbankFile = "../example/Streptococcus_pyogenes/NC_011375.gbk"
self.root = os.environ['BACFINDER_HOME']
self.trainDir = "%s/data/training/protein"%self.root
self.labelFile = "%s/data/training/training_proteins.txt"%self.root
self.zip = "test_serial.zip"
#Obtain training labels
def test1(self):
#Labs = training.setup(self.genbankDir,self.labelFile)
nb = MNBayes(self.trainDir,self.labelFile)
nb.train()
original = copy.deepcopy(nb)
#nb.classifier.show_most_informative_features()
nb.dump(self.zip)
nb.load(self.zip)
self.assertEquals(nb.labelFile,original.labelFile)
def test2(self):
#Obtain training labels
#Labs = training.setup(self.genbankDir,self.labelFile)
nb = MNBayes(self.trainDir,self.labelFile)
nb.train()
p = nb.trainingError()
print "Accuracy:",p
self.assertTrue(p>0.5)
unittest.main()
| gpl-3.0 |
jjx02230808/project0223 | sklearn/datasets/__init__.py | 72 | 3807 | """
The :mod:`sklearn.datasets` module includes utilities to load datasets,
including methods to load and fetch popular reference datasets. It also
features some artificial data generators.
"""
from .base import load_diabetes
from .base import load_digits
from .base import load_files
from .base import load_iris
from .base import load_breast_cancer
from .base import load_linnerud
from .base import load_boston
from .base import get_data_home
from .base import clear_data_home
from .base import load_sample_images
from .base import load_sample_image
from .covtype import fetch_covtype
from .kddcup99 import fetch_kddcup99
from .mlcomp import load_mlcomp
from .lfw import load_lfw_pairs
from .lfw import load_lfw_people
from .lfw import fetch_lfw_pairs
from .lfw import fetch_lfw_people
from .twenty_newsgroups import fetch_20newsgroups
from .twenty_newsgroups import fetch_20newsgroups_vectorized
from .mldata import fetch_mldata, mldata_filename
from .samples_generator import make_classification
from .samples_generator import make_multilabel_classification
from .samples_generator import make_hastie_10_2
from .samples_generator import make_regression
from .samples_generator import make_blobs
from .samples_generator import make_moons
from .samples_generator import make_circles
from .samples_generator import make_friedman1
from .samples_generator import make_friedman2
from .samples_generator import make_friedman3
from .samples_generator import make_low_rank_matrix
from .samples_generator import make_sparse_coded_signal
from .samples_generator import make_sparse_uncorrelated
from .samples_generator import make_spd_matrix
from .samples_generator import make_swiss_roll
from .samples_generator import make_s_curve
from .samples_generator import make_sparse_spd_matrix
from .samples_generator import make_gaussian_quantiles
from .samples_generator import make_biclusters
from .samples_generator import make_checkerboard
from .svmlight_format import load_svmlight_file
from .svmlight_format import load_svmlight_files
from .svmlight_format import dump_svmlight_file
from .olivetti_faces import fetch_olivetti_faces
from .species_distributions import fetch_species_distributions
from .california_housing import fetch_california_housing
from .rcv1 import fetch_rcv1
__all__ = ['clear_data_home',
'dump_svmlight_file',
'fetch_20newsgroups',
'fetch_20newsgroups_vectorized',
'fetch_lfw_pairs',
'fetch_lfw_people',
'fetch_mldata',
'fetch_olivetti_faces',
'fetch_species_distributions',
'fetch_california_housing',
'fetch_covtype',
'fetch_rcv1',
'fetch_kddcup99',
'get_data_home',
'load_boston',
'load_diabetes',
'load_digits',
'load_files',
'load_iris',
'load_breast_cancer',
'load_lfw_pairs',
'load_lfw_people',
'load_linnerud',
'load_mlcomp',
'load_sample_image',
'load_sample_images',
'load_svmlight_file',
'load_svmlight_files',
'make_biclusters',
'make_blobs',
'make_circles',
'make_classification',
'make_checkerboard',
'make_friedman1',
'make_friedman2',
'make_friedman3',
'make_gaussian_quantiles',
'make_hastie_10_2',
'make_low_rank_matrix',
'make_moons',
'make_multilabel_classification',
'make_regression',
'make_s_curve',
'make_sparse_coded_signal',
'make_sparse_spd_matrix',
'make_sparse_uncorrelated',
'make_spd_matrix',
'make_swiss_roll',
'mldata_filename']
| bsd-3-clause |
Sentient07/scikit-learn | sklearn/tests/test_pipeline.py | 13 | 31148 | """
Test the pipeline module.
"""
from tempfile import mkdtemp
import shutil
import time
import numpy as np
from scipy import sparse
from sklearn.externals.six.moves import zip
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regex
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_dict_equal
from sklearn.base import clone, BaseEstimator
from sklearn.pipeline import Pipeline, FeatureUnion, make_pipeline, make_union
from sklearn.svm import SVC
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import LinearRegression
from sklearn.cluster import KMeans
from sklearn.feature_selection import SelectKBest, f_classif
from sklearn.decomposition import PCA, TruncatedSVD
from sklearn.datasets import load_iris
from sklearn.preprocessing import StandardScaler
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.externals.joblib import Memory
JUNK_FOOD_DOCS = (
"the pizza pizza beer copyright",
"the pizza burger beer copyright",
"the the pizza beer beer copyright",
"the burger beer beer copyright",
"the coke burger coke copyright",
"the coke burger burger",
)
class NoFit(object):
"""Small class to test parameter dispatching.
"""
def __init__(self, a=None, b=None):
self.a = a
self.b = b
class NoTrans(NoFit):
def fit(self, X, y):
return self
def get_params(self, deep=False):
return {'a': self.a, 'b': self.b}
def set_params(self, **params):
self.a = params['a']
return self
class NoInvTransf(NoTrans):
def transform(self, X, y=None):
return X
class Transf(NoInvTransf):
def transform(self, X, y=None):
return X
def inverse_transform(self, X):
return X
class TransfFitParams(Transf):
def fit(self, X, y, **fit_params):
self.fit_params = fit_params
return self
class Mult(BaseEstimator):
def __init__(self, mult=1):
self.mult = mult
def fit(self, X, y):
return self
def transform(self, X):
return np.asarray(X) * self.mult
def inverse_transform(self, X):
return np.asarray(X) / self.mult
def predict(self, X):
return (np.asarray(X) * self.mult).sum(axis=1)
predict_proba = predict_log_proba = decision_function = predict
def score(self, X, y=None):
return np.sum(X)
class FitParamT(BaseEstimator):
"""Mock classifier
"""
def __init__(self):
self.successful = False
def fit(self, X, y, should_succeed=False):
self.successful = should_succeed
def predict(self, X):
return self.successful
def fit_predict(self, X, y, should_succeed=False):
self.fit(X, y, should_succeed=should_succeed)
return self.predict(X)
def score(self, X, y=None, sample_weight=None):
if sample_weight is not None:
X = X * sample_weight
return np.sum(X)
class DummyTransf(Transf):
"""Transformer which store the column means"""
def fit(self, X, y):
self.means_ = np.mean(X, axis=0)
# store timestamp to figure out whether the result of 'fit' has been
# cached or not
self.timestamp_ = time.time()
return self
def test_pipeline_init():
# Test the various init parameters of the pipeline.
assert_raises(TypeError, Pipeline)
# Check that we can't instantiate pipelines with objects without fit
# method
assert_raises_regex(TypeError,
'Last step of Pipeline should implement fit. '
'.*NoFit.*',
Pipeline, [('clf', NoFit())])
# Smoke test with only an estimator
clf = NoTrans()
pipe = Pipeline([('svc', clf)])
assert_equal(pipe.get_params(deep=True),
dict(svc__a=None, svc__b=None, svc=clf,
**pipe.get_params(deep=False)))
# Check that params are set
pipe.set_params(svc__a=0.1)
assert_equal(clf.a, 0.1)
assert_equal(clf.b, None)
# Smoke test the repr:
repr(pipe)
# Test with two objects
clf = SVC()
filter1 = SelectKBest(f_classif)
pipe = Pipeline([('anova', filter1), ('svc', clf)])
# Check that we can't instantiate with non-transformers on the way
# Note that NoTrans implements fit, but not transform
assert_raises_regex(TypeError,
'All intermediate steps should be transformers'
'.*\\bNoTrans\\b.*',
Pipeline, [('t', NoTrans()), ('svc', clf)])
# Check that params are set
pipe.set_params(svc__C=0.1)
assert_equal(clf.C, 0.1)
# Smoke test the repr:
repr(pipe)
# Check that params are not set when naming them wrong
assert_raises(ValueError, pipe.set_params, anova__C=0.1)
# Test clone
pipe2 = clone(pipe)
assert_false(pipe.named_steps['svc'] is pipe2.named_steps['svc'])
# Check that apart from estimators, the parameters are the same
params = pipe.get_params(deep=True)
params2 = pipe2.get_params(deep=True)
for x in pipe.get_params(deep=False):
params.pop(x)
for x in pipe2.get_params(deep=False):
params2.pop(x)
# Remove estimators that where copied
params.pop('svc')
params.pop('anova')
params2.pop('svc')
params2.pop('anova')
assert_equal(params, params2)
def test_pipeline_methods_anova():
# Test the various methods of the pipeline (anova).
iris = load_iris()
X = iris.data
y = iris.target
# Test with Anova + LogisticRegression
clf = LogisticRegression()
filter1 = SelectKBest(f_classif, k=2)
pipe = Pipeline([('anova', filter1), ('logistic', clf)])
pipe.fit(X, y)
pipe.predict(X)
pipe.predict_proba(X)
pipe.predict_log_proba(X)
pipe.score(X, y)
def test_pipeline_fit_params():
# Test that the pipeline can take fit parameters
pipe = Pipeline([('transf', Transf()), ('clf', FitParamT())])
pipe.fit(X=None, y=None, clf__should_succeed=True)
# classifier should return True
assert_true(pipe.predict(None))
# and transformer params should not be changed
assert_true(pipe.named_steps['transf'].a is None)
assert_true(pipe.named_steps['transf'].b is None)
# invalid parameters should raise an error message
assert_raise_message(
TypeError,
"fit() got an unexpected keyword argument 'bad'",
pipe.fit, None, None, clf__bad=True
)
def test_pipeline_sample_weight_supported():
# Pipeline should pass sample_weight
X = np.array([[1, 2]])
pipe = Pipeline([('transf', Transf()), ('clf', FitParamT())])
pipe.fit(X, y=None)
assert_equal(pipe.score(X), 3)
assert_equal(pipe.score(X, y=None), 3)
assert_equal(pipe.score(X, y=None, sample_weight=None), 3)
assert_equal(pipe.score(X, sample_weight=np.array([2, 3])), 8)
def test_pipeline_sample_weight_unsupported():
# When sample_weight is None it shouldn't be passed
X = np.array([[1, 2]])
pipe = Pipeline([('transf', Transf()), ('clf', Mult())])
pipe.fit(X, y=None)
assert_equal(pipe.score(X), 3)
assert_equal(pipe.score(X, sample_weight=None), 3)
assert_raise_message(
TypeError,
"score() got an unexpected keyword argument 'sample_weight'",
pipe.score, X, sample_weight=np.array([2, 3])
)
def test_pipeline_raise_set_params_error():
# Test pipeline raises set params error message for nested models.
pipe = Pipeline([('cls', LinearRegression())])
# expected error message
error_msg = ('Invalid parameter %s for estimator %s. '
'Check the list of available parameters '
'with `estimator.get_params().keys()`.')
assert_raise_message(ValueError,
error_msg % ('fake', 'Pipeline'),
pipe.set_params,
fake='nope')
# nested model check
assert_raise_message(ValueError,
error_msg % ("fake", pipe),
pipe.set_params,
fake__estimator='nope')
def test_pipeline_methods_pca_svm():
# Test the various methods of the pipeline (pca + svm).
iris = load_iris()
X = iris.data
y = iris.target
# Test with PCA + SVC
clf = SVC(probability=True, random_state=0)
pca = PCA(svd_solver='full', n_components='mle', whiten=True)
pipe = Pipeline([('pca', pca), ('svc', clf)])
pipe.fit(X, y)
pipe.predict(X)
pipe.predict_proba(X)
pipe.predict_log_proba(X)
pipe.score(X, y)
def test_pipeline_methods_preprocessing_svm():
# Test the various methods of the pipeline (preprocessing + svm).
iris = load_iris()
X = iris.data
y = iris.target
n_samples = X.shape[0]
n_classes = len(np.unique(y))
scaler = StandardScaler()
pca = PCA(n_components=2, svd_solver='randomized', whiten=True)
clf = SVC(probability=True, random_state=0, decision_function_shape='ovr')
for preprocessing in [scaler, pca]:
pipe = Pipeline([('preprocess', preprocessing), ('svc', clf)])
pipe.fit(X, y)
# check shapes of various prediction functions
predict = pipe.predict(X)
assert_equal(predict.shape, (n_samples,))
proba = pipe.predict_proba(X)
assert_equal(proba.shape, (n_samples, n_classes))
log_proba = pipe.predict_log_proba(X)
assert_equal(log_proba.shape, (n_samples, n_classes))
decision_function = pipe.decision_function(X)
assert_equal(decision_function.shape, (n_samples, n_classes))
pipe.score(X, y)
def test_fit_predict_on_pipeline():
# test that the fit_predict method is implemented on a pipeline
# test that the fit_predict on pipeline yields same results as applying
# transform and clustering steps separately
iris = load_iris()
scaler = StandardScaler()
km = KMeans(random_state=0)
# As pipeline doesn't clone estimators on construction,
# it must have its own estimators
scaler_for_pipeline = StandardScaler()
km_for_pipeline = KMeans(random_state=0)
# first compute the transform and clustering step separately
scaled = scaler.fit_transform(iris.data)
separate_pred = km.fit_predict(scaled)
# use a pipeline to do the transform and clustering in one step
pipe = Pipeline([
('scaler', scaler_for_pipeline),
('Kmeans', km_for_pipeline)
])
pipeline_pred = pipe.fit_predict(iris.data)
assert_array_almost_equal(pipeline_pred, separate_pred)
def test_fit_predict_on_pipeline_without_fit_predict():
# tests that a pipeline does not have fit_predict method when final
# step of pipeline does not have fit_predict defined
scaler = StandardScaler()
pca = PCA(svd_solver='full')
pipe = Pipeline([('scaler', scaler), ('pca', pca)])
assert_raises_regex(AttributeError,
"'PCA' object has no attribute 'fit_predict'",
getattr, pipe, 'fit_predict')
def test_fit_predict_with_intermediate_fit_params():
# tests that Pipeline passes fit_params to intermediate steps
# when fit_predict is invoked
pipe = Pipeline([('transf', TransfFitParams()), ('clf', FitParamT())])
pipe.fit_predict(X=None,
y=None,
transf__should_get_this=True,
clf__should_succeed=True)
assert_true(pipe.named_steps['transf'].fit_params['should_get_this'])
assert_true(pipe.named_steps['clf'].successful)
assert_false('should_succeed' in pipe.named_steps['transf'].fit_params)
def test_feature_union():
# basic sanity check for feature union
iris = load_iris()
X = iris.data
X -= X.mean(axis=0)
y = iris.target
svd = TruncatedSVD(n_components=2, random_state=0)
select = SelectKBest(k=1)
fs = FeatureUnion([("svd", svd), ("select", select)])
fs.fit(X, y)
X_transformed = fs.transform(X)
assert_equal(X_transformed.shape, (X.shape[0], 3))
# check if it does the expected thing
assert_array_almost_equal(X_transformed[:, :-1], svd.fit_transform(X))
assert_array_equal(X_transformed[:, -1],
select.fit_transform(X, y).ravel())
# test if it also works for sparse input
# We use a different svd object to control the random_state stream
fs = FeatureUnion([("svd", svd), ("select", select)])
X_sp = sparse.csr_matrix(X)
X_sp_transformed = fs.fit_transform(X_sp, y)
assert_array_almost_equal(X_transformed, X_sp_transformed.toarray())
# test setting parameters
fs.set_params(select__k=2)
assert_equal(fs.fit_transform(X, y).shape, (X.shape[0], 4))
# test it works with transformers missing fit_transform
fs = FeatureUnion([("mock", Transf()), ("svd", svd), ("select", select)])
X_transformed = fs.fit_transform(X, y)
assert_equal(X_transformed.shape, (X.shape[0], 8))
# test error if some elements do not support transform
assert_raises_regex(TypeError,
'All estimators should implement fit and '
'transform.*\\bNoTrans\\b',
FeatureUnion,
[("transform", Transf()), ("no_transform", NoTrans())])
def test_make_union():
pca = PCA(svd_solver='full')
mock = Transf()
fu = make_union(pca, mock)
names, transformers = zip(*fu.transformer_list)
assert_equal(names, ("pca", "transf"))
assert_equal(transformers, (pca, mock))
def test_make_union_kwargs():
pca = PCA(svd_solver='full')
mock = Transf()
fu = make_union(pca, mock, n_jobs=3)
assert_equal(fu.transformer_list, make_union(pca, mock).transformer_list)
assert_equal(3, fu.n_jobs)
# invalid keyword parameters should raise an error message
assert_raise_message(
TypeError,
'Unknown keyword arguments: "transformer_weights"',
make_union, pca, mock, transformer_weights={'pca': 10, 'Transf': 1}
)
def test_pipeline_transform():
# Test whether pipeline works with a transformer at the end.
# Also test pipeline.transform and pipeline.inverse_transform
iris = load_iris()
X = iris.data
pca = PCA(n_components=2, svd_solver='full')
pipeline = Pipeline([('pca', pca)])
# test transform and fit_transform:
X_trans = pipeline.fit(X).transform(X)
X_trans2 = pipeline.fit_transform(X)
X_trans3 = pca.fit_transform(X)
assert_array_almost_equal(X_trans, X_trans2)
assert_array_almost_equal(X_trans, X_trans3)
X_back = pipeline.inverse_transform(X_trans)
X_back2 = pca.inverse_transform(X_trans)
assert_array_almost_equal(X_back, X_back2)
def test_pipeline_fit_transform():
# Test whether pipeline works with a transformer missing fit_transform
iris = load_iris()
X = iris.data
y = iris.target
transf = Transf()
pipeline = Pipeline([('mock', transf)])
# test fit_transform:
X_trans = pipeline.fit_transform(X, y)
X_trans2 = transf.fit(X, y).transform(X)
assert_array_almost_equal(X_trans, X_trans2)
def test_set_pipeline_steps():
transf1 = Transf()
transf2 = Transf()
pipeline = Pipeline([('mock', transf1)])
assert_true(pipeline.named_steps['mock'] is transf1)
# Directly setting attr
pipeline.steps = [('mock2', transf2)]
assert_true('mock' not in pipeline.named_steps)
assert_true(pipeline.named_steps['mock2'] is transf2)
assert_equal([('mock2', transf2)], pipeline.steps)
# Using set_params
pipeline.set_params(steps=[('mock', transf1)])
assert_equal([('mock', transf1)], pipeline.steps)
# Using set_params to replace single step
pipeline.set_params(mock=transf2)
assert_equal([('mock', transf2)], pipeline.steps)
# With invalid data
pipeline.set_params(steps=[('junk', ())])
assert_raises(TypeError, pipeline.fit, [[1]], [1])
assert_raises(TypeError, pipeline.fit_transform, [[1]], [1])
def test_set_pipeline_step_none():
# Test setting Pipeline steps to None
X = np.array([[1]])
y = np.array([1])
mult2 = Mult(mult=2)
mult3 = Mult(mult=3)
mult5 = Mult(mult=5)
def make():
return Pipeline([('m2', mult2), ('m3', mult3), ('last', mult5)])
pipeline = make()
exp = 2 * 3 * 5
assert_array_equal([[exp]], pipeline.fit_transform(X, y))
assert_array_equal([exp], pipeline.fit(X).predict(X))
assert_array_equal(X, pipeline.inverse_transform([[exp]]))
pipeline.set_params(m3=None)
exp = 2 * 5
assert_array_equal([[exp]], pipeline.fit_transform(X, y))
assert_array_equal([exp], pipeline.fit(X).predict(X))
assert_array_equal(X, pipeline.inverse_transform([[exp]]))
assert_dict_equal(pipeline.get_params(deep=True),
{'steps': pipeline.steps,
'm2': mult2,
'm3': None,
'last': mult5,
'memory': None,
'm2__mult': 2,
'last__mult': 5,
})
pipeline.set_params(m2=None)
exp = 5
assert_array_equal([[exp]], pipeline.fit_transform(X, y))
assert_array_equal([exp], pipeline.fit(X).predict(X))
assert_array_equal(X, pipeline.inverse_transform([[exp]]))
# for other methods, ensure no AttributeErrors on None:
other_methods = ['predict_proba', 'predict_log_proba',
'decision_function', 'transform', 'score']
for method in other_methods:
getattr(pipeline, method)(X)
pipeline.set_params(m2=mult2)
exp = 2 * 5
assert_array_equal([[exp]], pipeline.fit_transform(X, y))
assert_array_equal([exp], pipeline.fit(X).predict(X))
assert_array_equal(X, pipeline.inverse_transform([[exp]]))
pipeline = make()
pipeline.set_params(last=None)
# mult2 and mult3 are active
exp = 6
assert_array_equal([[exp]], pipeline.fit(X, y).transform(X))
assert_array_equal([[exp]], pipeline.fit_transform(X, y))
assert_array_equal(X, pipeline.inverse_transform([[exp]]))
assert_raise_message(AttributeError,
"'NoneType' object has no attribute 'predict'",
getattr, pipeline, 'predict')
# Check None step at construction time
exp = 2 * 5
pipeline = Pipeline([('m2', mult2), ('m3', None), ('last', mult5)])
assert_array_equal([[exp]], pipeline.fit_transform(X, y))
assert_array_equal([exp], pipeline.fit(X).predict(X))
assert_array_equal(X, pipeline.inverse_transform([[exp]]))
def test_pipeline_ducktyping():
pipeline = make_pipeline(Mult(5))
pipeline.predict
pipeline.transform
pipeline.inverse_transform
pipeline = make_pipeline(Transf())
assert_false(hasattr(pipeline, 'predict'))
pipeline.transform
pipeline.inverse_transform
pipeline = make_pipeline(None)
assert_false(hasattr(pipeline, 'predict'))
pipeline.transform
pipeline.inverse_transform
pipeline = make_pipeline(Transf(), NoInvTransf())
assert_false(hasattr(pipeline, 'predict'))
pipeline.transform
assert_false(hasattr(pipeline, 'inverse_transform'))
pipeline = make_pipeline(NoInvTransf(), Transf())
assert_false(hasattr(pipeline, 'predict'))
pipeline.transform
assert_false(hasattr(pipeline, 'inverse_transform'))
def test_make_pipeline():
t1 = Transf()
t2 = Transf()
pipe = make_pipeline(t1, t2)
assert_true(isinstance(pipe, Pipeline))
assert_equal(pipe.steps[0][0], "transf-1")
assert_equal(pipe.steps[1][0], "transf-2")
pipe = make_pipeline(t1, t2, FitParamT())
assert_true(isinstance(pipe, Pipeline))
assert_equal(pipe.steps[0][0], "transf-1")
assert_equal(pipe.steps[1][0], "transf-2")
assert_equal(pipe.steps[2][0], "fitparamt")
def test_feature_union_weights():
# test feature union with transformer weights
iris = load_iris()
X = iris.data
y = iris.target
pca = PCA(n_components=2, svd_solver='randomized', random_state=0)
select = SelectKBest(k=1)
# test using fit followed by transform
fs = FeatureUnion([("pca", pca), ("select", select)],
transformer_weights={"pca": 10})
fs.fit(X, y)
X_transformed = fs.transform(X)
# test using fit_transform
fs = FeatureUnion([("pca", pca), ("select", select)],
transformer_weights={"pca": 10})
X_fit_transformed = fs.fit_transform(X, y)
# test it works with transformers missing fit_transform
fs = FeatureUnion([("mock", Transf()), ("pca", pca), ("select", select)],
transformer_weights={"mock": 10})
X_fit_transformed_wo_method = fs.fit_transform(X, y)
# check against expected result
# We use a different pca object to control the random_state stream
assert_array_almost_equal(X_transformed[:, :-1], 10 * pca.fit_transform(X))
assert_array_equal(X_transformed[:, -1],
select.fit_transform(X, y).ravel())
assert_array_almost_equal(X_fit_transformed[:, :-1],
10 * pca.fit_transform(X))
assert_array_equal(X_fit_transformed[:, -1],
select.fit_transform(X, y).ravel())
assert_equal(X_fit_transformed_wo_method.shape, (X.shape[0], 7))
def test_feature_union_parallel():
# test that n_jobs work for FeatureUnion
X = JUNK_FOOD_DOCS
fs = FeatureUnion([
("words", CountVectorizer(analyzer='word')),
("chars", CountVectorizer(analyzer='char')),
])
fs_parallel = FeatureUnion([
("words", CountVectorizer(analyzer='word')),
("chars", CountVectorizer(analyzer='char')),
], n_jobs=2)
fs_parallel2 = FeatureUnion([
("words", CountVectorizer(analyzer='word')),
("chars", CountVectorizer(analyzer='char')),
], n_jobs=2)
fs.fit(X)
X_transformed = fs.transform(X)
assert_equal(X_transformed.shape[0], len(X))
fs_parallel.fit(X)
X_transformed_parallel = fs_parallel.transform(X)
assert_equal(X_transformed.shape, X_transformed_parallel.shape)
assert_array_equal(
X_transformed.toarray(),
X_transformed_parallel.toarray()
)
# fit_transform should behave the same
X_transformed_parallel2 = fs_parallel2.fit_transform(X)
assert_array_equal(
X_transformed.toarray(),
X_transformed_parallel2.toarray()
)
# transformers should stay fit after fit_transform
X_transformed_parallel2 = fs_parallel2.transform(X)
assert_array_equal(
X_transformed.toarray(),
X_transformed_parallel2.toarray()
)
def test_feature_union_feature_names():
word_vect = CountVectorizer(analyzer="word")
char_vect = CountVectorizer(analyzer="char_wb", ngram_range=(3, 3))
ft = FeatureUnion([("chars", char_vect), ("words", word_vect)])
ft.fit(JUNK_FOOD_DOCS)
feature_names = ft.get_feature_names()
for feat in feature_names:
assert_true("chars__" in feat or "words__" in feat)
assert_equal(len(feature_names), 35)
ft = FeatureUnion([("tr1", Transf())]).fit([[1]])
assert_raise_message(AttributeError,
'Transformer tr1 (type Transf) does not provide '
'get_feature_names', ft.get_feature_names)
def test_classes_property():
iris = load_iris()
X = iris.data
y = iris.target
reg = make_pipeline(SelectKBest(k=1), LinearRegression())
reg.fit(X, y)
assert_raises(AttributeError, getattr, reg, "classes_")
clf = make_pipeline(SelectKBest(k=1), LogisticRegression(random_state=0))
assert_raises(AttributeError, getattr, clf, "classes_")
clf.fit(X, y)
assert_array_equal(clf.classes_, np.unique(y))
def test_set_feature_union_steps():
mult2 = Mult(2)
mult2.get_feature_names = lambda: ['x2']
mult3 = Mult(3)
mult3.get_feature_names = lambda: ['x3']
mult5 = Mult(5)
mult5.get_feature_names = lambda: ['x5']
ft = FeatureUnion([('m2', mult2), ('m3', mult3)])
assert_array_equal([[2, 3]], ft.transform(np.asarray([[1]])))
assert_equal(['m2__x2', 'm3__x3'], ft.get_feature_names())
# Directly setting attr
ft.transformer_list = [('m5', mult5)]
assert_array_equal([[5]], ft.transform(np.asarray([[1]])))
assert_equal(['m5__x5'], ft.get_feature_names())
# Using set_params
ft.set_params(transformer_list=[('mock', mult3)])
assert_array_equal([[3]], ft.transform(np.asarray([[1]])))
assert_equal(['mock__x3'], ft.get_feature_names())
# Using set_params to replace single step
ft.set_params(mock=mult5)
assert_array_equal([[5]], ft.transform(np.asarray([[1]])))
assert_equal(['mock__x5'], ft.get_feature_names())
def test_set_feature_union_step_none():
mult2 = Mult(2)
mult2.get_feature_names = lambda: ['x2']
mult3 = Mult(3)
mult3.get_feature_names = lambda: ['x3']
X = np.asarray([[1]])
ft = FeatureUnion([('m2', mult2), ('m3', mult3)])
assert_array_equal([[2, 3]], ft.fit(X).transform(X))
assert_array_equal([[2, 3]], ft.fit_transform(X))
assert_equal(['m2__x2', 'm3__x3'], ft.get_feature_names())
ft.set_params(m2=None)
assert_array_equal([[3]], ft.fit(X).transform(X))
assert_array_equal([[3]], ft.fit_transform(X))
assert_equal(['m3__x3'], ft.get_feature_names())
ft.set_params(m3=None)
assert_array_equal([[]], ft.fit(X).transform(X))
assert_array_equal([[]], ft.fit_transform(X))
assert_equal([], ft.get_feature_names())
# check we can change back
ft.set_params(m3=mult3)
assert_array_equal([[3]], ft.fit(X).transform(X))
def test_step_name_validation():
bad_steps1 = [('a__q', Mult(2)), ('b', Mult(3))]
bad_steps2 = [('a', Mult(2)), ('a', Mult(3))]
for cls, param in [(Pipeline, 'steps'),
(FeatureUnion, 'transformer_list')]:
# we validate in construction (despite scikit-learn convention)
bad_steps3 = [('a', Mult(2)), (param, Mult(3))]
for bad_steps, message in [
(bad_steps1, "Step names must not contain __: got ['a__q']"),
(bad_steps2, "Names provided are not unique: ['a', 'a']"),
(bad_steps3, "Step names conflict with constructor "
"arguments: ['%s']" % param),
]:
# three ways to make invalid:
# - construction
assert_raise_message(ValueError, message, cls,
**{param: bad_steps})
# - setattr
est = cls(**{param: [('a', Mult(1))]})
setattr(est, param, bad_steps)
assert_raise_message(ValueError, message, est.fit, [[1]], [1])
assert_raise_message(ValueError, message, est.fit_transform,
[[1]], [1])
# - set_params
est = cls(**{param: [('a', Mult(1))]})
est.set_params(**{param: bad_steps})
assert_raise_message(ValueError, message, est.fit, [[1]], [1])
assert_raise_message(ValueError, message, est.fit_transform,
[[1]], [1])
def test_pipeline_wrong_memory():
# Test that an error is raised when memory is not a string or a Memory
# instance
iris = load_iris()
X = iris.data
y = iris.target
# Define memory as an integer
memory = 1
cached_pipe = Pipeline([('transf', DummyTransf()), ('svc', SVC())],
memory=memory)
assert_raises_regex(ValueError, "'memory' should either be a string or a"
" joblib.Memory instance, got 'memory=1' instead.",
cached_pipe.fit, X, y)
def test_pipeline_memory():
iris = load_iris()
X = iris.data
y = iris.target
cachedir = mkdtemp()
try:
memory = Memory(cachedir=cachedir, verbose=10)
# Test with Transformer + SVC
clf = SVC(probability=True, random_state=0)
transf = DummyTransf()
pipe = Pipeline([('transf', clone(transf)), ('svc', clf)])
cached_pipe = Pipeline([('transf', transf), ('svc', clf)],
memory=memory)
# Memoize the transformer at the first fit
cached_pipe.fit(X, y)
pipe.fit(X, y)
        # Get the time stamp of the transformer in the cached pipeline
ts = cached_pipe.named_steps['transf'].timestamp_
# Check that cached_pipe and pipe yield identical results
assert_array_equal(pipe.predict(X), cached_pipe.predict(X))
assert_array_equal(pipe.predict_proba(X), cached_pipe.predict_proba(X))
assert_array_equal(pipe.predict_log_proba(X),
cached_pipe.predict_log_proba(X))
assert_array_equal(pipe.score(X, y), cached_pipe.score(X, y))
assert_array_equal(pipe.named_steps['transf'].means_,
cached_pipe.named_steps['transf'].means_)
assert_false(hasattr(transf, 'means_'))
# Check that we are reading the cache while fitting
# a second time
cached_pipe.fit(X, y)
# Check that cached_pipe and pipe yield identical results
assert_array_equal(pipe.predict(X), cached_pipe.predict(X))
assert_array_equal(pipe.predict_proba(X), cached_pipe.predict_proba(X))
assert_array_equal(pipe.predict_log_proba(X),
cached_pipe.predict_log_proba(X))
assert_array_equal(pipe.score(X, y), cached_pipe.score(X, y))
assert_array_equal(pipe.named_steps['transf'].means_,
cached_pipe.named_steps['transf'].means_)
assert_equal(ts, cached_pipe.named_steps['transf'].timestamp_)
# Create a new pipeline with cloned estimators
        # Check that even changing the step name does not affect the cache hit
clf_2 = SVC(probability=True, random_state=0)
transf_2 = DummyTransf()
cached_pipe_2 = Pipeline([('transf_2', transf_2), ('svc', clf_2)],
memory=memory)
cached_pipe_2.fit(X, y)
# Check that cached_pipe and pipe yield identical results
assert_array_equal(pipe.predict(X), cached_pipe_2.predict(X))
assert_array_equal(pipe.predict_proba(X),
cached_pipe_2.predict_proba(X))
assert_array_equal(pipe.predict_log_proba(X),
cached_pipe_2.predict_log_proba(X))
assert_array_equal(pipe.score(X, y), cached_pipe_2.score(X, y))
assert_array_equal(pipe.named_steps['transf'].means_,
cached_pipe_2.named_steps['transf_2'].means_)
assert_equal(ts, cached_pipe_2.named_steps['transf_2'].timestamp_)
finally:
shutil.rmtree(cachedir)
| bsd-3-clause |
rrohan/scikit-learn | sklearn/utils/extmath.py | 70 | 21951 | """
Extended math utilities.
"""
# Authors: Gael Varoquaux
# Alexandre Gramfort
# Alexandre T. Passos
# Olivier Grisel
# Lars Buitinck
# Stefan van der Walt
# Kyle Kastner
# License: BSD 3 clause
from __future__ import division
from functools import partial
import warnings
import numpy as np
from scipy import linalg
from scipy.sparse import issparse
from . import check_random_state
from .fixes import np_version
from ._logistic_sigmoid import _log_logistic_sigmoid
from ..externals.six.moves import xrange
from .sparsefuncs_fast import csr_row_norms
from .validation import check_array, NonBLASDotWarning
def norm(x):
"""Compute the Euclidean or Frobenius norm of x.
Returns the Euclidean norm when x is a vector, the Frobenius norm when x
is a matrix (2-d array). More precise than sqrt(squared_norm(x)).
"""
x = np.asarray(x)
nrm2, = linalg.get_blas_funcs(['nrm2'], [x])
return nrm2(x)
# Newer NumPy has a ravel that needs less copying.
if np_version < (1, 7, 1):
_ravel = np.ravel
else:
_ravel = partial(np.ravel, order='K')
def squared_norm(x):
"""Squared Euclidean or Frobenius norm of x.
Returns the Euclidean norm when x is a vector, the Frobenius norm when x
is a matrix (2-d array). Faster than norm(x) ** 2.
"""
x = _ravel(x)
return np.dot(x, x)
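# Illustrative example: squared_norm(np.array([3., 4.])) == 25.0, i.e. the same
# value as norm(x) ** 2 but computed without the square root.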
def row_norms(X, squared=False):
"""Row-wise (squared) Euclidean norm of X.
Equivalent to np.sqrt((X * X).sum(axis=1)), but also supports CSR sparse
matrices and does not create an X.shape-sized temporary.
Performs no input validation.
"""
if issparse(X):
norms = csr_row_norms(X)
else:
norms = np.einsum('ij,ij->i', X, X)
if not squared:
np.sqrt(norms, norms)
return norms
def fast_logdet(A):
"""Compute log(det(A)) for A symmetric
Equivalent to : np.log(nl.det(A)) but more robust.
It returns -Inf if det(A) is non positive or is not defined.
"""
sign, ld = np.linalg.slogdet(A)
if not sign > 0:
return -np.inf
return ld
def _impose_f_order(X):
"""Helper Function"""
# important to access flags instead of calling np.isfortran,
# this catches corner cases.
if X.flags.c_contiguous:
return check_array(X.T, copy=False, order='F'), True
else:
return check_array(X, copy=False, order='F'), False
def _fast_dot(A, B):
if B.shape[0] != A.shape[A.ndim - 1]: # check adopted from '_dotblas.c'
raise ValueError
if A.dtype != B.dtype or any(x.dtype not in (np.float32, np.float64)
for x in [A, B]):
warnings.warn('Data must be of same type. Supported types '
'are 32 and 64 bit float. '
'Falling back to np.dot.', NonBLASDotWarning)
raise ValueError
if min(A.shape) == 1 or min(B.shape) == 1 or A.ndim != 2 or B.ndim != 2:
raise ValueError
# scipy 0.9 compliant API
dot = linalg.get_blas_funcs(['gemm'], (A, B))[0]
A, trans_a = _impose_f_order(A)
B, trans_b = _impose_f_order(B)
return dot(alpha=1.0, a=A, b=B, trans_a=trans_a, trans_b=trans_b)
def _have_blas_gemm():
try:
linalg.get_blas_funcs(['gemm'])
return True
except (AttributeError, ValueError):
warnings.warn('Could not import BLAS, falling back to np.dot')
return False
# Only use fast_dot for older NumPy; newer ones have tackled the speed issue.
if np_version < (1, 7, 2) and _have_blas_gemm():
def fast_dot(A, B):
"""Compute fast dot products directly calling BLAS.
This function calls BLAS directly while warranting Fortran contiguity.
This helps avoiding extra copies `np.dot` would have created.
For details see section `Linear Algebra on large Arrays`:
http://wiki.scipy.org/PerformanceTips
Parameters
----------
A, B: instance of np.ndarray
Input arrays. Arrays are supposed to be of the same dtype and to
have exactly 2 dimensions. Currently only floats are supported.
In case these requirements aren't met np.dot(A, B) is returned
instead. To activate the related warning issued in this case
execute the following lines of code:
>> import warnings
>> from sklearn.utils.validation import NonBLASDotWarning
>> warnings.simplefilter('always', NonBLASDotWarning)
"""
try:
return _fast_dot(A, B)
except ValueError:
# Maltyped or malformed data.
return np.dot(A, B)
else:
fast_dot = np.dot
def density(w, **kwargs):
"""Compute density of a sparse vector
Return a value between 0 and 1
"""
if hasattr(w, "toarray"):
d = float(w.nnz) / (w.shape[0] * w.shape[1])
else:
d = 0 if w is None else float((w != 0).sum()) / w.size
return d
def safe_sparse_dot(a, b, dense_output=False):
"""Dot product that handle the sparse matrix case correctly
Uses BLAS GEMM as replacement for numpy.dot where possible
to avoid unnecessary copies.
"""
if issparse(a) or issparse(b):
ret = a * b
if dense_output and hasattr(ret, "toarray"):
ret = ret.toarray()
return ret
else:
return fast_dot(a, b)
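# Hedged usage sketch for safe_sparse_dot: the helper below is illustrative and
# not part of the public API. The same product is computed with dense and with
# sparse operands; dense_output=True guarantees a dense result even when both
# inputs are sparse.
def _safe_sparse_dot_example():
    from scipy.sparse import csr_matrix
    A = np.array([[1.0, 0.0], [0.0, 2.0]])
    B = np.array([[3.0], [4.0]])
    dense_product = safe_sparse_dot(A, B)                      # [[3.], [8.]]
    sparse_product = safe_sparse_dot(csr_matrix(A), csr_matrix(B),
                                     dense_output=True)        # same values
    return dense_product, sparse_product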
def randomized_range_finder(A, size, n_iter, random_state=None):
"""Computes an orthonormal matrix whose range approximates the range of A.
Parameters
----------
A: 2D array
The input data matrix
size: integer
Size of the return array
n_iter: integer
Number of power iterations used to stabilize the result
random_state: RandomState or an int seed (0 by default)
A random number generator instance
Returns
-------
Q: 2D array
A (size x size) projection matrix, the range of which
approximates well the range of the input matrix A.
Notes
-----
Follows Algorithm 4.3 of
Finding structure with randomness: Stochastic algorithms for constructing
approximate matrix decompositions
    Halko, et al., 2009 (arXiv:0909.4061) http://arxiv.org/pdf/0909.4061
"""
random_state = check_random_state(random_state)
# generating random gaussian vectors r with shape: (A.shape[1], size)
R = random_state.normal(size=(A.shape[1], size))
# sampling the range of A using by linear projection of r
Y = safe_sparse_dot(A, R)
del R
# perform power iterations with Y to further 'imprint' the top
# singular vectors of A in Y
for i in xrange(n_iter):
Y = safe_sparse_dot(A, safe_sparse_dot(A.T, Y))
# extracting an orthonormal basis of the A range samples
Q, R = linalg.qr(Y, mode='economic')
return Q
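# Hedged usage sketch for randomized_range_finder: illustrative helper, not part
# of the public API. The returned Q has orthonormal columns that approximately
# span the range of A.
def _randomized_range_finder_example():
    rng = np.random.RandomState(0)
    A = rng.randn(20, 10)
    Q = randomized_range_finder(A, size=5, n_iter=2, random_state=0)
    return Q.shape, np.allclose(np.dot(Q.T, Q), np.eye(5))  # (20, 5), True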
def randomized_svd(M, n_components, n_oversamples=10, n_iter=0,
transpose='auto', flip_sign=True, random_state=0):
"""Computes a truncated randomized SVD
Parameters
----------
M: ndarray or sparse matrix
Matrix to decompose
n_components: int
Number of singular values and vectors to extract.
n_oversamples: int (default is 10)
Additional number of random vectors to sample the range of M so as
to ensure proper conditioning. The total number of random vectors
used to find the range of M is n_components + n_oversamples.
n_iter: int (default is 0)
Number of power iterations (can be used to deal with very noisy
problems).
transpose: True, False or 'auto' (default)
Whether the algorithm should be applied to M.T instead of M. The
result should approximately be the same. The 'auto' mode will
        trigger the transposition if M.shape[1] > M.shape[0] since this
        implementation of randomized SVD tends to be a little faster in that
        case.
flip_sign: boolean, (True by default)
The output of a singular value decomposition is only unique up to a
permutation of the signs of the singular vectors. If `flip_sign` is
set to `True`, the sign ambiguity is resolved by making the largest
loadings for each component in the left singular vectors positive.
random_state: RandomState or an int seed (0 by default)
        A random number generator instance to make behavior deterministic.
Notes
-----
This algorithm finds a (usually very good) approximate truncated
singular value decomposition using randomization to speed up the
computations. It is particularly fast on large matrices on which
you wish to extract only a small number of components.
References
----------
* Finding structure with randomness: Stochastic algorithms for constructing
approximate matrix decompositions
Halko, et al., 2009 http://arxiv.org/abs/arXiv:0909.4061
* A randomized algorithm for the decomposition of matrices
Per-Gunnar Martinsson, Vladimir Rokhlin and Mark Tygert
"""
random_state = check_random_state(random_state)
n_random = n_components + n_oversamples
n_samples, n_features = M.shape
if transpose == 'auto' and n_samples > n_features:
transpose = True
if transpose:
# this implementation is a bit faster with smaller shape[1]
M = M.T
Q = randomized_range_finder(M, n_random, n_iter, random_state)
# project M to the (k + p) dimensional space using the basis vectors
B = safe_sparse_dot(Q.T, M)
# compute the SVD on the thin matrix: (k + p) wide
Uhat, s, V = linalg.svd(B, full_matrices=False)
del B
U = np.dot(Q, Uhat)
if flip_sign:
U, V = svd_flip(U, V)
if transpose:
# transpose back the results according to the input convention
return V[:n_components, :].T, s[:n_components], U[:, :n_components].T
else:
return U[:, :n_components], s[:n_components], V[:n_components, :]
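# Hedged usage sketch for randomized_svd: illustrative helper, not part of the
# public API. With a few power iterations the leading singular values of a small
# random matrix should closely match those from the exact SVD.
def _randomized_svd_example():
    rng = np.random.RandomState(0)
    M = rng.randn(30, 10)
    U, s, V = randomized_svd(M, n_components=3, n_iter=4, random_state=0)
    s_exact = linalg.svd(M, full_matrices=False)[1][:3]
    return np.allclose(s, s_exact), U.shape, V.shape  # True, (30, 3), (3, 10)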
def logsumexp(arr, axis=0):
"""Computes the sum of arr assuming arr is in the log domain.
Returns log(sum(exp(arr))) while minimizing the possibility of
over/underflow.
Examples
--------
>>> import numpy as np
>>> from sklearn.utils.extmath import logsumexp
>>> a = np.arange(10)
>>> np.log(np.sum(np.exp(a)))
9.4586297444267107
>>> logsumexp(a)
9.4586297444267107
"""
arr = np.rollaxis(arr, axis)
# Use the max to normalize, as with the log this is what accumulates
# the less errors
vmax = arr.max(axis=0)
out = np.log(np.sum(np.exp(arr - vmax), axis=0))
out += vmax
return out
def weighted_mode(a, w, axis=0):
"""Returns an array of the weighted modal (most common) value in a
If there is more than one such value, only the first is returned.
The bin-count for the modal bins is also returned.
This is an extension of the algorithm in scipy.stats.mode.
Parameters
----------
a : array_like
n-dimensional array of which to find mode(s).
w : array_like
n-dimensional array of weights for each value
axis : int, optional
Axis along which to operate. Default is 0, i.e. the first axis.
Returns
-------
vals : ndarray
Array of modal values.
score : ndarray
Array of weighted counts for each mode.
Examples
--------
>>> from sklearn.utils.extmath import weighted_mode
>>> x = [4, 1, 4, 2, 4, 2]
>>> weights = [1, 1, 1, 1, 1, 1]
>>> weighted_mode(x, weights)
(array([ 4.]), array([ 3.]))
The value 4 appears three times: with uniform weights, the result is
simply the mode of the distribution.
>>> weights = [1, 3, 0.5, 1.5, 1, 2] # deweight the 4's
>>> weighted_mode(x, weights)
(array([ 2.]), array([ 3.5]))
The value 2 has the highest score: it appears twice with weights of
1.5 and 2: the sum of these is 3.
See Also
--------
scipy.stats.mode
"""
if axis is None:
a = np.ravel(a)
w = np.ravel(w)
axis = 0
else:
a = np.asarray(a)
w = np.asarray(w)
axis = axis
if a.shape != w.shape:
w = np.zeros(a.shape, dtype=w.dtype) + w
scores = np.unique(np.ravel(a)) # get ALL unique values
testshape = list(a.shape)
testshape[axis] = 1
oldmostfreq = np.zeros(testshape)
oldcounts = np.zeros(testshape)
for score in scores:
template = np.zeros(a.shape)
ind = (a == score)
template[ind] = w[ind]
counts = np.expand_dims(np.sum(template, axis), axis)
mostfrequent = np.where(counts > oldcounts, score, oldmostfreq)
oldcounts = np.maximum(counts, oldcounts)
oldmostfreq = mostfrequent
return mostfrequent, oldcounts
def pinvh(a, cond=None, rcond=None, lower=True):
"""Compute the (Moore-Penrose) pseudo-inverse of a hermetian matrix.
Calculate a generalized inverse of a symmetric matrix using its
eigenvalue decomposition and including all 'large' eigenvalues.
Parameters
----------
a : array, shape (N, N)
        Real symmetric or complex hermitian matrix to be pseudo-inverted
cond : float or None, default None
Cutoff for 'small' eigenvalues.
Singular values smaller than rcond * largest_eigenvalue are considered
zero.
If None or -1, suitable machine precision is used.
rcond : float or None, default None (deprecated)
Cutoff for 'small' eigenvalues.
Singular values smaller than rcond * largest_eigenvalue are considered
zero.
If None or -1, suitable machine precision is used.
lower : boolean
Whether the pertinent array data is taken from the lower or upper
triangle of a. (Default: lower)
Returns
-------
B : array, shape (N, N)
Raises
------
LinAlgError
If eigenvalue does not converge
Examples
--------
>>> import numpy as np
>>> a = np.random.randn(9, 6)
>>> a = np.dot(a, a.T)
>>> B = pinvh(a)
>>> np.allclose(a, np.dot(a, np.dot(B, a)))
True
>>> np.allclose(B, np.dot(B, np.dot(a, B)))
True
"""
a = np.asarray_chkfinite(a)
s, u = linalg.eigh(a, lower=lower)
if rcond is not None:
cond = rcond
if cond in [None, -1]:
t = u.dtype.char.lower()
factor = {'f': 1E3, 'd': 1E6}
cond = factor[t] * np.finfo(t).eps
# unlike svd case, eigh can lead to negative eigenvalues
above_cutoff = (abs(s) > cond * np.max(abs(s)))
psigma_diag = np.zeros_like(s)
psigma_diag[above_cutoff] = 1.0 / s[above_cutoff]
return np.dot(u * psigma_diag, np.conjugate(u).T)
def cartesian(arrays, out=None):
"""Generate a cartesian product of input arrays.
Parameters
----------
arrays : list of array-like
1-D arrays to form the cartesian product of.
out : ndarray
Array to place the cartesian product in.
Returns
-------
out : ndarray
2-D array of shape (M, len(arrays)) containing cartesian products
formed of input arrays.
Examples
--------
>>> cartesian(([1, 2, 3], [4, 5], [6, 7]))
array([[1, 4, 6],
[1, 4, 7],
[1, 5, 6],
[1, 5, 7],
[2, 4, 6],
[2, 4, 7],
[2, 5, 6],
[2, 5, 7],
[3, 4, 6],
[3, 4, 7],
[3, 5, 6],
[3, 5, 7]])
"""
arrays = [np.asarray(x) for x in arrays]
shape = (len(x) for x in arrays)
dtype = arrays[0].dtype
ix = np.indices(shape)
ix = ix.reshape(len(arrays), -1).T
if out is None:
out = np.empty_like(ix, dtype=dtype)
for n, arr in enumerate(arrays):
out[:, n] = arrays[n][ix[:, n]]
return out
def svd_flip(u, v, u_based_decision=True):
"""Sign correction to ensure deterministic output from SVD.
Adjusts the columns of u and the rows of v such that the loadings in the
columns in u that are largest in absolute value are always positive.
Parameters
----------
u, v : ndarray
u and v are the output of `linalg.svd` or
`sklearn.utils.extmath.randomized_svd`, with matching inner dimensions
so one can compute `np.dot(u * s, v)`.
u_based_decision : boolean, (default=True)
If True, use the columns of u as the basis for sign flipping. Otherwise,
use the rows of v. The choice of which variable to base the decision on
is generally algorithm dependent.
Returns
-------
u_adjusted, v_adjusted : arrays with the same dimensions as the input.
"""
if u_based_decision:
# columns of u, rows of v
max_abs_cols = np.argmax(np.abs(u), axis=0)
signs = np.sign(u[max_abs_cols, xrange(u.shape[1])])
u *= signs
v *= signs[:, np.newaxis]
else:
# rows of v, columns of u
max_abs_rows = np.argmax(np.abs(v), axis=1)
signs = np.sign(v[xrange(v.shape[0]), max_abs_rows])
u *= signs
v *= signs[:, np.newaxis]
return u, v
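# Hedged usage sketch for svd_flip: illustrative helper, not part of the public
# API. Flipping only changes paired signs, so the reconstruction of the original
# matrix is unchanged while the sign convention becomes deterministic.
def _svd_flip_example():
    rng = np.random.RandomState(0)
    X = rng.randn(6, 4)
    U, s, V = linalg.svd(X, full_matrices=False)
    U_flipped, V_flipped = svd_flip(U.copy(), V.copy())
    return np.allclose(np.dot(U_flipped * s, V_flipped), X)  # True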
def log_logistic(X, out=None):
"""Compute the log of the logistic function, ``log(1 / (1 + e ** -x))``.
This implementation is numerically stable because it splits positive and
negative values::
-log(1 + exp(-x_i)) if x_i > 0
x_i - log(1 + exp(x_i)) if x_i <= 0
For the ordinary logistic function, use ``sklearn.utils.fixes.expit``.
Parameters
----------
X: array-like, shape (M, N) or (M, )
Argument to the logistic function
out: array-like, shape: (M, N) or (M, ), optional:
Preallocated output array.
Returns
-------
out: array, shape (M, N) or (M, )
Log of the logistic function evaluated at every point in x
Notes
-----
See the blog post describing this implementation:
http://fa.bianp.net/blog/2013/numerical-optimizers-for-logistic-regression/
"""
is_1d = X.ndim == 1
X = np.atleast_2d(X)
X = check_array(X, dtype=np.float64)
n_samples, n_features = X.shape
if out is None:
out = np.empty_like(X)
_log_logistic_sigmoid(n_samples, n_features, X, out)
if is_1d:
return np.squeeze(out)
return out
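# Hedged usage sketch for log_logistic: illustrative helper, not part of the
# public API. For very negative inputs the naive -log(1 + exp(-x)) overflows in
# the exponential, while log_logistic stays finite and close to x itself.
def _log_logistic_example():
    x = np.array([-1000.0, 0.0, 1000.0])
    return log_logistic(x)  # approximately [-1000., -log(2), 0.]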
def softmax(X, copy=True):
"""
Calculate the softmax function.
The softmax function is calculated by
np.exp(X) / np.sum(np.exp(X), axis=1)
This will cause overflow when large values are exponentiated.
Hence the largest value in each row is subtracted from each data
point to prevent this.
Parameters
----------
X: array-like, shape (M, N)
Argument to the logistic function
copy: bool, optional
Copy X or not.
Returns
-------
out: array, shape (M, N)
Softmax function evaluated at every point in x
"""
if copy:
X = np.copy(X)
max_prob = np.max(X, axis=1).reshape((-1, 1))
X -= max_prob
np.exp(X, X)
sum_prob = np.sum(X, axis=1).reshape((-1, 1))
X /= sum_prob
return X
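# Hedged usage sketch for softmax: illustrative helper, not part of the public
# API. Each row of the output sums to one even when the raw inputs are large
# enough to overflow a naive exponential.
def _softmax_example():
    X = np.array([[1000.0, 1000.0], [0.0, 1000.0]])
    P = softmax(X)
    return P, P.sum(axis=1)  # first row is [0.5, 0.5]; every row sums to 1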
def safe_min(X):
"""Returns the minimum value of a dense or a CSR/CSC matrix.
    Adapted from http://stackoverflow.com/q/13426580
"""
if issparse(X):
if len(X.data) == 0:
return 0
m = X.data.min()
return m if X.getnnz() == X.size else min(m, 0)
else:
return X.min()
def make_nonnegative(X, min_value=0):
"""Ensure `X.min()` >= `min_value`."""
min_ = safe_min(X)
if min_ < min_value:
if issparse(X):
raise ValueError("Cannot make the data matrix"
" nonnegative because it is sparse."
" Adding a value to every entry would"
" make it no longer sparse.")
X = X + (min_value - min_)
return X
def _batch_mean_variance_update(X, old_mean, old_variance, old_sample_count):
"""Calculate an average mean update and a Youngs and Cramer variance update.
From the paper "Algorithms for computing the sample variance: analysis and
recommendations", by Chan, Golub, and LeVeque.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Data to use for variance update
old_mean : array-like, shape: (n_features,)
old_variance : array-like, shape: (n_features,)
old_sample_count : int
Returns
-------
updated_mean : array, shape (n_features,)
updated_variance : array, shape (n_features,)
updated_sample_count : int
References
----------
T. Chan, G. Golub, R. LeVeque. Algorithms for computing the sample variance:
        analysis and recommendations, The American Statistician, Vol. 37, No. 3, pp. 242-247
"""
new_sum = X.sum(axis=0)
new_variance = X.var(axis=0) * X.shape[0]
old_sum = old_mean * old_sample_count
n_samples = X.shape[0]
updated_sample_count = old_sample_count + n_samples
partial_variance = old_sample_count / (n_samples * updated_sample_count) * (
n_samples / old_sample_count * old_sum - new_sum) ** 2
unnormalized_variance = old_variance * old_sample_count + new_variance + \
partial_variance
return ((old_sum + new_sum) / updated_sample_count,
unnormalized_variance / updated_sample_count,
updated_sample_count)
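# Hedged usage sketch for _batch_mean_variance_update: illustrative helper, not
# part of the public API. Folding a second batch into the running statistics
# should reproduce the mean and variance of the concatenated data.
def _batch_mean_variance_update_example():
    rng = np.random.RandomState(0)
    X1, X2 = rng.randn(50, 3), rng.randn(30, 3)
    mean, var, count = _batch_mean_variance_update(
        X2, X1.mean(axis=0), X1.var(axis=0), X1.shape[0])
    full = np.vstack((X1, X2))
    return (np.allclose(mean, full.mean(axis=0)),
            np.allclose(var, full.var(axis=0)),
            count == full.shape[0])  # (True, True, True)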
def _deterministic_vector_sign_flip(u):
"""Modify the sign of vectors for reproducibility
Flips the sign of elements of all the vectors (rows of u) such that
the absolute maximum element of each vector is positive.
Parameters
----------
u : ndarray
Array with vectors as its rows.
Returns
-------
u_flipped : ndarray with same shape as u
Array with the sign flipped vectors as its rows.
"""
max_abs_rows = np.argmax(np.abs(u), axis=1)
signs = np.sign(u[range(u.shape[0]), max_abs_rows])
u *= signs[:, np.newaxis]
return u
| bsd-3-clause |
Phlos/LASIF_scripts | analyse_misfits.py | 1 | 22062 | # scripts to analyse misfit
import matplotlib.pyplot as plt
import numpy as np
import os
from lasif.components.project import Project
from lasif import LASIFNotFoundError
from lasif import LASIFAdjointSourceCalculationError
def calc_misfit_for_channel(comm, event_name, iteration_name, channel_name):
'''
calculate the misfit for a specific channel
'''
# obtain information about the windows
wm = comm.windows.get(event_name, iteration_name)
it = comm.iterations.get(iteration_name)
event_weight = it.events[event_name]["event_weight"]
wins = wm.get(channel_name)
station_weight = it.events[event_name]["stations"][".".join(channel_name.split(".")[:2])]["station_weight"]
channel_misfit = 0
total_channel_weight = 0
for win in wins.windows:
try:
misfit = win.misfit_value
except LASIFAdjointSourceCalculationError:
continue
except LASIFNotFoundError as e:
print str(e)
continue
channel_misfit += misfit * win.weight
total_channel_weight += win.weight
# rare, but sometimes all wins for a certain channel fail the calculation
if total_channel_weight == 0:
print 'total channel weight is zero'
# make sure the misfits are consistent with adj src calculation
channel_misfit *= event_weight * station_weight / total_channel_weight
print "channel {1} misfit is {0:.5f}".format(channel_misfit, channel_name)
def calc_misfit_per_event(comm, iteration_name, event_name):
# loop over windows to calculate total misfit
# obtain information about the windows
wm = comm.windows.get(event_name, iteration_name)
# obtain information about all stations for this event
#stations = comm.query.get_all_stations_for_event(event_name)
it = comm.iterations.get(iteration_name)
event_weight = it.events[event_name]["event_weight"]
channels = wm.list()
if len(channels) == 0:
print 'Warning! event '+event_name+' has no windows for iteration '+iteration_name+' ! '
return np.nan
event_misfit = 0
for channel in channels:
wins = wm.get(channel)
station_weight = it.events[event_name]["stations"][".".join(channel.split(".")[:2])]["station_weight"]
channel_misfit = 0
total_channel_weight = 0
for win in wins.windows:
try:
misfit = win.misfit_value
except LASIFAdjointSourceCalculationError:
continue
except LASIFNotFoundError as e:
print str(e)
continue
channel_misfit += misfit * win.weight
total_channel_weight += win.weight
# rare, but sometimes all wins for a certain channel fail the calculation
if total_channel_weight == 0:
continue
# make sure the misfits are consistent with adj src calculation
channel_misfit *= event_weight * station_weight / total_channel_weight
#print "channel {1} misfit is {0:.2f}".format(channel_misfit, channel)
event_misfit += channel_misfit
return event_misfit
'''
# station list for windows
window_stations = wm.list()
if len(window_stations) == 0:
print "Event "+event_name+" contained no windows in iteration "+iteration+" in project "+Lasif_path
return 0.
#- for every station for that particular event, obtain misfits
misfit_sum = 0.
for stationID, value in stations.iteritems(): # Loop as dicts are unordered.
# get windows for station
wins = wm.get_windows_for_station(stationID)
# get window info
# get the misfits of all windows
# determine number of windows
sz = np.size(wins)
misfit_sta = 0.
for ii in np.arange(sz):
misfit_val = wins[ii].windows[0].misfit_value # non-normalised
misfit_wt = wins[ii].windows[0].weight # window weight?
misfit_weighted = misfit_val * misfit_wt
misfit_sta += misfit_weighted
misfit_sum += misfit_sta
'''
    # return misfit_sum  # belongs to the commented-out implementation above
def calc_misfit(Lasif_path='./EMed_full/', iteration_name='0_new', save=False, verbose=True):
'''
For a given Lasif project and iteration name, calculate the total misfit
'''
# import the project communicator
comm = Project(Lasif_path).comm
it = comm.iterations.get(iteration_name)
# for each event, calculate event misfit (loop over windows like in plot_misfit_map etc)
total_misfit = 0.
dict_event_misfits = {}
for event_name in sorted(it.events.keys()):
if verbose:
print ' ...event '+event_name
event_misfit = calc_misfit_per_event(comm, iteration_name, event_name)
dict_event_misfits[event_name] = event_misfit
total_misfit += event_misfit
if save:
outfile = 'iter_misfit.'+iteration_name+'.npy'
print 'saving file to '+outfile
np.save(outfile, (total_misfit, dict_event_misfits) )
return total_misfit, dict_event_misfits
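# Hedged usage sketch: the project path and iteration name below are the
# defaults used elsewhere in this script and are only placeholders; a real
# LASIF project must exist at that location for the call to succeed.
def _example_total_misfit():
    total, per_event = calc_misfit(Lasif_path='./EMed_full/',
                                   iteration_name='0_new',
                                   save=False, verbose=False)
    # per_event maps each event name to its (possibly NaN) event misfit
    return total, per_event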
def calc_misfit_development_dict(misfit_dict, event_list, iter_list):
'''
Dictionary transform tool. Super dumb.
Make a dict of misfits with order misfit_dict[event_name][iteration_name]
from one with order misfit_dict[iteration_name][event_name]
input:
misfit_dict: stored as misfit_dict[iter_name][event_name]
'''
dict_misfits_per_event = {}
for event_name in event_list:
misf_per_event = {}
for iteration_name in iter_list:
# the index [1] is because index [0] stores the total iteration misfit
if event_name in misfit_dict[iteration_name][1].keys():
misf_per_event[iteration_name] = misfit_dict[iteration_name][1][event_name]
if any(misf_per_event):
# add to dictionary if there are any iterations with this event
dict_misfits_per_event[event_name] = misf_per_event
return dict_misfits_per_event
#def add_sisyphus_misfit_log(ax1, filename, normalise):
'''
This function does not work well
'''
#misfit_log = [float(x.split(' ')[-1].strip()) for x in open(filename).readlines()]
#iters=[]
#n_full_iters = len(misfit_log)/4
#for ii in range(n_full_iters):
#for jj in range(5)[1:]:
#iters.append(ii+0.1*jj)
##print 'iters len {}, misfit log len {}'.format(len(iters), len(misfit_log))
## plotting
#print 'value of misfit_log[0] is {}'.format(misfit_log[0])
#print 'value of misfit_log[1] is {}'.format(misfit_log[1])
#if normalise:
#misfit_log = [x / misfit_log[0] for x in misfit_log]
#ax1.plot(iters, misfit_log, label='SISYPHUS misfits incl. test steps')
#ax1.plot(iters[3::4], misfit_log[3::4], marker='o', label='SISYPHUS misfits of _4 models')
#return iters, misfit_log
def plot_misfit_development(Lasif_path='EMed_full', iter_list=['10_new', '19_4'],
misfit_dict_in={}, highlight_event=[],
normalise=True, sort_alpha_by='init_misfit', verbose=True,
save=False, savepath='./plots/'):
'''
Plot the misfit developement as a function of iteration
for the supplied list of iterations (or 'ALL')
INPUT:
lasif_path the path to the Lasif project
iter_list = [] a list that contains the iterations to be
compared. They will be shown in the order supplied
misfit_dict={} a list of pre-calculated misfits per iter. If it
is empty, the misfits will be calculated
normalise should all misfits be normalised by their initial
value.
sort_alpha_by how should the event misfit lines be shaded?
verbose How much output is given
save True or False; whether the figure should be plotted
or saved.
savepath: location where plot is saved ('./plots/' by default)
OUTPUT:
dict_misfits_per_event : a dictionary of misfits per event
ordered misfit{event}{iter}
misfit_dict : same, but ordered misfit{iter}{event}
{a figure} : a figure with misfit development per
event as a function of iteration.
'''
##############################
### some preparation
##############################
# make sure the function doesn't accidentally work on external vars.
misfit_dict = misfit_dict_in.copy()
# import the project communicator & get event list
comm = Project(Lasif_path).comm
event_list = sorted(comm.events.list())
############################################
### Loading/calculating event misfits
############################################
# loop over iters to check if misfit exists
for it in iter_list:
misfit_npy_file_name='iter_misfit.'+it+'.npy'
if it in misfit_dict.keys():
print 'Misfit for iteration '+it+' already exists'
continue
elif os.path.exists(misfit_npy_file_name):
print 'Loading misfit for iteration '+it+' from file'
misfit_dict[it] = np.load(misfit_npy_file_name)
else:
print 'Calculating misfit for iteration '+it
misfit_dict[it] = calc_misfit(Lasif_path, it, save=True, verbose=verbose)
############################################
### Make new dict with misf[event][iter]
### instead of misf[iter][event]
############################################
# (This method is objectively rather stupid...
#+ I wish I had a table or something)
# invert the dictionary
#if not any(dict_misfits_per_event):
dict_misfits_per_event = calc_misfit_development_dict(misfit_dict, event_list, iter_list)
# loop over iterations to obtain iteration misfit list
iter_misfit_total=[]
for iter_name in iter_list:
if iter_name in misfit_dict.keys():
iter_misfit_total.append(misfit_dict[iter_name][0])
else:
iter_misfit_total.append(np.nan)
# loop over events to gather the development of misfits
#dict_misfits_per_event = {}
# also save some other event properties for plotting sorting
misf1 = {}; maxmisf1 = 0;
event_mags={}; maxmag = 0; minmag=15
evmag_normd = {}; maxmag_normd = 0
no_misfits = []
for event_name in event_list:
if event_name in dict_misfits_per_event.keys():
misf_per_event = dict_misfits_per_event[event_name]
else:
print 'event '+event_name+' has no misfits for any of these iterations.'
no_misfits.append(event_name)
# make a list of events that are in at least one of the iterations
event_list_reduced = [x for x in event_list if x not in no_misfits]
# calculate some properties
for event_name in event_list_reduced:
# first misfit determination
for iter_name in iter_list:
if event_name in misfit_dict[iter_name][1].keys():
misf1[event_name] = misfit_dict[iter_name][1][event_name]
break
maxmisf1 = max(misf1[event_name], maxmisf1)
# event magnitude
event_mags[event_name] = comm.events.get(event_name)['magnitude']
maxmag = max(event_mags[event_name], maxmag)
minmag = min(event_mags[event_name], minmag)
# number of stations
nsta = len(comm.query.get_all_stations_for_event(event_name))
# normalised event magnitude
evmag_normd[event_name] = event_mags[event_name] / nsta
maxmag_normd = max(maxmag_normd, evmag_normd[event_name])
#------------------------
# PLOTTING
#------------------------
if save:
plt.ioff()
fig, ax1 = plt.subplots(1, figsize=(12.,10.))
# plot in figure
plotline = {};
firstline=True;
for event_name in event_list_reduced:
# transform dict to lists (for plotting)
y=[]
for itname in iter_list:
if itname in dict_misfits_per_event[event_name]:
y.append(dict_misfits_per_event[event_name][itname])
else:
print 'Warning! event '+event_name+' has no misfit for iter '+itname
y.append(np.nan)
x = np.arange(len(iter_list)) + 0.5
# plot real or normalised misfit
if normalise:
plotline[event_name] = ax1.plot(x,y/misf1[event_name], color='black')
labely=('normalised misfit')
else:
plotline[event_name] = ax1.plot(x,y, color='black')
labely=('misfit')
# only give one event a label (not 83 black lines)
if firstline:
plotline[event_name][0].set_label('misfits per event, shaded by '+sort_alpha_by)
firstline=False
# shade the lines according to sort_alpha_by
if sort_alpha_by=='init_misfit':
alpha_value = misf1[event_name]/maxmisf1
elif sort_alpha_by=='event_mag':
alpha_value = (event_mags[event_name]-minmag)/(maxmag-minmag)
alpha_value = max(alpha_value, 0.1)
#print 'alpha for '+event_name+' is {0:.2f} because mag here is {1:.1f}'.format(alpha_value, event_mags[event_name])
elif sort_alpha_by=='event_mag_by_nsta':
alpha_value = (evmag_normd[event_name] / maxmag_normd )
plotline[event_name][0].set_alpha(alpha_value)
# give the highlighted event special looks
if event_name in highlight_event:
plotline[event_name][0].set_color('green')
plotline[event_name][0].set_linewidth(2)
plotline[event_name][0].set_label('event '+event_name)
# Plot total misfit development
if normalise:
ax1.plot(x,iter_misfit_total/iter_misfit_total[0], color='red', linewidth=2, label='total misfit')
eilim = ax1.get_ylim()
ax1.set_ylim(max(eilim[0], 0.7), min(eilim[1], max(iter_misfit_total/iter_misfit_total[0]) * 1.1))
else:
ax1.plot(x,iter_misfit_total, color='red', linewidth=2, label='total misfit')
eilim = ax1.get_ylim()
ax1.set_ylim(max(eilim[0], 0.7*min(iter_misfit_total)), min(eilim[1], max(iter_misfit_total) * 1.1))
# Prettify the plot
ax1.set_xticks(x)
ax1.set_xticklabels(iter_list)
ax1.set_xlim(0,len(iter_list))
ax1.set_xlabel('iteration name')
ax1.set_ylabel(labely)
ax1.set_title('Misfit development across iterations')
plt.legend(loc='upper left')
# actual drawing
if save:
if normalise:
normorno='.normalised'
else:
normorno=''
if len(highlight_event)>0:
hili='.highlight-'+str(highlight_event)
else:
hili=''
savename='misfit_development.'+iter_list[0]+'-to-'+iter_list[-1]+normorno+'.alpha_sorted_'+sort_alpha_by+hili+'.png'
savepath=os.path.join(savepath,savename)
print 'saving figure to '+savepath
plt.savefig(savepath)
plt.close()
else:
plt.ion()
plt.show()
plt.ioff()
return dict_misfits_per_event, misfit_dict
def plot_misfit_log_sisyphus(filename, iter_list,
save=False, savepath='./plots/', inversion_name='inv_20170731'):
'''
Plot the misfit development based on Alexey's misfit
'''
    misfit_log = [float(x.split(' ')[-1].strip()) for x in open(filename).readlines()]
iters=[]
n_full_iters = len(misfit_log)/4
for ii in range(n_full_iters):
for jj in range(5)[1:]:
iters.append(ii+0.1*jj)
print 'iters len {}, misfit log len {}'.format(len(iters), len(misfit_log))
# plotting
fig, ax1 = plt.subplots(1, figsize=(12.,10.))
ax1.plot(iters, misfit_log, label='misfits incl. test steps')
ax1.plot(iters[3::4], misfit_log[3::4], marker='o', label='misfits of _4 models')
# Prettify the plot
ax1.set_xlabel('iteration')
ax1.set_ylabel('total misfit')
ax1.set_title('Misfit development from SISYPHUS misfit.log for '+inversion_name)
ax1.set_xticks(iters[3::4])
ax1.set_xticklabels(iter_list)
plt.legend()
# Actual displaying
if save:
savename=inversion_name+'.misfit_development.sisyphus.png'
savepath=os.path.join(savepath,savename)
print 'saving figure to '+savepath
plt.savefig(savepath)
plt.close()
else:
plt.ion()
plt.show()
plt.ioff()
def get_good_bad_events(misfit_dict, iter1, iter2, goodorbad=None):
'''
    Print out the events that are good (misfit decrease) or bad (misfit increase)
'''
good_ones = []; bad_ones=[]
for event_name in misfit_dict[iter1][1].keys():
m_init = misfit_dict[iter1][1][event_name]
m_final= misfit_dict[iter2][1][event_name]
if m_final < m_init:
good_ones.append(event_name)
elif m_final > m_init:
bad_ones.append(event_name)
if goodorbad=='bad':
for event_name in bad_ones:
m_init = misfit_dict[iter1][1][event_name]
m_final= misfit_dict[iter2][1][event_name]
incr = m_final - m_init
rel_incr = (m_final - m_init) / m_init
print 'event {0:<9} is bad: '.format(event_name)+ \
'misfit went {0:6.2f} --> {1:6.2f}'.format(m_init, m_final) +\
'({0:+5.1f} = {1:+5.1f}%)'.format(incr, 100.*rel_incr)
elif goodorbad=='good':
for event_name in good_ones:
m_init = misfit_dict[iter1][1][event_name]
m_final= misfit_dict[iter2][1][event_name]
incr = m_final - m_init
rel_incr = (m_final - m_init) / m_init
print 'event {0:<9} is good: '.format(event_name)+ \
'misfit went {0:6.2f} --> {1:6.2f}'.format(m_init, m_final) +\
'({0:+5.1f} = {1:5.1f}%)'.format(incr, 100.*rel_incr)
return good_ones, bad_ones
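# Hedged usage sketch: build the misfit dictionary expected by
# get_good_bad_events from calc_misfit for two iterations and list the events
# whose misfit increased. The project path and iteration names are placeholders
# taken from the defaults used elsewhere in this script.
def _example_good_bad_events():
    misfit_dict = {}
    for iter_name in ['10_new', '19_4']:
        misfit_dict[iter_name] = calc_misfit('./EMed_full/', iter_name,
                                             save=False, verbose=False)
    good, bad = get_good_bad_events(misfit_dict, '10_new', '19_4',
                                    goodorbad='bad')
    return good, bad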
def plot_iter_correlations(Lasif_path, iter_name, prop1, prop2, save=False, savepath='./plots/', loglogplot=False, no_output=True):
'''
Plot for example the correlation between # of windows and total misfit
Properties calculated for each event are:
nsta : # of stations available
nwinsta : # of windows in iter
nwinsta/nsta : percentage of stations with windows
event_magnitude : event magnitude
total_misfit : total misfit in iter
event_magnitude*nsta : event mangitude corrected for the number of stations available
prop1 and prop2 can be any of the above
'''
# check whether input is alright
allowed_props = ['total_misfit', 'nsta', 'nwinsta', 'event_magnitude', 'nwinsta/nsta', 'event_magnitude*nsta']
for prop in [prop1, prop2]:
if prop not in allowed_props:
print 'ERROR: property {} not in allowed properties, i.e. {}'.format(prop, ', '.join(allowed_props))
return
from LASIF_scripts.external_fn.Annotations import AnnoteFinder
# Lasif preparation
from lasif.components.project import Project
comm = Project(Lasif_path).comm
it = comm.iterations.get(iter_name)
events_in_iter = sorted(it.events.keys())
## ====================
## Gathering properties
# misfits - dict
# Obtaining misfits in the usable format (if misfits are required
if 'total_misfit' in [prop1, prop2]:
misfit_npy_file_name='iter_misfit.'+iter_name+'.npy'
if os.path.exists(misfit_npy_file_name):
print 'Loading misfit for iteration '+iter_name+' from file'
_, misfits_per_event = np.load(misfit_npy_file_name)
elif Lasif_path:
print 'Calculating misfit for iteration '+iter_name
_, misfits_per_event = calc_misfit(Lasif_path, iter_name, save=True, verbose=True)
else:
print 'Error: need to calculate misfits, but no lasif path supplied'
return
# make into list
misfits = []
for event_name in events_in_iter:
misfits.append(misfits_per_event[event_name])
    # number of stations per event - list
nsta = []
for event_name in events_in_iter:
itev = it.events[event_name]
#nsta[event_name] = len(itev['stations'])
nsta.append(len(itev['stations']))
    # number of stations with windows per event - list
nwinsta = []
for event_name in events_in_iter:
wm = comm.windows.get(event_name, iter_name)
window_channels = wm.list()
window_stations = ['.'.join(chan.split('.')[:-2]) for chan in window_channels]
window_stations = sorted(list(set(window_stations)))
# nwinsta[event_name] = len(window_stations)
nwinsta.append(len(window_stations))
# event magnitude
mags = []
for event_name in events_in_iter:
ev = comm.events.get(event_name)
# mags[event_name] = ev['magnitude']
mags.append(ev['magnitude'])
## ====================
## Plotting properties
propslist = []; labels = []
for prop in [prop1, prop2]:
if prop == 'total_misfit':
propslist.append(misfits)
labels.append('total misfit for event')
elif prop == 'nsta':
propslist.append(nsta)
labels.append('number of stations for event')
elif prop == 'nwinsta':
propslist.append(nwinsta)
labels.append('number of stations with windows for event')
elif prop == 'event_magnitude':
propslist.append(mags)
labels.append('event magnitude')
elif prop == 'nwinsta/nsta':
propslist.append([100.*x/y for x,y in zip(nwinsta, nsta)])
labels.append('percentage of stations with windows')
elif prop == 'event_magnitude*nsta':
propslist.append([x*y for x,y in zip(mags, nsta)])
labels.append('event magnitude * number of stations for event')
if not len(propslist) == 2:
print 'ERROR: not two property lists?: {}'.format(len(propslist))
return
# plot
fig, ax1 = plt.subplots(1, figsize=(12.,10.))
# x = range(10)
# y = range(10)
# annotes = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j']
# fig, ax = plt.subplots()
# ax.scatter(x,y)
# af = AnnoteFinder(x,y, annotes, ax=ax)
# fig.canvas.mpl_connect('button_press_event', af)
# plt.show()
ax1.scatter(propslist[0], propslist[1], marker='o', edgecolor='none')
ax1.grid(True)
af = AnnoteFinder(propslist[0], propslist[1], events_in_iter, ax=ax1)
ax1.set_xlabel(labels[0])
ax1.set_ylabel(labels[1])
ax1.set_title('Correlations for {}'.format(iter_name))
if loglogplot:
ax1.set_xscale('log')
ax1.set_yscale('log')
# Make the annotations visible upon clicking a point
fig.canvas.mpl_connect('button_press_event', af)
# Actual displaying
if save:
savename='iter_{}.correlations.{}-vs-{}.png'.format(iter_name, prop1, prop2)
savepath=os.path.join(savepath,savename)
print 'saving figure to '+savepath
plt.savefig(savepath)
plt.close()
else:
plt.ion()
plt.show()
plt.ioff()
if not no_output:
return fig, ax1
| gpl-3.0 |
etyephe/pygom | pygom/model/deterministic.py | 1 | 73661 | """
.. moduleauthor:: Edwin Tye <[email protected]>
This module is defined such that operations on the ode are all gathered
in one place. Future extensions of these operations should be added here
"""
__all__ = ['DeterministicOde']
import copy
import io
from numbers import Number
# import sympy.core.numbers
import numpy as np
import sympy
import scipy.linalg
from sympy.core.function import diff
from .base_ode_model import BaseOdeModel
from ._model_errors import ArrayError, InputError, \
IntegrationError, InitializeError
from ._model_verification import simplifyEquation
# import ode_utils as myUtil
# from .ode_utils import shapeAdjust, compileCode
from . import ode_utils
from . import _ode_composition
class DeterministicOde(BaseOdeModel):
"""
    This contains the interface and operations built on top of the
    already defined set of ode
Parameters
----------
state: list
A list of states (string)
param: list
A list of the parameters (string)
derived_param: list
A list of the derived parameters (tuple of (string,string))
transition: list
A list of transition (:class:`.Transition`)
birth_death: list
A list of birth or death process (:class:`.Transition`)
ode: list
A list of ode (:class:`Transition`)
"""
def __init__(self,
state=None,
param=None,
derived_param=None,
transition=None,
birth_death=None,
ode=None):
"""
Constructor that is built on top of a BaseOdeModel
"""
super(DeterministicOde, self).__init__(state,
param,
derived_param,
transition,
birth_death,
ode)
self._ode = None
self._odeCompile = None
# and we assume initially that we don't want the Jacobian
self._Jacobian = None
self._JacobianCompile = None
# wtf... why!
self._diffJacobian = None
self._diffJacobianCompile = None
# Information... yea, what else would you expect
self._Grad = None
self._GradCompile = None
# more information....
self._GradJacobian = None
self._GradJacobianCompile = None
# More information!! ROAR! I think this is useless though
# because this is the hessian of the ode which most of the
# time we don't really care
self._Hessian = None
self._HessianWithParam = None
# all the symbols that we need in order to compile
# s = state + t
# sp = state + t + param
# the latter is required to compile the symbolic code
# to the numeric setting
self._s = self._stateList + [self._t]
self._sp = self._s + self._paramList
# information regarding the integration. We want an internal
# storage so we can invoke the plot method within the same class
self._t0 = None
self._x0 = None
self._odeOutput = None
self._odeSolution = None
self._odeTime = None
self._intName = None
self._paramValue = [0]*len(self._paramList)
# the class for shape re-adjustment. We would always like to
# operate in the matrix form if possible as it takes up less
# memory when operating, but the output is required to be of
# the vector form
self._SAUtil = ode_utils.shapeAdjust(self.num_state, self.num_param)
# compile the code. Note that we need the class because we
# compile both the formatted and unformatted version.
self._SC = ode_utils.compileCode()
def __eq__(self, other):
if isinstance(other, DeterministicOde):
if self.get_ode_eqn() == other.get_ode_eqn():
return True
else:
return False
else:
return False
def __repr__(self):
return "DeterministicOde" + self._get_model_str()
########################################################################
#
# Information about the ode
#
########################################################################
# TODO: check and see whether it is linear correctly!
def linear_ode(self):
"""
To check whether the input ode is linear
Returns
-------
bool
True if it is linear, False otherwise
"""
# we always assume that it is true to begin with
# if the ode is linear, then a numerical integration
# scheme is a waste of time
is_linear = True
# if we do not current possess the jacobian, we find it! ROAR!
if self._Jacobian is None:
self.get_jacobian_eqn()
# a really stupid way to determining whether it is linear.
# have not figured out a better way yet...
a = self._Jacobian.atoms()
for s in self._stateDict.values():
if s in a:
is_linear = False
# for i in range(0, self._numState):
# for j in range(0, self._numState):
# for k in range(0, self._numState):
# if self._Jacobian[i,j].has(self._stateList[k]):
# isLinear = False
return is_linear
# TODO: To check whether we have a DAE or just an ODE
# def isDAE(self):
# return None
# TODO: To find out whether there are situation where the
# jacobian is actually singular, i.e. if it can be a DAE
# def canDAE(self,x0,t0):
########################################################################
#
# Information about the ode
#
########################################################################
def get_ode_eqn(self, param_sub=False):
"""
Find the algebraic equations of the ode system.
Returns
-------
:class:`sympy.matrices.matrices`
ode in matrix form
"""
if self._ode is None:
self._findOde()
elif self._hasNewTransition:
self._findOde()
else:
pass
if param_sub:
return self._ode.subs(self._parameters)
else:
return self._ode
def print_ode(self, latex_output=False):
"""
Prints the ode in symbolic form onto the screen/console in actual
symbols rather than the word of the symbol.
Parameters
----------
latex_output: bool, optional
Defaults to false which prints the equation in terms of symbols,
if set to yes then the formula in terms of latex equations will
be printed onto the screen.
"""
A = self.get_ode_eqn()
B = sympy.zeros(A.rows, 2)
for i in range(A.shape[0]):
B[i, 0] = sympy.symbols('d' + str(self._stateList[i]) + '/dt=')
B[i, 1] = A[i]
if latex_output:
print(sympy.latex(B, mat_str="array", mat_delim=None,
inv_trig_style='full'))
else:
sympy.pretty_print(B)
def _findOde(self):
# lets see how we have defined our ode
# if it is explicit, then we go straight to the easy case
if self._explicitOde:
# we have explicit ode and we should obtain them directly
super(DeterministicOde, self)._computeOdeVector()
else:
# super(DeterministicOde, self)._computeTransitionMatrix()
# super(DeterministicOde, self)._computeTransitionVector()
# convert the transition matrix into the set of ode
self._ode = sympy.zeros(self.num_state, 1)
pureTransitionList = self._getAllTransition(pureTransitions=True)
from_list, to, eqn = self._unrollTransitionList(pureTransitionList)
for i, eqn in enumerate(eqn):
for k in from_list[i]:
self._ode[k] -= eqn
for k in to[i]:
self._ode[k] += eqn
# now we just need to add in the birth death processes
super(DeterministicOde, self)._computeBirthDeathVector()
self._ode += self._birthDeathVector
self._s = [s for s in self._iterStateList()] + [self._t]
self._sp = self._s + [p for p in self._iterParamList()]
# happy!
self._hasNewTransition = False
# tests to see whether we have an autonomous system. Need to
# convert a non-autonmous system into an autonomous. Note that
# we will not do the conversion internally and require the
# user to do this. May consider this a feature in the future.
for i, eqn in enumerate(self._ode):
if self._t in eqn.atoms():
raise Exception("Input is a non-autonomous system. " +
"We can only deal with an autonomous " +
"system at this moment in time")
            self._ode[i], isDifficult = simplifyEquation(eqn)
self._isDifficult = self._isDifficult or isDifficult
if self._isDifficult:
self._odeCompile = self._SC.compileExprAndFormat(self._sp,
self._ode,
modules='mpmath',
outType="vec")
else:
self._odeCompile = self._SC.compileExprAndFormat(self._sp,
self._ode,
outType="vec")
# assign None to all others because we have reset the set of equations.
self._Grad = None
self._Hessian = None
self._Jacobian = None
self._diffJacobian = None
return self._ode
def get_transition_graph(self, file_name=None, show=True):
"""
Returns the transition graph using graphviz
Parameters
----------
file_name: str, optional
name of the output file, defaults to None
show: bool, optional
If the graph should be plotted, defaults to True
Returns
-------
:class:`graphviz.Digraph`
"""
dot = _ode_composition.generateTransitionGraph(self, file_name)
if show:
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
img = mpimg.imread(io.BytesIO(dot.pipe("png")))
plt.imshow(img)
plt.show(block=False)
return dot
else:
return dot
#
# this is the main ode solver
#
def ode(self, state, t):
"""
Evaluate the ode given state and time
Parameters
----------
state: array like
The current numerical value for the states which can be
:class:`numpy.ndarray` or :class:`list`
t: double
The current time
Returns
-------
:class:`numpy.ndarray`
output of the same length as the ode
"""
return self.eval_ode(time=t, state=state)
def ode_T(self, t, state):
"""
Same as :meth:`ode` but with t as the first parameter
"""
return self.ode(state, t)
def eval_ode(self, parameters=None, time=None, state=None):
"""
Evaluate the ode given time, state and parameters. An extension
of :meth:`ode` but now also include the parameters.
Parameters
----------
parameters: list
see :meth:`.parameters`
time: numeric
The current time
state: array like
The current numerical value for the states which can be
:class:`numpy.ndarray` or :class:`list`
Returns
-------
:class:`numpy.matrix` or :class:`mpmath.matrix`
output of the same length as the ode.
Notes
-----
There are differences between the output of this function and
:meth:`.ode`. Name and order of state and time are also
different.
See Also
--------
:meth:`.ode`
"""
if self._ode is None or self._hasNewTransition:
self.get_ode_eqn()
eval_param = self._getEvalParam(state, time, parameters)
return self._odeCompile(eval_param)
########################################################################
#
# jacobian related operations
#
########################################################################
def is_stiff(self, state=None, t=None):
"""
Test on the eigenvalues of the jacobian. We classify the
problem as stiff if any of the eigenvalues are positive
Parameters
----------
state: array like
The current numerical value for the states which can be
:class:`numpy.ndarray` or :class:`list`
t: double
The current time
Returns
-------
        bool
            True if any eigenvalue of the jacobian is positive
"""
e = self.jacobian_eigenvalue(state, t)
return np.any(e > 0)
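    def _stiffness_check_example(self):
        """
        Hedged usage sketch, not part of the public interface: evaluate the
        vector field, the jacobian and the stiffness test at an illustrative
        state/time, assuming the parameters of this instance have already
        been set. The state vector below is a placeholder.
        """
        x0 = np.ones(self.num_state)
        t0 = 0.0
        rhs = self.ode(x0, t0)        # right hand side, length num_state
        jac = self.jacobian(x0, t0)   # [num_state x num_state]
        return rhs, jac, self.is_stiff(x0, t0)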
def jacobian_eigenvalue(self, state=None, t=None):
"""
Find out the eigenvalues of the jacobian given state and time. If
None is given, the initial values are used.
Parameters
----------
state: array like
The current numerical value for the states which can be
:class:`numpy.ndarray` or :class:`list`
t: double
The current time
Returns
-------
        :class:`numpy.ndarray`
            eigenvalues of the jacobian evaluated at the given state and time
"""
if state is None or t is None:
if self._x0 is not None and self._t0 is not None:
J = self.jacobian(self._x0, self._t0)
else:
J = self.jacobian(state, t)
return scipy.linalg.eig(J)[0]
def jacobian(self, state, t):
"""
Evaluate the jacobian given state and time
Parameters
----------
state: array like
The current numerical value for the states which can be
:class:`numpy.ndarray` or :class:`list`
t: double
The current time
Returns
-------
:class:`numpy.ndarray`
Matrix of dimension [number of state x number of state]
"""
return self.eval_jacobian(time=t, state=state)
def jacobian_T(self, t, state):
"""
Same as :meth:`jacobian` but with t as first parameter
"""
return self.jacobian(state, t)
def _Jacobian_NoCheck(self, state, t):
return self._evalJacobian_NoCheck(time=t, state=state)
def _JacobianT_NoCheck(self, t, state):
return self._Jacobian_NoCheck(state, t)
def get_jacobian_eqn(self):
"""
Returns the jacobian in algebraic form
Returns
-------
:class:`sympy.matrices.matrices`
A matrix of dimension [number of state x number of state]
"""
if self._Jacobian is None:
self.get_ode_eqn()
states = [s for s in self._iterStateList()]
self._Jacobian = self._ode.jacobian(states)
for i in range(self.num_state):
for j in range(self.num_state):
eqn = self._Jacobian[i, j]
if eqn != 0:
self._Jacobian[i, j], isDifficult = simplifyEquation(eqn)
self._isDifficult = self._isDifficult or isDifficult
f = self._SC.compileExprAndFormat
if self._isDifficult:
self._JacobianCompile = f(self._sp,
self._Jacobian,
modules='mpmath')
else:
self._JacobianCompile = f(self._sp,
self._Jacobian)
return self._Jacobian
def eval_jacobian(self, parameters=None, time=None, state=None):
"""
Evaluate the jacobian given parameters, state and time. An extension
of :meth:`.jacobian` but now also include the parameters.
Parameters
----------
parameters: list
see :meth:`.parameters`
time: double
The current time
state: array list
The current numerical value for the states which can be
:class:`numpy.ndarray` or :class:`list`
Returns
-------
:class:`numpy.matrix` or :class:`mpmath.matrix`
Matrix of dimension [number of state x number of state]
Notes
-----
Name and order of state and time are also different.
See Also
--------
:meth:`.jacobian`
"""
if self._Jacobian is None or self._hasNewTransition:
self.get_ode_eqn()
self.get_jacobian_eqn()
eval_param = self._getEvalParam(state, time, parameters)
return self._JacobianCompile(eval_param)
def _evalJacobian_NoCheck(self, time, state):
"""
Same as :meth:`eval_jacobian` but without the checks
"""
eval_param = list(state) + [time] + self._paramValue
return self._JacobianCompile(eval_param)
###### the sum of jacobian, i.e a_{i} = \sum_{j=1}^{d} J_{i,j}
def sens_jacobian_state(self, state_param, t):
"""
Evaluate the jacobian of the sensitivity w.r.t. the
state given state and time
Parameters
----------
state_param: array like
The current numerical value for the states as
well as the sensitivities, which can be
:class:`numpy.ndarray` or :class:`list`
t: double
The current time
Returns
-------
:class:`numpy.ndarray`
Matrix of dimension [number of state *
number of parameters x number of state]
"""
state = state_param[0:self.num_state]
sens = state_param[self.num_state::]
return self.eval_sens_jacobian_state(time=t, state=state, sens=sens)
def sens_jacobian_state_T(self, t, state):
"""
        Same as :meth:`sens_jacobian_state` but with t as first parameter
"""
return self.sens_jacobian_state(state, t)
def eval_sens_jacobian_state(self, time=None, state=None, sens=None):
"""
        Evaluate the jacobian of the sensitivities w.r.t. the states given
        time, state and sensitivities. An extension of
        :meth:`.sens_jacobian_state` with the state and sensitivities
        supplied separately.
Parameters
----------
time: double
The current time
state: array list
The current numerical value for the states which can be
:class:`numpy.ndarray` or :class:`list`
sens: :class:`numpy.ndarray`
The sensitivities for the jacobians
Returns
-------
        :class:`numpy.ndarray`
            Matrix of dimension [number of state * number of parameters x
            number of state]
Notes
-----
Name and order of state and time are also different.
See Also
--------
:meth:`.sens_jacobian_state`
"""
nS = self.num_state
nP = self.num_param
# dot first, then transpose, then reshape
# basically, some magic
# don't ask me what is actually going on here, I did it
# while having my wizard hat on
return np.reshape(self.diff_jacobian(state, time).dot(
self._SAUtil.vecToMatSens(sens)).transpose(), (nS*nP, nS))
############################## derivative of jacobian
def diff_jacobian(self, state, t):
"""
Evaluate the differential of jacobian given state and time
Parameters
----------
state: array like
The current numerical value for the states which can be
:class:`numpy.ndarray` or :class:`list`
t: double
The current time
Returns
-------
:class:`numpy.ndarray`
Matrix of dimension [number of state x number of state]
"""
return self.eval_diff_jacobian(time=t, state=state)
def diff_jacobian_T(self, t, state):
"""
Same as :meth:`diff_jacobian` but with t as first parameter
"""
return self.diff_jacobian(state, t)
def get_diff_jacobian_eqn(self):
"""
Returns the jacobian differentiate w.r.t. states in algebraic form
Returns
-------
list
list of size (num of state,) each with
:mod:`sympy.matrices.matrices` of dimension
[number of state x number of state]
"""
if self._diffJacobian is None:
self.get_ode_eqn()
diff_jac = list()
for eqn in self._ode:
J = sympy.zeros(self.num_state, self.num_state)
for i, si in enumerate(self._iterStateList()):
diffEqn, D1 = simplifyEquation(diff(eqn, si, 1))
for j, sj in enumerate(self._iterStateList()):
J[i,j], D2 = simplifyEquation(diff(diffEqn, sj, 1))
self._isDifficult = self._isDifficult or D1 or D2
#binding.
diff_jac.append(J)
# extract first matrix as base. we have to get the first element
# as base if we want to use the class method of the object
diffJacMatrix = diff_jac[0]
for i in range(1, len(diff_jac)):
# sympy internal matrix joining
diffJacMatrix = diffJacMatrix.col_join(diff_jac[i])
self._diffJacobian = copy.deepcopy(diffJacMatrix)
f = self._SC.compileExprAndFormat
if self._isDifficult:
self._diffJacobianCompile = f(self._sp,
self._diffJacobian,
modules='mpmath')
else:
self._diffJacobianCompile = f(self._sp,
self._diffJacobian)
return self._diffJacobian
def eval_diff_jacobian(self, parameters=None, time=None, state=None):
"""
Evaluate the differential of the jacobian given parameters,
state and time. An extension of :meth:`.diff_jacobian` but now
also include the parameters.
Parameters
----------
parameters: list
see :meth:`.parameters`
time: double
The current time
state: array list
The current numerical value for the states which can be
:class:`numpy.ndarray` or :class:`list`
Returns
-------
:class:`numpy.matrix` or :class:`mpmath.matrix`
            Matrix of dimension [number of state * number of state x number of state]
Notes
-----
Name and order of state and time are also different.
See Also
--------
:meth:`.jacobian`
"""
if self._diffJacobian is None or self._hasNewTransition:
self.get_ode_eqn()
self.get_diff_jacobian_eqn()
eval_param = self._getEvalParam(state, time, parameters)
return self._diffJacobianCompile(eval_param)
########################################################################
#
# Gradient related operations
#
########################################################################
def get_grad_eqn(self):
"""
Return the gradient of the ode in algebraic form
Returns
-------
:class:`sympy.matrices.matrices`
A matrix of dimension [number of state x number of parameters]
"""
# finds
if self._Grad is None:
ode = self.get_ode_eqn()
self._Grad = sympy.zeros(self.num_state, self.num_param)
for i in range(self.num_state):
# need to adjust such that the first index is not
# included because it correspond to time
for j, p in enumerate(self._iterParamList()):
eqn, isDifficult = simplifyEquation(diff(ode[i], p, 1))
self._Grad[i,j] = eqn
self._isDifficult = self._isDifficult or isDifficult
if self._isDifficult:
self._GradCompile = self._SC.compileExprAndFormat(self._sp,
self._Grad,
modules='mpmath',
outType="mat")
else:
self._GradCompile = self._SC.compileExprAndFormat(self._sp,
self._Grad,
outType="mat")
return self._Grad
def grad(self, state, time):
"""
Evaluate the gradient given state and time
Parameters
----------
state: array like
The current numerical value for the states which can be
:class:`numpy.ndarray` or :class:`list`
time: numeric
The current time
Returns
-------
:class:`numpy.ndarray`
Matrix of dimension [number of state x number of parameters]
"""
return self.eval_grad(state=state, time=time)
def grad_T(self, t, state):
"""
        Same as :meth:`grad` but with t as first parameter
"""
return self.grad(state, t)
def eval_grad(self, parameters=None, time=None, state=None):
"""
Evaluate the gradient given parameters, state and time. An extension
of :meth:`grad` but now also include the parameters.
Parameters
----------
parameters: list
see :meth:`.parameters`
time: double
The current time
state: array list
The current numerical value for the states which can be
:class:`numpy.ndarray` or :class:`list`
Returns
-------
:class:`numpy.matrix` or :class:`mpmath.matrix`
            Matrix of dimension [number of state x number of parameters]
Notes
-----
Name and order of state and time are also different.
See Also
--------
:meth:`.grad`
"""
if self._Grad is None or self._hasNewTransition:
self.get_ode_eqn()
self.get_grad_eqn()
eval_param = self._getEvalParam(state, time, parameters)
return self._GradCompile(eval_param)
#
# jacobian of the Gradiant
#
def get_grad_jacobian_eqn(self):
"""
Return the jacobian of the gradient in algebraic form
Returns
-------
:class:`sympy.matrices.matrices`
A matrix of dimension [number of state *
number of parameters x number of state]
See also
--------
:meth:`.get_grad_eqn`
"""
if self._GradJacobian is None:
self._GradJacobian = sympy.zeros(self.num_state*self.num_param,
self.num_state)
G = self.get_grad_eqn()
for k in range(0, self.num_param):
for i in range(0, self.num_state):
for j, s in enumerate(self._iterStateList()):
z = k*self.num_state + i
eqn, isDifficult = simplifyEquation(diff(G[i,k], s, 1))
self._GradJacobian[z,j] = eqn
self._isDifficult = self._isDifficult or isDifficult
# end of the triple loop. All elements are now filled
f = self._SC.compileExprAndFormat
if self._isDifficult:
self._GradJacobianCompile = f(self._sp,
self._GradJacobian,
modules='mpmath')
else:
self._GradJacobianCompile = f(self._sp,
self._GradJacobian)
return self._GradJacobian
def grad_jacobian(self, state, time):
"""
Evaluate the Jacobian of the gradient given state and time
Parameters
----------
state: array like
The current numerical value for the states which can be
:class:`numpy.ndarray` or :class:`list`
time: numeric
The current time
Returns
-------
:class:`numpy.ndarray`
Matrix of dimension [number of state x number of parameters]
See also
--------
:meth:`.grad`
"""
return self.eval_grad_jacobian(state=state, time=time)
def grad_jacobianT(self, t, state):
"""
Same as :meth:`grad_jacobian` but with t as first parameter
"""
return self.grad_jacobian(state, t)
def eval_grad_jacobian(self, parameters=None, time=None, state=None):
"""
Evaluate the jacobian of the gradient given parameters,
state and time. An extension of :meth:`.grad_jacobian`
that also includes the parameters.
Parameters
----------
parameters: list
see :meth:`.parameters`
time: double
The current time
state: array like
The current numerical value for the states which can be
:class:`numpy.ndarray` or :class:`list`
Returns
-------
:class:`numpy.matrix` or :class:`mpmath.matrix`
Matrix of dimension [number of state * number of parameters x number of state]
Notes
-----
The name and order of the state and time arguments differ from :meth:`.grad_jacobian`.
See Also
--------
:meth:`.grad_jacobian`, :meth:`.get_grad_jacobian_eqn`
"""
if self._GradJacobian is None or self._hasNewTransition:
self.get_ode_eqn()
self.get_grad_jacobian_eqn()
eval_param = self._getEvalParam(state, time, parameters)
return self._GradJacobianCompile(eval_param)
########################################################################
#
# hessian related operations
#
########################################################################
def get_hessian_eqn(self):
"""
Return the Hessian of the ode in algebraic form
Returns
-------
list
list of dimension number of state, each with matrix
[number of parameters x number of parameters] in
:mod:`sympy.matrices.matrices`
Notes
-----
We deliberately return a list instead of a 3d array of a
tensor to avoid confusion
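Examples
--------
A standalone sketch using plain :mod:`sympy` only (a hypothetical
one-state ode with parameters b and m, not an object of this class):

>>> import sympy
>>> x, b, m = sympy.symbols('x b m')
>>> eqn = b*x**2/(m + x)              # hypothetical right hand side
>>> H = sympy.hessian(eqn, (b, m))    # 2 x 2 symmetric matrix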
"""
if self._Hessian is None:
ode = self.get_ode_eqn()
self._Hessian = list()
# roll out the equations one by one. Each H below is the
# second derivative of f_{j}(x), the j^{th} ode. Each ode
# corresponds to a state
for eqn in ode:
H = sympy.zeros(self.num_param, self.num_param)
# although this can be simplified by first finding the gradient,
# it is not required, so we accept the slower computation here
for i, pi in enumerate(self._iterParamList()):
a = diff(eqn, pi, 1)
for j, pj in enumerate(self._iterParamList()):
H[i,j], isDifficult = simplifyEquation(diff(a, pj, 1))
self._isDifficult = self._isDifficult or isDifficult
# end of double loop. Finished one state
self._Hessian.append(H)
return self._Hessian
def hessian(self, state, time):
"""
Evaluate the hessian given state and time
Parameters
----------
state: array like
The current numerical value for the states which can be
:class:`numpy.ndarray` or :class:`list`
time: double
The current time
Returns
-------
list
list of dimension number of state, each with matrix
[number of parameters x number of parameters] in
:mod:`sympy.matrices.matrices`
"""
A = self.eval_hessian(state=state, time=time)
return [np.array(H, float) for H in A]
def eval_hessian(self, parameters=None, time=None, state=None):
"""
Evaluate the hessian given parameters, state and time. An extension
of :meth:`hessian` that also includes the parameters.
Parameters
----------
parameters: list
see :meth:`.parameters`
time: double
The current time
state: array like
The current numerical value for the states which can be
:class:`numpy.ndarray` or :class:`list`
Returns
-------
list
list of dimension number of state, each with matrix
[number of parameters x number of parameters] in
:mod:`sympy.matrices.matrices`
See Also
--------
:meth:`.grad`, :meth:`.eval_grad`
"""
if self._hasNewTransition:
self.get_ode_eqn()
eval_param = list()
eval_param = self._addTimeEvalParam(eval_param, time)
eval_param = self._addStateEvalParam(eval_param, state)
if parameters is None:
if self._HessianWithParam is None:
self._computeHessianParam()
else:
self.parameters = parameters
if self._Hessian is None:
self._computeHessianParam()
if len(eval_param) == 0:
return self._Hessian
else:
H = list()
for i in range(0, self.num_state):
H.append(self._HessianWithParam[i].subs(eval_param))
return H
def _computeHessianParam(self):
self._Hessian = self.get_hessian_eqn()
self._HessianWithParam = copy.deepcopy(self._Hessian)
# Matrix.subs returns a new matrix, so rebuild the list instead of
# rebinding the loop variable (which would leave the copies unchanged)
self._HessianWithParam = [H.subs(self._parameters)
for H in self._HessianWithParam]
return None
########################################################################
#
# Sensitivity related operations (1st forward)
#
########################################################################
def sensitivity(self, sens, t, state, by_state=False):
"""
Evaluate the sensitivity given state and time. The default is to
output the values by parameters, i.e. :math:`s_{i},\\ldots,s_{i+n}` are
partial derivatives w.r.t. the states for
:math:`i \\in {1,1+p,1+2p,1+3p, \\ldots, 1+(n-1)p}`. This is
to take advantage of the fact that we have a block diagonal
jacobian that was already evaluated
Parameters
----------
sens: array like
The starting sensitivity of size [number of state x number of
parameters], which is normally zero or one depending on whether
the initial conditions are also variables.
t: double
The current time
state: array like
The current numerical value for the states which can be
:class:`numpy.ndarray` or :class:`list`
by_state: bool
how we want the output to be arranged. Default is False so
that the output is arranged by parameters, giving the block
diagonal structure described above
Returns
-------
:class:`numpy.ndarray`
"""
# TODO: allow the functionality to not evaluate all sensitivities
# S = \nabla_{time} \frac{\partial State}{\partial Parameters}
# rearrange the input if required
if by_state:
S = np.reshape(sens, (self.num_state, self.num_param))
else:
S = self._SAUtil.vecToMatSens(sens)
return self.eval_sensitivity(S=S, t=t, state=state, by_state=by_state)
def sensitivity_T(self, t, sens, state, by_state=False):
"""
Same as :meth:`sensitivity` but with t as first parameter
"""
return self.sensitivity(sens, t, state, by_state)
def eval_sensitivity(self, S, t, state, by_state=False):
"""
Evaluate the sensitivity given state and time
Parameters
----------
S: array like
Which should be :class:`numpy.ndarray`.
The starting sensitivity of size [number of state x number of
parameters], which is normally zero or one depending on whether
the initial conditions are also variables.
t: double
The current time
state: array like
The current numerical value for the states which can be
:class:`numpy.ndarray` or :class:`list`
by_state: bool
how we want the output to be arranged. Default is False so
that the output is arranged by parameters, giving a block
diagonal structure
Returns
-------
:class:`numpy.ndarray`
Notes
-----
It is different to :meth:`.eval_ode` and :meth:`.eval_jacobian` in
that the extra input argument is not a parameter
See Also
--------
:meth:`.sensitivity`
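Examples
--------
A standalone sketch of the right hand side :math:`JS + G` using
plain :mod:`numpy` with hypothetical values for J and G (they are
not produced by this class here):

>>> import numpy as np
>>> J = np.array([[-0.5, 0.0], [0.5, -0.1]])   # hypothetical jacobian
>>> G = np.array([[1.0], [0.0]])               # hypothetical gradient
>>> S = np.zeros((2, 1))                       # starting sensitivities
>>> dS = np.dot(J, S) + G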
"""
# jacobian * sensitivities + G
# where G is the gradient
J = self.jacobian(state, t)
G = self.grad(state, t)
A = np.dot(J, S) + G
if by_state:
return np.reshape(A, self.num_state*self.num_param)
else:
return self._SAUtil.matToVecSens(A)
def ode_and_sensitivity(self, state_param, t, by_state=False):
"""
Evaluate the sensitivity given state and time
Parameters
----------
state_param: array like
The current numerical value for the states as well as the
sensitivity values all in one. We assume that the state
values come first.
t: double
The current time
by_state: bool
Whether the output vector should be arranged by state or by
parameters. If False, then it means that the vector of output is
arranged according to looping i,j from Sensitivity_{i,j} with i
being the state and j the param. This is the preferred way because
it leads to a block diagonal Jacobian
Returns
-------
:class:`list`
concatenation of 2 element. First contains the ode, second the
sensitivity. Both are of type :class:`numpy.ndarray`
See Also
--------
:meth:`.sensitivity` , :meth:`.ode`
"""
if len(state_param) == self.num_state:
raise InputError("You have only inputted the initial condition " +
"for the states and not the sensitivity")
# unrolling, assuming that we would always put the state first
# there is no safety checks on this because it is impossible to
# distinguish what is state and what is sensitivity as they are
# all numeric value that can take the full range (-\infty,\infty)
state = state_param[0:self.num_state]
sens = state_param[self.num_state::]
out1 = self.ode(state, t)
out2 = self.sensitivity(sens, t, state, by_state)
return np.append(out1, out2)
def ode_and_sensitivity_T(self, t, state_param, by_state=False):
"""
Same as :meth:`ode_and_sensitivity` but with t as first parameter
"""
return self.ode_and_sensitivity(state_param, t, by_state)
def ode_and_sensitivity_jacobian(self, state_param, t, by_state=False):
"""
Evaluate the sensitivity given state and time. Output a block
diagonal sparse matrix as default.
Parameters
----------
state_param: array like
The current numerical value for the states as well as the
sensitivity values all in one. We assume that the state
values come first.
t: double
The current time
by_state: bool
How the output is arranged, according to the vector of output.
It can be in terms of state or parameters, where by state means
that the jacobian is a block diagonal matrix.
Returns
-------
:class:`numpy.ndarray`
a square matrix whose dimension equals the length of the
state_param input, i.e. [number of states * (number of parameters + 1)]
See Also
--------
:meth:`.ode_and_sensitivity`
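Examples
--------
A standalone sketch (plain :mod:`numpy`, hypothetical 2 x 2 jacobian
and 3 parameters) of the block diagonal construction used internally:

>>> import numpy as np
>>> J = np.array([[-1.0, 0.5], [0.0, -2.0]])   # hypothetical jacobian
>>> outJ = np.kron(np.eye(3), J)               # block diagonal, shape (6, 6)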
"""
if len(state_param) == self.num_state:
raise InputError("Expecting both the state and the sensitivities")
else:
state = state_param[0:self.num_state]
# now we start the computation
J = self.jacobian(state, t)
# create the block diagonal Jacobian, assuming that whoever is
# calling this function wants it arranged by state-parameters
# Note that none of the ode integrators in scipy allow a sparse Jacobian
# matrix. All of them accept a banded matrix in packed format but not
# an actual sparse, or specifying the number of bands.
outJ = np.kron(np.eye(self.num_param), J)
# Jacobian of the gradient
GJ = self.grad_jacobian(state, t)
# and now we add the gradient
sensJacobianOfState = GJ + self.sens_jacobian_state(state_param, t)
if by_state:
arrange_vector = np.zeros(self.num_state * self.num_param)
k = 0
for j in range(0, self.num_param):
for i in range(0, self.num_state):
if i == 0:
arrange_vector[k] = (i*self.num_state) + j
else:
arrange_vector[k] = (i*(self.num_state - 1)) + j
k += 1
outJ = outJ[np.array(arrange_vector,int), :]
idx = np.array(arrange_vector, int)
sensJacobianOfState = sensJacobianOfState[idx, :]
# The Jacobian of the ode, then the sensitivities w.r.t state and
# the sensitivities. In block form. Theoretically, only the diagonal
# blocks are important but we output the full matrix for completeness
return np.asarray(np.bmat([
[J, np.zeros((self.num_state, self.num_state*self.num_param))],
[sensJacobianOfState, outJ]
]))
def ode_and_sensitivity_jacobian_T(self, t, state_param, by_state=False):
"""
Same as :meth:`ode_and_sensitivity_jacobian` but with t as
first parameter
"""
return self.ode_and_sensitivity_jacobian(state_param, t, by_state)
########################################################################
#
# Include initial value as parameters. Sensitivity related operations
# (1st forward)
#
########################################################################
def sensitivityIV(self, sensIV, t, state):
"""
Evaluate the sensitivity which include the initial values as
our parameters given state and time. The default is to
output the values by parameters, i.e. :math:`s_{i},\\ldots,s_{i+n}` are
partial derivatives w.r.t. the states for
:math:`i \\in {1,1+p,1+2p,1+3p, \\ldots, 1+(n-1)p}`. This is to take
advantage of the fact that we have a block diagonal Jacobian that was
already evaluated.
Parameters
----------
sensIV: array like
The starting sensitivity of size [number of state x number of
parameters] + [number of state x number of state] for the
initial condition. The latter is an identity matrix at time zero.
t: double
The current time
state: array like
The current numerical value for the states which can be
:class:`numpy.ndarray` or :class:`list`
Returns
-------
:class:`numpy.ndarray`
output of the same length as the ode
"""
nS = self.num_state
nP = self.num_param
# separate information out. Again, we do not have checks here
# as it will be impossible to distinguish what is correct
sens = sensIV[:(nS*nP)]
S = self._SAUtil.vecToMatSens(sens)
IV = np.reshape(sensIV[-(nS*nS):], (nS, nS), 'F')
return self.eval_sensitivityIV(S=S, IV=IV, t=t, state=state)
def sensitivityIV_T(self, t, sensIV, state):
"""
Same as :meth:`sensitivityIV` but with t as first parameter
"""
return self.sensitivityIV(sensIV, t, state)
def eval_sensitivityIV(self, S, IV, t, state):
"""
Evaluate the sensitivity with initial values given
state and time
Parameters
----------
S: array like
Which should be :class:`numpy.ndarray`.
The starting sensitivity of size [number of state x number of
parameters], which is normally zero or one depending on whether
the initial conditions are also variables.
IV: array like
sensitivities for the initial values
t: double
The current time
state: array like
The current numerical value for the states which can be
:class:`numpy.ndarray` or :class:`list`
Returns
-------
:class:`numpy.ndarray`
:math:`f(s(x,\\theta))` and :math:`f(s(x_{0}))`
Notes
-----
It is different to :meth:`.eval_ode` and :meth:`.eval_jacobian` in
that the extra input argument is not a parameter.
See Also
--------
:meth:`.sensitivityIV`
"""
# jacobian * sensitivities + G
# where G is the gradient
# Evidently, A below uses the same operations as
# A = self.eval_sensitivity(S,t,state)
# but we are evaluating them explicitly here because
# we will be using J as well when computing B
J = self.jacobian(state, t)
G = self.grad(state, t)
A = np.dot(J, S) + G
# and jacobian * sensitivities of the initial condition
B = np.dot(J, IV)
# we want to output by parameters
return self._SAUtil.matToVecSens(A), B.flatten('F')
def ode_and_sensitivityIV(self, state_param, t):
"""
Evaluate the sensitivity given state and time
Parameters
----------
state_param: array like
The current numerical value for the states as well as the
sensitivity values all in one. We assume that the state
values come first.
t: double
The current time
Returns
-------
:class:`list`
concatenation of 3 element. First contains the ode, second the
sensitivity, then the sensitivity of the initial value. All
of them are of type
:class:`numpy.ndarray`
See Also
--------
:meth:`.sensitivity` , :meth:`.ode`
"""
if len(state_param) == self.num_state:
raise InputError("You have only inputed the initial condition " +
"for the states and not the sensitivity")
# unrolling, assuming that we would always put the state first
state = state_param[0:self.num_state]
# the remaining values (forward and initial value sensitivities)
sens_iv = state_param[self.num_state::]
# separate evaluation
out1 = self.ode(state, t)
out2,out3 = self.sensitivityIV(sens_iv, t, state)
return np.append(np.append(out1, out2), out3)
def ode_and_sensitivityIV_T(self, t, state_param):
"""
Same as :meth:`ode_and_sensitivityIV` but with t as first parameter
"""
return self.ode_and_sensitivityIV(state_param, t)
def ode_and_sensitivityIV_jacobian(self, state_param, t):
"""
Evaluate the sensitivity given state and time. Output a block
diagonal sparse matrix as default.
Parameters
----------
state_param: array like
The current numerical value for the states as well as the
sensitivity values all in one. We assume that the state
values come first.
t: double
The current time
Returns
-------
:class:`numpy.ndarray`
a square matrix whose dimension equals the length of the
state_param input
See Also
--------
:meth:`.ode_and_sensitivity`
"""
if len(state_param) == self.num_state:
raise InputError("Expecting both the state and the sensitivities")
else:
state = state_param[0:self.num_state]
nS = self.num_state
nP = self.num_param
# now we start the computation, the simple part first
J = self.jacobian(state, t)
# now the jacobian of the state vs initial value
DJ = self.diff_jacobian(state, t)
A = DJ.dot(np.reshape(state_param[(nS*(nP+1))::], (nS, nS), 'F'))
A = np.reshape(A.transpose(), (nS*nS, nS))
if nP == 0:
return np.asarray(np.bmat([
[J, np.zeros((nS, nS*nS))],
[A, np.kron(np.eye(nS), J)]
]))
else:
# create the block diagonal jacobian, assuming that whoever is
# calling this function wants it arranged by state-parameters
outJ = np.kron(np.eye(nP), J)
# jacobian of the gradient
GJ = self.grad_jacobian(state, t)
GS = self.sens_jacobian_state(state_param[:(nS*(nP + 1))], t)
sensJacobianOfState = GJ + GS
# The jacobian of the ode, then the sensitivities w.r.t state
# and the sensitivities. In block form
return np.asarray(np.bmat([
[J, np.zeros((nS, nS*nP)), np.zeros((nS, nS*nS))],
[sensJacobianOfState, outJ, np.zeros((nS*nP, nS*nS))],
[A, np.zeros((nS*nS, nS*nP)), np.kron(np.eye(nS), J)]
]))
def ode_and_sensitivityIV_jacobian_T(self, t, state_param):
"""
Same as :meth:`ode_and_sensitivityIV_jacobian` but with t as
first parameter
"""
return self.ode_and_sensitivityIV_jacobian(state_param, t)
############################################################################
#
# Adjoint
#
############################################################################
def adjoint_interpolate(self, state, t, interpolant, func=None):
"""
Compute the adjoint given the adjoint vector, time, the functions
which was used to interpolate the state variable
Parameters
----------
state: array like
The current value of lambda, where lambda's are the Lagrangian
multipliers of the differential equation.
t: double
The current time.
interpolant: list
list of interpolating functions of the state
func: callable
This should take inputs similar to an ode, i.e. of the form
func(y,t). If j(y,t) is the cost function, then func
is a function that calculates
:math:`\\partial j \\over \\partial x`.
Returns
-------
:class:`numpy.ndarray`
output of the same length as the ode
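Examples
--------
A hedged sketch of building the ``interpolant`` list from a previously
computed (hypothetical) solution using :class:`scipy.interpolate.interp1d`:

>>> import numpy as np
>>> from scipy.interpolate import interp1d
>>> t_obs = np.linspace(0.0, 10.0, 11)
>>> sol = np.vstack([np.exp(-t_obs), np.cos(t_obs)]).T  # hypothetical solution
>>> interpolant = [interp1d(t_obs, sol[:, i]) for i in range(sol.shape[1])]
>>> state_at_t = [o(2.5) for o in interpolant]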
"""
state_param = [o(t) for o in interpolant]
return self.adjoint(state, t, state_param, func)
def adjoint_interpolate_T(self, t, state, interpolant, objInput=None):
"""
Same as :meth:`adjoint_interpolate` but with t as first parameter
"""
return self.adjoint_interpolate(state, t, interpolant, objInput)
def _adjointInterpolate_NoCheck(self, state, t,
interpolant, func=None):
state_param = [o(t) for o in interpolant]
return self._adjoint_NoCheck(state, t, state_param, func)
def _adjointInterpolateT_NoCheck(self, t, state,
interpolant, func=None):
return self._adjointInterpolate_NoCheck(state, t, interpolant, func)
def adjoint(self, state, t, state_param, func=None):
"""
Compute the adjoint given the adjoint vector, time, state variable
and the objective function. Note that this function is very
restrictive in the sense that the (original) state variable changes
through time but this assumes it is a constant, i.e. we assume that
the original system is linear.
Parameters
----------
state: array like
The current value of lambda, where lambda's are the Lagrangian
multipliers of the differential equation.
t: double
The current time.
state_param: array like
The state vector that is (or maybe) required to evaluate the
jacobian of the original system
func: callable
This should take inputs similar to an ode, i.e. of the form
func(y,t). If j(y,t) is the cost function, then func
is a function that calculates
:math:`\\partial j \\over \\partial x`.
Returns
-------
:class:`numpy.ndarray`
output of the same length as the ode
Notes
-----
The size of lambda should be the same as the state. The integral
should be starting from T, the final time of the original system
and is integrated backwards (for stability).
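Examples
--------
A standalone sketch of the right hand side
:math:`\\partial j / \\partial x - J^{T}\\lambda` using plain
:mod:`numpy` and hypothetical values:

>>> import numpy as np
>>> J = np.array([[-1.0, 0.2], [0.0, -0.5]])   # hypothetical jacobian
>>> lam = np.array([0.1, 0.3])                 # current adjoint values
>>> g = np.array([0.0, 1.0])                   # hypothetical dj/dx
>>> dlam = g - J.T.dot(lam)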
"""
J = self.jacobian(state_param, t)
if func is None:
return np.dot(state, -J)
else:
return func(state_param, t) - J.transpose().dot(state)
def _adjoint_NoCheck(self, state, t, state_param, func=None):
J = self._Jacobian_NoCheck(state_param, t)
if func is None:
return np.dot(state, -J)
else:
return func(state_param, t) - J.transpose().dot(state)
def _adjoinT_NoCheck(self, t, state, state_param, func=None):
return self._adjoint_NoCheck(state, t, state_param, func)
def adjoint_T(self, t, state, state_param, func=None):
"""
Same as :meth:`adjoint` but with t as first parameter
"""
return self.adjoint(state, t, state_param, func)
def adjoint_jacobian(self, state, t, state_param, func=None):
"""
Compute the jacobian of the adjoint given the adjoint vector, time,
state variable and the objective function. This is simply the same
as the negative jacobian of the ode transposed.
Parameters
----------
state: array like
The current value of lambda, where lambda's are the Lagrangian
multipliers of the differential equation.
t: double
The current time.
state_param: array like
The state vector that is (or maybe) required to evaluate the
jacobian of the original system
func: callable
This should take inputs similar to an ode, i.e. of the form
func(y,t). If j(y,t) is the cost function, then func
is a function that calculates
:math:`\\partial j \\over \\partial x`.
Returns
-------
:class:`numpy.ndarray`
output is a two dimensional array of size
[number of state x number of state]
Notes
-----
It takes the same number of arguments as the adjoint for simplicity
when integrating.
See Also
--------
:meth:`.adjoint`
"""
return -self.jacobian(state_param, t).transpose()
def adjoint_jacobian_T(self, t, state, state_param, func=None):
"""
Same as :meth:`adjoint_jacobian` but with t being the
first parameter
"""
return self.adjoint_jacobian(state, t, state_param, func)
def adjoint_interpolate_jacobian(self, state, t,
interpolant, func=None):
"""
Compute the Jacobian of the adjoint given the adjoint vector, time,
function of the interpolation on the state variables and the
objective function. This is simply the same as the negative
Jacobian of the ode transposed.
Parameters
----------
state: array like
The current value of lambda, where lambda's are the Lagrangian
multipliers of the differential equation.
t: double
The current time.
interpolant: list
list of interpolating functions of the state
func: callable
This should take inputs similar to an ode, i.e. of the form
func(y,t). If j(y,t) is the cost function, then func is
a function that calculates :math:`\\partial j \\over \\partial x`.
Returns
-------
:class:`numpy.ndarray`
output is a two dimensional array of size
[number of state x number of state]
Notes
-----
Same as :meth:`.adjoint_jacobian` but takes a list of interpolating
functions instead of a single (vector) value
See Also
--------
:meth:`.adjoint_jacobian`
"""
state_param = [o(t) for o in interpolant]
return self.adjoint_jacobian(state, t, state_param, func)
def adjoint_interpolate_jacobian_T(self, t, state, interpolant, func=None):
"""
Same as :meth:`adjoint_interpolate_jacobian` but with t as
first parameter
"""
return self.adjoint_interpolate_jacobian(state, t,
interpolant, func)
########################################################################
#
# Sensitivity, forward-forward operations
#
########################################################################
def forwardforward(self, ff, t, state, s):
"""
Evaluate a single :math:`f(x)` of the forward-forward sensitivities
Parameters
----------
ff: array like
the forward-forward sensitivities in vector form
t: numeric
time
state: array like
the current state
s: array like
forward sensitivities in vector form
Returns
-------
:class:`numpy.ndarray`
:math:`f(x)` of size [number of state *
(number of parameters * number of parameters)]
"""
# a by_state arrangement is not offered in the forward-forward case
# because the second derivative only has a sensible layout when viewed
# from the state point of view
S = self._SAUtil.vecToMatSens(s)
FF = self._SAUtil.vecToMatFF(ff)
return self.eval_forwardforward(FF=FF, S=S, state=state, t=t)
def forwardforward_T(self, t, ff, s, state):
"""
Same as :meth:`forwardforward` but with t as the first
parameter
"""
return self.forwardforward(ff, t, state, s)
def eval_forwardforward(self, FF, S, state, t):
"""
Evaluate a single f(x) of the forward-forward sensitivities
Parameters
----------
FF: array like
this is in fact a 3rd order Tensor, aka 3d array
S: array like
sensitivities in matrix form
state: array like
the current state
t: numeric
time
Returns
-------
:class:`numpy.ndarray`
f(x) of size [number of state *
(number of parameters * number of parameters)]
"""
J = self.jacobian(state, t)
diffJ = self.diff_jacobian(state, t)
# evaluating by state/ode, the matrix of second derivatives.
# we have kron products in all these evaluations and the helper
# class here uses a sparse matrix operation
outFF = self._SAUtil.kronParam(J).dot(FF)
outFF += self._SAUtil.kronState(A=S.T, pre=True).dot(diffJ).dot(S)
# now we need to magic our list / matrix into a vector, aka append
# each of the vectorized matrix one after another
return self._SAUtil.matToVecFF(outFF)
def ode_and_forwardforward(self, state_param, t):
"""
Evaluate a single f(x) of the ode and the
forward-forward sensitivities
Parameters
----------
state_param: array like
state and forward-forward sensitivities in vector form
t: numeric
time
Returns
-------
:class:`numpy.ndarray`
same size as the state_param input
"""
if len(state_param) == self.num_state:
raise InputError("You have only inputted the initial condition " +
"for the states and not the sensitivity")
elif len(state_param) == (self.num_state*(self.num_param + 1)):
raise InputError("You have only inputted the initial condition " +
"for the states and the sensitivity but not " +
"the forward forward condition")
# unrolling of parameters
state = state_param[0:self.num_state]
# we want the index up to numState * (numParam + 1)
# as in, (numState * numParam + numState,
# number of sensitivities + number of ode)
sens = state_param[self.num_state:(self.num_state*(self.num_param + 1))]
# the rest are then the forward forward sensitivities
ff = state_param[(self.num_state*(self.num_param + 1))::]
out1 = self.ode(state, t)
out2 = self.sensitivity(sens, t, state)
out3 = self.forwardforward(ff, t, state, sens)
return np.append(np.append(out1, out2), out3)
def ode_and_forwardforward_T(self, t, state_param):
"""
Same as :meth:`ode_and_forwardforward` but with time
as the first input
"""
return self.ode_and_forwardforward(state_param, t)
def ode_and_forwardforward_jacobian(self, state_param, t):
"""
Return the jacobian after evaluation given the input
of the state and the forward forward sensitivities
Parameters
----------
state_param: array like
state and forward-forward sensitivities in vector form
t: numeric
time
Returns
-------
:class:`numpy.ndarray`
size of (a,a) where a is the length of the
state_param input
"""
if len(state_param) == self.num_state:
state = state_param
else:
state = state_param[0:self.num_state]
J = self.jacobian(state, t)
# create the block diagonal jacobian, assuming that whoever is
# calling this function wants it arranged by state-parameters.
# We only construct the block diagonal jacobian here
# instead of the full one unlike some of the other methods within
# this class
outJS = np.kron(np.eye(self.num_param), J)
outJFF = np.kron(np.eye(self.num_param*self.num_param), J)
# The jacobian of the ode, then the sensitivities, then the
# forward forward sensitivities
return scipy.linalg.block_diag(J, outJS, outJFF)
def ode_and_forwardforward_jacobian_T(self, t, state_param):
"""
Same as :meth:`ode_and_forwardforward_jacobian` but
with t being the first parameter
"""
return self.ode_and_forwardforward_jacobian(state_param, t)
########################################################################
#
# Initial conditions, integrations and result plots
#
########################################################################
@property
def initial_state(self):
"""
Return the initial state values
"""
return self._x0
@initial_state.setter
def initial_state(self, x0):
"""
Set the initial state values
Parameters
----------
x0: array like
initial condition of x at time 0
"""
err_str = "More than one state in the defined system"
if isinstance(x0, np.ndarray):
self._x0 = x0
elif isinstance(x0, (list, tuple)):
self._x0 = np.array(x0)
elif isinstance(x0, (int, float)):
if self.num_state == 1:
self._x0 = np.array([x0])
else:
raise InitializeError(err_str)
else:
raise InitializeError("err_str")
if len(self._x0) != self.num_state:
raise Exception("Number of state is " +
str(self.num_state)+ " but " +
str(len(self._x0))+ " detected")
@property
def initial_time(self):
"""
Return the initial time
"""
return self._t0
@initial_time.setter
def initial_time(self, t0):
"""
Set the initial time
Parameters
----------
t0: numeric
initial time where x0 is observed
"""
err_str = "Initial time should be a "
if isinstance(t0, Number):
self._t0 = t0
elif ode_utils.is_list_like(t0):
if len(t0) == 1:
if isinstance(t0[0], Number):
self._t0 = t0[0]
else:
raise InitializeError(err_str + "numeric value")
else:
raise InitializeError(err_str + "single value")
elif isinstance(t0, (list, tuple)):
if len(t0) == 1:
self._t0 = np.array(t0[0])
else:
raise InitializeError(err_str + "single value")
else:
raise InitializeError(err_str + "numeric value")
@property
def initial_values(self):
"""
Returns the initial values, both time and state as a tuple (x0, t0)
"""
return self.initial_state, self.initial_time
@initial_values.setter
def initial_values(self, x0t0):
"""
Set the initial values, both time and state
Parameters
----------
x0t0: array like
initial condition of x at time t and the initial time t where x
is observed
"""
assert len(x0t0) == 2, "Initial values require (x0, t0)"
self.initial_state = x0t0[0]
self.initial_time = x0t0[1]
def integrate(self, t, full_output=False):
"""
Integrate over a range of time points t and return the output at those times
Parameters
----------
t: array like
the range of time points which we want to see the result of
full_output: bool
if we want additional information
"""
# type checking
self._setIntegrateTime(t)
# if our parameters are stochastic, then we are going to generate
# another set of parameters to run
if self._stochasticParam is not None:
# this should always be true. If not, then we have screwed up
# somewhere within this class.
if isinstance(self._stochasticParam, dict):
self.parameters = self._stochasticParam
return self._integrate(self._odeTime, full_output)
def integrate2(self, t, full_output=False, method=None):
"""
Integrate over a range of time points t and return the output
at those times. Select a suitable method to integrate when
method is None.
Parameters
----------
t: array like
the range of time points which we want to see the result of
full_output: bool
if we want additional information
method: str, optional
the integration method. All those available in
:class:`ode <scipy.integrate.ode>` are allowed with 'vode'
and 'ivode' representing the non-stiff and stiff version
respectively. Defaults to None, which tries to choose the
integration method via a single eigenvalue analysis using
the initial conditions
"""
self._setIntegrateTime(t)
# if our parameters are stochastic, then we are going to generate
# another set of parameters to run
if self._stochasticParam is not None:
# this should always be true
if isinstance(self._stochasticParam, dict):
self.parameters = self._stochasticParam
return self._integrate2(self._odeTime, full_output, method)
def _setIntegrateTime(self, t):
"""
Set the full set of integration time including the origin
"""
assert self._t0 is not None, "Initial time not set"
if ode_utils.is_list_like(t):
if isinstance(t[0], Number):
t = np.append(self._t0, t)
else:
raise ArrayError("Expecting a list of numeric value")
elif isinstance(t, Number):
t = np.append(self._t0, np.array(t))
else:
raise ArrayError("Expecting an array like input or a single " +
"numeric value")
self._odeTime = t
def _integrate(self, t, full_output=True):
"""
Integrate using :class:`scipy.integrate.odeint` underneath
"""
assert self._t0 is not None, "Initial time not set"
f = ode_utils.integrate
self._odeSolution, self._odeOutput = f(self,
self._x0,
t,
full_output=True)
if full_output:
return self._odeSolution, self._odeOutput
else:
return self._odeSolution
def _integrate2(self, t, full_output=True, method=None):
"""
Integrate using :class:`scipy.integrate.ode` underneath
"""
assert self._x0 is not None, "Initial state not set"
f = ode_utils.integrateFuncJac
self._odeSolution, self._odeOutput = f(self.ode_T,
self.jacobian_T,
self._x0,
t[0], t[1::],
includeOrigin=True,
full_output=True,
method=method)
if full_output:
return self._odeSolution, self._odeOutput
else:
return self._odeSolution
def plot(self):
"""
Plot the results of the integration
Notes
-----
If we have 3 states or more, it will always be arranged such
that it has 3 columns. Uses the operation from
:mod:`odeutils`
"""
# just need to make sure that we have
# already gotten the solution to the integration
if self._odeSolution is None:
try:
self._integrate(self._odeTime)
ode_utils.plot(self._odeSolution, self._odeTime, self._stateList)
except:
raise IntegrationError("Have not performed the integration yet")
else:
ode_utils.plot(self._odeSolution, self._odeTime, self._stateList)
########################################################################
# Unrolling of the information from vector to sympy
# t
# state
########################################################################
def _addTimeEvalParam(self, eval_param, t):
eval_param.append((self._t, t))
return eval_param
def _addStateEvalParam(self, eval_param, state):
super(DeterministicOde, self).state = state
if self._state is not None:
eval_param += self._state
return eval_param
def _getEvalParam(self, state, time, parameters):
if state is None or time is None:
raise InputError("Have to input both state and time")
if parameters is not None:
self.parameters = parameters
elif self._parameters is None:
if self.num_param == 0:
pass
else:
raise InputError("Have not set the parameters yet")
if isinstance(state, list):
eval_param = state + [time]
elif hasattr(state, '__iter__'):
eval_param = list(state) + [time]
else:
eval_param = [state] + [time]
return eval_param + self._paramValue
| gpl-2.0 |
terkkila/scikit-learn | sklearn/cluster/mean_shift_.py | 106 | 14056 | """Mean shift clustering algorithm.
Mean shift clustering aims to discover *blobs* in a smooth density of
samples. It is a centroid based algorithm, which works by updating candidates
for centroids to be the mean of the points within a given region. These
candidates are then filtered in a post-processing stage to eliminate
near-duplicates to form the final set of centroids.
Seeding is performed using a binning technique for scalability.
"""
# Authors: Conrad Lee <[email protected]>
# Alexandre Gramfort <[email protected]>
# Gael Varoquaux <[email protected]>
import numpy as np
import warnings
from collections import defaultdict
from ..externals import six
from ..utils.validation import check_is_fitted
from ..utils import extmath, check_random_state, gen_batches, check_array
from ..base import BaseEstimator, ClusterMixin
from ..neighbors import NearestNeighbors
from ..metrics.pairwise import pairwise_distances_argmin
def estimate_bandwidth(X, quantile=0.3, n_samples=None, random_state=0):
"""Estimate the bandwidth to use with the mean-shift algorithm.
Note that this function takes time at least quadratic in n_samples. For large
datasets, it is wise to set the n_samples parameter to a small value.
Parameters
----------
X : array-like, shape=[n_samples, n_features]
Input points.
quantile : float, default 0.3
should be between [0, 1]
0.5 means that the median of all pairwise distances is used.
n_samples : int, optional
The number of samples to use. If not given, all samples are used.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
Returns
-------
bandwidth : float
The bandwidth parameter.
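Examples
--------
A minimal illustration on a tiny synthetic array (the exact value
returned is not shown here):

>>> import numpy as np
>>> from sklearn.cluster import estimate_bandwidth
>>> X = np.array([[1., 1.], [1.5, 1.5], [5., 5.], [5.5, 5.5]])
>>> bandwidth = estimate_bandwidth(X, quantile=0.5)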
"""
random_state = check_random_state(random_state)
if n_samples is not None:
idx = random_state.permutation(X.shape[0])[:n_samples]
X = X[idx]
nbrs = NearestNeighbors(n_neighbors=int(X.shape[0] * quantile))
nbrs.fit(X)
bandwidth = 0.
for batch in gen_batches(len(X), 500):
d, _ = nbrs.kneighbors(X[batch, :], return_distance=True)
bandwidth += np.max(d, axis=1).sum()
return bandwidth / X.shape[0]
def mean_shift(X, bandwidth=None, seeds=None, bin_seeding=False,
min_bin_freq=1, cluster_all=True, max_iter=300,
max_iterations=None):
"""Perform mean shift clustering of data using a flat kernel.
Read more in the :ref:`User Guide <mean_shift>`.
Parameters
----------
X : array-like, shape=[n_samples, n_features]
Input data.
bandwidth : float, optional
Kernel bandwidth.
If bandwidth is not given, it is determined using a heuristic based on
the median of all pairwise distances. This will take quadratic time in
the number of samples. The sklearn.cluster.estimate_bandwidth function
can be used to do this more efficiently.
seeds : array-like, shape=[n_seeds, n_features] or None
Points used as initial kernel locations. If None and bin_seeding=False,
each data point is used as a seed. If None and bin_seeding=True,
see bin_seeding.
bin_seeding : boolean, default=False
If true, initial kernel locations are not locations of all
points, but rather the location of the discretized version of
points, where points are binned onto a grid whose coarseness
corresponds to the bandwidth. Setting this option to True will speed
up the algorithm because fewer seeds will be initialized.
Ignored if seeds argument is not None.
min_bin_freq : int, default=1
To speed up the algorithm, accept only those bins with at least
min_bin_freq points as seeds.
cluster_all : boolean, default True
If true, then all points are clustered, even those orphans that are
not within any kernel. Orphans are assigned to the nearest kernel.
If false, then orphans are given cluster label -1.
max_iter : int, default 300
Maximum number of iterations, per seed point, before the clustering
operation terminates (for that seed point) if it has not converged yet.
Returns
-------
cluster_centers : array, shape=[n_clusters, n_features]
Coordinates of cluster centers.
labels : array, shape=[n_samples]
Cluster labels for each point.
Notes
-----
See examples/cluster/plot_meanshift.py for an example.
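Examples
--------
A minimal usage sketch on toy data (the clustering obtained depends
on the bandwidth chosen):

>>> import numpy as np
>>> from sklearn.cluster import mean_shift
>>> X = np.array([[1., 1.], [2., 1.], [1., 0.], [4., 7.], [3., 5.], [3., 6.]])
>>> cluster_centers, labels = mean_shift(X, bandwidth=2)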
"""
# FIXME To be removed in 0.18
if max_iterations is not None:
warnings.warn("The `max_iterations` parameter has been renamed to "
"`max_iter` from version 0.16. The `max_iterations` "
"parameter will be removed in 0.18", DeprecationWarning)
max_iter = max_iterations
if bandwidth is None:
bandwidth = estimate_bandwidth(X)
elif bandwidth <= 0:
raise ValueError("bandwidth needs to be greater than zero or None, got %f" %
bandwidth)
if seeds is None:
if bin_seeding:
seeds = get_bin_seeds(X, bandwidth, min_bin_freq)
else:
seeds = X
n_samples, n_features = X.shape
stop_thresh = 1e-3 * bandwidth # when mean has converged
center_intensity_dict = {}
nbrs = NearestNeighbors(radius=bandwidth).fit(X)
# For each seed, climb gradient until convergence or max_iter
for my_mean in seeds:
completed_iterations = 0
while True:
# Find mean of points within bandwidth
i_nbrs = nbrs.radius_neighbors([my_mean], bandwidth,
return_distance=False)[0]
points_within = X[i_nbrs]
if len(points_within) == 0:
break # Depending on seeding strategy this condition may occur
my_old_mean = my_mean # save the old mean
my_mean = np.mean(points_within, axis=0)
# If converged or at max_iter, adds the cluster
if (extmath.norm(my_mean - my_old_mean) < stop_thresh or
completed_iterations == max_iter):
center_intensity_dict[tuple(my_mean)] = len(points_within)
break
completed_iterations += 1
if not center_intensity_dict:
# nothing near seeds
raise ValueError("No point was within bandwidth=%f of any seed."
" Try a different seeding strategy or increase the bandwidth."
% bandwidth)
# POST PROCESSING: remove near duplicate points
# If the distance between two kernels is less than the bandwidth,
# then we have to remove one because it is a duplicate. Remove the
# one with fewer points.
sorted_by_intensity = sorted(center_intensity_dict.items(),
key=lambda tup: tup[1], reverse=True)
sorted_centers = np.array([tup[0] for tup in sorted_by_intensity])
unique = np.ones(len(sorted_centers), dtype=np.bool)
nbrs = NearestNeighbors(radius=bandwidth).fit(sorted_centers)
for i, center in enumerate(sorted_centers):
if unique[i]:
neighbor_idxs = nbrs.radius_neighbors([center],
return_distance=False)[0]
unique[neighbor_idxs] = 0
unique[i] = 1 # leave the current point as unique
cluster_centers = sorted_centers[unique]
# ASSIGN LABELS: a point belongs to the cluster that it is closest to
nbrs = NearestNeighbors(n_neighbors=1).fit(cluster_centers)
labels = np.zeros(n_samples, dtype=np.int)
distances, idxs = nbrs.kneighbors(X)
if cluster_all:
labels = idxs.flatten()
else:
labels.fill(-1)
bool_selector = distances.flatten() <= bandwidth
labels[bool_selector] = idxs.flatten()[bool_selector]
return cluster_centers, labels
def get_bin_seeds(X, bin_size, min_bin_freq=1):
"""Finds seeds for mean_shift.
Finds seeds by first binning data onto a grid whose lines are
spaced bin_size apart, and then choosing those bins with at least
min_bin_freq points.
Parameters
----------
X : array-like, shape=[n_samples, n_features]
Input points, the same points that will be used in mean_shift.
bin_size : float
Controls the coarseness of the binning. Smaller values lead
to more seeding (which is computationally more expensive). If you're
not sure how to set this, set it to the value of the bandwidth used
in clustering.mean_shift.
min_bin_freq : integer, optional
Only bins with at least min_bin_freq will be selected as seeds.
Raising this value decreases the number of seeds found, which
makes mean_shift computationally cheaper.
Returns
-------
bin_seeds : array-like, shape=[n_samples, n_features]
Points used as initial kernel positions in clustering.mean_shift.
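Examples
--------
A small sketch on a toy array (which bins survive depends on
bin_size and min_bin_freq):

>>> import numpy as np
>>> from sklearn.cluster import get_bin_seeds
>>> X = np.array([[0., 0.], [0.1, 0.1], [5., 5.]])
>>> seeds = get_bin_seeds(X, bin_size=1, min_bin_freq=1)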
"""
# Bin points
bin_sizes = defaultdict(int)
for point in X:
binned_point = np.round(point / bin_size)
bin_sizes[tuple(binned_point)] += 1
# Select only those bins as seeds which have enough members
bin_seeds = np.array([point for point, freq in six.iteritems(bin_sizes) if
freq >= min_bin_freq], dtype=np.float32)
if len(bin_seeds) == len(X):
warnings.warn("Binning data failed with provided bin_size=%f, using data"
" points as seeds." % bin_size)
return X
bin_seeds = bin_seeds * bin_size
return bin_seeds
class MeanShift(BaseEstimator, ClusterMixin):
"""Mean shift clustering using a flat kernel.
Mean shift clustering aims to discover "blobs" in a smooth density of
samples. It is a centroid-based algorithm, which works by updating
candidates for centroids to be the mean of the points within a given
region. These candidates are then filtered in a post-processing stage to
eliminate near-duplicates to form the final set of centroids.
Seeding is performed using a binning technique for scalability.
Read more in the :ref:`User Guide <mean_shift>`.
Parameters
----------
bandwidth : float, optional
Bandwidth used in the RBF kernel.
If not given, the bandwidth is estimated using
sklearn.cluster.estimate_bandwidth; see the documentation for that
function for hints on scalability (see also the Notes, below).
seeds : array, shape=[n_samples, n_features], optional
Seeds used to initialize kernels. If not set,
the seeds are calculated by clustering.get_bin_seeds
with bandwidth as the grid size and default values for
other parameters.
bin_seeding : boolean, optional
If true, initial kernel locations are not locations of all
points, but rather the location of the discretized version of
points, where points are binned onto a grid whose coarseness
corresponds to the bandwidth. Setting this option to True will speed
up the algorithm because fewer seeds will be initialized.
default value: False
Ignored if seeds argument is not None.
min_bin_freq : int, optional
To speed up the algorithm, accept only those bins with at least
min_bin_freq points as seeds. If not defined, set to 1.
cluster_all : boolean, default True
If true, then all points are clustered, even those orphans that are
not within any kernel. Orphans are assigned to the nearest kernel.
If false, then orphans are given cluster label -1.
Attributes
----------
cluster_centers_ : array, [n_clusters, n_features]
Coordinates of cluster centers.
labels_ :
Labels of each point.
Notes
-----
Scalability:
Because this implementation uses a flat kernel and
a Ball Tree to look up members of each kernel, the complexity will
tend towards O(T*n*log(n)) in lower dimensions, with n the number of samples
and T the number of points. In higher dimensions the complexity will
tend towards O(T*n^2).
Scalability can be boosted by using fewer seeds, for example by using
a higher value of min_bin_freq in the get_bin_seeds function.
Note that the estimate_bandwidth function is much less scalable than the
mean shift algorithm and will be the bottleneck if it is used.
References
----------
Dorin Comaniciu and Peter Meer, "Mean Shift: A robust approach toward
feature space analysis". IEEE Transactions on Pattern Analysis and
Machine Intelligence. 2002. pp. 603-619.
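Examples
--------
A minimal usage sketch on toy data:

>>> import numpy as np
>>> from sklearn.cluster import MeanShift
>>> X = np.array([[1., 1.], [2., 1.], [1., 0.], [4., 7.], [3., 5.], [3., 6.]])
>>> clustering = MeanShift(bandwidth=2).fit(X)
>>> labels = clustering.labels_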
"""
def __init__(self, bandwidth=None, seeds=None, bin_seeding=False,
min_bin_freq=1, cluster_all=True):
self.bandwidth = bandwidth
self.seeds = seeds
self.bin_seeding = bin_seeding
self.cluster_all = cluster_all
self.min_bin_freq = min_bin_freq
def fit(self, X, y=None):
"""Perform clustering.
Parameters
-----------
X : array-like, shape=[n_samples, n_features]
Samples to cluster.
"""
X = check_array(X)
self.cluster_centers_, self.labels_ = \
mean_shift(X, bandwidth=self.bandwidth, seeds=self.seeds,
min_bin_freq=self.min_bin_freq,
bin_seeding=self.bin_seeding,
cluster_all=self.cluster_all)
return self
def predict(self, X):
"""Predict the closest cluster each sample in X belongs to.
Parameters
----------
X : {array-like, sparse matrix}, shape=[n_samples, n_features]
New data to predict.
Returns
-------
labels : array, shape [n_samples,]
Index of the cluster each sample belongs to.
"""
check_is_fitted(self, "cluster_centers_")
return pairwise_distances_argmin(X, self.cluster_centers_)
| bsd-3-clause |
pnedunuri/scikit-learn | sklearn/covariance/tests/test_covariance.py | 69 | 11116 | # Author: Alexandre Gramfort <[email protected]>
# Gael Varoquaux <[email protected]>
# Virgile Fritsch <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn import datasets
from sklearn.covariance import empirical_covariance, EmpiricalCovariance, \
ShrunkCovariance, shrunk_covariance, \
LedoitWolf, ledoit_wolf, ledoit_wolf_shrinkage, OAS, oas
X = datasets.load_diabetes().data
X_1d = X[:, 0]
n_samples, n_features = X.shape
def test_covariance():
# Tests Covariance module on a simple dataset.
# test covariance fit from data
cov = EmpiricalCovariance()
cov.fit(X)
emp_cov = empirical_covariance(X)
assert_array_almost_equal(emp_cov, cov.covariance_, 4)
assert_almost_equal(cov.error_norm(emp_cov), 0)
assert_almost_equal(
cov.error_norm(emp_cov, norm='spectral'), 0)
assert_almost_equal(
cov.error_norm(emp_cov, norm='frobenius'), 0)
assert_almost_equal(
cov.error_norm(emp_cov, scaling=False), 0)
assert_almost_equal(
cov.error_norm(emp_cov, squared=False), 0)
assert_raises(NotImplementedError,
cov.error_norm, emp_cov, norm='foo')
# Mahalanobis distances computation test
mahal_dist = cov.mahalanobis(X)
print(np.amin(mahal_dist), np.amax(mahal_dist))
assert(np.amin(mahal_dist) > 0)
# test with n_features = 1
X_1d = X[:, 0].reshape((-1, 1))
cov = EmpiricalCovariance()
cov.fit(X_1d)
assert_array_almost_equal(empirical_covariance(X_1d), cov.covariance_, 4)
assert_almost_equal(cov.error_norm(empirical_covariance(X_1d)), 0)
assert_almost_equal(
cov.error_norm(empirical_covariance(X_1d), norm='spectral'), 0)
# test with one sample
# Create X with 1 sample and 5 features
X_1sample = np.arange(5).reshape(1, 5)
cov = EmpiricalCovariance()
assert_warns(UserWarning, cov.fit, X_1sample)
assert_array_almost_equal(cov.covariance_,
np.zeros(shape=(5, 5), dtype=np.float64))
# test integer type
X_integer = np.asarray([[0, 1], [1, 0]])
result = np.asarray([[0.25, -0.25], [-0.25, 0.25]])
assert_array_almost_equal(empirical_covariance(X_integer), result)
# test centered case
cov = EmpiricalCovariance(assume_centered=True)
cov.fit(X)
assert_array_equal(cov.location_, np.zeros(X.shape[1]))
def test_shrunk_covariance():
# Tests ShrunkCovariance module on a simple dataset.
# compare shrunk covariance obtained from data and from MLE estimate
cov = ShrunkCovariance(shrinkage=0.5)
cov.fit(X)
assert_array_almost_equal(
shrunk_covariance(empirical_covariance(X), shrinkage=0.5),
cov.covariance_, 4)
# same test with shrinkage not provided
cov = ShrunkCovariance()
cov.fit(X)
assert_array_almost_equal(
shrunk_covariance(empirical_covariance(X)), cov.covariance_, 4)
# same test with shrinkage = 0 (<==> empirical_covariance)
cov = ShrunkCovariance(shrinkage=0.)
cov.fit(X)
assert_array_almost_equal(empirical_covariance(X), cov.covariance_, 4)
# test with n_features = 1
X_1d = X[:, 0].reshape((-1, 1))
cov = ShrunkCovariance(shrinkage=0.3)
cov.fit(X_1d)
assert_array_almost_equal(empirical_covariance(X_1d), cov.covariance_, 4)
# test shrinkage coeff on a simple data set (without saving precision)
cov = ShrunkCovariance(shrinkage=0.5, store_precision=False)
cov.fit(X)
assert(cov.precision_ is None)
def test_ledoit_wolf():
# Tests LedoitWolf module on a simple dataset.
# test shrinkage coeff on a simple data set
X_centered = X - X.mean(axis=0)
lw = LedoitWolf(assume_centered=True)
lw.fit(X_centered)
shrinkage_ = lw.shrinkage_
score_ = lw.score(X_centered)
assert_almost_equal(ledoit_wolf_shrinkage(X_centered,
assume_centered=True),
shrinkage_)
assert_almost_equal(ledoit_wolf_shrinkage(X_centered, assume_centered=True,
block_size=6),
shrinkage_)
# compare shrunk covariance obtained from data and from MLE estimate
lw_cov_from_mle, lw_shinkrage_from_mle = ledoit_wolf(X_centered,
assume_centered=True)
assert_array_almost_equal(lw_cov_from_mle, lw.covariance_, 4)
assert_almost_equal(lw_shinkrage_from_mle, lw.shrinkage_)
# compare estimates given by LW and ShrunkCovariance
scov = ShrunkCovariance(shrinkage=lw.shrinkage_, assume_centered=True)
scov.fit(X_centered)
assert_array_almost_equal(scov.covariance_, lw.covariance_, 4)
# test with n_features = 1
X_1d = X[:, 0].reshape((-1, 1))
lw = LedoitWolf(assume_centered=True)
lw.fit(X_1d)
lw_cov_from_mle, lw_shinkrage_from_mle = ledoit_wolf(X_1d,
assume_centered=True)
assert_array_almost_equal(lw_cov_from_mle, lw.covariance_, 4)
assert_almost_equal(lw_shinkrage_from_mle, lw.shrinkage_)
assert_array_almost_equal((X_1d ** 2).sum() / n_samples, lw.covariance_, 4)
# test shrinkage coeff on a simple data set (without saving precision)
lw = LedoitWolf(store_precision=False, assume_centered=True)
lw.fit(X_centered)
assert_almost_equal(lw.score(X_centered), score_, 4)
assert(lw.precision_ is None)
# Same tests without assuming centered data
# test shrinkage coeff on a simple data set
lw = LedoitWolf()
lw.fit(X)
assert_almost_equal(lw.shrinkage_, shrinkage_, 4)
assert_almost_equal(lw.shrinkage_, ledoit_wolf_shrinkage(X))
assert_almost_equal(lw.shrinkage_, ledoit_wolf(X)[1])
assert_almost_equal(lw.score(X), score_, 4)
# compare shrunk covariance obtained from data and from MLE estimate
lw_cov_from_mle, lw_shinkrage_from_mle = ledoit_wolf(X)
assert_array_almost_equal(lw_cov_from_mle, lw.covariance_, 4)
assert_almost_equal(lw_shinkrage_from_mle, lw.shrinkage_)
# compare estimates given by LW and ShrunkCovariance
scov = ShrunkCovariance(shrinkage=lw.shrinkage_)
scov.fit(X)
assert_array_almost_equal(scov.covariance_, lw.covariance_, 4)
# test with n_features = 1
X_1d = X[:, 0].reshape((-1, 1))
lw = LedoitWolf()
lw.fit(X_1d)
lw_cov_from_mle, lw_shinkrage_from_mle = ledoit_wolf(X_1d)
assert_array_almost_equal(lw_cov_from_mle, lw.covariance_, 4)
assert_almost_equal(lw_shinkrage_from_mle, lw.shrinkage_)
assert_array_almost_equal(empirical_covariance(X_1d), lw.covariance_, 4)
# test with one sample
# warning should be raised when using only 1 sample
X_1sample = np.arange(5).reshape(1, 5)
lw = LedoitWolf()
assert_warns(UserWarning, lw.fit, X_1sample)
assert_array_almost_equal(lw.covariance_,
np.zeros(shape=(5, 5), dtype=np.float64))
# test shrinkage coeff on a simple data set (without saving precision)
lw = LedoitWolf(store_precision=False)
lw.fit(X)
assert_almost_equal(lw.score(X), score_, 4)
assert(lw.precision_ is None)
def test_ledoit_wolf_large():
# test that ledoit_wolf doesn't error on data that is wider than block_size
rng = np.random.RandomState(0)
# use a number of features that is larger than the block-size
X = rng.normal(size=(10, 20))
lw = LedoitWolf(block_size=10).fit(X)
# check that covariance is about diagonal (random normal noise)
assert_almost_equal(lw.covariance_, np.eye(20), 0)
cov = lw.covariance_
# check that the result is consistent with not splitting data into blocks.
lw = LedoitWolf(block_size=25).fit(X)
assert_almost_equal(lw.covariance_, cov)
def test_oas():
# Tests OAS module on a simple dataset.
# test shrinkage coeff on a simple data set
X_centered = X - X.mean(axis=0)
oa = OAS(assume_centered=True)
oa.fit(X_centered)
shrinkage_ = oa.shrinkage_
score_ = oa.score(X_centered)
# compare shrunk covariance obtained from data and from MLE estimate
oa_cov_from_mle, oa_shinkrage_from_mle = oas(X_centered,
assume_centered=True)
assert_array_almost_equal(oa_cov_from_mle, oa.covariance_, 4)
assert_almost_equal(oa_shinkrage_from_mle, oa.shrinkage_)
# compare estimates given by OAS and ShrunkCovariance
scov = ShrunkCovariance(shrinkage=oa.shrinkage_, assume_centered=True)
scov.fit(X_centered)
assert_array_almost_equal(scov.covariance_, oa.covariance_, 4)
# test with n_features = 1
X_1d = X[:, 0:1]
oa = OAS(assume_centered=True)
oa.fit(X_1d)
oa_cov_from_mle, oa_shinkrage_from_mle = oas(X_1d, assume_centered=True)
assert_array_almost_equal(oa_cov_from_mle, oa.covariance_, 4)
assert_almost_equal(oa_shinkrage_from_mle, oa.shrinkage_)
assert_array_almost_equal((X_1d ** 2).sum() / n_samples, oa.covariance_, 4)
# test shrinkage coeff on a simple data set (without saving precision)
oa = OAS(store_precision=False, assume_centered=True)
oa.fit(X_centered)
assert_almost_equal(oa.score(X_centered), score_, 4)
assert(oa.precision_ is None)
# Same tests without assuming centered data--------------------------------
# test shrinkage coeff on a simple data set
oa = OAS()
oa.fit(X)
assert_almost_equal(oa.shrinkage_, shrinkage_, 4)
assert_almost_equal(oa.score(X), score_, 4)
# compare shrunk covariance obtained from data and from MLE estimate
oa_cov_from_mle, oa_shinkrage_from_mle = oas(X)
assert_array_almost_equal(oa_cov_from_mle, oa.covariance_, 4)
assert_almost_equal(oa_shinkrage_from_mle, oa.shrinkage_)
# compare estimates given by OAS and ShrunkCovariance
scov = ShrunkCovariance(shrinkage=oa.shrinkage_)
scov.fit(X)
assert_array_almost_equal(scov.covariance_, oa.covariance_, 4)
# test with n_features = 1
X_1d = X[:, 0].reshape((-1, 1))
oa = OAS()
oa.fit(X_1d)
oa_cov_from_mle, oa_shinkrage_from_mle = oas(X_1d)
assert_array_almost_equal(oa_cov_from_mle, oa.covariance_, 4)
assert_almost_equal(oa_shinkrage_from_mle, oa.shrinkage_)
assert_array_almost_equal(empirical_covariance(X_1d), oa.covariance_, 4)
# test with one sample
# warning should be raised when using only 1 sample
X_1sample = np.arange(5).reshape(1, 5)
oa = OAS()
assert_warns(UserWarning, oa.fit, X_1sample)
assert_array_almost_equal(oa.covariance_,
np.zeros(shape=(5, 5), dtype=np.float64))
# test shrinkage coeff on a simple data set (without saving precision)
oa = OAS(store_precision=False)
oa.fit(X)
assert_almost_equal(oa.score(X), score_, 4)
assert(oa.precision_ is None)
| bsd-3-clause |
christinahedges/PyKE | setup.py | 2 | 3141 | #!/usr/bin/env python
import os
import sys
from setuptools import setup
# Prepare and send a new release to PyPI
if "release" in sys.argv[-1]:
os.system("python setup.py sdist")
os.system("twine upload dist/*")
os.system("rm -rf dist/pyketools*")
sys.exit()
# Load the __version__ variable without importing the package already
exec(open('pyke/version.py').read())
entry_points = {'console_scripts': [
'kepbls = pyke.kepbls:kepbls_main',
'kepclean = pyke.kepclean:kepclean_main',
'kepclip = pyke.kepclip:kepclip_main',
'kepconvert = pyke.kepconvert:kepconvert_main',
'kepcotrend = pyke.kepcotrend:kepcotrend_main',
'kepdetrend = pyke.kepdetrend:kepdetrend_main',
'kepdiffim = pyke.kepdiffim:kepdiffim_main',
'kepdraw = pyke.kepdraw:kepdraw_main',
'kepdynamic = pyke.kepdynamic:kepdynamic_main',
'kepextract = pyke.kepextract:kepextract_main',
'kepffi = pyke.kepffi:kepffi_main',
'kepfilter = pyke.kepfilter:kepfilter_main',
'kepflatten = pyke.kepflatten:kepflatten_main',
'kepfold = pyke.kepfold:kepfold_main',
'kepperiodogram = pyke.kepperiodogram:kepperiodogram_main',
'kephead = pyke.kephead:kephead_main',
'kepimages = pyke.kepimages:kepimages_main',
'kepmask = pyke.kepmask:kepmask_main',
'kepoutlier = pyke.kepoutlier:kepoutlier_main',
'keppca = pyke.keppca:keppca_main',
'keppixseries = pyke.keppixseries:keppixseries_main',
'kepprf = pyke.kepprf:kepprf_main',
'kepprfphot = pyke.kepprfphot:kepprfphot_main',
'keprange = pyke.keprange:keprange_main',
'kepsff = pyke.kepsff:kepsff_main',
'kepsmooth = pyke.kepsmooth:kepsmooth_main',
'kepstddev = pyke.kepstddev:kepstddev_main',
'kepstitch = pyke.kepstitch:kepstitch_main',
'keptimefix = pyke.keptimefix:keptimefix_main',
'keptrial = pyke.keptrial:keptrial_main',
'keptrim = pyke.keptrim:keptrim_main',
'kepwindow = pyke.kepwindow:kepwindow_main',
]}
setup(name='pyketools',
version=__version__,
description="Tools to inspect and analyze the pixels and lightcurves "
"obtained by NASA's Kepler, K2, and TESS space telescopes.",
long_description=open('README.rst').read(),
author='KeplerGO',
author_email='[email protected]',
license='MIT',
packages=['pyke'],
install_requires=['numpy>=1.11', 'astropy>=1.3', 'scipy>=0.17.0',
'matplotlib>=1.5.3', 'tqdm', 'oktopus', 'bs4',
'requests'],
setup_requires=['pytest-runner'],
tests_require=['pytest', 'pytest-cov'],
entry_points=entry_points,
include_package_data=True,
classifiers=[
"Development Status :: 5 - Production/Stable",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Intended Audience :: Science/Research",
"Topic :: Scientific/Engineering :: Astronomy",
],
)
| mit |
shangwuhencc/scikit-learn | sklearn/datasets/tests/test_base.py | 205 | 5878 | import os
import shutil
import tempfile
import warnings
import nose
import numpy
from pickle import loads
from pickle import dumps
from sklearn.datasets import get_data_home
from sklearn.datasets import clear_data_home
from sklearn.datasets import load_files
from sklearn.datasets import load_sample_images
from sklearn.datasets import load_sample_image
from sklearn.datasets import load_digits
from sklearn.datasets import load_diabetes
from sklearn.datasets import load_linnerud
from sklearn.datasets import load_iris
from sklearn.datasets import load_boston
from sklearn.datasets.base import Bunch
from sklearn.externals.six import b, u
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
DATA_HOME = tempfile.mkdtemp(prefix="scikit_learn_data_home_test_")
LOAD_FILES_ROOT = tempfile.mkdtemp(prefix="scikit_learn_load_files_test_")
TEST_CATEGORY_DIR1 = ""
TEST_CATEGORY_DIR2 = ""
def _remove_dir(path):
if os.path.isdir(path):
shutil.rmtree(path)
def teardown_module():
"""Test fixture (clean up) run once after all tests of this module"""
for path in [DATA_HOME, LOAD_FILES_ROOT]:
_remove_dir(path)
def setup_load_files():
global TEST_CATEGORY_DIR1
global TEST_CATEGORY_DIR2
TEST_CATEGORY_DIR1 = tempfile.mkdtemp(dir=LOAD_FILES_ROOT)
TEST_CATEGORY_DIR2 = tempfile.mkdtemp(dir=LOAD_FILES_ROOT)
sample_file = tempfile.NamedTemporaryFile(dir=TEST_CATEGORY_DIR1,
delete=False)
sample_file.write(b("Hello World!\n"))
sample_file.close()
def teardown_load_files():
_remove_dir(TEST_CATEGORY_DIR1)
_remove_dir(TEST_CATEGORY_DIR2)
def test_data_home():
# get_data_home will point to a pre-existing folder
data_home = get_data_home(data_home=DATA_HOME)
assert_equal(data_home, DATA_HOME)
assert_true(os.path.exists(data_home))
# clear_data_home will delete both the content and the folder it-self
clear_data_home(data_home=data_home)
assert_false(os.path.exists(data_home))
# if the folder is missing it will be created again
data_home = get_data_home(data_home=DATA_HOME)
assert_true(os.path.exists(data_home))
def test_default_empty_load_files():
res = load_files(LOAD_FILES_ROOT)
assert_equal(len(res.filenames), 0)
assert_equal(len(res.target_names), 0)
assert_equal(res.DESCR, None)
@nose.tools.with_setup(setup_load_files, teardown_load_files)
def test_default_load_files():
res = load_files(LOAD_FILES_ROOT)
assert_equal(len(res.filenames), 1)
assert_equal(len(res.target_names), 2)
assert_equal(res.DESCR, None)
assert_equal(res.data, [b("Hello World!\n")])
@nose.tools.with_setup(setup_load_files, teardown_load_files)
def test_load_files_w_categories_desc_and_encoding():
category = os.path.abspath(TEST_CATEGORY_DIR1).split('/').pop()
res = load_files(LOAD_FILES_ROOT, description="test",
categories=category, encoding="utf-8")
assert_equal(len(res.filenames), 1)
assert_equal(len(res.target_names), 1)
assert_equal(res.DESCR, "test")
assert_equal(res.data, [u("Hello World!\n")])
@nose.tools.with_setup(setup_load_files, teardown_load_files)
def test_load_files_wo_load_content():
res = load_files(LOAD_FILES_ROOT, load_content=False)
assert_equal(len(res.filenames), 1)
assert_equal(len(res.target_names), 2)
assert_equal(res.DESCR, None)
assert_equal(res.get('data'), None)
def test_load_sample_images():
try:
res = load_sample_images()
assert_equal(len(res.images), 2)
assert_equal(len(res.filenames), 2)
assert_true(res.DESCR)
except ImportError:
warnings.warn("Could not load sample images, PIL is not available.")
def test_load_digits():
digits = load_digits()
assert_equal(digits.data.shape, (1797, 64))
assert_equal(numpy.unique(digits.target).size, 10)
def test_load_digits_n_class_lt_10():
digits = load_digits(9)
assert_equal(digits.data.shape, (1617, 64))
assert_equal(numpy.unique(digits.target).size, 9)
def test_load_sample_image():
try:
china = load_sample_image('china.jpg')
assert_equal(china.dtype, 'uint8')
assert_equal(china.shape, (427, 640, 3))
except ImportError:
warnings.warn("Could not load sample images, PIL is not available.")
def test_load_missing_sample_image_error():
have_PIL = True
try:
try:
from scipy.misc import imread
except ImportError:
from scipy.misc.pilutil import imread
except ImportError:
have_PIL = False
if have_PIL:
assert_raises(AttributeError, load_sample_image,
'blop.jpg')
else:
warnings.warn("Could not load sample images, PIL is not available.")
def test_load_diabetes():
res = load_diabetes()
assert_equal(res.data.shape, (442, 10))
assert_true(res.target.size, 442)
def test_load_linnerud():
res = load_linnerud()
assert_equal(res.data.shape, (20, 3))
assert_equal(res.target.shape, (20, 3))
assert_equal(len(res.target_names), 3)
assert_true(res.DESCR)
def test_load_iris():
res = load_iris()
assert_equal(res.data.shape, (150, 4))
assert_equal(res.target.size, 150)
assert_equal(res.target_names.size, 3)
assert_true(res.DESCR)
def test_load_boston():
res = load_boston()
assert_equal(res.data.shape, (506, 13))
assert_equal(res.target.size, 506)
assert_equal(res.feature_names.size, 13)
assert_true(res.DESCR)
def test_loads_dumps_bunch():
bunch = Bunch(x="x")
bunch_from_pkl = loads(dumps(bunch))
bunch_from_pkl.x = "y"
assert_equal(bunch_from_pkl['x'], bunch_from_pkl.x)
| bsd-3-clause |
google-research/google-research | ravens/ravens/utils.py | 1 | 22236 | # coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#!/usr/bin/env python
"""Miscellaneous utilities."""
import cv2
import matplotlib
import matplotlib.pyplot as plt
import meshcat
import meshcat.geometry as g
import meshcat.transformations as mtf
import numpy as np
import pybullet as p
import transformations
#-----------------------------------------------------------------------------
# HEIGHTMAP UTILS
#-----------------------------------------------------------------------------
def get_heightmap(points, colors, bounds, pixel_size):
"""Get top-down (z-axis) orthographic heightmap image from 3D pointcloud.
Args:
points: HxWx3 float array of 3D points in world coordinates.
colors: HxWx3 uint8 array of values in range 0-255 aligned with points.
bounds: 3x2 float array of values (rows: X,Y,Z; columns: min,max) defining
region in 3D space to generate heightmap in world coordinates.
pixel_size: float defining size of each pixel in meters.
Returns:
heightmap: HxW float array of height (from lower z-bound) in meters.
colormap: HxWx3 uint8 array of backprojected color aligned with heightmap.
"""
width = int(np.round((bounds[0, 1] - bounds[0, 0]) / pixel_size))
height = int(np.round((bounds[1, 1] - bounds[1, 0]) / pixel_size))
heightmap = np.zeros((height, width), dtype=np.float32)
colormap = np.zeros((height, width, colors.shape[-1]), dtype=np.uint8)
# Filter out 3D points that are outside of the predefined bounds.
ix = (points[Ellipsis, 0] >= bounds[0, 0]) & (points[Ellipsis, 0] < bounds[0, 1])
iy = (points[Ellipsis, 1] >= bounds[1, 0]) & (points[Ellipsis, 1] < bounds[1, 1])
iz = (points[Ellipsis, 2] >= bounds[2, 0]) & (points[Ellipsis, 2] < bounds[2, 1])
valid = ix & iy & iz
points = points[valid]
colors = colors[valid]
# Sort 3D points by z-value, which works with array assignment to simulate
# z-buffering for rendering the heightmap image.
iz = np.argsort(points[:, -1])
points, colors = points[iz], colors[iz]
px = np.int32(np.floor((points[:, 0] - bounds[0, 0]) / pixel_size))
py = np.int32(np.floor((points[:, 1] - bounds[1, 0]) / pixel_size))
px = np.clip(px, 0, width - 1)
py = np.clip(py, 0, height - 1)
heightmap[py, px] = points[:, 2] - bounds[2, 0]
for c in range(colors.shape[-1]):
colormap[py, px, c] = colors[:, c]
return heightmap, colormap
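# Illustrative sketch (not part of the original module): build a heightmap from a
# tiny hand-made point cloud. The bounds and pixel size below are assumed values
# chosen only to keep the example small.
def _example_get_heightmap():
  points = np.float32([[[0.05, 0.05, 0.02], [0.15, 0.05, 0.04]],
                       [[0.05, 0.15, 0.06], [0.15, 0.15, 0.08]]])  # 2x2x3 points
  colors = np.uint8([[[255, 0, 0], [0, 255, 0]],
                     [[0, 0, 255], [255, 255, 0]]])                # 2x2x3 colors
  bounds = np.float32([[0, 0.2], [0, 0.2], [0, 0.1]])              # X, Y, Z ranges
  heightmap, colormap = get_heightmap(points, colors, bounds, pixel_size=0.1)
  # A 0.2 m x 0.2 m region at 0.1 m/pixel gives a 2x2 heightmap.
  assert heightmap.shape == (2, 2) and colormap.shape == (2, 2, 3)
  return heightmap, colormap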
def get_pointcloud(depth, intrinsics):
"""Get 3D pointcloud from perspective depth image.
Args:
depth: HxW float array of perspective depth in meters.
intrinsics: 3x3 float array of camera intrinsics matrix.
Returns:
points: HxWx3 float array of 3D points in camera coordinates.
"""
height, width = depth.shape
xlin = np.linspace(0, width - 1, width)
ylin = np.linspace(0, height - 1, height)
px, py = np.meshgrid(xlin, ylin)
px = (px - intrinsics[0, 2]) * (depth / intrinsics[0, 0])
py = (py - intrinsics[1, 2]) * (depth / intrinsics[1, 1])
points = np.float32([px, py, depth]).transpose(1, 2, 0)
return points
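# Illustrative sketch (not part of the original module): back-project a flat depth
# image through an assumed pinhole intrinsics matrix (fx=fy=1, cx=cy=1).
def _example_get_pointcloud():
  depth = np.ones((3, 3), dtype=np.float32)  # every pixel 1 m deep
  intrinsics = np.float32([[1, 0, 1], [0, 1, 1], [0, 0, 1]])
  points = get_pointcloud(depth, intrinsics)
  assert points.shape == (3, 3, 3)  # HxWx3 points in camera coordinates
  # The pixel at the principal point maps to x = y = 0 in the camera frame.
  assert points[1, 1, 0] == 0 and points[1, 1, 1] == 0
  return points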
def transform_pointcloud(points, transform):
"""Apply rigid transformation to 3D pointcloud.
Args:
points: HxWx3 float array of 3D points in camera coordinates.
transform: 4x4 float array representing a rigid transformation matrix.
Returns:
points: HxWx3 float array of transformed 3D points.
"""
padding = ((0, 0), (0, 0), (0, 1))
homogen_points = np.pad(points.copy(), padding,
'constant', constant_values=1)
for i in range(3):
points[Ellipsis, i] = np.sum(transform[i, :] * homogen_points, axis=-1)
return points
def reconstruct_heightmaps(color, depth, configs, bounds, pixel_size):
"""Reconstruct top-down heightmap views from multiple 3D pointclouds."""
heightmaps, colormaps = [], []
for color, depth, config in zip(color, depth, configs):
intrinsics = np.array(config['intrinsics']).reshape(3, 3)
xyz = get_pointcloud(depth, intrinsics)
position = np.array(config['position']).reshape(3, 1)
rotation = p.getMatrixFromQuaternion(config['rotation'])
rotation = np.array(rotation).reshape(3, 3)
transform = np.eye(4)
transform[:3, :] = np.hstack((rotation, position))
xyz = transform_pointcloud(xyz, transform)
heightmap, colormap = get_heightmap(xyz, color, bounds, pixel_size)
heightmaps.append(heightmap)
colormaps.append(colormap)
return heightmaps, colormaps
def pix_to_xyz(pixel, height, bounds, pixel_size, skip_height=False):
"""Convert from pixel location on heightmap to 3D position."""
u, v = pixel
x = bounds[0, 0] + v * pixel_size
y = bounds[1, 0] + u * pixel_size
if not skip_height:
z = bounds[2, 0] + height[u, v]
else:
z = 0.0
return (x, y, z)
def xyz_to_pix(position, bounds, pixel_size):
"""Convert from 3D position to pixel location on heightmap."""
u = int(np.round((position[1] - bounds[1, 0]) / pixel_size))
v = int(np.round((position[0] - bounds[0, 0]) / pixel_size))
return (u, v)
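# Illustrative sketch (not part of the original module): xyz_to_pix() inverts the
# x/y part of pix_to_xyz() for pixels on the grid (bounds and pixel size are
# assumed values).
def _example_pix_xyz_roundtrip():
  bounds = np.float32([[0, 1], [0, 1], [0, 1]])
  heightmap = np.zeros((10, 10), dtype=np.float32)  # flat scene
  x, y, z = pix_to_xyz((3, 7), heightmap, bounds, pixel_size=0.1)
  assert xyz_to_pix((x, y, z), bounds, pixel_size=0.1) == (3, 7)
  return (x, y, z)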
def unproject_vectorized(uv_coordinates, depth_values,
intrinsic,
distortion):
"""Vectorized version of unproject(), for N points.
Args:
uv_coordinates: pixel coordinates to unproject of shape (n, 2).
depth_values: depth values corresponding index-wise to the uv_coordinates of
shape (n).
intrinsic: array of shape (3, 3). This is typically the return value of
intrinsics_to_matrix.
distortion: camera distortion parameters of shape (5,).
Returns:
xyz coordinates in camera frame of shape (n, 3).
"""
cam_mtx = intrinsic # shape [3, 3]
cam_dist = np.array(distortion) # shape [5]
# shape of points_undistorted is [N, 2] after the squeeze().
points_undistorted = cv2.undistortPoints(
uv_coordinates.reshape((-1, 1, 2)), cam_mtx, cam_dist).squeeze()
x = points_undistorted[:, 0] * depth_values
y = points_undistorted[:, 1] * depth_values
xyz = np.vstack((x, y, depth_values)).T
return xyz
def unproject_depth_vectorized(im_depth, depth_dist,
camera_mtx,
camera_dist):
"""Unproject depth image into 3D point cloud, using calibration.
Args:
im_depth: raw depth image, pre-calibration of shape (height, width).
depth_dist: depth distortion parameters of shape (8,)
camera_mtx: intrinsics matrix of shape (3, 3). This is typically the return
value of intrinsics_to_matrix.
camera_dist: camera distortion parameters shape (5,).
Returns:
    numpy array of shape [H*W, 3]; each row is an xyz coordinate.
"""
h, w = im_depth.shape
# shape of each u_map, v_map is [H, W].
u_map, v_map = np.meshgrid(np.linspace(
0, w - 1, w), np.linspace(0, h - 1, h))
adjusted_depth = depth_dist[0] + im_depth * depth_dist[1]
# shape after stack is [N, 2], where N = H * W.
uv_coordinates = np.stack((u_map.reshape(-1), v_map.reshape(-1)), axis=-1)
return unproject_vectorized(uv_coordinates, adjusted_depth.reshape(-1),
camera_mtx, camera_dist)
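# Illustrative sketch (not part of the original module): unproject a 2x2 depth
# image with an assumed intrinsics matrix, no lens distortion, and an identity
# depth calibration (offset 0, scale 1).
def _example_unproject_depth_vectorized():
  im_depth = np.full((2, 2), 2.0, dtype=np.float32)
  camera_mtx = np.float32([[1, 0, 0.5], [0, 1, 0.5], [0, 0, 1]])
  depth_dist = np.float32([0, 1, 0, 0, 0, 0, 0, 0])
  xyz = unproject_depth_vectorized(im_depth, depth_dist, camera_mtx, np.zeros(5))
  assert xyz.shape == (4, 3)  # one 3D point per pixel, one point per row
  return xyz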
#-----------------------------------------------------------------------------
# MATH UTILS
#-----------------------------------------------------------------------------
def sample_distribution(prob, n_samples=1):
"""Sample data point from a custom distribution."""
flat_prob = np.ndarray.flatten(prob) / np.sum(prob)
rand_ind = np.random.choice(
np.arange(len(flat_prob)), n_samples, p=flat_prob, replace=False)
rand_ind_coords = np.array(np.unravel_index(rand_ind, prob.shape)).T
return np.int32(rand_ind_coords.squeeze())
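# Illustrative sketch (not part of the original module): draw one index from a
# distribution whose mass sits entirely on a single cell.
def _example_sample_distribution():
  prob = np.zeros((3, 3))
  prob[2, 1] = 1.0  # all probability mass at row 2, column 1
  sample = sample_distribution(prob, n_samples=1)
  assert tuple(sample) == (2, 1)
  return sample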
#-------------------------------------------------------------------------
# Transformation Helper Functions
#-------------------------------------------------------------------------
def invert(pose):
return p.invertTransform(pose[0], pose[1])
def multiply(pose0, pose1):
return p.multiplyTransforms(pose0[0], pose0[1], pose1[0], pose1[1])
def apply(pose, position):
position = np.float32(position)
position_shape = position.shape
position = np.float32(position).reshape(3, -1)
rotation = np.float32(p.getMatrixFromQuaternion(pose[1])).reshape(3, 3)
translation = np.float32(pose[0]).reshape(3, 1)
position = rotation @ position + translation
return tuple(position.reshape(position_shape))
def eulerXYZ_to_quatXYZW(rotation): # pylint: disable=invalid-name
"""Abstraction for converting from a 3-parameter rotation to quaterion.
This will help us easily switch which rotation parameterization we use.
Quaternion should be in xyzw order for pybullet.
Args:
rotation: a 3-parameter rotation, in xyz order tuple of 3 floats
Returns:
quaternion, in xyzw order, tuple of 4 floats
"""
euler_zxy = (rotation[2], rotation[0], rotation[1])
quaternion_wxyz = transformations.quaternion_from_euler(
*euler_zxy, axes='szxy')
q = quaternion_wxyz
quaternion_xyzw = (q[1], q[2], q[3], q[0])
return quaternion_xyzw
def quatXYZW_to_eulerXYZ(quaternion_xyzw): # pylint: disable=invalid-name
"""Abstraction for converting from quaternion to a 3-parameter toation.
This will help us easily switch which rotation parameterization we use.
Quaternion should be in xyzw order for pybullet.
Args:
quaternion_xyzw: in xyzw order, tuple of 4 floats
Returns:
rotation: a 3-parameter rotation, in xyz order, tuple of 3 floats
"""
q = quaternion_xyzw
quaternion_wxyz = np.array([q[3], q[0], q[1], q[2]])
euler_zxy = transformations.euler_from_quaternion(
quaternion_wxyz, axes='szxy')
euler_xyz = (euler_zxy[1], euler_zxy[2], euler_zxy[0])
return euler_xyz
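# Illustrative sketch (not part of the original module): the two conversions above
# are inverses of each other (up to floating point error) away from gimbal lock.
def _example_euler_quat_roundtrip():
  euler_xyz = (0.1, -0.2, 0.3)
  quaternion_xyzw = eulerXYZ_to_quatXYZW(euler_xyz)
  recovered = quatXYZW_to_eulerXYZ(quaternion_xyzw)
  assert np.allclose(recovered, euler_xyz, atol=1e-6)
  return quaternion_xyzw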
def apply_transform(transform_to_from, points_from):
r"""Transforms points (3D) into new frame.
Using transform_to_from notation.
Args:
transform_to_from: numpy.ndarray of shape [B,4,4], SE3
points_from: numpy.ndarray of shape [B,3,N]
Returns:
points_to: numpy.ndarray of shape [B,3,N]
"""
num_points = points_from.shape[-1]
# non-batched
if len(transform_to_from.shape) == 2:
ones = np.ones((1, num_points))
    # make each of these a homogeneous vector
points_from = np.vstack((points_from, ones)) # [4,N]
points_to = transform_to_from @ points_from # [4,N]
return points_to[0:3, :] # [3,N]
# batched
else:
assert len(transform_to_from.shape) == 3
batch_size = transform_to_from.shape[0]
    ones = np.ones((batch_size, 1, num_points))
    points_from = np.concatenate((points_from, ones), axis=1)
assert points_from.shape[1] == 4
points_to = transform_to_from @ points_from
return points_to[:, 0:3, :]
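# Illustrative sketch (not part of the original module): a pure translation applied
# to a single point in the non-batched [3, N] layout.
def _example_apply_transform():
  transform_to_from = np.eye(4)
  transform_to_from[0:3, 3] = [1.0, 2.0, 3.0]      # translate by (1, 2, 3)
  points_from = np.float32([[0.0], [0.0], [0.0]])  # one point at the origin, [3, 1]
  points_to = apply_transform(transform_to_from, points_from)
  assert np.allclose(points_to[:, 0], [1.0, 2.0, 3.0])
  return points_to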
#-----------------------------------------------------------------------------
# IMAGE UTILS
#-----------------------------------------------------------------------------
def preprocess(img):
"""Pre-process input (subtract mean, divide by std)."""
color_mean = 0.18877631
depth_mean = 0.00509261
color_std = 0.07276466
depth_std = 0.00903967
img[:, :, :3] = (img[:, :, :3] / 255 - color_mean) / color_std
img[:, :, 3:] = (img[:, :, 3:] - depth_mean) / depth_std
return img
def get_fused_heightmap(obs, configs, bounds, pix_size):
"""Reconstruct orthographic heightmaps with segmentation masks."""
heightmaps, colormaps = reconstruct_heightmaps(
obs['color'], obs['depth'], configs, bounds, pix_size)
colormaps = np.float32(colormaps)
heightmaps = np.float32(heightmaps)
# Fuse maps from different views.
valid = np.sum(colormaps, axis=3) > 0
repeat = np.sum(valid, axis=0)
repeat[repeat == 0] = 1
cmap = np.sum(colormaps, axis=0) / repeat[Ellipsis, None]
cmap = np.uint8(np.round(cmap))
hmap = np.max(heightmaps, axis=0) # Max to handle occlusions.
return cmap, hmap
def get_image_transform(theta, trans, pivot=(0, 0)):
"""Compute composite 2D rigid transformation matrix."""
# Get 2D rigid transformation matrix that rotates an image by theta (in
# radians) around pivot (in pixels) and translates by trans vector (in
# pixels)
pivot_t_image = np.array([[1., 0., -pivot[0]], [0., 1., -pivot[1]],
[0., 0., 1.]])
image_t_pivot = np.array([[1., 0., pivot[0]], [0., 1., pivot[1]],
[0., 0., 1.]])
transform = np.array([[np.cos(theta), -np.sin(theta), trans[0]],
[np.sin(theta), np.cos(theta), trans[1]], [0., 0., 1.]])
return np.dot(image_t_pivot, np.dot(transform, pivot_t_image))
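# Illustrative sketch (not part of the original module): a pure rotation about a
# pivot leaves the pivot itself fixed (the pivot value below is arbitrary).
def _example_get_image_transform():
  pivot = (10.0, 20.0)
  transform = get_image_transform(theta=np.pi / 2, trans=(0.0, 0.0), pivot=pivot)
  pivot_h = np.float32([pivot[0], pivot[1], 1.0])  # homogeneous pivot
  assert np.allclose(transform @ pivot_h, pivot_h, atol=1e-6)
  return transform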
def check_transform(image, pixel, transform):
"""Valid transform only if pixel locations are still in FoV after transform."""
new_pixel = np.flip(
np.int32(
np.round(
np.dot(transform,
np.float32([pixel[1], pixel[0],
1.]).reshape(3, 1))))[:2].squeeze())
valid = np.all(
new_pixel >= 0
) and new_pixel[0] < image.shape[0] and new_pixel[1] < image.shape[1]
return valid, new_pixel
def get_se3_from_image_transform(theta, trans, pivot, heightmap, bounds,
pixel_size):
"""Calculate SE3 from image transform."""
position_center = pix_to_xyz(
np.flip(np.int32(np.round(pivot))),
heightmap,
bounds,
pixel_size,
skip_height=False)
new_position_center = pix_to_xyz(
np.flip(np.int32(np.round(pivot + trans))),
heightmap,
bounds,
pixel_size,
skip_height=True)
# Don't look up the z height, it might get augmented out of frame
new_position_center = (new_position_center[0], new_position_center[1],
position_center[2])
delta_position = np.array(new_position_center) - np.array(position_center)
t_world_center = np.eye(4)
t_world_center[0:3, 3] = np.array(position_center)
t_centernew_center = np.eye(4)
euler_zxy = (-theta, 0, 0)
t_centernew_center[0:3, 0:3] = transformations.euler_matrix(
*euler_zxy, axes='szxy')[0:3, 0:3]
t_centernew_center_tonly = np.eye(4)
t_centernew_center_tonly[0:3, 3] = -delta_position
t_centernew_center = t_centernew_center @ t_centernew_center_tonly
t_world_centernew = t_world_center @ np.linalg.inv(t_centernew_center)
return t_world_center, t_world_centernew
def get_random_image_transform_params(image_size):
theta_sigma = 2 * np.pi / 6
theta = np.random.normal(0, theta_sigma)
trans_sigma = np.min(image_size) / 6
trans = np.random.normal(0, trans_sigma, size=2) # [x, y]
pivot = (image_size[1] / 2, image_size[0] / 2)
return theta, trans, pivot
def perturb(input_image, pixels, set_theta_zero=False):
"""Data augmentation on images."""
image_size = input_image.shape[:2]
# Compute random rigid transform.
while True:
theta, trans, pivot = get_random_image_transform_params(image_size)
if set_theta_zero:
theta = 0.
transform = get_image_transform(theta, trans, pivot)
transform_params = theta, trans, pivot
# Ensure pixels remain in the image after transform.
is_valid = True
new_pixels = []
new_rounded_pixels = []
for pixel in pixels:
pixel = np.float32([pixel[1], pixel[0], 1.]).reshape(3, 1)
rounded_pixel = np.int32(np.round(transform @ pixel))[:2].squeeze()
rounded_pixel = np.flip(rounded_pixel)
pixel = (transform @ pixel)[:2].squeeze()
pixel = np.flip(pixel)
in_fov_rounded = rounded_pixel[0] < image_size[0] and rounded_pixel[
1] < image_size[1]
in_fov = pixel[0] < image_size[0] and pixel[1] < image_size[1]
is_valid = is_valid and np.all(rounded_pixel >= 0) and np.all(
pixel >= 0) and in_fov_rounded and in_fov
new_pixels.append(pixel)
new_rounded_pixels.append(rounded_pixel)
if is_valid:
break
# Apply rigid transform to image and pixel labels.
input_image = cv2.warpAffine(
input_image,
transform[:2, :], (image_size[1], image_size[0]),
flags=cv2.INTER_NEAREST)
return input_image, new_pixels, new_rounded_pixels, transform_params
#-----------------------------------------------------------------------------
# PLOT UTILS
#-----------------------------------------------------------------------------
# Plot colors (Tableau palette).
COLORS = {
'blue': [078.0 / 255.0, 121.0 / 255.0, 167.0 / 255.0],
'red': [255.0 / 255.0, 087.0 / 255.0, 089.0 / 255.0],
'green': [089.0 / 255.0, 169.0 / 255.0, 079.0 / 255.0],
'orange': [242.0 / 255.0, 142.0 / 255.0, 043.0 / 255.0],
'yellow': [237.0 / 255.0, 201.0 / 255.0, 072.0 / 255.0],
'purple': [176.0 / 255.0, 122.0 / 255.0, 161.0 / 255.0],
'pink': [255.0 / 255.0, 157.0 / 255.0, 167.0 / 255.0],
'cyan': [118.0 / 255.0, 183.0 / 255.0, 178.0 / 255.0],
'brown': [156.0 / 255.0, 117.0 / 255.0, 095.0 / 255.0],
'gray': [186.0 / 255.0, 176.0 / 255.0, 172.0 / 255.0]
}
def plot(fname, # pylint: disable=dangerous-default-value
title,
ylabel,
xlabel,
data,
xlim=[-np.inf, 0],
xticks=None,
ylim=[np.inf, -np.inf],
show_std=True):
"""Plot frame data."""
# Data is a dictionary that maps experiment names to tuples with 3
# elements: x (size N array) and y (size N array) and y_std (size N array)
# Get data limits.
for name, (x, y, _) in data.items():
del name
y = np.array(y)
xlim[0] = max(xlim[0], np.min(x))
xlim[1] = max(xlim[1], np.max(x))
ylim[0] = min(ylim[0], np.min(y))
ylim[1] = max(ylim[1], np.max(y))
# Draw background.
plt.title(title, fontsize=14)
plt.ylim(ylim)
plt.ylabel(ylabel, fontsize=14)
plt.yticks(fontsize=14)
plt.xlim(xlim)
plt.xlabel(xlabel, fontsize=14)
plt.grid(True, linestyle='-', color=[0.8, 0.8, 0.8])
ax = plt.gca()
for axis in ['top', 'bottom', 'left', 'right']:
ax.spines[axis].set_color('#000000')
plt.rcParams.update({'font.size': 14})
plt.rcParams['mathtext.default'] = 'regular'
matplotlib.rcParams['pdf.fonttype'] = 42
matplotlib.rcParams['ps.fonttype'] = 42
# Draw data.
color_iter = 0
for name, (x, y, std) in data.items():
del name
x, y, std = np.float32(x), np.float32(y), np.float32(std)
upper = np.clip(y + std, ylim[0], ylim[1])
lower = np.clip(y - std, ylim[0], ylim[1])
color = COLORS[list(COLORS.keys())[color_iter]]
if show_std:
plt.fill_between(x, upper, lower, color=color, linewidth=0, alpha=0.3)
plt.plot(x, y, color=color, linewidth=2, marker='o', alpha=1.)
color_iter += 1
if xticks:
plt.xticks(ticks=range(len(xticks)), labels=xticks, fontsize=14)
else:
plt.xticks(fontsize=14)
plt.legend([name for name, _ in data.items()],
loc='lower right', fontsize=14)
plt.tight_layout()
plt.savefig(fname)
plt.clf()
#-----------------------------------------------------------------------------
# MESHCAT UTILS
#-----------------------------------------------------------------------------
def create_visualizer(clear=True):
print('Waiting for meshcat server... have you started a server?')
vis = meshcat.Visualizer(zmq_url='tcp://127.0.0.1:6000')
if clear:
vis.delete()
return vis
def make_frame(vis, name, h, radius, o=1.0):
"""Add a red-green-blue triad to the Meschat visualizer.
Args:
vis (MeshCat Visualizer): the visualizer
name (string): name for this frame (should be unique)
h (float): height of frame visualization
radius (float): radius of frame visualization
o (float): opacity
"""
vis[name]['x'].set_object(
g.Cylinder(height=h, radius=radius),
g.MeshLambertMaterial(color=0xff0000, reflectivity=0.8, opacity=o))
rotate_x = mtf.rotation_matrix(np.pi / 2.0, [0, 0, 1])
rotate_x[0, 3] = h / 2
vis[name]['x'].set_transform(rotate_x)
vis[name]['y'].set_object(
g.Cylinder(height=h, radius=radius),
g.MeshLambertMaterial(color=0x00ff00, reflectivity=0.8, opacity=o))
rotate_y = mtf.rotation_matrix(np.pi / 2.0, [0, 1, 0])
rotate_y[1, 3] = h / 2
vis[name]['y'].set_transform(rotate_y)
vis[name]['z'].set_object(
g.Cylinder(height=h, radius=radius),
g.MeshLambertMaterial(color=0x0000ff, reflectivity=0.8, opacity=o))
rotate_z = mtf.rotation_matrix(np.pi / 2.0, [1, 0, 0])
rotate_z[2, 3] = h / 2
vis[name]['z'].set_transform(rotate_z)
def meshcat_visualize(vis, obs, act, info):
"""Visualize data using meshcat."""
for key in sorted(info.keys()):
pose = info[key]
pick_transform = np.eye(4)
pick_transform[0:3, 3] = pose[0]
quaternion_wxyz = np.asarray(
[pose[1][3], pose[1][0], pose[1][1], pose[1][2]])
pick_transform[0:3, 0:3] = mtf.quaternion_matrix(quaternion_wxyz)[0:3, 0:3]
label = 'obj_' + str(key)
make_frame(vis, label, h=0.05, radius=0.0012, o=1.0)
vis[label].set_transform(pick_transform)
for cam_index in range(len(act['camera_config'])):
verts = unproject_depth_vectorized(
obs['depth'][cam_index], [0, 1],
np.array(act['camera_config'][cam_index]['intrinsics']).reshape(3, 3),
np.zeros(5))
# switch from [N,3] to [3,N]
verts = verts.T
cam_transform = np.eye(4)
cam_transform[0:3, 3] = act['camera_config'][cam_index]['position']
quaternion_xyzw = act['camera_config'][cam_index]['rotation']
quaternion_wxyz = np.asarray([
quaternion_xyzw[3], quaternion_xyzw[0], quaternion_xyzw[1],
quaternion_xyzw[2]
])
cam_transform[0:3, 0:3] = mtf.quaternion_matrix(quaternion_wxyz)[0:3, 0:3]
verts = apply_transform(cam_transform, verts)
colors = obs['color'][cam_index].reshape(-1, 3).T / 255.0
vis['pointclouds/' + str(cam_index)].set_object(
g.PointCloud(position=verts, color=colors))
| apache-2.0 |
michaelaye/planet4 | tests/old_test_clustering.py | 1 | 3094 | # setup
import tempfile
from pathlib import Path
import pandas as pd
import pkg_resources as pr
import pytest
from planet4 import clustering
with pr.resource_stream('planet4', 'data/test_db.csv') as f:
data = pd.read_csv(f)
# import warnings
# with warnings.catch_warnings():
# warnings.filterwarnings("ignore",
# category=DeprecationWarning)
#
@pytest.fixture(autouse=True, scope='module')
def tdir():
    _tdir = tempfile.TemporaryDirectory()
yield Path(_tdir.name)
# teardown
_tdir.cleanup()
imid1 = 'APF000012w'
imid2 = 'APF000012q'
imid1data = data[data.image_id == imid1]
imid2data = data[data.image_id == imid2]
fans1 = imid1data[imid1data.marking == 'fan']
blotches1 = imid1data[imid1data.marking == 'blotch']
fans2 = imid2data[imid2data.marking == 'fan']
blotches2 = imid2data[imid2data.marking == 'blotch']
# basic clustering manager
cm = clustering.ClusteringManager(dbname='ignore', output_dir=tdir)
def test_calc_fnotch():
actual = clustering.calc_fnotch(4, 4)
assert actual == 0.5
actual = clustering.calc_fnotch(4, 0)
assert actual == 1
actual = clustering.calc_fnotch(0, 4)
assert actual == 0
def test_dbscan_xy_blotch():
# using only x and y (or image_x,image_y)
coords = ['image_x', 'image_y']
X = blotches1[coords].values
dbscanner = clustering.DBScanner(X, min_samples=2)
assert dbscanner.n_clusters == 26
assert dbscanner.n_rejected == 25
def test_dbscan_xy_fan():
# using only x and y (or image_x,image_y)
coords = ['image_x', 'image_y']
X = fans1[coords].values
dbscanner = clustering.DBScanner(X, min_samples=2)
assert dbscanner.n_clusters == 7
assert dbscanner.n_rejected == 11
def test_dbscan_xy_angle_blotch():
coords = ['image_x', 'image_y', 'angle']
X = blotches1[coords].values
dbscanner = clustering.DBScanner(X, min_samples=2)
assert dbscanner.n_clusters == 35
assert dbscanner.n_rejected == 102
def test_dbscan_xy_angle_fan():
coords = ['image_x', 'image_y', 'angle']
X = fans1[coords].values
dbscanner = clustering.DBScanner(X, min_samples=2)
assert dbscanner.n_clusters == 6
assert dbscanner.n_rejected == 15
def test_clustering_basics():
cm.cluster_image_id(imid1, data=imid1data)
assert cm.n_classifications == 94
cm.cluster_image_id(imid2, data=imid2data)
assert cm.n_classifications == 121
for subdir in ['applied_cut_0.5', 'just_clustering']:
expected = tdir / subdir
assert expected.exists() and expected.is_dir()
def test_output_file_creation():
for marking in ['blotches', 'fans', 'fnotches']:
for ftype in ['.csv']:
expected = tdir / (imid1 + '_' + marking + ftype)
assert expected.exists()
for marking in ['blotches']:
for ftype in ['.csv']:
expected = tdir / (imid2 + '_' + marking + ftype)
if marking == 'blotches':
assert expected.exists()
else: # 12q,i.e. imdid2 only has blotches
assert not expected.exists()
| isc |
daodaoliang/neural-network-animation | matplotlib/tests/test_style.py | 10 | 1977 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
import os
import shutil
import tempfile
from contextlib import contextmanager
import matplotlib as mpl
from matplotlib import style
from matplotlib.style.core import USER_LIBRARY_PATHS, STYLE_EXTENSION
import six
PARAM = 'image.cmap'
VALUE = 'pink'
DUMMY_SETTINGS = {PARAM: VALUE}
@contextmanager
def temp_style(style_name, settings=None):
"""Context manager to create a style sheet in a temporary directory."""
    if settings is None:
        settings = DUMMY_SETTINGS
temp_file = '%s.%s' % (style_name, STYLE_EXTENSION)
# Write style settings to file in the temp directory.
tempdir = tempfile.mkdtemp()
with open(os.path.join(tempdir, temp_file), 'w') as f:
for k, v in six.iteritems(settings):
f.write('%s: %s' % (k, v))
# Add temp directory to style path and reload so we can access this style.
USER_LIBRARY_PATHS.append(tempdir)
style.reload_library()
try:
yield
finally:
shutil.rmtree(tempdir)
style.reload_library()
def test_available():
with temp_style('_test_', DUMMY_SETTINGS):
assert '_test_' in style.available
def test_use():
mpl.rcParams[PARAM] = 'gray'
with temp_style('test', DUMMY_SETTINGS):
with style.context('test'):
assert mpl.rcParams[PARAM] == VALUE
def test_use_url():
with temp_style('test', DUMMY_SETTINGS):
with style.context('https://gist.github.com/adrn/6590261/raw'):
assert mpl.rcParams['axes.facecolor'] == "#adeade"
def test_context():
mpl.rcParams[PARAM] = 'gray'
with temp_style('test', DUMMY_SETTINGS):
with style.context('test'):
assert mpl.rcParams[PARAM] == VALUE
# Check that this value is reset after the exiting the context.
assert mpl.rcParams[PARAM] == 'gray'
if __name__ == '__main__':
from numpy import testing
testing.run_module_suite()
| mit |
AlexisEidelman/Til | til/data/data/Cohort.py | 2 | 2269 | # -*- coding:utf-8 -*-
'''
Created on 13 December 2013
@author: a.eidelman
This program:
-
Input :
Output :
'''
from til.data.DataTil import DataTil, variables_til
import numpy as np
from pandas import DataFrame
class Cohort(DataTil):
"""
    The class that runs the work on the data.
    The class structure may not be necessary for now.
"""
def __init__(self, size=1000):
DataTil.__init__(self)
self.survey_date = 100*2009 + 1
self.survey_year = 2009
self.size = size
self.name = 'cohort'
def load(self):
print "création de l'importation des données"
size = self.size
for name_table in ['men','foy','ind']:
vars_int, vars_float = variables_til[name_table]
vars = ['id','period','pond'] + vars_int + vars_float
table = DataFrame(index=range(size), columns=vars)
for var in vars_int:
table[var] = 0
for var in vars_float:
table[var] = table[var].astype(float)
table['pond'] = 1.0
table['period'] = self.survey_date
table['id'] = range(size)
self.__setattr__(name_table, table)
print "fin de la créations des données"
def _output_name(self):
return 'Cohort_' + str(self.size) + '.h5'
def imputations(self):
#TODO: findet ?
self.ind['sexe'] = np.random.random_integers(0, 1, size = self.size)
self.ind['civilstate'] = 2
self.ind['workstate'] = 11
def links(self):
size = self.size
rg = range(size)
self.ind['men'] = rg
self.ind['foy'] = rg
self.ind[['pere','mere','conj']] = -1
self.foy['vous'] = rg
self.men['pref'] = rg
# special household
self.ind['men'] += 10
self.ind['foy'] += 10
self.foy['id'] += 10
self.men['id'] += 10
if __name__ == '__main__':
import time
start_t = time.time()
data = Cohort(1000)
data.load()
data.imputations()
data.links()
data.format_to_liam()
data.final_check()
data.store_to_liam()
| gpl-3.0 |
onecodex/onecodex | onecodex/analyses.py | 2 | 15702 | import six
import warnings
from onecodex.exceptions import OneCodexException
from onecodex.lib.enums import AbundanceMetric, Rank
from onecodex.viz import (
VizPCAMixin,
VizHeatmapMixin,
VizMetadataMixin,
VizDistanceMixin,
VizBargraphMixin,
)
class AnalysisMixin(
VizPCAMixin, VizHeatmapMixin, VizMetadataMixin, VizDistanceMixin, VizBargraphMixin
):
"""Contains methods for analyzing Classifications results.
Notes
-----
Three DataFrames are required by most methods: collated counts, collated metadata, and taxonomy.
This data is obtained from either a `ClassificationsDataFrame` or a `SampleCollection`. Both
classes use this mixin. `AnalysisMixin` pulls additional methods in from `onecodex.distance`,
`onecodex.taxonomy`, and `onecodex.viz`.
"""
def _get_auto_rank(self, rank):
"""Attempt to figure out what rank we should use for analyses."""
if rank == Rank.Auto:
# if we're an accessor for a ClassificationsDataFrame, use its _rank property
if self.__class__.__name__ == "OneCodexAccessor":
return self._rank
if AbundanceMetric.has_value(self._metric) or self._is_metagenomic:
return Rank.Species
else:
return Rank.Genus
else:
return rank
def _guess_normalized(self):
"""Return True if the collated counts in `self._results` appear to be normalized.
Notes
-----
It's possible that the _results df has already been normalized, which can cause some
methods to fail. This method lets us guess whether that's true and act accordingly.
"""
return (
getattr(self, "_normalized", False)
or AbundanceMetric.has_value(self._metric)
or bool((self._results.sum(axis=1).round(4) == 1.0).all())
) # noqa
def _metadata_fetch(self, metadata_fields, label=None):
"""Fetch and transform given metadata fields from `self.metadata`.
Takes a list of metadata fields, some of which can contain taxon names or taxon IDs, and
returns a DataFrame with transformed data that can be used for plotting.
Parameters
----------
metadata_fields : `list` of `string`
A list of metadata fields, taxon names, or taxon IDs to fetch and transform for display.
label : `string` or `callable`, optional
A metadata field (or function) used to label each analysis. If passing a function, a
dict containing the metadata for each analysis is passed as the first and only
positional argument. The callable function must return a string.
If this argument is not given, and "Label" is in `metadata_fields`, "Label" will be set
to the filename associated with an analysis.
Notes
-----
Taxon names and IDs are transformed into the relative abundances of those taxa within their
own rank. For example, 'Bacteroides' will return the relative abundances of 'Bacteroides'
among all taxa of rank genus. Taxon IDs are stored as strings in `ClassificationsDataFrame`
and are coerced to strings if integers are given.
Metadata fields are returned as is, from the `self.metadata` DataFrame. If multiple metadata
fields are specified in a tuple, their values are joined as strings separated by underscore.
Multiple metadata fields in a tuple must both be categorical. That is, a numerical field and
boolean can not be joined, or the result would be something like '87.4_True'.
Returns
-------
`pandas.DataFrame`
Columns are renamed (if applicable) metadata fields and rows are `Classifications.id`.
Elements are transformed values. Not all metadata fields will have been renamed, but will
be present in the below `dict` nonetheless.
`dict`
Keys are metadata fields and values are renamed metadata fields. This can be used to map
metadata fields which were passed to this function, to prettier names. For example, if
'bacteroid' is passed, it will be matched with the Bacteroides genus and renamed to
'Bacteroides (816)', which includes its taxon ID.
"""
import pandas as pd
help_metadata = ", ".join(self.metadata.keys())
magic_metadata = pd.DataFrame({"classification_id": self._results.index}).set_index(
"classification_id"
)
# if user passed label kwarg but didn't put "Label" in the fields, assume the user wants
# that field added
if label is not None and "Label" not in metadata_fields:
metadata_fields.append("Label")
# if we magically rename fields, keep track
magic_fields = {}
for f in set([f for f in metadata_fields if f]):
if isinstance(f, tuple):
# joined categorical metadata
for field in f:
if field not in self.metadata:
raise OneCodexException(
"Metric {} not found. Choose from: {}".format(field, help_metadata)
)
if not (
pd.api.types.is_bool_dtype(self.metadata[field])
or pd.api.types.is_categorical_dtype(self.metadata[field]) # noqa
or pd.api.types.is_object_dtype(self.metadata[field]) # noqa
):
raise OneCodexException(
"When specifying multiple metadata fields, all must be categorical"
)
# concatenate the columns together with underscores
composite_field = "_".join(f)
magic_metadata[composite_field] = ""
magic_metadata[composite_field] = (
magic_metadata[composite_field]
.str.cat([self.metadata[field].astype(str) for field in f], sep="_")
.str.lstrip("_")
)
magic_fields[f] = composite_field
else:
str_f = str(f)
if str_f == "Label":
magic_metadata[str_f] = self.metadata["filename"]
magic_fields[f] = str_f
if isinstance(label, six.string_types):
if label in self.metadata.columns:
magic_metadata[str_f] = self.metadata[label].astype(str)
else:
raise OneCodexException(
"Label field {} not found. Choose from: {}".format(
label, help_metadata
)
)
elif callable(label):
for classification_id, metadata in self.metadata.to_dict(
orient="index"
).items():
c_id_label = label(metadata)
if not isinstance(c_id_label, six.string_types):
raise OneCodexException(
"Expected string from label function, got: {}".format(
type(c_id_label).__name__
)
)
magic_metadata.loc[classification_id, "Label"] = c_id_label
elif label is not None:
raise OneCodexException(
"Expected string or callable for label, got: {}".format(
type(label).__name__
)
)
# add an incremented number to duplicate labels (e.g., same filename)
duplicate_labels = (
magic_metadata[str_f]
.where(magic_metadata[str_f].duplicated(keep=False))
.dropna()
)
if not duplicate_labels.empty:
duplicate_counts = {label: 1 for label in duplicate_labels}
for c_id in magic_metadata.index:
label = magic_metadata[str_f][c_id]
if duplicate_labels.isin([label]).any():
magic_metadata[str_f][c_id] = "{} ({})".format(
label, duplicate_counts[label]
)
duplicate_counts[label] += 1
elif str_f in self.metadata:
# exactly matches existing metadata field
magic_metadata[f] = self.metadata[str_f]
magic_fields[f] = str_f
elif str_f in self._results.keys():
# is a tax_id
tax_name = self.taxonomy["name"][str_f]
# report within-rank abundance
df = self.to_df(rank=self.taxonomy["rank"][str_f])
renamed_field = "{} ({})".format(tax_name, str_f)
magic_metadata[renamed_field] = df[str_f]
magic_fields[f] = renamed_field
else:
# try to match it up with a taxon name
hits = []
                    # don't bother searching if the query is really short
if len(str_f) > 4:
for tax_id, tax_name in zip(self.taxonomy.index, self.taxonomy["name"]):
# if it's an exact match, use that and skip the rest
if str_f.lower() == tax_name.lower():
hits = [(tax_id, tax_name)]
break
# otherwise, keep trying to match
elif str_f.lower() in tax_name.lower():
hits.append((tax_id, tax_name))
# take the hit with the lowest tax_id
hits = sorted(hits, key=lambda x: int(x[0]))
if hits:
# report within-rank abundance
df = self.to_df(rank=self.taxonomy["rank"][hits[0][0]])
renamed_field = "{} ({})".format(hits[0][1], hits[0][0])
magic_metadata[renamed_field] = df[hits[0][0]]
magic_fields[f] = renamed_field
else:
# matched nothing
raise OneCodexException(
"Metric or taxon {} not found. Choose from: {}".format(
str_f, help_metadata
)
)
return magic_metadata, magic_fields
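    # Usage sketch (illustrative; ``samples`` and the field names below are assumed,
    # not part of this class): fetch one metadata column and one taxon by name,
    # labelling each analysis by filename.
    #
    #   magic_metadata, magic_fields = samples._metadata_fetch(
    #       ["geo_loc_name", "Bacteroides", "Label"])
    #   magic_fields["Bacteroides"]  # e.g. renamed to "Bacteroides (816)"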
def to_df(
self,
rank=Rank.Auto,
top_n=None,
threshold=None,
remove_zeros=True,
normalize="auto",
table_format="wide",
):
"""Generate a ClassificationDataFrame, performing any specified transformations.
Takes the ClassificationsDataFrame associated with these samples, or SampleCollection,
does some filtering, and returns a ClassificationsDataFrame copy.
Parameters
----------
rank : {'auto', 'kingdom', 'phylum', 'class', 'order', 'family', 'genus', 'species'}, optional
Analysis will be restricted to abundances of taxa at the specified level.
top_n : `integer`, optional
Return only the top N most abundant taxa.
threshold : `float`, optional
Return only taxa more abundant than this threshold in one or more samples.
remove_zeros : `bool`, optional
Do not return taxa that have zero abundance in every sample.
normalize : {'auto', True, False}
Convert read counts to relative abundances (each sample sums to 1.0).
table_format : {'long', 'wide'}
If wide, rows are classifications, cols are taxa, elements are counts. If long, rows are
observations with three cols each: classification_id, tax_id, and count.
Returns
-------
`ClassificationsDataFrame`
"""
from onecodex.dataframes import ClassificationsDataFrame
rank = self._get_auto_rank(rank)
df = self._results.copy()
# subset by taxa
if rank:
if rank == "kingdom":
warnings.warn(
"Did you mean to specify rank=kingdom? Use rank=superkingdom to see Bacteria, "
"Archaea and Eukaryota."
)
tax_ids_to_keep = []
for tax_id in df.keys():
if self.taxonomy["rank"][tax_id] == rank:
tax_ids_to_keep.append(tax_id)
if len(tax_ids_to_keep) == 0:
raise OneCodexException("No taxa kept--is rank ({}) correct?".format(rank))
df = df.loc[:, tax_ids_to_keep]
# normalize
if normalize is False and self._guess_normalized():
raise OneCodexException("Data has already been normalized and this can not be undone.")
if normalize is True or (normalize == "auto" and rank):
if not self._guess_normalized():
# Replace nans with zeros for samples that have a total abundance of zero.
df = df.div(df.sum(axis=1), axis=0).fillna(0.0)
# remove columns (tax_ids) with no values that are > 0
if remove_zeros:
df = df.loc[:, (df != 0).any(axis=0)]
# restrict to taxa appearing in one or more samples at the given threshold
if threshold:
df = df.loc[:, df.max() >= threshold]
# restrict to N most abundant taxa
if top_n:
idx = df.sum(axis=0).sort_values(ascending=False).head(top_n).index
df = df.loc[:, idx]
# additional data to copy into the ClassificationsDataFrame
ocx_data = {
"ocx_metadata": self.metadata.copy(),
"ocx_rank": rank,
"ocx_metric": self._metric,
"ocx_taxonomy": self.taxonomy.copy(),
"ocx_normalized": normalize,
}
# generate long-format table
if table_format == "long":
pretty_metric_name = self._make_pretty_metric_name(self._metric, normalize)
long_df = {"classification_id": [], "tax_id": [], pretty_metric_name: []}
for t_id in df:
for c_id, count in df[t_id].iteritems():
long_df["classification_id"].append(c_id)
long_df["tax_id"].append(t_id)
long_df[pretty_metric_name].append(count)
results_df = ClassificationsDataFrame(long_df, **ocx_data)
elif table_format == "wide":
results_df = ClassificationsDataFrame(df, **ocx_data)
else:
raise OneCodexException("table_format must be one of: long, wide")
return results_df
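    # Usage sketch (illustrative; ``samples`` is an assumed SampleCollection): keep
    # the most abundant genera as relative abundances in wide format.
    #
    #   df = samples.to_df(rank="genus", top_n=10, normalize=True)
    #   df.shape  # (n_classifications, up to 10 genus-level columns)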
@staticmethod
def _make_pretty_metric_name(metric, normalized):
if AbundanceMetric.has_value(metric):
return "Relative Abundance"
if normalized:
return "Reads (Normalized)"
else:
return "Reads"
@property
def metric(self):
return self._make_pretty_metric_name(self._metric, self._guess_normalized())
| mit |
petosegan/scikit-learn | sklearn/manifold/isomap.py | 229 | 7169 | """Isomap for manifold learning"""
# Author: Jake Vanderplas -- <[email protected]>
# License: BSD 3 clause (C) 2011
import numpy as np
from ..base import BaseEstimator, TransformerMixin
from ..neighbors import NearestNeighbors, kneighbors_graph
from ..utils import check_array
from ..utils.graph import graph_shortest_path
from ..decomposition import KernelPCA
from ..preprocessing import KernelCenterer
class Isomap(BaseEstimator, TransformerMixin):
"""Isomap Embedding
Non-linear dimensionality reduction through Isometric Mapping
Read more in the :ref:`User Guide <isomap>`.
Parameters
----------
n_neighbors : integer
number of neighbors to consider for each point.
n_components : integer
number of coordinates for the manifold
eigen_solver : ['auto'|'arpack'|'dense']
'auto' : Attempt to choose the most efficient solver
for the given problem.
'arpack' : Use Arnoldi decomposition to find the eigenvalues
and eigenvectors.
'dense' : Use a direct solver (i.e. LAPACK)
for the eigenvalue decomposition.
tol : float
Convergence tolerance passed to arpack or lobpcg.
        Not used if eigen_solver == 'dense'.
max_iter : integer
Maximum number of iterations for the arpack solver.
        Not used if eigen_solver == 'dense'.
path_method : string ['auto'|'FW'|'D']
Method to use in finding shortest path.
'auto' : attempt to choose the best algorithm automatically.
'FW' : Floyd-Warshall algorithm.
'D' : Dijkstra's algorithm.
neighbors_algorithm : string ['auto'|'brute'|'kd_tree'|'ball_tree']
Algorithm to use for nearest neighbors search,
passed to neighbors.NearestNeighbors instance.
Attributes
----------
embedding_ : array-like, shape (n_samples, n_components)
Stores the embedding vectors.
kernel_pca_ : object
`KernelPCA` object used to implement the embedding.
training_data_ : array-like, shape (n_samples, n_features)
Stores the training data.
nbrs_ : sklearn.neighbors.NearestNeighbors instance
Stores nearest neighbors instance, including BallTree or KDtree
if applicable.
dist_matrix_ : array-like, shape (n_samples, n_samples)
Stores the geodesic distance matrix of training data.
References
----------
    .. [1] Tenenbaum, J.B.; De Silva, V.; & Langford, J.C. A global geometric
           framework for nonlinear dimensionality reduction. Science 290 (5500):
           2319-2323, 2000.
"""
def __init__(self, n_neighbors=5, n_components=2, eigen_solver='auto',
tol=0, max_iter=None, path_method='auto',
neighbors_algorithm='auto'):
self.n_neighbors = n_neighbors
self.n_components = n_components
self.eigen_solver = eigen_solver
self.tol = tol
self.max_iter = max_iter
self.path_method = path_method
self.neighbors_algorithm = neighbors_algorithm
self.nbrs_ = NearestNeighbors(n_neighbors=n_neighbors,
algorithm=neighbors_algorithm)
def _fit_transform(self, X):
X = check_array(X)
self.nbrs_.fit(X)
self.training_data_ = self.nbrs_._fit_X
self.kernel_pca_ = KernelPCA(n_components=self.n_components,
kernel="precomputed",
eigen_solver=self.eigen_solver,
tol=self.tol, max_iter=self.max_iter)
kng = kneighbors_graph(self.nbrs_, self.n_neighbors,
mode='distance')
self.dist_matrix_ = graph_shortest_path(kng,
method=self.path_method,
directed=False)
G = self.dist_matrix_ ** 2
G *= -0.5
self.embedding_ = self.kernel_pca_.fit_transform(G)
def reconstruction_error(self):
"""Compute the reconstruction error for the embedding.
Returns
-------
reconstruction_error : float
Notes
        -----
The cost function of an isomap embedding is
``E = frobenius_norm[K(D) - K(D_fit)] / n_samples``
Where D is the matrix of distances for the input data X,
D_fit is the matrix of distances for the output embedding X_fit,
and K is the isomap kernel:
``K(D) = -0.5 * (I - 1/n_samples) * D^2 * (I - 1/n_samples)``
"""
G = -0.5 * self.dist_matrix_ ** 2
G_center = KernelCenterer().fit_transform(G)
evals = self.kernel_pca_.lambdas_
return np.sqrt(np.sum(G_center ** 2) - np.sum(evals ** 2)) / G.shape[0]
def fit(self, X, y=None):
"""Compute the embedding vectors for data X
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree, NearestNeighbors}
Sample data, shape = (n_samples, n_features), in the form of a
numpy array, precomputed tree, or NearestNeighbors
object.
Returns
-------
self : returns an instance of self.
"""
self._fit_transform(X)
return self
def fit_transform(self, X, y=None):
"""Fit the model from data in X and transform X.
Parameters
----------
X: {array-like, sparse matrix, BallTree, KDTree}
            Training vector, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new: array-like, shape (n_samples, n_components)
"""
self._fit_transform(X)
return self.embedding_
def transform(self, X):
"""Transform X.
This is implemented by linking the points X into the graph of geodesic
distances of the training data. First the `n_neighbors` nearest
neighbors of X are found in the training data, and from these the
shortest geodesic distances from each point in X to each point in
the training data are computed in order to construct the kernel.
The embedding of X is the projection of this kernel onto the
embedding vectors of the training set.
Parameters
----------
X: array-like, shape (n_samples, n_features)
Returns
-------
X_new: array-like, shape (n_samples, n_components)
"""
X = check_array(X)
distances, indices = self.nbrs_.kneighbors(X, return_distance=True)
        # Create the graph of shortest distances from X to self.training_data_
        # via the nearest neighbors of X.
        # This can be done as a single array operation, but it potentially
        # takes a lot of memory. To avoid that, use a loop:
G_X = np.zeros((X.shape[0], self.training_data_.shape[0]))
for i in range(X.shape[0]):
G_X[i] = np.min((self.dist_matrix_[indices[i]]
+ distances[i][:, None]), 0)
G_X **= 2
G_X *= -0.5
return self.kernel_pca_.transform(G_X)
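# Usage sketch (illustrative, not part of the original module): embed a noisy
# S-curve into two components and inspect the reconstruction error defined above.
#
#   from sklearn.datasets import make_s_curve
#   X, _ = make_s_curve(1000, random_state=0)
#   iso = Isomap(n_neighbors=10, n_components=2).fit(X)
#   X_2d = iso.transform(X)
#   err = iso.reconstruction_error()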
| bsd-3-clause |
anurag313/scikit-learn | sklearn/manifold/tests/test_isomap.py | 226 | 3941 | from itertools import product
import numpy as np
from numpy.testing import assert_almost_equal, assert_array_almost_equal
from sklearn import datasets
from sklearn import manifold
from sklearn import neighbors
from sklearn import pipeline
from sklearn import preprocessing
from sklearn.utils.testing import assert_less
eigen_solvers = ['auto', 'dense', 'arpack']
path_methods = ['auto', 'FW', 'D']
def test_isomap_simple_grid():
# Isomap should preserve distances when all neighbors are used
N_per_side = 5
Npts = N_per_side ** 2
n_neighbors = Npts - 1
# grid of equidistant points in 2D, n_components = n_dim
X = np.array(list(product(range(N_per_side), repeat=2)))
# distances from each point to all others
G = neighbors.kneighbors_graph(X, n_neighbors,
mode='distance').toarray()
for eigen_solver in eigen_solvers:
for path_method in path_methods:
clf = manifold.Isomap(n_neighbors=n_neighbors, n_components=2,
eigen_solver=eigen_solver,
path_method=path_method)
clf.fit(X)
G_iso = neighbors.kneighbors_graph(clf.embedding_,
n_neighbors,
mode='distance').toarray()
assert_array_almost_equal(G, G_iso)
def test_isomap_reconstruction_error():
# Same setup as in test_isomap_simple_grid, with an added dimension
N_per_side = 5
Npts = N_per_side ** 2
n_neighbors = Npts - 1
# grid of equidistant points in 2D, n_components = n_dim
X = np.array(list(product(range(N_per_side), repeat=2)))
# add noise in a third dimension
rng = np.random.RandomState(0)
noise = 0.1 * rng.randn(Npts, 1)
X = np.concatenate((X, noise), 1)
# compute input kernel
G = neighbors.kneighbors_graph(X, n_neighbors,
mode='distance').toarray()
centerer = preprocessing.KernelCenterer()
K = centerer.fit_transform(-0.5 * G ** 2)
for eigen_solver in eigen_solvers:
for path_method in path_methods:
clf = manifold.Isomap(n_neighbors=n_neighbors, n_components=2,
eigen_solver=eigen_solver,
path_method=path_method)
clf.fit(X)
# compute output kernel
G_iso = neighbors.kneighbors_graph(clf.embedding_,
n_neighbors,
mode='distance').toarray()
K_iso = centerer.fit_transform(-0.5 * G_iso ** 2)
# make sure error agrees
reconstruction_error = np.linalg.norm(K - K_iso) / Npts
assert_almost_equal(reconstruction_error,
clf.reconstruction_error())
def test_transform():
n_samples = 200
n_components = 10
noise_scale = 0.01
# Create S-curve dataset
X, y = datasets.samples_generator.make_s_curve(n_samples, random_state=0)
# Compute isomap embedding
iso = manifold.Isomap(n_components, 2)
X_iso = iso.fit_transform(X)
# Re-embed a noisy version of the points
rng = np.random.RandomState(0)
noise = noise_scale * rng.randn(*X.shape)
X_iso2 = iso.transform(X + noise)
# Make sure the rms error on re-embedding is comparable to noise_scale
assert_less(np.sqrt(np.mean((X_iso - X_iso2) ** 2)), 2 * noise_scale)
def test_pipeline():
# check that Isomap works fine as a transformer in a Pipeline
# only checks that no error is raised.
# TODO check that it actually does something useful
X, y = datasets.make_blobs(random_state=0)
clf = pipeline.Pipeline(
[('isomap', manifold.Isomap()),
('clf', neighbors.KNeighborsClassifier())])
clf.fit(X, y)
assert_less(.9, clf.score(X, y))
| bsd-3-clause |
TomAugspurger/pandas | pandas/tests/reshape/merge/test_merge.py | 1 | 77688 | from collections import OrderedDict
from datetime import date, datetime, timedelta
import random
import re
import numpy as np
import pytest
from pandas.core.dtypes.common import is_categorical_dtype, is_object_dtype
from pandas.core.dtypes.dtypes import CategoricalDtype
import pandas as pd
from pandas import (
Categorical,
CategoricalIndex,
DataFrame,
DatetimeIndex,
Float64Index,
Int64Index,
IntervalIndex,
MultiIndex,
PeriodIndex,
RangeIndex,
Series,
TimedeltaIndex,
UInt64Index,
)
import pandas._testing as tm
from pandas.api.types import CategoricalDtype as CDT
from pandas.core.reshape.concat import concat
from pandas.core.reshape.merge import MergeError, merge
N = 50
NGROUPS = 8
def get_test_data(ngroups=NGROUPS, n=N):
unique_groups = list(range(ngroups))
arr = np.asarray(np.tile(unique_groups, n // ngroups))
if len(arr) < n:
arr = np.asarray(list(arr) + unique_groups[: n - len(arr)])
random.shuffle(arr)
return arr
def get_series():
return [
pd.Series([1], dtype="int64"),
pd.Series([1], dtype="Int64"),
pd.Series([1.23]),
pd.Series(["foo"]),
pd.Series([True]),
pd.Series([pd.Timestamp("2018-01-01")]),
pd.Series([pd.Timestamp("2018-01-01", tz="US/Eastern")]),
]
def get_series_na():
return [
pd.Series([np.nan], dtype="Int64"),
pd.Series([np.nan], dtype="float"),
pd.Series([np.nan], dtype="object"),
pd.Series([pd.NaT]),
]
@pytest.fixture(params=get_series(), ids=lambda x: x.dtype.name)
def series_of_dtype(request):
"""
A parametrized fixture returning a variety of Series of different
dtypes
"""
return request.param
@pytest.fixture(params=get_series(), ids=lambda x: x.dtype.name)
def series_of_dtype2(request):
"""
A duplicate of the series_of_dtype fixture, so that it can be used
twice by a single function
"""
return request.param
@pytest.fixture(params=get_series_na(), ids=lambda x: x.dtype.name)
def series_of_dtype_all_na(request):
"""
A parametrized fixture returning a variety of Series with all NA
values
"""
return request.param
class TestMerge:
def setup_method(self, method):
# aggregate multiple columns
self.df = DataFrame(
{
"key1": get_test_data(),
"key2": get_test_data(),
"data1": np.random.randn(N),
"data2": np.random.randn(N),
}
)
# exclude a couple keys for fun
self.df = self.df[self.df["key2"] > 1]
self.df2 = DataFrame(
{
"key1": get_test_data(n=N // 5),
"key2": get_test_data(ngroups=NGROUPS // 2, n=N // 5),
"value": np.random.randn(N // 5),
}
)
self.left = DataFrame(
{"key": ["a", "b", "c", "d", "e", "e", "a"], "v1": np.random.randn(7)}
)
self.right = DataFrame({"v2": np.random.randn(4)}, index=["d", "b", "c", "a"])
def test_merge_inner_join_empty(self):
# GH 15328
df_empty = pd.DataFrame()
df_a = pd.DataFrame({"a": [1, 2]}, index=[0, 1], dtype="int64")
result = pd.merge(df_empty, df_a, left_index=True, right_index=True)
expected = pd.DataFrame({"a": []}, index=[], dtype="int64")
tm.assert_frame_equal(result, expected)
def test_merge_common(self):
joined = merge(self.df, self.df2)
exp = merge(self.df, self.df2, on=["key1", "key2"])
tm.assert_frame_equal(joined, exp)
def test_merge_non_string_columns(self):
# https://github.com/pandas-dev/pandas/issues/17962
# Checks that method runs for non string column names
left = pd.DataFrame(
{0: [1, 0, 1, 0], 1: [0, 1, 0, 0], 2: [0, 0, 2, 0], 3: [1, 0, 0, 3]}
)
right = left.astype(float)
expected = left
result = pd.merge(left, right)
tm.assert_frame_equal(expected, result)
def test_merge_index_as_on_arg(self):
# GH14355
left = self.df.set_index("key1")
right = self.df2.set_index("key1")
result = merge(left, right, on="key1")
expected = merge(self.df, self.df2, on="key1").set_index("key1")
tm.assert_frame_equal(result, expected)
def test_merge_index_singlekey_right_vs_left(self):
left = DataFrame(
{"key": ["a", "b", "c", "d", "e", "e", "a"], "v1": np.random.randn(7)}
)
right = DataFrame({"v2": np.random.randn(4)}, index=["d", "b", "c", "a"])
merged1 = merge(
left, right, left_on="key", right_index=True, how="left", sort=False
)
merged2 = merge(
right, left, right_on="key", left_index=True, how="right", sort=False
)
tm.assert_frame_equal(merged1, merged2.loc[:, merged1.columns])
merged1 = merge(
left, right, left_on="key", right_index=True, how="left", sort=True
)
merged2 = merge(
right, left, right_on="key", left_index=True, how="right", sort=True
)
tm.assert_frame_equal(merged1, merged2.loc[:, merged1.columns])
def test_merge_index_singlekey_inner(self):
left = DataFrame(
{"key": ["a", "b", "c", "d", "e", "e", "a"], "v1": np.random.randn(7)}
)
right = DataFrame({"v2": np.random.randn(4)}, index=["d", "b", "c", "a"])
# inner join
result = merge(left, right, left_on="key", right_index=True, how="inner")
expected = left.join(right, on="key").loc[result.index]
tm.assert_frame_equal(result, expected)
result = merge(right, left, right_on="key", left_index=True, how="inner")
expected = left.join(right, on="key").loc[result.index]
tm.assert_frame_equal(result, expected.loc[:, result.columns])
def test_merge_misspecified(self):
msg = "Must pass right_on or right_index=True"
with pytest.raises(pd.errors.MergeError, match=msg):
merge(self.left, self.right, left_index=True)
msg = "Must pass left_on or left_index=True"
with pytest.raises(pd.errors.MergeError, match=msg):
merge(self.left, self.right, right_index=True)
msg = (
'Can only pass argument "on" OR "left_on" and "right_on", not '
"a combination of both"
)
with pytest.raises(pd.errors.MergeError, match=msg):
merge(self.left, self.left, left_on="key", on="key")
msg = r"len\(right_on\) must equal len\(left_on\)"
with pytest.raises(ValueError, match=msg):
merge(self.df, self.df2, left_on=["key1"], right_on=["key1", "key2"])
def test_index_and_on_parameters_confusion(self):
msg = "right_index parameter must be of type bool, not <class 'list'>"
with pytest.raises(ValueError, match=msg):
merge(
self.df,
self.df2,
how="left",
left_index=False,
right_index=["key1", "key2"],
)
msg = "left_index parameter must be of type bool, not <class 'list'>"
with pytest.raises(ValueError, match=msg):
merge(
self.df,
self.df2,
how="left",
left_index=["key1", "key2"],
right_index=False,
)
with pytest.raises(ValueError, match=msg):
merge(
self.df,
self.df2,
how="left",
left_index=["key1", "key2"],
right_index=["key1", "key2"],
)
def test_merge_overlap(self):
merged = merge(self.left, self.left, on="key")
exp_len = (self.left["key"].value_counts() ** 2).sum()
assert len(merged) == exp_len
assert "v1_x" in merged
assert "v1_y" in merged
def test_merge_different_column_key_names(self):
left = DataFrame({"lkey": ["foo", "bar", "baz", "foo"], "value": [1, 2, 3, 4]})
right = DataFrame({"rkey": ["foo", "bar", "qux", "foo"], "value": [5, 6, 7, 8]})
merged = left.merge(
right, left_on="lkey", right_on="rkey", how="outer", sort=True
)
exp = pd.Series(["bar", "baz", "foo", "foo", "foo", "foo", np.nan], name="lkey")
tm.assert_series_equal(merged["lkey"], exp)
exp = pd.Series(["bar", np.nan, "foo", "foo", "foo", "foo", "qux"], name="rkey")
tm.assert_series_equal(merged["rkey"], exp)
exp = pd.Series([2, 3, 1, 1, 4, 4, np.nan], name="value_x")
tm.assert_series_equal(merged["value_x"], exp)
exp = pd.Series([6, np.nan, 5, 8, 5, 8, 7], name="value_y")
tm.assert_series_equal(merged["value_y"], exp)
def test_merge_copy(self):
left = DataFrame({"a": 0, "b": 1}, index=range(10))
right = DataFrame({"c": "foo", "d": "bar"}, index=range(10))
merged = merge(left, right, left_index=True, right_index=True, copy=True)
merged["a"] = 6
assert (left["a"] == 0).all()
merged["d"] = "peekaboo"
assert (right["d"] == "bar").all()
def test_merge_nocopy(self):
left = DataFrame({"a": 0, "b": 1}, index=range(10))
right = DataFrame({"c": "foo", "d": "bar"}, index=range(10))
merged = merge(left, right, left_index=True, right_index=True, copy=False)
merged["a"] = 6
assert (left["a"] == 6).all()
merged["d"] = "peekaboo"
assert (right["d"] == "peekaboo").all()
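    # Note: the two tests above pin down the ``copy`` keyword -- with
    # ``copy=True`` the merged frame owns its data, so the in-place assignments
    # do not leak back into ``left``/``right``, while with ``copy=False`` the
    # result may share memory with the inputs and the writes become visible on
    # the original frames.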
def test_intelligently_handle_join_key(self):
# #733, be a bit more 1337 about not returning unconsolidated DataFrame
left = DataFrame(
{"key": [1, 1, 2, 2, 3], "value": list(range(5))}, columns=["value", "key"]
)
right = DataFrame({"key": [1, 1, 2, 3, 4, 5], "rvalue": list(range(6))})
joined = merge(left, right, on="key", how="outer")
expected = DataFrame(
{
"key": [1, 1, 1, 1, 2, 2, 3, 4, 5],
"value": np.array([0, 0, 1, 1, 2, 3, 4, np.nan, np.nan]),
"rvalue": [0, 1, 0, 1, 2, 2, 3, 4, 5],
},
columns=["value", "key", "rvalue"],
)
tm.assert_frame_equal(joined, expected)
def test_merge_join_key_dtype_cast(self):
# #8596
df1 = DataFrame({"key": [1], "v1": [10]})
df2 = DataFrame({"key": [2], "v1": [20]})
df = merge(df1, df2, how="outer")
assert df["key"].dtype == "int64"
df1 = DataFrame({"key": [True], "v1": [1]})
df2 = DataFrame({"key": [False], "v1": [0]})
df = merge(df1, df2, how="outer")
# GH13169
# this really should be bool
assert df["key"].dtype == "object"
df1 = DataFrame({"val": [1]})
df2 = DataFrame({"val": [2]})
lkey = np.array([1])
rkey = np.array([2])
df = merge(df1, df2, left_on=lkey, right_on=rkey, how="outer")
assert df["key_0"].dtype == "int64"
def test_handle_join_key_pass_array(self):
left = DataFrame(
{"key": [1, 1, 2, 2, 3], "value": np.arange(5)}, columns=["value", "key"]
)
right = DataFrame({"rvalue": np.arange(6)})
key = np.array([1, 1, 2, 3, 4, 5])
merged = merge(left, right, left_on="key", right_on=key, how="outer")
merged2 = merge(right, left, left_on=key, right_on="key", how="outer")
tm.assert_series_equal(merged["key"], merged2["key"])
assert merged["key"].notna().all()
assert merged2["key"].notna().all()
left = DataFrame({"value": np.arange(5)}, columns=["value"])
right = DataFrame({"rvalue": np.arange(6)})
lkey = np.array([1, 1, 2, 2, 3])
rkey = np.array([1, 1, 2, 3, 4, 5])
merged = merge(left, right, left_on=lkey, right_on=rkey, how="outer")
tm.assert_series_equal(
merged["key_0"], Series([1, 1, 1, 1, 2, 2, 3, 4, 5], name="key_0")
)
left = DataFrame({"value": np.arange(3)})
right = DataFrame({"rvalue": np.arange(6)})
key = np.array([0, 1, 1, 2, 2, 3], dtype=np.int64)
merged = merge(left, right, left_index=True, right_on=key, how="outer")
tm.assert_series_equal(merged["key_0"], Series(key, name="key_0"))
def test_no_overlap_more_informative_error(self):
dt = datetime.now()
df1 = DataFrame({"x": ["a"]}, index=[dt])
df2 = DataFrame({"y": ["b", "c"]}, index=[dt, dt])
msg = (
"No common columns to perform merge on. "
f"Merge options: left_on={None}, right_on={None}, "
f"left_index={False}, right_index={False}"
)
with pytest.raises(MergeError, match=msg):
merge(df1, df2)
def test_merge_non_unique_indexes(self):
dt = datetime(2012, 5, 1)
dt2 = datetime(2012, 5, 2)
dt3 = datetime(2012, 5, 3)
dt4 = datetime(2012, 5, 4)
df1 = DataFrame({"x": ["a"]}, index=[dt])
df2 = DataFrame({"y": ["b", "c"]}, index=[dt, dt])
_check_merge(df1, df2)
# Not monotonic
df1 = DataFrame({"x": ["a", "b", "q"]}, index=[dt2, dt, dt4])
df2 = DataFrame(
{"y": ["c", "d", "e", "f", "g", "h"]}, index=[dt3, dt3, dt2, dt2, dt, dt]
)
_check_merge(df1, df2)
df1 = DataFrame({"x": ["a", "b"]}, index=[dt, dt])
df2 = DataFrame({"y": ["c", "d"]}, index=[dt, dt])
_check_merge(df1, df2)
def test_merge_non_unique_index_many_to_many(self):
dt = datetime(2012, 5, 1)
dt2 = datetime(2012, 5, 2)
dt3 = datetime(2012, 5, 3)
df1 = DataFrame({"x": ["a", "b", "c", "d"]}, index=[dt2, dt2, dt, dt])
df2 = DataFrame(
{"y": ["e", "f", "g", " h", "i"]}, index=[dt2, dt2, dt3, dt, dt]
)
_check_merge(df1, df2)
def test_left_merge_empty_dataframe(self):
left = DataFrame({"key": [1], "value": [2]})
right = DataFrame({"key": []})
result = merge(left, right, on="key", how="left")
tm.assert_frame_equal(result, left)
result = merge(right, left, on="key", how="right")
tm.assert_frame_equal(result, left)
@pytest.mark.parametrize(
"kwarg",
[
dict(left_index=True, right_index=True),
dict(left_index=True, right_on="x"),
dict(left_on="a", right_index=True),
dict(left_on="a", right_on="x"),
],
)
def test_merge_left_empty_right_empty(self, join_type, kwarg):
# GH 10824
left = pd.DataFrame(columns=["a", "b", "c"])
right = pd.DataFrame(columns=["x", "y", "z"])
exp_in = pd.DataFrame(
columns=["a", "b", "c", "x", "y", "z"],
index=pd.Index([], dtype=object),
dtype=object,
)
result = pd.merge(left, right, how=join_type, **kwarg)
tm.assert_frame_equal(result, exp_in)
def test_merge_left_empty_right_notempty(self):
# GH 10824
left = pd.DataFrame(columns=["a", "b", "c"])
right = pd.DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]], columns=["x", "y", "z"])
exp_out = pd.DataFrame(
{
"a": np.array([np.nan] * 3, dtype=object),
"b": np.array([np.nan] * 3, dtype=object),
"c": np.array([np.nan] * 3, dtype=object),
"x": [1, 4, 7],
"y": [2, 5, 8],
"z": [3, 6, 9],
},
columns=["a", "b", "c", "x", "y", "z"],
)
exp_in = exp_out[0:0] # make empty DataFrame keeping dtype
# result will have object dtype
exp_in.index = exp_in.index.astype(object)
def check1(exp, kwarg):
result = pd.merge(left, right, how="inner", **kwarg)
tm.assert_frame_equal(result, exp)
result = pd.merge(left, right, how="left", **kwarg)
tm.assert_frame_equal(result, exp)
def check2(exp, kwarg):
result = pd.merge(left, right, how="right", **kwarg)
tm.assert_frame_equal(result, exp)
result = pd.merge(left, right, how="outer", **kwarg)
tm.assert_frame_equal(result, exp)
for kwarg in [
dict(left_index=True, right_index=True),
dict(left_index=True, right_on="x"),
]:
check1(exp_in, kwarg)
check2(exp_out, kwarg)
kwarg = dict(left_on="a", right_index=True)
check1(exp_in, kwarg)
exp_out["a"] = [0, 1, 2]
check2(exp_out, kwarg)
kwarg = dict(left_on="a", right_on="x")
check1(exp_in, kwarg)
exp_out["a"] = np.array([np.nan] * 3, dtype=object)
check2(exp_out, kwarg)
def test_merge_left_notempty_right_empty(self):
# GH 10824
left = pd.DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]], columns=["a", "b", "c"])
right = pd.DataFrame(columns=["x", "y", "z"])
exp_out = pd.DataFrame(
{
"a": [1, 4, 7],
"b": [2, 5, 8],
"c": [3, 6, 9],
"x": np.array([np.nan] * 3, dtype=object),
"y": np.array([np.nan] * 3, dtype=object),
"z": np.array([np.nan] * 3, dtype=object),
},
columns=["a", "b", "c", "x", "y", "z"],
)
exp_in = exp_out[0:0] # make empty DataFrame keeping dtype
# result will have object dtype
exp_in.index = exp_in.index.astype(object)
def check1(exp, kwarg):
result = pd.merge(left, right, how="inner", **kwarg)
tm.assert_frame_equal(result, exp)
result = pd.merge(left, right, how="right", **kwarg)
tm.assert_frame_equal(result, exp)
def check2(exp, kwarg):
result = pd.merge(left, right, how="left", **kwarg)
tm.assert_frame_equal(result, exp)
result = pd.merge(left, right, how="outer", **kwarg)
tm.assert_frame_equal(result, exp)
for kwarg in [
dict(left_index=True, right_index=True),
dict(left_index=True, right_on="x"),
dict(left_on="a", right_index=True),
dict(left_on="a", right_on="x"),
]:
check1(exp_in, kwarg)
check2(exp_out, kwarg)
def test_merge_empty_frame(self, series_of_dtype, series_of_dtype2):
# GH 25183
df = pd.DataFrame(
{"key": series_of_dtype, "value": series_of_dtype2},
columns=["key", "value"],
)
df_empty = df[:0]
expected = pd.DataFrame(
{
"value_x": pd.Series(dtype=df.dtypes["value"]),
"key": pd.Series(dtype=df.dtypes["key"]),
"value_y": pd.Series(dtype=df.dtypes["value"]),
},
columns=["value_x", "key", "value_y"],
)
actual = df_empty.merge(df, on="key")
tm.assert_frame_equal(actual, expected)
def test_merge_all_na_column(self, series_of_dtype, series_of_dtype_all_na):
# GH 25183
df_left = pd.DataFrame(
{"key": series_of_dtype, "value": series_of_dtype_all_na},
columns=["key", "value"],
)
df_right = pd.DataFrame(
{"key": series_of_dtype, "value": series_of_dtype_all_na},
columns=["key", "value"],
)
expected = pd.DataFrame(
{
"key": series_of_dtype,
"value_x": series_of_dtype_all_na,
"value_y": series_of_dtype_all_na,
},
columns=["key", "value_x", "value_y"],
)
actual = df_left.merge(df_right, on="key")
tm.assert_frame_equal(actual, expected)
def test_merge_nosort(self):
# GH#2098, TODO: anything to do?
d = {
"var1": np.random.randint(0, 10, size=10),
"var2": np.random.randint(0, 10, size=10),
"var3": [
datetime(2012, 1, 12),
datetime(2011, 2, 4),
datetime(2010, 2, 3),
datetime(2012, 1, 12),
datetime(2011, 2, 4),
datetime(2012, 4, 3),
datetime(2012, 3, 4),
datetime(2008, 5, 1),
datetime(2010, 2, 3),
datetime(2012, 2, 3),
],
}
df = DataFrame.from_dict(d)
var3 = df.var3.unique()
var3.sort()
new = DataFrame.from_dict({"var3": var3, "var8": np.random.random(7)})
result = df.merge(new, on="var3", sort=False)
exp = merge(df, new, on="var3", sort=False)
tm.assert_frame_equal(result, exp)
assert (df.var3.unique() == result.var3.unique()).all()
def test_merge_nan_right(self):
df1 = DataFrame({"i1": [0, 1], "i2": [0, 1]})
df2 = DataFrame({"i1": [0], "i3": [0]})
result = df1.join(df2, on="i1", rsuffix="_")
expected = (
DataFrame(
{
"i1": {0: 0.0, 1: 1},
"i2": {0: 0, 1: 1},
"i1_": {0: 0, 1: np.nan},
"i3": {0: 0.0, 1: np.nan},
None: {0: 0, 1: 0},
}
)
.set_index(None)
.reset_index()[["i1", "i2", "i1_", "i3"]]
)
tm.assert_frame_equal(result, expected, check_dtype=False)
df1 = DataFrame({"i1": [0, 1], "i2": [0.5, 1.5]})
df2 = DataFrame({"i1": [0], "i3": [0.7]})
result = df1.join(df2, rsuffix="_", on="i1")
expected = DataFrame(
{
"i1": {0: 0, 1: 1},
"i1_": {0: 0.0, 1: np.nan},
"i2": {0: 0.5, 1: 1.5},
"i3": {0: 0.69999999999999996, 1: np.nan},
}
)[["i1", "i2", "i1_", "i3"]]
tm.assert_frame_equal(result, expected)
def test_merge_type(self):
class NotADataFrame(DataFrame):
@property
def _constructor(self):
return NotADataFrame
nad = NotADataFrame(self.df)
result = nad.merge(self.df2, on="key1")
assert isinstance(result, NotADataFrame)
def test_join_append_timedeltas(self):
# timedelta64 issues with join/merge
# GH 5695
d = {"d": datetime(2013, 11, 5, 5, 56), "t": timedelta(0, 22500)}
df = DataFrame(columns=list("dt"))
df = df.append(d, ignore_index=True)
result = df.append(d, ignore_index=True)
expected = DataFrame(
{
"d": [datetime(2013, 11, 5, 5, 56), datetime(2013, 11, 5, 5, 56)],
"t": [timedelta(0, 22500), timedelta(0, 22500)],
}
)
tm.assert_frame_equal(result, expected)
td = np.timedelta64(300000000)
lhs = DataFrame(Series([td, td], index=["A", "B"]))
rhs = DataFrame(Series([td], index=["A"]))
result = lhs.join(rhs, rsuffix="r", how="left")
expected = DataFrame(
{
"0": Series([td, td], index=list("AB")),
"0r": Series([td, pd.NaT], index=list("AB")),
}
)
tm.assert_frame_equal(result, expected)
def test_other_datetime_unit(self):
# GH 13389
df1 = pd.DataFrame({"entity_id": [101, 102]})
s = pd.Series([None, None], index=[101, 102], name="days")
for dtype in [
"datetime64[D]",
"datetime64[h]",
"datetime64[m]",
"datetime64[s]",
"datetime64[ms]",
"datetime64[us]",
"datetime64[ns]",
]:
df2 = s.astype(dtype).to_frame("days")
# coerces to datetime64[ns], thus should not be affected
assert df2["days"].dtype == "datetime64[ns]"
result = df1.merge(df2, left_on="entity_id", right_index=True)
exp = pd.DataFrame(
{
"entity_id": [101, 102],
"days": np.array(["nat", "nat"], dtype="datetime64[ns]"),
},
columns=["entity_id", "days"],
)
tm.assert_frame_equal(result, exp)
@pytest.mark.parametrize("unit", ["D", "h", "m", "s", "ms", "us", "ns"])
def test_other_timedelta_unit(self, unit):
# GH 13389
df1 = pd.DataFrame({"entity_id": [101, 102]})
s = pd.Series([None, None], index=[101, 102], name="days")
dtype = f"m8[{unit}]"
df2 = s.astype(dtype).to_frame("days")
assert df2["days"].dtype == "m8[ns]"
result = df1.merge(df2, left_on="entity_id", right_index=True)
exp = pd.DataFrame(
{"entity_id": [101, 102], "days": np.array(["nat", "nat"], dtype=dtype)},
columns=["entity_id", "days"],
)
tm.assert_frame_equal(result, exp)
def test_overlapping_columns_error_message(self):
df = DataFrame({"key": [1, 2, 3], "v1": [4, 5, 6], "v2": [7, 8, 9]})
df2 = DataFrame({"key": [1, 2, 3], "v1": [4, 5, 6], "v2": [7, 8, 9]})
df.columns = ["key", "foo", "foo"]
df2.columns = ["key", "bar", "bar"]
expected = DataFrame(
{
"key": [1, 2, 3],
"v1": [4, 5, 6],
"v2": [7, 8, 9],
"v3": [4, 5, 6],
"v4": [7, 8, 9],
}
)
expected.columns = ["key", "foo", "foo", "bar", "bar"]
tm.assert_frame_equal(merge(df, df2), expected)
# #2649, #10639
df2.columns = ["key1", "foo", "foo"]
msg = r"Data columns not unique: Index\(\['foo', 'foo'\], dtype='object'\)"
with pytest.raises(MergeError, match=msg):
merge(df, df2)
def test_merge_on_datetime64tz(self):
# GH11405
left = pd.DataFrame(
{
"key": pd.date_range("20151010", periods=2, tz="US/Eastern"),
"value": [1, 2],
}
)
right = pd.DataFrame(
{
"key": pd.date_range("20151011", periods=3, tz="US/Eastern"),
"value": [1, 2, 3],
}
)
expected = DataFrame(
{
"key": pd.date_range("20151010", periods=4, tz="US/Eastern"),
"value_x": [1, 2, np.nan, np.nan],
"value_y": [np.nan, 1, 2, 3],
}
)
result = pd.merge(left, right, on="key", how="outer")
tm.assert_frame_equal(result, expected)
left = pd.DataFrame(
{
"key": [1, 2],
"value": pd.date_range("20151010", periods=2, tz="US/Eastern"),
}
)
right = pd.DataFrame(
{
"key": [2, 3],
"value": pd.date_range("20151011", periods=2, tz="US/Eastern"),
}
)
expected = DataFrame(
{
"key": [1, 2, 3],
"value_x": list(pd.date_range("20151010", periods=2, tz="US/Eastern"))
+ [pd.NaT],
"value_y": [pd.NaT]
+ list(pd.date_range("20151011", periods=2, tz="US/Eastern")),
}
)
result = pd.merge(left, right, on="key", how="outer")
tm.assert_frame_equal(result, expected)
assert result["value_x"].dtype == "datetime64[ns, US/Eastern]"
assert result["value_y"].dtype == "datetime64[ns, US/Eastern]"
def test_merge_on_datetime64tz_empty(self):
# https://github.com/pandas-dev/pandas/issues/25014
dtz = pd.DatetimeTZDtype(tz="UTC")
right = pd.DataFrame(
{
"date": [pd.Timestamp("2018", tz=dtz.tz)],
"value": [4.0],
"date2": [pd.Timestamp("2019", tz=dtz.tz)],
},
columns=["date", "value", "date2"],
)
left = right[:0]
result = left.merge(right, on="date")
expected = pd.DataFrame(
{
"value_x": pd.Series(dtype=float),
"date2_x": pd.Series(dtype=dtz),
"date": pd.Series(dtype=dtz),
"value_y": pd.Series(dtype=float),
"date2_y": pd.Series(dtype=dtz),
},
columns=["value_x", "date2_x", "date", "value_y", "date2_y"],
)
tm.assert_frame_equal(result, expected)
def test_merge_datetime64tz_with_dst_transition(self):
# GH 18885
df1 = pd.DataFrame(
pd.date_range("2017-10-29 01:00", periods=4, freq="H", tz="Europe/Madrid"),
columns=["date"],
)
df1["value"] = 1
df2 = pd.DataFrame(
{
"date": pd.to_datetime(
[
"2017-10-29 03:00:00",
"2017-10-29 04:00:00",
"2017-10-29 05:00:00",
]
),
"value": 2,
}
)
df2["date"] = df2["date"].dt.tz_localize("UTC").dt.tz_convert("Europe/Madrid")
result = pd.merge(df1, df2, how="outer", on="date")
expected = pd.DataFrame(
{
"date": pd.date_range(
"2017-10-29 01:00", periods=7, freq="H", tz="Europe/Madrid"
),
"value_x": [1] * 4 + [np.nan] * 3,
"value_y": [np.nan] * 4 + [2] * 3,
}
)
tm.assert_frame_equal(result, expected)
def test_merge_non_unique_period_index(self):
# GH #16871
index = pd.period_range("2016-01-01", periods=16, freq="M")
df = DataFrame(list(range(len(index))), index=index, columns=["pnum"])
df2 = concat([df, df])
result = df.merge(df2, left_index=True, right_index=True, how="inner")
expected = DataFrame(
np.tile(np.arange(16, dtype=np.int64).repeat(2).reshape(-1, 1), 2),
columns=["pnum_x", "pnum_y"],
index=df2.sort_index().index,
)
tm.assert_frame_equal(result, expected)
def test_merge_on_periods(self):
left = pd.DataFrame(
{"key": pd.period_range("20151010", periods=2, freq="D"), "value": [1, 2]}
)
right = pd.DataFrame(
{
"key": pd.period_range("20151011", periods=3, freq="D"),
"value": [1, 2, 3],
}
)
expected = DataFrame(
{
"key": pd.period_range("20151010", periods=4, freq="D"),
"value_x": [1, 2, np.nan, np.nan],
"value_y": [np.nan, 1, 2, 3],
}
)
result = pd.merge(left, right, on="key", how="outer")
tm.assert_frame_equal(result, expected)
left = pd.DataFrame(
{"key": [1, 2], "value": pd.period_range("20151010", periods=2, freq="D")}
)
right = pd.DataFrame(
{"key": [2, 3], "value": pd.period_range("20151011", periods=2, freq="D")}
)
exp_x = pd.period_range("20151010", periods=2, freq="D")
exp_y = pd.period_range("20151011", periods=2, freq="D")
expected = DataFrame(
{
"key": [1, 2, 3],
"value_x": list(exp_x) + [pd.NaT],
"value_y": [pd.NaT] + list(exp_y),
}
)
result = pd.merge(left, right, on="key", how="outer")
tm.assert_frame_equal(result, expected)
assert result["value_x"].dtype == "Period[D]"
assert result["value_y"].dtype == "Period[D]"
def test_indicator(self):
# PR #10054. xref #7412 and closes #8790.
df1 = DataFrame(
{"col1": [0, 1], "col_conflict": [1, 2], "col_left": ["a", "b"]}
)
df1_copy = df1.copy()
df2 = DataFrame(
{
"col1": [1, 2, 3, 4, 5],
"col_conflict": [1, 2, 3, 4, 5],
"col_right": [2, 2, 2, 2, 2],
}
)
df2_copy = df2.copy()
df_result = DataFrame(
{
"col1": [0, 1, 2, 3, 4, 5],
"col_conflict_x": [1, 2, np.nan, np.nan, np.nan, np.nan],
"col_left": ["a", "b", np.nan, np.nan, np.nan, np.nan],
"col_conflict_y": [np.nan, 1, 2, 3, 4, 5],
"col_right": [np.nan, 2, 2, 2, 2, 2],
}
)
df_result["_merge"] = Categorical(
[
"left_only",
"both",
"right_only",
"right_only",
"right_only",
"right_only",
],
categories=["left_only", "right_only", "both"],
)
df_result = df_result[
[
"col1",
"col_conflict_x",
"col_left",
"col_conflict_y",
"col_right",
"_merge",
]
]
test = merge(df1, df2, on="col1", how="outer", indicator=True)
tm.assert_frame_equal(test, df_result)
test = df1.merge(df2, on="col1", how="outer", indicator=True)
tm.assert_frame_equal(test, df_result)
# No side effects
tm.assert_frame_equal(df1, df1_copy)
tm.assert_frame_equal(df2, df2_copy)
# Check with custom name
df_result_custom_name = df_result
df_result_custom_name = df_result_custom_name.rename(
columns={"_merge": "custom_name"}
)
test_custom_name = merge(
df1, df2, on="col1", how="outer", indicator="custom_name"
)
tm.assert_frame_equal(test_custom_name, df_result_custom_name)
test_custom_name = df1.merge(
df2, on="col1", how="outer", indicator="custom_name"
)
tm.assert_frame_equal(test_custom_name, df_result_custom_name)
        # Check that indicator only accepts strings and booleans
msg = "indicator option can only accept boolean or string arguments"
with pytest.raises(ValueError, match=msg):
merge(df1, df2, on="col1", how="outer", indicator=5)
with pytest.raises(ValueError, match=msg):
df1.merge(df2, on="col1", how="outer", indicator=5)
# Check result integrity
test2 = merge(df1, df2, on="col1", how="left", indicator=True)
assert (test2._merge != "right_only").all()
test2 = df1.merge(df2, on="col1", how="left", indicator=True)
assert (test2._merge != "right_only").all()
test3 = merge(df1, df2, on="col1", how="right", indicator=True)
assert (test3._merge != "left_only").all()
test3 = df1.merge(df2, on="col1", how="right", indicator=True)
assert (test3._merge != "left_only").all()
test4 = merge(df1, df2, on="col1", how="inner", indicator=True)
assert (test4._merge == "both").all()
test4 = df1.merge(df2, on="col1", how="inner", indicator=True)
assert (test4._merge == "both").all()
        # Check behaviour when a reserved indicator name already exists in the frame
for i in ["_right_indicator", "_left_indicator", "_merge"]:
df_badcolumn = DataFrame({"col1": [1, 2], i: [2, 2]})
msg = (
"Cannot use `indicator=True` option when data contains a "
f"column named {i}|"
"Cannot use name of an existing column for indicator column"
)
with pytest.raises(ValueError, match=msg):
merge(df1, df_badcolumn, on="col1", how="outer", indicator=True)
with pytest.raises(ValueError, match=msg):
df1.merge(df_badcolumn, on="col1", how="outer", indicator=True)
# Check for name conflict with custom name
df_badcolumn = DataFrame({"col1": [1, 2], "custom_column_name": [2, 2]})
msg = "Cannot use name of an existing column for indicator column"
with pytest.raises(ValueError, match=msg):
merge(
df1,
df_badcolumn,
on="col1",
how="outer",
indicator="custom_column_name",
)
with pytest.raises(ValueError, match=msg):
df1.merge(
df_badcolumn, on="col1", how="outer", indicator="custom_column_name"
)
# Merge on multiple columns
df3 = DataFrame({"col1": [0, 1], "col2": ["a", "b"]})
df4 = DataFrame({"col1": [1, 1, 3], "col2": ["b", "x", "y"]})
hand_coded_result = DataFrame(
{"col1": [0, 1, 1, 3], "col2": ["a", "b", "x", "y"]}
)
hand_coded_result["_merge"] = Categorical(
["left_only", "both", "right_only", "right_only"],
categories=["left_only", "right_only", "both"],
)
test5 = merge(df3, df4, on=["col1", "col2"], how="outer", indicator=True)
tm.assert_frame_equal(test5, hand_coded_result)
test5 = df3.merge(df4, on=["col1", "col2"], how="outer", indicator=True)
tm.assert_frame_equal(test5, hand_coded_result)
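    # Note: ``indicator=True`` appends a Categorical ``_merge`` column taking
    # the values "left_only", "right_only" and "both" to describe the source of
    # each row; passing a string instead uses that string as the column name.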
def test_validation(self):
left = DataFrame(
{"a": ["a", "b", "c", "d"], "b": ["cat", "dog", "weasel", "horse"]},
index=range(4),
)
right = DataFrame(
{
"a": ["a", "b", "c", "d", "e"],
"c": ["meow", "bark", "um... weasel noise?", "nay", "chirp"],
},
index=range(5),
)
# Make sure no side effects.
left_copy = left.copy()
right_copy = right.copy()
result = merge(left, right, left_index=True, right_index=True, validate="1:1")
tm.assert_frame_equal(left, left_copy)
tm.assert_frame_equal(right, right_copy)
# make sure merge still correct
expected = DataFrame(
{
"a_x": ["a", "b", "c", "d"],
"b": ["cat", "dog", "weasel", "horse"],
"a_y": ["a", "b", "c", "d"],
"c": ["meow", "bark", "um... weasel noise?", "nay"],
},
index=range(4),
columns=["a_x", "b", "a_y", "c"],
)
result = merge(
left, right, left_index=True, right_index=True, validate="one_to_one"
)
tm.assert_frame_equal(result, expected)
expected_2 = DataFrame(
{
"a": ["a", "b", "c", "d"],
"b": ["cat", "dog", "weasel", "horse"],
"c": ["meow", "bark", "um... weasel noise?", "nay"],
},
index=range(4),
)
result = merge(left, right, on="a", validate="1:1")
tm.assert_frame_equal(left, left_copy)
tm.assert_frame_equal(right, right_copy)
tm.assert_frame_equal(result, expected_2)
result = merge(left, right, on="a", validate="one_to_one")
tm.assert_frame_equal(result, expected_2)
# One index, one column
expected_3 = DataFrame(
{
"b": ["cat", "dog", "weasel", "horse"],
"a": ["a", "b", "c", "d"],
"c": ["meow", "bark", "um... weasel noise?", "nay"],
},
columns=["b", "a", "c"],
index=range(4),
)
left_index_reset = left.set_index("a")
result = merge(
left_index_reset,
right,
left_index=True,
right_on="a",
validate="one_to_one",
)
tm.assert_frame_equal(result, expected_3)
# Dups on right
right_w_dups = right.append(pd.DataFrame({"a": ["e"], "c": ["moo"]}, index=[4]))
merge(
left,
right_w_dups,
left_index=True,
right_index=True,
validate="one_to_many",
)
msg = "Merge keys are not unique in right dataset; not a one-to-one merge"
with pytest.raises(MergeError, match=msg):
merge(
left,
right_w_dups,
left_index=True,
right_index=True,
validate="one_to_one",
)
with pytest.raises(MergeError, match=msg):
merge(left, right_w_dups, on="a", validate="one_to_one")
# Dups on left
left_w_dups = left.append(
pd.DataFrame({"a": ["a"], "c": ["cow"]}, index=[3]), sort=True
)
merge(
left_w_dups,
right,
left_index=True,
right_index=True,
validate="many_to_one",
)
msg = "Merge keys are not unique in left dataset; not a one-to-one merge"
with pytest.raises(MergeError, match=msg):
merge(
left_w_dups,
right,
left_index=True,
right_index=True,
validate="one_to_one",
)
with pytest.raises(MergeError, match=msg):
merge(left_w_dups, right, on="a", validate="one_to_one")
# Dups on both
merge(left_w_dups, right_w_dups, on="a", validate="many_to_many")
msg = "Merge keys are not unique in right dataset; not a many-to-one merge"
with pytest.raises(MergeError, match=msg):
merge(
left_w_dups,
right_w_dups,
left_index=True,
right_index=True,
validate="many_to_one",
)
msg = "Merge keys are not unique in left dataset; not a one-to-many merge"
with pytest.raises(MergeError, match=msg):
merge(left_w_dups, right_w_dups, on="a", validate="one_to_many")
# Check invalid arguments
msg = "Not a valid argument for validate"
with pytest.raises(ValueError, match=msg):
merge(left, right, on="a", validate="jibberish")
# Two column merge, dups in both, but jointly no dups.
left = DataFrame(
{
"a": ["a", "a", "b", "b"],
"b": [0, 1, 0, 1],
"c": ["cat", "dog", "weasel", "horse"],
},
index=range(4),
)
right = DataFrame(
{
"a": ["a", "a", "b"],
"b": [0, 1, 0],
"d": ["meow", "bark", "um... weasel noise?"],
},
index=range(3),
)
expected_multi = DataFrame(
{
"a": ["a", "a", "b"],
"b": [0, 1, 0],
"c": ["cat", "dog", "weasel"],
"d": ["meow", "bark", "um... weasel noise?"],
},
index=range(3),
)
msg = (
"Merge keys are not unique in either left or right dataset; "
"not a one-to-one merge"
)
with pytest.raises(MergeError, match=msg):
merge(left, right, on="a", validate="1:1")
result = merge(left, right, on=["a", "b"], validate="1:1")
tm.assert_frame_equal(result, expected_multi)
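    # A minimal sketch of how ``validate`` is typically spelled in practice:
    # the accepted values are "1:1"/"one_to_one", "1:m"/"one_to_many",
    # "m:1"/"many_to_one" and "m:m"/"many_to_many" (the frames below are
    # purely illustrative).
    def test_validate_one_to_many_sketch(self):
        left = DataFrame({"a": ["x", "y"], "b": [1, 2]})
        right = DataFrame({"a": ["x", "x", "y"], "c": [10, 20, 30]})
        # unique left keys against duplicated right keys: one-to-many is fine
        merge(left, right, on="a", validate="one_to_many")
        # ... but the stricter one-to-one check must raise
        msg = "Merge keys are not unique in right dataset"
        with pytest.raises(MergeError, match=msg):
            merge(left, right, on="a", validate="one_to_one")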
def test_merge_two_empty_df_no_division_error(self):
# GH17776, PR #17846
a = pd.DataFrame({"a": [], "b": [], "c": []})
with np.errstate(divide="raise"):
merge(a, a, on=("a", "b"))
@pytest.mark.parametrize("how", ["right", "outer"])
@pytest.mark.parametrize(
"index,expected_index",
[
(
CategoricalIndex([1, 2, 4]),
CategoricalIndex([1, 2, 4, None, None, None]),
),
(
DatetimeIndex(["2001-01-01", "2002-02-02", "2003-03-03"]),
DatetimeIndex(
["2001-01-01", "2002-02-02", "2003-03-03", pd.NaT, pd.NaT, pd.NaT]
),
),
(Float64Index([1, 2, 3]), Float64Index([1, 2, 3, None, None, None])),
(Int64Index([1, 2, 3]), Float64Index([1, 2, 3, None, None, None])),
(
IntervalIndex.from_tuples([(1, 2), (2, 3), (3, 4)]),
IntervalIndex.from_tuples(
[(1, 2), (2, 3), (3, 4), np.nan, np.nan, np.nan]
),
),
(
PeriodIndex(["2001-01-01", "2001-01-02", "2001-01-03"], freq="D"),
PeriodIndex(
["2001-01-01", "2001-01-02", "2001-01-03", pd.NaT, pd.NaT, pd.NaT],
freq="D",
),
),
(
TimedeltaIndex(["1d", "2d", "3d"]),
TimedeltaIndex(["1d", "2d", "3d", pd.NaT, pd.NaT, pd.NaT]),
),
],
)
def test_merge_on_index_with_more_values(self, how, index, expected_index):
# GH 24212
# pd.merge gets [0, 1, 2, -1, -1, -1] as left_indexer, ensure that
# -1 is interpreted as a missing value instead of the last element
df1 = pd.DataFrame({"a": [0, 1, 2], "key": [0, 1, 2]}, index=index)
df2 = pd.DataFrame({"b": [0, 1, 2, 3, 4, 5]})
result = df1.merge(df2, left_on="key", right_index=True, how=how)
expected = pd.DataFrame(
[
[0, 0, 0],
[1, 1, 1],
[2, 2, 2],
[np.nan, 3, 3],
[np.nan, 4, 4],
[np.nan, 5, 5],
],
columns=["a", "key", "b"],
)
expected.set_index(expected_index, inplace=True)
tm.assert_frame_equal(result, expected)
def test_merge_right_index_right(self):
# Note: the expected output here is probably incorrect.
# See https://github.com/pandas-dev/pandas/issues/17257 for more.
# We include this as a regression test for GH-24897.
left = pd.DataFrame({"a": [1, 2, 3], "key": [0, 1, 1]})
right = pd.DataFrame({"b": [1, 2, 3]})
expected = pd.DataFrame(
{"a": [1, 2, 3, None], "key": [0, 1, 1, 2], "b": [1, 2, 2, 3]},
columns=["a", "key", "b"],
index=[0, 1, 2, np.nan],
)
result = left.merge(right, left_on="key", right_index=True, how="right")
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("how", ["left", "right"])
def test_merge_preserves_row_order(self, how):
# GH 27453
left_df = pd.DataFrame({"animal": ["dog", "pig"], "max_speed": [40, 11]})
right_df = pd.DataFrame({"animal": ["quetzal", "pig"], "max_speed": [80, 11]})
result = left_df.merge(right_df, on=["animal", "max_speed"], how=how)
if how == "right":
expected = pd.DataFrame(
{"animal": ["quetzal", "pig"], "max_speed": [80, 11]}
)
else:
expected = pd.DataFrame({"animal": ["dog", "pig"], "max_speed": [40, 11]})
tm.assert_frame_equal(result, expected)
def test_merge_take_missing_values_from_index_of_other_dtype(self):
# GH 24212
left = pd.DataFrame(
{
"a": [1, 2, 3],
"key": pd.Categorical(["a", "a", "b"], categories=list("abc")),
}
)
right = pd.DataFrame(
{"b": [1, 2, 3]}, index=pd.CategoricalIndex(["a", "b", "c"])
)
result = left.merge(right, left_on="key", right_index=True, how="right")
expected = pd.DataFrame(
{
"a": [1, 2, 3, None],
"key": pd.Categorical(["a", "a", "b", "c"]),
"b": [1, 1, 2, 3],
},
index=[0, 1, 2, np.nan],
)
expected = expected.reindex(columns=["a", "key", "b"])
tm.assert_frame_equal(result, expected)
def test_merge_readonly(self):
# https://github.com/pandas-dev/pandas/issues/27943
data1 = pd.DataFrame(
np.arange(20).reshape((4, 5)) + 1, columns=["a", "b", "c", "d", "e"]
)
data2 = pd.DataFrame(
np.arange(20).reshape((5, 4)) + 1, columns=["a", "b", "x", "y"]
)
data1._mgr.blocks[0].values.flags.writeable = False
data1.merge(data2) # no error
def _check_merge(x, y):
for how in ["inner", "left", "outer"]:
result = x.join(y, how=how)
expected = merge(x.reset_index(), y.reset_index(), how=how, sort=True)
expected = expected.set_index("index")
# TODO check_names on merge?
tm.assert_frame_equal(result, expected, check_names=False)
class TestMergeDtypes:
@pytest.mark.parametrize(
"right_vals", [["foo", "bar"], Series(["foo", "bar"]).astype("category")]
)
def test_different(self, right_vals):
left = DataFrame(
{
"A": ["foo", "bar"],
"B": Series(["foo", "bar"]).astype("category"),
"C": [1, 2],
"D": [1.0, 2.0],
"E": Series([1, 2], dtype="uint64"),
"F": Series([1, 2], dtype="int32"),
}
)
right = DataFrame({"A": right_vals})
# GH 9780
# We allow merging on object and categorical cols and cast
# categorical cols to object
result = pd.merge(left, right, on="A")
assert is_object_dtype(result.A.dtype)
@pytest.mark.parametrize("d1", [np.int64, np.int32, np.int16, np.int8, np.uint8])
@pytest.mark.parametrize("d2", [np.int64, np.float64, np.float32, np.float16])
def test_join_multi_dtypes(self, d1, d2):
dtype1 = np.dtype(d1)
dtype2 = np.dtype(d2)
left = DataFrame(
{
"k1": np.array([0, 1, 2] * 8, dtype=dtype1),
"k2": ["foo", "bar"] * 12,
"v": np.array(np.arange(24), dtype=np.int64),
}
)
index = MultiIndex.from_tuples([(2, "bar"), (1, "foo")])
right = DataFrame({"v2": np.array([5, 7], dtype=dtype2)}, index=index)
result = left.join(right, on=["k1", "k2"])
expected = left.copy()
if dtype2.kind == "i":
dtype2 = np.dtype("float64")
expected["v2"] = np.array(np.nan, dtype=dtype2)
expected.loc[(expected.k1 == 2) & (expected.k2 == "bar"), "v2"] = 5
expected.loc[(expected.k1 == 1) & (expected.k2 == "foo"), "v2"] = 7
tm.assert_frame_equal(result, expected)
result = left.join(right, on=["k1", "k2"], sort=True)
expected.sort_values(["k1", "k2"], kind="mergesort", inplace=True)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"int_vals, float_vals, exp_vals",
[
([1, 2, 3], [1.0, 2.0, 3.0], {"X": [1, 2, 3], "Y": [1.0, 2.0, 3.0]}),
([1, 2, 3], [1.0, 3.0], {"X": [1, 3], "Y": [1.0, 3.0]}),
([1, 2], [1.0, 2.0, 3.0], {"X": [1, 2], "Y": [1.0, 2.0]}),
],
)
def test_merge_on_ints_floats(self, int_vals, float_vals, exp_vals):
# GH 16572
# Check that float column is not cast to object if
# merging on float and int columns
A = DataFrame({"X": int_vals})
B = DataFrame({"Y": float_vals})
expected = DataFrame(exp_vals)
result = A.merge(B, left_on="X", right_on="Y")
tm.assert_frame_equal(result, expected)
result = B.merge(A, left_on="Y", right_on="X")
tm.assert_frame_equal(result, expected[["Y", "X"]])
def test_merge_key_dtype_cast(self):
# GH 17044
df1 = DataFrame({"key": [1.0, 2.0], "v1": [10, 20]}, columns=["key", "v1"])
df2 = DataFrame({"key": [2], "v2": [200]}, columns=["key", "v2"])
result = df1.merge(df2, on="key", how="left")
expected = DataFrame(
{"key": [1.0, 2.0], "v1": [10, 20], "v2": [np.nan, 200.0]},
columns=["key", "v1", "v2"],
)
tm.assert_frame_equal(result, expected)
def test_merge_on_ints_floats_warning(self):
# GH 16572
# merge will produce a warning when merging on int and
# float columns where the float values are not exactly
# equal to their int representation
A = DataFrame({"X": [1, 2, 3]})
B = DataFrame({"Y": [1.1, 2.5, 3.0]})
expected = DataFrame({"X": [3], "Y": [3.0]})
with tm.assert_produces_warning(UserWarning):
result = A.merge(B, left_on="X", right_on="Y")
tm.assert_frame_equal(result, expected)
with tm.assert_produces_warning(UserWarning):
result = B.merge(A, left_on="Y", right_on="X")
tm.assert_frame_equal(result, expected[["Y", "X"]])
# test no warning if float has NaNs
B = DataFrame({"Y": [np.nan, np.nan, 3.0]})
with tm.assert_produces_warning(None):
result = B.merge(A, left_on="Y", right_on="X")
tm.assert_frame_equal(result, expected[["Y", "X"]])
def test_merge_incompat_infer_boolean_object(self):
# GH21119: bool + object bool merge OK
df1 = DataFrame({"key": Series([True, False], dtype=object)})
df2 = DataFrame({"key": [True, False]})
expected = DataFrame({"key": [True, False]}, dtype=object)
result = pd.merge(df1, df2, on="key")
tm.assert_frame_equal(result, expected)
result = pd.merge(df2, df1, on="key")
tm.assert_frame_equal(result, expected)
# with missing value
df1 = DataFrame({"key": Series([True, False, np.nan], dtype=object)})
df2 = DataFrame({"key": [True, False]})
expected = DataFrame({"key": [True, False]}, dtype=object)
result = pd.merge(df1, df2, on="key")
tm.assert_frame_equal(result, expected)
result = pd.merge(df2, df1, on="key")
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"df1_vals, df2_vals",
[
# merge on category coerces to object
([0, 1, 2], Series(["a", "b", "a"]).astype("category")),
([0.0, 1.0, 2.0], Series(["a", "b", "a"]).astype("category")),
            # do not infer
([0, 1], pd.Series([False, True], dtype=object)),
([0, 1], pd.Series([False, True], dtype=bool)),
],
)
def test_merge_incompat_dtypes_are_ok(self, df1_vals, df2_vals):
        # these are explicitly allowed incompatible merges that pass through;
        # the result type depends on whether the values on the rhs can be
        # inferred, otherwise they will be coerced to object
df1 = DataFrame({"A": df1_vals})
df2 = DataFrame({"A": df2_vals})
result = pd.merge(df1, df2, on=["A"])
assert is_object_dtype(result.A.dtype)
result = pd.merge(df2, df1, on=["A"])
assert is_object_dtype(result.A.dtype)
@pytest.mark.parametrize(
"df1_vals, df2_vals",
[
# do not infer to numeric
(Series([1, 2], dtype="uint64"), ["a", "b", "c"]),
(Series([1, 2], dtype="int32"), ["a", "b", "c"]),
([0, 1, 2], ["0", "1", "2"]),
([0.0, 1.0, 2.0], ["0", "1", "2"]),
([0, 1, 2], ["0", "1", "2"]),
(
pd.date_range("1/1/2011", periods=2, freq="D"),
["2011-01-01", "2011-01-02"],
),
(pd.date_range("1/1/2011", periods=2, freq="D"), [0, 1]),
(pd.date_range("1/1/2011", periods=2, freq="D"), [0.0, 1.0]),
(
pd.date_range("20130101", periods=3),
pd.date_range("20130101", periods=3, tz="US/Eastern"),
),
],
)
def test_merge_incompat_dtypes_error(self, df1_vals, df2_vals):
# GH 9780, GH 15800
# Raise a ValueError when a user tries to merge on
# dtypes that are incompatible (e.g., obj and int/float)
df1 = DataFrame({"A": df1_vals})
df2 = DataFrame({"A": df2_vals})
msg = (
f"You are trying to merge on {df1['A'].dtype} and "
f"{df2['A'].dtype} columns. If you wish to proceed "
"you should use pd.concat"
)
msg = re.escape(msg)
with pytest.raises(ValueError, match=msg):
pd.merge(df1, df2, on=["A"])
# Check that error still raised when swapping order of dataframes
msg = (
f"You are trying to merge on {df2['A'].dtype} and "
f"{df1['A'].dtype} columns. If you wish to proceed "
"you should use pd.concat"
)
msg = re.escape(msg)
with pytest.raises(ValueError, match=msg):
pd.merge(df2, df1, on=["A"])
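    # Note: as the error message above suggests, the usual remedy is either to
    # cast the key columns to a common dtype before merging or to fall back on
    # ``pd.concat`` when an actual join is not required.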
@pytest.fixture
def left():
np.random.seed(1234)
return DataFrame(
{
"X": Series(np.random.choice(["foo", "bar"], size=(10,))).astype(
CDT(["foo", "bar"])
),
"Y": np.random.choice(["one", "two", "three"], size=(10,)),
}
)
@pytest.fixture
def right():
np.random.seed(1234)
return DataFrame(
{"X": Series(["foo", "bar"]).astype(CDT(["foo", "bar"])), "Z": [1, 2]}
)
class TestMergeCategorical:
def test_identical(self, left):
        # merging a frame with itself on the same key should preserve dtypes
merged = pd.merge(left, left, on="X")
result = merged.dtypes.sort_index()
expected = Series(
[CategoricalDtype(), np.dtype("O"), np.dtype("O")],
index=["X", "Y_x", "Y_y"],
)
tm.assert_series_equal(result, expected)
def test_basic(self, left, right):
# we have matching Categorical dtypes in X
# so should preserve the merged column
merged = pd.merge(left, right, on="X")
result = merged.dtypes.sort_index()
expected = Series(
[CategoricalDtype(), np.dtype("O"), np.dtype("int64")],
index=["X", "Y", "Z"],
)
tm.assert_series_equal(result, expected)
def test_merge_categorical(self):
# GH 9426
right = DataFrame(
{
"c": {0: "a", 1: "b", 2: "c", 3: "d", 4: "e"},
"d": {0: "null", 1: "null", 2: "null", 3: "null", 4: "null"},
}
)
left = DataFrame(
{
"a": {0: "f", 1: "f", 2: "f", 3: "f", 4: "f"},
"b": {0: "g", 1: "g", 2: "g", 3: "g", 4: "g"},
}
)
df = pd.merge(left, right, how="left", left_on="b", right_on="c")
# object-object
expected = df.copy()
# object-cat
# note that we propagate the category
# because we don't have any matching rows
cright = right.copy()
cright["d"] = cright["d"].astype("category")
result = pd.merge(left, cright, how="left", left_on="b", right_on="c")
expected["d"] = expected["d"].astype(CategoricalDtype(["null"]))
tm.assert_frame_equal(result, expected)
# cat-object
cleft = left.copy()
cleft["b"] = cleft["b"].astype("category")
result = pd.merge(cleft, cright, how="left", left_on="b", right_on="c")
tm.assert_frame_equal(result, expected)
# cat-cat
cright = right.copy()
cright["d"] = cright["d"].astype("category")
cleft = left.copy()
cleft["b"] = cleft["b"].astype("category")
result = pd.merge(cleft, cright, how="left", left_on="b", right_on="c")
tm.assert_frame_equal(result, expected)
    def test_merge_categorical_unordered_equal(self):
# GH-19551
df1 = DataFrame(
{
"Foo": Categorical(["A", "B", "C"], categories=["A", "B", "C"]),
"Left": ["A0", "B0", "C0"],
}
)
df2 = DataFrame(
{
"Foo": Categorical(["C", "B", "A"], categories=["C", "B", "A"]),
"Right": ["C1", "B1", "A1"],
}
)
result = pd.merge(df1, df2, on=["Foo"])
expected = DataFrame(
{
"Foo": pd.Categorical(["A", "B", "C"]),
"Left": ["A0", "B0", "C0"],
"Right": ["A1", "B1", "C1"],
}
)
tm.assert_frame_equal(result, expected)
def test_other_columns(self, left, right):
        # non-merge columns should be preserved if possible
right = right.assign(Z=right.Z.astype("category"))
merged = pd.merge(left, right, on="X")
result = merged.dtypes.sort_index()
expected = Series(
[CategoricalDtype(), np.dtype("O"), CategoricalDtype()],
index=["X", "Y", "Z"],
)
tm.assert_series_equal(result, expected)
# categories are preserved
assert left.X.values.is_dtype_equal(merged.X.values)
assert right.Z.values.is_dtype_equal(merged.Z.values)
@pytest.mark.parametrize(
"change",
[
lambda x: x,
lambda x: x.astype(CDT(["foo", "bar", "bah"])),
lambda x: x.astype(CDT(ordered=True)),
],
)
def test_dtype_on_merged_different(self, change, join_type, left, right):
# our merging columns, X now has 2 different dtypes
# so we must be object as a result
X = change(right.X.astype("object"))
right = right.assign(X=X)
assert is_categorical_dtype(left.X.values.dtype)
# assert not left.X.values.is_dtype_equal(right.X.values)
merged = pd.merge(left, right, on="X", how=join_type)
result = merged.dtypes.sort_index()
expected = Series(
[np.dtype("O"), np.dtype("O"), np.dtype("int64")], index=["X", "Y", "Z"]
)
tm.assert_series_equal(result, expected)
def test_self_join_multiple_categories(self):
# GH 16767
# non-duplicates should work with multiple categories
m = 5
df = pd.DataFrame(
{
"a": ["a", "b", "c", "d", "e", "f", "g", "h", "i", "j"] * m,
"b": ["t", "w", "x", "y", "z"] * 2 * m,
"c": [
letter
for each in ["m", "n", "u", "p", "o"]
for letter in [each] * 2 * m
],
"d": [
letter
for each in [
"aa",
"bb",
"cc",
"dd",
"ee",
"ff",
"gg",
"hh",
"ii",
"jj",
]
for letter in [each] * m
],
}
)
# change them all to categorical variables
df = df.apply(lambda x: x.astype("category"))
# self-join should equal ourselves
result = pd.merge(df, df, on=list(df.columns))
tm.assert_frame_equal(result, df)
def test_dtype_on_categorical_dates(self):
# GH 16900
# dates should not be coerced to ints
df = pd.DataFrame(
[[date(2001, 1, 1), 1.1], [date(2001, 1, 2), 1.3]], columns=["date", "num2"]
)
df["date"] = df["date"].astype("category")
df2 = pd.DataFrame(
[[date(2001, 1, 1), 1.3], [date(2001, 1, 3), 1.4]], columns=["date", "num4"]
)
df2["date"] = df2["date"].astype("category")
expected_outer = pd.DataFrame(
[
[pd.Timestamp("2001-01-01"), 1.1, 1.3],
[pd.Timestamp("2001-01-02"), 1.3, np.nan],
[pd.Timestamp("2001-01-03"), np.nan, 1.4],
],
columns=["date", "num2", "num4"],
)
result_outer = pd.merge(df, df2, how="outer", on=["date"])
tm.assert_frame_equal(result_outer, expected_outer)
expected_inner = pd.DataFrame(
[[pd.Timestamp("2001-01-01"), 1.1, 1.3]], columns=["date", "num2", "num4"]
)
result_inner = pd.merge(df, df2, how="inner", on=["date"])
tm.assert_frame_equal(result_inner, expected_inner)
@pytest.mark.parametrize("ordered", [True, False])
@pytest.mark.parametrize(
"category_column,categories,expected_categories",
[
([False, True, True, False], [True, False], [True, False]),
([2, 1, 1, 2], [1, 2], [1, 2]),
(["False", "True", "True", "False"], ["True", "False"], ["True", "False"]),
],
)
    def test_merging_with_bool_or_int_categorical_column(
self, category_column, categories, expected_categories, ordered
):
# GH 17187
# merging with a boolean/int categorical column
df1 = pd.DataFrame({"id": [1, 2, 3, 4], "cat": category_column})
df1["cat"] = df1["cat"].astype(CDT(categories, ordered=ordered))
df2 = pd.DataFrame({"id": [2, 4], "num": [1, 9]})
result = df1.merge(df2)
expected = pd.DataFrame(
{"id": [2, 4], "cat": expected_categories, "num": [1, 9]}
)
expected["cat"] = expected["cat"].astype(CDT(categories, ordered=ordered))
tm.assert_frame_equal(expected, result)
def test_merge_on_int_array(self):
# GH 23020
df = pd.DataFrame({"A": pd.Series([1, 2, np.nan], dtype="Int64"), "B": 1})
result = pd.merge(df, df, on="A")
expected = pd.DataFrame(
{"A": pd.Series([1, 2, np.nan], dtype="Int64"), "B_x": 1, "B_y": 1}
)
tm.assert_frame_equal(result, expected)
@pytest.fixture
def left_df():
return DataFrame({"a": [20, 10, 0]}, index=[2, 1, 0])
@pytest.fixture
def right_df():
return DataFrame({"b": [300, 100, 200]}, index=[3, 1, 2])
class TestMergeOnIndexes:
@pytest.mark.parametrize(
"how, sort, expected",
[
("inner", False, DataFrame({"a": [20, 10], "b": [200, 100]}, index=[2, 1])),
("inner", True, DataFrame({"a": [10, 20], "b": [100, 200]}, index=[1, 2])),
(
"left",
False,
DataFrame({"a": [20, 10, 0], "b": [200, 100, np.nan]}, index=[2, 1, 0]),
),
(
"left",
True,
DataFrame({"a": [0, 10, 20], "b": [np.nan, 100, 200]}, index=[0, 1, 2]),
),
(
"right",
False,
DataFrame(
{"a": [np.nan, 10, 20], "b": [300, 100, 200]}, index=[3, 1, 2]
),
),
(
"right",
True,
DataFrame(
{"a": [10, 20, np.nan], "b": [100, 200, 300]}, index=[1, 2, 3]
),
),
(
"outer",
False,
DataFrame(
{"a": [0, 10, 20, np.nan], "b": [np.nan, 100, 200, 300]},
index=[0, 1, 2, 3],
),
),
(
"outer",
True,
DataFrame(
{"a": [0, 10, 20, np.nan], "b": [np.nan, 100, 200, 300]},
index=[0, 1, 2, 3],
),
),
],
)
def test_merge_on_indexes(self, left_df, right_df, how, sort, expected):
result = pd.merge(
left_df, right_df, left_index=True, right_index=True, how=how, sort=sort
)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"index",
[
CategoricalIndex(["A", "B"], categories=["A", "B"], name="index_col"),
Float64Index([1.0, 2.0], name="index_col"),
Int64Index([1, 2], name="index_col"),
UInt64Index([1, 2], name="index_col"),
RangeIndex(start=0, stop=2, name="index_col"),
DatetimeIndex(["2018-01-01", "2018-01-02"], name="index_col"),
],
ids=lambda x: type(x).__name__,
)
def test_merge_index_types(index):
# gh-20777
# assert key access is consistent across index types
left = DataFrame({"left_data": [1, 2]}, index=index)
right = DataFrame({"right_data": [1.0, 2.0]}, index=index)
result = left.merge(right, on=["index_col"])
expected = DataFrame(
OrderedDict([("left_data", [1, 2]), ("right_data", [1.0, 2.0])]), index=index
)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"on,left_on,right_on,left_index,right_index,nm",
[
(["outer", "inner"], None, None, False, False, "B"),
(None, None, None, True, True, "B"),
(None, ["outer", "inner"], None, False, True, "B"),
(None, None, ["outer", "inner"], True, False, "B"),
(["outer", "inner"], None, None, False, False, None),
(None, None, None, True, True, None),
(None, ["outer", "inner"], None, False, True, None),
(None, None, ["outer", "inner"], True, False, None),
],
)
def test_merge_series(on, left_on, right_on, left_index, right_index, nm):
# GH 21220
a = pd.DataFrame(
{"A": [1, 2, 3, 4]},
index=pd.MultiIndex.from_product(
[["a", "b"], [0, 1]], names=["outer", "inner"]
),
)
b = pd.Series(
[1, 2, 3, 4],
index=pd.MultiIndex.from_product(
[["a", "b"], [1, 2]], names=["outer", "inner"]
),
name=nm,
)
expected = pd.DataFrame(
{"A": [2, 4], "B": [1, 3]},
index=pd.MultiIndex.from_product([["a", "b"], [1]], names=["outer", "inner"]),
)
if nm is not None:
result = pd.merge(
a,
b,
on=on,
left_on=left_on,
right_on=right_on,
left_index=left_index,
right_index=right_index,
)
tm.assert_frame_equal(result, expected)
else:
msg = "Cannot merge a Series without a name"
with pytest.raises(ValueError, match=msg):
result = pd.merge(
a,
b,
on=on,
left_on=left_on,
right_on=right_on,
left_index=left_index,
right_index=right_index,
)
@pytest.mark.parametrize(
"col1, col2, kwargs, expected_cols",
[
(0, 0, dict(suffixes=("", "_dup")), ["0", "0_dup"]),
(0, 0, dict(suffixes=(None, "_dup")), [0, "0_dup"]),
(0, 0, dict(suffixes=("_x", "_y")), ["0_x", "0_y"]),
("a", 0, dict(suffixes=(None, "_y")), ["a", 0]),
(0.0, 0.0, dict(suffixes=("_x", None)), ["0.0_x", 0.0]),
("b", "b", dict(suffixes=(None, "_y")), ["b", "b_y"]),
("a", "a", dict(suffixes=("_x", None)), ["a_x", "a"]),
("a", "b", dict(suffixes=("_x", None)), ["a", "b"]),
("a", "a", dict(suffixes=[None, "_x"]), ["a", "a_x"]),
(0, 0, dict(suffixes=["_a", None]), ["0_a", 0]),
("a", "a", dict(), ["a_x", "a_y"]),
(0, 0, dict(), ["0_x", "0_y"]),
],
)
def test_merge_suffix(col1, col2, kwargs, expected_cols):
# issue: 24782
a = pd.DataFrame({col1: [1, 2, 3]})
b = pd.DataFrame({col2: [4, 5, 6]})
expected = pd.DataFrame([[1, 4], [2, 5], [3, 6]], columns=expected_cols)
result = a.merge(b, left_index=True, right_index=True, **kwargs)
tm.assert_frame_equal(result, expected)
result = pd.merge(a, b, left_index=True, right_index=True, **kwargs)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"how,expected",
[
(
"right",
DataFrame(
{"A": [100, 200, 300], "B1": [60, 70, np.nan], "B2": [600, 700, 800]}
),
),
(
"outer",
DataFrame(
{
"A": [100, 200, 1, 300],
"B1": [60, 70, 80, np.nan],
"B2": [600, 700, np.nan, 800],
}
),
),
],
)
def test_merge_duplicate_suffix(how, expected):
left_df = DataFrame({"A": [100, 200, 1], "B": [60, 70, 80]})
right_df = DataFrame({"A": [100, 200, 300], "B": [600, 700, 800]})
result = merge(left_df, right_df, on="A", how=how, suffixes=("_x", "_x"))
expected.columns = ["A", "B_x", "B_x"]
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"col1, col2, suffixes",
[
("a", "a", [None, None]),
("a", "a", (None, None)),
("a", "a", ("", None)),
(0, 0, [None, None]),
(0, 0, (None, "")),
],
)
def test_merge_suffix_error(col1, col2, suffixes):
# issue: 24782
a = pd.DataFrame({col1: [1, 2, 3]})
b = pd.DataFrame({col2: [3, 4, 5]})
# TODO: might reconsider current raise behaviour, see issue 24782
msg = "columns overlap but no suffix specified"
with pytest.raises(ValueError, match=msg):
pd.merge(a, b, left_index=True, right_index=True, suffixes=suffixes)
@pytest.mark.parametrize("col1, col2, suffixes", [("a", "a", None), (0, 0, None)])
def test_merge_suffix_none_error(col1, col2, suffixes):
# issue: 24782
a = pd.DataFrame({col1: [1, 2, 3]})
b = pd.DataFrame({col2: [3, 4, 5]})
# TODO: might reconsider current raise behaviour, see GH24782
msg = "iterable"
with pytest.raises(TypeError, match=msg):
pd.merge(a, b, left_index=True, right_index=True, suffixes=suffixes)
@pytest.mark.parametrize("cat_dtype", ["one", "two"])
@pytest.mark.parametrize("reverse", [True, False])
def test_merge_equal_cat_dtypes(cat_dtype, reverse):
# see gh-22501
cat_dtypes = {
"one": CategoricalDtype(categories=["a", "b", "c"], ordered=False),
"two": CategoricalDtype(categories=["a", "b", "c"], ordered=False),
}
df1 = DataFrame(
{"foo": Series(["a", "b", "c"]).astype(cat_dtypes["one"]), "left": [1, 2, 3]}
).set_index("foo")
data_foo = ["a", "b", "c"]
data_right = [1, 2, 3]
if reverse:
data_foo.reverse()
data_right.reverse()
df2 = DataFrame(
{"foo": Series(data_foo).astype(cat_dtypes[cat_dtype]), "right": data_right}
).set_index("foo")
result = df1.merge(df2, left_index=True, right_index=True)
expected = DataFrame(
{
"left": [1, 2, 3],
"right": [1, 2, 3],
"foo": Series(["a", "b", "c"]).astype(cat_dtypes["one"]),
}
).set_index("foo")
tm.assert_frame_equal(result, expected)
def test_merge_equal_cat_dtypes2():
# see gh-22501
cat_dtype = CategoricalDtype(categories=["a", "b", "c"], ordered=False)
# Test Data
df1 = DataFrame(
{"foo": Series(["a", "b"]).astype(cat_dtype), "left": [1, 2]}
).set_index("foo")
df2 = DataFrame(
{"foo": Series(["a", "b", "c"]).astype(cat_dtype), "right": [3, 2, 1]}
).set_index("foo")
result = df1.merge(df2, left_index=True, right_index=True)
expected = DataFrame(
{"left": [1, 2], "right": [3, 2], "foo": Series(["a", "b"]).astype(cat_dtype)}
).set_index("foo")
tm.assert_frame_equal(result, expected)
def test_merge_on_cat_and_ext_array():
# GH 28668
right = DataFrame(
{"a": Series([pd.Interval(0, 1), pd.Interval(1, 2)], dtype="interval")}
)
left = right.copy()
left["a"] = left["a"].astype("category")
result = pd.merge(left, right, how="inner", on="a")
expected = right.copy()
tm.assert_frame_equal(result, expected)
def test_merge_multiindex_columns():
# Issue #28518
    # Verify that merging two dataframes gives the expected labels
    # The original cause of this issue comes from a bug in lexsort_depth and is
    # tested in test_lexsort_depth
letters = ["a", "b", "c", "d"]
numbers = ["1", "2", "3"]
index = pd.MultiIndex.from_product((letters, numbers), names=["outer", "inner"])
frame_x = pd.DataFrame(columns=index)
frame_x["id"] = ""
frame_y = pd.DataFrame(columns=index)
frame_y["id"] = ""
l_suf = "_x"
r_suf = "_y"
result = frame_x.merge(frame_y, on="id", suffixes=((l_suf, r_suf)))
# Constructing the expected results
expected_labels = [l + l_suf for l in letters] + [l + r_suf for l in letters]
expected_index = pd.MultiIndex.from_product(
[expected_labels, numbers], names=["outer", "inner"]
)
expected = pd.DataFrame(columns=expected_index)
expected["id"] = ""
tm.assert_frame_equal(result, expected)
def test_merge_datetime_upcast_dtype():
# https://github.com/pandas-dev/pandas/issues/31208
df1 = pd.DataFrame({"x": ["a", "b", "c"], "y": ["1", "2", "4"]})
df2 = pd.DataFrame(
{"y": ["1", "2", "3"], "z": pd.to_datetime(["2000", "2001", "2002"])}
)
result = pd.merge(df1, df2, how="left", on="y")
expected = pd.DataFrame(
{
"x": ["a", "b", "c"],
"y": ["1", "2", "4"],
"z": pd.to_datetime(["2000", "2001", "NaT"]),
}
)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("n_categories", [5, 128])
def test_categorical_non_unique_monotonic(n_categories):
# GH 28189
    # With n_categories as 5, we test that the int8 case is hit in libjoin;
    # with n_categories as 128, we test the int16 case.
left_index = CategoricalIndex([0] + list(range(n_categories)))
df1 = DataFrame(range(n_categories + 1), columns=["value"], index=left_index)
df2 = DataFrame(
[[6]],
columns=["value"],
index=CategoricalIndex([0], categories=np.arange(n_categories)),
)
result = merge(df1, df2, how="left", left_index=True, right_index=True)
expected = DataFrame(
[[i, 6.0] if i < 2 else [i, np.nan] for i in range(n_categories + 1)],
columns=["value_x", "value_y"],
index=left_index,
)
tm.assert_frame_equal(expected, result)
| bsd-3-clause |
PatrickOReilly/scikit-learn | benchmarks/bench_lasso.py | 111 | 3364 | """
Benchmarks of Lasso vs LassoLars
First, we fix a training set and increase the number of
samples. Then we plot the computation time as a function of
the number of samples.
In the second benchmark, we increase the number of dimensions of the
training set. Then we plot the computation time as a function of
the number of dimensions.
In both cases, only 10% of the features are informative.
"""
import gc
from time import time
import numpy as np
from sklearn.datasets.samples_generator import make_regression
def compute_bench(alpha, n_samples, n_features, precompute):
lasso_results = []
lars_lasso_results = []
it = 0
for ns in n_samples:
for nf in n_features:
it += 1
print('==================')
print('Iteration %s of %s' % (it, max(len(n_samples),
len(n_features))))
print('==================')
n_informative = nf // 10
X, Y, coef_ = make_regression(n_samples=ns, n_features=nf,
n_informative=n_informative,
noise=0.1, coef=True)
X /= np.sqrt(np.sum(X ** 2, axis=0)) # Normalize data
gc.collect()
print("- benchmarking Lasso")
clf = Lasso(alpha=alpha, fit_intercept=False,
precompute=precompute)
tstart = time()
clf.fit(X, Y)
lasso_results.append(time() - tstart)
gc.collect()
print("- benchmarking LassoLars")
clf = LassoLars(alpha=alpha, fit_intercept=False,
normalize=False, precompute=precompute)
tstart = time()
clf.fit(X, Y)
lars_lasso_results.append(time() - tstart)
return lasso_results, lars_lasso_results
if __name__ == '__main__':
from sklearn.linear_model import Lasso, LassoLars
import matplotlib.pyplot as plt
alpha = 0.01 # regularization parameter
n_features = 10
list_n_samples = np.linspace(100, 1000000, 5).astype(np.int)
lasso_results, lars_lasso_results = compute_bench(alpha, list_n_samples,
[n_features], precompute=True)
plt.figure('scikit-learn LASSO benchmark results')
plt.subplot(211)
plt.plot(list_n_samples, lasso_results, 'b-',
label='Lasso')
plt.plot(list_n_samples, lars_lasso_results, 'r-',
label='LassoLars')
plt.title('precomputed Gram matrix, %d features, alpha=%s' % (n_features,
alpha))
plt.legend(loc='upper left')
plt.xlabel('number of samples')
plt.ylabel('Time (s)')
plt.axis('tight')
n_samples = 2000
list_n_features = np.linspace(500, 3000, 5).astype(np.int)
lasso_results, lars_lasso_results = compute_bench(alpha, [n_samples],
list_n_features, precompute=False)
plt.subplot(212)
plt.plot(list_n_features, lasso_results, 'b-', label='Lasso')
plt.plot(list_n_features, lars_lasso_results, 'r-', label='LassoLars')
plt.title('%d samples, alpha=%s' % (n_samples, alpha))
plt.legend(loc='upper left')
plt.xlabel('number of features')
plt.ylabel('Time (s)')
plt.axis('tight')
plt.show()
| bsd-3-clause |
anne-urai/serialDDM | graphicalModels/examples/weaklensing.py | 7 | 1465 | """
A model for weak lensing
========================
This is (**Daft** co-author) Hogg's model for the observational
cosmology method known as *weak gravitational lensing*, if that method
were properly probabilistic (which it usually isn't). Hogg put the
model here for one very important reason: *Because he can*. Oh, and
it demonstrates that you can represent non-trivial scientific projects
with **Daft**.
"""
from matplotlib import rc
rc("font", family="serif", size=12)
rc("text", usetex=True)
import daft
pgm = daft.PGM([4.7, 2.35], origin=[-1.35, 2.2])
pgm.add_node(daft.Node("Omega", r"$\Omega$", -1, 4))
pgm.add_node(daft.Node("gamma", r"$\gamma$", 0, 4))
pgm.add_node(daft.Node("obs", r"$\epsilon^{\mathrm{obs}}_n$", 1, 4,
observed=True))
pgm.add_node(daft.Node("alpha", r"$\alpha$", 3, 4))
pgm.add_node(daft.Node("true", r"$\epsilon^{\mathrm{true}}_n$", 2, 4))
pgm.add_node(daft.Node("sigma", r"$\sigma_n$", 1, 3))
pgm.add_node(daft.Node("Sigma", r"$\Sigma$", 0, 3))
pgm.add_node(daft.Node("x", r"$x_n$", 2, 3, observed=True))
pgm.add_plate(daft.Plate([0.5, 2.25, 2, 2.25],
label=r"galaxies $n$"))
pgm.add_edge("Omega", "gamma")
pgm.add_edge("gamma", "obs")
pgm.add_edge("alpha", "true")
pgm.add_edge("true", "obs")
pgm.add_edge("x", "obs")
pgm.add_edge("Sigma", "sigma")
pgm.add_edge("sigma", "obs")
pgm.render()
pgm.figure.savefig("weaklensing.pdf")
pgm.figure.savefig("weaklensing.png", dpi=150)
| mit |
ericdill/bokeh | bokeh/charts/builder/tests/test_dot_builder.py | 33 | 3939 | """ This is the Bokeh charts testing interface.
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2014, Continuum Analytics, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import absolute_import
from collections import OrderedDict
import unittest
import numpy as np
from numpy.testing import assert_array_equal
import pandas as pd
from bokeh.charts import Dot
from bokeh.charts.builder.tests._utils import create_chart
#-----------------------------------------------------------------------------
# Classes and functions
#-----------------------------------------------------------------------------
class TestDot(unittest.TestCase):
def test_supported_input(self):
xyvalues = OrderedDict()
xyvalues['python']=[2, 5]
xyvalues['pypy']=[12, 40]
xyvalues['jython']=[22, 30]
xyvaluesdf = pd.DataFrame(xyvalues, index=['lists', 'loops'])
cat = ['lists', 'loops']
catjython = ['lists:0.75', 'loops:0.75']
catpypy = ['lists:0.5', 'loops:0.5']
catpython = ['lists:0.25', 'loops:0.25']
python = seg_top_python = [2, 5]
pypy = seg_top_pypy = [12, 40]
jython = seg_top_jython = [22, 30]
zero = [0, 0]
for i, _xy in enumerate([xyvalues, xyvaluesdf]):
hm = create_chart(Dot, _xy, cat=cat)
builder = hm._builders[0]
self.assertEqual(sorted(builder._groups), sorted(list(xyvalues.keys())))
assert_array_equal(builder._data['cat'], cat)
assert_array_equal(builder._data['catjython'], catjython)
assert_array_equal(builder._data['catpython'], catpython)
assert_array_equal(builder._data['catpypy'], catpypy)
assert_array_equal(builder._data['python'], python)
assert_array_equal(builder._data['jython'], jython)
assert_array_equal(builder._data['pypy'], pypy)
assert_array_equal(builder._data['seg_top_python'], seg_top_python)
assert_array_equal(builder._data['seg_top_jython'], seg_top_jython)
assert_array_equal(builder._data['seg_top_pypy'], seg_top_pypy)
assert_array_equal(builder._data['z_python'], zero)
assert_array_equal(builder._data['z_pypy'], zero)
assert_array_equal(builder._data['z_jython'], zero)
assert_array_equal(builder._data['zero'], zero)
lvalues = [[2, 5], [12, 40], [22, 30]]
for _xy in [lvalues, np.array(lvalues)]:
hm = create_chart(Dot, _xy, cat=cat)
builder = hm._builders[0]
self.assertEqual(builder._groups, ['0', '1', '2'])
assert_array_equal(builder._data['cat'], cat)
assert_array_equal(builder._data['cat0'], catpython)
assert_array_equal(builder._data['cat1'], catpypy)
assert_array_equal(builder._data['cat2'], catjython)
assert_array_equal(builder._data['0'], python)
assert_array_equal(builder._data['1'], pypy)
assert_array_equal(builder._data['2'], jython)
assert_array_equal(builder._data['seg_top_0'], seg_top_python)
assert_array_equal(builder._data['seg_top_1'], seg_top_pypy)
assert_array_equal(builder._data['seg_top_2'], seg_top_jython)
assert_array_equal(builder._data['z_0'], zero)
assert_array_equal(builder._data['z_1'], zero)
assert_array_equal(builder._data['z_2'], zero)
assert_array_equal(builder._data['zero'], zero)
| bsd-3-clause |
dbaranchuk/hnsw | plots/graphic_PQ8_R10.py | 1 | 2425 | OIMI_PQ8_R10_txt = '''
0.2790
0.3233
0.3838
0.4227
0.4701
0.4844
0.4984
0.5040
'''
OIMI_PQ8_T_txt = '''
1.10
1.12
1.35
1.65
2.65
3.73
6.79
10.8
'''
IVF2M_PQ8_R10_txt = '''
0.3329
0.3926
0.4625
0.5019
0.5413
0.5531
0.5615
0.5648
'''
IVF2M_PQ8_T_txt = '''
0.22
0.24
0.33
0.42
0.79
1.22
2.15
3.10
'''
IVF4M_PQ8_R10_txt = '''
0.3754
0.4351
0.4981
0.5321
0.5591
0.5684
0.5743
0.5772
'''
IVF4M_PQ8_T_txt = '''
0.32
0.37
0.60
0.88
1.90
3.15
6.15
9.14
'''
IVFG_PQ8_R10_txt = '''
0.2997
0.3607
0.4497
0.5054
0.5669
0.5805
0.5924
0.5961
'''
IVFG_PQ8_T_txt = '''
0.22
0.24
0.34
0.45
0.92
1.36
2.50
3.64
'''
IVFGP_PQ8_R10_txt = '''
0.3493
0.4152
0.4996
0.5440
0.5819
0.5908
0.5987
0.6037
'''
IVFGP_PQ8_T_txt = '''
0.25
0.28
0.39
0.53
1.10
1.65
2.91
4.26
'''
from matplotlib.backends.backend_pdf import PdfPages
import matplotlib.pyplot as plt
import numpy
import re
import seaborn as sns
sns.set(style='ticks', palette='Set2')
sns.despine()
dataset = "DEEP"
if dataset == "DEEP":
OIMI_PQ8_R10 = re.findall(r"[0-9.]+", OIMI_PQ8_R10_txt)
OIMI_PQ8_T = re.findall(r"[0-9.]+", OIMI_PQ8_T_txt)
IVF2M_PQ8_R10 = re.findall(r"[0-9.]+", IVF2M_PQ8_R10_txt)
IVF2M_PQ8_T = re.findall(r"[0-9.]+", IVF2M_PQ8_T_txt)
IVF4M_PQ8_R10 = re.findall(r"[0-9.]+", IVF4M_PQ8_R10_txt)
IVF4M_PQ8_T = re.findall(r"[0-9.]+", IVF4M_PQ8_T_txt)
IVFG_PQ8_R10 = re.findall(r"[0-9.]+", IVFG_PQ8_R10_txt)
IVFG_PQ8_T = re.findall(r"[0-9.]+", IVFG_PQ8_T_txt)
IVFGP_PQ8_R10 = re.findall(r"[0-9.]+", IVFGP_PQ8_R10_txt)
IVFGP_PQ8_T = re.findall(r"[0-9.]+", IVFGP_PQ8_T_txt)
plt.figure(figsize=[5,4])
lineOIMI, = plt.plot(OIMI_PQ8_T, OIMI_PQ8_R10, 'r', label = 'OIMI-D-OADC $K{=}2^{14}$')
lineIVF2M, = plt.plot(IVF2M_PQ8_T, IVF2M_PQ8_R10, '-g', label = 'IVFOADC $K{=}2^{21}$')
lineIVF4M, = plt.plot(IVF4M_PQ8_T, IVF4M_PQ8_R10, '-m', label = 'IVFOADC $K{=}2^{22}$')
lineIVFG, = plt.plot(IVFG_PQ8_T, IVFG_PQ8_R10, '-c', label = 'IVFOADC+G $K{=}2^{20}$')
lineIVFGP, = plt.plot(IVFGP_PQ8_T, IVFGP_PQ8_R10, '--b', label = 'IVFOADC+G+P $K{=}2^{20}$')
plt.xticks(numpy.arange(0.2, 2.01, 0.2))
plt.yticks(numpy.arange(0.3, 0.61, 0.05))
plt.axis([0.2, 2.0, 0.30, 0.601])
#plt.xlabel('Time (ms)', fontsize=12)
plt.ylabel('R@10, 8 bytes', fontsize=12)
#plt.legend(frameon = True, fontsize=9, loc=4)
pp = PdfPages('recallR10_PQ8.pdf')
pp.savefig(bbox_inches='tight')
pp.close()
| apache-2.0 |
robin-lai/scikit-learn | sklearn/tests/test_kernel_ridge.py | 342 | 3027 | import numpy as np
import scipy.sparse as sp
from sklearn.datasets import make_regression
from sklearn.linear_model import Ridge
from sklearn.kernel_ridge import KernelRidge
from sklearn.metrics.pairwise import pairwise_kernels
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_array_almost_equal
X, y = make_regression(n_features=10)
Xcsr = sp.csr_matrix(X)
Xcsc = sp.csc_matrix(X)
Y = np.array([y, y]).T
def test_kernel_ridge():
pred = Ridge(alpha=1, fit_intercept=False).fit(X, y).predict(X)
pred2 = KernelRidge(kernel="linear", alpha=1).fit(X, y).predict(X)
assert_array_almost_equal(pred, pred2)
def test_kernel_ridge_csr():
pred = Ridge(alpha=1, fit_intercept=False,
solver="cholesky").fit(Xcsr, y).predict(Xcsr)
pred2 = KernelRidge(kernel="linear", alpha=1).fit(Xcsr, y).predict(Xcsr)
assert_array_almost_equal(pred, pred2)
def test_kernel_ridge_csc():
pred = Ridge(alpha=1, fit_intercept=False,
solver="cholesky").fit(Xcsc, y).predict(Xcsc)
pred2 = KernelRidge(kernel="linear", alpha=1).fit(Xcsc, y).predict(Xcsc)
assert_array_almost_equal(pred, pred2)
def test_kernel_ridge_singular_kernel():
# alpha=0 causes a LinAlgError in computing the dual coefficients,
# which causes a fallback to a lstsq solver. This is tested here.
pred = Ridge(alpha=0, fit_intercept=False).fit(X, y).predict(X)
kr = KernelRidge(kernel="linear", alpha=0)
ignore_warnings(kr.fit)(X, y)
pred2 = kr.predict(X)
assert_array_almost_equal(pred, pred2)
def test_kernel_ridge_precomputed():
for kernel in ["linear", "rbf", "poly", "cosine"]:
K = pairwise_kernels(X, X, metric=kernel)
pred = KernelRidge(kernel=kernel).fit(X, y).predict(X)
pred2 = KernelRidge(kernel="precomputed").fit(K, y).predict(K)
assert_array_almost_equal(pred, pred2)
def test_kernel_ridge_precomputed_kernel_unchanged():
K = np.dot(X, X.T)
K2 = K.copy()
KernelRidge(kernel="precomputed").fit(K, y)
assert_array_almost_equal(K, K2)
def test_kernel_ridge_sample_weights():
K = np.dot(X, X.T) # precomputed kernel
sw = np.random.RandomState(0).rand(X.shape[0])
pred = Ridge(alpha=1,
fit_intercept=False).fit(X, y, sample_weight=sw).predict(X)
pred2 = KernelRidge(kernel="linear",
alpha=1).fit(X, y, sample_weight=sw).predict(X)
pred3 = KernelRidge(kernel="precomputed",
alpha=1).fit(K, y, sample_weight=sw).predict(K)
assert_array_almost_equal(pred, pred2)
assert_array_almost_equal(pred, pred3)
def test_kernel_ridge_multi_output():
pred = Ridge(alpha=1, fit_intercept=False).fit(X, Y).predict(X)
pred2 = KernelRidge(kernel="linear", alpha=1).fit(X, Y).predict(X)
assert_array_almost_equal(pred, pred2)
pred3 = KernelRidge(kernel="linear", alpha=1).fit(X, y).predict(X)
pred3 = np.array([pred3, pred3]).T
assert_array_almost_equal(pred2, pred3)
| bsd-3-clause |
PaloAltoNetworks-BD/SplunkforPaloAltoNetworks | Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/future/utils/__init__.py | 8 | 20325 | """
A selection of cross-compatible functions for Python 2 and 3.
This module exports useful functions for 2/3 compatible code:
* bind_method: binds functions to classes
* ``native_str_to_bytes`` and ``bytes_to_native_str``
* ``native_str``: always equal to the native platform string object (because
this may be shadowed by imports from future.builtins)
* lists: lrange(), lmap(), lzip(), lfilter()
* iterable method compatibility:
- iteritems, iterkeys, itervalues
- viewitems, viewkeys, viewvalues
These use the original method if available, otherwise they use items,
keys, values.
* types:
* text_type: unicode in Python 2, str in Python 3
* binary_type: str in Python 2, bytes in Python 3
* string_types: basestring in Python 2, str in Python 3
* bchr(c):
Take an integer and make a 1-character byte string
* bord(c)
Take the result of indexing on a byte string and make an integer
* tobytes(s)
Take a text string, a byte string, or a sequence of characters taken
from a byte string, and make a byte string.
* raise_from()
* raise_with_traceback()
This module also defines these decorators:
* ``python_2_unicode_compatible``
* ``with_metaclass``
* ``implements_iterator``
Some of the functions in this module come from the following sources:
* Jinja2 (BSD licensed: see
https://github.com/mitsuhiko/jinja2/blob/master/LICENSE)
* Pandas compatibility module pandas.compat
* six.py by Benjamin Peterson
* Django
"""
import types
import sys
import numbers
import functools
import copy
import inspect
PY3 = sys.version_info[0] == 3
PY35_PLUS = sys.version_info[0:2] >= (3, 5)
PY36_PLUS = sys.version_info[0:2] >= (3, 6)
PY2 = sys.version_info[0] == 2
PY26 = sys.version_info[0:2] == (2, 6)
PY27 = sys.version_info[0:2] == (2, 7)
PYPY = hasattr(sys, 'pypy_translation_info')
def python_2_unicode_compatible(cls):
"""
A decorator that defines __unicode__ and __str__ methods under Python
2. Under Python 3, this decorator is a no-op.
To support Python 2 and 3 with a single code base, define a __str__
method returning unicode text and apply this decorator to the class, like
this::
>>> from future.utils import python_2_unicode_compatible
>>> @python_2_unicode_compatible
... class MyClass(object):
... def __str__(self):
... return u'Unicode string: \u5b54\u5b50'
>>> a = MyClass()
Then, after this import:
>>> from future.builtins import str
the following is ``True`` on both Python 3 and 2::
>>> str(a) == a.encode('utf-8').decode('utf-8')
True
and, on a Unicode-enabled terminal with the right fonts, these both print the
Chinese characters for Confucius::
>>> print(a)
>>> print(str(a))
The implementation comes from django.utils.encoding.
"""
if not PY3:
cls.__unicode__ = cls.__str__
cls.__str__ = lambda self: self.__unicode__().encode('utf-8')
return cls
def with_metaclass(meta, *bases):
"""
Function from jinja2/_compat.py. License: BSD.
Use it like this::
class BaseForm(object):
pass
class FormType(type):
pass
class Form(with_metaclass(FormType, BaseForm)):
pass
This requires a bit of explanation: the basic idea is to make a
dummy metaclass for one level of class instantiation that replaces
itself with the actual metaclass. Because of internal type checks
we also need to make sure that we downgrade the custom metaclass
for one level to something closer to type (that's why __call__ and
__init__ comes back from type etc.).
This has the advantage over six.with_metaclass of not introducing
dummy classes into the final MRO.
"""
class metaclass(meta):
__call__ = type.__call__
__init__ = type.__init__
def __new__(cls, name, this_bases, d):
if this_bases is None:
return type.__new__(cls, name, (), d)
return meta(name, bases, d)
return metaclass('temporary_class', None, {})
# Definitions from pandas.compat and six.py follow:
if PY3:
def bchr(s):
return bytes([s])
def bstr(s):
if isinstance(s, str):
return bytes(s, 'latin-1')
else:
return bytes(s)
def bord(s):
return s
string_types = str,
integer_types = int,
class_types = type,
text_type = str
binary_type = bytes
else:
# Python 2
def bchr(s):
return chr(s)
def bstr(s):
return str(s)
def bord(s):
return ord(s)
string_types = basestring,
integer_types = (int, long)
class_types = (type, types.ClassType)
text_type = unicode
binary_type = str
###
if PY3:
def tobytes(s):
if isinstance(s, bytes):
return s
else:
if isinstance(s, str):
return s.encode('latin-1')
else:
return bytes(s)
else:
# Python 2
def tobytes(s):
if isinstance(s, unicode):
return s.encode('latin-1')
else:
return ''.join(s)
tobytes.__doc__ = """
Encodes to latin-1 (where the first 256 chars are the same as
ASCII.)
"""
if PY3:
def native_str_to_bytes(s, encoding='utf-8'):
return s.encode(encoding)
def bytes_to_native_str(b, encoding='utf-8'):
return b.decode(encoding)
def text_to_native_str(t, encoding=None):
return t
else:
# Python 2
def native_str_to_bytes(s, encoding=None):
from future.types import newbytes # to avoid a circular import
return newbytes(s)
def bytes_to_native_str(b, encoding=None):
return native(b)
def text_to_native_str(t, encoding='ascii'):
"""
Use this to create a Py2 native string when "from __future__ import
unicode_literals" is in effect.
"""
return unicode(t).encode(encoding)
native_str_to_bytes.__doc__ = """
On Py3, returns an encoded string.
On Py2, returns a newbytes type, ignoring the ``encoding`` argument.
"""
if PY3:
# list-producing versions of the major Python iterating functions
def lrange(*args, **kwargs):
return list(range(*args, **kwargs))
def lzip(*args, **kwargs):
return list(zip(*args, **kwargs))
def lmap(*args, **kwargs):
return list(map(*args, **kwargs))
def lfilter(*args, **kwargs):
return list(filter(*args, **kwargs))
else:
import __builtin__
# Python 2-builtin ranges produce lists
lrange = __builtin__.range
lzip = __builtin__.zip
lmap = __builtin__.map
lfilter = __builtin__.filter
def isidentifier(s, dotted=False):
'''
A function equivalent to the str.isidentifier method on Py3
'''
if dotted:
return all(isidentifier(a) for a in s.split('.'))
if PY3:
return s.isidentifier()
else:
import re
_name_re = re.compile(r"[a-zA-Z_][a-zA-Z0-9_]*$")
return bool(_name_re.match(s))
def viewitems(obj, **kwargs):
"""
Function for iterating over dictionary items with the same set-like
behaviour on Py2.7 as on Py3.
Passes kwargs to method."""
func = getattr(obj, "viewitems", None)
if not func:
func = obj.items
return func(**kwargs)
def viewkeys(obj, **kwargs):
"""
Function for iterating over dictionary keys with the same set-like
behaviour on Py2.7 as on Py3.
Passes kwargs to method."""
func = getattr(obj, "viewkeys", None)
if not func:
func = obj.keys
return func(**kwargs)
def viewvalues(obj, **kwargs):
"""
Function for iterating over dictionary values with the same set-like
behaviour on Py2.7 as on Py3.
Passes kwargs to method."""
func = getattr(obj, "viewvalues", None)
if not func:
func = obj.values
return func(**kwargs)
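# Example of the dict-view helpers above (illustrative comment only; ``d`` is a
# made-up dictionary):
#
#     d = {'a': 1, 'b': 2}
#     viewkeys(d) & {'a', 'c'}       # set-like intersection -> {'a'} on Py2.7 and Py3
#     for k, v in viewitems(d):      # no intermediate list is built on Py2.7
#         ...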
def iteritems(obj, **kwargs):
"""Use this only if compatibility with Python versions before 2.7 is
required. Otherwise, prefer viewitems().
"""
func = getattr(obj, "iteritems", None)
if not func:
func = obj.items
return func(**kwargs)
def iterkeys(obj, **kwargs):
"""Use this only if compatibility with Python versions before 2.7 is
required. Otherwise, prefer viewkeys().
"""
func = getattr(obj, "iterkeys", None)
if not func:
func = obj.keys
return func(**kwargs)
def itervalues(obj, **kwargs):
"""Use this only if compatibility with Python versions before 2.7 is
required. Otherwise, prefer viewvalues().
"""
func = getattr(obj, "itervalues", None)
if not func:
func = obj.values
return func(**kwargs)
def bind_method(cls, name, func):
"""Bind a method to class, python 2 and python 3 compatible.
Parameters
----------
cls : type
class to receive bound method
name : basestring
name of method on class instance
func : function
function to be bound as method
Returns
-------
None
"""
# only python 2 has an issue with bound/unbound methods
if not PY3:
setattr(cls, name, types.MethodType(func, None, cls))
else:
setattr(cls, name, func)
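# Sketch of bind_method in use (illustrative only; ``MyClass`` and ``report``
# are hypothetical names):
#
#     class MyClass(object):
#         pass
#
#     def report(self):
#         return 'instance of %s' % type(self).__name__
#
#     bind_method(MyClass, 'report', report)
#     MyClass().report()             # -> 'instance of MyClass'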
def getexception():
return sys.exc_info()[1]
def _get_caller_globals_and_locals():
"""
Returns the globals and locals of the calling frame.
Is there an alternative to frame hacking here?
"""
caller_frame = inspect.stack()[2]
myglobals = caller_frame[0].f_globals
mylocals = caller_frame[0].f_locals
return myglobals, mylocals
def _repr_strip(mystring):
"""
Returns the string without any initial or final quotes.
"""
r = repr(mystring)
if r.startswith("'") and r.endswith("'"):
return r[1:-1]
else:
return r
if PY3:
def raise_from(exc, cause):
"""
Equivalent to:
raise EXCEPTION from CAUSE
on Python 3. (See PEP 3134).
"""
myglobals, mylocals = _get_caller_globals_and_locals()
# We pass the exception and cause along with other globals
# when we exec():
myglobals = myglobals.copy()
myglobals['__python_future_raise_from_exc'] = exc
myglobals['__python_future_raise_from_cause'] = cause
execstr = "raise __python_future_raise_from_exc from __python_future_raise_from_cause"
exec(execstr, myglobals, mylocals)
def raise_(tp, value=None, tb=None):
"""
A function that matches the Python 2.x ``raise`` statement. This
allows re-raising exceptions with the cls value and traceback on
Python 2 and 3.
"""
if value is not None and isinstance(tp, Exception):
raise TypeError("instance exception may not have a separate value")
if value is not None:
exc = tp(value)
else:
exc = tp
if exc.__traceback__ is not tb:
raise exc.with_traceback(tb)
raise exc
def raise_with_traceback(exc, traceback=Ellipsis):
if traceback == Ellipsis:
_, _, traceback = sys.exc_info()
raise exc.with_traceback(traceback)
else:
def raise_from(exc, cause):
"""
Equivalent to:
raise EXCEPTION from CAUSE
on Python 3. (See PEP 3134).
"""
# Is either arg an exception class (e.g. IndexError) rather than
# instance (e.g. IndexError('my message here')? If so, pass the
# name of the class undisturbed through to "raise ... from ...".
if isinstance(exc, type) and issubclass(exc, Exception):
e = exc()
# exc = exc.__name__
# execstr = "e = " + _repr_strip(exc) + "()"
# myglobals, mylocals = _get_caller_globals_and_locals()
# exec(execstr, myglobals, mylocals)
else:
e = exc
e.__suppress_context__ = False
if isinstance(cause, type) and issubclass(cause, Exception):
e.__cause__ = cause()
e.__suppress_context__ = True
elif cause is None:
e.__cause__ = None
e.__suppress_context__ = True
elif isinstance(cause, BaseException):
e.__cause__ = cause
e.__suppress_context__ = True
else:
raise TypeError("exception causes must derive from BaseException")
e.__context__ = sys.exc_info()[1]
raise e
exec('''
def raise_(tp, value=None, tb=None):
raise tp, value, tb
def raise_with_traceback(exc, traceback=Ellipsis):
if traceback == Ellipsis:
_, _, traceback = sys.exc_info()
raise exc, None, traceback
'''.strip())
raise_with_traceback.__doc__ = (
"""Raise exception with existing traceback.
If traceback is not passed, uses sys.exc_info() to get traceback."""
)
# Deprecated alias for backward compatibility with ``future`` versions < 0.11:
reraise = raise_
def implements_iterator(cls):
'''
From jinja2/_compat.py. License: BSD.
Use as a decorator like this::
@implements_iterator
class UppercasingIterator(object):
def __init__(self, iterable):
self._iter = iter(iterable)
def __iter__(self):
return self
def __next__(self):
return next(self._iter).upper()
'''
if PY3:
return cls
else:
cls.next = cls.__next__
del cls.__next__
return cls
if PY3:
get_next = lambda x: x.next
else:
get_next = lambda x: x.__next__
def encode_filename(filename):
if PY3:
return filename
else:
if isinstance(filename, unicode):
return filename.encode('utf-8')
return filename
def is_new_style(cls):
"""
Python 2.7 has both new-style and old-style classes. Old-style classes can
be pesky in some circumstances, such as when using inheritance. Use this
function to test for whether a class is new-style. (Python 3 only has
new-style classes.)
"""
return hasattr(cls, '__class__') and ('__dict__' in dir(cls)
or hasattr(cls, '__slots__'))
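# Example (illustrative comment; the distinction only matters on Py2, where
# classes that do not derive from object are old-style):
#
#     class OldStyle: pass           # old-style on Py2, new-style on Py3
#     class NewStyle(object): pass   # new-style everywhere
#
#     is_new_style(OldStyle)         # False on Py2, True on Py3
#     is_new_style(NewStyle)         # True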
# The native platform string and bytes types. Useful because ``str`` and
# ``bytes`` are redefined on Py2 by ``from future.builtins import *``.
native_str = str
native_bytes = bytes
def istext(obj):
"""
Deprecated. Use::
>>> isinstance(obj, str)
after this import:
>>> from future.builtins import str
"""
return isinstance(obj, type(u''))
def isbytes(obj):
"""
Deprecated. Use::
>>> isinstance(obj, bytes)
after this import:
>>> from future.builtins import bytes
"""
return isinstance(obj, type(b''))
def isnewbytes(obj):
"""
Equivalent to the result of ``isinstance(obj, newbytes)`` were
    ``__instancecheck__`` not overridden on the newbytes subclass. In
    other words, is the object REALLY a newbytes instance, and not a Py2
    native str object?
"""
# TODO: generalize this so that it works with subclasses of newbytes
# Import is here to avoid circular imports:
from future.types.newbytes import newbytes
return type(obj) == newbytes
def isint(obj):
"""
Deprecated. Tests whether an object is a Py3 ``int`` or either a Py2 ``int`` or
``long``.
Instead of using this function, you can use:
>>> from future.builtins import int
>>> isinstance(obj, int)
The following idiom is equivalent:
>>> from numbers import Integral
>>> isinstance(obj, Integral)
"""
return isinstance(obj, numbers.Integral)
def native(obj):
"""
On Py3, this is a no-op: native(obj) -> obj
On Py2, returns the corresponding native Py2 types that are
superclasses for backported objects from Py3:
>>> from builtins import str, bytes, int
>>> native(str(u'ABC'))
u'ABC'
>>> type(native(str(u'ABC')))
unicode
>>> native(bytes(b'ABC'))
b'ABC'
>>> type(native(bytes(b'ABC')))
bytes
>>> native(int(10**20))
100000000000000000000L
>>> type(native(int(10**20)))
long
Existing native types on Py2 will be returned unchanged:
>>> type(native(u'ABC'))
unicode
"""
if hasattr(obj, '__native__'):
return obj.__native__()
else:
return obj
# Implementation of exec_ is from ``six``:
if PY3:
import builtins
exec_ = getattr(builtins, "exec")
else:
def exec_(code, globs=None, locs=None):
"""Execute code in a namespace."""
if globs is None:
frame = sys._getframe(1)
globs = frame.f_globals
if locs is None:
locs = frame.f_locals
del frame
elif locs is None:
locs = globs
exec("""exec code in globs, locs""")
# Defined here for backward compatibility:
def old_div(a, b):
"""
DEPRECATED: import ``old_div`` from ``past.utils`` instead.
Equivalent to ``a / b`` on Python 2 without ``from __future__ import
division``.
TODO: generalize this to other objects (like arrays etc.)
"""
if isinstance(a, numbers.Integral) and isinstance(b, numbers.Integral):
return a // b
else:
return a / b
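# Quick example of old_div (illustrative comment):
#
#     old_div(7, 2)                  # -> 3   (integer floor division, as on Py2)
#     old_div(7.0, 2)                # -> 3.5 (true division once a float is involved)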
def as_native_str(encoding='utf-8'):
'''
A decorator to turn a function or method call that returns text, i.e.
unicode, into one that returns a native platform str.
Use it as a decorator like this::
from __future__ import unicode_literals
class MyClass(object):
@as_native_str(encoding='ascii')
def __repr__(self):
return next(self._iter).upper()
'''
if PY3:
return lambda f: f
else:
def encoder(f):
@functools.wraps(f)
def wrapper(*args, **kwargs):
return f(*args, **kwargs).encode(encoding=encoding)
return wrapper
return encoder
# listvalues and listitems definitions from Nick Coghlan's (withdrawn)
# PEP 496:
try:
dict.iteritems
except AttributeError:
# Python 3
def listvalues(d):
return list(d.values())
def listitems(d):
return list(d.items())
else:
# Python 2
def listvalues(d):
return d.values()
def listitems(d):
return d.items()
if PY3:
def ensure_new_type(obj):
return obj
else:
def ensure_new_type(obj):
from future.types.newbytes import newbytes
from future.types.newstr import newstr
from future.types.newint import newint
from future.types.newdict import newdict
native_type = type(native(obj))
# Upcast only if the type is already a native (non-future) type
if issubclass(native_type, type(obj)):
# Upcast
if native_type == str: # i.e. Py2 8-bit str
return newbytes(obj)
elif native_type == unicode:
return newstr(obj)
elif native_type == int:
return newint(obj)
elif native_type == long:
return newint(obj)
elif native_type == dict:
return newdict(obj)
else:
return obj
else:
# Already a new type
assert type(obj) in [newbytes, newstr]
return obj
__all__ = ['PY2', 'PY26', 'PY3', 'PYPY',
'as_native_str', 'bind_method', 'bord', 'bstr',
'bytes_to_native_str', 'encode_filename', 'ensure_new_type',
'exec_', 'get_next', 'getexception', 'implements_iterator',
'is_new_style', 'isbytes', 'isidentifier', 'isint',
'isnewbytes', 'istext', 'iteritems', 'iterkeys', 'itervalues',
'lfilter', 'listitems', 'listvalues', 'lmap', 'lrange',
'lzip', 'native', 'native_bytes', 'native_str',
'native_str_to_bytes', 'old_div',
'python_2_unicode_compatible', 'raise_',
'raise_with_traceback', 'reraise', 'text_to_native_str',
'tobytes', 'viewitems', 'viewkeys', 'viewvalues',
'with_metaclass'
]
| isc |
bigdataelephants/scikit-learn | sklearn/covariance/tests/test_robust_covariance.py | 31 | 3340 | # Author: Alexandre Gramfort <[email protected]>
# Gael Varoquaux <[email protected]>
# Virgile Fritsch <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn import datasets
from sklearn.covariance import empirical_covariance, MinCovDet, \
EllipticEnvelope
X = datasets.load_iris().data
X_1d = X[:, 0]
n_samples, n_features = X.shape
def test_mcd():
"""Tests the FastMCD algorithm implementation
"""
### Small data set
# test without outliers (random independent normal data)
launch_mcd_on_dataset(100, 5, 0, 0.01, 0.1, 80)
# test with a contaminated data set (medium contamination)
launch_mcd_on_dataset(100, 5, 20, 0.01, 0.01, 70)
# test with a contaminated data set (strong contamination)
launch_mcd_on_dataset(100, 5, 40, 0.1, 0.1, 50)
### Medium data set
launch_mcd_on_dataset(1000, 5, 450, 0.1, 0.1, 540)
### Large data set
launch_mcd_on_dataset(1700, 5, 800, 0.1, 0.1, 870)
### 1D data set
launch_mcd_on_dataset(500, 1, 100, 0.001, 0.001, 350)
def launch_mcd_on_dataset(n_samples, n_features, n_outliers, tol_loc, tol_cov,
tol_support):
rand_gen = np.random.RandomState(0)
data = rand_gen.randn(n_samples, n_features)
# add some outliers
outliers_index = rand_gen.permutation(n_samples)[:n_outliers]
outliers_offset = 10. * \
(rand_gen.randint(2, size=(n_outliers, n_features)) - 0.5)
data[outliers_index] += outliers_offset
inliers_mask = np.ones(n_samples).astype(bool)
inliers_mask[outliers_index] = False
pure_data = data[inliers_mask]
# compute MCD by fitting an object
mcd_fit = MinCovDet(random_state=rand_gen).fit(data)
T = mcd_fit.location_
S = mcd_fit.covariance_
H = mcd_fit.support_
# compare with the estimates learnt from the inliers
error_location = np.mean((pure_data.mean(0) - T) ** 2)
assert(error_location < tol_loc)
error_cov = np.mean((empirical_covariance(pure_data) - S) ** 2)
assert(error_cov < tol_cov)
assert(np.sum(H) >= tol_support)
assert_array_almost_equal(mcd_fit.mahalanobis(data), mcd_fit.dist_)
def test_mcd_issue1127():
# Check that the code does not break with X.shape = (3, 1)
# (i.e. n_support = n_samples)
rnd = np.random.RandomState(0)
X = rnd.normal(size=(3, 1))
mcd = MinCovDet()
mcd.fit(X)
def test_outlier_detection():
rnd = np.random.RandomState(0)
X = rnd.randn(100, 10)
clf = EllipticEnvelope(contamination=0.1)
print(clf.threshold)
assert_raises(Exception, clf.predict, X)
assert_raises(Exception, clf.decision_function, X)
clf.fit(X)
y_pred = clf.predict(X)
decision = clf.decision_function(X, raw_values=True)
decision_transformed = clf.decision_function(X, raw_values=False)
assert_array_almost_equal(
decision, clf.mahalanobis(X))
assert_array_almost_equal(clf.mahalanobis(X), clf.dist_)
assert_almost_equal(clf.score(X, np.ones(100)),
(100 - y_pred[y_pred == -1].size) / 100.)
assert(sum(y_pred == -1) == sum(decision_transformed < 0))
| bsd-3-clause |
tzonghao/influxdb-python | influxdb/influxdb08/dataframe_client.py | 4 | 7291 | # -*- coding: utf-8 -*-
"""DataFrame client for InfluxDB v0.8."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import math
import warnings
from .client import InfluxDBClient
class DataFrameClient(InfluxDBClient):
"""Primary defintion of the DataFrameClient for v0.8.
The ``DataFrameClient`` object holds information necessary to connect
to InfluxDB. Requests can be made to InfluxDB directly through the client.
The client reads and writes from pandas DataFrames.
"""
def __init__(self, ignore_nan=True, *args, **kwargs):
"""Initialize an instance of the DataFrameClient."""
super(DataFrameClient, self).__init__(*args, **kwargs)
try:
global pd
import pandas as pd
except ImportError as ex:
raise ImportError('DataFrameClient requires Pandas, '
'"{ex}" problem importing'.format(ex=str(ex)))
self.EPOCH = pd.Timestamp('1970-01-01 00:00:00.000+00:00')
self.ignore_nan = ignore_nan
def write_points(self, data, *args, **kwargs):
"""Write to multiple time series names.
:param data: A dictionary mapping series names to pandas DataFrames
:param time_precision: [Optional, default 's'] Either 's', 'm', 'ms'
or 'u'.
:param batch_size: [Optional] Value to write the points in batches
instead of all at one time. Useful for when doing data dumps from
one database to another or when doing a massive write operation
:type batch_size: int
"""
batch_size = kwargs.get('batch_size')
time_precision = kwargs.get('time_precision', 's')
if batch_size:
kwargs.pop('batch_size') # don't hand over to InfluxDBClient
for key, data_frame in data.items():
number_batches = int(math.ceil(
len(data_frame) / float(batch_size)))
for batch in range(number_batches):
start_index = batch * batch_size
end_index = (batch + 1) * batch_size
outdata = [
self._convert_dataframe_to_json(
name=key,
dataframe=data_frame
.iloc[start_index:end_index].copy(),
time_precision=time_precision)]
InfluxDBClient.write_points(self, outdata, *args, **kwargs)
return True
outdata = [
self._convert_dataframe_to_json(name=key, dataframe=dataframe,
time_precision=time_precision)
for key, dataframe in data.items()]
return InfluxDBClient.write_points(self, outdata, *args, **kwargs)
def write_points_with_precision(self, data, time_precision='s'):
"""Write to multiple time series names.
DEPRECATED
"""
warnings.warn(
"write_points_with_precision is deprecated, and will be removed "
"in future versions. Please use "
"``DataFrameClient.write_points(time_precision='..')`` instead.",
FutureWarning)
return self.write_points(data, time_precision='s')
def query(self, query, time_precision='s', chunked=False):
"""Query data into DataFrames.
        Returns a DataFrame for a single time series, or a dict that maps each
        series name to its DataFrame when the query returns multiple series.
:param time_precision: [Optional, default 's'] Either 's', 'm', 'ms'
or 'u'.
:param chunked: [Optional, default=False] True if the data shall be
retrieved in chunks, False otherwise.
"""
result = InfluxDBClient.query(self, query=query,
time_precision=time_precision,
chunked=chunked)
if len(result) == 0:
return result
elif len(result) == 1:
return self._to_dataframe(result[0], time_precision)
else:
ret = {}
for time_series in result:
ret[time_series['name']] = self._to_dataframe(time_series,
time_precision)
return ret
@staticmethod
def _to_dataframe(json_result, time_precision):
dataframe = pd.DataFrame(data=json_result['points'],
columns=json_result['columns'])
if 'sequence_number' in dataframe.keys():
dataframe.sort_values(['time', 'sequence_number'], inplace=True)
else:
dataframe.sort_values(['time'], inplace=True)
pandas_time_unit = time_precision
if time_precision == 'm':
pandas_time_unit = 'ms'
elif time_precision == 'u':
pandas_time_unit = 'us'
dataframe.index = pd.to_datetime(list(dataframe['time']),
unit=pandas_time_unit,
utc=True)
del dataframe['time']
return dataframe
def _convert_dataframe_to_json(self, dataframe, name, time_precision='s'):
if not isinstance(dataframe, pd.DataFrame):
raise TypeError('Must be DataFrame, but type was: {0}.'
.format(type(dataframe)))
if not (isinstance(dataframe.index, pd.PeriodIndex) or
isinstance(dataframe.index, pd.DatetimeIndex)):
raise TypeError('Must be DataFrame with DatetimeIndex or \
PeriodIndex.')
if isinstance(dataframe.index, pd.PeriodIndex):
dataframe.index = dataframe.index.to_timestamp()
else:
dataframe.index = pd.to_datetime(dataframe.index)
if dataframe.index.tzinfo is None:
dataframe.index = dataframe.index.tz_localize('UTC')
dataframe['time'] = [self._datetime_to_epoch(dt, time_precision)
for dt in dataframe.index]
data = {'name': name,
'columns': [str(column) for column in dataframe.columns],
'points': [self._convert_array(x) for x in dataframe.values]}
return data
def _convert_array(self, array):
try:
global np
import numpy as np
except ImportError as ex:
raise ImportError('DataFrameClient requires Numpy, '
'"{ex}" problem importing'.format(ex=str(ex)))
if self.ignore_nan:
number_types = (int, float, np.number)
condition = (all(isinstance(el, number_types) for el in array) and
np.isnan(array))
return list(np.where(condition, None, array))
return list(array)
def _datetime_to_epoch(self, datetime, time_precision='s'):
seconds = (datetime - self.EPOCH).total_seconds()
if time_precision == 's':
return seconds
elif time_precision == 'm' or time_precision == 'ms':
return seconds * 1000
elif time_precision == 'u':
return seconds * 1000000
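# Illustrative usage sketch (comments only; the connection settings, series name
# and values are placeholders, not part of this module):
#
#     import pandas as pd
#     from influxdb.influxdb08 import DataFrameClient
#
#     client = DataFrameClient(host='localhost', port=8086, username='root',
#                              password='root', database='mydb')
#     df = pd.DataFrame({'value': [0.64, 0.84]},
#                       index=pd.to_datetime(['2015-01-01', '2015-01-02']))
#     client.write_points({'cpu_load': df})
#     result = client.query('select * from cpu_load')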
| mit |
zronyj/TC3Q | Data/wu.py | 2 | 1324 | import urllib2
import json
import pandas as pd
data = {}
def num_or_dump(x):
try:
t = float(x)
except ValueError as e:
t = -9999.
return t
llave = raw_input("Ingresa la llave para la API de WUnderground: ")
for a in range(2008,2018):
d = urllib2.urlopen('http://api.wunderground.com/api/' + llave + '/history_' + str(a) + '0622/q/MGGT.json')
j = d.read()
with open(str(a) + ".json","w") as f:
f.write(j)
pj = json.loads(j)
temp = []
ttime = []
restantes = 24 - len(pj["history"]["observations"])
for o in pj["history"]["observations"]:
hora = int(o["date"]["hour"])
if not(hora in ttime):
ttime.append(hora)
temp.append({"hora":hora,
"temperatura":num_or_dump(o["tempm"]),
"humedad":num_or_dump(o["hum"]),
"presion":num_or_dump(o["pressurem"]),
"lluvia":bool(int(o["rain"])),
"visibilidad":num_or_dump(o["vism"])})
totales = set(range(24))
dados = set(ttime)
restantes = list(totales.difference(dados))
for i in restantes:
temp.append({"hora":i,
"temperatura":-9999.,
"humedad":-9999.,
"presion":-9999.,
"lluvia":False,
"visibilidad":-9999.})
data[str(a)] = temp[:]
d.close()
for p in ["temperatura", "humedad", "presion", "lluvia", "visibilidad"]:
v = pd.DataFrame.from_dict({a:[t[p] for t in data[a]] for a in data.keys()})
v.to_csv(p + ".csv")
| gpl-2.0 |
kaffeebrauer/Lean | Algorithm.Framework/Portfolio/MeanVarianceOptimizationPortfolioConstructionModel.py | 1 | 6913 | # QUANTCONNECT.COM - Democratizing Finance, Empowering Individuals.
# Lean Algorithmic Trading Engine v2.0. Copyright 2014 QuantConnect Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from clr import AddReference
AddReference("System")
AddReference("QuantConnect.Algorithm")
AddReference("QuantConnect.Algorithm.Framework")
AddReference("QuantConnect.Common")
AddReference("QuantConnect.Indicators")
from System import *
from QuantConnect import *
from QuantConnect.Indicators import *
from QuantConnect.Algorithm import *
from QuantConnect.Algorithm.Framework import *
from QuantConnect.Algorithm.Framework.Portfolio import *
from Portfolio.MinimumVariancePortfolioOptimizer import MinimumVariancePortfolioOptimizer
from datetime import timedelta
import numpy as np
import pandas as pd
### <summary>
### Provides an implementation of Mean-Variance portfolio optimization based on modern portfolio theory.
### The default model uses the MinimumVariancePortfolioOptimizer that accepts a 63-row matrix of 1-day returns.
### </summary>
class MeanVarianceOptimizationPortfolioConstructionModel(PortfolioConstructionModel):
def __init__(self,
lookback = 1,
period = 63,
resolution = Resolution.Daily,
optimizer = None):
"""Initialize the model
Args:
lookback(int): Historical return lookback period
            period(int): The number of historical return observations used to compute the weights
            resolution: The resolution of the historical price data
optimizer(class): Method used to compute the portfolio weights"""
self.lookback = lookback
self.period = period
self.resolution = resolution
self.optimizer = MinimumVariancePortfolioOptimizer() if optimizer is None else optimizer
self.symbolDataBySymbol = {}
self.pendingRemoval = []
def CreateTargets(self, algorithm, insights):
"""
Create portfolio targets from the specified insights
Args:
algorithm: The algorithm instance
insights: The insights to create portoflio targets from
Returns:
            An enumerable of portfolio targets to be sent to the execution model
"""
targets = []
for symbol in self.pendingRemoval:
targets.append(PortfolioTarget.Percent(algorithm, symbol, 0))
self.pendingRemoval.clear()
symbols = [insight.Symbol for insight in insights]
if len(symbols) == 0 or all([insight.Magnitude == 0 for insight in insights]):
return targets
for insight in insights:
symbolData = self.symbolDataBySymbol.get(insight.Symbol)
if insight.Magnitude is None:
                algorithm.SetRunTimeError(ArgumentNullException('MeanVarianceOptimizationPortfolioConstructionModel does not accept \'None\' as Insight.Magnitude. Please check out the selected Alpha Model specifications.'))
symbolData.Add(algorithm.Time, insight.Magnitude)
        # Create a dictionary keyed by the symbols in the insights with a pandas.Series as value to create a data frame
returns = { str(symbol) : data.Return for symbol, data in self.symbolDataBySymbol.items() if symbol in symbols }
returns = pd.DataFrame(returns)
        # The portfolio optimizer finds the optimal weights for the given data
weights = self.optimizer.Optimize(returns)
weights = pd.Series(weights, index = returns.columns)
# Create portfolio targets from the specified insights
for insight in insights:
weight = weights[str(insight.Symbol)]
target = PortfolioTarget.Percent(algorithm, insight.Symbol, weight)
if target is not None:
targets.append(target)
return targets
def OnSecuritiesChanged(self, algorithm, changes):
        '''Event fired each time we add/remove securities from the data feed
Args:
algorithm: The algorithm instance that experienced the change in securities
changes: The security additions and removals from the algorithm'''
# clean up data for removed securities
for removed in changes.RemovedSecurities:
self.pendingRemoval.append(removed.Symbol)
symbolData = self.symbolDataBySymbol.pop(removed.Symbol, None)
symbolData.Reset()
# initialize data for added securities
symbols = [ x.Symbol for x in changes.AddedSecurities ]
history = algorithm.History(symbols, self.lookback * self.period, self.resolution)
if history.empty: return
tickers = history.index.levels[0]
for ticker in tickers:
symbol = SymbolCache.GetSymbol(ticker)
if symbol not in self.symbolDataBySymbol:
symbolData = self.MeanVarianceSymbolData(symbol, self.lookback, self.period)
symbolData.WarmUpIndicators(history.loc[ticker])
self.symbolDataBySymbol[symbol] = symbolData
class MeanVarianceSymbolData:
'''Contains data specific to a symbol required by this model'''
def __init__(self, symbol, lookback, period):
self.symbol = symbol
self.roc = RateOfChange(f'{symbol}.ROC({lookback})', lookback)
self.roc.Updated += self.OnRateOfChangeUpdated
self.window = RollingWindow[IndicatorDataPoint](period)
def Reset(self):
self.roc.Updated -= self.OnRateOfChangeUpdated
self.roc.Reset()
self.window.Reset()
def WarmUpIndicators(self, history):
for tuple in history.itertuples():
self.roc.Update(tuple.Index, tuple.close)
def OnRateOfChangeUpdated(self, roc, value):
if roc.IsReady:
self.window.Add(value)
def Add(self, time, value):
item = IndicatorDataPoint(self.symbol, time, value)
self.window.Add(item)
@property
def Return(self):
return pd.Series(
data = [(1 + float(x.Value))**252 - 1 for x in self.window],
index = [x.EndTime for x in self.window])
@property
def IsReady(self):
return self.window.IsReady
def __str__(self, **kwargs):
        return '{}: {:.2%}'.format(self.roc.Name, (1 + self.window[0])**252 - 1)
| apache-2.0 |
ldirer/scikit-learn | sklearn/model_selection/tests/test_search.py | 5 | 51772 | """Test the search module"""
from collections import Iterable, Sized
from sklearn.externals.six.moves import cStringIO as StringIO
from sklearn.externals.six.moves import xrange
from sklearn.externals.joblib._compat import PY3_OR_LATER
from itertools import chain, product
import pickle
import sys
import numpy as np
import scipy.sparse as sp
from sklearn.utils.fixes import sp_version
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_false, assert_true
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.mocking import CheckingClassifier, MockDataFrame
from scipy.stats import bernoulli, expon, uniform
from sklearn.externals.six.moves import zip
from sklearn.base import BaseEstimator
from sklearn.exceptions import NotFittedError
from sklearn.datasets import make_classification
from sklearn.datasets import make_blobs
from sklearn.datasets import make_multilabel_classification
from sklearn.model_selection import KFold
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import StratifiedShuffleSplit
from sklearn.model_selection import LeaveOneGroupOut
from sklearn.model_selection import LeavePGroupsOut
from sklearn.model_selection import GroupKFold
from sklearn.model_selection import GroupShuffleSplit
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import RandomizedSearchCV
from sklearn.model_selection import ParameterGrid
from sklearn.model_selection import ParameterSampler
from sklearn.model_selection._validation import FitFailedWarning
from sklearn.svm import LinearSVC, SVC
from sklearn.tree import DecisionTreeRegressor
from sklearn.tree import DecisionTreeClassifier
from sklearn.cluster import KMeans
from sklearn.neighbors import KernelDensity
from sklearn.metrics import f1_score
from sklearn.metrics import make_scorer
from sklearn.metrics import roc_auc_score
from sklearn.preprocessing import Imputer
from sklearn.pipeline import Pipeline
from sklearn.linear_model import Ridge, SGDClassifier
from sklearn.model_selection.tests.common import OneTimeSplitter
# Neither of the following two estimators inherits from BaseEstimator,
# in order to test hyperparameter search on user-defined classifiers.
class MockClassifier(object):
"""Dummy classifier to test the parameter search algorithms"""
def __init__(self, foo_param=0):
self.foo_param = foo_param
def fit(self, X, Y):
assert_true(len(X) == len(Y))
self.classes_ = np.unique(Y)
return self
def predict(self, T):
return T.shape[0]
def transform(self, X):
return X + self.foo_param
def inverse_transform(self, X):
return X - self.foo_param
predict_proba = predict
predict_log_proba = predict
decision_function = predict
def score(self, X=None, Y=None):
if self.foo_param > 1:
score = 1.
else:
score = 0.
return score
def get_params(self, deep=False):
return {'foo_param': self.foo_param}
def set_params(self, **params):
self.foo_param = params['foo_param']
return self
class LinearSVCNoScore(LinearSVC):
"""An LinearSVC classifier that has no score method."""
@property
def score(self):
raise AttributeError
X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
y = np.array([1, 1, 2, 2])
def assert_grid_iter_equals_getitem(grid):
assert_equal(list(grid), [grid[i] for i in range(len(grid))])
def test_parameter_grid():
# Test basic properties of ParameterGrid.
params1 = {"foo": [1, 2, 3]}
grid1 = ParameterGrid(params1)
assert_true(isinstance(grid1, Iterable))
assert_true(isinstance(grid1, Sized))
assert_equal(len(grid1), 3)
assert_grid_iter_equals_getitem(grid1)
params2 = {"foo": [4, 2],
"bar": ["ham", "spam", "eggs"]}
grid2 = ParameterGrid(params2)
assert_equal(len(grid2), 6)
# loop to assert we can iterate over the grid multiple times
for i in xrange(2):
# tuple + chain transforms {"a": 1, "b": 2} to ("a", 1, "b", 2)
points = set(tuple(chain(*(sorted(p.items())))) for p in grid2)
assert_equal(points,
set(("bar", x, "foo", y)
for x, y in product(params2["bar"], params2["foo"])))
assert_grid_iter_equals_getitem(grid2)
# Special case: empty grid (useful to get default estimator settings)
empty = ParameterGrid({})
assert_equal(len(empty), 1)
assert_equal(list(empty), [{}])
assert_grid_iter_equals_getitem(empty)
assert_raises(IndexError, lambda: empty[1])
has_empty = ParameterGrid([{'C': [1, 10]}, {}, {'C': [.5]}])
assert_equal(len(has_empty), 4)
assert_equal(list(has_empty), [{'C': 1}, {'C': 10}, {}, {'C': .5}])
assert_grid_iter_equals_getitem(has_empty)
def test_grid_search():
# Test that the best estimator contains the right value for foo_param
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, verbose=3)
# make sure it selects the smallest parameter in case of ties
old_stdout = sys.stdout
sys.stdout = StringIO()
grid_search.fit(X, y)
sys.stdout = old_stdout
assert_equal(grid_search.best_estimator_.foo_param, 2)
assert_array_equal(grid_search.cv_results_["param_foo_param"].data,
[1, 2, 3])
# Smoke test the score etc:
grid_search.score(X, y)
grid_search.predict_proba(X)
grid_search.decision_function(X)
grid_search.transform(X)
# Test exception handling on scoring
grid_search.scoring = 'sklearn'
assert_raises(ValueError, grid_search.fit, X, y)
def check_hyperparameter_searcher_with_fit_params(klass, **klass_kwargs):
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
clf = CheckingClassifier(expected_fit_params=['spam', 'eggs'])
searcher = klass(clf, {'foo_param': [1, 2, 3]}, cv=2, **klass_kwargs)
    # The CheckingClassifier generates an assertion error if
# a parameter is missing or has length != len(X).
assert_raise_message(AssertionError,
"Expected fit parameter(s) ['eggs'] not seen.",
searcher.fit, X, y, spam=np.ones(10))
assert_raise_message(AssertionError,
"Fit parameter spam has length 1; expected 4.",
searcher.fit, X, y, spam=np.ones(1),
eggs=np.zeros(10))
searcher.fit(X, y, spam=np.ones(10), eggs=np.zeros(10))
def test_grid_search_with_fit_params():
check_hyperparameter_searcher_with_fit_params(GridSearchCV)
def test_random_search_with_fit_params():
check_hyperparameter_searcher_with_fit_params(RandomizedSearchCV, n_iter=1)
def test_grid_search_fit_params_deprecation():
# NOTE: Remove this test in v0.21
# Use of `fit_params` in the class constructor is deprecated,
# but will still work until v0.21.
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
clf = CheckingClassifier(expected_fit_params=['spam'])
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]},
fit_params={'spam': np.ones(10)})
assert_warns(DeprecationWarning, grid_search.fit, X, y)
def test_grid_search_fit_params_two_places():
# NOTE: Remove this test in v0.21
# If users try to input fit parameters in both
# the constructor (deprecated use) and the `fit`
# method, we'll ignore the values passed to the constructor.
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
clf = CheckingClassifier(expected_fit_params=['spam'])
# The "spam" array is too short and will raise an
# error in the CheckingClassifier if used.
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]},
fit_params={'spam': np.ones(1)})
expected_warning = ('Ignoring fit_params passed as a constructor '
'argument in favor of keyword arguments to '
'the "fit" method.')
assert_warns_message(RuntimeWarning, expected_warning,
grid_search.fit, X, y, spam=np.ones(10))
# Verify that `fit` prefers its own kwargs by giving valid
# kwargs in the constructor and invalid in the method call
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]},
fit_params={'spam': np.ones(10)})
assert_raise_message(AssertionError, "Fit parameter spam has length 1",
grid_search.fit, X, y, spam=np.ones(1))
@ignore_warnings
def test_grid_search_no_score():
# Test grid-search on classifier that has no score function.
clf = LinearSVC(random_state=0)
X, y = make_blobs(random_state=0, centers=2)
Cs = [.1, 1, 10]
clf_no_score = LinearSVCNoScore(random_state=0)
grid_search = GridSearchCV(clf, {'C': Cs}, scoring='accuracy')
grid_search.fit(X, y)
grid_search_no_score = GridSearchCV(clf_no_score, {'C': Cs},
scoring='accuracy')
# smoketest grid search
grid_search_no_score.fit(X, y)
# check that best params are equal
assert_equal(grid_search_no_score.best_params_, grid_search.best_params_)
# check that we can call score and that it gives the correct result
assert_equal(grid_search.score(X, y), grid_search_no_score.score(X, y))
# giving no scoring function raises an error
grid_search_no_score = GridSearchCV(clf_no_score, {'C': Cs})
assert_raise_message(TypeError, "no scoring", grid_search_no_score.fit,
[[1]])
def test_grid_search_score_method():
X, y = make_classification(n_samples=100, n_classes=2, flip_y=.2,
random_state=0)
clf = LinearSVC(random_state=0)
grid = {'C': [.1]}
search_no_scoring = GridSearchCV(clf, grid, scoring=None).fit(X, y)
search_accuracy = GridSearchCV(clf, grid, scoring='accuracy').fit(X, y)
search_no_score_method_auc = GridSearchCV(LinearSVCNoScore(), grid,
scoring='roc_auc').fit(X, y)
search_auc = GridSearchCV(clf, grid, scoring='roc_auc').fit(X, y)
# Check warning only occurs in situation where behavior changed:
# estimator requires score method to compete with scoring parameter
score_no_scoring = search_no_scoring.score(X, y)
score_accuracy = search_accuracy.score(X, y)
score_no_score_auc = search_no_score_method_auc.score(X, y)
score_auc = search_auc.score(X, y)
# ensure the test is sane
assert_true(score_auc < 1.0)
assert_true(score_accuracy < 1.0)
assert_not_equal(score_auc, score_accuracy)
assert_almost_equal(score_accuracy, score_no_scoring)
assert_almost_equal(score_auc, score_no_score_auc)
def test_grid_search_groups():
    # Check that the ValueError raised when groups is None propagates to
    # GridSearchCV, and that groups is correctly passed on to the cv object
rng = np.random.RandomState(0)
X, y = make_classification(n_samples=15, n_classes=2, random_state=0)
groups = rng.randint(0, 3, 15)
clf = LinearSVC(random_state=0)
grid = {'C': [1]}
group_cvs = [LeaveOneGroupOut(), LeavePGroupsOut(2), GroupKFold(),
GroupShuffleSplit()]
for cv in group_cvs:
gs = GridSearchCV(clf, grid, cv=cv)
assert_raise_message(ValueError,
"The 'groups' parameter should not be None.",
gs.fit, X, y)
gs.fit(X, y, groups=groups)
non_group_cvs = [StratifiedKFold(), StratifiedShuffleSplit()]
for cv in non_group_cvs:
gs = GridSearchCV(clf, grid, cv=cv)
# Should not raise an error
gs.fit(X, y)
def test_classes__property():
# Test that classes_ property matches best_estimator_.classes_
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
Cs = [.1, 1, 10]
grid_search = GridSearchCV(LinearSVC(random_state=0), {'C': Cs})
grid_search.fit(X, y)
assert_array_equal(grid_search.best_estimator_.classes_,
grid_search.classes_)
# Test that regressors do not have a classes_ attribute
grid_search = GridSearchCV(Ridge(), {'alpha': [1.0, 2.0]})
grid_search.fit(X, y)
assert_false(hasattr(grid_search, 'classes_'))
# Test that the grid searcher has no classes_ attribute before it's fit
grid_search = GridSearchCV(LinearSVC(random_state=0), {'C': Cs})
assert_false(hasattr(grid_search, 'classes_'))
# Test that the grid searcher has no classes_ attribute without a refit
grid_search = GridSearchCV(LinearSVC(random_state=0),
{'C': Cs}, refit=False)
grid_search.fit(X, y)
assert_false(hasattr(grid_search, 'classes_'))
def test_trivial_cv_results_attr():
# Test search over a "grid" with only one point.
# Non-regression test: grid_scores_ wouldn't be set by GridSearchCV.
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1]})
grid_search.fit(X, y)
assert_true(hasattr(grid_search, "cv_results_"))
random_search = RandomizedSearchCV(clf, {'foo_param': [0]}, n_iter=1)
random_search.fit(X, y)
    assert_true(hasattr(random_search, "cv_results_"))
def test_no_refit():
# Test that GSCV can be used for model selection alone without refitting
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, refit=False)
grid_search.fit(X, y)
assert_true(not hasattr(grid_search, "best_estimator_") and
hasattr(grid_search, "best_index_") and
hasattr(grid_search, "best_params_"))
    # Make sure the predict/transform etc. functions raise a meaningful error msg
for fn_name in ('predict', 'predict_proba', 'predict_log_proba',
'transform', 'inverse_transform'):
assert_raise_message(NotFittedError,
('refit=False. %s is available only after '
'refitting on the best parameters' % fn_name),
getattr(grid_search, fn_name), X)
def test_grid_search_error():
# Test that grid search will capture errors on data with different length
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
assert_raises(ValueError, cv.fit, X_[:180], y_)
def test_grid_search_one_grid_point():
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
param_dict = {"C": [1.0], "kernel": ["rbf"], "gamma": [0.1]}
clf = SVC()
cv = GridSearchCV(clf, param_dict)
cv.fit(X_, y_)
clf = SVC(C=1.0, kernel="rbf", gamma=0.1)
clf.fit(X_, y_)
assert_array_equal(clf.dual_coef_, cv.best_estimator_.dual_coef_)
def test_grid_search_when_param_grid_includes_range():
# Test that the best estimator contains the right value for foo_param
clf = MockClassifier()
grid_search = None
if PY3_OR_LATER:
grid_search = GridSearchCV(clf, {'foo_param': range(1, 4)})
else:
grid_search = GridSearchCV(clf, {'foo_param': xrange(1, 4)})
grid_search.fit(X, y)
assert_equal(grid_search.best_estimator_.foo_param, 2)
def test_grid_search_bad_param_grid():
param_dict = {"C": 1.0}
clf = SVC()
assert_raise_message(
ValueError,
"Parameter values for parameter (C) need to be a sequence"
"(but not a string) or np.ndarray.",
GridSearchCV, clf, param_dict)
param_dict = {"C": []}
clf = SVC()
assert_raise_message(
ValueError,
"Parameter values for parameter (C) need to be a non-empty sequence.",
GridSearchCV, clf, param_dict)
param_dict = {"C": "1,2,3"}
clf = SVC()
assert_raise_message(
ValueError,
"Parameter values for parameter (C) need to be a sequence"
"(but not a string) or np.ndarray.",
GridSearchCV, clf, param_dict)
param_dict = {"C": np.ones(6).reshape(3, 2)}
clf = SVC()
assert_raises(ValueError, GridSearchCV, clf, param_dict)
def test_grid_search_sparse():
# Test that grid search works with both dense and sparse matrices
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
cv.fit(X_[:180], y_[:180])
y_pred = cv.predict(X_[180:])
C = cv.best_estimator_.C
X_ = sp.csr_matrix(X_)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
cv.fit(X_[:180].tocoo(), y_[:180])
y_pred2 = cv.predict(X_[180:])
C2 = cv.best_estimator_.C
assert_true(np.mean(y_pred == y_pred2) >= .9)
assert_equal(C, C2)
def test_grid_search_sparse_scoring():
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring="f1")
cv.fit(X_[:180], y_[:180])
y_pred = cv.predict(X_[180:])
C = cv.best_estimator_.C
X_ = sp.csr_matrix(X_)
clf = LinearSVC()
cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring="f1")
cv.fit(X_[:180], y_[:180])
y_pred2 = cv.predict(X_[180:])
C2 = cv.best_estimator_.C
assert_array_equal(y_pred, y_pred2)
assert_equal(C, C2)
# Smoke test the score
# np.testing.assert_allclose(f1_score(cv.predict(X_[:180]), y[:180]),
# cv.score(X_[:180], y[:180]))
# test loss where greater is worse
def f1_loss(y_true_, y_pred_):
return -f1_score(y_true_, y_pred_)
F1Loss = make_scorer(f1_loss, greater_is_better=False)
cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring=F1Loss)
cv.fit(X_[:180], y_[:180])
y_pred3 = cv.predict(X_[180:])
C3 = cv.best_estimator_.C
assert_equal(C, C3)
assert_array_equal(y_pred, y_pred3)
def test_grid_search_precomputed_kernel():
# Test that grid search works when the input features are given in the
# form of a precomputed kernel matrix
X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0)
# compute the training kernel matrix corresponding to the linear kernel
K_train = np.dot(X_[:180], X_[:180].T)
y_train = y_[:180]
clf = SVC(kernel='precomputed')
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
cv.fit(K_train, y_train)
assert_true(cv.best_score_ >= 0)
# compute the test kernel matrix
K_test = np.dot(X_[180:], X_[:180].T)
y_test = y_[180:]
y_pred = cv.predict(K_test)
assert_true(np.mean(y_pred == y_test) >= 0)
# test error is raised when the precomputed kernel is not array-like
# or sparse
assert_raises(ValueError, cv.fit, K_train.tolist(), y_train)
def test_grid_search_precomputed_kernel_error_nonsquare():
# Test that grid search returns an error with a non-square precomputed
# training kernel matrix
K_train = np.zeros((10, 20))
y_train = np.ones((10, ))
clf = SVC(kernel='precomputed')
cv = GridSearchCV(clf, {'C': [0.1, 1.0]})
assert_raises(ValueError, cv.fit, K_train, y_train)
class BrokenClassifier(BaseEstimator):
"""Broken classifier that cannot be fit twice"""
def __init__(self, parameter=None):
self.parameter = parameter
def fit(self, X, y):
assert_true(not hasattr(self, 'has_been_fit_'))
self.has_been_fit_ = True
def predict(self, X):
return np.zeros(X.shape[0])
@ignore_warnings
def test_refit():
# Regression test for bug in refitting
# Simulates re-fitting a broken estimator; this used to break with
# sparse SVMs.
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
clf = GridSearchCV(BrokenClassifier(), [{'parameter': [0, 1]}],
scoring="precision", refit=True)
clf.fit(X, y)
def test_gridsearch_nd():
# Pass X as list in GridSearchCV
X_4d = np.arange(10 * 5 * 3 * 2).reshape(10, 5, 3, 2)
y_3d = np.arange(10 * 7 * 11).reshape(10, 7, 11)
check_X = lambda x: x.shape[1:] == (5, 3, 2)
check_y = lambda x: x.shape[1:] == (7, 11)
clf = CheckingClassifier(check_X=check_X, check_y=check_y)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]})
grid_search.fit(X_4d, y_3d).score(X, y)
assert_true(hasattr(grid_search, "cv_results_"))
def test_X_as_list():
# Pass X as list in GridSearchCV
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
clf = CheckingClassifier(check_X=lambda x: isinstance(x, list))
cv = KFold(n_splits=3)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, cv=cv)
grid_search.fit(X.tolist(), y).score(X, y)
assert_true(hasattr(grid_search, "cv_results_"))
def test_y_as_list():
# Pass y as list in GridSearchCV
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
clf = CheckingClassifier(check_y=lambda x: isinstance(x, list))
cv = KFold(n_splits=3)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, cv=cv)
grid_search.fit(X, y.tolist()).score(X, y)
assert_true(hasattr(grid_search, "cv_results_"))
@ignore_warnings
def test_pandas_input():
# check cross_val_score doesn't destroy pandas dataframe
types = [(MockDataFrame, MockDataFrame)]
try:
from pandas import Series, DataFrame
types.append((DataFrame, Series))
except ImportError:
pass
X = np.arange(100).reshape(10, 10)
y = np.array([0] * 5 + [1] * 5)
for InputFeatureType, TargetType in types:
# X dataframe, y series
X_df, y_ser = InputFeatureType(X), TargetType(y)
check_df = lambda x: isinstance(x, InputFeatureType)
check_series = lambda x: isinstance(x, TargetType)
clf = CheckingClassifier(check_X=check_df, check_y=check_series)
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]})
grid_search.fit(X_df, y_ser).score(X_df, y_ser)
grid_search.predict(X_df)
assert_true(hasattr(grid_search, "cv_results_"))
def test_unsupervised_grid_search():
# test grid-search with unsupervised estimator
X, y = make_blobs(random_state=0)
km = KMeans(random_state=0)
grid_search = GridSearchCV(km, param_grid=dict(n_clusters=[2, 3, 4]),
scoring='adjusted_rand_score')
grid_search.fit(X, y)
# ARI can find the right number :)
assert_equal(grid_search.best_params_["n_clusters"], 3)
grid_search = GridSearchCV(km, param_grid=dict(n_clusters=[2, 3, 4]),
scoring='fowlkes_mallows_score')
grid_search.fit(X, y)
# So can FMS ;)
assert_equal(grid_search.best_params_["n_clusters"], 3)
# Now without a score, and without y
grid_search = GridSearchCV(km, param_grid=dict(n_clusters=[2, 3, 4]))
grid_search.fit(X)
assert_equal(grid_search.best_params_["n_clusters"], 4)
def test_gridsearch_no_predict():
# test grid-search with an estimator without predict.
# slight duplication of a test from KDE
def custom_scoring(estimator, X):
return 42 if estimator.bandwidth == .1 else 0
X, _ = make_blobs(cluster_std=.1, random_state=1,
centers=[[0, 1], [1, 0], [0, 0]])
search = GridSearchCV(KernelDensity(),
param_grid=dict(bandwidth=[.01, .1, 1]),
scoring=custom_scoring)
search.fit(X)
assert_equal(search.best_params_['bandwidth'], .1)
assert_equal(search.best_score_, 42)
def test_param_sampler():
# test basic properties of param sampler
param_distributions = {"kernel": ["rbf", "linear"],
"C": uniform(0, 1)}
sampler = ParameterSampler(param_distributions=param_distributions,
n_iter=10, random_state=0)
samples = [x for x in sampler]
assert_equal(len(samples), 10)
for sample in samples:
assert_true(sample["kernel"] in ["rbf", "linear"])
assert_true(0 <= sample["C"] <= 1)
# test that repeated calls yield identical parameters
param_distributions = {"C": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]}
sampler = ParameterSampler(param_distributions=param_distributions,
n_iter=3, random_state=0)
assert_equal([x for x in sampler], [x for x in sampler])
if sp_version >= (0, 16):
param_distributions = {"C": uniform(0, 1)}
sampler = ParameterSampler(param_distributions=param_distributions,
n_iter=10, random_state=0)
assert_equal([x for x in sampler], [x for x in sampler])
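# The three helpers below are shared by the grid/randomized search cv_results_
# tests: they check the dtypes of the result arrays, the set and shape of the
# result keys, and the consistency of the deprecated grid_scores_ attribute
# with cv_results_.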
def check_cv_results_array_types(cv_results, param_keys, score_keys):
    # Check that the search's `cv_results_` arrays are of the correct types
assert_true(all(isinstance(cv_results[param], np.ma.MaskedArray)
for param in param_keys))
assert_true(all(cv_results[key].dtype == object for key in param_keys))
assert_false(any(isinstance(cv_results[key], np.ma.MaskedArray)
for key in score_keys))
assert_true(all(cv_results[key].dtype == np.float64
for key in score_keys if not key.startswith('rank')))
assert_true(cv_results['rank_test_score'].dtype == np.int32)
def check_cv_results_keys(cv_results, param_keys, score_keys, n_cand):
    # Test that search.cv_results_ contains all the required results
assert_array_equal(sorted(cv_results.keys()),
sorted(param_keys + score_keys + ('params',)))
assert_true(all(cv_results[key].shape == (n_cand,)
for key in param_keys + score_keys))
def check_cv_results_grid_scores_consistency(search):
# TODO Remove in 0.20
cv_results = search.cv_results_
res_scores = np.vstack(list([cv_results["split%d_test_score" % i]
for i in range(search.n_splits_)])).T
res_means = cv_results["mean_test_score"]
res_params = cv_results["params"]
n_cand = len(res_params)
grid_scores = assert_warns(DeprecationWarning, getattr,
search, 'grid_scores_')
assert_equal(len(grid_scores), n_cand)
# Check consistency of the structure of grid_scores
for i in range(n_cand):
assert_equal(grid_scores[i].parameters, res_params[i])
assert_array_equal(grid_scores[i].cv_validation_scores,
res_scores[i, :])
assert_array_equal(grid_scores[i].mean_validation_score, res_means[i])
def test_grid_search_cv_results():
X, y = make_classification(n_samples=50, n_features=4,
random_state=42)
n_splits = 3
n_grid_points = 6
params = [dict(kernel=['rbf', ], C=[1, 10], gamma=[0.1, 1]),
dict(kernel=['poly', ], degree=[1, 2])]
grid_search = GridSearchCV(SVC(), cv=n_splits, iid=False,
param_grid=params)
grid_search.fit(X, y)
grid_search_iid = GridSearchCV(SVC(), cv=n_splits, iid=True,
param_grid=params)
grid_search_iid.fit(X, y)
param_keys = ('param_C', 'param_degree', 'param_gamma', 'param_kernel')
score_keys = ('mean_test_score', 'mean_train_score',
'rank_test_score',
'split0_test_score', 'split1_test_score',
'split2_test_score',
'split0_train_score', 'split1_train_score',
'split2_train_score',
'std_test_score', 'std_train_score',
'mean_fit_time', 'std_fit_time',
'mean_score_time', 'std_score_time')
n_candidates = n_grid_points
for search, iid in zip((grid_search, grid_search_iid), (False, True)):
assert_equal(iid, search.iid)
cv_results = search.cv_results_
# Check if score and timing are reasonable
assert_true(all(cv_results['rank_test_score'] >= 1))
        assert_true(all(np.all(cv_results[k] >= 0) for k in score_keys
                        if k != 'rank_test_score'))
        assert_true(all(np.all(cv_results[k] <= 1) for k in score_keys
                        if 'time' not in k and
                        k != 'rank_test_score'))
# Check cv_results structure
check_cv_results_array_types(cv_results, param_keys, score_keys)
check_cv_results_keys(cv_results, param_keys, score_keys, n_candidates)
# Check masking
cv_results = grid_search.cv_results_
n_candidates = len(grid_search.cv_results_['params'])
assert_true(all((cv_results['param_C'].mask[i] and
cv_results['param_gamma'].mask[i] and
not cv_results['param_degree'].mask[i])
for i in range(n_candidates)
if cv_results['param_kernel'][i] == 'linear'))
assert_true(all((not cv_results['param_C'].mask[i] and
not cv_results['param_gamma'].mask[i] and
cv_results['param_degree'].mask[i])
for i in range(n_candidates)
if cv_results['param_kernel'][i] == 'rbf'))
check_cv_results_grid_scores_consistency(search)
def test_random_search_cv_results():
    # Make a dataset with a lot of noise to get various kinds of prediction
# errors across CV folds and parameter settings
X, y = make_classification(n_samples=200, n_features=100, n_informative=3,
random_state=0)
    # scipy.stats distributions now support `seed`, but we still support scipy
    # 0.12, which doesn't. Hence the assertions in the test for random_search
    # alone should not depend on randomization.
n_splits = 3
n_search_iter = 30
params = dict(C=expon(scale=10), gamma=expon(scale=0.1))
random_search = RandomizedSearchCV(SVC(), n_iter=n_search_iter,
cv=n_splits, iid=False,
param_distributions=params)
random_search.fit(X, y)
random_search_iid = RandomizedSearchCV(SVC(), n_iter=n_search_iter,
cv=n_splits, iid=True,
param_distributions=params)
random_search_iid.fit(X, y)
param_keys = ('param_C', 'param_gamma')
score_keys = ('mean_test_score', 'mean_train_score',
'rank_test_score',
'split0_test_score', 'split1_test_score',
'split2_test_score',
'split0_train_score', 'split1_train_score',
'split2_train_score',
'std_test_score', 'std_train_score',
'mean_fit_time', 'std_fit_time',
'mean_score_time', 'std_score_time')
n_cand = n_search_iter
for search, iid in zip((random_search, random_search_iid), (False, True)):
assert_equal(iid, search.iid)
cv_results = search.cv_results_
# Check results structure
check_cv_results_array_types(cv_results, param_keys, score_keys)
check_cv_results_keys(cv_results, param_keys, score_keys, n_cand)
# For random_search, all the param array vals should be unmasked
assert_false(any(cv_results['param_C'].mask) or
any(cv_results['param_gamma'].mask))
check_cv_results_grid_scores_consistency(search)
def test_search_iid_param():
# Test the IID parameter
# noise-free simple 2d-data
X, y = make_blobs(centers=[[0, 0], [1, 0], [0, 1], [1, 1]], random_state=0,
cluster_std=0.1, shuffle=False, n_samples=80)
    # split the dataset into two folds that are not iid:
    # the first one contains data from all 4 blobs, the second only from two.
mask = np.ones(X.shape[0], dtype=np.bool)
mask[np.where(y == 1)[0][::2]] = 0
mask[np.where(y == 2)[0][::2]] = 0
# this leads to perfect classification on one fold and a score of 1/3 on
# the other
# create "cv" for splits
cv = [[mask, ~mask], [~mask, mask]]
# once with iid=True (default)
grid_search = GridSearchCV(SVC(), param_grid={'C': [1, 10]}, cv=cv)
random_search = RandomizedSearchCV(SVC(), n_iter=2,
param_distributions={'C': [1, 10]},
cv=cv)
for search in (grid_search, random_search):
search.fit(X, y)
assert_true(search.iid)
test_cv_scores = np.array(list(search.cv_results_['split%d_test_score'
% s_i][0]
for s_i in range(search.n_splits_)))
test_mean = search.cv_results_['mean_test_score'][0]
test_std = search.cv_results_['std_test_score'][0]
train_cv_scores = np.array(list(search.cv_results_['split%d_train_'
'score' % s_i][0]
for s_i in range(search.n_splits_)))
train_mean = search.cv_results_['mean_train_score'][0]
train_std = search.cv_results_['std_train_score'][0]
# Test the first candidate
assert_equal(search.cv_results_['param_C'][0], 1)
assert_array_almost_equal(test_cv_scores, [1, 1. / 3.])
assert_array_almost_equal(train_cv_scores, [1, 1])
# for first split, 1/4 of dataset is in test, for second 3/4.
# take weighted average and weighted std
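        # i.e. mean = sum(w_i * x_i) and std = sqrt(sum(w_i * (x_i - mean)**2))
        # with test-fraction weights w = (1/4, 3/4) over the fold scores (1, 1/3)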
expected_test_mean = 1 * 1. / 4. + 1. / 3. * 3. / 4.
expected_test_std = np.sqrt(1. / 4 * (expected_test_mean - 1) ** 2 +
3. / 4 * (expected_test_mean - 1. / 3.) **
2)
assert_almost_equal(test_mean, expected_test_mean)
assert_almost_equal(test_std, expected_test_std)
# For the train scores, we do not take a weighted mean irrespective of
# i.i.d. or not
assert_almost_equal(train_mean, 1)
assert_almost_equal(train_std, 0)
# once with iid=False
grid_search = GridSearchCV(SVC(),
param_grid={'C': [1, 10]},
cv=cv, iid=False)
random_search = RandomizedSearchCV(SVC(), n_iter=2,
param_distributions={'C': [1, 10]},
cv=cv, iid=False)
for search in (grid_search, random_search):
search.fit(X, y)
assert_false(search.iid)
test_cv_scores = np.array(list(search.cv_results_['split%d_test_score'
% s][0]
for s in range(search.n_splits_)))
test_mean = search.cv_results_['mean_test_score'][0]
test_std = search.cv_results_['std_test_score'][0]
train_cv_scores = np.array(list(search.cv_results_['split%d_train_'
'score' % s][0]
for s in range(search.n_splits_)))
train_mean = search.cv_results_['mean_train_score'][0]
train_std = search.cv_results_['std_train_score'][0]
assert_equal(search.cv_results_['param_C'][0], 1)
# scores are the same as above
assert_array_almost_equal(test_cv_scores, [1, 1. / 3.])
# Unweighted mean/std is used
assert_almost_equal(test_mean, np.mean(test_cv_scores))
assert_almost_equal(test_std, np.std(test_cv_scores))
# For the train scores, we do not take a weighted mean irrespective of
# i.i.d. or not
assert_almost_equal(train_mean, 1)
assert_almost_equal(train_std, 0)
def test_search_cv_results_rank_tie_breaking():
X, y = make_blobs(n_samples=50, random_state=42)
# The two C values are close enough to give similar models
# which would result in a tie of their mean cv-scores
param_grid = {'C': [1, 1.001, 0.001]}
grid_search = GridSearchCV(SVC(), param_grid=param_grid)
random_search = RandomizedSearchCV(SVC(), n_iter=3,
param_distributions=param_grid)
for search in (grid_search, random_search):
search.fit(X, y)
cv_results = search.cv_results_
# Check tie breaking strategy -
# Check that there is a tie in the mean scores between
# candidates 1 and 2 alone
assert_almost_equal(cv_results['mean_test_score'][0],
cv_results['mean_test_score'][1])
assert_almost_equal(cv_results['mean_train_score'][0],
cv_results['mean_train_score'][1])
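        # The third candidate may or may not tie with the first two, so a
        # failed comparison here is tolerated rather than asserted.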
try:
assert_almost_equal(cv_results['mean_test_score'][1],
cv_results['mean_test_score'][2])
except AssertionError:
pass
try:
assert_almost_equal(cv_results['mean_train_score'][1],
cv_results['mean_train_score'][2])
except AssertionError:
pass
# 'min' rank should be assigned to the tied candidates
assert_almost_equal(search.cv_results_['rank_test_score'], [1, 1, 3])
def test_search_cv_results_none_param():
X, y = [[1], [2], [3], [4], [5]], [0, 0, 0, 0, 1]
estimators = (DecisionTreeRegressor(), DecisionTreeClassifier())
est_parameters = {"random_state": [0, None]}
cv = KFold(random_state=0)
for est in estimators:
grid_search = GridSearchCV(est, est_parameters, cv=cv).fit(X, y)
assert_array_equal(grid_search.cv_results_['param_random_state'],
[0, None])
@ignore_warnings()
def test_search_cv_timing():
svc = LinearSVC(random_state=0)
X = [[1, ], [2, ], [3, ], [4, ]]
y = [0, 1, 1, 0]
gs = GridSearchCV(svc, {'C': [0, 1]}, cv=2, error_score=0)
rs = RandomizedSearchCV(svc, {'C': [0, 1]}, cv=2, error_score=0, n_iter=2)
for search in (gs, rs):
search.fit(X, y)
for key in ['mean_fit_time', 'std_fit_time']:
            # NOTE The precision of time.time on Windows is not high enough
            # for the fit/score times to be non-zero for trivial X and y
assert_true(np.all(search.cv_results_[key] >= 0))
assert_true(np.all(search.cv_results_[key] < 1))
for key in ['mean_score_time', 'std_score_time']:
assert_true(search.cv_results_[key][1] >= 0)
assert_true(search.cv_results_[key][0] == 0.0)
assert_true(np.all(search.cv_results_[key] < 1))
def test_grid_search_correct_score_results():
# test that correct scores are used
n_splits = 3
clf = LinearSVC(random_state=0)
X, y = make_blobs(random_state=0, centers=2)
Cs = [.1, 1, 10]
for score in ['f1', 'roc_auc']:
grid_search = GridSearchCV(clf, {'C': Cs}, scoring=score, cv=n_splits)
cv_results = grid_search.fit(X, y).cv_results_
# Test scorer names
result_keys = list(cv_results.keys())
expected_keys = (("mean_test_score", "rank_test_score") +
tuple("split%d_test_score" % cv_i
for cv_i in range(n_splits)))
assert_true(all(np.in1d(expected_keys, result_keys)))
cv = StratifiedKFold(n_splits=n_splits)
n_splits = grid_search.n_splits_
for candidate_i, C in enumerate(Cs):
clf.set_params(C=C)
cv_scores = np.array(
list(grid_search.cv_results_['split%d_test_score'
% s][candidate_i]
for s in range(n_splits)))
for i, (train, test) in enumerate(cv.split(X, y)):
clf.fit(X[train], y[train])
if score == "f1":
correct_score = f1_score(y[test], clf.predict(X[test]))
elif score == "roc_auc":
dec = clf.decision_function(X[test])
correct_score = roc_auc_score(y[test], dec)
assert_almost_equal(correct_score, cv_scores[i])
def test_pickle():
# Test that a fit search can be pickled
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, refit=True)
grid_search.fit(X, y)
grid_search_pickled = pickle.loads(pickle.dumps(grid_search))
assert_array_almost_equal(grid_search.predict(X),
grid_search_pickled.predict(X))
random_search = RandomizedSearchCV(clf, {'foo_param': [1, 2, 3]},
refit=True, n_iter=3)
random_search.fit(X, y)
random_search_pickled = pickle.loads(pickle.dumps(random_search))
assert_array_almost_equal(random_search.predict(X),
random_search_pickled.predict(X))
def test_grid_search_with_multioutput_data():
# Test search with multi-output estimator
X, y = make_multilabel_classification(return_indicator=True,
random_state=0)
est_parameters = {"max_depth": [1, 2, 3, 4]}
cv = KFold(random_state=0)
estimators = [DecisionTreeRegressor(random_state=0),
DecisionTreeClassifier(random_state=0)]
# Test with grid search cv
for est in estimators:
grid_search = GridSearchCV(est, est_parameters, cv=cv)
grid_search.fit(X, y)
res_params = grid_search.cv_results_['params']
for cand_i in range(len(res_params)):
est.set_params(**res_params[cand_i])
for i, (train, test) in enumerate(cv.split(X, y)):
est.fit(X[train], y[train])
correct_score = est.score(X[test], y[test])
assert_almost_equal(
correct_score,
grid_search.cv_results_['split%d_test_score' % i][cand_i])
# Test with a randomized search
for est in estimators:
random_search = RandomizedSearchCV(est, est_parameters,
cv=cv, n_iter=3)
random_search.fit(X, y)
res_params = random_search.cv_results_['params']
for cand_i in range(len(res_params)):
est.set_params(**res_params[cand_i])
for i, (train, test) in enumerate(cv.split(X, y)):
est.fit(X[train], y[train])
correct_score = est.score(X[test], y[test])
assert_almost_equal(
correct_score,
random_search.cv_results_['split%d_test_score'
% i][cand_i])
def test_predict_proba_disabled():
# Test predict_proba when disabled on estimator.
X = np.arange(20).reshape(5, -1)
y = [0, 0, 1, 1, 1]
clf = SVC(probability=False)
gs = GridSearchCV(clf, {}, cv=2).fit(X, y)
assert_false(hasattr(gs, "predict_proba"))
def test_grid_search_allows_nans():
# Test GridSearchCV with Imputer
X = np.arange(20, dtype=np.float64).reshape(5, -1)
X[2, :] = np.nan
y = [0, 0, 1, 1, 1]
p = Pipeline([
('imputer', Imputer(strategy='mean', missing_values='NaN')),
('classifier', MockClassifier()),
])
GridSearchCV(p, {'classifier__foo_param': [1, 2, 3]}, cv=2).fit(X, y)
class FailingClassifier(BaseEstimator):
"""Classifier that raises a ValueError on fit()"""
FAILING_PARAMETER = 2
def __init__(self, parameter=None):
self.parameter = parameter
def fit(self, X, y=None):
if self.parameter == FailingClassifier.FAILING_PARAMETER:
raise ValueError("Failing classifier failed as required")
def predict(self, X):
return np.zeros(X.shape[0])
def test_grid_search_failing_classifier():
# GridSearchCV with on_error != 'raise'
# Ensures that a warning is raised and score reset where appropriate.
X, y = make_classification(n_samples=20, n_features=10, random_state=0)
clf = FailingClassifier()
# refit=False because we only want to check that errors caused by fits
# to individual folds will be caught and warnings raised instead. If
# refit was done, then an exception would be raised on refit and not
# caught by grid_search (expected behavior), and this would cause an
# error in this test.
gs = GridSearchCV(clf, [{'parameter': [0, 1, 2]}], scoring='accuracy',
refit=False, error_score=0.0)
assert_warns(FitFailedWarning, gs.fit, X, y)
n_candidates = len(gs.cv_results_['params'])
# Ensure that grid scores were set to zero as required for those fits
# that are expected to fail.
def get_cand_scores(i):
return np.array(list(gs.cv_results_['split%d_test_score' % s][i]
for s in range(gs.n_splits_)))
assert all((np.all(get_cand_scores(cand_i) == 0.0)
for cand_i in range(n_candidates)
if gs.cv_results_['param_parameter'][cand_i] ==
FailingClassifier.FAILING_PARAMETER))
gs = GridSearchCV(clf, [{'parameter': [0, 1, 2]}], scoring='accuracy',
refit=False, error_score=float('nan'))
assert_warns(FitFailedWarning, gs.fit, X, y)
n_candidates = len(gs.cv_results_['params'])
assert all(np.all(np.isnan(get_cand_scores(cand_i)))
for cand_i in range(n_candidates)
if gs.cv_results_['param_parameter'][cand_i] ==
FailingClassifier.FAILING_PARAMETER)
def test_grid_search_failing_classifier_raise():
# GridSearchCV with on_error == 'raise' raises the error
X, y = make_classification(n_samples=20, n_features=10, random_state=0)
clf = FailingClassifier()
# refit=False because we want to test the behaviour of the grid search part
gs = GridSearchCV(clf, [{'parameter': [0, 1, 2]}], scoring='accuracy',
refit=False, error_score='raise')
# FailingClassifier issues a ValueError so this is what we look for.
assert_raises(ValueError, gs.fit, X, y)
def test_parameters_sampler_replacement():
# raise error if n_iter too large
params = {'first': [0, 1], 'second': ['a', 'b', 'c']}
sampler = ParameterSampler(params, n_iter=7)
assert_raises(ValueError, list, sampler)
    # degenerates to GridSearchCV if n_iter is the same as grid_size
sampler = ParameterSampler(params, n_iter=6)
samples = list(sampler)
assert_equal(len(samples), 6)
for values in ParameterGrid(params):
assert_true(values in samples)
# test sampling without replacement in a large grid
params = {'a': range(10), 'b': range(10), 'c': range(10)}
sampler = ParameterSampler(params, n_iter=99, random_state=42)
samples = list(sampler)
assert_equal(len(samples), 99)
hashable_samples = ["a%db%dc%d" % (p['a'], p['b'], p['c'])
for p in samples]
assert_equal(len(set(hashable_samples)), 99)
# doesn't go into infinite loops
params_distribution = {'first': bernoulli(.5), 'second': ['a', 'b', 'c']}
sampler = ParameterSampler(params_distribution, n_iter=7)
samples = list(sampler)
assert_equal(len(samples), 7)
def test_stochastic_gradient_loss_param():
# Make sure the predict_proba works when loss is specified
# as one of the parameters in the param_grid.
param_grid = {
'loss': ['log'],
}
X = np.arange(24).reshape(6, -1)
y = [0, 0, 0, 1, 1, 1]
clf = GridSearchCV(estimator=SGDClassifier(loss='hinge'),
param_grid=param_grid)
# When the estimator is not fitted, `predict_proba` is not available as the
# loss is 'hinge'.
assert_false(hasattr(clf, "predict_proba"))
clf.fit(X, y)
clf.predict_proba(X)
clf.predict_log_proba(X)
# Make sure `predict_proba` is not available when setting loss=['hinge']
# in param_grid
param_grid = {
'loss': ['hinge'],
}
clf = GridSearchCV(estimator=SGDClassifier(loss='hinge'),
param_grid=param_grid)
assert_false(hasattr(clf, "predict_proba"))
clf.fit(X, y)
assert_false(hasattr(clf, "predict_proba"))
def test_search_train_scores_set_to_false():
X = np.arange(6).reshape(6, -1)
y = [0, 0, 0, 1, 1, 1]
clf = LinearSVC(random_state=0)
gs = GridSearchCV(clf, param_grid={'C': [0.1, 0.2]},
return_train_score=False)
gs.fit(X, y)
def test_grid_search_cv_splits_consistency():
# Check if a one time iterable is accepted as a cv parameter.
n_samples = 100
n_splits = 5
X, y = make_classification(n_samples=n_samples, random_state=0)
gs = GridSearchCV(LinearSVC(random_state=0),
param_grid={'C': [0.1, 0.2, 0.3]},
cv=OneTimeSplitter(n_splits=n_splits,
n_samples=n_samples))
gs.fit(X, y)
gs2 = GridSearchCV(LinearSVC(random_state=0),
param_grid={'C': [0.1, 0.2, 0.3]},
cv=KFold(n_splits=n_splits))
gs2.fit(X, y)
def _pop_time_keys(cv_results):
for key in ('mean_fit_time', 'std_fit_time',
'mean_score_time', 'std_score_time'):
cv_results.pop(key)
return cv_results
    # OneTimeSplitter is a non-re-entrant cv where split can be called only
    # once. If ``cv.split`` were called once per param setting in
    # GridSearchCV.fit, the 2nd and 3rd parameters would not be evaluated, as
    # no train/test indices would be generated for the 2nd and subsequent
    # cv.split calls.
    # This is a check to make sure cv.split is not called once per param
    # setting.
np.testing.assert_equal(_pop_time_keys(gs.cv_results_),
_pop_time_keys(gs2.cv_results_))
# Check consistency of folds across the parameters
gs = GridSearchCV(LinearSVC(random_state=0),
param_grid={'C': [0.1, 0.1, 0.2, 0.2]},
cv=KFold(n_splits=n_splits, shuffle=True))
gs.fit(X, y)
    # As the first two param settings (C=0.1) and the next two param
    # settings (C=0.2) are the same, the test and train scores must also be
    # the same, as long as the same train/test indices are generated for all
    # the cv splits, for both param settings.
for score_type in ('train', 'test'):
per_param_scores = {}
for param_i in range(4):
per_param_scores[param_i] = list(
gs.cv_results_['split%d_%s_score' % (s, score_type)][param_i]
for s in range(5))
assert_array_almost_equal(per_param_scores[0],
per_param_scores[1])
assert_array_almost_equal(per_param_scores[2],
per_param_scores[3])
def test_transform_inverse_transform_round_trip():
clf = MockClassifier()
grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, verbose=3)
grid_search.fit(X, y)
X_round_trip = grid_search.inverse_transform(grid_search.transform(X))
assert_array_equal(X, X_round_trip)
| bsd-3-clause |
phiedulxp/tweet_search | ie/current_event_ie.py | 1 | 3580 | from pycorenlp import StanfordCoreNLP
nlp = StanfordCoreNLP('http://localhost:9000')
from datetime import datetime,timedelta
from collections import defaultdict,Counter
from pprint import pprint
from tqdm import tqdm
import re
import pymongo
from pymongo import InsertOne, DeleteMany, ReplaceOne, UpdateOne
from pymongo.errors import BulkWriteError
client = pymongo.MongoClient('localhost:27017')
db = client.tweet
import pandas as pd
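# get_ner_dict groups consecutive tokens that share the same (non-'O') NER tag
# into phrases and returns a {tag: [unique phrases]} mapping, e.g.
# get_ner_dict(['Barack', 'Obama', 'visited', 'Paris', 'yesterday'],
#              ['PERSON', 'PERSON', 'O', 'LOCATION', 'O'])
# -> {'PERSON': ['Barack Obama'], 'LOCATION': ['Paris']}
# Note that the segment after the final tag change is never sliced, so an
# entity running to the very end of the token list is not captured.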
def get_ner_dict(word,ner):
ner_tuple = zip(word,ner)
splits = [0]
    for index, nt in enumerate(ner_tuple):
        if index == 0:
            temp = nt[1]
            continue
        if temp != nt[1]:
            splits.append(index)
        temp = nt[1]
ner_dict = defaultdict(list)
for index,s in enumerate(splits):
nt = ner_tuple[splits[index-1]:s]
if len(nt) == 0:
continue
if nt[0][1] != u'O':
ner_dict[nt[0][1]].append(' '.join([i[0] for i in nt]))
ner_dict = dict(ner_dict)
for k,v in ner_dict.iteritems():
ner_dict[k] = list(set(v))
return ner_dict
def get_most_common(items):
count = Counter(items)
return count.most_common()[0][0]
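# best_openie collapses the OpenIE triples extracted for one event into a single
# triple: the most frequent subject and relation, paired with the longest object
# string seen among the extractions.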
def best_openie(openies):
if len(openies) == 0:
return {}
openie_df = pd.DataFrame.from_dict(openies)
object_lsit = openie_df['object'].tolist()
object_,lenth = object_lsit[0],len(object_lsit[0])
for i in object_lsit[1:]:
if len(i) >= lenth: # get the longest object
object_,lenth = i,len(i)
return {'subject':get_most_common(openie_df.subject),
'relation':get_most_common(openie_df.relation),
'object':object_}
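# ie_pipeline runs a text through the CoreNLP server (truecase, ner and openie
# annotators) and condenses the response into the grouped NER dictionary plus
# the single "best" OpenIE triple; empty dicts are returned when CoreNLP finds
# no sentences in the text.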
def ie_pipeline(text):
output = nlp.annotate(text, properties={
'annotators': 'truecase,ner,openie',#sentiment
'outputFormat': 'json',
})
sentences = output['sentences']
if len(sentences) == 0:
return {
#'word':{},
#'truecaseText':{},
#'lemma':{},
#'pos':{},
#'ner':{},
            'ner_dict':{},
'openie':{},
#'sentiment':{},
}
words = []
#truecaseTexts = []
#lemmas = []
#poss = []
ners = []
openies = []
#sentiments = []
for sentence in sentences:
tokens_df = pd.DataFrame.from_dict(sentence['tokens'])
words.extend(tokens_df['word'].tolist())
#truecaseTexts.extend(tokens_df['truecaseText'].tolist())
#lemmas.extend(tokens_df['lemma'].tolist())
#poss.extend(tokens_df['pos'].tolist())
ners.extend(tokens_df['ner'].tolist())
for i in sentence['openie']:
temp = i.pop('objectSpan')
temp = i.pop('relationSpan')
temp = i.pop('subjectSpan')
openies.extend(sentence['openie'])
#sentiments.append((sentence['sentiment'],sentence['sentimentValue']))
ner_dict = get_ner_dict(words,ners)
return {
#'word':words,
#'truecaseText':truecaseTexts,
#'lemma':lemmas,
#'pos':poss,
#'ner':ners,
'ner_dict':ner_dict,
'openie':best_openie(openies),
#'sentiment':sentiments,
}
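# batch_ie strips URLs from each text and feeds the batch through ie_pipeline,
# reporting progress with tqdm.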
def batch_ie(texts):
ies = []
for text in tqdm(texts):
ies.append(ie_pipeline(re.sub('http.+','',text.encode('utf-8'))))
return ies
def clear_description(des):
return re.sub('\([\w ]+\)','',des).strip()
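# Script entry point: fetch current_event documents that do not yet have an
# 'ie' field, build "<title>. <description>" texts for them, run the IE
# pipeline, and bulk-write the results back to MongoDB.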
if __name__ == '__main__':
query = db.current_event.find({'ie':None},{'_id':1,'event.title':1,'event.description':1})
ids = []
texts = []
for i in query:
ids.append(i['_id'])
texts.append(i['event']['title']+'. '+clear_description(i['event']['description']))
ies = batch_ie(texts)
requests = [UpdateOne({'_id': _id,'ie':None}, {'$set': {'ie':ies[index]}}) for index,_id in tqdm(enumerate(ids))]
try:
result = db.current_event.bulk_write(requests)
pprint(result.bulk_api_result)
except BulkWriteError as bwe:
pprint(bwe.details)
client.close() | mit |
lucashtnguyen/wqio | wqio/tests/core_tests/features_tests.py | 1 | 49106 | import os
from nose.tools import *
import numpy as np
import numpy.testing as nptest
from wqio import testing
from wqio.testing.testutils import setup_prefix
usetex = testing.compare_versions(utility='latex')
import matplotlib
matplotlib.rcParams['text.usetex'] = usetex
import matplotlib.pyplot as plt
import seaborn.apionly as seaborn
import pandas
import pandas.util.testing as pdtest
from matplotlib.testing.decorators import image_comparison, cleanup
from wqio.core.features import (
Parameter,
DrainageArea,
Location,
Dataset,
DataCollection
)
from wqio import utils
import warnings
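# Example filter handed to Location.applyFilter in the tests below: it keeps
# only results greater than 5 and returns include=False for the location.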
@nottest
def testfilter(data):
return data[data['res'] > 5], False
class _base_Parameter_Mixin(object):
def setup(self):
self.known_name = 'Copper'
self.known_units = 'ug/L'
self.known_pu_nocomma = 'Copper (ug/L)'
self.known_pu_comma = 'Copper, ug/L'
self.param = Parameter(name=self.known_name, units=self.known_units)
def teardown(self):
pass
def test_name(self):
assert_equal(self.known_name, self.param.name)
def test_units(self):
assert_equal(self.known_units, self.param.units)
def test_paramunit_nocomma(self):
assert_equal(self.known_pu_nocomma, self.param.paramunit(usecomma=False))
def test_paramunit_comma(self):
assert_equal(self.known_pu_comma, self.param.paramunit(usecomma=True))
def teardown(self):
plt.close('all')
class test_Parameter_simple(_base_Parameter_Mixin):
def setup(self):
self.known_name = 'Copper'
self.known_units = 'ug/L'
self.known_pu_nocomma = 'Copper (ug/L)'
self.known_pu_comma = 'Copper, ug/L'
self.param = Parameter(name=self.known_name, units=self.known_units)
class test_Parameter_complex(_base_Parameter_Mixin):
    # placeholder for when TeX/validation/etc. gets integrated
def setup(self):
self.known_name = 'Nitrate & Nitrite'
self.known_units = 'mg/L'
self.known_pu_nocomma = 'Nitrate & Nitrite (mg/L)'
self.known_pu_comma = 'Nitrate & Nitrite, mg/L'
self.param = Parameter(name=self.known_name, units=self.known_units)
class test_DrainageArea(object):
def setup(self):
self.total_area = 100.0
self.imp_area = 75.0
self.bmp_area = 10.0
self.da = DrainageArea(self.total_area, self.imp_area, self.bmp_area)
self.volume_conversion = 5
self.known_storm_runoff = 82.5
self.known_annual_runoff = 75.25
self.storm_depth = 1.0
self.storm_volume = self.storm_depth * (self.total_area + self.bmp_area)
self.annualFactor = 0.9
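        # The expected runoff values appear to follow the Simple Method with
        # Rv = 0.05 + 0.9 * (imp_area / total_area) = 0.725:
        #   storm:  1.0 * 0.725 * 100 + 1.0 * 10         = 82.5
        #   annual: 0.9 * (1.0 * 0.725 * 100) + 1.0 * 10 = 75.25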
def test_total_area(self):
assert_true(hasattr(self.da, 'total_area'))
assert_equal(self.total_area, self.da.total_area)
def test_imp_area(self):
assert_true(hasattr(self.da, 'imp_area'))
assert_equal(self.imp_area, self.da.imp_area)
def test_bmp_area(self):
assert_true(hasattr(self.da, 'bmp_area'))
assert_equal(self.bmp_area, self.da.bmp_area)
def test_simple_method_noConversion(self):
assert_true(hasattr(self.da, 'simple_method'))
runoff = self.da.simple_method(1)
nptest.assert_almost_equal(self.known_storm_runoff, runoff, decimal=3)
assert_greater(self.storm_volume, runoff)
def test_simple_method_Conversion(self):
assert_true(hasattr(self.da, 'simple_method'))
runoff = self.da.simple_method(1, volume_conversion=self.volume_conversion)
nptest.assert_almost_equal(self.known_storm_runoff * self.volume_conversion, runoff, decimal=3)
assert_greater(self.storm_volume * self.volume_conversion, runoff)
def test_simple_method_annualFactor(self):
assert_true(hasattr(self.da, 'simple_method'))
runoff = self.da.simple_method(1, annualFactor=self.annualFactor)
nptest.assert_almost_equal(self.known_annual_runoff, runoff, decimal=3)
assert_greater(self.storm_volume, runoff)
def teardown(self):
plt.close('all')
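# _base_LocationMixin holds the assertions shared by the Location tests; the
# concrete test_Location_ROS / test_Location_noROS classes below only supply
# the expected "known_*" values for each ROS setting.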
class _base_LocationMixin(object):
@nottest
def makePath(self, filename):
return os.path.join(self.prefix, filename)
@nottest
def main_setup(self):
# basic test data
self.tolerance = 0.05
self.known_bsIter = 1500
self.data = testing.getTestROSData()
# Location stuff
self.known_station_name = 'Influent'
self.known_station_type = 'inflow'
self.known_plot_marker = 'o'
self.known_scatter_marker = 'v'
self.known_color = np.array((0.32157, 0.45271, 0.66667))
self.known_rescol = 'res'
self.known_qualcol = 'qual'
self.known_all_positive = True
self.known_include = True
self.known_exclude = False
self.known_hasData = True
self.known_min_detect = 2.00
self.known_min_DL = 5.00
self.known_filtered_include = False
def teardown(self):
plt.close('all')
def test_bsIter(self):
assert_true(hasattr(self.loc, 'bsIter'))
assert_equal(self.loc.bsIter, self.known_bsIter)
def test_useROS(self):
assert_true(hasattr(self.loc, 'useROS'))
assert_equal(self.loc.useROS, self.known_useRos)
def test_name(self):
assert_true(hasattr(self.loc, 'name'))
assert_equal(self.loc.name, self.known_station_name)
def test_N(self):
assert_true(hasattr(self.loc, 'N'))
assert_equal(self.loc.N, self.known_N)
def test_hasData(self):
assert_true(hasattr(self.loc, 'hasData'))
assert_equal(self.loc.hasData, self.known_hasData)
def test_ND(self):
assert_true(hasattr(self.loc, 'ND'))
assert_equal(self.loc.ND, self.known_ND)
def test_fractionND(self):
assert_true(hasattr(self.loc, 'fractionND'))
assert_equal(self.loc.fractionND, self.known_fractionND)
def test_NUnique(self):
assert_true(hasattr(self.loc, 'NUnique'))
assert_equal(self.loc.NUnique, self.known_NUnique)
def test_analysis_space(self):
assert_true(hasattr(self.loc, 'analysis_space'))
assert_equal(self.loc.analysis_space, self.known_analysis_space)
def test_cov(self):
assert_true(hasattr(self.loc, 'cov'))
nptest.assert_allclose(self.loc.cov, self.known_cov, rtol=self.tolerance)
def test_geomean(self):
assert_true(hasattr(self.loc, 'geomean'))
nptest.assert_allclose(self.loc.geomean, self.known_geomean, rtol=self.tolerance)
def test_geomean_conf_interval(self):
assert_true(hasattr(self.loc, 'geomean_conf_interval'))
nptest.assert_allclose(
self.loc.geomean_conf_interval,
self.known_geomean_conf_interval,
rtol=self.tolerance
)
def test_geostd(self):
assert_true(hasattr(self.loc, 'geostd'))
nptest.assert_allclose(
self.loc.geostd,
self.known_geostd,
rtol=self.tolerance
)
def test_logmean(self):
assert_true(hasattr(self.loc, 'logmean'))
nptest.assert_allclose(
self.loc.logmean,
self.known_logmean,
rtol=self.tolerance
)
def test_logmean_conf_interval(self):
assert_true(hasattr(self.loc, 'logmean_conf_interval'))
nptest.assert_allclose(self.loc.logmean_conf_interval, self.known_logmean_conf_interval, rtol=self.tolerance)
def test_logstd(self):
assert_true(hasattr(self.loc, 'logstd'))
nptest.assert_allclose(self.loc.logstd, self.known_logstd, rtol=self.tolerance)
def test_max(self):
assert_true(hasattr(self.loc, 'max'))
assert_equal(self.loc.max, self.known_max)
def test_mean(self):
assert_true(hasattr(self.loc, 'mean'))
nptest.assert_allclose(self.loc.mean, self.known_mean, rtol=self.tolerance)
def test_mean_conf_interval(self):
assert_true(hasattr(self.loc, 'mean_conf_interval'))
nptest.assert_allclose(self.loc.mean_conf_interval, self.known_mean_conf_interval, rtol=self.tolerance)
def test_median(self):
assert_true(hasattr(self.loc, 'median'))
nptest.assert_allclose(self.loc.median, self.known_median, rtol=self.tolerance)
def test_median_conf_interval(self):
assert_true(hasattr(self.loc, 'median_conf_interval'))
nptest.assert_allclose(self.loc.median_conf_interval, self.known_median_conf_interval, rtol=self.tolerance*1.5)
def test_min(self):
assert_true(hasattr(self.loc, 'min'))
assert_equal(self.loc.min, self.known_min)
def test_min_detect(self):
assert_true(hasattr(self.loc, 'min_detect'))
assert_equal(self.loc.min_detect, self.known_min_detect)
def test_min_DL(self):
assert_true(hasattr(self.loc, 'min_DL'))
assert_equal(self.loc.min_DL, self.known_min_DL)
def test_pctl10(self):
assert_true(hasattr(self.loc, 'pctl10'))
nptest.assert_allclose(self.loc.pctl10, self.known_pctl10, rtol=self.tolerance)
def test_pctl25(self):
assert_true(hasattr(self.loc, 'pctl25'))
nptest.assert_allclose(self.loc.pctl25, self.known_pctl25, rtol=self.tolerance)
def test_pctl75(self):
assert_true(hasattr(self.loc, 'pctl75'))
nptest.assert_allclose(self.loc.pctl75, self.known_pctl75, rtol=self.tolerance)
def test_pctl90(self):
assert_true(hasattr(self.loc, 'pctl90'))
nptest.assert_allclose(self.loc.pctl90, self.known_pctl90, rtol=self.tolerance)
def test_pnorm(self):
assert_true(hasattr(self.loc, 'pnorm'))
nptest.assert_allclose(self.loc.pnorm, self.known_pnorm, rtol=self.tolerance)
def test_plognorm(self):
assert_true(hasattr(self.loc, 'plognorm'))
nptest.assert_allclose(self.loc.plognorm, self.known_plognorm, rtol=self.tolerance)
def test_shapiro(self):
assert_true(hasattr(self.loc, 'shapiro'))
nptest.assert_allclose(self.loc.shapiro, self.known_shapiro, rtol=self.tolerance)
def test_shapiro_log(self):
assert_true(hasattr(self.loc, 'shapiro_log'))
nptest.assert_allclose(self.loc.shapiro_log, self.known_shapiro_log, rtol=self.tolerance)
def test_lilliefors(self):
assert_true(hasattr(self.loc, 'lilliefors'))
nptest.assert_allclose(self.loc.lilliefors, self.known_lilliefors, rtol=self.tolerance)
def test_lilliefors_log(self):
assert_true(hasattr(self.loc, 'lilliefors_log'))
nptest.assert_allclose(self.loc.lilliefors_log, self.known_lilliefors_log, rtol=self.tolerance)
def test_anderson(self):
assert_true(hasattr(self.loc, 'anderson'))
nptest.assert_almost_equal(self.loc.anderson[0], self.known_anderson[0], decimal=5)
nptest.assert_allclose(self.loc.anderson[1], self.known_anderson[1], rtol=self.tolerance)
nptest.assert_allclose(self.loc.anderson[2], self.known_anderson[2], rtol=self.tolerance)
def test_anderson_log(self):
assert_true(hasattr(self.loc, 'anderson'))
nptest.assert_almost_equal(self.loc.anderson_log[0], self.known_anderson_log[0], decimal=5)
nptest.assert_allclose(self.loc.anderson_log[1], self.known_anderson_log[1], rtol=self.tolerance)
nptest.assert_allclose(self.loc.anderson_log[2], self.known_anderson_log[2], rtol=self.tolerance)
def test_skew(self):
assert_true(hasattr(self.loc, 'skew'))
nptest.assert_allclose(self.loc.skew, self.known_skew, rtol=self.tolerance)
def test_std(self):
assert_true(hasattr(self.loc, 'std'))
nptest.assert_allclose(self.loc.std, self.known_std, rtol=self.tolerance)
def test_station_name(self):
assert_true(hasattr(self.loc, 'station_name'))
assert_equal(self.loc.station_name, self.known_station_name)
def test_station_type(self):
assert_true(hasattr(self.loc, 'station_type'))
assert_equal(self.loc.station_type, self.known_station_type)
def test_symbology_plot_marker(self):
assert_true(hasattr(self.loc, 'plot_marker'))
assert_equal(self.loc.plot_marker, self.known_plot_marker)
def test_symbology_scatter_marker(self):
assert_true(hasattr(self.loc, 'scatter_marker'))
assert_equal(self.loc.scatter_marker, self.known_scatter_marker)
def test_symbology_color(self):
assert_true(hasattr(self.loc, 'color'))
nptest.assert_almost_equal(
np.array(self.loc.color),
self.known_color,
decimal=4
)
def test_data(self):
assert_true(hasattr(self.loc, 'data'))
assert_true(isinstance(self.loc.data, np.ndarray))
def test_full_data(self):
assert_true(hasattr(self.loc, 'full_data'))
assert_true(isinstance(self.loc.full_data, pandas.DataFrame))
def test_full_data_columns(self):
assert_true('res' in self.data.columns)
assert_true('qual' in self.data.columns)
def test_all_positive(self):
assert_true(hasattr(self.loc, 'all_positive'))
assert_equal(self.loc.all_positive, self.known_all_positive)
def test_include(self):
assert_true(hasattr(self.loc, 'include'))
assert_equal(self.loc.include, self.known_include)
def test_exclude(self):
assert_true(hasattr(self.loc, 'exclude'))
assert_equal(self.loc.exclude, self.known_exclude)
def test_include_setter(self):
self.loc.include = not self.known_include
assert_equal(self.loc.include, not self.known_include)
assert_equal(self.loc.exclude, not self.known_exclude)
def test_applyFilter_exists(self):
assert_true(hasattr(self.loc, 'applyFilter'))
def test_applyFilter_works(self):
self.loc.applyFilter(testfilter)
assert_tuple_equal(self.loc.filtered_data.shape, self.known_filtered_shape)
assert_tuple_equal(self.loc.full_data.shape, self.known_filtered_shape)
assert_tuple_equal(self.loc.data.shape, self.known_filtered_data_shape)
assert_equal(self.loc.include, self.known_filtered_include)
@raises(ValueError)
def test_applyFilter_badDataOutput(self):
def badTestFilter(data):
return 4, False
self.loc.applyFilter(badTestFilter)
@raises(ValueError)
def test_applyFilter_badIncludeOutput(self):
def badTestFilter(data):
df = pandas.Series(np.random.normal(size=37))
return df, 'JUNK'
self.loc.applyFilter(badTestFilter)
class test_Location_ROS(_base_LocationMixin):
def setup(self):
self.main_setup()
self.loc = Location(self.data, station_type='inflow', bsIter=self.known_bsIter,
rescol='res', qualcol='qual', useROS=True)
# known statistics
self.known_N = 35
self.known_ND = 7
self.known_fractionND = 0.2
self.known_NUnique = 31
self.known_analysis_space = 'lognormal'
self.known_cov = 0.597430584141
self.known_geomean = 8.08166947637
self.known_geomean_conf_interval = [6.63043647, 9.79058872]
self.known_geostd = 1.79152565521
self.known_logmean = 2.08959846955
self.known_logmean_conf_interval = [1.89167063, 2.28142159]
self.known_logstd = 0.583067578178
self.known_max = 22.97
self.known_mean = 9.59372041292
self.known_mean_conf_interval = [7.75516047, 11.45197482]
self.known_median = 7.73851689962
self.known_median_conf_interval = [5.66, 8.71]
self.known_min = 2.0
self.known_pctl10 = 4.04355908285
self.known_pctl25 = 5.615
self.known_pctl75 = 11.725
self.known_pctl90 = 19.178
self.known_pnorm = 0.00179254170507
self.known_plognorm = 0.521462738514
self.known_shapiro = [ 0.886889, 0.001789]
self.known_shapiro_log = [ 0.972679, 0.520949]
self.known_lilliefors = [ 0.18518 , 0.003756]
self.known_lilliefors_log = [ 0.091855, 0.635536]
self.known_anderson = (1.543888, [ 0.527, 0.6 , 0.719, 0.839, 0.998], [ 15. , 10. , 5. , 2.5, 1. ])
self.known_anderson_log = (0.30409633964188032, [ 0.527, 0.6 , 0.719, 0.839, 0.998], [ 15. , 10. , 5. , 2.5, 1. ])
self.known_skew = 0.869052892573
self.known_std = 5.52730949374
self.known_useRos = True
self.known_all_positive = True
self.known_filtered_shape = (27,2)
self.known_filtered_data_shape = (27,)
class test_Location_noROS(_base_LocationMixin):
def setup(self):
self.main_setup()
        # set useROS to True, then turn it off later to make sure that
        # everything propagated correctly
self.loc = Location(self.data, station_type='inflow', bsIter=self.known_bsIter,
rescol='res', qualcol='qual', useROS=True)
# turn ROS off
self.loc.useROS = False
# known statistics
self.known_N = 35
self.known_ND = 7
self.known_fractionND = 0.2
self.known_NUnique = 30
self.known_analysis_space = 'lognormal'
self.known_cov = 0.534715517405
self.known_geomean = 8.82772846655
self.known_geomean_conf_interval = [7.37541563, 10.5446939]
self.known_geostd = 1.68975502971
self.known_logmean = 2.17789772971
self.known_logmean_conf_interval = [1.99815226, 2.35562279]
self.known_logstd = 0.524583565596
self.known_max = 22.97
self.known_mean = 10.1399679143
self.known_mean_conf_interval = [8.3905866, 11.96703843]
self.known_median = 8.671859
self.known_median_conf_interval1 = [6.65, 9.85]
self.known_median_conf_interval2 = [6.65, 10.82]
self.known_min = 2.0
self.known_pctl10 = 5.0
self.known_pctl25 = 5.805
self.known_pctl75 = 11.725
self.known_pctl90 = 19.178
self.known_pnorm = 0.00323620648123
self.known_plognorm = 0.306435495615
self.known_shapiro = [ 0.896744, 0.003236]
self.known_shapiro_log = [ 0.964298, 0.306435]
self.known_lilliefors = [ 0.160353, 0.023078]
self.known_lilliefors_log = [ 0.08148, 0.84545]
self.known_anderson = (1.4392085, [ 0.527, 0.6 , 0.719, 0.839, 0.998], [ 15. , 10. , 5. , 2.5, 1. ])
self.known_anderson_log = (0.3684061, [ 0.527, 0.6 , 0.719, 0.839, 0.998], [ 15. , 10. , 5. , 2.5, 1. ])
self.known_skew = 0.853756570358
self.known_std = 5.24122841148
self.known_useRos = False
self.known_filtered_shape = (30,2)
self.known_filtered_data_shape = (30,)
def test_median_conf_interval(self):
assert_true(hasattr(self.loc, 'median_conf_interval'))
try:
nptest.assert_allclose(self.loc.median_conf_interval,
self.known_median_conf_interval1,
rtol=self.tolerance)
except AssertionError:
nptest.assert_allclose(self.loc.median_conf_interval,
self.known_median_conf_interval2,
rtol=self.tolerance)
@nottest
def setup_location(station_type):
data = testing.getTestROSData()
np.random.seed(0)
loc = Location(data, station_type=station_type, bsIter=10000,
rescol='res', qualcol='qual', useROS=True)
plt.rcdefaults()
return loc
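# The plotting tests below rely on matplotlib's image_comparison decorator:
# each generated figure is compared against the named baseline PNG images.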
@image_comparison(baseline_images=[
'test_loc_boxplot_default',
'test_loc_boxplot_patch_artists',
'test_loc_boxplot_linscale',
'test_loc_boxplot_no_mean',
'test_loc_boxplot_width',
'test_loc_boxplot_no_notch',
'test_loc_boxplot_bacteria_geomean',
'test_loc_boxplot_with_ylabel',
'test_loc_boxplot_fallback_to_vert_scatter',
'test_loc_boxplot_provided_ax',
'test_loc_boxplot_custom_position'
], extensions=['png'])
def test_location_boxplot():
xlims = {'left': 0, 'right': 2}
loc = setup_location('inflow')
loc.color = 'cornflowerblue'
loc.plot_marker = 'o'
fig1 = loc.boxplot()
fig2 = loc.boxplot(patch_artist=True, xlims=xlims)
fig3 = loc.boxplot(yscale='linear', xlims=xlims)
loc.color = 'firebrick'
loc.plot_marker = 'd'
fig4 = loc.boxplot(showmean=False, xlims=xlims)
fig5 = loc.boxplot(width=1.25, xlims=xlims)
fig6 = loc.boxplot(notch=False, xlims=xlims)
loc.color = 'forestgreen'
loc.plot_marker = 's'
fig7 = loc.boxplot(bacteria=True, xlims=xlims)
fig8 = loc.boxplot(ylabel='Test Ylabel', xlims=xlims)
fig9 = loc.boxplot(minpoints=np.inf, xlims=xlims)
fig10, ax10 = plt.subplots()
fig10 = loc.boxplot(ax=ax10, xlims=xlims)
assert_true(isinstance(fig10, plt.Figure))
assert_raises(ValueError, loc.boxplot, ax='junk')
fig11 = loc.boxplot(pos=1.5, xlims=xlims)
@image_comparison(baseline_images=[
'test_loc_probplot_default',
'test_loc_probplot_provided_ax',
'test_loc_probplot_yscale_linear',
'test_loc_probplot_ppax',
'test_loc_probplot_qqax',
'test_loc_probplot_ylabel',
'test_loc_probplot_clear_yticks',
'test_loc_probplot_no_managegrid',
'test_loc_probplot_no_rotate_xticklabels',
'test_loc_probplot_no_set_xlims',
'test_loc_probplot_plotopts1',
'test_loc_probplot_plotopts2',
], extensions=['png'])
def test_location_probplot():
loc = setup_location('inflow')
loc.color = 'cornflowerblue'
loc.plot_marker = 'o'
fig1 = loc.probplot()
fig2, ax2 = plt.subplots()
fig2 = loc.probplot(ax=ax2)
assert_true(isinstance(fig2, plt.Figure))
assert_raises(ValueError, loc.probplot, ax='junk')
fig3 = loc.probplot(yscale='linear')
fig4 = loc.probplot(axtype='pp')
fig5 = loc.probplot(axtype='qq')
loc.color = 'firebrick'
loc.plot_marker = 'd'
fig6 = loc.probplot(ylabel='test ylabel')
fig7 = loc.probplot(clearYLabels=True)
fig8 = loc.probplot(managegrid=False)
loc.color = 'forestgreen'
loc.plot_marker = 'd'
fig10 = loc.probplot(rotateticklabels=False)
fig11 = loc.probplot(setxlimits=False)
fig12 = loc.probplot(markersize=10, linestyle='--', color='blue', markerfacecolor='none', markeredgecolor='green')
fig13 = loc.probplot(markeredgewidth=2, markerfacecolor='none', markeredgecolor='green')
@image_comparison(baseline_images=[
'test_loc_statplot_custom_position',
'test_loc_statplot_yscale_linear',
'test_loc_statplot_no_notch',
'test_loc_statplot_no_mean',
'test_loc_statplot_custom_width',
'test_loc_statplot_bacteria_true',
'test_loc_statplot_ylabeled',
'test_loc_statplot_qq',
'test_loc_statplot_pp',
'test_loc_statplot_patch_artist',
], extensions=['png'])
def test_location_statplot():
loc = setup_location('inflow')
loc.color = 'cornflowerblue'
loc.plot_marker = 'o'
fig1 = loc.statplot(pos=1.25)
fig2 = loc.statplot(yscale='linear')
fig3 = loc.statplot(notch=False)
loc.color = 'firebrick'
loc.plot_marker = 'd'
fig4 = loc.statplot(showmean=False)
fig5 = loc.statplot(width=1.5)
fig6 = loc.statplot(bacteria=True)
fig7 = loc.statplot(ylabel='Test Y-Label')
loc.color = 'forestgreen'
loc.plot_marker = 's'
fig8 = loc.statplot(axtype='qq')
fig9 = loc.statplot(axtype='pp')
fig10 = loc.statplot(patch_artist=True)
assert_true(fig10, plt.Figure)
@image_comparison(baseline_images=[
'test_loc_vertical_scatter_default',
'test_loc_vertical_scatter_provided_ax',
'test_loc_vertical_scatter_pos',
'test_loc_vertical_scatter_nojitter',
'test_loc_vertical_scatter_alpha',
'test_loc_vertical_scatter_ylabel',
'test_loc_vertical_scatter_yscale_linear',
'test_loc_vertical_scatter_not_ignoreROS',
'test_loc_vertical_scatter_markersize',
], extensions=['png'])
def test_location_verticalScatter():
xlims = {'left': 0, 'right': 2}
loc = setup_location('inflow')
loc.color = 'cornflowerblue'
loc.plot_marker = 'o'
fig1 = loc.verticalScatter()
fig2, ax2 = plt.subplots()
fig2 = loc.verticalScatter(ax=ax2, xlims=xlims)
assert_true(isinstance(fig2, plt.Figure))
assert_raises(ValueError, loc.verticalScatter, ax='junk', xlims=xlims)
fig3 = loc.verticalScatter(pos=1.25, xlims=xlims)
fig4 = loc.verticalScatter(jitter=0.0, xlims=xlims)
fig5 = loc.verticalScatter(alpha=0.25, xlims=xlims)
loc.color = 'firebrick'
loc.plot_marker = 's'
loc.verticalScatter(ylabel='Test Y-Label', xlims=xlims)
loc.verticalScatter(yscale='linear', xlims=xlims)
loc.verticalScatter(ignoreROS=False, xlims=xlims)
loc.verticalScatter(markersize=8, xlims=xlims)
class test_Dataset(object):
def setup(self):
self.maxDiff = None
# basic test data
self.tolerance = 0.05
self.known_bsIter = 750
in_data = testing.getTestROSData()
in_data['res'] += 3
out_data = testing.getTestROSData()
out_data['res'] -= 1.5
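        # Shifting the influent up by 3 and the effluent down by 1.5 makes the
        # paired series differ by a constant 4.5, which is why the Theil
        # slopes below are 1.0 with an intercept of -4.5.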
self.influent = Location(in_data, station_type='inflow', bsIter=self.known_bsIter,
rescol='res', qualcol='qual', useROS=False)
self.effluent = Location(out_data, station_type='outflow', bsIter=self.known_bsIter,
rescol='res', qualcol='qual', useROS=False)
self.ds = Dataset(self.influent, self.effluent)
self.fig, self.ax = plt.subplots()
self.known_dumpFile = None
self.known_kendall_stats = (1.000000e+00, 2.916727e-17)
self.known_kendall_tau = self.known_kendall_stats[0]
self.known_kendall_p = self.known_kendall_stats[1]
self.known_mannwhitney_stats = (2.980000e+02, 1.125761e-04)
self.known_mannwhitney_u = self.known_mannwhitney_stats[0]
self.known_mannwhitney_p = self.known_mannwhitney_stats[1] * 2
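        # Doubling presumably converts the stored one-sided p-value into the
        # two-sided value expected from the Dataset's mannwhitney_p property.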
self.known_spearman_stats = (1.0, 0.0)
self.known_spearman_rho = self.known_spearman_stats[0]
self.known_spearman_p = self.known_spearman_stats[1]
self.known_theil_stats = (1.0, -4.5, 1.0, 1.0)
        self.known_theil_medslope = self.known_theil_stats[0]
        self.known_theil_intercept = self.known_theil_stats[1]
        self.known_theil_loslope = self.known_theil_stats[2]
        self.known_theil_hislope = self.known_theil_stats[3]
self.known_wilcoxon_stats = (0.0, 2.4690274207037342e-07)
self.known_wilcoxon_z = self.known_wilcoxon_stats[0]
self.known_wilcoxon_p = self.known_wilcoxon_stats[1]
self.known__non_paired_stats = True
self.known__paired_stats = True
self.known_definition = {'attr1': 'test1', 'attr2': 'test2'}
self.known_include = True
self.known_exclude = not self.known_include
self.known_medianCIsOverlap = False
def teardown(self):
self.ax.cla()
self.fig.clf()
plt.close('all')
@nottest
def makePath(self, filename):
return os.path.join(self.prefix, filename)
def test_data(self):
assert_true(hasattr(self.ds, 'data'))
assert_true(isinstance(self.ds.data, pandas.DataFrame))
def test_paired_data(self):
assert_true(hasattr(self.ds, 'paired_data'))
assert_true(isinstance(self.ds.paired_data, pandas.DataFrame))
def test__non_paired_stats(self):
assert_true(hasattr(self.ds, '_non_paired_stats'))
assert_equal(self.ds._non_paired_stats, self.known__non_paired_stats)
def test__paired_stats(self):
assert_true(hasattr(self.ds, '_paired_stats'))
assert_equal(self.ds._paired_stats, self.known__paired_stats)
def test_name(self):
assert_true(hasattr(self.ds, 'name'))
assert_true(self.ds.name is None)
def test_name_set(self):
assert_true(hasattr(self.ds, 'name'))
testname = 'Test Name'
self.ds.name = testname
assert_equal(self.ds.name, testname)
    def test_definition_default(self):
assert_true(hasattr(self.ds, 'definition'))
assert_dict_equal(self.ds.definition, {})
    def test_definition_set(self):
assert_true(hasattr(self.ds, 'definition'))
self.ds.definition = self.known_definition
assert_dict_equal(self.ds.definition, self.known_definition)
def test_include(self):
assert_true(hasattr(self.ds, 'include'))
assert_equal(self.ds.include, self.known_include)
def test_exclude(self):
assert_true(hasattr(self.ds, 'exclude'))
assert_equal(self.ds.exclude, self.known_exclude)
def test_wilcoxon_z(self):
assert_true(hasattr(self.ds, 'wilcoxon_z'))
nptest.assert_allclose(self.ds.wilcoxon_z, self.known_wilcoxon_z, rtol=self.tolerance)
def test_wilcoxon_p(self):
assert_true(hasattr(self.ds, 'wilcoxon_p'))
nptest.assert_allclose(self.ds.wilcoxon_p, self.known_wilcoxon_p, rtol=self.tolerance)
def test_mannwhitney_u(self):
assert_true(hasattr(self.ds, 'mannwhitney_u'))
nptest.assert_allclose(self.ds.mannwhitney_u, self.known_mannwhitney_u, rtol=self.tolerance)
def test_mannwhitney_p(self):
assert_true(hasattr(self.ds, 'mannwhitney_p'))
nptest.assert_allclose(self.ds.mannwhitney_p, self.known_mannwhitney_p, rtol=self.tolerance)
def test_kendall_tau(self):
assert_true(hasattr(self.ds, 'kendall_tau'))
nptest.assert_allclose(self.ds.kendall_tau, self.known_kendall_tau, rtol=self.tolerance)
def test_kendall_p(self):
assert_true(hasattr(self.ds, 'kendall_p'))
nptest.assert_allclose(self.ds.kendall_p, self.known_kendall_p, rtol=self.tolerance)
def test_spearman_rho(self):
assert_true(hasattr(self.ds, 'spearman_rho'))
nptest.assert_allclose(self.ds.spearman_rho, self.known_spearman_rho, rtol=self.tolerance)
def test_spearman_p(self):
assert_true(hasattr(self.ds, 'spearman_p'))
nptest.assert_allclose(self.ds.spearman_p, self.known_spearman_p, rtol=self.tolerance)
def test_theil_medslope(self):
assert_true(hasattr(self.ds, 'theil_medslope'))
nptest.assert_allclose(self.ds.theil_medslope, self.known_theil_medslope, rtol=self.tolerance)
def test_theil_intercept(self):
assert_true(hasattr(self.ds, 'theil_intercept'))
nptest.assert_allclose(self.ds.theil_intercept, self.known_theil_intercept, rtol=self.tolerance)
def test_theil_loslope(self):
assert_true(hasattr(self.ds, 'theil_loslope'))
nptest.assert_allclose(self.ds.theil_loslope, self.known_theil_loslope, rtol=self.tolerance)
    def test_theil_hislope(self):
assert_true(hasattr(self.ds, 'theil_hislope'))
nptest.assert_allclose(self.ds.theil_hislope, self.known_theil_hislope, rtol=self.tolerance)
def test_wilcoxon_stats(self):
assert_true(hasattr(self.ds, '_wilcoxon_stats'))
nptest.assert_allclose(self.ds._wilcoxon_stats, self.known_wilcoxon_stats, rtol=self.tolerance)
def test_mannwhitney_stats(self):
assert_true(hasattr(self.ds, '_mannwhitney_stats'))
nptest.assert_allclose(self.ds._mannwhitney_stats, self.known_mannwhitney_stats, rtol=self.tolerance)
def test_kendall_stats(self):
assert_true(hasattr(self.ds, '_kendall_stats'))
nptest.assert_allclose(self.ds._kendall_stats, self.known_kendall_stats, rtol=self.tolerance)
def test_spearman_stats(self):
assert_true(hasattr(self.ds, '_spearman_stats'))
nptest.assert_allclose(self.ds._spearman_stats, self.known_spearman_stats, rtol=self.tolerance)
def test_theil_stats(self):
assert_true(hasattr(self.ds, '_theil_stats'))
nptest.assert_almost_equal(self.ds._theil_stats['medslope'],
self.known_theil_stats[0],
decimal=4)
nptest.assert_almost_equal(self.ds._theil_stats['intercept'],
self.known_theil_stats[1],
decimal=4)
nptest.assert_almost_equal(self.ds._theil_stats['loslope'],
self.known_theil_stats[2],
decimal=4)
nptest.assert_almost_equal(self.ds._theil_stats['hislope'],
self.known_theil_stats[3],
decimal=4)
assert_true(not self.ds._theil_stats['is_inverted'])
assert_true('estimated_effluent' in list(self.ds._theil_stats.keys()))
assert_true('estimate_error' in list(self.ds._theil_stats.keys()))
def test_theil_effluent_ties(self):
cache_theil_stats = self.ds._theil_stats
self.ds.useROS = True
self.ds.effluent.useROS = False # restores ties in the effl data
assert_true(self.ds._theil_stats['is_inverted'])
def test_medianCIsOverlap(self):
assert_equal(self.known_medianCIsOverlap, self.ds.medianCIsOverlap)
def test__repr__normal(self):
        repr(self.ds)
def test_repr__None(self):
self.ds.definition = None
        repr(self.ds)
def test_reset_useROS(self):
#warnings.simplefilter("error")
self.ds.useROS = True
infl_ros_mean = self.ds.influent.mean
effl_ros_mean = self.ds.effluent.mean
self.ds.useROS = False
infl_raw_mean = self.ds.influent.mean
effl_raw_mean = self.ds.effluent.mean
assert_true(infl_ros_mean != infl_raw_mean)
assert_true(effl_ros_mean != effl_raw_mean)
@nottest
def setup_dataset(extra_NDs=False):
np.random.seed(0)
in_data = testing.getTestROSData()
in_data['res'] += 3
out_data = testing.getTestROSData()
out_data['res'] -= 1.5
if extra_NDs:
in_data.loc[[0, 1, 2], 'qual'] = 'ND'
out_data.loc[[14, 15, 16], 'qual'] = 'ND'
influent = Location(in_data, station_type='inflow', bsIter=10000,
rescol='res', qualcol='qual', useROS=False)
effluent = Location(out_data, station_type='outflow', bsIter=10000,
rescol='res', qualcol='qual', useROS=False)
ds = Dataset(influent, effluent, name='Test Dataset')
plt.rcdefaults()
return ds
@image_comparison(baseline_images=[
'test_ds_boxplot_default',
'test_ds_boxplot_patch_artists',
'test_ds_boxplot_linscale',
'test_ds_boxplot_no_mean',
'test_ds_boxplot_width',
'test_ds_boxplot_no_notch',
'test_ds_boxplot_bacteria_geomean',
'test_ds_boxplot_with_ylabel',
'test_ds_boxplot_fallback_to_vert_scatter',
'test_ds_boxplot_provided_ax',
'test_ds_boxplot_custom_position',
'test_ds_boxplot_custom_offset',
'test_ds_boxplot_single_tick',
'test_ds_boxplot_single_tick_no_name',
], extensions=['png'])
def test_dataset_boxplot():
xlims = {'left': 0, 'right': 2}
ds = setup_dataset()
fig1 = ds.boxplot()
fig2 = ds.boxplot(patch_artist=True, xlims=xlims)
fig3 = ds.boxplot(yscale='linear', xlims=xlims)
fig4 = ds.boxplot(showmean=False, xlims=xlims)
fig5 = ds.boxplot(width=1.25, xlims=xlims)
fig6 = ds.boxplot(notch=False, xlims=xlims)
fig7 = ds.boxplot(bacteria=True, xlims=xlims)
fig8 = ds.boxplot(ylabel='Test Ylabel', xlims=xlims)
fig9 = ds.boxplot(minpoints=np.inf, xlims=xlims)
fig10, ax10 = plt.subplots()
fig10 = ds.boxplot(ax=ax10, xlims=xlims)
assert_true(isinstance(fig10, plt.Figure))
assert_raises(ValueError, ds.boxplot, ax='junk')
fig11 = ds.boxplot(pos=1.5, xlims=xlims)
fig12 = ds.boxplot(offset=0.75, xlims=xlims)
fig13 = ds.boxplot(bothTicks=False, xlims=xlims)
ds.name = None
fig14 = ds.boxplot(bothTicks=False, xlims=xlims)
@image_comparison(baseline_images=[
'test_ds_probplot_default',
'test_ds_probplot_provided_ax',
'test_ds_probplot_yscale_linear',
'test_ds_probplot_ppax',
'test_ds_probplot_qqax',
'test_ds_probplot_ylabel',
'test_ds_probplot_clear_yticks',
'test_ds_probplot_no_managegrid',
'test_ds_probplot_no_rotate_xticklabels',
'test_ds_probplot_no_set_xlims',
], extensions=['png'])
def test_dataset_probplot():
ds = setup_dataset()
fig1 = ds.probplot()
fig2, ax2 = plt.subplots()
fig2 = ds.probplot(ax=ax2)
assert_true(isinstance(fig2, plt.Figure))
assert_raises(ValueError, ds.probplot, ax='junk')
fig3 = ds.probplot(yscale='linear')
fig4 = ds.probplot(axtype='pp')
fig5 = ds.probplot(axtype='qq')
fig6 = ds.probplot(ylabel='test ylabel')
fig7 = ds.probplot(clearYLabels=True)
fig8 = ds.probplot(managegrid=False)
fig10 = ds.probplot(rotateticklabels=False)
fig11 = ds.probplot(setxlimits=False)
@image_comparison(baseline_images=[
'test_ds_statplot_custom_position',
'test_ds_statplot_yscale_linear',
'test_ds_statplot_no_notch',
'test_ds_statplot_no_mean',
'test_ds_statplot_custom_width',
'test_ds_statplot_bacteria_true',
'test_ds_statplot_ylabeled',
'test_ds_statplot_qq',
'test_ds_statplot_pp',
'test_ds_statplot_patch_artist',
], extensions=['png'])
def test_dataset_statplot():
ds = setup_dataset()
fig1 = ds.statplot(pos=1.25)
fig2 = ds.statplot(yscale='linear')
fig3 = ds.statplot(notch=False)
fig4 = ds.statplot(showmean=False)
fig5 = ds.statplot(width=1.5)
fig6 = ds.statplot(bacteria=True)
fig7 = ds.statplot(ylabel='Test Y-Label')
fig8 = ds.statplot(axtype='qq')
fig9 = ds.statplot(axtype='pp')
fig10 = ds.statplot(patch_artist=True)
    assert_true(isinstance(fig10, plt.Figure))
@image_comparison(baseline_images=[
'test_ds_scatterplot_default',
'test_ds_scatterplot_provided_ax',
'test_ds_scatterplot_xscale_linear',
'test_ds_scatterplot_xyscale_linear',
'test_ds_scatterplot_yscale_linear',
'test_ds_scatterplot_xlabel',
'test_ds_scatterplot_ylabel',
'test_ds_scatterplot_no_xlabel',
'test_ds_scatterplot_no_ylabel',
'test_ds_scatterplot_no_legend',
'test_ds_scatterplot_one2one',
'test_ds_scatterplot_useROS',
], extensions=['png'])
def test_dataset_scatterplot():
ds = setup_dataset(extra_NDs=True)
fig1 = ds.scatterplot()
fig2, ax2 = plt.subplots()
fig2 = ds.scatterplot(ax=ax2)
assert_true(isinstance(fig2, plt.Figure))
assert_raises(ValueError, ds.scatterplot, ax='junk')
fig3 = ds.scatterplot(xscale='linear')
fig5 = ds.scatterplot(xscale='linear', yscale='linear')
fig4 = ds.scatterplot(yscale='linear')
fig6 = ds.scatterplot(xlabel='X-label')
fig7 = ds.scatterplot(ylabel='Y-label')
fig8 = ds.scatterplot(xlabel='')
fig9 = ds.scatterplot(ylabel='')
fig10 = ds.scatterplot(showlegend=False)
    fig11 = ds.scatterplot(one2one=True)
    fig12 = ds.scatterplot(useROS=True)
@image_comparison(baseline_images=[
'test_ds__plot_NDs_both',
'test_ds__plot_NDs_effluent',
'test_ds__plot_NDs_influent',
'test_ds__plot_NDs_neither',
], extensions=['png'])
def test_dataset__plot_NDs():
ds = setup_dataset(extra_NDs=True)
markerkwargs = dict(
linestyle='none',
markerfacecolor='black',
markeredgecolor='white',
markeredgewidth=0.5,
markersize=6,
zorder=10,
)
fig1, ax1 = plt.subplots()
ds._plot_nds(ax1, which='both', marker='d', **markerkwargs)
fig2, ax2 = plt.subplots()
ds._plot_nds(ax2, which='effluent', marker='<', **markerkwargs)
fig3, ax3 = plt.subplots()
ds._plot_nds(ax3, which='influent', marker='v', **markerkwargs)
fig4, ax4 = plt.subplots()
ds._plot_nds(ax4, which='neither', marker='o', **markerkwargs)
@image_comparison(baseline_images=[
'test_ds_joint_hist',
'test_ds_joint_kde',
'test_ds_joint_rug',
'test_ds_joint_kde_rug_hist',
], extensions=['png'])
def test_dataset_jointplot():
ds = setup_dataset(extra_NDs=True)
def do_jointplots(ds, hist=False, kde=False, rug=False):
jg = ds.jointplot(hist=hist, kde=kde, rug=rug)
assert_true(isinstance(jg, seaborn.JointGrid))
return jg.fig
fig1 = do_jointplots(ds, hist=True)
fig2 = do_jointplots(ds, kde=True)
fig3 = do_jointplots(ds, rug=True)
fig4 = do_jointplots(ds, hist=True, kde=True, rug=True)
@nottest
def make_dc_data():
np.random.seed(0)
dl_map = {
'A': 0.1, 'B': 0.2, 'C': 0.3, 'D': 0.4,
'E': 0.1, 'F': 0.2, 'G': 0.3, 'H': 0.4,
}
index = pandas.MultiIndex.from_product([
list('ABCDEFGH'),
list('1234567'),
['GA', 'AL', 'OR', 'CA'],
['Inflow', 'Outflow', 'Reference']
], names=['param', 'bmp', 'state', 'loc'])
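    # 8 parameters x 7 BMPs x 4 states x 3 monitoring locations = 672 rows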
array = np.random.lognormal(mean=0.75, sigma=1.25, size=len(index))
data = pandas.DataFrame(data=array, index=index, columns=['res'])
data['DL'] = data.apply(
lambda r: dl_map.get(r.name[0]),
axis=1
)
data['res'] = data.apply(
lambda r: dl_map.get(r.name[0]) if r['res'] < r['DL'] else r['res'],
axis=1
)
data['qual'] = data.apply(
lambda r: 'ND' if r['res'] <= r['DL'] else '=',
axis=1
)
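    # At this point, any result below its detection limit has been bumped up
    # to the DL and flagged as a non-detect ('ND').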
return data
class _base_DataCollectionMixin(object):
@nottest
def _base_setup(self):
self.known_raw_rescol = 'res'
self.known_roscol = 'ros_res'
self.known_qualcol = 'qual'
self.known_stationcol = 'loc'
self.known_paramcol = 'param'
self.known_ndval = 'ND'
def teardown(self):
plt.close('all')
def test__raw_rescol(self):
assert_equal(self.dc._raw_rescol, self.known_raw_rescol)
def test_data(self):
assert_true(isinstance(self.dc.data, pandas.DataFrame))
def test_roscol(self):
assert_equal(self.dc.roscol, self.known_roscol)
def test_rescol(self):
assert_equal(self.dc.rescol, self.known_rescol)
def test_qualcol(self):
assert_equal(self.dc.qualcol, self.known_qualcol)
    def test_stationcol(self):
assert_equal(self.dc.stationcol, self.known_stationcol)
def test_paramcol(self):
assert_equal(self.dc.paramcol, self.known_paramcol)
def test_ndval(self):
assert_equal(self.dc.ndval, self.known_ndval)
def test_bsIter(self):
assert_equal(self.dc.bsIter, self.known_bsIter)
def test_groupby(self):
assert_equal(self.dc.groupby, self.known_groupby)
def test_columns(self):
assert_equal(self.dc.columns, self.known_columns)
def test_filterfxn(self):
assert_true(hasattr(self.dc, 'filterfxn'))
def test_tidy_exists(self):
assert_true(hasattr(self.dc, 'tidy'))
def test_tidy_type(self):
assert_true(isinstance(self.dc.tidy, pandas.DataFrame))
def test_tidy_cols(self):
tidycols = self.dc.tidy.columns.tolist()
knowncols = self.known_columns + [self.known_roscol]
assert_list_equal(sorted(tidycols), sorted(knowncols))
def test_means(self):
np.random.seed(0)
pdtest.assert_frame_equal(
np.round(self.dc.means, 3),
self.known_means,
check_names=False
)
def test_medians(self):
np.random.seed(0)
pdtest.assert_frame_equal(
np.round(self.dc.medians, 3),
self.known_medians,
check_names=False
)
def test__generic_stat(self):
np.random.seed(0)
pdtest.assert_frame_equal(
np.round(self.dc._generic_stat(np.min), 3),
self.known_genericstat,
check_names=False
)
class test_DataCollection_baseline(_base_DataCollectionMixin):
def setup(self):
self._base_setup()
self.dc = DataCollection(make_dc_data(), paramcol='param',
stationcol='loc')
self.known_rescol = 'ros_res'
self.known_groupby = ['loc', 'param']
self.known_columns = ['loc', 'param', 'res', 'qual']
self.known_bsIter = 10000
self.known_means = pandas.DataFrame({
('Reference', 'upper'): {
'A': 3.859, 'D': 5.586, 'F': 9.406, 'C': 6.346,
'B': 7.387, 'E': 4.041, 'G': 5.619, 'H': 3.402
},
('Inflow', 'upper'): {
'A': 9.445, 'D': 7.251, 'F': 8.420, 'C': 6.157,
'B': 10.362, 'E': 5.542, 'G': 5.581, 'H': 4.687
},
('Inflow', 'lower'): {
'A': 2.781, 'D': 1.798, 'F': 2.555, 'C': 1.665,
'B': 3.946, 'E': 2.427, 'G': 2.266, 'H': 1.916
},
('Reference', 'lower'): {
'A': 1.403, 'D': 2.203, 'F': 1.486, 'C': 1.845,
'B': 2.577, 'E': 1.240, 'G': 1.829, 'H': 1.710
},
('Outflow', 'stat'): {
'A': 4.804, 'D': 4.457, 'F': 3.503, 'C': 3.326,
'B': 6.284, 'E': 4.023, 'G': 2.759, 'H': 2.629
},
('Outflow', 'lower'): {
'A': 2.715, 'D': 1.877, 'F': 2.479, 'C': 2.347,
'B': 3.610, 'E': 2.479, 'G': 1.644, 'H': 1.572
},
('Reference', 'stat'): {
'A': 2.550, 'D': 3.829, 'F': 4.995, 'C': 3.882,
'B': 4.917, 'E': 2.605, 'G': 3.653, 'H': 2.515
},
('Inflow', 'stat'): {
'A': 5.889, 'D': 4.216, 'F': 5.405, 'C': 3.668,
'B': 6.944, 'E': 3.872, 'G': 3.912, 'H': 3.248
},
('Outflow', 'upper'): {
'A': 7.080, 'D': 7.425, 'F': 4.599, 'C': 4.318,
'B': 9.160, 'E': 5.606, 'G': 3.880, 'H': 3.824
}
})
self.known_medians = pandas.DataFrame({
('Reference', 'upper'): {
'A': 2.051, 'D': 3.719, 'F': 2.131, 'C': 2.893,
'B': 4.418, 'E': 1.883, 'G': 2.513, 'H': 2.846
},
('Inflow', 'upper'): {
'A': 3.485, 'D': 2.915, 'F': 4.345, 'C': 2.012,
'B': 6.327, 'E': 4.113, 'G': 3.841, 'H': 2.829
},
('Inflow', 'lower'): {
'A': 1.329, 'D': 0.766, 'F': 1.178, 'C': 0.924,
'B': 1.691, 'E': 1.302, 'G': 0.735, 'H': 1.305
},
('Reference', 'lower'): {
'A': 0.691, 'D': 1.190, 'F': 0.752, 'C': 0.938,
'B': 0.833, 'E': 0.612, 'G': 1.137, 'H': 0.976
},
('Outflow', 'stat'): {
'A': 2.515, 'D': 1.627, 'F': 2.525, 'C': 2.860,
'B': 2.871, 'E': 2.364, 'G': 1.525, 'H': 1.618
},
('Outflow', 'lower'): {
'A': 1.234, 'D': 0.877, 'F': 1.500, 'C': 1.272,
'B': 0.889, 'E': 1.317, 'G': 0.817, 'H': 0.662
},
('Reference', 'stat'): {
'A': 1.313, 'D': 2.314, 'F': 1.564, 'C': 1.856,
'B': 2.298, 'E': 1.243, 'G': 1.987, 'H': 2.003
},
('Inflow', 'stat'): {
'A': 2.712, 'D': 1.934, 'F': 2.671, 'C': 1.439,
'B': 3.848, 'E': 2.789, 'G': 2.162, 'H': 1.849
},
('Outflow', 'upper'): {
'A': 3.554, 'D': 1.930, 'F': 4.227, 'C': 3.736,
'B': 5.167, 'E': 3.512, 'G': 2.421, 'H': 2.387
}
})
self.known_genericstat = pandas.DataFrame({
('Inflow', 'stat'): {
'H': 0.2100, 'A': 0.1780, 'B': 0.4330, 'C': 0.1160,
'D': 0.1070, 'E': 0.2360, 'F': 0.1570, 'G': 0.0960
},
('Outflow', 'lower'): {
'H': 0.1280, 'A': 0.3440, 'B': 0.4090, 'C': 0.2190,
'D': 0.1180, 'E': 0.1260, 'F': 0.3000, 'G': 0.1240
},
('Inflow', 'lower'): {
'H': 0.2100, 'A': 0.1780, 'B': 0.4330, 'C': 0.1160,
'D': 0.1070, 'E': 0.2360, 'F': 0.1570, 'G': 0.0960
},
('Reference', 'upper'): {
'H': 0.4620, 'A': 0.4840, 'B': 0.4580, 'C': 0.3570,
'D': 0.5630, 'E': 0.2970, 'F': 0.3600, 'G': 0.4000
},
('Reference', 'lower'): {
'H': 0.2210, 'A': 0.1190, 'B': 0.3070, 'C': 0.1350,
'D': 0.2090, 'E': 0.2110, 'F': 0.0990, 'G': 0.1890
},
('Reference', 'stat'): {
'H': 0.2210, 'A': 0.1190, 'B': 0.3070, 'C': 0.1350,
'D': 0.2090, 'E': 0.2110, 'F': 0.0990, 'G': 0.1890
},
('Inflow', 'upper'): {
'H': 0.6600, 'A': 0.2760, 'B': 1.2250, 'C': 0.4990,
'D': 0.4130, 'E': 0.3510, 'F': 0.5930, 'G': 0.2790
},
('Outflow', 'stat'): {
'H': 0.1280, 'A': 0.3440, 'B': 0.4090, 'C': 0.2190,
'D': 0.1180, 'E': 0.1260, 'F': 0.3000, 'G': 0.1240
},
('Outflow', 'upper'): {
'H': 0.3150, 'A': 0.5710, 'B': 0.5490, 'C': 0.4920,
'D': 0.4790, 'E': 0.7710, 'F': 0.6370, 'G': 0.3070
}
})
| bsd-3-clause |
bartosh/zipline | tests/test_continuous_futures.py | 3 | 62530 | #
# Copyright 2016 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import deque
from functools import partial
from textwrap import dedent
from numpy import (
arange,
array,
int64,
full,
repeat,
tile,
)
from numpy.testing import assert_almost_equal
import pandas as pd
from pandas import Timestamp, DataFrame
from zipline import TradingAlgorithm
from zipline.assets.continuous_futures import (
OrderedContracts,
delivery_predicate
)
from zipline.data.minute_bars import FUTURES_MINUTES_PER_DAY
from zipline.errors import SymbolNotFound
from zipline.testing.fixtures import (
WithAssetFinder,
WithCreateBarData,
WithDataPortal,
WithBcolzFutureMinuteBarReader,
WithSimParams,
ZiplineTestCase,
)
class ContinuousFuturesTestCase(WithCreateBarData,
WithDataPortal,
WithSimParams,
WithBcolzFutureMinuteBarReader,
ZiplineTestCase):
START_DATE = pd.Timestamp('2015-01-05', tz='UTC')
END_DATE = pd.Timestamp('2016-10-19', tz='UTC')
SIM_PARAMS_START = pd.Timestamp('2016-01-26', tz='UTC')
SIM_PARAMS_END = pd.Timestamp('2016-01-28', tz='UTC')
SIM_PARAMS_DATA_FREQUENCY = 'minute'
TRADING_CALENDAR_STRS = ('us_futures',)
TRADING_CALENDAR_PRIMARY_CAL = 'us_futures'
TRADING_ENV_FUTURE_CHAIN_PREDICATES = {
'BZ': partial(delivery_predicate, set(['F', 'H'])),
}
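    # The BZ predicate above keeps only the 'F' and 'H' delivery months, so
    # contracts such as BZG16 are expected to be skipped when rolling (see
    # test_history_sid_session_delivery_predicate below).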
@classmethod
    def make_root_symbols_info(cls):
return pd.DataFrame({
'root_symbol': ['FO', 'BZ', 'MA', 'DF'],
'root_symbol_id': [1, 2, 3, 4],
'exchange': ['CME', 'CME', 'CME', 'CME']})
@classmethod
    def make_futures_info(cls):
fo_frame = DataFrame({
'symbol': ['FOF16', 'FOG16', 'FOH16', 'FOJ16', 'FOK16', 'FOF22',
'FOG22'],
'sid': range(0, 7),
'root_symbol': ['FO'] * 7,
'asset_name': ['Foo'] * 7,
'start_date': [Timestamp('2015-01-05', tz='UTC'),
Timestamp('2015-02-05', tz='UTC'),
Timestamp('2015-03-05', tz='UTC'),
Timestamp('2015-04-05', tz='UTC'),
Timestamp('2015-05-05', tz='UTC'),
Timestamp('2021-01-05', tz='UTC'),
Timestamp('2015-01-05', tz='UTC')],
'end_date': [Timestamp('2016-08-19', tz='UTC'),
Timestamp('2016-09-19', tz='UTC'),
Timestamp('2016-10-19', tz='UTC'),
Timestamp('2016-11-19', tz='UTC'),
Timestamp('2022-08-19', tz='UTC'),
Timestamp('2022-09-19', tz='UTC'),
                         # Set the last contract's end date (which is the
                         # last date for which there is data) to a value
                         # that is within the range of the dates being
                         # tested. This models real-life scenarios where
                         # the end date of the furthest-out contract is
                         # not necessarily the greatest end date of all
                         # contracts in the chain.
Timestamp('2015-02-05', tz='UTC')],
'notice_date': [Timestamp('2016-01-27', tz='UTC'),
Timestamp('2016-02-26', tz='UTC'),
Timestamp('2016-03-24', tz='UTC'),
Timestamp('2016-04-26', tz='UTC'),
Timestamp('2016-05-26', tz='UTC'),
Timestamp('2022-01-26', tz='UTC'),
Timestamp('2022-02-26', tz='UTC')],
'expiration_date': [Timestamp('2016-01-27', tz='UTC'),
Timestamp('2016-02-26', tz='UTC'),
Timestamp('2016-03-24', tz='UTC'),
Timestamp('2016-04-26', tz='UTC'),
Timestamp('2016-05-26', tz='UTC'),
Timestamp('2022-01-26', tz='UTC'),
Timestamp('2022-02-26', tz='UTC')],
'auto_close_date': [Timestamp('2016-01-27', tz='UTC'),
Timestamp('2016-02-26', tz='UTC'),
Timestamp('2016-03-24', tz='UTC'),
Timestamp('2016-04-26', tz='UTC'),
Timestamp('2016-05-26', tz='UTC'),
Timestamp('2022-01-26', tz='UTC'),
Timestamp('2022-02-26', tz='UTC')],
'tick_size': [0.001] * 7,
'multiplier': [1000.0] * 7,
'exchange': ['CME'] * 7,
})
# BZ is set up to test chain predicates, for futures such as PL which
# only use a subset of contracts for the roll chain.
bz_frame = DataFrame({
'symbol': ['BZF16', 'BZG16', 'BZH16'],
'root_symbol': ['BZ'] * 3,
'asset_name': ['Baz'] * 3,
'sid': range(10, 13),
'start_date': [Timestamp('2005-01-01', tz='UTC'),
Timestamp('2005-01-21', tz='UTC'),
Timestamp('2005-01-21', tz='UTC')],
'end_date': [Timestamp('2016-08-19', tz='UTC'),
Timestamp('2016-11-21', tz='UTC'),
Timestamp('2016-10-19', tz='UTC')],
'notice_date': [Timestamp('2016-01-11', tz='UTC'),
Timestamp('2016-02-08', tz='UTC'),
Timestamp('2016-03-09', tz='UTC')],
'expiration_date': [Timestamp('2016-01-11', tz='UTC'),
Timestamp('2016-02-08', tz='UTC'),
Timestamp('2016-03-09', tz='UTC')],
'auto_close_date': [Timestamp('2016-01-11', tz='UTC'),
Timestamp('2016-02-08', tz='UTC'),
Timestamp('2016-03-09', tz='UTC')],
'tick_size': [0.001] * 3,
'multiplier': [1000.0] * 3,
'exchange': ['CME'] * 3,
})
        # MA is set up to test a contract which has no active volume.
ma_frame = DataFrame({
'symbol': ['MAG16', 'MAH16', 'MAJ16'],
'root_symbol': ['MA'] * 3,
'asset_name': ['Most Active'] * 3,
'sid': range(14, 17),
'start_date': [Timestamp('2005-01-01', tz='UTC'),
Timestamp('2005-01-21', tz='UTC'),
Timestamp('2005-01-21', tz='UTC')],
'end_date': [Timestamp('2016-08-19', tz='UTC'),
Timestamp('2016-11-21', tz='UTC'),
Timestamp('2016-10-19', tz='UTC')],
'notice_date': [Timestamp('2016-02-17', tz='UTC'),
Timestamp('2016-03-16', tz='UTC'),
Timestamp('2016-04-13', tz='UTC')],
'expiration_date': [Timestamp('2016-02-17', tz='UTC'),
Timestamp('2016-03-16', tz='UTC'),
Timestamp('2016-04-13', tz='UTC')],
'auto_close_date': [Timestamp('2016-02-17', tz='UTC'),
Timestamp('2016-03-16', tz='UTC'),
Timestamp('2016-04-13', tz='UTC')],
'tick_size': [0.001] * 3,
'multiplier': [1000.0] * 3,
'exchange': ['CME'] * 3,
})
# DF is set up to have a double volume flip between the 'F' and 'G'
# contracts, and then a really early temporary volume flip between the
# 'G' and 'H' contracts.
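        # The corresponding volume series are shaped in
        # make_future_minute_bar_data below (see the i == 17, 18 and 19
        # branches there).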
df_frame = DataFrame({
'symbol': ['DFF16', 'DFG16', 'DFH16'],
'root_symbol': ['DF'] * 3,
'asset_name': ['Double Flip'] * 3,
'sid': range(17, 20),
'start_date': [Timestamp('2005-01-01', tz='UTC'),
Timestamp('2005-02-01', tz='UTC'),
Timestamp('2005-03-01', tz='UTC')],
'end_date': [Timestamp('2016-08-19', tz='UTC'),
Timestamp('2016-09-19', tz='UTC'),
Timestamp('2016-10-19', tz='UTC')],
'notice_date': [Timestamp('2016-02-19', tz='UTC'),
Timestamp('2016-03-18', tz='UTC'),
Timestamp('2016-04-22', tz='UTC')],
'expiration_date': [Timestamp('2016-02-19', tz='UTC'),
Timestamp('2016-03-18', tz='UTC'),
Timestamp('2016-04-22', tz='UTC')],
'auto_close_date': [Timestamp('2016-02-17', tz='UTC'),
Timestamp('2016-03-16', tz='UTC'),
Timestamp('2016-04-20', tz='UTC')],
'tick_size': [0.001] * 3,
'multiplier': [1000.0] * 3,
'exchange': ['CME'] * 3,
})
return pd.concat([fo_frame, bz_frame, ma_frame, df_frame])
@classmethod
def make_future_minute_bar_data(cls):
tc = cls.trading_calendar
start = pd.Timestamp('2016-01-26', tz='UTC')
end = pd.Timestamp('2016-04-29', tz='UTC')
dts = tc.minutes_for_sessions_in_range(start, end)
sessions = tc.sessions_in_range(start, end)
# Generate values in the XXY.YYY space, with XX representing the
# session and Y.YYY representing the minute within the session.
# e.g. the close of the 23rd session would be 231.440.
r = 10.0
day_markers = repeat(
arange(r, r * len(sessions) + r, r),
FUTURES_MINUTES_PER_DAY)
r = 0.001
min_markers = tile(
arange(r, r * FUTURES_MINUTES_PER_DAY + r, r),
len(sessions))
markers = day_markers + min_markers
# Volume uses a similar scheme as above but times 1000.
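        # e.g. the volume marker for the close of the 23rd session would be
        # 231440 (the base volume and per-sid offsets are added separately
        # below).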
r = 10.0 * 1000
vol_day_markers = repeat(
arange(r, r * len(sessions) + r, r, dtype=int64),
FUTURES_MINUTES_PER_DAY)
r = 0.001 * 1000
vol_min_markers = tile(
arange(r, r * FUTURES_MINUTES_PER_DAY + r, r, dtype=int64),
len(sessions))
vol_markers = vol_day_markers + vol_min_markers
base_df = pd.DataFrame(
{
'open': full(len(dts), 102000.0) + markers,
'high': full(len(dts), 109000.0) + markers,
'low': full(len(dts), 101000.0) + markers,
'close': full(len(dts), 105000.0) + markers,
'volume': full(len(dts), 10000, dtype=int64) + vol_markers,
},
index=dts)
        # Add the sid (times 10000) to the prices, so that the ten-thousands
        # place can be used to eyeball the source contract.
        # For volume roll tests, end each sid's volume early.
# FOF16 cuts out day before autoclose of 01-26
# FOG16 cuts out on autoclose
# FOH16 cuts out 4 days before autoclose
# FOJ16 cuts out 3 days before autoclose
        # Make FOG22 have a blip of trading, but not be the most actively
        # traded, so that it does not participate in volume rolls.
sid_to_vol_stop_session = {
0: Timestamp('2016-01-26', tz='UTC'),
1: Timestamp('2016-02-26', tz='UTC'),
2: Timestamp('2016-03-18', tz='UTC'),
3: Timestamp('2016-04-20', tz='UTC'),
6: Timestamp('2016-01-27', tz='UTC'),
}
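        # Per make_futures_info above: sid 0 is FOF16, 1 is FOG16, 2 is FOH16,
        # 3 is FOJ16 and 6 is FOG22.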
for i in range(20):
df = base_df.copy()
df += i * 10000
if i in sid_to_vol_stop_session:
vol_stop_session = sid_to_vol_stop_session[i]
m_open = tc.open_and_close_for_session(vol_stop_session)[0]
loc = dts.searchsorted(m_open)
                # Add a little bit of noise around the roll, so that
                # predicates that check for exactly 0 volume do not work,
                # since there may be straggler trades after a roll.
df.volume.values[loc] = 1000
df.volume.values[loc + 1:] = 0
j = i - 1
if j in sid_to_vol_stop_session:
non_primary_end = sid_to_vol_stop_session[j]
m_close = tc.open_and_close_for_session(non_primary_end)[1]
if m_close > dts[0]:
loc = dts.get_loc(m_close)
# Add some volume before a roll, since a contract may be
# entered earlier than when it is the primary.
df.volume.values[:loc + 1] = 10
if i == 15: # No volume for MAH16
df.volume.values[:] = 0
if i == 17:
end_loc = dts.searchsorted('2016-02-16 23:00:00+00:00')
df.volume.values[:end_loc] = 10
df.volume.values[end_loc:] = 0
if i == 18:
cross_loc_1 = dts.searchsorted('2016-02-09 23:01:00+00:00')
cross_loc_2 = dts.searchsorted('2016-02-11 23:01:00+00:00')
cross_loc_3 = dts.searchsorted('2016-02-15 23:01:00+00:00')
end_loc = dts.searchsorted('2016-03-15 23:01:00+00:00')
df.volume.values[:cross_loc_1] = 5
df.volume.values[cross_loc_1:cross_loc_2] = 15
df.volume.values[cross_loc_2:cross_loc_3] = 5
df.volume.values[cross_loc_3:end_loc] = 15
df.volume.values[end_loc:] = 0
if i == 19:
early_cross_1 = dts.searchsorted('2016-03-01 23:01:00+00:00')
early_cross_2 = dts.searchsorted('2016-03-03 23:01:00+00:00')
end_loc = dts.searchsorted('2016-04-19 23:01:00+00:00')
df.volume.values[:early_cross_1] = 1
df.volume.values[early_cross_1:early_cross_2] = 20
df.volume.values[early_cross_2:end_loc] = 10
df.volume.values[end_loc:] = 0
yield i, df
def test_double_volume_switch(self):
"""
Test that when a double volume switch occurs we treat the first switch
as the roll, assuming it is within a certain distance of the next auto
close date. See `VolumeRollFinder._active_contract` for a full
explanation and example.
"""
cf = self.asset_finder.create_continuous_future(
'DF', 0, 'volume', None,
)
sessions = self.trading_calendar.sessions_in_range(
'2016-02-09', '2016-02-17',
)
for session in sessions:
bar_data = self.create_bardata(lambda: session)
contract = bar_data.current(cf, 'contract')
# The 'G' contract surpasses the 'F' contract in volume on
# 2016-02-10, which means that the 'G' contract should become the
# front contract starting on 2016-02-11.
if session < pd.Timestamp('2016-02-11', tz='UTC'):
self.assertEqual(contract.symbol, 'DFF16')
else:
self.assertEqual(contract.symbol, 'DFG16')
# TODO: This test asserts behavior about a back contract briefly
# spiking in volume, but more than a week before the front contract's
# auto close date, meaning it does not fall in the 'grace' period used
# by `VolumeRollFinder._active_contract`. The current behavior is that
# during the spike, the back contract is considered current, but it may
# be worth changing that behavior in the future.
# sessions = self.trading_calendar.sessions_in_range(
# '2016-03-01', '2016-03-21',
# )
# for session in sessions:
# bar_data = self.create_bardata(lambda: session)
# contract = bar_data.current(cf, 'contract')
# if session < pd.Timestamp('2016-03-16', tz='UTC'):
# self.assertEqual(contract.symbol, 'DFG16')
# else:
# self.assertEqual(contract.symbol, 'DFH16')
def test_create_continuous_future(self):
cf_primary = self.asset_finder.create_continuous_future(
'FO', 0, 'calendar', None)
self.assertEqual(cf_primary.root_symbol, 'FO')
self.assertEqual(cf_primary.offset, 0)
self.assertEqual(cf_primary.roll_style, 'calendar')
self.assertEqual(cf_primary.start_date,
Timestamp('2015-01-05', tz='UTC'))
self.assertEqual(cf_primary.end_date,
Timestamp('2022-09-19', tz='UTC'))
retrieved_primary = self.asset_finder.retrieve_asset(
cf_primary.sid)
self.assertEqual(retrieved_primary, cf_primary)
cf_secondary = self.asset_finder.create_continuous_future(
'FO', 1, 'calendar', None)
self.assertEqual(cf_secondary.root_symbol, 'FO')
self.assertEqual(cf_secondary.offset, 1)
self.assertEqual(cf_secondary.roll_style, 'calendar')
        self.assertEqual(cf_secondary.start_date,
                         Timestamp('2015-01-05', tz='UTC'))
        self.assertEqual(cf_secondary.end_date,
                         Timestamp('2022-09-19', tz='UTC'))
retrieved = self.asset_finder.retrieve_asset(
cf_secondary.sid)
self.assertEqual(retrieved, cf_secondary)
self.assertNotEqual(cf_primary, cf_secondary)
# Assert that the proper exception is raised if the given root symbol
# does not exist.
with self.assertRaises(SymbolNotFound):
self.asset_finder.create_continuous_future(
'NO', 0, 'calendar', None)
def test_current_contract(self):
cf_primary = self.asset_finder.create_continuous_future(
'FO', 0, 'calendar', None)
bar_data = self.create_bardata(
lambda: pd.Timestamp('2016-01-26', tz='UTC'))
contract = bar_data.current(cf_primary, 'contract')
self.assertEqual(contract.symbol, 'FOF16')
bar_data = self.create_bardata(
lambda: pd.Timestamp('2016-01-27', tz='UTC'))
contract = bar_data.current(cf_primary, 'contract')
self.assertEqual(contract.symbol, 'FOG16',
'Auto close at beginning of session so FOG16 is now '
'the current contract.')
def test_get_value_contract_daily(self):
cf_primary = self.asset_finder.create_continuous_future(
'FO', 0, 'calendar', None)
contract = self.data_portal.get_spot_value(
cf_primary,
'contract',
pd.Timestamp('2016-01-26', tz='UTC'),
'daily',
)
self.assertEqual(contract.symbol, 'FOF16')
contract = self.data_portal.get_spot_value(
cf_primary,
'contract',
pd.Timestamp('2016-01-27', tz='UTC'),
'daily',
)
self.assertEqual(contract.symbol, 'FOG16',
'Auto close at beginning of session so FOG16 is now '
'the current contract.')
# Test that the current contract outside of the continuous future's
# start and end dates is None.
contract = self.data_portal.get_spot_value(
cf_primary,
'contract',
self.START_DATE - self.trading_calendar.day,
'daily',
)
self.assertIsNone(contract)
def test_get_value_close_daily(self):
cf_primary = self.asset_finder.create_continuous_future(
'FO', 0, 'calendar', None)
value = self.data_portal.get_spot_value(
cf_primary,
'close',
pd.Timestamp('2016-01-26', tz='UTC'),
'daily',
)
self.assertEqual(value, 105011.44)
value = self.data_portal.get_spot_value(
cf_primary,
'close',
pd.Timestamp('2016-01-27', tz='UTC'),
'daily',
)
self.assertEqual(value, 115021.44,
'Auto close at beginning of session so FOG16 is now '
'the current contract.')
# Check a value which occurs after the end date of the last known
# contract, to prevent a regression where the end date of the last
# contract was used instead of the max date of all contracts.
value = self.data_portal.get_spot_value(
cf_primary,
'close',
pd.Timestamp('2016-03-26', tz='UTC'),
'daily',
)
self.assertEqual(value, 135441.44,
'Value should be for FOJ16, even though last '
'contract ends before query date.')
def test_current_contract_volume_roll(self):
cf_primary = self.asset_finder.create_continuous_future(
'FO', 0, 'volume', None)
bar_data = self.create_bardata(
lambda: pd.Timestamp('2016-01-26', tz='UTC'))
contract = bar_data.current(cf_primary, 'contract')
self.assertEqual(contract.symbol, 'FOF16')
bar_data = self.create_bardata(
lambda: pd.Timestamp('2016-01-27', tz='UTC'))
contract = bar_data.current(cf_primary, 'contract')
self.assertEqual(contract.symbol, 'FOG16',
'Auto close at beginning of session. FOG16 is now '
'the current contract.')
bar_data = self.create_bardata(
lambda: pd.Timestamp('2016-02-26', tz='UTC'))
contract = bar_data.current(cf_primary, 'contract')
self.assertEqual(contract.symbol, 'FOH16',
'Volume switch to FOH16, should have triggered roll.')
def test_current_contract_in_algo(self):
code = dedent("""
from zipline.api import (
record,
continuous_future,
schedule_function,
get_datetime,
)
def initialize(algo):
algo.primary_cl = continuous_future('FO', 0, 'calendar', None)
algo.secondary_cl = continuous_future('FO', 1, 'calendar', None)
schedule_function(record_current_contract)
def record_current_contract(algo, data):
record(datetime=get_datetime())
record(primary=data.current(algo.primary_cl, 'contract'))
record(secondary=data.current(algo.secondary_cl, 'contract'))
""")
algo = TradingAlgorithm(script=code,
sim_params=self.sim_params,
trading_calendar=self.trading_calendar,
env=self.env)
results = algo.run(self.data_portal)
result = results.iloc[0]
self.assertEqual(result.primary.symbol,
'FOF16',
'Primary should be FOF16 on first session.')
self.assertEqual(result.secondary.symbol,
'FOG16',
'Secondary should be FOG16 on first session.')
result = results.iloc[1]
# Second day, primary should switch to FOG
self.assertEqual(result.primary.symbol,
'FOG16',
'Primary should be FOG16 on second session, auto '
'close is at beginning of the session.')
self.assertEqual(result.secondary.symbol,
'FOH16',
'Secondary should be FOH16 on second session, auto '
'close is at beginning of the session.')
result = results.iloc[2]
        # Third session, primary should remain FOG16.
self.assertEqual(result.primary.symbol,
'FOG16',
'Primary should remain as FOG16 on third session.')
self.assertEqual(result.secondary.symbol,
'FOH16',
'Secondary should remain as FOH16 on third session.')
def test_current_chain_in_algo(self):
code = dedent("""
from zipline.api import (
record,
continuous_future,
schedule_function,
get_datetime,
)
def initialize(algo):
algo.primary_cl = continuous_future('FO', 0, 'calendar', None)
algo.secondary_cl = continuous_future('FO', 1, 'calendar', None)
schedule_function(record_current_contract)
def record_current_contract(algo, data):
record(datetime=get_datetime())
primary_chain = data.current_chain(algo.primary_cl)
secondary_chain = data.current_chain(algo.secondary_cl)
record(primary_len=len(primary_chain))
record(primary_first=primary_chain[0].symbol)
record(primary_last=primary_chain[-1].symbol)
record(secondary_len=len(secondary_chain))
record(secondary_first=secondary_chain[0].symbol)
record(secondary_last=secondary_chain[-1].symbol)
""")
algo = TradingAlgorithm(script=code,
sim_params=self.sim_params,
trading_calendar=self.trading_calendar,
env=self.env)
results = algo.run(self.data_portal)
result = results.iloc[0]
self.assertEqual(result.primary_len,
6,
'There should be only 6 contracts in the chain for '
'the primary, there are 7 contracts defined in the '
'fixture, but one has a start after the simulation '
'date.')
self.assertEqual(result.secondary_len,
5,
'There should be only 5 contracts in the chain for '
                         'the secondary, there are 7 contracts defined in the '
'fixture, but one has a start after the simulation '
'date. And the first is not included because it is '
'the primary on that date.')
self.assertEqual(result.primary_first,
'FOF16',
'Front of primary chain should be FOF16 on first '
'session.')
self.assertEqual(result.secondary_first,
'FOG16',
'Front of secondary chain should be FOG16 on first '
'session.')
self.assertEqual(result.primary_last,
'FOG22',
                         'End of primary chain should be FOG22 on first '
                         'session.')
self.assertEqual(result.secondary_last,
'FOG22',
                         'End of secondary chain should be FOG22 on first '
                         'session.')
# Second day, primary should switch to FOG
result = results.iloc[1]
self.assertEqual(result.primary_len,
5,
'There should be only 5 contracts in the chain for '
'the primary, there are 7 contracts defined in the '
'fixture, but one has a start after the simulation '
'date. The first is not included because of roll.')
self.assertEqual(result.secondary_len,
4,
'There should be only 4 contracts in the chain for '
                         'the secondary, there are 7 contracts defined in the '
'fixture, but one has a start after the simulation '
'date. The first is not included because of roll, '
'the second is the primary on that date.')
self.assertEqual(result.primary_first,
'FOG16',
'Front of primary chain should be FOG16 on second '
'session.')
self.assertEqual(result.secondary_first,
'FOH16',
'Front of secondary chain should be FOH16 on second '
'session.')
        # These values remain FOG22 because the fixture data is not exhaustive
        # enough to move the end of the chain.
self.assertEqual(result.primary_last,
'FOG22',
                         'End of primary chain should be FOG22 on second '
                         'session.')
self.assertEqual(result.secondary_last,
'FOG22',
                         'End of secondary chain should be FOG22 on second '
                         'session.')
def test_history_sid_session(self):
cf = self.data_portal.asset_finder.create_continuous_future(
'FO', 0, 'calendar', None)
window = self.data_portal.get_history_window(
[cf],
Timestamp('2016-03-04 18:01', tz='US/Eastern').tz_convert('UTC'),
30, '1d', 'sid', 'minute')
self.assertEqual(window.loc['2016-01-26', cf],
0,
"Should be FOF16 at beginning of window.")
self.assertEqual(window.loc['2016-01-27', cf],
1,
"Should be FOG16 after first roll.")
self.assertEqual(window.loc['2016-02-25', cf],
1,
"Should be FOG16 on session before roll.")
self.assertEqual(window.loc['2016-02-26', cf],
2,
"Should be FOH16 on session with roll.")
self.assertEqual(window.loc['2016-02-29', cf],
2,
"Should be FOH16 on session after roll.")
# Advance the window a month.
window = self.data_portal.get_history_window(
[cf],
Timestamp('2016-04-06 18:01', tz='US/Eastern').tz_convert('UTC'),
30, '1d', 'sid', 'minute')
self.assertEqual(window.loc['2016-02-25', cf],
1,
"Should be FOG16 at beginning of window.")
self.assertEqual(window.loc['2016-02-26', cf],
2,
"Should be FOH16 on session with roll.")
self.assertEqual(window.loc['2016-02-29', cf],
2,
"Should be FOH16 on session after roll.")
self.assertEqual(window.loc['2016-03-24', cf],
3,
"Should be FOJ16 on session with roll.")
self.assertEqual(window.loc['2016-03-28', cf],
3,
"Should be FOJ16 on session after roll.")
def test_history_sid_session_delivery_predicate(self):
cf = self.data_portal.asset_finder.create_continuous_future(
'BZ', 0, 'calendar', None)
window = self.data_portal.get_history_window(
[cf],
Timestamp('2016-01-11 18:01', tz='US/Eastern').tz_convert('UTC'),
3, '1d', 'sid', 'minute')
self.assertEqual(window.loc['2016-01-08', cf],
10,
"Should be BZF16 at beginning of window.")
self.assertEqual(window.loc['2016-01-11', cf],
12,
"Should be BZH16 after first roll, having skipped "
"over BZG16.")
self.assertEqual(window.loc['2016-01-12', cf],
12,
"Should have remained BZG16")
def test_history_sid_session_secondary(self):
cf = self.data_portal.asset_finder.create_continuous_future(
'FO', 1, 'calendar', None)
window = self.data_portal.get_history_window(
[cf],
Timestamp('2016-03-04 18:01', tz='US/Eastern').tz_convert('UTC'),
30, '1d', 'sid', 'minute')
self.assertEqual(window.loc['2016-01-26', cf],
1,
"Should be FOG16 at beginning of window.")
self.assertEqual(window.loc['2016-01-27', cf],
2,
"Should be FOH16 after first roll.")
self.assertEqual(window.loc['2016-02-25', cf],
2,
"Should be FOH16 on session before roll.")
self.assertEqual(window.loc['2016-02-26', cf],
3,
"Should be FOJ16 on session with roll.")
self.assertEqual(window.loc['2016-02-29', cf],
3,
"Should be FOJ16 on session after roll.")
# Advance the window a month.
window = self.data_portal.get_history_window(
[cf],
Timestamp('2016-04-06 18:01', tz='US/Eastern').tz_convert('UTC'),
30, '1d', 'sid', 'minute')
self.assertEqual(window.loc['2016-02-25', cf],
2,
"Should be FOH16 at beginning of window.")
self.assertEqual(window.loc['2016-02-26', cf],
3,
"Should be FOJ16 on session with roll.")
self.assertEqual(window.loc['2016-02-29', cf],
3,
"Should be FOJ16 on session after roll.")
self.assertEqual(window.loc['2016-03-24', cf],
4,
"Should be FOK16 on session with roll.")
self.assertEqual(window.loc['2016-03-28', cf],
4,
"Should be FOK16 on session after roll.")
def test_history_sid_session_volume_roll(self):
cf = self.data_portal.asset_finder.create_continuous_future(
'FO', 0, 'volume', None)
window = self.data_portal.get_history_window(
[cf],
Timestamp('2016-03-04 18:01', tz='US/Eastern').tz_convert('UTC'),
30, '1d', 'sid', 'minute')
# Volume cuts out for FOF16 on 2016-01-25
self.assertEqual(window.loc['2016-01-26', cf],
0,
"Should be FOF16 at beginning of window.")
self.assertEqual(window.loc['2016-01-27', cf],
1,
"Should have rolled to FOG16.")
self.assertEqual(window.loc['2016-02-25', cf],
1,
"Should be FOG16 on session before roll.")
self.assertEqual(window.loc['2016-02-26', cf],
2,
"Should be FOH16 on session with roll.")
self.assertEqual(window.loc['2016-02-29', cf],
2,
"Should be FOH16 on session after roll.")
# Advance the window a month.
window = self.data_portal.get_history_window(
[cf],
Timestamp('2016-04-06 18:01', tz='US/Eastern').tz_convert('UTC'),
30, '1d', 'sid', 'minute')
self.assertEqual(window.loc['2016-02-25', cf],
1,
"Should be FOG16 at beginning of window.")
self.assertEqual(window.loc['2016-02-26', cf],
2,
"Should be FOH16 on roll session.")
self.assertEqual(window.loc['2016-02-29', cf],
2,
"Should remain FOH16.")
self.assertEqual(window.loc['2016-03-17', cf],
2,
"Should be FOH16 on session before volume cuts out.")
self.assertEqual(window.loc['2016-03-18', cf],
2,
"Should be FOH16 on session where the volume of "
"FOH16 cuts out, the roll is upcoming.")
self.assertEqual(window.loc['2016-03-24', cf],
3,
"Should have rolled to FOJ16.")
self.assertEqual(window.loc['2016-03-28', cf],
3,
"Should have remained FOJ16.")
def test_history_sid_minute(self):
cf = self.data_portal.asset_finder.create_continuous_future(
'FO', 0, 'calendar', None)
window = self.data_portal.get_history_window(
[cf.sid],
Timestamp('2016-01-26 18:01', tz='US/Eastern').tz_convert('UTC'),
30, '1m', 'sid', 'minute')
self.assertEqual(window.loc['2016-01-26 22:32', cf],
0,
"Should be FOF16 at beginning of window. A minute "
"which is in the 01-26 session, before the roll.")
self.assertEqual(window.loc['2016-01-26 23:00', cf],
0,
"Should be FOF16 on on minute before roll minute.")
self.assertEqual(window.loc['2016-01-26 23:01', cf],
1,
"Should be FOG16 on minute after roll.")
# Advance the window a day.
window = self.data_portal.get_history_window(
[cf],
Timestamp('2016-01-27 18:01', tz='US/Eastern').tz_convert('UTC'),
30, '1m', 'sid', 'minute')
self.assertEqual(window.loc['2016-01-27 22:32', cf],
1,
"Should be FOG16 at beginning of window.")
self.assertEqual(window.loc['2016-01-27 23:01', cf],
1,
"Should remain FOG16 on next session.")
def test_history_close_session(self):
cf = self.data_portal.asset_finder.create_continuous_future(
'FO', 0, 'calendar', None)
window = self.data_portal.get_history_window(
[cf.sid],
Timestamp('2016-03-06', tz='UTC'),
30, '1d', 'close', 'daily')
assert_almost_equal(
window.loc['2016-01-26', cf],
105011.440,
err_msg="At beginning of window, should be FOG16's first value.")
assert_almost_equal(
window.loc['2016-02-26', cf],
125241.440,
err_msg="On session with roll, should be FOH16's 24th value.")
assert_almost_equal(
window.loc['2016-02-29', cf],
125251.440,
err_msg="After roll, Should be FOH16's 25th value.")
# Advance the window a month.
window = self.data_portal.get_history_window(
[cf.sid],
Timestamp('2016-04-06', tz='UTC'),
30, '1d', 'close', 'daily')
assert_almost_equal(
window.loc['2016-02-24', cf],
115221.440,
err_msg="At beginning of window, should be FOG16's 22nd value.")
assert_almost_equal(
window.loc['2016-02-26', cf],
125241.440,
err_msg="On session with roll, should be FOH16's 24th value.")
assert_almost_equal(
window.loc['2016-02-29', cf],
125251.440,
err_msg="On session after roll, should be FOH16's 25th value.")
assert_almost_equal(
window.loc['2016-03-24', cf],
135431.440,
err_msg="On session with roll, should be FOJ16's 43rd value.")
assert_almost_equal(
window.loc['2016-03-28', cf],
135441.440,
err_msg="On session after roll, Should be FOJ16's 44th value.")
def test_history_close_session_skip_volume(self):
cf = self.data_portal.asset_finder.create_continuous_future(
'MA', 0, 'volume', None)
window = self.data_portal.get_history_window(
[cf.sid],
Timestamp('2016-03-06', tz='UTC'),
30, '1d', 'close', 'daily')
assert_almost_equal(
window.loc['2016-01-26', cf],
245011.440,
err_msg="At beginning of window, should be MAG16's first value.")
assert_almost_equal(
window.loc['2016-02-26', cf],
265241.440,
err_msg="Should have skipped MAH16 to MAJ16.")
assert_almost_equal(
window.loc['2016-02-29', cf],
265251.440,
err_msg="Should have remained MAJ16.")
# Advance the window a month.
window = self.data_portal.get_history_window(
[cf.sid],
Timestamp('2016-04-06', tz='UTC'),
30, '1d', 'close', 'daily')
assert_almost_equal(
window.loc['2016-02-24', cf],
265221.440,
err_msg="Should be MAJ16, having skipped MAH16.")
assert_almost_equal(
window.loc['2016-02-29', cf],
265251.440,
err_msg="Should be MAJ1 for rest of window.")
assert_almost_equal(
window.loc['2016-03-24', cf],
265431.440,
err_msg="Should be MAJ16 for rest of window.")
def test_history_close_session_adjusted(self):
cf = self.data_portal.asset_finder.create_continuous_future(
'FO', 0, 'calendar', None)
cf_mul = self.data_portal.asset_finder.create_continuous_future(
'FO', 0, 'calendar', 'mul')
cf_add = self.data_portal.asset_finder.create_continuous_future(
'FO', 0, 'calendar', 'add')
window = self.data_portal.get_history_window(
[cf, cf_mul, cf_add],
Timestamp('2016-03-06', tz='UTC'),
30, '1d', 'close', 'daily')
# Unadjusted value is: 115011.44
# Adjustment is based on hop from 115231.44 to 125231.44
# a ratio of ~0.920
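        # i.e. 115011.44 * (125231.44 / 115231.44) ~= 124992.348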
assert_almost_equal(
window.loc['2016-01-26', cf_mul],
124992.348,
err_msg="At beginning of window, should be FOG16's first value, "
"adjusted.")
        # Difference of 10000.0
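        # i.e. 115011.44 + 10000.0 = 125011.44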
assert_almost_equal(
window.loc['2016-01-26', cf_add],
125011.44,
err_msg="At beginning of window, should be FOG16's first value, "
"adjusted.")
assert_almost_equal(
window.loc['2016-02-26', cf_mul],
125241.440,
err_msg="On session with roll, should be FOH16's 24th value, "
"unadjusted.")
assert_almost_equal(
window.loc['2016-02-26', cf_add],
125241.440,
err_msg="On session with roll, should be FOH16's 24th value, "
"unadjusted.")
assert_almost_equal(
window.loc['2016-02-29', cf_mul],
125251.440,
err_msg="After roll, Should be FOH16's 25th value, unadjusted.")
assert_almost_equal(
window.loc['2016-02-29', cf_add],
125251.440,
err_msg="After roll, Should be FOH16's 25th value, unadjusted.")
# Advance the window a month.
window = self.data_portal.get_history_window(
[cf, cf_mul, cf_add],
Timestamp('2016-04-06', tz='UTC'),
30, '1d', 'close', 'daily')
# Unadjusted value: 115221.44
# Adjustments based on hops:
# 2016-02-25 00:00:00+00:00
# front 115231.440
# back 125231.440
# ratio: ~0.920
# difference: 10000.0
# and
# 2016-03-23 00:00:00+00:00
# front 125421.440
# back 135421.440
# ratio: ~1.080
# difference: 10000.00
assert_almost_equal(
window.loc['2016-02-24', cf_mul],
135236.905,
err_msg="At beginning of window, should be FOG16's 22nd value, "
"with two adjustments.")
assert_almost_equal(
window.loc['2016-02-24', cf_add],
135251.44,
err_msg="At beginning of window, should be FOG16's 22nd value, "
"with two adjustments")
# Unadjusted: 125241.44
assert_almost_equal(
window.loc['2016-02-26', cf_mul],
135259.442,
err_msg="On session with roll, should be FOH16's 24th value, "
"with one adjustment.")
assert_almost_equal(
window.loc['2016-02-26', cf_add],
135271.44,
err_msg="On session with roll, should be FOH16's 24th value, "
"with one adjustment.")
# Unadjusted: 125251.44
assert_almost_equal(
window.loc['2016-02-29', cf_mul],
135270.241,
err_msg="On session after roll, should be FOH16's 25th value, "
"with one adjustment.")
assert_almost_equal(
window.loc['2016-02-29', cf_add],
135281.44,
err_msg="On session after roll, should be FOH16's 25th value, "
"unadjusted.")
# Unadjusted: 135431.44
assert_almost_equal(
window.loc['2016-03-24', cf_mul],
135431.44,
err_msg="On session with roll, should be FOJ16's 43rd value, "
"unadjusted.")
assert_almost_equal(
window.loc['2016-03-24', cf_add],
135431.44,
err_msg="On session with roll, should be FOJ16's 43rd value.")
# Unadjusted: 135441.44
assert_almost_equal(
window.loc['2016-03-28', cf_mul],
135441.44,
err_msg="On session after roll, Should be FOJ16's 44th value.")
assert_almost_equal(
window.loc['2016-03-28', cf_add],
135441.44,
err_msg="On session after roll, Should be FOJ16's 44th value.")
def test_history_close_minute(self):
cf = self.data_portal.asset_finder.create_continuous_future(
'FO', 0, 'calendar', None)
window = self.data_portal.get_history_window(
[cf.sid],
Timestamp('2016-02-25 18:01', tz='US/Eastern').tz_convert('UTC'),
30, '1m', 'close', 'minute')
self.assertEqual(window.loc['2016-02-25 22:32', cf],
115231.412,
"Should be FOG16 at beginning of window. A minute "
"which is in the 02-25 session, before the roll.")
self.assertEqual(window.loc['2016-02-25 23:00', cf],
115231.440,
"Should be FOG16 on on minute before roll minute.")
self.assertEqual(window.loc['2016-02-25 23:01', cf],
125240.001,
"Should be FOH16 on minute after roll.")
# Advance the window a session.
window = self.data_portal.get_history_window(
[cf],
Timestamp('2016-02-28 18:01', tz='US/Eastern').tz_convert('UTC'),
30, '1m', 'close', 'minute')
self.assertEqual(window.loc['2016-02-26 22:32', cf],
125241.412,
"Should be FOH16 at beginning of window.")
self.assertEqual(window.loc['2016-02-28 23:01', cf],
125250.001,
"Should remain FOH16 on next session.")
def test_history_close_minute_adjusted(self):
cf = self.data_portal.asset_finder.create_continuous_future(
'FO', 0, 'calendar', None)
cf_mul = self.data_portal.asset_finder.create_continuous_future(
'FO', 0, 'calendar', 'mul')
cf_add = self.data_portal.asset_finder.create_continuous_future(
'FO', 0, 'calendar', 'add')
window = self.data_portal.get_history_window(
[cf, cf_mul, cf_add],
Timestamp('2016-02-25 18:01', tz='US/Eastern').tz_convert('UTC'),
30, '1m', 'close', 'minute')
# Unadjusted: 115231.412
# Adjustment based on roll:
# 2016-02-25 23:00:00+00:00
# front: 115231.440
# back: 125231.440
# Ratio: ~0.920
# Difference: 10000.00
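        # i.e. mul: 115231.412 * (125231.440 / 115231.440) ~= 125231.41
        #      add: 115231.412 + 10000.00 = 125231.412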
self.assertEqual(window.loc['2016-02-25 22:32', cf_mul],
125231.41,
"Should be FOG16 at beginning of window. A minute "
"which is in the 02-25 session, before the roll.")
self.assertEqual(window.loc['2016-02-25 22:32', cf_add],
125231.412,
"Should be FOG16 at beginning of window. A minute "
"which is in the 02-25 session, before the roll.")
# Unadjusted: 115231.44
# Should use same ratios as above.
self.assertEqual(window.loc['2016-02-25 23:00', cf_mul],
125231.44,
"Should be FOG16 on on minute before roll minute, "
"adjusted.")
self.assertEqual(window.loc['2016-02-25 23:00', cf_add],
125231.44,
"Should be FOG16 on on minute before roll minute, "
"adjusted.")
self.assertEqual(window.loc['2016-02-25 23:01', cf_mul],
125240.001,
"Should be FOH16 on minute after roll, unadjusted.")
self.assertEqual(window.loc['2016-02-25 23:01', cf_add],
125240.001,
"Should be FOH16 on minute after roll, unadjusted.")
# Advance the window a session.
window = self.data_portal.get_history_window(
[cf, cf_mul, cf_add],
Timestamp('2016-02-28 18:01', tz='US/Eastern').tz_convert('UTC'),
30, '1m', 'close', 'minute')
# No adjustments in this window.
self.assertEqual(window.loc['2016-02-26 22:32', cf_mul],
125241.412,
"Should be FOH16 at beginning of window.")
self.assertEqual(window.loc['2016-02-28 23:01', cf_mul],
125250.001,
"Should remain FOH16 on next session.")
def test_history_close_minute_adjusted_volume_roll(self):
cf = self.data_portal.asset_finder.create_continuous_future(
'FO', 0, 'volume', None)
cf_mul = self.data_portal.asset_finder.create_continuous_future(
'FO', 0, 'volume', 'mul')
cf_add = self.data_portal.asset_finder.create_continuous_future(
'FO', 0, 'volume', 'add')
window = self.data_portal.get_history_window(
[cf, cf_mul, cf_add],
Timestamp('2016-02-25 18:01', tz='US/Eastern').tz_convert('UTC'),
30, '1m', 'close', 'minute')
# Unadjusted: 115231.412
# Adjustment based on roll:
# 2016-02-25 23:00:00+00:00
# front: 115231.440
# back: 125231.440
# Ratio: ~0.920
# Difference: 10000.00
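# The expected values below follow the same mul/add arithmetic worked
# out in test_history_close_minute_adjusted above.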
self.assertEqual(window.loc['2016-02-25 22:32', cf_mul],
125231.41,
"Should be FOG16 at beginning of window. A minute "
"which is in the 02-25 session, before the roll.")
self.assertEqual(window.loc['2016-02-25 22:32', cf_add],
125231.412,
"Should be FOG16 at beginning of window. A minute "
"which is in the 02-25 session, before the roll.")
# Unadjusted: 115231.44
# Should use same ratios as above.
self.assertEqual(window.loc['2016-02-25 23:00', cf_mul],
125231.44,
"Should be FOG16 on on minute before roll minute, "
"adjusted.")
self.assertEqual(window.loc['2016-02-25 23:00', cf_add],
125231.44,
"Should be FOG16 on on minute before roll minute, "
"adjusted.")
self.assertEqual(window.loc['2016-02-25 23:01', cf_mul],
125240.001,
"Should be FOH16 on minute after roll, unadjusted.")
self.assertEqual(window.loc['2016-02-25 23:01', cf_add],
125240.001,
"Should be FOH16 on minute after roll, unadjusted.")
# Advance the window a session.
window = self.data_portal.get_history_window(
[cf, cf_mul, cf_add],
Timestamp('2016-02-28 18:01', tz='US/Eastern').tz_convert('UTC'),
30, '1m', 'close', 'minute')
# No adjustments in this window.
self.assertEqual(window.loc['2016-02-26 22:32', cf_mul],
125241.412,
"Should be FOH16 at beginning of window.")
self.assertEqual(window.loc['2016-02-28 23:01', cf_mul],
125250.001,
"Should remain FOH16 on next session.")
class OrderedContractsTestCase(WithAssetFinder,
ZiplineTestCase):
@classmethod
def make_root_symbols_info(cls):
return pd.DataFrame({
'root_symbol': ['FO', 'BA', 'BZ'],
'root_symbol_id': [1, 2, 3],
'exchange': ['CME', 'CME', 'CME']})
@classmethod
def make_futures_info(cls):
fo_frame = DataFrame({
'root_symbol': ['FO'] * 4,
'asset_name': ['Foo'] * 4,
'symbol': ['FOF16', 'FOG16', 'FOH16', 'FOJ16'],
'sid': range(1, 5),
'start_date': pd.date_range('2015-01-01', periods=4, tz="UTC"),
'end_date': pd.date_range('2016-01-01', periods=4, tz="UTC"),
'notice_date': pd.date_range('2016-01-01', periods=4, tz="UTC"),
'expiration_date': pd.date_range(
'2016-01-01', periods=4, tz="UTC"),
'auto_close_date': pd.date_range(
'2016-01-01', periods=4, tz="UTC"),
'tick_size': [0.001] * 4,
'multiplier': [1000.0] * 4,
'exchange': ['CME'] * 4,
})
# BA is set up to test a quarterly roll, exercising Eurodollar-like
# behavior.
# The roll should go from BAH16 -> BAM16.
ba_frame = DataFrame({
'root_symbol': ['BA'] * 3,
'asset_name': ['Bar'] * 3,
'symbol': ['BAF16', 'BAG16', 'BAH16'],
'sid': range(5, 8),
'start_date': pd.date_range('2015-01-01', periods=3, tz="UTC"),
'end_date': pd.date_range('2016-01-01', periods=3, tz="UTC"),
'notice_date': pd.date_range('2016-01-01', periods=3, tz="UTC"),
'expiration_date': pd.date_range(
'2016-01-01', periods=3, tz="UTC"),
'auto_close_date': pd.date_range(
'2016-01-01', periods=3, tz="UTC"),
'tick_size': [0.001] * 3,
'multiplier': [1000.0] * 3,
'exchange': ['CME'] * 3,
})
# BZ is set up to test the case where the first contract in a chain has
# an auto close date before its start date. It also tests the case
# where a contract in the chain has a start date after the auto close
# date of the previous contract, leaving a gap with no active contract.
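# Timeline implied by the dates below: BZF15 auto closes on 2014-12-29,
# before its 2015-01-02 start date, and BZG15 auto closes on 2015-02-16,
# a week before BZH15 starts on 2015-02-23, leaving no active contract in
# between.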
bz_frame = DataFrame({
'root_symbol': ['BZ'] * 4,
'asset_name': ['Baz'] * 4,
'symbol': ['BZF15', 'BZG15', 'BZH15', 'BZJ16'],
'sid': range(8, 12),
'start_date': [
pd.Timestamp('2015-01-02', tz='UTC'),
pd.Timestamp('2015-01-03', tz='UTC'),
pd.Timestamp('2015-02-23', tz='UTC'),
pd.Timestamp('2015-02-24', tz='UTC'),
],
'end_date': pd.date_range(
'2015-02-01', periods=4, freq='MS', tz='UTC',
),
'notice_date': [
pd.Timestamp('2014-12-31', tz='UTC'),
pd.Timestamp('2015-02-18', tz='UTC'),
pd.Timestamp('2015-03-18', tz='UTC'),
pd.Timestamp('2015-04-17', tz='UTC'),
],
'expiration_date': pd.date_range(
'2015-02-01', periods=4, freq='MS', tz='UTC',
),
'auto_close_date': [
pd.Timestamp('2014-12-29', tz='UTC'),
pd.Timestamp('2015-02-16', tz='UTC'),
pd.Timestamp('2015-03-16', tz='UTC'),
pd.Timestamp('2015-04-15', tz='UTC'),
],
'tick_size': [0.001] * 4,
'multiplier': [1000.0] * 4,
'exchange': ['CME'] * 4,
})
return pd.concat([fo_frame, ba_frame, bz_frame])
def test_contract_at_offset(self):
contract_sids = array([1, 2, 3, 4], dtype=int64)
start_dates = pd.date_range('2015-01-01', periods=4, tz="UTC")
contracts = deque(self.asset_finder.retrieve_all(contract_sids))
oc = OrderedContracts('FO', contracts)
self.assertEquals(1,
oc.contract_at_offset(1, 0, start_dates[-1].value),
"Offset of 0 should return provided sid")
self.assertEquals(2,
oc.contract_at_offset(1, 1, start_dates[-1].value),
"Offset of 1 should return next sid in chain.")
self.assertEquals(None,
oc.contract_at_offset(4, 1, start_dates[-1].value),
"Offset at end of chain should not crash.")
def test_active_chain(self):
contract_sids = array([1, 2, 3, 4], dtype=int64)
contracts = deque(self.asset_finder.retrieve_all(contract_sids))
oc = OrderedContracts('FO', contracts)
# Test sid 1 as days increment, as the sessions march forward
# a contract should be added per day, until all defined contracts
# are returned.
chain = oc.active_chain(1, pd.Timestamp('2014-12-31', tz='UTC').value)
self.assertEquals([], list(chain),
"On session before first start date, no contracts "
"in chain should be active.")
chain = oc.active_chain(1, pd.Timestamp('2015-01-01', tz='UTC').value)
self.assertEquals([1], list(chain),
"[1] should be the active chain on 01-01, since all "
"other start dates occur after 01-01.")
chain = oc.active_chain(1, pd.Timestamp('2015-01-02', tz='UTC').value)
self.assertEquals([1, 2], list(chain),
"[1, 2] should be the active contracts on 01-02.")
chain = oc.active_chain(1, pd.Timestamp('2015-01-03', tz='UTC').value)
self.assertEquals([1, 2, 3], list(chain),
"[1, 2, 3] should be the active contracts on 01-03.")
chain = oc.active_chain(1, pd.Timestamp('2015-01-04', tz='UTC').value)
self.assertEquals(4, len(chain),
"[1, 2, 3, 4] should be the active contracts on "
"01-04, this is all defined contracts in the test "
"case.")
chain = oc.active_chain(1, pd.Timestamp('2015-01-05', tz='UTC').value)
self.assertEquals(4, len(chain),
"[1, 2, 3, 4] should be the active contracts on "
"01-05. This tests the case where all start dates "
"are before the query date.")
# Test querying each sid at a time when all should be alive.
chain = oc.active_chain(2, pd.Timestamp('2015-01-05', tz='UTC').value)
self.assertEquals([2, 3, 4], list(chain))
chain = oc.active_chain(3, pd.Timestamp('2015-01-05', tz='UTC').value)
self.assertEquals([3, 4], list(chain))
chain = oc.active_chain(4, pd.Timestamp('2015-01-05', tz='UTC').value)
self.assertEquals([4], list(chain))
# Test defined contract to check edge conditions.
chain = oc.active_chain(4, pd.Timestamp('2015-01-03', tz='UTC').value)
self.assertEquals([], list(chain),
"No contracts should be active, since 01-03 is "
"before 4's start date.")
chain = oc.active_chain(4, pd.Timestamp('2015-01-04', tz='UTC').value)
self.assertEquals([4], list(chain),
"[4] should be active beginning at its start date.")
def test_delivery_predicate(self):
contract_sids = range(5, 8)
contracts = deque(self.asset_finder.retrieve_all(contract_sids))
oc = OrderedContracts('BA', contracts,
chain_predicate=partial(delivery_predicate,
set(['F', 'H'])))
# The chain predicate only admits contracts with delivery months 'F'
# (January) and 'H' (March), so BAG16 ('G', February) should be dropped
# from the active chain.
chain = oc.active_chain(5, pd.Timestamp('2015-01-05', tz='UTC').value)
self.assertEquals(
[5, 7], list(chain),
"Contract BAG16 (sid=6) should be ommitted from chain, since "
"it does not satisfy the roll predicate.")
def test_auto_close_before_start(self):
contract_sids = array([8, 9, 10, 11], dtype=int64)
contracts = self.asset_finder.retrieve_all(contract_sids)
oc = OrderedContracts('BZ', deque(contracts))
# The OrderedContracts chain should omit BZF15 and start with BZG15.
self.assertEqual(oc.start_date, contracts[1].start_date)
self.assertEqual(oc.end_date, contracts[-1].end_date)
self.assertEqual(oc.contract_before_auto_close(oc.start_date.value), 9)
# The OrderedContracts chain should end on the last contract even
# though there is a gap between the auto close date of BZG15 and the
# start date of BZH15. During this period, BZH15 should be considered
# the center contract, as a placeholder of sorts.
self.assertEqual(
oc.contract_before_auto_close(contracts[1].notice_date.value),
10,
)
self.assertEqual(
oc.contract_before_auto_close(contracts[2].start_date.value),
10,
)
class NoPrefetchContinuousFuturesTestCase(ContinuousFuturesTestCase):
DATA_PORTAL_MINUTE_HISTORY_PREFETCH = 0
DATA_PORTAL_DAILY_HISTORY_PREFETCH = 0
| apache-2.0 |
mdespriee/spark | python/setup.py | 4 | 10245 | #!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import glob
import os
import sys
from setuptools import setup, find_packages
from shutil import copyfile, copytree, rmtree
if sys.version_info < (2, 7):
print("Python versions prior to 2.7 are not supported for pip installed PySpark.",
file=sys.stderr)
sys.exit(-1)
try:
exec(open('pyspark/version.py').read())
except IOError:
print("Failed to load PySpark version file for packaging. You must be in Spark's python dir.",
file=sys.stderr)
sys.exit(-1)
VERSION = __version__ # noqa
# A temporary path so we can access above the Python project root and fetch scripts and jars we need
TEMP_PATH = "deps"
SPARK_HOME = os.path.abspath("../")
# Provide guidance about how to use setup.py
incorrect_invocation_message = """
If you are installing pyspark from spark source, you must first build Spark and
run sdist.
To build Spark with maven you can run:
./build/mvn -DskipTests clean package
Building the source dist is done in the Python directory:
cd python
python setup.py sdist
pip install dist/*.tar.gz"""
# Figure out where the jars are we need to package with PySpark.
JARS_PATH = glob.glob(os.path.join(SPARK_HOME, "assembly/target/scala-*/jars/"))
if len(JARS_PATH) == 1:
JARS_PATH = JARS_PATH[0]
elif (os.path.isfile("../RELEASE") and len(glob.glob("../jars/spark*core*.jar")) == 1):
# Release mode puts the jars in a jars directory
JARS_PATH = os.path.join(SPARK_HOME, "jars")
elif len(JARS_PATH) > 1:
print("Assembly jars exist for multiple scalas ({0}), please cleanup assembly/target".format(
JARS_PATH), file=sys.stderr)
sys.exit(-1)
elif len(JARS_PATH) == 0 and not os.path.exists(TEMP_PATH):
print(incorrect_invocation_message, file=sys.stderr)
sys.exit(-1)
EXAMPLES_PATH = os.path.join(SPARK_HOME, "examples/src/main/python")
SCRIPTS_PATH = os.path.join(SPARK_HOME, "bin")
DATA_PATH = os.path.join(SPARK_HOME, "data")
LICENSES_PATH = os.path.join(SPARK_HOME, "licenses")
SCRIPTS_TARGET = os.path.join(TEMP_PATH, "bin")
JARS_TARGET = os.path.join(TEMP_PATH, "jars")
EXAMPLES_TARGET = os.path.join(TEMP_PATH, "examples")
DATA_TARGET = os.path.join(TEMP_PATH, "data")
LICENSES_TARGET = os.path.join(TEMP_PATH, "licenses")
# Check and see if we are under the spark path in which case we need to build the symlink farm.
# This is important because we only want to build the symlink farm while under Spark; otherwise we
# want to use the symlink farm. And if the symlink farm exists while under Spark (e.g. a
# partially built sdist) we should error and have the user sort it out.
in_spark = (os.path.isfile("../core/src/main/scala/org/apache/spark/SparkContext.scala") or
(os.path.isfile("../RELEASE") and len(glob.glob("../jars/spark*core*.jar")) == 1))
def _supports_symlinks():
"""Check if the system supports symlinks (e.g. *nix) or not."""
return getattr(os, "symlink", None) is not None
if (in_spark):
# Construct links for setup
try:
os.mkdir(TEMP_PATH)
except:
print("Temp path for symlink to parent already exists {0}".format(TEMP_PATH),
file=sys.stderr)
sys.exit(-1)
# If you are changing the versions here, please also change ./python/pyspark/sql/utils.py and
# ./python/run-tests.py. In case of Arrow, you should also check ./pom.xml.
_minimum_pandas_version = "0.19.2"
_minimum_pyarrow_version = "0.8.0"
try:
# We copy the shell script to be under pyspark/python/pyspark so that the launcher scripts
# find it where expected. The rest of the files aren't copied because they are accessed
# using Python imports instead which will be resolved correctly.
try:
os.makedirs("pyspark/python/pyspark")
except OSError:
# Don't worry if the directory already exists.
pass
copyfile("pyspark/shell.py", "pyspark/python/pyspark/shell.py")
if (in_spark):
# Construct the symlink farm - this is necessary since we can't refer to the path above the
# package root and we need to copy the jars and scripts which are up above the python root.
if _supports_symlinks():
os.symlink(JARS_PATH, JARS_TARGET)
os.symlink(SCRIPTS_PATH, SCRIPTS_TARGET)
os.symlink(EXAMPLES_PATH, EXAMPLES_TARGET)
os.symlink(DATA_PATH, DATA_TARGET)
os.symlink(LICENSES_PATH, LICENSES_TARGET)
else:
# For windows fall back to the slower copytree
copytree(JARS_PATH, JARS_TARGET)
copytree(SCRIPTS_PATH, SCRIPTS_TARGET)
copytree(EXAMPLES_PATH, EXAMPLES_TARGET)
copytree(DATA_PATH, DATA_TARGET)
copytree(LICENSES_PATH, LICENSES_TARGET)
else:
# If we are not inside of SPARK_HOME verify we have the required symlink farm
if not os.path.exists(JARS_TARGET):
print("To build packaging must be in the python directory under the SPARK_HOME.",
file=sys.stderr)
if not os.path.isdir(SCRIPTS_TARGET):
print(incorrect_invocation_message, file=sys.stderr)
sys.exit(-1)
# Scripts directive requires a list of each script path and does not take wild cards.
script_names = os.listdir(SCRIPTS_TARGET)
scripts = list(map(lambda script: os.path.join(SCRIPTS_TARGET, script), script_names))
# We add find_spark_home.py to the bin directory we install so that pip installed PySpark
# will search for SPARK_HOME with Python.
scripts.append("pyspark/find_spark_home.py")
# Parse the README markdown file into rst for PyPI
long_description = "!!!!! missing pandoc do not upload to PyPI !!!!"
try:
import pypandoc
long_description = pypandoc.convert('README.md', 'rst')
except ImportError:
print("Could not import pypandoc - required to package PySpark", file=sys.stderr)
except OSError:
print("Could not convert - pandoc is not installed", file=sys.stderr)
setup(
name='pyspark',
version=VERSION,
description='Apache Spark Python API',
long_description=long_description,
author='Spark Developers',
author_email='[email protected]',
url='https://github.com/apache/spark/tree/master/python',
packages=['pyspark',
'pyspark.mllib',
'pyspark.mllib.linalg',
'pyspark.mllib.stat',
'pyspark.ml',
'pyspark.ml.linalg',
'pyspark.ml.param',
'pyspark.sql',
'pyspark.streaming',
'pyspark.bin',
'pyspark.jars',
'pyspark.python.pyspark',
'pyspark.python.lib',
'pyspark.data',
'pyspark.licenses',
'pyspark.examples.src.main.python'],
include_package_data=True,
package_dir={
'pyspark.jars': 'deps/jars',
'pyspark.bin': 'deps/bin',
'pyspark.python.lib': 'lib',
'pyspark.data': 'deps/data',
'pyspark.licenses': 'deps/licenses',
'pyspark.examples.src.main.python': 'deps/examples',
},
package_data={
'pyspark.jars': ['*.jar'],
'pyspark.bin': ['*'],
'pyspark.python.lib': ['*.zip'],
'pyspark.data': ['*.txt', '*.data'],
'pyspark.licenses': ['*.txt'],
'pyspark.examples.src.main.python': ['*.py', '*/*.py']},
scripts=scripts,
license='http://www.apache.org/licenses/LICENSE-2.0',
install_requires=['py4j==0.10.8.1'],
setup_requires=['pypandoc'],
extras_require={
'ml': ['numpy>=1.7'],
'mllib': ['numpy>=1.7'],
'sql': [
'pandas>=%s' % _minimum_pandas_version,
'pyarrow>=%s' % _minimum_pyarrow_version,
]
},
classifiers=[
'Development Status :: 5 - Production/Stable',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy']
)
finally:
# We only cleanup the symlink farm if we were in Spark, otherwise we are installing rather than
# packaging.
if (in_spark):
# Depending on cleaning up the symlink farm or copied version
if _supports_symlinks():
os.remove(os.path.join(TEMP_PATH, "jars"))
os.remove(os.path.join(TEMP_PATH, "bin"))
os.remove(os.path.join(TEMP_PATH, "examples"))
os.remove(os.path.join(TEMP_PATH, "data"))
os.remove(os.path.join(TEMP_PATH, "licenses"))
else:
rmtree(os.path.join(TEMP_PATH, "jars"))
rmtree(os.path.join(TEMP_PATH, "bin"))
rmtree(os.path.join(TEMP_PATH, "examples"))
rmtree(os.path.join(TEMP_PATH, "data"))
rmtree(os.path.join(TEMP_PATH, "licenses"))
os.rmdir(TEMP_PATH)
| apache-2.0 |
cython-testbed/pandas | pandas/tests/tseries/offsets/test_offsets.py | 1 | 133743 | from distutils.version import LooseVersion
from datetime import date, datetime, timedelta
import pytest
import pytz
from pandas.compat import range
from pandas import compat
import numpy as np
from pandas.compat.numpy import np_datetime64_compat
from pandas.core.series import Series
from pandas._libs.tslibs import conversion
from pandas._libs.tslibs.frequencies import (get_freq_code, get_freq_str,
INVALID_FREQ_ERR_MSG)
from pandas.tseries.frequencies import _offset_map, get_offset
from pandas.core.indexes.datetimes import (
_to_m8, DatetimeIndex, _daterange_cache)
from pandas.core.indexes.timedeltas import TimedeltaIndex
import pandas._libs.tslibs.offsets as liboffsets
from pandas._libs.tslibs.offsets import CacheableOffset
from pandas.tseries.offsets import (BDay, CDay, BQuarterEnd, BMonthEnd,
BusinessHour, WeekOfMonth, CBMonthEnd,
CustomBusinessHour,
CBMonthBegin, BYearEnd, MonthEnd,
MonthBegin, SemiMonthBegin, SemiMonthEnd,
BYearBegin, QuarterBegin, BQuarterBegin,
BMonthBegin, DateOffset, Week, YearBegin,
YearEnd, Day,
QuarterEnd, BusinessMonthEnd, FY5253,
Nano, Easter, FY5253Quarter,
LastWeekOfMonth, Tick, CalendarDay)
import pandas.tseries.offsets as offsets
from pandas.io.pickle import read_pickle
from pandas._libs.tslibs import timezones
from pandas._libs.tslib import NaT, Timestamp
from pandas._libs.tslibs.timedeltas import Timedelta
import pandas._libs.tslib as tslib
import pandas.util.testing as tm
from pandas.tseries.holiday import USFederalHolidayCalendar
from .common import assert_offset_equal, assert_onOffset
class WeekDay(object):
# TODO: Remove: This is not used outside of tests
MON = 0
TUE = 1
WED = 2
THU = 3
FRI = 4
SAT = 5
SUN = 6
####
# Misc function tests
####
def test_to_m8():
valb = datetime(2007, 10, 1)
valu = _to_m8(valb)
assert isinstance(valu, np.datetime64)
# assert valu == np.datetime64(datetime(2007,10,1))
# def test_datetime64_box():
# valu = np.datetime64(datetime(2007,10,1))
# valb = _dt_box(valu)
# assert type(valb) == datetime
# assert valb == datetime(2007,10,1)
#####
# DateOffset Tests
#####
class Base(object):
_offset = None
d = Timestamp(datetime(2008, 1, 2))
timezones = [None, 'UTC', 'Asia/Tokyo', 'US/Eastern',
'dateutil/Asia/Tokyo', 'dateutil/US/Pacific']
def _get_offset(self, klass, value=1, normalize=False):
# create instance from offset class
if klass is FY5253:
klass = klass(n=value, startingMonth=1, weekday=1,
variation='last', normalize=normalize)
elif klass is FY5253Quarter:
klass = klass(n=value, startingMonth=1, weekday=1,
qtr_with_extra_week=1, variation='last',
normalize=normalize)
elif klass is LastWeekOfMonth:
klass = klass(n=value, weekday=5, normalize=normalize)
elif klass is WeekOfMonth:
klass = klass(n=value, week=1, weekday=5, normalize=normalize)
elif klass is Week:
klass = klass(n=value, weekday=5, normalize=normalize)
elif klass is DateOffset:
klass = klass(days=value, normalize=normalize)
else:
try:
klass = klass(value, normalize=normalize)
except Exception:
klass = klass(normalize=normalize)
return klass
def test_apply_out_of_range(self, tz_naive_fixture):
tz = tz_naive_fixture
if self._offset is None:
return
# try to create an out-of-bounds result timestamp; if we can't create
# the offset skip
try:
if self._offset in (BusinessHour, CustomBusinessHour):
# Using 10000 in BusinessHour fails in tz check because of DST
# difference
offset = self._get_offset(self._offset, value=100000)
else:
offset = self._get_offset(self._offset, value=10000)
result = Timestamp('20080101') + offset
assert isinstance(result, datetime)
assert result.tzinfo is None
# Check tz is preserved
t = Timestamp('20080101', tz=tz)
result = t + offset
assert isinstance(result, datetime)
assert t.tzinfo == result.tzinfo
except tslib.OutOfBoundsDatetime:
raise
except (ValueError, KeyError):
# we are creating an invalid offset
# so ignore
pass
def test_offsets_compare_equal(self):
# root cause of GH#456: __ne__ was not implemented
if self._offset is None:
return
offset1 = self._offset()
offset2 = self._offset()
assert not offset1 != offset2
assert offset1 == offset2
def test_rsub(self):
if self._offset is None or not hasattr(self, "offset2"):
# i.e. skip for TestCommon and YQM subclasses that do not have
# offset2 attr
return
assert self.d - self.offset2 == (-self.offset2).apply(self.d)
def test_radd(self):
if self._offset is None or not hasattr(self, "offset2"):
# i.e. skip for TestCommon and YQM subclasses that do not have
# offset2 attr
return
assert self.d + self.offset2 == self.offset2 + self.d
def test_sub(self):
if self._offset is None or not hasattr(self, "offset2"):
# i.e. skip for TestCommon and YQM subclasses that do not have
# offset2 attr
return
off = self.offset2
with pytest.raises(Exception):
off - self.d
assert 2 * off - off == off
assert self.d - self.offset2 == self.d + self._offset(-2)
assert self.d - self.offset2 == self.d - (2 * off - off)
def testMult1(self):
if self._offset is None or not hasattr(self, "offset1"):
# i.e. skip for TestCommon and YQM subclasses that do not have
# offset1 attr
return
assert self.d + 10 * self.offset1 == self.d + self._offset(10)
assert self.d + 5 * self.offset1 == self.d + self._offset(5)
def testMult2(self):
if self._offset is None:
return
assert self.d + (-5 * self._offset(-10)) == self.d + self._offset(50)
assert self.d + (-3 * self._offset(-2)) == self.d + self._offset(6)
class TestCommon(Base):
# expected values created by Base._get_offset
# are applied to 2011/01/01 09:00 (Saturday)
# used for .apply and .rollforward
expecteds = {'Day': Timestamp('2011-01-02 09:00:00'),
'CalendarDay': Timestamp('2011-01-02 09:00:00'),
'DateOffset': Timestamp('2011-01-02 09:00:00'),
'BusinessDay': Timestamp('2011-01-03 09:00:00'),
'CustomBusinessDay': Timestamp('2011-01-03 09:00:00'),
'CustomBusinessMonthEnd': Timestamp('2011-01-31 09:00:00'),
'CustomBusinessMonthBegin': Timestamp('2011-01-03 09:00:00'),
'MonthBegin': Timestamp('2011-02-01 09:00:00'),
'BusinessMonthBegin': Timestamp('2011-01-03 09:00:00'),
'MonthEnd': Timestamp('2011-01-31 09:00:00'),
'SemiMonthEnd': Timestamp('2011-01-15 09:00:00'),
'SemiMonthBegin': Timestamp('2011-01-15 09:00:00'),
'BusinessMonthEnd': Timestamp('2011-01-31 09:00:00'),
'YearBegin': Timestamp('2012-01-01 09:00:00'),
'BYearBegin': Timestamp('2011-01-03 09:00:00'),
'YearEnd': Timestamp('2011-12-31 09:00:00'),
'BYearEnd': Timestamp('2011-12-30 09:00:00'),
'QuarterBegin': Timestamp('2011-03-01 09:00:00'),
'BQuarterBegin': Timestamp('2011-03-01 09:00:00'),
'QuarterEnd': Timestamp('2011-03-31 09:00:00'),
'BQuarterEnd': Timestamp('2011-03-31 09:00:00'),
'BusinessHour': Timestamp('2011-01-03 10:00:00'),
'CustomBusinessHour': Timestamp('2011-01-03 10:00:00'),
'WeekOfMonth': Timestamp('2011-01-08 09:00:00'),
'LastWeekOfMonth': Timestamp('2011-01-29 09:00:00'),
'FY5253Quarter': Timestamp('2011-01-25 09:00:00'),
'FY5253': Timestamp('2011-01-25 09:00:00'),
'Week': Timestamp('2011-01-08 09:00:00'),
'Easter': Timestamp('2011-04-24 09:00:00'),
'Hour': Timestamp('2011-01-01 10:00:00'),
'Minute': Timestamp('2011-01-01 09:01:00'),
'Second': Timestamp('2011-01-01 09:00:01'),
'Milli': Timestamp('2011-01-01 09:00:00.001000'),
'Micro': Timestamp('2011-01-01 09:00:00.000001'),
'Nano': Timestamp(np_datetime64_compat(
'2011-01-01T09:00:00.000000001Z'))}
def test_immutable(self, offset_types):
# GH#21341 check that __setattr__ raises
offset = self._get_offset(offset_types)
with pytest.raises(AttributeError):
offset.normalize = True
with pytest.raises(AttributeError):
offset.n = 91
def test_return_type(self, offset_types):
offset = self._get_offset(offset_types)
# make sure that we are returning a Timestamp
result = Timestamp('20080101') + offset
assert isinstance(result, Timestamp)
# make sure that we are returning NaT
assert NaT + offset is NaT
assert offset + NaT is NaT
assert NaT - offset is NaT
assert (-offset).apply(NaT) is NaT
def test_offset_n(self, offset_types):
offset = self._get_offset(offset_types)
assert offset.n == 1
neg_offset = offset * -1
assert neg_offset.n == -1
mul_offset = offset * 3
assert mul_offset.n == 3
def test_offset_freqstr(self, offset_types):
offset = self._get_offset(offset_types)
freqstr = offset.freqstr
if freqstr not in ('<Easter>',
"<DateOffset: days=1>",
'LWOM-SAT', ):
code = get_offset(freqstr)
assert offset.rule_code == code
def _check_offsetfunc_works(self, offset, funcname, dt, expected,
normalize=False):
if normalize and issubclass(offset, Tick):
# normalize=True disallowed for Tick subclasses GH#21427
return
offset_s = self._get_offset(offset, normalize=normalize)
func = getattr(offset_s, funcname)
result = func(dt)
assert isinstance(result, Timestamp)
assert result == expected
result = func(Timestamp(dt))
assert isinstance(result, Timestamp)
assert result == expected
# see gh-14101
exp_warning = None
ts = Timestamp(dt) + Nano(5)
if (offset_s.__class__.__name__ == 'DateOffset' and
(funcname == 'apply' or normalize) and
ts.nanosecond > 0):
exp_warning = UserWarning
# test nanosecond is preserved
with tm.assert_produces_warning(exp_warning,
check_stacklevel=False):
result = func(ts)
assert isinstance(result, Timestamp)
if normalize is False:
assert result == expected + Nano(5)
else:
assert result == expected
if isinstance(dt, np.datetime64):
# test tz when input is datetime or Timestamp
return
for tz in self.timezones:
expected_localize = expected.tz_localize(tz)
tz_obj = timezones.maybe_get_tz(tz)
dt_tz = conversion.localize_pydatetime(dt, tz_obj)
result = func(dt_tz)
assert isinstance(result, Timestamp)
assert result == expected_localize
result = func(Timestamp(dt, tz=tz))
assert isinstance(result, Timestamp)
assert result == expected_localize
# see gh-14101
exp_warning = None
ts = Timestamp(dt, tz=tz) + Nano(5)
if (offset_s.__class__.__name__ == 'DateOffset' and
(funcname == 'apply' or normalize) and
ts.nanosecond > 0):
exp_warning = UserWarning
# test nanosecond is preserved
with tm.assert_produces_warning(exp_warning,
check_stacklevel=False):
result = func(ts)
assert isinstance(result, Timestamp)
if normalize is False:
assert result == expected_localize + Nano(5)
else:
assert result == expected_localize
def test_apply(self, offset_types):
sdt = datetime(2011, 1, 1, 9, 0)
ndt = np_datetime64_compat('2011-01-01 09:00Z')
for dt in [sdt, ndt]:
expected = self.expecteds[offset_types.__name__]
self._check_offsetfunc_works(offset_types, 'apply', dt, expected)
expected = Timestamp(expected.date())
self._check_offsetfunc_works(offset_types, 'apply', dt, expected,
normalize=True)
def test_rollforward(self, offset_types):
expecteds = self.expecteds.copy()
# result will not be changed if the target is on the offset
no_changes = ['Day', 'MonthBegin', 'SemiMonthBegin', 'YearBegin',
'Week', 'Hour', 'Minute', 'Second', 'Milli', 'Micro',
'Nano', 'DateOffset', 'CalendarDay']
for n in no_changes:
expecteds[n] = Timestamp('2011/01/01 09:00')
expecteds['BusinessHour'] = Timestamp('2011-01-03 09:00:00')
expecteds['CustomBusinessHour'] = Timestamp('2011-01-03 09:00:00')
# but be changed when normalize=True
norm_expected = expecteds.copy()
for k in norm_expected:
norm_expected[k] = Timestamp(norm_expected[k].date())
normalized = {'Day': Timestamp('2011-01-02 00:00:00'),
'CalendarDay': Timestamp('2011-01-02 00:00:00'),
'DateOffset': Timestamp('2011-01-02 00:00:00'),
'MonthBegin': Timestamp('2011-02-01 00:00:00'),
'SemiMonthBegin': Timestamp('2011-01-15 00:00:00'),
'YearBegin': Timestamp('2012-01-01 00:00:00'),
'Week': Timestamp('2011-01-08 00:00:00'),
'Hour': Timestamp('2011-01-01 00:00:00'),
'Minute': Timestamp('2011-01-01 00:00:00'),
'Second': Timestamp('2011-01-01 00:00:00'),
'Milli': Timestamp('2011-01-01 00:00:00'),
'Micro': Timestamp('2011-01-01 00:00:00')}
norm_expected.update(normalized)
sdt = datetime(2011, 1, 1, 9, 0)
ndt = np_datetime64_compat('2011-01-01 09:00Z')
for dt in [sdt, ndt]:
expected = expecteds[offset_types.__name__]
self._check_offsetfunc_works(offset_types, 'rollforward', dt,
expected)
expected = norm_expected[offset_types.__name__]
self._check_offsetfunc_works(offset_types, 'rollforward', dt,
expected, normalize=True)
def test_rollback(self, offset_types):
expecteds = {'BusinessDay': Timestamp('2010-12-31 09:00:00'),
'CustomBusinessDay': Timestamp('2010-12-31 09:00:00'),
'CustomBusinessMonthEnd':
Timestamp('2010-12-31 09:00:00'),
'CustomBusinessMonthBegin':
Timestamp('2010-12-01 09:00:00'),
'BusinessMonthBegin': Timestamp('2010-12-01 09:00:00'),
'MonthEnd': Timestamp('2010-12-31 09:00:00'),
'SemiMonthEnd': Timestamp('2010-12-31 09:00:00'),
'BusinessMonthEnd': Timestamp('2010-12-31 09:00:00'),
'BYearBegin': Timestamp('2010-01-01 09:00:00'),
'YearEnd': Timestamp('2010-12-31 09:00:00'),
'BYearEnd': Timestamp('2010-12-31 09:00:00'),
'QuarterBegin': Timestamp('2010-12-01 09:00:00'),
'BQuarterBegin': Timestamp('2010-12-01 09:00:00'),
'QuarterEnd': Timestamp('2010-12-31 09:00:00'),
'BQuarterEnd': Timestamp('2010-12-31 09:00:00'),
'BusinessHour': Timestamp('2010-12-31 17:00:00'),
'CustomBusinessHour': Timestamp('2010-12-31 17:00:00'),
'WeekOfMonth': Timestamp('2010-12-11 09:00:00'),
'LastWeekOfMonth': Timestamp('2010-12-25 09:00:00'),
'FY5253Quarter': Timestamp('2010-10-26 09:00:00'),
'FY5253': Timestamp('2010-01-26 09:00:00'),
'Easter': Timestamp('2010-04-04 09:00:00')}
# result will not be changed if the target is on the offset
for n in ['Day', 'MonthBegin', 'SemiMonthBegin', 'YearBegin', 'Week',
'Hour', 'Minute', 'Second', 'Milli', 'Micro', 'Nano',
'DateOffset', 'CalendarDay']:
expecteds[n] = Timestamp('2011/01/01 09:00')
# but be changed when normalize=True
norm_expected = expecteds.copy()
for k in norm_expected:
norm_expected[k] = Timestamp(norm_expected[k].date())
normalized = {'Day': Timestamp('2010-12-31 00:00:00'),
'CalendarDay': Timestamp('2010-12-31 00:00:00'),
'DateOffset': Timestamp('2010-12-31 00:00:00'),
'MonthBegin': Timestamp('2010-12-01 00:00:00'),
'SemiMonthBegin': Timestamp('2010-12-15 00:00:00'),
'YearBegin': Timestamp('2010-01-01 00:00:00'),
'Week': Timestamp('2010-12-25 00:00:00'),
'Hour': Timestamp('2011-01-01 00:00:00'),
'Minute': Timestamp('2011-01-01 00:00:00'),
'Second': Timestamp('2011-01-01 00:00:00'),
'Milli': Timestamp('2011-01-01 00:00:00'),
'Micro': Timestamp('2011-01-01 00:00:00')}
norm_expected.update(normalized)
sdt = datetime(2011, 1, 1, 9, 0)
ndt = np_datetime64_compat('2011-01-01 09:00Z')
for dt in [sdt, ndt]:
expected = expecteds[offset_types.__name__]
self._check_offsetfunc_works(offset_types, 'rollback', dt,
expected)
expected = norm_expected[offset_types.__name__]
self._check_offsetfunc_works(offset_types, 'rollback', dt,
expected, normalize=True)
def test_onOffset(self, offset_types):
dt = self.expecteds[offset_types.__name__]
offset_s = self._get_offset(offset_types)
assert offset_s.onOffset(dt)
# when normalize=True, onOffset checks time is 00:00:00
if issubclass(offset_types, Tick):
# normalize=True disallowed for Tick subclasses GH#21427
return
offset_n = self._get_offset(offset_types, normalize=True)
assert not offset_n.onOffset(dt)
if offset_types in (BusinessHour, CustomBusinessHour):
# In default BusinessHour (9:00-17:00), normalized time
# cannot be in business hour range
return
date = datetime(dt.year, dt.month, dt.day)
assert offset_n.onOffset(date)
def test_add(self, offset_types, tz_naive_fixture):
tz = tz_naive_fixture
dt = datetime(2011, 1, 1, 9, 0)
offset_s = self._get_offset(offset_types)
expected = self.expecteds[offset_types.__name__]
result_dt = dt + offset_s
result_ts = Timestamp(dt) + offset_s
for result in [result_dt, result_ts]:
assert isinstance(result, Timestamp)
assert result == expected
expected_localize = expected.tz_localize(tz)
result = Timestamp(dt, tz=tz) + offset_s
assert isinstance(result, Timestamp)
assert result == expected_localize
# normalize=True, disallowed for Tick subclasses GH#21427
if issubclass(offset_types, Tick):
return
offset_s = self._get_offset(offset_types, normalize=True)
expected = Timestamp(expected.date())
result_dt = dt + offset_s
result_ts = Timestamp(dt) + offset_s
for result in [result_dt, result_ts]:
assert isinstance(result, Timestamp)
assert result == expected
expected_localize = expected.tz_localize(tz)
result = Timestamp(dt, tz=tz) + offset_s
assert isinstance(result, Timestamp)
assert result == expected_localize
def test_pickle_v0_15_2(self, datapath):
offsets = {'DateOffset': DateOffset(years=1),
'MonthBegin': MonthBegin(1),
'Day': Day(1),
'YearBegin': YearBegin(1),
'Week': Week(1)}
pickle_path = datapath('tseries', 'offsets', 'data',
'dateoffset_0_15_2.pickle')
# This code was executed once on v0.15.2 to generate the pickle:
# with open(pickle_path, 'wb') as f: pickle.dump(offsets, f)
#
tm.assert_dict_equal(offsets, read_pickle(pickle_path))
class TestDateOffset(Base):
def setup_method(self, method):
self.d = Timestamp(datetime(2008, 1, 2))
_offset_map.clear()
def test_repr(self):
repr(DateOffset())
repr(DateOffset(2))
repr(2 * DateOffset())
repr(2 * DateOffset(months=2))
def test_mul(self):
assert DateOffset(2) == 2 * DateOffset(1)
assert DateOffset(2) == DateOffset(1) * 2
def test_constructor(self):
assert ((self.d + DateOffset(months=2)) == datetime(2008, 3, 2))
assert ((self.d - DateOffset(months=2)) == datetime(2007, 11, 2))
assert ((self.d + DateOffset(2)) == datetime(2008, 1, 4))
assert not DateOffset(2).isAnchored()
assert DateOffset(1).isAnchored()
d = datetime(2008, 1, 31)
assert ((d + DateOffset(months=1)) == datetime(2008, 2, 29))
def test_copy(self):
assert (DateOffset(months=2).copy() == DateOffset(months=2))
def test_eq(self):
offset1 = DateOffset(days=1)
offset2 = DateOffset(days=365)
assert offset1 != offset2
class TestBusinessDay(Base):
_offset = BDay
def setup_method(self, method):
self.d = datetime(2008, 1, 1)
self.offset = BDay()
self.offset1 = self.offset
self.offset2 = BDay(2)
def test_different_normalize_equals(self):
# GH#21404 changed __eq__ to return False when `normalize` doesn't match
offset = self._offset()
offset2 = self._offset(normalize=True)
assert offset != offset2
def test_repr(self):
assert repr(self.offset) == '<BusinessDay>'
assert repr(self.offset2) == '<2 * BusinessDays>'
if compat.PY37:
expected = '<BusinessDay: offset=datetime.timedelta(days=1)>'
else:
expected = '<BusinessDay: offset=datetime.timedelta(1)>'
assert repr(self.offset + timedelta(1)) == expected
def test_with_offset(self):
offset = self.offset + timedelta(hours=2)
assert (self.d + offset) == datetime(2008, 1, 2, 2)
def test_eq(self):
assert self.offset2 == self.offset2
def test_mul(self):
pass
def test_hash(self):
assert hash(self.offset2) == hash(self.offset2)
def test_call(self):
assert self.offset2(self.d) == datetime(2008, 1, 3)
def testRollback1(self):
assert BDay(10).rollback(self.d) == self.d
def testRollback2(self):
assert (BDay(10).rollback(datetime(2008, 1, 5)) ==
datetime(2008, 1, 4))
def testRollforward1(self):
assert BDay(10).rollforward(self.d) == self.d
def testRollforward2(self):
assert (BDay(10).rollforward(datetime(2008, 1, 5)) ==
datetime(2008, 1, 7))
def test_roll_date_object(self):
offset = BDay()
dt = date(2012, 9, 15)
result = offset.rollback(dt)
assert result == datetime(2012, 9, 14)
result = offset.rollforward(dt)
assert result == datetime(2012, 9, 17)
offset = offsets.Day()
result = offset.rollback(dt)
assert result == datetime(2012, 9, 15)
result = offset.rollforward(dt)
assert result == datetime(2012, 9, 15)
def test_onOffset(self):
tests = [(BDay(), datetime(2008, 1, 1), True),
(BDay(), datetime(2008, 1, 5), False)]
for offset, d, expected in tests:
assert_onOffset(offset, d, expected)
apply_cases = []
apply_cases.append((BDay(), {
datetime(2008, 1, 1): datetime(2008, 1, 2),
datetime(2008, 1, 4): datetime(2008, 1, 7),
datetime(2008, 1, 5): datetime(2008, 1, 7),
datetime(2008, 1, 6): datetime(2008, 1, 7),
datetime(2008, 1, 7): datetime(2008, 1, 8)}))
apply_cases.append((2 * BDay(), {
datetime(2008, 1, 1): datetime(2008, 1, 3),
datetime(2008, 1, 4): datetime(2008, 1, 8),
datetime(2008, 1, 5): datetime(2008, 1, 8),
datetime(2008, 1, 6): datetime(2008, 1, 8),
datetime(2008, 1, 7): datetime(2008, 1, 9)}))
apply_cases.append((-BDay(), {
datetime(2008, 1, 1): datetime(2007, 12, 31),
datetime(2008, 1, 4): datetime(2008, 1, 3),
datetime(2008, 1, 5): datetime(2008, 1, 4),
datetime(2008, 1, 6): datetime(2008, 1, 4),
datetime(2008, 1, 7): datetime(2008, 1, 4),
datetime(2008, 1, 8): datetime(2008, 1, 7)}))
apply_cases.append((-2 * BDay(), {
datetime(2008, 1, 1): datetime(2007, 12, 28),
datetime(2008, 1, 4): datetime(2008, 1, 2),
datetime(2008, 1, 5): datetime(2008, 1, 3),
datetime(2008, 1, 6): datetime(2008, 1, 3),
datetime(2008, 1, 7): datetime(2008, 1, 3),
datetime(2008, 1, 8): datetime(2008, 1, 4),
datetime(2008, 1, 9): datetime(2008, 1, 7)}))
apply_cases.append((BDay(0), {
datetime(2008, 1, 1): datetime(2008, 1, 1),
datetime(2008, 1, 4): datetime(2008, 1, 4),
datetime(2008, 1, 5): datetime(2008, 1, 7),
datetime(2008, 1, 6): datetime(2008, 1, 7),
datetime(2008, 1, 7): datetime(2008, 1, 7)}))
@pytest.mark.parametrize('case', apply_cases)
def test_apply(self, case):
offset, cases = case
for base, expected in compat.iteritems(cases):
assert_offset_equal(offset, base, expected)
def test_apply_large_n(self):
dt = datetime(2012, 10, 23)
result = dt + BDay(10)
assert result == datetime(2012, 11, 6)
result = dt + BDay(100) - BDay(100)
assert result == dt
off = BDay() * 6
rs = datetime(2012, 1, 1) - off
xp = datetime(2011, 12, 23)
assert rs == xp
st = datetime(2011, 12, 18)
rs = st + off
xp = datetime(2011, 12, 26)
assert rs == xp
off = BDay() * 10
rs = datetime(2014, 1, 5) + off # see #5890
xp = datetime(2014, 1, 17)
assert rs == xp
def test_apply_corner(self):
pytest.raises(TypeError, BDay().apply, BMonthEnd())
class TestBusinessHour(Base):
_offset = BusinessHour
def setup_method(self, method):
self.d = datetime(2014, 7, 1, 10, 00)
self.offset1 = BusinessHour()
self.offset2 = BusinessHour(n=3)
self.offset3 = BusinessHour(n=-1)
self.offset4 = BusinessHour(n=-4)
from datetime import time as dt_time
self.offset5 = BusinessHour(start=dt_time(11, 0), end=dt_time(14, 30))
self.offset6 = BusinessHour(start='20:00', end='05:00')
self.offset7 = BusinessHour(n=-2, start=dt_time(21, 30),
end=dt_time(6, 30))
def test_constructor_errors(self):
from datetime import time as dt_time
with pytest.raises(ValueError):
BusinessHour(start=dt_time(11, 0, 5))
with pytest.raises(ValueError):
BusinessHour(start='AAA')
with pytest.raises(ValueError):
BusinessHour(start='14:00:05')
def test_different_normalize_equals(self):
# GH#21404 changed __eq__ to return False when `normalize` doesn't match
offset = self._offset()
offset2 = self._offset(normalize=True)
assert offset != offset2
def test_repr(self):
assert repr(self.offset1) == '<BusinessHour: BH=09:00-17:00>'
assert repr(self.offset2) == '<3 * BusinessHours: BH=09:00-17:00>'
assert repr(self.offset3) == '<-1 * BusinessHour: BH=09:00-17:00>'
assert repr(self.offset4) == '<-4 * BusinessHours: BH=09:00-17:00>'
assert repr(self.offset5) == '<BusinessHour: BH=11:00-14:30>'
assert repr(self.offset6) == '<BusinessHour: BH=20:00-05:00>'
assert repr(self.offset7) == '<-2 * BusinessHours: BH=21:30-06:30>'
def test_with_offset(self):
expected = Timestamp('2014-07-01 13:00')
assert self.d + BusinessHour() * 3 == expected
assert self.d + BusinessHour(n=3) == expected
def test_eq(self):
for offset in [self.offset1, self.offset2, self.offset3, self.offset4]:
assert offset == offset
assert BusinessHour() != BusinessHour(-1)
assert BusinessHour(start='09:00') == BusinessHour()
assert BusinessHour(start='09:00') != BusinessHour(start='09:01')
assert (BusinessHour(start='09:00', end='17:00') !=
BusinessHour(start='17:00', end='09:01'))
def test_hash(self):
for offset in [self.offset1, self.offset2, self.offset3, self.offset4]:
assert hash(offset) == hash(offset)
def test_call(self):
assert self.offset1(self.d) == datetime(2014, 7, 1, 11)
assert self.offset2(self.d) == datetime(2014, 7, 1, 13)
assert self.offset3(self.d) == datetime(2014, 6, 30, 17)
assert self.offset4(self.d) == datetime(2014, 6, 30, 14)
def test_sub(self):
# we have to override test_sub here because self.offset2 is not
# defined as self._offset(2)
off = self.offset2
with pytest.raises(Exception):
off - self.d
assert 2 * off - off == off
assert self.d - self.offset2 == self.d + self._offset(-3)
def testRollback1(self):
assert self.offset1.rollback(self.d) == self.d
assert self.offset2.rollback(self.d) == self.d
assert self.offset3.rollback(self.d) == self.d
assert self.offset4.rollback(self.d) == self.d
assert self.offset5.rollback(self.d) == datetime(2014, 6, 30, 14, 30)
assert self.offset6.rollback(self.d) == datetime(2014, 7, 1, 5, 0)
assert self.offset7.rollback(self.d) == datetime(2014, 7, 1, 6, 30)
d = datetime(2014, 7, 1, 0)
assert self.offset1.rollback(d) == datetime(2014, 6, 30, 17)
assert self.offset2.rollback(d) == datetime(2014, 6, 30, 17)
assert self.offset3.rollback(d) == datetime(2014, 6, 30, 17)
assert self.offset4.rollback(d) == datetime(2014, 6, 30, 17)
assert self.offset5.rollback(d) == datetime(2014, 6, 30, 14, 30)
assert self.offset6.rollback(d) == d
assert self.offset7.rollback(d) == d
assert self._offset(5).rollback(self.d) == self.d
def testRollback2(self):
assert (self._offset(-3).rollback(datetime(2014, 7, 5, 15, 0)) ==
datetime(2014, 7, 4, 17, 0))
def testRollforward1(self):
assert self.offset1.rollforward(self.d) == self.d
assert self.offset2.rollforward(self.d) == self.d
assert self.offset3.rollforward(self.d) == self.d
assert self.offset4.rollforward(self.d) == self.d
assert (self.offset5.rollforward(self.d) ==
datetime(2014, 7, 1, 11, 0))
assert (self.offset6.rollforward(self.d) ==
datetime(2014, 7, 1, 20, 0))
assert (self.offset7.rollforward(self.d) ==
datetime(2014, 7, 1, 21, 30))
d = datetime(2014, 7, 1, 0)
assert self.offset1.rollforward(d) == datetime(2014, 7, 1, 9)
assert self.offset2.rollforward(d) == datetime(2014, 7, 1, 9)
assert self.offset3.rollforward(d) == datetime(2014, 7, 1, 9)
assert self.offset4.rollforward(d) == datetime(2014, 7, 1, 9)
assert self.offset5.rollforward(d) == datetime(2014, 7, 1, 11)
assert self.offset6.rollforward(d) == d
assert self.offset7.rollforward(d) == d
assert self._offset(5).rollforward(self.d) == self.d
def testRollforward2(self):
assert (self._offset(-3).rollforward(datetime(2014, 7, 5, 16, 0)) ==
datetime(2014, 7, 7, 9))
def test_roll_date_object(self):
offset = BusinessHour()
dt = datetime(2014, 7, 6, 15, 0)
result = offset.rollback(dt)
assert result == datetime(2014, 7, 4, 17)
result = offset.rollforward(dt)
assert result == datetime(2014, 7, 7, 9)
normalize_cases = []
normalize_cases.append((BusinessHour(normalize=True), {
datetime(2014, 7, 1, 8): datetime(2014, 7, 1),
datetime(2014, 7, 1, 17): datetime(2014, 7, 2),
datetime(2014, 7, 1, 16): datetime(2014, 7, 2),
datetime(2014, 7, 1, 23): datetime(2014, 7, 2),
datetime(2014, 7, 1, 0): datetime(2014, 7, 1),
datetime(2014, 7, 4, 15): datetime(2014, 7, 4),
datetime(2014, 7, 4, 15, 59): datetime(2014, 7, 4),
datetime(2014, 7, 4, 16, 30): datetime(2014, 7, 7),
datetime(2014, 7, 5, 23): datetime(2014, 7, 7),
datetime(2014, 7, 6, 10): datetime(2014, 7, 7)}))
normalize_cases.append((BusinessHour(-1, normalize=True), {
datetime(2014, 7, 1, 8): datetime(2014, 6, 30),
datetime(2014, 7, 1, 17): datetime(2014, 7, 1),
datetime(2014, 7, 1, 16): datetime(2014, 7, 1),
datetime(2014, 7, 1, 10): datetime(2014, 6, 30),
datetime(2014, 7, 1, 0): datetime(2014, 6, 30),
datetime(2014, 7, 7, 10): datetime(2014, 7, 4),
datetime(2014, 7, 7, 10, 1): datetime(2014, 7, 7),
datetime(2014, 7, 5, 23): datetime(2014, 7, 4),
datetime(2014, 7, 6, 10): datetime(2014, 7, 4)}))
normalize_cases.append((BusinessHour(1, normalize=True, start='17:00',
end='04:00'), {
datetime(2014, 7, 1, 8): datetime(2014, 7, 1),
datetime(2014, 7, 1, 17): datetime(2014, 7, 1),
datetime(2014, 7, 1, 23): datetime(2014, 7, 2),
datetime(2014, 7, 2, 2): datetime(2014, 7, 2),
datetime(2014, 7, 2, 3): datetime(2014, 7, 2),
datetime(2014, 7, 4, 23): datetime(2014, 7, 5),
datetime(2014, 7, 5, 2): datetime(2014, 7, 5),
datetime(2014, 7, 7, 2): datetime(2014, 7, 7),
datetime(2014, 7, 7, 17): datetime(2014, 7, 7)}))
@pytest.mark.parametrize('case', normalize_cases)
def test_normalize(self, case):
offset, cases = case
for dt, expected in compat.iteritems(cases):
assert offset.apply(dt) == expected
on_offset_cases = []
on_offset_cases.append((BusinessHour(), {
datetime(2014, 7, 1, 9): True,
datetime(2014, 7, 1, 8, 59): False,
datetime(2014, 7, 1, 8): False,
datetime(2014, 7, 1, 17): True,
datetime(2014, 7, 1, 17, 1): False,
datetime(2014, 7, 1, 18): False,
datetime(2014, 7, 5, 9): False,
datetime(2014, 7, 6, 12): False}))
on_offset_cases.append((BusinessHour(start='10:00', end='15:00'), {
datetime(2014, 7, 1, 9): False,
datetime(2014, 7, 1, 10): True,
datetime(2014, 7, 1, 15): True,
datetime(2014, 7, 1, 15, 1): False,
datetime(2014, 7, 5, 12): False,
datetime(2014, 7, 6, 12): False}))
on_offset_cases.append((BusinessHour(start='19:00', end='05:00'), {
datetime(2014, 7, 1, 9, 0): False,
datetime(2014, 7, 1, 10, 0): False,
datetime(2014, 7, 1, 15): False,
datetime(2014, 7, 1, 15, 1): False,
datetime(2014, 7, 5, 12, 0): False,
datetime(2014, 7, 6, 12, 0): False,
datetime(2014, 7, 1, 19, 0): True,
datetime(2014, 7, 2, 0, 0): True,
datetime(2014, 7, 4, 23): True,
datetime(2014, 7, 5, 1): True,
datetime(2014, 7, 5, 5, 0): True,
datetime(2014, 7, 6, 23, 0): False,
datetime(2014, 7, 7, 3, 0): False}))
@pytest.mark.parametrize('case', on_offset_cases)
def test_onOffset(self, case):
offset, cases = case
for dt, expected in compat.iteritems(cases):
assert offset.onOffset(dt) == expected
opening_time_cases = []
# The opening time should be affected by the sign of n, not by the
# value of n or by `end`.
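# For example, in the first group below, 2014-07-01 11:00 maps to a next
# opening of 2014-07-02 09:00 and a previous opening of 2014-07-01 09:00
# for every listed offset.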
opening_time_cases.append(([BusinessHour(), BusinessHour(n=2),
BusinessHour(n=4), BusinessHour(end='10:00'),
BusinessHour(n=2, end='4:00'),
BusinessHour(n=4, end='15:00')], {
datetime(2014, 7, 1, 11): (datetime(2014, 7, 2, 9),
datetime(2014, 7, 1, 9)),
datetime(2014, 7, 1, 18): (datetime(2014, 7, 2, 9),
datetime(2014, 7, 1, 9)),
datetime(2014, 7, 1, 23): (datetime(2014, 7, 2, 9),
datetime(2014, 7, 1, 9)),
datetime(2014, 7, 2, 8): (datetime(2014, 7, 2, 9),
datetime(2014, 7, 1, 9)),
# if the timestamp is exactly at an opening time, the next opening
# time is that same timestamp
datetime(2014, 7, 2, 9): (datetime(2014, 7, 2, 9),
datetime(2014, 7, 2, 9)),
datetime(2014, 7, 2, 10): (datetime(2014, 7, 3, 9),
datetime(2014, 7, 2, 9)),
# 2014-07-05 is saturday
datetime(2014, 7, 5, 10): (datetime(2014, 7, 7, 9),
datetime(2014, 7, 4, 9)),
datetime(2014, 7, 4, 10): (datetime(2014, 7, 7, 9),
datetime(2014, 7, 4, 9)),
datetime(2014, 7, 4, 23): (datetime(2014, 7, 7, 9),
datetime(2014, 7, 4, 9)),
datetime(2014, 7, 6, 10): (datetime(2014, 7, 7, 9),
datetime(2014, 7, 4, 9)),
datetime(2014, 7, 7, 5): (datetime(2014, 7, 7, 9),
datetime(2014, 7, 4, 9)),
datetime(2014, 7, 7, 9, 1): (datetime(2014, 7, 8, 9),
datetime(2014, 7, 7, 9))}))
opening_time_cases.append(([BusinessHour(start='11:15'),
BusinessHour(n=2, start='11:15'),
BusinessHour(n=3, start='11:15'),
BusinessHour(start='11:15', end='10:00'),
BusinessHour(n=2, start='11:15', end='4:00'),
BusinessHour(n=3, start='11:15',
end='15:00')], {
datetime(2014, 7, 1, 11): (datetime(2014, 7, 1, 11, 15),
datetime(2014, 6, 30, 11, 15)),
datetime(2014, 7, 1, 18): (datetime(2014, 7, 2, 11, 15),
datetime(2014, 7, 1, 11, 15)),
datetime(2014, 7, 1, 23): (datetime(2014, 7, 2, 11, 15),
datetime(2014, 7, 1, 11, 15)),
datetime(2014, 7, 2, 8): (datetime(2014, 7, 2, 11, 15),
datetime(2014, 7, 1, 11, 15)),
datetime(2014, 7, 2, 9): (datetime(2014, 7, 2, 11, 15),
datetime(2014, 7, 1, 11, 15)),
datetime(2014, 7, 2, 10): (datetime(2014, 7, 2, 11, 15),
datetime(2014, 7, 1, 11, 15)),
datetime(2014, 7, 2, 11, 15): (datetime(2014, 7, 2, 11, 15),
datetime(2014, 7, 2, 11, 15)),
datetime(2014, 7, 2, 11, 15, 1): (datetime(2014, 7, 3, 11, 15),
datetime(2014, 7, 2, 11, 15)),
datetime(2014, 7, 5, 10): (datetime(2014, 7, 7, 11, 15),
datetime(2014, 7, 4, 11, 15)),
datetime(2014, 7, 4, 10): (datetime(2014, 7, 4, 11, 15),
datetime(2014, 7, 3, 11, 15)),
datetime(2014, 7, 4, 23): (datetime(2014, 7, 7, 11, 15),
datetime(2014, 7, 4, 11, 15)),
datetime(2014, 7, 6, 10): (datetime(2014, 7, 7, 11, 15),
datetime(2014, 7, 4, 11, 15)),
datetime(2014, 7, 7, 5): (datetime(2014, 7, 7, 11, 15),
datetime(2014, 7, 4, 11, 15)),
datetime(2014, 7, 7, 9, 1): (datetime(2014, 7, 7, 11, 15),
datetime(2014, 7, 4, 11, 15))}))
opening_time_cases.append(([BusinessHour(-1), BusinessHour(n=-2),
BusinessHour(n=-4),
BusinessHour(n=-1, end='10:00'),
BusinessHour(n=-2, end='4:00'),
BusinessHour(n=-4, end='15:00')], {
datetime(2014, 7, 1, 11): (datetime(2014, 7, 1, 9),
datetime(2014, 7, 2, 9)),
datetime(2014, 7, 1, 18): (datetime(2014, 7, 1, 9),
datetime(2014, 7, 2, 9)),
datetime(2014, 7, 1, 23): (datetime(2014, 7, 1, 9),
datetime(2014, 7, 2, 9)),
datetime(2014, 7, 2, 8): (datetime(2014, 7, 1, 9),
datetime(2014, 7, 2, 9)),
datetime(2014, 7, 2, 9): (datetime(2014, 7, 2, 9),
datetime(2014, 7, 2, 9)),
datetime(2014, 7, 2, 10): (datetime(2014, 7, 2, 9),
datetime(2014, 7, 3, 9)),
datetime(2014, 7, 5, 10): (datetime(2014, 7, 4, 9),
datetime(2014, 7, 7, 9)),
datetime(2014, 7, 4, 10): (datetime(2014, 7, 4, 9),
datetime(2014, 7, 7, 9)),
datetime(2014, 7, 4, 23): (datetime(2014, 7, 4, 9),
datetime(2014, 7, 7, 9)),
datetime(2014, 7, 6, 10): (datetime(2014, 7, 4, 9),
datetime(2014, 7, 7, 9)),
datetime(2014, 7, 7, 5): (datetime(2014, 7, 4, 9),
datetime(2014, 7, 7, 9)),
datetime(2014, 7, 7, 9): (datetime(2014, 7, 7, 9),
datetime(2014, 7, 7, 9)),
datetime(2014, 7, 7, 9, 1): (datetime(2014, 7, 7, 9),
datetime(2014, 7, 8, 9))}))
opening_time_cases.append(([BusinessHour(start='17:00', end='05:00'),
BusinessHour(n=3, start='17:00',
end='03:00')], {
datetime(2014, 7, 1, 11): (datetime(2014, 7, 1, 17),
datetime(2014, 6, 30, 17)),
datetime(2014, 7, 1, 18): (datetime(2014, 7, 2, 17),
datetime(2014, 7, 1, 17)),
datetime(2014, 7, 1, 23): (datetime(2014, 7, 2, 17),
datetime(2014, 7, 1, 17)),
datetime(2014, 7, 2, 8): (datetime(2014, 7, 2, 17),
datetime(2014, 7, 1, 17)),
datetime(2014, 7, 2, 9): (datetime(2014, 7, 2, 17),
datetime(2014, 7, 1, 17)),
datetime(2014, 7, 4, 17): (datetime(2014, 7, 4, 17),
datetime(2014, 7, 4, 17)),
datetime(2014, 7, 5, 10): (datetime(2014, 7, 7, 17),
datetime(2014, 7, 4, 17)),
datetime(2014, 7, 4, 10): (datetime(2014, 7, 4, 17),
datetime(2014, 7, 3, 17)),
datetime(2014, 7, 4, 23): (datetime(2014, 7, 7, 17),
datetime(2014, 7, 4, 17)),
datetime(2014, 7, 6, 10): (datetime(2014, 7, 7, 17),
datetime(2014, 7, 4, 17)),
datetime(2014, 7, 7, 5): (datetime(2014, 7, 7, 17),
datetime(2014, 7, 4, 17)),
datetime(2014, 7, 7, 17, 1): (datetime(2014, 7, 8, 17),
datetime(2014, 7, 7, 17)), }))
opening_time_cases.append(([BusinessHour(-1, start='17:00', end='05:00'),
BusinessHour(n=-2, start='17:00',
end='03:00')], {
datetime(2014, 7, 1, 11): (datetime(2014, 6, 30, 17),
datetime(2014, 7, 1, 17)),
datetime(2014, 7, 1, 18): (datetime(2014, 7, 1, 17),
datetime(2014, 7, 2, 17)),
datetime(2014, 7, 1, 23): (datetime(2014, 7, 1, 17),
datetime(2014, 7, 2, 17)),
datetime(2014, 7, 2, 8): (datetime(2014, 7, 1, 17),
datetime(2014, 7, 2, 17)),
datetime(2014, 7, 2, 9): (datetime(2014, 7, 1, 17),
datetime(2014, 7, 2, 17)),
datetime(2014, 7, 2, 16, 59): (datetime(2014, 7, 1, 17),
datetime(2014, 7, 2, 17)),
datetime(2014, 7, 5, 10): (datetime(2014, 7, 4, 17),
datetime(2014, 7, 7, 17)),
datetime(2014, 7, 4, 10): (datetime(2014, 7, 3, 17),
datetime(2014, 7, 4, 17)),
datetime(2014, 7, 4, 23): (datetime(2014, 7, 4, 17),
datetime(2014, 7, 7, 17)),
datetime(2014, 7, 6, 10): (datetime(2014, 7, 4, 17),
datetime(2014, 7, 7, 17)),
datetime(2014, 7, 7, 5): (datetime(2014, 7, 4, 17),
datetime(2014, 7, 7, 17)),
datetime(2014, 7, 7, 18): (datetime(2014, 7, 7, 17),
datetime(2014, 7, 8, 17))}))
@pytest.mark.parametrize('case', opening_time_cases)
def test_opening_time(self, case):
_offsets, cases = case
for offset in _offsets:
for dt, (exp_next, exp_prev) in compat.iteritems(cases):
assert offset._next_opening_time(dt) == exp_next
assert offset._prev_opening_time(dt) == exp_prev
apply_cases = []
apply_cases.append((BusinessHour(), {
datetime(2014, 7, 1, 11): datetime(2014, 7, 1, 12),
datetime(2014, 7, 1, 13): datetime(2014, 7, 1, 14),
datetime(2014, 7, 1, 15): datetime(2014, 7, 1, 16),
datetime(2014, 7, 1, 19): datetime(2014, 7, 2, 10),
datetime(2014, 7, 1, 16): datetime(2014, 7, 2, 9),
datetime(2014, 7, 1, 16, 30, 15): datetime(2014, 7, 2, 9, 30, 15),
datetime(2014, 7, 1, 17): datetime(2014, 7, 2, 10),
datetime(2014, 7, 2, 11): datetime(2014, 7, 2, 12),
# out of business hours
datetime(2014, 7, 2, 8): datetime(2014, 7, 2, 10),
datetime(2014, 7, 2, 19): datetime(2014, 7, 3, 10),
datetime(2014, 7, 2, 23): datetime(2014, 7, 3, 10),
datetime(2014, 7, 3, 0): datetime(2014, 7, 3, 10),
# saturday
datetime(2014, 7, 5, 15): datetime(2014, 7, 7, 10),
datetime(2014, 7, 4, 17): datetime(2014, 7, 7, 10),
datetime(2014, 7, 4, 16, 30): datetime(2014, 7, 7, 9, 30),
datetime(2014, 7, 4, 16, 30, 30): datetime(2014, 7, 7, 9, 30, 30)}))
apply_cases.append((BusinessHour(4), {
datetime(2014, 7, 1, 11): datetime(2014, 7, 1, 15),
datetime(2014, 7, 1, 13): datetime(2014, 7, 2, 9),
datetime(2014, 7, 1, 15): datetime(2014, 7, 2, 11),
datetime(2014, 7, 1, 16): datetime(2014, 7, 2, 12),
datetime(2014, 7, 1, 17): datetime(2014, 7, 2, 13),
datetime(2014, 7, 2, 11): datetime(2014, 7, 2, 15),
datetime(2014, 7, 2, 8): datetime(2014, 7, 2, 13),
datetime(2014, 7, 2, 19): datetime(2014, 7, 3, 13),
datetime(2014, 7, 2, 23): datetime(2014, 7, 3, 13),
datetime(2014, 7, 3, 0): datetime(2014, 7, 3, 13),
datetime(2014, 7, 5, 15): datetime(2014, 7, 7, 13),
datetime(2014, 7, 4, 17): datetime(2014, 7, 7, 13),
datetime(2014, 7, 4, 16, 30): datetime(2014, 7, 7, 12, 30),
datetime(2014, 7, 4, 16, 30, 30): datetime(2014, 7, 7, 12, 30, 30)}))
apply_cases.append((BusinessHour(-1), {
datetime(2014, 7, 1, 11): datetime(2014, 7, 1, 10),
datetime(2014, 7, 1, 13): datetime(2014, 7, 1, 12),
datetime(2014, 7, 1, 15): datetime(2014, 7, 1, 14),
datetime(2014, 7, 1, 16): datetime(2014, 7, 1, 15),
datetime(2014, 7, 1, 10): datetime(2014, 6, 30, 17),
datetime(2014, 7, 1, 16, 30, 15): datetime(2014, 7, 1, 15, 30, 15),
datetime(2014, 7, 1, 9, 30, 15): datetime(2014, 6, 30, 16, 30, 15),
datetime(2014, 7, 1, 17): datetime(2014, 7, 1, 16),
datetime(2014, 7, 1, 5): datetime(2014, 6, 30, 16),
datetime(2014, 7, 2, 11): datetime(2014, 7, 2, 10),
# out of business hours
datetime(2014, 7, 2, 8): datetime(2014, 7, 1, 16),
datetime(2014, 7, 2, 19): datetime(2014, 7, 2, 16),
datetime(2014, 7, 2, 23): datetime(2014, 7, 2, 16),
datetime(2014, 7, 3, 0): datetime(2014, 7, 2, 16),
# saturday
datetime(2014, 7, 5, 15): datetime(2014, 7, 4, 16),
datetime(2014, 7, 7, 9): datetime(2014, 7, 4, 16),
datetime(2014, 7, 7, 9, 30): datetime(2014, 7, 4, 16, 30),
datetime(2014, 7, 7, 9, 30, 30): datetime(2014, 7, 4, 16, 30, 30)}))
apply_cases.append((BusinessHour(-4), {
datetime(2014, 7, 1, 11): datetime(2014, 6, 30, 15),
datetime(2014, 7, 1, 13): datetime(2014, 6, 30, 17),
datetime(2014, 7, 1, 15): datetime(2014, 7, 1, 11),
datetime(2014, 7, 1, 16): datetime(2014, 7, 1, 12),
datetime(2014, 7, 1, 17): datetime(2014, 7, 1, 13),
datetime(2014, 7, 2, 11): datetime(2014, 7, 1, 15),
datetime(2014, 7, 2, 8): datetime(2014, 7, 1, 13),
datetime(2014, 7, 2, 19): datetime(2014, 7, 2, 13),
datetime(2014, 7, 2, 23): datetime(2014, 7, 2, 13),
datetime(2014, 7, 3, 0): datetime(2014, 7, 2, 13),
datetime(2014, 7, 5, 15): datetime(2014, 7, 4, 13),
datetime(2014, 7, 4, 18): datetime(2014, 7, 4, 13),
datetime(2014, 7, 7, 9, 30): datetime(2014, 7, 4, 13, 30),
datetime(2014, 7, 7, 9, 30, 30): datetime(2014, 7, 4, 13, 30, 30)}))
apply_cases.append((BusinessHour(start='13:00', end='16:00'), {
datetime(2014, 7, 1, 11): datetime(2014, 7, 1, 14),
datetime(2014, 7, 1, 13): datetime(2014, 7, 1, 14),
datetime(2014, 7, 1, 15): datetime(2014, 7, 2, 13),
datetime(2014, 7, 1, 19): datetime(2014, 7, 2, 14),
datetime(2014, 7, 1, 16): datetime(2014, 7, 2, 14),
datetime(2014, 7, 1, 15, 30, 15): datetime(2014, 7, 2, 13, 30, 15),
datetime(2014, 7, 5, 15): datetime(2014, 7, 7, 14),
datetime(2014, 7, 4, 17): datetime(2014, 7, 7, 14)}))
apply_cases.append((BusinessHour(n=2, start='13:00', end='16:00'), {
datetime(2014, 7, 1, 17): datetime(2014, 7, 2, 15),
datetime(2014, 7, 2, 14): datetime(2014, 7, 3, 13),
datetime(2014, 7, 2, 8): datetime(2014, 7, 2, 15),
datetime(2014, 7, 2, 19): datetime(2014, 7, 3, 15),
datetime(2014, 7, 2, 14, 30): datetime(2014, 7, 3, 13, 30),
datetime(2014, 7, 3, 0): datetime(2014, 7, 3, 15),
datetime(2014, 7, 5, 15): datetime(2014, 7, 7, 15),
datetime(2014, 7, 4, 17): datetime(2014, 7, 7, 15),
datetime(2014, 7, 4, 14, 30): datetime(2014, 7, 7, 13, 30),
datetime(2014, 7, 4, 14, 30, 30): datetime(2014, 7, 7, 13, 30, 30)}))
apply_cases.append((BusinessHour(n=-1, start='13:00', end='16:00'), {
datetime(2014, 7, 2, 11): datetime(2014, 7, 1, 15),
datetime(2014, 7, 2, 13): datetime(2014, 7, 1, 15),
datetime(2014, 7, 2, 14): datetime(2014, 7, 1, 16),
datetime(2014, 7, 2, 15): datetime(2014, 7, 2, 14),
datetime(2014, 7, 2, 19): datetime(2014, 7, 2, 15),
datetime(2014, 7, 2, 16): datetime(2014, 7, 2, 15),
datetime(2014, 7, 2, 13, 30, 15): datetime(2014, 7, 1, 15, 30, 15),
datetime(2014, 7, 5, 15): datetime(2014, 7, 4, 15),
datetime(2014, 7, 7, 11): datetime(2014, 7, 4, 15)}))
apply_cases.append((BusinessHour(n=-3, start='10:00', end='16:00'), {
datetime(2014, 7, 1, 17): datetime(2014, 7, 1, 13),
datetime(2014, 7, 2, 14): datetime(2014, 7, 2, 11),
datetime(2014, 7, 2, 8): datetime(2014, 7, 1, 13),
datetime(2014, 7, 2, 13): datetime(2014, 7, 1, 16),
datetime(2014, 7, 2, 19): datetime(2014, 7, 2, 13),
datetime(2014, 7, 2, 11, 30): datetime(2014, 7, 1, 14, 30),
datetime(2014, 7, 3, 0): datetime(2014, 7, 2, 13),
datetime(2014, 7, 4, 10): datetime(2014, 7, 3, 13),
datetime(2014, 7, 5, 15): datetime(2014, 7, 4, 13),
datetime(2014, 7, 4, 16): datetime(2014, 7, 4, 13),
datetime(2014, 7, 4, 12, 30): datetime(2014, 7, 3, 15, 30),
datetime(2014, 7, 4, 12, 30, 30): datetime(2014, 7, 3, 15, 30, 30)}))
apply_cases.append((BusinessHour(start='19:00', end='05:00'), {
datetime(2014, 7, 1, 17): datetime(2014, 7, 1, 20),
datetime(2014, 7, 2, 14): datetime(2014, 7, 2, 20),
datetime(2014, 7, 2, 8): datetime(2014, 7, 2, 20),
datetime(2014, 7, 2, 13): datetime(2014, 7, 2, 20),
datetime(2014, 7, 2, 19): datetime(2014, 7, 2, 20),
datetime(2014, 7, 2, 4, 30): datetime(2014, 7, 2, 19, 30),
datetime(2014, 7, 3, 0): datetime(2014, 7, 3, 1),
datetime(2014, 7, 4, 10): datetime(2014, 7, 4, 20),
datetime(2014, 7, 4, 23): datetime(2014, 7, 5, 0),
datetime(2014, 7, 5, 0): datetime(2014, 7, 5, 1),
datetime(2014, 7, 5, 4): datetime(2014, 7, 7, 19),
datetime(2014, 7, 5, 4, 30): datetime(2014, 7, 7, 19, 30),
datetime(2014, 7, 5, 4, 30, 30): datetime(2014, 7, 7, 19, 30, 30)}))
apply_cases.append((BusinessHour(n=-1, start='19:00', end='05:00'), {
datetime(2014, 7, 1, 17): datetime(2014, 7, 1, 4),
datetime(2014, 7, 2, 14): datetime(2014, 7, 2, 4),
datetime(2014, 7, 2, 8): datetime(2014, 7, 2, 4),
datetime(2014, 7, 2, 13): datetime(2014, 7, 2, 4),
datetime(2014, 7, 2, 20): datetime(2014, 7, 2, 5),
datetime(2014, 7, 2, 19): datetime(2014, 7, 2, 4),
datetime(2014, 7, 2, 19, 30): datetime(2014, 7, 2, 4, 30),
datetime(2014, 7, 3, 0): datetime(2014, 7, 2, 23),
datetime(2014, 7, 3, 6): datetime(2014, 7, 3, 4),
datetime(2014, 7, 4, 23): datetime(2014, 7, 4, 22),
datetime(2014, 7, 5, 0): datetime(2014, 7, 4, 23),
datetime(2014, 7, 5, 4): datetime(2014, 7, 5, 3),
datetime(2014, 7, 7, 19, 30): datetime(2014, 7, 5, 4, 30),
datetime(2014, 7, 7, 19, 30, 30): datetime(2014, 7, 5, 4, 30, 30)}))
@pytest.mark.parametrize('case', apply_cases)
def test_apply(self, case):
offset, cases = case
for base, expected in compat.iteritems(cases):
assert_offset_equal(offset, base, expected)
apply_large_n_cases = []
# A week later
apply_large_n_cases.append((BusinessHour(40), {
datetime(2014, 7, 1, 11): datetime(2014, 7, 8, 11),
datetime(2014, 7, 1, 13): datetime(2014, 7, 8, 13),
datetime(2014, 7, 1, 15): datetime(2014, 7, 8, 15),
datetime(2014, 7, 1, 16): datetime(2014, 7, 8, 16),
datetime(2014, 7, 1, 17): datetime(2014, 7, 9, 9),
datetime(2014, 7, 2, 11): datetime(2014, 7, 9, 11),
datetime(2014, 7, 2, 8): datetime(2014, 7, 9, 9),
datetime(2014, 7, 2, 19): datetime(2014, 7, 10, 9),
datetime(2014, 7, 2, 23): datetime(2014, 7, 10, 9),
datetime(2014, 7, 3, 0): datetime(2014, 7, 10, 9),
datetime(2014, 7, 5, 15): datetime(2014, 7, 14, 9),
datetime(2014, 7, 4, 18): datetime(2014, 7, 14, 9),
datetime(2014, 7, 7, 9, 30): datetime(2014, 7, 14, 9, 30),
datetime(2014, 7, 7, 9, 30, 30): datetime(2014, 7, 14, 9, 30, 30)}))
# 3 days and 1 hour before
apply_large_n_cases.append((BusinessHour(-25), {
datetime(2014, 7, 1, 11): datetime(2014, 6, 26, 10),
datetime(2014, 7, 1, 13): datetime(2014, 6, 26, 12),
datetime(2014, 7, 1, 9): datetime(2014, 6, 25, 16),
datetime(2014, 7, 1, 10): datetime(2014, 6, 25, 17),
datetime(2014, 7, 3, 11): datetime(2014, 6, 30, 10),
datetime(2014, 7, 3, 8): datetime(2014, 6, 27, 16),
datetime(2014, 7, 3, 19): datetime(2014, 6, 30, 16),
datetime(2014, 7, 3, 23): datetime(2014, 6, 30, 16),
datetime(2014, 7, 4, 9): datetime(2014, 6, 30, 16),
datetime(2014, 7, 5, 15): datetime(2014, 7, 1, 16),
datetime(2014, 7, 6, 18): datetime(2014, 7, 1, 16),
datetime(2014, 7, 7, 9, 30): datetime(2014, 7, 1, 16, 30),
datetime(2014, 7, 7, 10, 30, 30): datetime(2014, 7, 2, 9, 30, 30)}))
# 5 days and 3 hours later
apply_large_n_cases.append((BusinessHour(28, start='21:00', end='02:00'), {
datetime(2014, 7, 1, 11): datetime(2014, 7, 9, 0),
datetime(2014, 7, 1, 22): datetime(2014, 7, 9, 1),
datetime(2014, 7, 1, 23): datetime(2014, 7, 9, 21),
datetime(2014, 7, 2, 2): datetime(2014, 7, 10, 0),
datetime(2014, 7, 3, 21): datetime(2014, 7, 11, 0),
datetime(2014, 7, 4, 1): datetime(2014, 7, 11, 23),
datetime(2014, 7, 4, 2): datetime(2014, 7, 12, 0),
datetime(2014, 7, 4, 3): datetime(2014, 7, 12, 0),
datetime(2014, 7, 5, 1): datetime(2014, 7, 14, 23),
datetime(2014, 7, 5, 15): datetime(2014, 7, 15, 0),
datetime(2014, 7, 6, 18): datetime(2014, 7, 15, 0),
datetime(2014, 7, 7, 1): datetime(2014, 7, 15, 0),
datetime(2014, 7, 7, 23, 30): datetime(2014, 7, 15, 21, 30)}))
@pytest.mark.parametrize('case', apply_large_n_cases)
def test_apply_large_n(self, case):
offset, cases = case
for base, expected in compat.iteritems(cases):
assert_offset_equal(offset, base, expected)
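    # Note on the cases above: with the default 09:00-17:00 window a business
    # day holds 8 business hours, so BusinessHour(40) spans exactly five
    # business days ("a week later"), while BusinessHour(28) with the
    # five-hour 21:00-02:00 window spans five business days plus three hours.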
def test_apply_nanoseconds(self):
tests = []
tests.append((BusinessHour(),
{Timestamp('2014-07-04 15:00') + Nano(5): Timestamp(
'2014-07-04 16:00') + Nano(5),
Timestamp('2014-07-04 16:00') + Nano(5): Timestamp(
'2014-07-07 09:00') + Nano(5),
Timestamp('2014-07-04 16:00') - Nano(5): Timestamp(
'2014-07-04 17:00') - Nano(5)}))
tests.append((BusinessHour(-1),
{Timestamp('2014-07-04 15:00') + Nano(5): Timestamp(
'2014-07-04 14:00') + Nano(5),
Timestamp('2014-07-04 10:00') + Nano(5): Timestamp(
'2014-07-04 09:00') + Nano(5),
Timestamp('2014-07-04 10:00') - Nano(5): Timestamp(
'2014-07-03 17:00') - Nano(5), }))
for offset, cases in tests:
for base, expected in compat.iteritems(cases):
assert_offset_equal(offset, base, expected)
def test_datetimeindex(self):
idx1 = DatetimeIndex(start='2014-07-04 15:00', end='2014-07-08 10:00',
freq='BH')
idx2 = DatetimeIndex(start='2014-07-04 15:00', periods=12, freq='BH')
idx3 = DatetimeIndex(end='2014-07-08 10:00', periods=12, freq='BH')
expected = DatetimeIndex(['2014-07-04 15:00', '2014-07-04 16:00',
'2014-07-07 09:00',
'2014-07-07 10:00', '2014-07-07 11:00',
'2014-07-07 12:00',
'2014-07-07 13:00', '2014-07-07 14:00',
'2014-07-07 15:00',
'2014-07-07 16:00', '2014-07-08 09:00',
'2014-07-08 10:00'],
freq='BH')
for idx in [idx1, idx2, idx3]:
tm.assert_index_equal(idx, expected)
idx1 = DatetimeIndex(start='2014-07-04 15:45', end='2014-07-08 10:45',
freq='BH')
idx2 = DatetimeIndex(start='2014-07-04 15:45', periods=12, freq='BH')
idx3 = DatetimeIndex(end='2014-07-08 10:45', periods=12, freq='BH')
expected = DatetimeIndex(['2014-07-04 15:45', '2014-07-04 16:45',
'2014-07-07 09:45',
'2014-07-07 10:45', '2014-07-07 11:45',
'2014-07-07 12:45',
'2014-07-07 13:45', '2014-07-07 14:45',
'2014-07-07 15:45',
'2014-07-07 16:45', '2014-07-08 09:45',
'2014-07-08 10:45'],
freq='BH')
        # the explicitly constructed index above is superseded here, so the
        # loop below only checks that the three construction paths agree
        expected = idx1
for idx in [idx1, idx2, idx3]:
tm.assert_index_equal(idx, expected)
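# Illustrative sketch (not collected by pytest; relies on the BusinessHour
# and datetime imports used throughout this module): a timestamp outside the
# 09:00-17:00 window is first rolled to the next opening time and only then
# advanced by whole business hours, so Friday's 17:00 close and a late
# Wednesday evening both land on the next business day's 10:00.
def _businesshour_roll_sketch():
    bhour = BusinessHour()
    assert datetime(2014, 7, 4, 17) + bhour == datetime(2014, 7, 7, 10)
    assert datetime(2014, 7, 2, 19) + bhour == datetime(2014, 7, 3, 10)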
class TestCustomBusinessHour(Base):
_offset = CustomBusinessHour
holidays = ['2014-06-27', datetime(2014, 6, 30),
np.datetime64('2014-07-02')]
def setup_method(self, method):
# 2014 Calendar to check custom holidays
# Sun Mon Tue Wed Thu Fri Sat
# 6/22 23 24 25 26 27 28
# 29 30 7/1 2 3 4 5
# 6 7 8 9 10 11 12
self.d = datetime(2014, 7, 1, 10, 00)
self.offset1 = CustomBusinessHour(weekmask='Tue Wed Thu Fri')
self.offset2 = CustomBusinessHour(holidays=self.holidays)
def test_constructor_errors(self):
from datetime import time as dt_time
with pytest.raises(ValueError):
CustomBusinessHour(start=dt_time(11, 0, 5))
with pytest.raises(ValueError):
CustomBusinessHour(start='AAA')
with pytest.raises(ValueError):
CustomBusinessHour(start='14:00:05')
def test_different_normalize_equals(self):
        # GH#21404 changed __eq__ to return False when `normalize`
        # doesn't match
offset = self._offset()
offset2 = self._offset(normalize=True)
assert offset != offset2
def test_repr(self):
assert repr(self.offset1) == '<CustomBusinessHour: CBH=09:00-17:00>'
assert repr(self.offset2) == '<CustomBusinessHour: CBH=09:00-17:00>'
def test_with_offset(self):
expected = Timestamp('2014-07-01 13:00')
assert self.d + CustomBusinessHour() * 3 == expected
assert self.d + CustomBusinessHour(n=3) == expected
def test_eq(self):
for offset in [self.offset1, self.offset2]:
assert offset == offset
assert CustomBusinessHour() != CustomBusinessHour(-1)
assert (CustomBusinessHour(start='09:00') ==
CustomBusinessHour())
assert (CustomBusinessHour(start='09:00') !=
CustomBusinessHour(start='09:01'))
assert (CustomBusinessHour(start='09:00', end='17:00') !=
CustomBusinessHour(start='17:00', end='09:01'))
assert (CustomBusinessHour(weekmask='Tue Wed Thu Fri') !=
CustomBusinessHour(weekmask='Mon Tue Wed Thu Fri'))
assert (CustomBusinessHour(holidays=['2014-06-27']) !=
CustomBusinessHour(holidays=['2014-06-28']))
def test_sub(self):
# override the Base.test_sub implementation because self.offset2 is
# defined differently in this class than the test expects
pass
def test_hash(self):
assert hash(self.offset1) == hash(self.offset1)
assert hash(self.offset2) == hash(self.offset2)
def test_call(self):
assert self.offset1(self.d) == datetime(2014, 7, 1, 11)
assert self.offset2(self.d) == datetime(2014, 7, 1, 11)
def testRollback1(self):
assert self.offset1.rollback(self.d) == self.d
assert self.offset2.rollback(self.d) == self.d
d = datetime(2014, 7, 1, 0)
        # 2014/07/01 is a Tuesday; for offset1, Monday 06/30 is excluded by
        # the Tue-Fri weekmask
assert self.offset1.rollback(d) == datetime(2014, 6, 27, 17)
# 2014/6/30 and 2014/6/27 are holidays
assert self.offset2.rollback(d) == datetime(2014, 6, 26, 17)
def testRollback2(self):
assert (self._offset(-3).rollback(datetime(2014, 7, 5, 15, 0)) ==
datetime(2014, 7, 4, 17, 0))
def testRollforward1(self):
assert self.offset1.rollforward(self.d) == self.d
assert self.offset2.rollforward(self.d) == self.d
d = datetime(2014, 7, 1, 0)
assert self.offset1.rollforward(d) == datetime(2014, 7, 1, 9)
assert self.offset2.rollforward(d) == datetime(2014, 7, 1, 9)
def testRollforward2(self):
assert (self._offset(-3).rollforward(datetime(2014, 7, 5, 16, 0)) ==
datetime(2014, 7, 7, 9))
def test_roll_date_object(self):
offset = BusinessHour()
dt = datetime(2014, 7, 6, 15, 0)
result = offset.rollback(dt)
assert result == datetime(2014, 7, 4, 17)
result = offset.rollforward(dt)
assert result == datetime(2014, 7, 7, 9)
normalize_cases = []
normalize_cases.append((
CustomBusinessHour(normalize=True, holidays=holidays),
{datetime(2014, 7, 1, 8): datetime(2014, 7, 1),
datetime(2014, 7, 1, 17): datetime(2014, 7, 3),
datetime(2014, 7, 1, 16): datetime(2014, 7, 3),
datetime(2014, 7, 1, 23): datetime(2014, 7, 3),
datetime(2014, 7, 1, 0): datetime(2014, 7, 1),
datetime(2014, 7, 4, 15): datetime(2014, 7, 4),
datetime(2014, 7, 4, 15, 59): datetime(2014, 7, 4),
datetime(2014, 7, 4, 16, 30): datetime(2014, 7, 7),
datetime(2014, 7, 5, 23): datetime(2014, 7, 7),
datetime(2014, 7, 6, 10): datetime(2014, 7, 7)}))
normalize_cases.append((
CustomBusinessHour(-1, normalize=True, holidays=holidays),
{datetime(2014, 7, 1, 8): datetime(2014, 6, 26),
datetime(2014, 7, 1, 17): datetime(2014, 7, 1),
datetime(2014, 7, 1, 16): datetime(2014, 7, 1),
datetime(2014, 7, 1, 10): datetime(2014, 6, 26),
datetime(2014, 7, 1, 0): datetime(2014, 6, 26),
datetime(2014, 7, 7, 10): datetime(2014, 7, 4),
datetime(2014, 7, 7, 10, 1): datetime(2014, 7, 7),
datetime(2014, 7, 5, 23): datetime(2014, 7, 4),
datetime(2014, 7, 6, 10): datetime(2014, 7, 4)}))
normalize_cases.append((
CustomBusinessHour(1, normalize=True,
start='17:00', end='04:00',
holidays=holidays),
{datetime(2014, 7, 1, 8): datetime(2014, 7, 1),
datetime(2014, 7, 1, 17): datetime(2014, 7, 1),
datetime(2014, 7, 1, 23): datetime(2014, 7, 2),
datetime(2014, 7, 2, 2): datetime(2014, 7, 2),
datetime(2014, 7, 2, 3): datetime(2014, 7, 3),
datetime(2014, 7, 4, 23): datetime(2014, 7, 5),
datetime(2014, 7, 5, 2): datetime(2014, 7, 5),
datetime(2014, 7, 7, 2): datetime(2014, 7, 7),
datetime(2014, 7, 7, 17): datetime(2014, 7, 7)}))
@pytest.mark.parametrize('norm_cases', normalize_cases)
def test_normalize(self, norm_cases):
offset, cases = norm_cases
for dt, expected in compat.iteritems(cases):
assert offset.apply(dt) == expected
def test_onOffset(self):
tests = []
tests.append((CustomBusinessHour(start='10:00', end='15:00',
holidays=self.holidays),
{datetime(2014, 7, 1, 9): False,
datetime(2014, 7, 1, 10): True,
datetime(2014, 7, 1, 15): True,
datetime(2014, 7, 1, 15, 1): False,
datetime(2014, 7, 5, 12): False,
datetime(2014, 7, 6, 12): False}))
for offset, cases in tests:
for dt, expected in compat.iteritems(cases):
assert offset.onOffset(dt) == expected
apply_cases = []
apply_cases.append((
CustomBusinessHour(holidays=holidays),
{datetime(2014, 7, 1, 11): datetime(2014, 7, 1, 12),
datetime(2014, 7, 1, 13): datetime(2014, 7, 1, 14),
datetime(2014, 7, 1, 15): datetime(2014, 7, 1, 16),
datetime(2014, 7, 1, 19): datetime(2014, 7, 3, 10),
datetime(2014, 7, 1, 16): datetime(2014, 7, 3, 9),
datetime(2014, 7, 1, 16, 30, 15): datetime(2014, 7, 3, 9, 30, 15),
datetime(2014, 7, 1, 17): datetime(2014, 7, 3, 10),
datetime(2014, 7, 2, 11): datetime(2014, 7, 3, 10),
# out of business hours
datetime(2014, 7, 2, 8): datetime(2014, 7, 3, 10),
datetime(2014, 7, 2, 19): datetime(2014, 7, 3, 10),
datetime(2014, 7, 2, 23): datetime(2014, 7, 3, 10),
datetime(2014, 7, 3, 0): datetime(2014, 7, 3, 10),
# saturday
datetime(2014, 7, 5, 15): datetime(2014, 7, 7, 10),
datetime(2014, 7, 4, 17): datetime(2014, 7, 7, 10),
datetime(2014, 7, 4, 16, 30): datetime(2014, 7, 7, 9, 30),
datetime(2014, 7, 4, 16, 30, 30): datetime(2014, 7, 7, 9, 30, 30)}))
apply_cases.append((
CustomBusinessHour(4, holidays=holidays),
{datetime(2014, 7, 1, 11): datetime(2014, 7, 1, 15),
datetime(2014, 7, 1, 13): datetime(2014, 7, 3, 9),
datetime(2014, 7, 1, 15): datetime(2014, 7, 3, 11),
datetime(2014, 7, 1, 16): datetime(2014, 7, 3, 12),
datetime(2014, 7, 1, 17): datetime(2014, 7, 3, 13),
datetime(2014, 7, 2, 11): datetime(2014, 7, 3, 13),
datetime(2014, 7, 2, 8): datetime(2014, 7, 3, 13),
datetime(2014, 7, 2, 19): datetime(2014, 7, 3, 13),
datetime(2014, 7, 2, 23): datetime(2014, 7, 3, 13),
datetime(2014, 7, 3, 0): datetime(2014, 7, 3, 13),
datetime(2014, 7, 5, 15): datetime(2014, 7, 7, 13),
datetime(2014, 7, 4, 17): datetime(2014, 7, 7, 13),
datetime(2014, 7, 4, 16, 30): datetime(2014, 7, 7, 12, 30),
datetime(2014, 7, 4, 16, 30, 30): datetime(2014, 7, 7, 12, 30, 30)}))
@pytest.mark.parametrize('apply_case', apply_cases)
def test_apply(self, apply_case):
offset, cases = apply_case
for base, expected in compat.iteritems(cases):
assert_offset_equal(offset, base, expected)
nano_cases = []
nano_cases.append(
(CustomBusinessHour(holidays=holidays),
{Timestamp('2014-07-01 15:00') + Nano(5):
Timestamp('2014-07-01 16:00') + Nano(5),
Timestamp('2014-07-01 16:00') + Nano(5):
Timestamp('2014-07-03 09:00') + Nano(5),
Timestamp('2014-07-01 16:00') - Nano(5):
Timestamp('2014-07-01 17:00') - Nano(5)}))
nano_cases.append(
(CustomBusinessHour(-1, holidays=holidays),
{Timestamp('2014-07-01 15:00') + Nano(5):
Timestamp('2014-07-01 14:00') + Nano(5),
Timestamp('2014-07-01 10:00') + Nano(5):
Timestamp('2014-07-01 09:00') + Nano(5),
Timestamp('2014-07-01 10:00') - Nano(5):
Timestamp('2014-06-26 17:00') - Nano(5)}))
@pytest.mark.parametrize('nano_case', nano_cases)
def test_apply_nanoseconds(self, nano_case):
offset, cases = nano_case
for base, expected in compat.iteritems(cases):
assert_offset_equal(offset, base, expected)
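# Illustrative sketch (not collected by pytest; uses only the 2014-07-02
# entry from the holidays list above): with Wednesday 2014-07-02 excluded,
# one custom business hour added at Tuesday's 16:00 close skips straight to
# Thursday's 09:00 open.
def _custombusinesshour_holiday_sketch():
    cbh = CustomBusinessHour(holidays=['2014-07-02'])
    assert datetime(2014, 7, 1, 16) + cbh == datetime(2014, 7, 3, 9)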
class TestCustomBusinessDay(Base):
_offset = CDay
def setup_method(self, method):
self.d = datetime(2008, 1, 1)
self.nd = np_datetime64_compat('2008-01-01 00:00:00Z')
self.offset = CDay()
self.offset1 = self.offset
self.offset2 = CDay(2)
def test_different_normalize_equals(self):
        # GH#21404 changed __eq__ to return False when `normalize`
        # doesn't match
offset = self._offset()
offset2 = self._offset(normalize=True)
assert offset != offset2
def test_repr(self):
assert repr(self.offset) == '<CustomBusinessDay>'
assert repr(self.offset2) == '<2 * CustomBusinessDays>'
if compat.PY37:
expected = '<BusinessDay: offset=datetime.timedelta(days=1)>'
else:
expected = '<BusinessDay: offset=datetime.timedelta(1)>'
assert repr(self.offset + timedelta(1)) == expected
def test_with_offset(self):
offset = self.offset + timedelta(hours=2)
assert (self.d + offset) == datetime(2008, 1, 2, 2)
def test_eq(self):
assert self.offset2 == self.offset2
def test_mul(self):
pass
def test_hash(self):
assert hash(self.offset2) == hash(self.offset2)
def test_call(self):
assert self.offset2(self.d) == datetime(2008, 1, 3)
assert self.offset2(self.nd) == datetime(2008, 1, 3)
def testRollback1(self):
assert CDay(10).rollback(self.d) == self.d
def testRollback2(self):
assert (CDay(10).rollback(datetime(2008, 1, 5)) ==
datetime(2008, 1, 4))
def testRollforward1(self):
assert CDay(10).rollforward(self.d) == self.d
def testRollforward2(self):
assert (CDay(10).rollforward(datetime(2008, 1, 5)) ==
datetime(2008, 1, 7))
def test_roll_date_object(self):
offset = CDay()
dt = date(2012, 9, 15)
result = offset.rollback(dt)
assert result == datetime(2012, 9, 14)
result = offset.rollforward(dt)
assert result == datetime(2012, 9, 17)
offset = offsets.Day()
result = offset.rollback(dt)
assert result == datetime(2012, 9, 15)
result = offset.rollforward(dt)
assert result == datetime(2012, 9, 15)
on_offset_cases = [(CDay(), datetime(2008, 1, 1), True),
(CDay(), datetime(2008, 1, 5), False)]
@pytest.mark.parametrize('case', on_offset_cases)
def test_onOffset(self, case):
offset, d, expected = case
assert_onOffset(offset, d, expected)
apply_cases = []
apply_cases.append((CDay(), {
datetime(2008, 1, 1): datetime(2008, 1, 2),
datetime(2008, 1, 4): datetime(2008, 1, 7),
datetime(2008, 1, 5): datetime(2008, 1, 7),
datetime(2008, 1, 6): datetime(2008, 1, 7),
datetime(2008, 1, 7): datetime(2008, 1, 8)}))
apply_cases.append((2 * CDay(), {
datetime(2008, 1, 1): datetime(2008, 1, 3),
datetime(2008, 1, 4): datetime(2008, 1, 8),
datetime(2008, 1, 5): datetime(2008, 1, 8),
datetime(2008, 1, 6): datetime(2008, 1, 8),
datetime(2008, 1, 7): datetime(2008, 1, 9)}))
apply_cases.append((-CDay(), {
datetime(2008, 1, 1): datetime(2007, 12, 31),
datetime(2008, 1, 4): datetime(2008, 1, 3),
datetime(2008, 1, 5): datetime(2008, 1, 4),
datetime(2008, 1, 6): datetime(2008, 1, 4),
datetime(2008, 1, 7): datetime(2008, 1, 4),
datetime(2008, 1, 8): datetime(2008, 1, 7)}))
apply_cases.append((-2 * CDay(), {
datetime(2008, 1, 1): datetime(2007, 12, 28),
datetime(2008, 1, 4): datetime(2008, 1, 2),
datetime(2008, 1, 5): datetime(2008, 1, 3),
datetime(2008, 1, 6): datetime(2008, 1, 3),
datetime(2008, 1, 7): datetime(2008, 1, 3),
datetime(2008, 1, 8): datetime(2008, 1, 4),
datetime(2008, 1, 9): datetime(2008, 1, 7)}))
apply_cases.append((CDay(0), {
datetime(2008, 1, 1): datetime(2008, 1, 1),
datetime(2008, 1, 4): datetime(2008, 1, 4),
datetime(2008, 1, 5): datetime(2008, 1, 7),
datetime(2008, 1, 6): datetime(2008, 1, 7),
datetime(2008, 1, 7): datetime(2008, 1, 7)}))
@pytest.mark.parametrize('case', apply_cases)
def test_apply(self, case):
offset, cases = case
for base, expected in compat.iteritems(cases):
assert_offset_equal(offset, base, expected)
def test_apply_large_n(self):
dt = datetime(2012, 10, 23)
result = dt + CDay(10)
assert result == datetime(2012, 11, 6)
result = dt + CDay(100) - CDay(100)
assert result == dt
off = CDay() * 6
rs = datetime(2012, 1, 1) - off
xp = datetime(2011, 12, 23)
assert rs == xp
st = datetime(2011, 12, 18)
rs = st + off
xp = datetime(2011, 12, 26)
assert rs == xp
def test_apply_corner(self):
        with pytest.raises(Exception):
            CDay().apply(BMonthEnd())
def test_holidays(self):
# Define a TradingDay offset
holidays = ['2012-05-01', datetime(2013, 5, 1),
np.datetime64('2014-05-01')]
tday = CDay(holidays=holidays)
for year in range(2012, 2015):
dt = datetime(year, 4, 30)
xp = datetime(year, 5, 2)
rs = dt + tday
assert rs == xp
def test_weekmask(self):
weekmask_saudi = 'Sat Sun Mon Tue Wed' # Thu-Fri Weekend
weekmask_uae = '1111001' # Fri-Sat Weekend
weekmask_egypt = [1, 1, 1, 1, 0, 0, 1] # Fri-Sat Weekend
bday_saudi = CDay(weekmask=weekmask_saudi)
bday_uae = CDay(weekmask=weekmask_uae)
bday_egypt = CDay(weekmask=weekmask_egypt)
dt = datetime(2013, 5, 1)
xp_saudi = datetime(2013, 5, 4)
xp_uae = datetime(2013, 5, 2)
xp_egypt = datetime(2013, 5, 2)
assert xp_saudi == dt + bday_saudi
assert xp_uae == dt + bday_uae
assert xp_egypt == dt + bday_egypt
xp2 = datetime(2013, 5, 5)
assert xp2 == dt + 2 * bday_saudi
assert xp2 == dt + 2 * bday_uae
assert xp2 == dt + 2 * bday_egypt
def test_weekmask_and_holidays(self):
weekmask_egypt = 'Sun Mon Tue Wed Thu' # Fri-Sat Weekend
holidays = ['2012-05-01', datetime(2013, 5, 1),
np.datetime64('2014-05-01')]
bday_egypt = CDay(holidays=holidays, weekmask=weekmask_egypt)
dt = datetime(2013, 4, 30)
xp_egypt = datetime(2013, 5, 5)
assert xp_egypt == dt + 2 * bday_egypt
@pytest.mark.filterwarnings("ignore:Non:pandas.errors.PerformanceWarning")
def test_calendar(self):
calendar = USFederalHolidayCalendar()
dt = datetime(2014, 1, 17)
assert_offset_equal(CDay(calendar=calendar), dt, datetime(2014, 1, 21))
def test_roundtrip_pickle(self):
def _check_roundtrip(obj):
unpickled = tm.round_trip_pickle(obj)
assert unpickled == obj
_check_roundtrip(self.offset)
_check_roundtrip(self.offset2)
_check_roundtrip(self.offset * 2)
def test_pickle_compat_0_14_1(self, datapath):
        hdays = [datetime(2013, 1, 1) for _ in range(4)]
pth = datapath('tseries', 'offsets', 'data', 'cday-0.14.1.pickle')
cday0_14_1 = read_pickle(pth)
cday = CDay(holidays=hdays)
assert cday == cday0_14_1
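# Illustrative sketch (not collected by pytest): CDay accepts the same
# weekmask formats as numpy's busdaycalendar, so a Saturday-Wednesday working
# week rolls Wednesday 2013-05-01 over the Thu-Fri weekend to Saturday.
def _cday_weekmask_sketch():
    saudi_bday = CDay(weekmask='Sat Sun Mon Tue Wed')
    assert datetime(2013, 5, 1) + saudi_bday == datetime(2013, 5, 4)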
class CustomBusinessMonthBase(object):
def setup_method(self, method):
self.d = datetime(2008, 1, 1)
self.offset = self._offset()
self.offset1 = self.offset
self.offset2 = self._offset(2)
def test_eq(self):
assert self.offset2 == self.offset2
def test_mul(self):
pass
def test_hash(self):
assert hash(self.offset2) == hash(self.offset2)
def test_roundtrip_pickle(self):
def _check_roundtrip(obj):
unpickled = tm.round_trip_pickle(obj)
assert unpickled == obj
_check_roundtrip(self._offset())
_check_roundtrip(self._offset(2))
_check_roundtrip(self._offset() * 2)
def test_copy(self):
# GH 17452
off = self._offset(weekmask='Mon Wed Fri')
assert off == off.copy()
class TestCustomBusinessMonthEnd(CustomBusinessMonthBase, Base):
_offset = CBMonthEnd
def test_different_normalize_equals(self):
        # GH#21404 changed __eq__ to return False when `normalize`
        # doesn't match
offset = self._offset()
offset2 = self._offset(normalize=True)
assert offset != offset2
def test_repr(self):
assert repr(self.offset) == '<CustomBusinessMonthEnd>'
assert repr(self.offset2) == '<2 * CustomBusinessMonthEnds>'
def testCall(self):
assert self.offset2(self.d) == datetime(2008, 2, 29)
def testRollback1(self):
assert (CDay(10).rollback(datetime(2007, 12, 31)) ==
datetime(2007, 12, 31))
def testRollback2(self):
assert CBMonthEnd(10).rollback(self.d) == datetime(2007, 12, 31)
def testRollforward1(self):
assert CBMonthEnd(10).rollforward(self.d) == datetime(2008, 1, 31)
def test_roll_date_object(self):
offset = CBMonthEnd()
dt = date(2012, 9, 15)
result = offset.rollback(dt)
assert result == datetime(2012, 8, 31)
result = offset.rollforward(dt)
assert result == datetime(2012, 9, 28)
offset = offsets.Day()
result = offset.rollback(dt)
assert result == datetime(2012, 9, 15)
result = offset.rollforward(dt)
assert result == datetime(2012, 9, 15)
on_offset_cases = [(CBMonthEnd(), datetime(2008, 1, 31), True),
(CBMonthEnd(), datetime(2008, 1, 1), False)]
@pytest.mark.parametrize('case', on_offset_cases)
def test_onOffset(self, case):
offset, d, expected = case
assert_onOffset(offset, d, expected)
apply_cases = []
apply_cases.append((CBMonthEnd(), {
datetime(2008, 1, 1): datetime(2008, 1, 31),
datetime(2008, 2, 7): datetime(2008, 2, 29)}))
apply_cases.append((2 * CBMonthEnd(), {
datetime(2008, 1, 1): datetime(2008, 2, 29),
datetime(2008, 2, 7): datetime(2008, 3, 31)}))
apply_cases.append((-CBMonthEnd(), {
datetime(2008, 1, 1): datetime(2007, 12, 31),
datetime(2008, 2, 8): datetime(2008, 1, 31)}))
apply_cases.append((-2 * CBMonthEnd(), {
datetime(2008, 1, 1): datetime(2007, 11, 30),
datetime(2008, 2, 9): datetime(2007, 12, 31)}))
apply_cases.append((CBMonthEnd(0), {
datetime(2008, 1, 1): datetime(2008, 1, 31),
datetime(2008, 2, 7): datetime(2008, 2, 29)}))
@pytest.mark.parametrize('case', apply_cases)
def test_apply(self, case):
offset, cases = case
for base, expected in compat.iteritems(cases):
assert_offset_equal(offset, base, expected)
def test_apply_large_n(self):
dt = datetime(2012, 10, 23)
result = dt + CBMonthEnd(10)
assert result == datetime(2013, 7, 31)
result = dt + CDay(100) - CDay(100)
assert result == dt
off = CBMonthEnd() * 6
rs = datetime(2012, 1, 1) - off
xp = datetime(2011, 7, 29)
assert rs == xp
st = datetime(2011, 12, 18)
rs = st + off
xp = datetime(2012, 5, 31)
assert rs == xp
def test_holidays(self):
# Define a TradingDay offset
holidays = ['2012-01-31', datetime(2012, 2, 28),
np.datetime64('2012-02-29')]
bm_offset = CBMonthEnd(holidays=holidays)
dt = datetime(2012, 1, 1)
assert dt + bm_offset == datetime(2012, 1, 30)
assert dt + 2 * bm_offset == datetime(2012, 2, 27)
@pytest.mark.filterwarnings("ignore:Non:pandas.errors.PerformanceWarning")
def test_datetimeindex(self):
from pandas.tseries.holiday import USFederalHolidayCalendar
hcal = USFederalHolidayCalendar()
freq = CBMonthEnd(calendar=hcal)
assert (DatetimeIndex(start='20120101', end='20130101',
freq=freq).tolist()[0] == datetime(2012, 1, 31))
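# Illustrative sketch (not collected by pytest; reuses the holiday dates from
# test_holidays above as plain strings): with 2012-02-28 and 2012-02-29 both
# excluded, the second month-end step falls back to Monday 2012-02-27.
def _cbmonthend_holiday_sketch():
    cbm = CBMonthEnd(holidays=['2012-01-31', '2012-02-28', '2012-02-29'])
    assert datetime(2012, 1, 1) + 2 * cbm == datetime(2012, 2, 27)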
class TestCustomBusinessMonthBegin(CustomBusinessMonthBase, Base):
_offset = CBMonthBegin
def test_different_normalize_equals(self):
        # GH#21404 changed __eq__ to return False when `normalize`
        # doesn't match
offset = self._offset()
offset2 = self._offset(normalize=True)
assert offset != offset2
def test_repr(self):
assert repr(self.offset) == '<CustomBusinessMonthBegin>'
assert repr(self.offset2) == '<2 * CustomBusinessMonthBegins>'
def testCall(self):
assert self.offset2(self.d) == datetime(2008, 3, 3)
def testRollback1(self):
assert (CDay(10).rollback(datetime(2007, 12, 31)) ==
datetime(2007, 12, 31))
def testRollback2(self):
assert CBMonthBegin(10).rollback(self.d) == datetime(2008, 1, 1)
def testRollforward1(self):
assert CBMonthBegin(10).rollforward(self.d) == datetime(2008, 1, 1)
def test_roll_date_object(self):
offset = CBMonthBegin()
dt = date(2012, 9, 15)
result = offset.rollback(dt)
assert result == datetime(2012, 9, 3)
result = offset.rollforward(dt)
assert result == datetime(2012, 10, 1)
offset = offsets.Day()
result = offset.rollback(dt)
assert result == datetime(2012, 9, 15)
result = offset.rollforward(dt)
assert result == datetime(2012, 9, 15)
on_offset_cases = [(CBMonthBegin(), datetime(2008, 1, 1), True),
(CBMonthBegin(), datetime(2008, 1, 31), False)]
@pytest.mark.parametrize('case', on_offset_cases)
def test_onOffset(self, case):
offset, dt, expected = case
assert_onOffset(offset, dt, expected)
apply_cases = []
apply_cases.append((CBMonthBegin(), {
datetime(2008, 1, 1): datetime(2008, 2, 1),
datetime(2008, 2, 7): datetime(2008, 3, 3)}))
apply_cases.append((2 * CBMonthBegin(), {
datetime(2008, 1, 1): datetime(2008, 3, 3),
datetime(2008, 2, 7): datetime(2008, 4, 1)}))
apply_cases.append((-CBMonthBegin(), {
datetime(2008, 1, 1): datetime(2007, 12, 3),
datetime(2008, 2, 8): datetime(2008, 2, 1)}))
apply_cases.append((-2 * CBMonthBegin(), {
datetime(2008, 1, 1): datetime(2007, 11, 1),
datetime(2008, 2, 9): datetime(2008, 1, 1)}))
apply_cases.append((CBMonthBegin(0), {
datetime(2008, 1, 1): datetime(2008, 1, 1),
datetime(2008, 1, 7): datetime(2008, 2, 1)}))
@pytest.mark.parametrize('case', apply_cases)
def test_apply(self, case):
offset, cases = case
for base, expected in compat.iteritems(cases):
assert_offset_equal(offset, base, expected)
def test_apply_large_n(self):
dt = datetime(2012, 10, 23)
result = dt + CBMonthBegin(10)
assert result == datetime(2013, 8, 1)
result = dt + CDay(100) - CDay(100)
assert result == dt
off = CBMonthBegin() * 6
rs = datetime(2012, 1, 1) - off
xp = datetime(2011, 7, 1)
assert rs == xp
st = datetime(2011, 12, 18)
rs = st + off
xp = datetime(2012, 6, 1)
assert rs == xp
def test_holidays(self):
# Define a TradingDay offset
holidays = ['2012-02-01', datetime(2012, 2, 2),
np.datetime64('2012-03-01')]
bm_offset = CBMonthBegin(holidays=holidays)
dt = datetime(2012, 1, 1)
assert dt + bm_offset == datetime(2012, 1, 2)
assert dt + 2 * bm_offset == datetime(2012, 2, 3)
@pytest.mark.filterwarnings("ignore:Non:pandas.errors.PerformanceWarning")
def test_datetimeindex(self):
hcal = USFederalHolidayCalendar()
cbmb = CBMonthBegin(calendar=hcal)
        # 2012-01-01 falls on a Sunday and New Year's Day is observed on
        # Monday 2012-01-02, so the first generated date is Tuesday 2012-01-03
        assert (DatetimeIndex(start='20120101', end='20130101',
                              freq=cbmb).tolist()[0] == datetime(2012, 1, 3))
class TestWeek(Base):
_offset = Week
d = Timestamp(datetime(2008, 1, 2))
offset1 = _offset()
offset2 = _offset(2)
def test_repr(self):
assert repr(Week(weekday=0)) == "<Week: weekday=0>"
assert repr(Week(n=-1, weekday=0)) == "<-1 * Week: weekday=0>"
assert repr(Week(n=-2, weekday=0)) == "<-2 * Weeks: weekday=0>"
def test_corner(self):
with pytest.raises(ValueError):
Week(weekday=7)
with pytest.raises(ValueError, match="Day must be"):
Week(weekday=-1)
def test_isAnchored(self):
assert Week(weekday=0).isAnchored()
assert not Week().isAnchored()
assert not Week(2, weekday=2).isAnchored()
assert not Week(2).isAnchored()
offset_cases = []
# not business week
offset_cases.append((Week(), {
datetime(2008, 1, 1): datetime(2008, 1, 8),
datetime(2008, 1, 4): datetime(2008, 1, 11),
datetime(2008, 1, 5): datetime(2008, 1, 12),
datetime(2008, 1, 6): datetime(2008, 1, 13),
datetime(2008, 1, 7): datetime(2008, 1, 14)}))
# Mon
offset_cases.append((Week(weekday=0), {
datetime(2007, 12, 31): datetime(2008, 1, 7),
datetime(2008, 1, 4): datetime(2008, 1, 7),
datetime(2008, 1, 5): datetime(2008, 1, 7),
datetime(2008, 1, 6): datetime(2008, 1, 7),
datetime(2008, 1, 7): datetime(2008, 1, 14)}))
# n=0 -> roll forward. Mon
offset_cases.append((Week(0, weekday=0), {
datetime(2007, 12, 31): datetime(2007, 12, 31),
datetime(2008, 1, 4): datetime(2008, 1, 7),
datetime(2008, 1, 5): datetime(2008, 1, 7),
datetime(2008, 1, 6): datetime(2008, 1, 7),
datetime(2008, 1, 7): datetime(2008, 1, 7)}))
    # n=-2 -> roll back two Tuesdays. Tue
offset_cases.append((Week(-2, weekday=1), {
datetime(2010, 4, 6): datetime(2010, 3, 23),
datetime(2010, 4, 8): datetime(2010, 3, 30),
datetime(2010, 4, 5): datetime(2010, 3, 23)}))
@pytest.mark.parametrize('case', offset_cases)
def test_offset(self, case):
offset, cases = case
for base, expected in compat.iteritems(cases):
assert_offset_equal(offset, base, expected)
@pytest.mark.parametrize('weekday', range(7))
def test_onOffset(self, weekday):
offset = Week(weekday=weekday)
for day in range(1, 8):
date = datetime(2008, 1, day)
            # datetime(2008, 1, 1) is a Tuesday (weekday 1), so day % 7
            # matches each date's weekday for the first week of 2008
            expected = (day % 7 == weekday)
assert_onOffset(offset, date, expected)
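# Illustrative sketch (not collected by pytest): an unanchored Week always
# jumps seven calendar days, while Week(weekday=0) only advances to the
# following Monday.
def _week_anchor_sketch():
    assert datetime(2008, 1, 4) + Week() == datetime(2008, 1, 11)
    assert datetime(2008, 1, 4) + Week(weekday=0) == datetime(2008, 1, 7)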
class TestWeekOfMonth(Base):
_offset = WeekOfMonth
offset1 = _offset()
offset2 = _offset(2)
def test_constructor(self):
with pytest.raises(ValueError, match="^Week"):
WeekOfMonth(n=1, week=4, weekday=0)
with pytest.raises(ValueError, match="^Week"):
WeekOfMonth(n=1, week=-1, weekday=0)
with pytest.raises(ValueError, match="^Day"):
WeekOfMonth(n=1, week=0, weekday=-1)
with pytest.raises(ValueError, match="^Day"):
WeekOfMonth(n=1, week=0, weekday=-7)
def test_repr(self):
assert (repr(WeekOfMonth(weekday=1, week=2)) ==
"<WeekOfMonth: week=2, weekday=1>")
def test_offset(self):
date1 = datetime(2011, 1, 4) # 1st Tuesday of Month
date2 = datetime(2011, 1, 11) # 2nd Tuesday of Month
date3 = datetime(2011, 1, 18) # 3rd Tuesday of Month
date4 = datetime(2011, 1, 25) # 4th Tuesday of Month
        # each case is (n, week, weekday, dt, expected); see the loop below
test_cases = [
(-2, 2, 1, date1, datetime(2010, 11, 16)),
(-2, 2, 1, date2, datetime(2010, 11, 16)),
(-2, 2, 1, date3, datetime(2010, 11, 16)),
(-2, 2, 1, date4, datetime(2010, 12, 21)),
(-1, 2, 1, date1, datetime(2010, 12, 21)),
(-1, 2, 1, date2, datetime(2010, 12, 21)),
(-1, 2, 1, date3, datetime(2010, 12, 21)),
(-1, 2, 1, date4, datetime(2011, 1, 18)),
(0, 0, 1, date1, datetime(2011, 1, 4)),
(0, 0, 1, date2, datetime(2011, 2, 1)),
(0, 0, 1, date3, datetime(2011, 2, 1)),
(0, 0, 1, date4, datetime(2011, 2, 1)),
(0, 1, 1, date1, datetime(2011, 1, 11)),
(0, 1, 1, date2, datetime(2011, 1, 11)),
(0, 1, 1, date3, datetime(2011, 2, 8)),
(0, 1, 1, date4, datetime(2011, 2, 8)),
(0, 0, 1, date1, datetime(2011, 1, 4)),
(0, 1, 1, date2, datetime(2011, 1, 11)),
(0, 2, 1, date3, datetime(2011, 1, 18)),
(0, 3, 1, date4, datetime(2011, 1, 25)),
(1, 0, 0, date1, datetime(2011, 2, 7)),
(1, 0, 0, date2, datetime(2011, 2, 7)),
(1, 0, 0, date3, datetime(2011, 2, 7)),
(1, 0, 0, date4, datetime(2011, 2, 7)),
(1, 0, 1, date1, datetime(2011, 2, 1)),
(1, 0, 1, date2, datetime(2011, 2, 1)),
(1, 0, 1, date3, datetime(2011, 2, 1)),
(1, 0, 1, date4, datetime(2011, 2, 1)),
(1, 0, 2, date1, datetime(2011, 1, 5)),
(1, 0, 2, date2, datetime(2011, 2, 2)),
(1, 0, 2, date3, datetime(2011, 2, 2)),
(1, 0, 2, date4, datetime(2011, 2, 2)),
(1, 2, 1, date1, datetime(2011, 1, 18)),
(1, 2, 1, date2, datetime(2011, 1, 18)),
(1, 2, 1, date3, datetime(2011, 2, 15)),
(1, 2, 1, date4, datetime(2011, 2, 15)),
(2, 2, 1, date1, datetime(2011, 2, 15)),
(2, 2, 1, date2, datetime(2011, 2, 15)),
(2, 2, 1, date3, datetime(2011, 3, 15)),
(2, 2, 1, date4, datetime(2011, 3, 15))]
for n, week, weekday, dt, expected in test_cases:
offset = WeekOfMonth(n, week=week, weekday=weekday)
assert_offset_equal(offset, dt, expected)
# try subtracting
result = datetime(2011, 2, 1) - WeekOfMonth(week=1, weekday=2)
assert result == datetime(2011, 1, 12)
result = datetime(2011, 2, 3) - WeekOfMonth(week=0, weekday=2)
assert result == datetime(2011, 2, 2)
on_offset_cases = [(0, 0, datetime(2011, 2, 7), True),
(0, 0, datetime(2011, 2, 6), False),
(0, 0, datetime(2011, 2, 14), False),
(1, 0, datetime(2011, 2, 14), True),
(0, 1, datetime(2011, 2, 1), True),
(0, 1, datetime(2011, 2, 8), False)]
@pytest.mark.parametrize('case', on_offset_cases)
def test_onOffset(self, case):
week, weekday, dt, expected = case
offset = WeekOfMonth(week=week, weekday=weekday)
assert offset.onOffset(dt) == expected
class TestLastWeekOfMonth(Base):
_offset = LastWeekOfMonth
offset1 = _offset()
offset2 = _offset(2)
def test_constructor(self):
with pytest.raises(ValueError, match="^N cannot be 0"):
LastWeekOfMonth(n=0, weekday=1)
with pytest.raises(ValueError, match="^Day"):
LastWeekOfMonth(n=1, weekday=-1)
with pytest.raises(ValueError, match="^Day"):
LastWeekOfMonth(n=1, weekday=7)
def test_offset(self):
# Saturday
last_sat = datetime(2013, 8, 31)
next_sat = datetime(2013, 9, 28)
offset_sat = LastWeekOfMonth(n=1, weekday=5)
one_day_before = (last_sat + timedelta(days=-1))
assert one_day_before + offset_sat == last_sat
one_day_after = (last_sat + timedelta(days=+1))
assert one_day_after + offset_sat == next_sat
        # Test on that day
assert last_sat + offset_sat == next_sat
# Thursday
offset_thur = LastWeekOfMonth(n=1, weekday=3)
last_thurs = datetime(2013, 1, 31)
next_thurs = datetime(2013, 2, 28)
one_day_before = last_thurs + timedelta(days=-1)
assert one_day_before + offset_thur == last_thurs
one_day_after = last_thurs + timedelta(days=+1)
assert one_day_after + offset_thur == next_thurs
# Test on that day
assert last_thurs + offset_thur == next_thurs
three_before = last_thurs + timedelta(days=-3)
assert three_before + offset_thur == last_thurs
two_after = last_thurs + timedelta(days=+2)
assert two_after + offset_thur == next_thurs
offset_sunday = LastWeekOfMonth(n=1, weekday=WeekDay.SUN)
assert datetime(2013, 7, 31) + offset_sunday == datetime(2013, 8, 25)
on_offset_cases = [
(WeekDay.SUN, datetime(2013, 1, 27), True),
(WeekDay.SAT, datetime(2013, 3, 30), True),
(WeekDay.MON, datetime(2013, 2, 18), False), # Not the last Mon
(WeekDay.SUN, datetime(2013, 2, 25), False), # Not a SUN
(WeekDay.MON, datetime(2013, 2, 25), True),
(WeekDay.SAT, datetime(2013, 11, 30), True),
(WeekDay.SAT, datetime(2006, 8, 26), True),
(WeekDay.SAT, datetime(2007, 8, 25), True),
(WeekDay.SAT, datetime(2008, 8, 30), True),
(WeekDay.SAT, datetime(2009, 8, 29), True),
(WeekDay.SAT, datetime(2010, 8, 28), True),
(WeekDay.SAT, datetime(2011, 8, 27), True),
(WeekDay.SAT, datetime(2019, 8, 31), True)]
@pytest.mark.parametrize('case', on_offset_cases)
def test_onOffset(self, case):
weekday, dt, expected = case
offset = LastWeekOfMonth(weekday=weekday)
assert offset.onOffset(dt) == expected
class TestSemiMonthEnd(Base):
_offset = SemiMonthEnd
offset1 = _offset()
offset2 = _offset(2)
def test_offset_whole_year(self):
dates = (datetime(2007, 12, 31),
datetime(2008, 1, 15),
datetime(2008, 1, 31),
datetime(2008, 2, 15),
datetime(2008, 2, 29),
datetime(2008, 3, 15),
datetime(2008, 3, 31),
datetime(2008, 4, 15),
datetime(2008, 4, 30),
datetime(2008, 5, 15),
datetime(2008, 5, 31),
datetime(2008, 6, 15),
datetime(2008, 6, 30),
datetime(2008, 7, 15),
datetime(2008, 7, 31),
datetime(2008, 8, 15),
datetime(2008, 8, 31),
datetime(2008, 9, 15),
datetime(2008, 9, 30),
datetime(2008, 10, 15),
datetime(2008, 10, 31),
datetime(2008, 11, 15),
datetime(2008, 11, 30),
datetime(2008, 12, 15),
datetime(2008, 12, 31))
for base, exp_date in zip(dates[:-1], dates[1:]):
assert_offset_equal(SemiMonthEnd(), base, exp_date)
# ensure .apply_index works as expected
s = DatetimeIndex(dates[:-1])
result = SemiMonthEnd().apply_index(s)
exp = DatetimeIndex(dates[1:])
tm.assert_index_equal(result, exp)
# ensure generating a range with DatetimeIndex gives same result
result = DatetimeIndex(start=dates[0], end=dates[-1], freq='SM')
exp = DatetimeIndex(dates)
tm.assert_index_equal(result, exp)
offset_cases = []
offset_cases.append((SemiMonthEnd(), {
datetime(2008, 1, 1): datetime(2008, 1, 15),
datetime(2008, 1, 15): datetime(2008, 1, 31),
datetime(2008, 1, 31): datetime(2008, 2, 15),
datetime(2006, 12, 14): datetime(2006, 12, 15),
datetime(2006, 12, 29): datetime(2006, 12, 31),
datetime(2006, 12, 31): datetime(2007, 1, 15),
datetime(2007, 1, 1): datetime(2007, 1, 15),
datetime(2006, 12, 1): datetime(2006, 12, 15),
datetime(2006, 12, 15): datetime(2006, 12, 31)}))
offset_cases.append((SemiMonthEnd(day_of_month=20), {
datetime(2008, 1, 1): datetime(2008, 1, 20),
datetime(2008, 1, 15): datetime(2008, 1, 20),
datetime(2008, 1, 21): datetime(2008, 1, 31),
datetime(2008, 1, 31): datetime(2008, 2, 20),
datetime(2006, 12, 14): datetime(2006, 12, 20),
datetime(2006, 12, 29): datetime(2006, 12, 31),
datetime(2006, 12, 31): datetime(2007, 1, 20),
datetime(2007, 1, 1): datetime(2007, 1, 20),
datetime(2006, 12, 1): datetime(2006, 12, 20),
datetime(2006, 12, 15): datetime(2006, 12, 20)}))
offset_cases.append((SemiMonthEnd(0), {
datetime(2008, 1, 1): datetime(2008, 1, 15),
datetime(2008, 1, 16): datetime(2008, 1, 31),
datetime(2008, 1, 15): datetime(2008, 1, 15),
datetime(2008, 1, 31): datetime(2008, 1, 31),
datetime(2006, 12, 29): datetime(2006, 12, 31),
datetime(2006, 12, 31): datetime(2006, 12, 31),
datetime(2007, 1, 1): datetime(2007, 1, 15)}))
offset_cases.append((SemiMonthEnd(0, day_of_month=16), {
datetime(2008, 1, 1): datetime(2008, 1, 16),
datetime(2008, 1, 16): datetime(2008, 1, 16),
datetime(2008, 1, 15): datetime(2008, 1, 16),
datetime(2008, 1, 31): datetime(2008, 1, 31),
datetime(2006, 12, 29): datetime(2006, 12, 31),
datetime(2006, 12, 31): datetime(2006, 12, 31),
datetime(2007, 1, 1): datetime(2007, 1, 16)}))
offset_cases.append((SemiMonthEnd(2), {
datetime(2008, 1, 1): datetime(2008, 1, 31),
datetime(2008, 1, 31): datetime(2008, 2, 29),
datetime(2006, 12, 29): datetime(2007, 1, 15),
datetime(2006, 12, 31): datetime(2007, 1, 31),
datetime(2007, 1, 1): datetime(2007, 1, 31),
datetime(2007, 1, 16): datetime(2007, 2, 15),
datetime(2006, 11, 1): datetime(2006, 11, 30)}))
offset_cases.append((SemiMonthEnd(-1), {
datetime(2007, 1, 1): datetime(2006, 12, 31),
datetime(2008, 6, 30): datetime(2008, 6, 15),
datetime(2008, 12, 31): datetime(2008, 12, 15),
datetime(2006, 12, 29): datetime(2006, 12, 15),
        datetime(2006, 12, 30): datetime(2006, 12, 15)}))
offset_cases.append((SemiMonthEnd(-1, day_of_month=4), {
datetime(2007, 1, 1): datetime(2006, 12, 31),
datetime(2007, 1, 4): datetime(2006, 12, 31),
datetime(2008, 6, 30): datetime(2008, 6, 4),
datetime(2008, 12, 31): datetime(2008, 12, 4),
datetime(2006, 12, 5): datetime(2006, 12, 4),
        datetime(2006, 12, 30): datetime(2006, 12, 4)}))
offset_cases.append((SemiMonthEnd(-2), {
datetime(2007, 1, 1): datetime(2006, 12, 15),
datetime(2008, 6, 30): datetime(2008, 5, 31),
datetime(2008, 3, 15): datetime(2008, 2, 15),
datetime(2008, 12, 31): datetime(2008, 11, 30),
datetime(2006, 12, 29): datetime(2006, 11, 30),
        datetime(2006, 12, 14): datetime(2006, 11, 15)}))
@pytest.mark.parametrize('case', offset_cases)
def test_offset(self, case):
offset, cases = case
for base, expected in compat.iteritems(cases):
assert_offset_equal(offset, base, expected)
@pytest.mark.parametrize('case', offset_cases)
def test_apply_index(self, case):
offset, cases = case
s = DatetimeIndex(cases.keys())
result = offset.apply_index(s)
exp = DatetimeIndex(cases.values())
tm.assert_index_equal(result, exp)
on_offset_cases = [(datetime(2007, 12, 31), True),
(datetime(2007, 12, 15), True),
(datetime(2007, 12, 14), False),
(datetime(2007, 12, 1), False),
(datetime(2008, 2, 29), True)]
@pytest.mark.parametrize('case', on_offset_cases)
def test_onOffset(self, case):
dt, expected = case
assert_onOffset(SemiMonthEnd(), dt, expected)
@pytest.mark.parametrize('klass', [Series, DatetimeIndex])
def test_vectorized_offset_addition(self, klass):
s = klass([Timestamp('2000-01-15 00:15:00', tz='US/Central'),
Timestamp('2000-02-15', tz='US/Central')], name='a')
result = s + SemiMonthEnd()
result2 = SemiMonthEnd() + s
exp = klass([Timestamp('2000-01-31 00:15:00', tz='US/Central'),
Timestamp('2000-02-29', tz='US/Central')], name='a')
tm.assert_equal(result, exp)
tm.assert_equal(result2, exp)
s = klass([Timestamp('2000-01-01 00:15:00', tz='US/Central'),
Timestamp('2000-02-01', tz='US/Central')], name='a')
result = s + SemiMonthEnd()
result2 = SemiMonthEnd() + s
exp = klass([Timestamp('2000-01-15 00:15:00', tz='US/Central'),
Timestamp('2000-02-15', tz='US/Central')], name='a')
tm.assert_equal(result, exp)
tm.assert_equal(result2, exp)
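# Illustrative sketch (not collected by pytest): SemiMonthEnd alternates
# between the day_of_month anchor (the 15th by default) and the calendar
# month end.
def _semimonthend_sketch():
    assert datetime(2008, 1, 1) + SemiMonthEnd() == datetime(2008, 1, 15)
    assert datetime(2008, 1, 15) + SemiMonthEnd() == datetime(2008, 1, 31)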
class TestSemiMonthBegin(Base):
_offset = SemiMonthBegin
offset1 = _offset()
offset2 = _offset(2)
def test_offset_whole_year(self):
dates = (datetime(2007, 12, 15),
datetime(2008, 1, 1),
datetime(2008, 1, 15),
datetime(2008, 2, 1),
datetime(2008, 2, 15),
datetime(2008, 3, 1),
datetime(2008, 3, 15),
datetime(2008, 4, 1),
datetime(2008, 4, 15),
datetime(2008, 5, 1),
datetime(2008, 5, 15),
datetime(2008, 6, 1),
datetime(2008, 6, 15),
datetime(2008, 7, 1),
datetime(2008, 7, 15),
datetime(2008, 8, 1),
datetime(2008, 8, 15),
datetime(2008, 9, 1),
datetime(2008, 9, 15),
datetime(2008, 10, 1),
datetime(2008, 10, 15),
datetime(2008, 11, 1),
datetime(2008, 11, 15),
datetime(2008, 12, 1),
datetime(2008, 12, 15))
for base, exp_date in zip(dates[:-1], dates[1:]):
assert_offset_equal(SemiMonthBegin(), base, exp_date)
# ensure .apply_index works as expected
s = DatetimeIndex(dates[:-1])
result = SemiMonthBegin().apply_index(s)
exp = DatetimeIndex(dates[1:])
tm.assert_index_equal(result, exp)
# ensure generating a range with DatetimeIndex gives same result
result = DatetimeIndex(start=dates[0], end=dates[-1], freq='SMS')
exp = DatetimeIndex(dates)
tm.assert_index_equal(result, exp)
offset_cases = []
offset_cases.append((SemiMonthBegin(), {
datetime(2008, 1, 1): datetime(2008, 1, 15),
datetime(2008, 1, 15): datetime(2008, 2, 1),
datetime(2008, 1, 31): datetime(2008, 2, 1),
datetime(2006, 12, 14): datetime(2006, 12, 15),
datetime(2006, 12, 29): datetime(2007, 1, 1),
datetime(2006, 12, 31): datetime(2007, 1, 1),
datetime(2007, 1, 1): datetime(2007, 1, 15),
datetime(2006, 12, 1): datetime(2006, 12, 15),
datetime(2006, 12, 15): datetime(2007, 1, 1)}))
offset_cases.append((SemiMonthBegin(day_of_month=20), {
datetime(2008, 1, 1): datetime(2008, 1, 20),
datetime(2008, 1, 15): datetime(2008, 1, 20),
datetime(2008, 1, 21): datetime(2008, 2, 1),
datetime(2008, 1, 31): datetime(2008, 2, 1),
datetime(2006, 12, 14): datetime(2006, 12, 20),
datetime(2006, 12, 29): datetime(2007, 1, 1),
datetime(2006, 12, 31): datetime(2007, 1, 1),
datetime(2007, 1, 1): datetime(2007, 1, 20),
datetime(2006, 12, 1): datetime(2006, 12, 20),
datetime(2006, 12, 15): datetime(2006, 12, 20)}))
offset_cases.append((SemiMonthBegin(0), {
datetime(2008, 1, 1): datetime(2008, 1, 1),
datetime(2008, 1, 16): datetime(2008, 2, 1),
datetime(2008, 1, 15): datetime(2008, 1, 15),
datetime(2008, 1, 31): datetime(2008, 2, 1),
datetime(2006, 12, 29): datetime(2007, 1, 1),
datetime(2006, 12, 2): datetime(2006, 12, 15),
datetime(2007, 1, 1): datetime(2007, 1, 1)}))
offset_cases.append((SemiMonthBegin(0, day_of_month=16), {
datetime(2008, 1, 1): datetime(2008, 1, 1),
datetime(2008, 1, 16): datetime(2008, 1, 16),
datetime(2008, 1, 15): datetime(2008, 1, 16),
datetime(2008, 1, 31): datetime(2008, 2, 1),
datetime(2006, 12, 29): datetime(2007, 1, 1),
datetime(2006, 12, 31): datetime(2007, 1, 1),
datetime(2007, 1, 5): datetime(2007, 1, 16),
datetime(2007, 1, 1): datetime(2007, 1, 1)}))
offset_cases.append((SemiMonthBegin(2), {
datetime(2008, 1, 1): datetime(2008, 2, 1),
datetime(2008, 1, 31): datetime(2008, 2, 15),
datetime(2006, 12, 1): datetime(2007, 1, 1),
datetime(2006, 12, 29): datetime(2007, 1, 15),
datetime(2006, 12, 15): datetime(2007, 1, 15),
datetime(2007, 1, 1): datetime(2007, 2, 1),
datetime(2007, 1, 16): datetime(2007, 2, 15),
datetime(2006, 11, 1): datetime(2006, 12, 1)}))
offset_cases.append((SemiMonthBegin(-1), {
datetime(2007, 1, 1): datetime(2006, 12, 15),
datetime(2008, 6, 30): datetime(2008, 6, 15),
datetime(2008, 6, 14): datetime(2008, 6, 1),
datetime(2008, 12, 31): datetime(2008, 12, 15),
datetime(2006, 12, 29): datetime(2006, 12, 15),
        datetime(2006, 12, 15): datetime(2006, 12, 1)}))
offset_cases.append((SemiMonthBegin(-1, day_of_month=4), {
datetime(2007, 1, 1): datetime(2006, 12, 4),
datetime(2007, 1, 4): datetime(2007, 1, 1),
datetime(2008, 6, 30): datetime(2008, 6, 4),
datetime(2008, 12, 31): datetime(2008, 12, 4),
datetime(2006, 12, 5): datetime(2006, 12, 4),
datetime(2006, 12, 30): datetime(2006, 12, 4),
        datetime(2006, 12, 2): datetime(2006, 12, 1)}))
offset_cases.append((SemiMonthBegin(-2), {
datetime(2007, 1, 1): datetime(2006, 12, 1),
datetime(2008, 6, 30): datetime(2008, 6, 1),
datetime(2008, 6, 14): datetime(2008, 5, 15),
datetime(2008, 12, 31): datetime(2008, 12, 1),
datetime(2006, 12, 29): datetime(2006, 12, 1),
        datetime(2006, 12, 15): datetime(2006, 11, 15)}))
@pytest.mark.parametrize('case', offset_cases)
def test_offset(self, case):
offset, cases = case
for base, expected in compat.iteritems(cases):
assert_offset_equal(offset, base, expected)
@pytest.mark.parametrize('case', offset_cases)
def test_apply_index(self, case):
offset, cases = case
s = DatetimeIndex(cases.keys())
result = offset.apply_index(s)
exp = DatetimeIndex(cases.values())
tm.assert_index_equal(result, exp)
on_offset_cases = [(datetime(2007, 12, 1), True),
(datetime(2007, 12, 15), True),
(datetime(2007, 12, 14), False),
(datetime(2007, 12, 31), False),
(datetime(2008, 2, 15), True)]
@pytest.mark.parametrize('case', on_offset_cases)
def test_onOffset(self, case):
dt, expected = case
assert_onOffset(SemiMonthBegin(), dt, expected)
@pytest.mark.parametrize('klass', [Series, DatetimeIndex])
def test_vectorized_offset_addition(self, klass):
s = klass([Timestamp('2000-01-15 00:15:00', tz='US/Central'),
Timestamp('2000-02-15', tz='US/Central')], name='a')
result = s + SemiMonthBegin()
result2 = SemiMonthBegin() + s
exp = klass([Timestamp('2000-02-01 00:15:00', tz='US/Central'),
Timestamp('2000-03-01', tz='US/Central')], name='a')
tm.assert_equal(result, exp)
tm.assert_equal(result2, exp)
s = klass([Timestamp('2000-01-01 00:15:00', tz='US/Central'),
Timestamp('2000-02-01', tz='US/Central')], name='a')
result = s + SemiMonthBegin()
result2 = SemiMonthBegin() + s
exp = klass([Timestamp('2000-01-15 00:15:00', tz='US/Central'),
Timestamp('2000-02-15', tz='US/Central')], name='a')
tm.assert_equal(result, exp)
tm.assert_equal(result2, exp)
def test_Easter():
assert_offset_equal(Easter(), datetime(2010, 1, 1), datetime(2010, 4, 4))
assert_offset_equal(Easter(), datetime(2010, 4, 5), datetime(2011, 4, 24))
assert_offset_equal(Easter(2), datetime(2010, 1, 1), datetime(2011, 4, 24))
assert_offset_equal(Easter(), datetime(2010, 4, 4), datetime(2011, 4, 24))
assert_offset_equal(Easter(2), datetime(2010, 4, 4), datetime(2012, 4, 8))
assert_offset_equal(-Easter(), datetime(2011, 1, 1), datetime(2010, 4, 4))
assert_offset_equal(-Easter(), datetime(2010, 4, 5), datetime(2010, 4, 4))
assert_offset_equal(-Easter(2),
datetime(2011, 1, 1),
datetime(2009, 4, 12))
assert_offset_equal(-Easter(), datetime(2010, 4, 4), datetime(2009, 4, 12))
assert_offset_equal(-Easter(2),
datetime(2010, 4, 4),
datetime(2008, 3, 23))
class TestOffsetNames(object):
def test_get_offset_name(self):
assert BDay().freqstr == 'B'
assert BDay(2).freqstr == '2B'
assert BMonthEnd().freqstr == 'BM'
assert Week(weekday=0).freqstr == 'W-MON'
assert Week(weekday=1).freqstr == 'W-TUE'
assert Week(weekday=2).freqstr == 'W-WED'
assert Week(weekday=3).freqstr == 'W-THU'
assert Week(weekday=4).freqstr == 'W-FRI'
assert LastWeekOfMonth(weekday=WeekDay.SUN).freqstr == "LWOM-SUN"
def test_get_offset():
with pytest.raises(ValueError, match=INVALID_FREQ_ERR_MSG):
get_offset('gibberish')
with pytest.raises(ValueError, match=INVALID_FREQ_ERR_MSG):
get_offset('QS-JAN-B')
pairs = [
('B', BDay()), ('b', BDay()), ('bm', BMonthEnd()),
('Bm', BMonthEnd()), ('W-MON', Week(weekday=0)),
('W-TUE', Week(weekday=1)), ('W-WED', Week(weekday=2)),
('W-THU', Week(weekday=3)), ('W-FRI', Week(weekday=4))]
for name, expected in pairs:
offset = get_offset(name)
assert offset == expected, ("Expected %r to yield %r (actual: %r)" %
(name, expected, offset))
def test_get_offset_legacy():
pairs = [('w@Sat', Week(weekday=5))]
for name, expected in pairs:
with pytest.raises(ValueError, match=INVALID_FREQ_ERR_MSG):
get_offset(name)
class TestOffsetAliases(object):
def setup_method(self, method):
_offset_map.clear()
def test_alias_equality(self):
for k, v in compat.iteritems(_offset_map):
if v is None:
continue
assert k == v.copy()
def test_rule_code(self):
lst = ['M', 'MS', 'BM', 'BMS', 'D', 'B', 'H', 'T', 'S', 'L', 'U']
for k in lst:
assert k == get_offset(k).rule_code
# should be cached - this is kind of an internals test...
assert k in _offset_map
assert k == (get_offset(k) * 3).rule_code
suffix_lst = ['MON', 'TUE', 'WED', 'THU', 'FRI', 'SAT', 'SUN']
base = 'W'
for v in suffix_lst:
alias = '-'.join([base, v])
assert alias == get_offset(alias).rule_code
assert alias == (get_offset(alias) * 5).rule_code
suffix_lst = ['JAN', 'FEB', 'MAR', 'APR', 'MAY', 'JUN', 'JUL', 'AUG',
'SEP', 'OCT', 'NOV', 'DEC']
base_lst = ['A', 'AS', 'BA', 'BAS', 'Q', 'QS', 'BQ', 'BQS']
for base in base_lst:
for v in suffix_lst:
alias = '-'.join([base, v])
assert alias == get_offset(alias).rule_code
assert alias == (get_offset(alias) * 5).rule_code
lst = ['M', 'D', 'B', 'H', 'T', 'S', 'L', 'U']
for k in lst:
code, stride = get_freq_code('3' + k)
assert isinstance(code, int)
assert stride == 3
assert k == get_freq_str(code)
def test_dateoffset_misc():
oset = offsets.DateOffset(months=2, days=4)
# it works
oset.freqstr
assert (not offsets.DateOffset(months=2) == 2)
def test_freq_offsets():
off = BDay(1, offset=timedelta(0, 1800))
assert (off.freqstr == 'B+30Min')
off = BDay(1, offset=timedelta(0, -1800))
assert (off.freqstr == 'B-30Min')
def get_all_subclasses(cls):
ret = set()
this_subclasses = cls.__subclasses__()
ret = ret | set(this_subclasses)
for this_subclass in this_subclasses:
        # merge recursively so that indirect subclasses are collected too
        ret = ret | get_all_subclasses(this_subclass)
return ret
class TestCaching(object):
# as of GH 6479 (in 0.14.0), offset caching is turned off
# as of v0.12.0 only BusinessMonth/Quarter were actually caching
def setup_method(self, method):
_daterange_cache.clear()
_offset_map.clear()
def run_X_index_creation(self, cls):
inst1 = cls()
if not inst1.isAnchored():
assert not inst1._should_cache(), cls
return
assert inst1._should_cache(), cls
DatetimeIndex(start=datetime(2013, 1, 31), end=datetime(2013, 3, 31),
freq=inst1, normalize=True)
assert cls() in _daterange_cache, cls
def test_should_cache_month_end(self):
assert not MonthEnd()._should_cache()
def test_should_cache_bmonth_end(self):
assert not BusinessMonthEnd()._should_cache()
def test_should_cache_week_month(self):
assert not WeekOfMonth(weekday=1, week=2)._should_cache()
def test_all_cacheableoffsets(self):
for subclass in get_all_subclasses(CacheableOffset):
            if (subclass.__name__[0] == "_" or
                    subclass in TestCaching.no_simple_ctr):
continue
self.run_X_index_creation(subclass)
def test_month_end_index_creation(self):
DatetimeIndex(start=datetime(2013, 1, 31), end=datetime(2013, 3, 31),
freq=MonthEnd(), normalize=True)
assert not MonthEnd() in _daterange_cache
def test_bmonth_end_index_creation(self):
DatetimeIndex(start=datetime(2013, 1, 31), end=datetime(2013, 3, 29),
freq=BusinessMonthEnd(), normalize=True)
assert not BusinessMonthEnd() in _daterange_cache
def test_week_of_month_index_creation(self):
inst1 = WeekOfMonth(weekday=1, week=2)
DatetimeIndex(start=datetime(2013, 1, 31), end=datetime(2013, 3, 29),
freq=inst1, normalize=True)
inst2 = WeekOfMonth(weekday=1, week=2)
assert inst2 not in _daterange_cache
class TestReprNames(object):
def test_str_for_named_is_name(self):
# look at all the amazing combinations!
month_prefixes = ['A', 'AS', 'BA', 'BAS', 'Q', 'BQ', 'BQS', 'QS']
names = [prefix + '-' + month
for prefix in month_prefixes
for month in ['JAN', 'FEB', 'MAR', 'APR', 'MAY', 'JUN', 'JUL',
'AUG', 'SEP', 'OCT', 'NOV', 'DEC']]
days = ['MON', 'TUE', 'WED', 'THU', 'FRI', 'SAT', 'SUN']
names += ['W-' + day for day in days]
names += ['WOM-' + week + day
for week in ('1', '2', '3', '4') for day in days]
_offset_map.clear()
for name in names:
offset = get_offset(name)
assert offset.freqstr == name
def get_utc_offset_hours(ts):
# take a Timestamp and compute total hours of utc offset
o = ts.utcoffset()
return (o.days * 24 * 3600 + o.seconds) / 3600.0
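# Illustrative sketch (not collected by pytest): US/Eastern sits at UTC-4
# during daylight saving time, matching utc_offset_daylight in TestDST below.
def _utc_offset_hours_sketch():
    ts = Timestamp('2013-06-01', tz='US/Eastern')
    assert get_utc_offset_hours(ts) == -4.0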
class TestDST(object):
"""
test DateOffset additions over Daylight Savings Time
"""
# one microsecond before the DST transition
ts_pre_fallback = "2013-11-03 01:59:59.999999"
ts_pre_springfwd = "2013-03-10 01:59:59.999999"
# test both basic names and dateutil timezones
timezone_utc_offsets = {
'US/Eastern': dict(utc_offset_daylight=-4,
utc_offset_standard=-5, ),
'dateutil/US/Pacific': dict(utc_offset_daylight=-7,
utc_offset_standard=-8, )
}
valid_date_offsets_singular = [
'weekday', 'day', 'hour', 'minute', 'second', 'microsecond'
]
valid_date_offsets_plural = [
'weeks', 'days',
'hours', 'minutes', 'seconds',
'milliseconds', 'microseconds'
]
def _test_all_offsets(self, n, **kwds):
valid_offsets = self.valid_date_offsets_plural if n > 1 \
else self.valid_date_offsets_singular
for name in valid_offsets:
self._test_offset(offset_name=name, offset_n=n, **kwds)
def _test_offset(self, offset_name, offset_n, tstart, expected_utc_offset):
offset = DateOffset(**{offset_name: offset_n})
t = tstart + offset
if expected_utc_offset is not None:
assert get_utc_offset_hours(t) == expected_utc_offset
if offset_name == 'weeks':
# dates should match
assert t.date() == timedelta(days=7 * offset.kwds[
'weeks']) + tstart.date()
# expect the same day of week, hour of day, minute, second, ...
assert (t.dayofweek == tstart.dayofweek and
t.hour == tstart.hour and
t.minute == tstart.minute and
t.second == tstart.second)
elif offset_name == 'days':
# dates should match
assert timedelta(offset.kwds['days']) + tstart.date() == t.date()
# expect the same hour of day, minute, second, ...
assert (t.hour == tstart.hour and
t.minute == tstart.minute and
t.second == tstart.second)
elif offset_name in self.valid_date_offsets_singular:
# expect the singular offset value to match between tstart and t
datepart_offset = getattr(t, offset_name
if offset_name != 'weekday' else
'dayofweek')
assert datepart_offset == offset.kwds[offset_name]
else:
# the offset should be the same as if it was done in UTC
assert (t == (tstart.tz_convert('UTC') + offset)
.tz_convert('US/Pacific'))
def _make_timestamp(self, string, hrs_offset, tz):
if hrs_offset >= 0:
offset_string = '{hrs:02d}00'.format(hrs=hrs_offset)
else:
offset_string = '-{hrs:02d}00'.format(hrs=-1 * hrs_offset)
return Timestamp(string + offset_string).tz_convert(tz)
def test_fallback_plural(self):
# test moving from daylight savings to standard time
import dateutil
for tz, utc_offsets in self.timezone_utc_offsets.items():
hrs_pre = utc_offsets['utc_offset_daylight']
hrs_post = utc_offsets['utc_offset_standard']
if LooseVersion(dateutil.__version__) < LooseVersion('2.6.0'):
# buggy ambiguous behavior in 2.6.0
# GH 14621
# https://github.com/dateutil/dateutil/issues/321
self._test_all_offsets(
n=3, tstart=self._make_timestamp(self.ts_pre_fallback,
hrs_pre, tz),
expected_utc_offset=hrs_post)
elif LooseVersion(dateutil.__version__) > LooseVersion('2.6.0'):
# fixed, but skip the test
continue
def test_springforward_plural(self):
# test moving from standard to daylight savings
for tz, utc_offsets in self.timezone_utc_offsets.items():
hrs_pre = utc_offsets['utc_offset_standard']
hrs_post = utc_offsets['utc_offset_daylight']
self._test_all_offsets(
n=3, tstart=self._make_timestamp(self.ts_pre_springfwd,
hrs_pre, tz),
expected_utc_offset=hrs_post)
def test_fallback_singular(self):
# in the case of singular offsets, we don't necessarily know which utc
# offset the new Timestamp will wind up in (the tz for 1 month may be
# different from 1 second) so we don't specify an expected_utc_offset
for tz, utc_offsets in self.timezone_utc_offsets.items():
hrs_pre = utc_offsets['utc_offset_standard']
self._test_all_offsets(n=1, tstart=self._make_timestamp(
self.ts_pre_fallback, hrs_pre, tz), expected_utc_offset=None)
def test_springforward_singular(self):
for tz, utc_offsets in self.timezone_utc_offsets.items():
hrs_pre = utc_offsets['utc_offset_standard']
self._test_all_offsets(n=1, tstart=self._make_timestamp(
self.ts_pre_springfwd, hrs_pre, tz), expected_utc_offset=None)
offset_classes = {MonthBegin: ['11/2/2012', '12/1/2012'],
MonthEnd: ['11/2/2012', '11/30/2012'],
BMonthBegin: ['11/2/2012', '12/3/2012'],
BMonthEnd: ['11/2/2012', '11/30/2012'],
CBMonthBegin: ['11/2/2012', '12/3/2012'],
CBMonthEnd: ['11/2/2012', '11/30/2012'],
SemiMonthBegin: ['11/2/2012', '11/15/2012'],
SemiMonthEnd: ['11/2/2012', '11/15/2012'],
Week: ['11/2/2012', '11/9/2012'],
YearBegin: ['11/2/2012', '1/1/2013'],
YearEnd: ['11/2/2012', '12/31/2012'],
BYearBegin: ['11/2/2012', '1/1/2013'],
BYearEnd: ['11/2/2012', '12/31/2012'],
QuarterBegin: ['11/2/2012', '12/1/2012'],
QuarterEnd: ['11/2/2012', '12/31/2012'],
BQuarterBegin: ['11/2/2012', '12/3/2012'],
BQuarterEnd: ['11/2/2012', '12/31/2012'],
Day: ['11/4/2012', '11/4/2012 23:00']}.items()
@pytest.mark.parametrize('tup', offset_classes)
def test_all_offset_classes(self, tup):
offset, test_values = tup
first = Timestamp(test_values[0], tz='US/Eastern') + offset()
second = Timestamp(test_values[1], tz='US/Eastern')
assert first == second
# ---------------------------------------------------------------------
def test_get_offset_day_error():
# subclass of _BaseOffset must override _day_opt attribute, or we should
# get a NotImplementedError
with pytest.raises(NotImplementedError):
DateOffset()._get_offset_day(datetime.now())
def test_valid_default_arguments(offset_types):
    # GH#19142 check that calling the constructors without passing
    # any keyword arguments produces valid offsets
cls = offset_types
cls()
@pytest.mark.parametrize('kwd', sorted(list(liboffsets.relativedelta_kwds)))
def test_valid_month_attributes(kwd, month_classes):
# GH#18226
cls = month_classes
# check that we cannot create e.g. MonthEnd(weeks=3)
with pytest.raises(TypeError):
cls(**{kwd: 3})
@pytest.mark.parametrize('kwd', sorted(list(liboffsets.relativedelta_kwds)))
def test_valid_relativedelta_kwargs(kwd):
# Check that all the arguments specified in liboffsets.relativedelta_kwds
# are in fact valid relativedelta keyword args
DateOffset(**{kwd: 1})
@pytest.mark.parametrize('kwd', sorted(list(liboffsets.relativedelta_kwds)))
def test_valid_tick_attributes(kwd, tick_classes):
# GH#18226
cls = tick_classes
# check that we cannot create e.g. Hour(weeks=3)
with pytest.raises(TypeError):
cls(**{kwd: 3})
def test_validate_n_error():
with pytest.raises(TypeError):
DateOffset(n='Doh!')
with pytest.raises(TypeError):
MonthBegin(n=timedelta(1))
with pytest.raises(TypeError):
BDay(n=np.array([1, 2], dtype=np.int64))
def test_require_integers(offset_types):
cls = offset_types
with pytest.raises(ValueError):
cls(n=1.5)
def test_tick_normalize_raises(tick_classes):
# check that trying to create a Tick object with normalize=True raises
# GH#21427
cls = tick_classes
with pytest.raises(ValueError):
cls(n=3, normalize=True)
def test_weeks_onoffset():
# GH#18510 Week with weekday = None, normalize = False should always
# be onOffset
offset = Week(n=2, weekday=None)
ts = Timestamp('1862-01-13 09:03:34.873477378+0210', tz='Africa/Lusaka')
fast = offset.onOffset(ts)
slow = (ts + offset) - offset == ts
assert fast == slow
# negative n
offset = Week(n=2, weekday=None)
ts = Timestamp('1856-10-24 16:18:36.556360110-0717', tz='Pacific/Easter')
fast = offset.onOffset(ts)
slow = (ts + offset) - offset == ts
assert fast == slow
def test_weekofmonth_onoffset():
# GH#18864
# Make sure that nanoseconds don't trip up onOffset (and with it apply)
offset = WeekOfMonth(n=2, week=2, weekday=0)
ts = Timestamp('1916-05-15 01:14:49.583410462+0422', tz='Asia/Qyzylorda')
fast = offset.onOffset(ts)
slow = (ts + offset) - offset == ts
assert fast == slow
# negative n
offset = WeekOfMonth(n=-3, week=1, weekday=0)
ts = Timestamp('1980-12-08 03:38:52.878321185+0500', tz='Asia/Oral')
fast = offset.onOffset(ts)
slow = (ts + offset) - offset == ts
assert fast == slow
def test_last_week_of_month_on_offset():
# GH#19036, GH#18977 _adjust_dst was incorrect for LastWeekOfMonth
offset = LastWeekOfMonth(n=4, weekday=6)
ts = Timestamp('1917-05-27 20:55:27.084284178+0200',
tz='Europe/Warsaw')
slow = (ts + offset) - offset == ts
fast = offset.onOffset(ts)
assert fast == slow
# negative n
offset = LastWeekOfMonth(n=-4, weekday=5)
ts = Timestamp('2005-08-27 05:01:42.799392561-0500',
tz='America/Rainy_River')
slow = (ts + offset) - offset == ts
fast = offset.onOffset(ts)
assert fast == slow
class TestCalendarDay(object):
def test_add_across_dst_scalar(self):
# GH 22274
ts = Timestamp('2016-10-30 00:00:00+0300', tz='Europe/Helsinki')
expected = Timestamp('2016-10-31 00:00:00+0200', tz='Europe/Helsinki')
result = ts + CalendarDay(1)
assert result == expected
result = result - CalendarDay(1)
assert result == ts
@pytest.mark.parametrize('box', [DatetimeIndex, Series])
def test_add_across_dst_array(self, box):
# GH 22274
ts = Timestamp('2016-10-30 00:00:00+0300', tz='Europe/Helsinki')
expected = Timestamp('2016-10-31 00:00:00+0200', tz='Europe/Helsinki')
arr = box([ts])
expected = box([expected])
result = arr + CalendarDay(1)
tm.assert_equal(result, expected)
result = result - CalendarDay(1)
tm.assert_equal(arr, result)
@pytest.mark.parametrize('arg', [
Timestamp("2018-11-03 01:00:00", tz='US/Pacific'),
DatetimeIndex([Timestamp("2018-11-03 01:00:00", tz='US/Pacific')])
])
def test_raises_AmbiguousTimeError(self, arg):
# GH 22274
with pytest.raises(pytz.AmbiguousTimeError):
arg + CalendarDay(1)
@pytest.mark.parametrize('arg', [
Timestamp("2019-03-09 02:00:00", tz='US/Pacific'),
DatetimeIndex([Timestamp("2019-03-09 02:00:00", tz='US/Pacific')])
])
def test_raises_NonExistentTimeError(self, arg):
# GH 22274
with pytest.raises(pytz.NonExistentTimeError):
arg + CalendarDay(1)
@pytest.mark.parametrize('arg, exp', [
[1, 2],
[-1, 0],
[-5, -4]
])
def test_arithmetic(self, arg, exp):
# GH 22274
result = CalendarDay(1) + CalendarDay(arg)
expected = CalendarDay(exp)
assert result == expected
@pytest.mark.parametrize('arg', [
timedelta(1),
Day(1),
Timedelta(1),
TimedeltaIndex([timedelta(1)])
])
def test_invalid_arithmetic(self, arg):
# GH 22274
# CalendarDay (relative time) cannot be added to Timedelta-like objects
# (absolute time)
with pytest.raises(TypeError):
CalendarDay(1) + arg
| bsd-3-clause |
LLPLab/LearningPyML | kNN-digits/handWritingRecognition.py | 1 | 1425 | #!/usr/bin/env python
# encoding: utf-8
"""
@version: 1.0
@author: Juay
@file: handWritingRecognition.py
@time: 2015/11/16 16:06
"""
import numpy as np
import os
from sklearn import neighbors
def img2vector(filepath):
vector = np.genfromtxt(filepath, dtype=np.int8, delimiter=1)
return vector.flatten()
def gener_data_set(path_prefix):
training_files = os.listdir(path_prefix)
dataset = []
labels = []
dataset_append = dataset.append
labels_append = labels.append
for trf in training_files:
dataset_append(img2vector(path_prefix + '/' + trf))
labels_append(int(trf[0]))
return dataset, labels
def test_data_set(path_prefix, dataSet, labels):
testData, testLabels = gener_data_set(path_prefix)
test_files = os.listdir(path_prefix)
clf = neighbors.KNeighborsClassifier(algorithm='auto')
clf.fit(dataSet, labels)
error = 0
clf_predict = clf.predict
for index, t in enumerate(testData):
pre_value = clf_predict(t)
if pre_value[0] != testLabels[index]:
print "Predict value is %s, the read value is %s, filePath is %s" % \
(pre_value[0], testLabels[index], test_files[index])
error += 1
print error
print float(error) * 100.0 / len(testData)
if __name__ == "__main__":
dataset, labels = gener_data_set('trainingDigits')
test_data_set('testDigits', dataset, labels)
| mit |
russel1237/scikit-learn | examples/linear_model/plot_omp.py | 385 | 2263 | """
===========================
Orthogonal Matching Pursuit
===========================
Using orthogonal matching pursuit for recovering a sparse signal from a noisy
measurement encoded with a dictionary
"""
print(__doc__)
import matplotlib.pyplot as plt
import numpy as np
from sklearn.linear_model import OrthogonalMatchingPursuit
from sklearn.linear_model import OrthogonalMatchingPursuitCV
from sklearn.datasets import make_sparse_coded_signal
n_components, n_features = 512, 100
n_nonzero_coefs = 17
# generate the data
###################
# y = Xw
# |x|_0 = n_nonzero_coefs
y, X, w = make_sparse_coded_signal(n_samples=1,
n_components=n_components,
n_features=n_features,
n_nonzero_coefs=n_nonzero_coefs,
random_state=0)
idx, = w.nonzero()
# distort the clean signal
##########################
y_noisy = y + 0.05 * np.random.randn(len(y))
# plot the sparse signal
########################
plt.figure(figsize=(7, 7))
plt.subplot(4, 1, 1)
plt.xlim(0, 512)
plt.title("Sparse signal")
plt.stem(idx, w[idx])
# plot the noise-free reconstruction
####################################
omp = OrthogonalMatchingPursuit(n_nonzero_coefs=n_nonzero_coefs)
omp.fit(X, y)
coef = omp.coef_
idx_r, = coef.nonzero()
plt.subplot(4, 1, 2)
plt.xlim(0, 512)
plt.title("Recovered signal from noise-free measurements")
plt.stem(idx_r, coef[idx_r])
# plot the noisy reconstruction
###############################
omp.fit(X, y_noisy)
coef = omp.coef_
idx_r, = coef.nonzero()
plt.subplot(4, 1, 3)
plt.xlim(0, 512)
plt.title("Recovered signal from noisy measurements")
plt.stem(idx_r, coef[idx_r])
# plot the noisy reconstruction with number of non-zeros set by CV
##################################################################
omp_cv = OrthogonalMatchingPursuitCV()
omp_cv.fit(X, y_noisy)
coef = omp_cv.coef_
idx_r, = coef.nonzero()
plt.subplot(4, 1, 4)
plt.xlim(0, 512)
plt.title("Recovered signal from noisy measurements with CV")
plt.stem(idx_r, coef[idx_r])
plt.subplots_adjust(0.06, 0.04, 0.94, 0.90, 0.20, 0.38)
plt.suptitle('Sparse signal recovery with Orthogonal Matching Pursuit',
fontsize=16)
plt.show()
| bsd-3-clause |
fro391/Investing | ML/Regression_Intro.py | 1 | 1680 | import pandas as pd
import quandl,math, datetime
import numpy as np
from sklearn import preprocessing, cross_validation, svm
from sklearn.linear_model import LinearRegression
import matplotlib.pyplot as plt
from matplotlib import style
import urllib
import pickle
style.use('ggplot')
#df = quandl.get('WIKI/GOOG')
#get stock info into pandas df
'''urllib.urlretrieve('http://real-chart.finance.yahoo.com/table.csv?s=BRK-A&a=03&b=12&c=1996&d=08&e=28&f=2016&g=d&ignore=.csv',
'BRK-A.csv')'''
df = pd.read_csv('BRK-A.csv',index_col=0)
#make HL pct and pct change columns and select necessary columns
df = df [['Open','High','Low','Adj Close','Volume']]
df ['HL_PCT'] = (df['High'] - df['Adj Close'])/df['Adj Close'] * 100.0
df ['PCT_change'] = (df['Adj Close'] - df['Open'])/df['Open'] * 100.0
df = df[['Adj Close','HL_PCT','PCT_change','Volume']]
forecast_col = 'Adj Close'
df.fillna(-99999,inplace = True)
forecast_out = int(math.ceil(0.01*len(df))) #days to forecast out
df['label'] = df[forecast_col].shift(-forecast_out) #shifting so that forecast_col will have future data
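# Added illustration (not part of the original script): shift(-n) moves values n
# rows earlier, so each row's 'label' is the Adj Close n trading days ahead and
# the last n rows become NaN, e.g.
#   pd.Series([10, 11, 12, 13]).shift(-2)  ->  12, 13, NaN, NaN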
x = np.array(df.drop(['label'],1))
x = preprocessing.scale(x)
x = x[:-forecast_out]
x_lately = x[-forecast_out:]
df.dropna(inplace=True)
y = np.array(df['label'])
x_train,x_test,y_train, y_test = cross_validation.train_test_split(x,y,test_size = 0.2)
'''clf = LinearRegression()
clf.fit(x_train,y_train)
#pickling classifier
with open('LinearR.pickle','wb') as f:
pickle.dump(clf,f)'''
pickle_in = open('LinearR.pickle','rb')
clf = pickle.load(pickle_in)
accuracy = clf.score(x_test,y_test)
forecast_set = clf.predict(x_lately)
print forecast_set,accuracy,forecast_out
| gpl-2.0 |
ozak/geopandas | geopandas/tests/test_dissolve.py | 1 | 3100 | from __future__ import absolute_import
import pytest
import numpy as np
import pandas as pd
import geopandas
from geopandas import GeoDataFrame, read_file
from pandas.util.testing import assert_frame_equal
@pytest.fixture
def nybb_polydf():
nybb_filename = geopandas.datasets.get_path('nybb')
nybb_polydf = read_file(nybb_filename)
nybb_polydf = nybb_polydf[['geometry', 'BoroName', 'BoroCode']]
nybb_polydf = nybb_polydf.rename(columns={'geometry': 'myshapes'})
nybb_polydf = nybb_polydf.set_geometry('myshapes')
nybb_polydf['manhattan_bronx'] = 5
nybb_polydf.loc[3:4, 'manhattan_bronx'] = 6
return nybb_polydf
@pytest.fixture
def merged_shapes(nybb_polydf):
# Merged geometry
manhattan_bronx = nybb_polydf.loc[3:4, ]
others = nybb_polydf.loc[0:2, ]
collapsed = [others.geometry.unary_union,
manhattan_bronx.geometry.unary_union]
merged_shapes = GeoDataFrame(
{'myshapes': collapsed}, geometry='myshapes',
index=pd.Index([5, 6], name='manhattan_bronx'))
return merged_shapes
@pytest.fixture
def first(merged_shapes):
first = merged_shapes.copy()
first['BoroName'] = ['Staten Island', 'Manhattan']
first['BoroCode'] = [5, 1]
return first
@pytest.fixture
def expected_mean(merged_shapes):
test_mean = merged_shapes.copy()
test_mean['BoroCode'] = [4, 1.5]
return test_mean
def test_geom_dissolve(nybb_polydf, first):
test = nybb_polydf.dissolve('manhattan_bronx')
assert test.geometry.name == 'myshapes'
assert test.geom_almost_equals(first).all()
def test_dissolve_retains_existing_crs(nybb_polydf):
assert nybb_polydf.crs is not None
test = nybb_polydf.dissolve('manhattan_bronx')
assert test.crs is not None
def test_dissolve_retains_nonexisting_crs(nybb_polydf):
nybb_polydf.crs = None
test = nybb_polydf.dissolve('manhattan_bronx')
assert test.crs is None
def first_dissolve(nybb_polydf, first):
test = nybb_polydf.dissolve('manhattan_bronx')
assert_frame_equal(first, test, check_column_type=False)
def test_mean_dissolve(nybb_polydf, first, expected_mean):
test = nybb_polydf.dissolve('manhattan_bronx', aggfunc='mean')
assert_frame_equal(expected_mean, test, check_column_type=False)
test = nybb_polydf.dissolve('manhattan_bronx', aggfunc=np.mean)
assert_frame_equal(expected_mean, test, check_column_type=False)
def test_multicolumn_dissolve(nybb_polydf, first):
multi = nybb_polydf.copy()
multi['dup_col'] = multi.manhattan_bronx
multi_test = multi.dissolve(['manhattan_bronx', 'dup_col'],
aggfunc='first')
first_copy = first.copy()
first_copy['dup_col'] = first_copy.index
first_copy = first_copy.set_index([first_copy.index, 'dup_col'])
assert_frame_equal(multi_test, first_copy, check_column_type=False)
def test_reset_index(nybb_polydf, first):
test = nybb_polydf.dissolve('manhattan_bronx', as_index=False)
comparison = first.reset_index()
assert_frame_equal(comparison, test, check_column_type=False)
| bsd-3-clause |
hishnash/inspyred | docs/moonshot.py | 2 | 9510 | # Title: Moon probe evolution example
# Author: Mike Vella
#start_imports
import os
import math
import itertools
from matplotlib import pyplot as plt
from matplotlib.patches import Circle
from random import Random
from time import time
import inspyred
#end_imports
# All units are in SI unless stated otherwise.
# Global constants
#start_globals
G = 6.67300e-11 # Universal gravitational constant
earth_mass = 5.9742e24
earth_radius = 6.378e6
moon_mass = 7.36e22
moon_radius = 1.737e6
moon_position = (384403e3, 0)
earth_position = (0, 0)
#end_globals
def pairwise(iterable):
"""s -> (s0,s1), (s1,s2), (s2, s3), ..."""
a, b = itertools.tee(iterable)
next(b, None)
return itertools.izip(a, b)
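# Added usage sketch (not in the original script): pairwise() yields overlapping
# neighbour pairs, which moonshot() later uses to sum distances between
# consecutive trajectory points, e.g.
#   list(pairwise([1, 2, 3, 4]))  ->  [(1, 2), (2, 3), (3, 4)]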
def distance_between(position_a, position_b):
return math.sqrt((position_a[0] - position_b[0])**2 + (position_a[1] - position_b[1])**2)
def gravitational_force(position_a, mass_a, position_b, mass_b):
"""Returns the gravitational force between the two bodies a and b."""
distance = distance_between(position_a, position_b)
# Calculate the direction and magnitude of the force.
angle = math.atan2(position_a[1] - position_b[1], position_a[0] - position_b[0])
magnitude = G * mass_a * mass_b / (distance**2)
# Find the x and y components of the force.
# Determine sign based on which one is the larger body.
sign = -1 if mass_b > mass_a else 1
x_force = sign * magnitude * math.cos(angle)
y_force = sign * magnitude * math.sin(angle)
return x_force, y_force
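# Added worked example (a sketch, not part of the original script): for a 1 kg
# mass at the Earth's surface the magnitude computed above is
#   G * earth_mass * 1.0 / earth_radius**2
#   = 6.673e-11 * 5.9742e24 / (6.378e6)**2  ~  9.8 N,
# i.e. the familiar surface gravity, which is then split into x/y components
# using the angle between the two bodies.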
def force_on_satellite(position, mass):
"""Returns the total gravitational force acting on the body from the Earth and Moon."""
earth_grav_force = gravitational_force(position, mass, earth_position, earth_mass)
moon_grav_force = gravitational_force(position, mass, moon_position, moon_mass)
F_x = earth_grav_force[0] + moon_grav_force[0]
F_y = earth_grav_force[1] + moon_grav_force[1]
return F_x, F_y
def acceleration_of_satellite(position, mass):
"""Returns the acceleration based on all forces acting upon the body."""
F_x, F_y = force_on_satellite(position, mass)
return F_x / mass, F_y / mass
def moonshot(orbital_height, satellite_mass, boost_velocity, initial_y_velocity,
time_step=60, max_iterations=5e4, plot_trajectory=False):
fitness = 0.0
distance_from_earth_center = orbital_height + earth_radius
eqb_velocity = math.sqrt(G * earth_mass / distance_from_earth_center)
# Start the simulation.
# Keep up with the positions of the satellite as it moves.
position = [(earth_radius + orbital_height, 0.0)] # The initial position of the satellite.
velocity = [0.0, initial_y_velocity]
time = 0
min_distance_from_moon = distance_between(position[-1], moon_position) - moon_radius
i = 0
keep_simulating = True
rockets_boosted = False
while keep_simulating:
# Calculate the acceleration and corresponding change in velocity.
# (This is effectively the Forward Euler Algorithm.)
acceleration = acceleration_of_satellite(position[-1], satellite_mass)
velocity[0] += acceleration[0] * time_step
velocity[1] += acceleration[1] * time_step
# Start the rocket burn:
# add a boost in the +x direction of 1m/s
# closest point to the moon
if position[-1][1] < -100 and position[-1][0] > distance_from_earth_center-100 and not rockets_boosted:
launch_point = position[-1]
velocity[0] += boost_velocity[0]
velocity[1] += boost_velocity[1]
rockets_boosted = True
# Calculate the new position based on the velocity.
position.append((position[-1][0] + velocity[0] * time_step,
position[-1][1] + velocity[1] * time_step))
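        # Added note (sketch, not in the original script): the velocity and
        # position updates above form one explicit Euler step of size time_step,
        #   v_new = v_old + a(x_old) * dt,   x_new = x_old + v_new * dt;
        # because the already-updated velocity feeds the position update, this
        # is strictly the semi-implicit (symplectic) Euler variant.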
time += time_step
if i >= max_iterations:
keep_simulating = False
distance_from_moon_surface = distance_between(position[-1], moon_position) - moon_radius
distance_from_earth_surface = distance_between(position[-1], earth_position) - earth_radius
if distance_from_moon_surface < min_distance_from_moon:
min_distance_from_moon = distance_from_moon_surface
# See if the satellite crashes into the Moon or the Earth, or
# if the satellite gets too far away (radio contact is lost).
if distance_from_moon_surface <= 0:
fitness += 100000 # penalty of 100,000 km if crash on moon
keep_simulating = False
elif distance_from_earth_surface <= 0:
keep_simulating = False
fitness -= 100000 # reward of 100,000 km if land on earth
elif distance_from_earth_surface > 2 * distance_between(earth_position, moon_position):
keep_simulating = False #radio contact lost
i += 1
# Augment the fitness to include the minimum distance (in km)
# that the satellite made it to the Moon (lower without crashing is better).
fitness += min_distance_from_moon / 1000.0
# Augment the fitness to include 1% of the total distance
# traveled by the probe (in km). This means the probe
# should prefer shorter paths.
total_distance = 0
for p, q in pairwise(position):
total_distance += distance_between(p, q)
fitness += total_distance / 1000.0 * 0.01
if plot_trajectory:
axes = plt.gca()
earth = Circle(earth_position, earth_radius, facecolor='b', alpha=1)
moon = Circle(moon_position, moon_radius, facecolor='0.5', alpha=1)
axes.add_artist(earth)
axes.add_artist(moon)
axes.annotate('Earth', xy=earth_position, xycoords='data',
xytext=(0, 1e2), textcoords='offset points',
arrowprops=dict(arrowstyle="->"))
axes.annotate('Moon', xy=moon_position, xycoords='data',
xytext=(0, 1e2), textcoords='offset points',
arrowprops=dict(arrowstyle="->"))
x = [p[0] for p in position]
y = [p[1] for p in position]
cm = plt.get_cmap('gist_rainbow')
lines = plt.scatter(x, y, c=range(len(x)), cmap=cm, marker='o', s=2)
plt.setp(lines, edgecolors='None')
plt.axis("equal")
plt.grid("on")
projdir = os.path.dirname(os.getcwd())
name = '{0}/{1}.pdf'.format(projdir, str(fitness))
plt.savefig(name, format="pdf")
plt.clf()
return fitness
def satellite_generator(random, args):
chromosome = []
bounder = args["_ec"].bounder
# The constraints are as follows:
# orbital satellite boost velocity initial y
# height mass (x, y) velocity
for lo, hi in zip(bounder.lower_bound, bounder.upper_bound):
chromosome.append(random.uniform(lo, hi))
return chromosome
def moonshot_evaluator(candidates, args):
fitness=[]
for chromosome in candidates:
orbital_height = chromosome[0]
satellite_mass = chromosome[1]
boost_velocity = (chromosome[2], chromosome[3])
initial_y_velocity = chromosome[4]
fitness.append(moonshot(orbital_height, satellite_mass, boost_velocity, initial_y_velocity))
return fitness
def custom_observer(population, num_generations, num_evaluations, args):
best = max(population)
print('Generations: {0} Evaluations: {1} Best: {2}'.format(num_generations, num_evaluations, str(best)))
#start_main
rand = Random()
rand.seed(int(time()))
# The constraints are as follows:
# orbital satellite boost velocity initial y
# height mass (x, y) velocity
constraints=((6e6, 10.0, 3e3, -10000.0, 4000),
(8e6, 40.0, 9e3, 10000.0, 6000))
algorithm = inspyred.ec.EvolutionaryComputation(rand)
algorithm.terminator = inspyred.ec.terminators.evaluation_termination
algorithm.observer = inspyred.ec.observers.file_observer
algorithm.selector = inspyred.ec.selectors.tournament_selection
algorithm.replacer = inspyred.ec.replacers.generational_replacement
algorithm.variator = [inspyred.ec.variators.blend_crossover, inspyred.ec.variators.gaussian_mutation]
projdir = os.path.dirname(os.getcwd())
stat_file_name = '{0}/moonshot_ec_statistics.csv'.format(projdir)
ind_file_name = '{0}/moonshot_ec_individuals.csv'.format(projdir)
stat_file = open(stat_file_name, 'w')
ind_file = open(ind_file_name, 'w')
final_pop = algorithm.evolve(generator=satellite_generator,
evaluator=moonshot_evaluator,
pop_size=100,
maximize=False,
bounder=inspyred.ec.Bounder(constraints[0], constraints[1]),
num_selected=100,
tournament_size=2,
num_elites=1,
mutation_rate=0.3,
max_evaluations=600,
statistics_file=stat_file,
individuals_file=ind_file)
stat_file.close()
ind_file.close()
# Sort and print the fittest individual, who will be at index 0.
final_pop.sort(reverse=True)
best = final_pop[0]
components = best.candidate
print('\nFittest individual:')
print(best)
moonshot(components[0], components[1], (components[2], components[3]), components[4], plot_trajectory=True)
#end_main
| mit |
mbayon/TFG-MachineLearning | venv/lib/python3.6/site-packages/sklearn/linear_model/tests/test_theil_sen.py | 55 | 9939 | """
Testing for Theil-Sen module (sklearn.linear_model.theil_sen)
"""
# Author: Florian Wilhelm <[email protected]>
# License: BSD 3 clause
from __future__ import division, print_function, absolute_import
import os
import sys
from contextlib import contextmanager
import numpy as np
from numpy.testing import assert_array_equal, assert_array_less
from numpy.testing import assert_array_almost_equal, assert_warns
from scipy.linalg import norm
from scipy.optimize import fmin_bfgs
from sklearn.exceptions import ConvergenceWarning
from sklearn.linear_model import LinearRegression, TheilSenRegressor
from sklearn.linear_model.theil_sen import _spatial_median, _breakdown_point
from sklearn.linear_model.theil_sen import _modified_weiszfeld_step
from sklearn.utils.testing import (
assert_almost_equal, assert_greater, assert_less, raises,
)
@contextmanager
def no_stdout_stderr():
old_stdout = sys.stdout
old_stderr = sys.stderr
with open(os.devnull, 'w') as devnull:
sys.stdout = devnull
sys.stderr = devnull
yield
devnull.flush()
sys.stdout = old_stdout
sys.stderr = old_stderr
def gen_toy_problem_1d(intercept=True):
random_state = np.random.RandomState(0)
# Linear model y = 3*x + N(2, 0.1**2)
w = 3.
if intercept:
c = 2.
n_samples = 50
else:
c = 0.1
n_samples = 100
x = random_state.normal(size=n_samples)
noise = 0.1 * random_state.normal(size=n_samples)
y = w * x + c + noise
# Add some outliers
if intercept:
x[42], y[42] = (-2, 4)
x[43], y[43] = (-2.5, 8)
x[33], y[33] = (2.5, 1)
x[49], y[49] = (2.1, 2)
else:
x[42], y[42] = (-2, 4)
x[43], y[43] = (-2.5, 8)
x[53], y[53] = (2.5, 1)
x[60], y[60] = (2.1, 2)
x[72], y[72] = (1.8, -7)
return x[:, np.newaxis], y, w, c
def gen_toy_problem_2d():
random_state = np.random.RandomState(0)
n_samples = 100
# Linear model y = 5*x_1 + 10*x_2 + N(1, 0.1**2)
X = random_state.normal(size=(n_samples, 2))
w = np.array([5., 10.])
c = 1.
noise = 0.1 * random_state.normal(size=n_samples)
y = np.dot(X, w) + c + noise
# Add some outliers
n_outliers = n_samples // 10
ix = random_state.randint(0, n_samples, size=n_outliers)
y[ix] = 50 * random_state.normal(size=n_outliers)
return X, y, w, c
def gen_toy_problem_4d():
random_state = np.random.RandomState(0)
n_samples = 10000
# Linear model y = 5*x_1 + 10*x_2 + 42*x_3 + 7*x_4 + N(1, 0.1**2)
X = random_state.normal(size=(n_samples, 4))
w = np.array([5., 10., 42., 7.])
c = 1.
noise = 0.1 * random_state.normal(size=n_samples)
y = np.dot(X, w) + c + noise
# Add some outliers
n_outliers = n_samples // 10
ix = random_state.randint(0, n_samples, size=n_outliers)
y[ix] = 50 * random_state.normal(size=n_outliers)
return X, y, w, c
def test_modweiszfeld_step_1d():
X = np.array([1., 2., 3.]).reshape(3, 1)
# Check startvalue is element of X and solution
median = 2.
new_y = _modified_weiszfeld_step(X, median)
assert_array_almost_equal(new_y, median)
# Check startvalue is not the solution
y = 2.5
new_y = _modified_weiszfeld_step(X, y)
assert_array_less(median, new_y)
assert_array_less(new_y, y)
# Check startvalue is not the solution but element of X
y = 3.
new_y = _modified_weiszfeld_step(X, y)
assert_array_less(median, new_y)
assert_array_less(new_y, y)
# Check that a single vector is identity
X = np.array([1., 2., 3.]).reshape(1, 3)
y = X[0, ]
new_y = _modified_weiszfeld_step(X, y)
assert_array_equal(y, new_y)
def test_modweiszfeld_step_2d():
X = np.array([0., 0., 1., 1., 0., 1.]).reshape(3, 2)
y = np.array([0.5, 0.5])
# Check first two iterations
new_y = _modified_weiszfeld_step(X, y)
assert_array_almost_equal(new_y, np.array([1 / 3, 2 / 3]))
new_y = _modified_weiszfeld_step(X, new_y)
assert_array_almost_equal(new_y, np.array([0.2792408, 0.7207592]))
# Check fix point
y = np.array([0.21132505, 0.78867497])
new_y = _modified_weiszfeld_step(X, y)
assert_array_almost_equal(new_y, y)
def test_spatial_median_1d():
X = np.array([1., 2., 3.]).reshape(3, 1)
true_median = 2.
_, median = _spatial_median(X)
assert_array_almost_equal(median, true_median)
# Test larger problem and for exact solution in 1d case
random_state = np.random.RandomState(0)
X = random_state.randint(100, size=(1000, 1))
true_median = np.median(X.ravel())
_, median = _spatial_median(X)
assert_array_equal(median, true_median)
def test_spatial_median_2d():
X = np.array([0., 0., 1., 1., 0., 1.]).reshape(3, 2)
_, median = _spatial_median(X, max_iter=100, tol=1.e-6)
def cost_func(y):
dists = np.array([norm(x - y) for x in X])
return np.sum(dists)
# Check if median is solution of the Fermat-Weber location problem
fermat_weber = fmin_bfgs(cost_func, median, disp=False)
assert_array_almost_equal(median, fermat_weber)
# Check when maximum iteration is exceeded a warning is emitted
assert_warns(ConvergenceWarning, _spatial_median, X, max_iter=30, tol=0.)
def test_theil_sen_1d():
X, y, w, c = gen_toy_problem_1d()
# Check that Least Squares fails
lstq = LinearRegression().fit(X, y)
assert_greater(np.abs(lstq.coef_ - w), 0.9)
# Check that Theil-Sen works
theil_sen = TheilSenRegressor(random_state=0).fit(X, y)
assert_array_almost_equal(theil_sen.coef_, w, 1)
assert_array_almost_equal(theil_sen.intercept_, c, 1)
def test_theil_sen_1d_no_intercept():
X, y, w, c = gen_toy_problem_1d(intercept=False)
# Check that Least Squares fails
lstq = LinearRegression(fit_intercept=False).fit(X, y)
assert_greater(np.abs(lstq.coef_ - w - c), 0.5)
# Check that Theil-Sen works
theil_sen = TheilSenRegressor(fit_intercept=False,
random_state=0).fit(X, y)
assert_array_almost_equal(theil_sen.coef_, w + c, 1)
assert_almost_equal(theil_sen.intercept_, 0.)
def test_theil_sen_2d():
X, y, w, c = gen_toy_problem_2d()
# Check that Least Squares fails
lstq = LinearRegression().fit(X, y)
assert_greater(norm(lstq.coef_ - w), 1.0)
# Check that Theil-Sen works
theil_sen = TheilSenRegressor(max_subpopulation=1e3,
random_state=0).fit(X, y)
assert_array_almost_equal(theil_sen.coef_, w, 1)
assert_array_almost_equal(theil_sen.intercept_, c, 1)
def test_calc_breakdown_point():
bp = _breakdown_point(1e10, 2)
assert_less(np.abs(bp - 1 + 1 / (np.sqrt(2))), 1.e-6)
@raises(ValueError)
def test_checksubparams_negative_subpopulation():
X, y, w, c = gen_toy_problem_1d()
TheilSenRegressor(max_subpopulation=-1, random_state=0).fit(X, y)
@raises(ValueError)
def test_checksubparams_too_few_subsamples():
X, y, w, c = gen_toy_problem_1d()
TheilSenRegressor(n_subsamples=1, random_state=0).fit(X, y)
@raises(ValueError)
def test_checksubparams_too_many_subsamples():
X, y, w, c = gen_toy_problem_1d()
TheilSenRegressor(n_subsamples=101, random_state=0).fit(X, y)
@raises(ValueError)
def test_checksubparams_n_subsamples_if_less_samples_than_features():
random_state = np.random.RandomState(0)
n_samples, n_features = 10, 20
X = random_state.normal(size=(n_samples, n_features))
y = random_state.normal(size=n_samples)
TheilSenRegressor(n_subsamples=9, random_state=0).fit(X, y)
def test_subpopulation():
X, y, w, c = gen_toy_problem_4d()
theil_sen = TheilSenRegressor(max_subpopulation=250,
random_state=0).fit(X, y)
assert_array_almost_equal(theil_sen.coef_, w, 1)
assert_array_almost_equal(theil_sen.intercept_, c, 1)
def test_subsamples():
X, y, w, c = gen_toy_problem_4d()
theil_sen = TheilSenRegressor(n_subsamples=X.shape[0],
random_state=0).fit(X, y)
lstq = LinearRegression().fit(X, y)
# Check for exact the same results as Least Squares
assert_array_almost_equal(theil_sen.coef_, lstq.coef_, 9)
def test_verbosity():
X, y, w, c = gen_toy_problem_1d()
# Check that Theil-Sen can be verbose
with no_stdout_stderr():
TheilSenRegressor(verbose=True, random_state=0).fit(X, y)
TheilSenRegressor(verbose=True,
max_subpopulation=10,
random_state=0).fit(X, y)
def test_theil_sen_parallel():
X, y, w, c = gen_toy_problem_2d()
# Check that Least Squares fails
lstq = LinearRegression().fit(X, y)
assert_greater(norm(lstq.coef_ - w), 1.0)
# Check that Theil-Sen works
theil_sen = TheilSenRegressor(n_jobs=-1,
random_state=0,
max_subpopulation=2e3).fit(X, y)
assert_array_almost_equal(theil_sen.coef_, w, 1)
assert_array_almost_equal(theil_sen.intercept_, c, 1)
def test_less_samples_than_features():
random_state = np.random.RandomState(0)
n_samples, n_features = 10, 20
X = random_state.normal(size=(n_samples, n_features))
y = random_state.normal(size=n_samples)
# Check that Theil-Sen falls back to Least Squares if fit_intercept=False
theil_sen = TheilSenRegressor(fit_intercept=False,
random_state=0).fit(X, y)
lstq = LinearRegression(fit_intercept=False).fit(X, y)
assert_array_almost_equal(theil_sen.coef_, lstq.coef_, 12)
# Check fit_intercept=True case. This will not be equal to the Least
# Squares solution since the intercept is calculated differently.
theil_sen = TheilSenRegressor(fit_intercept=True, random_state=0).fit(X, y)
y_pred = theil_sen.predict(X)
assert_array_almost_equal(y_pred, y, 12)
| mit |
CxAalto/gtfspy | gtfspy/plots.py | 1 | 1812 | import pandas
from matplotlib import pyplot as plt
"""
A collection of various useful plots.
"""
def plot_trip_counts_per_day(G, ax=None, highlight_dates=None, highlight_date_labels=None, show=False):
"""
Parameters
----------
G: gtfspy.GTFS
    ax: matplotlib.Axes, optional
    highlight_dates: list[str|datetime.datetime]
        The values of highlight_dates should represent dates and/or datetime objects.
    highlight_date_labels: list
        The labels for each highlight date.
show: bool, optional
whether or not to immediately show the results
Returns
-------
    ax: matplotlib.Axes object
"""
daily_trip_counts = G.get_trip_counts_per_day()
if ax is None:
_fig, ax = plt.subplots()
daily_trip_counts["datetime"] = pandas.to_datetime(daily_trip_counts["date_str"])
daily_trip_counts.plot("datetime", "trip_counts", kind="line", ax=ax, marker="o", color="C0", ls=":",
label="Trip counts")
ax.set_xlabel("Date")
ax.set_ylabel("Trip counts per day")
if highlight_dates is not None:
assert isinstance(highlight_dates, list)
if highlight_date_labels is not None:
assert isinstance(highlight_date_labels, list)
assert len(highlight_dates) == len(highlight_date_labels), "Number of highlight date labels do not match"
else:
highlight_date_labels = [None] * len(highlight_dates)
for i, (highlight_date, label) in enumerate(zip(highlight_dates, highlight_date_labels)):
color = "C" + str(int(i % 8 + 1))
highlight_date = pandas.to_datetime(highlight_date)
ax.axvline(highlight_date, color=color, label=label)
ax.legend(loc="best")
ax.grid()
if show:
plt.show()
return ax
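# Added usage sketch (not part of the original module). The import path and the
# sqlite file name below are assumptions; any gtfspy.GTFS database would do.
if __name__ == "__main__":
    from gtfspy.gtfs import GTFS  # assumed import path for the GTFS class
    g = GTFS("some_city.sqlite")  # hypothetical GTFS sqlite database
    plot_trip_counts_per_day(
        g,
        highlight_dates=["2017-06-01"],
        highlight_date_labels=["schedule change"],
        show=True,
    )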
| mit |
annoviko/pyclustering | pyclustering/cluster/tests/unit/ut_syncsom.py | 1 | 4569 | """!
@brief Unit-tests for SYNC-SOM algorithm.
@authors Andrei Novikov ([email protected])
@date 2014-2020
@copyright BSD-3-Clause
"""
import unittest;
# Generate images without having a window appear.
import matplotlib;
matplotlib.use('Agg');
from pyclustering.cluster.syncsom import syncsom;
from pyclustering.utils import read_sample;
from pyclustering.samples.definitions import SIMPLE_SAMPLES;
class SyncsomUnitTest(unittest.TestCase):
def templateLengthSomCluster(self, file, som_map_size, radius, eps):
sample = read_sample(file);
network = syncsom(sample, som_map_size[0], som_map_size[1], radius);
network.process(collect_dynamic = False, order = eps);
# Check unique
som_clusters = network.get_som_clusters();
indexes = set();
for som_cluster in som_clusters:
for index in som_cluster:
assert (index in indexes) is False;
indexes.add(index);
def testSomClusterAllocationSampleSimple1(self):
self.templateLengthSomCluster(SIMPLE_SAMPLES.SAMPLE_SIMPLE1, [3, 3], 1.0, 0.99);
def testSomClusterAllocationSampleSimple3(self):
self.templateLengthSomCluster(SIMPLE_SAMPLES.SAMPLE_SIMPLE3, [3, 3], 1.0, 0.99);
def testSomClusterAllocationSampleSimple4(self):
self.templateLengthSomCluster(SIMPLE_SAMPLES.SAMPLE_SIMPLE4, [3, 3], 1.0, 0.99);
def testSomClusterAllocationSampleSimple5(self):
self.templateLengthSomCluster(SIMPLE_SAMPLES.SAMPLE_SIMPLE5, [3, 3], 1.0, 0.99);
def templateLengthProcessData(self, file, som_map_size, radius, eps, expected_cluster_length):
result_testing = False;
# If phases crosses each other because of random part of the network then we should try again.
for _ in range(0, 5, 1):
sample = read_sample(file);
network = syncsom(sample, som_map_size[0], som_map_size[1], radius);
network.process(collect_dynamic = False, order = eps);
clusters = network.get_clusters();
obtained_cluster_sizes = [len(cluster) for cluster in clusters];
if (len(sample) != sum(obtained_cluster_sizes)):
continue;
obtained_cluster_sizes.sort();
expected_cluster_length.sort();
#print(obtained_cluster_sizes, expected_cluster_length);
if (obtained_cluster_sizes != expected_cluster_length):
continue;
# Unit-test is passed
result_testing = True;
break;
assert result_testing;
def testClusterAllocationSampleSimple1ByGeaterAmoutNeurons(self):
self.templateLengthProcessData(SIMPLE_SAMPLES.SAMPLE_SIMPLE1, [5, 5], 1.0, 0.999, [5, 5]);
def testClusterAllocationSampleSimple1AsSom(self):
self.templateLengthProcessData(SIMPLE_SAMPLES.SAMPLE_SIMPLE1, [1, 2], 1.0, 0.999, [5, 5]);
def testClusterAllocationSampleSimple1(self):
self.templateLengthProcessData(SIMPLE_SAMPLES.SAMPLE_SIMPLE1, [2, 2], 1.0, 0.999, [5, 5]);
def testClusterAllocationSampleSimple2(self):
self.templateLengthProcessData(SIMPLE_SAMPLES.SAMPLE_SIMPLE2, [5, 5], 1.0, 0.999, [10, 5, 8]);
def testClusterAllocationSampleSimple3(self):
self.templateLengthProcessData(SIMPLE_SAMPLES.SAMPLE_SIMPLE3, [5, 5], 1.0, 0.999, [10, 10, 10, 30]);
def testClusterAllocationOneDimensionDataSampleSimple7AsSom(self):
self.templateLengthProcessData(SIMPLE_SAMPLES.SAMPLE_SIMPLE7, [1, 2], 1.0, 0.999, [10, 10]);
def testClusterAllocationOneDimensionDataSampleSimple7(self):
self.templateLengthProcessData(SIMPLE_SAMPLES.SAMPLE_SIMPLE7, [3, 3], 1.0, 0.999, [10, 10]);
def testClusterAllocationOneDimensionDataSampleSimple9AsSom(self):
self.templateLengthProcessData(SIMPLE_SAMPLES.SAMPLE_SIMPLE9, [1, 2], 1.0, 0.999, [20, 10]);
def testClusterAllocationOneDimensionDataSampleSimple9(self):
self.templateLengthProcessData(SIMPLE_SAMPLES.SAMPLE_SIMPLE9, [3, 3], 1.0, 0.999, [20, 10]);
def testShowLayersProcessing(self):
sample = read_sample(SIMPLE_SAMPLES.SAMPLE_SIMPLE1);
network = syncsom(sample, 4, 4, 1.0);
network.process(collect_dynamic = False, order = 0.99);
network.show_som_layer();
network.show_sync_layer();
| gpl-3.0 |
dhruv13J/scikit-learn | examples/mixture/plot_gmm_sin.py | 248 | 2747 | """
=================================
Gaussian Mixture Model Sine Curve
=================================
This example highlights the advantages of the Dirichlet Process:
complexity control and dealing with sparse data. The dataset is formed
by 100 points loosely spaced following a noisy sine curve. The fit by
the GMM class, using the expectation-maximization algorithm to fit a
mixture of 10 Gaussian components, finds too-small components and very
little structure. The fits by the Dirichlet process, however, show
that the model can either learn a global structure for the data (small
alpha) or easily interpolate to finding relevant local structure
(large alpha), never falling into the problems shown by the GMM class.
"""
import itertools
import numpy as np
from scipy import linalg
import matplotlib.pyplot as plt
import matplotlib as mpl
from sklearn import mixture
from sklearn.externals.six.moves import xrange
# Number of samples per component
n_samples = 100
# Generate random sample following a sine curve
np.random.seed(0)
X = np.zeros((n_samples, 2))
step = 4 * np.pi / n_samples
for i in xrange(X.shape[0]):
x = i * step - 6
X[i, 0] = x + np.random.normal(0, 0.1)
X[i, 1] = 3 * (np.sin(x) + np.random.normal(0, .2))
color_iter = itertools.cycle(['r', 'g', 'b', 'c', 'm'])
for i, (clf, title) in enumerate([
(mixture.GMM(n_components=10, covariance_type='full', n_iter=100),
"Expectation-maximization"),
(mixture.DPGMM(n_components=10, covariance_type='full', alpha=0.01,
n_iter=100),
"Dirichlet Process,alpha=0.01"),
(mixture.DPGMM(n_components=10, covariance_type='diag', alpha=100.,
n_iter=100),
"Dirichlet Process,alpha=100.")]):
clf.fit(X)
splot = plt.subplot(3, 1, 1 + i)
Y_ = clf.predict(X)
for i, (mean, covar, color) in enumerate(zip(
clf.means_, clf._get_covars(), color_iter)):
v, w = linalg.eigh(covar)
u = w[0] / linalg.norm(w[0])
# as the DP will not use every component it has access to
# unless it needs it, we shouldn't plot the redundant
# components.
if not np.any(Y_ == i):
continue
plt.scatter(X[Y_ == i, 0], X[Y_ == i, 1], .8, color=color)
# Plot an ellipse to show the Gaussian component
angle = np.arctan(u[1] / u[0])
angle = 180 * angle / np.pi # convert to degrees
ell = mpl.patches.Ellipse(mean, v[0], v[1], 180 + angle, color=color)
ell.set_clip_box(splot.bbox)
ell.set_alpha(0.5)
splot.add_artist(ell)
plt.xlim(-6, 4 * np.pi - 6)
plt.ylim(-5, 5)
plt.title(title)
plt.xticks(())
plt.yticks(())
plt.show()
| bsd-3-clause |
godfrey4000/ncexplorer | ncexplorer/plotter/canvas.py | 1 | 1953 | '''
Created on May 29, 2017
@author: neil
'''
import matplotlib as mpl
# The Qt5Agg backend, which is what is selected by default, has a bug that
# pegs the CPU at 99%, making the system very slow. Forcing the backend to
# be TkAgg works around this problem.
mpl.use('TkAgg')
import matplotlib.pyplot as plt
# Turns off the toolbar on the plot window.
mpl.rcParams['toolbar'] = 'None'
class PlottingCanvas(object):
def __init__(self, figsize):
# For newer version, jet isn't the default colormap anymore.
plt.set_cmap('jet')
plt.ion()
self._figure = plt.figure(figsize=figsize)
self._figsize = figsize
self._axes = []
# FIXME: This is a real kludge. In jupyter notebooks, the colorbar
# provokes a IndexException. This is a temporary work-around until the
# cause of this can be found and fixed.
def colorbar_ok(self):
return True
def add_map(self):
# FIXME: Manage the canvas real estate.
ax = self._figure.add_subplot(111)
self._axes.append(ax)
return ax
def clear(self):
self._figure.clf()
def show(self):
self._figure.show()
def plotobj(self):
return plt
def __unicode__(self):
pretty = []
pretty.append("Pyplot instance: %s" % plt)
pretty.append("Figure instance: %s" % self._figure)
pretty.append("Number of subplots: %s" % len(self._axes))
return u'\n'.join(pretty)
def __repr__(self):
return unicode(self)
def __str__(self):
return unicode(self).encode('utf-8')
class NotebookCanvas(object):
def __init__(self, figsize):
self._figure = plt.figure(figsize=figsize)
def colorbar_ok(self):
return True
def add_map(self):
ax = self._figure.add_subplot(111)
return ax
def clear(self):
pass
def show(self):
plt.ion()
| mit |
vatika/Automated-Essay-Grading | weighted_kappa.py | 1 | 2444 |
import numpy as np
import csv
import sklearn
import nltk
def confusion_matrix(rater_a, rater_b, min_rating=None, max_rating=None):
"""
Returns the confusion matrix between rater's ratings
"""
assert(len(rater_a) == len(rater_b))
if min_rating is None:
min_rating = min(rater_a + rater_b)
if max_rating is None:
max_rating = max(rater_a + rater_b)
num_ratings = int(max_rating - min_rating + 1)
conf_mat = [[0 for i in range(num_ratings)] for j in range(num_ratings)]
for a, b in zip(rater_a, rater_b):
try:
conf_mat[a[0] - min_rating][b[0] - min_rating] += 1
except:
print "human score = " + str(a[0]) + " machine score = " + str(b)
return conf_mat
def quadratic_weighted_kappa(rater_a, rater_b, min_rating=None, max_rating=None):
rater_a = np.array(rater_a, dtype=int)
rater_b = np.array(rater_b, dtype=int)
assert(len(rater_a) == len(rater_b))
if min_rating is None:
min_rating = min(min(rater_a), min(rater_b))
if max_rating is None:
max_rating = max(max(rater_a), max(rater_b))
conf_mat = confusion_matrix(rater_a, rater_b,
min_rating, max_rating)
num_ratings = len(conf_mat)
num_scored_items = float(len(rater_a))
hist_rater_a = histogram(rater_a, min_rating, max_rating)
hist_rater_b = histogram(rater_b, min_rating, max_rating)
numerator = 0.0
denominator = 0.0
for i in range(num_ratings):
for j in range(num_ratings):
expected_count = (hist_rater_a[i] * hist_rater_b[j]
/ num_scored_items)
d = pow(i - j, 2.0) / pow(num_ratings - 1, 2.0)
numerator += d * conf_mat[i][j] / num_scored_items
denominator += d * expected_count / num_scored_items
return 1.0 - numerator / denominator
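# Added note (not part of the original module): the loop above evaluates the
# usual quadratic weighted kappa,
#   kappa = 1 - sum_ij(w_ij * O_ij) / sum_ij(w_ij * E_ij),
# where O is the observed confusion matrix, E the expected matrix built from the
# outer product of the two rating histograms (scaled by the item count), and
# w_ij = (i - j)**2 / (num_ratings - 1)**2, so larger disagreements cost more.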
def histogram(ratings, min_rating=None, max_rating=None):
"""
Returns the counts of each type of rating that a rater made
"""
if min_rating is None:
min_rating = min(ratings)
if max_rating is None:
max_rating = max(ratings)
num_ratings = int(max_rating - min_rating + 1)
hist_ratings = [0 for x in range(num_ratings)]
for r in ratings:
try:
hist_ratings[r - min_rating] += 1
except:
hist_ratings[r[0] - min_rating] += 1
return hist_ratings
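# Added usage sketch (not part of the original module): histogram() accepts a
# flat list of integer ratings, e.g. histogram([1, 2, 2, 4]) -> [1, 2, 0, 1].
# quadratic_weighted_kappa() indexes each rating as a[0] / r[0], so it assumes
# row-like ratings (e.g. the rows of an (n, 1) array) rather than plain ints.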
| gpl-2.0 |
anirudhjayaraman/scikit-learn | sklearn/tests/test_pipeline.py | 35 | 15221 | """
Test the pipeline module.
"""
import numpy as np
from scipy import sparse
from sklearn.externals.six.moves import zip
from sklearn.utils.testing import assert_raises, assert_raises_regex, assert_raise_message
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_warns_message
from sklearn.base import clone
from sklearn.pipeline import Pipeline, FeatureUnion, make_pipeline, make_union
from sklearn.svm import SVC
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import LinearRegression
from sklearn.cluster import KMeans
from sklearn.feature_selection import SelectKBest, f_classif
from sklearn.decomposition import PCA, RandomizedPCA, TruncatedSVD
from sklearn.datasets import load_iris
from sklearn.preprocessing import StandardScaler
from sklearn.feature_extraction.text import CountVectorizer
JUNK_FOOD_DOCS = (
"the pizza pizza beer copyright",
"the pizza burger beer copyright",
"the the pizza beer beer copyright",
"the burger beer beer copyright",
"the coke burger coke copyright",
"the coke burger burger",
)
class IncorrectT(object):
"""Small class to test parameter dispatching.
"""
def __init__(self, a=None, b=None):
self.a = a
self.b = b
class T(IncorrectT):
def fit(self, X, y):
return self
def get_params(self, deep=False):
return {'a': self.a, 'b': self.b}
def set_params(self, **params):
self.a = params['a']
return self
class TransfT(T):
def transform(self, X, y=None):
return X
def inverse_transform(self, X):
return X
class FitParamT(object):
"""Mock classifier
"""
def __init__(self):
self.successful = False
pass
def fit(self, X, y, should_succeed=False):
self.successful = should_succeed
def predict(self, X):
return self.successful
def test_pipeline_init():
# Test the various init parameters of the pipeline.
assert_raises(TypeError, Pipeline)
# Check that we can't instantiate pipelines with objects without fit
# method
pipe = assert_raises(TypeError, Pipeline, [('svc', IncorrectT)])
# Smoke test with only an estimator
clf = T()
pipe = Pipeline([('svc', clf)])
assert_equal(pipe.get_params(deep=True),
dict(svc__a=None, svc__b=None, svc=clf,
**pipe.get_params(deep=False)))
# Check that params are set
pipe.set_params(svc__a=0.1)
assert_equal(clf.a, 0.1)
assert_equal(clf.b, None)
# Smoke test the repr:
repr(pipe)
# Test with two objects
clf = SVC()
filter1 = SelectKBest(f_classif)
pipe = Pipeline([('anova', filter1), ('svc', clf)])
# Check that we can't use the same stage name twice
assert_raises(ValueError, Pipeline, [('svc', SVC()), ('svc', SVC())])
# Check that params are set
pipe.set_params(svc__C=0.1)
assert_equal(clf.C, 0.1)
# Smoke test the repr:
repr(pipe)
# Check that params are not set when naming them wrong
assert_raises(ValueError, pipe.set_params, anova__C=0.1)
# Test clone
pipe2 = clone(pipe)
assert_false(pipe.named_steps['svc'] is pipe2.named_steps['svc'])
# Check that apart from estimators, the parameters are the same
params = pipe.get_params(deep=True)
params2 = pipe2.get_params(deep=True)
for x in pipe.get_params(deep=False):
params.pop(x)
for x in pipe2.get_params(deep=False):
params2.pop(x)
# Remove estimators that where copied
params.pop('svc')
params.pop('anova')
params2.pop('svc')
params2.pop('anova')
assert_equal(params, params2)
def test_pipeline_methods_anova():
# Test the various methods of the pipeline (anova).
iris = load_iris()
X = iris.data
y = iris.target
# Test with Anova + LogisticRegression
clf = LogisticRegression()
filter1 = SelectKBest(f_classif, k=2)
pipe = Pipeline([('anova', filter1), ('logistic', clf)])
pipe.fit(X, y)
pipe.predict(X)
pipe.predict_proba(X)
pipe.predict_log_proba(X)
pipe.score(X, y)
def test_pipeline_fit_params():
# Test that the pipeline can take fit parameters
pipe = Pipeline([('transf', TransfT()), ('clf', FitParamT())])
pipe.fit(X=None, y=None, clf__should_succeed=True)
# classifier should return True
assert_true(pipe.predict(None))
# and transformer params should not be changed
assert_true(pipe.named_steps['transf'].a is None)
assert_true(pipe.named_steps['transf'].b is None)
def test_pipeline_raise_set_params_error():
# Test pipeline raises set params error message for nested models.
pipe = Pipeline([('cls', LinearRegression())])
# expected error message
error_msg = ('Invalid parameter %s for estimator %s. '
'Check the list of available parameters '
'with `estimator.get_params().keys()`.')
assert_raise_message(ValueError,
error_msg % ('fake', 'Pipeline'),
pipe.set_params,
fake='nope')
# nested model check
assert_raise_message(ValueError,
error_msg % ("fake", pipe),
pipe.set_params,
fake__estimator='nope')
def test_pipeline_methods_pca_svm():
# Test the various methods of the pipeline (pca + svm).
iris = load_iris()
X = iris.data
y = iris.target
# Test with PCA + SVC
clf = SVC(probability=True, random_state=0)
pca = PCA(n_components='mle', whiten=True)
pipe = Pipeline([('pca', pca), ('svc', clf)])
pipe.fit(X, y)
pipe.predict(X)
pipe.predict_proba(X)
pipe.predict_log_proba(X)
pipe.score(X, y)
def test_pipeline_methods_preprocessing_svm():
# Test the various methods of the pipeline (preprocessing + svm).
iris = load_iris()
X = iris.data
y = iris.target
n_samples = X.shape[0]
n_classes = len(np.unique(y))
scaler = StandardScaler()
pca = RandomizedPCA(n_components=2, whiten=True)
clf = SVC(probability=True, random_state=0)
for preprocessing in [scaler, pca]:
pipe = Pipeline([('preprocess', preprocessing), ('svc', clf)])
pipe.fit(X, y)
# check shapes of various prediction functions
predict = pipe.predict(X)
assert_equal(predict.shape, (n_samples,))
proba = pipe.predict_proba(X)
assert_equal(proba.shape, (n_samples, n_classes))
log_proba = pipe.predict_log_proba(X)
assert_equal(log_proba.shape, (n_samples, n_classes))
decision_function = pipe.decision_function(X)
assert_equal(decision_function.shape, (n_samples, n_classes))
pipe.score(X, y)
def test_fit_predict_on_pipeline():
# test that the fit_predict method is implemented on a pipeline
# test that the fit_predict on pipeline yields same results as applying
# transform and clustering steps separately
iris = load_iris()
scaler = StandardScaler()
km = KMeans(random_state=0)
# first compute the transform and clustering step separately
scaled = scaler.fit_transform(iris.data)
separate_pred = km.fit_predict(scaled)
# use a pipeline to do the transform and clustering in one step
pipe = Pipeline([('scaler', scaler), ('Kmeans', km)])
pipeline_pred = pipe.fit_predict(iris.data)
assert_array_almost_equal(pipeline_pred, separate_pred)
def test_fit_predict_on_pipeline_without_fit_predict():
# tests that a pipeline does not have fit_predict method when final
# step of pipeline does not have fit_predict defined
scaler = StandardScaler()
pca = PCA()
pipe = Pipeline([('scaler', scaler), ('pca', pca)])
assert_raises_regex(AttributeError,
"'PCA' object has no attribute 'fit_predict'",
getattr, pipe, 'fit_predict')
def test_feature_union():
# basic sanity check for feature union
iris = load_iris()
X = iris.data
X -= X.mean(axis=0)
y = iris.target
svd = TruncatedSVD(n_components=2, random_state=0)
select = SelectKBest(k=1)
fs = FeatureUnion([("svd", svd), ("select", select)])
fs.fit(X, y)
X_transformed = fs.transform(X)
assert_equal(X_transformed.shape, (X.shape[0], 3))
# check if it does the expected thing
assert_array_almost_equal(X_transformed[:, :-1], svd.fit_transform(X))
assert_array_equal(X_transformed[:, -1],
select.fit_transform(X, y).ravel())
# test if it also works for sparse input
# We use a different svd object to control the random_state stream
fs = FeatureUnion([("svd", svd), ("select", select)])
X_sp = sparse.csr_matrix(X)
X_sp_transformed = fs.fit_transform(X_sp, y)
assert_array_almost_equal(X_transformed, X_sp_transformed.toarray())
# test setting parameters
fs.set_params(select__k=2)
assert_equal(fs.fit_transform(X, y).shape, (X.shape[0], 4))
# test it works with transformers missing fit_transform
fs = FeatureUnion([("mock", TransfT()), ("svd", svd), ("select", select)])
X_transformed = fs.fit_transform(X, y)
assert_equal(X_transformed.shape, (X.shape[0], 8))
def test_make_union():
pca = PCA()
mock = TransfT()
fu = make_union(pca, mock)
names, transformers = zip(*fu.transformer_list)
assert_equal(names, ("pca", "transft"))
assert_equal(transformers, (pca, mock))
def test_pipeline_transform():
# Test whether pipeline works with a transformer at the end.
# Also test pipeline.transform and pipeline.inverse_transform
iris = load_iris()
X = iris.data
pca = PCA(n_components=2)
pipeline = Pipeline([('pca', pca)])
# test transform and fit_transform:
X_trans = pipeline.fit(X).transform(X)
X_trans2 = pipeline.fit_transform(X)
X_trans3 = pca.fit_transform(X)
assert_array_almost_equal(X_trans, X_trans2)
assert_array_almost_equal(X_trans, X_trans3)
X_back = pipeline.inverse_transform(X_trans)
X_back2 = pca.inverse_transform(X_trans)
assert_array_almost_equal(X_back, X_back2)
def test_pipeline_fit_transform():
# Test whether pipeline works with a transformer missing fit_transform
iris = load_iris()
X = iris.data
y = iris.target
transft = TransfT()
pipeline = Pipeline([('mock', transft)])
# test fit_transform:
X_trans = pipeline.fit_transform(X, y)
X_trans2 = transft.fit(X, y).transform(X)
assert_array_almost_equal(X_trans, X_trans2)
def test_make_pipeline():
t1 = TransfT()
t2 = TransfT()
pipe = make_pipeline(t1, t2)
assert_true(isinstance(pipe, Pipeline))
assert_equal(pipe.steps[0][0], "transft-1")
assert_equal(pipe.steps[1][0], "transft-2")
pipe = make_pipeline(t1, t2, FitParamT())
assert_true(isinstance(pipe, Pipeline))
assert_equal(pipe.steps[0][0], "transft-1")
assert_equal(pipe.steps[1][0], "transft-2")
assert_equal(pipe.steps[2][0], "fitparamt")
def test_feature_union_weights():
# test feature union with transformer weights
iris = load_iris()
X = iris.data
y = iris.target
pca = RandomizedPCA(n_components=2, random_state=0)
select = SelectKBest(k=1)
# test using fit followed by transform
fs = FeatureUnion([("pca", pca), ("select", select)],
transformer_weights={"pca": 10})
fs.fit(X, y)
X_transformed = fs.transform(X)
# test using fit_transform
fs = FeatureUnion([("pca", pca), ("select", select)],
transformer_weights={"pca": 10})
X_fit_transformed = fs.fit_transform(X, y)
# test it works with transformers missing fit_transform
fs = FeatureUnion([("mock", TransfT()), ("pca", pca), ("select", select)],
transformer_weights={"mock": 10})
X_fit_transformed_wo_method = fs.fit_transform(X, y)
# check against expected result
# We use a different pca object to control the random_state stream
assert_array_almost_equal(X_transformed[:, :-1], 10 * pca.fit_transform(X))
assert_array_equal(X_transformed[:, -1],
select.fit_transform(X, y).ravel())
assert_array_almost_equal(X_fit_transformed[:, :-1],
10 * pca.fit_transform(X))
assert_array_equal(X_fit_transformed[:, -1],
select.fit_transform(X, y).ravel())
assert_equal(X_fit_transformed_wo_method.shape, (X.shape[0], 7))
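# Added sketch (not part of the original tests): transformer_weights simply
# scales each transformer's output block before the horizontal stacking, as
# in this minimal, self-contained illustration (names and blocks assumed):
def _weighted_union_sketch(named_blocks, weights=None):
    import numpy as np
    # named_blocks: list of (name, 2d array) pairs, mirroring transformer_list
    weights = weights or {}
    return np.hstack([weights.get(name, 1.0) * block
                      for name, block in named_blocks])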
def test_feature_union_parallel():
# test that n_jobs work for FeatureUnion
X = JUNK_FOOD_DOCS
fs = FeatureUnion([
("words", CountVectorizer(analyzer='word')),
("chars", CountVectorizer(analyzer='char')),
])
fs_parallel = FeatureUnion([
("words", CountVectorizer(analyzer='word')),
("chars", CountVectorizer(analyzer='char')),
], n_jobs=2)
fs_parallel2 = FeatureUnion([
("words", CountVectorizer(analyzer='word')),
("chars", CountVectorizer(analyzer='char')),
], n_jobs=2)
fs.fit(X)
X_transformed = fs.transform(X)
assert_equal(X_transformed.shape[0], len(X))
fs_parallel.fit(X)
X_transformed_parallel = fs_parallel.transform(X)
assert_equal(X_transformed.shape, X_transformed_parallel.shape)
assert_array_equal(
X_transformed.toarray(),
X_transformed_parallel.toarray()
)
# fit_transform should behave the same
X_transformed_parallel2 = fs_parallel2.fit_transform(X)
assert_array_equal(
X_transformed.toarray(),
X_transformed_parallel2.toarray()
)
# transformers should stay fit after fit_transform
X_transformed_parallel2 = fs_parallel2.transform(X)
assert_array_equal(
X_transformed.toarray(),
X_transformed_parallel2.toarray()
)
def test_feature_union_feature_names():
word_vect = CountVectorizer(analyzer="word")
char_vect = CountVectorizer(analyzer="char_wb", ngram_range=(3, 3))
ft = FeatureUnion([("chars", char_vect), ("words", word_vect)])
ft.fit(JUNK_FOOD_DOCS)
feature_names = ft.get_feature_names()
for feat in feature_names:
assert_true("chars__" in feat or "words__" in feat)
assert_equal(len(feature_names), 35)
def test_classes_property():
iris = load_iris()
X = iris.data
y = iris.target
reg = make_pipeline(SelectKBest(k=1), LinearRegression())
reg.fit(X, y)
assert_raises(AttributeError, getattr, reg, "classes_")
clf = make_pipeline(SelectKBest(k=1), LogisticRegression(random_state=0))
assert_raises(AttributeError, getattr, clf, "classes_")
clf.fit(X, y)
assert_array_equal(clf.classes_, np.unique(y))
def test_X1d_inverse_transform():
transformer = TransfT()
pipeline = make_pipeline(transformer)
X = np.ones(10)
msg = "1d X will not be reshaped in pipeline.inverse_transform"
assert_warns_message(FutureWarning, msg, pipeline.inverse_transform, X)
| bsd-3-clause |
DavidMFreeman34/NoahDavidCollab | pythagoreanMJ.py | 1 | 1625 | #-----------------------------------------
# Python + Matplotlib Penrose
# Taken from http://www.bubuko.com/infodetail-911894.html
#-----------------------------------------
import matplotlib.pyplot as plt
import matplotlib as mpl
import numpy as np
from matplotlib.path import Path
from matplotlib.patches import PathPatch
from mpmath import *
plt.figure(figsize=(1.5,1.5),dpi=500)
plt.subplot(aspect=1)
plt.axis([0,1,0,1])
plt.xticks([])
plt.yticks([])
plt.axis('off')
print "Pythagorean(m,j) Tiling"
m = input('Enter the value of m (an integer at least 3): ')
n = input('Enter the value of j (less than m): ')
p=m-1
q=n-m
mp.dps = 20
mp.pretty = True
f = lambda x: x**m-x**n-1
L=findroot(f,1)
l=L**(0.5)
def subdivide(triangles):
result = []
for color,A,B,C in triangles:
if color == p:
P = A + (C-A)*(l**(0-2*m))
result +=[(0,A,P,B),(n,B,P,C)]
for i in xrange(p):
if color == i:
result +=[(i+1,A,B,C)]
return result
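# Added sanity check (not in the original script): the scale factor L found
# by findroot should satisfy the defining relation L**m - L**n = 1 that the
# subdivision rule above relies on.
assert abs(L**m - L**n - 1) < 1e-10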
cmap = mpl.cm.autumn
def DrawFigure(triangles):
for color,A,B,C in triangles:
vertices = [A,B,C,A]
codes = [Path.MOVETO]+[Path.LINETO]*3
tri = Path(vertices,codes)
for i in xrange(m):
if color == i:
tri_patch=PathPatch(tri,facecolor=cmap(i / float(m)),edgecolor='#000000',linewidth=0.05)
plt.gca().add_patch(tri_patch)
plt.show()
# plt.savefig("Pythag10-1.pdf", format='pdf')
triangles = []
A=np.array([0,0])
B=np.array([l**(-m),0])
C=np.array([l**(-m),l**q])
triangles.append([0,A,B,C])
a = input('Enter number of divisions: ')
for j in xrange(a):
triangles=subdivide(triangles)
DrawFigure(triangles) | mit |
jmikko/EasyMKL | Python/toytest_EasyMKL.py | 1 | 3068 | """
@author: Michele Donini
@email: [email protected]
Toy test of the algorithm EasyMKL.py.
"""
# Test:
from sklearn.metrics.pairwise import rbf_kernel
from sklearn.metrics import roc_auc_score
from sklearn.datasets import make_classification
from EasyMKL import EasyMKL
from komd import KOMD
from cvxopt import matrix
import numpy as np
import matplotlib.pyplot as plt
# Binary classification problem
random_state = np.random.RandomState(0)
X, Y = make_classification(n_samples=1000,
n_features=50,
n_informative=10,
n_redundant=10,
n_repeated=10,
n_classes=2,
n_clusters_per_class=5,
weights=None,
flip_y=0.0,
class_sep=0.5,
hypercube=True,
shift=0.0,
scale=1.0,
shuffle=True,
random_state=random_state)
X = matrix(X)
Y = matrix([1.0 if y>0 else -1.0 for y in Y])
# Train & Test:
pertr = 90
idtrain = range(0,len(Y) * pertr / 100)
idtest = range(len(Y) * pertr / 100,len(Y))
Ytr = Y[idtrain]
Yte = Y[idtest]
# Selected features for each weak kernel:
featlist = [[random_state.randint(0,X.size[1]) for i in range(5)] for j in range(50)]
# Generation of the weak Kernels:
klist = [rbf_kernel(X[:,f], gamma = 0.1) for f in featlist]
klisttr = [matrix(k)[idtrain,idtrain] for k in klist]
klistte = [matrix(k)[idtest,idtrain] for k in klist]
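# Added check (not in the original script): the slicing above follows the
# usual kernel-matrix convention, i.e. training kernels are (train x train)
# and test kernels are (test x train).
assert klisttr[0].size == (len(idtrain), len(idtrain))
assert klistte[0].size == (len(idtest), len(idtrain))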
# EasyMKL initialization:
l = 0.5 # lambda
easy = EasyMKL(lam=l, tracenorm = True)
easy.train(klisttr,Ytr)
# Evaluation:
rtr = roc_auc_score(np.array(Ytr),np.array(easy.rank(klisttr)))
print 'AUC EasyMKL train:',rtr
ranktest = np.array(easy.rank(klistte))
rte = roc_auc_score(np.array(Yte),ranktest)
print 'AUC EasyMKL test:',rte
print 'weights of kernels:', easy.weights
# Comparison with respect to the single kernels:
print '\n\n\n\n\nSingle kernel analysis using KOMD:'
YYtr = matrix(np.diag(list(Ytr)))
for idx,f in enumerate(featlist):
classifier = KOMD(lam=l, Kf = 'rbf', rbf_gamma = 0.1)
y_score = classifier.fit(X[idtrain,f], Ytr).decision_function(X[idtest,f])
print 'K with features:',f,'AUC test:',roc_auc_score(np.array(Yte), np.array(y_score))
print '\t\t margin train: \t\t',(easy.gamma.T * YYtr * matrix(klist[idx])[idtrain,idtrain] * YYtr * easy.gamma)[0]
print '\t\t weight assigned: \t',easy.weights[idx]
# Some (not so useful) images, only if the X.size[1]==2 (2 dimensional datasets):
PLOT_THE_CLASS = True
ranktestnorm = [ (2 * (r - np.min(ranktest))) / (np.max(ranktest) - np.min(ranktest)) - 1.0 for r in ranktest]
if PLOT_THE_CLASS and X.size[1] == 2:
plt.figure(1)
plt.scatter(X[idtrain, 0], X[idtrain, 1], marker='*', s = 140, c=Ytr, cmap='spring')
plt.scatter(X[idtest, 0], X[idtest, 1], marker='o', s = 180, c=ranktestnorm, cmap='spring')
plt.colorbar()
| gpl-3.0 |
karlnapf/shogun | applications/easysvm/esvm/plots.py | 7 | 6555 | """
This module contains code for commonly used plots
"""
# This software is distributed under BSD 3-clause license (see LICENSE file).
#
# Authors: Soeren Sonnenburg
import sys
import random
import numpy
import warnings
import shutil
from shogun import Labels
from shogun import *
def plotroc(output, LTE, draw_random=False, figure_fname="", roc_label='ROC'):
"""Plot the receiver operating characteristic curve"""
import pylab
import matplotlib
pylab.figure(1,dpi=150,figsize=(4,4))
fontdict=dict(family="cursive",weight="bold",size=7,y=1.05) ;
pm=PerformanceMeasures(Labels(numpy.array(LTE)), Labels(numpy.array(output)))
points=pm.get_ROC()
points=numpy.array(points).T # for pylab.plot
pylab.plot(points[0], points[1], 'b-', label=roc_label)
if draw_random:
pylab.plot([0, 1], [0, 1], 'r-', label='random guessing')
pylab.axis([0, 1, 0, 1])
ticks=numpy.arange(0., 1., .1, dtype=numpy.float64)
pylab.xticks(ticks,size=10)
pylab.yticks(ticks,size=10)
pylab.xlabel('1 - specificity (false positive rate)',size=10)
pylab.ylabel('sensitivity (true positive rate)',size=10)
pylab.legend(loc='lower right', prop = matplotlib.font_manager.FontProperties('tiny'))
if figure_fname!=None:
warnings.filterwarnings('ignore','Could not match*')
tempfname = figure_fname + '.png'
pylab.savefig(tempfname)
shutil.move(tempfname,figure_fname)
auROC=pm.get_auROC()
return auROC ;
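# Added sketch (not part of the original module): a minimal, shogun-free
# illustration of how ROC points like the ones plotted above can be built
# directly with numpy. It assumes real-valued scores and +/-1 labels and
# ignores tie handling.
def _roc_points_sketch(outputs, labels):
    outputs = numpy.asarray(outputs, dtype=float)
    labels = numpy.asarray(labels)
    order = numpy.argsort(-outputs)                # sort by descending score
    pos = (labels[order] > 0).astype(float)
    neg = 1.0 - pos
    tpr = numpy.cumsum(pos) / max(pos.sum(), 1.0)  # sensitivity
    fpr = numpy.cumsum(neg) / max(neg.sum(), 1.0)  # 1 - specificity
    return fpr, tpr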
def plotprc(output, LTE, figure_fname="", prc_label='PRC'):
"""Plot the precision recall curve"""
import pylab
import matplotlib
pylab.figure(2,dpi=150,figsize=(4,4))
pm=PerformanceMeasures(Labels(numpy.array(LTE)), Labels(numpy.array(output)))
points=pm.get_PRC()
points=numpy.array(points).T # for pylab.plot
pylab.plot(points[0], points[1], 'b-', label=prc_label)
pylab.axis([0, 1, 0, 1])
ticks=numpy.arange(0., 1., .1, dtype=numpy.float64)
pylab.xticks(ticks,size=10)
pylab.yticks(ticks,size=10)
pylab.xlabel('sensitivity (true positive rate)',size=10)
pylab.ylabel('precision (1 - false discovery rate)',size=10)
pylab.legend(loc='lower right')
if figure_fname!=None:
warnings.filterwarnings('ignore','Could not match*')
tempfname = figure_fname + '.png'
pylab.savefig(tempfname)
shutil.move(tempfname,figure_fname)
auPRC=pm.get_auPRC()
return auPRC ;
def plotcloud(cloud, figure_fname="", label='cloud'):
"""Plot the cloud of points (the first two dimensions only)"""
import pylab
import matplotlib
pylab.figure(1,dpi=150,figsize=(4,4))
pos = []
neg = []
for i in xrange(len(cloud)):
if cloud[i][0]==1:
pos.append(cloud[i][1:])
elif cloud[i][0]==-1:
neg.append(cloud[i][1:])
fontdict=dict(family="cursive",weight="bold",size=10,y=1.05) ;
pylab.title(label, fontdict)
points=numpy.array(pos).T # for pylab.plot
pylab.plot(points[0], points[1], 'b+', label='positive')
points=numpy.array(neg).T # for pylab.plot
pylab.plot(points[0], points[1], 'rx', label='negative')
#pylab.axis([0, 1, 0, 1])
#ticks=numpy.arange(0., 1., .1, dtype=numpy.float64)
#pylab.xticks(ticks,size=10)
#pylab.yticks(ticks,size=10)
pylab.xlabel('dimension 1',size=10)
pylab.ylabel('dimension 2',size=10)
pylab.legend(loc='lower right')
if figure_fname!=None:
warnings.filterwarnings('ignore','Could not match*')
tempfname = figure_fname + '.png'
pylab.savefig(tempfname)
shutil.move(tempfname,figure_fname)
def plot_poims(poimfilename, poim, max_poim, diff_poim, poim_totalmass, poimdegree, max_len):
"""Plot a summary of the information in poims"""
import pylab
import matplotlib
pylab.figure(3, dpi=150, figsize=(4,5))
# summary figures
fontdict=dict(family="cursive",weight="bold",size=7,y=1.05) ;
pylab.subplot(3,2,1)
pylab.title('Total POIM Mass', fontdict)
pylab.plot(poim_totalmass) ;
pylab.ylabel('weight mass', size=5)
pylab.subplot(3,2,3)
pylab.title('POIMs', fontdict)
pylab.pcolor(max_poim, shading='flat') ;
pylab.subplot(3,2,5)
pylab.title('Differential POIMs', fontdict)
pylab.pcolor(diff_poim, shading='flat') ;
for plot in [3, 5]:
pylab.subplot(3,2,plot)
ticks=numpy.arange(1., poimdegree+1, 1, dtype=numpy.float64)
ticks_str = []
for i in xrange(0, poimdegree):
ticks_str.append("%i" % (i+1))
ticks[i] = i + 0.5
pylab.yticks(ticks, ticks_str)
pylab.ylabel('degree', size=5)
# per k-mer figures
fontdict=dict(family="cursive",weight="bold",size=7,y=1.04) ;
# 1-mers
pylab.subplot(3,2,2)
pylab.title('1-mer Positional Importance', fontdict)
pylab.pcolor(poim[0], shading='flat') ;
ticks_str = ['A', 'C', 'G', 'T']
ticks = [0.5, 1.5, 2.5, 3.5]
pylab.yticks(ticks, ticks_str, size=5)
pylab.axis([0, max_len, 0, 4])
# 2-mers
pylab.subplot(3,2,4)
pylab.title('2-mer Positional Importance', fontdict)
pylab.pcolor(poim[1], shading='flat') ;
i=0 ;
ticks=[] ;
ticks_str=[] ;
for l1 in ['A', 'C', 'G', 'T']:
for l2 in ['A', 'C', 'G', 'T']:
ticks_str.append(l1+l2)
ticks.append(0.5+i) ;
i+=1 ;
pylab.yticks(ticks, ticks_str, fontsize=5)
pylab.axis([0, max_len, 0, 16])
# 3-mers
pylab.subplot(3,2,6)
pylab.title('3-mer Positional Importance', fontdict)
pylab.pcolor(poim[2], shading='flat') ;
i=0 ;
ticks=[] ;
ticks_str=[] ;
for l1 in ['A', 'C', 'G', 'T']:
for l2 in ['A', 'C', 'G', 'T']:
for l3 in ['A', 'C', 'G', 'T']:
if numpy.mod(i,4)==0:
ticks_str.append(l1+l2+l3)
ticks.append(0.5+i) ;
i+=1 ;
pylab.yticks(ticks, ticks_str, fontsize=5)
pylab.axis([0, max_len, 0, 64])
# x-axis on last two figures
for plot in [5, 6]:
pylab.subplot(3,2,plot)
pylab.xlabel('sequence position', size=5)
# finishing up
for plot in xrange(0,6):
pylab.subplot(3,2,plot+1)
pylab.xticks(fontsize=5)
for plot in [1,3,5]:
pylab.subplot(3,2,plot)
pylab.yticks(fontsize=5)
pylab.subplots_adjust(hspace=0.35) ;
# write to file
warnings.filterwarnings('ignore','Could not match*')
pylab.savefig('/tmp/temppylabfig.png')
shutil.move('/tmp/temppylabfig.png',poimfilename)
| bsd-3-clause |
jobovy/galpy | tests/test_galpypaper.py | 1 | 26615 | # Test that all of the examples in the galpy paper run
from __future__ import print_function, division
import os
import numpy
import pytest
def test_overview():
from galpy.potential import NFWPotential
np= NFWPotential(normalize=1.)
from galpy.orbit import Orbit
o= Orbit(vxvv=[1.,0.1,1.1,0.1,0.02,0.])
from galpy.actionAngle import actionAngleSpherical
aA= actionAngleSpherical(pot=np)
js= aA(o)
assert numpy.fabs((js[0]-0.00980542)/js[0]) < 10.**-3., 'Action calculation in the overview section has changed'
assert numpy.fabs((js[1]-1.1)/js[0]) < 10.**-3., 'Action calculation in the overview section has changed'
assert numpy.fabs((js[2]-0.00553155)/js[0]) < 10.**-3., 'Action calculation in the overview section has changed'
from galpy.df import quasiisothermaldf
qdf= quasiisothermaldf(1./3.,0.2,0.1,1.,1.,
pot=np,aA=aA)
assert numpy.fabs((qdf(o)-61.57476085)/61.57476085) < 10.**-3., 'qdf calculation in the overview section has changed'
return None
def test_import():
import galpy
import galpy.potential
import galpy.orbit
import galpy.actionAngle
import galpy.df
import galpy.util
return None
def test_units():
# Import changed because of bovy_conversion --> conversion name change
from galpy.util import conversion
print(conversion.force_in_pcMyr2(220.,8.))#pc/Myr^2
assert numpy.fabs(conversion.force_in_pcMyr2(220.,8.)-6.32793804994) < 10.**-4., 'unit conversion has changed'
print(conversion.dens_in_msolpc3(220.,8.))#Msolar/pc^3
# Loosen tolerances including mass bc of 0.025% change in Msun in astropyv2
assert numpy.fabs((conversion.dens_in_msolpc3(220.,8.)-0.175790330079)/0.175790330079) < 0.0003, 'unit conversion has changed'
print(conversion.surfdens_in_msolpc2(220.,8.))#Msolar/pc^2
assert numpy.fabs((conversion.surfdens_in_msolpc2(220.,8.)-1406.32264063)/1406.32264063) < 0.0003, 'unit conversion has changed'
print(conversion.mass_in_1010msol(220.,8.))#10^10 Msolar
assert numpy.fabs((conversion.mass_in_1010msol(220.,8.)-9.00046490005)/9.00046490005) < 0.0003, 'unit conversion has changed'
print(conversion.freq_in_Gyr(220.,8.))#1/Gyr
assert numpy.fabs(conversion.freq_in_Gyr(220.,8.)-28.1245845523) < 10.**-4., 'unit conversion has changed'
print(conversion.time_in_Gyr(220.,8.))#Gyr
assert numpy.fabs(conversion.time_in_Gyr(220.,8.)-0.0355560807712) < 10.**-4., 'unit conversion has changed'
return None
def test_potmethods():
from galpy.potential import DoubleExponentialDiskPotential
dp= DoubleExponentialDiskPotential(normalize=1.,
hr=3./8.,hz=0.3/8.)
dp(1.,0.1) # The potential itself at R=1., z=0.1
assert numpy.fabs(dp(1.,0.1)+1.1037196286636572) < 10.**-4., 'potmethods has changed'
dp.Rforce(1.,0.1) # The radial force
assert numpy.fabs(dp.Rforce(1.,0.1)+0.9147659436328015) < 10.**-4., 'potmethods has changed'
dp.zforce(1.,0.1) # The vertical force
assert numpy.fabs(dp.zforce(1.,0.1)+0.50056789703079607) < 10.**-4., 'potmethods has changed'
dp.R2deriv(1.,0.1) # The second radial derivative
# Loosened tolerance, because new (more precise) calculation differs by 3e-4
assert numpy.fabs(dp.R2deriv(1.,0.1)+1.0189440730205248) < 3 * 10.**-4., 'potmethods has changed'
dp.z2deriv(1.,0.1) # The second vertical derivative
# Loosened tolerance, because new (more precise) calculation differs by 4e-4
assert numpy.fabs(dp.z2deriv(1.,0.1)-1.0648350937842703) < 4 * 10.**-4., 'potmethods has changed'
dp.Rzderiv(1.,0.1) # The mixed radial,vertical derivative
assert numpy.fabs(dp.Rzderiv(1.,0.1)+1.1872449759212851) < 10.**-4., 'potmethods has changed'
dp.dens(1.,0.1) # The density
assert numpy.fabs(dp.dens(1.,0.1)-0.076502355610946121) < 10.**-4., 'potmethods has changed'
dp.dens(1.,0.1,forcepoisson=True) # Using Poisson's eqn.
assert numpy.fabs(dp.dens(1.,0.1,forcepoisson=True)-0.076446652249682681) < 10.**-4., 'potmethods has changed'
dp.mass(1.,0.1) # The mass
assert numpy.fabs(dp.mass(1.,0.1)-0.7281629803939751) < 10.**-4., 'potmethods has changed'
dp.vcirc(1.) # The circular velocity at R=1.
assert numpy.fabs(dp.vcirc(1.)-1.0) < 10.**-4., 'potmethods has changed' # By definition, because of normalize=1.
dp.omegac(1.) # The rotational frequency
assert numpy.fabs(dp.omegac(1.)-1.0) < 10.**-4., 'potmethods has changed' # Also because of normalize=1.
dp.epifreq(1.) # The epicycle frequency
# Loosened tolerance, because new (more precise) calculation differs by 1e-3
assert numpy.fabs(dp.epifreq(1.)-1.3301123099210266) < 2 * 10.**-3., 'potmethods has changed'
dp.verticalfreq(1.) # The vertical frequency
# Loosened tolerance, because new (more precise) calculation differs by 1e-3
assert numpy.fabs(dp.verticalfreq(1.)-3.7510872575640293) < 10.**-3., 'potmethods has changed'
dp.flattening(1.,0.1) #The flattening (see caption)
assert numpy.fabs(dp.flattening(1.,0.1)-0.42748757564198159) < 10.**-4., 'potmethods has changed'
dp.lindbladR(1.75,m='corotation') # co-rotation resonance
assert numpy.fabs(dp.lindbladR(1.75,m='corotation')-0.540985051273488) < 10.**-4., 'potmethods has changed'
return None
from galpy.potential import Potential
def smoothInterp(t,dt,tform):
"""Smooth interpolation in time, following Dehnen (2000)"""
if t < tform: smooth= 0.
elif t > (tform+dt): smooth= 1.
else:
xi= 2.*(t-tform)/dt-1.
smooth= (3./16.*xi**5.-5./8*xi**3.+15./16.*xi+.5)
return smooth
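# Added illustration (not in the original test file): the Dehnen (2000)
# smoothing ramps from 0 at t=tform to 1 at t=tform+dt and passes through
# 1/2 at the midpoint; quick numerical check of those anchor points.
assert abs(smoothInterp(50., 100., 50.) - 0.) < 1e-12
assert abs(smoothInterp(100., 100., 50.) - 0.5) < 1e-12
assert abs(smoothInterp(150., 100., 50.) - 1.) < 1e-12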
class TimeInterpPotential(Potential):
"""Potential that smoothly interpolates in time between two static potentials"""
def __init__(self,pot1,pot2,dt=100.,tform=50.):
"""pot1= potential for t < tform, pot2= potential for t > tform+dt, dt: time over which to turn on pot2,
tform: time at which the interpolation is switched on"""
Potential.__init__(self,amp=1.)
self._pot1= pot1
self._pot2= pot2
self._tform= tform
self._dt= dt
return None
def _Rforce(self,R,z,phi=0.,t=0.):
smooth= smoothInterp(t,self._dt,self._tform)
return (1.-smooth)*self._pot1.Rforce(R,z)+smooth*self._pot2.Rforce(R,z)
def _zforce(self,R,z,phi=0.,t=0.):
smooth= smoothInterp(t,self._dt,self._tform)
return (1.-smooth)*self._pot1.zforce(R,z)+smooth*self._pot2.zforce(R,z)
def test_TimeInterpPotential():
#Just to check that the code above has run properly
from galpy.potential import LogarithmicHaloPotential, \
MiyamotoNagaiPotential
lp= LogarithmicHaloPotential(normalize=1.)
mp= MiyamotoNagaiPotential(normalize=1.)
tip= TimeInterpPotential(lp,mp)
    assert numpy.fabs(tip.Rforce(1.,0.1,t=10.)-lp.Rforce(1.,0.1)) < 10.**-8., 'TimeInterpPotential does not work as expected'
    assert numpy.fabs(tip.Rforce(1.,0.1,t=200.)-mp.Rforce(1.,0.1)) < 10.**-8., 'TimeInterpPotential does not work as expected'
return None
@pytest.mark.skip(reason="Test does not work correctly")
def test_potentialAPIChange_warning():
# Test that a warning is displayed about the API change for evaluatePotentials etc. functions from what is given in the galpy paper
#Turn warnings into errors to test for them
import warnings
from galpy.util import galpyWarning
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always",galpyWarning)
import galpy.potential
raisedWarning= False
for wa in w:
raisedWarning= (str(wa.message) == "A major change in versions > 1.1 is that all galpy.potential functions and methods take the potential as the first argument; previously methods such as evaluatePotentials, evaluateDensities, etc. would be called with (R,z,Pot), now they are called as (Pot,R,z) for greater consistency across the codebase")
if raisedWarning: break
assert raisedWarning, "Importing galpy.potential does not raise warning about evaluatePotentials API change"
return None
def test_orbitint():
import numpy
from galpy.potential import MWPotential2014
from galpy.potential import evaluatePotentials as evalPot
from galpy.orbit import Orbit
E, Lz= -1.25, 0.6
o1= Orbit([0.8,0.,Lz/0.8,0.,numpy.sqrt(2.*(E-evalPot(MWPotential2014,0.8,0.)-(Lz/0.8)**2./2.)),0.])
ts= numpy.linspace(0.,100.,2001)
o1.integrate(ts,MWPotential2014)
o1.plot(xrange=[0.3,1.],yrange=[-0.2,0.2],color='k')
o2= Orbit([0.8,0.3,Lz/0.8,0.,numpy.sqrt(2.*(E-evalPot(MWPotential2014,0.8,0.)-(Lz/0.8)**2./2.-0.3**2./2.)),0.])
o2.integrate(ts,MWPotential2014)
o2.plot(xrange=[0.3,1.],yrange=[-0.2,0.2],color='k')
return None
# Currently fails because o.jr(type='adiabatic') doesn't yet work for new Orbits
@pytest.mark.xfail(raises=NotImplementedError,strict=True)
def test_orbmethods():
from galpy.orbit import Orbit
from galpy.potential import MWPotential2014
# 8/17/2019: added explicit z=0.025, because that was the default at the
# time of the galpy paper, but the default has been changed
o= Orbit([0.8,0.3,0.75,0.,0.2,0.],zo=0.025) # setup R,vR,vT,z,vz,phi
times= numpy.linspace(0.,10.,1001) # Output times
o.integrate(times,MWPotential2014) # Integrate
o.E() # Energy
assert numpy.fabs(o.E()+1.2547650648697966) < 10.**-5., 'Orbit method does not work as expected'
o.L() # Angular momentum
assert numpy.all(numpy.fabs(o.L()-numpy.array([[ 0. , -0.16, 0.6 ]])) < 10.**-5.), 'Orbit method does not work as expected'
o.Jacobi(OmegaP=0.65) #Jacobi integral E-OmegaP Lz
assert numpy.fabs(o.Jacobi(OmegaP=0.65)-numpy.array([-1.64476506])) < 10.**-5., 'Orbit method does not work as expected'
o.ER(times[-1]), o.Ez(times[-1]) # Rad. and vert. E at end
assert numpy.fabs(o.ER(times[-1])+1.27601734263047) < 10.**-5., 'Orbit method does not work as expected'
assert numpy.fabs(o.Ez(times[-1])-0.021252201847851909) < 10.**-5., 'Orbit method does not work as expected'
o.rperi(), o.rap(), o.zmax() # Peri-/apocenter r, max. |z|
assert numpy.fabs(o.rperi()-0.44231993168097) < 10.**-5., 'Orbit method does not work as expected'
assert numpy.fabs(o.rap()-0.87769030382105) < 10.**-5., 'Orbit method does not work as expected'
assert numpy.fabs(o.zmax()-0.077452357289016) < 10.**-5., 'Orbit method does not work as expected'
o.e() # eccentricity (rap-rperi)/(rap+rperi)
assert numpy.fabs(o.e()-0.32982348199330563) < 10.**-5., 'Orbit method does not work as expected'
o.R(2.,ro=8.) # Cylindrical radius at time 2. in kpc
assert numpy.fabs(o.R(2.,ro=8.)-3.5470772876920007) < 10.**-3., 'Orbit method does not work as expected'
o.vR(5.,vo=220.) # Cyl. rad. velocity at time 5. in km/s
assert numpy.fabs(o.vR(5.,vo=220.)-45.202530965094553) < 10.**-3., 'Orbit method does not work as expected'
o.ra(1.), o.dec(1.) # RA and Dec at t=1. (default settings)
# 5/12/2016: test weakened, because improved galcen<->heliocen
# transformation has changed these, but still close
assert numpy.fabs(o.ra(1.)-numpy.array([ 288.19277])) < 10.**-1., 'Orbit method does not work as expected'
assert numpy.fabs(o.dec(1.)-numpy.array([ 18.98069155])) < 10.**-1., 'Orbit method does not work as expected'
o.jr(type='adiabatic'), o.jz() # R/z actions (ad. approx.)
assert numpy.fabs(o.jr(type='adiabatic')-0.05285302231137586) < 10.**-3., 'Orbit method does not work as expected'
assert numpy.fabs(o.jz()-0.006637988850751242) < 10.**-3., 'Orbit method does not work as expected'
# Rad. period w/ Staeckel approximation w/ focal length 0.5,
o.Tr(type='staeckel',delta=0.5,ro=8.,vo=220.) # in Gyr
assert numpy.fabs(o.Tr(type='staeckel',delta=0.5,ro=8.,vo=220.)-0.1039467864018446) < 10.**-3., 'Orbit method does not work as expected'
o.plot(d1='R',d2='z') # Plot the orbit in (R,z)
o.plot3d() # Plot the orbit in 3D, w/ default [x,y,z]
return None
def test_orbsetup():
from galpy.orbit import Orbit
o= Orbit([25.,10.,2.,5.,-2.,50.],radec=True,ro=8.,
vo=220.,solarmotion=[-11.1,25.,7.25])
return None
def test_surfacesection():
#Preliminary code
import numpy
from galpy.potential import MWPotential2014
from galpy.potential import evaluatePotentials as evalPot
from galpy.orbit import Orbit
E, Lz= -1.25, 0.6
o1= Orbit([0.8,0.,Lz/0.8,0.,numpy.sqrt(2.*(E-evalPot(MWPotential2014,0.8,0.)-(Lz/0.8)**2./2.)),0.])
ts= numpy.linspace(0.,100.,2001)
o1.integrate(ts,MWPotential2014)
o2= Orbit([0.8,0.3,Lz/0.8,0.,numpy.sqrt(2.*(E-evalPot(MWPotential2014,0.8,0.)-(Lz/0.8)**2./2.-0.3**2./2.)),0.])
o2.integrate(ts,MWPotential2014)
def surface_section(Rs,zs,vRs):
# Find points where the orbit crosses z from - to +
shiftzs= numpy.roll(zs,-1)
indx= (zs[:-1] < 0.)*(shiftzs[:-1] > 0.)
return (Rs[:-1][indx],vRs[:-1][indx])
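    # Added sketch (not in the original test): with a synthetic orbit whose z
    # crosses zero from below exactly once, surface_section should return
    # that single crossing.
    _zs = numpy.array([-1., -0.5, 0.5, 1.])
    _Rs = numpy.array([0., 1., 2., 3.])
    _vRs = numpy.array([0., 0.1, 0.2, 0.3])
    _secR, _secvR = surface_section(_Rs, _zs, _vRs)
    assert len(_secR) == 1 and _secR[0] == 1.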
# Calculate and plot the surface of section
ts= numpy.linspace(0.,1000.,20001) # long integration
o1.integrate(ts,MWPotential2014)
o2.integrate(ts,MWPotential2014)
sect1Rs,sect1vRs=surface_section(o1.R(ts),o1.z(ts),o1.vR(ts))
sect2Rs,sect2vRs=surface_section(o2.R(ts),o2.z(ts),o2.vR(ts))
from matplotlib.pyplot import plot, xlim, ylim
plot(sect1Rs,sect1vRs,'bo',mec='none')
xlim(0.3,1.); ylim(-0.69,0.69)
plot(sect2Rs,sect2vRs,'yo',mec='none')
return None
def test_adinvariance():
from galpy.potential import IsochronePotential
from galpy.orbit import Orbit
from galpy.actionAngle import actionAngleIsochrone
# Initialize two different IsochronePotentials
ip1= IsochronePotential(normalize=1.,b=1.)
ip2= IsochronePotential(normalize=0.5,b=1.)
# Use TimeInterpPotential to interpolate smoothly
tip= TimeInterpPotential(ip1,ip2,dt=100.,tform=50.)
# Integrate: 1) Orbit in the first isochrone potential
o1= Orbit([1.,0.1,1.1,0.0,0.1,0.])
ts= numpy.linspace(0.,50.,1001)
o1.integrate(ts,tip)
o1.plot(d1='x',d2='y',xrange=[-1.6,1.6],yrange=[-1.6,1.6],
color='b')
# 2) Orbit in the transition
o2= o1(ts[-1]) # Last time step => initial time step
ts2= numpy.linspace(50.,150.,1001)
o2.integrate(ts2,tip)
o2.plot(d1='x',d2='y',overplot=True,color='g')
# 3) Orbit in the second isochrone potential
o3= o2(ts2[-1])
ts3= numpy.linspace(150.,200.,1001)
o3.integrate(ts3,tip)
o3.plot(d1='x',d2='y',overplot=True,color='r')
# Now we calculate energy, maximum height, and mean radius
print(o1.E(pot=ip1), (o1.rperi()+o1.rap())/2, o1.zmax())
assert numpy.fabs(o1.E(pot=ip1)+2.79921356237) < 10.**-4., 'Energy in the adiabatic invariance test is different'
assert numpy.fabs((o1.rperi()+o1.rap())/2-1.07854158141) < 10.**-4., 'mean radius in the adiabatic invariance test is different'
assert numpy.fabs(o1.zmax()-0.106331362938) < 10.**-4., 'zmax in the adiabatic invariance test is different'
print(o3.E(pot=ip2), (o3.rperi()+o3.rap())/2, o3.zmax())
assert numpy.fabs(o3.E(pot=ip2)+1.19677002624) < 10.**-4., 'Energy in the adiabatic invariance test is different'
assert numpy.fabs((o3.rperi()+o3.rap())/2-1.39962036137) < 10.**-4., 'mean radius in the adiabatic invariance test is different'
assert numpy.fabs(o3.zmax()-0.138364269321) < 10.**-4., 'zmax in the adiabatic invariance test is different'
# The orbit has clearly moved to larger radii,
# the actions are however conserved from beginning to end
aAI1= actionAngleIsochrone(ip=ip1); print(aAI1(o1))
js= aAI1(o1)
assert numpy.fabs(js[0]-numpy.array([ 0.00773779])) < 10.**-4., 'action in the adiabatic invariance test is different'
assert numpy.fabs(js[1]-numpy.array([ 1.1])) < 10.**-4., 'action in the adiabatic invariance test is different'
assert numpy.fabs(js[2]-numpy.array([ 0.0045361])) < 10.**-4., 'action in the adiabatic invariance test is different'
aAI2= actionAngleIsochrone(ip=ip2); print(aAI2(o3))
js= aAI2(o3)
assert numpy.fabs(js[0]-numpy.array([ 0.00773812])) < 10.**-4., 'action in the adiabatic invariance test is different'
assert numpy.fabs(js[1]-numpy.array([ 1.1])) < 10.**-4., 'action in the adiabatic invariance test is different'
assert numpy.fabs(js[2]-numpy.array([ 0.0045361])) < 10.**-4., 'action in the adiabatic invariance test is different'
return None
def test_diskdf():
from galpy.df import dehnendf
# Init. dehnendf w/ flat rot., hr=1/3, hs=1, and sr(1)=0.2
df= dehnendf(beta=0.,profileParams=(1./3.,1.0,0.2))
# Same, w/ correction factors to scale profiles
dfc= dehnendf(beta=0.,profileParams=(1./3.,1.0,0.2),
correct=True,niter=20)
if True:
# Log. diff. between scale and DF surf. dens.
numpy.log(df.surfacemass(0.5)/df.targetSurfacemass(0.5))
assert numpy.fabs(numpy.log(df.surfacemass(0.5)/df.targetSurfacemass(0.5))+0.056954077791649592) < 10.**-4., 'diskdf does not behave as expected'
# Same for corrected DF
numpy.log(dfc.surfacemass(0.5)/dfc.targetSurfacemass(0.5))
assert numpy.fabs(numpy.log(dfc.surfacemass(0.5)/dfc.targetSurfacemass(0.5))+4.1440377205802041e-06) < 10.**-4., 'diskdf does not behave as expected'
# Log. diff between scale and DF sr
numpy.log(df.sigmaR2(0.5)/df.targetSigma2(0.5))
assert numpy.fabs(numpy.log(df.sigmaR2(0.5)/df.targetSigma2(0.5))+0.12786083001363127) < 10.**-4., 'diskdf does not behave as expected'
# Same for corrected DF
numpy.log(dfc.sigmaR2(0.5)/dfc.targetSigma2(0.5))
assert numpy.fabs(numpy.log(dfc.sigmaR2(0.5)/dfc.targetSigma2(0.5))+6.8065001252214986e-06) < 10.**-4., 'diskdf does not behave as expected'
# Evaluate DF w/ R,vR,vT
df(numpy.array([0.9,0.1,0.8]))
assert numpy.fabs(df(numpy.array([0.9,0.1,0.8]))-numpy.array(0.1740247246180417)) < 10.**-4., 'diskdf does not behave as expected'
# Evaluate corrected DF w/ Orbit instance
from galpy.orbit import Orbit
dfc(Orbit([0.9,0.1,0.8]))
assert numpy.fabs(dfc(Orbit([0.9,0.1,0.8]))-numpy.array(0.16834863725552207)) < 10.**-4., 'diskdf does not behave as expected'
# Calculate the mean velocities
df.meanvR(0.9), df.meanvT(0.9)
assert numpy.fabs(df.meanvR(0.9)) < 10.**-4., 'diskdf does not behave as expected'
assert numpy.fabs(df.meanvT(0.9)-0.91144428051168291) < 10.**-4., 'diskdf does not behave as expected'
# Calculate the velocity dispersions
numpy.sqrt(dfc.sigmaR2(0.9)), numpy.sqrt(dfc.sigmaT2(0.9))
assert numpy.fabs(numpy.sqrt(dfc.sigmaR2(0.9))-0.22103383792719539) < 10.**-4., 'diskdf does not behave as expected'
assert numpy.fabs(numpy.sqrt(dfc.sigmaT2(0.9))-0.17613725303902811) < 10.**-4., 'diskdf does not behave as expected'
# Calculate the skew of the velocity distribution
df.skewvR(0.9), df.skewvT(0.9)
assert numpy.fabs(df.skewvR(0.9)) < 10.**-4., 'diskdf does not behave as expected'
assert numpy.fabs(df.skewvT(0.9)+0.47331638366025863) < 10.**-4., 'diskdf does not behave as expected'
# Calculate the kurtosis of the velocity distribution
df.kurtosisvR(0.9), df.kurtosisvT(0.9)
assert numpy.fabs(df.kurtosisvR(0.9)+0.13561300880237059) < 10.**-4., 'diskdf does not behave as expected'
assert numpy.fabs(df.kurtosisvT(0.9)-0.12612702099300721) < 10.**-4., 'diskdf does not behave as expected'
# Calculate a higher-order moment of the velocity DF
df.vmomentsurfacemass(1.,6.,2.)/df.surfacemass(1.)
assert numpy.fabs(df.vmomentsurfacemass(1.,6.,2.)/df.surfacemass(1.)-0.00048953492205559054) < 10.**-4., 'diskdf does not behave as expected'
# Calculate the Oort functions
dfc.oortA(1.), dfc.oortB(1.), dfc.oortC(1.), dfc.oortK(1.)
assert numpy.fabs(dfc.oortA(1.)-0.40958989067012197) < 10.**-4., 'diskdf does not behave as expected'
assert numpy.fabs(dfc.oortB(1.)+0.49396172114486514) < 10.**-4., 'diskdf does not behave as expected'
assert numpy.fabs(dfc.oortC(1.)) < 10.**-4., 'diskdf does not behave as expected'
assert numpy.fabs(dfc.oortK(1.)) < 10.**-4., 'diskdf does not behave as expected'
# Sample Orbits from the DF, returns list of Orbits
numpy.random.seed(1)
os= dfc.sample(n=100,returnOrbit=True,nphi=1)
# check that these have the right mean radius = 2hr=2/3
rs= numpy.array([o.R() for o in os])
assert numpy.fabs(numpy.mean(rs)-2./3.) < 0.1
# Sample vR and vT at given R, check their mean
vrvt= dfc.sampleVRVT(0.7,n=500,target=True); vt= vrvt[:,1]
assert numpy.fabs(numpy.mean(vrvt[:,0])) < 0.05
assert numpy.fabs(numpy.mean(vt)-dfc.meanvT(0.7)) < 0.01
# Sample Orbits along a given line-of-sight
os= dfc.sampleLOS(45.,n=1000)
return None
def test_oort():
from galpy.df import dehnendf
df= dehnendf(beta=0.,correct=True,niter=20,
profileParams=(1./3.,1.,0.1))
va= 1.-df.meanvT(1.) # asymmetric drift
A= df.oortA(1.)
B= df.oortB(1.)
return None
def test_qdf():
from galpy.df import quasiisothermaldf
from galpy.potential import MWPotential2014
from galpy.actionAngle import actionAngleStaeckel
# Setup actionAngle instance for action calcs
aAS= actionAngleStaeckel(pot=MWPotential2014,delta=0.45,
c=True)
# Quasi-iso df w/ hr=1/3, hsr/z=1, sr(1)=0.2, sz(1)=0.1
df= quasiisothermaldf(1./3.,0.2,0.1,1.,1.,aA=aAS,
pot=MWPotential2014)
# Evaluate DF w/ R,vR,vT,z,vz
df(0.9,0.1,0.8,0.05,0.02)
assert numpy.fabs(df(0.9,0.1,0.8,0.05,0.02)-numpy.array([ 123.57158928])) < 10.**-4., 'qdf does not behave as expected'
# Evaluate DF w/ Orbit instance, return ln
from galpy.orbit import Orbit
df(Orbit([0.9,0.1,0.8,0.05,0.02]),log=True)
assert numpy.fabs(df(Orbit([0.9,0.1,0.8,0.05,0.02]),log=True)-numpy.array([ 4.81682066])) < 10.**-4., 'qdf does not behave as expected'
# Evaluate DF marginalized over vz
df.pvRvT(0.1,0.9,0.9,0.05)
assert numpy.fabs(df.pvRvT(0.1,0.9,0.9,0.05)-2.*23.273310451852243) < 10.**-4., 'qdf does not behave as expected'
#NOTE: The pvRvT() function has changed with respect to the version used in Bovy (2015).
# As of January 2018, a prefactor of 2 has been added (=nsigma/2 with default nsigma=4),
# to account for the correct Gauss-Legendre integration normalization.
# Evaluate DF marginalized over vR,vT
df.pvz(0.02,0.9,0.05)
assert numpy.fabs(df.pvz(0.02,0.9,0.05)-50.949586235238172) < 10.**-4., 'qdf does not behave as expected'
# Calculate the density
df.density(0.9,0.05)
assert numpy.fabs(df.density(0.9,0.05)-12.73725936526167) < 10.**-4., 'qdf does not behave as expected'
# Estimate the DF's actual density scale length at z=0
df.estimate_hr(0.9,0.)
assert numpy.fabs(df.estimate_hr(0.9,0.)-0.322420336223) < 10.**-2., 'qdf does not behave as expected'
# Estimate the DF's actual surface-density scale length
df.estimate_hr(0.9,None)
assert numpy.fabs(df.estimate_hr(0.9,None)-0.38059909132766462) < 10.**-4., 'qdf does not behave as expected'
# Estimate the DF's density scale height
df.estimate_hz(0.9,0.02)
assert numpy.fabs(df.estimate_hz(0.9,0.02)-0.064836202345657207) < 10.**-4., 'qdf does not behave as expected'
# Calculate the mean velocities
df.meanvR(0.9,0.05), df.meanvT(0.9,0.05),
df.meanvz(0.9,0.05)
assert numpy.fabs(df.meanvR(0.9,0.05)-3.8432265354618213e-18) < 10.**-4., 'qdf does not behave as expected'
assert numpy.fabs(df.meanvT(0.9,0.05)-0.90840425173325279) < 10.**-4., 'qdf does not behave as expected'
assert numpy.fabs(df.meanvz(0.9,0.05)+4.3579787517991084e-19) < 10.**-4., 'qdf does not behave as expected'
# Calculate the velocity dispersions
from numpy import sqrt
sqrt(df.sigmaR2(0.9,0.05)), sqrt(df.sigmaz2(0.9,0.05))
assert numpy.fabs(sqrt(df.sigmaR2(0.9,0.05))-0.22695537077102387) < 10.**-4., 'qdf does not behave as expected'
assert numpy.fabs(sqrt(df.sigmaz2(0.9,0.05))-0.094215523962105044) < 10.**-4., 'qdf does not behave as expected'
# Calculate the tilt of the velocity ellipsoid
# 2017/10-28: CHANGED bc tilt now returns angle in rad, no longer in deg
df.tilt(0.9,0.05)
assert numpy.fabs(df.tilt(0.9,0.05)-2.5166061974413765/180.*numpy.pi) < 10.**-4., 'qdf does not behave as expected'
# Calculate a higher-order moment of the velocity DF
df.vmomentdensity(0.9,0.05,6.,2.,2.,gl=True)
assert numpy.fabs(df.vmomentdensity(0.9,0.05,6.,2.,2.,gl=True)-0.0001591100892366438) < 10.**-4., 'qdf does not behave as expected'
# Sample velocities at given R,z, check mean
numpy.random.seed(1)
vs= df.sampleV(0.9,0.05,n=500); mvt= numpy.mean(vs[:,1])
assert numpy.fabs(numpy.mean(vs[:,0])) < 0.05 # vR
assert numpy.fabs(mvt-df.meanvT(0.9,0.05)) < 0.01 #vT
assert numpy.fabs(numpy.mean(vs[:,2])) < 0.05 # vz
return None
def test_coords():
from galpy.util import coords
ra, dec, dist= 161., 50., 8.5
pmra, pmdec, vlos= -6.8, -10., -115.
# Convert to Galactic and then to rect. Galactic
ll, bb= coords.radec_to_lb(ra,dec,degree=True)
pmll, pmbb= coords.pmrapmdec_to_pmllpmbb(pmra,pmdec,ra,dec,degree=True)
X,Y,Z= coords.lbd_to_XYZ(ll,bb,dist,degree=True)
vX,vY,vZ= coords.vrpmllpmbb_to_vxvyvz(vlos,pmll,pmbb,X,Y,Z,XYZ=True)
# Convert to cylindrical Galactocentric
# Assuming Sun's distance to GC is (8,0.025) in (R,z)
R,phi,z= coords.XYZ_to_galcencyl(X,Y,Z,Xsun=8.,Zsun=0.025)
vR,vT,vz= coords.vxvyvz_to_galcencyl(vX,vY,vZ,R,phi,Z,vsun=[-10.1,244.,6.7],galcen=True)
# 5/12/2016: test weakened, because improved galcen<->heliocen
# transformation has changed these, but still close
print(R,phi,z,vR,vT,vz)
assert numpy.fabs(R-12.51328515156942) < 10.**-1., 'Coordinate transformation has changed'
assert numpy.fabs(phi-0.12177409073433249) < 10.**-1., 'Coordinate transformation has changed'
assert numpy.fabs(z-7.1241282354856228) < 10.**-1., 'Coordinate transformation has changed'
assert numpy.fabs(vR-78.961682923035966) < 10.**-1., 'Coordinate transformation has changed'
assert numpy.fabs(vT+241.49247772351964) < 10.**-1., 'Coordinate transformation has changed'
assert numpy.fabs(vz+102.83965442188689) < 10.**-1., 'Coordinate transformation has changed'
return None
| bsd-3-clause |
epfl-lts2/pygsp | pygsp/graphs/nngraphs/grid2dimgpatches.py | 1 | 1461 | # -*- coding: utf-8 -*-
# prevent circular import in Python < 3.5
from pygsp.graphs import Graph, Grid2d, ImgPatches
class Grid2dImgPatches(Graph):
r"""Union of a patch graph with a 2D grid graph.
Parameters
----------
img : array
Input image.
aggregate: callable, optional
Function to aggregate the weights ``Wp`` of the patch graph and the
``Wg`` of the grid graph. Default is ``lambda Wp, Wg: Wp + Wg``.
kwargs : dict
Parameters passed to :class:`ImgPatches`.
See Also
--------
ImgPatches
Grid2d
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from skimage import data, img_as_float
>>> img = img_as_float(data.camera()[::64, ::64])
>>> G = graphs.Grid2dImgPatches(img)
>>> fig, axes = plt.subplots(1, 2)
>>> _ = axes[0].spy(G.W, markersize=2)
>>> _ = G.plot(ax=axes[1])
"""
def __init__(self, img, aggregate=lambda Wp, Wg: Wp + Wg, **kwargs):
self.Gg = Grid2d(img.shape[0], img.shape[1])
self.Gp = ImgPatches(img, **kwargs)
W = aggregate(self.Gp.W, self.Gg.W)
super(Grid2dImgPatches, self).__init__(W,
coords=self.Gg.coords,
plotting=self.Gg.plotting)
def _get_extra_repr(self):
attrs = self.Gg._get_extra_repr()
attrs.update(self.Gp._get_extra_repr())
return attrs
| bsd-3-clause |
jreback/pandas | pandas/tests/util/test_assert_almost_equal.py | 2 | 12536 | import numpy as np
import pytest
from pandas import DataFrame, Index, Series, Timestamp
import pandas._testing as tm
def _assert_almost_equal_both(a, b, **kwargs):
"""
Check that two objects are approximately equal.
This check is performed commutatively.
Parameters
----------
a : object
The first object to compare.
b : object
The second object to compare.
**kwargs
The arguments passed to `tm.assert_almost_equal`.
"""
tm.assert_almost_equal(a, b, **kwargs)
tm.assert_almost_equal(b, a, **kwargs)
def _assert_not_almost_equal(a, b, **kwargs):
"""
Check that two objects are not approximately equal.
Parameters
----------
a : object
The first object to compare.
b : object
The second object to compare.
**kwargs
The arguments passed to `tm.assert_almost_equal`.
"""
try:
tm.assert_almost_equal(a, b, **kwargs)
msg = f"{a} and {b} were approximately equal when they shouldn't have been"
pytest.fail(msg=msg)
except AssertionError:
pass
def _assert_not_almost_equal_both(a, b, **kwargs):
"""
Check that two objects are not approximately equal.
This check is performed commutatively.
Parameters
----------
a : object
The first object to compare.
b : object
The second object to compare.
**kwargs
The arguments passed to `tm.assert_almost_equal`.
"""
_assert_not_almost_equal(a, b, **kwargs)
_assert_not_almost_equal(b, a, **kwargs)
@pytest.mark.parametrize(
"a,b,check_less_precise",
[(1.1, 1.1, False), (1.1, 1.100001, True), (1.1, 1.1001, 2)],
)
def test_assert_almost_equal_deprecated(a, b, check_less_precise):
# GH#30562
with tm.assert_produces_warning(FutureWarning):
_assert_almost_equal_both(a, b, check_less_precise=check_less_precise)
@pytest.mark.parametrize(
"a,b",
[
(1.1, 1.1),
(1.1, 1.100001),
(np.int16(1), 1.000001),
(np.float64(1.1), 1.1),
(np.uint32(5), 5),
],
)
def test_assert_almost_equal_numbers(a, b):
_assert_almost_equal_both(a, b)
@pytest.mark.parametrize(
"a,b",
[
(1.1, 1),
(1.1, True),
(1, 2),
(1.0001, np.int16(1)),
# The following two examples are not "almost equal" due to tol.
(0.1, 0.1001),
(0.0011, 0.0012),
],
)
def test_assert_not_almost_equal_numbers(a, b):
_assert_not_almost_equal_both(a, b)
@pytest.mark.parametrize(
"a,b",
[
(1.1, 1.1),
(1.1, 1.100001),
(1.1, 1.1001),
(0.000001, 0.000005),
(1000.0, 1000.0005),
# Testing this example, as per #13357
(0.000011, 0.000012),
],
)
def test_assert_almost_equal_numbers_atol(a, b):
# Equivalent to the deprecated check_less_precise=True
_assert_almost_equal_both(a, b, rtol=0.5e-3, atol=0.5e-3)
@pytest.mark.parametrize("a,b", [(1.1, 1.11), (0.1, 0.101), (0.000011, 0.001012)])
def test_assert_not_almost_equal_numbers_atol(a, b):
_assert_not_almost_equal_both(a, b, atol=1e-3)
@pytest.mark.parametrize(
"a,b",
[
(1.1, 1.1),
(1.1, 1.100001),
(1.1, 1.1001),
(1000.0, 1000.0005),
(1.1, 1.11),
(0.1, 0.101),
],
)
def test_assert_almost_equal_numbers_rtol(a, b):
_assert_almost_equal_both(a, b, rtol=0.05)
@pytest.mark.parametrize("a,b", [(0.000011, 0.000012), (0.000001, 0.000005)])
def test_assert_not_almost_equal_numbers_rtol(a, b):
_assert_not_almost_equal_both(a, b, rtol=0.05)
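# Added note (not part of the original tests): the rtol/atol cases above
# follow the familiar closeness criterion, roughly |a - b| <= atol + rtol*|b|
# (pandas' own check may differ in details); a tiny standalone illustration:
def _is_close_sketch(a, b, rtol=1e-5, atol=1e-8):
    return abs(a - b) <= atol + rtol * abs(b)
assert _is_close_sketch(1.1, 1.100001, rtol=0.05)
assert not _is_close_sketch(0.000011, 0.000012, rtol=0.05)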
@pytest.mark.parametrize(
"a,b,rtol",
[
(1.00001, 1.00005, 0.001),
(-0.908356 + 0.2j, -0.908358 + 0.2j, 1e-3),
(0.1 + 1.009j, 0.1 + 1.006j, 0.1),
(0.1001 + 2.0j, 0.1 + 2.001j, 0.01),
],
)
def test_assert_almost_equal_complex_numbers(a, b, rtol):
_assert_almost_equal_both(a, b, rtol=rtol)
_assert_almost_equal_both(np.complex64(a), np.complex64(b), rtol=rtol)
_assert_almost_equal_both(np.complex128(a), np.complex128(b), rtol=rtol)
@pytest.mark.parametrize(
"a,b,rtol",
[
(0.58310768, 0.58330768, 1e-7),
(-0.908 + 0.2j, -0.978 + 0.2j, 0.001),
(0.1 + 1j, 0.1 + 2j, 0.01),
(-0.132 + 1.001j, -0.132 + 1.005j, 1e-5),
(0.58310768j, 0.58330768j, 1e-9),
],
)
def test_assert_not_almost_equal_complex_numbers(a, b, rtol):
_assert_not_almost_equal_both(a, b, rtol=rtol)
_assert_not_almost_equal_both(np.complex64(a), np.complex64(b), rtol=rtol)
_assert_not_almost_equal_both(np.complex128(a), np.complex128(b), rtol=rtol)
@pytest.mark.parametrize("a,b", [(0, 0), (0, 0.0), (0, np.float64(0)), (0.00000001, 0)])
def test_assert_almost_equal_numbers_with_zeros(a, b):
_assert_almost_equal_both(a, b)
@pytest.mark.parametrize("a,b", [(0.001, 0), (1, 0)])
def test_assert_not_almost_equal_numbers_with_zeros(a, b):
_assert_not_almost_equal_both(a, b)
@pytest.mark.parametrize("a,b", [(1, "abc"), (1, [1]), (1, object())])
def test_assert_not_almost_equal_numbers_with_mixed(a, b):
_assert_not_almost_equal_both(a, b)
@pytest.mark.parametrize(
"left_dtype", ["M8[ns]", "m8[ns]", "float64", "int64", "object"]
)
@pytest.mark.parametrize(
"right_dtype", ["M8[ns]", "m8[ns]", "float64", "int64", "object"]
)
def test_assert_almost_equal_edge_case_ndarrays(left_dtype, right_dtype):
# Empty compare.
_assert_almost_equal_both(
np.array([], dtype=left_dtype),
np.array([], dtype=right_dtype),
check_dtype=False,
)
def test_assert_almost_equal_dicts():
_assert_almost_equal_both({"a": 1, "b": 2}, {"a": 1, "b": 2})
@pytest.mark.parametrize(
"a,b",
[
({"a": 1, "b": 2}, {"a": 1, "b": 3}),
({"a": 1, "b": 2}, {"a": 1, "b": 2, "c": 3}),
({"a": 1}, 1),
({"a": 1}, "abc"),
({"a": 1}, [1]),
],
)
def test_assert_not_almost_equal_dicts(a, b):
_assert_not_almost_equal_both(a, b)
@pytest.mark.parametrize("val", [1, 2])
def test_assert_almost_equal_dict_like_object(val):
dict_val = 1
real_dict = {"a": val}
class DictLikeObj:
def keys(self):
return ("a",)
def __getitem__(self, item):
if item == "a":
return dict_val
func = (
_assert_almost_equal_both if val == dict_val else _assert_not_almost_equal_both
)
func(real_dict, DictLikeObj(), check_dtype=False)
def test_assert_almost_equal_strings():
_assert_almost_equal_both("abc", "abc")
@pytest.mark.parametrize(
"a,b", [("abc", "abcd"), ("abc", "abd"), ("abc", 1), ("abc", [1])]
)
def test_assert_not_almost_equal_strings(a, b):
_assert_not_almost_equal_both(a, b)
@pytest.mark.parametrize(
"a,b", [([1, 2, 3], [1, 2, 3]), (np.array([1, 2, 3]), np.array([1, 2, 3]))]
)
def test_assert_almost_equal_iterables(a, b):
_assert_almost_equal_both(a, b)
@pytest.mark.parametrize(
"a,b",
[
# Class is different.
(np.array([1, 2, 3]), [1, 2, 3]),
# Dtype is different.
(np.array([1, 2, 3]), np.array([1.0, 2.0, 3.0])),
# Can't compare generators.
(iter([1, 2, 3]), [1, 2, 3]),
([1, 2, 3], [1, 2, 4]),
([1, 2, 3], [1, 2, 3, 4]),
([1, 2, 3], 1),
],
)
def test_assert_not_almost_equal_iterables(a, b):
_assert_not_almost_equal(a, b)
def test_assert_almost_equal_null():
_assert_almost_equal_both(None, None)
@pytest.mark.parametrize("a,b", [(None, np.NaN), (None, 0), (np.NaN, 0)])
def test_assert_not_almost_equal_null(a, b):
_assert_not_almost_equal(a, b)
@pytest.mark.parametrize(
"a,b",
[
(np.inf, np.inf),
(np.inf, float("inf")),
(np.array([np.inf, np.nan, -np.inf]), np.array([np.inf, np.nan, -np.inf])),
(
np.array([np.inf, None, -np.inf], dtype=np.object_),
np.array([np.inf, np.nan, -np.inf], dtype=np.object_),
),
],
)
def test_assert_almost_equal_inf(a, b):
_assert_almost_equal_both(a, b)
def test_assert_not_almost_equal_inf():
_assert_not_almost_equal_both(np.inf, 0)
@pytest.mark.parametrize(
"a,b",
[
(Index([1.0, 1.1]), Index([1.0, 1.100001])),
(Series([1.0, 1.1]), Series([1.0, 1.100001])),
(np.array([1.1, 2.000001]), np.array([1.1, 2.0])),
(DataFrame({"a": [1.0, 1.1]}), DataFrame({"a": [1.0, 1.100001]})),
],
)
def test_assert_almost_equal_pandas(a, b):
_assert_almost_equal_both(a, b)
def test_assert_almost_equal_object():
a = [Timestamp("2011-01-01"), Timestamp("2011-01-01")]
b = [Timestamp("2011-01-01"), Timestamp("2011-01-01")]
_assert_almost_equal_both(a, b)
def test_assert_almost_equal_value_mismatch():
msg = "expected 2\\.00000 but got 1\\.00000, with rtol=1e-05, atol=1e-08"
with pytest.raises(AssertionError, match=msg):
tm.assert_almost_equal(1, 2)
@pytest.mark.parametrize(
"a,b,klass1,klass2",
[(np.array([1]), 1, "ndarray", "int"), (1, np.array([1]), "int", "ndarray")],
)
def test_assert_almost_equal_class_mismatch(a, b, klass1, klass2):
msg = f"""numpy array are different
numpy array classes are different
\\[left\\]: {klass1}
\\[right\\]: {klass2}"""
with pytest.raises(AssertionError, match=msg):
tm.assert_almost_equal(a, b)
def test_assert_almost_equal_value_mismatch1():
msg = """numpy array are different
numpy array values are different \\(66\\.66667 %\\)
\\[left\\]: \\[nan, 2\\.0, 3\\.0\\]
\\[right\\]: \\[1\\.0, nan, 3\\.0\\]"""
with pytest.raises(AssertionError, match=msg):
tm.assert_almost_equal(np.array([np.nan, 2, 3]), np.array([1, np.nan, 3]))
def test_assert_almost_equal_value_mismatch2():
msg = """numpy array are different
numpy array values are different \\(50\\.0 %\\)
\\[left\\]: \\[1, 2\\]
\\[right\\]: \\[1, 3\\]"""
with pytest.raises(AssertionError, match=msg):
tm.assert_almost_equal(np.array([1, 2]), np.array([1, 3]))
def test_assert_almost_equal_value_mismatch3():
msg = """numpy array are different
numpy array values are different \\(16\\.66667 %\\)
\\[left\\]: \\[\\[1, 2\\], \\[3, 4\\], \\[5, 6\\]\\]
\\[right\\]: \\[\\[1, 3\\], \\[3, 4\\], \\[5, 6\\]\\]"""
with pytest.raises(AssertionError, match=msg):
tm.assert_almost_equal(
np.array([[1, 2], [3, 4], [5, 6]]), np.array([[1, 3], [3, 4], [5, 6]])
)
def test_assert_almost_equal_value_mismatch4():
msg = """numpy array are different
numpy array values are different \\(25\\.0 %\\)
\\[left\\]: \\[\\[1, 2\\], \\[3, 4\\]\\]
\\[right\\]: \\[\\[1, 3\\], \\[3, 4\\]\\]"""
with pytest.raises(AssertionError, match=msg):
tm.assert_almost_equal(np.array([[1, 2], [3, 4]]), np.array([[1, 3], [3, 4]]))
def test_assert_almost_equal_shape_mismatch_override():
msg = """Index are different
Index shapes are different
\\[left\\]: \\(2L*,\\)
\\[right\\]: \\(3L*,\\)"""
with pytest.raises(AssertionError, match=msg):
tm.assert_almost_equal(np.array([1, 2]), np.array([3, 4, 5]), obj="Index")
def test_assert_almost_equal_unicode():
# see gh-20503
msg = """numpy array are different
numpy array values are different \\(33\\.33333 %\\)
\\[left\\]: \\[á, à, ä\\]
\\[right\\]: \\[á, à, å\\]"""
with pytest.raises(AssertionError, match=msg):
tm.assert_almost_equal(np.array(["á", "à", "ä"]), np.array(["á", "à", "å"]))
def test_assert_almost_equal_timestamp():
a = np.array([Timestamp("2011-01-01"), Timestamp("2011-01-01")])
b = np.array([Timestamp("2011-01-01"), Timestamp("2011-01-02")])
msg = """numpy array are different
numpy array values are different \\(50\\.0 %\\)
\\[left\\]: \\[2011-01-01 00:00:00, 2011-01-01 00:00:00\\]
\\[right\\]: \\[2011-01-01 00:00:00, 2011-01-02 00:00:00\\]"""
with pytest.raises(AssertionError, match=msg):
tm.assert_almost_equal(a, b)
def test_assert_almost_equal_iterable_length_mismatch():
msg = """Iterable are different
Iterable length are different
\\[left\\]: 2
\\[right\\]: 3"""
with pytest.raises(AssertionError, match=msg):
tm.assert_almost_equal([1, 2], [3, 4, 5])
def test_assert_almost_equal_iterable_values_mismatch():
msg = """Iterable are different
Iterable values are different \\(50\\.0 %\\)
\\[left\\]: \\[1, 2\\]
\\[right\\]: \\[1, 3\\]"""
with pytest.raises(AssertionError, match=msg):
tm.assert_almost_equal([1, 2], [1, 3])
| bsd-3-clause |
berkeley-stat159/project-alpha | final/image_scripts/tsa_plots.py | 1 | 3616 | """
This is a script that does some time series analysis on a single voxel
for subject 1. It relies heavily on the statsmodels module, and since
there aren't really any built-in functions at this time, there is no
corresponding file in the functions or tests directories. If writing
additional functions becomes necessary, I will implement and test them
as needed.
"""
from __future__ import absolute_import, division, print_function
import numpy as np
from scipy import stats
import matplotlib.pyplot as plt
import matplotlib.lines as mlines
import statsmodels.api as sm
from statsmodels.graphics.api import qqplot
import nibabel as nib
# Relative path to subject 1 data
pathtodata = "../../data/ds009/sub001/"
# Path to directory to save images.
location_of_images="../../images/"
# Load in the image for Subject 1.
img = nib.load(pathtodata+"BOLD/task001_run001/bold.nii.gz")
data = img.get_data()
data = data[...,6:] # Knock off the first 6 observations.
# Pull out a single voxel.
voxel = data[41, 47, 2]
plt.plot(voxel)
plt.close()
# Sort of a curve = nonconstant mean.
# Variance also seems to be funky toward the ends.
plt.hist(voxel)
plt.close()
# Long right tail.
qqplot(voxel, line='q')
plt.close()
# More-or-less normal, with deviations at tails.
# Box-Cox method to find best power transformation.
bc = stats.boxcox(voxel)
bc[1] # Lambda pretty close to 0, so try log transformation.
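# Added note (not in the original script): the Box-Cox transform is
# (x**lam - 1)/lam for lam != 0 and log(x) in the lam -> 0 limit, which is
# why a fitted lambda near zero motivates the log transform below. Quick
# numerical check at an assumed value x = 2 with a tiny lambda:
_x, _lam = 2.0, 1e-6
assert abs((_x**_lam - 1.0)/_lam - np.log(_x)) < 1e-4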
print("Log transforming data.")
# Log transform the data.
lvoxel = np.log(voxel)
plt.plot(lvoxel)
plt.close()
plt.hist(lvoxel)
plt.close()
qqplot(lvoxel, line='q')
plt.close()
# Plots look pretty similar, but skewness has been eliminated.
# Try looking at the first difference.
diff1 = lvoxel[:-1]-lvoxel[1:]
plt.plot(diff1)
plt.close()
# Mean looks like it could be constant.
plt.hist(diff1)
plt.close()
qqplot(diff1, line='q')
plt.close()
# QQplot still shows some deviations from normality at tails.
print("Using first difference to gain approximate stationarity.")
# Assume that the first difference is approximately normal.
# Autocorrelation plot. First lag is significant.
sm.graphics.tsa.plot_acf(diff1, lags=20)
plt.close()
# Partial autocorrelation plot. Dies down slowly.
sm.graphics.tsa.plot_pacf(diff1, lags=20)
plt.close()
# Might be an IMA(1, 1)
# Or, since autocorrelation also doesn't quite die out, could be an
# ARIMA model with p>0 and q>0.
# Let's look at different ARMA models.
res = sm.tsa.arma_order_select_ic(diff1, ic=['aic', 'bic'])
res
# Both AIC and BIC suggest ARIMA(1,1,1).
# Fit an ARIMA(1,1,1).
arima111 = sm.tsa.ARIMA(lvoxel, (1,1,1)).fit(disp=0)
arima111.params
# Fitted values look reasonable compared to first difference.
plt.plot(diff1)
plt.plot(arima111.resid)
plt.close()
# Residuals look normally distributed.
qqplot(arima111.resid, line='q')
plt.close()
# Autocorrelation and partial autocorrelation plots look fine.
sm.graphics.tsa.plot_acf(arima111.resid, lags=20)
plt.close()
sm.graphics.tsa.plot_pacf(arima111.resid, lags=20)
plt.close()
# Use first half of the observations to predict the second half.
print("Suggested model is ARIMA(1,1,1).")
preds = arima111.predict(start=len(diff1)//2+1)
times = range(1,len(diff1)+1)
plt.plot(times[len(diff1)//2:], diff1[len(diff1)//2:], 'b')
plt.plot(times[len(diff1)//2:], preds, 'r')
hand_obs = mlines.Line2D([], [], color="b", label="Observed")
hand_fore = mlines.Line2D([], [], color="r", label="Forecast")
plt.legend(handles=[hand_obs, hand_fore])
plt.title('Second Half of Observations')
plt.xlabel('Time')
plt.ylabel('Hemoglobin Response')
plt.savefig(location_of_images+"ts-preds.png")
| bsd-3-clause |
zaxtax/scikit-learn | examples/linear_model/plot_sparse_recovery.py | 27 | 7466 | """
============================================================
Sparse recovery: feature selection for sparse linear models
============================================================
Given a small number of observations, we want to recover which features
of X are relevant to explain y. For this, :ref:`sparse linear models
<l1_feature_selection>` can outperform standard statistical tests if the
true model is sparse, i.e. if a small fraction of the features are
relevant.
As detailed in :ref:`the compressive sensing notes
<compressive_sensing>`, the ability of L1-based approach to identify the
relevant variables depends on the sparsity of the ground truth, the
number of samples, the number of features, the conditioning of the
design matrix on the signal subspace, the amount of noise, and the
absolute value of the smallest non-zero coefficient [Wainwright2006]
(http://statistics.berkeley.edu/tech-reports/709.pdf).
Here we keep all parameters constant and vary the conditioning of the
design matrix. For a well-conditioned design matrix (small mutual
incoherence) we are exactly in compressive sensing conditions (i.i.d
Gaussian sensing matrix), and L1-recovery with the Lasso performs very
well. For an ill-conditioned matrix (high mutual incoherence),
regressors are very correlated, and the Lasso randomly selects one.
However, randomized-Lasso can recover the ground truth well.
In each situation, we first vary the alpha parameter setting the sparsity
of the estimated model and look at the stability scores of the randomized
Lasso. This analysis, knowing the ground truth, shows an optimal regime
in which relevant features stand out from the irrelevant ones. If alpha
is chosen too small, non-relevant variables enter the model. On the
opposite, if alpha is selected too large, the Lasso is equivalent to
stepwise regression, and thus brings no advantage over a univariate
F-test.
In a second step, we set alpha and compare the performance of different
feature selection methods, using the area under curve (AUC) of the
precision-recall.
"""
print(__doc__)
# Author: Alexandre Gramfort and Gael Varoquaux
# License: BSD 3 clause
import warnings
import matplotlib.pyplot as plt
import numpy as np
from scipy import linalg
from sklearn.linear_model import (RandomizedLasso, lasso_stability_path,
LassoLarsCV)
from sklearn.feature_selection import f_regression
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import auc, precision_recall_curve
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.utils.extmath import pinvh
from sklearn.exceptions import ConvergenceWarning
def mutual_incoherence(X_relevant, X_irelevant):
"""Mutual incoherence, as defined by formula (26a) of [Wainwright2006].
"""
projector = np.dot(np.dot(X_irelevant.T, X_relevant),
pinvh(np.dot(X_relevant.T, X_relevant)))
return np.max(np.abs(projector).sum(axis=1))
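# Illustrative check (editor's addition, not part of the original example): for
# a toy design whose irrelevant columns are orthogonal to the relevant ones,
# the mutual incoherence is zero -- the most favourable regime for L1 recovery.
_X_rel_demo = np.eye(6)[:, :2]
_X_irr_demo = np.eye(6)[:, 2:4]
assert mutual_incoherence(_X_rel_demo, _X_irr_demo) < 1e-12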
for conditioning in (1, 1e-4):
###########################################################################
# Simulate regression data with a correlated design
n_features = 501
n_relevant_features = 3
noise_level = .2
coef_min = .2
# The Donoho-Tanner phase transition is around n_samples=25: below we
# will completely fail to recover in the well-conditioned case
n_samples = 25
block_size = n_relevant_features
rng = np.random.RandomState(42)
# The coefficients of our model
coef = np.zeros(n_features)
coef[:n_relevant_features] = coef_min + rng.rand(n_relevant_features)
# The correlation of our design: variables correlated by blocs of 3
corr = np.zeros((n_features, n_features))
for i in range(0, n_features, block_size):
corr[i:i + block_size, i:i + block_size] = 1 - conditioning
corr.flat[::n_features + 1] = 1
corr = linalg.cholesky(corr)
# Our design
X = rng.normal(size=(n_samples, n_features))
X = np.dot(X, corr)
# Keep [Wainwright2006] (26c) constant
X[:n_relevant_features] /= np.abs(
linalg.svdvals(X[:n_relevant_features])).max()
X = StandardScaler().fit_transform(X.copy())
# The output variable
y = np.dot(X, coef)
y /= np.std(y)
# We scale the added noise as a function of the average correlation
# between the design and the output variable
y += noise_level * rng.normal(size=n_samples)
mi = mutual_incoherence(X[:, :n_relevant_features],
X[:, n_relevant_features:])
###########################################################################
# Plot stability selection path, using a high eps for early stopping
# of the path, to save computation time
alpha_grid, scores_path = lasso_stability_path(X, y, random_state=42,
eps=0.05)
plt.figure()
# We plot the path as a function of alpha/alpha_max to the power 1/3: the
# power 1/3 scales the path less brutally than the log, and enables to
# see the progression along the path
hg = plt.plot(alpha_grid[1:] ** .333, scores_path[coef != 0].T[1:], 'r')
hb = plt.plot(alpha_grid[1:] ** .333, scores_path[coef == 0].T[1:], 'k')
ymin, ymax = plt.ylim()
plt.xlabel(r'$(\alpha / \alpha_{max})^{1/3}$')
plt.ylabel('Stability score: proportion of times selected')
plt.title('Stability Scores Path - Mutual incoherence: %.1f' % mi)
plt.axis('tight')
plt.legend((hg[0], hb[0]), ('relevant features', 'irrelevant features'),
loc='best')
###########################################################################
# Plot the estimated stability scores for a given alpha
# Use 6-fold cross-validation rather than the default 3-fold: it leads to
# a better choice of alpha:
# Stop the user warnings outputs- they are not necessary for the example
# as it is specifically set up to be challenging.
with warnings.catch_warnings():
warnings.simplefilter('ignore', UserWarning)
warnings.simplefilter('ignore', ConvergenceWarning)
lars_cv = LassoLarsCV(cv=6).fit(X, y)
# Run the RandomizedLasso: we use a paths going down to .1*alpha_max
# to avoid exploring the regime in which very noisy variables enter
# the model
alphas = np.linspace(lars_cv.alphas_[0], .1 * lars_cv.alphas_[0], 6)
clf = RandomizedLasso(alpha=alphas, random_state=42).fit(X, y)
trees = ExtraTreesRegressor(100).fit(X, y)
# Compare with F-score
F, _ = f_regression(X, y)
plt.figure()
for name, score in [('F-test', F),
('Stability selection', clf.scores_),
('Lasso coefs', np.abs(lars_cv.coef_)),
('Trees', trees.feature_importances_),
]:
precision, recall, thresholds = precision_recall_curve(coef != 0,
score)
plt.semilogy(np.maximum(score / np.max(score), 1e-4),
label="%s. AUC: %.3f" % (name, auc(recall, precision)))
plt.plot(np.where(coef != 0)[0], [2e-4] * n_relevant_features, 'mo',
label="Ground truth")
plt.xlabel("Features")
plt.ylabel("Score")
# Plot only the 100 first coefficients
plt.xlim(0, 100)
plt.legend(loc='best')
plt.title('Feature selection scores - Mutual incoherence: %.1f'
% mi)
plt.show()
| bsd-3-clause |
classicboyir/BuildingMachineLearningSystemsWithPython | ch04/build_lda.py | 22 | 2443 | # This code is supporting material for the book
# Building Machine Learning Systems with Python
# by Willi Richert and Luis Pedro Coelho
# published by PACKT Publishing
#
# It is made available under the MIT License
from __future__ import print_function
try:
import nltk.corpus
except ImportError:
print("nltk not found")
print("please install it")
raise
from scipy.spatial import distance
import numpy as np
from gensim import corpora, models
import sklearn.datasets
import nltk.stem
from collections import defaultdict
english_stemmer = nltk.stem.SnowballStemmer('english')
stopwords = set(nltk.corpus.stopwords.words('english'))
stopwords.update(['from:', 'subject:', 'writes:', 'writes'])
class DirectText(corpora.textcorpus.TextCorpus):
def get_texts(self):
return self.input
def __len__(self):
return len(self.input)
try:
dataset = sklearn.datasets.load_mlcomp("20news-18828", "train",
mlcomp_root='./data')
except:
print("Newsgroup data not found.")
print("Please download from http://mlcomp.org/datasets/379")
print("And expand the zip into the subdirectory data/")
print()
print()
raise
otexts = dataset.data
texts = dataset.data
texts = [t.decode('utf-8', 'ignore') for t in texts]
texts = [t.split() for t in texts]
texts = [map(lambda w: w.lower(), t) for t in texts]
texts = [filter(lambda s: not len(set("+-.?!()>@0123456789") & set(s)), t)
for t in texts]
texts = [filter(lambda s: (len(s) > 3) and (s not in stopwords), t)
for t in texts]
texts = [map(english_stemmer.stem, t) for t in texts]
usage = defaultdict(int)
for t in texts:
for w in set(t):
usage[w] += 1
limit = len(texts) / 10
too_common = [w for w in usage if usage[w] > limit]
too_common = set(too_common)
texts = [filter(lambda s: s not in too_common, t) for t in texts]
corpus = DirectText(texts)
dictionary = corpus.dictionary
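# Looking up a single token forces gensim to build the dictionary's id2token
# mapping (it is populated lazily); the KeyError, if raised, is irrelevant.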
try:
dictionary['computer']
except:
pass
model = models.ldamodel.LdaModel(
corpus, num_topics=100, id2word=dictionary.id2token)
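# Build the document-topic weight matrix: one row per document, one column per
# topic; model[c] yields (topic_id, weight) pairs for document c.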
thetas = np.zeros((len(texts), 100))
for i, c in enumerate(corpus):
for ti, v in model[c]:
thetas[i, ti] += v
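# Pairwise distances between documents in topic space; the diagonal is set to a
# large value so a document is never reported as its own nearest neighbour.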
distances = distance.squareform(distance.pdist(thetas))
large = distances.max() + 1
for i in range(len(distances)):
distances[i, i] = large
print(otexts[1])
print()
print()
print()
print(otexts[distances[1].argmin()])
| mit |
michaelbramwell/sms-tools | software/transformations_interface/hpsMorph_function.py | 24 | 7354 | # function for doing a morph between two sounds using the hpsModel
import numpy as np
import matplotlib.pyplot as plt
from scipy.signal import get_window
import sys, os
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../models/'))
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../transformations/'))
import hpsModel as HPS
import hpsTransformations as HPST
import harmonicTransformations as HT
import utilFunctions as UF
def analysis(inputFile1='../../sounds/violin-B3.wav', window1='blackman', M1=1001, N1=1024, t1=-100,
minSineDur1=0.05, nH=60, minf01=200, maxf01=300, f0et1=10, harmDevSlope1=0.01, stocf=0.1,
inputFile2='../../sounds/soprano-E4.wav', window2='blackman', M2=901, N2=1024, t2=-100,
minSineDur2=0.05, minf02=250, maxf02=500, f0et2=10, harmDevSlope2=0.01):
"""
Analyze two sounds with the harmonic plus stochastic model
inputFile: input sound file (monophonic with sampling rate of 44100)
window: analysis window type (rectangular, hanning, hamming, blackman, blackmanharris)
M: analysis window size
N: fft size (power of two, bigger or equal than M)
t: magnitude threshold of spectral peaks
minSineDur: minimum duration of sinusoidal tracks
nH: maximum number of harmonics
minf0: minimum fundamental frequency in sound
maxf0: maximum fundamental frequency in sound
f0et: maximum error accepted in f0 detection algorithm
harmDevSlope: allowed deviation of harmonic tracks, higher harmonics have higher allowed deviation
stocf: decimation factor used for the stochastic approximation
returns inputFile: input file name; fs: sampling rate of input file,
hfreq, hmag: harmonic frequencies, magnitude; stocEnv: stochastic residual
"""
# size of fft used in synthesis
Ns = 512
# hop size (has to be 1/4 of Ns)
H = 128
# read input sounds
(fs1, x1) = UF.wavread(inputFile1)
(fs2, x2) = UF.wavread(inputFile2)
# compute analysis windows
w1 = get_window(window1, M1)
w2 = get_window(window2, M2)
# compute the harmonic plus stochastic models
hfreq1, hmag1, hphase1, stocEnv1 = HPS.hpsModelAnal(x1, fs1, w1, N1, H, t1, nH, minf01, maxf01, f0et1, harmDevSlope1, minSineDur1, Ns, stocf)
hfreq2, hmag2, hphase2, stocEnv2 = HPS.hpsModelAnal(x2, fs2, w2, N2, H, t2, nH, minf02, maxf02, f0et2, harmDevSlope2, minSineDur2, Ns, stocf)
# create figure to plot
plt.figure(figsize=(12, 9))
# frequency range to plot
maxplotfreq = 15000.0
# plot spectrogram stochastic component of sound 1
plt.subplot(2,1,1)
numFrames = int(stocEnv1[:,0].size)
sizeEnv = int(stocEnv1[0,:].size)
frmTime = H*np.arange(numFrames)/float(fs1)
binFreq = (.5*fs1)*np.arange(sizeEnv*maxplotfreq/(.5*fs1))/sizeEnv
plt.pcolormesh(frmTime, binFreq, np.transpose(stocEnv1[:,:sizeEnv*maxplotfreq/(.5*fs1)+1]))
plt.autoscale(tight=True)
# plot harmonic on top of stochastic spectrogram of sound 1
if (hfreq1.shape[1] > 0):
harms = np.copy(hfreq1)
harms = harms*np.less(harms,maxplotfreq)
harms[harms==0] = np.nan
numFrames = int(harms[:,0].size)
frmTime = H*np.arange(numFrames)/float(fs1)
plt.plot(frmTime, harms, color='k', ms=3, alpha=1)
plt.xlabel('time (sec)')
plt.ylabel('frequency (Hz)')
plt.autoscale(tight=True)
plt.title('harmonics + stochastic spectrogram of sound 1')
# plot spectrogram stochastic component of sound 2
plt.subplot(2,1,2)
numFrames = int(stocEnv2[:,0].size)
sizeEnv = int(stocEnv2[0,:].size)
frmTime = H*np.arange(numFrames)/float(fs2)
binFreq = (.5*fs2)*np.arange(sizeEnv*maxplotfreq/(.5*fs2))/sizeEnv
plt.pcolormesh(frmTime, binFreq, np.transpose(stocEnv2[:,:sizeEnv*maxplotfreq/(.5*fs2)+1]))
plt.autoscale(tight=True)
# plot harmonic on top of stochastic spectrogram of sound 2
if (hfreq2.shape[1] > 0):
harms = np.copy(hfreq2)
harms = harms*np.less(harms,maxplotfreq)
harms[harms==0] = np.nan
numFrames = int(harms[:,0].size)
frmTime = H*np.arange(numFrames)/float(fs2)
plt.plot(frmTime, harms, color='k', ms=3, alpha=1)
plt.xlabel('time (sec)')
plt.ylabel('frequency (Hz)')
plt.autoscale(tight=True)
plt.title('harmonics + stochastic spectrogram of sound 2')
plt.tight_layout()
plt.show(block=False)
return inputFile1, fs1, hfreq1, hmag1, stocEnv1, inputFile2, hfreq2, hmag2, stocEnv2
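# Note (editor's): the *Intp arrays below are alternating time/value pairs with
# both axes normalised to [0, 1]; hpsMorph resamples them over the analysis
# frames and blends the harmonic frequencies, magnitudes and stochastic
# envelopes of the two sounds frame by frame.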
def transformation_synthesis(inputFile1, fs, hfreq1, hmag1, stocEnv1, inputFile2, hfreq2, hmag2, stocEnv2,
hfreqIntp = np.array([0, 0, .1, 0, .9, 1, 1, 1]), hmagIntp = np.array([0, 0, .1, 0, .9, 1, 1, 1]), stocIntp = np.array([0, 0, .1, 0, .9, 1, 1, 1])):
"""
Transform the analysis values returned by the analysis function and synthesize the sound
inputFile1: name of input file 1
fs: sampling rate of input file 1
hfreq1, hmag1, stocEnv1: hps representation of sound 1
inputFile2: name of input file 2
hfreq2, hmag2, stocEnv2: hps representation of sound 2
hfreqIntp: interpolation factor between the harmonic frequencies of the two sounds, 0 is sound 1 and 1 is sound 2 (time,value pairs)
hmagIntp: interpolation factor between the harmonic magnitudes of the two sounds, 0 is sound 1 and 1 is sound 2 (time,value pairs)
stocIntp: interpolation factor between the stochastic representation of the two sounds, 0 is sound 1 and 1 is sound 2 (time,value pairs)
"""
# size of fft used in synthesis
Ns = 512
# hop size (has to be 1/4 of Ns)
H = 128
# morph the two sounds
yhfreq, yhmag, ystocEnv = HPST.hpsMorph(hfreq1, hmag1, stocEnv1, hfreq2, hmag2, stocEnv2, hfreqIntp, hmagIntp, stocIntp)
# synthesis
y, yh, yst = HPS.hpsModelSynth(yhfreq, yhmag, np.array([]), ystocEnv, Ns, H, fs)
# write output sound
outputFile = 'output_sounds/' + os.path.basename(inputFile1)[:-4] + '_hpsMorph.wav'
UF.wavwrite(y, fs, outputFile)
# create figure to plot
plt.figure(figsize=(12, 9))
# frequency range to plot
maxplotfreq = 15000.0
# plot spectrogram of transformed stochastic compoment
plt.subplot(2,1,1)
numFrames = int(ystocEnv[:,0].size)
sizeEnv = int(ystocEnv[0,:].size)
frmTime = H*np.arange(numFrames)/float(fs)
binFreq = (.5*fs)*np.arange(sizeEnv*maxplotfreq/(.5*fs))/sizeEnv
plt.pcolormesh(frmTime, binFreq, np.transpose(ystocEnv[:,:sizeEnv*maxplotfreq/(.5*fs)+1]))
plt.autoscale(tight=True)
# plot transformed harmonic on top of stochastic spectrogram
if (yhfreq.shape[1] > 0):
harms = np.copy(yhfreq)
harms = harms*np.less(harms,maxplotfreq)
harms[harms==0] = np.nan
numFrames = int(harms[:,0].size)
frmTime = H*np.arange(numFrames)/float(fs)
plt.plot(frmTime, harms, color='k', ms=3, alpha=1)
plt.xlabel('time (sec)')
plt.ylabel('frequency (Hz)')
plt.autoscale(tight=True)
plt.title('harmonics + stochastic spectrogram')
# plot the output sound
plt.subplot(2,1,2)
plt.plot(np.arange(y.size)/float(fs), y)
plt.axis([0, y.size/float(fs), min(y), max(y)])
plt.ylabel('amplitude')
plt.xlabel('time (sec)')
plt.title('output sound: y')
plt.tight_layout()
plt.show()
if __name__ == "__main__":
# analysis
inputFile1, fs1, hfreq1, hmag1, stocEnv1, inputFile2, hfreq2, hmag2, stocEnv2 = analysis()
# transformation and synthesis
transformation_synthesis (inputFile1, fs1, hfreq1, hmag1, stocEnv1, inputFile2, hfreq2, hmag2, stocEnv2)
plt.show()
| agpl-3.0 |
moyomot/text_classification | data_helpers.py | 1 | 12962 | import re
import string
import numpy as np
import pandas as pd
from gensim.models import KeyedVectors
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.utils import to_categorical
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from logs import logger
WORD2VEC_PATH = 'dataset/embedding/GoogleNews-vectors-negative300.bin'
def clean_str(text):
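    # NOTE: the contraction patterns below ("what's", "can't", "i'm") are
    # lower-case and the text is only lower-cased at the very end, so
    # capitalised variants such as "I'm" are left unexpanded.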
text = re.sub(r"[^A-Za-z0-9^,!?.\/'+-=]", " ", text)
text = re.sub(r"what's", "what is ", text)
text = re.sub(r"\'s", " ", text)
text = re.sub(r"\'ve", " have ", text)
text = re.sub(r"can't", "cannot ", text)
text = re.sub(r"n't", " not ", text)
text = re.sub(r"i'm", "i am ", text)
text = re.sub(r"\'re", " are ", text)
text = re.sub(r"\'d", " would ", text)
text = re.sub(r"\'ll", " will ", text)
text = re.sub(r",", " ", text)
text = re.sub(r"\.", " ", text)
text = re.sub(r"!", " ! ", text)
text = re.sub(r"n\'t", " n\'t", text)
text = re.sub(r"\(", " ", text)
text = re.sub(r"\)", " ", text)
text = re.sub(r"\?", " \? ", text)
text = re.sub(r"\/", " ", text)
text = re.sub(r"\^", " ^ ", text)
text = re.sub(r"\+", " + ", text)
text = re.sub(r"\-", " - ", text)
text = re.sub(r"\=", " = ", text)
text = re.sub(r"'", " ", text)
text = re.sub(r"#", " ", text)
text = re.sub(r":", " : ", text)
text = re.sub(r"\0s", "0", text)
text = re.sub(r"\s{2,}", " ", text)
return text.strip().lower()
class DataSet:
@classmethod
def load_word2vec(cls):
try:
word2vec = KeyedVectors.load_word2vec_format(WORD2VEC_PATH, binary=True)
except Exception as e:
raise e
return word2vec
def load(self, column_names):
with open(self.TRAIN_PATH, "r") as file:
self.df_train = pd.read_csv(file, names=column_names, header=None)
with open(self.TEST_PATH, "r") as file:
self.df_test = pd.read_csv(file, names=column_names, header=None)
def tfidf_transformer(self):
count_vect = CountVectorizer()
X_train_counts = count_vect.fit_transform(self.X_train)
tfidf_transformer = TfidfTransformer()
self.X_train_tfidf = tfidf_transformer.fit_transform(X_train_counts)
self.y_train_labels = self.y_train
logger.info('train text size is {size}'.format(size=len(self.X_train)))
logger.info('train label size is {size}'.format(size=len(self.y_train_labels)))
X_test_counts = count_vect.transform(self.X_test)
self.X_test_tfidf = tfidf_transformer.fit_transform(X_test_counts)
self.y_test_labels = self.y_test
logger.info('test text size is {size}'.format(size=len(self.X_test)))
logger.info('test label size is {size}'.format(size=len(self.y_test_labels)))
def embedding_transfomer(self):
tokenizer = Tokenizer(num_words=self.MAX_NB_WORDS)
tokenizer.fit_on_texts(self.X_train + self.X_test)
X_train_sequences = tokenizer.texts_to_sequences(self.X_train)
X_test_sequences = tokenizer.texts_to_sequences(self.X_test)
self.X_train = pad_sequences(X_train_sequences, maxlen=self.MAX_SEQUENCE_LENGTH)
self.X_test = pad_sequences(X_test_sequences, maxlen=self.MAX_SEQUENCE_LENGTH)
self.y_train = to_categorical(np.asarray(self.y_train))
self.y_test = to_categorical(np.asarray(self.y_test))
word2vec = DataSet.load_word2vec()
self.word_index = tokenizer.word_index
        # Tokenizer indices start at 1, so reserve row 0 (used for padding).
        embedding_matrix = np.zeros((len(self.word_index) + 1, self.EMBEDDING_DIM))
for word, i in self.word_index.items():
if word in word2vec.vocab:
embedding_matrix[i] = word2vec.word_vec(word)
self.embedding_matrix = embedding_matrix
class AgNews(DataSet):
TRAIN_PATH = 'dataset/ag_news_csv/train.csv'
TEST_PATH = 'dataset/ag_news_csv/test.csv'
COLUMN_NAMES = ['category_id', 'title', 'description']
def __init__(self):
self.MAX_NB_WORDS = 200000
self.EMBEDDING_DIM = 300
self.MAX_SEQUENCE_LENGTH = 1000
def create_embedding_dataset(self):
self.load(self.COLUMN_NAMES)
self.X_train = list(self.df_train.title + self.df_train.description)
self.X_train = [clean_str(text) for text in self.X_train]
self.y_train = list(self.df_train.category_id-1)
self.X_test = list(self.df_test.title + self.df_test.description)
self.X_test = [clean_str(text) for text in self.X_test]
self.y_test = list(self.df_test.category_id-1)
self.category_size = len(self.df_test.groupby('category_id'))
self.embedding_transfomer()
def create_character_level_dataset(self):
chars = list(string.ascii_lowercase + string.digits + string.punctuation) + ['\n']
vocab = {char: id for id, char in enumerate(chars)}
vocab2vec = {}
for char, id in vocab.items():
x = np.zeros(len(vocab), dtype=np.int)
x[id] = 1
vocab2vec[char] = x
self.load(self.COLUMN_NAMES)
X_train = list(self.df_train.title + self.df_train.description)
X_train = [clean_str(text) for text in X_train]
X_train = [list(x.replace(' ', '')[:200]) for x in X_train]
X_train = np.array([x if len(x) == 200 else x + (['#'] * (200 - len(x))) for x in X_train])
self.X_train = np.array([[vocab2vec[char] for char in text] for text in X_train])
logger.info(self.X_train.shape)
self.y_train = np.array(self.df_train.category_id-1)
X_test = list(self.df_test.title + self.df_test.description)
X_test = [clean_str(text) for text in X_test]
X_test = [list(x.replace(' ', '')[:200]) for x in X_test]
X_test = np.array([x if len(x) == 200 else x + (['#'] * (200 - len(x))) for x in X_test])
self.X_test = np.array([[vocab2vec[char] for char in text] for text in X_test])
logger.info(self.X_test.shape)
self.y_test = np.array(self.df_test.category_id-1)
self.category_size = len(self.df_test.groupby('category_id'))
self.y_train = to_categorical(np.asarray(self.y_train))
self.y_test = to_categorical(np.asarray(self.y_test))
def create_tfidf_dataset(self):
self.load(self.COLUMN_NAMES)
self.X_train = list(self.df_train.title + self.df_train.description)
self.X_train = [clean_str(text) for text in self.X_train]
self.y_train = list(self.df_train.category_id-1)
self.X_test = list(self.df_test.title + self.df_test.description)
self.X_test = [clean_str(text) for text in self.X_test]
self.y_test = list(self.df_test.category_id-1)
self.tfidf_transformer()
class YahooAnswers(DataSet):
TRAIN_PATH = 'dataset/yahoo_answers_csv/train.csv'
TEST_PATH = 'dataset/yahoo_answers_csv/test.csv'
COLUMN_NAMES = ['category_id', 'title', 'question', 'answer']
def __init__(self):
self.MAX_NB_WORDS = 1200000
self.EMBEDDING_DIM = 300
self.MAX_SEQUENCE_LENGTH = 2000
def create_embedding_dataset(self):
self.load(self.COLUMN_NAMES)
self.X_train = list(self.df_train.title.fillna(" ") + self.df_train.question.fillna(" ") + self.df_train.answer.fillna(" "))
self.X_train = [clean_str(text) for text in self.X_train]
self.y_train = list(self.df_train.category_id-1)
self.X_test = list(self.df_test.title.fillna(" ") + self.df_test.question.fillna(" ") + self.df_test.answer.fillna(" "))
self.X_test = [clean_str(text) for text in self.X_test]
self.y_test = list(self.df_test.category_id-1)
self.category_size = len(self.df_test.groupby('category_id'))
self.embedding_transfomer()
def create_tfidf_dataset(self):
self.load(self.COLUMN_NAMES)
self.X_train = list(self.df_train.title.fillna(" ") + self.df_train.question.fillna(" ") + self.df_train.answer.fillna(" "))
self.X_train = [clean_str(text) for text in self.X_train]
self.y_train = list(self.df_train.category_id-1)
self.X_test = list(self.df_test.title.fillna(" ") + self.df_test.question.fillna(" ") + self.df_test.answer.fillna(" "))
self.X_test = [clean_str(text) for text in self.X_test]
self.y_test = list(self.df_test.category_id-1)
self.tfidf_transformer()
def create_character_level_dataset(self):
chars = list(string.ascii_lowercase + string.digits + string.punctuation) + ['\n']
vocab = {char: id for id, char in enumerate(chars)}
vocab2vec = {}
for char, id in vocab.items():
x = np.zeros(len(vocab), dtype=np.int)
x[id] = 1
vocab2vec[char] = x
self.load(self.COLUMN_NAMES)
X_train = list(
self.df_train.title.fillna(" ") + self.df_train.question.fillna(" ") + self.df_train.answer.fillna(" "))
X_train = [clean_str(text) for text in X_train]
X_train = [list(x.replace(' ', '')[:200]) for x in X_train]
X_train = np.array([x if len(x) == 200 else x + (['#'] * (200 - len(x))) for x in X_train])
self.X_train = np.array([[vocab2vec[char] for char in text] for text in X_train])
logger.info(self.X_train.shape)
self.y_train = np.array(self.df_train.category_id - 1)
X_test = list(
self.df_test.title.fillna(" ") + self.df_test.question.fillna(" ") + self.df_test.answer.fillna(" "))
X_test = [clean_str(text) for text in X_test]
X_test = [list(x.replace(' ', '')[:200]) for x in X_test]
X_test = np.array([x if len(x) == 200 else x + (['#'] * (200 - len(x))) for x in X_test])
self.X_test = np.array([[vocab2vec[char] for char in text] for text in X_test])
logger.info(self.X_test.shape)
self.y_test = np.array(self.df_test.category_id - 1)
self.category_size = len(self.df_test.groupby('category_id'))
class YelpReviewPolarity(DataSet):
TRAIN_PATH = 'dataset/yelp_review_polarity_csv/train.csv'
TEST_PATH = 'dataset/yelp_review_polarity_csv/test.csv'
COLUMN_NAMES = ['category_id', 'description']
def __init__(self):
self.MAX_NB_WORDS = 600000
self.EMBEDDING_DIM = 300
self.MAX_SEQUENCE_LENGTH = 2000
def create_embedding_dataset(self):
self.load(self.COLUMN_NAMES)
self.X_train = list(self.df_train.description.fillna(" "))
self.X_train = [clean_str(text) for text in self.X_train]
self.y_train = list(self.df_train.category_id-1)
self.X_test = list(self.df_test.description.fillna(" "))
self.X_test = [clean_str(text) for text in self.X_test]
self.y_test = list(self.df_test.category_id-1)
self.category_size = len(self.df_test.groupby('category_id'))
self.embedding_transfomer()
def create_tfidf_dataset(self):
self.load(self.COLUMN_NAMES)
self.X_train = list(self.df_train.description.fillna(" "))
self.X_train = [clean_str(text) for text in self.X_train]
self.y_train = list(self.df_train.category_id-1)
self.X_test = list(self.df_test.description.fillna(" "))
self.X_test = [clean_str(text) for text in self.X_test]
self.y_test = list(self.df_test.category_id-1)
self.tfidf_transformer()
def create_character_level_dataset(self):
chars = list(string.ascii_lowercase + string.digits + string.punctuation) + ['\n']
vocab = {char: id for id, char in enumerate(chars)}
vocab2vec = {}
for char, id in vocab.items():
x = np.zeros(len(vocab), dtype=np.int)
x[id] = 1
vocab2vec[char] = x
self.load(self.COLUMN_NAMES)
X_train = list(self.df_train.description)
X_train = [clean_str(text) for text in X_train]
X_train = [list(x.replace(' ', '')[:200]) for x in X_train]
X_train = np.array([x if len(x) == 200 else x + (['#'] * (200 - len(x))) for x in X_train])
self.X_train = np.array([[vocab2vec[char] for char in text] for text in X_train])
logger.info(self.X_train.shape)
self.y_train = np.array(self.df_train.category_id - 1)
X_test = list(self.df_test.description)
X_test = [clean_str(text) for text in X_test]
X_test = [list(x.replace(' ', '')[:200]) for x in X_test]
X_test = np.array([x if len(x) == 200 else x + (['#'] * (200 - len(x))) for x in X_test])
self.X_test = np.array([[vocab2vec[char] for char in text] for text in X_test])
logger.info(self.X_test.shape)
self.y_test = np.array(self.df_test.category_id - 1)
self.category_size = len(self.df_test.groupby('category_id'))
self.y_train = to_categorical(np.asarray(self.y_train))
self.y_test = to_categorical(np.asarray(self.y_test))
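# Example usage (editor's sketch; assumes the AG News CSVs exist under
# dataset/ag_news_csv/ as configured above):
#     dataset = AgNews()
#     dataset.create_tfidf_dataset()
#     print(dataset.X_train_tfidf.shape, len(dataset.y_train_labels))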
| mit |
KhaledSharif/quantopian-ensemble-methods | one-classifier.py | 1 | 9032 | import talib
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier, ExtraTreesClassifier
import numpy as np
import pandas
def initialize(context):
set_symbol_lookup_date('2012-01-01')
# Parameters to be changed
context.model = ExtraTreesClassifier(n_estimators=300)
context.lookback = 14
context.history_range = 1000
context.beta_coefficient = 0.0
context.percentage_change = 0.025
context.maximum_leverage = 2.0
context.number_of_stocks = 150
context.maximum_pe_ratio = 8
context.maximum_market_cap = 0.1e9
context.starting_probability = 0.5
# End of parameters
schedule_function(create_model, date_rules.month_start(), time_rules.market_open())
schedule_function(rebalance, date_rules.month_start(), time_rules.market_open())
schedule_function(trade, date_rules.every_day(), time_rules.market_open())
context.algorithm_returns = []
context.longs = []
context.shorts = []
context.training_stocks = symbols('SPY')
context.trading_stocks = []
context.beta = 1.0
context.beta_list = []
context.completed = False
def before_trading_start(context):
if context.completed: return
fundamental_df = get_fundamentals(query(fundamentals.valuation.market_cap)
.filter(fundamentals.company_reference.primary_exchange_id == 'NAS' or
fundamentals.company_reference.primary_exchange_id == 'NYSE')
.filter(fundamentals.valuation_ratios.pe_ratio < context.maximum_pe_ratio)
.filter(fundamentals.valuation.market_cap < context.maximum_market_cap)
.order_by(fundamentals.valuation.market_cap.desc())
.limit(context.number_of_stocks))
update_universe(fundamental_df.columns.values)
context.trading_stocks = [stock for stock in fundamental_df]
context.completed = True
def create_model(context, data):
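    # For each training symbol, build one sample per sliding window: recent
    # price changes, ATR-based breakout signals and Bollinger band values.
    # The label is +1, -1 or 0 depending on whether the price change just
    # after the window exceeds (in magnitude) the change at the start of the
    # window scaled by (1 + context.percentage_change).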
X = []
Y = []
for S in context.training_stocks:
recent_prices = history(context.history_range, '1d', 'price')[S].values
recent_lows = history(context.history_range, '1d', 'low')[S].values
recent_highs = history(context.history_range, '1d', 'high')[S].values
recent_closes = history(context.history_range, '1d', 'close_price')[S].values
atr = talib.ATR(recent_highs, recent_lows, recent_closes, timeperiod=14)
prev_close = np.roll(recent_closes, 2)
upside_signal = (recent_prices - (prev_close + atr)).tolist()
downside_signal = (prev_close - (recent_prices + atr)).tolist()
price_changes = np.diff(recent_prices).tolist()
upper, middle, lower = talib.BBANDS(recent_prices,timeperiod=10,nbdevup=2,nbdevdn=2,matype=1)
upper = upper.tolist()
middle = middle.tolist()
lower = lower.tolist()
for i in range(15, context.history_range-context.lookback-1):
Z = price_changes[i:i+context.lookback] + upside_signal[i:i+context.lookback] + downside_signal[i:i+context.lookback] +\
upper[i:i+context.lookback] + middle[i:i+context.lookback] + lower[i:i+context.lookback]
if (np.any(np.isnan(Z)) or not np.all(np.isfinite(Z))): continue
X.append(Z)
if abs(price_changes[i+context.lookback]) > abs(price_changes[i]*(1+context.percentage_change)):
if price_changes[i+context.lookback] > 0:
Y.append(+1)
else:
Y.append(-1)
else:
Y.append(0)
context.model.fit(X, Y)
def rebalance(context, data):
context.completed = False
def trade(context, data):
if (context.account.leverage > context.maximum_leverage): return
if not context.model: return
for stock in context.trading_stocks:
if stock not in data:
context.trading_stocks.remove(stock)
for stock in context.trading_stocks:
if stock.security_end_date < get_datetime():
context.trading_stocks.remove(stock)
if stock in security_lists.leveraged_etf_list:
context.trading_stocks.remove(stock)
for one_stock in context.trading_stocks:
if get_open_orders(one_stock): continue
recent_prices = history(context.lookback+30, '1d', 'price')[one_stock].values
recent_lows = history(context.lookback+30, '1d', 'low')[one_stock].values
recent_highs = history(context.lookback+30, '1d', 'high')[one_stock].values
recent_closes = history(context.lookback+30, '1d', 'close_price')[one_stock].values
if (np.any(np.isnan(recent_prices)) or not np.all(np.isfinite(recent_prices))): continue
if (np.any(np.isnan(recent_lows)) or not np.all(np.isfinite(recent_lows))): continue
if (np.any(np.isnan(recent_highs)) or not np.all(np.isfinite(recent_highs))): continue
if (np.any(np.isnan(recent_closes)) or not np.all(np.isfinite(recent_closes))): continue
atr = talib.ATR(recent_highs, recent_lows, recent_closes, timeperiod=14)
prev_close = np.roll(recent_closes, 2)
upside_signal = (recent_prices - (prev_close + atr)).tolist()
downside_signal = (prev_close - (recent_prices + atr)).tolist()
price_changes = np.diff(recent_prices).tolist()
upper, middle, lower = talib.BBANDS(recent_prices,timeperiod=10,nbdevup=2,nbdevdn=2,matype=1)
upper = upper.tolist()
middle = middle.tolist()
lower = lower.tolist()
L = context.lookback
Z = price_changes[-L:] + upside_signal[-L:] + downside_signal[-L:] + upper[-L:] + middle[-L:] + lower[-L:]
if (np.any(np.isnan(Z)) or not np.all(np.isfinite(Z))): continue
prediction = context.model.predict(Z)
predict_proba = context.model.predict_proba(Z)
probability = predict_proba[0][prediction+1]
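        # Scale the required prediction confidence with cumulative returns:
        # trade more selectively when the strategy is ahead.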
p_desired = context.starting_probability + 0.1*context.portfolio.returns
if probability > p_desired:
if prediction > 0:
if one_stock in context.shorts:
order_target_percent(one_stock, 0)
context.shorts.remove(one_stock)
elif not one_stock in context.longs:
context.longs.append(one_stock)
elif prediction < 0:
if one_stock in context.longs:
order_target_percent(one_stock, 0)
context.longs.remove(one_stock)
elif not one_stock in context.shorts:
context.shorts.append(one_stock)
else:
order_target_percent(one_stock, 0)
if one_stock in context.longs: context.longs.remove(one_stock)
elif one_stock in context.shorts: context.shorts.remove(one_stock)
if get_open_orders(): return
for one_stock in context.longs:
if not one_stock in context.trading_stocks:
context.longs.remove(one_stock)
else:
order_target_percent(one_stock, context.maximum_leverage/(len(context.longs)+len(context.shorts)))
for one_stock in context.shorts:
if not one_stock in context.trading_stocks:
context.shorts.remove(one_stock)
else:
order_target_percent(one_stock, (-1.0)*context.maximum_leverage/(len(context.longs)+len(context.shorts)))
order_target_percent(symbol('SPY'), (-1.0)*context.maximum_leverage*(context.beta*context.beta_coefficient))
def estimateBeta(priceY,priceX):
algorithm_returns = priceY
benchmark_returns = (priceX/np.roll(priceX,1)-1).dropna().values
    if len(algorithm_returns) != len(benchmark_returns):
minlen = min(len(algorithm_returns), len(benchmark_returns))
if minlen > 2:
algorithm_returns = algorithm_returns[-minlen:]
benchmark_returns = benchmark_returns[-minlen:]
else:
return 1.00
returns_matrix = np.vstack([algorithm_returns, benchmark_returns])
C = np.cov(returns_matrix, ddof=1)
algorithm_covariance = C[0][1]
benchmark_variance = C[1][1]
beta = algorithm_covariance / benchmark_variance
return beta
def handle_data(context, data):
record(cash = context.portfolio.cash/(1000000))
record(lev = context.account.leverage)
context.algorithm_returns.append(context.portfolio.returns)
if len(context.algorithm_returns) > 30:
recent_prices = history(len(context.algorithm_returns), '1d', 'price')[symbol('SPY')]
context.beta_list.append(estimateBeta(pandas.Series(context.algorithm_returns[-30:]), recent_prices))
if len(context.beta_list) > 7: context.beta_list.pop(0)
context.beta = np.mean(context.beta_list)
record(Beta=context.beta)
| mit |
xiaoxiamii/scikit-learn | sklearn/feature_selection/tests/test_base.py | 143 | 3670 | import numpy as np
from scipy import sparse as sp
from nose.tools import assert_raises, assert_equal
from numpy.testing import assert_array_equal
from sklearn.base import BaseEstimator
from sklearn.feature_selection.base import SelectorMixin
from sklearn.utils import check_array
class StepSelector(SelectorMixin, BaseEstimator):
"""Retain every `step` features (beginning with 0)"""
def __init__(self, step=2):
self.step = step
def fit(self, X, y=None):
X = check_array(X, 'csc')
self.n_input_feats = X.shape[1]
return self
def _get_support_mask(self):
mask = np.zeros(self.n_input_feats, dtype=bool)
mask[::self.step] = True
return mask
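# Shared fixtures: a 2x10 design matrix X, its expected transform Xt (every
# second column kept), the inverse transform Xinv (dropped columns zeroed out),
# and the matching boolean/index supports and feature names.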
support = [True, False] * 5
support_inds = [0, 2, 4, 6, 8]
X = np.arange(20).reshape(2, 10)
Xt = np.arange(0, 20, 2).reshape(2, 5)
Xinv = X.copy()
Xinv[:, 1::2] = 0
y = [0, 1]
feature_names = list('ABCDEFGHIJ')
feature_names_t = feature_names[::2]
feature_names_inv = np.array(feature_names)
feature_names_inv[1::2] = ''
def test_transform_dense():
sel = StepSelector()
Xt_actual = sel.fit(X, y).transform(X)
Xt_actual2 = StepSelector().fit_transform(X, y)
assert_array_equal(Xt, Xt_actual)
assert_array_equal(Xt, Xt_actual2)
# Check dtype matches
assert_equal(np.int32, sel.transform(X.astype(np.int32)).dtype)
assert_equal(np.float32, sel.transform(X.astype(np.float32)).dtype)
# Check 1d list and other dtype:
names_t_actual = sel.transform([feature_names])
assert_array_equal(feature_names_t, names_t_actual.ravel())
# Check wrong shape raises error
assert_raises(ValueError, sel.transform, np.array([[1], [2]]))
def test_transform_sparse():
sparse = sp.csc_matrix
sel = StepSelector()
Xt_actual = sel.fit(sparse(X)).transform(sparse(X))
Xt_actual2 = sel.fit_transform(sparse(X))
assert_array_equal(Xt, Xt_actual.toarray())
assert_array_equal(Xt, Xt_actual2.toarray())
# Check dtype matches
assert_equal(np.int32, sel.transform(sparse(X).astype(np.int32)).dtype)
assert_equal(np.float32, sel.transform(sparse(X).astype(np.float32)).dtype)
# Check wrong shape raises error
assert_raises(ValueError, sel.transform, np.array([[1], [2]]))
def test_inverse_transform_dense():
sel = StepSelector()
Xinv_actual = sel.fit(X, y).inverse_transform(Xt)
assert_array_equal(Xinv, Xinv_actual)
# Check dtype matches
assert_equal(np.int32,
sel.inverse_transform(Xt.astype(np.int32)).dtype)
assert_equal(np.float32,
sel.inverse_transform(Xt.astype(np.float32)).dtype)
# Check 1d list and other dtype:
names_inv_actual = sel.inverse_transform([feature_names_t])
assert_array_equal(feature_names_inv, names_inv_actual.ravel())
# Check wrong shape raises error
assert_raises(ValueError, sel.inverse_transform, np.array([[1], [2]]))
def test_inverse_transform_sparse():
sparse = sp.csc_matrix
sel = StepSelector()
Xinv_actual = sel.fit(sparse(X)).inverse_transform(sparse(Xt))
assert_array_equal(Xinv, Xinv_actual.toarray())
# Check dtype matches
assert_equal(np.int32,
sel.inverse_transform(sparse(Xt).astype(np.int32)).dtype)
assert_equal(np.float32,
sel.inverse_transform(sparse(Xt).astype(np.float32)).dtype)
# Check wrong shape raises error
assert_raises(ValueError, sel.inverse_transform, np.array([[1], [2]]))
def test_get_support():
sel = StepSelector()
sel.fit(X, y)
assert_array_equal(support, sel.get_support())
assert_array_equal(support_inds, sel.get_support(indices=True))
| bsd-3-clause |