repo_name (string, 6-112 chars) | path (string, 4-204 chars) | copies (string, 1-3 chars) | size (string, 4-6 chars) | content (string, 714-810k chars) | license (15 classes)
---|---|---|---|---|---
michael-pacheco/dota2-predictor
|
training/cross_validation.py
|
2
|
2397
|
""" Module responsible with training, evaluating and cross validation of data """
import logging
import numpy as np
from sklearn.externals import joblib
from sklearn.metrics import roc_auc_score, accuracy_score
from sklearn.model_selection import cross_val_score
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LogisticRegression
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
def evaluate(train_data, test_data, cv=5, save_model=None):
""" Given train data, performs cross validation using cv folds, then calculates the score on
test data. The metric used is roc_auc_score from sklearn. Before training, the data is
normalized and the scaler used is saved for scaling the test data in the same manner.
Args:
train_data: list containing x_train and y_train
test_data: list containing x_test and y_test
cv: number of folds to use in cross validation
save_model: if given, the model is saved to this path
Returns:
train size, test size, cross-validation mean, roc_auc score, accuracy score
"""
x_train, y_train = train_data
x_test, y_test = test_data
scaler = StandardScaler()
scaler.fit(x_train)
x_train = scaler.transform(x_train)
x_test = scaler.transform(x_test)
cross_val_mean = -1
if cv > 0:
model = LogisticRegression(C=0.005, random_state=42)
cross_val_scores = cross_val_score(model, x_train, y_train, cv=cv, scoring='roc_auc',
n_jobs=-1)
cross_val_mean = np.mean(cross_val_scores)
logger.info("Cross validation scores over the training set (%d folds): %.3f +/- %.3f", cv,
cross_val_mean,
np.std(cross_val_scores))
model = LogisticRegression(C=0.005, random_state=42)
model.fit(x_train, y_train)
probabilities = model.predict_proba(x_test)
roc_auc = roc_auc_score(y_test, probabilities[:, 1])
labels = model.predict(x_test)
acc_score = accuracy_score(y_test, labels)
if save_model:
model_dict = {}
model_dict['scaler'] = scaler
model_dict['model'] = model
joblib.dump(model_dict, save_model)
logger.info("Test ROC AUC: %.3f", roc_auc)
logger.info("Test accuracy score: %.3f", acc_score)
return (x_train.shape[0], x_test.shape[0], cross_val_mean, roc_auc, acc_score)
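# Editor's note: a minimal usage sketch, not part of the original module. The
# synthetic arrays below are illustrative assumptions; evaluate() is the
# function defined above.
def _demo_evaluate():
    rng = np.random.RandomState(0)
    x = rng.rand(200, 10)                 # 200 samples, 10 features
    y = rng.randint(0, 2, size=200)       # binary outcomes
    train_data = [x[:150], y[:150]]
    test_data = [x[150:], y[150:]]
    # Returns (train size, test size, cross-validation mean, ROC AUC, accuracy)
    return evaluate(train_data, test_data, cv=3, save_model=None)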
|
mit
|
chugunovyar/factoryForBuild
|
env/lib/python2.7/site-packages/matplotlib/finance.py
|
10
|
42914
|
"""
A collection of functions for collecting, analyzing and plotting
financial data.
This module is deprecated in 2.0 and has been moved to a module called
`mpl_finance`.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from six.moves import xrange, zip
import contextlib
import os
import warnings
from six.moves.urllib.request import urlopen
import datetime
import numpy as np
from matplotlib import colors as mcolors, verbose, get_cachedir
from matplotlib.dates import date2num
from matplotlib.cbook import iterable, mkdirs, warn_deprecated
from matplotlib.collections import LineCollection, PolyCollection
from matplotlib.lines import Line2D, TICKLEFT, TICKRIGHT
from matplotlib.patches import Rectangle
from matplotlib.transforms import Affine2D
warn_deprecated(
since=2.0,
message=("The finance module has been deprecated in mpl 2.0 and will "
"be removed in mpl 2.2. Please use the module mpl_finance "
"instead."))
if six.PY3:
import hashlib
def md5(x):
return hashlib.md5(x.encode())
else:
from hashlib import md5
cachedir = get_cachedir()
# cachedir will be None if there is no writable directory.
if cachedir is not None:
cachedir = os.path.join(cachedir, 'finance.cache')
else:
# Should only happen in a restricted environment (such as Google App
# Engine). Deal with this gracefully by not caching finance data.
cachedir = None
stock_dt_ohlc = np.dtype([
(str('date'), object),
(str('year'), np.int16),
(str('month'), np.int8),
(str('day'), np.int8),
(str('d'), np.float), # mpl datenum
(str('open'), np.float),
(str('high'), np.float),
(str('low'), np.float),
(str('close'), np.float),
(str('volume'), np.float),
(str('aclose'), np.float)])
stock_dt_ochl = np.dtype(
[(str('date'), object),
(str('year'), np.int16),
(str('month'), np.int8),
(str('day'), np.int8),
(str('d'), np.float), # mpl datenum
(str('open'), np.float),
(str('close'), np.float),
(str('high'), np.float),
(str('low'), np.float),
(str('volume'), np.float),
(str('aclose'), np.float)])
def parse_yahoo_historical_ochl(fh, adjusted=True, asobject=False):
"""Parse the historical data in file handle fh from yahoo finance.
Parameters
----------
adjusted : bool
If True (default) replace open, close, high, low prices with
their adjusted values. The adjustment is by a scale factor, S =
adjusted_close/close. Adjusted prices are actual prices
multiplied by S.
Volume is not adjusted as it is already backward split adjusted
by Yahoo. If you want to compute dollars traded, multiply volume
by the adjusted close, regardless of whether you choose adjusted
= True|False.
asobject : bool or None
If False (default for compatibility with earlier versions)
return a list of tuples containing
d, open, close, high, low, volume
If None (preferred alternative to False), return
a 2-D ndarray corresponding to the list of tuples.
Otherwise return a numpy recarray with
date, year, month, day, d, open, close, high, low,
volume, adjusted_close
where d is a floating point representation of date,
as returned by date2num, and date is a python standard
library datetime.date instance.
The name of this kwarg is a historical artifact. Formerly,
True returned a cbook Bunch
holding 1-D ndarrays. The behavior of a numpy recarray is
very similar to the Bunch.
"""
return _parse_yahoo_historical(fh, adjusted=adjusted, asobject=asobject,
ochl=True)
def parse_yahoo_historical_ohlc(fh, adjusted=True, asobject=False):
"""Parse the historical data in file handle fh from yahoo finance.
Parameters
----------
adjusted : bool
If True (default) replace open, high, low, close prices with
their adjusted values. The adjustment is by a scale factor, S =
adjusted_close/close. Adjusted prices are actual prices
multiplied by S.
Volume is not adjusted as it is already backward split adjusted
by Yahoo. If you want to compute dollars traded, multiply volume
by the adjusted close, regardless of whether you choose adjusted
= True|False.
asobject : bool or None
If False (default for compatibility with earlier versions)
return a list of tuples containing
d, open, high, low, close, volume
If None (preferred alternative to False), return
a 2-D ndarray corresponding to the list of tuples.
Otherwise return a numpy recarray with
date, year, month, day, d, open, high, low, close,
volume, adjusted_close
where d is a floating point representation of date,
as returned by date2num, and date is a python standard
library datetime.date instance.
The name of this kwarg is a historical artifact. Formerly,
True returned a cbook Bunch
holding 1-D ndarrays. The behavior of a numpy recarray is
very similar to the Bunch.
"""
return _parse_yahoo_historical(fh, adjusted=adjusted, asobject=asobject,
ochl=False)
def _parse_yahoo_historical(fh, adjusted=True, asobject=False,
ochl=True):
"""Parse the historical data in file handle fh from yahoo finance.
Parameters
----------
adjusted : bool
If True (default) replace open, high, low, close prices with
their adjusted values. The adjustment is by a scale factor, S =
adjusted_close/close. Adjusted prices are actual prices
multiplied by S.
Volume is not adjusted as it is already backward split adjusted
by Yahoo. If you want to compute dollars traded, multiply volume
by the adjusted close, regardless of whether you choose adjusted
= True|False.
asobject : bool or None
If False (default for compatibility with earlier versions)
return a list of tuples containing
d, open, high, low, close, volume
or
d, open, close, high, low, volume
depending on `ochl`
If None (preferred alternative to False), return
a 2-D ndarray corresponding to the list of tuples.
Otherwise return a numpy recarray with
date, year, month, day, d, open, high, low, close,
volume, adjusted_close
where d is a floating point representation of date,
as returned by date2num, and date is a python standard
library datetime.date instance.
The name of this kwarg is a historical artifact. Formerly,
True returned a cbook Bunch
holding 1-D ndarrays. The behavior of a numpy recarray is
very similar to the Bunch.
ochl : bool
Selects between ochl and ohlc ordering.
Defaults to True to preserve original functionality.
"""
if ochl:
stock_dt = stock_dt_ochl
else:
stock_dt = stock_dt_ohlc
results = []
# datefmt = '%Y-%m-%d'
fh.readline() # discard heading
for line in fh:
vals = line.split(',')
if len(vals) != 7:
continue # add warning?
datestr = vals[0]
#dt = datetime.date(*time.strptime(datestr, datefmt)[:3])
# Using strptime doubles the runtime. With the present
# format, we don't need it.
dt = datetime.date(*[int(val) for val in datestr.split('-')])
dnum = date2num(dt)
open, high, low, close = [float(val) for val in vals[1:5]]
volume = float(vals[5])
aclose = float(vals[6])
if ochl:
results.append((dt, dt.year, dt.month, dt.day,
dnum, open, close, high, low, volume, aclose))
else:
results.append((dt, dt.year, dt.month, dt.day,
dnum, open, high, low, close, volume, aclose))
results.reverse()
d = np.array(results, dtype=stock_dt)
if adjusted:
scale = d['aclose'] / d['close']
scale[np.isinf(scale)] = np.nan
d['open'] *= scale
d['high'] *= scale
d['low'] *= scale
d['close'] *= scale
if not asobject:
# 2-D sequence; formerly list of tuples, now ndarray
ret = np.zeros((len(d), 6), dtype=np.float)
ret[:, 0] = d['d']
if ochl:
ret[:, 1] = d['open']
ret[:, 2] = d['close']
ret[:, 3] = d['high']
ret[:, 4] = d['low']
else:
ret[:, 1] = d['open']
ret[:, 2] = d['high']
ret[:, 3] = d['low']
ret[:, 4] = d['close']
ret[:, 5] = d['volume']
if asobject is None:
return ret
return [tuple(row) for row in ret]
return d.view(np.recarray) # Close enough to former Bunch return
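# Editor's note: a tiny, self-contained sketch of the price adjustment the
# docstrings above describe (scale factor S = adjusted_close / close). The
# prices are made up; nothing here contacts Yahoo.
def _demo_adjustment():
    close, aclose = 100.0, 50.0          # e.g. a price series after a 2:1 split
    scale = aclose / close               # S = 0.5
    open_, high, low = 98.0, 101.0, 97.0
    # adjusted prices are the raw prices multiplied by S
    return open_ * scale, high * scale, low * scale, close * scale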
def fetch_historical_yahoo(ticker, date1, date2, cachename=None,
dividends=False):
"""
Fetch historical data for ticker between date1 and date2. date1 and
date2 are date or datetime instances, or (year, month, day) sequences.
Parameters
----------
ticker : str
ticker
date1 : sequence of form (year, month, day), `datetime`, or `date`
start date
date2 : sequence of form (year, month, day), `datetime`, or `date`
end date
cachename : str
cachename is the name of the local file cache. If None, will
default to the md5 hash of the url (which incorporates the ticker
and date range)
dividends : bool
set dividends=True to return dividends instead of price data. With
this option set, parse functions will not work
Returns
-------
file_handle : file handle
a file handle is returned
Examples
--------
>>> fh = fetch_historical_yahoo('^GSPC', (2000, 1, 1), (2001, 12, 31))
"""
ticker = ticker.upper()
if iterable(date1):
d1 = (date1[1] - 1, date1[2], date1[0])
else:
d1 = (date1.month - 1, date1.day, date1.year)
if iterable(date2):
d2 = (date2[1] - 1, date2[2], date2[0])
else:
d2 = (date2.month - 1, date2.day, date2.year)
if dividends:
g = 'v'
verbose.report('Retrieving dividends instead of prices')
else:
g = 'd'
urlFmt = ('http://ichart.yahoo.com/table.csv?a=%d&b=%d&' +
'c=%d&d=%d&e=%d&f=%d&s=%s&y=0&g=%s&ignore=.csv')
url = urlFmt % (d1[0], d1[1], d1[2],
d2[0], d2[1], d2[2], ticker, g)
# Cache the finance data if cachename is supplied, or there is a writable
# cache directory.
if cachename is None and cachedir is not None:
cachename = os.path.join(cachedir, md5(url).hexdigest())
if cachename is not None:
if os.path.exists(cachename):
fh = open(cachename)
verbose.report('Using cachefile %s for '
'%s' % (cachename, ticker))
else:
mkdirs(os.path.abspath(os.path.dirname(cachename)))
with contextlib.closing(urlopen(url)) as urlfh:
with open(cachename, 'wb') as fh:
fh.write(urlfh.read())
verbose.report('Saved %s data to cache file '
'%s' % (ticker, cachename))
fh = open(cachename, 'r')
return fh
else:
return urlopen(url)
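# Editor's note: illustrative only -- how the zero-based month tuples built
# above map into the a..f parameters of the (long defunct) Yahoo CSV endpoint.
# The ticker 'SPY' and the date range are arbitrary examples.
def _demo_yahoo_url():
    d1, d2, g = (0, 1, 2000), (11, 31, 2001), 'd'   # Jan 1 2000 .. Dec 31 2001
    urlFmt = ('http://ichart.yahoo.com/table.csv?a=%d&b=%d&' +
              'c=%d&d=%d&e=%d&f=%d&s=%s&y=0&g=%s&ignore=.csv')
    return urlFmt % (d1[0], d1[1], d1[2], d2[0], d2[1], d2[2], 'SPY', g)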
def quotes_historical_yahoo_ochl(ticker, date1, date2, asobject=False,
adjusted=True, cachename=None):
""" Get historical data for ticker between date1 and date2.
See :func:`parse_yahoo_historical` for explanation of output formats
and the *asobject* and *adjusted* kwargs.
Parameters
----------
ticker : str
stock ticker
date1 : sequence of form (year, month, day), `datetime`, or `date`
start date
date2 : sequence of form (year, month, day), `datetime`, or `date`
end date
cachename : str or `None`
is the name of the local file cache. If None, will
default to the md5 hash of the url (which incorporates the ticker
and date range)
Examples
--------
>>> sp = f.quotes_historical_yahoo_ochl('^GSPC', d1, d2,
asobject=True, adjusted=True)
>>> returns = (sp.open[1:] - sp.open[:-1])/sp.open[1:]
>>> [n,bins,patches] = hist(returns, 100)
>>> mu = mean(returns)
>>> sigma = std(returns)
>>> x = normpdf(bins, mu, sigma)
>>> plot(bins, x, color='red', lw=2)
"""
return _quotes_historical_yahoo(ticker, date1, date2, asobject=asobject,
adjusted=adjusted, cachename=cachename,
ochl=True)
def quotes_historical_yahoo_ohlc(ticker, date1, date2, asobject=False,
adjusted=True, cachename=None):
""" Get historical data for ticker between date1 and date2.
See :func:`parse_yahoo_historical` for explanation of output formats
and the *asobject* and *adjusted* kwargs.
Parameters
----------
ticker : str
stock ticker
date1 : sequence of form (year, month, day), `datetime`, or `date`
start date
date2 : sequence of form (year, month, day), `datetime`, or `date`
end date
cachename : str or `None`
is the name of the local file cache. If None, will
default to the md5 hash of the url (which incorporates the ticker
and date range)
Examples
--------
>>> sp = f.quotes_historical_yahoo_ohlc('^GSPC', d1, d2,
asobject=True, adjusted=True)
>>> returns = (sp.open[1:] - sp.open[:-1])/sp.open[1:]
>>> [n,bins,patches] = hist(returns, 100)
>>> mu = mean(returns)
>>> sigma = std(returns)
>>> x = normpdf(bins, mu, sigma)
>>> plot(bins, x, color='red', lw=2)
"""
return _quotes_historical_yahoo(ticker, date1, date2, asobject=asobject,
adjusted=adjusted, cachename=cachename,
ochl=False)
def _quotes_historical_yahoo(ticker, date1, date2, asobject=False,
adjusted=True, cachename=None,
ochl=True):
""" Get historical data for ticker between date1 and date2.
See :func:`parse_yahoo_historical` for explanation of output formats
and the *asobject* and *adjusted* kwargs.
Parameters
----------
ticker : str
stock ticker
date1 : sequence of form (year, month, day), `datetime`, or `date`
start date
date2 : sequence of form (year, month, day), `datetime`, or `date`
end date
cachename : str or `None`
is the name of the local file cache. If None, will
default to the md5 hash of the url (which incorporates the ticker
and date range)
ochl: bool
temporary argument to select between ochl and ohlc ordering
Examples
--------
>>> sp = f.quotes_historical_yahoo('^GSPC', d1, d2,
asobject=True, adjusted=True)
>>> returns = (sp.open[1:] - sp.open[:-1])/sp.open[1:]
>>> [n,bins,patches] = hist(returns, 100)
>>> mu = mean(returns)
>>> sigma = std(returns)
>>> x = normpdf(bins, mu, sigma)
>>> plot(bins, x, color='red', lw=2)
"""
# Maybe enable a warning later as part of a slow transition
# to using None instead of False.
#if asobject is False:
# warnings.warn("Recommend changing to asobject=None")
fh = fetch_historical_yahoo(ticker, date1, date2, cachename)
try:
ret = _parse_yahoo_historical(fh, asobject=asobject,
adjusted=adjusted, ochl=ochl)
if len(ret) == 0:
return None
except IOError as exc:
warnings.warn('fh failure\n%s' % exc.strerror)
return None
return ret
def plot_day_summary_oclh(ax, quotes, ticksize=3,
colorup='k', colordown='r',
):
"""Plots day summary
Represent the time, open, close, high, low as a vertical line
ranging from low to high. The left tick is the open and the right
tick is the close.
Parameters
----------
ax : `Axes`
an `Axes` instance to plot to
quotes : sequence of (time, open, close, high, low, ...) sequences
data to plot. time must be in float date format - see date2num
ticksize : int
open/close tick marker in points
colorup : color
the color of the lines where close >= open
colordown : color
the color of the lines where close < open
Returns
-------
lines : list
list of tuples of the lines added (one tuple per quote)
"""
return _plot_day_summary(ax, quotes, ticksize=ticksize,
colorup=colorup, colordown=colordown,
ochl=True)
def plot_day_summary_ohlc(ax, quotes, ticksize=3,
colorup='k', colordown='r',
):
"""Plots day summary
Represent the time, open, high, low, close as a vertical line
ranging from low to high. The left tick is the open and the right
tick is the close.
Parameters
----------
ax : `Axes`
an `Axes` instance to plot to
quotes : sequence of (time, open, high, low, close, ...) sequences
data to plot. time must be in float date format - see date2num
ticksize : int
open/close tick marker in points
colorup : color
the color of the lines where close >= open
colordown : color
the color of the lines where close < open
Returns
-------
lines : list
list of tuples of the lines added (one tuple per quote)
"""
return _plot_day_summary(ax, quotes, ticksize=ticksize,
colorup=colorup, colordown=colordown,
ochl=False)
def _plot_day_summary(ax, quotes, ticksize=3,
colorup='k', colordown='r',
ochl=True
):
"""Plots day summary
Represent the time, open, high, low, close as a vertical line
ranging from low to high. The left tick is the open and the right
tick is the close.
Parameters
----------
ax : `Axes`
an `Axes` instance to plot to
quotes : sequence of quote sequences
data to plot. time must be in float date format - see date2num
(time, open, high, low, close, ...) vs
(time, open, close, high, low, ...)
set by `ochl`
ticksize : int
open/close tick marker in points
colorup : color
the color of the lines where close >= open
colordown : color
the color of the lines where close < open
ochl: bool
argument to select between ochl and ohlc ordering of quotes
Returns
-------
lines : list
list of tuples of the lines added (one tuple per quote)
"""
# unfortunately this has a different return type than plot_day_summary2_*
lines = []
for q in quotes:
if ochl:
t, open, close, high, low = q[:5]
else:
t, open, high, low, close = q[:5]
if close >= open:
color = colorup
else:
color = colordown
vline = Line2D(xdata=(t, t), ydata=(low, high),
color=color,
antialiased=False, # no need to antialias vert lines
)
oline = Line2D(xdata=(t, t), ydata=(open, open),
color=color,
antialiased=False,
marker=TICKLEFT,
markersize=ticksize,
)
cline = Line2D(xdata=(t, t), ydata=(close, close),
color=color,
antialiased=False,
markersize=ticksize,
marker=TICKRIGHT)
lines.extend((vline, oline, cline))
ax.add_line(vline)
ax.add_line(oline)
ax.add_line(cline)
ax.autoscale_view()
return lines
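# Editor's note: minimal usage sketch, not part of the original module. The two
# OHLC quotes are fabricated and the date numbers are arbitrary date2num-style
# floats.
def _demo_day_summary():
    import matplotlib.pyplot as plt
    quotes = [(736000.0, 10.0, 12.0, 9.5, 11.0),   # (t, open, high, low, close)
              (736001.0, 11.0, 11.5, 10.0, 10.2)]
    fig, ax = plt.subplots()
    return plot_day_summary_ohlc(ax, quotes, ticksize=3)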
def candlestick_ochl(ax, quotes, width=0.2, colorup='k', colordown='r',
alpha=1.0):
"""
Plot the time, open, close, high, low as a vertical line ranging
from low to high. Use a rectangular bar to represent the
open-close span. If close >= open, use colorup to color the bar,
otherwise use colordown
Parameters
----------
ax : `Axes`
an Axes instance to plot to
quotes : sequence of (time, open, close, high, low, ...) sequences
As long as the first 5 elements are these values,
the record can be as long as you want (e.g., it may store volume).
time must be in float days format - see date2num
width : float
fraction of a day for the rectangle width
colorup : color
the color of the rectangle where close >= open
colordown : color
the color of the rectangle where close < open
alpha : float
the rectangle alpha level
Returns
-------
ret : tuple
returns (lines, patches) where lines is a list of lines
added and patches is a list of the rectangle patches added
"""
return _candlestick(ax, quotes, width=width, colorup=colorup,
colordown=colordown,
alpha=alpha, ochl=True)
def candlestick_ohlc(ax, quotes, width=0.2, colorup='k', colordown='r',
alpha=1.0):
"""
Plot the time, open, high, low, close as a vertical line ranging
from low to high. Use a rectangular bar to represent the
open-close span. If close >= open, use colorup to color the bar,
otherwise use colordown
Parameters
----------
ax : `Axes`
an Axes instance to plot to
quotes : sequence of (time, open, high, low, close, ...) sequences
As long as the first 5 elements are these values,
the record can be as long as you want (e.g., it may store volume).
time must be in float days format - see date2num
width : float
fraction of a day for the rectangle width
colorup : color
the color of the rectangle where close >= open
colordown : color
the color of the rectangle where close < open
alpha : float
the rectangle alpha level
Returns
-------
ret : tuple
returns (lines, patches) where lines is a list of lines
added and patches is a list of the rectangle patches added
"""
return _candlestick(ax, quotes, width=width, colorup=colorup,
colordown=colordown,
alpha=alpha, ochl=False)
def _candlestick(ax, quotes, width=0.2, colorup='k', colordown='r',
alpha=1.0, ochl=True):
"""
Plot the time, open, high, low, close as a vertical line ranging
from low to high. Use a rectangular bar to represent the
open-close span. If close >= open, use colorup to color the bar,
otherwise use colordown
Parameters
----------
ax : `Axes`
an Axes instance to plot to
quotes : sequence of quote sequences
data to plot. time must be in float date format - see date2num
(time, open, high, low, close, ...) vs
(time, open, close, high, low, ...)
set by `ochl`
width : float
fraction of a day for the rectangle width
colorup : color
the color of the rectangle where close >= open
colordown : color
the color of the rectangle where close < open
alpha : float
the rectangle alpha level
ochl: bool
argument to select between ochl and ohlc ordering of quotes
Returns
-------
ret : tuple
returns (lines, patches) where lines is a list of lines
added and patches is a list of the rectangle patches added
"""
OFFSET = width / 2.0
lines = []
patches = []
for q in quotes:
if ochl:
t, open, close, high, low = q[:5]
else:
t, open, high, low, close = q[:5]
if close >= open:
color = colorup
lower = open
height = close - open
else:
color = colordown
lower = close
height = open - close
vline = Line2D(
xdata=(t, t), ydata=(low, high),
color=color,
linewidth=0.5,
antialiased=True,
)
rect = Rectangle(
xy=(t - OFFSET, lower),
width=width,
height=height,
facecolor=color,
edgecolor=color,
)
rect.set_alpha(alpha)
lines.append(vline)
patches.append(rect)
ax.add_line(vline)
ax.add_patch(rect)
ax.autoscale_view()
return lines, patches
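# Editor's note: the same fabricated quotes as in the sketch above, drawn with
# the candlestick variant; width=0.6 is just an illustrative fraction of a day.
def _demo_candlestick():
    import matplotlib.pyplot as plt
    quotes = [(736000.0, 10.0, 12.0, 9.5, 11.0),
              (736001.0, 11.0, 11.5, 10.0, 10.2)]
    fig, ax = plt.subplots()
    return candlestick_ohlc(ax, quotes, width=0.6, colorup='g', colordown='r')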
def _check_input(opens, closes, highs, lows, miss=-1):
"""Checks that *opens*, *highs*, *lows* and *closes* have the same length.
NOTE: this code assumes if any value open, high, low, close is
missing (*-1*) they all are missing
Parameters
----------
ax : `Axes`
an Axes instance to plot to
opens : sequence
sequence of opening values
highs : sequence
sequence of high values
lows : sequence
sequence of low values
closes : sequence
sequence of closing values
miss : int
identifier of the missing data
Raises
------
ValueError
if the input sequences don't have the same length
"""
def _missing(sequence, miss=-1):
"""Returns the index in *sequence* of the missing data, identified by
*miss*
Parameters
----------
sequence :
sequence to evaluate
miss :
identifier of the missing data
Returns
-------
where_miss: numpy.ndarray
indices of the missing data
"""
return np.where(np.array(sequence) == miss)[0]
same_length = len(opens) == len(highs) == len(lows) == len(closes)
_missopens = _missing(opens)
same_missing = ((_missopens == _missing(highs)).all() and
(_missopens == _missing(lows)).all() and
(_missopens == _missing(closes)).all())
if not (same_length and same_missing):
msg = ("*opens*, *highs*, *lows* and *closes* must have the same"
" length. NOTE: this code assumes if any value open, high,"
" low, close is missing (*-1*) they all must be missing.")
raise ValueError(msg)
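# Editor's note: a small illustration (not part of the original module) of the
# missing-data convention _check_input enforces: a -1 in any of the four series
# must appear at the same index in all of them. The numbers are made up.
def _demo_check_input():
    opens, highs, lows, closes = [1.0, -1], [2.0, -1], [0.5, -1], [1.5, -1]
    _check_input(opens, highs, lows, closes)    # consistent, so no exception
    return True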
def plot_day_summary2_ochl(ax, opens, closes, highs, lows, ticksize=4,
colorup='k', colordown='r',
):
"""Represent the time, open, close, high, low, as a vertical line
ranging from low to high. The left tick is the open and the right
tick is the close.
Parameters
----------
ax : `Axes`
an Axes instance to plot to
opens : sequence
sequence of opening values
closes : sequence
sequence of closing values
highs : sequence
sequence of high values
lows : sequence
sequence of low values
ticksize : int
size of open and close ticks in points
colorup : color
the color of the lines where close >= open
colordown : color
the color of the lines where close < open
Returns
-------
ret : list
a list of lines added to the axes
"""
return plot_day_summary2_ohlc(ax, opens, highs, lows, closes, ticksize,
colorup, colordown)
def plot_day_summary2_ohlc(ax, opens, highs, lows, closes, ticksize=4,
colorup='k', colordown='r',
):
"""Represent the time, open, high, low, close as a vertical line
ranging from low to high. The left tick is the open and the right
tick is the close.
*opens*, *highs*, *lows* and *closes* must have the same length.
NOTE: this code assumes if any value open, high, low, close is
missing (*-1*) they all are missing
Parameters
----------
ax : `Axes`
an Axes instance to plot to
opens : sequence
sequence of opening values
highs : sequence
sequence of high values
lows : sequence
sequence of low values
closes : sequence
sequence of closing values
ticksize : int
size of open and close ticks in points
colorup : color
the color of the lines where close >= open
colordown : color
the color of the lines where close < open
Returns
-------
ret : list
a list of lines added to the axes
"""
_check_input(opens, highs, lows, closes)
rangeSegments = [((i, low), (i, high)) for i, low, high in
zip(xrange(len(lows)), lows, highs) if low != -1]
# the ticks will be from ticksize to 0 in points at the origin and
# we'll translate these to the i, close location
openSegments = [((-ticksize, 0), (0, 0))]
# the ticks will be from 0 to ticksize in points at the origin and
# we'll translate these to the i, close location
closeSegments = [((0, 0), (ticksize, 0))]
offsetsOpen = [(i, open) for i, open in
zip(xrange(len(opens)), opens) if open != -1]
offsetsClose = [(i, close) for i, close in
zip(xrange(len(closes)), closes) if close != -1]
scale = ax.figure.dpi * (1.0 / 72.0)
tickTransform = Affine2D().scale(scale, 0.0)
colorup = mcolors.to_rgba(colorup)
colordown = mcolors.to_rgba(colordown)
colord = {True: colorup, False: colordown}
colors = [colord[open < close] for open, close in
zip(opens, closes) if open != -1 and close != -1]
useAA = 0, # use tuple here
lw = 1, # and here
rangeCollection = LineCollection(rangeSegments,
colors=colors,
linewidths=lw,
antialiaseds=useAA,
)
openCollection = LineCollection(openSegments,
colors=colors,
antialiaseds=useAA,
linewidths=lw,
offsets=offsetsOpen,
transOffset=ax.transData,
)
openCollection.set_transform(tickTransform)
closeCollection = LineCollection(closeSegments,
colors=colors,
antialiaseds=useAA,
linewidths=lw,
offsets=offsetsClose,
transOffset=ax.transData,
)
closeCollection.set_transform(tickTransform)
minpy, maxx = (0, len(rangeSegments))
miny = min([low for low in lows if low != -1])
maxy = max([high for high in highs if high != -1])
corners = (minpy, miny), (maxx, maxy)
ax.update_datalim(corners)
ax.autoscale_view()
# add these last
ax.add_collection(rangeCollection)
ax.add_collection(openCollection)
ax.add_collection(closeCollection)
return rangeCollection, openCollection, closeCollection
def candlestick2_ochl(ax, opens, closes, highs, lows, width=4,
colorup='k', colordown='r',
alpha=0.75,
):
"""Represent the open, close as a bar line and high low range as a
vertical line.
Preserves the original argument order.
Parameters
----------
ax : `Axes`
an Axes instance to plot to
opens : sequence
sequence of opening values
closes : sequence
sequence of closing values
highs : sequence
sequence of high values
lows : sequence
sequence of low values
width : float
the width of the bars
colorup : color
the color of the lines where close >= open
colordown : color
the color of the lines where close < open
alpha : float
bar transparency
Returns
-------
ret : tuple
(lineCollection, barCollection)
"""
return candlestick2_ohlc(ax, opens, highs, lows, closes, width=width,
colorup=colorup, colordown=colordown,
alpha=alpha)
def candlestick2_ohlc(ax, opens, highs, lows, closes, width=4,
colorup='k', colordown='r',
alpha=0.75,
):
"""Represent the open, close as a bar line and high low range as a
vertical line.
NOTE: this code assumes if any value open, low, high, close is
missing they all are missing
Parameters
----------
ax : `Axes`
an Axes instance to plot to
opens : sequence
sequence of opening values
highs : sequence
sequence of high values
lows : sequence
sequence of low values
closes : sequence
sequence of closing values
width : float
the width of the bars
colorup : color
the color of the lines where close >= open
colordown : color
the color of the lines where close < open
alpha : float
bar transparency
Returns
-------
ret : tuple
(lineCollection, barCollection)
"""
_check_input(opens, highs, lows, closes)
delta = width / 2.
barVerts = [((i - delta, open),
(i - delta, close),
(i + delta, close),
(i + delta, open))
for i, open, close in zip(xrange(len(opens)), opens, closes)
if open != -1 and close != -1]
rangeSegments = [((i, low), (i, high))
for i, low, high in zip(xrange(len(lows)), lows, highs)
if low != -1]
colorup = mcolors.to_rgba(colorup, alpha)
colordown = mcolors.to_rgba(colordown, alpha)
colord = {True: colorup, False: colordown}
colors = [colord[open < close]
for open, close in zip(opens, closes)
if open != -1 and close != -1]
useAA = 0, # use tuple here
lw = 0.5, # and here
rangeCollection = LineCollection(rangeSegments,
colors=((0, 0, 0, 1), ),
linewidths=lw,
antialiaseds=useAA,
)
barCollection = PolyCollection(barVerts,
facecolors=colors,
edgecolors=((0, 0, 0, 1), ),
antialiaseds=useAA,
linewidths=lw,
)
minx, maxx = 0, len(rangeSegments)
miny = min([low for low in lows if low != -1])
maxy = max([high for high in highs if high != -1])
corners = (minx, miny), (maxx, maxy)
ax.update_datalim(corners)
ax.autoscale_view()
# add these last
ax.add_collection(rangeCollection)
ax.add_collection(barCollection)
return rangeCollection, barCollection
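# Editor's note: usage sketch for the column-oriented API above; the four
# series are fabricated and the -1 entries mark a missing bar, per the
# convention documented in _check_input.
def _demo_candlestick2():
    import matplotlib.pyplot as plt
    opens = [10.0, 11.0, -1, 10.5]
    highs = [12.0, 11.5, -1, 11.0]
    lows = [9.5, 10.0, -1, 10.0]
    closes = [11.0, 10.2, -1, 10.8]
    fig, ax = plt.subplots()
    return candlestick2_ohlc(ax, opens, highs, lows, closes, width=0.6)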
def volume_overlay(ax, opens, closes, volumes,
colorup='k', colordown='r',
width=4, alpha=1.0):
"""Add a volume overlay to the current axes. The opens and closes
are used to determine the color of the bar. -1 is missing. If a
value is missing on one it must be missing on all
Parameters
----------
ax : `Axes`
an Axes instance to plot to
opens : sequence
a sequence of opens
closes : sequence
a sequence of closes
volumes : sequence
a sequence of volumes
width : int
the bar width in points
colorup : color
the color of the lines where close >= open
colordown : color
the color of the lines where close < open
alpha : float
bar transparency
Returns
-------
ret : `barCollection`
The `barCollection` added to the axes
"""
colorup = mcolors.to_rgba(colorup, alpha)
colordown = mcolors.to_rgba(colordown, alpha)
colord = {True: colorup, False: colordown}
colors = [colord[open < close]
for open, close in zip(opens, closes)
if open != -1 and close != -1]
delta = width / 2.
bars = [((i - delta, 0), (i - delta, v), (i + delta, v), (i + delta, 0))
for i, v in enumerate(volumes)
if v != -1]
barCollection = PolyCollection(bars,
facecolors=colors,
edgecolors=((0, 0, 0, 1), ),
antialiaseds=(0,),
linewidths=(0.5,),
)
ax.add_collection(barCollection)
corners = (0, 0), (len(bars), max(volumes))
ax.update_datalim(corners)
ax.autoscale_view()
# add these last
return barCollection
def volume_overlay2(ax, closes, volumes,
colorup='k', colordown='r',
width=4, alpha=1.0):
"""
Add a volume overlay to the current axes. The closes are used to
determine the color of the bar. -1 is missing. If a value is
missing on one it must be missing on all
nb: first point is not displayed - it is used only for choosing the
right color
Parameters
----------
ax : `Axes`
an Axes instance to plot to
closes : sequence
a sequence of closes
volumes : sequence
a sequence of volumes
width : int
the bar width in points
colorup : color
the color of the lines where close >= open
colordown : color
the color of the lines where close < open
alpha : float
bar transparency
Returns
-------
ret : `barCollection`
The `barCollection` added to the axes
"""
return volume_overlay(ax, closes[:-1], closes[1:], volumes[1:],
colorup, colordown, width, alpha)
def volume_overlay3(ax, quotes,
colorup='k', colordown='r',
width=4, alpha=1.0):
"""Add a volume overlay to the current axes. quotes is a list of (d,
open, high, low, close, volume) and close-open is used to
determine the color of the bar
Parameters
----------
ax : `Axes`
an Axes instance to plot to
quotes : sequence of (time, open, high, low, close, ...) sequences
data to plot. time must be in float date format - see date2num
width : int
the bar width in points
colorup : color
the color of the lines where close1 >= close0
colordown : color
the color of the lines where close1 < close0
alpha : float
bar transparency
Returns
-------
ret : `barCollection`
The `barCollection` added to the axes
"""
colorup = mcolors.to_rgba(colorup, alpha)
colordown = mcolors.to_rgba(colordown, alpha)
colord = {True: colorup, False: colordown}
dates, opens, highs, lows, closes, volumes = list(zip(*quotes))
colors = [colord[close1 >= close0]
for close0, close1 in zip(closes[:-1], closes[1:])
if close0 != -1 and close1 != -1]
colors.insert(0, colord[closes[0] >= opens[0]])
right = width / 2.0
left = -width / 2.0
bars = [((left, 0), (left, volume), (right, volume), (right, 0))
for d, open, high, low, close, volume in quotes]
sx = ax.figure.dpi * (1.0 / 72.0) # scale for points
sy = ax.bbox.height / ax.viewLim.height
barTransform = Affine2D().scale(sx, sy)
dates = [d for d, open, high, low, close, volume in quotes]
offsetsBars = [(d, 0) for d in dates]
useAA = 0, # use tuple here
lw = 0.5, # and here
barCollection = PolyCollection(bars,
facecolors=colors,
edgecolors=((0, 0, 0, 1),),
antialiaseds=useAA,
linewidths=lw,
offsets=offsetsBars,
transOffset=ax.transData,
)
barCollection.set_transform(barTransform)
minpy, maxx = (min(dates), max(dates))
miny = 0
maxy = max([volume for d, open, high, low, close, volume in quotes])
corners = (minpy, miny), (maxx, maxy)
ax.update_datalim(corners)
#print 'datalim', ax.dataLim.bounds
#print 'viewlim', ax.viewLim.bounds
ax.add_collection(barCollection)
ax.autoscale_view()
return barCollection
def index_bar(ax, vals,
facecolor='b', edgecolor='l',
width=4, alpha=1.0, ):
"""Add a bar collection graph with height vals (-1 is missing).
Parameters
----------
ax : `Axes`
an Axes instance to plot to
vals : sequence
a sequence of values
facecolor : color
the color of the bar face
edgecolor : color
the color of the bar edges
width : int
the bar width in points
alpha : float
bar transparency
Returns
-------
ret : `barCollection`
The `barCollection` added to the axes
"""
facecolors = (mcolors.to_rgba(facecolor, alpha),)
edgecolors = (mcolors.to_rgba(edgecolor, alpha),)
right = width / 2.0
left = -width / 2.0
bars = [((left, 0), (left, v), (right, v), (right, 0))
for v in vals if v != -1]
sx = ax.figure.dpi * (1.0 / 72.0) # scale for points
sy = ax.bbox.height / ax.viewLim.height
barTransform = Affine2D().scale(sx, sy)
offsetsBars = [(i, 0) for i, v in enumerate(vals) if v != -1]
barCollection = PolyCollection(bars,
facecolors=facecolors,
edgecolors=edgecolors,
antialiaseds=(0,),
linewidths=(0.5,),
offsets=offsetsBars,
transOffset=ax.transData,
)
barCollection.set_transform(barTransform)
minpy, maxx = (0, len(offsetsBars))
miny = 0
maxy = max([v for v in vals if v != -1])
corners = (minpy, miny), (maxx, maxy)
ax.update_datalim(corners)
ax.autoscale_view()
# add these last
ax.add_collection(barCollection)
return barCollection
|
gpl-3.0
|
AIML/scikit-learn
|
sklearn/decomposition/tests/test_online_lda.py
|
48
|
12645
|
import numpy as np
from scipy.linalg import block_diag
from scipy.sparse import csr_matrix
from scipy.special import psi
from sklearn.decomposition import LatentDirichletAllocation
from sklearn.decomposition._online_lda import (_dirichlet_expectation_1d,
_dirichlet_expectation_2d)
from sklearn.utils.testing import assert_allclose
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_raises_regexp
from sklearn.utils.testing import if_not_mac_os
from sklearn.utils.validation import NotFittedError
from sklearn.externals.six.moves import xrange
def _build_sparse_mtx():
# Create 3 topics and each topic has 3 distinct words.
# (Each word only belongs to a single topic.)
n_topics = 3
block = n_topics * np.ones((3, 3))
blocks = [block] * n_topics
X = block_diag(*blocks)
X = csr_matrix(X)
return (n_topics, X)
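# Editor's note: a tiny illustration (not one of the original tests) of the
# block-diagonal document-term matrix built above: 9 "documents" over 9 words,
# where each group of 3 words co-occurs only within its own topic.
def _demo_sparse_mtx_shape():
    n_topics, X = _build_sparse_mtx()
    assert X.shape == (3 * n_topics, 3 * n_topics)
    return X.toarray()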
def test_lda_default_prior_params():
# default prior parameters should be `1 / n_topics`
# and verbose params should not affect result
n_topics, X = _build_sparse_mtx()
prior = 1. / n_topics
lda_1 = LatentDirichletAllocation(n_topics=n_topics, doc_topic_prior=prior,
topic_word_prior=prior, random_state=0)
lda_2 = LatentDirichletAllocation(n_topics=n_topics, random_state=0)
topic_distr_1 = lda_1.fit_transform(X)
topic_distr_2 = lda_2.fit_transform(X)
assert_almost_equal(topic_distr_1, topic_distr_2)
def test_lda_fit_batch():
# Test LDA batch learning_offset (`fit` method with 'batch' learning)
rng = np.random.RandomState(0)
n_topics, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_topics=n_topics, evaluate_every=1,
learning_method='batch', random_state=rng)
lda.fit(X)
correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)]
for component in lda.components_:
# Find top 3 words in each LDA component
top_idx = set(component.argsort()[-3:][::-1])
assert_true(tuple(sorted(top_idx)) in correct_idx_grps)
def test_lda_fit_online():
# Test LDA online learning (`fit` method with 'online' learning)
rng = np.random.RandomState(0)
n_topics, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_topics=n_topics, learning_offset=10.,
evaluate_every=1, learning_method='online',
random_state=rng)
lda.fit(X)
correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)]
for component in lda.components_:
# Find top 3 words in each LDA component
top_idx = set(component.argsort()[-3:][::-1])
assert_true(tuple(sorted(top_idx)) in correct_idx_grps)
def test_lda_partial_fit():
# Test LDA online learning (`partial_fit` method)
# (same as test_lda_batch)
rng = np.random.RandomState(0)
n_topics, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_topics=n_topics, learning_offset=10.,
total_samples=100, random_state=rng)
for i in xrange(3):
lda.partial_fit(X)
correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)]
for c in lda.components_:
top_idx = set(c.argsort()[-3:][::-1])
assert_true(tuple(sorted(top_idx)) in correct_idx_grps)
def test_lda_dense_input():
# Test LDA with dense input.
rng = np.random.RandomState(0)
n_topics, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_topics=n_topics, learning_method='batch',
random_state=rng)
lda.fit(X.toarray())
correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)]
for component in lda.components_:
# Find top 3 words in each LDA component
top_idx = set(component.argsort()[-3:][::-1])
assert_true(tuple(sorted(top_idx)) in correct_idx_grps)
def test_lda_transform():
# Test LDA transform.
# Transform result cannot be negative
rng = np.random.RandomState(0)
X = rng.randint(5, size=(20, 10))
n_topics = 3
lda = LatentDirichletAllocation(n_topics=n_topics, random_state=rng)
X_trans = lda.fit_transform(X)
assert_true((X_trans > 0.0).any())
def test_lda_fit_transform():
# Test LDA fit_transform & transform
# fit_transform and transform result should be the same
for method in ('online', 'batch'):
rng = np.random.RandomState(0)
X = rng.randint(10, size=(50, 20))
lda = LatentDirichletAllocation(n_topics=5, learning_method=method, random_state=rng)
X_fit = lda.fit_transform(X)
X_trans = lda.transform(X)
assert_array_almost_equal(X_fit, X_trans, 4)
def test_lda_partial_fit_dim_mismatch():
# test `n_features` mismatch in `partial_fit`
rng = np.random.RandomState(0)
n_topics = rng.randint(3, 6)
n_col = rng.randint(6, 10)
X_1 = np.random.randint(4, size=(10, n_col))
X_2 = np.random.randint(4, size=(10, n_col + 1))
lda = LatentDirichletAllocation(n_topics=n_topics, learning_offset=5.,
total_samples=20, random_state=rng)
lda.partial_fit(X_1)
assert_raises_regexp(ValueError, r"^The provided data has", lda.partial_fit, X_2)
def test_invalid_params():
# test `_check_params` method
X = np.ones((5, 10))
invalid_models = (
('n_topics', LatentDirichletAllocation(n_topics=0)),
('learning_method', LatentDirichletAllocation(learning_method='unknown')),
('total_samples', LatentDirichletAllocation(total_samples=0)),
('learning_offset', LatentDirichletAllocation(learning_offset=-1)),
)
for param, model in invalid_models:
regex = r"^Invalid %r parameter" % param
assert_raises_regexp(ValueError, regex, model.fit, X)
def test_lda_negative_input():
# test passing a dense matrix with negative values.
X = -np.ones((5, 10))
lda = LatentDirichletAllocation()
regex = r"^Negative values in data passed"
assert_raises_regexp(ValueError, regex, lda.fit, X)
def test_lda_no_component_error():
# test `transform` and `perplexity` before `fit`
rng = np.random.RandomState(0)
X = rng.randint(4, size=(20, 10))
lda = LatentDirichletAllocation()
regex = r"^no 'components_' attribute"
assert_raises_regexp(NotFittedError, regex, lda.transform, X)
assert_raises_regexp(NotFittedError, regex, lda.perplexity, X)
def test_lda_transform_mismatch():
# test `n_features` mismatch in partial_fit and transform
rng = np.random.RandomState(0)
X = rng.randint(4, size=(20, 10))
X_2 = rng.randint(4, size=(10, 8))
n_topics = rng.randint(3, 6)
lda = LatentDirichletAllocation(n_topics=n_topics, random_state=rng)
lda.partial_fit(X)
assert_raises_regexp(ValueError, r"^The provided data has", lda.partial_fit, X_2)
@if_not_mac_os()
def test_lda_multi_jobs():
# Test LDA batch training with multi CPU
for method in ('online', 'batch'):
rng = np.random.RandomState(0)
n_topics, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_topics=n_topics, n_jobs=3,
learning_method=method, random_state=rng)
lda.fit(X)
correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)]
for c in lda.components_:
top_idx = set(c.argsort()[-3:][::-1])
assert_true(tuple(sorted(top_idx)) in correct_idx_grps)
@if_not_mac_os()
def test_lda_partial_fit_multi_jobs():
# Test LDA online training with multi CPU
rng = np.random.RandomState(0)
n_topics, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_topics=n_topics, n_jobs=-1, learning_offset=5.,
total_samples=30, random_state=rng)
for i in xrange(3):
lda.partial_fit(X)
correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)]
for c in lda.components_:
top_idx = set(c.argsort()[-3:][::-1])
assert_true(tuple(sorted(top_idx)) in correct_idx_grps)
def test_lda_perplexity_mismatch():
# test dimension mismatch in `perplexity` method
rng = np.random.RandomState(0)
n_topics = rng.randint(3, 6)
n_samples = rng.randint(6, 10)
X = np.random.randint(4, size=(n_samples, 10))
lda = LatentDirichletAllocation(n_topics=n_topics, learning_offset=5.,
total_samples=20, random_state=rng)
lda.fit(X)
# invalid samples
invalid_n_samples = rng.randint(4, size=(n_samples + 1, n_topics))
assert_raises_regexp(ValueError, r'Number of samples', lda.perplexity, X, invalid_n_samples)
# invalid topic number
invalid_n_topics = rng.randint(4, size=(n_samples, n_topics + 1))
assert_raises_regexp(ValueError, r'Number of topics', lda.perplexity, X, invalid_n_topics)
def test_lda_perplexity():
# Test LDA perplexity for batch training
# perplexity should be lower after each iteration
n_topics, X = _build_sparse_mtx()
for method in ('online', 'batch'):
lda_1 = LatentDirichletAllocation(n_topics=n_topics, max_iter=1, learning_method=method,
total_samples=100, random_state=0)
lda_2 = LatentDirichletAllocation(n_topics=n_topics, max_iter=10, learning_method=method,
total_samples=100, random_state=0)
distr_1 = lda_1.fit_transform(X)
perp_1 = lda_1.perplexity(X, distr_1, sub_sampling=False)
distr_2 = lda_2.fit_transform(X)
perp_2 = lda_2.perplexity(X, distr_2, sub_sampling=False)
assert_greater_equal(perp_1, perp_2)
perp_1_subsampling = lda_1.perplexity(X, distr_1, sub_sampling=True)
perp_2_subsampling = lda_2.perplexity(X, distr_2, sub_sampling=True)
assert_greater_equal(perp_1_subsampling, perp_2_subsampling)
def test_lda_score():
# Test LDA score for batch training
# score should be higher after each iteration
n_topics, X = _build_sparse_mtx()
for method in ('online', 'batch'):
lda_1 = LatentDirichletAllocation(n_topics=n_topics, max_iter=1, learning_method=method,
total_samples=100, random_state=0)
lda_2 = LatentDirichletAllocation(n_topics=n_topics, max_iter=10, learning_method=method,
total_samples=100, random_state=0)
lda_1.fit_transform(X)
score_1 = lda_1.score(X)
lda_2.fit_transform(X)
score_2 = lda_2.score(X)
assert_greater_equal(score_2, score_1)
def test_perplexity_input_format():
# Test LDA perplexity for sparse and dense input
# score should be the same for both dense and sparse input
n_topics, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_topics=n_topics, max_iter=1, learning_method='batch',
total_samples=100, random_state=0)
distr = lda.fit_transform(X)
perp_1 = lda.perplexity(X)
perp_2 = lda.perplexity(X, distr)
perp_3 = lda.perplexity(X.toarray(), distr)
assert_almost_equal(perp_1, perp_2)
assert_almost_equal(perp_1, perp_3)
def test_lda_score_perplexity():
# Test the relationship between LDA score and perplexity
n_topics, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_topics=n_topics, max_iter=10,
random_state=0)
distr = lda.fit_transform(X)
perplexity_1 = lda.perplexity(X, distr, sub_sampling=False)
score = lda.score(X)
perplexity_2 = np.exp(-1. * (score / np.sum(X.data)))
assert_almost_equal(perplexity_1, perplexity_2)
def test_lda_empty_docs():
"""Test LDA on empty document (all-zero rows)."""
Z = np.zeros((5, 4))
for X in [Z, csr_matrix(Z)]:
lda = LatentDirichletAllocation(max_iter=750).fit(X)
assert_almost_equal(lda.components_.sum(axis=0),
np.ones(lda.components_.shape[1]))
def test_dirichlet_expectation():
"""Test Cython version of Dirichlet expectation calculation."""
x = np.logspace(-100, 10, 10000)
assert_allclose(_dirichlet_expectation_1d(x),
np.exp(psi(x) - psi(np.sum(x))),
atol=1e-19)
x = x.reshape(100, 100)
assert_allclose(_dirichlet_expectation_2d(x),
psi(x) - psi(np.sum(x, axis=1)[:, np.newaxis]),
rtol=1e-11, atol=3e-9)
|
bsd-3-clause
|
newlawrence/poliastro
|
docs/source/conf.py
|
1
|
9340
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# poliastro documentation build configuration file, created by
# sphinx-quickstart on Sat May 24 11:02:03 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = '1.3'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.napoleon',
'sphinx.ext.todo',
'sphinx.ext.mathjax',
'sphinx.ext.intersphinx',
'nbsphinx',
'IPython.sphinxext.ipython_console_highlighting',
]
# Custom configuration
autodoc_member_order = 'bysource'
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# Warning suppressions
suppress_warnings = ['image.nonlocal_uri']
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'poliastro'
copyright = u'2013-2018, Juan Luis Cano Rodríguez and the poliastro development team'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.12'
# The full version, including alpha/beta/rc tags.
release = '0.12.dev0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build', '**.ipynb_checkpoints']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
#Intersphinx configuration
intersphinx_mapping = {
'python': ('https://docs.python.org/3', None),
'astropy': ('http://docs.astropy.org/en/stable/', None),
'numpy': ('https://docs.scipy.org/doc/numpy/', None),
'scipy': ('https://docs.scipy.org/doc/scipy/reference', None),
'matplotlib': ('http://matplotlib.org', None)
}
#Nbsphinx configuration
if os.environ.get('READTHEDOCS') == 'True':
nbsphinx_execute = 'never'
else:
nbsphinx_execute = 'always'
# Controls when a cell will time out (defaults to 30; use -1 for no timeout):
nbsphinx_timeout = 60
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
import sphinx_rtd_theme
html_theme = "sphinx_rtd_theme"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# HTML style
html_style = os.path.join("css", "custom.css")
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'poliastrodoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'poliastro.tex', 'poliastro Documentation',
'Juan Luis Cano Rodríguez', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'poliastro', 'poliastro Documentation',
['Juan Luis Cano Rodríguez'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'poliastro', 'poliastro Documentation',
'Juan Luis Cano Rodríguez', 'poliastro', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
|
mit
|
rmcgibbo/numpy
|
numpy/lib/polynomial.py
|
82
|
37957
|
"""
Functions to operate on polynomials.
"""
from __future__ import division, absolute_import, print_function
__all__ = ['poly', 'roots', 'polyint', 'polyder', 'polyadd',
'polysub', 'polymul', 'polydiv', 'polyval', 'poly1d',
'polyfit', 'RankWarning']
import re
import warnings
import numpy.core.numeric as NX
from numpy.core import (isscalar, abs, finfo, atleast_1d, hstack, dot, array,
ones)
from numpy.lib.twodim_base import diag, vander
from numpy.lib.function_base import trim_zeros, sort_complex
from numpy.lib.type_check import iscomplex, real, imag, mintypecode
from numpy.linalg import eigvals, lstsq, inv
class RankWarning(UserWarning):
"""
Issued by `polyfit` when the Vandermonde matrix is rank deficient.
For more information, a way to suppress the warning, and an example of
`RankWarning` being issued, see `polyfit`.
"""
pass
def poly(seq_of_zeros):
"""
Find the coefficients of a polynomial with the given sequence of roots.
Returns the coefficients of the polynomial whose leading coefficient
is one for the given sequence of zeros (multiple roots must be included
in the sequence as many times as their multiplicity; see Examples).
A square matrix (or array, which will be treated as a matrix) can also
be given, in which case the coefficients of the characteristic polynomial
of the matrix are returned.
Parameters
----------
seq_of_zeros : array_like, shape (N,) or (N, N)
A sequence of polynomial roots, or a square array or matrix object.
Returns
-------
c : ndarray
1D array of polynomial coefficients from highest to lowest degree:
``c[0] * x**(N) + c[1] * x**(N-1) + ... + c[N-1] * x + c[N]``
where c[0] always equals 1.
Raises
------
ValueError
If input is the wrong shape (the input must be a 1-D or square
2-D array).
See Also
--------
polyval : Evaluate a polynomial at a point.
roots : Return the roots of a polynomial.
polyfit : Least squares polynomial fit.
poly1d : A one-dimensional polynomial class.
Notes
-----
Specifying the roots of a polynomial still leaves one degree of
freedom, typically represented by an undetermined leading
coefficient. [1]_ In the case of this function, that coefficient -
the first one in the returned array - is always taken as one. (If
for some reason you have one other point, the only automatic way
presently to leverage that information is to use ``polyfit``.)
The characteristic polynomial, :math:`p_a(t)`, of an `n`-by-`n`
matrix **A** is given by
:math:`p_a(t) = \\mathrm{det}(t\\, \\mathbf{I} - \\mathbf{A})`,
where **I** is the `n`-by-`n` identity matrix. [2]_
References
----------
.. [1] M. Sullivan and M. Sullivan, III, "Algebra and Trignometry,
Enhanced With Graphing Utilities," Prentice-Hall, pg. 318, 1996.
.. [2] G. Strang, "Linear Algebra and Its Applications, 2nd Edition,"
Academic Press, pg. 182, 1980.
Examples
--------
Given a sequence of a polynomial's zeros:
>>> np.poly((0, 0, 0)) # Multiple root example
array([1, 0, 0, 0])
The line above represents z**3 + 0*z**2 + 0*z + 0.
>>> np.poly((-1./2, 0, 1./2))
array([ 1. , 0. , -0.25, 0. ])
The line above represents z**3 - z/4
    >>> np.poly((np.random.random(1)[0], 0, np.random.random(1)[0]))
array([ 1. , -0.77086955, 0.08618131, 0. ]) #random
Given a square array object:
>>> P = np.array([[0, 1./3], [-1./2, 0]])
>>> np.poly(P)
array([ 1. , 0. , 0.16666667])
Or a square matrix object:
>>> np.poly(np.matrix(P))
array([ 1. , 0. , 0.16666667])
Note how in all cases the leading coefficient is always 1.
"""
seq_of_zeros = atleast_1d(seq_of_zeros)
sh = seq_of_zeros.shape
if len(sh) == 2 and sh[0] == sh[1] and sh[0] != 0:
seq_of_zeros = eigvals(seq_of_zeros)
elif len(sh) == 1:
dt = seq_of_zeros.dtype
# Let object arrays slip through, e.g. for arbitrary precision
if dt != object:
seq_of_zeros = seq_of_zeros.astype(mintypecode(dt.char))
else:
raise ValueError("input must be 1d or non-empty square 2d array.")
if len(seq_of_zeros) == 0:
return 1.0
dt = seq_of_zeros.dtype
a = ones((1,), dtype=dt)
for k in range(len(seq_of_zeros)):
a = NX.convolve(a, array([1, -seq_of_zeros[k]], dtype=dt),
mode='full')
if issubclass(a.dtype.type, NX.complexfloating):
# if complex roots are all complex conjugates, the roots are real.
roots = NX.asarray(seq_of_zeros, complex)
pos_roots = sort_complex(NX.compress(roots.imag > 0, roots))
neg_roots = NX.conjugate(sort_complex(
NX.compress(roots.imag < 0, roots)))
if (len(pos_roots) == len(neg_roots) and
NX.alltrue(neg_roots == pos_roots)):
a = a.real.copy()
return a
def roots(p):
"""
Return the roots of a polynomial with coefficients given in p.
The values in the rank-1 array `p` are coefficients of a polynomial.
If the length of `p` is n+1 then the polynomial is described by::
p[0] * x**n + p[1] * x**(n-1) + ... + p[n-1]*x + p[n]
Parameters
----------
p : array_like
Rank-1 array of polynomial coefficients.
Returns
-------
out : ndarray
An array containing the complex roots of the polynomial.
Raises
------
ValueError
When `p` cannot be converted to a rank-1 array.
See also
--------
poly : Find the coefficients of a polynomial with a given sequence
of roots.
polyval : Evaluate a polynomial at a point.
polyfit : Least squares polynomial fit.
poly1d : A one-dimensional polynomial class.
Notes
-----
The algorithm relies on computing the eigenvalues of the
companion matrix [1]_.
References
----------
.. [1] R. A. Horn & C. R. Johnson, *Matrix Analysis*. Cambridge, UK:
Cambridge University Press, 1999, pp. 146-7.
Examples
--------
>>> coeff = [3.2, 2, 1]
>>> np.roots(coeff)
array([-0.3125+0.46351241j, -0.3125-0.46351241j])
"""
# If input is scalar, this makes it an array
p = atleast_1d(p)
if len(p.shape) != 1:
raise ValueError("Input must be a rank-1 array.")
# find non-zero array entries
non_zero = NX.nonzero(NX.ravel(p))[0]
# Return an empty array if polynomial is all zeros
if len(non_zero) == 0:
return NX.array([])
# find the number of trailing zeros -- this is the number of roots at 0.
trailing_zeros = len(p) - non_zero[-1] - 1
# strip leading and trailing zeros
p = p[int(non_zero[0]):int(non_zero[-1])+1]
# casting: if incoming array isn't floating point, make it floating point.
if not issubclass(p.dtype.type, (NX.floating, NX.complexfloating)):
p = p.astype(float)
N = len(p)
if N > 1:
# build companion matrix and find its eigenvalues (the roots)
A = diag(NX.ones((N-2,), p.dtype), -1)
A[0,:] = -p[1:] / p[0]
roots = eigvals(A)
else:
roots = NX.array([])
# tack any zeros onto the back of the array
roots = hstack((roots, NX.zeros(trailing_zeros, roots.dtype)))
return roots
def polyint(p, m=1, k=None):
"""
Return an antiderivative (indefinite integral) of a polynomial.
The returned order `m` antiderivative `P` of polynomial `p` satisfies
:math:`\\frac{d^m}{dx^m}P(x) = p(x)` and is defined up to `m - 1`
integration constants `k`. The constants determine the low-order
polynomial part
.. math:: \\frac{k_{m-1}}{0!} x^0 + \\ldots + \\frac{k_0}{(m-1)!}x^{m-1}
of `P` so that :math:`P^{(j)}(0) = k_{m-j-1}`.
Parameters
----------
p : array_like or poly1d
Polynomial to differentiate.
A sequence is interpreted as polynomial coefficients, see `poly1d`.
m : int, optional
Order of the antiderivative. (Default: 1)
k : list of `m` scalars or scalar, optional
Integration constants. They are given in the order of integration:
those corresponding to highest-order terms come first.
If ``None`` (default), all constants are assumed to be zero.
If `m = 1`, a single scalar can be given instead of a list.
See Also
--------
polyder : derivative of a polynomial
poly1d.integ : equivalent method
Examples
--------
The defining property of the antiderivative:
>>> p = np.poly1d([1,1,1])
>>> P = np.polyint(p)
>>> P
poly1d([ 0.33333333, 0.5 , 1. , 0. ])
>>> np.polyder(P) == p
True
The integration constants default to zero, but can be specified:
>>> P = np.polyint(p, 3)
>>> P(0)
0.0
>>> np.polyder(P)(0)
0.0
>>> np.polyder(P, 2)(0)
0.0
>>> P = np.polyint(p, 3, k=[6,5,3])
>>> P
poly1d([ 0.01666667, 0.04166667, 0.16666667, 3. , 5. , 3. ])
Note that 3 = 6 / 2!, and that the constants are given in the order of
integrations. Constant of the highest-order polynomial term comes first:
>>> np.polyder(P, 2)(0)
6.0
>>> np.polyder(P, 1)(0)
5.0
>>> P(0)
3.0
"""
m = int(m)
if m < 0:
raise ValueError("Order of integral must be positive (see polyder)")
if k is None:
k = NX.zeros(m, float)
k = atleast_1d(k)
if len(k) == 1 and m > 1:
k = k[0]*NX.ones(m, float)
if len(k) < m:
raise ValueError(
"k must be a scalar or a rank-1 array of length 1 or >m.")
truepoly = isinstance(p, poly1d)
p = NX.asarray(p)
if m == 0:
if truepoly:
return poly1d(p)
return p
else:
# Note: this must work also with object and integer arrays
y = NX.concatenate((p.__truediv__(NX.arange(len(p), 0, -1)), [k[0]]))
val = polyint(y, m - 1, k=k[1:])
if truepoly:
return poly1d(val)
return val
def polyder(p, m=1):
"""
Return the derivative of the specified order of a polynomial.
Parameters
----------
p : poly1d or sequence
Polynomial to differentiate.
A sequence is interpreted as polynomial coefficients, see `poly1d`.
m : int, optional
Order of differentiation (default: 1)
Returns
-------
der : poly1d
A new polynomial representing the derivative.
See Also
--------
polyint : Anti-derivative of a polynomial.
poly1d : Class for one-dimensional polynomials.
Examples
--------
The derivative of the polynomial :math:`x^3 + x^2 + x^1 + 1` is:
>>> p = np.poly1d([1,1,1,1])
>>> p2 = np.polyder(p)
>>> p2
poly1d([3, 2, 1])
which evaluates to:
>>> p2(2.)
17.0
We can verify this, approximating the derivative with
``(f(x + h) - f(x))/h``:
>>> (p(2. + 0.001) - p(2.)) / 0.001
17.007000999997857
The fourth-order derivative of a 3rd-order polynomial is zero:
>>> np.polyder(p, 2)
poly1d([6, 2])
>>> np.polyder(p, 3)
poly1d([6])
>>> np.polyder(p, 4)
poly1d([ 0.])
"""
m = int(m)
if m < 0:
raise ValueError("Order of derivative must be positive (see polyint)")
truepoly = isinstance(p, poly1d)
p = NX.asarray(p)
n = len(p) - 1
y = p[:-1] * NX.arange(n, 0, -1)
if m == 0:
val = p
else:
val = polyder(y, m - 1)
if truepoly:
val = poly1d(val)
return val
def polyfit(x, y, deg, rcond=None, full=False, w=None, cov=False):
"""
Least squares polynomial fit.
Fit a polynomial ``p(x) = p[0] * x**deg + ... + p[deg]`` of degree `deg`
to points `(x, y)`. Returns a vector of coefficients `p` that minimises
the squared error.
Parameters
----------
x : array_like, shape (M,)
x-coordinates of the M sample points ``(x[i], y[i])``.
y : array_like, shape (M,) or (M, K)
y-coordinates of the sample points. Several data sets of sample
points sharing the same x-coordinates can be fitted at once by
passing in a 2D-array that contains one dataset per column.
deg : int
Degree of the fitting polynomial
rcond : float, optional
Relative condition number of the fit. Singular values smaller than
this relative to the largest singular value will be ignored. The
default value is len(x)*eps, where eps is the relative precision of
the float type, about 2e-16 in most cases.
full : bool, optional
Switch determining nature of return value. When it is False (the
default) just the coefficients are returned, when True diagnostic
information from the singular value decomposition is also returned.
w : array_like, shape (M,), optional
weights to apply to the y-coordinates of the sample points.
cov : bool, optional
Return the estimate and the covariance matrix of the estimate
If full is True, then cov is not returned.
Returns
-------
    p : ndarray, shape (deg + 1,) or (deg + 1, K)
        Polynomial coefficients, highest power first. If `y` was 2-D, the
        coefficients for the `k`-th data set are in ``p[:,k]``.
residuals, rank, singular_values, rcond :
Present only if `full` = True. Residuals of the least-squares fit,
the effective rank of the scaled Vandermonde coefficient matrix,
its singular values, and the specified value of `rcond`. For more
details, see `linalg.lstsq`.
    V : ndarray, shape (deg + 1, deg + 1) or (deg + 1, deg + 1, K)
        Present only if `full` = False and `cov` = True. The covariance
        matrix of the polynomial coefficient estimates. The diagonal of
        this matrix contains the variance estimates for each coefficient. If y
        is a 2-D array, then the covariance matrix for the `k`-th data set
        is in ``V[:,:,k]``
Warns
-----
RankWarning
The rank of the coefficient matrix in the least-squares fit is
deficient. The warning is only raised if `full` = False.
The warnings can be turned off by
>>> import warnings
>>> warnings.simplefilter('ignore', np.RankWarning)
See Also
--------
polyval : Computes polynomial values.
linalg.lstsq : Computes a least-squares fit.
scipy.interpolate.UnivariateSpline : Computes spline fits.
Notes
-----
The solution minimizes the squared error
.. math ::
E = \\sum_{j=0}^k |p(x_j) - y_j|^2
in the equations::
x[0]**n * p[0] + ... + x[0] * p[n-1] + p[n] = y[0]
x[1]**n * p[0] + ... + x[1] * p[n-1] + p[n] = y[1]
...
x[k]**n * p[0] + ... + x[k] * p[n-1] + p[n] = y[k]
The coefficient matrix of the coefficients `p` is a Vandermonde matrix.
`polyfit` issues a `RankWarning` when the least-squares fit is badly
conditioned. This implies that the best fit is not well-defined due
to numerical error. The results may be improved by lowering the polynomial
degree or by replacing `x` by `x` - `x`.mean(). The `rcond` parameter
can also be set to a value smaller than its default, but the resulting
fit may be spurious: including contributions from the small singular
values can add numerical noise to the result.
Note that fitting polynomial coefficients is inherently badly conditioned
when the degree of the polynomial is large or the interval of sample points
is badly centered. The quality of the fit should always be checked in these
cases. When polynomial fits are not satisfactory, splines may be a good
alternative.
References
----------
.. [1] Wikipedia, "Curve fitting",
http://en.wikipedia.org/wiki/Curve_fitting
.. [2] Wikipedia, "Polynomial interpolation",
http://en.wikipedia.org/wiki/Polynomial_interpolation
Examples
--------
>>> x = np.array([0.0, 1.0, 2.0, 3.0, 4.0, 5.0])
>>> y = np.array([0.0, 0.8, 0.9, 0.1, -0.8, -1.0])
>>> z = np.polyfit(x, y, 3)
>>> z
array([ 0.08703704, -0.81349206, 1.69312169, -0.03968254])
It is convenient to use `poly1d` objects for dealing with polynomials:
>>> p = np.poly1d(z)
>>> p(0.5)
0.6143849206349179
>>> p(3.5)
-0.34732142857143039
>>> p(10)
22.579365079365115
High-order polynomials may oscillate wildly:
>>> p30 = np.poly1d(np.polyfit(x, y, 30))
/... RankWarning: Polyfit may be poorly conditioned...
>>> p30(4)
-0.80000000000000204
>>> p30(5)
-0.99999999999999445
>>> p30(4.5)
-0.10547061179440398
Illustration:
>>> import matplotlib.pyplot as plt
>>> xp = np.linspace(-2, 6, 100)
>>> _ = plt.plot(x, y, '.', xp, p(xp), '-', xp, p30(xp), '--')
>>> plt.ylim(-2,2)
(-2, 2)
>>> plt.show()
"""
order = int(deg) + 1
x = NX.asarray(x) + 0.0
y = NX.asarray(y) + 0.0
# check arguments.
if deg < 0:
raise ValueError("expected deg >= 0")
if x.ndim != 1:
raise TypeError("expected 1D vector for x")
if x.size == 0:
raise TypeError("expected non-empty vector for x")
if y.ndim < 1 or y.ndim > 2:
raise TypeError("expected 1D or 2D array for y")
if x.shape[0] != y.shape[0]:
raise TypeError("expected x and y to have same length")
# set rcond
if rcond is None:
rcond = len(x)*finfo(x.dtype).eps
# set up least squares equation for powers of x
lhs = vander(x, order)
rhs = y
# apply weighting
if w is not None:
w = NX.asarray(w) + 0.0
if w.ndim != 1:
raise TypeError("expected a 1-d array for weights")
if w.shape[0] != y.shape[0]:
raise TypeError("expected w and y to have the same length")
lhs *= w[:, NX.newaxis]
if rhs.ndim == 2:
rhs *= w[:, NX.newaxis]
else:
rhs *= w
# scale lhs to improve condition number and solve
scale = NX.sqrt((lhs*lhs).sum(axis=0))
lhs /= scale
c, resids, rank, s = lstsq(lhs, rhs, rcond)
c = (c.T/scale).T # broadcast scale coefficients
# warn on rank reduction, which indicates an ill conditioned matrix
if rank != order and not full:
msg = "Polyfit may be poorly conditioned"
warnings.warn(msg, RankWarning)
if full:
return c, resids, rank, s, rcond
elif cov:
Vbase = inv(dot(lhs.T, lhs))
Vbase /= NX.outer(scale, scale)
# Some literature ignores the extra -2.0 factor in the denominator, but
# it is included here because the covariance of Multivariate Student-T
# (which is implied by a Bayesian uncertainty analysis) includes it.
# Plus, it gives a slightly more conservative estimate of uncertainty.
fac = resids / (len(x) - order - 2.0)
if y.ndim == 1:
return c, Vbase * fac
else:
return c, Vbase[:,:, NX.newaxis] * fac
else:
return c
def polyval(p, x):
"""
Evaluate a polynomial at specific values.
If `p` is of length N, this function returns the value:
``p[0]*x**(N-1) + p[1]*x**(N-2) + ... + p[N-2]*x + p[N-1]``
If `x` is a sequence, then `p(x)` is returned for each element of `x`.
If `x` is another polynomial then the composite polynomial `p(x(t))`
is returned.
Parameters
----------
p : array_like or poly1d object
1D array of polynomial coefficients (including coefficients equal
to zero) from highest degree to the constant term, or an
instance of poly1d.
x : array_like or poly1d object
A number, a 1D array of numbers, or an instance of poly1d, "at"
which to evaluate `p`.
Returns
-------
values : ndarray or poly1d
If `x` is a poly1d instance, the result is the composition of the two
polynomials, i.e., `x` is "substituted" in `p` and the simplified
result is returned. In addition, the type of `x` - array_like or
poly1d - governs the type of the output: `x` array_like => `values`
array_like, `x` a poly1d object => `values` is also.
See Also
--------
poly1d: A polynomial class.
Notes
-----
Horner's scheme [1]_ is used to evaluate the polynomial. Even so,
for polynomials of high degree the values may be inaccurate due to
rounding errors. Use carefully.
References
----------
.. [1] I. N. Bronshtein, K. A. Semendyayev, and K. A. Hirsch (Eng.
trans. Ed.), *Handbook of Mathematics*, New York, Van Nostrand
Reinhold Co., 1985, pg. 720.
Examples
--------
>>> np.polyval([3,0,1], 5) # 3 * 5**2 + 0 * 5**1 + 1
76
>>> np.polyval([3,0,1], np.poly1d(5))
poly1d([ 76.])
>>> np.polyval(np.poly1d([3,0,1]), 5)
76
>>> np.polyval(np.poly1d([3,0,1]), np.poly1d(5))
poly1d([ 76.])
"""
p = NX.asarray(p)
if isinstance(x, poly1d):
y = 0
else:
x = NX.asarray(x)
y = NX.zeros_like(x)
for i in range(len(p)):
y = y * x + p[i]
return y
def polyadd(a1, a2):
"""
Find the sum of two polynomials.
Returns the polynomial resulting from the sum of two input polynomials.
Each input must be either a poly1d object or a 1D sequence of polynomial
coefficients, from highest to lowest degree.
Parameters
----------
a1, a2 : array_like or poly1d object
Input polynomials.
Returns
-------
out : ndarray or poly1d object
The sum of the inputs. If either input is a poly1d object, then the
output is also a poly1d object. Otherwise, it is a 1D array of
polynomial coefficients from highest to lowest degree.
See Also
--------
poly1d : A one-dimensional polynomial class.
poly, polyadd, polyder, polydiv, polyfit, polyint, polysub, polyval
Examples
--------
>>> np.polyadd([1, 2], [9, 5, 4])
array([9, 6, 6])
Using poly1d objects:
>>> p1 = np.poly1d([1, 2])
>>> p2 = np.poly1d([9, 5, 4])
>>> print p1
1 x + 2
>>> print p2
2
9 x + 5 x + 4
>>> print np.polyadd(p1, p2)
2
9 x + 6 x + 6
"""
truepoly = (isinstance(a1, poly1d) or isinstance(a2, poly1d))
a1 = atleast_1d(a1)
a2 = atleast_1d(a2)
diff = len(a2) - len(a1)
if diff == 0:
val = a1 + a2
elif diff > 0:
zr = NX.zeros(diff, a1.dtype)
val = NX.concatenate((zr, a1)) + a2
else:
zr = NX.zeros(abs(diff), a2.dtype)
val = a1 + NX.concatenate((zr, a2))
if truepoly:
val = poly1d(val)
return val
def polysub(a1, a2):
"""
Difference (subtraction) of two polynomials.
Given two polynomials `a1` and `a2`, returns ``a1 - a2``.
`a1` and `a2` can be either array_like sequences of the polynomials'
coefficients (including coefficients equal to zero), or `poly1d` objects.
Parameters
----------
a1, a2 : array_like or poly1d
Minuend and subtrahend polynomials, respectively.
Returns
-------
out : ndarray or poly1d
Array or `poly1d` object of the difference polynomial's coefficients.
See Also
--------
polyval, polydiv, polymul, polyadd
Examples
--------
.. math:: (2 x^2 + 10 x - 2) - (3 x^2 + 10 x -4) = (-x^2 + 2)
>>> np.polysub([2, 10, -2], [3, 10, -4])
array([-1, 0, 2])
"""
truepoly = (isinstance(a1, poly1d) or isinstance(a2, poly1d))
a1 = atleast_1d(a1)
a2 = atleast_1d(a2)
diff = len(a2) - len(a1)
if diff == 0:
val = a1 - a2
elif diff > 0:
zr = NX.zeros(diff, a1.dtype)
val = NX.concatenate((zr, a1)) - a2
else:
zr = NX.zeros(abs(diff), a2.dtype)
val = a1 - NX.concatenate((zr, a2))
if truepoly:
val = poly1d(val)
return val
def polymul(a1, a2):
"""
Find the product of two polynomials.
Finds the polynomial resulting from the multiplication of the two input
polynomials. Each input must be either a poly1d object or a 1D sequence
of polynomial coefficients, from highest to lowest degree.
Parameters
----------
a1, a2 : array_like or poly1d object
Input polynomials.
Returns
-------
out : ndarray or poly1d object
The polynomial resulting from the multiplication of the inputs. If
either inputs is a poly1d object, then the output is also a poly1d
object. Otherwise, it is a 1D array of polynomial coefficients from
highest to lowest degree.
See Also
--------
poly1d : A one-dimensional polynomial class.
poly, polyadd, polyder, polydiv, polyfit, polyint, polysub,
polyval
convolve : Array convolution. Same output as polymul, but has parameter
for overlap mode.
Examples
--------
>>> np.polymul([1, 2, 3], [9, 5, 1])
array([ 9, 23, 38, 17, 3])
Using poly1d objects:
>>> p1 = np.poly1d([1, 2, 3])
>>> p2 = np.poly1d([9, 5, 1])
>>> print p1
2
1 x + 2 x + 3
>>> print p2
2
9 x + 5 x + 1
>>> print np.polymul(p1, p2)
4 3 2
9 x + 23 x + 38 x + 17 x + 3
"""
truepoly = (isinstance(a1, poly1d) or isinstance(a2, poly1d))
a1, a2 = poly1d(a1), poly1d(a2)
val = NX.convolve(a1, a2)
if truepoly:
val = poly1d(val)
return val
def polydiv(u, v):
"""
Returns the quotient and remainder of polynomial division.
The input arrays are the coefficients (including any coefficients
equal to zero) of the "numerator" (dividend) and "denominator"
(divisor) polynomials, respectively.
Parameters
----------
u : array_like or poly1d
Dividend polynomial's coefficients.
v : array_like or poly1d
Divisor polynomial's coefficients.
Returns
-------
q : ndarray
Coefficients, including those equal to zero, of the quotient.
r : ndarray
Coefficients, including those equal to zero, of the remainder.
See Also
--------
poly, polyadd, polyder, polydiv, polyfit, polyint, polymul, polysub,
polyval
Notes
-----
Both `u` and `v` must be 0-d or 1-d (ndim = 0 or 1), but `u.ndim` need
not equal `v.ndim`. In other words, all four possible combinations -
``u.ndim = v.ndim = 0``, ``u.ndim = v.ndim = 1``,
``u.ndim = 1, v.ndim = 0``, and ``u.ndim = 0, v.ndim = 1`` - work.
Examples
--------
.. math:: \\frac{3x^2 + 5x + 2}{2x + 1} = 1.5x + 1.75, remainder 0.25
>>> x = np.array([3.0, 5.0, 2.0])
>>> y = np.array([2.0, 1.0])
>>> np.polydiv(x, y)
(array([ 1.5 , 1.75]), array([ 0.25]))
"""
    truepoly = (isinstance(u, poly1d) or isinstance(v, poly1d))
u = atleast_1d(u) + 0.0
v = atleast_1d(v) + 0.0
# w has the common type
w = u[0] + v[0]
m = len(u) - 1
n = len(v) - 1
scale = 1. / v[0]
q = NX.zeros((max(m - n + 1, 1),), w.dtype)
r = u.copy()
for k in range(0, m-n+1):
d = scale * r[k]
q[k] = d
r[k:k+n+1] -= d*v
while NX.allclose(r[0], 0, rtol=1e-14) and (r.shape[-1] > 1):
r = r[1:]
if truepoly:
return poly1d(q), poly1d(r)
return q, r
_poly_mat = re.compile(r"[*][*]([0-9]*)")
def _raise_power(astr, wrap=70):
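    # Pretty-printing helper for poly1d.__str__: lifts each "**<power>" exponent
    # onto a superscript line above the polynomial body, wrapping both lines at
    # `wrap` columns.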
n = 0
line1 = ''
line2 = ''
output = ' '
while True:
mat = _poly_mat.search(astr, n)
if mat is None:
break
span = mat.span()
power = mat.groups()[0]
partstr = astr[n:span[0]]
n = span[1]
toadd2 = partstr + ' '*(len(power)-1)
toadd1 = ' '*(len(partstr)-1) + power
if ((len(line2) + len(toadd2) > wrap) or
(len(line1) + len(toadd1) > wrap)):
output += line1 + "\n" + line2 + "\n "
line1 = toadd1
line2 = toadd2
else:
line2 += partstr + ' '*(len(power)-1)
line1 += ' '*(len(partstr)-1) + power
output += line1 + "\n" + line2
return output + astr[n:]
class poly1d(object):
"""
A one-dimensional polynomial class.
A convenience class, used to encapsulate "natural" operations on
polynomials so that said operations may take on their customary
form in code (see Examples).
Parameters
----------
c_or_r : array_like
The polynomial's coefficients, in decreasing powers, or if
the value of the second parameter is True, the polynomial's
roots (values where the polynomial evaluates to 0). For example,
``poly1d([1, 2, 3])`` returns an object that represents
:math:`x^2 + 2x + 3`, whereas ``poly1d([1, 2, 3], True)`` returns
one that represents :math:`(x-1)(x-2)(x-3) = x^3 - 6x^2 + 11x -6`.
r : bool, optional
If True, `c_or_r` specifies the polynomial's roots; the default
is False.
variable : str, optional
Changes the variable used when printing `p` from `x` to `variable`
(see Examples).
Examples
--------
Construct the polynomial :math:`x^2 + 2x + 3`:
>>> p = np.poly1d([1, 2, 3])
>>> print np.poly1d(p)
2
1 x + 2 x + 3
Evaluate the polynomial at :math:`x = 0.5`:
>>> p(0.5)
4.25
Find the roots:
>>> p.r
array([-1.+1.41421356j, -1.-1.41421356j])
>>> p(p.r)
array([ -4.44089210e-16+0.j, -4.44089210e-16+0.j])
These numbers in the previous line represent (0, 0) to machine precision
Show the coefficients:
>>> p.c
array([1, 2, 3])
Display the order (the leading zero-coefficients are removed):
>>> p.order
2
Show the coefficient of the k-th power in the polynomial
(which is equivalent to ``p.c[-(i+1)]``):
>>> p[1]
2
Polynomials can be added, subtracted, multiplied, and divided
(returns quotient and remainder):
>>> p * p
poly1d([ 1, 4, 10, 12, 9])
>>> (p**3 + 4) / p
(poly1d([ 1., 4., 10., 12., 9.]), poly1d([ 4.]))
``asarray(p)`` gives the coefficient array, so polynomials can be
used in all functions that accept arrays:
>>> p**2 # square of polynomial
poly1d([ 1, 4, 10, 12, 9])
>>> np.square(p) # square of individual coefficients
array([1, 4, 9])
The variable used in the string representation of `p` can be modified,
using the `variable` parameter:
>>> p = np.poly1d([1,2,3], variable='z')
>>> print p
2
1 z + 2 z + 3
Construct a polynomial from its roots:
>>> np.poly1d([1, 2], True)
poly1d([ 1, -3, 2])
This is the same polynomial as obtained by:
>>> np.poly1d([1, -1]) * np.poly1d([1, -2])
poly1d([ 1, -3, 2])
"""
coeffs = None
order = None
variable = None
__hash__ = None
def __init__(self, c_or_r, r=0, variable=None):
if isinstance(c_or_r, poly1d):
for key in c_or_r.__dict__.keys():
self.__dict__[key] = c_or_r.__dict__[key]
if variable is not None:
self.__dict__['variable'] = variable
return
if r:
c_or_r = poly(c_or_r)
c_or_r = atleast_1d(c_or_r)
if len(c_or_r.shape) > 1:
raise ValueError("Polynomial must be 1d only.")
c_or_r = trim_zeros(c_or_r, trim='f')
if len(c_or_r) == 0:
c_or_r = NX.array([0.])
self.__dict__['coeffs'] = c_or_r
self.__dict__['order'] = len(c_or_r) - 1
if variable is None:
variable = 'x'
self.__dict__['variable'] = variable
def __array__(self, t=None):
if t:
return NX.asarray(self.coeffs, t)
else:
return NX.asarray(self.coeffs)
def __repr__(self):
vals = repr(self.coeffs)
vals = vals[6:-1]
return "poly1d(%s)" % vals
def __len__(self):
return self.order
def __str__(self):
thestr = "0"
var = self.variable
# Remove leading zeros
coeffs = self.coeffs[NX.logical_or.accumulate(self.coeffs != 0)]
N = len(coeffs)-1
def fmt_float(q):
s = '%.4g' % q
if s.endswith('.0000'):
s = s[:-5]
return s
for k in range(len(coeffs)):
if not iscomplex(coeffs[k]):
coefstr = fmt_float(real(coeffs[k]))
elif real(coeffs[k]) == 0:
coefstr = '%sj' % fmt_float(imag(coeffs[k]))
else:
coefstr = '(%s + %sj)' % (fmt_float(real(coeffs[k])),
fmt_float(imag(coeffs[k])))
power = (N-k)
if power == 0:
if coefstr != '0':
newstr = '%s' % (coefstr,)
else:
if k == 0:
newstr = '0'
else:
newstr = ''
elif power == 1:
if coefstr == '0':
newstr = ''
elif coefstr == 'b':
newstr = var
else:
newstr = '%s %s' % (coefstr, var)
else:
if coefstr == '0':
newstr = ''
elif coefstr == 'b':
newstr = '%s**%d' % (var, power,)
else:
newstr = '%s %s**%d' % (coefstr, var, power)
if k > 0:
if newstr != '':
if newstr.startswith('-'):
thestr = "%s - %s" % (thestr, newstr[1:])
else:
thestr = "%s + %s" % (thestr, newstr)
else:
thestr = newstr
return _raise_power(thestr)
def __call__(self, val):
return polyval(self.coeffs, val)
def __neg__(self):
return poly1d(-self.coeffs)
def __pos__(self):
return self
def __mul__(self, other):
if isscalar(other):
return poly1d(self.coeffs * other)
else:
other = poly1d(other)
return poly1d(polymul(self.coeffs, other.coeffs))
def __rmul__(self, other):
if isscalar(other):
return poly1d(other * self.coeffs)
else:
other = poly1d(other)
return poly1d(polymul(self.coeffs, other.coeffs))
def __add__(self, other):
other = poly1d(other)
return poly1d(polyadd(self.coeffs, other.coeffs))
def __radd__(self, other):
other = poly1d(other)
return poly1d(polyadd(self.coeffs, other.coeffs))
def __pow__(self, val):
if not isscalar(val) or int(val) != val or val < 0:
raise ValueError("Power to non-negative integers only.")
res = [1]
for _ in range(val):
res = polymul(self.coeffs, res)
return poly1d(res)
def __sub__(self, other):
other = poly1d(other)
return poly1d(polysub(self.coeffs, other.coeffs))
def __rsub__(self, other):
other = poly1d(other)
return poly1d(polysub(other.coeffs, self.coeffs))
def __div__(self, other):
if isscalar(other):
return poly1d(self.coeffs/other)
else:
other = poly1d(other)
return polydiv(self, other)
__truediv__ = __div__
def __rdiv__(self, other):
if isscalar(other):
return poly1d(other/self.coeffs)
else:
other = poly1d(other)
return polydiv(other, self)
__rtruediv__ = __rdiv__
def __eq__(self, other):
if self.coeffs.shape != other.coeffs.shape:
return False
return (self.coeffs == other.coeffs).all()
def __ne__(self, other):
return not self.__eq__(other)
def __setattr__(self, key, val):
raise ValueError("Attributes cannot be changed this way.")
def __getattr__(self, key):
if key in ['r', 'roots']:
return roots(self.coeffs)
elif key in ['c', 'coef', 'coefficients']:
return self.coeffs
elif key in ['o']:
return self.order
else:
try:
return self.__dict__[key]
except KeyError:
raise AttributeError(
"'%s' has no attribute '%s'" % (self.__class__, key))
def __getitem__(self, val):
ind = self.order - val
if val > self.order:
return 0
if val < 0:
return 0
return self.coeffs[ind]
def __setitem__(self, key, val):
ind = self.order - key
if key < 0:
raise ValueError("Does not support negative powers.")
if key > self.order:
zr = NX.zeros(key-self.order, self.coeffs.dtype)
self.__dict__['coeffs'] = NX.concatenate((zr, self.coeffs))
self.__dict__['order'] = key
ind = 0
self.__dict__['coeffs'][ind] = val
return
def __iter__(self):
return iter(self.coeffs)
def integ(self, m=1, k=0):
"""
Return an antiderivative (indefinite integral) of this polynomial.
Refer to `polyint` for full documentation.
See Also
--------
polyint : equivalent function
"""
return poly1d(polyint(self.coeffs, m=m, k=k))
def deriv(self, m=1):
"""
Return a derivative of this polynomial.
Refer to `polyder` for full documentation.
See Also
--------
polyder : equivalent function
"""
return poly1d(polyder(self.coeffs, m=m))
# Stuff to do on module import
warnings.simplefilter('always', RankWarning)
|
bsd-3-clause
|
DeercoderResearch/0.5-CoCo
|
PythonAPI/pycocotools/coco.py
|
5
|
12534
|
__author__ = 'tylin'
__version__ = 1.0
# Interface for accessing the Microsoft COCO dataset.
# Microsoft COCO is a large image dataset designed for object detection,
# segmentation, and caption generation. pycocotools is a Python API that
# assists in loading, parsing and visualizing the annotations in COCO.
# Please visit http://mscoco.org/ for more information on COCO, including
# for the data, paper, and tutorials. The exact format of the annotations
# is also described on the COCO website. For example usage of the pycocotools
# please see pycocotools_demo.ipynb. In addition to this API, please download both
# the COCO images and annotations in order to run the demo.
# An alternative to using the API is to load the annotations directly
# into a Python dictionary.
# Using the API provides additional utility functions. Note that this API
# supports both *instance* and *caption* annotations. In the case of
# captions not all functions are defined (e.g. categories are undefined).
# The following API functions are defined:
# COCO - COCO api class that loads COCO annotation file and prepare data structures.
# decodeMask - Decode binary mask M encoded via run-length encoding.
# encodeMask - Encode binary mask M using run-length encoding.
# getAnnIds - Get ann ids that satisfy given filter conditions.
# getCatIds - Get cat ids that satisfy given filter conditions.
# getImgIds - Get img ids that satisfy given filter conditions.
# loadAnns - Load anns with the specified ids.
# loadCats - Load cats with the specified ids.
# loadImgs - Load imgs with the specified ids.
# segToMask - Convert polygon segmentation to binary mask.
# showAnns - Display the specified annotations.
# Throughout the API "ann"=annotation, "cat"=category, and "img"=image.
# Help on each function can be accessed by: "help COCO>function".
# See also COCO>decodeMask,
# COCO>encodeMask, COCO>getAnnIds, COCO>getCatIds,
# COCO>getImgIds, COCO>loadAnns, COCO>loadCats,
# COCO>loadImgs, COCO>segToMask, COCO>showAnns
# Microsoft COCO Toolbox. Version 1.0
# Data, paper, and tutorials available at: http://mscoco.org/
# Code written by Piotr Dollar and Tsung-Yi Lin, 2014.
# Licensed under the Simplified BSD License [see bsd.txt]
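# A minimal usage sketch (annotation file path and category name are only
# illustrative placeholders; adjust them to your local COCO download):
#
#   coco = COCO('annotations/instances_val2014.json')
#   catIds = coco.getCatIds(catNms=['person'])
#   imgIds = coco.getImgIds(catIds=catIds)
#   annIds = coco.getAnnIds(imgIds=imgIds[:1], catIds=catIds, iscrowd=None)
#   anns = coco.loadAnns(annIds)
#   coco.showAnns(anns)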
import json
import datetime
import matplotlib.pyplot as plt
from matplotlib.collections import PatchCollection
from matplotlib.patches import Polygon
import numpy as np
from skimage.draw import polygon
class COCO:
def __init__(self, annotation_file='annotations/instances_val2014_1_0.json'):
"""
Constructor of Microsoft COCO helper class for reading and visualizing annotations.
:param annotation_file (str): location of annotation file
:param image_folder (str): location to the folder that hosts images.
:return:
"""
# load dataset
print 'loading annotations into memory...'
time_t = datetime.datetime.utcnow()
dataset = json.load(open(annotation_file, 'r'))
print datetime.datetime.utcnow() - time_t
print 'annotations loaded!'
time_t = datetime.datetime.utcnow()
# create index
print 'creating index...'
imgToAnns = {ann['image_id']: [] for ann in dataset['annotations']}
anns = {ann['id']: [] for ann in dataset['annotations']}
for ann in dataset['annotations']:
imgToAnns[ann['image_id']] += [ann]
anns[ann['id']] = ann
imgs = {im['id']: {} for im in dataset['images']}
for img in dataset['images']:
imgs[img['id']] = img
cats = []
catToImgs = []
if dataset['type'] == 'instances':
cats = {cat['id']: [] for cat in dataset['categories']}
for cat in dataset['categories']:
cats[cat['id']] = cat
catToImgs = {cat['id']: [] for cat in dataset['categories']}
for ann in dataset['annotations']:
catToImgs[ann['category_id']] += [ann['image_id']]
print datetime.datetime.utcnow() - time_t
print 'index created!'
# create class members
self.anns = anns
self.imgToAnns = imgToAnns
self.catToImgs = catToImgs
self.imgs = imgs
self.cats = cats
self.dataset = dataset
def info(self):
"""
Print information about the annotation file.
:return:
"""
        for key, value in self.dataset['info'].items():
print '%s: %s'%(key, value)
def getAnnIds(self, imgIds=[], catIds=[], areaRng=[], iscrowd=None):
"""
Get ann ids that satisfy given filter conditions. default skips that filter
:param imgIds (int array) : get anns for given imgs
catIds (int array) : get anns for given cats
areaRng (float array) : get anns for given area range (e.g. [0 inf])
iscrowd (boolean) : get anns for given crowd label (False or True)
:return: ids (int array) : integer array of ann ids
"""
imgIds = imgIds if type(imgIds) == list else [imgIds]
catIds = catIds if type(catIds) == list else [catIds]
if len(imgIds) == len(catIds) == len(areaRng) == 0:
anns = self.dataset['annotations']
else:
if not len(imgIds) == 0:
anns = sum([self.imgToAnns[imgId] for imgId in imgIds if imgId in self.imgToAnns],[])
else:
anns = self.dataset['annotations']
anns = anns if len(catIds) == 0 else [ann for ann in anns if ann['category_id'] in catIds]
anns = anns if len(areaRng) == 0 else [ann for ann in anns if ann['area'] > areaRng[0] and ann['area'] < areaRng[1]]
if self.dataset['type'] == 'instances':
if not iscrowd == None:
ids = [ann['id'] for ann in anns if ann['iscrowd'] == iscrowd]
else:
ids = [ann['id'] for ann in anns]
else:
ids = [ann['id'] for ann in anns]
return ids
def getCatIds(self, catNms=[], supNms=[], catIds=[]):
"""
        Get cat ids that satisfy given filter conditions. default skips that filter.
:param catNms (str array) : get cats for given cat names
:param supNms (str array) : get cats for given supercategory names
:param catIds (int array) : get cats for given cat ids
:return: ids (int array) : integer array of cat ids
"""
catNms = catNms if type(catNms) == list else [catNms]
supNms = supNms if type(supNms) == list else [supNms]
catIds = catIds if type(catIds) == list else [catIds]
if len(catNms) == len(supNms) == len(catIds) == 0:
cats = self.dataset['categories']
else:
cats = self.dataset['categories']
cats = cats if len(catNms) == 0 else [cat for cat in cats if cat['name'] in catNms]
cats = cats if len(supNms) == 0 else [cat for cat in cats if cat['supercategory'] in supNms]
cats = cats if len(catIds) == 0 else [cat for cat in cats if cat['id'] in catIds]
ids = [cat['id'] for cat in cats]
return ids
def getImgIds(self, imgIds=[], catIds=[]):
'''
Get img ids that satisfy given filter conditions.
:param imgIds (int array) : get imgs for given ids
:param catIds (int array) : get imgs with all given cats
:return: ids (int array) : integer array of img ids
'''
imgIds = imgIds if type(imgIds) == list else [imgIds]
catIds = catIds if type(catIds) == list else [catIds]
if len(imgIds) == len(catIds) == 0:
ids = self.imgs.keys()
else:
ids = set(imgIds)
for catId in catIds:
if len(ids) == 0:
ids = set(self.catToImgs[catId])
else:
ids &= set(self.catToImgs[catId])
return list(ids)
def loadAnns(self, ids=[]):
"""
Load anns with the specified ids.
:param ids (int array) : integer ids specifying anns
:return: anns (object array) : loaded ann objects
"""
if type(ids) == list:
return [self.anns[id] for id in ids]
elif type(ids) == int:
return [self.anns[ids]]
def loadCats(self, ids=[]):
"""
Load cats with the specified ids.
:param ids (int array) : integer ids specifying cats
:return: cats (object array) : loaded cat objects
"""
if type(ids) == list:
return [self.cats[id] for id in ids]
elif type(ids) == int:
return [self.cats[ids]]
def loadImgs(self, ids=[]):
"""
        Load imgs with the specified ids.
:param ids (int array) : integer ids specifying img
:return: imgs (object array) : loaded img objects
"""
if type(ids) == list:
return [self.imgs[id] for id in ids]
elif type(ids) == int:
return [self.imgs[ids]]
def showAnns(self, anns):
"""
Display the specified annotations.
:param anns (array of object): annotations to display
:return: None
"""
if len(anns) == 0:
return 0
if self.dataset['type'] == 'instances':
ax = plt.gca()
polygons = []
color = []
for ann in anns:
c = np.random.random((1, 3)).tolist()[0]
if not ann['iscrowd']:
# polygon
for seg in ann['segmentation']:
poly = np.array(seg).reshape((len(seg)/2, 2))
polygons.append(Polygon(poly, True,alpha=0.4))
color.append(c)
else:
# mask
mask = COCO.decodeMask(ann['segmentation'])
img = np.ones( (mask.shape[0], mask.shape[1], 3) )
light_green = np.array([2.0,166.0,101.0])/255
for i in range(3):
img[:,:,i] = light_green[i]
ax.imshow(np.dstack( (img, mask*0.5) ))
p = PatchCollection(polygons, facecolors=color, edgecolors=(0,0,0,1), linewidths=3, alpha=0.4)
ax.add_collection(p)
if self.dataset['type'] == 'captions':
for ann in anns:
print ann['caption']
@staticmethod
def decodeMask(R):
"""
Decode binary mask M encoded via run-length encoding.
:param R (object RLE) : run-length encoding of binary mask
:return: M (bool 2D array) : decoded binary mask
"""
N = len(R['counts'])
M = np.zeros( (R['size'][0]*R['size'][1], ))
n = 0
val = 1
for pos in range(N):
val = not val
for c in range(R['counts'][pos]):
M[n] = val
n += 1
return M.reshape((R['size']), order='F')
@staticmethod
def encodeMask(M):
"""
Encode binary mask M using run-length encoding.
:param M (bool 2D array) : binary mask to encode
:return: R (object RLE) : run-length encoding of binary mask
"""
[h, w] = M.shape
M = M.flatten(order='F')
N = len(M)
counts_list = []
pos = 0
# counts
counts_list.append(1)
diffs = np.logical_xor(M[0:N-1], M[1:N])
for diff in diffs:
if diff:
pos +=1
counts_list.append(1)
else:
counts_list[pos] += 1
# if array starts from 1. start with 0 counts for 0
if M[0] == 1:
counts_list = [0] + counts_list
return {'size': [h, w],
'counts': counts_list ,
}
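    # Round-trip sketch for the two RLE helpers above (toy mask, hypothetical values):
    #   M = np.array([[0, 1], [1, 1]], dtype=bool)
    #   R = COCO.encodeMask(M)                              # {'size': [2, 2], 'counts': [1, 3]}
    #   np.array_equal(COCO.decodeMask(R).astype(bool), M)  # True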
@staticmethod
def segToMask( S, h, w ):
"""
Convert polygon segmentation to binary mask.
:param S (float array) : polygon segmentation mask
:param h (int) : target mask height
:param w (int) : target mask width
:return: M (bool 2D array) : binary mask
"""
M = np.zeros((h,w), dtype=np.bool)
for s in S:
N = len(s)
rr, cc = polygon(np.array(s[1:N:2]), np.array(s[0:N:2])) # (y, x)
M[rr, cc] = 1
return M
|
bsd-2-clause
|
trichter/sito
|
stations.py
|
1
|
5209
|
# by TR
from obspy.core import AttribDict
from obspy.core.util import gps2DistAzimuth
from sito.util import gps2DistDegree
import logging
import re
log = logging.getLogger(__name__)
class Stations(AttribDict):
"""
Class for holding station data.
"""
    regex = """ # SH-compatible
\s*(?P<name>\w+)\s*
lat:\s*(?P<latitude>-?\+?[\d.]+)\s*
lon:\s*(?P<longitude>-?\+?[\d.]+)\s*
(?:elevation:\s*(?P<elevation>-?\+?[\d.]+)\s*|)
(?:info:\s*(?P<info>.*)|).*$"""
example = """
PB01 lat: -21.0432 lon: -69.4874 elevation: 900 info: Plate_Boundary_Station_PB01,_Chile
PB02 lat: -21.3197 lon: -69.8960 elevation: 1015 info: Plate_Boundary_Station_PB02,_Chile
PB03 lat: -22.0476 lon: -69.7533 elevation: 1460 info: Plate_Boundary_Station_PB03,_Chile
GRC3 lat:+48.8901739 lon: +11.5858216 elevation: 438.0 array:01 xrel: 26.722525 yrel: -89.032738 name:Graefenberg,_F.R.G.
GEA3 lat:48.83501053 lon:13.70003414 array:05 xrel:-0.13108 yrel:-1.12033"""
format_header = 'Stations:\nname lat lon elev info\n'
format_file = '{0:5} {latitude:-8.3f} {longitude:-8.3f} {elevation:6.1f} {info}\n'
@classmethod
def read(cls, filename):
"""
        Read a station file and return an instance of Stations.
        The file format has to be like in Stations.example.
"""
with open(filename, 'r') as file_:
filedata = file_.read()
# Create an iterator over matches in Stations file_
st_matches = re.finditer(cls.regex, filedata, re.VERBOSE + re.MULTILINE)
# Create a list of dictionaries of PDE data
st_list = [i.groupdict() for i in st_matches]
st_dic = {}
for i in st_list:
st_dic[i['name']] = AttribDict({'latitude':float(i['latitude']), 'longitude':float(i['longitude']), 'info':i['info']})
try: st_dic[i['name']]['elevation'] = float(i['elevation'])
except TypeError: pass
        log.info('Read station information of stations %s from file %s' % (' '.join(sorted(st_dic.keys())), filename))
return cls(st_dic)
def __repr__(self):
return 'Stations({0})'.format(super(Stations, self).__repr__())
def __str__(self):
return self.write(infile=False, header=True)
def write(self, filename=None, infile=True, header=False):
"""
        Write station information to a file or return it as a string.
        :param filename: name of the output file
        :param infile: if True write into filename, otherwise return a string
:param header: insert a header
:return: string if infile is False
"""
str_list = []
if header: str_list.append(self.format_header)
for station in self: str_list.append(self.format_file.format(station, ** self[station]))
if infile:
with open(filename, 'w') as f:
f.write(''.join(str_list))
log.info('Write station data to ' + filename)
else:
return ''.join(str_list)
def pick(self, str_keys, replace=True):
"""
Pick stations.
:param str_keys: string with station names eg. 'PB01 PB02'
:param replace: if True the data in the station list is overwritten
:return: instance of station data
"""
newdata = {}
if not str_keys.startswith('-'):
log.info('Pick stations ' + str_keys)
for key in str_keys.split(): newdata[key] = self[key]
else:
log.info('Delete stations ' + str_keys[1:] + ' from list')
for key in self.keys():
if key not in str_keys[1:].split(): newdata[key] = self[key]
if replace:
self.clear()
for key in newdata.keys(): self[key] = newdata[key]
return self.__class__(newdata)
def getNames(self):
"""
Return station names in one string.
"""
return ' '.join(self.keys())
def dist(self, st1, st2, indeg=False):
dist_deg = gps2DistDegree(self[st1].latitude, self[st1].longitude,
self[st2].latitude, self[st2].longitude)
dist_km = gps2DistAzimuth(self[st1].latitude, self[st1].longitude, self[st2].latitude, self[st2].longitude)[0] / 1.e3
if indeg is True:
return dist_deg
elif indeg is False:
return dist_km
else:
return dist_km, dist_deg
def plot(self, basemap, annotate=True, lsize='small', kwargs_an=None, **kwargs_in):
kwargs = dict(marker='o')
kwargs.update(kwargs_in)
if kwargs_an is None:
kwargs_an = {}
for key, val in self.items():
x, y = basemap(val.longitude, val.latitude)
basemap.plot((x,), (y,), **kwargs)
if annotate:
import matplotlib.pylab as plt
plt.annotate(key, (x, y), xytext=(3, 3),
textcoords='offset points', size=lsize, **kwargs_an)
def IPOCStations():
return Stations.read('/home/richter/Data/stations_ipoc.txt')
def ParkfieldStations():
return Stations.read('/home/richter/Data/stations.txt')
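# Minimal usage sketch (file path and station names are hypothetical):
#   st = Stations.read('stations.txt')
#   print(st.getNames())
#   print(st.dist('PB01', 'PB02', indeg=True))  # inter-station distance in degrees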
if __name__ == '__main__':
pass
|
mit
|
leal26/pyXFOIL
|
examples/morphing/flight_conditions/nonmorphed/range_constant_aoa.py
|
2
|
6181
|
import aeropy.xfoil_module as xf
from aeropy.geometry.airfoil import CST, create_x
from aeropy.morphing.camber_2D import *
from aeropy.aero_module import air_properties, Reynolds, LLT_calculator
from scipy.interpolate import griddata, RegularGridInterpolator
import numpy as np
import matplotlib.pyplot as plt
import pandas
from sklearn.cluster import KMeans
from sklearn.metrics import pairwise_distances_argmin_min
import scipy
import scipy.io
import scipy.optimize
import scipy.integrate
import math
def aircraft_range_varying_V(f_L, f_LD, AOA):
def to_integrate(weight):
# velocity = 0.514444*108 # m/s (113 KTAS)
def calculate_velocity(AOA):
def residual(V):
CL = f_L([V, AOA])[0]
span = 11
chord_root = span/16.2
return abs(V - math.sqrt(weight/(.5*density*(span*chord_root))))
res = scipy.optimize.minimize(residual, 30, bounds = [[20, 65],])#, options={'ftol':1e-9})
return res.x[0]
velocity = calculate_velocity(AOA)
lift_to_drag = f_LD([velocity, AOA])
span = 10.9728
RPM = 1800
a = 0.3089 # (lb/hr)/BTU
b = 0.008*RPM+19.607 # lb/hr
lbhr_to_kgs = 0.000125998
BHP_to_watt = 745.7
eta = 0.85
thrust = weight/lift_to_drag
power_SI = thrust*velocity/eta
power_BHP = power_SI/BHP_to_watt
mass_flow = (a*power_BHP + b)
mass_flow_SI = mass_flow*lbhr_to_kgs
SFC = mass_flow_SI/thrust
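        # Breguet-style range integrand: dR/dW = V * (L/D) / (g * SFC * W)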
dR = velocity/g/SFC*lift_to_drag/weight
return dR*0.001 # *0.0005399
AOA_list = []
    g = 9.81  # m/s**2
fuel = 56*6.01*0.4535*g
initial_weight = 1111*g
final_weight = initial_weight-fuel
x = np.linspace(final_weight, initial_weight, 100)
y = []
for x_i in x:
y.append(to_integrate(x_i)[0])
range = scipy.integrate.simps(y, x)
return range
# ==============================================================================
# Inputs
# ==============================================================================
altitude = 10000 # ft
air_props = air_properties(altitude, unit='feet')
density = air_props['Density']
# data = pandas.read_csv('performance_grid.csv')
# psi_spars = [0.1, 0.3, 0.6, 0.8]
# c_P = 1.0
# ranges = []
# for i in range(len(data.values)):
# AC = data.values[i,0:4]
# velocity = data.values[i,-4]
# AOA = data.values[i,-5]
# cl= data.values[i,-3]
# cd = data.values[i,-2]
# CL, CD = coefficient_LLT(AC, velocity, AOA)
# data.values[i, -3] = CL
# data.values[i, -2] = CD
# data.values[i, -1] = CL/CD
# print(i, CL, CD)
# data = data.drop_duplicates()
import pickle
# f = open('wing.p', 'wb')
# pickle.dump(data, f)
# f.close()
state = 'nonmorphed'
concepts = ['NACA0012', 'NACA4415', 'NACA641212', 'glider']
#
# plt.figure()
# for concept in concepts:
# mat = scipy.io.loadmat(state + '_' + concept)
# aoa = mat['aoa'][0]
# velocity = mat['V'][0]
# cl = mat['CL'].T
# LD_ratio = mat['lift_to_drag']
# # print(aoa)
# # print(velocity)
# # print(cl)
# f_LD = RegularGridInterpolator((velocity, aoa), LD_ratio, fill_value = 0, bounds_error = False)
# f_L = RegularGridInterpolator((velocity, aoa), cl, fill_value = 0, bounds_error = False)
# velocity = [20]
# aoas = np.linspace(0,12,1000)
# for i in range(len(velocity)):
# data_i = np.array([velocity[i]*np.ones(np.shape(aoas)), aoas]).T
# plt.plot(aoas, f_L(data_i), label = concept)
# # plt.scatter(aoas, f_L((aoas, velocity[i]*np.ones(np.shape(aoas)))))
# plt.legend()
# plt.ylabel('cl')
# plt.show()
# plt.figure()
# for concept in concepts:
# mat = scipy.io.loadmat(state + '_' + concept)
# aoa = mat['aoa'][0]
# velocity = mat['V'][0]
# cl = mat['CL'].T
# LD_ratio = mat['lift_to_drag']
# f_LD = RegularGridInterpolator((velocity, aoa), LD_ratio, fill_value = 0, bounds_error = False)
# f_L = RegularGridInterpolator((velocity, aoa), cl, fill_value = 0, bounds_error = False)
# velocity = [20]
# aoas = np.linspace(0,12,100)
# for i in range(len(velocity)):
# data_i = np.array([velocity[i]*np.ones(np.shape(aoas)), aoas]).T
# plt.plot(aoas, f_LD(data_i), label = concept)
# # plt.scatter(aoas, f_LD((aoas, velocity[i]*np.ones(np.shape(aoas)))))
# plt.legend()
# plt.ylabel('Lift-to-drag ratio')
# plt.show()
range_data = {}
plt.figure()
for concept in concepts:
mat = scipy.io.loadmat(state + '_' + concept)
aoa = mat['aoa'][0]
velocity = mat['V'][0]
cl = mat['CL'].T
LD_ratio = mat['lift_to_drag']
f_LD = RegularGridInterpolator((velocity, aoa), LD_ratio, fill_value = 0, bounds_error = False)
f_L = RegularGridInterpolator((velocity, aoa), cl, fill_value = 0, bounds_error = False)
# velocity = np.linspace(20, 65, 7)
# plt.figure()
# aoas = np.linspace(0,12,1000)
# for i in range(len(velocity)):
# data_i = np.array([velocity[i]*np.ones(np.shape(aoas)), aoas]).T
# plt.plot(aoas, f_L(data_i), label = velocity[i])
# # plt.scatter(aoas, f_L((aoas, velocity[i]*np.ones(np.shape(aoas)))))
# plt.legend()
# plt.show()
# plt.figure()
# aoas = np.linspace(0,12,1000)
# for i in range(len(velocity)):
# data_i = np.array([velocity[i]*np.ones(np.shape(aoas)), aoas]).T
# plt.plot(aoas, f_LD(data_i), label = velocity[i])
# # plt.scatter(aoas, f_LD((aoas, velocity[i]*np.ones(np.shape(aoas)))))
# plt.legend()
# plt.show()
ranges = []
# velocity = np.linspace(20, 60, 5)
for i in range(len(aoa)):
range_i = aircraft_range_varying_V(f_L, f_LD, aoa[i])
# plt.plot(np.arange(len(AOA_i)), AOA_i, label=velocity[i])
# plt.scatter(np.arange(len(AOA_i)),AOA_i)
print(i, aoa[i], range_i)
ranges.append(range_i)
# print(velocity[36])
range_data[concept] = ranges
plt.plot(aoa, ranges, lw=2, label=concept)
f = open('ranges_aoa.p', 'wb')
pickle.dump(range_data, f)
f.close()
# plt.xlim(min(velocity), max(velocity))
# plt.ylim(min(ranges), max(ranges))
plt.xlabel('Angle of Attack')
plt.ylabel('Range (km)')
plt.legend()
plt.show()
|
mit
|
chappers/sklearn-recipes
|
streaming_take2/dpp_classifier_dppsample.py
|
3
|
11109
|
import sklearn
from sklearn.datasets import make_regression, make_classification
from sklearn.linear_model import SGDRegressor, SGDClassifier
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics.pairwise import euclidean_distances
import pandas as pd
import numpy as np
from scipy import stats
from scipy.stats import wilcoxon
from sklearn.metrics.pairwise import rbf_kernel
from sklearn.decomposition import PCA, KernelPCA
from sklearn.kernel_approximation import Nystroem
from dpp import sample_dpp, decompose_kernel, sample_conditional_dpp, elem_sympoly
import random
def entropy(X):
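    """
    Unsupervised representation entropy, intended here as a redundancy measure
    (see the commented-out selection code below): features are min-max scaled,
    pairwise distances are mapped to similarities sim_pq = exp(-alpha * d_pq)
    with alpha chosen so the mean distance maps to similarity 0.5, and a
    binary-entropy style sum is taken over the upper triangle.
    """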
mm = MinMaxScaler()
X_mm = mm.fit_transform(X)
Dpq = euclidean_distances(X_mm)
D_bar = np.mean([x for x in np.triu(Dpq).flatten() if x != 0])
alpha = -np.log(0.5)/D_bar
sim_pq = np.exp(-alpha * Dpq)
log_sim_pq = np.log(sim_pq)
entropy = -2*np.sum(np.triu(sim_pq*log_sim_pq + ((1-sim_pq)*np.log((1-sim_pq))), 1))
return entropy
def wilcoxon_group(X, f):
"""
Wilcoxon is a very aggressive selector in an unsupervised sense.
Do we require a supervised group selection? (probably)
Probably one that is score based in order to select the "best" ones
similar to OGFS?
"""
# X is a matrix, f is a single vector
if len(X.shape) == 1:
return wilcoxon(X, f).pvalue
    # otherwise test each column of X against f and return the largest p-value
    # (the most conservative of the pairwise tests)
    return np.max([wilcoxon(x, f).pvalue for x in X.T])
"""
Implement DPP version that is similar to what is done above
sketch of solution
------------------
DPP requires a known number of parameters to check at each partial fit!
"""
class DPPClassifier(SGDClassifier):
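    """
    Streaming SGD classifier that uses DPP sampling to pick candidate feature
    columns at each partial fit.
    Minimal usage sketch (batch DataFrames and column arrival order are
    hypothetical; pandas DataFrames are expected so columns can be tracked by
    name)::
        clf = DPPClassifier(max_iter=5, tol=1e-3)
        clf.fit(X_batch0, y)           # initial batch of feature columns
        clf.partial_fit(X_batch1, y)   # X_batch1 adds newly streamed columns
        proba = clf.predict_proba(X_batch1)
    """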
def __init__(self, loss="log", penalty='l2', alpha=0.0001, l1_ratio=0.15,
fit_intercept=True, max_iter=None, tol=None, shuffle=True,
verbose=0, epsilon=0.1, n_jobs=1,
random_state=None, learning_rate="optimal", eta0=0.0,
power_t=0.5, class_weight=None, warm_start=False,
average=False, n_iter=None,
intragroup_decay = 0.9, pca_alpha=0.05,
intragroup_alpha=0.05, intergroup_thres=None):
super(DPPClassifier, self).__init__(
loss=loss, penalty=penalty, alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=fit_intercept, max_iter=max_iter, tol=tol,
shuffle=shuffle, verbose=verbose, epsilon=epsilon, n_jobs=n_jobs,
random_state=random_state, learning_rate=learning_rate, eta0=eta0,
power_t=power_t, class_weight=class_weight, warm_start=warm_start,
average=average, n_iter=n_iter)
self.coef_info = {'cols': [], 'coef':[], 'excluded_cols': []}
self.seen_cols = []
self.base_shape = None
self.dpp_k = {'pca': 0, 'kpca':0}
self.unseen_only = False
self.intragroup_alpha = intragroup_alpha
self.intergroup_thres = intergroup_thres if intergroup_thres is not None else epsilon
def _dpp_estimate_k(self, feat_dist):
"""
L is the input kernel
"""
"""
L = decompose_kernel(feat_dist)
k = 0 #len(self.coef_info['cols'])
N = L['D'].shape[0]
E = elem_sympoly(L['D'], N)
el_list = list(range(k+1, N+1))
E_ls = [E[x, -1] for x in el_list]
#print(E_ls)
E_ls = np.abs(E_ls)
E_ls = E_ls / (np.sum(E_ls))
#print(E_ls)
dpp_k_prime = np.random.choice(el_list, p=E_ls)
self.dpp_k['pca'] = dpp_k_prime
self.dpp_k['kpca'] = dpp_k_prime
"""
self.dpp_k['pca'] = None
def add_column_exclusion(self, cols):
self.coef_info['excluded_cols'] = list(self.coef_info['excluded_cols']) + list(cols)
def _fit_columns(self, X_, return_x=True, transform_only=False):
"""
        Filter out columns previously marked as uninformative ("excluded") and
        reorder the remaining columns so that already-selected columns come first.
        `return_x` and `transform_only` are accepted for interface compatibility,
        but the filtered DataFrame is always returned.
"""
X = X_[X_.columns.difference(self.coef_info['excluded_cols'])]
# order the columns correctly...
col_order = self.coef_info['cols'] + list([x for x in X.columns if x not in self.coef_info['cols']])
X = X[col_order]
return X
def _reg_penalty(self, X):
col_coef = [(col, coef) for col, coef in zip(X.columns.tolist(), self.coef_.flatten()) if np.abs(coef) >= self.intergroup_thres]
self.coef_info['cols'] = [x for x, _ in col_coef]
self.coef_info['coef'] = [x for _, x in col_coef]
self.coef_info['excluded_cols'] = [x for x in self.seen_cols if x not in self.coef_info['cols']]
self.coef_ = np.array(self.coef_info['coef']).reshape(1, -1)
def _dpp_sel(self, X_, y=None):
"""
DPP only relies on X.
We will condition the sampling based on:
* `self.coef_info['cols']`
After sampling it will go ahead and then perform grouped wilcoxon selection.
"""
X = np.array(X_)
cols_to_index = [idx for idx, x in enumerate(X_.columns) if x in self.coef_info['cols']]
unseen_cols_to_index = [idx for idx, x in enumerate(X_.columns) if x not in self.coef_info['cols']]
if X.shape[0] < 1000:
feat_dist = rbf_kernel(X.T)
else:
feat_dist = Nystroem().fit_transform(X.T)
#feat_dist = np.nan_to_num(feat_dist)
unseen_kernel = feat_dist[unseen_cols_to_index, :][:, unseen_cols_to_index]
#print(unseen_kernel.shape)
self._dpp_estimate_k(unseen_kernel)
k = self.dpp_k['pca'] # - len(self.coef_info['cols'])
"""
if k < 1:
# this means k is possibly negative, reevaluate k based only on new incoming feats!
self.unseen_only = True
#k = max(self._dpp_estimate_k(unseen_kernel), int(unseen_kernel.shape[0] * 0.5)+1)
k = unseen_kernel.shape[0]
#print("Unseen only")
#print(k)
"""
feat_index = []
while len(feat_index) == 0:
if len(self.coef_info['cols']) == 0:
feat_index = sample_dpp(decompose_kernel(feat_dist), k=k)
else:
feat_index = sample_conditional_dpp(feat_dist, cols_to_index, k=k)
feat_index = [x for x in feat_index if x is not None]
# select features using entropy measure
# how can we order features from most to least relevant first?
# we chould do it using f test? Or otherwise - presume DPP selects best one first
"""
feat_entropy = []
excl_entropy = []
X_sel = X[:, feat_index]
for idx, feat in enumerate(X_sel.T):
if len(feat_entropy) == 0:
feat_entropy.append(idx)
continue
if entropy(X_sel[:, feat_entropy]) > entropy(X_sel[:, feat_entropy+[idx]]):
feat_entropy.append(idx)
else:
excl_entropy.append(idx)
"""
# iterate over feat_index to determine
# information on wilcoxon test
# as the feat index are already "ordered" as that is how DPP would
# perform the sampling - we will do the single pass in the same
# way it was approached in the OGFS
# feat index will have all previous sampled columns as well...
if not self.unseen_only:
feat_check = []
excl_check = []
X_sel = X[:, feat_index]
for idx, feat in enumerate(X_sel.T):
if len(feat_check) == 0:
feat_check.append(idx)
continue
wilcoxon_pval = wilcoxon_group(X_sel[:, feat_check], feat)
#print("\tWilcoxon: {}".format(wilcoxon_pval))
if wilcoxon_pval < self.intragroup_alpha:
feat_check.append(idx)
else:
excl_check.append(idx)
index_to_col = [col for idx, col in enumerate(X_.columns) if idx in feat_check]
else:
            # if we are considering unseen columns only, we simply let the
            # regulariser act on them, similar to grafting.
index_to_col = [col for idx, col in enumerate(X_.columns) if idx in feat_index]
self.unseen_only = False # perhaps add more conditions around unseen - i.e. once unseen condition kicks in, it remains active?
self.coef_info['cols'] = list(set(self.coef_info['cols'] + index_to_col))
col_rem = X_.columns.difference(self.coef_info['cols'])
# update column exclusion...
self.coef_info['excluded_cols'] = [x for x in self.coef_info['excluded_cols'] if x not in self.coef_info['cols']]
self.add_column_exclusion(col_rem)
def fit(self, X, y, coef_init=None, intercept_init=None,
sample_weight=None):
self.seen_cols = list(set(self.seen_cols + X.columns.tolist()))
# TODO: add DPP selection
self.coef_info = {'cols': [], 'coef':[], 'excluded_cols': []}
#self._dpp_sel(X, y)
#X = self._fit_columns(X)
super(DPPClassifier, self).fit(X, y, coef_init=coef_init, intercept_init=intercept_init,
sample_weight=sample_weight)
self._reg_penalty(X)
return self
def partial_fit(self, X, y, sample_weight=None):
X_ = X.copy()
unseen_col_size = len([1 for x in X.columns if x not in self.seen_cols])
self.seen_cols = list(set(self.seen_cols + X.columns.tolist()))
#sample_from_exclude_size = int(len(self.coef_info['excluded_cols']) - (len(self.coef_info['cols'])/2.0))+1
sample_from_exclude_size = int(len(self.coef_info['excluded_cols']) - unseen_col_size)
if sample_from_exclude_size > 0:
cols_excl_sample = random.sample(self.coef_info['excluded_cols'], sample_from_exclude_size)
X = X[X.columns.difference(cols_excl_sample)]
#X = X[X.columns.difference(self.coef_info['excluded_cols'])]
# TODO: add DPP selection
self._dpp_sel(X, y)
X = self._fit_columns(X_)
# now update coefficients
n_samples, n_features = X.shape
coef_list = np.zeros(n_features, dtype=np.float64, order="C")
coef_list[:len(self.coef_info['coef'])] = self.coef_info['coef']
self.coef_ = np.array(coef_list).reshape(1, -1)
super(DPPClassifier, self).partial_fit(X, y, sample_weight=None)
self._reg_penalty(X)
return self
def predict(self, X):
X = self._fit_columns(X, transform_only=True)
return super(DPPClassifier, self).predict(X)
def predict_proba(self, X):
X = self._fit_columns(X, transform_only=True)
return super(DPPClassifier, self).predict_proba(X)
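# --- Added usage sketch (not part of the original module) --------------------
# A minimal illustration of the intended streaming workflow. Assumptions: the
# DPPClassifier defined above can be constructed with default arguments and
# follows the scikit-learn fit/partial_fit API with pandas DataFrames as
# input; the column names and shapes below are purely illustrative.
if __name__ == "__main__":
    import numpy as np
    import pandas as pd
    rng = np.random.RandomState(0)
    # first batch with five features
    X1 = pd.DataFrame(rng.randn(100, 5), columns=list("abcde"))
    y1 = rng.randint(0, 2, size=100)
    clf = DPPClassifier()
    clf.fit(X1, y1)
    # second batch arrives with two additional, previously unseen features
    X2 = pd.DataFrame(rng.randn(100, 7), columns=list("abcdefg"))
    y2 = rng.randint(0, 2, size=100)
    clf.partial_fit(X2, y2)
    print(clf.coef_info["cols"])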
|
mit
|
tawsifkhan/scikit-learn
|
examples/classification/plot_classification_probability.py
|
242
|
2624
|
"""
===============================
Plot classification probability
===============================
Plot the classification probability for different classifiers. We use a 3
class dataset, and we classify it with a Support Vector classifier, L1
and L2 penalized logistic regression with either a One-Vs-Rest or multinomial
setting.
Logistic regression is not inherently a multiclass classifier; it handles
more than two classes only through the One-Vs-Rest or multinomial extensions
used here.
"""
print(__doc__)
# Author: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from sklearn import datasets
iris = datasets.load_iris()
X = iris.data[:, 0:2] # we only take the first two features for visualization
y = iris.target
n_features = X.shape[1]
C = 1.0
# Create different classifiers. The logistic regression cannot do
# multiclass out of the box.
classifiers = {'L1 logistic': LogisticRegression(C=C, penalty='l1'),
'L2 logistic (OvR)': LogisticRegression(C=C, penalty='l2'),
'Linear SVC': SVC(kernel='linear', C=C, probability=True,
random_state=0),
'L2 logistic (Multinomial)': LogisticRegression(
C=C, solver='lbfgs', multi_class='multinomial'
)}
n_classifiers = len(classifiers)
plt.figure(figsize=(3 * 2, n_classifiers * 2))
plt.subplots_adjust(bottom=.2, top=.95)
xx = np.linspace(3, 9, 100)
yy = np.linspace(1, 5, 100).T
xx, yy = np.meshgrid(xx, yy)
Xfull = np.c_[xx.ravel(), yy.ravel()]
for index, (name, classifier) in enumerate(classifiers.items()):
classifier.fit(X, y)
y_pred = classifier.predict(X)
classif_rate = np.mean(y_pred.ravel() == y.ravel()) * 100
print("classif_rate for %s : %f " % (name, classif_rate))
    # View probabilities
probas = classifier.predict_proba(Xfull)
n_classes = np.unique(y_pred).size
for k in range(n_classes):
plt.subplot(n_classifiers, n_classes, index * n_classes + k + 1)
plt.title("Class %d" % k)
if k == 0:
plt.ylabel(name)
imshow_handle = plt.imshow(probas[:, k].reshape((100, 100)),
extent=(3, 9, 1, 5), origin='lower')
plt.xticks(())
plt.yticks(())
idx = (y_pred == k)
if idx.any():
plt.scatter(X[idx, 0], X[idx, 1], marker='o', c='k')
ax = plt.axes([0.15, 0.04, 0.7, 0.05])
plt.title("Probability")
plt.colorbar(imshow_handle, cax=ax, orientation='horizontal')
plt.show()
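# Added sanity check (a sketch, not part of the original example): the
# predicted probabilities of every fitted classifier should sum to one across
# classes for each grid point.
for name, classifier in classifiers.items():
    assert np.allclose(classifier.predict_proba(Xfull).sum(axis=1), 1.0), name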
|
bsd-3-clause
|
PyORBIT-Collaboration/py-orbit
|
py/orbit/matching/matching.py
|
2
|
9703
|
import os
import string
import sys
from numpy import *
from scipy.optimize import fsolve
from scipy.optimize import root
from scipy.integrate import odeint
from scipy.constants import c
from matplotlib.pyplot import *
from orbit.teapot import TEAPOT_MATRIX_Lattice
class Twiss:
# Create a simple MAD-like twiss object:
def __init__(self):
self.data = { 'keyword': '',
's': 0.0,
'L': 0.0,
'alfx': 0.0,
'alfy': 0.0,
'betx': 0.0,
'bety': 0.0,
'mux' : 0.0,
'muy' : 0.0,
'Dx': 0.0,
'Dpx': 0.0,
'angle': 0.0,
'k1': 0.0 }
class Optics:
    # A container class for twiss objects:
def __init__(self):
self.line = []
def __len__(self):
return len(self.line)
def __getitem__(self,j):
return self.line[j]
def __setitem__(self,j,x):
self.line[j]=x
def add(self, x):
self.line.append(x)
def print_line(self):
for j in xrange(0,len(self.line)):
print j, self.line[j].data['keyword'], "s:", self.line[j].data['s'], "L:", self.line[j].data['L'], 360.0*self.line[j].data['mux'],self.line[j].data['bety'],self.line[j].data['alfy']
def get_element(self, s):
Nb=len(self.line)
if self.line[0].data['s'] >= s and s >= 0.0:
return 0
for j in xrange(1,Nb):
if self.line[j-1].data['s'] < s and self.line[j].data['s'] >=s :
return j
if self.line[Nb-1].data['s'] < s :
return 0
if s < 0.0 :
return Nb-1
else:
print "error: s not in range"
print "STOP."
sys.exit(1)
def get_length(self):
Nb=len(self.line)
return self.line[Nb-1].data['s']
def readtwiss_teapot(self,lattice, bunch):
beamline=Optics()
matrix_lattice = TEAPOT_MATRIX_Lattice(lattice,bunch)
(arrmuX, arrPosAlphaX, arrPosBetaX) = matrix_lattice.getRingTwissDataX()
(arrmuY, arrPosAlphaY, arrPosBetaY) = matrix_lattice.getRingTwissDataY()
(DispersionX, DispersionXP) = matrix_lattice.getRingDispersionDataX()
(DispersionY, DispersionYP) = matrix_lattice.getRingDispersionDataY()
nodes = lattice.getNodes()
for node in nodes:
for j in range(len(arrPosBetaX)):
if (round(lattice.getNodePositionsDict()[node][1],4)==round(arrPosBetaX[j][0],4)):
muX = arrmuX[j][1]
betaX = arrPosBetaX[j][1]
alphaX = arrPosAlphaX[j][1]
dx = DispersionX[j][1]
dmux = DispersionXP[j][1]
muY = arrmuY[j][1]
betaY = arrPosBetaY[j][1]
alphaY = arrPosAlphaY[j][1]
dmuy = DispersionYP[j][1]
if node.getType() == "quad teapot":
k1l = node.getParam("kq")*node.getLength()
else:
k1l = 0.0
if node.getType() == "bend teapot":
angle = node.getParam("theta")
else:
angle = 0.0
beamline.add(1)
j=len(beamline)-1
beamline[j]=Twiss()
beamline[j].data['keyword']=node.getName()
beamline[j].data['marker']=node.getType()
beamline[j].data['s']=round(lattice.getNodePositionsDict()[node][1],4)
beamline[j].data['L']=node.getLength()
beamline[j].data['alfx']=alphaX
beamline[j].data['alfy']=alphaY
beamline[j].data['betx']=betaX
beamline[j].data['bety']=betaY
beamline[j].data['Dx']=dx
beamline[j].data['Dpx']=dmux
beamline[j].data['mux']=muX
beamline[j].data['muy']=muY
beamline[j].data['angle']=angle
beamline[j].data['k1']=k1l
return beamline
#------------------------------------------------------
# Read MADX TFS file
#-------------------------------------------------------
#------------------------------------------------------
# Envelope solver:
# x0, xs0, y0, ys0: initial values
# emitx/y: rms emittance
# Ksc: space charge perveance
#-------------------------------------------------------
class EnvelopeSolver:
def __init__(self,beamline):
self.beamline = beamline
def func_odeint(self,y,s,emitx,emity,sigma_p,Ksc):
jb=self.beamline.get_element(s)
k1=self.beamline[jb].data['k1']
lj=self.beamline[jb].data['L']
anglej=self.beamline[jb].data['angle']
f0=y[1]
f1=-(k1/lj+(anglej/lj)**2)*y[0]+emitx**2/y[0]**3+0.5*Ksc/(y[0]+y[2])+y[4]*sigma_p**2*anglej/(y[0]*lj)
f2=y[3]
f3=(k1/lj)*y[2]+emity**2/y[2]**3+0.5*Ksc/(y[0]+y[2]) # -
f4=y[5]
f5=-(k1/lj+(anglej/lj)**2)*y[4]+0.5*Ksc/(y[0]*(y[0]+y[2]))*y[4]+anglej/lj
return [f0,f1,f2,f3,f4,f5]
def Dfunc_odeint(self,y,s,emitx,emity,sigma_p,Ksc):
jb=self.beamline.get_element(s)
k1=self.beamline[jb].data['k1']
lj=self.beamline[jb].data['L']
anglej=self.beamline[jb].data['angle']
a0=-(k1/lj+(anglej/lj)**2)*y[0]+emitx**2/y[0]**3+0.5*Ksc/(y[0]+y[2])+y[4]*sigma_p**2*anglej/(y[0]*lj)
a1=-(k1/lj+(anglej/lj)**2)*y[1]-3.0*y[1]*emitx**2/y[0]**4-0.5*Ksc*(y[1]+y[3])/(y[0]+y[2])**2+y[5]*sigma_p**2*anglej/(y[0]*lj)-y[4]*y[1]*sigma_p**2*anglej/(y[0]**2*lj)
a2=(k1/lj)*y[2]+emity**2/y[2]**3+0.5*Ksc/(y[0]+y[2]) # -
a3=(k1/lj)*y[3]-3.0*y[3]*emity**2/y[2]**4-0.5*Ksc*(y[1]+y[3])/(y[0]+y[2])**2 # -
a4=-(k1/lj+(anglej/lj)**2)*y[4]+0.5*Ksc/(y[0]*(y[0]+y[2]))*y[4]+anglej/lj
a5=-(k1/lj+(anglej/lj)**2)*y[5]+0.5*Ksc/(y[0]*(y[0]+y[2]))*y[5]-0.5*Ksc/(y[0]*(y[0]+y[2]))**2*y[4]*(y[1]*(y[0]+y[2])+y[0]*(y[1]+y[3]) )
return [a0,a1,a2,a3,a4,a5]
def envelope_odeint(self, emitx, emity, sigma_p, Ksc, x0, xs0, y0, ys0, Dx0, Dxs0):
Np=1000
Nb=len(self.beamline)
Lb=self.beamline[Nb-1].data['s']
s=linspace(0.0,Lb,num=Np)
sol=odeint(self.func_odeint,[x0,xs0,y0,ys0,Dx0,Dxs0],s,args=(emitx,emity,sigma_p,Ksc),Dfun=self.Dfunc_odeint,rtol=1.0e-12,atol=1.0e-12)
envx=sol[:,0]
envxs=sol[:,1]
envy=sol[:,2]
envys=sol[:,3]
Dx=sol[:,4]
Dxs=sol[:,5]
return envx,envxs,envy,envys,Dx,Dxs,s
#------------------------------------------------------
# Match: Periodic solution starting from MADX result
#-------------------------------------------------------
# this is the function for the root searching routine (fsolve)
def func_fsolve(self,x,emitx,emity,sigma_p,Ksc):
envx,envxs,envy,envys,Dx,Dxs,s = self.envelope_odeint(emitx,emity,sigma_p,Ksc,x[0],x[1],x[2],x[3],x[4],x[5])
Nb=len(envx)
return [envx[Nb-1]-x[0],envxs[Nb-1]-x[1],envy[Nb-1]-x[2],envys[Nb-1]-x[3],Dx[Nb-1]-x[4],Dxs[Nb-1]-x[5]]
# root searching using fsolve and initial values from MADX
# returns matched envelopes
def match_root(self, emitx, emity, sigma_p, Ksc):
Nb=len(self.beamline)
# start values
x0=sqrt(self.beamline[Nb-1].data['betx']*emitx)
gamx=(1.0+(self.beamline[Nb-1].data['alfx'])**2)/self.beamline[Nb-1].data['betx']
xs0=-copysign(sqrt(gamx*emitx),self.beamline[Nb-1].data['alfx'])
y0=sqrt(self.beamline[Nb-1].data['bety']*emity)
gamy=(1.0+(self.beamline[Nb-1].data['alfy'])**2)/self.beamline[Nb-1].data['bety']
ys0=-copysign(sqrt(gamy*emity),self.beamline[Nb-1].data['alfy'])
Dx0=self.beamline[Nb-1].data['Dx']
Dxs0=self.beamline[Nb-1].data['Dpx']
# solver
sol = root(self.func_fsolve, [x0,xs0,y0,ys0,Dx0,Dxs0], args=(emitx,emity,sigma_p,Ksc),method='hybr')
x0=sol.x[0]
xs0=sol.x[1]
y0=sol.x[2]
ys0=sol.x[3]
Dx0=sol.x[4]
Dxs0=sol.x[5]
envx,envxs,envy,envys,Dx,Dxs,s = self.envelope_odeint(emitx,emity,sigma_p,Ksc,x0,xs0,y0,ys0,Dx0,Dxs0)
return envx, envxs, envy, envys, Dx, Dxs, s
    # returns the matched twiss parameters at the cell entrance
def match_twiss(self, emitx, emity, sigma_p, Ksc):
Nb=len(self.beamline)
# start values
x0=sqrt(self.beamline[Nb-1].data['betx']*emitx)
gamx=(1.0+(self.beamline[Nb-1].data['alfx'])**2)/self.beamline[Nb-1].data['betx']
xs0=-copysign(sqrt(gamx*emitx),self.beamline[Nb-1].data['alfx'])
y0=sqrt(self.beamline[Nb-1].data['bety']*emity)
gamy=(1.0+(self.beamline[Nb-1].data['alfy'])**2)/self.beamline[Nb-1].data['bety']
ys0=-copysign(sqrt(gamy*emity),self.beamline[Nb-1].data['alfy'])
Dx0=self.beamline[Nb-1].data['Dx']
Dxs0=self.beamline[Nb-1].data['Dpx']
# solver
        sol = root(self.func_fsolve, [x0,xs0,y0,ys0,Dx0,Dxs0], args=(emitx,emity,sigma_p,Ksc),method='hybr')
x0=sol.x[0]
xs0=sol.x[1]
y0=sol.x[2]
ys0=sol.x[3]
Dx0=sol.x[4]
Dxs0=sol.x[5]
return x0**2/emitx,y0**2/emity,-copysign(sqrt(x0**2*xs0**2/emitx**2),xs0),-copysign(sqrt(y0**2*ys0**2/emity**2),ys0), Dx0, Dxs0
#------------------------------------------------------
# Smooth focusing
#-------------------------------------------------------
def func_smooth(self,x,phase0x,phase0y,length,emitx,emity,Ksc):
kx=(phase0x/length)**2
ky=(phase0y/length)**2
return[emitx**2/x[0]**3-kx*x[0]+0.5*Ksc/(x[0]+x[1]),emity**2/x[1]**3-ky*x[1]+0.5*Ksc/(x[0]+x[1])]
def match_smooth(self,phase0x,phase0y,length,emitx,emity,Ksc):
kx=(phase0x/length)**2
ky=(phase0y/length)**2
x0=(emitx**2/kx)**(1.0/4.0)
y0=(emity**2/ky)**(1.0/4.0)
sol = root(self.func_smooth,[x0,y0],args=(phase0x,phase0y,length,emitx,emity,Ksc),method='hybr')
return sol.x[0]**2/emitx,sol.x[1]**2/emity # beta functions
#------------------------------------------------------
# Calculate phase advance for given envelopes
#-------------------------------------------------------
def phase_advance(self,envx,envy,Dx,emitx,emity,sigma_p,s):
Np=len(s)
phasex=0.0
phasey=0.0
ds=s[1]-s[0]
for j in xrange(0,Np):
phasex+=ds*emitx/(envx[j]**2-(Dx[j]*sigma_p)**2)
phasey+=ds*emity/envy[j]**2
return phasex, phasey
# analytic phase advance depression
# lc: length of the cell
def phase_analytic(self,emitx,emity,Ksc,lc):
return 0.5*Ksc*lc/(4.0*emitx), 0.5*Ksc*lc/(4.0*emity)
#------------------------------------------------------
# Entropy growth rate: pre-factor
#-------------------------------------------------------
def entropy_rate(self,envx,envy,emitx,emity,s,beta0):
Np=len(s)
ratet=0.0
ds=s[1]-s[0]
for j in xrange(0,Np):
Tx=envx[j]**2/emitx**2
Ty=envy[j]**2/emity**2
ratet+=ds/(beta0*c)*0.5*(Tx-Ty)**2/(Tx*Ty)
return ratet
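# --- Added usage sketch (not part of the original module) --------------------
# Intended matching workflow, assuming a TEAPOT lattice and a bunch object are
# available from the surrounding PyORBIT session; the names and numerical
# values below are placeholders only.
#
#   beamline = Optics().readtwiss_teapot(lattice, bunch)
#   solver = EnvelopeSolver(beamline)
#   envx, envxs, envy, envys, Dx, Dxs, s = solver.match_root(
#       emitx=1.0e-6, emity=1.0e-6, sigma_p=1.0e-3, Ksc=0.0)
#   phasex, phasey = solver.phase_advance(envx, envy, Dx,
#                                         1.0e-6, 1.0e-6, 1.0e-3, s)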
|
mit
|
Winand/pandas
|
pandas/tests/io/test_clipboard.py
|
13
|
4988
|
# -*- coding: utf-8 -*-
import numpy as np
from numpy.random import randint
from textwrap import dedent
import pytest
import pandas as pd
from pandas import DataFrame
from pandas import read_clipboard
from pandas import get_option
from pandas.util import testing as tm
from pandas.util.testing import makeCustomDataframe as mkdf
from pandas.io.clipboard.exceptions import PyperclipException
from pandas.io.clipboard import clipboard_set
try:
DataFrame({'A': [1, 2]}).to_clipboard()
_DEPS_INSTALLED = 1
except PyperclipException:
_DEPS_INSTALLED = 0
@pytest.mark.single
@pytest.mark.skipif(not _DEPS_INSTALLED,
reason="clipboard primitives not installed")
class TestClipboard(object):
@classmethod
def setup_class(cls):
cls.data = {}
cls.data['string'] = mkdf(5, 3, c_idx_type='s', r_idx_type='i',
c_idx_names=[None], r_idx_names=[None])
cls.data['int'] = mkdf(5, 3, data_gen_f=lambda *args: randint(2),
c_idx_type='s', r_idx_type='i',
c_idx_names=[None], r_idx_names=[None])
cls.data['float'] = mkdf(5, 3,
data_gen_f=lambda r, c: float(r) + 0.01,
c_idx_type='s', r_idx_type='i',
c_idx_names=[None], r_idx_names=[None])
cls.data['mixed'] = DataFrame({'a': np.arange(1.0, 6.0) + 0.01,
'b': np.arange(1, 6),
'c': list('abcde')})
# Test columns exceeding "max_colwidth" (GH8305)
_cw = get_option('display.max_colwidth') + 1
cls.data['colwidth'] = mkdf(5, 3, data_gen_f=lambda *args: 'x' * _cw,
c_idx_type='s', r_idx_type='i',
c_idx_names=[None], r_idx_names=[None])
# Test GH-5346
max_rows = get_option('display.max_rows')
cls.data['longdf'] = mkdf(max_rows + 1, 3,
data_gen_f=lambda *args: randint(2),
c_idx_type='s', r_idx_type='i',
c_idx_names=[None], r_idx_names=[None])
# Test for non-ascii text: GH9263
cls.data['nonascii'] = pd.DataFrame({'en': 'in English'.split(),
'es': 'en español'.split()})
# unicode round trip test for GH 13747, GH 12529
cls.data['utf8'] = pd.DataFrame({'a': ['µasd', 'Ωœ∑´'],
'b': ['øπ∆˚¬', 'œ∑´®']})
cls.data_types = list(cls.data.keys())
@classmethod
def teardown_class(cls):
del cls.data_types, cls.data
def check_round_trip_frame(self, data_type, excel=None, sep=None,
encoding=None):
data = self.data[data_type]
data.to_clipboard(excel=excel, sep=sep, encoding=encoding)
if sep is not None:
result = read_clipboard(sep=sep, index_col=0, encoding=encoding)
else:
result = read_clipboard(encoding=encoding)
tm.assert_frame_equal(data, result, check_dtype=False)
def test_round_trip_frame_sep(self):
for dt in self.data_types:
self.check_round_trip_frame(dt, sep=',')
self.check_round_trip_frame(dt, sep=r'\s+')
self.check_round_trip_frame(dt, sep='|')
def test_round_trip_frame_string(self):
for dt in self.data_types:
self.check_round_trip_frame(dt, excel=False)
def test_round_trip_frame(self):
for dt in self.data_types:
self.check_round_trip_frame(dt)
def test_read_clipboard_infer_excel(self):
text = dedent("""
John James Charlie Mingus
1 2
4 Harry Carney
""".strip())
clipboard_set(text)
df = pd.read_clipboard()
# excel data is parsed correctly
assert df.iloc[1][1] == 'Harry Carney'
# having diff tab counts doesn't trigger it
text = dedent("""
a\t b
1 2
3 4
""".strip())
clipboard_set(text)
res = pd.read_clipboard()
text = dedent("""
a b
1 2
3 4
""".strip())
clipboard_set(text)
exp = pd.read_clipboard()
tm.assert_frame_equal(res, exp)
def test_invalid_encoding(self):
# test case for testing invalid encoding
data = self.data['string']
with pytest.raises(ValueError):
data.to_clipboard(encoding='ascii')
with pytest.raises(NotImplementedError):
pd.read_clipboard(encoding='ascii')
def test_round_trip_valid_encodings(self):
for enc in ['UTF-8', 'utf-8', 'utf8']:
for dt in self.data_types:
self.check_round_trip_frame(dt, encoding=enc)
|
bsd-3-clause
|
duerrp/pyexperiment
|
pyexperiment/experiment.py
|
1
|
15207
|
"""Framework for quick and clean experiments with python.
For a simple example to adapt to your own needs, check the example
file.
Written by Peter Duerr.
"""
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
import unittest
import sys
from datetime import datetime
import argparse
import subprocess
try:
import argcomplete
AUTO_COMPLETION = True
except ImportError:
AUTO_COMPLETION = False
from contextlib import contextmanager
from inspect import getargspec
import multiprocessing
from pyexperiment import conf
from pyexperiment import log
from pyexperiment import state
from pyexperiment.State import StateHandler
from pyexperiment.utils.printers import print_bold # pylint: disable=E0611
from pyexperiment.utils.interactive import embed_interactive
DEFAULT_CONFIG_SPECS = ("[pyexperiment]\n"
"verbosity = option('DEBUG','INFO','WARNING','ERROR',"
"'CRITICAL',default='WARNING')\n"
"log_to_file = boolean(default=False)\n"
"log_filename = string(default=log.txt)\n"
"log_file_verbosity = option('DEBUG','INFO','WARNING',"
"'ERROR','CRITICAL',default='DEBUG')\n"
"rotate_n_logs = integer(min=0, default=5)\n"
"print_timings = boolean(default=False)\n"
"load_state = boolean(default=False)\n"
"save_state = boolean(default=False)\n"
"state_filename = "
"string(default=experiment_state.h5f)\n"
"rotate_n_state_files = integer(min=0, default=5)\n"
"n_replicates = integer(min=1, default=25)\n"
"n_processes = integer(min=1, default={n_processes})\n"
"[[plot]]\n"
"font_size = integer(min=1, default=14)\n"
"label_size = integer(min=1, default=14)\n"
"use_tex = boolean(default=True)\n"
"line_width = integer(min=1, default=4)\n"
"[[[seaborn]]]\n"
"enable = boolean(default=True)\n"
"style = option('darkgrid','whitegrid','dark',"
"'white','ticks',default='darkgrid')\n"
"palette_name = string(default=colorblind)\n"
"desat = float(min=0.0, max=1.0, default=0.6)\n"
"".format(
n_processes=multiprocessing.cpu_count()))
"""Default specification for the experiment's configuration
"""
DEFAULT_CONFIG_FILENAME = "./config.ini"
"""Default name for the configuration file
"""
TESTS = []
"""List of all tests for the experiment. Filled by main.
"""
COMMANDS = []
"""List of all commands for the experiment. Filled by main.
"""
@contextmanager
def logging_context():
"""Initialize and close the logger based on the configuration
"""
# Get options related to logging
verbosity = conf['pyexperiment.verbosity']
log_to_file = conf['pyexperiment.log_to_file']
if (((isinstance(log_to_file, str) and log_to_file == 'True')
or (isinstance(log_to_file, bool) and log_to_file))):
log_filename = conf['pyexperiment.log_filename']
else:
log_filename = None
log_file_verbosity = conf['pyexperiment.log_file_verbosity']
rotate_n_logs = int(conf['pyexperiment.rotate_n_logs'])
# Setup the logger for the configuration
log.initialize(console_level=verbosity,
filename=log_filename,
file_level=log_file_verbosity,
no_backups=rotate_n_logs)
# Give control back, but catch exceptions
try:
yield
except Exception as err:
# Reraise exception after the logger is closed
raise err
finally:
log.close()
# Redefining help should be ok here
def help(*args): # pylint:disable=W0622
"""Shows help for a specified command.
"""
help_dict = dict([(command.__name__,
command.__doc__) for command in COMMANDS])
if len(args) == 0:
print("To get help on a command, use %s help COMMAND" %
sys.argv[0].replace("./", ""))
else:
if args[0] in help_dict.keys():
print(help_dict[args[0]])
else:
print("Command '%s' not available." % args[0])
def show_config():
"""Print the configuration
"""
conf.show()
def save_config(filename):
"""Save a configuration file to a filename
"""
conf.save(filename)
print("Wrote configuration to '%s'" % filename)
def test(*args):
"""Run tests for the experiment
"""
all_tests = []
for test_case in TESTS:
if ((args == () or
test_case.__name__ in args)):
all_tests.append(
unittest.TestLoader().loadTestsFromTestCase(test_case))
suite = unittest.TestSuite(all_tests)
unittest.TextTestRunner(verbosity=2).run(suite)
def show_tests(*args):
"""Show available tests for the experiment
"""
if TESTS == []:
print_bold("No tests available")
else:
print_bold("Available tests:")
for test_case in TESTS:
if ((args == () or
test_case.__name__ in args)):
print("\t"
+ str(test_case.__name__)
+ ":\t"
+ test_case.__doc__.replace(
"\n", "").replace(" ", " "))
def show_state(*arguments):
"""Shows the contents of the state loaded by the configuration or from
the file specified as an argument.
"""
if len(arguments) == 0:
state_file = conf['pyexperiment.state_filename']
else:
state_file = arguments[0]
print_bold("Load state from file '%s'",
state_file)
try:
state.load(state_file, lazy=False, raise_error=True)
except IOError as err:
print(err)
else:
if len(state) > 0:
state.show()
else:
print("State empty")
def activate_autocompletion():
"""Activate auto completion for your experiment with zsh or bash.
Call with eval \"$(script_name activate_autocompletion)\".
In zsh you may need to call `autoload bashcompinit` and
`bashcompinit` first.
"""
process = subprocess.Popen(
["register-python-argcomplete", sys.argv[0].split("/")[-1]],
stdout=subprocess.PIPE)
out, _err = process.communicate()
print(out)
def collect_commands(default, commands):
"""Add default commands
"""
default_commands = [help,
test,
show_tests,
show_config,
save_config,
show_state]
if default is not None and len(getargspec(default).args) > 0:
raise TypeError("Main function cannot take arguments.")
def show_commands():
"""Print the available commands
"""
cmds = commands
if default is not None and default not in cmds:
cmds = [default] + commands
print_bold("Available commands:")
all_commands = cmds + default_commands + [show_commands]
if AUTO_COMPLETION:
all_commands += [activate_autocompletion]
names = [command.__name__ for command in all_commands]
for name in names:
print("\t" + str(name))
all_commands = commands + default_commands + [show_commands]
if AUTO_COMPLETION:
all_commands += [activate_autocompletion]
return all_commands
def format_command_help(default, commands):
"""Format the docstrings of the commands.
"""
string = ("available commands:\n\n" +
"".join([" "
+ "%-22s" % (command.__name__
+ (' (default)'
if command == default else '')
+ ':')
+ "".join(command.__doc__.replace(
"\n ", " ").split(".")[0])
+ "\n"
for command in commands
if command.__doc__ is not None]))
return string
def setup_arg_parser(default, commands, description):
"""Setup the argument parser for the experiment
"""
command_help = format_command_help(default, commands)
arg_parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description=description,
epilog=(command_help))
arg_parser.add_argument('command',
help=('choose a command to run'
+ (', running '
+ default.__name__
+ ' by default'
if (default is not None
and default in commands)
else '')),
type=str,
choices=[command.__name__
for command in commands],
nargs='?')
arg_parser.add_argument('argument',
help='argument to the command',
type=str,
nargs='*')
arg_parser.add_argument(
'-c',
'--config',
help='specify a configuration file',
type=str,
default=DEFAULT_CONFIG_FILENAME)
arg_parser.add_argument(
'-o',
'--option',
help='override a configuration option',
type=str,
nargs=2,
metavar=('key', 'value'),
action='append')
arg_parser.add_argument(
'-i',
'--interactive',
action='store_true',
help='drop to interactive prompt after COMMAND')
arg_parser.add_argument(
'--verbosity',
type=str,
choices=['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'],
action='store',
help="choose the console logger's verbosity")
arg_parser.add_argument(
'-v',
action='store_true',
help="shortcut for --verbosity DEBUG")
arg_parser.add_argument(
'-j', '--processes',
nargs=1,
type=int,
action='store',
help="set number of parallel processes used")
arg_parser.add_argument(
'--print-timings',
action='store_true',
help="print logged timings")
if AUTO_COMPLETION:
argcomplete.autocomplete(arg_parser)
return arg_parser
def handle_shortcuts(args):
"""Handle argument shortcuts
"""
# Handle verbosity
if args.verbosity is not None:
if args.option is None:
args.option = []
args.option.append(('pyexperiment.verbosity',
args.verbosity))
elif args.v:
if args.option is None:
args.option = []
args.option.append(('pyexperiment.verbosity',
'DEBUG'))
# Handle --processes
if args.processes:
if args.option is None:
args.option = []
args.option.append(('pyexperiment.n_processes',
str(args.processes[0])))
# Handle --print-timings
if args.print_timings:
if args.option is None:
args.option = []
args.option.append(('pyexperiment.print_timings',
'True'))
def configure(default, commands, config_specs, description):
"""Load configuration from command line arguments and optionally, a
configuration file. Possible command line arguments depend on the
list of supplied commands, the configuration depends on the
supplied configuration specification.
"""
arg_parser = setup_arg_parser(default, commands, description)
args = arg_parser.parse_args()
handle_shortcuts(args)
conf.initialize(args.config,
[option.encode()
for option in config_specs.split('\n')],
args.option,
[option.encode()
for option in DEFAULT_CONFIG_SPECS.split('\n')])
actual_command = default
if args.command is not None:
for command in commands:
if command.__name__ == args.command:
actual_command = command
break
else:
if args.interactive:
actual_command = lambda: None
if actual_command is None:
print("Error: Not enough arguments.")
arg_parser.print_usage()
return None, args.argument, args.interactive
return actual_command, args.argument, args.interactive
def main(default=None,
commands=None,
config_spec="",
tests=None,
description=None):
"""Parses command line arguments and configuration, then runs the
appropriate command.
"""
start_time = datetime.now()
log.debug("Start: '%s'", " ".join(sys.argv))
log.debug("Time: '%s'", start_time)
commands = collect_commands(default, commands or [])
# Configure the application from the command line and get the
# command to be run
run_command, arguments, interactive = configure(
default,
commands,
config_spec,
"Thanks for using %(prog)s."
if description is None else description)
# Store the commands and tests globally
# I believe global is justified here for simplicity
if tests is not None:
global TESTS # pylint:disable=W0603
TESTS = tests
global COMMANDS # pylint:disable=W0603
COMMANDS = commands
# Initialize the main logger based on the configuration
# and handle the state safely
with logging_context(), \
StateHandler(filename=conf['pyexperiment.state_filename'],
load=conf['pyexperiment.load_state'],
save=conf['pyexperiment.save_state'],
rotate_n_files=conf[
'pyexperiment.rotate_n_state_files']):
# Run the command with the supplied arguments
if run_command is not None:
result = run_command(*arguments)
if result is not None:
print(result)
# Drop to the interactive console if necessary, passing the result
if interactive:
embed_interactive(result=result)
# After everything is done, print timings if necessary
if (((isinstance(conf['pyexperiment.print_timings'], bool)
and conf['pyexperiment.print_timings'])
or conf['pyexperiment.print_timings'] == 'True')):
log.print_timings()
end_time = datetime.now()
log.debug("End: '%s'", " ".join(sys.argv))
log.debug("Time: '%s'", end_time)
log.debug("Took: %.3fs", (end_time - start_time).total_seconds())
|
mit
|
peterfpeterson/mantid
|
scripts/MantidIPython/plot_functions.py
|
3
|
4224
|
# Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
"""
Plotting functions for use in IPython notebooks that are generated by MantidPlot
"""
import matplotlib.pyplot as plt
# Import Mantid
from mantid.simpleapi import *
import mantid.api as mapi
def _plot_with_options(axes_option, workspace, options_list, plot_number):
"""
Enable/disable legend, grid, limits according to
options (ops) for the given axes (ax).
Plot with or without errorbars.
"""
ws_plot = ConvertToPointData(workspace)
if options_list['errorbars']:
axes_option.errorbar(ws_plot.readX(0), ws_plot.readY(0),
yerr=ws_plot.readE(0), label=workspace.name())
else:
axes_option.plot(ws_plot.readX(0),
ws_plot.readY(0),
label=workspace.name())
axes_option.grid(options_list['grid'])
axes_option.set_xscale(options_list['xScale'])
axes_option.set_yscale(options_list['yScale'])
if options_list['xLimits'] != 'auto':
axes_option.set_xlim(options_list['xLimits'])
if options_list['yLimits'] != 'auto':
axes_option.set_ylim(options_list['yLimits'])
# If a list of titles was given, use it to title each subplot
if hasattr(options_list['title'], "__iter__"):
axes_option.set_title(options_list['title'][plot_number])
if options_list['legend'] and hasattr(options_list['legendLocation'], "__iter__"):
axes_option.legend(loc=options_list['legendLocation'][plot_number])
elif options_list['legend']:
axes_option.legend(loc=options_list['legendLocation'])
def plots(list_of_workspaces, *args, **kwargs):
"""
Create a figure with a subplot for each workspace given.
Workspaces within a group workspace are plotted together in the same subplot.
Examples:
plots(rr)
plots(rr, 'TheGraphTitle')
plots(rr, 'TheGraphTitle', grid=True, legend=True,
xScale='linear', yScale='log', xLimits=[0.008, 0.16])
plots(rr, sharedAxes = False, xLimits = [0, 0.1], yLimits = [1e-5, 2],
Title='ASF070_07 I=1A T=3K dq/q=2%',
legend=True, legendLocation=3, errorbars=False)
"""
if not hasattr(list_of_workspaces, "__iter__"):
list_of_workspaces = [list_of_workspaces]
ops = _process_arguments(args, kwargs)
# Create subplots for workspaces in the list
fig, axes_handle = plt.subplots(1,
len(list_of_workspaces),
sharey=ops['sharedAxes'],
figsize=(6 * len(list_of_workspaces), 4))
if not hasattr(axes_handle, "__iter__"):
axes_handle = [axes_handle]
for plot_number, workspace in enumerate(list_of_workspaces):
if isinstance(workspace, mapi.WorkspaceGroup):
# Plot grouped workspaces on the same axes
for sub_ws in workspace:
_plot_with_options(axes_handle[plot_number], sub_ws, ops, plot_number)
else:
_plot_with_options(axes_handle[plot_number], workspace, ops, plot_number)
# If a single title was given, use it to title the whole figure
if not hasattr(ops['title'], "__iter__"):
fig.suptitle(ops['title'])
plt.show()
return plt.gcf()
def _process_arguments(input_args, input_kwargs):
"""
Build a dictionary of plotting options
"""
key_list = ['title', 'grid', 'legend', 'legendLocation',
'xScale', 'yScale', 'xLimits', 'yLimits', 'sharedAxes', 'errorbars']
default_values = ['', True, True, 1, 'log', 'log', 'auto', 'auto', True, 'True']
# Fill ops with the default values
for i in range(len(input_args)): # copy in values provided in args
default_values[i] = input_args[i]
ops = dict(zip(key_list, default_values))
for k in ops.keys(): # copy in any key word given arguments
ops[k] = input_kwargs.get(k, ops[k])
return ops
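# --- Added illustration (not part of the original module) --------------------
# How _process_arguments merges positional and keyword arguments over the
# defaults; the values below are illustrative only.
#
#   ops = _process_arguments(('My title',), {'errorbars': False, 'yScale': 'linear'})
#   # ops['title'] == 'My title', ops['errorbars'] is False,
#   # ops['yScale'] == 'linear'; everything else keeps its default
#   # (grid=True, legend=True, xScale='log', xLimits='auto', ...).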
|
gpl-3.0
|
ngiangre/DASH_cell_type
|
norm_test_data.py
|
2
|
1783
|
"""Script to normalize test single cell RNA sequencing dataset
and output common subset genes as in the final training set.
"""
import pandas as pd
import sklearn
from sklearn.preprocessing import StandardScaler
info = {'GSE57982': {'filename': 'GSE57982_primaryFpkmMatrix.txt',
'idcol': 'geneSymbol', # column name of gene IDs
'rmcol': ['geneID']}, # columns to remove
'GSE62526': {'filename': 'GSE62526_Normalized_expression_values.txt',
'idcol': 'Gene',
'rmcol': []},
'GSE66117': {'filename': 'GSE66117_CLL_FPKM_values.txt',
'idcol': 'Gene',
'rmcol': ['Description']}
}
with open('final.txt') as fp:
    # take the list of final differentially expressed genes
cols = fp.readline().split()
cols.remove('tissue')
cols.remove('batch')
deg = pd.DataFrame(data=cols, columns=['gene'])
for gse, gseinfo in info.iteritems():
pre_data = pd.read_table(gseinfo['filename'])
# use left join to subset test data
pre_data = pre_data.rename(columns={gseinfo['idcol']: 'gene'})
pre_data = pre_data.drop_duplicates(subset='gene')
pre_data = pd.merge(deg, pre_data, how="left", on='gene')
pre_data.fillna(0, inplace=True)
genes = pre_data['gene']
del pre_data['gene']
for col in gseinfo['rmcol']:
del pre_data[col]
scaler = StandardScaler()
    norm = scaler.fit_transform(pre_data.values) # normalize each sample (column) across genes
    tnorm = pd.DataFrame(data=norm.T, columns=genes) # transpose so that samples are on the rows
tnorm.insert(0, column='gene', value=pre_data.columns)
output_fn = gseinfo['filename'] + '.norm'
tnorm.to_csv(output_fn, sep='\t', index=False)
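# --- Added illustration (not part of the original script) --------------------
# The StandardScaler call above standardises each column of pre_data.values,
# i.e. each sample, to zero mean and unit variance across genes. A toy check:
#
#   import numpy as np
#   toy = np.array([[1.0, 10.0], [2.0, 20.0], [3.0, 30.0]])
#   scaled = StandardScaler().fit_transform(toy)
#   # each column of ``scaled`` now has mean 0 and standard deviation 1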
|
cc0-1.0
|
lucidfrontier45/scikit-learn
|
examples/gaussian_process/plot_gp_probabilistic_classification_after_regression.py
|
2
|
3486
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
==============================================================================
Gaussian Processes classification example: exploiting the probabilistic output
==============================================================================
A two-dimensional regression exercise with a post-processing allowing for
probabilistic classification thanks to the Gaussian property of the prediction.
The figure illustrates the probability that the prediction is negative with
respect to the remaining uncertainty in the prediction. The red and blue lines
correspond to the 95% confidence interval on the prediction of the zero level
set.
"""
print __doc__
# Author: Vincent Dubourg <[email protected]>
# License: BSD style
import numpy as np
from scipy import stats
from sklearn.gaussian_process import GaussianProcess
from matplotlib import pyplot as pl
from matplotlib import cm
# Standard normal distribution functions
phi = stats.distributions.norm().pdf
PHI = stats.distributions.norm().cdf
PHIinv = stats.distributions.norm().ppf
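# Added note (a sketch, not part of the original example): the probabilistic
# classification below relies on the Gaussian property of the GP prediction,
# P[g(x) <= 0] = PHI(-y_pred / sigma); e.g. a prediction of 1.0 with
# sigma = 0.5 gives PHI(-2.0), roughly 0.023.
example_probability = PHI(-1.0 / 0.5)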
# A few constants
lim = 8
def g(x):
"""The function to predict (classification will then consist in predicting
whether g(x) <= 0 or not)"""
return 5. - x[:, 1] - .5 * x[:, 0] ** 2.
# Design of experiments
X = np.array([[-4.61611719, -6.00099547],
[4.10469096, 5.32782448],
[0.00000000, -0.50000000],
[-6.17289014, -4.6984743],
[1.3109306, -6.93271427],
[-5.03823144, 3.10584743],
[-2.87600388, 6.74310541],
[5.21301203, 4.26386883]])
# Observations
y = g(X)
# Instantiate and fit the Gaussian Process model
gp = GaussianProcess(theta0=5e-1)
# Don't perform MLE or you'll get a perfect prediction for this simple example!
gp.fit(X, y)
# Evaluate real function, the prediction and its MSE on a grid
res = 50
x1, x2 = np.meshgrid(np.linspace(- lim, lim, res),
np.linspace(- lim, lim, res))
xx = np.vstack([x1.reshape(x1.size), x2.reshape(x2.size)]).T
y_true = g(xx)
y_pred, MSE = gp.predict(xx, eval_MSE=True)
sigma = np.sqrt(MSE)
y_true = y_true.reshape((res, res))
y_pred = y_pred.reshape((res, res))
sigma = sigma.reshape((res, res))
k = PHIinv(.975)
# Plot the probabilistic classification iso-values using the Gaussian property
# of the prediction
fig = pl.figure(1)
ax = fig.add_subplot(111)
ax.axes.set_aspect('equal')
pl.xticks([])
pl.yticks([])
ax.set_xticklabels([])
ax.set_yticklabels([])
pl.xlabel('$x_1$')
pl.ylabel('$x_2$')
cax = pl.imshow(np.flipud(PHI(- y_pred / sigma)), cmap=cm.gray_r, alpha=0.8,
extent=(- lim, lim, - lim, lim))
norm = pl.matplotlib.colors.Normalize(vmin=0., vmax=0.9)
cb = pl.colorbar(cax, ticks=[0., 0.2, 0.4, 0.6, 0.8, 1.], norm=norm)
cb.set_label('${\\rm \mathbb{P}}\left[\widehat{G}(\mathbf{x}) \leq 0\\right]$')
pl.plot(X[y <= 0, 0], X[y <= 0, 1], 'r.', markersize=12)
pl.plot(X[y > 0, 0], X[y > 0, 1], 'b.', markersize=12)
cs = pl.contour(x1, x2, y_true, [0.], colors='k', linestyles='dashdot')
cs = pl.contour(x1, x2, PHI(- y_pred / sigma), [0.025], colors='b',
linestyles='solid')
pl.clabel(cs, fontsize=11)
cs = pl.contour(x1, x2, PHI(- y_pred / sigma), [0.5], colors='k',
linestyles='dashed')
pl.clabel(cs, fontsize=11)
cs = pl.contour(x1, x2, PHI(- y_pred / sigma), [0.975], colors='r',
linestyles='solid')
pl.clabel(cs, fontsize=11)
pl.show()
|
bsd-3-clause
|
spallavolu/scikit-learn
|
sklearn/tests/test_dummy.py
|
186
|
17778
|
from __future__ import division
import numpy as np
import scipy.sparse as sp
from sklearn.base import clone
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.stats import _weighted_percentile
from sklearn.dummy import DummyClassifier, DummyRegressor
@ignore_warnings
def _check_predict_proba(clf, X, y):
proba = clf.predict_proba(X)
# We know that we can have division by zero
log_proba = clf.predict_log_proba(X)
y = np.atleast_1d(y)
if y.ndim == 1:
y = np.reshape(y, (-1, 1))
n_outputs = y.shape[1]
n_samples = len(X)
if n_outputs == 1:
proba = [proba]
log_proba = [log_proba]
for k in range(n_outputs):
assert_equal(proba[k].shape[0], n_samples)
assert_equal(proba[k].shape[1], len(np.unique(y[:, k])))
assert_array_equal(proba[k].sum(axis=1), np.ones(len(X)))
# We know that we can have division by zero
assert_array_equal(np.log(proba[k]), log_proba[k])
def _check_behavior_2d(clf):
# 1d case
X = np.array([[0], [0], [0], [0]]) # ignored
y = np.array([1, 2, 1, 1])
est = clone(clf)
est.fit(X, y)
y_pred = est.predict(X)
assert_equal(y.shape, y_pred.shape)
# 2d case
y = np.array([[1, 0],
[2, 0],
[1, 0],
[1, 3]])
est = clone(clf)
est.fit(X, y)
y_pred = est.predict(X)
assert_equal(y.shape, y_pred.shape)
def _check_behavior_2d_for_constant(clf):
# 2d case only
X = np.array([[0], [0], [0], [0]]) # ignored
y = np.array([[1, 0, 5, 4, 3],
[2, 0, 1, 2, 5],
[1, 0, 4, 5, 2],
[1, 3, 3, 2, 0]])
est = clone(clf)
est.fit(X, y)
y_pred = est.predict(X)
assert_equal(y.shape, y_pred.shape)
def _check_equality_regressor(statistic, y_learn, y_pred_learn,
y_test, y_pred_test):
assert_array_equal(np.tile(statistic, (y_learn.shape[0], 1)),
y_pred_learn)
assert_array_equal(np.tile(statistic, (y_test.shape[0], 1)),
y_pred_test)
def test_most_frequent_and_prior_strategy():
X = [[0], [0], [0], [0]] # ignored
y = [1, 2, 1, 1]
for strategy in ("most_frequent", "prior"):
clf = DummyClassifier(strategy=strategy, random_state=0)
clf.fit(X, y)
assert_array_equal(clf.predict(X), np.ones(len(X)))
_check_predict_proba(clf, X, y)
if strategy == "prior":
assert_array_equal(clf.predict_proba([X[0]]),
clf.class_prior_.reshape((1, -1)))
else:
assert_array_equal(clf.predict_proba([X[0]]),
clf.class_prior_.reshape((1, -1)) > 0.5)
def test_most_frequent_and_prior_strategy_multioutput():
X = [[0], [0], [0], [0]] # ignored
y = np.array([[1, 0],
[2, 0],
[1, 0],
[1, 3]])
n_samples = len(X)
for strategy in ("prior", "most_frequent"):
clf = DummyClassifier(strategy=strategy, random_state=0)
clf.fit(X, y)
assert_array_equal(clf.predict(X),
np.hstack([np.ones((n_samples, 1)),
np.zeros((n_samples, 1))]))
_check_predict_proba(clf, X, y)
_check_behavior_2d(clf)
def test_stratified_strategy():
X = [[0]] * 5 # ignored
y = [1, 2, 1, 1, 2]
clf = DummyClassifier(strategy="stratified", random_state=0)
clf.fit(X, y)
X = [[0]] * 500
y_pred = clf.predict(X)
p = np.bincount(y_pred) / float(len(X))
assert_almost_equal(p[1], 3. / 5, decimal=1)
assert_almost_equal(p[2], 2. / 5, decimal=1)
_check_predict_proba(clf, X, y)
def test_stratified_strategy_multioutput():
X = [[0]] * 5 # ignored
y = np.array([[2, 1],
[2, 2],
[1, 1],
[1, 2],
[1, 1]])
clf = DummyClassifier(strategy="stratified", random_state=0)
clf.fit(X, y)
X = [[0]] * 500
y_pred = clf.predict(X)
for k in range(y.shape[1]):
p = np.bincount(y_pred[:, k]) / float(len(X))
assert_almost_equal(p[1], 3. / 5, decimal=1)
assert_almost_equal(p[2], 2. / 5, decimal=1)
_check_predict_proba(clf, X, y)
_check_behavior_2d(clf)
def test_uniform_strategy():
X = [[0]] * 4 # ignored
y = [1, 2, 1, 1]
clf = DummyClassifier(strategy="uniform", random_state=0)
clf.fit(X, y)
X = [[0]] * 500
y_pred = clf.predict(X)
p = np.bincount(y_pred) / float(len(X))
assert_almost_equal(p[1], 0.5, decimal=1)
assert_almost_equal(p[2], 0.5, decimal=1)
_check_predict_proba(clf, X, y)
def test_uniform_strategy_multioutput():
X = [[0]] * 4 # ignored
y = np.array([[2, 1],
[2, 2],
[1, 2],
[1, 1]])
clf = DummyClassifier(strategy="uniform", random_state=0)
clf.fit(X, y)
X = [[0]] * 500
y_pred = clf.predict(X)
for k in range(y.shape[1]):
p = np.bincount(y_pred[:, k]) / float(len(X))
assert_almost_equal(p[1], 0.5, decimal=1)
assert_almost_equal(p[2], 0.5, decimal=1)
_check_predict_proba(clf, X, y)
_check_behavior_2d(clf)
def test_string_labels():
X = [[0]] * 5
y = ["paris", "paris", "tokyo", "amsterdam", "berlin"]
clf = DummyClassifier(strategy="most_frequent")
clf.fit(X, y)
assert_array_equal(clf.predict(X), ["paris"] * 5)
def test_classifier_exceptions():
clf = DummyClassifier(strategy="unknown")
assert_raises(ValueError, clf.fit, [], [])
assert_raises(ValueError, clf.predict, [])
assert_raises(ValueError, clf.predict_proba, [])
def test_mean_strategy_regressor():
random_state = np.random.RandomState(seed=1)
X = [[0]] * 4 # ignored
y = random_state.randn(4)
reg = DummyRegressor()
reg.fit(X, y)
assert_array_equal(reg.predict(X), [np.mean(y)] * len(X))
def test_mean_strategy_multioutput_regressor():
random_state = np.random.RandomState(seed=1)
X_learn = random_state.randn(10, 10)
y_learn = random_state.randn(10, 5)
mean = np.mean(y_learn, axis=0).reshape((1, -1))
X_test = random_state.randn(20, 10)
y_test = random_state.randn(20, 5)
# Correctness oracle
est = DummyRegressor()
est.fit(X_learn, y_learn)
y_pred_learn = est.predict(X_learn)
y_pred_test = est.predict(X_test)
_check_equality_regressor(mean, y_learn, y_pred_learn, y_test, y_pred_test)
_check_behavior_2d(est)
def test_regressor_exceptions():
reg = DummyRegressor()
assert_raises(ValueError, reg.predict, [])
def test_median_strategy_regressor():
random_state = np.random.RandomState(seed=1)
X = [[0]] * 5 # ignored
y = random_state.randn(5)
reg = DummyRegressor(strategy="median")
reg.fit(X, y)
assert_array_equal(reg.predict(X), [np.median(y)] * len(X))
def test_median_strategy_multioutput_regressor():
random_state = np.random.RandomState(seed=1)
X_learn = random_state.randn(10, 10)
y_learn = random_state.randn(10, 5)
median = np.median(y_learn, axis=0).reshape((1, -1))
X_test = random_state.randn(20, 10)
y_test = random_state.randn(20, 5)
# Correctness oracle
est = DummyRegressor(strategy="median")
est.fit(X_learn, y_learn)
y_pred_learn = est.predict(X_learn)
y_pred_test = est.predict(X_test)
_check_equality_regressor(
median, y_learn, y_pred_learn, y_test, y_pred_test)
_check_behavior_2d(est)
def test_quantile_strategy_regressor():
random_state = np.random.RandomState(seed=1)
X = [[0]] * 5 # ignored
y = random_state.randn(5)
reg = DummyRegressor(strategy="quantile", quantile=0.5)
reg.fit(X, y)
assert_array_equal(reg.predict(X), [np.median(y)] * len(X))
reg = DummyRegressor(strategy="quantile", quantile=0)
reg.fit(X, y)
assert_array_equal(reg.predict(X), [np.min(y)] * len(X))
reg = DummyRegressor(strategy="quantile", quantile=1)
reg.fit(X, y)
assert_array_equal(reg.predict(X), [np.max(y)] * len(X))
reg = DummyRegressor(strategy="quantile", quantile=0.3)
reg.fit(X, y)
assert_array_equal(reg.predict(X), [np.percentile(y, q=30)] * len(X))
def test_quantile_strategy_multioutput_regressor():
random_state = np.random.RandomState(seed=1)
X_learn = random_state.randn(10, 10)
y_learn = random_state.randn(10, 5)
median = np.median(y_learn, axis=0).reshape((1, -1))
quantile_values = np.percentile(y_learn, axis=0, q=80).reshape((1, -1))
X_test = random_state.randn(20, 10)
y_test = random_state.randn(20, 5)
# Correctness oracle
est = DummyRegressor(strategy="quantile", quantile=0.5)
est.fit(X_learn, y_learn)
y_pred_learn = est.predict(X_learn)
y_pred_test = est.predict(X_test)
_check_equality_regressor(
median, y_learn, y_pred_learn, y_test, y_pred_test)
_check_behavior_2d(est)
# Correctness oracle
est = DummyRegressor(strategy="quantile", quantile=0.8)
est.fit(X_learn, y_learn)
y_pred_learn = est.predict(X_learn)
y_pred_test = est.predict(X_test)
_check_equality_regressor(
quantile_values, y_learn, y_pred_learn, y_test, y_pred_test)
_check_behavior_2d(est)
def test_quantile_invalid():
X = [[0]] * 5 # ignored
y = [0] * 5 # ignored
est = DummyRegressor(strategy="quantile")
assert_raises(ValueError, est.fit, X, y)
est = DummyRegressor(strategy="quantile", quantile=None)
assert_raises(ValueError, est.fit, X, y)
est = DummyRegressor(strategy="quantile", quantile=[0])
assert_raises(ValueError, est.fit, X, y)
est = DummyRegressor(strategy="quantile", quantile=-0.1)
assert_raises(ValueError, est.fit, X, y)
est = DummyRegressor(strategy="quantile", quantile=1.1)
assert_raises(ValueError, est.fit, X, y)
est = DummyRegressor(strategy="quantile", quantile='abc')
assert_raises(TypeError, est.fit, X, y)
def test_quantile_strategy_empty_train():
est = DummyRegressor(strategy="quantile", quantile=0.4)
assert_raises(ValueError, est.fit, [], [])
def test_constant_strategy_regressor():
random_state = np.random.RandomState(seed=1)
X = [[0]] * 5 # ignored
y = random_state.randn(5)
reg = DummyRegressor(strategy="constant", constant=[43])
reg.fit(X, y)
assert_array_equal(reg.predict(X), [43] * len(X))
reg = DummyRegressor(strategy="constant", constant=43)
reg.fit(X, y)
assert_array_equal(reg.predict(X), [43] * len(X))
def test_constant_strategy_multioutput_regressor():
random_state = np.random.RandomState(seed=1)
X_learn = random_state.randn(10, 10)
y_learn = random_state.randn(10, 5)
# test with 2d array
constants = random_state.randn(5)
X_test = random_state.randn(20, 10)
y_test = random_state.randn(20, 5)
# Correctness oracle
est = DummyRegressor(strategy="constant", constant=constants)
est.fit(X_learn, y_learn)
y_pred_learn = est.predict(X_learn)
y_pred_test = est.predict(X_test)
_check_equality_regressor(
constants, y_learn, y_pred_learn, y_test, y_pred_test)
_check_behavior_2d_for_constant(est)
def test_y_mean_attribute_regressor():
X = [[0]] * 5
y = [1, 2, 4, 6, 8]
# when strategy = 'mean'
est = DummyRegressor(strategy='mean')
est.fit(X, y)
assert_equal(est.constant_, np.mean(y))
def test_unknown_strategy_regressor():
X = [[0]] * 5
y = [1, 2, 4, 6, 8]
est = DummyRegressor(strategy='gona')
assert_raises(ValueError, est.fit, X, y)
def test_constants_not_specified_regressor():
X = [[0]] * 5
y = [1, 2, 4, 6, 8]
est = DummyRegressor(strategy='constant')
assert_raises(TypeError, est.fit, X, y)
def test_constant_size_multioutput_regressor():
random_state = np.random.RandomState(seed=1)
X = random_state.randn(10, 10)
y = random_state.randn(10, 5)
est = DummyRegressor(strategy='constant', constant=[1, 2, 3, 4])
assert_raises(ValueError, est.fit, X, y)
def test_constant_strategy():
X = [[0], [0], [0], [0]] # ignored
y = [2, 1, 2, 2]
clf = DummyClassifier(strategy="constant", random_state=0, constant=1)
clf.fit(X, y)
assert_array_equal(clf.predict(X), np.ones(len(X)))
_check_predict_proba(clf, X, y)
X = [[0], [0], [0], [0]] # ignored
y = ['two', 'one', 'two', 'two']
clf = DummyClassifier(strategy="constant", random_state=0, constant='one')
clf.fit(X, y)
assert_array_equal(clf.predict(X), np.array(['one'] * 4))
_check_predict_proba(clf, X, y)
def test_constant_strategy_multioutput():
X = [[0], [0], [0], [0]] # ignored
y = np.array([[2, 3],
[1, 3],
[2, 3],
[2, 0]])
n_samples = len(X)
clf = DummyClassifier(strategy="constant", random_state=0,
constant=[1, 0])
clf.fit(X, y)
assert_array_equal(clf.predict(X),
np.hstack([np.ones((n_samples, 1)),
np.zeros((n_samples, 1))]))
_check_predict_proba(clf, X, y)
def test_constant_strategy_exceptions():
X = [[0], [0], [0], [0]] # ignored
y = [2, 1, 2, 2]
clf = DummyClassifier(strategy="constant", random_state=0)
assert_raises(ValueError, clf.fit, X, y)
clf = DummyClassifier(strategy="constant", random_state=0,
constant=[2, 0])
assert_raises(ValueError, clf.fit, X, y)
def test_classification_sample_weight():
X = [[0], [0], [1]]
y = [0, 1, 0]
sample_weight = [0.1, 1., 0.1]
clf = DummyClassifier().fit(X, y, sample_weight)
assert_array_almost_equal(clf.class_prior_, [0.2 / 1.2, 1. / 1.2])
def test_constant_strategy_sparse_target():
X = [[0]] * 5 # ignored
y = sp.csc_matrix(np.array([[0, 1],
[4, 0],
[1, 1],
[1, 4],
[1, 1]]))
n_samples = len(X)
clf = DummyClassifier(strategy="constant", random_state=0, constant=[1, 0])
clf.fit(X, y)
y_pred = clf.predict(X)
assert_true(sp.issparse(y_pred))
assert_array_equal(y_pred.toarray(), np.hstack([np.ones((n_samples, 1)),
np.zeros((n_samples, 1))]))
def test_uniform_strategy_sparse_target_warning():
X = [[0]] * 5 # ignored
y = sp.csc_matrix(np.array([[2, 1],
[2, 2],
[1, 4],
[4, 2],
[1, 1]]))
clf = DummyClassifier(strategy="uniform", random_state=0)
assert_warns_message(UserWarning,
"the uniform strategy would not save memory",
clf.fit, X, y)
X = [[0]] * 500
y_pred = clf.predict(X)
for k in range(y.shape[1]):
p = np.bincount(y_pred[:, k]) / float(len(X))
assert_almost_equal(p[1], 1 / 3, decimal=1)
assert_almost_equal(p[2], 1 / 3, decimal=1)
assert_almost_equal(p[4], 1 / 3, decimal=1)
def test_stratified_strategy_sparse_target():
X = [[0]] * 5 # ignored
y = sp.csc_matrix(np.array([[4, 1],
[0, 0],
[1, 1],
[1, 4],
[1, 1]]))
clf = DummyClassifier(strategy="stratified", random_state=0)
clf.fit(X, y)
X = [[0]] * 500
y_pred = clf.predict(X)
assert_true(sp.issparse(y_pred))
y_pred = y_pred.toarray()
for k in range(y.shape[1]):
p = np.bincount(y_pred[:, k]) / float(len(X))
assert_almost_equal(p[1], 3. / 5, decimal=1)
assert_almost_equal(p[0], 1. / 5, decimal=1)
assert_almost_equal(p[4], 1. / 5, decimal=1)
def test_most_frequent_and_prior_strategy_sparse_target():
X = [[0]] * 5 # ignored
y = sp.csc_matrix(np.array([[1, 0],
[1, 3],
[4, 0],
[0, 1],
[1, 0]]))
n_samples = len(X)
y_expected = np.hstack([np.ones((n_samples, 1)), np.zeros((n_samples, 1))])
for strategy in ("most_frequent", "prior"):
clf = DummyClassifier(strategy=strategy, random_state=0)
clf.fit(X, y)
y_pred = clf.predict(X)
assert_true(sp.issparse(y_pred))
assert_array_equal(y_pred.toarray(), y_expected)
def test_dummy_regressor_sample_weight(n_samples=10):
random_state = np.random.RandomState(seed=1)
X = [[0]] * n_samples
y = random_state.rand(n_samples)
sample_weight = random_state.rand(n_samples)
est = DummyRegressor(strategy="mean").fit(X, y, sample_weight)
assert_equal(est.constant_, np.average(y, weights=sample_weight))
est = DummyRegressor(strategy="median").fit(X, y, sample_weight)
assert_equal(est.constant_, _weighted_percentile(y, sample_weight, 50.))
est = DummyRegressor(strategy="quantile", quantile=.95).fit(X, y,
sample_weight)
assert_equal(est.constant_, _weighted_percentile(y, sample_weight, 95.))
|
bsd-3-clause
|
CSB-IG/non-coding-NGS
|
regiones_indel.py
|
1
|
2119
|
import matplotlib
matplotlib.rcParams.update({'font.size': 8})
from sample_code_file_maps import snps, indels, ethnicity_code
from sample_code_file_maps import north, centre, peninsula, admixed
from sample_code_file_maps import mayas, nahuas, tarahumaras, tepehuanos, totonacas, zapotecas
# load indels into a dictionary of sets
north_sets = {}
for s in north:
north_sets[s] = set([v.strip() for v in open( "maps/%s_indel.map" % ethnicity_code[s]).readlines()])
centre_sets = {}
for s in centre:
centre_sets[s] = set([v.strip() for v in open( "maps/%s_indel.map" % ethnicity_code[s]).readlines()])
peninsula_sets = {}
for s in peninsula:
peninsula_sets[s] = set([v.strip() for v in open( "maps/%s_indel.map" % ethnicity_code[s]).readlines()])
north_union = set.union(*[north_sets[n] for n in north])
centre_union = set.union(*[centre_sets[n] for n in centre])
peninsula_union = set.union(*[peninsula_sets[n] for n in peninsula])
from decimal import Decimal
just_north = Decimal(len(north_union - centre_union.union(peninsula_union)))
just_centre = Decimal(len(centre_union - north_union.union(peninsula_union)))
just_peninsula = Decimal(len(peninsula_union - north_union.union(centre_union)))
nIcIp = set.intersection(centre_union, north_union, peninsula_union)
pIc = Decimal(len(peninsula_union.intersection( centre_union) - nIcIp))
pIn = Decimal(len(peninsula_union.intersection( north_union) - nIcIp))
cIn = Decimal(len(centre_union.intersection( north_union) - nIcIp))
nIcIp = Decimal(len(nIcIp))
total = Decimal(just_north + just_centre + cIn + just_peninsula + pIn + pIc + nIcIp)
from matplotlib import pyplot as plt
import numpy as np
from matplotlib_venn import venn3
plt.figure(figsize=(4,4))
v = venn3([float("%.2f" % (just_north/total)),
float("%.2f" % (just_centre/total)),
float("%.2f" % (cIn/total)),
float("%.2f" % (just_peninsula/total)),
float("%.2f" % (pIn/total)),
float("%.2f" % (pIc/total)),
float("%.2f" % (nIcIp/total))], set_labels=('North', 'Centre', 'South'))
plt.savefig('regiones_indel.png')
|
gpl-3.0
|
abhisg/scikit-learn
|
examples/applications/face_recognition.py
|
191
|
5513
|
"""
===================================================
Faces recognition example using eigenfaces and SVMs
===================================================
The dataset used in this example is a preprocessed excerpt of the
"Labeled Faces in the Wild", aka LFW_:
http://vis-www.cs.umass.edu/lfw/lfw-funneled.tgz (233MB)
.. _LFW: http://vis-www.cs.umass.edu/lfw/
Expected results for the top 5 most represented people in the dataset::
                     precision    recall  f1-score   support
      Ariel Sharon        0.67      0.92      0.77        13
      Colin Powell        0.75      0.78      0.76        60
   Donald Rumsfeld        0.78      0.67      0.72        27
     George W Bush        0.86      0.86      0.86       146
 Gerhard Schroeder        0.76      0.76      0.76        25
       Hugo Chavez        0.67      0.67      0.67        15
        Tony Blair        0.81      0.69      0.75        36
       avg / total        0.80      0.80      0.80       322
"""
from __future__ import print_function
from time import time
import logging
import matplotlib.pyplot as plt
from sklearn.cross_validation import train_test_split
from sklearn.datasets import fetch_lfw_people
from sklearn.grid_search import GridSearchCV
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.decomposition import RandomizedPCA
from sklearn.svm import SVC
print(__doc__)
# Display progress logs on stdout
logging.basicConfig(level=logging.INFO, format='%(asctime)s %(message)s')
###############################################################################
# Download the data, if not already on disk and load it as numpy arrays
lfw_people = fetch_lfw_people(min_faces_per_person=70, resize=0.4)
# introspect the images arrays to find the shapes (for plotting)
n_samples, h, w = lfw_people.images.shape
# for machine learning we use the flattened pixel data directly (relative
# pixel position info is ignored by this model)
X = lfw_people.data
n_features = X.shape[1]
# the label to predict is the id of the person
y = lfw_people.target
target_names = lfw_people.target_names
n_classes = target_names.shape[0]
print("Total dataset size:")
print("n_samples: %d" % n_samples)
print("n_features: %d" % n_features)
print("n_classes: %d" % n_classes)
###############################################################################
# Split into a training set and a test set using a stratified k fold
# split into a training and testing set
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.25, random_state=42)
###############################################################################
# Compute a PCA (eigenfaces) on the face dataset (treated as unlabeled
# dataset): unsupervised feature extraction / dimensionality reduction
n_components = 150
print("Extracting the top %d eigenfaces from %d faces"
% (n_components, X_train.shape[0]))
t0 = time()
pca = RandomizedPCA(n_components=n_components, whiten=True).fit(X_train)
print("done in %0.3fs" % (time() - t0))
eigenfaces = pca.components_.reshape((n_components, h, w))
print("Projecting the input data on the eigenfaces orthonormal basis")
t0 = time()
X_train_pca = pca.transform(X_train)
X_test_pca = pca.transform(X_test)
print("done in %0.3fs" % (time() - t0))
###############################################################################
# Train a SVM classification model
print("Fitting the classifier to the training set")
t0 = time()
param_grid = {'C': [1e3, 5e3, 1e4, 5e4, 1e5],
'gamma': [0.0001, 0.0005, 0.001, 0.005, 0.01, 0.1], }
clf = GridSearchCV(SVC(kernel='rbf', class_weight='balanced'), param_grid)
clf = clf.fit(X_train_pca, y_train)
print("done in %0.3fs" % (time() - t0))
print("Best estimator found by grid search:")
print(clf.best_estimator_)
###############################################################################
# Quantitative evaluation of the model quality on the test set
print("Predicting people's names on the test set")
t0 = time()
y_pred = clf.predict(X_test_pca)
print("done in %0.3fs" % (time() - t0))
print(classification_report(y_test, y_pred, target_names=target_names))
print(confusion_matrix(y_test, y_pred, labels=range(n_classes)))
###############################################################################
# Qualitative evaluation of the predictions using matplotlib
def plot_gallery(images, titles, h, w, n_row=3, n_col=4):
"""Helper function to plot a gallery of portraits"""
plt.figure(figsize=(1.8 * n_col, 2.4 * n_row))
plt.subplots_adjust(bottom=0, left=.01, right=.99, top=.90, hspace=.35)
for i in range(n_row * n_col):
plt.subplot(n_row, n_col, i + 1)
plt.imshow(images[i].reshape((h, w)), cmap=plt.cm.gray)
plt.title(titles[i], size=12)
plt.xticks(())
plt.yticks(())
# plot the result of the prediction on a portion of the test set
def title(y_pred, y_test, target_names, i):
pred_name = target_names[y_pred[i]].rsplit(' ', 1)[-1]
true_name = target_names[y_test[i]].rsplit(' ', 1)[-1]
return 'predicted: %s\ntrue: %s' % (pred_name, true_name)
prediction_titles = [title(y_pred, y_test, target_names, i)
for i in range(y_pred.shape[0])]
plot_gallery(X_test, prediction_titles, h, w)
# plot the gallery of the most significative eigenfaces
eigenface_titles = ["eigenface %d" % i for i in range(eigenfaces.shape[0])]
plot_gallery(eigenfaces, eigenface_titles, h, w)
plt.show()
|
bsd-3-clause
|
btabibian/scikit-learn
|
sklearn/decomposition/tests/test_pca.py
|
9
|
21107
|
import numpy as np
import scipy as sp
from itertools import product
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_no_warnings
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_less
from sklearn import datasets
from sklearn.decomposition import PCA
from sklearn.decomposition import RandomizedPCA
from sklearn.decomposition.pca import _assess_dimension_
from sklearn.decomposition.pca import _infer_dimension_
iris = datasets.load_iris()
solver_list = ['full', 'arpack', 'randomized', 'auto']
def test_pca():
# PCA on dense arrays
X = iris.data
for n_comp in np.arange(X.shape[1]):
pca = PCA(n_components=n_comp, svd_solver='full')
X_r = pca.fit(X).transform(X)
np.testing.assert_equal(X_r.shape[1], n_comp)
X_r2 = pca.fit_transform(X)
assert_array_almost_equal(X_r, X_r2)
X_r = pca.transform(X)
X_r2 = pca.fit_transform(X)
assert_array_almost_equal(X_r, X_r2)
# Test get_covariance and get_precision
cov = pca.get_covariance()
precision = pca.get_precision()
assert_array_almost_equal(np.dot(cov, precision),
np.eye(X.shape[1]), 12)
# test explained_variance_ratio_ == 1 with all components
pca = PCA(svd_solver='full')
pca.fit(X)
assert_almost_equal(pca.explained_variance_ratio_.sum(), 1.0, 3)
def test_pca_arpack_solver():
# PCA on dense arrays
X = iris.data
d = X.shape[1]
# Loop excluding the extremes, invalid inputs for arpack
for n_comp in np.arange(1, d):
pca = PCA(n_components=n_comp, svd_solver='arpack', random_state=0)
X_r = pca.fit(X).transform(X)
np.testing.assert_equal(X_r.shape[1], n_comp)
X_r2 = pca.fit_transform(X)
assert_array_almost_equal(X_r, X_r2)
X_r = pca.transform(X)
assert_array_almost_equal(X_r, X_r2)
# Test get_covariance and get_precision
cov = pca.get_covariance()
precision = pca.get_precision()
assert_array_almost_equal(np.dot(cov, precision),
np.eye(d), 12)
pca = PCA(n_components=0, svd_solver='arpack', random_state=0)
assert_raises(ValueError, pca.fit, X)
# Check internal state
assert_equal(pca.n_components,
PCA(n_components=0,
svd_solver='arpack', random_state=0).n_components)
assert_equal(pca.svd_solver,
PCA(n_components=0,
svd_solver='arpack', random_state=0).svd_solver)
pca = PCA(n_components=d, svd_solver='arpack', random_state=0)
assert_raises(ValueError, pca.fit, X)
assert_equal(pca.n_components,
PCA(n_components=d,
svd_solver='arpack', random_state=0).n_components)
assert_equal(pca.svd_solver,
PCA(n_components=0,
svd_solver='arpack', random_state=0).svd_solver)
def test_pca_randomized_solver():
# PCA on dense arrays
X = iris.data
# Loop excluding the 0, invalid for randomized
for n_comp in np.arange(1, X.shape[1]):
pca = PCA(n_components=n_comp, svd_solver='randomized', random_state=0)
X_r = pca.fit(X).transform(X)
np.testing.assert_equal(X_r.shape[1], n_comp)
X_r2 = pca.fit_transform(X)
assert_array_almost_equal(X_r, X_r2)
X_r = pca.transform(X)
assert_array_almost_equal(X_r, X_r2)
# Test get_covariance and get_precision
cov = pca.get_covariance()
precision = pca.get_precision()
assert_array_almost_equal(np.dot(cov, precision),
np.eye(X.shape[1]), 12)
pca = PCA(n_components=0, svd_solver='randomized', random_state=0)
assert_raises(ValueError, pca.fit, X)
pca = PCA(n_components=0, svd_solver='randomized', random_state=0)
assert_raises(ValueError, pca.fit, X)
# Check internal state
assert_equal(pca.n_components,
PCA(n_components=0,
svd_solver='randomized', random_state=0).n_components)
assert_equal(pca.svd_solver,
PCA(n_components=0,
svd_solver='randomized', random_state=0).svd_solver)
def test_no_empty_slice_warning():
# test if we avoid numpy warnings for computing over empty arrays
n_components = 10
n_features = n_components + 2 # anything > n_comps triggered it in 0.16
X = np.random.uniform(-1, 1, size=(n_components, n_features))
pca = PCA(n_components=n_components)
assert_no_warnings(pca.fit, X)
def test_whitening():
# Check that PCA output has unit-variance
rng = np.random.RandomState(0)
n_samples = 100
n_features = 80
n_components = 30
rank = 50
# some low rank data with correlated features
X = np.dot(rng.randn(n_samples, rank),
np.dot(np.diag(np.linspace(10.0, 1.0, rank)),
rng.randn(rank, n_features)))
# the component-wise variance of the first 50 features is 3 times the
# mean component-wise variance of the remaining 30 features
X[:, :50] *= 3
assert_equal(X.shape, (n_samples, n_features))
# the component-wise variance is thus highly varying:
assert_greater(X.std(axis=0).std(), 43.8)
for solver, copy in product(solver_list, (True, False)):
# whiten the data while projecting to the lower dim subspace
X_ = X.copy() # make sure we keep an original across iterations.
pca = PCA(n_components=n_components, whiten=True, copy=copy,
svd_solver=solver, random_state=0, iterated_power=7)
# test fit_transform
X_whitened = pca.fit_transform(X_.copy())
assert_equal(X_whitened.shape, (n_samples, n_components))
X_whitened2 = pca.transform(X_)
assert_array_almost_equal(X_whitened, X_whitened2)
assert_almost_equal(X_whitened.std(axis=0), np.ones(n_components),
decimal=6)
assert_almost_equal(X_whitened.mean(axis=0), np.zeros(n_components))
X_ = X.copy()
pca = PCA(n_components=n_components, whiten=False, copy=copy,
svd_solver=solver).fit(X_)
X_unwhitened = pca.transform(X_)
assert_equal(X_unwhitened.shape, (n_samples, n_components))
# in that case the output components still have varying variances
assert_almost_equal(X_unwhitened.std(axis=0).std(), 74.1, 1)
# we always center, so no test for non-centering.
# Ignore warnings from switching to more power iterations in randomized_svd
@ignore_warnings
def test_explained_variance():
# Check that PCA output has unit-variance
rng = np.random.RandomState(0)
n_samples = 100
n_features = 80
X = rng.randn(n_samples, n_features)
pca = PCA(n_components=2, svd_solver='full').fit(X)
apca = PCA(n_components=2, svd_solver='arpack', random_state=0).fit(X)
assert_array_almost_equal(pca.explained_variance_,
apca.explained_variance_, 1)
assert_array_almost_equal(pca.explained_variance_ratio_,
apca.explained_variance_ratio_, 3)
rpca = PCA(n_components=2, svd_solver='randomized', random_state=42).fit(X)
assert_array_almost_equal(pca.explained_variance_,
rpca.explained_variance_, 1)
assert_array_almost_equal(pca.explained_variance_ratio_,
rpca.explained_variance_ratio_, 1)
# compare to empirical variances
X_pca = pca.transform(X)
assert_array_almost_equal(pca.explained_variance_,
np.var(X_pca, axis=0))
X_pca = apca.transform(X)
assert_array_almost_equal(apca.explained_variance_,
np.var(X_pca, axis=0))
X_rpca = rpca.transform(X)
assert_array_almost_equal(rpca.explained_variance_, np.var(X_rpca, axis=0),
decimal=1)
# Same with correlated data
X = datasets.make_classification(n_samples, n_features,
n_informative=n_features-2,
random_state=rng)[0]
pca = PCA(n_components=2).fit(X)
rpca = PCA(n_components=2, svd_solver='randomized',
random_state=rng).fit(X)
assert_array_almost_equal(pca.explained_variance_ratio_,
rpca.explained_variance_ratio_, 5)
def test_singular_values():
# Check that the PCA output has the correct singular values
rng = np.random.RandomState(0)
n_samples = 100
n_features = 80
X = rng.randn(n_samples, n_features)
pca = PCA(n_components=2, svd_solver='full',
random_state=rng).fit(X)
apca = PCA(n_components=2, svd_solver='arpack',
random_state=rng).fit(X)
rpca = PCA(n_components=2, svd_solver='randomized',
random_state=rng).fit(X)
assert_array_almost_equal(pca.singular_values_, apca.singular_values_, 12)
assert_array_almost_equal(pca.singular_values_, rpca.singular_values_, 1)
assert_array_almost_equal(apca.singular_values_, rpca.singular_values_, 1)
# Compare to the Frobenius norm
X_pca = pca.transform(X)
X_apca = apca.transform(X)
X_rpca = rpca.transform(X)
assert_array_almost_equal(np.sum(pca.singular_values_**2.0),
np.linalg.norm(X_pca, "fro")**2.0, 12)
assert_array_almost_equal(np.sum(apca.singular_values_**2.0),
np.linalg.norm(X_apca, "fro")**2.0, 12)
assert_array_almost_equal(np.sum(rpca.singular_values_**2.0),
np.linalg.norm(X_rpca, "fro")**2.0, 0)
# Compare to the 2-norms of the score vectors
assert_array_almost_equal(pca.singular_values_,
np.sqrt(np.sum(X_pca**2.0, axis=0)), 12)
assert_array_almost_equal(apca.singular_values_,
np.sqrt(np.sum(X_apca**2.0, axis=0)), 12)
assert_array_almost_equal(rpca.singular_values_,
np.sqrt(np.sum(X_rpca**2.0, axis=0)), 2)
# Set the singular values and see what we get back
rng = np.random.RandomState(0)
n_samples = 100
n_features = 110
X = rng.randn(n_samples, n_features)
pca = PCA(n_components=3, svd_solver='full', random_state=rng)
apca = PCA(n_components=3, svd_solver='arpack', random_state=rng)
rpca = PCA(n_components=3, svd_solver='randomized', random_state=rng)
X_pca = pca.fit_transform(X)
X_pca /= np.sqrt(np.sum(X_pca**2.0, axis=0))
X_pca[:, 0] *= 3.142
X_pca[:, 1] *= 2.718
X_hat = np.dot(X_pca, pca.components_)
pca.fit(X_hat)
apca.fit(X_hat)
rpca.fit(X_hat)
assert_array_almost_equal(pca.singular_values_, [3.142, 2.718, 1.0], 14)
assert_array_almost_equal(apca.singular_values_, [3.142, 2.718, 1.0], 14)
assert_array_almost_equal(rpca.singular_values_, [3.142, 2.718, 1.0], 14)
def test_pca_check_projection():
# Test that the projection of data is correct
rng = np.random.RandomState(0)
n, p = 100, 3
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5])
Xt = 0.1 * rng.randn(1, p) + np.array([3, 4, 5])
for solver in solver_list:
Yt = PCA(n_components=2, svd_solver=solver).fit(X).transform(Xt)
Yt /= np.sqrt((Yt ** 2).sum())
assert_almost_equal(np.abs(Yt[0][0]), 1., 1)
def test_pca_inverse():
# Test that the projection of data can be inverted
rng = np.random.RandomState(0)
n, p = 50, 3
X = rng.randn(n, p) # spherical data
X[:, 1] *= .00001 # make middle component relatively small
X += [5, 4, 3] # make a large mean
# same check that we can find the original data from the transformed
# signal (since the data is almost of rank n_components)
pca = PCA(n_components=2, svd_solver='full').fit(X)
Y = pca.transform(X)
Y_inverse = pca.inverse_transform(Y)
assert_almost_equal(X, Y_inverse, decimal=3)
# same as above with whitening (approximate reconstruction)
for solver in solver_list:
pca = PCA(n_components=2, whiten=True, svd_solver=solver)
pca.fit(X)
Y = pca.transform(X)
Y_inverse = pca.inverse_transform(Y)
assert_almost_equal(X, Y_inverse, decimal=3)
def test_pca_validation():
X = [[0, 1], [1, 0]]
for solver in solver_list:
for n_components in [-1, 3]:
assert_raises(ValueError,
PCA(n_components, svd_solver=solver).fit, X)
def test_randomized_pca_check_projection():
# Test that the projection by randomized PCA on dense data is correct
rng = np.random.RandomState(0)
n, p = 100, 3
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5])
Xt = 0.1 * rng.randn(1, p) + np.array([3, 4, 5])
Yt = PCA(n_components=2, svd_solver='randomized',
random_state=0).fit(X).transform(Xt)
Yt /= np.sqrt((Yt ** 2).sum())
assert_almost_equal(np.abs(Yt[0][0]), 1., 1)
def test_randomized_pca_check_list():
# Test that the projection by randomized PCA on list data is correct
X = [[1.0, 0.0], [0.0, 1.0]]
X_transformed = PCA(n_components=1, svd_solver='randomized',
random_state=0).fit(X).transform(X)
assert_equal(X_transformed.shape, (2, 1))
assert_almost_equal(X_transformed.mean(), 0.00, 2)
assert_almost_equal(X_transformed.std(), 0.71, 2)
def test_randomized_pca_inverse():
# Test that randomized PCA is inversible on dense data
rng = np.random.RandomState(0)
n, p = 50, 3
X = rng.randn(n, p) # spherical data
X[:, 1] *= .00001 # make middle component relatively small
X += [5, 4, 3] # make a large mean
# same check that we can find the original data from the transformed signal
# (since the data is almost of rank n_components)
pca = PCA(n_components=2, svd_solver='randomized', random_state=0).fit(X)
Y = pca.transform(X)
Y_inverse = pca.inverse_transform(Y)
assert_almost_equal(X, Y_inverse, decimal=2)
# same as above with whitening (approximate reconstruction)
pca = PCA(n_components=2, whiten=True, svd_solver='randomized',
random_state=0).fit(X)
Y = pca.transform(X)
Y_inverse = pca.inverse_transform(Y)
relative_max_delta = (np.abs(X - Y_inverse) / np.abs(X).mean()).max()
assert_less(relative_max_delta, 1e-5)
def test_pca_dim():
# Check automated dimensionality setting
rng = np.random.RandomState(0)
n, p = 100, 5
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5, 1, 2])
pca = PCA(n_components='mle', svd_solver='full').fit(X)
assert_equal(pca.n_components, 'mle')
assert_equal(pca.n_components_, 1)
def test_infer_dim_1():
# TODO: explain what this is testing
# Or at least use explicit variable names...
n, p = 1000, 5
rng = np.random.RandomState(0)
X = (rng.randn(n, p) * .1 + rng.randn(n, 1) * np.array([3, 4, 5, 1, 2]) +
np.array([1, 0, 7, 4, 6]))
pca = PCA(n_components=p, svd_solver='full')
pca.fit(X)
spect = pca.explained_variance_
ll = []
for k in range(p):
ll.append(_assess_dimension_(spect, k, n, p))
ll = np.array(ll)
assert_greater(ll[1], ll.max() - .01 * n)
def test_infer_dim_2():
# TODO: explain what this is testing
# Or at least use explicit variable names...
n, p = 1000, 5
rng = np.random.RandomState(0)
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5, 1, 2])
X[10:20] += np.array([6, 0, 7, 2, -1])
pca = PCA(n_components=p, svd_solver='full')
pca.fit(X)
spect = pca.explained_variance_
assert_greater(_infer_dimension_(spect, n, p), 1)
def test_infer_dim_3():
n, p = 100, 5
rng = np.random.RandomState(0)
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5, 1, 2])
X[10:20] += np.array([6, 0, 7, 2, -1])
X[30:40] += 2 * np.array([-1, 1, -1, 1, -1])
pca = PCA(n_components=p, svd_solver='full')
pca.fit(X)
spect = pca.explained_variance_
assert_greater(_infer_dimension_(spect, n, p), 2)
def test_infer_dim_by_explained_variance():
X = iris.data
pca = PCA(n_components=0.95, svd_solver='full')
pca.fit(X)
assert_equal(pca.n_components, 0.95)
assert_equal(pca.n_components_, 2)
pca = PCA(n_components=0.01, svd_solver='full')
pca.fit(X)
assert_equal(pca.n_components, 0.01)
assert_equal(pca.n_components_, 1)
rng = np.random.RandomState(0)
# more features than samples
X = rng.rand(5, 20)
pca = PCA(n_components=.5, svd_solver='full').fit(X)
assert_equal(pca.n_components, 0.5)
assert_equal(pca.n_components_, 2)
def test_pca_score():
# Test that probabilistic PCA scoring yields a reasonable score
n, p = 1000, 3
rng = np.random.RandomState(0)
X = rng.randn(n, p) * .1 + np.array([3, 4, 5])
for solver in solver_list:
pca = PCA(n_components=2, svd_solver=solver)
pca.fit(X)
ll1 = pca.score(X)
h = -0.5 * np.log(2 * np.pi * np.exp(1) * 0.1 ** 2) * p
np.testing.assert_almost_equal(ll1 / h, 1, 0)
def test_pca_score2():
# Test that probabilistic PCA correctly separated different datasets
n, p = 100, 3
rng = np.random.RandomState(0)
X = rng.randn(n, p) * .1 + np.array([3, 4, 5])
for solver in solver_list:
pca = PCA(n_components=2, svd_solver=solver)
pca.fit(X)
ll1 = pca.score(X)
ll2 = pca.score(rng.randn(n, p) * .2 + np.array([3, 4, 5]))
assert_greater(ll1, ll2)
# Test that it gives different scores if whiten=True
pca = PCA(n_components=2, whiten=True, svd_solver=solver)
pca.fit(X)
ll2 = pca.score(X)
assert_true(ll1 > ll2)
def test_pca_score3():
# Check that probabilistic PCA selects the right model
n, p = 200, 3
rng = np.random.RandomState(0)
Xl = (rng.randn(n, p) + rng.randn(n, 1) * np.array([3, 4, 5]) +
np.array([1, 0, 7]))
Xt = (rng.randn(n, p) + rng.randn(n, 1) * np.array([3, 4, 5]) +
np.array([1, 0, 7]))
ll = np.zeros(p)
for k in range(p):
pca = PCA(n_components=k, svd_solver='full')
pca.fit(Xl)
ll[k] = pca.score(Xt)
assert_true(ll.argmax() == 1)
def test_svd_solver_auto():
rng = np.random.RandomState(0)
X = rng.uniform(size=(1000, 50))
# case: n_components in (0,1) => 'full'
pca = PCA(n_components=.5)
pca.fit(X)
pca_test = PCA(n_components=.5, svd_solver='full')
pca_test.fit(X)
assert_array_almost_equal(pca.components_, pca_test.components_)
# case: max(X.shape) <= 500 => 'full'
pca = PCA(n_components=5, random_state=0)
Y = X[:10, :]
pca.fit(Y)
pca_test = PCA(n_components=5, svd_solver='full', random_state=0)
pca_test.fit(Y)
assert_array_almost_equal(pca.components_, pca_test.components_)
# case: n_components >= .8 * min(X.shape) => 'full'
pca = PCA(n_components=50)
pca.fit(X)
pca_test = PCA(n_components=50, svd_solver='full')
pca_test.fit(X)
assert_array_almost_equal(pca.components_, pca_test.components_)
# n_components >= 1 and n_components < .8 * min(X.shape) => 'randomized'
pca = PCA(n_components=10, random_state=0)
pca.fit(X)
pca_test = PCA(n_components=10, svd_solver='randomized', random_state=0)
pca_test.fit(X)
assert_array_almost_equal(pca.components_, pca_test.components_)
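# Summary (added comment) of the 'auto' solver heuristic exercised above:
# a fractional n_components, small inputs (max(X.shape) <= 500), or
# n_components >= .8 * min(X.shape) fall back to the exact 'full' SVD,
# while moderate n_components on larger inputs use the 'randomized' solver.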
def test_deprecation_randomized_pca():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
depr_message = ("Class RandomizedPCA is deprecated; RandomizedPCA was "
"deprecated in 0.18 and will be "
"removed in 0.20. Use PCA(svd_solver='randomized') "
"instead. The new implementation DOES NOT store "
"whiten ``components_``. Apply transform to get them.")
def fit_deprecated(X):
global Y
rpca = RandomizedPCA(random_state=0)
Y = rpca.fit_transform(X)
assert_warns_message(DeprecationWarning, depr_message, fit_deprecated, X)
Y_pca = PCA(svd_solver='randomized', random_state=0).fit_transform(X)
assert_array_almost_equal(Y, Y_pca)
def test_pca_sparse_input():
X = np.random.RandomState(0).rand(5, 4)
X = sp.sparse.csr_matrix(X)
assert(sp.sparse.issparse(X))
for svd_solver in solver_list:
pca = PCA(n_components=3, svd_solver=svd_solver)
assert_raises(TypeError, pca.fit, X)
def test_pca_bad_solver():
X = np.random.RandomState(0).rand(5, 4)
pca = PCA(n_components=3, svd_solver='bad_argument')
assert_raises(ValueError, pca.fit, X)
|
bsd-3-clause
|
COMBINE-lab/matryoshka_work
|
coredomains-import/python-src/plot-dom-size-freq-data.py
|
1
|
4183
|
import sys
import matplotlib.pyplot as plt
import matplotlib
from pylab import *
import math
import numpy
data_files = sys.argv[1:-1]
print len(data_files)
BR_sizes = []
BR_freqs = []
our_sizes_t = []
our_freqs_t = []
per_gamma = { (int(i*0.01 * 100))/100.0 : {'freq':[], 'size': []} for i in xrange(5, 101, 5)}
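# (added comment) per_gamma maps each gamma value (0.05, 0.10, ..., 1.00,
# rounded to two decimals) to the domain sizes and mean frequencies collected
# for that gamma across all input files in the loop below.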
for f in data_files:
print f
with open(f, 'r') as f_in:
lines = f_in.readlines()
# print len(lines)
BR_sizes.extend( map(int, lines[0].strip().split(", ") ) )
BR_freqs.extend( map(float, lines[1].strip().split(", ") ) )
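        # (added comment) The remaining lines appear to come in triplets: a
        # gamma value, then the comma-separated domain sizes and mean
        # frequencies found at that gamma (format inferred from the parsing
        # below).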
for i in xrange(2, len(lines), 3):
gamma = float(lines[i].strip())
our_sizes = map(int, lines[i+1].strip().split(", ") )
our_freqs = map(float, lines[i+2].strip().split(", ") )
# filter!
# sf = zip(our_sizes, our_freqs)
# sf = [ (s,f) for s,f in sf if s < 20 ]
# our_sizes, our_freqs = zip(*sf)
per_gamma[gamma]['freq'].extend( our_freqs)
per_gamma[gamma]['size'].extend( our_sizes)
our_sizes_t += our_sizes
our_freqs_t += our_freqs
# filtering for our data
# sf = zip(our_sizes, our_freqs)
# sf = [ (s,f) for s,f in sf if s < 100 and f < 100 ]
# our_sizes, our_freqs = zip(*sf)
'''
plt.plot(our_sizes, our_freqs, 'k.', alpha=0.1, markersize=20, label="Our domains")
plt.plot(BR_sizes, BR_freqs, 'bD', alpha=0.1, markersize=20, label="B. Ren domains")
plt.xlabel('Domain size')
plt.ylabel('Mean frequency within domains')
plt.legend()
picname = "mean-freq-chr" + str(chromo) + ".pdf"
plt.savefig(picname)
print 'Saved plot to', picname
'''
minx = min(-1, min(min(our_sizes_t) , min(BR_sizes) ) )
maxx = max(max(our_sizes_t) , max(BR_sizes) )
miny = min(min(our_freqs_t) , min(BR_freqs) )
maxy = max(max(our_freqs_t) , max(BR_freqs) )
plt.clf()
fig = plt.figure()
ax = None
i_max = 8 # gamma_max = i_max * 0.05
for i in xrange(1, 8, 2):
print (i+1)/2
ax = fig.add_subplot(i_max/2, 2, i)
g1 = int(0.05 * i * 100) / 100.0
data = per_gamma[g1]
ax.set_title('$\gamma$ = ' + str(g1))
ax.loglog( BR_sizes, BR_freqs, 'bx', markersize=5, alpha=0.6)
# if len(data['size']) < 50:
# A = 0.9
# else:
A= 0.2
ax.loglog( data['size'], data['freq'], 'go', markersize=5, alpha=A)
# print len(data['size'])
plt.xlim( [minx, maxx] )
plt.ylim( [miny, maxy] )
ax = fig.add_subplot(i_max/2, 2, i+1)
g2 = int(0.05 * (i+1) * 100 ) / 100.0
data = per_gamma[g2]
# print len(data['size'])
ax.set_title('$\gamma$ = ' + str(g2))
ax.loglog( BR_sizes, BR_freqs, 'bx', markersize=5, alpha=0.6)
# if len(data['size']) < 50:
# A = 0.9
# else:
# A= 0.3
ax.loglog( data['size'], data['freq'], 'go', markersize=5, alpha=A)
plt.xlim( [minx, maxx] )
plt.ylim( [miny, maxy] )
#fname = "mean-freq-us-BR-chr" + chromo + ".pdf"
#plt.savefig(fname)
#print "Saved double figure to", fname
# plt.show()
font = {'family' : 'normal',
'size' : 20}
matplotlib.rc('font', **font)
freqs = [f for V in per_gamma.values() for f in V['freq']]
print 'Was',len(freqs)
mu = np.mean(freqs)
median = np.median(freqs)
sigma = np.std(freqs)
print mu, median, sigma
freqs = filter(lambda x: x <= mu+4*sigma, freqs)
print 'Become',len(freqs)
plt.clf()
plt.xlabel('Mean frequency within domain')
plt.hist(freqs, bins=80, alpha=0.3, normed=True, label="Multiscale")
plt.hist(BR_freqs, bins=40, alpha=0.3, normed=True, label="Dixon et al.")
plt.legend()
plt.savefig("mean-freq-distr.pdf")
# plt.show()
sizes = [s*40 for V in per_gamma.values() for s in V['size']]
BR_sizes = [s*40 for s in BR_sizes]
print 'Was', len(sizes)
mu = np.mean(sizes)
median = np.median(sizes)
sigma = math.sqrt(np.std(sizes))
# print mu, median, sigma
# sizes = filter(lambda x: x <= mu+10*sigma, sizes)
print np.mean(sizes), np.mean(BR_sizes)
# print np.mean(sizes) * 40, np.mean(BR_sizes) * 40
# print np.mean(sizes*40), np.mean(BR_sizes*40)
print np.std(sizes), np.std(BR_sizes)
# print np.std(sizes) * 40, np.std(BR_sizes) * 40
# print np.std(sizes * 40), np.std(BR_sizes*40)
print 'Become',len(sizes)
plt.clf()
plt.xlabel('Domain size, in 40Kb fragments')
plt.hist(sizes, bins=50, alpha=0.3, log=False, normed=True)
plt.hist(BR_sizes, bins=50, alpha=0.3, log=False, normed=True)
plt.savefig("dom-size-distr.pdf")
# plt.show()
|
gpl-3.0
|
xubenben/scikit-learn
|
sklearn/linear_model/passive_aggressive.py
|
97
|
10879
|
# Authors: Rob Zinkov, Mathieu Blondel
# License: BSD 3 clause
from .stochastic_gradient import BaseSGDClassifier
from .stochastic_gradient import BaseSGDRegressor
from .stochastic_gradient import DEFAULT_EPSILON
class PassiveAggressiveClassifier(BaseSGDClassifier):
"""Passive Aggressive Classifier
Read more in the :ref:`User Guide <passive_aggressive>`.
Parameters
----------
C : float
Maximum step size (regularization). Defaults to 1.0.
    fit_intercept : bool, default=True
Whether the intercept should be estimated or not. If False, the
data is assumed to be already centered.
n_iter : int, optional
The number of passes over the training data (aka epochs).
Defaults to 5.
shuffle : bool, default=True
Whether or not the training data should be shuffled after each epoch.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data.
verbose : integer, optional
The verbosity level
n_jobs : integer, optional
The number of CPUs to use to do the OVA (One Versus All, for
multi-class problems) computation. -1 means 'all CPUs'. Defaults
to 1.
loss : string, optional
The loss function to be used:
hinge: equivalent to PA-I in the reference paper.
squared_hinge: equivalent to PA-II in the reference paper.
warm_start : bool, optional
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
class_weight : dict, {class_label: weight} or "balanced" or None, optional
Preset for the class_weight fit parameter.
Weights associated with classes. If not given, all classes
are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
Attributes
----------
coef_ : array, shape = [1, n_features] if n_classes == 2 else [n_classes,\
n_features]
Weights assigned to the features.
intercept_ : array, shape = [1] if n_classes == 2 else [n_classes]
Constants in decision function.
See also
--------
SGDClassifier
Perceptron
References
----------
Online Passive-Aggressive Algorithms
<http://jmlr.csail.mit.edu/papers/volume7/crammer06a/crammer06a.pdf>
    K. Crammer, O. Dekel, J. Keshet, S. Shalev-Shwartz, Y. Singer - JMLR (2006)
"""
def __init__(self, C=1.0, fit_intercept=True, n_iter=5, shuffle=True,
verbose=0, loss="hinge", n_jobs=1, random_state=None,
warm_start=False, class_weight=None):
BaseSGDClassifier.__init__(self,
penalty=None,
fit_intercept=fit_intercept,
n_iter=n_iter,
shuffle=shuffle,
verbose=verbose,
random_state=random_state,
eta0=1.0,
warm_start=warm_start,
class_weight=class_weight,
n_jobs=n_jobs)
self.C = C
self.loss = loss
def partial_fit(self, X, y, classes=None):
"""Fit linear model with Passive Aggressive algorithm.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Subset of the training data
y : numpy array of shape [n_samples]
Subset of the target values
classes : array, shape = [n_classes]
Classes across all calls to partial_fit.
            Can be obtained via `np.unique(y_all)`, where y_all is the
target vector of the entire dataset.
This argument is required for the first call to partial_fit
and can be omitted in the subsequent calls.
Note that y doesn't need to contain all labels in `classes`.
Returns
-------
self : returns an instance of self.
"""
if self.class_weight == 'balanced':
raise ValueError("class_weight 'balanced' is not supported for "
"partial_fit. For 'balanced' weights, use "
"`sklearn.utils.compute_class_weight` with "
"`class_weight='balanced'`. In place of y you "
"can use a large enough subset of the full "
"training set target to properly estimate the "
"class frequency distributions. Pass the "
"resulting weights as the class_weight "
"parameter.")
lr = "pa1" if self.loss == "hinge" else "pa2"
return self._partial_fit(X, y, alpha=1.0, C=self.C,
loss="hinge", learning_rate=lr, n_iter=1,
classes=classes, sample_weight=None,
coef_init=None, intercept_init=None)
def fit(self, X, y, coef_init=None, intercept_init=None):
"""Fit linear model with Passive Aggressive algorithm.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data
y : numpy array of shape [n_samples]
Target values
coef_init : array, shape = [n_classes,n_features]
The initial coefficients to warm-start the optimization.
intercept_init : array, shape = [n_classes]
The initial intercept to warm-start the optimization.
Returns
-------
self : returns an instance of self.
"""
lr = "pa1" if self.loss == "hinge" else "pa2"
return self._fit(X, y, alpha=1.0, C=self.C,
loss="hinge", learning_rate=lr,
coef_init=coef_init, intercept_init=intercept_init)
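# A minimal usage sketch (added comment, not part of the original module);
# X_train, y_train and X_test are assumed to be user-supplied arrays:
#
#     from sklearn.linear_model import PassiveAggressiveClassifier
#     clf = PassiveAggressiveClassifier(C=1.0, loss="hinge", random_state=0)
#     clf.fit(X_train, y_train)
#     y_pred = clf.predict(X_test)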
class PassiveAggressiveRegressor(BaseSGDRegressor):
"""Passive Aggressive Regressor
Read more in the :ref:`User Guide <passive_aggressive>`.
Parameters
----------
C : float
Maximum step size (regularization). Defaults to 1.0.
epsilon : float
If the difference between the current prediction and the correct label
is below this threshold, the model is not updated.
fit_intercept : bool
Whether the intercept should be estimated or not. If False, the
data is assumed to be already centered. Defaults to True.
n_iter : int, optional
The number of passes over the training data (aka epochs).
Defaults to 5.
shuffle : bool, default=True
Whether or not the training data should be shuffled after each epoch.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data.
verbose : integer, optional
The verbosity level
loss : string, optional
The loss function to be used:
epsilon_insensitive: equivalent to PA-I in the reference paper.
squared_epsilon_insensitive: equivalent to PA-II in the reference
paper.
warm_start : bool, optional
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
Attributes
----------
coef_ : array, shape = [1, n_features] if n_classes == 2 else [n_classes,\
n_features]
Weights assigned to the features.
intercept_ : array, shape = [1] if n_classes == 2 else [n_classes]
Constants in decision function.
See also
--------
SGDRegressor
References
----------
Online Passive-Aggressive Algorithms
<http://jmlr.csail.mit.edu/papers/volume7/crammer06a/crammer06a.pdf>
    K. Crammer, O. Dekel, J. Keshet, S. Shalev-Shwartz, Y. Singer - JMLR (2006)
"""
def __init__(self, C=1.0, fit_intercept=True, n_iter=5, shuffle=True,
verbose=0, loss="epsilon_insensitive",
epsilon=DEFAULT_EPSILON, random_state=None, warm_start=False):
BaseSGDRegressor.__init__(self,
penalty=None,
l1_ratio=0,
epsilon=epsilon,
eta0=1.0,
fit_intercept=fit_intercept,
n_iter=n_iter,
shuffle=shuffle,
verbose=verbose,
random_state=random_state,
warm_start=warm_start)
self.C = C
self.loss = loss
def partial_fit(self, X, y):
"""Fit linear model with Passive Aggressive algorithm.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Subset of training data
y : numpy array of shape [n_samples]
Subset of target values
Returns
-------
self : returns an instance of self.
"""
lr = "pa1" if self.loss == "epsilon_insensitive" else "pa2"
return self._partial_fit(X, y, alpha=1.0, C=self.C,
loss="epsilon_insensitive",
learning_rate=lr, n_iter=1,
sample_weight=None,
coef_init=None, intercept_init=None)
def fit(self, X, y, coef_init=None, intercept_init=None):
"""Fit linear model with Passive Aggressive algorithm.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data
y : numpy array of shape [n_samples]
Target values
coef_init : array, shape = [n_features]
The initial coefficients to warm-start the optimization.
intercept_init : array, shape = [1]
The initial intercept to warm-start the optimization.
Returns
-------
self : returns an instance of self.
"""
lr = "pa1" if self.loss == "epsilon_insensitive" else "pa2"
return self._fit(X, y, alpha=1.0, C=self.C,
loss="epsilon_insensitive",
learning_rate=lr,
coef_init=coef_init,
intercept_init=intercept_init)
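# A minimal online-learning sketch for the regressor (added comment, not part
# of the original module); X_batch and y_batch are assumed user-supplied
# chunks and stream_of_minibatches is a hypothetical iterable:
#
#     from sklearn.linear_model import PassiveAggressiveRegressor
#     reg = PassiveAggressiveRegressor(C=1.0, loss="epsilon_insensitive")
#     for X_batch, y_batch in stream_of_minibatches:
#         reg.partial_fit(X_batch, y_batch)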
|
bsd-3-clause
|
JeffHeard/terrapyn
|
geocms/views/ows.py
|
1
|
11118
|
import json
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import get_object_or_404, render_to_response
from matplotlib.finance import md5
from osgeo import osr, ogr
import logging
from django.core.urlresolvers import reverse
from terrapyn.ows.views import wms, wfs
from terrapyn.geocms import models, dispatch
from terrapyn.geocms.cache import CacheManager
from terrapyn.geocms.models import Layer, Style
from terrapyn.geocms.utils import authorize
from terrapyn.geocms.rendering import Renderer
from terrapyn.geocms.drivers import shapefile
_log = logging.getLogger('terrapyn.driver_messages')
class WMSAdapter(wms.WMSAdapterBase):
def __init__(
self,
requires_time=False,
requires_elevation=False,
requires_version=False,
base_path=None
):
self.base_path=base_path
super(WMSAdapter, self).__init__([], requires_time, requires_elevation, requires_version)
def layerlist(self):
if self.base_path:
return [l for l in Layer.objects.filter(slug__startswith=self.base_path)]
else:
return [l for l in Layer.objects.all()]
def get_2d_dataset(self, layers, srs, bbox, width, height, styles, bgcolor, transparent, time, elevation, v, filter,
**kwargs):
"""use the driver to render a tile"""
return Renderer().render(kwargs['format'], width, height, bbox, srs, styles, layers, **kwargs)
def get_feature_info(self, wherex, wherey, layers, callback, format, feature_count, srs, filter, fuzziness=0,
**kwargs): # fuzziness of 30 meters by default
"""use the driver to get feature info"""
if srs.lower().startswith('epsg'):
s = osr.SpatialReference()
s.ImportFromEPSG(int(srs[5:]))
srs = s.ExportToProj4()
feature_info = {
layer: Layer.objects.get(slug=layer).data_resource.driver_instance.get_data_for_point(
wherex, wherey, srs, fuzziness=fuzziness, **kwargs
)
for layer in layers
}
return feature_info
def nativesrs(self, layer):
"""Use the resource record to get native SRS"""
resource = Layer.objects.get(slug=layer).data_resource
return resource.native_srs
def nativebbox(self, layer=None):
"""Use the resource record to get the native bounding box"""
if layer:
resource = Layer.objects.get(slug=layer).data_resource
return resource.native_bounding_box.extent
else:
return (-180, -90, 180, 90)
def styles(self):
"""Use the resource record to get the available styles"""
if self.base_path:
return [l.slug for l in Style.objects.filter(slug__startswith=self.base_path)]
else:
return [s.slug for s in Style.objects.all()]
def get_layer_descriptions(self):
"""
This should return a list of dictionaries. Each dictionary should follow this format::
{ ""name"" : layer_"name",
"title" : human_readable_title,
"srs" : spatial_reference_id,
"queryable" : whether or not GetFeatureInfo is supported for this layer,
"minx" : native_west_boundary,
"miny" : native_south_boundary,
"maxx" : native_east_boundary,
"maxy" : native_north_boundary,
"ll_minx" : west_boundary_epsg4326,
"ll_miny" : south_boundary_epsg4326,
"ll_maxx" : east_boundary_epsg4326,
"ll_maxy" : north_boundary_epsg4326,
"styles" : [list_of_style_descriptions]
Each style description in list_of_style_descriptions should follow this format::
{ ""name"" : style_"name",
"title" : style_title,
"legend_width" : style_legend_width,
"legend_height" : style_legend_height,
"legend_url" : style_legend_url
}
"""
layers = self.layerlist()
ret = []
for layer in layers:
desc = {}
ret.append(desc)
desc["name"] = layer.slug
desc['title'] = layer.title
desc['srs'] = layer.data_resource.metadata.first().native_srs
desc['queryable'] = True
desc['minx'], desc['miny'], desc['maxx'], desc[
'maxy'] = layer.data_resource.metadata.first().native_bounding_box.extent # FIXME this is not native
desc['ll_minx'], desc['ll_miny'], desc['ll_maxx'], desc[
'll_maxy'] = layer.data_resource.metadata.first().bounding_box.extent
desc['styles'] = []
desc['styles'].append({
"name": layer.default_style.slug,
'title': layer.default_style.title,
'legend_width': layer.default_style.legend_width,
'legend_height': layer.default_style.legend_height,
'legend_url': layer.default_style.legend.url if layer.default_style.legend else ""
})
for style in layer.styles.all():
desc['styles'].append({
"name": style.slug,
'title': style.title,
'legend_width': style.legend_width,
'legend_height': style.legend_height,
'legend_url': style.legend.url if style.legend else ""
})
return ret
def get_service_boundaries(self):
"""Just go ahead and return the world coordinates"""
return {
"minx": -180.0,
"miny": -90.0,
"maxx": 180.0,
"maxy": 90.0
}
class WMS(wms.WMS):
adapter = WMSAdapter()
class WFSAdapter(wfs.WFSAdapter):
def get_feature_descriptions(self, request, *types):
namespace = request.build_absolute_uri().split('?')[
0] + "/schema" # todo: include https://bitbucket.org/eegg/django-model-schemas/wiki/Home
for type_name in types:
res = get_object_or_404(models.DataResource, slug=type_name)
yield wfs.FeatureDescription(
ns=namespace,
ns_name='terrapyn',
name=res.slug,
abstract=res.description,
title=res.title,
keywords=res.keywords,
srs=res.native_srs,
bbox=res.bounding_box,
schema=namespace + '/' + res.slug
)
def list_stored_queries(self, request):
"""list all the queries associated with drivers"""
sq = super(WFSAdapter, self).list_stored_queries(request)
return sq
def get_features(self, request, parms):
if parms.cleaned_data['stored_query_id']:
squid = "SQ_" + parms.cleaned_data['stored_query_id']
slug = parms.cleaned_data['type_names'] if isinstance(parms.cleaned_data['type_names'], basestring) else \
parms.cleaned_data['type_names'][0]
try:
return models.DataResource.driver_instance.query_operation(squid)(request, **parms.cleaned_data)
except:
raise wfs.OperationNotSupported.at('GetFeatures', 'stored_query_id={squid}'.format(squid=squid))
else:
return self.AdHocQuery(request, **parms.cleaned_data)
def AdHocQuery(self, req,
type_names=None,
filter=None,
filter_language=None,
bbox=None,
sort_by=None,
count=None,
start_index=None,
srs_name=None,
srs_format=None,
max_features=None,
**kwargs
):
model = get_object_or_404(models.DataResource, slug=type_names[0])
driver = model.driver_instance
extra = {}
if filter:
extra['filter'] = json.loads(filter)
if bbox:
extra['bbox'] = bbox
if srs_name:
srs = osr.SpatialReference()
if srs_name.lower().startswith('epsg'):
srs.ImportFromEPSG(int(srs_name[5:]))
else:
srs.ImportFromProj4(srs_name)
extra['srs'] = srs
else:
srs = model.srs
if start_index:
extra['start'] = start_index
count = count or max_features
if count:
extra['count'] = count
if "boundary" in kwargs:
extra['boundary'] = kwargs['boundary']
extra['boundary_type'] = kwargs['boundary_type']
df = driver.as_dataframe(**extra)
if sort_by:
extra['sort_by'] = sort_by
if filter_language and filter_language != 'json':
raise wfs.OperationNotSupported('filter language must be JSON for now')
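        # (added comment) The md5 digest below derives a deterministic
        # filename for the temporary shapefile from the query parameters, so
        # identical queries map to the same on-disk dataset (intent inferred
        # from the surrounding code).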
filename = md5()
filename.update("{name}.{bbox}.{srs_name}x{filter}".format(
name=type_names[0],
bbox=','.join(str(b) for b in bbox),
srs_name=srs_name,
filter=filter
))
filename = filename.hexdigest()
shapefile.ShapefileDriver.from_dataframe(df, filename, srs)
ds = ogr.Open(filename)
return ds
def supports_feature_versioning(self):
return False
class WFS(wfs.WFS):
adapter = WFSAdapter()
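# (added comment) tms() serves pre-rendered map tiles addressed by
# zoom/column/row (z/x/y); when no tile coordinates are given it redirects to
# the layer's page instead.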
def tms(request, layer, z=None, x=None, y=None, **kwargs):
if z and x and y:
z = int(z)
x = int(x)
y = int(y)
table = None
if '#' in layer:
layer_slug, table = layer.split('#')
else:
layer_slug = layer
# dispatch.api_accessed.send(RenderedLayer, instance=layer_instance, user=user)
style = request.GET.get('style', Layer.objects.get(slug=layer_slug).default_style.slug)
cache = CacheManager.get().get_tile_cache([layer], [style])
rendered, png = cache.fetch_tile(z, x, y)
_log.debug("returning tile {z}/{x}/{y} with size {png}".format(z=z, x=x, y=y, png=len(png)))
rsp = HttpResponse(png, content_type='image/png')
rsp['Content-Disposition'] = 'attachment; filename="{z}.{x}.{y}.png"'.format(z=z, x=x, y=y)
return rsp
else:
return HttpResponseRedirect(reverse('layer-page', kwargs={'slug':layer}))
def seed_layer(request, layer):
mnz = int(request.GET['minz'])
    mxz = int(request.GET['maxz'])  # cap the zoom depth; seeding deeper levels would invite a DoS and should be done manually
mnx = int(request.GET['minx'])
mxx = int(request.GET['maxx'])
mny = int(request.GET['miny'])
mxy = int(request.GET['maxy'])
layer = Layer.objects.get(slug=layer)
style = request.GET.get('style', layer.default_style)
user = authorize(request, page=layer, edit=True)
dispatch.api_accessed.send(Layer, instance=layer, user=user)
CacheManager.get().get_tile_cache(layers=[layer], styles=[style]).seed_tiles(mnz, mxz, mnx, mny, mxx, mxy)
return HttpResponse()
|
apache-2.0
|
dhalleine/tensorflow
|
tensorflow/contrib/learn/__init__.py
|
8
|
1912
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# TODO(ptucker,ipolosukhin): Improve descriptions.
"""High level API for learning with TensorFlow.
## Estimators
Train and evaluate TensorFlow models.
@@BaseEstimator
@@Estimator
@@ModeKeys
@@TensorFlowClassifier
@@DNNClassifier
@@DNNRegressor
@@TensorFlowDNNClassifier
@@TensorFlowDNNRegressor
@@TensorFlowEstimator
@@LinearClassifier
@@LinearRegressor
@@TensorFlowLinearClassifier
@@TensorFlowLinearRegressor
@@TensorFlowRNNClassifier
@@TensorFlowRNNRegressor
@@TensorFlowRegressor
## Graph actions
Perform various training, evaluation, and inference actions on a graph.
@@NanLossDuringTrainingError
@@RunConfig
@@evaluate
@@infer
@@run_feeds
@@run_n
@@train
## Input processing
Queue and read batched input data.
@@extract_dask_data
@@extract_dask_labels
@@extract_pandas_data
@@extract_pandas_labels
@@extract_pandas_matrix
@@read_batch_examples
@@read_batch_features
@@read_batch_record_features
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=wildcard-import
from tensorflow.contrib.learn.python.learn import *
from tensorflow.python.util.all_util import make_all
__all__ = make_all(__name__)
__all__.append('datasets')
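# A rough usage sketch (added comment, illustrative only -- the estimator and
# argument names below are assumptions and may differ between TensorFlow
# versions):
#
#     from tensorflow.contrib import learn
#     clf = learn.TensorFlowDNNClassifier(hidden_units=[10, 20, 10], n_classes=3)
#     clf.fit(x_train, y_train)
#     predictions = clf.predict(x_test)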
|
apache-2.0
|
wwliao/methgo
|
scripts/met/met.py
|
2
|
18934
|
#!/usr/bin/env python
from __future__ import division
import os
import re
import argparse
from itertools import izip, compress
from collections import defaultdict
import numpy as np
import pandas as pd
from Bio import SeqIO
import matplotlib.pyplot as plt
def get_parser():
"""
Create a parser and add arguments
"""
parser = argparse.ArgumentParser()
parser.add_argument('-d', '--depth', type=int, default=4, help='minimum read depth, default: 4')
parser.add_argument('-p', '--pmtsize', type=int, default=1000, help='promoter size, default: 1000')
parser.add_argument('-w', '--winsize', type=int, default=200000, help='window size, default: 200000')
parser.add_argument('gtf', help='GTF file')
parser.add_argument('fasta', help='reference genome FASTA file')
parser.add_argument('cgmap', help='CGmap file')
return parser
def const_gtftree(gtffile):
"""
Read a GTF file and convert it to a nested dictionary
"""
gtftree = defaultdict(lambda: defaultdict(list))
with open(gtffile) as infile:
for line in infile:
if not line.startswith('#'):
gene_id = None
transcript_id = None
line = line.strip().split('\t')
chr = line[0]
feature = line[2]
start = int(line[3]) - 1
end = int(line[4])
strand = line[6]
attributes = line[8].split(';')
if feature == 'exon':
for atb in attributes:
if 'gene_id' in atb:
gene_id = atb.strip().split()[1][1:-1]
elif 'transcript_id' in atb:
transcript_id = atb.strip().split()[1][1:-1]
if gene_id and transcript_id:
gtftree[chr][(gene_id, strand)].append((start, end))
return gtftree
def const_ctxstr(reffile):
"""
Construct methylation context strings from a reference genome FASTA file
"""
with open(reffile) as infile:
fasta = SeqIO.to_dict(SeqIO.parse(infile, 'fasta'))
for chr in fasta:
fasta[chr] = str(fasta[chr].seq).upper()
ctxstr = {}
for chr in fasta:
ctxstr[chr] = ['-']*len(fasta[chr])
cg = [match.start() for match in re.finditer(r'(?=(CG))', fasta[chr])]
for pos in cg:
ctxstr[chr][pos] = 'X'
chg = [match.start() for match in re.finditer(r'(?=(C[ACT]G))', fasta[chr])]
for pos in chg:
ctxstr[chr][pos] = 'Y'
chh = [match.start() for match in re.finditer(r'(?=(C[ACT][ACT]))', fasta[chr])]
for pos in chh:
ctxstr[chr][pos] = 'Z'
rcg = [match.start()-1 for match in re.finditer(r'(?<=(CG))', fasta[chr])]
for pos in rcg:
ctxstr[chr][pos] = 'x'
rchg = [match.start()-1 for match in re.finditer(r'(?<=(C[AGT]G))', fasta[chr])]
for pos in rchg:
ctxstr[chr][pos] = 'y'
rchh = [match.start()-1 for match in re.finditer(r'(?<=([AGT][AGT]G))', fasta[chr])]
for pos in rchh:
ctxstr[chr][pos] = 'z'
for chr in ctxstr:
ctxstr[chr] = ''.join(ctxstr[chr])
return ctxstr
def const_cgmap(ctxstr, cgmapfile, readdepth=4):
"""
Construct lists of methylation levels from a CGmap file for rapid access
"""
cgmap = {}
with open(cgmapfile) as infile:
for chr in ctxstr.keys():
cgmap[chr] = ['-' for _ in xrange(len(ctxstr[chr]))]
for line in infile:
line = line.strip().split()
chr = line[0]
            pos = int(line[2]) - 1  # convert to 0-based
context = line[3]
level = float(line[5])
depth = int(line[7])
if context in ['CG', 'CHG', 'CHH'] and depth >= readdepth:
cgmap[chr][pos] = level
return cgmap
def calc_bulk(ctxstr, cgmap):
"""
Compute the global methylation level in CG/CHG/CHH
"""
inv_ctxs = {'X': 'CG', 'Y': 'CHG', 'Z': 'CHH'}
bulk = defaultdict(list)
for chr in set(ctxstr) & set(cgmap):
for tag, mlevel in izip(ctxstr[chr], cgmap[chr]):
tag = tag.upper()
if tag in inv_ctxs and mlevel != '-':
bulk[inv_ctxs[tag]].append(mlevel)
return bulk
def calc_mlevel(ctxstr, cgmap, gtftree, pmtsize=1000):
"""
Compute the mean methylation level of promoter/gene/exon/intron/IGN in each gene
"""
inv_ctxs = {'X': 'CG', 'Y': 'CHG', 'Z': 'CHH'}
ign = defaultdict(list)
mtable = defaultdict(lambda: defaultdict(lambda: defaultdict(float)))
counter = defaultdict(lambda: defaultdict(int))
for chr in set(ctxstr) & set(cgmap) & set(gtftree):
mask = [1]*len(cgmap[chr])
for (gene_id, strand) in gtftree[chr]:
feature_mlevels = defaultdict(lambda: defaultdict(list))
gstart = min(gtftree[chr][(gene_id, strand)])[0]
gend = max(gtftree[chr][(gene_id, strand)])[1]
mask[gstart:gend] = [0]*(gend - gstart)
if strand == '+':
for (pos, (tag, mlevel)) in enumerate(izip(ctxstr[chr][gstart-pmtsize:gstart], cgmap[chr][gstart-pmtsize:gstart])):
tag = tag.upper()
if tag in inv_ctxs and mlevel != '-':
feature_mlevels[inv_ctxs[tag]]['pmt'].append(mlevel)
elif strand == '-':
for (pos, (tag, mlevel)) in enumerate(izip(ctxstr[chr][gend:gend+pmtsize], cgmap[chr][gend:gend+pmtsize])):
tag = tag.upper()
if tag in inv_ctxs and mlevel != '-':
feature_mlevels[inv_ctxs[tag]]['pmt'].append(mlevel)
for (pos, (tag, mlevel)) in enumerate(izip(ctxstr[chr][gstart:gend], cgmap[chr][gstart:gend])):
tag = tag.upper()
inexon = False
if tag in inv_ctxs and mlevel != '-':
feature_mlevels[inv_ctxs[tag]]['gene'].append(mlevel)
for exon in gtftree[chr][(gene_id, strand)]:
if exon[0] <= pos+gstart < exon[1]:
feature_mlevels[inv_ctxs[tag]]['exon'].append(mlevel)
inexon = True
break
if not inexon:
feature_mlevels[inv_ctxs[tag]]['intron'].append(mlevel)
for ctx in ['CG', 'CHG', 'CHH']:
for feature in ['pmt', 'gene', 'exon', 'intron']:
if feature in feature_mlevels[ctx]:
counter[ctx][feature] += len(feature_mlevels[ctx][feature])
mtable[ctx][gene_id][feature] = np.mean(feature_mlevels[ctx][feature])
else:
counter[ctx][feature] += 0
mtable[ctx][gene_id][feature] = 0.0
for (pos, (tag, mlevel)) in enumerate(izip(ctxstr[chr], cgmap[chr])):
tag = tag.upper()
if (tag in inv_ctxs) and (mask[pos] == 1) and (mlevel != '-'):
ign[inv_ctxs[tag]].append(mlevel)
for ctx in ['CG', 'CHG', 'CHH']:
if len(ign[ctx]) > 0:
ign[ctx] = np.mean(ign[ctx])
else:
ign[ctx] = 0.0
cg_table = pd.DataFrame(mtable['CG']).T
cg_table = cg_table[['pmt', 'gene', 'exon', 'intron']]
chg_table = pd.DataFrame(mtable['CHG']).T
chg_table = chg_table[['pmt', 'gene', 'exon', 'intron']]
chh_table = pd.DataFrame(mtable['CHH']).T
chh_table = chh_table[['pmt', 'gene', 'exon', 'intron']]
return ign, cg_table, chg_table, chh_table
def plot_bar(dataframe, bulk, ctx):
colors = { 'CG': ( 38/255, 173/255, 84/255),
'CHG': ( 44/255, 180/255, 234/255),
'CHH': (249/255, 42/255, 54/255)}
dataframe = dataframe*100
plt.switch_backend('Agg')
fig = plt.figure()
ax = fig.add_subplot(111)
ax = dataframe.plot(ax=ax, kind='bar', grid=False, rot=0, color=colors[ctx], ylim=(0, 100))
ax.set_ylabel('Methylation Level (%)', fontsize='xx-large', fontweight='bold')
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['bottom'].set_linewidth(2)
ax.spines['left'].set_linewidth(2)
#ax.spines['bottom'].set_position(('outward', 5))
#ax.spines['left'].set_position(('outward', 5))
ax.tick_params(direction='out', length=6, width=2, labelsize='xx-large', top='off', right='off')
for label in ax.xaxis.get_ticklabels():
label.set_fontweight('bold')
for label in ax.yaxis.get_ticklabels():
label.set_fontweight('bold')
ax.set_title(ctx, fontsize='xx-large', weight='bold')
#ax.axhline(y=np.mean(bulk[ctx])*100, linewidth=2, linestyle='--', color='k')
fig.tight_layout()
return ax
def plot_feature_mlevel(bulk, ign, cg_table, chg_table, chh_table):
cg = cg_table.mean()
cg = cg.set_value('genome', np.mean(bulk['CG']))
cg = cg.set_value('IGN', ign['CG'])
cg = cg[['genome', 'pmt', 'gene', 'exon', 'intron', 'IGN']]
cg.to_csv("CG.txt", sep="\t")
cg_ax = plot_bar(cg, bulk, 'CG')
chg = chg_table.mean()
chg = chg.set_value('genome', np.mean(bulk['CHG']))
chg = chg.set_value('IGN', ign['CHG'])
chg = chg[['genome', 'pmt', 'gene', 'exon', 'intron', 'IGN']]
chg_ax = plot_bar(chg, bulk, 'CHG')
chh = chh_table.mean()
chh = chh.set_value('genome', np.mean(bulk['CHH']))
chh = chh.set_value('IGN', ign['CHH'])
chh = chh[['genome', 'pmt', 'gene', 'exon', 'intron', 'IGN']]
chh_ax = plot_bar(chh, bulk, 'CHH')
return cg_ax, chg_ax, chh_ax
def plot_bulkmean(bulk):
bulk_mean = {}
for ctx in ['CG', 'CHG', 'CHH']:
bulk_mean[ctx] = np.mean(bulk[ctx])
bulk_mean = pd.Series(bulk_mean)*100
colors = { 'CG': ( 38/255, 173/255, 84/255),
'CHG': ( 44/255, 180/255, 234/255),
'CHH': (249/255, 42/255, 54/255)}
plt.switch_backend('Agg')
fig = plt.figure()
ax = fig.add_subplot(111)
ax = bulk_mean.plot(ax=ax, kind='bar', grid=False, rot=0, color=[colors[ctx] for ctx in ['CG', 'CHG', 'CHH']], ylim=(0, 100))
ax.set_ylabel('Methylation Level (%)', fontsize='xx-large', fontweight='bold')
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['bottom'].set_linewidth(2)
ax.spines['left'].set_linewidth(2)
ax.tick_params(direction='out', length=6, width=2, labelsize='xx-large', top='off', right='off')
for label in ax.xaxis.get_ticklabels():
label.set_fontweight('bold')
for label in ax.yaxis.get_ticklabels():
label.set_fontweight('bold')
fig.tight_layout()
return ax
def plot_bulkhist(bulk):
colors = { 'CG': ( 38/255, 173/255, 84/255),
'CHG': ( 44/255, 180/255, 234/255),
'CHH': (249/255, 42/255, 54/255)}
plt.switch_backend('Agg')
fig = plt.figure(figsize=(8, 3))
axes = {}
for i, ctx in enumerate(['CG', 'CHG', 'CHH']):
if i == 0:
axes[ctx] = fig.add_axes((0.15, 0.25, 0.25, 0.65))
#axes[ctx] = fig.add_subplot(131)
axes[ctx].hist(bulk[ctx], weights=np.repeat(1.0/len(bulk[ctx]), len(bulk[ctx])), color=colors[ctx])
axes[ctx].spines['top'].set_visible(False)
axes[ctx].spines['right'].set_visible(False)
axes[ctx].spines['bottom'].set_linewidth(2)
axes[ctx].spines['left'].set_linewidth(2)
axes[ctx].spines['left'].set_position(('outward', 10))
plt.setp(axes[ctx].get_xticklabels(), visible=False)
axes[ctx].tick_params(axis='y', direction='out', right='off', length=6, width=2, labelsize='xx-large')
axes[ctx].tick_params(axis='x', top='off', bottom='off')
for label in axes[ctx].yaxis.get_ticklabels():
label.set_fontweight('bold')
axes[ctx].set_ylabel('Fraction', fontsize='xx-large', fontweight='bold')
else:
axes[ctx] = fig.add_axes((0.15 + (0.25 + 0.025) * i, 0.25, 0.25, 0.65))
#axes[ctx] = fig.add_subplot(1, 3, i+1)
axes[ctx].hist(bulk[ctx], weights=np.repeat(1.0/len(bulk[ctx]), len(bulk[ctx])), color=colors[ctx])
axes[ctx].spines['top'].set_visible(False)
axes[ctx].spines['left'].set_visible(False)
axes[ctx].spines['right'].set_visible(False)
axes[ctx].spines['bottom'].set_linewidth(2)
axes[ctx].spines['left'].set_linewidth(2)
plt.setp(axes[ctx].get_xticklabels(), visible=False)
plt.setp(axes[ctx].get_yticklabels(), visible=False)
axes[ctx].tick_params(top='off', bottom='off', left='off', right='off')
axes[ctx].set_ylim(0, 1)
axes[ctx].set_yticks(np.arange(0, 1.2, 0.2))
axes[ctx].set_xlim(-0.025, 1.025)
axes[ctx].set_xlabel(ctx, fontsize='xx-large', fontweight='bold')
fig.suptitle('Methylation Level (0 -> 100%)', x=0.55, y=0.1, fontsize='xx-large', fontweight='bold')
return fig
# The alphanum algorithm is from http://www.davekoelle.com/alphanum.html
re_chunk = re.compile("([\D]+|[\d]+)")
re_letters = re.compile("\D+")
re_numbers = re.compile("\d+")
def getchunk(item):
itemchunk = re_chunk.match(item)
# Subtract the matched portion from the original string
# if there was a match, otherwise set it to ""
item = (item[itemchunk.end():] if itemchunk else "")
# Don't return the match object, just the text
itemchunk = (itemchunk.group() if itemchunk else "")
return (itemchunk, item)
def alphanum(a, b):
n = 0
while (n == 0):
# Get a chunk and the original string with the chunk subtracted
(ac, a) = getchunk(a)
(bc, b) = getchunk(b)
# Both items contain only letters
if (re_letters.match(ac) and re_letters.match(bc)):
n = cmp(ac, bc)
else:
# Both items contain only numbers
if (re_numbers.match(ac) and re_numbers.match(bc)):
n = cmp(int(ac), int(bc))
# One item has letters and one item has numbers, or one item is empty
else:
n = cmp(ac, bc)
# Prevent deadlocks
if (n == 0):
n = 1
return n
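# (added comment) alphanum() gives a natural sort for chromosome names, e.g.
# chr1, chr2, chr10 rather than the plain lexicographic chr1, chr10, chr2.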
def calc_genomewide(ctxstr, cgmap, winsize=200000):
inv_ctxs = {'X': 'CG', 'Y': 'CHG', 'Z': 'CHH'}
win_mlevel = defaultdict(list)
win_x = []
pos = 0
chrs = ctxstr.keys()
chrs.sort(cmp=alphanum)
"""
if 'chr' in ctxstr.keys()[0].lower():
chrs = sorted(ctxstr.keys(), key=lambda s: s[3:])
else:
chrs = sorted(ctxstr.keys())
"""
for chr in chrs:
start = 0
while (start + winsize) <= len(ctxstr[chr]):
win_x.append(pos+(winsize/2))
tmp = defaultdict(list)
for tag, mlevel in izip(ctxstr[chr][start:start+winsize], cgmap[chr][start:start+winsize]):
tag = tag.upper()
if tag in inv_ctxs and mlevel != '-':
tmp[inv_ctxs[tag]].append(mlevel)
for ctx in ['CG', 'CHG', 'CHH']:
win_mlevel[ctx].append(np.mean(tmp[ctx])*100)
start += winsize
pos += winsize
return win_x, win_mlevel
def plot_genomewide(ctxstr, gpos, gmlevel):
colors = { 'CG': ( 38/255, 173/255, 84/255),
'CHG': ( 44/255, 180/255, 234/255),
'CHH': (249/255, 42/255, 54/255)}
chrs = ctxstr.keys()
chrs.sort(cmp=alphanum)
"""
if 'chr' in ctxstr.keys()[0].lower():
chrs = sorted(ctxstr.keys(), key=lambda s: s[3:])
else:
chrs = sorted(ctxstr.keys())
"""
#chrs = map(str, range(1, 23)) + ['X', 'Y']
vlines = [0]
for i, chr in enumerate(chrs):
vlines.append(vlines[i] + len(ctxstr[chr]))
plt.switch_backend('Agg')
fig = plt.figure(figsize=(16, 4.5))
ax = fig.add_subplot(111)
ax.plot(gpos, gmlevel['CG'], color=colors['CG'], linewidth=1.5, label='CG')
ax.plot(gpos, gmlevel['CHG'], color=colors['CHG'], linewidth=1.5, label='CHG')
ax.plot(gpos, gmlevel['CHH'], color=colors['CHH'], linewidth=1.5, label='CHH')
ax.set_ylim(0, 100)
ax.set_xlim(0, vlines[-1])
for pos in vlines[1:-1]:
ax.axvline(x=pos, linestyle='--', linewidth=1.5, color='gray')
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['bottom'].set_linewidth(2)
ax.spines['left'].set_linewidth(2)
ax.spines['left'].set_position(('outward', 10))
for label in ax.xaxis.get_ticklabels():
label.set_fontweight('bold')
for label in ax.yaxis.get_ticklabels():
label.set_fontweight('bold')
ax.tick_params(direction='out', length=6, width=2, labelsize='large', top='off', right='off', bottom='off')
ax.set_xticks([(vlines[i] + vlines[i+1])/2 for i in xrange(len(vlines) - 1)])
ax.set_xticklabels(chrs)
ax.set_xlabel('Chromosome', fontsize='xx-large', fontweight='bold')
ax.set_ylabel('Methylation Level (%)', fontsize='xx-large', fontweight='bold')
ax.legend(loc='upper right', fontsize='large', frameon=False)
fig.tight_layout()
return ax
def main():
parser = get_parser()
args = parser.parse_args()
root = os.path.splitext(os.path.basename(args.cgmap))[0]
ctxstr = const_ctxstr(args.fasta)
cgmap = const_cgmap(ctxstr, args.cgmap, args.depth)
gtftree = const_gtftree(args.gtf)
bulk = calc_bulk(ctxstr, cgmap)
plt.switch_backend('Agg')
bulk_ax = plot_bulkmean(bulk)
fig = bulk_ax.get_figure()
fig.savefig('{}.bulk.mean.png'.format(root), dpi=300)
plt.close(fig)
bulk_fig = plot_bulkhist(bulk)
bulk_fig.savefig('{}.bulk.hist.png'.format(root), dpi=300)
plt.close(fig)
ign, cg_table, chg_table, chh_table = calc_mlevel(ctxstr, cgmap, gtftree, args.pmtsize)
cg_table.to_csv('{}.feature.CG.txt'.format(root), sep='\t', float_format='%.3f')
chg_table.to_csv('{}.feature.CHG.txt'.format(root), sep='\t', float_format='%.3f')
chh_table.to_csv('{}.feature.CHH.txt'.format(root), sep='\t', float_format='%.3f')
cg_ax, chg_ax, chh_ax = plot_feature_mlevel(bulk, ign, cg_table, chg_table, chh_table)
fig = cg_ax.get_figure()
fig.savefig('{}.feature.CG.png'.format(root), dpi=300)
plt.close(fig)
fig = chg_ax.get_figure()
fig.savefig('{}.feature.CHG.png'.format(root), dpi=300)
plt.close(fig)
fig = chh_ax.get_figure()
fig.savefig('{}.feature.CHH.png'.format(root), dpi=300)
plt.close(fig)
gpos, gmlevel = calc_genomewide(ctxstr, cgmap, winsize=args.winsize)
gax = plot_genomewide(ctxstr, gpos, gmlevel)
fig = gax.get_figure()
fig.savefig('{}.genomewide.png'.format(root), dpi=300)
plt.close(fig)
if __name__ == '__main__':
main()
|
mit
|
cjayb/mne-python
|
mne/utils/__init__.py
|
1
|
4359
|
# # # WARNING # # #
# This list must also be updated in doc/_templates/autosummary/class.rst if it
# is changed here!
_doc_special_members = ('__contains__', '__getitem__', '__iter__', '__len__',
'__add__', '__sub__', '__mul__', '__div__',
'__neg__', '__hash__')
from ._bunch import Bunch, BunchConst, BunchConstNamed
from .check import (check_fname, check_version, check_random_state,
_check_fname, _check_subject, _check_pandas_installed,
_check_pandas_index_arguments, _check_mayavi_version,
_check_event_id, _check_ch_locs, _check_compensation_grade,
_check_if_nan, _is_numeric, _ensure_int, _check_preload,
_validate_type, _check_info_inv, _check_pylsl_installed,
_check_channels_spatial_filter, _check_one_ch_type,
_check_rank, _check_option, _check_depth, _check_combine,
_check_path_like, _check_src_normal, _check_stc_units,
_check_pyqt5_version, _check_sphere, _check_time_format,
_check_freesurfer_home, _suggest, _require_version,
_on_missing)
from .config import (set_config, get_config, get_config_path, set_cache_dir,
set_memmap_min_size, get_subjects_dir, _get_stim_channel,
sys_info, _get_extra_data_path, _get_root_dir,
_get_numpy_libs)
from .docs import (copy_function_doc_to_method_doc, copy_doc, linkcode_resolve,
open_docs, deprecated, fill_doc, deprecated_alias,
copy_base_doc_to_subclass_doc)
from .fetching import _fetch_file, _url_to_local_path
from ._logging import (verbose, logger, set_log_level, set_log_file,
use_log_level, catch_logging, warn, filter_out_warnings,
ETSContext, wrapped_stdout, _get_call_line,
ClosingStringIO)
from .misc import (run_subprocess, _pl, _clean_names, pformat, _file_like,
_explain_exception, _get_argvalues, sizeof_fmt,
running_subprocess, _DefaultEventParser)
from .progressbar import ProgressBar
from ._testing import (run_tests_if_main, run_command_if_main,
requires_sklearn,
requires_version, requires_nibabel, requires_mayavi,
requires_good_network, requires_mne, requires_pandas,
requires_h5py, traits_test, requires_pysurfer,
ArgvSetter, SilenceStdout, has_freesurfer, has_mne_c,
_TempDir, has_nibabel, _import_mlab, buggy_mkl_svd,
requires_numpydoc, requires_vtk, requires_freesurfer,
requires_nitime, requires_dipy,
requires_neuromag2ft, requires_pylsl, assert_object_equal,
assert_and_remove_boundary_annot, _raw_annot,
assert_dig_allclose, assert_meg_snr, assert_snr,
assert_stcs_equal, modified_env)
from .numerics import (hashfunc, _compute_row_norms,
_reg_pinv, random_permutation, _reject_data_segments,
compute_corr, _get_inst_data, array_split_idx,
sum_squared, split_list, _gen_events, create_slices,
_time_mask, _freq_mask, grand_average, object_diff,
object_hash, object_size, _apply_scaling_cov,
_undo_scaling_cov, _apply_scaling_array,
_undo_scaling_array, _scaled_array, _replace_md5, _PCA,
_mask_to_onsets_offsets, _array_equal_nan,
_julian_to_cal, _cal_to_julian, _dt_to_julian,
_julian_to_dt, _dt_to_stamp, _stamp_to_dt,
_check_dt, _ReuseCycle)
from .mixin import (SizeMixin, GetEpochsMixin, _prepare_read_metadata,
_prepare_write_metadata, _FakeNoPandas, ShiftTimeMixin)
from .linalg import (_svd_lwork, _repeated_svd, _sym_mat_pow, sqrtm_sym,
dgesdd, dgemm, zgemm, dgemv, ddot, LinAlgError, eigh)
from .dataframe import (_set_pandas_dtype, _scale_dataframe_data,
_convert_times, _build_data_frame)
|
bsd-3-clause
|
danielvdende/incubator-airflow
|
airflow/hooks/druid_hook.py
|
1
|
5802
|
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import print_function
import requests
import time
from pydruid.db import connect
from airflow.exceptions import AirflowException
from airflow.hooks.base_hook import BaseHook
from airflow.hooks.dbapi_hook import DbApiHook
class DruidHook(BaseHook):
"""
Connection to Druid overlord for ingestion
:param druid_ingest_conn_id: The connection id to the Druid overlord machine
which accepts index jobs
:type druid_ingest_conn_id: string
:param timeout: The interval between polling
the Druid job for the status of the ingestion job
:type timeout: int
:param max_ingestion_time: The maximum ingestion time before assuming the job failed
:type max_ingestion_time: int
"""
def __init__(
self,
druid_ingest_conn_id='druid_ingest_default',
timeout=1,
max_ingestion_time=None):
self.druid_ingest_conn_id = druid_ingest_conn_id
self.timeout = timeout
self.max_ingestion_time = max_ingestion_time
self.header = {'content-type': 'application/json'}
def get_conn_url(self):
conn = self.get_connection(self.druid_ingest_conn_id)
host = conn.host
port = conn.port
conn_type = 'http' if not conn.conn_type else conn.conn_type
endpoint = conn.extra_dejson.get('endpoint', '')
return "{conn_type}://{host}:{port}/{endpoint}".format(**locals())
def submit_indexing_job(self, json_index_spec):
url = self.get_conn_url()
req_index = requests.post(url, json=json_index_spec, headers=self.header)
if (req_index.status_code != 200):
raise AirflowException('Did not get 200 when '
'submitting the Druid job to {}'.format(url))
req_json = req_index.json()
# Wait until the job is completed
druid_task_id = req_json['task']
running = True
sec = 0
while running:
req_status = requests.get("{0}/{1}/status".format(url, druid_task_id))
self.log.info("Job still running for %s seconds...", sec)
sec = sec + 1
if self.max_ingestion_time and sec > self.max_ingestion_time:
# ensure that the job gets killed if the max ingestion time is exceeded
requests.post("{0}/{1}/shutdown".format(url, druid_task_id))
                raise AirflowException('Druid ingestion took more than '
                                       '%s seconds' % self.max_ingestion_time)
time.sleep(self.timeout)
status = req_status.json()['status']['status']
if status == 'RUNNING':
running = True
elif status == 'SUCCESS':
running = False # Great success!
elif status == 'FAILED':
raise AirflowException('Druid indexing job failed, '
'check console for more info')
else:
                raise AirflowException(
                    'Could not get status of the job, got %s' % status)
self.log.info('Successful index')
class DruidDbApiHook(DbApiHook):
"""
Interact with Druid broker
This hook is purely for users to query druid broker.
For ingestion, please use druidHook.
"""
conn_name_attr = 'druid_broker_conn_id'
default_conn_name = 'druid_broker_default'
supports_autocommit = False
def __init__(self, *args, **kwargs):
super(DruidDbApiHook, self).__init__(*args, **kwargs)
def get_conn(self):
"""
Establish a connection to druid broker.
"""
conn = self.get_connection(self.druid_broker_conn_id)
druid_broker_conn = connect(
host=conn.host,
port=conn.port,
path=conn.extra_dejson.get('endpoint', '/druid/v2/sql'),
scheme=conn.extra_dejson.get('schema', 'http')
)
self.log.info('Get the connection to druid '
'broker on {host}'.format(host=conn.host))
return druid_broker_conn
def get_uri(self):
"""
Get the connection uri for druid broker.
e.g: druid://localhost:8082/druid/v2/sql/
"""
conn = self.get_connection(getattr(self, self.conn_name_attr))
host = conn.host
if conn.port is not None:
host += ':{port}'.format(port=conn.port)
conn_type = 'druid' if not conn.conn_type else conn.conn_type
endpoint = conn.extra_dejson.get('endpoint', 'druid/v2/sql')
return '{conn_type}://{host}/{endpoint}'.format(
conn_type=conn_type, host=host, endpoint=endpoint)
def set_autocommit(self, conn, autocommit):
raise NotImplementedError()
def get_pandas_df(self, sql, parameters=None):
raise NotImplementedError()
def insert_rows(self, table, rows, target_fields=None, commit_every=1000):
raise NotImplementedError()
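# Illustrative usage sketch (not part of this module): the connection ids,
# index spec and datasource name below are assumptions for demonstration, not
# values defined elsewhere in this file; a real spec comes from the Druid
# ingestion task JSON.
def _example_druid_usage():
    # Submit an ingestion task to the overlord and block until it finishes.
    ingest_hook = DruidHook(druid_ingest_conn_id='druid_ingest_default',
                            timeout=1, max_ingestion_time=3600)
    index_spec = {'type': 'index_hadoop', 'spec': {}}  # placeholder spec
    ingest_hook.submit_indexing_job(index_spec)
    # Query the broker through the DB-API hook; get_records is inherited
    # from DbApiHook.
    broker_hook = DruidDbApiHook(druid_broker_conn_id='druid_broker_default')
    return broker_hook.get_records('SELECT COUNT(*) FROM "my_datasource"')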
|
apache-2.0
|
samuelefiorini/cgm-tools
|
cgmtools/forecast/kf.py
|
1
|
14439
|
"""[cgm-tools] CGM forecast via Kalman filter."""
######################################################################
# Copyright (C) 2017 Samuele Fiorini, Chiara Martini, Annalisa Barla
#
# GPL-3.0 License
######################################################################
import copy
import numpy as np
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import ParameterGrid
from sklearn.model_selection import TimeSeriesSplit
from pykalman import KalmanFilter
__all__ = ['cgmkalmanfilter', 'forecast', 'grid_search']
def cgmkalmanfilter(X0=None, P0=None, F=None, Q=None, X=None, R=None,
random_state=None, em_vars=None, **kwargs):
"""Kalman Filter wrapper that uses compact names notation.
Unspecified KalmanFilter arguments are:
- transition_offsets
- observation_offsets
- n_dim_state
- n_dim_obs
their access granted via **kwargs
"""
return KalmanFilter(transition_matrices=F,
observation_matrices=X,
transition_covariance=Q,
observation_covariance=R,
initial_state_mean=X0,
initial_state_covariance=P0,
random_state=random_state,
em_vars=em_vars, **kwargs)
def forecast(kf=None, n_steps=1, X_old=None, P_old=None, H=None,
y=None, return_first_kf=False):
"""Forecast n_steps-ahead using the input Kalman filter.
Parameters
-------------------
kf : pykalman.standard.KalmanFilter, the (fitted) KF to use
to forecast
n_steps : number, the prediction horizon (default = 1)
H : array of float, the acquisition model
y : array of float, the observation until the last acquired sample
return_first_kf : bool, return the KF used to perform
the first one-step-ahead prediction
Returns
-------------------
y_pred : array of float, the `n_steps` predicted future values
X_new : array of float, the one-step-updated state matrix
P_new : array of float, the one-step-updated state covariance matrix
kf_out : pykalman.standard.KalmanFilter, the KF used to perform
the first one-step-ahead prediction (if return_first_kf=True)
"""
if n_steps <= 0:
raise Exception('n_steps must be at least 1')
# Init predictions
y_pred = np.zeros(n_steps)
# Perform state estimation until the end of y
X_new, P_new = kf.filter(y)
# perform one-step-ahead prediction
X_new, P_new = kf.filter_update(filtered_state_mean=X_new[-1],
filtered_state_covariance=P_new[-1],
observation=y[-1])
y_pred[0] = np.dot(H.reshape(1, 2), X_new.reshape(2, 1))[0][0]
if n_steps < 2:
# Multiple return
ret = [y_pred, X_new, P_new]
if return_first_kf:
ret.append(kf)
return ret
else:
P_old = P_new.copy()
X_old = X_new.copy()
# copy the KF to perform recursive forecast
_kf = copy.deepcopy(kf)
        for t in range(1, n_steps):
            _y_curr = y_pred[t - 1]  # feed back the previously predicted value
X_new, P_new = _kf.filter_update(filtered_state_mean=X_old,
filtered_state_covariance=P_old,
observation=_y_curr)
y_pred[t] = np.dot(H.reshape(1, 2), X_new.reshape(2, 1))[0][0]
P_old = P_new.copy()
X_old = X_new.copy()
# Multiple return
ret = [y_pred, X_new, P_new]
if return_first_kf:
ret.append(kf)
return ret
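# Illustrative sketch (not part of the module): a minimal call of
# cgmkalmanfilter() and forecast() on a toy series.  The model matrices follow
# the double-integrated random-walk convention used throughout this file; the
# toy trace and the lambda2/sigma2 values are arbitrary examples.
def _example_forecast():
    F = np.array([[2, -1], [1, 0]])    # transition matrix
    H = np.array([1, 0])               # observation model
    Q = np.array([[1e-6, 0], [0, 0]])  # transition covariance (lambda2)
    R = 1.0                            # observation variance (sigma2)
    y = 100 + 10 * np.sin(np.linspace(0, 6, 60))  # toy CGM-like trace
    kf = cgmkalmanfilter(F=F, Q=Q, R=R, X0=None, P0=None)
    kf.em(y, em_vars=('initial_state_mean', 'initial_state_covariance'))
    y_pred, X_new, P_new = forecast(kf=kf, n_steps=6, H=H, y=y)
    return y_pred  # the 6 predicted future values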
def grid_worker(l2, s2, F, H, tscv, time_series, count, jobs_dump):
"""Grid-search worker."""
Q = np.array([[l2, 0], [0, 0]]) # transition_covariance
R = s2 # observation (co)variance
# Init the vld_error vector for the current order
vld_error = np.zeros(tscv.n_splits - 1)
# Iterate through the CV splits
for cv_count, (tr_index, vld_index) in enumerate(tscv.split(time_series)):
if cv_count == 0: # init X0 and P0 via EM on the first chunk of data
y_0 = time_series.iloc[np.hstack((tr_index,
vld_index))].values.ravel()
# Init KalmanFilter object
kf = cgmkalmanfilter(F=F, Q=Q, R=R, X0=None, P0=None)
kf.em(y_0, em_vars=('initial_state_mean',
'initial_state_covariance'))
else:
y_tr = time_series.iloc[tr_index].values.ravel()
y_vld = time_series.iloc[vld_index].values.ravel()
y_pred, X_new, P_new, kf = forecast(kf=kf,
n_steps=len(y_vld),
H=H, y=y_tr,
return_first_kf=True)
# Save vld error
vld_error[cv_count - 1] = mean_squared_error(y_pred, y_vld)
jobs_dump[count] = (l2, s2, vld_error)
def grid_search(df, lambda2_range, sigma2_range, F=None, H=None, burn_in=300,
n_splits=15, return_mean_vld_error=False,
return_initial_state_mean=False,
return_initial_state_covariance=False, verbose=False):
"""Find the best Kalman filter parameters via grid search cross-validation.
    This function performs a grid search of the optimal (lambda2, sigma2)
parameters of the pykalman.KalmanFilter on input data where:
transition_matrix -> F = [[2,-1], [1, 0]] (double-integrated
random-walk model)
transition_covariance -> Q = [[lambda2, 0], [0, 0]]
observation_covariance -> R = [sigma2]
observation_model -> H = [1, 0]
as in [1]. In this function lambda2 and sigma2 are not estimated
using the Bayesian framework described in [1], but they are
    obtained via cross-validation. The optimization is run on ...
Parameters
-------------------
df : DataFrame, the output returned by gluco_extract(return_df=True)
lambda2_range : array of float, grid of Bayesian Kalman filter
regularization parameter Q[0,0]
sigma2_range : array of float, grid of observation (co)variances
    F : array of float, the transition matrix (default is double-integrated
model)
H : array of float, the observation model (default is [1, 0])
burn_in : number, the number of samples at the beginning of the
        time-series that should be split off to perform the grid search
(default = 300)
n_splits : number, the number of splits of the time-series
cross-validation schema (default=15). Your prediction
horizon will be `floor(n_samples / (n_splits + 1))`
return_mean_vld_error : bool, return the average validation error
(default=False)
return_initial_state_mean : bool, return the initial state mean evaluated
via EM on the burn-in samples
return_initial_state_covariance : bool, return the initial state covariance
evaluated via EM on the burn-in samples
verbose : bool, print debug messages (default=False)
Returns
-------------------
lambda2_opt : number
sigma2_opt : number
mean_vld_error : array of float, the array of mean cross-validation error,
size (len(lambda2_range), len(sigma2_range)) (if
return_mean_vld_error = True)
initial_state_mean : array of float, initial state mean evaluated
via EM on the burn-in samples (if
return_initial_state_mean = True)
initial_state_covariance : array of float, initial state covariance
evaluated via EM on the burn-in samples (if
return_initial_state_covariance = True)
References
-------------------
[1] Facchinetti, Andrea, Giovanni Sparacino, and Claudio Cobelli.
"An online self-tunable method to denoise CGM sensor data."
IEEE Transactions on Biomedical Engineering 57.3 (2010): 634-641.
"""
n_samples = df.shape[0]
# Argument check
if n_samples < burn_in:
raise Exception('The number of burn in samples %d should be '
'smaller than the total number of samples '
'%d' % (burn_in, n_samples))
import multiprocessing as mp
    if F is None: F = np.array([[2, -1], [1, 0]])  # double integration model
if H is None: H = np.array([1, 0]) # observation model
# Isolate the burn in samples
time_series = df.iloc[:burn_in]
# Parameter grid definition
# see state covariance and noise variance parameters
param_grid = ParameterGrid({'lambda2': lambda2_range,
'sigma2': sigma2_range})
# Time-series cross validation split
tscv = TimeSeriesSplit(n_splits=n_splits)
# Initialize the cross-validation error matrix of size
# (len(lambda2_range), len(sigma2_range))
mean_vld_error = np.zeros((len(lambda2_range), len(sigma2_range)))
std_vld_error = np.zeros_like(mean_vld_error)
# Positions dictionary
d_lambda = dict(zip(lambda2_range, np.arange(len(lambda2_range))))
d_sigma = dict(zip(sigma2_range, np.arange(len(sigma2_range))))
    # Iterate through the parameters lambda2, sigma2
# i, j index will be used to access the mean_vld_error matrix
jobs = []
manager = mp.Manager()
jobs_dump = manager.dict()
for count, param in enumerate(param_grid):
if verbose: print('trying params {} ...'.format(param))
l2, s2 = param['lambda2'], param['sigma2']
proc = mp.Process(target=grid_worker,
args=(l2, s2, F, H, tscv, time_series,
count, jobs_dump))
jobs.append(proc)
proc.start()
        if verbose: print("Job: %d submitted" % count)
# Collect results
count = 0
for proc in jobs:
proc.join()
count += 1
        if verbose: print("%d jobs collected" % count)
for count in jobs_dump.keys():
# Save mean and standard deviation of cross-validation error
# (excluding NaNs)
l2, s2, vld_error = jobs_dump[count]
i, j, = d_lambda[l2], d_sigma[s2]
mean_vld_error[i, j] = np.nanmean(vld_error)
std_vld_error[i, j] = np.nanstd(vld_error)
# Get the optimal orders from the score that we want to optimize
i_opt, j_opt, = np.argwhere(mean_vld_error == np.nanmin(mean_vld_error))[0]
# Multiple returns
lambda2_opt, sigma2_opt = lambda2_range[i_opt], sigma2_range[j_opt]
ret = [lambda2_opt, sigma2_opt]
Q = np.array([[lambda2_opt, 0], [0, 0]]) # transition_covariance
R = sigma2_opt # observation (co)variance
if return_initial_state_mean or return_initial_state_covariance:
kf_refit = cgmkalmanfilter(F=F, Q=Q, R=R, X0=None, P0=None)
X0, P0 = kf_refit.em(time_series,
em_vars=('initial_state_mean',
'initial_state_covariance')).filter(time_series)
if return_mean_vld_error:
ret.append(mean_vld_error)
if return_initial_state_mean:
ret.append(X0[-1])
if return_initial_state_covariance:
ret.append(P0[-1])
return ret
def online_forecast(df, kf, H, ph=18, lambda2=1e-6, sigma2=1,
verbose=False):
"""Recursively fit/predict a Kalman filter on input CGM time-series.
This function recursively fits a Kalman filter on input CGM time series and
evaluates 30/60/90 mins (absolute) error.
Parameters
-------------------
df : DataFrame, the output returned by gluco_extract(..., return_df=True)
kf : pykalman.standard.KalmanFilter, the (initialized) KF to use
to forecast
H : array of float, observation model
ph : number, the prediction horizon. It must be ph > 0
(default=18, i.e. 90 mins (1.5 hours))
lambda2 : number, Bayesian Kalman filter regularization parameter Q[0,0]
sigma2 : number, observation variance
    verbose : bool, print debug messages every 200 iterations (default=False)
Returns
-------------------
errs : dictionary, errors at 30/60/90 mins ('err_18', 'err_12', 'err_6')
forecast : dictionary, time-series prediction ['ts'], with std_dev
['sigma'] and confidence interval ['conf_int'].
The output has the same length of the input, but the first
`w_size` elements are set to 0.
"""
# Argument check
if ph <= 0:
raise Exception('ph must be at least 1')
n_samples = df.shape[0]
# Absolute prediction error at 30/60/90 minutes
errs_dict = {'err_18': [], 'err_12': [], 'err_6': []}
# 1 step-ahead predictions
forecast_dict = {'ts': [], 'sigma': [], 'conf_int': []}
time_series = df.values.ravel()
    # Init state mean and covariance
X_old = kf.initial_state_mean
P_old = kf.initial_state_covariance
# Acquire one sample at the time
for t in range(1, n_samples - ph):
y_pred, X_new, P_new = forecast(kf=kf, n_steps=ph,
X_old=X_old, P_old=P_old,
H=H, y=time_series[:t])
forecast_dict['ts'].append(y_pred[0])
# Evaluate the errors
y_future_real = time_series[t:t + ph]
abs_pred_err = np.abs(y_pred - y_future_real)
# Save errors
errs_dict['err_18'].append(abs_pred_err[17])
errs_dict['err_12'].append(abs_pred_err[11])
errs_dict['err_6'].append(abs_pred_err[5])
if (t % 200) == 0 and verbose:
print("[:{}]\nErrors: 30' = {:2.3f}\t|\t60' = "
"{:2.3f}\t|\t90' = {:2.3f}".format(t,
errs_dict['err_6'][-1],
errs_dict['err_12'][-1],
errs_dict['err_18'][-1]))
# Return numpy.array
forecast_dict['ts'] = np.array(forecast_dict['ts'])
return errs_dict, forecast_dict
########################################################
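# Illustrative end-to-end sketch (not part of the module): chain grid_search()
# and online_forecast().  `df` is assumed to be the glucose DataFrame returned
# by gluco_extract(..., return_df=True); the parameter grids, burn-in and
# prediction horizon are arbitrary examples.
def _example_pipeline(df):
    H = np.array([1, 0])
    lambda2, sigma2, X0, P0 = grid_search(
        df, lambda2_range=np.logspace(-12, -4, 9),
        sigma2_range=np.logspace(-2, 2, 5), burn_in=300, n_splits=15,
        return_initial_state_mean=True, return_initial_state_covariance=True)
    kf = cgmkalmanfilter(F=np.array([[2, -1], [1, 0]]),
                         Q=np.array([[lambda2, 0], [0, 0]]), R=sigma2,
                         X0=X0, P0=P0)
    errs, forecasts = online_forecast(df.iloc[300:], kf, H, ph=18,
                                      lambda2=lambda2, sigma2=sigma2)
    return errs, forecasts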
|
gpl-3.0
|
jmschrei/scikit-learn
|
benchmarks/bench_plot_randomized_svd.py
|
12
|
17567
|
"""
Benchmarks on the power iterations phase in randomized SVD.
We test on various synthetic and real datasets the effect of increasing
the number of power iterations in terms of quality of approximation
and running time. A number greater than 0 should help with noisy matrices,
which are characterized by a slow spectral decay.
We test several policies for normalizing the power iterations. Normalization
is crucial to avoid numerical issues.
The quality of the approximation is measured by the spectral norm discrepancy
between the original input matrix and the reconstructed one (by multiplying
the randomized_svd's outputs). The spectral norm is equal to the
largest singular value of a matrix. (3) justifies this choice. However, one can
notice in these experiments that Frobenius and spectral norms behave
very similarly in a qualitative sense. Therefore, we suggest to run these
benchmarks with `enable_spectral_norm = False`, as Frobenius' is MUCH faster to
compute.
The benchmarks follow.
(a) plot: time vs norm, varying number of power iterations
data: many datasets
goal: compare normalization policies and study how the number of power
iterations affect time and norm
(b) plot: n_iter vs norm, varying rank of data and number of components for
randomized_SVD
data: low-rank matrices on which we control the rank
goal: study whether the rank of the matrix and the number of components
extracted by randomized SVD affect "the optimal" number of power iterations
(c) plot: time vs norm, varying datasets
data: many datasets
goal: compare default configurations
We compare the following algorithms:
- randomized_svd(..., power_iteration_normalizer='none')
- randomized_svd(..., power_iteration_normalizer='LU')
- randomized_svd(..., power_iteration_normalizer='QR')
- randomized_svd(..., power_iteration_normalizer='auto')
- fbpca.pca() from https://github.com/facebook/fbpca (if installed)
Conclusion
----------
- n_iter=2 appears to be a good default value
- power_iteration_normalizer='none' is OK if n_iter is small, otherwise LU
gives similar errors to QR but is cheaper. That's what 'auto' implements.
References
----------
(1) Finding structure with randomness: Stochastic algorithms for constructing
approximate matrix decompositions
Halko, et al., 2009 http://arxiv.org/abs/arXiv:0909.4061
(2) A randomized algorithm for the decomposition of matrices
Per-Gunnar Martinsson, Vladimir Rokhlin and Mark Tygert
(3) An implementation of a randomized algorithm for principal component
analysis
A. Szlam et al. 2014
"""
# Author: Giorgio Patrini
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
import gc
import pickle
from time import time
from collections import defaultdict
import os.path
from sklearn.utils import gen_batches
from sklearn.utils.validation import check_random_state
from sklearn.utils.extmath import randomized_svd
from sklearn.datasets.samples_generator import (make_low_rank_matrix,
make_sparse_uncorrelated)
from sklearn.datasets import (fetch_lfw_people,
fetch_mldata,
fetch_20newsgroups_vectorized,
fetch_olivetti_faces,
fetch_rcv1)
try:
import fbpca
fbpca_available = True
except ImportError:
fbpca_available = False
# If this is enabled, tests are much slower and will crash with the large data
enable_spectral_norm = False
# TODO: compute approximate spectral norms with the power method as in
# Estimating the largest eigenvalues by the power and Lanczos methods with
# a random start, Jacek Kuczynski and Henryk Wozniakowski, SIAM Journal on
# Matrix Analysis and Applications, 13 (4): 1094-1122, 1992.
# This approximation is a very fast estimate of the spectral norm, but depends
# on starting random vectors.
# Determine when to switch to batch computation for matrix norms,
# in case the reconstructed (dense) matrix is too large
MAX_MEMORY = np.int(2e9)
# The following datasets can be downloaded manually from:
# CIFAR 10: http://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz
# SVHN: http://ufldl.stanford.edu/housenumbers/train_32x32.mat
CIFAR_FOLDER = "./cifar-10-batches-py/"
SVHN_FOLDER = "./SVHN/"
datasets = ['low rank matrix', 'lfw_people', 'olivetti_faces', '20newsgroups',
'MNIST original', 'CIFAR', 'a1a', 'SVHN', 'uncorrelated matrix']
big_sparse_datasets = ['big sparse matrix', 'rcv1']
def unpickle(file):
fo = open(file, 'rb')
dict = pickle.load(fo, encoding='latin1')
fo.close()
return dict['data']
def handle_missing_dataset(file_folder):
if not os.path.isdir(file_folder):
print("%s file folder not found. Test skipped." % file_folder)
return 0
def get_data(dataset_name):
print("Getting dataset: %s" % dataset_name)
if dataset_name == 'lfw_people':
X = fetch_lfw_people().data
elif dataset_name == '20newsgroups':
X = fetch_20newsgroups_vectorized().data[:, :100000]
elif dataset_name == 'olivetti_faces':
X = fetch_olivetti_faces().data
elif dataset_name == 'rcv1':
X = fetch_rcv1().data
elif dataset_name == 'CIFAR':
        if handle_missing_dataset(CIFAR_FOLDER) == 0:
return
X1 = [unpickle("%sdata_batch_%d" % (CIFAR_FOLDER, i + 1))
for i in range(5)]
X = np.vstack(X1)
del X1
elif dataset_name == 'SVHN':
if handle_missing_dataset(SVHN_FOLDER) == 0:
return
X1 = sp.io.loadmat("%strain_32x32.mat" % SVHN_FOLDER)['X']
X2 = [X1[:, :, :, i].reshape(32 * 32 * 3) for i in range(X1.shape[3])]
X = np.vstack(X2)
del X1
del X2
elif dataset_name == 'low rank matrix':
X = make_low_rank_matrix(n_samples=500, n_features=np.int(1e4),
effective_rank=100, tail_strength=.5,
random_state=random_state)
elif dataset_name == 'uncorrelated matrix':
X, _ = make_sparse_uncorrelated(n_samples=500, n_features=10000,
random_state=random_state)
elif dataset_name == 'big sparse matrix':
sparsity = np.int(1e6)
size = np.int(1e6)
small_size = np.int(1e4)
data = np.random.normal(0, 1, np.int(sparsity/10))
data = np.repeat(data, 10)
row = np.random.uniform(0, small_size, sparsity)
col = np.random.uniform(0, small_size, sparsity)
X = sp.sparse.csr_matrix((data, (row, col)), shape=(size, small_size))
del data
del row
del col
else:
X = fetch_mldata(dataset_name).data
return X
def plot_time_vs_s(time, norm, point_labels, title):
plt.figure()
colors = ['g', 'b', 'y']
for i, l in enumerate(sorted(norm.keys())):
        if l != "fbpca":
plt.plot(time[l], norm[l], label=l, marker='o', c=colors.pop())
else:
plt.plot(time[l], norm[l], label=l, marker='^', c='red')
for label, x, y in zip(point_labels, list(time[l]), list(norm[l])):
plt.annotate(label, xy=(x, y), xytext=(0, -20),
textcoords='offset points', ha='right', va='bottom')
plt.legend(loc="upper right")
plt.suptitle(title)
plt.ylabel("norm discrepancy")
plt.xlabel("running time [s]")
def scatter_time_vs_s(time, norm, point_labels, title):
plt.figure()
size = 100
for i, l in enumerate(sorted(norm.keys())):
        if l != "fbpca":
plt.scatter(time[l], norm[l], label=l, marker='o', c='b', s=size)
for label, x, y in zip(point_labels, list(time[l]), list(norm[l])):
plt.annotate(label, xy=(x, y), xytext=(0, -80),
textcoords='offset points', ha='right',
arrowprops=dict(arrowstyle="->",
connectionstyle="arc3"),
va='bottom', size=11, rotation=90)
else:
plt.scatter(time[l], norm[l], label=l, marker='^', c='red', s=size)
for label, x, y in zip(point_labels, list(time[l]), list(norm[l])):
plt.annotate(label, xy=(x, y), xytext=(0, 30),
textcoords='offset points', ha='right',
arrowprops=dict(arrowstyle="->",
connectionstyle="arc3"),
va='bottom', size=11, rotation=90)
plt.legend(loc="best")
plt.suptitle(title)
plt.ylabel("norm discrepancy")
plt.xlabel("running time [s]")
def plot_power_iter_vs_s(power_iter, s, title):
plt.figure()
for l in sorted(s.keys()):
plt.plot(power_iter, s[l], label=l, marker='o')
plt.legend(loc="lower right", prop={'size': 10})
plt.suptitle(title)
plt.ylabel("norm discrepancy")
plt.xlabel("n_iter")
def svd_timing(X, n_comps, n_iter, n_oversamples,
power_iteration_normalizer='auto', method=None):
"""
Measure time for decomposition
"""
print("... running SVD ...")
    if method != 'fbpca':
gc.collect()
t0 = time()
U, mu, V = randomized_svd(X, n_comps, n_oversamples, n_iter,
power_iteration_normalizer,
random_state=random_state, transpose=False)
call_time = time() - t0
else:
gc.collect()
t0 = time()
# There is a different convention for l here
U, mu, V = fbpca.pca(X, n_comps, raw=True, n_iter=n_iter,
l=n_oversamples+n_comps)
call_time = time() - t0
return U, mu, V, call_time
def norm_diff(A, norm=2, msg=True):
"""
Compute the norm diff with the original matrix, when randomized
SVD is called with *params.
norm: 2 => spectral; 'fro' => Frobenius
"""
if msg:
print("... computing %s norm ..." % norm)
if norm == 2:
# s = sp.linalg.norm(A, ord=2) # slow
value = sp.sparse.linalg.svds(A, k=1, return_singular_vectors=False)
else:
if sp.sparse.issparse(A):
value = sp.sparse.linalg.norm(A, ord=norm)
else:
value = sp.linalg.norm(A, ord=norm)
return value
def scalable_frobenius_norm_discrepancy(X, U, s, V):
# if the input is not too big, just call scipy
if X.shape[0] * X.shape[1] < MAX_MEMORY:
A = X - U.dot(np.diag(s).dot(V))
return norm_diff(A, norm='fro')
print("... computing fro norm by batches...")
batch_size = 1000
Vhat = np.diag(s).dot(V)
cum_norm = .0
for batch in gen_batches(X.shape[0], batch_size):
M = X[batch, :] - U[batch, :].dot(Vhat)
cum_norm += norm_diff(M, norm='fro', msg=False)
return np.sqrt(cum_norm)
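# Illustrative sketch (not part of the benchmark runs below): a minimal,
# self-contained comparison of the power-iteration normalizers on a small
# synthetic matrix, reusing the helpers defined above.  The sizes, rank and
# number of iterations are arbitrary examples; call it manually if desired.
def example_normalizer_comparison():
    rng = check_random_state(0)
    X = make_low_rank_matrix(n_samples=300, n_features=500,
                             effective_rank=30, random_state=rng)
    X_fro_norm = norm_diff(X, norm='fro', msg=False)
    discrepancies = {}
    for normalizer in ('none', 'LU', 'QR', 'auto'):
        U, s, V = randomized_svd(X, 30, n_iter=4,
                                 power_iteration_normalizer=normalizer,
                                 random_state=rng)
        f = scalable_frobenius_norm_discrepancy(X, U, s, V)
        discrepancies[normalizer] = f / X_fro_norm
    return discrepancies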
def bench_a(X, dataset_name, power_iter, n_oversamples, n_comps):
all_time = defaultdict(list)
if enable_spectral_norm:
all_spectral = defaultdict(list)
X_spectral_norm = norm_diff(X, norm=2, msg=False)
all_frobenius = defaultdict(list)
X_fro_norm = norm_diff(X, norm='fro', msg=False)
for pi in power_iter:
for pm in ['none', 'LU', 'QR']:
print("n_iter = %d on sklearn - %s" % (pi, pm))
U, s, V, time = svd_timing(X, n_comps, n_iter=pi,
power_iteration_normalizer=pm,
n_oversamples=n_oversamples)
label = "sklearn - %s" % pm
all_time[label].append(time)
if enable_spectral_norm:
A = U.dot(np.diag(s).dot(V))
all_spectral[label].append(norm_diff(X - A, norm=2) /
X_spectral_norm)
f = scalable_frobenius_norm_discrepancy(X, U, s, V)
all_frobenius[label].append(f / X_fro_norm)
if fbpca_available:
            print("n_iter = %d on fbpca" % (pi))
U, s, V, time = svd_timing(X, n_comps, n_iter=pi,
power_iteration_normalizer=pm,
n_oversamples=n_oversamples,
method='fbpca')
label = "fbpca"
all_time[label].append(time)
if enable_spectral_norm:
A = U.dot(np.diag(s).dot(V))
all_spectral[label].append(norm_diff(X - A, norm=2) /
X_spectral_norm)
f = scalable_frobenius_norm_discrepancy(X, U, s, V)
all_frobenius[label].append(f / X_fro_norm)
if enable_spectral_norm:
title = "%s: spectral norm diff vs running time" % (dataset_name)
plot_time_vs_s(all_time, all_spectral, power_iter, title)
title = "%s: Frobenius norm diff vs running time" % (dataset_name)
plot_time_vs_s(all_time, all_frobenius, power_iter, title)
def bench_b(power_list):
n_samples, n_features = 1000, 10000
data_params = {'n_samples': n_samples, 'n_features': n_features,
'tail_strength': .7, 'random_state': random_state}
dataset_name = "low rank matrix %d x %d" % (n_samples, n_features)
ranks = [10, 50, 100]
if enable_spectral_norm:
all_spectral = defaultdict(list)
all_frobenius = defaultdict(list)
for rank in ranks:
X = make_low_rank_matrix(effective_rank=rank, **data_params)
if enable_spectral_norm:
X_spectral_norm = norm_diff(X, norm=2, msg=False)
X_fro_norm = norm_diff(X, norm='fro', msg=False)
for n_comp in [np.int(rank/2), rank, rank*2]:
label = "rank=%d, n_comp=%d" % (rank, n_comp)
print(label)
for pi in power_list:
U, s, V, _ = svd_timing(X, n_comp, n_iter=pi, n_oversamples=2,
power_iteration_normalizer='LU')
if enable_spectral_norm:
A = U.dot(np.diag(s).dot(V))
all_spectral[label].append(norm_diff(X - A, norm=2) /
X_spectral_norm)
f = scalable_frobenius_norm_discrepancy(X, U, s, V)
all_frobenius[label].append(f / X_fro_norm)
if enable_spectral_norm:
        title = "%s: spectral norm diff vs n power iteration" % (dataset_name)
        plot_power_iter_vs_s(power_list, all_spectral, title)
    title = "%s: Frobenius norm diff vs n power iteration" % (dataset_name)
    plot_power_iter_vs_s(power_list, all_frobenius, title)
def bench_c(datasets, n_comps):
all_time = defaultdict(list)
if enable_spectral_norm:
all_spectral = defaultdict(list)
all_frobenius = defaultdict(list)
for dataset_name in datasets:
X = get_data(dataset_name)
if X is None:
continue
if enable_spectral_norm:
X_spectral_norm = norm_diff(X, norm=2, msg=False)
X_fro_norm = norm_diff(X, norm='fro', msg=False)
n_comps = np.minimum(n_comps, np.min(X.shape))
label = "sklearn"
print("%s %d x %d - %s" %
(dataset_name, X.shape[0], X.shape[1], label))
U, s, V, time = svd_timing(X, n_comps, n_iter=2, n_oversamples=10,
method=label)
all_time[label].append(time)
if enable_spectral_norm:
A = U.dot(np.diag(s).dot(V))
all_spectral[label].append(norm_diff(X - A, norm=2) /
X_spectral_norm)
f = scalable_frobenius_norm_discrepancy(X, U, s, V)
all_frobenius[label].append(f / X_fro_norm)
if fbpca_available:
label = "fbpca"
print("%s %d x %d - %s" %
(dataset_name, X.shape[0], X.shape[1], label))
U, s, V, time = svd_timing(X, n_comps, n_iter=2, n_oversamples=2,
method=label)
all_time[label].append(time)
if enable_spectral_norm:
A = U.dot(np.diag(s).dot(V))
all_spectral[label].append(norm_diff(X - A, norm=2) /
X_spectral_norm)
f = scalable_frobenius_norm_discrepancy(X, U, s, V)
all_frobenius[label].append(f / X_fro_norm)
if len(all_time) == 0:
raise ValueError("No tests ran. Aborting.")
if enable_spectral_norm:
title = "normalized spectral norm diff vs running time"
scatter_time_vs_s(all_time, all_spectral, datasets, title)
title = "normalized Frobenius norm diff vs running time"
scatter_time_vs_s(all_time, all_frobenius, datasets, title)
if __name__ == '__main__':
random_state = check_random_state(1234)
power_iter = np.linspace(0, 6, 7, dtype=int)
n_comps = 50
for dataset_name in datasets:
X = get_data(dataset_name)
if X is None:
continue
print(" >>>>>> Benching sklearn and fbpca on %s %d x %d" %
(dataset_name, X.shape[0], X.shape[1]))
bench_a(X, dataset_name, power_iter, n_oversamples=2,
n_comps=np.minimum(n_comps, np.min(X.shape)))
print(" >>>>>> Benching on simulated low rank matrix with variable rank")
bench_b(power_iter)
print(" >>>>>> Benching sklearn and fbpca default configurations")
bench_c(datasets + big_sparse_datasets, n_comps)
plt.show()
|
bsd-3-clause
|
jaeilepp/eggie
|
mne/preprocessing/tests/test_infomax.py
|
4
|
3922
|
# Authors: Denis A. Engemann <[email protected]>
#
# License: BSD (3-clause)
"""
Test the infomax algorithm.
Parts of this code are taken from scikit-learn
"""
import numpy as np
from numpy.testing import assert_almost_equal
from scipy import stats
from scipy import linalg
from mne.preprocessing.infomax_ import infomax
from mne.utils import requires_sklearn
def center_and_norm(x, axis=-1):
""" Centers and norms x **in place**
Parameters
-----------
x: ndarray
Array with an axis of observations (statistical units) measured on
random variables.
axis: int, optional
Axis along which the mean and variance are calculated.
"""
x = np.rollaxis(x, axis)
x -= x.mean(axis=0)
x /= x.std(axis=0)
@requires_sklearn
def test_infomax_simple(add_noise=False):
""" Test the infomax algorithm on very simple data.
"""
from sklearn.decomposition import RandomizedPCA
rng = np.random.RandomState(0)
# scipy.stats uses the global RNG:
np.random.seed(0)
n_samples = 1000
# Generate two sources:
s1 = (2 * np.sin(np.linspace(0, 100, n_samples)) > 0) - 1
s2 = stats.t.rvs(1, size=n_samples)
s = np.c_[s1, s2].T
center_and_norm(s)
s1, s2 = s
# Mixing angle
phi = 0.6
mixing = np.array([[np.cos(phi), np.sin(phi)],
[np.sin(phi), -np.cos(phi)]])
m = np.dot(mixing, s)
if add_noise:
m += 0.1 * rng.randn(2, 1000)
center_and_norm(m)
algos = [True, False]
for algo in algos:
X = RandomizedPCA(n_components=2, whiten=True).fit_transform(m.T)
k_ = infomax(X, extended=algo)
s_ = np.dot(k_, X.T)
center_and_norm(s_)
s1_, s2_ = s_
# Check to see if the sources have been estimated
# in the wrong order
if abs(np.dot(s1_, s2)) > abs(np.dot(s1_, s1)):
s2_, s1_ = s_
s1_ *= np.sign(np.dot(s1_, s1))
s2_ *= np.sign(np.dot(s2_, s2))
# Check that we have estimated the original sources
if not add_noise:
assert_almost_equal(np.dot(s1_, s1) / n_samples, 1, decimal=2)
assert_almost_equal(np.dot(s2_, s2) / n_samples, 1, decimal=2)
else:
assert_almost_equal(np.dot(s1_, s1) / n_samples, 1, decimal=1)
assert_almost_equal(np.dot(s2_, s2) / n_samples, 1, decimal=1)
@requires_sklearn
def test_non_square_infomax(add_noise=False):
    """ Test the infomax algorithm with a non-square mixing matrix.
"""
from sklearn.decomposition import RandomizedPCA
rng = np.random.RandomState(0)
n_samples = 1000
# Generate two sources:
t = np.linspace(0, 100, n_samples)
s1 = np.sin(t)
s2 = np.ceil(np.sin(np.pi * t))
s = np.c_[s1, s2].T
center_and_norm(s)
s1, s2 = s
# Mixing matrix
n_observed = 6
mixing = rng.randn(n_observed, 2)
m = np.dot(mixing, s)
if add_noise:
m += 0.1 * rng.randn(n_observed, n_samples)
center_and_norm(m)
pca = RandomizedPCA(n_components=2, whiten=True, random_state=rng)
m = m.T
m = pca.fit_transform(m)
# we need extended since input signals are sub-gaussian
unmixing_ = infomax(m, random_state=rng, extended=True)
s_ = np.dot(unmixing_, m.T)
# Check that the mixing model described in the docstring holds:
mixing_ = linalg.pinv(unmixing_.T)
assert_almost_equal(m, s_.T.dot(mixing_))
center_and_norm(s_)
s1_, s2_ = s_
# Check to see if the sources have been estimated
# in the wrong order
if abs(np.dot(s1_, s2)) > abs(np.dot(s1_, s1)):
s2_, s1_ = s_
s1_ *= np.sign(np.dot(s1_, s1))
s2_ *= np.sign(np.dot(s2_, s2))
# Check that we have estimated the original sources
if not add_noise:
assert_almost_equal(np.dot(s1_, s1) / n_samples, 1, decimal=2)
assert_almost_equal(np.dot(s2_, s2) / n_samples, 1, decimal=2)
|
bsd-2-clause
|
JosmanPS/scikit-learn
|
examples/applications/topics_extraction_with_nmf_lda.py
|
133
|
3517
|
"""
========================================================================================
Topics extraction with Non-Negative Matrix Factorization And Latent Dirichlet Allocation
========================================================================================
This is an example of applying Non Negative Matrix Factorization
and Latent Dirichlet Allocation on a corpus of documents and
extracting additive models of the topic structure of the corpus.
The output is a list of topics, each represented as a list of terms
(weights are not shown).
The default parameters (n_samples / n_features / n_topics) should make
the example runnable in a few tens of seconds. You can try to
increase the dimensions of the problem, but be aware that the time
complexity is polynomial in NMF. In LDA, the time complexity is
proportional to (n_samples * iterations).
"""
# Author: Olivier Grisel <[email protected]>
# Lars Buitinck <[email protected]>
# Chyi-Kwei Yau <[email protected]>
# License: BSD 3 clause
from __future__ import print_function
from time import time
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn.decomposition import NMF, LatentDirichletAllocation
from sklearn.datasets import fetch_20newsgroups
n_samples = 2000
n_features = 1000
n_topics = 10
n_top_words = 20
def print_top_words(model, feature_names, n_top_words):
for topic_idx, topic in enumerate(model.components_):
print("Topic #%d:" % topic_idx)
print(" ".join([feature_names[i] for i in topic.argsort()[:-n_top_words - 1:-1]]))
print()
# Load the 20 newsgroups dataset and vectorize it. We use a few heuristics
# to filter out useless terms early on: the posts are stripped of headers,
# footers and quoted replies, and common English words, words occurring in
# only one document or in at least 95% of the documents are removed.
t0 = time()
print("Loading dataset and extracting features...")
dataset = fetch_20newsgroups(shuffle=True, random_state=1,
remove=('headers', 'footers', 'quotes'))
data_samples = dataset.data[:n_samples]
# use tf-idf feature for NMF model
tfidf_vectorizer = TfidfVectorizer(max_df=0.95, min_df=2, max_features=n_features,
stop_words='english')
tfidf = tfidf_vectorizer.fit_transform(data_samples)
# use tf feature for LDA model
tf_vectorizer = CountVectorizer(max_df=0.95, min_df=2, max_features=n_features,
stop_words='english')
tf = tf_vectorizer.fit_transform(data_samples)
print("done in %0.3fs." % (time() - t0))
# Fit the NMF model
print("Fitting the NMF model with tf-idf feature, n_samples=%d and n_features=%d..."
% (n_samples, n_features))
nmf = NMF(n_components=n_topics, random_state=1).fit(tfidf)
print("done in %0.3fs." % (time() - t0))
print("\nTopics in NMF model:")
tfidf_feature_names = tfidf_vectorizer.get_feature_names()
print_top_words(nmf, tfidf_feature_names, n_top_words)
print("\nFitting LDA models with tf feature, n_samples=%d and n_features=%d..."
% (n_samples, n_features))
lda = LatentDirichletAllocation(n_topics=n_topics, max_iter=5,
learning_method='online', learning_offset=50.,
random_state=0)
lda.fit(tf)
print("done in %0.3fs." % (time() - t0))
print("\nTopics in LDA model:")
tf_feature_names = tf_vectorizer.get_feature_names()
print_top_words(lda, tf_feature_names, n_top_words)
|
bsd-3-clause
|
greghogan/flink
|
flink-python/pyflink/table/tests/test_aggregate.py
|
2
|
20869
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
import collections
import datetime
from decimal import Decimal
import pandas as pd
from pandas.util.testing import assert_frame_equal
from pyflink.common import Row, RowKind
from pyflink.fn_execution.state_impl import RemovableConcatIterator
from pyflink.table import DataTypes
from pyflink.table.data_view import ListView, MapView
from pyflink.table.expressions import col
from pyflink.table.udf import AggregateFunction, udaf
from pyflink.testing.test_case_utils import PyFlinkBlinkStreamTableTestCase
class CountAggregateFunction(AggregateFunction):
def get_value(self, accumulator):
return accumulator[0]
def create_accumulator(self):
return [0]
def accumulate(self, accumulator, *args):
accumulator[0] = accumulator[0] + 1
def retract(self, accumulator, *args):
accumulator[0] = accumulator[0] - 1
def merge(self, accumulator, accumulators):
for other_acc in accumulators:
accumulator[0] = accumulator[0] + other_acc[0]
def get_accumulator_type(self):
return DataTypes.ARRAY(DataTypes.BIGINT())
def get_result_type(self):
return DataTypes.BIGINT()
class SumAggregateFunction(AggregateFunction):
def get_value(self, accumulator):
return accumulator[0]
def create_accumulator(self):
return [0]
def accumulate(self, accumulator, *args):
accumulator[0] = accumulator[0] + args[0]
def retract(self, accumulator, *args):
accumulator[0] = accumulator[0] - args[0]
def merge(self, accumulator, accumulators):
for other_acc in accumulators:
accumulator[0] = accumulator[0] + other_acc[0]
def get_accumulator_type(self):
return DataTypes.ARRAY(DataTypes.BIGINT())
def get_result_type(self):
return DataTypes.BIGINT()
class ConcatAggregateFunction(AggregateFunction):
def get_value(self, accumulator):
str_list = [i for i in accumulator[0]]
str_list.sort()
return accumulator[1].join(str_list)
def create_accumulator(self):
return Row([], '')
def accumulate(self, accumulator, *args):
if args[0] is not None:
accumulator[1] = args[1]
accumulator[0].append(args[0])
def retract(self, accumulator, *args):
if args[0] is not None:
accumulator[0].remove(args[0])
def get_accumulator_type(self):
return DataTypes.ROW([
DataTypes.FIELD("f0", DataTypes.ARRAY(DataTypes.STRING())),
DataTypes.FIELD("f1", DataTypes.BIGINT())])
def get_result_type(self):
return DataTypes.STRING()
class ListViewConcatAggregateFunction(AggregateFunction):
def get_value(self, accumulator):
return accumulator[1].join(accumulator[0])
def create_accumulator(self):
return Row(ListView(), '')
def accumulate(self, accumulator, *args):
accumulator[1] = args[1]
accumulator[0].add(args[0])
def retract(self, accumulator, *args):
raise NotImplementedError
def get_accumulator_type(self):
return DataTypes.ROW([
DataTypes.FIELD("f0", DataTypes.LIST_VIEW(DataTypes.STRING())),
DataTypes.FIELD("f1", DataTypes.BIGINT())])
def get_result_type(self):
return DataTypes.STRING()
class CountDistinctAggregateFunction(AggregateFunction):
def get_value(self, accumulator):
return accumulator[1]
def create_accumulator(self):
return Row(MapView(), 0)
def accumulate(self, accumulator, *args):
input_str = args[0]
if accumulator[0].is_empty() or input_str not in accumulator[0] \
or accumulator[0][input_str] is None:
accumulator[0][input_str] = 1
accumulator[1] += 1
else:
accumulator[0][input_str] += 1
if input_str == "clear":
accumulator[0].clear()
accumulator[1] = 0
def retract(self, accumulator, *args):
input_str = args[0]
if accumulator[0].is_empty() or input_str not in accumulator[0]:
return
accumulator[0].put_all({input_str: accumulator[0][input_str] - 1})
if accumulator[0][input_str] <= 0:
accumulator[1] -= 1
accumulator[0][input_str] = None
def get_accumulator_type(self):
return DataTypes.ROW([
DataTypes.FIELD("f0", DataTypes.MAP(DataTypes.STRING(), DataTypes.STRING())),
DataTypes.FIELD("f1", DataTypes.BIGINT())])
def get_result_type(self):
return DataTypes.BIGINT()
class TestIterateAggregateFunction(AggregateFunction):
def get_value(self, accumulator):
# test iterate keys
key_set = [i for i in accumulator[0]]
key_set.sort()
# test iterate values
value_set = [str(i) for i in accumulator[0].values()]
value_set.sort()
item_set = {}
# test iterate items
for key, value in accumulator[0].items():
item_set[key] = value
ordered_item_set = collections.OrderedDict()
for key in key_set:
ordered_item_set[key] = str(item_set[key])
try:
# test auto clear the cached iterators
next(iter(accumulator[0].items()))
except StopIteration:
pass
return Row(",".join(key_set),
','.join(value_set),
",".join([":".join(item) for item in ordered_item_set.items()]),
accumulator[1])
def create_accumulator(self):
return Row(MapView(), 0)
def accumulate(self, accumulator, *args):
input_str = args[0]
if input_str not in accumulator[0]:
accumulator[0][input_str] = 1
accumulator[1] += 1
else:
accumulator[0][input_str] += 1
def retract(self, accumulator, *args):
input_str = args[0]
if input_str not in accumulator[0]:
return
accumulator[0][input_str] -= 1
if accumulator[0][input_str] == 0:
# test removable iterator
key_iter = iter(accumulator[0].keys()) # type: RemovableConcatIterator
while True:
try:
key = next(key_iter)
if key == input_str:
key_iter.remove()
except StopIteration:
break
accumulator[1] -= 1
def get_accumulator_type(self):
return DataTypes.ROW([
DataTypes.FIELD("f0", DataTypes.MAP_VIEW(DataTypes.STRING(), DataTypes.BIGINT())),
DataTypes.FIELD("f1", DataTypes.BIGINT())])
def get_result_type(self):
return DataTypes.ROW([
DataTypes.FIELD("f0", DataTypes.STRING()),
DataTypes.FIELD("f1", DataTypes.STRING()),
DataTypes.FIELD("f2", DataTypes.STRING()),
DataTypes.FIELD("f3", DataTypes.BIGINT())])
class StreamTableAggregateTests(PyFlinkBlinkStreamTableTestCase):
def test_double_aggregate(self):
self.t_env.register_function("my_count", CountAggregateFunction())
self.t_env.create_temporary_function("my_sum", SumAggregateFunction())
# trigger the finish bundle more frequently to ensure testing the communication
# between RemoteKeyedStateBackend and the StateGrpcService.
self.t_env.get_config().get_configuration().set_string(
"python.fn-execution.bundle.size", "2")
# trigger the cache eviction in a bundle.
self.t_env.get_config().get_configuration().set_string(
"python.state.cache-size", "1")
t = self.t_env.from_elements([(1, 'Hi', 'Hello'),
(3, 'Hi', 'hi'),
(3, 'Hi2', 'hi'),
(3, 'Hi', 'hi2'),
(2, 'Hi', 'Hello')], ['a', 'b', 'c'])
result = t.group_by(t.c).select("my_count(a) as a, my_sum(a) as b, c") \
.select("my_count(a) as a, my_sum(b) as b, sum0(b) as c, sum0(b.cast(double)) as d")
assert_frame_equal(result.to_pandas(),
pd.DataFrame([[3, 12, 12, 12.0]], columns=['a', 'b', 'c', 'd']))
def test_mixed_with_built_in_functions_with_retract(self):
self.env.set_parallelism(1)
self.t_env.create_temporary_system_function(
"concat",
ConcatAggregateFunction())
t = self.t_env.from_elements(
[(1, 'Hi_', 1),
(1, 'Hi', 2),
(2, 'Hi_', 3),
(2, 'Hi', 4),
(3, None, None),
(3, None, None),
(4, 'hello2_', 7),
(4, 'hello2', 8),
(5, 'hello_', 9),
(5, 'hello', 10)], ['a', 'b', 'c'])
self.t_env.create_temporary_view("source", t)
table_with_retract_message = self.t_env.sql_query(
"select a, LAST_VALUE(b) as b, LAST_VALUE(c) as c from source group by a")
self.t_env.create_temporary_view("retract_table", table_with_retract_message)
result_table = self.t_env.sql_query(
"select concat(b, ',') as a, "
"FIRST_VALUE(b) as b, "
"LAST_VALUE(b) as c, "
"COUNT(c) as d, "
"COUNT(1) as e, "
"LISTAGG(b) as f,"
"LISTAGG(b, '|') as g,"
"MAX(c) as h,"
"MAX(cast(c as float) + 1) as i,"
"MIN(c) as j,"
"MIN(cast(c as decimal) + 1) as k,"
"SUM(c) as l,"
"SUM(cast(c as float) + 1) as m,"
"AVG(c) as n,"
"AVG(cast(c as double) + 1) as o,"
"STDDEV_POP(cast(c as float)),"
"STDDEV_SAMP(cast(c as float)),"
"VAR_POP(cast(c as float)),"
"VAR_SAMP(cast(c as float))"
" from retract_table")
result = [i for i in result_table.execute().collect()]
expected = Row('Hi,Hi,hello,hello2', 'Hi', 'hello', 4, 5, 'Hi,Hi,hello2,hello',
'Hi|Hi|hello2|hello', 10, 11.0, 2, Decimal(3.0), 24, 28.0, 6, 7.0,
3.1622777, 3.6514838, 10.0, 13.333333)
expected.set_row_kind(RowKind.UPDATE_AFTER)
self.assertEqual(result[len(result) - 1], expected)
def test_mixed_with_built_in_functions_without_retract(self):
self.env.set_parallelism(1)
self.t_env.create_temporary_system_function(
"concat",
ConcatAggregateFunction())
t = self.t_env.from_elements(
[('Hi', 2),
('Hi', 4),
(None, None),
('hello2', 8),
('hello', 10)], ['b', 'c'])
self.t_env.create_temporary_view("source", t)
result_table = self.t_env.sql_query(
"select concat(b, ',') as a, "
"FIRST_VALUE(b) as b, "
"LAST_VALUE(b) as c, "
"COUNT(c) as d, "
"COUNT(1) as e, "
"LISTAGG(b) as f,"
"LISTAGG(b, '|') as g,"
"MAX(c) as h,"
"MAX(cast(c as float) + 1) as i,"
"MIN(c) as j,"
"MIN(cast(c as decimal) + 1) as k,"
"SUM(c) as l,"
"SUM(cast(c as float) + 1) as m "
"from source")
result = [i for i in result_table.execute().collect()]
expected = Row('Hi,Hi,hello,hello2', 'Hi', 'hello', 4, 5, 'Hi,Hi,hello2,hello',
'Hi|Hi|hello2|hello', 10, 11.0, 2, Decimal(3.0), 24, 28.0)
expected.set_row_kind(RowKind.UPDATE_AFTER)
self.assertEqual(result[len(result) - 1], expected)
def test_using_decorator(self):
my_count = udaf(CountAggregateFunction(),
accumulator_type=DataTypes.ARRAY(DataTypes.INT()),
result_type=DataTypes.INT())
t = self.t_env.from_elements([(1, 'Hi', 'Hello')], ['a', 'b', 'c'])
result = t.group_by(t.c) \
.select(my_count(t.a).alias("a"), t.c.alias("b"))
plan = result.explain()
result_type = result.get_schema().get_field_data_type(0)
self.assertTrue(plan.find("PythonGroupAggregate(groupBy=[c], ") >= 0)
self.assertEqual(result_type, DataTypes.INT())
def test_list_view(self):
my_concat = udaf(ListViewConcatAggregateFunction())
self.t_env.get_config().get_configuration().set_string(
"python.fn-execution.bundle.size", "2")
# trigger the cache eviction in a bundle.
self.t_env.get_config().get_configuration().set_string(
"python.state.cache-size", "2")
t = self.t_env.from_elements([(1, 'Hi', 'Hello'),
(3, 'Hi', 'hi'),
(3, 'Hi2', 'hi'),
(3, 'Hi', 'hi'),
(2, 'Hi', 'Hello'),
(1, 'Hi2', 'Hello'),
(3, 'Hi3', 'hi'),
(3, 'Hi2', 'Hello'),
(3, 'Hi3', 'hi'),
(2, 'Hi3', 'Hello')], ['a', 'b', 'c'])
result = t.group_by(t.c).select(my_concat(t.b, ',').alias("a"), t.c)
assert_frame_equal(result.to_pandas(),
pd.DataFrame([["Hi,Hi2,Hi,Hi3,Hi3", "hi"],
["Hi,Hi,Hi2,Hi2,Hi3", "Hello"]], columns=['a', 'c']))
def test_map_view(self):
my_count = udaf(CountDistinctAggregateFunction())
self.t_env.get_config().set_idle_state_retention(datetime.timedelta(days=1))
self.t_env.get_config().get_configuration().set_string(
"python.fn-execution.bundle.size", "2")
# trigger the cache eviction in a bundle.
self.t_env.get_config().get_configuration().set_string(
"python.state.cache-size", "1")
self.t_env.get_config().get_configuration().set_string(
"python.map-state.read-cache-size", "1")
self.t_env.get_config().get_configuration().set_string(
"python.map-state.write-cache-size", "1")
t = self.t_env.from_elements(
[(1, 'Hi_', 'hi'),
(1, 'Hi', 'hi'),
(2, 'hello', 'hello'),
(3, 'Hi_', 'hi'),
(3, 'Hi', 'hi'),
(4, 'hello', 'hello'),
(5, 'Hi2_', 'hi'),
(5, 'Hi2', 'hi'),
(6, 'hello2', 'hello'),
(7, 'Hi', 'hi'),
(8, 'hello', 'hello'),
(9, 'Hi2', 'hi'),
(13, 'Hi3', 'hi')], ['a', 'b', 'c'])
self.t_env.create_temporary_view("source", t)
table_with_retract_message = self.t_env.sql_query(
"select LAST_VALUE(b) as b, LAST_VALUE(c) as c from source group by a")
result = table_with_retract_message.group_by(t.c).select(my_count(t.b).alias("a"), t.c)
assert_frame_equal(result.to_pandas(),
pd.DataFrame([[2, "hello"],
[3, "hi"]], columns=['a', 'c']))
def test_data_view_clear(self):
my_count = udaf(CountDistinctAggregateFunction())
self.t_env.get_config().set_idle_state_retention(datetime.timedelta(days=1))
self.t_env.get_config().get_configuration().set_string(
"python.fn-execution.bundle.size", "2")
# trigger the cache eviction in a bundle.
self.t_env.get_config().get_configuration().set_string(
"python.state.cache-size", "1")
t = self.t_env.from_elements(
[(2, 'hello', 'hello'),
(4, 'clear', 'hello'),
(6, 'hello2', 'hello'),
(8, 'hello', 'hello')], ['a', 'b', 'c'])
result = t.group_by(t.c).select(my_count(t.b).alias("a"), t.c)
assert_frame_equal(result.to_pandas(),
pd.DataFrame([[2, "hello"]], columns=['a', 'c']))
def test_map_view_iterate(self):
test_iterate = udaf(TestIterateAggregateFunction())
self.t_env.get_config().set_idle_state_retention(datetime.timedelta(days=1))
self.t_env.get_config().get_configuration().set_string(
"python.fn-execution.bundle.size", "2")
# trigger the cache eviction in a bundle.
self.t_env.get_config().get_configuration().set_string(
"python.state.cache-size", "2")
self.t_env.get_config().get_configuration().set_string(
"python.map-state.read-cache-size", "2")
self.t_env.get_config().get_configuration().set_string(
"python.map-state.write-cache-size", "2")
self.t_env.get_config().get_configuration().set_string(
"python.map-state.iterate-response-batch-size", "2")
t = self.t_env.from_elements(
[(1, 'Hi_', 'hi'),
(1, 'Hi', 'hi'),
(2, 'hello', 'hello'),
(3, 'Hi_', 'hi'),
(3, 'Hi', 'hi'),
(4, 'hello', 'hello'),
(5, 'Hi2_', 'hi'),
(5, 'Hi2', 'hi'),
(6, 'hello2', 'hello'),
(7, 'Hi', 'hi'),
(8, 'hello', 'hello'),
(9, 'Hi2', 'hi'),
(13, 'Hi3', 'hi')], ['a', 'b', 'c'])
self.t_env.create_temporary_view("source", t)
table_with_retract_message = self.t_env.sql_query(
"select LAST_VALUE(b) as b, LAST_VALUE(c) as c from source group by a")
result = table_with_retract_message.group_by(t.c) \
.select(test_iterate(t.b).alias("a"), t.c) \
.select(col("a").get(0).alias("a"),
col("a").get(1).alias("b"),
col("a").get(2).alias("c"),
col("a").get(3).alias("d"),
t.c.alias("e"))
assert_frame_equal(
result.to_pandas(),
pd.DataFrame([
["hello,hello2", "1,3", 'hello:3,hello2:1', 2, "hello"],
["Hi,Hi2,Hi3", "1,2,3", "Hi:3,Hi2:2,Hi3:1", 3, "hi"]],
columns=['a', 'b', 'c', 'd', 'e']))
def test_distinct_and_filter(self):
self.t_env.create_temporary_system_function(
"concat",
ConcatAggregateFunction())
t = self.t_env.from_elements(
[(1, 'Hi_', 'hi'),
(1, 'Hi', 'hi'),
(2, 'hello', 'hello'),
(3, 'Hi_', 'hi'),
(3, 'Hi', 'hi'),
(4, 'hello', 'hello'),
(5, 'Hi2_', 'hi'),
(5, 'Hi2', 'hi'),
(6, 'hello2', 'hello'),
(7, 'Hi', 'hi'),
(8, 'hello', 'hello'),
(9, 'Hi2', 'hi'),
(13, 'Hi3', 'hi')], ['a', 'b', 'c'])
self.t_env.create_temporary_view("source", t)
table_with_retract_message = self.t_env.sql_query(
"select LAST_VALUE(b) as b, LAST_VALUE(c) as c from source group by a")
self.t_env.create_temporary_view("retract_table", table_with_retract_message)
result = self.t_env.sql_query(
"select concat(distinct b, '.') as a, "
"concat(distinct b, ',') filter (where c = 'hi') as b, "
"concat(distinct b, ',') filter (where c = 'hello') as c, "
"c as d "
"from retract_table group by c")
assert_frame_equal(result.to_pandas().sort_values(by='a').reset_index(drop=True),
pd.DataFrame([["Hi.Hi2.Hi3", "Hi,Hi2,Hi3", "", "hi"],
["hello.hello2", "", "hello,hello2", "hello"]],
columns=['a', 'b', 'c', 'd']))
if __name__ == '__main__':
import unittest
try:
import xmlrunner
testRunner = xmlrunner.XMLTestRunner(output='target/test-reports')
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
|
apache-2.0
|
adamgreenhall/scikit-learn
|
examples/cluster/plot_cluster_iris.py
|
350
|
2593
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
K-means Clustering
=========================================================
The plots display firstly what a K-means algorithm would yield
using three clusters. It is then shown what the effect of a bad
initialization is on the classification process:
By setting n_init to only 1 (default is 10), the number of
times that the algorithm is run with different centroid
seeds is reduced.
The next plot displays what using eight clusters would deliver
and finally the ground truth.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from sklearn.cluster import KMeans
from sklearn import datasets
np.random.seed(5)
centers = [[1, 1], [-1, -1], [1, -1]]
iris = datasets.load_iris()
X = iris.data
y = iris.target
estimators = {'k_means_iris_3': KMeans(n_clusters=3),
'k_means_iris_8': KMeans(n_clusters=8),
'k_means_iris_bad_init': KMeans(n_clusters=3, n_init=1,
init='random')}
fignum = 1
for name, est in estimators.items():
fig = plt.figure(fignum, figsize=(4, 3))
plt.clf()
ax = Axes3D(fig, rect=[0, 0, .95, 1], elev=48, azim=134)
plt.cla()
est.fit(X)
labels = est.labels_
ax.scatter(X[:, 3], X[:, 0], X[:, 2], c=labels.astype(np.float))
ax.w_xaxis.set_ticklabels([])
ax.w_yaxis.set_ticklabels([])
ax.w_zaxis.set_ticklabels([])
ax.set_xlabel('Petal width')
ax.set_ylabel('Sepal length')
ax.set_zlabel('Petal length')
fignum = fignum + 1
# Plot the ground truth
fig = plt.figure(fignum, figsize=(4, 3))
plt.clf()
ax = Axes3D(fig, rect=[0, 0, .95, 1], elev=48, azim=134)
plt.cla()
for name, label in [('Setosa', 0),
('Versicolour', 1),
('Virginica', 2)]:
ax.text3D(X[y == label, 3].mean(),
X[y == label, 0].mean() + 1.5,
X[y == label, 2].mean(), name,
horizontalalignment='center',
bbox=dict(alpha=.5, edgecolor='w', facecolor='w'))
# Reorder the labels to have colors matching the cluster results
y = np.choose(y, [1, 2, 0]).astype(np.float)
ax.scatter(X[:, 3], X[:, 0], X[:, 2], c=y)
ax.w_xaxis.set_ticklabels([])
ax.w_yaxis.set_ticklabels([])
ax.w_zaxis.set_ticklabels([])
ax.set_xlabel('Petal width')
ax.set_ylabel('Sepal length')
ax.set_zlabel('Petal length')
plt.show()
|
bsd-3-clause
|
scorelab/D4D---Drone-4-Dengue
|
d4d-data/detecting_water_retention_areas/tester.py
|
1
|
3176
|
import numpy as np
from skimage.transform import pyramid_gaussian
from imutils.object_detection import non_max_suppression
import imutils
from skimage.feature import hog
from sklearn.externals import joblib
import cv2
from config import *
from skimage import color
import matplotlib.pyplot as plt
import os
import glob
model_path = "./classifier"
def sliding_window(image, window_size, step_size):
    for y in range(0, image.shape[0], step_size[1]):
        for x in range(0, image.shape[1], step_size[0]):
yield (x, y, image[y: y + window_size[1], x: x + window_size[0]])
def detector(filename):
#Read the image
im = cv2.imread(filename)
im = imutils.resize(im, width = min(400, im.shape[1]))
min_wdw_sz = (100, 100)
step_size = (10, 10)
downscale = 1.25
#Load the classifier
clf = joblib.load(os.path.join(model_path, 'svm.model'))
#List to store the detections
detections = []
#The current scale of the image
scale = 0
#Downscale the image and iterate
for im_scaled in pyramid_gaussian(im, downscale = downscale):
#The list contains detections at the current scale
#If the width or height of the scaled image is less than
#the width or height of the window, then end the iterations.
if im_scaled.shape[0] < min_wdw_sz[1] or im_scaled.shape[1] < min_wdw_sz[0]:
break
for (x, y, im_window) in sliding_window(im_scaled, min_wdw_sz, step_size):
if im_window.shape[0] != min_wdw_sz[1] or im_window.shape[1] != min_wdw_sz[0]:
continue
im_window = color.rgb2gray(im_window)
#Calculate the HOG features
fd = hog(im_window, orientations, pixels_per_cell, cells_per_block, visualize, normalize)
fd = fd.reshape(1, -1)
pred = clf.predict(fd)
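            # Keep only confident positives: the window must be classified as
            # positive and the SVM margin (decision_function) must also exceed
            # 0.5 before it is recorded as a detection.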
if pred == 1:
if clf.decision_function(fd) > 0.5:
detections.append((int(x * (downscale**scale)), int(y * (downscale**scale)), clf.decision_function(fd),
int(min_wdw_sz[0] * (downscale**scale)),
int(min_wdw_sz[1] * (downscale**scale))))
        # Move to the next scale
scale += 1
#Display the results before performing NMS
clone = im.copy()
for (x_tl, y_tl, _, w, h) in detections:
#Draw the detections
cv2.rectangle(im, (x_tl, y_tl), (x_tl + w, y_tl + h), (0, 255, 0), thickness = 2)
rects = np.array([[x, y, x + w, y + h] for (x, y, _, w, h) in detections])
sc = [score[0] for (x, y, score, w, h) in detections]
    print(filename)
sc = np.array(sc)
pick = non_max_suppression(rects, probs = sc, overlapThresh = 0.3)
for(xA, yA, xB, yB) in pick:
cv2.rectangle(clone, (xA, yA), (xB, yB), (0, 255, 0), 2)
plt.axis("off")
plt.imshow(cv2.cvtColor(clone, cv2.COLOR_BGR2RGB))
plt.title("Final Detections after applying NMS")
plt.show()
if __name__ == '__main__':
foldername = './test_image'
filenames = glob.glob(os.path.join(foldername, "*"))
for filename in filenames:
detector(filename)
|
apache-2.0
|
sjsj0101/backtestengine
|
backup/md-tick.py
|
1
|
30923
|
# # encoding: UTF-8
# """
# The low-level CTP interface comes from 'HaiFeng'-PY-AT
# The simplified wrapper follows the structure of VN.PY
# """
# from engine.threadEventEngine import *
# from engine.eventType import *
# from ctp.ctp_struct import *
# from ctp.quote import Quote
# from ctp.trade import Trade
# import time
# import pandas as pd
# import datetime
# import random
# ########################################################################
# class MdApi:
# """
# Market data API wrapper used in the demo.
# After wrapping, all data is automatically pushed into the event-driven
# engine, which dispatches it to every callback registered for that event.
#
# Active functions exposed to the user:
# login     - log in
# subscribe - subscribe to a contract's market data
# """
# #----------------------------------------------------------------------
# def __init__(self, eventEngine):
# """
# API对象的初始化函数
# """
# # 事件引擎,所有数据都推送到其中,再由事件引擎进行分发
# self.__eventEngine = eventEngine
# self.q = Quote()
#
# # 请求编号,由api负责管理
# self.__reqid = 0
#
# # 以下变量用于实现连接和重连后的自动登陆
# self.__userid = '008105'
# self.__password = '1'
# self.__brokerid = '9999'
#
# def login(self):
# api = self.q.CreateApi()
# spi = self.q.CreateSpi()
# self.q.RegisterSpi(spi)
# self.q.OnFrontConnected = self.onFrontConnected # 交易服务器登陆相应
# self.q.OnRspUserLogin = self.onRspUserLogin # 用户登陆
# self.q.OnFrontDisconnected = self.onFrontDisconnected
# self.q.OnRspError = self.onRspError
# self.q.OnRspSubMarketData = self.OnRspSubMarketData
# self.q.OnRtnDepthMarketData = self.onRtnDepthMarketData
#
# self.q.RegCB()
# self.q.RegisterFront('tcp://180.168.146.187:10010')
# self.q.Init()
# # self.q.Join()
#
# def put_log_event(self, log): # log事件注册
# event = Event(type=EVENT_LOG)
# event.dict['log'] = log
# self.__eventEngine.sendEvent(event)
#
# def onFrontConnected(self):
# """服务器连接"""
# print('---行情服务器连接成功')
# self.put_log_event('行情服务器连接成功')
# self.q.ReqUserLogin(BrokerID=self.__brokerid, UserID=self.__userid, Password=self.__password)
#
# def onFrontDisconnected(self, n):
# """服务器断开"""
# self.put_log_event('行情服务器连接断开')
#
# def onRspError(self, error, n, last):
# """错误回报"""
# log = '行情错误回报,错误代码:' + str(error.__dict__['ErrorID']) + ', 错误信息:' + str(error.__dict__['ErrorMsg'])
# self.put_log_event(log)
#
# def onRspUserLogin(self, data, error, n, last):
# """登陆回报"""
# print('---行情服务器登陆成功')
# if error.__dict__['ErrorID'] == 0:
# log = '行情服务器登陆成功'
# else:
# log = '登陆回报,错误代码:' + str(error.__dict__['ErrorID']) + ', 错误信息:' + str(error.__dict__['ErrorMsg'])
# self.put_log_event(log)
#
# def OnRspSubMarketData(self, data, info, n, last):
# pass
#
# def onRtnDepthMarketData(self, data):
# """行情推送"""
# # 特定合约行情事件
# event2 = Event(type=(EVENT_MARKETDATA_CONTRACT + data.__dict__['InstrumentID']))
# event2.dict['data'] = data.__dict__
# self.__eventEngine.put(event2)
#
# # ----------------------------------------------------------------------
# def subscribe(self, instrumentid):
# """订阅合约"""
# self.q.SubscribeMarketData(pInstrumentID=instrumentid)
#
# def unsubscribe(self, instrumentid):
# """退订合约"""
# self.q.UnSubscribeMarketData(pInstrumentID=instrumentid)
#
# ########################################################################
# class TdApi:
# """
# Trading API wrapper used in the demo.
# Active functions:
# login         - log in
# getInstrument - query contract information
# getAccount    - query account funds
# getInvestor   - query investor information
# getPosition   - query positions
# sendOrder     - place an order
# cancelOrder   - cancel an order
# """
#
# #----------------------------------------------------------------------
# def __init__(self, eventEngine):
# """API对象的初始化函数"""
# # 事件引擎,所有数据都推送到其中,再由事件引擎进行分发
# self.__eventEngine = eventEngine
# self.t = Trade()
#
# # 请求编号,由api负责管理
# self.__reqid = 0
#
# # 报单编号,由api负责管理
# self.__orderref = random.randrange(start=1000,stop=9000,step=random.randint(10,100) )
#
# # 以下变量用于实现连接和重连后的自动登陆
# self.__userid = '008105'
# self.__password = '1'
# self.__brokerid = '9999'
#
# def login(self):
# api = self.t.CreateApi()
# spi = self.t.CreateSpi()
# self.t.RegisterSpi(spi)
# self.t.OnFrontConnected = self.onFrontConnected # 交易服务器登陆相应
# self.t.OnRspUserLogin = self.onRspUserLogin # 用户登陆
# self.t.OnRtnInstrumentStatus = self.OnRtnInstrumentStatus
# self.t.OnRspSettlementInfoConfirm = self.onRspSettlementInfoConfirm # 结算单确认
# self.t.OnRspQryInstrument = self.onRspQryInstrument # 查询全部交易合约
# self.t.OnRspQryDepthMarketData = self.onRspQryDepthMarketData # tick截面数据
# self.t.OnRspQryInvestorPosition = self.onRspQryInvestorPosition#查询持仓
# self.t.OnRspQryTradingAccount = self.onRspQryTradingAccount#查询账户
# self.t.OnRtnOrder = self.onRtnOrder#报单
# self.t.OnRtnTrade = self.onRtnTrade#成交
# #——————错误事件
# self.t.OnRspOrderInsert = self.onRspOrderInsert
# self.t.OnRspOrderAction =self.onRspOrderAction
# self.t.OnRspError = self.onRspError
#
# self.t.RegCB()
# self.t.RegisterFront('tcp://180.168.146.187:10000')
# self.t.Init()
# # self.t.Join()
#
# def put_log_event(self, log): # log事件注册
# event = Event(type=EVENT_LOG)
# event.dict['log'] = log
# self.__eventEngine.sendEvent(event)
#
# def onFrontConnected(self):
# """服务器连接"""
# print('---交易服务器连接成功')
# self.put_log_event('交易服务器连接成功')
# self.t.ReqUserLogin(BrokerID=self.__brokerid, UserID=self.__userid, Password=self.__password)
#
# def OnRtnInstrumentStatus(self, data):
# pass
#
# def onRspUserLogin(self, data, error, n, last):
# """登陆回报"""
# print('---交易服务器登陆成功')
# if error.__dict__['ErrorID'] == 0:
# self.Investor = data.__dict__['UserID']
# self.BrokerID = data.__dict__['BrokerID']
# log = data.__dict__['UserID'] + '交易服务器登陆成功'
# self.t.ReqSettlementInfoConfirm(self.BrokerID, self.Investor) # 对账单确认
# else:
# log = '登陆回报,错误代码:' + str(error.__dict__['ErrorID']) + ', 错误信息:' + str(error.__dict__['ErrorMsg'])
# self.put_log_event(log)
#
# def onRspSettlementInfoConfirm(self, data, error, n, last):
# """确认结算信息回报"""
# log = '结算信息确认完成'
# self.put_log_event(log)
# time.sleep(1)
# self.getInstrument() # 查询合约资料
#
# def onRspQryInstrument(self, data, error, n, last):
# """
# Contract query response.
# These responses are pushed extremely fast, so it is not suitable to put
# every one of them into the queue for processing; they are first stored in
# a local dict and pushed to the queue only after all have been collected.
# (Because this takes too long, a separate process is currently used to read them.)
# """
# if error.__dict__['ErrorID'] == 0:
# event = Event(type=EVENT_INSTRUMENT)
# event.dict['data'] = data.__dict__
# event.dict['last'] = last
# self.__eventEngine.sendEvent(event)
# if last == True:
# time.sleep(2)
# self.t.ReqQryDepthMarketData() # 查询合约截面数据
# else:
# log = '合约投资者回报,错误代码:' + str(error.__dict__['ErrorID']) + ', 错误信息:' + str(error.__dict__['ErrorMsg'])
# self.put_log_event(log)
#
# def onRspQryDepthMarketData(self, data, error, n, last):
# # 常规行情事件
# event = Event(type=EVENT_MARKETDATA)
# event.dict['data'] = data.__dict__
# event.dict['last'] = last
# self.__eventEngine.sendEvent(event)
#
# def onRspQryInvestorPosition(self, data, error, n, last):
# """持仓查询回报"""
# if error.__dict__['ErrorID'] == 0:
# event = Event(type=EVENT_POSITION)
# event.dict['data'] = data.__dict__
# event.dict['last'] = last
# self.__eventEngine.sendEvent(event)
# else:
# log = ('持仓查询回报,错误代码:' +str(error.__dict__['ErrorID']) + ', 错误信息:' +str(error.__dict__['ErrorMsg']))
# self.put_log_event(log)
#
# # ----------------------------------------------------------------------
# def onRspQryTradingAccount(self, data, error, n, last):
# """资金账户查询回报"""
# if error.__dict__['ErrorID'] == 0:
# event = Event(type=EVENT_ACCOUNT)
# event.dict['data'] = data.__dict__
# self.__eventEngine.sendEvent(event)
# else:
# log = ('账户查询回报,错误代码:' +str(error.__dict__['ErrorID']) + ', 错误信息:' +str(error.__dict__['ErrorMsg']))
# self.put_log_event(log)
#
# def onRtnTrade(self, data):
# """成交回报"""
# # 常规成交事件
# event1 = Event(type=EVENT_TRADE)
# event1.dict['data'] = data.__dict__
# self.__eventEngine.put(event1)
#
# def onRtnOrder(self, data):
# """报单回报"""
# # 更新最大报单编号
# newref = data.__dict__['OrderRef']
# self.__orderref = max(self.__orderref, int(newref))
# # 常规报单事件
# event1 = Event(type=EVENT_ORDER)
# event1.dict['data'] = data.__dict__
# self.__eventEngine.put(event1)
#
# def onRspOrderInsert(self, data, error, n, last):
# """发单错误(柜台)"""
# log = data.__dict__['InstrumentID'] + ' 发单错误回报,错误代码:' + str(error.__dict__['ErrorID']) + ', 错误信息:' + str(
# error.__dict__['ErrorMsg'])
# self.put_log_event(log)
#
# def onErrRtnOrderInsert(self, data, error):
# """发单错误回报(交易所)"""
# log = data.__dict__['InstrumentID'] + '发单错误回报,错误代码:' + str(error.__dict__['ErrorID']) + ', 错误信息:' + str(
# error.__dict__['ErrorMsg'])
# self.put_log_event(log)
#
# def onRspError(self, error, n, last):
# """错误回报"""
# log = '交易错误回报,错误代码:' + str(error.__dict__['ErrorID']) + ', 错误信息:' + str(error.__dict__['ErrorMsg'])
# self.put_log_event(log)
# # ----------------------------------------------------------------------
# def onRspOrderAction(self, data, error, n, last):
# """撤单错误(柜台)"""
# log = '撤单错误回报,错误代码:' + str(error.__dict__['ErrorID']) + ', 错误信息:' + str(error.__dict__['ErrorMsg'])
# self.put_log_event(log)
# # ----------------------------------------------------------------------
# def onErrRtnOrderAction(self, data, error):
# """撤单错误回报(交易所)"""
# event = Event(type=EVENT_LOG)
# log = data['合约代码'] + ' 撤单错误回报,错误代码:' + str(error.__dict__['ErrorID']) + ', 错误信息:' + str(
# error.__dict__['ErrorMsg'])
# event.dict['log'] = log
# self.__eventEngine.sendEvent(event)
#
# def getInstrument(self):
# """查询合约"""
# self.__reqid = self.__reqid + 1
# self.t.ReqQryInstrument()
# def getAccount(self):
# """查询账户"""
# self.__reqid = self.__reqid + 1
# self.t.ReqQryTradingAccount(self.__brokerid , self.__userid )
# # ----------------------------------------------------------------------
# def getPosition(self):
# """查询持仓"""
# self.__reqid = self.__reqid + 1
# self.t.ReqQryInvestorPosition(self.__brokerid , self.__userid )
#
# def sendorder(self, instrumentid, price, vol, direction, offset):
# """发单"""
# self.__reqid = self.__reqid + 1
# self.__orderref = self.__orderref + 1
# # 限价
# self.t.ReqOrderInsert(BrokerID=self.__brokerid,
# InvestorID=self.__userid,
# InstrumentID=instrumentid,
# OrderRef='{0:>12}'.format(self.__orderref),
# UserID=self.__userid,
# OrderPriceType=OrderPriceTypeType.LimitPrice,
# Direction=direction,
# CombOffsetFlag=offset,
# CombHedgeFlag=HedgeFlagType.Speculation.__char__(),
# LimitPrice=price,
# VolumeTotalOriginal=vol,
# TimeCondition=TimeConditionType.GFD,
# VolumeCondition=VolumeConditionType.AV,
# MinVolume=1,
# ForceCloseReason=ForceCloseReasonType.NotForceClose,
# ContingentCondition=ContingentConditionType.Immediately)
# return self.__orderref
# # 返回订单号,便于某些算法进行动态管理
# # OrderPriceType--LimitPrice 限价单
# # CombHedgeFlag--投机套保标记,默认投机单Speculation
# # TimeConditionType是一个有效期类型类型#当日有效--GFD
# # VolumeConditionType是一个成交量类型类型#任何数量--VolumeConditionType.AV
# # ContingentConditionType是一个触发条件类型,#立即ContingentConditionType.Immediately
#
# def buy(self, symbol, price, vol): # 买开多开
# direction = DirectionType.Buy
# offset = OffsetFlagType.Open.__char__()
# self.sendorder(symbol, price, vol, direction, offset)
#
# def sell(self, symbol, price, vol): # 多平
# direction = DirectionType.Sell
# offset = OffsetFlagType.Close.__char__()
# self.sendorder(symbol, price, vol, direction, offset)
#
# def selltoday(self, symbol, price, vol): # 平今多
# direction = DirectionType.Sell
# offset = OffsetFlagType.CloseToday.__char__()
# self.sendorder(symbol, price, vol, direction, offset)
#
# def short(self, symbol, price, vol): # 卖开空开
# direction = DirectionType.Sell
# offset = OffsetFlagType.Open.__char__()
# self.sendorder(symbol, price, vol, direction, offset)
#
# def cover(self, symbol, price, vol): # 空平
# direction = DirectionType.Buy
# offset = OffsetFlagType.Close.__char__()
# self.sendorder(symbol, price, vol, direction, offset)
#
# def covertoday(self, symbol, price, vol): # 平今空
# direction = DirectionType.Buy
# offset = OffsetFlagType.CloseToday.__char__()
# self.sendorder(symbol, price, vol, direction, offset)
#
# # ----------------------------------------------------------------------
# def cancelOrder(self, order):
# """撤单"""
# # print(order)
# self.__reqid = self.__reqid + 1
# self.t.ReqOrderAction(BrokerID=self.__brokerid,
# InvestorID=self.__userid,
# OrderRef=order['本地报单编号'],
# FrontID=int(order['前置编号']),
# SessionID=int(order['会话编号']),
# OrderSysID=order['报单编号'],
# ActionFlag=ActionFlagType.Delete,
# ExchangeID=order["交易所代码"],
# InstrumentID=order['合约代码'])
#
#
#
# ########################################################################
# class MainEngine:
# """主引擎,负责对API的调度"""
#
# #----------------------------------------------------------------------
# def __init__(self):
# """Constructor"""
# self.ee = EventEngine() # 创建事件驱动引擎
# self.md = MdApi(self.ee) # 创建API接口
# self.td = TdApi(self.ee)
# # self.ee.start() # 启动事件驱动引擎
# # self.ee.register(EVENT_LOG, self.p_log) # 打印测试
# # self.ee.register(EVENT_INSTRUMENT, self.insertInstrument)
# # self.list_instrument = []#保存合约资料
# # self.ee.register(EVENT_MARKETDATA, self.insertMarketData)
# # self.list_marketdata = []#保存合约资料
# # # 循环查询持仓和账户相关
# # self.countGet = 0 # 查询延时计数
# # self.lastGet = 'Position' # 上次查询的性质,先查询账户
# # #持仓和账户
# # #self.ee.register (EVENT_ACCOUNT,self.account)
# # #self.ee.register (EVENT_POSITION,self.position)
# # #持仓和账户数据
# # self.dictaccount ={}
# # self.dictposition ={}
# # # 委托事件
# # self.ee.register (EVENT_ORDER,self.order)
# # self.dictorder={}#报单数据
# # # 成交事件
# # self.ee.register (EVENT_TRADE,self.trader)
# # self.dicttrade={}#成交数据
# # #注册TICK行情
# # self.ee.register(EVENT_MARKETDATA_CONTRACT + 'rb1705', self.deepdata)
#
# #----------------------------------------------------------------------
# def login(self):
# """登陆"""
# self.md.login()
# self.td.login()
# def p_log(self,event):
# print(event.dict['log'])
# def deepdata(self,event):
# data = self.DepthMarketDataField(event.dict['data'])
# print(data)
#
# def DepthMarketDataField(self, var):
# tmp = {}
# tmp["交易日"] = var["TradingDay"]
# tmp["合约代码"] = var["InstrumentID"]
# tmp["交易所代码"] = var["ExchangeID"]
# tmp["最新价"] = var["LastPrice"]
# tmp["昨收盘"] = var["PreClosePrice"]
# tmp["昨持仓量"] = var["PreOpenInterest"]
# tmp["今开盘"] = var["OpenPrice"]
# tmp["最高价"] = var["HighestPrice"]
# tmp["最低价"] = var["LowestPrice"]
# tmp["成交量"] = var["Volume"]
# tmp["成交金额"] = var["Turnover"]
# tmp["持仓量"] = var["OpenInterest"]
# tmp["今收盘"] = var["ClosePrice"]
# tmp["本次结算价"] = var["SettlementPrice"]
# tmp["时间"] = var["UpdateTime"]
# tmp["申买价一"] = var["BidPrice1"]
# tmp["申买量一"] = var["BidVolume1"]
# tmp["申卖价一"] = var["AskPrice1"]
# tmp["申卖量一"] = var["AskVolume1"]
# tmp["当日均价"] = var["AveragePrice"]
# return tmp
#
#
# def insertInstrument(self, event):
# """插入合约对象"""
# data = event.dict['data']
# last = event.dict['last']
# self.list_instrument.append(data)
# if last:#最后一条数据
# # 将查询完成的合约信息保存到本地文件,今日登录可直接使用不再查询
# event = Event(type=EVENT_LOG)
# log = '合约信息查询完成'
# event.dict['log'] = log
# self.ee.sendEvent(event)
# ret = pd.DataFrame(self.list_instrument)
# ret = ret.set_index('InstrumentID')
# ret.to_pickle('Instrument')
# event = Event(type=EVENT_LOG)
# log = '合约信息已经保存'
# event.dict['log'] = log
# self.ee.sendEvent(event)
# #print(ret)
#
# def insertMarketData(self, event):
# data = event.dict['data']
# last = event.dict['last']
# self.list_marketdata.append(data)
# if last:
# # 将查询完成的合约信息保存到本地文件,今日登录可直接使用不再查询
# event = Event(type=EVENT_LOG)
# log = '合约截面数据查询完成'
# event.dict['log'] = log
# self.ee.sendEvent(event)
# ret = pd.DataFrame(self.list_marketdata)
# ret = ret.set_index('InstrumentID')
# ret.to_pickle('MarketData')
# event = Event(type=EVENT_LOG)
# log = '合约截面数据已经保存'
# event.dict['log'] = log
# self.ee.sendEvent(event)
# self._zhuli(ret)#计算主力合约
#
# def _zhuli(self,ret):#计算主力合约,在下单的时候要用,涨跌停板价是市价下单用
# Instrument = pd.read_pickle('Instrument')
# Marketdata = ret#从 insertMarketData函数传递过来的数据
# id_list =['hc','bu','zn','ru','al','cu','rb','ni','sn','p','pp','jd','i','jm','v','l','y','c','m','j','cs','ZC','FG','MA','CF','RM','TA','SR']
# zhuli = []
# for ID in id_list :
# var_I = Instrument.loc[Instrument['ProductID'] == ID]#按合约简称索引
# #print(var_I )
# var_M =[]
# for index in var_I.index:
# var_M.append(Marketdata.ix[index])
# var_M = pd.DataFrame(var_M)#搞到TICK截面
# #print(var_M )
# var_M = var_M.sort_values(by='OpenInterest',ascending= False )#持仓降序索引,以持仓为基准,不以成交量为基准
# #print(var_M )
# index_1 =var_M.index[0]#连一代码
# index_2 =var_M.index[1]#连二代码
# zlinfo = {}
# zlinfo['合约简称'] =ID
# zlinfo["合约名称"] =var_I.ix[ index_1]["InstrumentName"]
# zlinfo['合约代码'] = index_1
# zlinfo['市场代码'] = var_I.ix[ index_1]['ExchangeID']
# zlinfo['合约乘数'] = var_I.ix[ index_1]['VolumeMultiple']
# zlinfo['合约跳价'] =var_I.ix[ index_1]['PriceTick']
# zlinfo['涨停板价'] = var_M.ix[ index_1]['UpperLimitPrice']
# zlinfo['跌停板价'] = var_M.ix[ index_1]['LowerLimitPrice']
# zlinfo['主力持仓'] = var_M.ix[ index_1]['OpenInterest']
# zlinfo['次月合约'] = index_2
# zlinfo['次月持仓'] = var_M.ix[ index_2]['OpenInterest']
# zlinfo['次月涨停'] = var_M.ix[ index_2]['UpperLimitPrice']
# zlinfo['次月跌停'] = var_M.ix[ index_2]['LowerLimitPrice']
# #print(zlinfo)
# zhuli.append(zlinfo)
# zhuli = pd.DataFrame(zhuli)
# zhuli.to_pickle('zl')
# #print(zhuli)
# self.list_instrument=[]
# self.list_marketdata=[]#清空数据,没有用了
# #log事件
# event = Event(type=EVENT_LOG)
# log = '主力合约已经保存'
# event.dict['log'] = log
# self.ee.sendEvent(event)
# ##推送主力合约数据,修改py_ctp目录下eventType ,增加EVENT_PRODUCT事件。暂时用不上。后面要用到。
# event = Event(type=EVENT_PRODUCT)
# event.dict['data'] = zhuli
# self.ee.sendEvent(event)
# # 开始循环查询
# self.ee.register(EVENT_TIMER, self.getAccountPosition)#定时器事件
# self.md.subscribe('rb1705')
#
# def account(self,event):#处理账户事件数据
# self.dictaccount = self.TradingAccountField(event.dict['data'])
# print(self.dictaccount)
# def TradingAccountField(self,var):
# tmp = {}
# tmp["投资者帐号"] = var["AccountID"]
# tmp["静态权益"] = var["PreBalance"]
# tmp["上次存款额"] = var["PreDeposit"]
# tmp["入金金额"] = var["Deposit"]
# tmp["出金金额"] = var["Withdraw"]
# tmp["冻结的保证金"] = var["FrozenMargin"]
# tmp["当前保证金总额"] = var["CurrMargin"]
# tmp["手续费"] = var["Commission"]
# tmp["平仓盈亏"] = var["CloseProfit"]
# tmp["持仓盈亏"] = var["PositionProfit"]
# tmp["动态权益"] = var["Balance"]
# tmp["可用资金"] = var["Available"]
# tmp["可取资金"] = var["WithdrawQuota"]
# tmp["交易日"] = var["TradingDay"]
# tmp["时间"] =datetime.datetime.now()
# return tmp
# def position(self, event):#处理持仓事件数据
# data = self.InvestorPositionField(event.dict['data'])
# last = event.dict['last']
# index = data['合约代码'] + '.' + data['持仓多空方向']
# #理论上很少有锁仓
# self.dictposition[index] =data
# if last == True :
# for key in self.dictposition.keys():
# print (self.dictposition[key] )
#
#
#
#
# def InvestorPositionField(self,var):
# tmp={}
# tmp["合约代码"]=var["InstrumentID"]
# tmp["持仓多空方向"]=var["PosiDirection"]
# tmp["持仓日期"]=var["PositionDate"]
# tmp["上日持仓"]=var["YdPosition"]
# tmp["持仓总数"]=var["Position"]
# tmp["多头冻结"]=var["LongFrozen"]
# tmp["空头冻结"]=var["ShortFrozen"]
# tmp["开仓量"]=var["OpenVolume"]
# tmp["平仓量"]=var["CloseVolume"]
# tmp["开仓金额"]=var["OpenAmount"]
# tmp["平仓金额"]=var["CloseAmount"]
# tmp["持仓成本"]=var["PositionCost"]
# tmp["平仓盈亏"]=var["CloseProfit"]
# tmp["持仓盈亏"]=var["PositionProfit"]
# tmp["上次结算价"]=var["PreSettlementPrice"]
# tmp["本次结算价"]=var["SettlementPrice"]
# tmp["交易日"]=var["TradingDay"]
# tmp["开仓成本"]=var["OpenCost"]
# tmp["今日持仓"]=var["TodayPosition"]
# return tmp
#
#
# def order(self, event):
# data = self.OrderField( event.dict['data'])
# index =data["报单引用"]#OrderRef
# if index not in self.dictorder.keys():
# self.dictorder[index] = data
# else:
# self.dictorder[index] = data
# print('order:',self.dictorder[index])
#
#
# def OrderField(self, var):
# tmp = {}
# tmp["合约代码"] = var["InstrumentID"]
# tmp["交易所代码"] = var["ExchangeID"]
# tmp["报单引用"] = var["OrderRef"]
# tmp["买卖方向"] = var["Direction"]
# tmp["组合开平标志"] = var["CombOffsetFlag"]
# tmp["价格"] = var["LimitPrice"]
# tmp["数量"] = var["VolumeTotalOriginal"]
# tmp["请求编号"] = var["RequestID"]
# tmp["本地报单编号"] = var["OrderLocalID"]
# tmp["报单编号"] = var["OrderSysID"]
# tmp["今成交数量"] = var["VolumeTraded"]
# tmp["剩余数量"] = var["VolumeTotal"]
# tmp["报单日期"] = var["InsertDate"]
# tmp["委托时间"] = var["InsertTime"]
# tmp["前置编号"] = var["FrontID"]
# tmp["会话编号"] = var["SessionID"]
# tmp["状态信息"] = var["StatusMsg"]
# tmp["序号"] = var["SequenceNo"]
# return tmp
#
# def trader(self, event):
# data = self.TradeField(event.dict['data'])
# index = data["报单引用"] # OrderRef
# if index not in self.dicttrade.keys():
# self.dicttrade[index] = data
# print('trade',data)
#
# def TradeField(self, var):
# tmp = {}
# tmp["合约代码"] = var["InstrumentID"]
# tmp["报单引用"] = var["OrderRef"]
# tmp["交易所代码"] = var["ExchangeID"]
# tmp["成交编号"] = var["TradeID"]
# tmp["买卖方向"] = var["Direction"]
# tmp["报单编号"] = var["OrderSysID"]
# tmp["合约在交易所的代码"] = var["ExchangeInstID"]
# tmp["开平标志"] = var["OffsetFlag"]
# tmp["价格"] = var["Price"]
# tmp["数量"] = var["Volume"]
# tmp["成交时期"] = var["TradeDate"]
# tmp["成交时间"] = var["TradeTime"]
# tmp["本地报单编号"] = var["OrderLocalID"]
# tmp["交易日"] = var["TradingDay"]
# return tmp
#
# def getAccountPosition(self, event):
# """循环查询账户和持仓"""
# self.countGet = self.countGet + 1
# # 每5秒发一次查询
# if self.countGet > 5:
# self.countGet = 0 # 清空计数
#
# if self.lastGet == 'Account':
# self.getPosition()
# self.lastGet = 'Position'
# else:
# self.getAccount()
# self.lastGet = 'Account'
# def getAccount(self):
# """查询账户"""
# self.td.getAccount()
# # ----------------------------------------------------------------------
# def getPosition(self):
# """查询持仓"""
# self.td.getPosition()
#
# def buy(self, symbol, price, vol): # 买开多开
# self.td.buy(symbol, price, vol)
#
# def sell(self, symbol, price, vol): # 多平
# self.td.sell(symbol, price, vol)
#
# def selltoday(self, symbol, price, vol): # 平今多
#
# self.td.selltoday(symbol, price, vol)
#
# def short(self, symbol, price, vol): # 卖开空开
#
# self.td.short(symbol, price, vol)
#
# def cover(self, symbol, price, vol): # 空平
#
# self.td.cover(symbol, price, vol)
#
# def covertoday(self, symbol, price, vol): # 平今空
#
# self.td.covertoday(symbol, price, vol)
#
# def cancelOrder(self, order):#撤单
#
# self.td.cancelOrder(order)
#
#
#
# # Run this script directly to test
# if __name__ == '__main__':
# import sys
# # from PyQt5.QtCore import QCoreApplication
# # app = QCoreApplication(sys.argv)
# main = MainEngine()
# main.login()
# # app.exec_()
|
apache-2.0
|
louisLouL/pair_trading
|
capstone_env/lib/python3.6/site-packages/matplotlib/backends/backend_qt5.py
|
2
|
31226
|
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import functools
import os
import re
import signal
import sys
from six import unichr
import matplotlib
from matplotlib._pylab_helpers import Gcf
from matplotlib.backend_bases import (
_Backend, FigureCanvasBase, FigureManagerBase, NavigationToolbar2,
TimerBase, cursors)
import matplotlib.backends.qt_editor.figureoptions as figureoptions
from matplotlib.backends.qt_editor.formsubplottool import UiSubplotTool
from matplotlib.figure import Figure
from .qt_compat import (
QtCore, QtGui, QtWidgets, _getSaveFileName, is_pyqt5, __version__, QT_API)
backend_version = __version__
# SPECIAL_KEYS are keys that do *not* return their unicode name
# instead they have manually specified names
SPECIAL_KEYS = {QtCore.Qt.Key_Control: 'control',
QtCore.Qt.Key_Shift: 'shift',
QtCore.Qt.Key_Alt: 'alt',
QtCore.Qt.Key_Meta: 'super',
QtCore.Qt.Key_Return: 'enter',
QtCore.Qt.Key_Left: 'left',
QtCore.Qt.Key_Up: 'up',
QtCore.Qt.Key_Right: 'right',
QtCore.Qt.Key_Down: 'down',
QtCore.Qt.Key_Escape: 'escape',
QtCore.Qt.Key_F1: 'f1',
QtCore.Qt.Key_F2: 'f2',
QtCore.Qt.Key_F3: 'f3',
QtCore.Qt.Key_F4: 'f4',
QtCore.Qt.Key_F5: 'f5',
QtCore.Qt.Key_F6: 'f6',
QtCore.Qt.Key_F7: 'f7',
QtCore.Qt.Key_F8: 'f8',
QtCore.Qt.Key_F9: 'f9',
QtCore.Qt.Key_F10: 'f10',
QtCore.Qt.Key_F11: 'f11',
QtCore.Qt.Key_F12: 'f12',
QtCore.Qt.Key_Home: 'home',
QtCore.Qt.Key_End: 'end',
QtCore.Qt.Key_PageUp: 'pageup',
QtCore.Qt.Key_PageDown: 'pagedown',
QtCore.Qt.Key_Tab: 'tab',
QtCore.Qt.Key_Backspace: 'backspace',
QtCore.Qt.Key_Enter: 'enter',
QtCore.Qt.Key_Insert: 'insert',
QtCore.Qt.Key_Delete: 'delete',
QtCore.Qt.Key_Pause: 'pause',
QtCore.Qt.Key_SysReq: 'sysreq',
QtCore.Qt.Key_Clear: 'clear', }
# define which modifier keys are collected on keyboard events.
# elements are (mpl names, Modifier Flag, Qt Key) tuples
SUPER = 0
ALT = 1
CTRL = 2
SHIFT = 3
MODIFIER_KEYS = [('super', QtCore.Qt.MetaModifier, QtCore.Qt.Key_Meta),
('alt', QtCore.Qt.AltModifier, QtCore.Qt.Key_Alt),
('ctrl', QtCore.Qt.ControlModifier, QtCore.Qt.Key_Control),
('shift', QtCore.Qt.ShiftModifier, QtCore.Qt.Key_Shift),
]
if sys.platform == 'darwin':
# in OSX, the control and super (aka cmd/apple) keys are switched, so
# switch them back.
SPECIAL_KEYS.update({QtCore.Qt.Key_Control: 'super', # cmd/apple key
QtCore.Qt.Key_Meta: 'control',
})
MODIFIER_KEYS[0] = ('super', QtCore.Qt.ControlModifier,
QtCore.Qt.Key_Control)
MODIFIER_KEYS[2] = ('ctrl', QtCore.Qt.MetaModifier,
QtCore.Qt.Key_Meta)
cursord = {
cursors.MOVE: QtCore.Qt.SizeAllCursor,
cursors.HAND: QtCore.Qt.PointingHandCursor,
cursors.POINTER: QtCore.Qt.ArrowCursor,
cursors.SELECT_REGION: QtCore.Qt.CrossCursor,
cursors.WAIT: QtCore.Qt.WaitCursor,
}
# make place holder
qApp = None
def _create_qApp():
"""
Only one qApp can exist at a time, so check before creating one.
"""
global qApp
if qApp is None:
app = QtWidgets.QApplication.instance()
if app is None:
# check for DISPLAY env variable on X11 build of Qt
if is_pyqt5():
try:
from PyQt5 import QtX11Extras
is_x11_build = True
except ImportError:
is_x11_build = False
else:
is_x11_build = hasattr(QtGui, "QX11Info")
if is_x11_build:
display = os.environ.get('DISPLAY')
if display is None or not re.search(r':\d', display):
raise RuntimeError('Invalid DISPLAY variable')
qApp = QtWidgets.QApplication([b"matplotlib"])
qApp.lastWindowClosed.connect(qApp.quit)
else:
qApp = app
if is_pyqt5():
try:
qApp.setAttribute(QtCore.Qt.AA_UseHighDpiPixmaps)
qApp.setAttribute(QtCore.Qt.AA_EnableHighDpiScaling)
except AttributeError:
pass
def _allow_super_init(__init__):
"""
Decorator for ``__init__`` to allow ``super().__init__`` on PyQt4/PySide2.
"""
if QT_API == "PyQt5":
return __init__
else:
# To work around lack of cooperative inheritance in PyQt4, PySide,
# and PySide2, when calling FigureCanvasQT.__init__, we temporarily
# patch QWidget.__init__ by a cooperative version, that first calls
# QWidget.__init__ with no additional arguments, and then finds the
# next class in the MRO with an __init__ that does support cooperative
# inheritance (i.e., not defined by the PyQt4, PySide, PySide2, sip
# or Shiboken packages), and manually call its `__init__`, once again
# passing the additional arguments.
qwidget_init = QtWidgets.QWidget.__init__
def cooperative_qwidget_init(self, *args, **kwargs):
qwidget_init(self)
mro = type(self).__mro__
next_coop_init = next(
cls for cls in mro[mro.index(QtWidgets.QWidget) + 1:]
if cls.__module__.split(".")[0] not in [
"PyQt4", "sip", "PySide", "PySide2", "Shiboken"])
next_coop_init.__init__(self, *args, **kwargs)
@functools.wraps(__init__)
def wrapper(self, **kwargs):
try:
QtWidgets.QWidget.__init__ = cooperative_qwidget_init
__init__(self, **kwargs)
finally:
# Restore __init__
QtWidgets.QWidget.__init__ = qwidget_init
return wrapper
class TimerQT(TimerBase):
'''
Subclass of :class:`backend_bases.TimerBase` that uses Qt timer events.
Attributes
----------
interval : int
The time between timer events in milliseconds. Default is 1000 ms.
single_shot : bool
Boolean flag indicating whether this timer should
operate as single shot (run once and then stop). Defaults to False.
callbacks : list
Stores list of (func, args) tuples that will be called upon timer
events. This list can be manipulated directly, or the functions
`add_callback` and `remove_callback` can be used.
'''
def __init__(self, *args, **kwargs):
TimerBase.__init__(self, *args, **kwargs)
# Create a new timer and connect the timeout() signal to the
# _on_timer method.
self._timer = QtCore.QTimer()
self._timer.timeout.connect(self._on_timer)
self._timer_set_interval()
def _timer_set_single_shot(self):
self._timer.setSingleShot(self._single)
def _timer_set_interval(self):
self._timer.setInterval(self._interval)
def _timer_start(self):
self._timer.start()
def _timer_stop(self):
self._timer.stop()
class FigureCanvasQT(QtWidgets.QWidget, FigureCanvasBase):
# map Qt button codes to MouseEvent's ones:
buttond = {QtCore.Qt.LeftButton: 1,
QtCore.Qt.MidButton: 2,
QtCore.Qt.RightButton: 3,
# QtCore.Qt.XButton1: None,
# QtCore.Qt.XButton2: None,
}
def _update_figure_dpi(self):
dpi = self._dpi_ratio * self.figure._original_dpi
self.figure._set_dpi(dpi, forward=False)
@_allow_super_init
def __init__(self, figure):
_create_qApp()
figure._original_dpi = figure.dpi
super(FigureCanvasQT, self).__init__(figure=figure)
self.figure = figure
self._update_figure_dpi()
w, h = self.get_width_height()
self.resize(w, h)
self.setMouseTracking(True)
# Key auto-repeat enabled by default
self._keyautorepeat = True
# In cases with mixed resolution displays, we need to be careful if the
# dpi_ratio changes - in this case we need to resize the canvas
# accordingly. We could watch for screenChanged events from Qt, but
# the issue is that we can't guarantee this will be emitted *before*
# the first paintEvent for the canvas, so instead we keep track of the
# dpi_ratio value here and in paintEvent we resize the canvas if
# needed.
self._dpi_ratio_prev = None
@property
def _dpi_ratio(self):
# Not available on Qt4 or some older Qt5.
try:
return self.devicePixelRatio()
except AttributeError:
return 1
def get_width_height(self):
w, h = FigureCanvasBase.get_width_height(self)
return int(w / self._dpi_ratio), int(h / self._dpi_ratio)
def enterEvent(self, event):
FigureCanvasBase.enter_notify_event(self, guiEvent=event)
def leaveEvent(self, event):
QtWidgets.QApplication.restoreOverrideCursor()
FigureCanvasBase.leave_notify_event(self, guiEvent=event)
def mouseEventCoords(self, pos):
"""Calculate mouse coordinates in physical pixels
        Qt5 uses logical pixels, but the figure is scaled to physical
pixels for rendering. Transform to physical pixels so that
all of the down-stream transforms work as expected.
Also, the origin is different and needs to be corrected.
"""
dpi_ratio = self._dpi_ratio
x = pos.x()
# flip y so y=0 is bottom of canvas
y = self.figure.bbox.height / dpi_ratio - pos.y()
return x * dpi_ratio, y * dpi_ratio
def mousePressEvent(self, event):
x, y = self.mouseEventCoords(event.pos())
button = self.buttond.get(event.button())
if button is not None:
FigureCanvasBase.button_press_event(self, x, y, button,
guiEvent=event)
def mouseDoubleClickEvent(self, event):
x, y = self.mouseEventCoords(event.pos())
button = self.buttond.get(event.button())
if button is not None:
FigureCanvasBase.button_press_event(self, x, y,
button, dblclick=True,
guiEvent=event)
def mouseMoveEvent(self, event):
x, y = self.mouseEventCoords(event)
FigureCanvasBase.motion_notify_event(self, x, y, guiEvent=event)
def mouseReleaseEvent(self, event):
x, y = self.mouseEventCoords(event)
button = self.buttond.get(event.button())
if button is not None:
FigureCanvasBase.button_release_event(self, x, y, button,
guiEvent=event)
def wheelEvent(self, event):
x, y = self.mouseEventCoords(event)
# from QWheelEvent::delta doc
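        # (a conventional mouse wheel reports angleDelta().y() in multiples of
        # 120, i.e. one notch == one step; high-resolution devices report
        # pixelDelta instead)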
if event.pixelDelta().x() == 0 and event.pixelDelta().y() == 0:
steps = event.angleDelta().y() / 120
else:
steps = event.pixelDelta().y()
if steps:
FigureCanvasBase.scroll_event(self, x, y, steps, guiEvent=event)
def keyPressEvent(self, event):
key = self._get_key(event)
if key is not None:
FigureCanvasBase.key_press_event(self, key, guiEvent=event)
def keyReleaseEvent(self, event):
key = self._get_key(event)
if key is not None:
FigureCanvasBase.key_release_event(self, key, guiEvent=event)
@property
def keyAutoRepeat(self):
"""
If True, enable auto-repeat for key events.
"""
return self._keyautorepeat
@keyAutoRepeat.setter
def keyAutoRepeat(self, val):
self._keyautorepeat = bool(val)
def resizeEvent(self, event):
# _dpi_ratio_prev will be set the first time the canvas is painted, and
        # the rendered buffer is useless before that anyway.
if self._dpi_ratio_prev is None:
return
w = event.size().width() * self._dpi_ratio
h = event.size().height() * self._dpi_ratio
dpival = self.figure.dpi
winch = w / dpival
hinch = h / dpival
self.figure.set_size_inches(winch, hinch, forward=False)
# pass back into Qt to let it finish
QtWidgets.QWidget.resizeEvent(self, event)
# emit our resize events
FigureCanvasBase.resize_event(self)
def sizeHint(self):
w, h = self.get_width_height()
return QtCore.QSize(w, h)
    def minimumSizeHint(self):
return QtCore.QSize(10, 10)
def _get_key(self, event):
if not self._keyautorepeat and event.isAutoRepeat():
return None
event_key = event.key()
event_mods = int(event.modifiers()) # actually a bitmask
# get names of the pressed modifier keys
# bit twiddling to pick out modifier keys from event_mods bitmask,
# if event_key is a MODIFIER, it should not be duplicated in mods
mods = [name for name, mod_key, qt_key in MODIFIER_KEYS
if event_key != qt_key and (event_mods & mod_key) == mod_key]
try:
# for certain keys (enter, left, backspace, etc) use a word for the
# key, rather than unicode
key = SPECIAL_KEYS[event_key]
except KeyError:
# unicode defines code points up to 0x0010ffff
            # QT will use Key_Codes larger than that for keyboard keys that
            # are not unicode characters (like multimedia keys)
# skip these
# if you really want them, you should add them to SPECIAL_KEYS
MAX_UNICODE = 0x10ffff
if event_key > MAX_UNICODE:
return None
key = unichr(event_key)
# qt delivers capitalized letters. fix capitalization
# note that capslock is ignored
if 'shift' in mods:
mods.remove('shift')
else:
key = key.lower()
mods.reverse()
return '+'.join(mods + [key])
def new_timer(self, *args, **kwargs):
"""
Creates a new backend-specific subclass of
:class:`backend_bases.Timer`. This is useful for getting
periodic events through the backend's native event
loop. Implemented only for backends with GUIs.
Other Parameters
----------------
interval : scalar
Timer interval in milliseconds
callbacks : list
Sequence of (func, args, kwargs) where ``func(*args, **kwargs)``
will be executed by the timer every *interval*.
"""
return TimerQT(*args, **kwargs)
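    # A minimal usage sketch for the timer API described above (hypothetical
    # callback, not part of the original file):
    #
    #     timer = canvas.new_timer(interval=500)
    #     timer.add_callback(lambda: print("tick"))
    #     timer.start()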
def flush_events(self):
global qApp
qApp.processEvents()
def start_event_loop(self, timeout=0):
if hasattr(self, "_event_loop") and self._event_loop.isRunning():
raise RuntimeError("Event loop already running")
self._event_loop = event_loop = QtCore.QEventLoop()
if timeout:
timer = QtCore.QTimer.singleShot(timeout * 1000, event_loop.quit)
event_loop.exec_()
def stop_event_loop(self, event=None):
if hasattr(self, "_event_loop"):
self._event_loop.quit()
class MainWindow(QtWidgets.QMainWindow):
closing = QtCore.Signal()
def closeEvent(self, event):
self.closing.emit()
QtWidgets.QMainWindow.closeEvent(self, event)
class FigureManagerQT(FigureManagerBase):
"""
Attributes
----------
canvas : `FigureCanvas`
The FigureCanvas instance
num : int or str
The Figure number
toolbar : qt.QToolBar
The qt.QToolBar
window : qt.QMainWindow
The qt.QMainWindow
"""
def __init__(self, canvas, num):
FigureManagerBase.__init__(self, canvas, num)
self.canvas = canvas
self.window = MainWindow()
self.window.closing.connect(canvas.close_event)
self.window.closing.connect(self._widgetclosed)
self.window.setWindowTitle("Figure %d" % num)
image = os.path.join(matplotlib.rcParams['datapath'],
'images', 'matplotlib.svg')
self.window.setWindowIcon(QtGui.QIcon(image))
# Give the keyboard focus to the figure instead of the
# manager; StrongFocus accepts both tab and click to focus and
# will enable the canvas to process event w/o clicking.
        # ClickFocus only takes the focus if the window has been
# clicked
# on. http://qt-project.org/doc/qt-4.8/qt.html#FocusPolicy-enum or
# http://doc.qt.digia.com/qt/qt.html#FocusPolicy-enum
self.canvas.setFocusPolicy(QtCore.Qt.StrongFocus)
self.canvas.setFocus()
self.window._destroying = False
# add text label to status bar
self.statusbar_label = QtWidgets.QLabel()
self.window.statusBar().addWidget(self.statusbar_label)
self.toolbar = self._get_toolbar(self.canvas, self.window)
if self.toolbar is not None:
self.window.addToolBar(self.toolbar)
self.toolbar.message.connect(self.statusbar_label.setText)
tbs_height = self.toolbar.sizeHint().height()
else:
tbs_height = 0
# resize the main window so it will display the canvas with the
# requested size:
cs = canvas.sizeHint()
sbs = self.window.statusBar().sizeHint()
self._status_and_tool_height = tbs_height + sbs.height()
height = cs.height() + self._status_and_tool_height
self.window.resize(cs.width(), height)
self.window.setCentralWidget(self.canvas)
if matplotlib.is_interactive():
self.window.show()
self.canvas.draw_idle()
def notify_axes_change(fig):
# This will be called whenever the current axes is changed
if self.toolbar is not None:
self.toolbar.update()
self.canvas.figure.add_axobserver(notify_axes_change)
self.window.raise_()
def full_screen_toggle(self):
if self.window.isFullScreen():
self.window.showNormal()
else:
self.window.showFullScreen()
def _widgetclosed(self):
if self.window._destroying:
return
self.window._destroying = True
try:
Gcf.destroy(self.num)
except AttributeError:
pass
# It seems that when the python session is killed,
# Gcf can get destroyed before the Gcf.destroy
# line is run, leading to a useless AttributeError.
def _get_toolbar(self, canvas, parent):
# must be inited after the window, drawingArea and figure
# attrs are set
if matplotlib.rcParams['toolbar'] == 'toolbar2':
toolbar = NavigationToolbar2QT(canvas, parent, False)
else:
toolbar = None
return toolbar
def resize(self, width, height):
'set the canvas size in pixels'
self.window.resize(width, height + self._status_and_tool_height)
def show(self):
self.window.show()
self.window.activateWindow()
self.window.raise_()
def destroy(self, *args):
# check for qApp first, as PySide deletes it in its atexit handler
if QtWidgets.QApplication.instance() is None:
return
if self.window._destroying:
return
self.window._destroying = True
self.window.destroyed.connect(self._widgetclosed)
if self.toolbar:
self.toolbar.destroy()
self.window.close()
def get_window_title(self):
return six.text_type(self.window.windowTitle())
def set_window_title(self, title):
self.window.setWindowTitle(title)
class NavigationToolbar2QT(NavigationToolbar2, QtWidgets.QToolBar):
message = QtCore.Signal(str)
def __init__(self, canvas, parent, coordinates=True):
""" coordinates: should we show the coordinates on the right? """
self.canvas = canvas
self.parent = parent
self.coordinates = coordinates
self._actions = {}
"""A mapping of toolitem method names to their QActions"""
QtWidgets.QToolBar.__init__(self, parent)
NavigationToolbar2.__init__(self, canvas)
def _icon(self, name):
if is_pyqt5():
name = name.replace('.png', '_large.png')
pm = QtGui.QPixmap(os.path.join(self.basedir, name))
if hasattr(pm, 'setDevicePixelRatio'):
pm.setDevicePixelRatio(self.canvas._dpi_ratio)
return QtGui.QIcon(pm)
def _init_toolbar(self):
self.basedir = os.path.join(matplotlib.rcParams['datapath'], 'images')
for text, tooltip_text, image_file, callback in self.toolitems:
if text is None:
self.addSeparator()
else:
a = self.addAction(self._icon(image_file + '.png'),
text, getattr(self, callback))
self._actions[callback] = a
if callback in ['zoom', 'pan']:
a.setCheckable(True)
if tooltip_text is not None:
a.setToolTip(tooltip_text)
if text == 'Subplots':
a = self.addAction(self._icon("qt4_editor_options.png"),
'Customize', self.edit_parameters)
a.setToolTip('Edit axis, curve and image parameters')
self.buttons = {}
# Add the x,y location widget at the right side of the toolbar
# The stretch factor is 1 which means any resizing of the toolbar
# will resize this label instead of the buttons.
if self.coordinates:
self.locLabel = QtWidgets.QLabel("", self)
self.locLabel.setAlignment(
QtCore.Qt.AlignRight | QtCore.Qt.AlignTop)
self.locLabel.setSizePolicy(
QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding,
QtWidgets.QSizePolicy.Ignored))
labelAction = self.addWidget(self.locLabel)
labelAction.setVisible(True)
# reference holder for subplots_adjust window
self.adj_window = None
# Esthetic adjustments - we need to set these explicitly in PyQt5
# otherwise the layout looks different - but we don't want to set it if
# not using HiDPI icons otherwise they look worse than before.
if is_pyqt5():
self.setIconSize(QtCore.QSize(24, 24))
self.layout().setSpacing(12)
if is_pyqt5():
# For some reason, self.setMinimumHeight doesn't seem to carry over to
# the actual sizeHint, so override it instead in order to make the
# aesthetic adjustments noted above.
def sizeHint(self):
size = super(NavigationToolbar2QT, self).sizeHint()
size.setHeight(max(48, size.height()))
return size
def edit_parameters(self):
allaxes = self.canvas.figure.get_axes()
if not allaxes:
QtWidgets.QMessageBox.warning(
self.parent, "Error", "There are no axes to edit.")
return
elif len(allaxes) == 1:
axes, = allaxes
else:
titles = []
for axes in allaxes:
name = (axes.get_title() or
" - ".join(filter(None, [axes.get_xlabel(),
axes.get_ylabel()])) or
"<anonymous {} (id: {:#x})>".format(
type(axes).__name__, id(axes)))
titles.append(name)
item, ok = QtWidgets.QInputDialog.getItem(
self.parent, 'Customize', 'Select axes:', titles, 0, False)
if ok:
axes = allaxes[titles.index(six.text_type(item))]
else:
return
figureoptions.figure_edit(axes, self)
def _update_buttons_checked(self):
# sync button checkstates to match active mode
self._actions['pan'].setChecked(self._active == 'PAN')
self._actions['zoom'].setChecked(self._active == 'ZOOM')
def pan(self, *args):
super(NavigationToolbar2QT, self).pan(*args)
self._update_buttons_checked()
def zoom(self, *args):
super(NavigationToolbar2QT, self).zoom(*args)
self._update_buttons_checked()
def set_message(self, s):
self.message.emit(s)
if self.coordinates:
self.locLabel.setText(s)
def set_cursor(self, cursor):
self.canvas.setCursor(cursord[cursor])
def draw_rubberband(self, event, x0, y0, x1, y1):
height = self.canvas.figure.bbox.height
y1 = height - y1
y0 = height - y0
rect = [int(val) for val in (x0, y0, x1 - x0, y1 - y0)]
self.canvas.drawRectangle(rect)
def remove_rubberband(self):
self.canvas.drawRectangle(None)
def configure_subplots(self):
image = os.path.join(matplotlib.rcParams['datapath'],
'images', 'matplotlib.png')
dia = SubplotToolQt(self.canvas.figure, self.parent)
dia.setWindowIcon(QtGui.QIcon(image))
dia.exec_()
def save_figure(self, *args):
filetypes = self.canvas.get_supported_filetypes_grouped()
sorted_filetypes = sorted(six.iteritems(filetypes))
default_filetype = self.canvas.get_default_filetype()
startpath = os.path.expanduser(
matplotlib.rcParams['savefig.directory'])
start = os.path.join(startpath, self.canvas.get_default_filename())
filters = []
selectedFilter = None
for name, exts in sorted_filetypes:
exts_list = " ".join(['*.%s' % ext for ext in exts])
filter = '%s (%s)' % (name, exts_list)
if default_filetype in exts:
selectedFilter = filter
filters.append(filter)
filters = ';;'.join(filters)
fname, filter = _getSaveFileName(self.parent,
"Choose a filename to save to",
start, filters, selectedFilter)
if fname:
# Save dir for next time, unless empty str (i.e., use cwd).
if startpath != "":
matplotlib.rcParams['savefig.directory'] = (
os.path.dirname(six.text_type(fname)))
try:
self.canvas.figure.savefig(six.text_type(fname))
except Exception as e:
QtWidgets.QMessageBox.critical(
self, "Error saving file", six.text_type(e),
QtWidgets.QMessageBox.Ok, QtWidgets.QMessageBox.NoButton)
class SubplotToolQt(UiSubplotTool):
def __init__(self, targetfig, parent):
UiSubplotTool.__init__(self, None)
self._figure = targetfig
for lower, higher in [("bottom", "top"), ("left", "right")]:
self._widgets[lower].valueChanged.connect(
lambda val: self._widgets[higher].setMinimum(val + .001))
self._widgets[higher].valueChanged.connect(
lambda val: self._widgets[lower].setMaximum(val - .001))
self._attrs = ["top", "bottom", "left", "right", "hspace", "wspace"]
self._defaults = {attr: vars(self._figure.subplotpars)[attr]
for attr in self._attrs}
# Set values after setting the range callbacks, but before setting up
# the redraw callbacks.
self._reset()
for attr in self._attrs:
self._widgets[attr].valueChanged.connect(self._on_value_changed)
for action, method in [("Export values", self._export_values),
("Tight layout", self._tight_layout),
("Reset", self._reset),
("Close", self.close)]:
self._widgets[action].clicked.connect(method)
def _export_values(self):
# Explicitly round to 3 decimals (which is also the spinbox precision)
# to avoid numbers of the form 0.100...001.
dialog = QtWidgets.QDialog()
layout = QtWidgets.QVBoxLayout()
dialog.setLayout(layout)
text = QtWidgets.QPlainTextEdit()
text.setReadOnly(True)
layout.addWidget(text)
text.setPlainText(
",\n".join("{}={:.3}".format(attr, self._widgets[attr].value())
for attr in self._attrs))
# Adjust the height of the text widget to fit the whole text, plus
# some padding.
size = text.maximumSize()
size.setHeight(
QtGui.QFontMetrics(text.document().defaultFont())
.size(0, text.toPlainText()).height() + 20)
text.setMaximumSize(size)
dialog.exec_()
def _on_value_changed(self):
self._figure.subplots_adjust(**{attr: self._widgets[attr].value()
for attr in self._attrs})
self._figure.canvas.draw_idle()
def _tight_layout(self):
self._figure.tight_layout()
for attr in self._attrs:
widget = self._widgets[attr]
widget.blockSignals(True)
widget.setValue(vars(self._figure.subplotpars)[attr])
widget.blockSignals(False)
self._figure.canvas.draw_idle()
def _reset(self):
for attr, value in self._defaults.items():
self._widgets[attr].setValue(value)
def error_msg_qt(msg, parent=None):
if not isinstance(msg, six.string_types):
msg = ','.join(map(str, msg))
QtWidgets.QMessageBox.warning(None, "Matplotlib",
                                  msg, QtWidgets.QMessageBox.Ok)
def exception_handler(type, value, tb):
"""Handle uncaught exceptions
It does not catch SystemExit
"""
msg = ''
# get the filename attribute if available (for IOError)
if hasattr(value, 'filename') and value.filename is not None:
msg = value.filename + ': '
if hasattr(value, 'strerror') and value.strerror is not None:
msg += value.strerror
else:
msg += six.text_type(value)
if len(msg):
error_msg_qt(msg)
@_Backend.export
class _BackendQT5(_Backend):
FigureCanvas = FigureCanvasQT
FigureManager = FigureManagerQT
@staticmethod
def trigger_manager_draw(manager):
manager.canvas.draw_idle()
@staticmethod
def mainloop():
# allow KeyboardInterrupt exceptions to close the plot window.
signal.signal(signal.SIGINT, signal.SIG_DFL)
global qApp
qApp.exec_()
|
mit
|
pravsripad/mne-python
|
tutorials/source-modeling/plot_visualize_stc.py
|
3
|
8263
|
"""
.. _tut-viz-stcs:
Visualize source time courses (stcs)
====================================
This tutorial focuses on visualization of :term:`source estimates<STC>`.
Surface Source Estimates
------------------------
First, we get the paths for the evoked data and the time courses (stcs).
"""
import os
import os.path as op
import numpy as np
import matplotlib.pyplot as plt
import mne
from mne.datasets import sample
from mne.minimum_norm import apply_inverse, read_inverse_operator
from mne import read_evokeds
data_path = sample.data_path()
sample_dir = os.path.join(data_path, 'MEG', 'sample')
subjects_dir = os.path.join(data_path, 'subjects')
fname_evoked = data_path + '/MEG/sample/sample_audvis-ave.fif'
fname_stc = os.path.join(sample_dir, 'sample_audvis-meg')
###############################################################################
# Then, we read the stc from file
stc = mne.read_source_estimate(fname_stc, subject='sample')
###############################################################################
# This is a :class:`SourceEstimate <mne.SourceEstimate>` object
print(stc)
###############################################################################
# The SourceEstimate object is in fact a *surface* source estimate. MNE also
# supports volume-based source estimates but more on that later.
#
# We can plot the source estimate using the
# :func:`stc.plot <mne.SourceEstimate.plot>` just as in other MNE
# objects. Note that for this visualization to work, you must have ``mayavi``
# and ``pysurfer`` installed on your machine.
initial_time = 0.1
brain = stc.plot(subjects_dir=subjects_dir, initial_time=initial_time,
clim=dict(kind='value', lims=[3, 6, 9]))
###############################################################################
# You can also morph it to fsaverage and visualize it using a flatmap
# sphinx_gallery_thumbnail_number = 3
stc_fs = mne.compute_source_morph(stc, 'sample', 'fsaverage', subjects_dir,
smooth=5, verbose='error').apply(stc)
brain = stc_fs.plot(subjects_dir=subjects_dir, initial_time=initial_time,
clim=dict(kind='value', lims=[3, 6, 9]),
surface='flat', hemi='split', size=(1000, 500),
smoothing_steps=5, time_viewer=False,
add_data_kwargs=dict(
colorbar_kwargs=dict(label_font_size=10)))
# You can save a movie like the one on our documentation website with:
# brain.save_movie(time_dilation=20, tmin=0.05, tmax=0.16,
# interpolation='linear', framerate=10)
###############################################################################
# Note that here we used ``initial_time=0.1``, but we can also browse through
# time using ``time_viewer=True``.
#
# In case ``mayavi`` is not available, we also offer a ``matplotlib``
# backend. Here we use ``verbose='error'`` to ignore a warning that not all
# vertices were used in plotting.
mpl_fig = stc.plot(subjects_dir=subjects_dir, initial_time=initial_time,
backend='matplotlib', verbose='error')
###############################################################################
#
# Volume Source Estimates
# -----------------------
# We can also visualize volume source estimates (used for deep structures).
#
# Let us load the sensor-level evoked data. We select the MEG channels
# to keep things simple.
evoked = read_evokeds(fname_evoked, condition=0, baseline=(None, 0))
evoked.pick_types(meg=True, eeg=False).crop(0.05, 0.15)
# this risks aliasing, but these data are very smooth
evoked.decimate(10, verbose='error')
###############################################################################
# Then, we can load the precomputed inverse operator from a file.
fname_inv = data_path + '/MEG/sample/sample_audvis-meg-vol-7-meg-inv.fif'
inv = read_inverse_operator(fname_inv)
src = inv['src']
mri_head_t = inv['mri_head_t']
###############################################################################
# The source estimate is computed using the inverse operator and the
# sensor-space data.
snr = 3.0
lambda2 = 1.0 / snr ** 2
method = "dSPM" # use dSPM method (could also be MNE or sLORETA)
stc = apply_inverse(evoked, inv, lambda2, method)
del inv
###############################################################################
# This time, we have a different container
# (:class:`VolSourceEstimate <mne.VolSourceEstimate>`) for the source time
# course.
print(stc)
###############################################################################
# This too comes with a convenient plot method.
stc.plot(src, subject='sample', subjects_dir=subjects_dir)
###############################################################################
# For this visualization, ``nilearn`` must be installed.
# This visualization is interactive. Click on any of the anatomical slices
# to explore the time series. Clicking on any time point will bring up the
# corresponding anatomical map.
#
# We could also visualize the source estimate on a glass brain. Unlike the
# previous visualization, a glass brain does not show us one slice but what we
# would see if the brain were transparent like glass, using a
# :term:`maximum intensity projection`:
stc.plot(src, subject='sample', subjects_dir=subjects_dir, mode='glass_brain')
###############################################################################
# You can also extract label time courses using volumetric atlases. Here we'll
# use the built-in ``aparc.a2009s+aseg.mgz``:
fname_aseg = op.join(subjects_dir, 'sample', 'mri', 'aparc.a2009s+aseg.mgz')
label_names = mne.get_volume_labels_from_aseg(fname_aseg)
label_tc = stc.extract_label_time_course(fname_aseg, src=src)
lidx, tidx = np.unravel_index(np.argmax(label_tc), label_tc.shape)
fig, ax = plt.subplots(1)
ax.plot(stc.times, label_tc.T, 'k', lw=1., alpha=0.5)
xy = np.array([stc.times[tidx], label_tc[lidx, tidx]])
xytext = xy + [0.01, 1]
ax.annotate(
label_names[lidx], xy, xytext, arrowprops=dict(arrowstyle='->'), color='r')
ax.set(xlim=stc.times[[0, -1]], xlabel='Time (s)', ylabel='Activation')
for key in ('right', 'top'):
ax.spines[key].set_visible(False)
fig.tight_layout()
###############################################################################
# And we can project these label time courses back to their original
# locations and see how the plot has been smoothed:
stc_back = mne.labels_to_stc(fname_aseg, label_tc, src=src)
stc_back.plot(src, subjects_dir=subjects_dir, mode='glass_brain')
###############################################################################
# Vector Source Estimates
# -----------------------
# If we choose to use ``pick_ori='vector'`` in
# :func:`apply_inverse <mne.minimum_norm.apply_inverse>`
fname_inv = data_path + '/MEG/sample/sample_audvis-meg-oct-6-meg-inv.fif'
inv = read_inverse_operator(fname_inv)
stc = apply_inverse(evoked, inv, lambda2, 'dSPM', pick_ori='vector')
brain = stc.plot(subject='sample', subjects_dir=subjects_dir,
initial_time=initial_time, brain_kwargs=dict(
silhouette=True))
###############################################################################
# Dipole fits
# -----------
# For computing a dipole fit, we need to load the noise covariance, the BEM
# solution, and the coregistration transformation files. Note that for the
# other methods, these were already used to generate the inverse operator.
fname_cov = os.path.join(data_path, 'MEG', 'sample', 'sample_audvis-cov.fif')
fname_bem = os.path.join(subjects_dir, 'sample', 'bem',
'sample-5120-bem-sol.fif')
fname_trans = os.path.join(data_path, 'MEG', 'sample',
'sample_audvis_raw-trans.fif')
##############################################################################
# Dipoles are fit independently for each time point, so let us crop our time
# series to visualize the dipole fit for the time point of interest.
evoked.crop(0.1, 0.1)
dip = mne.fit_dipole(evoked, fname_cov, fname_bem, fname_trans)[0]
##############################################################################
# Finally, we can visualize the dipole.
dip.plot_locations(fname_trans, 'sample', subjects_dir)
|
bsd-3-clause
|
DonghoChoi/ISB_Project
|
local/gps_data_analysis.py
|
2
|
13595
|
#!/usr/bin/python
# Author: Dongho Choi
import os.path
import datetime
import math
import time
import itertools
import pandas as pd
import numpy as np
from sshtunnel import SSHTunnelForwarder # for SSH connection
import pymysql.cursors # MySQL handling API
from geopy.distance import vincenty
import sys
sys.path.append("./configs/")
#sys.path.append("/Users/donghochoi/Documents/Work/Exploration_Study/Dissertation/Code/local/configs/")
import server_config # (1) info2_server (2) exploration_db
import matplotlib.pyplot as plt
import seaborn as sns
datetimeFormat = '%Y-%m-%d %H:%M:%S'
close_distance_cut = 40
number_k = 2
def is_location_close(location_a, location_b):
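    '''Return True when the two (latitude, longitude) points are within
    close_distance_cut meters of each other (Vincenty distance).'''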
#print("Distance:", vincenty(location_a, location_b).meters)
if (vincenty(location_a, location_b).meters <= close_distance_cut):
return True
else:
return False
def find_location(current_location, df_user_location_list):  # Return -1 when no close location exists, otherwise the matching locationID
for i in range(0, len(df_user_location_list)):
#print("index in df_location_list", i)
temp_location = (df_user_location_list.iloc[i]['latitude'],df_user_location_list.iloc[i]['longitude'])
if (is_location_close(current_location, temp_location) == True):
#print("FOUND ONE CLOSE LOCATION")
return df_user_location_list.iloc[i]['locationID']
#print("No match, returning -1")
return -1
def get_center_of_mass(user_location_list):
x = 0
y = 0
visit_sum = user_location_list['visit_times'].sum()
for i in range(0,len(user_location_list)):
x = x + user_location_list.iloc[i]['visit_times'] * user_location_list.iloc[i]['latitude']
y = y + user_location_list.iloc[i]['visit_times'] * user_location_list.iloc[i]['longitude']
x = x/visit_sum
y = y/visit_sum
return [x, y]
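# --- Illustrative sketch (not part of the original pipeline) -----------------
# The radius of gyration computed in the main loop below follows
#     r_g = sqrt( sum_i n_i * d(x_i, x_cm)**2 / sum_i n_i )
# where n_i is the number of visits to location x_i, d() is the Vincenty
# distance in meters and x_cm is the visit-weighted center of mass returned by
# get_center_of_mass(). The hypothetical helper below restates that computation
# for a small location DataFrame; it only documents the formula and is not
# called by the script.
def _radius_of_gyration_sketch(user_location_list):
    center = get_center_of_mass(user_location_list)
    weighted_sq_dist = 0.0
    for i in range(0, len(user_location_list)):
        loc = (user_location_list.iloc[i]['latitude'],
               user_location_list.iloc[i]['longitude'])
        weighted_sq_dist += (user_location_list.iloc[i]['visit_times'] *
                             pow(vincenty(loc, (center[0], center[1])).meters, 2))
    return math.sqrt(weighted_sq_dist / user_location_list['visit_times'].sum())
# ------------------------------------------------------------------------------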
if __name__ == "__main__":
# READ DATA FROM SERVER
#read_Data_from_Server()
# Server connection
server = SSHTunnelForwarder(
(server_config.info2_server['host'], 22),
ssh_username=server_config.info2_server['user'],
ssh_password=server_config.info2_server['password'],
remote_bind_address=('127.0.0.1', 3306))
server.start()
connection = pymysql.connect(host='127.0.0.1',
port=server.local_bind_port,
user=server_config.exploration_db['user'],
password=server_config.exploration_db['password'],
db=server_config.exploration_db['database'])
connection.autocommit(True)
cursor = connection.cursor()
print("MySQL connection established.")
# Get individual data
df_individual_data = pd.read_sql('SELECT * FROM individual_data', con=connection)
print("Individual data READ")
# Get the participants list from the table of 'final_participants'
df_participants = pd.read_sql('SELECT * FROM final_participants', con=connection)
print("Participants Table READ")
# Get locations_all table: importing all locations that participants visited.
df_locations_all = pd.read_sql(
"SELECT userID,date_time,latitude,longitude,timestamp FROM locations_all WHERE (userID!=5001)",
con=connection)
print("Locations Table READ")
# READ AND FILL THE PARTICIPANTS LIST WITH COMBINATIONS
participants_list = df_participants['userID'].tolist()
num_participants = len(participants_list) # number of participants
print('number of participants:{0}'.format(num_participants))
## POPULATE VISITS OF PARTICIPANTS
df_visits = pd.DataFrame(columns=('userID','visit_start','visit_end','dwellTime','latitude','longitude'))
df_mobility = pd.DataFrame(columns=('userID','visited_locations','k','gyration_all','gyration_k','s_k'))
for i in range(0, num_participants-1):
#for i in range(0,1):
df_user_visits = pd.DataFrame(columns=('userID', 'visit_start', 'visit_end', 'dwellTime', 'latitude', 'longitude'))
current_userID = participants_list[i]
print("current_userID: {0}".format(current_userID))
df_temp_locations = df_locations_all.loc[df_locations_all['userID'] == current_userID] # location list of a particular user
df_temp_locations = df_temp_locations.sort_values(by='timestamp')
current_location = (df_temp_locations.iloc[0]['latitude'],df_temp_locations.iloc[0]['longitude']) # the first line of the list
visit_start = datetime.datetime.strptime(df_temp_locations.iloc[0]['date_time'],datetimeFormat)
#print("visit_start:",visit_start)
visit_end = datetime.datetime.strptime(df_temp_locations.iloc[0]['date_time'], datetimeFormat)
#print("visit_end:",visit_end)
for j in range(1,len(df_temp_locations)-1):
if (visit_start + datetime.timedelta(minutes=55) > datetime.datetime.strptime(df_temp_locations.iloc[j]['date_time'], datetimeFormat)): # when time interval until next record is too small..
#print("too close")
continue
else:
temp_location = (df_temp_locations.iloc[j]['latitude'],df_temp_locations.iloc[j]['longitude'])
#print("distance:",vincenty(current_location, temp_location).meters)
                if (vincenty(current_location, temp_location).meters <= close_distance_cut): # When the user appears to stay near the current location
#print("SAME LOCATION")
visit_end = datetime.datetime.strptime(df_temp_locations.iloc[j]['date_time'],datetimeFormat)
#print("visit_end update:",visit_end)
else:
#print("MOVED TO NEW LOCATION")
df_temp_visits = pd.DataFrame(columns=('userID', 'visit_start', 'visit_end', 'dwellTime', 'latitude', 'longitude'))
df_temp_visits.set_value(0,'userID', current_userID)
df_temp_visits.set_value(0,'visit_start', visit_start)
df_temp_visits.set_value(0,'visit_end', visit_end)
df_temp_visits.set_value(0,'dwellTime', visit_end-visit_start)
df_temp_visits.set_value(0,'latitude', current_location[0])
df_temp_visits.set_value(0,'longitude', current_location[1])
df_user_visits = df_user_visits.append(df_temp_visits)
current_location = (df_temp_locations.iloc[j]['latitude'], df_temp_locations.iloc[j]['longitude'])
visit_start = datetime.datetime.strptime(df_temp_locations.iloc[j]['date_time'],
datetimeFormat)
visit_end = datetime.datetime.strptime(df_temp_locations.iloc[j]['date_time'], datetimeFormat)
df_user_location_list = pd.DataFrame(columns=('userID','locationID','latitude','longitude','visit_times','spent_time'))
df_temp_location = pd.DataFrame(columns=('userID', 'locationID', 'latitude', 'longitude'))
df_temp_location.set_value(0, 'userID', current_userID)
df_temp_location.set_value(0, 'locationID', 0)
df_temp_location.set_value(0, 'latitude', df_user_visits.iloc[0]['latitude'])
df_temp_location.set_value(0, 'longitude', df_user_visits.iloc[0]['longitude'])
df_temp_location.set_value(0, 'visit_times', 1)
df_temp_location.set_value(0, 'spent_time', df_user_visits.iloc[0]['dwellTime'])
df_user_location_list = df_user_location_list.append(df_temp_location)
for k in range(1,len(df_user_visits)): # To populate the user's location list with visit_times and spent_time
current_location = (df_user_visits.iloc[k]['latitude'],df_user_visits.iloc[k]['longitude'])
same_location = find_location(current_location,df_user_location_list)
#print("value of same location:",same_location)
if (same_location == -1): # if there is no place close to the current place
#print("same_location = -1")
df_temp_location = pd.DataFrame(columns=('userID','locationID','latitude','longitude'))
df_temp_location.set_value(0, 'userID', current_userID)
df_temp_location.set_value(0, 'locationID', len(df_user_location_list))
df_temp_location.set_value(0, 'latitude', current_location[0])
df_temp_location.set_value(0, 'longitude', current_location[1])
df_temp_location.set_value(0, 'visit_times', 1)
df_temp_location.set_value(0, 'spent_time', df_user_visits.iloc[k]['dwellTime'])
df_user_location_list = df_user_location_list.append(df_temp_location)
else: # when current location can be perceived as the found 'same location'
#print("same_location = :",same_location)
val_visit_times = df_user_location_list.iloc[same_location]['visit_times']
val_spent_time = df_user_location_list.iloc[same_location]['spent_time']
#print("previous visit_times of locationID {0}: {1} becomes to {2}".format(same_location, val_visit_times, val_visit_times + 1))
df_user_location_list.iloc[same_location, df_user_location_list.columns.get_loc('visit_times')]= val_visit_times + 1
df_user_location_list.iloc[same_location, df_user_location_list.columns.get_loc('spent_time')] = val_spent_time + df_user_visits.iloc[k]['dwellTime']
#print(df_user_location_list)
# Calculating the total of radius of gyration
df_user_location_list = (df_user_location_list.loc[df_user_location_list['spent_time'] > datetime.timedelta(seconds=0)]).sort_values(by='visit_times',ascending=False)
center_of_mass = get_center_of_mass(df_user_location_list)
print("center_of_mass: ({0}, {1})".format(center_of_mass[0],center_of_mass[1]))
gyration_sum = 0
for l in range(0,len(df_user_location_list)):
current_location = (df_user_location_list.iloc[l]['latitude'],df_user_location_list.iloc[l]['longitude'])
gyration_sum = gyration_sum + df_user_location_list.iloc[l]['visit_times'] * pow(vincenty(current_location, (center_of_mass[0],center_of_mass[1])).meters, 2)
#gyration_sum = math.sqrt(gyration_sum/len(df_user_location_list))
gyration_sum_new = math.sqrt(gyration_sum/(df_user_location_list['visit_times'].sum()))
#print("gyration_sum of user {0}: {1}, len(df_user_location_list): {2}".format(current_userID, gyration_sum, len(df_user_location_list)))
print("[NEW] gyration_sum of user {0}: {1}, sum of visits: {2}".format(current_userID, gyration_sum_new, df_user_location_list['visit_times'].sum()))
# Calculating the total of radius of gyration of the k-th most frequented locations
#number_k = (len(df_user_location_list))//3
df_user_location_list_k = df_user_location_list[:number_k]
print("len(df_user_location_list_k) = ",len(df_user_location_list_k))
center_of_mass_k = get_center_of_mass(df_user_location_list_k)
print("center_of_mass_of_k: ({0}, {1})".format(center_of_mass_k[0], center_of_mass_k[1]))
gyration_sum_k = 0
for l in range(0, len(df_user_location_list_k)):
current_location = (df_user_location_list_k.iloc[l]['latitude'], df_user_location_list_k.iloc[l]['longitude'])
gyration_sum_k = gyration_sum_k + df_user_location_list_k.iloc[l]['visit_times'] * pow(
vincenty(current_location, (center_of_mass_k[0], center_of_mass_k[1])).meters, 2)
#gyration_sum_k = math.sqrt(gyration_sum_k / len(df_user_location_list_k))
gyration_sum_k_new = math.sqrt(gyration_sum_k/(df_user_location_list_k['visit_times'].sum()))
#print("gyration_sum_k of user {0}: {1}, len(df_user_location_list_k): {2}".format(current_userID, gyration_sum_k, len(df_user_location_list_k)))
print("[NEW] gyration_sum_k of user {0}: {1}, sum of visits: {2}".format(current_userID,gyration_sum_k_new, df_user_location_list_k['visit_times'].sum()))
#s_k_ratio = gyration_sum_k/gyration_sum
s_k_ratio_new = gyration_sum_k_new/gyration_sum_new
#print("s_k of user %i = %f" % (current_userID, s_k_ratio))
print("[NEW] s_k of user {0} = {1}".format(current_userID, s_k_ratio_new))
# Calculating the location diversity and loyalty
print("total visits in df_user_location_list:{}",len(df_user_location_list))
#print(df_user_location_list)
series_user_mobility = pd.Series([current_userID, df_user_location_list['visit_times'].sum(), number_k, df_user_location_list_k['visit_times'].sum(),gyration_sum_new, gyration_sum_k_new, s_k_ratio_new], index=['userID','visited_locations','k','visited_locations_k','gyration_all','gyration_k','s_k'])
df_mobility = df_mobility.append(series_user_mobility,ignore_index=True)
#print(df_visits)
print(df_mobility)
# MOBILITY DATA INTO SERVER
for i in range(0, len(df_mobility)):
sql = "INSERT INTO mobility_data (userID,visited_locations,k,visited_locations_k,gyration_all,gyration_k,s_k) VALUES (" + str(df_mobility.iloc[i]['userID']) + "," + str(df_mobility.iloc[i]['visited_locations']) + "," + str(df_mobility.iloc[i]['k']) + "," + str(df_mobility.iloc[i]['visited_locations_k'])+","+str(df_mobility.iloc[i]['gyration_all']) + "," + str(df_mobility.iloc[i]['gyration_k']) + "," + str(df_mobility.iloc[i]['s_k']) + ");"
cursor.execute(sql)
server.stop()
print("End")
|
gpl-3.0
|
jzt5132/scikit-learn
|
sklearn/preprocessing/tests/test_function_transformer.py
|
176
|
2169
|
from nose.tools import assert_equal
import numpy as np
from sklearn.preprocessing import FunctionTransformer
def _make_func(args_store, kwargs_store, func=lambda X, *a, **k: X):
def _func(X, *args, **kwargs):
args_store.append(X)
args_store.extend(args)
kwargs_store.update(kwargs)
return func(X)
return _func
def test_delegate_to_func():
# (args|kwargs)_store will hold the positional and keyword arguments
# passed to the function inside the FunctionTransformer.
args_store = []
kwargs_store = {}
X = np.arange(10).reshape((5, 2))
np.testing.assert_array_equal(
FunctionTransformer(_make_func(args_store, kwargs_store)).transform(X),
X,
'transform should have returned X unchanged',
)
    # The function should only have received X.
assert_equal(
args_store,
[X],
'Incorrect positional arguments passed to func: {args}'.format(
args=args_store,
),
)
assert_equal(
kwargs_store,
{},
'Unexpected keyword arguments passed to func: {args}'.format(
args=kwargs_store,
),
)
# reset the argument stores.
args_store[:] = [] # python2 compatible inplace list clear.
kwargs_store.clear()
y = object()
np.testing.assert_array_equal(
FunctionTransformer(
_make_func(args_store, kwargs_store),
pass_y=True,
).transform(X, y),
X,
'transform should have returned X unchanged',
)
    # The function should have received X and y.
assert_equal(
args_store,
[X, y],
'Incorrect positional arguments passed to func: {args}'.format(
args=args_store,
),
)
assert_equal(
kwargs_store,
{},
'Unexpected keyword arguments passed to func: {args}'.format(
args=kwargs_store,
),
)
def test_np_log():
X = np.arange(10).reshape((5, 2))
# Test that the numpy.log example still works.
np.testing.assert_array_equal(
FunctionTransformer(np.log1p).transform(X),
np.log1p(X),
)
|
bsd-3-clause
|
will-iam/Variant
|
script/process/model.py
|
1
|
22934
|
#!/usr/bin/python3
# -*- coding:utf-8 -*-
"""
"""
import __future__
import os
import sys
from collections import *
import parser
from pprint import *
from scipy import stats
#from pandas import Series
import numpy as np
from numpy.linalg import inv, pinv
import matplotlib.pyplot as plt
import operator
# Compute the number of neighbours of a domain decomposition
def computeNeighborNumber(x, y, withCorner = False):
if x == 1 and y == 1:
return 0
total = 0.0
if x == 1: # then y > 1
total += (y - 2) * 2 # border without corners
total += 2 # corners
return total
if y == 1: # then x > 1
total += (x-2) * 2 # border without corners
total += 2 # corners
return total
# Now we know that x > 1 and y > 1
if withCorner == True:
a = 8
b = 5
c = 3
else:
a = 4
b = 3
c = 2
total += (x-2) * (y-2) * a # internal sub-domains
total += 2 * (x-2) * b # x borders without corners
total += 2 * (y-2) * b # y borders without corners
total += 4 * c # corners
# return total
return total
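# --- Illustrative check (assumption: added for documentation only) -----------
# Without corners, every internal sub-domain of an x-by-y grid has 4 neighbours,
# every non-corner border sub-domain has 3 and every corner has 2, so a 4 x 4
# grid yields 4*4 + 8*3 + 4*2 = 48 neighbour relations, while a 1 x 8 strip
# yields 6*2 + 2*1 = 14.
assert computeNeighborNumber(4, 4) == 48
assert computeNeighborNumber(1, 8) == 14
# ------------------------------------------------------------------------------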
def expectation(data, field, filterKey):
    '''
    In this first step we collect the data produced on a single core to estimate the
    compute time Tc.
    '''
caseKeyList = [k for k in data.keys() if k.endswith(filterKey)]
if len(caseKeyList) > 1:
print("Weird ...")
sys.exit(1)
TcData = data[caseKeyList[0]]
if not len(TcData):
print("Aucune données sur le cas %s" % caseKey)
sys.exit(1)
#pprint(TcData)
pointTimeDict = {}
for element in TcData:
coord = element['point']
a = np.log(element[field])
if coord not in pointTimeDict.keys():
pointTimeDict[coord] = []
pointTimeDict[coord].append(a)
pointExpectDict = {}
for point, TcLoopTimeList in pointTimeDict.items():
sample = np.array(TcLoopTimeList)
meanTc, stdTc = stats.norm.fit(TcLoopTimeList)
pointExpectDict[(point[0], point[1], point[2], len(TcLoopTimeList))] = np.exp(meanTc)
return pointExpectDict
def synchronizeEstimate(data, resource):
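    '''
    Estimate the expected synchronization time Ts for each operating point:
    for every run whose key ends with "<resource>:1:4", the per-point samples of
    log(maxIterationSum - maxComputeSum) are fitted with a normal law, and the
    exponential of the fitted mean is returned in a dict keyed by the operating
    point plus the number of samples.
    '''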
rawTsData = {}
for k, v in data.items():
if k.endswith(str(resource) + ':1:4'):
for p in v:
coord = p['point']
if coord not in rawTsData.keys():
rawTsData[coord] = []
t = p['maxIterationSum'] - p['maxComputeSum']
#print p['point'], 100. * p['synchronizeTime'] / (p['loopTime'] - p["computeTime"]), "%%"
rawTsData[coord].append(np.log(t))
if not len(rawTsData):
print("Aucune données sur le cas %s" % rawTsData)
sys.exit(1)
#pprint(rawTsData)
pointExpectDict = {}
for point, synchronizeTimeList in rawTsData.items():
meanTs, stdTs = stats.norm.fit(synchronizeTimeList)
pointExpectDict[(point[0], point[1], point[2], len(synchronizeTimeList))] = np.exp(meanTs)
'''
fig = plt.figure(0, figsize=(9, 6))
ax = fig.add_subplot(111)
ax.hist(synchronizeTimeList, bins=25, alpha=0.3, normed= True)
x = np.linspace(meanTs - 2*stdTs, meanTs + 2*stdTs, 100)
normal_dist = stats.norm(meanTs, stdTs)
normal_pdf = normal_dist.pdf(x)
ax.plot(x, normal_pdf, '--', label="%s x %s" % (point[0], point[1]))
plt.legend()
plt.show()
'''
return pointExpectDict
def chargeEstimate(data, resource):
    '''
    In this first step we collect the data produced with a single thread and 64 SDDs to estimate the
    compute time Tc.
    '''
TcKeyFiltered = [k for k in data.keys() if k.endswith(':' + str(resource) + ':1:4')]
if len(TcKeyFiltered) > 1:
print("Weird ...")
sys.exit(1)
TcData = data[TcKeyFiltered[0]]
if not len(TcData):
print("Aucune données sur le cas %s" % TcKeyFiltered[0])
sys.exit(1)
#pprint(TcData)
allTime = []
timeByPointDict = {}
machinePointTimeDict = {}
pointMachineTimeDict = {}
for element in TcData:
coord = element['point']
if coord[0] * coord[1] != resource:
continue
point = element['point']
machine = element['machine']
if point not in timeByPointDict.keys():
timeByPointDict[point] = []
if point not in pointMachineTimeDict.keys():
pointMachineTimeDict[point] = {}
if machine not in pointMachineTimeDict[point].keys():
pointMachineTimeDict[point][machine] = []
if machine not in machinePointTimeDict.keys():
machinePointTimeDict[machine] = {}
if point not in machinePointTimeDict[machine].keys():
machinePointTimeDict[machine][point] = []
a = np.log(element["minComputeSum"])
allTime.append(a)
timeByPointDict[point].append(a)
pointMachineTimeDict[point][machine].append(a)
machinePointTimeDict[machine][point].append(a)
expectTimeDict = {}
for point, pointTimeDict in pointMachineTimeDict.items():
if point not in expectTimeDict.keys():
expectTimeDict[point] = {}
for machine, timeList in pointTimeDict.items():
sample = np.array(timeList)
# kstest is the Kolmogrov-Smirnov test for goodness of fit
# Here its sample is being tested against the normal distribution
# D is the KS statistic and the closer it is to 0 the better.
## Normale
meanTc, stdTc = stats.norm.fit(timeList)
expectTimeDict[point][machine] = (meanTc, stdTc, len(timeList))
ktOut = stats.kstest(sample, 'norm', args=(meanTc, stdTc))
print('\n\tkstest output for the Normal distribution')
print('\tD = ' + str(ktOut[0]))
print('\tP-value = ' + str(ktOut[1]))
print(machine, point, meanTc, stdTc, len(timeList))
## wald : moins bon que lognormal
## normal : non
## rayleigh : non
## nakagami : non
'''
## Lognormal
shape, location, scale = stats.lognorm.fit(TcLoopTimeList)
muTc, sigmaTc = np.log(scale), shape
log_dist = stats.lognorm(s=shape, loc=location, scale=scale)
print "\tlog_dist.expect()", log_dist.expect()
print "\tLoi Lognormale: espérance de Tc = %s, écart-type (sigma) de Tc = %s sur %s points" % (np.exp(-muTc), sigmaTc, len(TcLoopTimeList))
ktOut = stats.kstest(sample, 'lognorm', args=(shape, location, scale))
print('\n\tkstest output for the Lognormal distribution')
print('\tD = ' + str(ktOut[0]))
print('\tP-value = ' + str(ktOut[1]))
## Wald
wald_location, wald_scale = stats.wald.fit(TcLoopTimeList)
ktOut = stats.kstest(sample, 'wald', args=(wald_location, wald_scale))
print('\n\tkstest output for the Wald distribution')
print('\tD = ' + str(ktOut[0]))
print('\tP-value = ' + str(ktOut[1]))
## RayleighcomputeMeanNeighborhoodNumber
rayleigh_a, rayleigh_b = stats.rayleigh.fit(TcLoopTimeList)
#ktOut = stats.kstest(sample, 'rayleigh')
print('\n\tkstest output for the rayleigh distribution')
print('\tD = ' + str(ktOut[0]))
print('\tP-value = ' + str(ktOut[1]))
## Normale
meanTc, stdTc = stats.norm.fit(TcLoopTimeList)
ktOut = stats.kstest(sample, 'norm', args=(meanTc, stdTc))
print('\n\tkstest output for the Normal distribution')
print('\tD = ' + str(ktOut[0]))
print('\tP-value = ' + str(ktOut[1]))
'''
'''
# Trace l'histogramme
fig = plt.figure(0, figsize=(9, 6))
ax = fig.add_subplot(111)
ax.hist(sample, bins=25, alpha=0.3, color='k', normed= True)
x = np.linspace(np.min(sample) - 2. * stdTc, np.max(sample) + 2. * stdTc, 100)
normal_dist = stats.norm(meanTc, stdTc)
normal_pdf = normal_dist.pdf(x)
ax.plot(x, normal_pdf, 'k--', label="%s with %s point(s)" % (machine, len(timeList)))
plt.title('log(Time) on point: %sx%sx%s' % (point[0], point[1], point[2]))
plt.legend()
plt.show()
'''
'''
# Trace l'histogramme
fig = plt.figure(0, figsize=(9, 6))
ax = fig.add_subplot(111)
ax.hist(timeByPointDict[point], bins=25, alpha=0.3, normed= True)
x = np.linspace(np.min(timeByPointDict[point]), np.max(timeByPointDict[point]), 100)
for machine, normal in expectTimeDict[point].items():
normal_dist = stats.norm(normal[0], normal[1])
normal_pdf = normal_dist.pdf(x)
ax.plot(x, normal_pdf, '--', label="%s with %s point(s)" % (machine, normal[2]))
plt.title('log(Time) on point: %sx%sx%s' % (point[0], point[1], point[2]))
plt.legend()
plt.show()
'''
print("\nExpectation by point:")
allExpectTime = []
for point, machineExpectDict in expectTimeDict.items():
print("Expectation(s) on point", point)
for machine, normal in machineExpectDict.items():
t = np.exp(normal[0])
allExpectTime.append(t)
print("\t%s %s" % (machine, t))
print("\tmean of expectation(s) = %s" % (np.mean([np.exp(v[0]) for v in machineExpectDict.values()])))
print("Mean of all expectation values: ", np.mean(allExpectTime))
# Normal of all points.
sample = np.array(allTime)
meanTc, stdTc = stats.norm.fit(sample)
fig = plt.figure(0, figsize=(9, 6))
ax = fig.add_subplot(111)
binNumber = int(1. + 10./3.*np.log(len(sample)))
ax.hist(sample, bins=binNumber, alpha=0.3, normed= True)
x = np.linspace(meanTc - 2*stdTc, meanTc + 2*stdTc, 150)
normal_dist = stats.norm(meanTc, stdTc)
normal_pdf = normal_dist.pdf(x)
ax.plot(x, normal_pdf, 'r--', label="full sample %s point(s)" % (len(allTime)))
plt.legend()
plt.show()
print "Expectations of points all together:", np.exp(meanTc)
return np.exp(meanTc) * resource
def greeksEstimate(data, resource):
    '''
    delta = communication throughput.
    lambda = latency to open a communication.
    '''
TsData = []
for k, v in data.items():
if not k.endswith(':' + str(resource) + ':1:4'):
continue
for p in v:
coord = p['point']
if coord[0] * coord[1] * coord[2] != resource:
print("greeksEstimate: erreur dans la récupération des données.")
sys.exit(1)
if coord[0] == 1 and coord[1] == 1:
continue
#if coord[0] != coord[1]:
# continue
#caseSize = int(np.sqrt(p['nPhysicalCells'] * coord[0] * coord[1]))
#if caseSize > 700:
# continue
extractedKey = parser.extractKey(k)
p['Nx'] = extractedKey['Nx']
p['Ny'] = extractedKey['Ny']
TsData.append(p)
    # Estimate the model parameters
TsList = []
coupleList = []
interfaceSizeDict = {}
neighbourhoodDict = {}
for p in TsData:
ts = np.log(p['maxIterationSum'] - p['maxComputeSum'])
#ts = p['loopTime'] - p['computeTime']
coord = p['point']
nx = float(coord[0])
ny = float(coord[1])
Nx = p['Nx']
Ny = p['Ny']
h = parser.fn(nx) * ny + parser.fn(ny) * nx
i = h * (Nx + Ny)
coupleList.append([1.0, np.log(h), np.log(i)])
TsList.append(ts)
if h not in interfaceSizeDict.keys():
interfaceSizeDict[h] = {}
if i not in interfaceSizeDict[h].keys():
interfaceSizeDict[h][i] = []
interfaceSizeDict[h][i].append(ts)
if i not in neighbourhoodDict.keys():
neighbourhoodDict[i] = {}
if h not in neighbourhoodDict[i].keys():
neighbourhoodDict[i][h] = []
neighbourhoodDict[i][h].append(ts)
Ts = np.array(TsList)
F = np.array(coupleList)
B = np.dot(pinv(F), Ts)
    print(len(Ts), " simulations. Value of B:", B)
TsEstimate = np.dot(F, B)
err = np.sqrt(np.sum(np.dot((Ts - TsEstimate), (Ts - TsEstimate)))) / len(Ts)
print("Erreur entre le modèle bruité non calibré et le modèle calibré: ", err)
# i = h * caseSize, j = h * np.log(caseSize) : err = 0.00505024734782
# i = h * np.log(caseSize) : err = 0.005053122627
# i = h * np.sqrt(caseSize) : err = 0.00505726721335
# i = h * caseSize : err = 0.00505726721335
# Best = [-2.11062418 1.41218773 -1.39434254]
'''
# Estimation du bruit sur les temps de synchronisation ?
Pf = np.dot(np.dot(F, inv(F.T.dot(F))), F.T)
index = 0
varYDict = {}
for c in coupleList:
varY = Pf[index][index]
h = c[0]
i = c[1]/h
varYDict[i] = {}
varYDict[i][h] = varY
index += 1
# Estimation de l'erreur entre le modèle non calibré bruité et le modèle calibré
X = []
Y = []
err = []
for i in sorted(neighbourhoodDict):
for h,v in sorted(neighbourhoodDict[i].items()):
for y in neighbourhoodDict[i][h]:
x = h*(lambdaValue + deltaValue*i)
Y.append(y)
X.append(x)
err.append(np.abs(x-y))
sigma = np.std(err)
print "err var:", np.var(err),", err std:", sigma
Pf = Pf * sigma * sigma
'''
### fig ###
fig = plt.figure(0, figsize=(9, 6))
boxDist = {}
    # Plot the curve as a function of i
ax = fig.add_subplot(111)
#ax = fig.add_subplot(211)
    xMin = min(neighbourhoodDict.keys())
    xMax = max(neighbourhoodDict.keys())
#ax.set_xscale('log')
#ax.set_yscale('log')
x = np.linspace(xMin, xMax, 60)
for h in sorted(interfaceSizeDict):
#if h != 224 and h != 48 and h != 8 and h != 2:
# continue
#if h < 8:
# continue
for k, vL in interfaceSizeDict[h].items():
if k not in boxDist.keys():
boxDist[k] = []
for v in vL:
boxDist[k].append(v)
ax.plot(sorted(interfaceSizeDict[h].keys()), [np.mean(t) for k, t in sorted(interfaceSizeDict[h].items())], "o--", label="total neighbour: " + str(h))
#for i, tsList in interfaceSizeDict[h].items():
# ii = [i for p in tsList]
# ax.plot(ii, tsList, "+")
# Estimate
y = [B[0] + B[1] * np.log(h) + B[2] * np.log(i) for i in x]
ax.plot(x, y, "-", label="total neighbour: " + str(h))
#Best = [-2.11062418, 1.41218773, -1.39434254]
#y = [np.exp(Best[0]) * np.power(h, Best[1] + Best[2]) * np.power(i / h, Best[2]) for i in x]
#ax.plot(x, y, "-", label="total neighbour: " + str(h))
    ax.boxplot(list(boxDist.values()), positions=list(boxDist.keys()))
plt.xlabel('max interface size')
plt.legend()
plt.title('synchronized time model')
plt.ylabel('Time')
'''
(1, 1, 64, 123) - (0)
(1, 2, 32, 111) - (2.0)
(1, 4, 16, 123) - (6.0)
(1, 8, 8, 111) - (14.0)
(1, 16, 4, 123) - (30.0)
(1, 32, 2, 111) - (62.0)
(1, 64, 1, 138) - (126.0)
(2, 2, 16, 123) - (8.0)
(2, 4, 8, 111) - (20.0)
(2, 8, 4, 123) - (44.0)
(2, 16, 2, 111) - (92.0)
(2, 32, 1, 105) - (188.0)
(4, 4, 4, 123) - (48.0)
(4, 8, 2, 111) - (104.0)
(4, 16, 1, 105) - (216.0)
(8, 8, 1, 105) - (224.0)
'''
'''
# Trace la courbe en fonction de h
bx = fig.add_subplot(212)
# bx = fig.add_subplot(111)
x = np.linspace(0, 256, 10)
for i in sorted(neighbourhoodDict):
bx.plot(sorted(neighbourhoodDict[i].keys()), [np.mean(t) for k, t in sorted(neighbourhoodDict[i].items())], "o-", label="interfacesize: " + str(i))
# Estimate
#y = [lambdaValue + deltaValue * np.log(h) + thetaValue * np.log(i) for h in x]
# bx.plot(x, y, "--")
plt.xlabel('total neighbour number')
#plt.legend(loc='upper left')
plt.title('synchronized time model')
plt.ylabel('Time')
'''
'''
for i in varYDict.keys():
errbarp = {}
errbarm = {}
for h, v in varYDict[i].items():
print i, h, v
x = h
yp = h*(lambdaValue + deltaValue*i) + v * sigma * sigma
ym = h*(lambdaValue + deltaValue*i) - v * sigma * sigma
errbarp[x] = yp
errbarm[x] = ym
bx.plot(errbarp.keys(), errbarp.values(), "+")
bx.plot(errbarm.keys(), errbarm.values(), "x")
positionList = []
valueList = []
for s in sorted(neighbourhoodDict):
for k in neighbourhoodDict[s].keys():
positionList.append(k)
for k in neighbourhoodDict[s].values():
valueList.append(k)
bx.boxplot(valueList, positions=positionList)
plt.legend(loc='upper left')
plt.title('synchronized time model')
# Trace la courbe en fonction de predict
cx = fig.add_subplot(111)
cx.plot(X,Y,"+")
cx.plot(X,X,"--")
'''
plt.show()
return B
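# --- Illustrative sketch (assumption: synthetic data, not from the runs) -----
# greeksEstimate() calibrates the log-linear model
#     log(Ts) ~ B[0] + B[1]*log(h) + B[2]*log(i)
# by least squares through the Moore-Penrose pseudo-inverse, B = pinv(F).Ts.
# The toy function below recovers known coefficients from noiseless synthetic
# observations; it only documents the fitting step and is not called by the
# script.
def _pinv_calibration_sketch():
    rng = np.random.RandomState(0)
    true_B = np.array([-2.0, 1.4, -1.3])         # arbitrary "true" coefficients
    h = rng.uniform(2.0, 200.0, size=50)         # total neighbour numbers
    i = h * rng.uniform(100.0, 1000.0, size=50)  # interface sizes
    F = np.column_stack([np.ones_like(h), np.log(h), np.log(i)])
    Ts = F.dot(true_B)                           # noiseless synthetic log-times
    B = np.dot(pinv(F), Ts)
    assert np.allclose(B, true_B)
    return B
# ------------------------------------------------------------------------------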
# Definition of the study case
filterDict = {'nSizeX' : 512, 'nSizeY' : 512}
resource = 64
data = parser.getData(filterDict)
if not len(data):
print("Aucune données.")
sys.exit(1)
#pprint(data)
# Estimate Tc at a single operating point.
expectTc = chargeEstimate(data, resource)
print("E[Tc] = %s milliseconds per (iteration x cell number)" % (expectTc))
# Estimate the parameters of the Ts model from several cases
greeks = greeksEstimate(data, resource)
print("Greeks: ", greeks)
# Estimate Ts for each operating point.
expectTsDict = synchronizeEstimate(data, resource)
for point, expectTs in expectTsDict.items():
print("E[Ts] = %s milliseconds per (iteration x cell number) - %s - (%s)" % (expectTs, point, computeNeighborNumber(point[0],point[1])))
minTuple = sorted(expectTsDict.items(), key=operator.itemgetter(1))[1]
print("min(E[Ts]) value = %s on point: %s" % (minTuple[1], minTuple[0]))
maxTuple = sorted(expectTsDict.items(), key=operator.itemgetter(1))[-1]
print("max(E[Ts]) value = %s on point: %s" % (maxTuple[1], maxTuple[0]))
# Estimate T for each operating point.
expectTtotDict = expectation(data, 'loopTime', str(resource) + ':1:4')
minPoint = (0,0,0)
minValue = 1e300
for point, expectTot in expectTtotDict.items():
if expectTot < minValue:
minValue = expectTot
minPoint = point
print("E[T] = %s milliseconds per (iteration x cell number) - %s - (%s)" % (expectTot, point, computeNeighborNumber(point[0],point[1])))
print("min(E[T]) value = %s on point: %s" % (minValue, minPoint))
# Estimate Tt for each operating point.
expectTtDict = {}
for point, expectTot in expectTtotDict.items():
expectTt = expectTot - expectTc / (point[0] * point[1] * point[2]) - expectTsDict[point]
# print "E[Tt] = %s - %s / %s - %s = %s" % (expectTot, expectTc, (point[0] * point[1] * point[2]), expectTsDict[point], expectTt)
expectTtDict[point] = expectTt
print("E[Tt] = %s milliseconds per (iteration x cell number) - %s - (%s)" % (expectTt, point, computeNeighborNumber(point[0],point[1])))
minTuple = sorted(expectTtDict.items(), key=operator.itemgetter(1))[0]
print("min(E[Tt]) value = %s on point: %s" % (minTuple[1], minTuple[0]))
maxTuple = sorted(expectTtDict.items(), key=operator.itemgetter(1))[-1]
print("max(E[Tt]) value = %s on point: %s" % (maxTuple[1], maxTuple[0]))
# Estimate Tmod for each operating point.
expectTmodDict = {}
expectTsmodDict = {}
for point, expectTt in expectTtDict.items():
nx = point[0]
ny = point[1]
h = computeNeighborNumber(nx, ny)
interface_size = filterDict['nSizeX'] + filterDict['nSizeY']
estimate = np.exp(greeks[0]) * np.power(h, greeks[1] + greeks[2]) * np.power(interface_size, greeks[2])
estimateMod = estimate * np.power(2, 2 + greeks[2])
expectTsmod = estimateMod
#print(point, "expectTsmod:", expectTsmod, "expectTs:", expectTsDict[point], "estimateTs:", estimate, "estimateTsmod:", estimateMod)
expectTsmodDict[point] = expectTsmod
expectTmodDict[point] = expectTt + expectTsmod + expectTc / (point[0] * point[1] * point[2])
print("E[Tm] = %s milliseconds per (iteration x cell number) - %s - (%s)" % (expectTmodDict[point], point, computeNeighborNumber(point[0], point[1])))
minTuple = sorted(expectTmodDict.items(), key=operator.itemgetter(1))[0]
print("min(E[Tm]) value = %s on point: %s" % (minTuple[1], minTuple[0]))
# Final Plot
fig = plt.figure(0, figsize=(9, 6))
ax = fig.add_subplot(111)
#ax.set_xscale('log', basex=2)
x = np.linspace(1, 128, 40)
coeff = 0.5
y = [expectTc / (point[0] * point[1] * point[2]) * coeff * (p - 1.0) / (p - coeff * (p - 1.0)) for p in x]
#ax.plot(x, y, "--", label='ideal')
plotTtDict = {}
for k, v in expectTtDict.items():
if k[2] not in plotTtDict.keys():
plotTtDict[k[2]] = []
plotTtDict[k[2]].append(v)
ax.boxplot(list(plotTtDict.values()), positions=list(plotTtDict.keys()))
ax.plot(sorted(plotTtDict.keys()), [np.mean(v) for k, v in sorted(plotTtDict.items())], label='E[Tt]')
plotTsDict = {}
for k, v in expectTsDict.items():
if k[2] not in plotTsDict.keys():
plotTsDict[k[2]] = []
plotTsDict[k[2]].append(v)
ax.boxplot(list(plotTsDict.values()), positions=list(plotTsDict.keys()))
ax.plot(sorted(plotTsDict.keys()), [np.mean(v) for k, v in sorted(plotTsDict.items())], label='E[Ts]')
plotTsmodDict = {}
for k, v in expectTsmodDict.items():
if k[2] not in plotTsmodDict.keys():
plotTsmodDict[k[2]] = []
plotTsmodDict[k[2]].append(v)
ax.boxplot(list(plotTsmodDict.values()), positions=list(plotTsmodDict.keys()))
ax.plot(sorted(plotTsmodDict.keys()), [np.mean(v) for k, v in sorted(plotTsmodDict.items())], label='E[TsMod]')
plt.legend(loc='upper right')
plt.show()
'''
# Estimating T_c from single-core data
T_c is modeled as a stationary Gaussian process. Its mean and variance are estimated from 30 draws.
(Since the process is stationary, the mean and the variance are the same at every operating point.)
Goodness of fit is checked with a chi-square test or visually with the histogram.
# Plot the histogram
from pandas import Series
values = Series(sample)
values.hist(bins=20, alpha=0.3, color='k', normed=True)
values.plot(kind='kde', style='k--')
plt.show()
Second step:
- estimate the noise on T_s (check for all nx, ny) with least squares (because it is not stationary).
- "traditional" calibration (by least-squares minimization) of the model for $T_s$ ($lambda$ and $theta$ in the non-zero case).
- Leave-one-out validation.
Third step:
'''
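# --- Illustrative sketch (assumption: added for documentation only) -----------
# The note above mentions leave-one-out validation of the calibrated Ts model.
# A minimal version with the same pinv-based fit refits the model once per
# observation, each time leaving that observation out and predicting it; the
# F and Ts arrays are assumed to be built as in greeksEstimate().
def _leave_one_out_sketch(F, Ts):
    errors = []
    for left_out in range(len(Ts)):
        keep = [k for k in range(len(Ts)) if k != left_out]
        B_loo = np.dot(pinv(F[keep]), Ts[keep])
        errors.append(Ts[left_out] - np.dot(F[left_out], B_loo))
    return np.array(errors)
# ------------------------------------------------------------------------------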
|
mit
|
simon-pepin/scikit-learn
|
sklearn/tests/test_calibration.py
|
213
|
12219
|
# Authors: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from sklearn.utils.testing import (assert_array_almost_equal, assert_equal,
assert_greater, assert_almost_equal,
assert_greater_equal,
assert_array_equal,
assert_raises,
assert_warns_message)
from sklearn.datasets import make_classification, make_blobs
from sklearn.naive_bayes import MultinomialNB
from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor
from sklearn.svm import LinearSVC
from sklearn.linear_model import Ridge
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import Imputer
from sklearn.metrics import brier_score_loss, log_loss
from sklearn.calibration import CalibratedClassifierCV
from sklearn.calibration import _sigmoid_calibration, _SigmoidCalibration
from sklearn.calibration import calibration_curve
def test_calibration():
"""Test calibration objects with isotonic and sigmoid"""
n_samples = 100
X, y = make_classification(n_samples=2 * n_samples, n_features=6,
random_state=42)
sample_weight = np.random.RandomState(seed=42).uniform(size=y.size)
X -= X.min() # MultinomialNB only allows positive X
# split train and test
X_train, y_train, sw_train = \
X[:n_samples], y[:n_samples], sample_weight[:n_samples]
X_test, y_test = X[n_samples:], y[n_samples:]
# Naive-Bayes
clf = MultinomialNB().fit(X_train, y_train, sample_weight=sw_train)
prob_pos_clf = clf.predict_proba(X_test)[:, 1]
pc_clf = CalibratedClassifierCV(clf, cv=y.size + 1)
assert_raises(ValueError, pc_clf.fit, X, y)
# Naive Bayes with calibration
for this_X_train, this_X_test in [(X_train, X_test),
(sparse.csr_matrix(X_train),
sparse.csr_matrix(X_test))]:
for method in ['isotonic', 'sigmoid']:
pc_clf = CalibratedClassifierCV(clf, method=method, cv=2)
# Note that this fit overwrites the fit on the entire training
# set
pc_clf.fit(this_X_train, y_train, sample_weight=sw_train)
prob_pos_pc_clf = pc_clf.predict_proba(this_X_test)[:, 1]
# Check that brier score has improved after calibration
assert_greater(brier_score_loss(y_test, prob_pos_clf),
brier_score_loss(y_test, prob_pos_pc_clf))
# Check invariance against relabeling [0, 1] -> [1, 2]
pc_clf.fit(this_X_train, y_train + 1, sample_weight=sw_train)
prob_pos_pc_clf_relabeled = pc_clf.predict_proba(this_X_test)[:, 1]
assert_array_almost_equal(prob_pos_pc_clf,
prob_pos_pc_clf_relabeled)
# Check invariance against relabeling [0, 1] -> [-1, 1]
pc_clf.fit(this_X_train, 2 * y_train - 1, sample_weight=sw_train)
prob_pos_pc_clf_relabeled = pc_clf.predict_proba(this_X_test)[:, 1]
assert_array_almost_equal(prob_pos_pc_clf,
prob_pos_pc_clf_relabeled)
# Check invariance against relabeling [0, 1] -> [1, 0]
pc_clf.fit(this_X_train, (y_train + 1) % 2,
sample_weight=sw_train)
prob_pos_pc_clf_relabeled = \
pc_clf.predict_proba(this_X_test)[:, 1]
if method == "sigmoid":
assert_array_almost_equal(prob_pos_pc_clf,
1 - prob_pos_pc_clf_relabeled)
else:
# Isotonic calibration is not invariant against relabeling
# but should improve in both cases
assert_greater(brier_score_loss(y_test, prob_pos_clf),
brier_score_loss((y_test + 1) % 2,
prob_pos_pc_clf_relabeled))
# check that calibration can also deal with regressors that have
# a decision_function
clf_base_regressor = CalibratedClassifierCV(Ridge())
clf_base_regressor.fit(X_train, y_train)
clf_base_regressor.predict(X_test)
# Check failure cases:
# only "isotonic" and "sigmoid" should be accepted as methods
clf_invalid_method = CalibratedClassifierCV(clf, method="foo")
assert_raises(ValueError, clf_invalid_method.fit, X_train, y_train)
# base-estimators should provide either decision_function or
# predict_proba (most regressors, for instance, should fail)
clf_base_regressor = \
CalibratedClassifierCV(RandomForestRegressor(), method="sigmoid")
assert_raises(RuntimeError, clf_base_regressor.fit, X_train, y_train)
def test_sample_weight_warning():
n_samples = 100
X, y = make_classification(n_samples=2 * n_samples, n_features=6,
random_state=42)
sample_weight = np.random.RandomState(seed=42).uniform(size=len(y))
X_train, y_train, sw_train = \
X[:n_samples], y[:n_samples], sample_weight[:n_samples]
X_test = X[n_samples:]
for method in ['sigmoid', 'isotonic']:
base_estimator = LinearSVC(random_state=42)
calibrated_clf = CalibratedClassifierCV(base_estimator, method=method)
# LinearSVC does not currently support sample weights but they
# can still be used for the calibration step (with a warning)
msg = "LinearSVC does not support sample_weight."
assert_warns_message(
UserWarning, msg,
calibrated_clf.fit, X_train, y_train, sample_weight=sw_train)
probs_with_sw = calibrated_clf.predict_proba(X_test)
# As the weights are used for the calibration, they should still yield
        # different predictions
calibrated_clf.fit(X_train, y_train)
probs_without_sw = calibrated_clf.predict_proba(X_test)
diff = np.linalg.norm(probs_with_sw - probs_without_sw)
assert_greater(diff, 0.1)
def test_calibration_multiclass():
"""Test calibration for multiclass """
# test multi-class setting with classifier that implements
# only decision function
clf = LinearSVC()
X, y_idx = make_blobs(n_samples=100, n_features=2, random_state=42,
centers=3, cluster_std=3.0)
# Use categorical labels to check that CalibratedClassifierCV supports
# them correctly
target_names = np.array(['a', 'b', 'c'])
y = target_names[y_idx]
X_train, y_train = X[::2], y[::2]
X_test, y_test = X[1::2], y[1::2]
clf.fit(X_train, y_train)
for method in ['isotonic', 'sigmoid']:
cal_clf = CalibratedClassifierCV(clf, method=method, cv=2)
cal_clf.fit(X_train, y_train)
probas = cal_clf.predict_proba(X_test)
assert_array_almost_equal(np.sum(probas, axis=1), np.ones(len(X_test)))
# Check that log-loss of calibrated classifier is smaller than
# log-loss of naively turned OvR decision function to probabilities
# via softmax
def softmax(y_pred):
e = np.exp(-y_pred)
return e / e.sum(axis=1).reshape(-1, 1)
uncalibrated_log_loss = \
log_loss(y_test, softmax(clf.decision_function(X_test)))
calibrated_log_loss = log_loss(y_test, probas)
assert_greater_equal(uncalibrated_log_loss, calibrated_log_loss)
# Test that calibration of a multiclass classifier decreases log-loss
# for RandomForestClassifier
X, y = make_blobs(n_samples=100, n_features=2, random_state=42,
cluster_std=3.0)
X_train, y_train = X[::2], y[::2]
X_test, y_test = X[1::2], y[1::2]
clf = RandomForestClassifier(n_estimators=10, random_state=42)
clf.fit(X_train, y_train)
clf_probs = clf.predict_proba(X_test)
loss = log_loss(y_test, clf_probs)
for method in ['isotonic', 'sigmoid']:
cal_clf = CalibratedClassifierCV(clf, method=method, cv=3)
cal_clf.fit(X_train, y_train)
cal_clf_probs = cal_clf.predict_proba(X_test)
cal_loss = log_loss(y_test, cal_clf_probs)
assert_greater(loss, cal_loss)
def test_calibration_prefit():
"""Test calibration for prefitted classifiers"""
n_samples = 50
X, y = make_classification(n_samples=3 * n_samples, n_features=6,
random_state=42)
sample_weight = np.random.RandomState(seed=42).uniform(size=y.size)
X -= X.min() # MultinomialNB only allows positive X
# split train and test
X_train, y_train, sw_train = \
X[:n_samples], y[:n_samples], sample_weight[:n_samples]
X_calib, y_calib, sw_calib = \
X[n_samples:2 * n_samples], y[n_samples:2 * n_samples], \
sample_weight[n_samples:2 * n_samples]
X_test, y_test = X[2 * n_samples:], y[2 * n_samples:]
# Naive-Bayes
clf = MultinomialNB()
clf.fit(X_train, y_train, sw_train)
prob_pos_clf = clf.predict_proba(X_test)[:, 1]
# Naive Bayes with calibration
for this_X_calib, this_X_test in [(X_calib, X_test),
(sparse.csr_matrix(X_calib),
sparse.csr_matrix(X_test))]:
for method in ['isotonic', 'sigmoid']:
pc_clf = CalibratedClassifierCV(clf, method=method, cv="prefit")
for sw in [sw_calib, None]:
pc_clf.fit(this_X_calib, y_calib, sample_weight=sw)
y_prob = pc_clf.predict_proba(this_X_test)
y_pred = pc_clf.predict(this_X_test)
prob_pos_pc_clf = y_prob[:, 1]
assert_array_equal(y_pred,
np.array([0, 1])[np.argmax(y_prob, axis=1)])
assert_greater(brier_score_loss(y_test, prob_pos_clf),
brier_score_loss(y_test, prob_pos_pc_clf))
def test_sigmoid_calibration():
"""Test calibration values with Platt sigmoid model"""
exF = np.array([5, -4, 1.0])
exY = np.array([1, -1, -1])
# computed from my python port of the C++ code in LibSVM
AB_lin_libsvm = np.array([-0.20261354391187855, 0.65236314980010512])
assert_array_almost_equal(AB_lin_libsvm,
_sigmoid_calibration(exF, exY), 3)
lin_prob = 1. / (1. + np.exp(AB_lin_libsvm[0] * exF + AB_lin_libsvm[1]))
sk_prob = _SigmoidCalibration().fit(exF, exY).predict(exF)
assert_array_almost_equal(lin_prob, sk_prob, 6)
# check that _SigmoidCalibration().fit only accepts 1d array or 2d column
# arrays
assert_raises(ValueError, _SigmoidCalibration().fit,
np.vstack((exF, exF)), exY)
def test_calibration_curve():
"""Check calibration_curve function"""
y_true = np.array([0, 0, 0, 1, 1, 1])
y_pred = np.array([0., 0.1, 0.2, 0.8, 0.9, 1.])
prob_true, prob_pred = calibration_curve(y_true, y_pred, n_bins=2)
prob_true_unnormalized, prob_pred_unnormalized = \
calibration_curve(y_true, y_pred * 2, n_bins=2, normalize=True)
assert_equal(len(prob_true), len(prob_pred))
assert_equal(len(prob_true), 2)
assert_almost_equal(prob_true, [0, 1])
assert_almost_equal(prob_pred, [0.1, 0.9])
assert_almost_equal(prob_true, prob_true_unnormalized)
assert_almost_equal(prob_pred, prob_pred_unnormalized)
# probabilities outside [0, 1] should not be accepted when normalize
# is set to False
assert_raises(ValueError, calibration_curve, [1.1], [-0.1],
normalize=False)
def test_calibration_nan_imputer():
"""Test that calibration can accept nan"""
X, y = make_classification(n_samples=10, n_features=2,
n_informative=2, n_redundant=0,
random_state=42)
X[0, 0] = np.nan
clf = Pipeline(
[('imputer', Imputer()),
('rf', RandomForestClassifier(n_estimators=1))])
clf_c = CalibratedClassifierCV(clf, cv=2, method='isotonic')
clf_c.fit(X, y)
clf_c.predict(X)
|
bsd-3-clause
|
aetilley/scikit-learn
|
examples/ensemble/plot_gradient_boosting_oob.py
|
230
|
4762
|
"""
======================================
Gradient Boosting Out-of-Bag estimates
======================================
Out-of-bag (OOB) estimates can be a useful heuristic to estimate
the "optimal" number of boosting iterations.
OOB estimates are almost identical to cross-validation estimates but
they can be computed on-the-fly without the need for repeated model
fitting.
OOB estimates are only available for Stochastic Gradient Boosting
(i.e. ``subsample < 1.0``); the estimates are derived from the improvement
in loss based on the examples not included in the bootstrap sample
(the so-called out-of-bag examples).
The OOB estimator is a pessimistic estimator of the true
test loss, but remains a fairly good approximation for a small number of trees.
The figure shows the cumulative sum of the negative OOB improvements
as a function of the boosting iteration. As you can see, it tracks the test
loss for the first hundred iterations but then diverges in a
pessimistic way.
The figure also shows the performance of 3-fold cross validation which
usually gives a better estimate of the test loss
but is computationally more demanding.
"""
print(__doc__)
# Author: Peter Prettenhofer <[email protected]>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import ensemble
from sklearn.cross_validation import KFold
from sklearn.cross_validation import train_test_split
# Generate data (adapted from G. Ridgeway's gbm example)
n_samples = 1000
random_state = np.random.RandomState(13)
x1 = random_state.uniform(size=n_samples)
x2 = random_state.uniform(size=n_samples)
x3 = random_state.randint(0, 4, size=n_samples)
p = 1 / (1.0 + np.exp(-(np.sin(3 * x1) - 4 * x2 + x3)))
y = random_state.binomial(1, p, size=n_samples)
X = np.c_[x1, x2, x3]
X = X.astype(np.float32)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.5,
random_state=9)
# Fit classifier with out-of-bag estimates
params = {'n_estimators': 1200, 'max_depth': 3, 'subsample': 0.5,
'learning_rate': 0.01, 'min_samples_leaf': 1, 'random_state': 3}
clf = ensemble.GradientBoostingClassifier(**params)
clf.fit(X_train, y_train)
acc = clf.score(X_test, y_test)
print("Accuracy: {:.4f}".format(acc))
n_estimators = params['n_estimators']
x = np.arange(n_estimators) + 1
def heldout_score(clf, X_test, y_test):
"""compute deviance scores on ``X_test`` and ``y_test``. """
score = np.zeros((n_estimators,), dtype=np.float64)
for i, y_pred in enumerate(clf.staged_decision_function(X_test)):
score[i] = clf.loss_(y_test, y_pred)
return score
def cv_estimate(n_folds=3):
cv = KFold(n=X_train.shape[0], n_folds=n_folds)
cv_clf = ensemble.GradientBoostingClassifier(**params)
val_scores = np.zeros((n_estimators,), dtype=np.float64)
for train, test in cv:
cv_clf.fit(X_train[train], y_train[train])
val_scores += heldout_score(cv_clf, X_train[test], y_train[test])
val_scores /= n_folds
return val_scores
# Estimate best n_estimator using cross-validation
cv_score = cv_estimate(3)
# Compute best n_estimator for test data
test_score = heldout_score(clf, X_test, y_test)
# negative cumulative sum of oob improvements
cumsum = -np.cumsum(clf.oob_improvement_)
# min loss according to OOB
oob_best_iter = x[np.argmin(cumsum)]
# min loss according to test (normalize such that first loss is 0)
test_score -= test_score[0]
test_best_iter = x[np.argmin(test_score)]
# min loss according to cv (normalize such that first loss is 0)
cv_score -= cv_score[0]
cv_best_iter = x[np.argmin(cv_score)]
# color brew for the three curves
oob_color = list(map(lambda x: x / 256.0, (190, 174, 212)))
test_color = list(map(lambda x: x / 256.0, (127, 201, 127)))
cv_color = list(map(lambda x: x / 256.0, (253, 192, 134)))
# plot curves and vertical lines for best iterations
plt.plot(x, cumsum, label='OOB loss', color=oob_color)
plt.plot(x, test_score, label='Test loss', color=test_color)
plt.plot(x, cv_score, label='CV loss', color=cv_color)
plt.axvline(x=oob_best_iter, color=oob_color)
plt.axvline(x=test_best_iter, color=test_color)
plt.axvline(x=cv_best_iter, color=cv_color)
# add three vertical lines to xticks
xticks = plt.xticks()
xticks_pos = np.array(xticks[0].tolist() +
[oob_best_iter, cv_best_iter, test_best_iter])
xticks_label = np.array(list(map(lambda t: int(t), xticks[0])) +
['OOB', 'CV', 'Test'])
ind = np.argsort(xticks_pos)
xticks_pos = xticks_pos[ind]
xticks_label = xticks_label[ind]
plt.xticks(xticks_pos, xticks_label)
plt.legend(loc='upper right')
plt.ylabel('normalized loss')
plt.xlabel('number of iterations')
plt.show()
|
bsd-3-clause
|
ldirer/scikit-learn
|
sklearn/utils/tests/test_testing.py
|
7
|
8098
|
import warnings
import unittest
import sys
import numpy as np
from scipy import sparse
from sklearn.utils.testing import (
assert_raises,
assert_less,
assert_greater,
assert_less_equal,
assert_greater_equal,
assert_warns,
assert_no_warnings,
assert_equal,
set_random_state,
assert_raise_message,
assert_allclose_dense_sparse,
ignore_warnings)
from sklearn.tree import DecisionTreeClassifier
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
def test_assert_less():
assert_less(0, 1)
assert_raises(AssertionError, assert_less, 1, 0)
def test_assert_greater():
assert_greater(1, 0)
assert_raises(AssertionError, assert_greater, 0, 1)
def test_assert_less_equal():
assert_less_equal(0, 1)
assert_less_equal(1, 1)
assert_raises(AssertionError, assert_less_equal, 1, 0)
def test_assert_greater_equal():
assert_greater_equal(1, 0)
assert_greater_equal(1, 1)
assert_raises(AssertionError, assert_greater_equal, 0, 1)
def test_set_random_state():
lda = LinearDiscriminantAnalysis()
tree = DecisionTreeClassifier()
# Linear Discriminant Analysis doesn't have random state: smoke test
set_random_state(lda, 3)
set_random_state(tree, 3)
assert_equal(tree.random_state, 3)
def test_assert_allclose_dense_sparse():
x = np.arange(9).reshape(3, 3)
msg = "Not equal to tolerance "
y = sparse.csc_matrix(x)
for X in [x, y]:
# basic compare
assert_raise_message(AssertionError, msg, assert_allclose_dense_sparse,
X, X * 2)
assert_allclose_dense_sparse(X, X)
assert_raise_message(ValueError, "Can only compare two sparse",
assert_allclose_dense_sparse, x, y)
A = sparse.diags(np.ones(5), offsets=0).tocsr()
B = sparse.csr_matrix(np.ones((1, 5)))
assert_raise_message(AssertionError, "Arrays are not equal",
assert_allclose_dense_sparse, B, A)
def test_assert_raise_message():
def _raise_ValueError(message):
raise ValueError(message)
def _no_raise():
pass
assert_raise_message(ValueError, "test",
_raise_ValueError, "test")
assert_raises(AssertionError,
assert_raise_message, ValueError, "something else",
_raise_ValueError, "test")
assert_raises(ValueError,
assert_raise_message, TypeError, "something else",
_raise_ValueError, "test")
assert_raises(AssertionError,
assert_raise_message, ValueError, "test",
_no_raise)
# multiple exceptions in a tuple
assert_raises(AssertionError,
assert_raise_message, (ValueError, AttributeError),
"test", _no_raise)
def test_ignore_warning():
    # This checks that the ignore_warnings decorator and context manager are
    # working as expected
def _warning_function():
warnings.warn("deprecation warning", DeprecationWarning)
def _multiple_warning_function():
warnings.warn("deprecation warning", DeprecationWarning)
warnings.warn("deprecation warning")
# Check the function directly
assert_no_warnings(ignore_warnings(_warning_function))
assert_no_warnings(ignore_warnings(_warning_function,
category=DeprecationWarning))
assert_warns(DeprecationWarning, ignore_warnings(_warning_function,
category=UserWarning))
assert_warns(UserWarning,
ignore_warnings(_multiple_warning_function,
category=DeprecationWarning))
assert_warns(DeprecationWarning,
ignore_warnings(_multiple_warning_function,
category=UserWarning))
assert_no_warnings(ignore_warnings(_warning_function,
category=(DeprecationWarning,
UserWarning)))
# Check the decorator
@ignore_warnings
def decorator_no_warning():
_warning_function()
_multiple_warning_function()
@ignore_warnings(category=(DeprecationWarning, UserWarning))
def decorator_no_warning_multiple():
_multiple_warning_function()
@ignore_warnings(category=DeprecationWarning)
def decorator_no_deprecation_warning():
_warning_function()
@ignore_warnings(category=UserWarning)
def decorator_no_user_warning():
_warning_function()
@ignore_warnings(category=DeprecationWarning)
def decorator_no_deprecation_multiple_warning():
_multiple_warning_function()
@ignore_warnings(category=UserWarning)
def decorator_no_user_multiple_warning():
_multiple_warning_function()
assert_no_warnings(decorator_no_warning)
assert_no_warnings(decorator_no_warning_multiple)
assert_no_warnings(decorator_no_deprecation_warning)
assert_warns(DeprecationWarning, decorator_no_user_warning)
assert_warns(UserWarning, decorator_no_deprecation_multiple_warning)
assert_warns(DeprecationWarning, decorator_no_user_multiple_warning)
# Check the context manager
def context_manager_no_warning():
with ignore_warnings():
_warning_function()
def context_manager_no_warning_multiple():
with ignore_warnings(category=(DeprecationWarning, UserWarning)):
_multiple_warning_function()
def context_manager_no_deprecation_warning():
with ignore_warnings(category=DeprecationWarning):
_warning_function()
def context_manager_no_user_warning():
with ignore_warnings(category=UserWarning):
_warning_function()
def context_manager_no_deprecation_multiple_warning():
with ignore_warnings(category=DeprecationWarning):
_multiple_warning_function()
def context_manager_no_user_multiple_warning():
with ignore_warnings(category=UserWarning):
_multiple_warning_function()
assert_no_warnings(context_manager_no_warning)
assert_no_warnings(context_manager_no_warning_multiple)
assert_no_warnings(context_manager_no_deprecation_warning)
assert_warns(DeprecationWarning, context_manager_no_user_warning)
assert_warns(UserWarning, context_manager_no_deprecation_multiple_warning)
assert_warns(DeprecationWarning, context_manager_no_user_multiple_warning)
# This class is inspired by numpy 1.7 with an alteration to check that
# the warning filters are reset after calls to assert_warns.
# This assert_warns behavior is specific to scikit-learn because
# `clean_warning_registry()` is called internally by assert_warns
# and clears all previous filters.
class TestWarns(unittest.TestCase):
def test_warn(self):
def f():
warnings.warn("yo")
return 3
# Test that assert_warns is not impacted by externally set
# filters and is reset internally.
# This is because `clean_warning_registry()` is called internally by
# assert_warns and clears all previous filters.
warnings.simplefilter("ignore", UserWarning)
assert_equal(assert_warns(UserWarning, f), 3)
# Test that the warning registry is empty after assert_warns
assert_equal(sys.modules['warnings'].filters, [])
assert_raises(AssertionError, assert_no_warnings, f)
assert_equal(assert_no_warnings(lambda x: x, 1), 1)
def test_warn_wrong_warning(self):
def f():
warnings.warn("yo", DeprecationWarning)
failed = False
filters = sys.modules['warnings'].filters[:]
try:
try:
# Should raise an AssertionError
assert_warns(UserWarning, f)
failed = True
except AssertionError:
pass
finally:
sys.modules['warnings'].filters = filters
if failed:
raise AssertionError("wrong warning caught by assert_warn")
|
bsd-3-clause
|
AndreLamurias/IBEnt
|
src/classification/results.py
|
1
|
22825
|
import io
import logging
import cPickle as pickle
import os
import time
import argparse
import sys
from sklearn.dummy import DummyClassifier
from sklearn.externals import joblib
from sklearn.naive_bayes import MultinomialNB
from sklearn.pipeline import Pipeline
sys.path.append(os.path.abspath(os.path.dirname(__file__) + '../..'))
from config.corpus_paths import paths
from text.corpus import Corpus
from config import config
from text.offset import Offset, perfect_overlap, contained_by, Offsets
SINGLE_TAG = "single"
START_TAG = "start"
END_TAG = "end"
MIDDLE_TAG = "middle"
OTHER_TAG = "other"
class ResultsRE(object):
def __init__(self, name):
self.pairs = {}
self.name = name
self.corpus = None
self.document_pairs = {}
def save(self, path):
# no need to save the whole corpus, only the entities of each sentence are necessary
# because the full corpus is already saved in a different pickle
logging.info("Saving results to {}".format(path))
reduced_corpus = {}
npairs = 0
for did in self.corpus.documents:
self.document_pairs[did] = self.corpus.documents[did].pairs
npairs += len(self.document_pairs[did].pairs)
reduced_corpus[did] = {}
for sentence in self.corpus.documents[did].sentences:
reduced_corpus[did][sentence.sid] = sentence.entities
self.corpus = reduced_corpus
pickle.dump(self, open(path, "wb"))
def load_corpus(self, goldstd):
logging.info("loading corpus %s" % paths[goldstd]["corpus"])
corpus = pickle.load(open(paths[goldstd]["corpus"]))
for did in corpus.documents:
for sentence in corpus.documents[did].sentences:
sentence.entities = self.corpus[did][sentence.sid]
corpus.documents[did].pairs = self.document_pairs[did]
#for entity in sentence.entities.elist[options.models]:
# print entity.chebi_score,
self.corpus = corpus
def convert_to(self, output_format, output_path, eset):
if output_format == "brat":
self.convert_to_brat(output_path, eset)
def convert_to_brat(self, output_path, eset):
if not os.path.exists(output_path):
os.makedirs(output_path)
for did in self.corpus.documents:
eid_map = {}
with io.open("{}/{}.ann".format(output_path, did), "w", encoding='utf-8') as output_file:
ecount = 1
for sentence in self.corpus.documents[did].sentences:
if eset in sentence.entities.elist:
print "writing...", eset
for entity in sentence.entities.elist[eset]:
eid_map[entity.eid] = "T{0}".format(ecount)
output_file.write(u"T{0}\t{1.type} {1.dstart} {1.dend}\t{1.text}\n".format(ecount, entity))
ecount += 1
rcount = 1
for p in self.document_pairs[did].pairs:
output_file.write(u"R{0}\tmiRNA-regulation Arg1:{1} Arg2:{2}\n".format(rcount,
eid_map[p.entities[0].eid],
eid_map[p.entities[1].eid]))
rcount += 1
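# Illustrative sketch (not part of the original code): the .ann files written by
# convert_to_brat above follow the brat standoff format, with tab-separated fields.
# The entity types, offsets and texts below are hypothetical examples of that layout:
#   T1    mirna 10 18    miR-21
#   T2    protein 112 116    TP53
#   R1    miRNA-regulation Arg1:T1 Arg2:T2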
class ResultsNER(object):
"""Store a set of entities related to a corpus or input text """
def __init__(self, name):
self.entities = {}
self.name = name
self.corpus = Corpus(self.name)
self.basedir = "models/ensemble/"
def get_ensemble_results(self, ensemble, corpus, model):
"""
Go through every entity in the corpus; if the ensemble predicted it as true, keep it in self.entities,
otherwise discard it.
"""
for did in corpus.documents:
for sentence in corpus.documents[did].sentences:
new_entities = []
for entity in sentence.entities.elist[model]:
sentence_type = "A"
if sentence.sid.endswith("s0"):
sentence_type = "T"
id = (did, "{0}:{1}:{2}".format(sentence_type, entity.dstart, entity.dend), "1")
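# (illustrative only, hypothetical values) each id is a tuple of the document id,
# a "<sentence-type>:<doc-start>:<doc-end>" string and the constant "1",
# e.g. ("PMC123456", "T:120:128", "1"); it has to match the ids stored in ensemble.ids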
if id not in ensemble.ids:
logging.debug("this is new! {0}".format(entity))
continue
predicted_index = ensemble.ids.index(id)
#logging.info(predicted_index)
if ensemble.predicted[predicted_index][1] > 0.5:
self.entities[entity.eid] = entity
#logging.info("good entity: {}".format(entity.text.encode("utf8")))
new_entities.append(entity)
#else:
# logging.info("bad entity: {}".format(entity.text.encode("utf8")))
sentence.entities.elist[self.name] = new_entities
self.corpus = corpus
def save(self, path):
# no need to save the whole corpus, only the entities of each sentence are necessary
# because the full corpus is already saved in a different pickle
logging.info("Saving results to {}".format(path))
reduced_corpus = {}
for did in self.corpus.documents:
reduced_corpus[did] = {}
for sentence in self.corpus.documents[did].sentences:
reduced_corpus[did][sentence.sid] = sentence.entities
self.corpus = reduced_corpus
pickle.dump(self, open(path, "wb"))
def save_chemdner(self):
pass
def load_corpus(self, goldstd):
logging.info("loading corpus %s" % paths[goldstd]["corpus"])
corpus = pickle.load(open(paths[goldstd]["corpus"]))
for did in corpus.documents:
if did not in self.corpus:
logging.info("no results for {}".format(did))
continue
for sentence in corpus.documents[did].sentences:
sentence.entities = self.corpus[did][sentence.sid]
#for entity in sentence.entities.elist[options.models]:
# print entity.chebi_score,
self.corpus = corpus
def combine_results(self, basemodel, name):
# add another set of annotations to each sentence, ending in combined
# each entity from this dataset should have a unique ID and a recognized_by attribute
scores = 0
total = 0
for did in self.corpus.documents:
#logging.debug(did)
for sentence in self.corpus.documents[did].sentences:
#logging.debug(sentence.sid)
sentence.entities.combine_entities(basemodel, name)
#for e in sentence.entities.elist[name]:
# total += 1
#logging.info("{} - {}".format(e.text, e.score))
# if len(e.recognized_by) > 1:
# scores += sum(e.score.values())/len(e.score.values())
# elif len == 1:
# scores += e.score.values()[0]
#if e.score < 0.8:
# logging.info("{0} score of {1}".format(e.text.encode("utf-8"),
# e.score))
if total > 0:
logging.info("{0} entities average confidence of {1}".format(total, scores/total))
def add_results(self, results):
all_models = set()
# merge the results of this set with another set
dids = set(self.corpus.documents.keys()).union(set(results.corpus.documents.keys()))
for did in dids:
# one result set may contain more or fewer documents than this one
# in that case, simply add the document to the other result set
if did not in self.corpus.documents:
self.corpus.documents[did] = results.corpus.documents[did]
elif did not in results.corpus.documents:
results.corpus.documents[did] = self.corpus.documents[did]
else: # merge entities
for sentence in results.corpus.documents[did].sentences:
base_sentence = self.corpus.documents[did].get_sentence(sentence.sid)
# add every new model in the new result set to this one
for model in sentence.entities.elist:
if model != "goldstandard" and model not in base_sentence.entities.elist:
base_sentence.entities.elist[model] = sentence.entities.elist[model]
all_models = all_models.union(set(base_sentence.entities.elist.keys()))
# print all_models
def train_ensemble(self, pipeline, modelname, etype):
train_data, labels, offsets = self.generate_data(etype)
print "training ensemble classifier..."
pipeline = pipeline.fit(train_data, labels)
if not os.path.exists(self.basedir + modelname):
os.makedirs(self.basedir + modelname)
logging.info("Training complete, saving to {}/{}/{}.pkl".format(self.basedir, modelname, modelname))
joblib.dump(pipeline, "{}/{}/{}.pkl".format(self.basedir, modelname, modelname))
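# Illustrative sketch (not from the original code) of a pipeline that could be passed
# to train_ensemble/test_ensemble, built from the classes imported at the top of this
# module; the model name and entity type below are hypothetical:
#   pipeline = Pipeline([("clf", MultinomialNB())])
#   results.train_ensemble(pipeline, "my_ensemble", "mirna")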
def test_ensemble(self, pipeline, modelname, etype):
train_data, labels, offsets = self.generate_data(etype, mode="test")
pred = pipeline.predict(train_data)
print pred
for i, p in enumerate(pred):
if p == True:
sentence = self.corpus.get_sentence(offsets.keys()[i][0])
sentence.tag_entity(offsets.keys()[i][1], offsets.keys()[i][2], etype, source=modelname)
def generate_data(self, etype, mode="train"):
"""
Generate the data used to train or test a scikit-learn pipeline that classifies entities
as correct or incorrect; the features consist of the scores of the classifiers that identified each entity.
:param etype: entity type to consider
:param mode: "train" or "test"
:return: feature vectors, labels and the corresponding offsets
"""
offsets = {}
features = set()
gs_labels = {}
# collect offsets from every model (except gold standard) and add classifier score
all_models = set()
# merge the results of this set with another set
for did in self.corpus.documents:
# logging.debug(did)
for sentence in self.corpus.documents[did].sentences:
for s in sentence.entities.elist:
# logging.info("%s - %s" % (self.sid, s))
# use everything except what's already combined and gold standard
if not s.startswith("goldstandard") and s.endswith(etype):
features.add(s)
for e in sentence.entities.elist[s]:
# if any([word in e.text for word in self.stopwords]):
# logging.info("ignored stopword %s" % e.text)
# continue
# eid_alt = e.sid + ":" + str(e.dstart) + ':' + str(e.dend)
#next_eid = "{0}.e{1}".format(e.sid, len(combined))
#eid_offset = Offset(e.dstart, e.dend, text=e.text, sid=e.sid, eid=next_eid)
# check for perfect overlaps only
offset = (sentence.sid, e.start, e.end)
if offset not in offsets:
offsets[offset] = {}
offsets[offset][s] = e.score
elif mode == "train" and s == "goldstandard_" + etype:
for e in sentence.entities.elist[s]:
offset = (sentence.sid, e.start, e.end)
gs_labels[offset] = True
train_data = []
train_labels = []
features = sorted(list(features))
for o in offsets:
of = []
for f in features:
if f in offsets[o]:
of.append(offsets[o][f])
else:
of.append(0)
train_data.append(of)
if mode == "train" and gs_labels.get(o) == True:
train_labels.append(True)
else:
train_labels.append(False)
# print features
# for i, l in enumerate(train_labels[:10]):
# print train_data[i], l
return train_data, train_labels, offsets
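# Illustrative sketch (hypothetical classifier names and scores) of the data built by
# generate_data when two base classifiers tagged the same corpus:
#   sorted features -> ["crf_mirna", "svm_mirna"]   (one column per classifier)
#   train_data      -> [[0.91, 0.0], [0.55, 0.72]]  (one row per candidate offset)
#   train_labels    -> [True, False]                (True if the offset is in the gold standard)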
def convert_to(self, output_format, output_path, eset):
if output_format == "brat":
self.convert_to_brat(output_path, eset)
def convert_to_brat(self, output_path, eset):
if not os.path.exists(output_path):
os.makedirs(output_path)
for did in self.corpus.documents:
with io.open("{}/{}.ann".format(output_path, did), "w", encoding='utf-8') as output_file:
ecount = 0
for sentence in self.corpus.documents[did].sentences:
if eset in sentence.entities.elist:
print "writing...", eset
for entity in sentence.entities.elist[eset]:
output_file.write(u"T{0}\t{1.type} {1.dstart} {1.dend}\t{1.text}\n".format(ecount, entity))
ecount += 1
def import_chemdner(self, filepath):
with io.open(filepath, encoding="utf-8") as inputfile:
next(inputfile)
for l in inputfile:
values = l.split("\t")
did = values[0]
sectionid = values[1]
# print l
start, end, text = int(values[2]), int(values[3]), values[5]
confidence = values[4]
if did in self.corpus.documents:
entity = self.corpus.documents[did].tag_chemdner_entity(start, end, "unknown", source=self.model,
text=text, confidence=confidence, doct=sectionid, score=1)
if entity:
self.entities[entity.eid] = entity
#for d in self.corpus.documents:
# for s in self.corpus.documents[d].sentences:
# print s.entities.elist.keys()
class ResultSetNER(object):
"""
Organize and process a set of results from a TaggerCollection
"""
def __init__(self, corpus, basepath):
self.results = [] # list of ResultsNER
self.corpus = corpus
self.basepath = basepath
def add_results(self, res):
self.results.append(res)
def combine_results(self):
"""
Combine the results from multiple classifiers stored in self.results.
Process these results, and generate a ResultsNER object
:return: ResultsNER object of the combined results of the classifiers
"""
final_results = ResultsNER(self.basepath)
final_results.corpus = self.corpus
return final_results
def combine_results(modelname, results, resultsname, etype, models):
all_results = ResultsNER(resultsname)
# first results are used as reference
all_results.corpus = results[0].corpus
for r in results:
print r.path
for did in r.corpus.documents:
for sentence in r.corpus.documents[did].sentences:
ref_sentence = all_results.corpus.documents[did].get_sentence(sentence.sid)
if sentence.entities:
offsets = Offsets()
if modelname not in ref_sentence.entities.elist:
all_results.corpus.documents[did].get_sentence(sentence.sid).entities.elist[modelname] = []
for s in sentence.entities.elist:
# print s
if s in models:
# print s
for e in sentence.entities.elist[s]:
if e.type == etype:
eid_offset = Offset(e.dstart, e.dend, text=e.text, sid=e.sid)
exclude = [perfect_overlap]
toadd, v, overlapping, to_exclude = offsets.add_offset(eid_offset, exclude_this_if=exclude, exclude_others_if=[])
if toadd:
# print "added:", r.path, s, e.text
ref_sentence.entities.elist[modelname].append(e)
return all_results
def main():
start_time = time.time()
parser = argparse.ArgumentParser(description='')
parser.add_argument("action", default="evaluate", help="Actions to be performed.")
parser.add_argument("goldstd", default="chemdner_sample", help="Gold standard to be used.",
choices=paths.keys())
parser.add_argument("--corpus", dest="corpus",
default="data/chemdner_sample_abstracts.txt.pickle",
help="format path")
parser.add_argument("--results", dest="results", help="Results object pickle.", nargs='+')
parser.add_argument("--models", dest="models", help="model destination path, without extension", nargs='+')
parser.add_argument("--finalmodel", dest="finalmodel", help="model destination path, without extension") #, nargs='+')
parser.add_argument("--ensemble", dest="ensemble", help="name/path of ensemble classifier", default="combined")
parser.add_argument("--log", action="store", dest="loglevel", default="WARNING", help="Log level")
parser.add_argument("-o", "--output", action="store", dest="output")
parser.add_argument("--submodels", default="", nargs='+', help="sub types of classifiers"),
parser.add_argument("--features", default=["chebi", "case", "number", "greek", "dashes", "commas", "length", "chemwords", "bow"],
nargs='+', help="aditional features for ensemble classifier")
parser.add_argument("--doctype", dest="doctype", help="type of document to be considered", default="all")
parser.add_argument("--entitytype", dest="etype", help="type of entities to be considered", default="all")
parser.add_argument("--external", action="store_true", default=False, help="Run external evaluation script, depends on corpus type")
parser.add_argument("-i", "--input", action="store", help="input file to be convert to IBEnt results.")
options = parser.parse_args()
numeric_level = getattr(logging, options.loglevel.upper(), None)
if not isinstance(numeric_level, int):
raise ValueError('Invalid log level: %s' % options.loglevel)
while len(logging.root.handlers) > 0:
logging.root.removeHandler(logging.root.handlers[-1])
logging_format = '%(asctime)s %(levelname)s %(filename)s:%(lineno)s:%(funcName)s %(message)s'
logging.basicConfig(level=numeric_level, format=logging_format)
logging.getLogger().setLevel(numeric_level)
logging.info("Processing action {0} on {1}".format(options.action, options.goldstd))
logging.info("loading results %s" % options.results + ".pickle")
results_list = []
for r in options.results:
if os.path.exists(r + ".pickle"):
results = pickle.load(open(r + ".pickle", 'rb'))
results.path = r
results.load_corpus(options.goldstd)
results_list.append(results)
else:
print "results not found"
if options.action == "combine":
# add another set of annotations to each sentence, ending in combined
# each entity from this dataset should have a unique ID and a recognized_by attribute
logging.info("combining results...")
#new_name = "_".join([m.split("/")[-1] for m in options.results])
#print new_name
results = combine_results(options.finalmodel, results_list, options.output, options.etype, options.models)
results.save(options.output + ".pickle")
if options.action == "import":
# import results from a different format to IBEnt
# for now assume CHEMDNER format
results = ResultsNER(options.results[0])
logging.info("loading corpus...")
results.corpus = pickle.load(open(paths[options.goldstd]["corpus"]))
results.model = options.models[0]
results.import_chemdner(options.input)
results.save(results.name + ".pickle")
"""elif options.action in ("train_ensemble", "test_ensemble"):
if "annotations" in config.paths[options.goldstd]:
logging.info("loading gold standard %s" % config.paths[options.goldstd]["annotations"])
goldset = get_gold_ann_set(config.paths[options.goldstd]["format"], config.paths[options.goldstd]["annotations"],
options.etype, config.paths[options.goldstd]["text"])
else:
goldset = None
logging.info("using thresholds: chebi > {!s} ssm > {!s}".format(options.chebi, options.ssm))
results.load_corpus(options.goldstd)
results.path = options.results
ths = {"chebi": options.chebi, "ssm": options.ssm}
if "ensemble" in options.action:
if len(options.submodels) > 1:
submodels = []
for s in options.submodels:
submodels += ['_'.join(options.models.split("_")[:-1]) + "_" + s + "_" + t for t in results.corpus.subtypes]
else:
submodels = ['_'.join(options.models.split("_")[:-1]) + "_" + t for t in results.corpus.subtypes]
logging.info("using these features: {}".format(' '.join(submodels)))
if options.action == "train_ensemble":
ensemble = EnsembleNER(options.ensemble, goldset, options.models, types=submodels,
features=options.features)
ensemble.generate_data(results)
ensemble.train()
ensemble.save()
if options.action == "test_ensemble":
ensemble = EnsembleNER(options.ensemble, [], options.models, types=submodels,
features=options.features)
ensemble.load()
ensemble.generate_data(results, supervisioned=False)
ensemble.test()
ensemble_results = ResultsNER(options.models + "_ensemble")
# process the results
ensemble_results.get_ensemble_results(ensemble, results.corpus, options.models)
ensemble_results.path = options.results + "_ensemble"
get_results(ensemble_results, options.models + "_ensemble", goldset, ths, options.rules)"""
total_time = time.time() - start_time
logging.info("Total time: %ss" % total_time)
if __name__ == "__main__":
main()
|
mit
|
kimoonkim/spark
|
python/setup.py
|
10
|
9500
|
#!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import glob
import os
import sys
from setuptools import setup, find_packages
from shutil import copyfile, copytree, rmtree
if sys.version_info < (2, 7):
print("Python versions prior to 2.7 are not supported for pip installed PySpark.",
file=sys.stderr)
exit(-1)
try:
exec(open('pyspark/version.py').read())
except IOError:
print("Failed to load PySpark version file for packaging. You must be in Spark's python dir.",
file=sys.stderr)
sys.exit(-1)
VERSION = __version__
# A temporary path so we can access above the Python project root and fetch scripts and jars we need
TEMP_PATH = "deps"
SPARK_HOME = os.path.abspath("../")
# Provide guidance about how to use setup.py
incorrect_invocation_message = """
If you are installing pyspark from spark source, you must first build Spark and
run sdist.
To build Spark with maven you can run:
./build/mvn -DskipTests clean package
Building the source dist is done in the Python directory:
cd python
python setup.py sdist
pip install dist/*.tar.gz"""
# Figure out where the jars that we need to package with PySpark are located.
JARS_PATH = glob.glob(os.path.join(SPARK_HOME, "assembly/target/scala-*/jars/"))
if len(JARS_PATH) == 1:
JARS_PATH = JARS_PATH[0]
elif (os.path.isfile("../RELEASE") and len(glob.glob("../jars/spark*core*.jar")) == 1):
# Release mode puts the jars in a jars directory
JARS_PATH = os.path.join(SPARK_HOME, "jars")
elif len(JARS_PATH) > 1:
print("Assembly jars exist for multiple scalas ({0}), please cleanup assembly/target".format(
JARS_PATH), file=sys.stderr)
sys.exit(-1)
elif len(JARS_PATH) == 0 and not os.path.exists(TEMP_PATH):
print(incorrect_invocation_message, file=sys.stderr)
sys.exit(-1)
EXAMPLES_PATH = os.path.join(SPARK_HOME, "examples/src/main/python")
SCRIPTS_PATH = os.path.join(SPARK_HOME, "bin")
DATA_PATH = os.path.join(SPARK_HOME, "data")
LICENSES_PATH = os.path.join(SPARK_HOME, "licenses")
SCRIPTS_TARGET = os.path.join(TEMP_PATH, "bin")
JARS_TARGET = os.path.join(TEMP_PATH, "jars")
EXAMPLES_TARGET = os.path.join(TEMP_PATH, "examples")
DATA_TARGET = os.path.join(TEMP_PATH, "data")
LICENSES_TARGET = os.path.join(TEMP_PATH, "licenses")
# Check and see if we are under the spark path in which case we need to build the symlink farm.
# This is important because we only want to build the symlink farm while under Spark; otherwise we
# want to use the symlink farm. And if the symlink farm already exists while under Spark (e.g. a
# partially built sdist) we should error and have the user sort it out.
in_spark = (os.path.isfile("../core/src/main/scala/org/apache/spark/SparkContext.scala") or
(os.path.isfile("../RELEASE") and len(glob.glob("../jars/spark*core*.jar")) == 1))
def _supports_symlinks():
"""Check if the system supports symlinks (e.g. *nix) or not."""
return getattr(os, "symlink", None) is not None
if (in_spark):
# Construct links for setup
try:
os.mkdir(TEMP_PATH)
except:
print("Temp path for symlink to parent already exists {0}".format(TEMP_PATH),
file=sys.stderr)
exit(-1)
try:
# We copy the shell script to be under pyspark/python/pyspark so that the launcher scripts
# find it where expected. The rest of the files aren't copied because they are accessed
# using Python imports instead which will be resolved correctly.
try:
os.makedirs("pyspark/python/pyspark")
except OSError:
# Don't worry if the directory already exists.
pass
copyfile("pyspark/shell.py", "pyspark/python/pyspark/shell.py")
if (in_spark):
# Construct the symlink farm - this is necessary since we can't refer to the path above the
# package root and we need to copy the jars and scripts which are up above the python root.
if _supports_symlinks():
os.symlink(JARS_PATH, JARS_TARGET)
os.symlink(SCRIPTS_PATH, SCRIPTS_TARGET)
os.symlink(EXAMPLES_PATH, EXAMPLES_TARGET)
os.symlink(DATA_PATH, DATA_TARGET)
os.symlink(LICENSES_PATH, LICENSES_TARGET)
else:
# For windows fall back to the slower copytree
copytree(JARS_PATH, JARS_TARGET)
copytree(SCRIPTS_PATH, SCRIPTS_TARGET)
copytree(EXAMPLES_PATH, EXAMPLES_TARGET)
copytree(DATA_PATH, DATA_TARGET)
copytree(LICENSES_PATH, LICENSES_TARGET)
else:
# If we are not inside of SPARK_HOME verify we have the required symlink farm
if not os.path.exists(JARS_TARGET):
print("To build packaging must be in the python directory under the SPARK_HOME.",
file=sys.stderr)
if not os.path.isdir(SCRIPTS_TARGET):
print(incorrect_invocation_message, file=sys.stderr)
exit(-1)
# The scripts directive requires a list of each script path and does not take wildcards.
script_names = os.listdir(SCRIPTS_TARGET)
scripts = list(map(lambda script: os.path.join(SCRIPTS_TARGET, script), script_names))
# We add find_spark_home.py to the bin directory we install so that pip installed PySpark
# will search for SPARK_HOME with Python.
scripts.append("pyspark/find_spark_home.py")
# Parse the README markdown file into rst for PyPI
long_description = "!!!!! missing pandoc do not upload to PyPI !!!!"
try:
import pypandoc
long_description = pypandoc.convert('README.md', 'rst')
except ImportError:
print("Could not import pypandoc - required to package PySpark", file=sys.stderr)
setup(
name='pyspark',
version=VERSION,
description='Apache Spark Python API',
long_description=long_description,
author='Spark Developers',
author_email='[email protected]',
url='https://github.com/apache/spark/tree/master/python',
packages=['pyspark',
'pyspark.mllib',
'pyspark.ml',
'pyspark.sql',
'pyspark.streaming',
'pyspark.bin',
'pyspark.jars',
'pyspark.python.pyspark',
'pyspark.python.lib',
'pyspark.data',
'pyspark.licenses',
'pyspark.examples.src.main.python'],
include_package_data=True,
package_dir={
'pyspark.jars': 'deps/jars',
'pyspark.bin': 'deps/bin',
'pyspark.python.lib': 'lib',
'pyspark.data': 'deps/data',
'pyspark.licenses': 'deps/licenses',
'pyspark.examples.src.main.python': 'deps/examples',
},
package_data={
'pyspark.jars': ['*.jar'],
'pyspark.bin': ['*'],
'pyspark.python.lib': ['*.zip'],
'pyspark.data': ['*.txt', '*.data'],
'pyspark.licenses': ['*.txt'],
'pyspark.examples.src.main.python': ['*.py', '*/*.py']},
scripts=scripts,
license='http://www.apache.org/licenses/LICENSE-2.0',
install_requires=['py4j==0.10.4'],
setup_requires=['pypandoc'],
extras_require={
'ml': ['numpy>=1.7'],
'mllib': ['numpy>=1.7'],
'sql': ['pandas']
},
classifiers=[
'Development Status :: 5 - Production/Stable',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy']
)
finally:
# We only clean up the symlink farm if we were in Spark, otherwise we are installing rather than
# packaging.
if (in_spark):
# Clean up either the symlink farm or the copied version, depending on symlink support
if _supports_symlinks():
os.remove(os.path.join(TEMP_PATH, "jars"))
os.remove(os.path.join(TEMP_PATH, "bin"))
os.remove(os.path.join(TEMP_PATH, "examples"))
os.remove(os.path.join(TEMP_PATH, "data"))
os.remove(os.path.join(TEMP_PATH, "licenses"))
else:
rmtree(os.path.join(TEMP_PATH, "jars"))
rmtree(os.path.join(TEMP_PATH, "bin"))
rmtree(os.path.join(TEMP_PATH, "examples"))
rmtree(os.path.join(TEMP_PATH, "data"))
rmtree(os.path.join(TEMP_PATH, "licenses"))
os.rmdir(TEMP_PATH)
|
apache-2.0
|
gfyoung/pandas
|
pandas/tests/tseries/offsets/test_business_hour.py
|
2
|
41321
|
"""
Tests for offsets.BusinessHour
"""
from datetime import datetime, time as dt_time
import pytest
from pandas._libs.tslibs import Timedelta, Timestamp
from pandas._libs.tslibs.offsets import BDay, BusinessHour, Nano
from pandas import DatetimeIndex, _testing as tm, date_range
from pandas.tests.tseries.offsets.common import Base, assert_offset_equal
class TestBusinessHour(Base):
_offset = BusinessHour
def setup_method(self, method):
self.d = datetime(2014, 7, 1, 10, 00)
self.offset1 = BusinessHour()
self.offset2 = BusinessHour(n=3)
self.offset3 = BusinessHour(n=-1)
self.offset4 = BusinessHour(n=-4)
from datetime import time as dt_time
self.offset5 = BusinessHour(start=dt_time(11, 0), end=dt_time(14, 30))
self.offset6 = BusinessHour(start="20:00", end="05:00")
self.offset7 = BusinessHour(n=-2, start=dt_time(21, 30), end=dt_time(6, 30))
self.offset8 = BusinessHour(start=["09:00", "13:00"], end=["12:00", "17:00"])
self.offset9 = BusinessHour(
n=3, start=["09:00", "22:00"], end=["13:00", "03:00"]
)
self.offset10 = BusinessHour(
n=-1, start=["23:00", "13:00"], end=["02:00", "17:00"]
)
@pytest.mark.parametrize(
"start,end,match",
[
(
dt_time(11, 0, 5),
"17:00",
"time data must be specified only with hour and minute",
),
("AAA", "17:00", "time data must match '%H:%M' format"),
("14:00:05", "17:00", "time data must match '%H:%M' format"),
([], "17:00", "Must include at least 1 start time"),
("09:00", [], "Must include at least 1 end time"),
(
["09:00", "11:00"],
"17:00",
"number of starting time and ending time must be the same",
),
(
["09:00", "11:00"],
["10:00"],
"number of starting time and ending time must be the same",
),
(
["09:00", "11:00"],
["12:00", "20:00"],
r"invalid starting and ending time\(s\): opening hours should not "
"touch or overlap with one another",
),
(
["12:00", "20:00"],
["09:00", "11:00"],
r"invalid starting and ending time\(s\): opening hours should not "
"touch or overlap with one another",
),
],
)
def test_constructor_errors(self, start, end, match):
with pytest.raises(ValueError, match=match):
BusinessHour(start=start, end=end)
def test_different_normalize_equals(self):
# GH#21404 changed __eq__ to return False when `normalize` does not match
offset = self._offset()
offset2 = self._offset(normalize=True)
assert offset != offset2
def test_repr(self):
assert repr(self.offset1) == "<BusinessHour: BH=09:00-17:00>"
assert repr(self.offset2) == "<3 * BusinessHours: BH=09:00-17:00>"
assert repr(self.offset3) == "<-1 * BusinessHour: BH=09:00-17:00>"
assert repr(self.offset4) == "<-4 * BusinessHours: BH=09:00-17:00>"
assert repr(self.offset5) == "<BusinessHour: BH=11:00-14:30>"
assert repr(self.offset6) == "<BusinessHour: BH=20:00-05:00>"
assert repr(self.offset7) == "<-2 * BusinessHours: BH=21:30-06:30>"
assert repr(self.offset8) == "<BusinessHour: BH=09:00-12:00,13:00-17:00>"
assert repr(self.offset9) == "<3 * BusinessHours: BH=09:00-13:00,22:00-03:00>"
assert repr(self.offset10) == "<-1 * BusinessHour: BH=13:00-17:00,23:00-02:00>"
def test_with_offset(self):
expected = Timestamp("2014-07-01 13:00")
assert self.d + BusinessHour() * 3 == expected
assert self.d + BusinessHour(n=3) == expected
@pytest.mark.parametrize(
"offset_name",
["offset1", "offset2", "offset3", "offset4", "offset8", "offset9", "offset10"],
)
def test_eq_attribute(self, offset_name):
offset = getattr(self, offset_name)
assert offset == offset
@pytest.mark.parametrize(
"offset1,offset2",
[
(BusinessHour(start="09:00"), BusinessHour()),
(
BusinessHour(start=["23:00", "13:00"], end=["12:00", "17:00"]),
BusinessHour(start=["13:00", "23:00"], end=["17:00", "12:00"]),
),
],
)
def test_eq(self, offset1, offset2):
assert offset1 == offset2
@pytest.mark.parametrize(
"offset1,offset2",
[
(BusinessHour(), BusinessHour(-1)),
(BusinessHour(start="09:00"), BusinessHour(start="09:01")),
(
BusinessHour(start="09:00", end="17:00"),
BusinessHour(start="17:00", end="09:01"),
),
(
BusinessHour(start=["13:00", "23:00"], end=["18:00", "07:00"]),
BusinessHour(start=["13:00", "23:00"], end=["17:00", "12:00"]),
),
],
)
def test_neq(self, offset1, offset2):
assert offset1 != offset2
@pytest.mark.parametrize(
"offset_name",
["offset1", "offset2", "offset3", "offset4", "offset8", "offset9", "offset10"],
)
def test_hash(self, offset_name):
offset = getattr(self, offset_name)
assert offset == offset
def test_call(self):
with tm.assert_produces_warning(FutureWarning):
# GH#34171 DateOffset.__call__ is deprecated
assert self.offset1(self.d) == datetime(2014, 7, 1, 11)
assert self.offset2(self.d) == datetime(2014, 7, 1, 13)
assert self.offset3(self.d) == datetime(2014, 6, 30, 17)
assert self.offset4(self.d) == datetime(2014, 6, 30, 14)
assert self.offset8(self.d) == datetime(2014, 7, 1, 11)
assert self.offset9(self.d) == datetime(2014, 7, 1, 22)
assert self.offset10(self.d) == datetime(2014, 7, 1, 1)
def test_sub(self):
# we have to override test_sub here because self.offset2 is not
# defined as self._offset(2)
off = self.offset2
msg = "Cannot subtract datetime from offset"
with pytest.raises(TypeError, match=msg):
off - self.d
assert 2 * off - off == off
assert self.d - self.offset2 == self.d + self._offset(-3)
def testRollback1(self):
assert self.offset1.rollback(self.d) == self.d
assert self.offset2.rollback(self.d) == self.d
assert self.offset3.rollback(self.d) == self.d
assert self.offset4.rollback(self.d) == self.d
assert self.offset5.rollback(self.d) == datetime(2014, 6, 30, 14, 30)
assert self.offset6.rollback(self.d) == datetime(2014, 7, 1, 5, 0)
assert self.offset7.rollback(self.d) == datetime(2014, 7, 1, 6, 30)
assert self.offset8.rollback(self.d) == self.d
assert self.offset9.rollback(self.d) == self.d
assert self.offset10.rollback(self.d) == datetime(2014, 7, 1, 2)
d = datetime(2014, 7, 1, 0)
assert self.offset1.rollback(d) == datetime(2014, 6, 30, 17)
assert self.offset2.rollback(d) == datetime(2014, 6, 30, 17)
assert self.offset3.rollback(d) == datetime(2014, 6, 30, 17)
assert self.offset4.rollback(d) == datetime(2014, 6, 30, 17)
assert self.offset5.rollback(d) == datetime(2014, 6, 30, 14, 30)
assert self.offset6.rollback(d) == d
assert self.offset7.rollback(d) == d
assert self.offset8.rollback(d) == datetime(2014, 6, 30, 17)
assert self.offset9.rollback(d) == d
assert self.offset10.rollback(d) == d
assert self._offset(5).rollback(self.d) == self.d
def testRollback2(self):
assert self._offset(-3).rollback(datetime(2014, 7, 5, 15, 0)) == datetime(
2014, 7, 4, 17, 0
)
def testRollforward1(self):
assert self.offset1.rollforward(self.d) == self.d
assert self.offset2.rollforward(self.d) == self.d
assert self.offset3.rollforward(self.d) == self.d
assert self.offset4.rollforward(self.d) == self.d
assert self.offset5.rollforward(self.d) == datetime(2014, 7, 1, 11, 0)
assert self.offset6.rollforward(self.d) == datetime(2014, 7, 1, 20, 0)
assert self.offset7.rollforward(self.d) == datetime(2014, 7, 1, 21, 30)
assert self.offset8.rollforward(self.d) == self.d
assert self.offset9.rollforward(self.d) == self.d
assert self.offset10.rollforward(self.d) == datetime(2014, 7, 1, 13)
d = datetime(2014, 7, 1, 0)
assert self.offset1.rollforward(d) == datetime(2014, 7, 1, 9)
assert self.offset2.rollforward(d) == datetime(2014, 7, 1, 9)
assert self.offset3.rollforward(d) == datetime(2014, 7, 1, 9)
assert self.offset4.rollforward(d) == datetime(2014, 7, 1, 9)
assert self.offset5.rollforward(d) == datetime(2014, 7, 1, 11)
assert self.offset6.rollforward(d) == d
assert self.offset7.rollforward(d) == d
assert self.offset8.rollforward(d) == datetime(2014, 7, 1, 9)
assert self.offset9.rollforward(d) == d
assert self.offset10.rollforward(d) == d
assert self._offset(5).rollforward(self.d) == self.d
def testRollforward2(self):
assert self._offset(-3).rollforward(datetime(2014, 7, 5, 16, 0)) == datetime(
2014, 7, 7, 9
)
def test_roll_date_object(self):
offset = BusinessHour()
dt = datetime(2014, 7, 6, 15, 0)
result = offset.rollback(dt)
assert result == datetime(2014, 7, 4, 17)
result = offset.rollforward(dt)
assert result == datetime(2014, 7, 7, 9)
normalize_cases = []
normalize_cases.append(
(
BusinessHour(normalize=True),
{
datetime(2014, 7, 1, 8): datetime(2014, 7, 1),
datetime(2014, 7, 1, 17): datetime(2014, 7, 2),
datetime(2014, 7, 1, 16): datetime(2014, 7, 2),
datetime(2014, 7, 1, 23): datetime(2014, 7, 2),
datetime(2014, 7, 1, 0): datetime(2014, 7, 1),
datetime(2014, 7, 4, 15): datetime(2014, 7, 4),
datetime(2014, 7, 4, 15, 59): datetime(2014, 7, 4),
datetime(2014, 7, 4, 16, 30): datetime(2014, 7, 7),
datetime(2014, 7, 5, 23): datetime(2014, 7, 7),
datetime(2014, 7, 6, 10): datetime(2014, 7, 7),
},
)
)
normalize_cases.append(
(
BusinessHour(-1, normalize=True),
{
datetime(2014, 7, 1, 8): datetime(2014, 6, 30),
datetime(2014, 7, 1, 17): datetime(2014, 7, 1),
datetime(2014, 7, 1, 16): datetime(2014, 7, 1),
datetime(2014, 7, 1, 10): datetime(2014, 6, 30),
datetime(2014, 7, 1, 0): datetime(2014, 6, 30),
datetime(2014, 7, 7, 10): datetime(2014, 7, 4),
datetime(2014, 7, 7, 10, 1): datetime(2014, 7, 7),
datetime(2014, 7, 5, 23): datetime(2014, 7, 4),
datetime(2014, 7, 6, 10): datetime(2014, 7, 4),
},
)
)
normalize_cases.append(
(
BusinessHour(1, normalize=True, start="17:00", end="04:00"),
{
datetime(2014, 7, 1, 8): datetime(2014, 7, 1),
datetime(2014, 7, 1, 17): datetime(2014, 7, 1),
datetime(2014, 7, 1, 23): datetime(2014, 7, 2),
datetime(2014, 7, 2, 2): datetime(2014, 7, 2),
datetime(2014, 7, 2, 3): datetime(2014, 7, 2),
datetime(2014, 7, 4, 23): datetime(2014, 7, 5),
datetime(2014, 7, 5, 2): datetime(2014, 7, 5),
datetime(2014, 7, 7, 2): datetime(2014, 7, 7),
datetime(2014, 7, 7, 17): datetime(2014, 7, 7),
},
)
)
@pytest.mark.parametrize("case", normalize_cases)
def test_normalize(self, case):
offset, cases = case
for dt, expected in cases.items():
assert offset.apply(dt) == expected
on_offset_cases = []
on_offset_cases.append(
(
BusinessHour(),
{
datetime(2014, 7, 1, 9): True,
datetime(2014, 7, 1, 8, 59): False,
datetime(2014, 7, 1, 8): False,
datetime(2014, 7, 1, 17): True,
datetime(2014, 7, 1, 17, 1): False,
datetime(2014, 7, 1, 18): False,
datetime(2014, 7, 5, 9): False,
datetime(2014, 7, 6, 12): False,
},
)
)
on_offset_cases.append(
(
BusinessHour(start="10:00", end="15:00"),
{
datetime(2014, 7, 1, 9): False,
datetime(2014, 7, 1, 10): True,
datetime(2014, 7, 1, 15): True,
datetime(2014, 7, 1, 15, 1): False,
datetime(2014, 7, 5, 12): False,
datetime(2014, 7, 6, 12): False,
},
)
)
on_offset_cases.append(
(
BusinessHour(start="19:00", end="05:00"),
{
datetime(2014, 7, 1, 9, 0): False,
datetime(2014, 7, 1, 10, 0): False,
datetime(2014, 7, 1, 15): False,
datetime(2014, 7, 1, 15, 1): False,
datetime(2014, 7, 5, 12, 0): False,
datetime(2014, 7, 6, 12, 0): False,
datetime(2014, 7, 1, 19, 0): True,
datetime(2014, 7, 2, 0, 0): True,
datetime(2014, 7, 4, 23): True,
datetime(2014, 7, 5, 1): True,
datetime(2014, 7, 5, 5, 0): True,
datetime(2014, 7, 6, 23, 0): False,
datetime(2014, 7, 7, 3, 0): False,
},
)
)
on_offset_cases.append(
(
BusinessHour(start=["09:00", "13:00"], end=["12:00", "17:00"]),
{
datetime(2014, 7, 1, 9): True,
datetime(2014, 7, 1, 8, 59): False,
datetime(2014, 7, 1, 8): False,
datetime(2014, 7, 1, 17): True,
datetime(2014, 7, 1, 17, 1): False,
datetime(2014, 7, 1, 18): False,
datetime(2014, 7, 5, 9): False,
datetime(2014, 7, 6, 12): False,
datetime(2014, 7, 1, 12, 30): False,
},
)
)
on_offset_cases.append(
(
BusinessHour(start=["19:00", "23:00"], end=["21:00", "05:00"]),
{
datetime(2014, 7, 1, 9, 0): False,
datetime(2014, 7, 1, 10, 0): False,
datetime(2014, 7, 1, 15): False,
datetime(2014, 7, 1, 15, 1): False,
datetime(2014, 7, 5, 12, 0): False,
datetime(2014, 7, 6, 12, 0): False,
datetime(2014, 7, 1, 19, 0): True,
datetime(2014, 7, 2, 0, 0): True,
datetime(2014, 7, 4, 23): True,
datetime(2014, 7, 5, 1): True,
datetime(2014, 7, 5, 5, 0): True,
datetime(2014, 7, 6, 23, 0): False,
datetime(2014, 7, 7, 3, 0): False,
datetime(2014, 7, 4, 22): False,
},
)
)
@pytest.mark.parametrize("case", on_offset_cases)
def test_is_on_offset(self, case):
offset, cases = case
for dt, expected in cases.items():
assert offset.is_on_offset(dt) == expected
apply_cases = [
(
BusinessHour(),
{
datetime(2014, 7, 1, 11): datetime(2014, 7, 1, 12),
datetime(2014, 7, 1, 13): datetime(2014, 7, 1, 14),
datetime(2014, 7, 1, 15): datetime(2014, 7, 1, 16),
datetime(2014, 7, 1, 19): datetime(2014, 7, 2, 10),
datetime(2014, 7, 1, 16): datetime(2014, 7, 2, 9),
datetime(2014, 7, 1, 16, 30, 15): datetime(2014, 7, 2, 9, 30, 15),
datetime(2014, 7, 1, 17): datetime(2014, 7, 2, 10),
datetime(2014, 7, 2, 11): datetime(2014, 7, 2, 12),
# out of business hours
datetime(2014, 7, 2, 8): datetime(2014, 7, 2, 10),
datetime(2014, 7, 2, 19): datetime(2014, 7, 3, 10),
datetime(2014, 7, 2, 23): datetime(2014, 7, 3, 10),
datetime(2014, 7, 3, 0): datetime(2014, 7, 3, 10),
# saturday
datetime(2014, 7, 5, 15): datetime(2014, 7, 7, 10),
datetime(2014, 7, 4, 17): datetime(2014, 7, 7, 10),
datetime(2014, 7, 4, 16, 30): datetime(2014, 7, 7, 9, 30),
datetime(2014, 7, 4, 16, 30, 30): datetime(2014, 7, 7, 9, 30, 30),
},
),
(
BusinessHour(4),
{
datetime(2014, 7, 1, 11): datetime(2014, 7, 1, 15),
datetime(2014, 7, 1, 13): datetime(2014, 7, 2, 9),
datetime(2014, 7, 1, 15): datetime(2014, 7, 2, 11),
datetime(2014, 7, 1, 16): datetime(2014, 7, 2, 12),
datetime(2014, 7, 1, 17): datetime(2014, 7, 2, 13),
datetime(2014, 7, 2, 11): datetime(2014, 7, 2, 15),
datetime(2014, 7, 2, 8): datetime(2014, 7, 2, 13),
datetime(2014, 7, 2, 19): datetime(2014, 7, 3, 13),
datetime(2014, 7, 2, 23): datetime(2014, 7, 3, 13),
datetime(2014, 7, 3, 0): datetime(2014, 7, 3, 13),
datetime(2014, 7, 5, 15): datetime(2014, 7, 7, 13),
datetime(2014, 7, 4, 17): datetime(2014, 7, 7, 13),
datetime(2014, 7, 4, 16, 30): datetime(2014, 7, 7, 12, 30),
datetime(2014, 7, 4, 16, 30, 30): datetime(2014, 7, 7, 12, 30, 30),
},
),
(
BusinessHour(-1),
{
datetime(2014, 7, 1, 11): datetime(2014, 7, 1, 10),
datetime(2014, 7, 1, 13): datetime(2014, 7, 1, 12),
datetime(2014, 7, 1, 15): datetime(2014, 7, 1, 14),
datetime(2014, 7, 1, 16): datetime(2014, 7, 1, 15),
datetime(2014, 7, 1, 10): datetime(2014, 6, 30, 17),
datetime(2014, 7, 1, 16, 30, 15): datetime(2014, 7, 1, 15, 30, 15),
datetime(2014, 7, 1, 9, 30, 15): datetime(2014, 6, 30, 16, 30, 15),
datetime(2014, 7, 1, 17): datetime(2014, 7, 1, 16),
datetime(2014, 7, 1, 5): datetime(2014, 6, 30, 16),
datetime(2014, 7, 2, 11): datetime(2014, 7, 2, 10),
# out of business hours
datetime(2014, 7, 2, 8): datetime(2014, 7, 1, 16),
datetime(2014, 7, 2, 19): datetime(2014, 7, 2, 16),
datetime(2014, 7, 2, 23): datetime(2014, 7, 2, 16),
datetime(2014, 7, 3, 0): datetime(2014, 7, 2, 16),
# saturday
datetime(2014, 7, 5, 15): datetime(2014, 7, 4, 16),
datetime(2014, 7, 7, 9): datetime(2014, 7, 4, 16),
datetime(2014, 7, 7, 9, 30): datetime(2014, 7, 4, 16, 30),
datetime(2014, 7, 7, 9, 30, 30): datetime(2014, 7, 4, 16, 30, 30),
},
),
(
BusinessHour(-4),
{
datetime(2014, 7, 1, 11): datetime(2014, 6, 30, 15),
datetime(2014, 7, 1, 13): datetime(2014, 6, 30, 17),
datetime(2014, 7, 1, 15): datetime(2014, 7, 1, 11),
datetime(2014, 7, 1, 16): datetime(2014, 7, 1, 12),
datetime(2014, 7, 1, 17): datetime(2014, 7, 1, 13),
datetime(2014, 7, 2, 11): datetime(2014, 7, 1, 15),
datetime(2014, 7, 2, 8): datetime(2014, 7, 1, 13),
datetime(2014, 7, 2, 19): datetime(2014, 7, 2, 13),
datetime(2014, 7, 2, 23): datetime(2014, 7, 2, 13),
datetime(2014, 7, 3, 0): datetime(2014, 7, 2, 13),
datetime(2014, 7, 5, 15): datetime(2014, 7, 4, 13),
datetime(2014, 7, 4, 18): datetime(2014, 7, 4, 13),
datetime(2014, 7, 7, 9, 30): datetime(2014, 7, 4, 13, 30),
datetime(2014, 7, 7, 9, 30, 30): datetime(2014, 7, 4, 13, 30, 30),
},
),
(
BusinessHour(start="13:00", end="16:00"),
{
datetime(2014, 7, 1, 11): datetime(2014, 7, 1, 14),
datetime(2014, 7, 1, 13): datetime(2014, 7, 1, 14),
datetime(2014, 7, 1, 15): datetime(2014, 7, 2, 13),
datetime(2014, 7, 1, 19): datetime(2014, 7, 2, 14),
datetime(2014, 7, 1, 16): datetime(2014, 7, 2, 14),
datetime(2014, 7, 1, 15, 30, 15): datetime(2014, 7, 2, 13, 30, 15),
datetime(2014, 7, 5, 15): datetime(2014, 7, 7, 14),
datetime(2014, 7, 4, 17): datetime(2014, 7, 7, 14),
},
),
(
BusinessHour(n=2, start="13:00", end="16:00"),
{
datetime(2014, 7, 1, 17): datetime(2014, 7, 2, 15),
datetime(2014, 7, 2, 14): datetime(2014, 7, 3, 13),
datetime(2014, 7, 2, 8): datetime(2014, 7, 2, 15),
datetime(2014, 7, 2, 19): datetime(2014, 7, 3, 15),
datetime(2014, 7, 2, 14, 30): datetime(2014, 7, 3, 13, 30),
datetime(2014, 7, 3, 0): datetime(2014, 7, 3, 15),
datetime(2014, 7, 5, 15): datetime(2014, 7, 7, 15),
datetime(2014, 7, 4, 17): datetime(2014, 7, 7, 15),
datetime(2014, 7, 4, 14, 30): datetime(2014, 7, 7, 13, 30),
datetime(2014, 7, 4, 14, 30, 30): datetime(2014, 7, 7, 13, 30, 30),
},
),
(
BusinessHour(n=-1, start="13:00", end="16:00"),
{
datetime(2014, 7, 2, 11): datetime(2014, 7, 1, 15),
datetime(2014, 7, 2, 13): datetime(2014, 7, 1, 15),
datetime(2014, 7, 2, 14): datetime(2014, 7, 1, 16),
datetime(2014, 7, 2, 15): datetime(2014, 7, 2, 14),
datetime(2014, 7, 2, 19): datetime(2014, 7, 2, 15),
datetime(2014, 7, 2, 16): datetime(2014, 7, 2, 15),
datetime(2014, 7, 2, 13, 30, 15): datetime(2014, 7, 1, 15, 30, 15),
datetime(2014, 7, 5, 15): datetime(2014, 7, 4, 15),
datetime(2014, 7, 7, 11): datetime(2014, 7, 4, 15),
},
),
(
BusinessHour(n=-3, start="10:00", end="16:00"),
{
datetime(2014, 7, 1, 17): datetime(2014, 7, 1, 13),
datetime(2014, 7, 2, 14): datetime(2014, 7, 2, 11),
datetime(2014, 7, 2, 8): datetime(2014, 7, 1, 13),
datetime(2014, 7, 2, 13): datetime(2014, 7, 1, 16),
datetime(2014, 7, 2, 19): datetime(2014, 7, 2, 13),
datetime(2014, 7, 2, 11, 30): datetime(2014, 7, 1, 14, 30),
datetime(2014, 7, 3, 0): datetime(2014, 7, 2, 13),
datetime(2014, 7, 4, 10): datetime(2014, 7, 3, 13),
datetime(2014, 7, 5, 15): datetime(2014, 7, 4, 13),
datetime(2014, 7, 4, 16): datetime(2014, 7, 4, 13),
datetime(2014, 7, 4, 12, 30): datetime(2014, 7, 3, 15, 30),
datetime(2014, 7, 4, 12, 30, 30): datetime(2014, 7, 3, 15, 30, 30),
},
),
(
BusinessHour(start="19:00", end="05:00"),
{
datetime(2014, 7, 1, 17): datetime(2014, 7, 1, 20),
datetime(2014, 7, 2, 14): datetime(2014, 7, 2, 20),
datetime(2014, 7, 2, 8): datetime(2014, 7, 2, 20),
datetime(2014, 7, 2, 13): datetime(2014, 7, 2, 20),
datetime(2014, 7, 2, 19): datetime(2014, 7, 2, 20),
datetime(2014, 7, 2, 4, 30): datetime(2014, 7, 2, 19, 30),
datetime(2014, 7, 3, 0): datetime(2014, 7, 3, 1),
datetime(2014, 7, 4, 10): datetime(2014, 7, 4, 20),
datetime(2014, 7, 4, 23): datetime(2014, 7, 5, 0),
datetime(2014, 7, 5, 0): datetime(2014, 7, 5, 1),
datetime(2014, 7, 5, 4): datetime(2014, 7, 7, 19),
datetime(2014, 7, 5, 4, 30): datetime(2014, 7, 7, 19, 30),
datetime(2014, 7, 5, 4, 30, 30): datetime(2014, 7, 7, 19, 30, 30),
},
),
(
BusinessHour(n=-1, start="19:00", end="05:00"),
{
datetime(2014, 7, 1, 17): datetime(2014, 7, 1, 4),
datetime(2014, 7, 2, 14): datetime(2014, 7, 2, 4),
datetime(2014, 7, 2, 8): datetime(2014, 7, 2, 4),
datetime(2014, 7, 2, 13): datetime(2014, 7, 2, 4),
datetime(2014, 7, 2, 20): datetime(2014, 7, 2, 5),
datetime(2014, 7, 2, 19): datetime(2014, 7, 2, 4),
datetime(2014, 7, 2, 19, 30): datetime(2014, 7, 2, 4, 30),
datetime(2014, 7, 3, 0): datetime(2014, 7, 2, 23),
datetime(2014, 7, 3, 6): datetime(2014, 7, 3, 4),
datetime(2014, 7, 4, 23): datetime(2014, 7, 4, 22),
datetime(2014, 7, 5, 0): datetime(2014, 7, 4, 23),
datetime(2014, 7, 5, 4): datetime(2014, 7, 5, 3),
datetime(2014, 7, 7, 19, 30): datetime(2014, 7, 5, 4, 30),
datetime(2014, 7, 7, 19, 30, 30): datetime(2014, 7, 5, 4, 30, 30),
},
),
(
BusinessHour(n=4, start="00:00", end="23:00"),
{
datetime(2014, 7, 3, 22): datetime(2014, 7, 4, 3),
datetime(2014, 7, 4, 22): datetime(2014, 7, 7, 3),
datetime(2014, 7, 3, 22, 30): datetime(2014, 7, 4, 3, 30),
datetime(2014, 7, 3, 22, 20): datetime(2014, 7, 4, 3, 20),
datetime(2014, 7, 4, 22, 30, 30): datetime(2014, 7, 7, 3, 30, 30),
datetime(2014, 7, 4, 22, 30, 20): datetime(2014, 7, 7, 3, 30, 20),
},
),
(
BusinessHour(n=-4, start="00:00", end="23:00"),
{
datetime(2014, 7, 4, 3): datetime(2014, 7, 3, 22),
datetime(2014, 7, 7, 3): datetime(2014, 7, 4, 22),
datetime(2014, 7, 4, 3, 30): datetime(2014, 7, 3, 22, 30),
datetime(2014, 7, 4, 3, 20): datetime(2014, 7, 3, 22, 20),
datetime(2014, 7, 7, 3, 30, 30): datetime(2014, 7, 4, 22, 30, 30),
datetime(2014, 7, 7, 3, 30, 20): datetime(2014, 7, 4, 22, 30, 20),
},
),
(
BusinessHour(start=["09:00", "14:00"], end=["12:00", "18:00"]),
{
datetime(2014, 7, 1, 11): datetime(2014, 7, 1, 14),
datetime(2014, 7, 1, 15): datetime(2014, 7, 1, 16),
datetime(2014, 7, 1, 19): datetime(2014, 7, 2, 10),
datetime(2014, 7, 1, 16): datetime(2014, 7, 1, 17),
datetime(2014, 7, 1, 16, 30, 15): datetime(2014, 7, 1, 17, 30, 15),
datetime(2014, 7, 1, 17): datetime(2014, 7, 2, 9),
datetime(2014, 7, 2, 11): datetime(2014, 7, 2, 14),
# out of business hours
datetime(2014, 7, 1, 13): datetime(2014, 7, 1, 15),
datetime(2014, 7, 2, 8): datetime(2014, 7, 2, 10),
datetime(2014, 7, 2, 19): datetime(2014, 7, 3, 10),
datetime(2014, 7, 2, 23): datetime(2014, 7, 3, 10),
datetime(2014, 7, 3, 0): datetime(2014, 7, 3, 10),
# saturday
datetime(2014, 7, 5, 15): datetime(2014, 7, 7, 10),
datetime(2014, 7, 4, 17): datetime(2014, 7, 7, 9),
datetime(2014, 7, 4, 17, 30): datetime(2014, 7, 7, 9, 30),
datetime(2014, 7, 4, 17, 30, 30): datetime(2014, 7, 7, 9, 30, 30),
},
),
(
BusinessHour(n=4, start=["09:00", "14:00"], end=["12:00", "18:00"]),
{
datetime(2014, 7, 1, 11): datetime(2014, 7, 1, 17),
datetime(2014, 7, 1, 13): datetime(2014, 7, 2, 9),
datetime(2014, 7, 1, 15): datetime(2014, 7, 2, 10),
datetime(2014, 7, 1, 16): datetime(2014, 7, 2, 11),
datetime(2014, 7, 1, 17): datetime(2014, 7, 2, 14),
datetime(2014, 7, 2, 11): datetime(2014, 7, 2, 17),
datetime(2014, 7, 2, 8): datetime(2014, 7, 2, 15),
datetime(2014, 7, 2, 19): datetime(2014, 7, 3, 15),
datetime(2014, 7, 2, 23): datetime(2014, 7, 3, 15),
datetime(2014, 7, 3, 0): datetime(2014, 7, 3, 15),
datetime(2014, 7, 5, 15): datetime(2014, 7, 7, 15),
datetime(2014, 7, 4, 17): datetime(2014, 7, 7, 14),
datetime(2014, 7, 4, 16, 30): datetime(2014, 7, 7, 11, 30),
datetime(2014, 7, 4, 16, 30, 30): datetime(2014, 7, 7, 11, 30, 30),
},
),
(
BusinessHour(n=-4, start=["09:00", "14:00"], end=["12:00", "18:00"]),
{
datetime(2014, 7, 1, 11): datetime(2014, 6, 30, 16),
datetime(2014, 7, 1, 13): datetime(2014, 6, 30, 17),
datetime(2014, 7, 1, 15): datetime(2014, 6, 30, 18),
datetime(2014, 7, 1, 16): datetime(2014, 7, 1, 10),
datetime(2014, 7, 1, 17): datetime(2014, 7, 1, 11),
datetime(2014, 7, 2, 11): datetime(2014, 7, 1, 16),
datetime(2014, 7, 2, 8): datetime(2014, 7, 1, 12),
datetime(2014, 7, 2, 19): datetime(2014, 7, 2, 12),
datetime(2014, 7, 2, 23): datetime(2014, 7, 2, 12),
datetime(2014, 7, 3, 0): datetime(2014, 7, 2, 12),
datetime(2014, 7, 5, 15): datetime(2014, 7, 4, 12),
datetime(2014, 7, 4, 18): datetime(2014, 7, 4, 12),
datetime(2014, 7, 7, 9, 30): datetime(2014, 7, 4, 14, 30),
datetime(2014, 7, 7, 9, 30, 30): datetime(2014, 7, 4, 14, 30, 30),
},
),
(
BusinessHour(n=-1, start=["19:00", "03:00"], end=["01:00", "05:00"]),
{
datetime(2014, 7, 1, 17): datetime(2014, 7, 1, 4),
datetime(2014, 7, 2, 14): datetime(2014, 7, 2, 4),
datetime(2014, 7, 2, 8): datetime(2014, 7, 2, 4),
datetime(2014, 7, 2, 13): datetime(2014, 7, 2, 4),
datetime(2014, 7, 2, 20): datetime(2014, 7, 2, 5),
datetime(2014, 7, 2, 19): datetime(2014, 7, 2, 4),
datetime(2014, 7, 2, 4): datetime(2014, 7, 2, 1),
datetime(2014, 7, 2, 19, 30): datetime(2014, 7, 2, 4, 30),
datetime(2014, 7, 3, 0): datetime(2014, 7, 2, 23),
datetime(2014, 7, 3, 6): datetime(2014, 7, 3, 4),
datetime(2014, 7, 4, 23): datetime(2014, 7, 4, 22),
datetime(2014, 7, 5, 0): datetime(2014, 7, 4, 23),
datetime(2014, 7, 5, 4): datetime(2014, 7, 5, 0),
datetime(2014, 7, 7, 3, 30): datetime(2014, 7, 5, 0, 30),
datetime(2014, 7, 7, 19, 30): datetime(2014, 7, 7, 4, 30),
datetime(2014, 7, 7, 19, 30, 30): datetime(2014, 7, 7, 4, 30, 30),
},
),
]
# long business hours (see gh-26381)
# multiple business hours
@pytest.mark.parametrize("case", apply_cases)
def test_apply(self, case):
offset, cases = case
for base, expected in cases.items():
assert_offset_equal(offset, base, expected)
apply_large_n_cases = [
(
# A week later
BusinessHour(40),
{
datetime(2014, 7, 1, 11): datetime(2014, 7, 8, 11),
datetime(2014, 7, 1, 13): datetime(2014, 7, 8, 13),
datetime(2014, 7, 1, 15): datetime(2014, 7, 8, 15),
datetime(2014, 7, 1, 16): datetime(2014, 7, 8, 16),
datetime(2014, 7, 1, 17): datetime(2014, 7, 9, 9),
datetime(2014, 7, 2, 11): datetime(2014, 7, 9, 11),
datetime(2014, 7, 2, 8): datetime(2014, 7, 9, 9),
datetime(2014, 7, 2, 19): datetime(2014, 7, 10, 9),
datetime(2014, 7, 2, 23): datetime(2014, 7, 10, 9),
datetime(2014, 7, 3, 0): datetime(2014, 7, 10, 9),
datetime(2014, 7, 5, 15): datetime(2014, 7, 14, 9),
datetime(2014, 7, 4, 18): datetime(2014, 7, 14, 9),
datetime(2014, 7, 7, 9, 30): datetime(2014, 7, 14, 9, 30),
datetime(2014, 7, 7, 9, 30, 30): datetime(2014, 7, 14, 9, 30, 30),
},
),
(
# 3 days and 1 hour before
BusinessHour(-25),
{
datetime(2014, 7, 1, 11): datetime(2014, 6, 26, 10),
datetime(2014, 7, 1, 13): datetime(2014, 6, 26, 12),
datetime(2014, 7, 1, 9): datetime(2014, 6, 25, 16),
datetime(2014, 7, 1, 10): datetime(2014, 6, 25, 17),
datetime(2014, 7, 3, 11): datetime(2014, 6, 30, 10),
datetime(2014, 7, 3, 8): datetime(2014, 6, 27, 16),
datetime(2014, 7, 3, 19): datetime(2014, 6, 30, 16),
datetime(2014, 7, 3, 23): datetime(2014, 6, 30, 16),
datetime(2014, 7, 4, 9): datetime(2014, 6, 30, 16),
datetime(2014, 7, 5, 15): datetime(2014, 7, 1, 16),
datetime(2014, 7, 6, 18): datetime(2014, 7, 1, 16),
datetime(2014, 7, 7, 9, 30): datetime(2014, 7, 1, 16, 30),
datetime(2014, 7, 7, 10, 30, 30): datetime(2014, 7, 2, 9, 30, 30),
},
),
(
# 5 days and 3 hours later
BusinessHour(28, start="21:00", end="02:00"),
{
datetime(2014, 7, 1, 11): datetime(2014, 7, 9, 0),
datetime(2014, 7, 1, 22): datetime(2014, 7, 9, 1),
datetime(2014, 7, 1, 23): datetime(2014, 7, 9, 21),
datetime(2014, 7, 2, 2): datetime(2014, 7, 10, 0),
datetime(2014, 7, 3, 21): datetime(2014, 7, 11, 0),
datetime(2014, 7, 4, 1): datetime(2014, 7, 11, 23),
datetime(2014, 7, 4, 2): datetime(2014, 7, 12, 0),
datetime(2014, 7, 4, 3): datetime(2014, 7, 12, 0),
datetime(2014, 7, 5, 1): datetime(2014, 7, 14, 23),
datetime(2014, 7, 5, 15): datetime(2014, 7, 15, 0),
datetime(2014, 7, 6, 18): datetime(2014, 7, 15, 0),
datetime(2014, 7, 7, 1): datetime(2014, 7, 15, 0),
datetime(2014, 7, 7, 23, 30): datetime(2014, 7, 15, 21, 30),
},
),
(
# large n for multiple opening hours (3 days and 1 hour before)
BusinessHour(n=-25, start=["09:00", "14:00"], end=["12:00", "19:00"]),
{
datetime(2014, 7, 1, 11): datetime(2014, 6, 26, 10),
datetime(2014, 7, 1, 13): datetime(2014, 6, 26, 11),
datetime(2014, 7, 1, 9): datetime(2014, 6, 25, 18),
datetime(2014, 7, 1, 10): datetime(2014, 6, 25, 19),
datetime(2014, 7, 3, 11): datetime(2014, 6, 30, 10),
datetime(2014, 7, 3, 8): datetime(2014, 6, 27, 18),
datetime(2014, 7, 3, 19): datetime(2014, 6, 30, 18),
datetime(2014, 7, 3, 23): datetime(2014, 6, 30, 18),
datetime(2014, 7, 4, 9): datetime(2014, 6, 30, 18),
datetime(2014, 7, 5, 15): datetime(2014, 7, 1, 18),
datetime(2014, 7, 6, 18): datetime(2014, 7, 1, 18),
datetime(2014, 7, 7, 9, 30): datetime(2014, 7, 1, 18, 30),
datetime(2014, 7, 7, 10, 30, 30): datetime(2014, 7, 2, 9, 30, 30),
},
),
(
# 5 days and 3 hours later
BusinessHour(28, start=["21:00", "03:00"], end=["01:00", "04:00"]),
{
datetime(2014, 7, 1, 11): datetime(2014, 7, 9, 0),
datetime(2014, 7, 1, 22): datetime(2014, 7, 9, 3),
datetime(2014, 7, 1, 23): datetime(2014, 7, 9, 21),
datetime(2014, 7, 2, 2): datetime(2014, 7, 9, 23),
datetime(2014, 7, 3, 21): datetime(2014, 7, 11, 0),
datetime(2014, 7, 4, 1): datetime(2014, 7, 11, 23),
datetime(2014, 7, 4, 2): datetime(2014, 7, 11, 23),
datetime(2014, 7, 4, 3): datetime(2014, 7, 11, 23),
datetime(2014, 7, 4, 21): datetime(2014, 7, 12, 0),
datetime(2014, 7, 5, 0): datetime(2014, 7, 14, 22),
datetime(2014, 7, 5, 1): datetime(2014, 7, 14, 23),
datetime(2014, 7, 5, 15): datetime(2014, 7, 14, 23),
datetime(2014, 7, 6, 18): datetime(2014, 7, 14, 23),
datetime(2014, 7, 7, 1): datetime(2014, 7, 14, 23),
datetime(2014, 7, 7, 23, 30): datetime(2014, 7, 15, 21, 30),
},
),
]
@pytest.mark.parametrize("case", apply_large_n_cases)
def test_apply_large_n(self, case):
offset, cases = case
for base, expected in cases.items():
assert_offset_equal(offset, base, expected)
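    # Editor's illustration (not part of the original suite): one of the
    # fixtures above written out as a direct check. BusinessHour(40) is five
    # 8-hour business days, i.e. exactly one calendar week on the default
    # 09:00-17:00 schedule.
    def test_apply_large_n_single_example(self):
        assert datetime(2014, 7, 1, 11) + BusinessHour(40) == datetime(2014, 7, 8, 11)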
def test_apply_nanoseconds(self):
tests = [
(
BusinessHour(),
{
Timestamp("2014-07-04 15:00")
+ Nano(5): Timestamp("2014-07-04 16:00")
+ Nano(5),
Timestamp("2014-07-04 16:00")
+ Nano(5): Timestamp("2014-07-07 09:00")
+ Nano(5),
Timestamp("2014-07-04 16:00")
- Nano(5): Timestamp("2014-07-04 17:00")
- Nano(5),
},
),
(
BusinessHour(-1),
{
Timestamp("2014-07-04 15:00")
+ Nano(5): Timestamp("2014-07-04 14:00")
+ Nano(5),
Timestamp("2014-07-04 10:00")
+ Nano(5): Timestamp("2014-07-04 09:00")
+ Nano(5),
Timestamp("2014-07-04 10:00")
- Nano(5): Timestamp("2014-07-03 17:00")
- Nano(5),
},
),
]
for offset, cases in tests:
for base, expected in cases.items():
assert_offset_equal(offset, base, expected)
def test_datetimeindex(self):
idx1 = date_range(start="2014-07-04 15:00", end="2014-07-08 10:00", freq="BH")
idx2 = date_range(start="2014-07-04 15:00", periods=12, freq="BH")
idx3 = date_range(end="2014-07-08 10:00", periods=12, freq="BH")
expected = DatetimeIndex(
[
"2014-07-04 15:00",
"2014-07-04 16:00",
"2014-07-07 09:00",
"2014-07-07 10:00",
"2014-07-07 11:00",
"2014-07-07 12:00",
"2014-07-07 13:00",
"2014-07-07 14:00",
"2014-07-07 15:00",
"2014-07-07 16:00",
"2014-07-08 09:00",
"2014-07-08 10:00",
],
freq="BH",
)
for idx in [idx1, idx2, idx3]:
tm.assert_index_equal(idx, expected)
idx1 = date_range(start="2014-07-04 15:45", end="2014-07-08 10:45", freq="BH")
idx2 = date_range(start="2014-07-04 15:45", periods=12, freq="BH")
idx3 = date_range(end="2014-07-08 10:45", periods=12, freq="BH")
expected = idx1
for idx in [idx1, idx2, idx3]:
tm.assert_index_equal(idx, expected)
def test_bday_ignores_timedeltas(self):
idx = date_range("2010/02/01", "2010/02/10", freq="12H")
t1 = idx + BDay(offset=Timedelta(3, unit="H"))
expected = DatetimeIndex(
[
"2010-02-02 03:00:00",
"2010-02-02 15:00:00",
"2010-02-03 03:00:00",
"2010-02-03 15:00:00",
"2010-02-04 03:00:00",
"2010-02-04 15:00:00",
"2010-02-05 03:00:00",
"2010-02-05 15:00:00",
"2010-02-08 03:00:00",
"2010-02-08 15:00:00",
"2010-02-08 03:00:00",
"2010-02-08 15:00:00",
"2010-02-08 03:00:00",
"2010-02-08 15:00:00",
"2010-02-09 03:00:00",
"2010-02-09 15:00:00",
"2010-02-10 03:00:00",
"2010-02-10 15:00:00",
"2010-02-11 03:00:00",
],
freq=None,
)
tm.assert_index_equal(t1, expected)
|
bsd-3-clause
|
mrry/tensorflow
|
tensorflow/contrib/learn/python/learn/estimators/__init__.py
|
5
|
2795
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Estimators."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.learn.python.learn.estimators._sklearn import NotFittedError
from tensorflow.contrib.learn.python.learn.estimators.base import TensorFlowBaseTransformer
from tensorflow.contrib.learn.python.learn.estimators.base import TensorFlowEstimator
from tensorflow.contrib.learn.python.learn.estimators.classifier import Classifier
from tensorflow.contrib.learn.python.learn.estimators.dnn import DNNClassifier
from tensorflow.contrib.learn.python.learn.estimators.dnn import DNNRegressor
from tensorflow.contrib.learn.python.learn.estimators.dnn_linear_combined import DNNLinearCombinedClassifier
from tensorflow.contrib.learn.python.learn.estimators.dnn_linear_combined import DNNLinearCombinedRegressor
from tensorflow.contrib.learn.python.learn.estimators.estimator import BaseEstimator
from tensorflow.contrib.learn.python.learn.estimators.estimator import Estimator
from tensorflow.contrib.learn.python.learn.estimators.estimator import infer_real_valued_columns_from_input
from tensorflow.contrib.learn.python.learn.estimators.estimator import infer_real_valued_columns_from_input_fn
from tensorflow.contrib.learn.python.learn.estimators.estimator import ModeKeys
from tensorflow.contrib.learn.python.learn.estimators.linear import LinearClassifier
from tensorflow.contrib.learn.python.learn.estimators.linear import LinearRegressor
from tensorflow.contrib.learn.python.learn.estimators.logistic_regressor import LogisticRegressor
from tensorflow.contrib.learn.python.learn.estimators.random_forest import TensorForestEstimator
from tensorflow.contrib.learn.python.learn.estimators.random_forest import TensorForestLossMonitor
from tensorflow.contrib.learn.python.learn.estimators.rnn import TensorFlowRNNClassifier
from tensorflow.contrib.learn.python.learn.estimators.rnn import TensorFlowRNNRegressor
from tensorflow.contrib.learn.python.learn.estimators.run_config import RunConfig
from tensorflow.contrib.learn.python.learn.estimators.svm import SVM
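# Editor's note (not part of the original module): a rough usage sketch of the
# classes re-exported above, for orientation only. Signatures varied across
# tf.contrib.learn releases, so treat this as illustrative pseudocode rather
# than a definitive recipe:
#
#   feature_columns = infer_real_valued_columns_from_input(x_train)
#   classifier = DNNClassifier(feature_columns=feature_columns,
#                              hidden_units=[10, 20, 10], n_classes=3)
#   classifier.fit(x_train, y_train, steps=200)
#   accuracy = classifier.evaluate(x=x_test, y=y_test)["accuracy"]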
|
apache-2.0
|
dimroc/tensorflow-mnist-tutorial
|
lib/python3.6/site-packages/mpl_toolkits/axes_grid1/axes_divider.py
|
10
|
31231
|
"""
The axes_divider module provides helper classes to adjust the positions of
multiple axes at drawing time.
Divider: this is the class that is used to calculate the axes
position. It divides the given rectangular area into several sub
rectangles. You initialize the divider by setting the horizontal
and vertical lists of sizes that the division will be based on. You
then use the new_locator method, whose return value is a callable
object that can be used to set the axes_locator of the axes.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from six.moves import map
import matplotlib.transforms as mtransforms
from matplotlib.axes import SubplotBase
from . import axes_size as Size
class Divider(object):
"""
This is the class that is used to calculate the axes position. It
divides the given rectangular area into several
sub-rectangles. You initialize the divider by setting the
horizontal and vertical lists of sizes
(:mod:`mpl_toolkits.axes_grid.axes_size`) that the division will
be based on. You then use the new_locator method to create a
callable object that can be used as the axes_locator of the
axes.
"""
def __init__(self, fig, pos, horizontal, vertical,
aspect=None, anchor="C"):
"""
Parameters
----------
fig : Figure
pos : tuple of 4 floats
position of the rectangle that will be divided
horizontal : list of :mod:`~mpl_toolkits.axes_grid.axes_size`
sizes for horizontal division
vertical : list of :mod:`~mpl_toolkits.axes_grid.axes_size`
sizes for vertical division
aspect : bool
if True, the overall rectangular area is reduced so
that one scaled unit of the horizontal and vertical
sizes corresponds to the same physical length.
anchor : {'C', 'SW', 'S', 'SE', 'E', 'NE', 'N', 'NW', 'W'}
placement of the reduced rectangle when *aspect* is True
"""
self._fig = fig
self._pos = pos
self._horizontal = horizontal
self._vertical = vertical
self._anchor = anchor
self._aspect = aspect
self._xrefindex = 0
self._yrefindex = 0
self._locator = None
def get_horizontal_sizes(self, renderer):
return [s.get_size(renderer) for s in self.get_horizontal()]
def get_vertical_sizes(self, renderer):
return [s.get_size(renderer) for s in self.get_vertical()]
def get_vsize_hsize(self):
from .axes_size import AddList
vsize = AddList(self.get_vertical())
hsize = AddList(self.get_horizontal())
return vsize, hsize
@staticmethod
def _calc_k(l, total_size):
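        # Editor's note: each element of *l* is a (relative_size, absolute_size)
        # pair as returned by the axes_size get_size() methods. The absolute
        # parts are fixed (in inches), so k is the factor that distributes the
        # remaining space over the relative parts: sum(r*k + a) == total_size.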
rs_sum, as_sum = 0., 0.
for _rs, _as in l:
rs_sum += _rs
as_sum += _as
if rs_sum != 0.:
k = (total_size - as_sum) / rs_sum
return k
else:
return 0.
@staticmethod
def _calc_offsets(l, k):
offsets = [0.]
#for s in l:
for _rs, _as in l:
#_rs, _as = s.get_size(renderer)
offsets.append(offsets[-1] + _rs*k + _as)
return offsets
def set_position(self, pos):
"""
set the position of the rectangle.
Parameters
----------
pos : tuple of 4 floats
position of the rectangle that will be divided
"""
self._pos = pos
def get_position(self):
"return the position of the rectangle."
return self._pos
def set_anchor(self, anchor):
"""
Parameters
----------
anchor : {'C', 'SW', 'S', 'SE', 'E', 'NE', 'N', 'NW', 'W'}
anchor position
===== ============
value description
===== ============
'C' Center
'SW' bottom left
'S' bottom
'SE' bottom right
'E' right
'NE' top right
'N' top
'NW' top left
'W' left
===== ============
"""
if anchor in mtransforms.Bbox.coefs or len(anchor) == 2:
self._anchor = anchor
else:
raise ValueError('argument must be among %s' %
', '.join(mtransforms.Bbox.coefs.keys()))
def get_anchor(self):
"return the anchor"
return self._anchor
def set_horizontal(self, h):
"""
Parameters
----------
h : list of :mod:`~mpl_toolkits.axes_grid.axes_size`
sizes for horizontal division
"""
self._horizontal = h
def get_horizontal(self):
"return horizontal sizes"
return self._horizontal
def set_vertical(self, v):
"""
Parameters
----------
v : list of :mod:`~mpl_toolkits.axes_grid.axes_size`
sizes for vertical division
"""
self._vertical = v
def get_vertical(self):
"return vertical sizes"
return self._vertical
def set_aspect(self, aspect=False):
"""
Parameters
----------
aspect : bool
"""
self._aspect = aspect
def get_aspect(self):
"return aspect"
return self._aspect
def set_locator(self, _locator):
self._locator = _locator
def get_locator(self):
return self._locator
def get_position_runtime(self, ax, renderer):
if self._locator is None:
return self.get_position()
else:
return self._locator(ax, renderer).bounds
def locate(self, nx, ny, nx1=None, ny1=None, axes=None, renderer=None):
"""
Parameters
----------
nx, nx1 : int
Integers specifying the column-position of the
cell. When *nx1* is None, a single *nx*-th column is
specified. Otherwise location of columns spanning from *nx*
to *nx1* (but excluding *nx1*-th column) is specified.
ny, ny1 : int
Same as *nx* and *nx1*, but for row positions.
axes
renderer
"""
figW, figH = self._fig.get_size_inches()
x, y, w, h = self.get_position_runtime(axes, renderer)
hsizes = self.get_horizontal_sizes(renderer)
vsizes = self.get_vertical_sizes(renderer)
k_h = self._calc_k(hsizes, figW*w)
k_v = self._calc_k(vsizes, figH*h)
if self.get_aspect():
k = min(k_h, k_v)
ox = self._calc_offsets(hsizes, k)
oy = self._calc_offsets(vsizes, k)
ww = (ox[-1] - ox[0])/figW
hh = (oy[-1] - oy[0])/figH
pb = mtransforms.Bbox.from_bounds(x, y, w, h)
pb1 = mtransforms.Bbox.from_bounds(x, y, ww, hh)
pb1_anchored = pb1.anchored(self.get_anchor(), pb)
x0, y0 = pb1_anchored.x0, pb1_anchored.y0
else:
ox = self._calc_offsets(hsizes, k_h)
oy = self._calc_offsets(vsizes, k_v)
x0, y0 = x, y
if nx1 is None:
nx1 = nx+1
if ny1 is None:
ny1 = ny+1
x1, w1 = x0 + ox[nx]/figW, (ox[nx1] - ox[nx])/figW
y1, h1 = y0 + oy[ny]/figH, (oy[ny1] - oy[ny])/figH
return mtransforms.Bbox.from_bounds(x1, y1, w1, h1)
def new_locator(self, nx, ny, nx1=None, ny1=None):
"""
Returns a new locator
(:class:`mpl_toolkits.axes_grid.axes_divider.AxesLocator`) for
specified cell.
Parameters
----------
nx, nx1 : int
Integers specifying the column-position of the
cell. When *nx1* is None, a single *nx*-th column is
specified. Otherwise location of columns spanning from *nx*
to *nx1* (but excluding *nx1*-th column) is specified.
ny, ny1 : int
Same as *nx* and *nx1*, but for row positions.
"""
return AxesLocator(self, nx, ny, nx1, ny1)
def append_size(self, position, size):
if position == "left":
self._horizontal.insert(0, size)
self._xrefindex += 1
elif position == "right":
self._horizontal.append(size)
elif position == "bottom":
self._vertical.insert(0, size)
self._yrefindex += 1
elif position == "top":
self._vertical.append(size)
else:
raise ValueError("the position must be one of left," +
" right, bottom, or top")
def add_auto_adjustable_area(self,
use_axes, pad=0.1,
adjust_dirs=None,
):
if adjust_dirs is None:
adjust_dirs = ["left", "right", "bottom", "top"]
from .axes_size import Padded, SizeFromFunc, GetExtentHelper
for d in adjust_dirs:
helper = GetExtentHelper(use_axes, d)
size = SizeFromFunc(helper)
padded_size = Padded(size, pad) # pad in inch
self.append_size(d, padded_size)
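# Editor's sketch (not part of the original module): the Divider workflow
# described in the class docstring, end to end. The figure size, the
# (0.1, 0.1, 0.8, 0.8) rectangle and the column sizes are illustrative only.
def _divider_usage_sketch():
    import matplotlib.pyplot as plt
    fig = plt.figure(figsize=(6, 3))
    # one fixed 0.5-inch column, then two columns sharing the rest 1:2
    horizontal = [Size.Fixed(0.5), Size.Scaled(1.), Size.Scaled(2.)]
    vertical = [Size.Scaled(1.)]
    divider = Divider(fig, (0.1, 0.1, 0.8, 0.8), horizontal, vertical)
    ax1 = fig.add_axes((0, 0, 1, 1), label="col1")
    ax1.set_axes_locator(divider.new_locator(nx=1, ny=0))
    ax2 = fig.add_axes((0, 0, 1, 1), label="col2")
    ax2.set_axes_locator(divider.new_locator(nx=2, ny=0))
    return fig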
class AxesLocator(object):
"""
A simple callable object, initialized with a Divider instance,
that returns the position and size of the given cell.
"""
def __init__(self, axes_divider, nx, ny, nx1=None, ny1=None):
"""
Parameters
----------
axes_divider : AxesDivider
nx, nx1 : int
Integers specifying the column-position of the
cell. When *nx1* is None, a single *nx*-th column is
specified. Otherwise location of columns spanning from *nx*
to *nx1* (but excluding *nx1*-th column) is specified.
ny, ny1 : int
Same as *nx* and *nx1*, but for row positions.
"""
self._axes_divider = axes_divider
_xrefindex = axes_divider._xrefindex
_yrefindex = axes_divider._yrefindex
self._nx, self._ny = nx - _xrefindex, ny - _yrefindex
if nx1 is None:
nx1 = nx+1
if ny1 is None:
ny1 = ny+1
self._nx1 = nx1 - _xrefindex
self._ny1 = ny1 - _yrefindex
def __call__(self, axes, renderer):
_xrefindex = self._axes_divider._xrefindex
_yrefindex = self._axes_divider._yrefindex
return self._axes_divider.locate(self._nx + _xrefindex,
self._ny + _yrefindex,
self._nx1 + _xrefindex,
self._ny1 + _yrefindex,
axes,
renderer)
def get_subplotspec(self):
if hasattr(self._axes_divider, "get_subplotspec"):
return self._axes_divider.get_subplotspec()
else:
return None
from matplotlib.gridspec import SubplotSpec, GridSpec
class SubplotDivider(Divider):
"""
The Divider class whose rectangle area is specified as a subplot geometry.
"""
def __init__(self, fig, *args, **kwargs):
"""
Parameters
----------
fig : :class:`matplotlib.figure.Figure`
args : tuple (*numRows*, *numCols*, *plotNum*)
The array of subplots in the figure has dimensions *numRows*,
*numCols*, and *plotNum* is the number of the subplot
being created. *plotNum* starts at 1 in the upper left
corner and increases to the right.
If *numRows* <= *numCols* <= *plotNum* < 10, *args* can be the
decimal integer *numRows* * 100 + *numCols* * 10 + *plotNum*.
"""
self.figure = fig
if len(args) == 1:
if isinstance(args[0], SubplotSpec):
self._subplotspec = args[0]
else:
try:
s = str(int(args[0]))
rows, cols, num = list(map(int, s))
except ValueError:
raise ValueError(
'Single argument to subplot must be a 3-digit integer')
self._subplotspec = GridSpec(rows, cols)[num-1]
# num - 1 for converting from MATLAB to python indexing
elif len(args) == 3:
rows, cols, num = args
rows = int(rows)
cols = int(cols)
if isinstance(num, tuple) and len(num) == 2:
num = [int(n) for n in num]
self._subplotspec = GridSpec(rows, cols)[num[0]-1:num[1]]
else:
self._subplotspec = GridSpec(rows, cols)[int(num)-1]
# num - 1 for converting from MATLAB to python indexing
else:
raise ValueError('Illegal argument(s) to subplot: %s' % (args,))
# total = rows*cols
# num -= 1 # convert from matlab to python indexing
# # i.e., num in range(0,total)
# if num >= total:
# raise ValueError( 'Subplot number exceeds total subplots')
# self._rows = rows
# self._cols = cols
# self._num = num
# self.update_params()
# sets self.figbox
self.update_params()
pos = self.figbox.bounds
horizontal = kwargs.pop("horizontal", [])
vertical = kwargs.pop("vertical", [])
aspect = kwargs.pop("aspect", None)
anchor = kwargs.pop("anchor", "C")
if kwargs:
raise Exception("unexpected keyword arguments: %r" % list(kwargs))
Divider.__init__(self, fig, pos, horizontal, vertical,
aspect=aspect, anchor=anchor)
def get_position(self):
"return the bounds of the subplot box"
self.update_params() # update self.figbox
return self.figbox.bounds
# def update_params(self):
# 'update the subplot position from fig.subplotpars'
# rows = self._rows
# cols = self._cols
# num = self._num
# pars = self.figure.subplotpars
# left = pars.left
# right = pars.right
# bottom = pars.bottom
# top = pars.top
# wspace = pars.wspace
# hspace = pars.hspace
# totWidth = right-left
# totHeight = top-bottom
# figH = totHeight/(rows + hspace*(rows-1))
# sepH = hspace*figH
# figW = totWidth/(cols + wspace*(cols-1))
# sepW = wspace*figW
# rowNum, colNum = divmod(num, cols)
# figBottom = top - (rowNum+1)*figH - rowNum*sepH
# figLeft = left + colNum*(figW + sepW)
# self.figbox = mtransforms.Bbox.from_bounds(figLeft, figBottom,
# figW, figH)
def update_params(self):
'update the subplot position from fig.subplotpars'
self.figbox = self.get_subplotspec().get_position(self.figure)
def get_geometry(self):
'get the subplot geometry, e.g., 2,2,3'
rows, cols, num1, num2 = self.get_subplotspec().get_geometry()
return rows, cols, num1+1 # for compatibility
# COVERAGE NOTE: Never used internally or from examples
def change_geometry(self, numrows, numcols, num):
'change subplot geometry, e.g., from 1,1,1 to 2,2,3'
self._subplotspec = GridSpec(numrows, numcols)[num-1]
self.update_params()
self.set_position(self.figbox)
def get_subplotspec(self):
'get the SubplotSpec instance'
return self._subplotspec
def set_subplotspec(self, subplotspec):
'set the SubplotSpec instance'
self._subplotspec = subplotspec
class AxesDivider(Divider):
"""
Divider based on the pre-existing axes.
"""
def __init__(self, axes, xref=None, yref=None):
"""
Parameters
----------
axes : :class:`~matplotlib.axes.Axes`
xref
yref
"""
self._axes = axes
if xref is None:
self._xref = Size.AxesX(axes)
else:
self._xref = xref
if yref is None:
self._yref = Size.AxesY(axes)
else:
self._yref = yref
Divider.__init__(self, fig=axes.get_figure(), pos=None,
horizontal=[self._xref], vertical=[self._yref],
aspect=None, anchor="C")
def _get_new_axes(self, **kwargs):
axes = self._axes
axes_class = kwargs.pop("axes_class", None)
if axes_class is None:
if isinstance(axes, SubplotBase):
axes_class = axes._axes_class
else:
axes_class = type(axes)
ax = axes_class(axes.get_figure(),
axes.get_position(original=True), **kwargs)
return ax
def new_horizontal(self, size, pad=None, pack_start=False, **kwargs):
"""
Add a new axes on the right (or left) side of the main axes.
Parameters
----------
size : :mod:`~mpl_toolkits.axes_grid.axes_size` or float or string
A width of the axes. If float or string is given, *from_any*
function is used to create the size, with *ref_size* set to AxesX
instance of the current axes.
pad : :mod:`~mpl_toolkits.axes_grid.axes_size` or float or string
Pad between the axes. It takes same argument as *size*.
pack_start : bool
If False, the new axes is appended at the end
of the list, i.e., it becomes the right-most axes. If True, it is
inserted at the start of the list, and becomes the left-most axes.
kwargs
All extra keywords arguments are passed to the created axes.
If *axes_class* is given, the new axes will be created as an
instance of the given class. Otherwise, the same class of the
main axes will be used.
"""
if pad:
if not isinstance(pad, Size._Base):
pad = Size.from_any(pad,
fraction_ref=self._xref)
if pack_start:
self._horizontal.insert(0, pad)
self._xrefindex += 1
else:
self._horizontal.append(pad)
if not isinstance(size, Size._Base):
size = Size.from_any(size,
fraction_ref=self._xref)
if pack_start:
self._horizontal.insert(0, size)
self._xrefindex += 1
locator = self.new_locator(nx=0, ny=self._yrefindex)
else:
self._horizontal.append(size)
locator = self.new_locator(nx=len(self._horizontal)-1, ny=self._yrefindex)
ax = self._get_new_axes(**kwargs)
ax.set_axes_locator(locator)
return ax
def new_vertical(self, size, pad=None, pack_start=False, **kwargs):
"""
Add a new axes on the top (or bottom) side of the main axes.
Parameters
----------
size : :mod:`~mpl_toolkits.axes_grid.axes_size` or float or string
A height of the axes. If float or string is given, *from_any*
function is used to create the size, with *ref_size* set to AxesY
instance of the current axes.
pad : :mod:`~mpl_toolkits.axes_grid.axes_size` or float or string
Pad between the axes. It takes same argument as *size*.
pack_start : bool
If False, the new axes is appended at the end
of the list, i.e., it becomes the top-most axes. If True, it is
inserted at the start of the list, and becomes the bottom-most axes.
kwargs
All extra keywords arguments are passed to the created axes.
If *axes_class* is given, the new axes will be created as an
instance of the given class. Otherwise, the same class of the
main axes will be used.
"""
if pad:
if not isinstance(pad, Size._Base):
pad = Size.from_any(pad,
fraction_ref=self._yref)
if pack_start:
self._vertical.insert(0, pad)
self._yrefindex += 1
else:
self._vertical.append(pad)
if not isinstance(size, Size._Base):
size = Size.from_any(size,
fraction_ref=self._yref)
if pack_start:
self._vertical.insert(0, size)
self._yrefindex += 1
locator = self.new_locator(nx=self._xrefindex, ny=0)
else:
self._vertical.append(size)
locator = self.new_locator(nx=self._xrefindex, ny=len(self._vertical)-1)
ax = self._get_new_axes(**kwargs)
ax.set_axes_locator(locator)
return ax
def append_axes(self, position, size, pad=None, add_to_figure=True,
**kwargs):
"""
create an axes at the given *position* with the same height
(or width) of the main axes.
*position*
["left"|"right"|"bottom"|"top"]
*size* and *pad* should be axes_grid.axes_size compatible.
"""
if position == "left":
ax = self.new_horizontal(size, pad, pack_start=True, **kwargs)
elif position == "right":
ax = self.new_horizontal(size, pad, pack_start=False, **kwargs)
elif position == "bottom":
ax = self.new_vertical(size, pad, pack_start=True, **kwargs)
elif position == "top":
ax = self.new_vertical(size, pad, pack_start=False, **kwargs)
else:
raise ValueError("the position must be one of left," +
" right, bottom, or top")
if add_to_figure:
self._fig.add_axes(ax)
return ax
def get_aspect(self):
if self._aspect is None:
aspect = self._axes.get_aspect()
if aspect == "auto":
return False
else:
return True
else:
return self._aspect
def get_position(self):
if self._pos is None:
bbox = self._axes.get_position(original=True)
return bbox.bounds
else:
return self._pos
def get_anchor(self):
if self._anchor is None:
return self._axes.get_anchor()
else:
return self._anchor
def get_subplotspec(self):
if hasattr(self._axes, "get_subplotspec"):
return self._axes.get_subplotspec()
else:
return None
class HBoxDivider(SubplotDivider):
def __init__(self, fig, *args, **kwargs):
SubplotDivider.__init__(self, fig, *args, **kwargs)
@staticmethod
def _determine_karray(equivalent_sizes, appended_sizes,
max_equivalent_size,
total_appended_size):
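        # Editor's note: this solves the linear system A * [k_1..k_n, H]^T = B
        # in which each cell i must satisfy r_i*k_i + a_i == H (all cells reach
        # a common equivalent size H) and the appended sizes fill the available
        # length: sum_i(r_i*k_i + a_i) == total_appended_size. If the solved H
        # would exceed max_equivalent_size, the k_i are instead chosen so that
        # every cell exactly reaches max_equivalent_size.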
n = len(equivalent_sizes)
import numpy as np
A = np.mat(np.zeros((n+1, n+1), dtype="d"))
B = np.zeros((n+1), dtype="d")
# AxK = B
# populated A
for i, (r, a) in enumerate(equivalent_sizes):
A[i, i] = r
A[i, -1] = -1
B[i] = -a
A[-1, :-1] = [r for r, a in appended_sizes]
B[-1] = total_appended_size - sum(a for r, a in appended_sizes)
karray_H = (A.I*np.mat(B).T).A1
karray = karray_H[:-1]
H = karray_H[-1]
if H > max_equivalent_size:
karray = ((max_equivalent_size -
np.array([a for r, a in equivalent_sizes]))
/ np.array([r for r, a in equivalent_sizes]))
return karray
@staticmethod
def _calc_offsets(appended_sizes, karray):
offsets = [0.]
#for s in l:
for (r, a), k in zip(appended_sizes, karray):
offsets.append(offsets[-1] + r*k + a)
return offsets
def new_locator(self, nx, nx1=None):
"""
returns a new locator
(:class:`mpl_toolkits.axes_grid.axes_divider.AxesLocator`) for
specified cell.
Parameters
----------
nx, nx1 : int
Integers specifying the column-position of the
cell. When *nx1* is None, a single *nx*-th column is
specified. Otherwise location of columns spanning from *nx*
to *nx1* (but excluding *nx1*-th column) is specified.
"""
return AxesLocator(self, nx, 0, nx1, None)
def _locate(self, x, y, w, h,
y_equivalent_sizes, x_appended_sizes,
figW, figH):
"""
Parameters
----------
x
y
w
h
y_equivalent_sizes
x_appended_sizes
figW
figH
"""
equivalent_sizes = y_equivalent_sizes
appended_sizes = x_appended_sizes
max_equivalent_size = figH*h
total_appended_size = figW*w
karray = self._determine_karray(equivalent_sizes, appended_sizes,
max_equivalent_size,
total_appended_size)
ox = self._calc_offsets(appended_sizes, karray)
ww = (ox[-1] - ox[0])/figW
ref_h = equivalent_sizes[0]
hh = (karray[0]*ref_h[0] + ref_h[1])/figH
pb = mtransforms.Bbox.from_bounds(x, y, w, h)
pb1 = mtransforms.Bbox.from_bounds(x, y, ww, hh)
pb1_anchored = pb1.anchored(self.get_anchor(), pb)
x0, y0 = pb1_anchored.x0, pb1_anchored.y0
return x0, y0, ox, hh
def locate(self, nx, ny, nx1=None, ny1=None, axes=None, renderer=None):
"""
Parameters
----------
nx, nx1 : int
Integers specifying the column-position of the
cell. When *nx1* is None, a single *nx*-th column is
specified. Otherwise location of columns spanning from *nx*
to *nx1* (but excluding *nx1*-th column) is specified.
ny, ny1 : int
Same as *nx* and *nx1*, but for row positions.
axes
renderer
"""
figW, figH = self._fig.get_size_inches()
x, y, w, h = self.get_position_runtime(axes, renderer)
y_equivalent_sizes = self.get_vertical_sizes(renderer)
x_appended_sizes = self.get_horizontal_sizes(renderer)
x0, y0, ox, hh = self._locate(x, y, w, h,
y_equivalent_sizes, x_appended_sizes,
figW, figH)
if nx1 is None:
nx1 = nx+1
x1, w1 = x0 + ox[nx]/figW, (ox[nx1] - ox[nx])/figW
y1, h1 = y0, hh
return mtransforms.Bbox.from_bounds(x1, y1, w1, h1)
class VBoxDivider(HBoxDivider):
"""
A Divider that stacks its cells vertically; the vertical counterpart of HBoxDivider.
"""
def new_locator(self, ny, ny1=None):
"""
returns a new locator
(:class:`mpl_toolkits.axes_grid.axes_divider.AxesLocator`) for
specified cell.
Parameters
----------
ny, ny1 : int
Integers specifying the row-position of the
cell. When *ny1* is None, a single *ny*-th row is
specified. Otherwise location of rows spanning from *ny*
to *ny1* (but excluding *ny1*-th row) is specified.
"""
return AxesLocator(self, 0, ny, None, ny1)
def locate(self, nx, ny, nx1=None, ny1=None, axes=None, renderer=None):
"""
Parameters
----------
nx, nx1 : int
Integers specifying the column-position of the
cell. When *nx1* is None, a single *nx*-th column is
specified. Otherwise location of columns spanning from *nx*
to *nx1* (but excluding *nx1*-th column) is specified.
ny, ny1 : int
Same as *nx* and *nx1*, but for row positions.
axes
renderer
"""
figW, figH = self._fig.get_size_inches()
x, y, w, h = self.get_position_runtime(axes, renderer)
x_equivalent_sizes = self.get_horizontal_sizes(renderer)
y_appended_sizes = self.get_vertical_sizes(renderer)
y0, x0, oy, ww = self._locate(y, x, h, w,
x_equivalent_sizes, y_appended_sizes,
figH, figW)
if ny1 is None:
ny1 = ny+1
x1, w1 = x0, ww
y1, h1 = y0 + oy[ny]/figH, (oy[ny1] - oy[ny])/figH
return mtransforms.Bbox.from_bounds(x1, y1, w1, h1)
class LocatableAxesBase(object):
def __init__(self, *kl, **kw):
self._axes_class.__init__(self, *kl, **kw)
self._locator = None
self._locator_renderer = None
def set_axes_locator(self, locator):
self._locator = locator
def get_axes_locator(self):
return self._locator
def apply_aspect(self, position=None):
if self.get_axes_locator() is None:
self._axes_class.apply_aspect(self, position)
else:
pos = self.get_axes_locator()(self, self._locator_renderer)
self._axes_class.apply_aspect(self, position=pos)
def draw(self, renderer=None, inframe=False):
self._locator_renderer = renderer
self._axes_class.draw(self, renderer, inframe)
def _make_twin_axes(self, *kl, **kwargs):
"""
Need to overload so that twinx/twiny will work with
these axes.
"""
ax2 = type(self)(self.figure, self.get_position(True), *kl, **kwargs)
ax2.set_axes_locator(self.get_axes_locator())
self.figure.add_axes(ax2)
return ax2
_locatableaxes_classes = {}
def locatable_axes_factory(axes_class):
new_class = _locatableaxes_classes.get(axes_class)
if new_class is None:
new_class = type(str("Locatable%s" % (axes_class.__name__)),
(LocatableAxesBase, axes_class),
{'_axes_class': axes_class})
_locatableaxes_classes[axes_class] = new_class
return new_class
#if hasattr(maxes.Axes, "get_axes_locator"):
# LocatableAxes = maxes.Axes
#else:
def make_axes_locatable(axes):
if not hasattr(axes, "set_axes_locator"):
new_class = locatable_axes_factory(type(axes))
axes.__class__ = new_class
divider = AxesDivider(axes)
locator = divider.new_locator(nx=0, ny=0)
axes.set_axes_locator(locator)
return divider
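# Editor's sketch (not part of the original module): the most common use of
# make_axes_locatable/append_axes, attaching a colorbar axes of matching
# height next to an image. The data and the 5%/0.05-inch sizes are
# illustrative values only.
def _append_axes_sketch():
    import numpy as np
    import matplotlib.pyplot as plt
    fig, ax = plt.subplots()
    im = ax.imshow(np.arange(100).reshape(10, 10))
    divider = make_axes_locatable(ax)
    cax = divider.append_axes("right", size="5%", pad=0.05)
    fig.colorbar(im, cax=cax)
    return fig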
def make_axes_area_auto_adjustable(ax,
use_axes=None, pad=0.1,
adjust_dirs=None):
if adjust_dirs is None:
adjust_dirs = ["left", "right", "bottom", "top"]
divider = make_axes_locatable(ax)
if use_axes is None:
use_axes = ax
divider.add_auto_adjustable_area(use_axes=use_axes, pad=pad,
adjust_dirs=adjust_dirs)
#from matplotlib.axes import Axes
from .mpl_axes import Axes
LocatableAxes = locatable_axes_factory(Axes)
|
apache-2.0
|
rabernat/xray
|
setup.py
|
1
|
4716
|
#!/usr/bin/env python
import os
import re
import sys
import warnings
from setuptools import setup, find_packages
from setuptools import Command
MAJOR = 0
MINOR = 10
MICRO = 0
ISRELEASED = False
VERSION = '%d.%d.%d' % (MAJOR, MINOR, MICRO)
QUALIFIER = 'rc2'
DISTNAME = 'xarray'
LICENSE = 'Apache'
AUTHOR = 'xarray Developers'
AUTHOR_EMAIL = '[email protected]'
URL = 'https://github.com/pydata/xarray'
CLASSIFIERS = [
'Development Status :: 4 - Beta',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Intended Audience :: Science/Research',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Topic :: Scientific/Engineering',
]
INSTALL_REQUIRES = ['numpy >= 1.11', 'pandas >= 0.18.0']
TESTS_REQUIRE = ['pytest >= 2.7.1']
if sys.version_info[0] < 3:
TESTS_REQUIRE.append('mock')
DESCRIPTION = "N-D labeled arrays and datasets in Python"
LONG_DESCRIPTION = """
**xarray** (formerly **xray**) is an open source project and Python package
that aims to bring the labeled data power of pandas_ to the physical sciences,
by providing N-dimensional variants of the core pandas data structures.
Our goal is to provide a pandas-like and pandas-compatible toolkit for
analytics on multi-dimensional arrays, rather than the tabular data for which
pandas excels. Our approach adopts the `Common Data Model`_ for self-
describing scientific data in widespread use in the Earth sciences:
``xarray.Dataset`` is an in-memory representation of a netCDF file.
.. _pandas: http://pandas.pydata.org
.. _Common Data Model: http://www.unidata.ucar.edu/software/thredds/current/netcdf-java/CDM
.. _netCDF: http://www.unidata.ucar.edu/software/netcdf
.. _OPeNDAP: http://www.opendap.org/
Important links
---------------
- HTML documentation: http://xarray.pydata.org
- Issue tracker: http://github.com/pydata/xarray/issues
- Source code: http://github.com/pydata/xarray
- SciPy2015 talk: https://www.youtube.com/watch?v=X0pAhJgySxk
"""
# Code to extract and write the version copied from pandas.
# Used under the terms of pandas's license, see licenses/PANDAS_LICENSE.
FULLVERSION = VERSION
write_version = True
if not ISRELEASED:
import subprocess
FULLVERSION += '.dev'
pipe = None
for cmd in ['git', 'git.cmd']:
try:
pipe = subprocess.Popen(
[cmd, "describe", "--always", "--match", "v[0-9]*"],
stdout=subprocess.PIPE)
(so, serr) = pipe.communicate()
if pipe.returncode == 0:
break
except:
pass
if pipe is None or pipe.returncode != 0:
# no git, or not in git dir
if os.path.exists('xarray/version.py'):
warnings.warn("WARNING: Couldn't get git revision, using existing xarray/version.py")
write_version = False
else:
warnings.warn("WARNING: Couldn't get git revision, using generic version string")
else:
# have git, in git dir, but may have used a shallow clone (travis does this)
rev = so.strip()
# makes distutils blow up on Python 2.7
if sys.version_info[0] >= 3:
rev = rev.decode('ascii')
if not rev.startswith('v') and re.match("[a-zA-Z0-9]{7,9}", rev):
# partial clone, manually construct version string
# this is the format before we started using git-describe
# to get an ordering on dev version strings.
rev = "v%s.dev-%s" % (VERSION, rev)
# Strip leading v from tags in the format "vx.y.z" to get the version string
FULLVERSION = rev.lstrip('v')
else:
FULLVERSION += QUALIFIER
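# Editor's note (illustrative, not part of the original file): with the values
# above, a released build (ISRELEASED=True) would yield '0.10.0rc2'; a shallow
# git clone yields something like '0.10.0.dev-abc1234' (hash only); and a full
# clone uses the `git describe` output with the leading 'v' stripped, e.g.
# '0.9.6-123-gabc1234'.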
def write_version_py(filename=None):
cnt = """\
version = '%s'
short_version = '%s'
"""
if not filename:
filename = os.path.join(
os.path.dirname(__file__), 'xarray', 'version.py')
a = open(filename, 'w')
try:
a.write(cnt % (FULLVERSION, VERSION))
finally:
a.close()
if write_version:
write_version_py()
setup(name=DISTNAME,
version=FULLVERSION,
license=LICENSE,
author=AUTHOR,
author_email=AUTHOR_EMAIL,
classifiers=CLASSIFIERS,
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
install_requires=INSTALL_REQUIRES,
tests_require=TESTS_REQUIRE,
url=URL,
packages=find_packages(),
package_data={'xarray': ['tests/data/*', 'plot/default_colormap.csv']})
|
apache-2.0
|
corburn/scikit-bio
|
skbio/diversity/alpha/tests/test_base.py
|
5
|
18232
|
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
from unittest import TestCase, main
from io import StringIO
import os
import numpy as np
import numpy.testing as npt
import pandas as pd
from skbio import TreeNode
from skbio.util import get_data_path
from skbio.tree import DuplicateNodeError, MissingNodeError
from skbio.diversity.alpha import (
berger_parker_d, brillouin_d, dominance, doubles, enspie,
esty_ci, faith_pd, fisher_alpha, goods_coverage, heip_e, kempton_taylor_q,
margalef, mcintosh_d, mcintosh_e, menhinick, michaelis_menten_fit,
observed_otus, osd, pielou_e, robbins, shannon, simpson, simpson_e,
singles, strong)
class BaseTests(TestCase):
def setUp(self):
self.counts = np.array([0, 1, 1, 4, 2, 5, 2, 4, 1, 2])
self.b1 = np.array(
[[1, 3, 0, 1, 0],
[0, 2, 0, 4, 4],
[0, 0, 6, 2, 1],
[0, 0, 1, 1, 1]])
self.sids1 = list('ABCD')
self.oids1 = ['OTU%d' % i for i in range(1, 6)]
self.t1 = TreeNode.read(StringIO(
u'(((((OTU1:0.5,OTU2:0.5):0.5,OTU3:1.0):1.0):'
u'0.0,(OTU4:0.75,OTU5:0.75):1.25):0.0)root;'))
self.t1_w_extra_tips = TreeNode.read(
StringIO(u'(((((OTU1:0.5,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,(OTU4:'
u'0.75,(OTU5:0.25,(OTU6:0.5,OTU7:0.5):0.5):0.5):1.25):0.0'
u')root;'))
def test_berger_parker_d(self):
self.assertEqual(berger_parker_d(np.array([5])), 1)
self.assertEqual(berger_parker_d(np.array([5, 5])), 0.5)
self.assertEqual(berger_parker_d(np.array([1, 1, 1, 1, 0])), 0.25)
self.assertEqual(berger_parker_d(self.counts), 5 / 22)
def test_brillouin_d(self):
self.assertAlmostEqual(brillouin_d(np.array([1, 2, 0, 0, 3, 1])),
0.86289353018248782)
def test_dominance(self):
self.assertEqual(dominance(np.array([5])), 1)
self.assertAlmostEqual(dominance(np.array([1, 0, 2, 5, 2])), 0.34)
def test_doubles(self):
self.assertEqual(doubles(self.counts), 3)
self.assertEqual(doubles(np.array([0, 3, 4])), 0)
self.assertEqual(doubles(np.array([2])), 1)
self.assertEqual(doubles(np.array([0, 0])), 0)
def test_enspie(self):
# Totally even community should have ENS_pie = number of OTUs.
self.assertAlmostEqual(enspie(np.array([1, 1, 1, 1, 1, 1])), 6)
self.assertAlmostEqual(enspie(np.array([13, 13, 13, 13])), 4)
# Hand calculated.
arr = np.array([1, 41, 0, 0, 12, 13])
exp = 1 / ((arr / arr.sum()) ** 2).sum()
self.assertAlmostEqual(enspie(arr), exp)
# Using dominance.
exp = 1 / dominance(arr)
self.assertAlmostEqual(enspie(arr), exp)
arr = np.array([1, 0, 2, 5, 2])
exp = 1 / dominance(arr)
self.assertAlmostEqual(enspie(arr), exp)
def test_esty_ci(self):
def _diversity(indices, f):
"""Calculate diversity index for each window of size 1.
indices: vector of indices of OTUs
f: f(counts) -> diversity measure
"""
result = []
max_size = max(indices) + 1
freqs = np.zeros(max_size, dtype=int)
for i in range(len(indices)):
freqs += np.bincount(indices[i:i + 1], minlength=max_size)
try:
curr = f(freqs)
except (ZeroDivisionError, FloatingPointError):
curr = 0
result.append(curr)
return np.array(result)
data = [1, 1, 2, 1, 1, 3, 2, 1, 3, 4]
observed_lower, observed_upper = zip(*_diversity(data, esty_ci))
expected_lower = np.array([1, -1.38590382, -0.73353593, -0.17434465,
-0.15060902, -0.04386191, -0.33042054,
-0.29041008, -0.43554755, -0.33385652])
expected_upper = np.array([1, 1.38590382, 1.40020259, 0.67434465,
0.55060902, 0.71052858, 0.61613483,
0.54041008, 0.43554755, 0.53385652])
npt.assert_array_almost_equal(observed_lower, expected_lower)
npt.assert_array_almost_equal(observed_upper, expected_upper)
def test_faith_pd_none_observed(self):
actual = faith_pd(np.array([], dtype=int), np.array([], dtype=int),
self.t1)
expected = 0.0
self.assertAlmostEqual(actual, expected)
actual = faith_pd([0, 0, 0, 0, 0], self.oids1, self.t1)
expected = 0.0
self.assertAlmostEqual(actual, expected)
def test_faith_pd_all_observed(self):
actual = faith_pd([1, 1, 1, 1, 1], self.oids1, self.t1)
expected = sum(n.length for n in self.t1.traverse()
if n.length is not None)
self.assertAlmostEqual(actual, expected)
actual = faith_pd([1, 2, 3, 4, 5], self.oids1, self.t1)
expected = sum(n.length for n in self.t1.traverse()
if n.length is not None)
self.assertAlmostEqual(actual, expected)
def test_faith_pd(self):
# expected results derived from QIIME 1.9.1, which
# is a completely different implementation from skbio's initial
# phylogenetic diversity implementation
actual = faith_pd(self.b1[0], self.oids1, self.t1)
expected = 4.5
self.assertAlmostEqual(actual, expected)
actual = faith_pd(self.b1[1], self.oids1, self.t1)
expected = 4.75
self.assertAlmostEqual(actual, expected)
actual = faith_pd(self.b1[2], self.oids1, self.t1)
expected = 4.75
self.assertAlmostEqual(actual, expected)
actual = faith_pd(self.b1[3], self.oids1, self.t1)
expected = 4.75
self.assertAlmostEqual(actual, expected)
def test_faith_pd_extra_tips(self):
# results are the same despite presences of unobserved tips in tree
actual = faith_pd(self.b1[0], self.oids1, self.t1_w_extra_tips)
expected = faith_pd(self.b1[0], self.oids1, self.t1)
self.assertAlmostEqual(actual, expected)
actual = faith_pd(self.b1[1], self.oids1, self.t1_w_extra_tips)
expected = faith_pd(self.b1[1], self.oids1, self.t1)
self.assertAlmostEqual(actual, expected)
actual = faith_pd(self.b1[2], self.oids1, self.t1_w_extra_tips)
expected = faith_pd(self.b1[2], self.oids1, self.t1)
self.assertAlmostEqual(actual, expected)
actual = faith_pd(self.b1[3], self.oids1, self.t1_w_extra_tips)
expected = faith_pd(self.b1[3], self.oids1, self.t1)
self.assertAlmostEqual(actual, expected)
def test_faith_pd_minimal_trees(self):
# expected values computed by hand
# zero tips
tree = TreeNode.read(StringIO(u'root;'))
actual = faith_pd(np.array([], dtype=int), [], tree)
expected = 0.0
self.assertEqual(actual, expected)
# two tips
tree = TreeNode.read(StringIO(u'(OTU1:0.25, OTU2:0.25)root;'))
actual = faith_pd([1, 0], ['OTU1', 'OTU2'], tree)
expected = 0.25
self.assertEqual(actual, expected)
def test_faith_pd_qiime_tiny_test(self):
# the following table and tree are derived from the QIIME 1.9.1
# "tiny-test" data
tt_table_fp = get_data_path(
os.path.join('qiime-191-tt', 'otu-table.tsv'), 'data')
tt_tree_fp = get_data_path(
os.path.join('qiime-191-tt', 'tree.nwk'), 'data')
self.q_table = pd.read_csv(tt_table_fp, sep='\t', skiprows=1,
index_col=0)
self.q_tree = TreeNode.read(tt_tree_fp)
expected_fp = get_data_path(
os.path.join('qiime-191-tt', 'faith-pd.txt'), 'data')
expected = pd.read_csv(expected_fp, sep='\t', index_col=0)
for sid in self.q_table.columns:
actual = faith_pd(self.q_table[sid], otu_ids=self.q_table.index,
tree=self.q_tree)
self.assertAlmostEqual(actual, expected['PD_whole_tree'][sid])
def test_faith_pd_root_not_observed(self):
# expected values computed by hand
tree = TreeNode.read(
StringIO(u'((OTU1:0.1, OTU2:0.2):0.3, (OTU3:0.5, OTU4:0.7):1.1)'
u'root;'))
otu_ids = ['OTU%d' % i for i in range(1, 5)]
# root node not observed, but branch between (OTU1, OTU2) and root
# is considered observed
actual = faith_pd([1, 1, 0, 0], otu_ids, tree)
expected = 0.6
self.assertAlmostEqual(actual, expected)
# root node not observed, but branch between (OTU3, OTU4) and root
# is considered observed
actual = faith_pd([0, 0, 1, 1], otu_ids, tree)
expected = 2.3
self.assertAlmostEqual(actual, expected)
def test_faith_pd_invalid_input(self):
# Many of these tests are duplicated from
# skbio.diversity.tests.test_base, but I think it's important to
# confirm that they are being run when faith_pd is called.
# tree has duplicated tip ids
t = TreeNode.read(
StringIO(u'(((((OTU1:0.5,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,(OTU4:'
u'0.75,OTU2:0.75):1.25):0.0)root;'))
counts = [1, 2, 3]
otu_ids = ['OTU1', 'OTU2', 'OTU3']
self.assertRaises(DuplicateNodeError, faith_pd, counts, otu_ids, t)
# unrooted tree as input
t = TreeNode.read(StringIO(u'((OTU1:0.1, OTU2:0.2):0.3, OTU3:0.5,'
u'OTU4:0.7);'))
counts = [1, 2, 3]
otu_ids = ['OTU1', 'OTU2', 'OTU3']
self.assertRaises(ValueError, faith_pd, counts, otu_ids, t)
# otu_ids has duplicated ids
t = TreeNode.read(
StringIO(u'(((((OTU1:0.5,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,(OTU4:'
u'0.75,OTU5:0.75):1.25):0.0)root;'))
counts = [1, 2, 3]
otu_ids = ['OTU1', 'OTU2', 'OTU2']
self.assertRaises(ValueError, faith_pd, counts, otu_ids, t)
# len of vectors not equal
t = TreeNode.read(
StringIO(u'(((((OTU1:0.5,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,(OTU4:'
u'0.75,OTU5:0.75):1.25):0.0)root;'))
counts = [1, 2]
otu_ids = ['OTU1', 'OTU2', 'OTU3']
self.assertRaises(ValueError, faith_pd, counts, otu_ids, t)
counts = [1, 2, 3]
otu_ids = ['OTU1', 'OTU2']
self.assertRaises(ValueError, faith_pd, counts, otu_ids, t)
# negative counts
t = TreeNode.read(
StringIO(u'(((((OTU1:0.5,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,(OTU4:'
u'0.75,OTU5:0.75):1.25):0.0)root;'))
counts = [1, 2, -3]
otu_ids = ['OTU1', 'OTU2', 'OTU3']
self.assertRaises(ValueError, faith_pd, counts, otu_ids, t)
# tree with no branch lengths
t = TreeNode.read(
StringIO(u'((((OTU1,OTU2),OTU3)),(OTU4,OTU5));'))
counts = [1, 2, 3]
otu_ids = ['OTU1', 'OTU2', 'OTU3']
self.assertRaises(ValueError, faith_pd, counts, otu_ids, t)
# tree missing some branch lengths
t = TreeNode.read(
StringIO(u'(((((OTU1,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,(OTU4:'
u'0.75,OTU5:0.75):1.25):0.0)root;'))
counts = [1, 2, 3]
otu_ids = ['OTU1', 'OTU2', 'OTU3']
self.assertRaises(ValueError, faith_pd, counts, otu_ids, t)
# otu_ids not present in tree
t = TreeNode.read(
StringIO(u'(((((OTU1:0.5,OTU2:0.5):0.5,OTU3:1.0):1.0):0.0,(OTU4:'
u'0.75,OTU5:0.75):1.25):0.0)root;'))
counts = [1, 2, 3]
otu_ids = ['OTU1', 'OTU2', 'OTU42']
self.assertRaises(MissingNodeError, faith_pd, counts, otu_ids, t)
def test_fisher_alpha(self):
exp = 2.7823795367398798
arr = np.array([4, 3, 4, 0, 1, 0, 2])
obs = fisher_alpha(arr)
self.assertAlmostEqual(obs, exp)
# Should depend only on S and N (number of OTUs, number of
# individuals / seqs), so we should obtain the same output as above.
obs = fisher_alpha([1, 6, 1, 0, 1, 0, 5])
self.assertAlmostEqual(obs, exp)
# Should match another by hand:
# 2 OTUs, 62 seqs, alpha is 0.39509
obs = fisher_alpha([61, 0, 0, 1])
self.assertAlmostEqual(obs, 0.39509, delta=0.0001)
# Test case where we have >1000 individuals (SDR-IV makes note of this
# case). Verified against R's vegan::fisher.alpha.
obs = fisher_alpha([999, 0, 10])
self.assertAlmostEqual(obs, 0.2396492)
def test_goods_coverage(self):
counts = [1] * 75 + [2, 2, 2, 2, 2, 2, 3, 4, 4]
obs = goods_coverage(counts)
self.assertAlmostEqual(obs, 0.23469387755)
def test_heip_e(self):
# Calculate "by hand".
arr = np.array([1, 2, 3, 1])
h = shannon(arr, base=np.e)
expected = (np.exp(h) - 1) / 3
self.assertEqual(heip_e(arr), expected)
# From Statistical Ecology: A Primer in Methods and Computing, page 94,
# table 8.1.
self.assertAlmostEqual(heip_e([500, 300, 200]), 0.90, places=2)
self.assertAlmostEqual(heip_e([500, 299, 200, 1]), 0.61, places=2)
def test_kempton_taylor_q(self):
# Approximate Magurran 1998 calculation p143.
arr = np.array([2, 3, 3, 3, 3, 3, 4, 4, 4, 6, 6, 7, 7, 9, 9, 11, 14,
15, 15, 20, 29, 33, 34, 36, 37, 53, 57, 138, 146, 170])
exp = 14 / np.log(34 / 4)
self.assertAlmostEqual(kempton_taylor_q(arr), exp)
# Should get same answer regardless of input order.
np.random.shuffle(arr)
self.assertAlmostEqual(kempton_taylor_q(arr), exp)
def test_margalef(self):
self.assertEqual(margalef(self.counts), 8 / np.log(22))
def test_mcintosh_d(self):
self.assertAlmostEqual(mcintosh_d(np.array([1, 2, 3])),
0.636061424871458)
def test_mcintosh_e(self):
num = np.sqrt(15)
den = np.sqrt(19)
exp = num / den
self.assertEqual(mcintosh_e(np.array([1, 2, 3, 1])), exp)
def test_menhinick(self):
# observed_otus = 9, total # of individuals = 22
self.assertEqual(menhinick(self.counts), 9 / np.sqrt(22))
def test_michaelis_menten_fit(self):
obs = michaelis_menten_fit([22])
self.assertAlmostEqual(obs, 1.0)
obs = michaelis_menten_fit([42])
self.assertAlmostEqual(obs, 1.0)
obs = michaelis_menten_fit([34], num_repeats=3, params_guess=(13, 13))
self.assertAlmostEqual(obs, 1.0)
obs = michaelis_menten_fit([70, 70], num_repeats=5)
self.assertAlmostEqual(obs, 2.0, places=1)
obs_few = michaelis_menten_fit(np.arange(4) * 2, num_repeats=10)
obs_many = michaelis_menten_fit(np.arange(4) * 100, num_repeats=10)
# [0,100,200,300] looks like only 3 OTUs.
self.assertAlmostEqual(obs_many, 3.0, places=1)
# [0,2,4,6] looks like 3 OTUs with maybe more to be found.
self.assertTrue(obs_few > obs_many)
def test_observed_otus(self):
obs = observed_otus(np.array([4, 3, 4, 0, 1, 0, 2]))
self.assertEqual(obs, 5)
obs = observed_otus(np.array([0, 0, 0]))
self.assertEqual(obs, 0)
obs = observed_otus(self.counts)
self.assertEqual(obs, 9)
def test_osd(self):
self.assertEqual(osd(self.counts), (9, 3, 3))
def test_pielou_e(self):
# Calculate "by hand".
arr = np.array([1, 2, 3, 1])
h = shannon(arr, np.e)
s = 4
expected = h / np.log(s)
self.assertAlmostEqual(pielou_e(arr), expected)
self.assertAlmostEqual(pielou_e(self.counts), 0.92485490560)
self.assertEqual(pielou_e([1, 1]), 1.0)
self.assertEqual(pielou_e([1, 1, 1, 1]), 1.0)
self.assertEqual(pielou_e([1, 1, 1, 1, 0, 0]), 1.0)
# Examples from
# http://ww2.mdsg.umd.edu/interactive_lessons/biofilm/diverse.htm#3
self.assertAlmostEqual(pielou_e([1, 1, 196, 1, 1]), 0.078, 3)
self.assertTrue(np.isnan(pielou_e([0, 0, 200, 0, 0])))
self.assertTrue(np.isnan(pielou_e([0, 0, 0, 0, 0])))
def test_robbins(self):
self.assertEqual(robbins(np.array([1, 2, 3, 0, 1])), 2 / 7)
def test_shannon(self):
self.assertEqual(shannon(np.array([5])), 0)
self.assertEqual(shannon(np.array([5, 5])), 1)
self.assertEqual(shannon(np.array([1, 1, 1, 1, 0])), 2)
def test_simpson(self):
self.assertAlmostEqual(simpson(np.array([1, 0, 2, 5, 2])), 0.66)
self.assertAlmostEqual(simpson(np.array([5])), 0)
def test_simpson_e(self):
# A totally even community should have simpson_e = 1.
self.assertEqual(simpson_e(np.array([1, 1, 1, 1, 1, 1, 1])), 1)
arr = np.array([0, 30, 25, 40, 0, 0, 5])
freq_arr = arr / arr.sum()
D = (freq_arr ** 2).sum()
exp = 1 / (D * 4)
obs = simpson_e(arr)
self.assertEqual(obs, exp)
# From:
# https://groups.nceas.ucsb.edu/sun/meetings/calculating-evenness-
# of-habitat-distributions
arr = np.array([500, 400, 600, 500])
D = 0.0625 + 0.04 + 0.09 + 0.0625
exp = 1 / (D * 4)
self.assertEqual(simpson_e(arr), exp)
def test_singles(self):
self.assertEqual(singles(self.counts), 3)
self.assertEqual(singles(np.array([0, 3, 4])), 0)
self.assertEqual(singles(np.array([1])), 1)
self.assertEqual(singles(np.array([0, 0])), 0)
def test_strong(self):
self.assertAlmostEqual(strong(np.array([1, 2, 3, 1])), 0.214285714)
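    # Editor's illustration (not part of the original suite): osd() is simply
    # the (observed_otus, singles, doubles) triple, so the fixtures above must
    # agree with test_osd.
    def test_osd_consistency_example(self):
        self.assertEqual(osd(self.counts),
                         (observed_otus(self.counts),
                          singles(self.counts),
                          doubles(self.counts)))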
if __name__ == '__main__':
main()
|
bsd-3-clause
|
cpritam/moose
|
framework/scripts/memory_logger.py
|
3
|
41944
|
#!/usr/bin/env python
from tempfile import TemporaryFile, SpooledTemporaryFile
import os, sys, re, socket, time, pickle, csv, uuid, subprocess, argparse, decimal, select, platform
class LLDB:
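    # Editor's note: attaches the lldb debugger to a running PID, asks it for a
    # backtrace ('bt'), and strips the interactive '(lldb)' prompt chatter out
    # of the captured output.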
def _parseStackTrace(self, jibberish):
not_jibberish = re.findall(r'\(lldb\) bt(.*)\(lldb\)', jibberish, re.DOTALL)
if len(not_jibberish) != 0:
return not_jibberish[0].replace(' frame ', '')
else:
return 'Stack Trace failed: ' + jibberish
def _waitForResponse(self, wait=True):
while wait:
self.lldb_stdout.seek(self.last_position)
for line in self.lldb_stdout:
if line == '(lldb) ':
self.last_position = self.lldb_stdout.tell()
return True
time.sleep(0.05)
time.sleep(0.05)
return True
def getStackTrace(self, pid):
lldb_commands = [ 'attach -p ' + pid + '\n', 'bt\n', 'quit\n', 'Y\n' ]
self.lldb_stdout = SpooledTemporaryFile()
self.last_position = 0
lldb_process = subprocess.Popen(['lldb', '-x'], stdin=subprocess.PIPE, stdout=self.lldb_stdout, stderr=self.lldb_stdout)
while lldb_process.poll() == None:
for command in lldb_commands:
if command == lldb_commands[-1]:
lldb_commands = []
if self._waitForResponse(False):
# I have seen LLDB exit out from under us
try:
lldb_process.stdin.write(command)
except:
pass
elif self._waitForResponse():
lldb_process.stdin.write(command)
self.lldb_stdout.seek(0)
stack_trace = self._parseStackTrace(self.lldb_stdout.read())
self.lldb_stdout.close()
return stack_trace
class GDB:
def _parseStackTrace(self, jibberish):
not_jibberish = re.findall(r'\(gdb\) (#.*)\(gdb\)', jibberish, re.DOTALL)
if len(not_jibberish) != 0:
return not_jibberish[0]
else:
return 'Stack Trace failed: ' + jibberish
def _waitForResponse(self, wait=True):
while wait:
self.gdb_stdout.seek(self.last_position)
for line in self.gdb_stdout:
if line == '(gdb) ':
self.last_position = self.gdb_stdout.tell()
return True
time.sleep(0.05)
time.sleep(0.05)
return True
def getStackTrace(self, pid):
gdb_commands = [ 'attach ' + pid + '\n', 'set verbose off\n', 'thread\n', 'apply\n', 'all\n', 'bt\n', 'quit\n', 'y\n' ]
self.gdb_stdout = SpooledTemporaryFile()
self.last_position = 0
gdb_process = subprocess.Popen(['gdb', '-nx'], stdin=subprocess.PIPE, stdout=self.gdb_stdout, stderr=self.gdb_stdout)
while gdb_process.poll() == None:
for command in gdb_commands:
if command == gdb_commands[-1]:
gdb_commands = []
elif self._waitForResponse():
# I have seen GDB exit out from under us
try:
gdb_process.stdin.write(command)
except:
pass
self.gdb_stdout.seek(0)
stack_trace = self._parseStackTrace(self.gdb_stdout.read())
self.gdb_stdout.close()
return stack_trace
class Server:
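    # Editor's note: depending on --pbs, this object either becomes a TCP
    # server that ssh-launches one memory_logger agent per PBS node and
    # aggregates their samples into the CSV log, or it falls through to a
    # single local Client that samples the process directly.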
def __init__(self, arguments):
self.arguments = arguments
self.arguments.cwd = os.getcwd()
# Test to see if we are starting as a server
if self.arguments.pbs == True:
if os.getenv('PBS_NODEFILE') != None:
# Initialize an agent, strictly for holding our stdout logs. Give it the UUID of 'server'
self.agent = Agent(self.arguments, 'server')
if self.arguments.recover:
self.logfile = WriteCSV(self.arguments.outfile[0], False)
else:
self.logfile = WriteCSV(self.arguments.outfile[0], True)
self.client_connections = []
self.startServer()
else:
print 'I could not find your PBS_NODEFILE. Is PBS loaded?'
sys.exit(1)
# If we are not a server, start the single client
else:
self.startClient()
def startServer(self):
# Setup the TCP socket
self.server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.server_socket.bind((socket.gethostname(), 0))
self.server_socket.listen(5)
(self.host, self.port) = self.server_socket.getsockname()
# We will store all connections (sockets objects) made to the server in a list
self.client_connections.append(self.server_socket)
# Launch the actual binary we want to track
self._launchJob()
# Now launch all pbs agents
self._launchClients()
# This is a try so we can handle a keyboard ctrl-c
try:
# Continue to listen and accept active connections from agents
# until all agents report a STOP command.
AGENTS_ACTIVE = True
while AGENTS_ACTIVE:
read_sockets, write_sockets, error_sockets = select.select(self.client_connections,[],[])
for sock in read_sockets:
if sock == self.server_socket:
# Accept an incoming connection
self.client_connections.append(self.server_socket.accept()[0])
else:
# Deal with the data being sent to the server by its agents
self.handleAgent()
# Check to see if _all_ agents are telling the server to stop
agent_count = len(self.agent.agent_data.keys())
current_count = 0
for agent in self.agent.agent_data.keys():
if self.agent.agent_data[agent]['STOP']:
current_count += 1
# if All Agents have reported a STOP command, begin to exit
if current_count == agent_count:
AGENTS_ACTIVE = False
# Gotta get out of the for loop somehow...
break
# Sleep a bit before reading additional data
time.sleep(self.arguments.repeat_rate[-1])
# Close the server socket
self.server_socket.close()
# Close the logfile as the server is about to exit
self.logfile.close()
# Cancel server operations if ctrl-c was pressed
except KeyboardInterrupt:
print 'Canceled by user. Wrote log:', self.arguments.outfile[0]
sys.exit(0)
# Normal exiting procedures
print '\n\nAll agents have stopped. Log file saved to:', self.arguments.outfile[0]
sys.exit(0)
def startClient(self):
Client(self.arguments)
def _launchClients(self):
# Read the environment PBS_NODEFILE
self._PBS_NODEFILE = open(os.getenv('PBS_NODEFILE'), 'r')
nodes = set(self._PBS_NODEFILE.read().split())
# Print some useful information about our setup
print 'Memory Logger running on Host:', self.host, 'Port:', self.port, '\nNodes:', ', '.join(nodes), '\nSample rate (including stdout):', self.arguments.repeat_rate[-1], 's (use --repeat-rate to adjust)\nRemote agents delaying', self.arguments.pbs_delay[-1], 'second/s before tracking. (use --pbs-delay to adjust)\n'
# Build our command list based on the PBS_NODEFILE
command = []
for node in nodes:
command.append([ 'ssh', node,
'bash --login -c "source /etc/profile && ' \
+ 'sleep ' + str(self.arguments.pbs_delay[-1]) + ' && ' \
+ os.path.abspath(__file__) \
+ ' --call-back-host ' \
+ self.host + ' ' + str(self.port) \
+ '"'])
# remote into each node and execute another copy of memory_logger.py
# with a call back argument to receive further instructions
for pbs_node in command:
subprocess.Popen(pbs_node, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
# Launch the binary we intend to track
def _launchJob(self):
subprocess.Popen(self.arguments.run[-1].split(), stdout=self.agent.log, stderr=self.agent.log)
# A connection has been made from client to server
# Capture that data, and determine what to do with it
def handleAgent(self):
# Loop through all client connections, and receive data if any
for agent_socket in self.client_connections:
# Completely ignore the server_socket object
if agent_socket == self.server_socket:
continue
# Assign an AgentConnector for the task of handling data between client and server
reporting_agent = AgentConnector(self.arguments, agent_socket)
# OK... get data from a client and begin
new_data = reporting_agent.readData()
if new_data != None:
# There should be only one dictionary key (we're reading data from just one client at a time)
agent_uuid = new_data.keys()[0]
# Update our dictionary of an agents data
self.agent.agent_data[agent_uuid] = new_data[agent_uuid]
# Modify the incoming Agent's timestamp to match the Server's time (because every node is a little bit off)
if self.arguments.recover:
self.agent.agent_data[agent_uuid]['TIMESTAMP'] = GetTime().now - self.agent.delta
else:
self.agent.agent_data[agent_uuid]['TIMESTAMP'] = GetTime().now
# update total usage for all known reporting agents
total_usage = 0
for one_agent in self.agent.agent_data.keys():
total_usage += self.agent.agent_data[one_agent]['MEMORY']
self.agent.agent_data[agent_uuid]['TOTAL'] = int(total_usage)
# Get any stdout that's happened thus far and apply it to whatever agent just sent us data
self.agent.agent_data[agent_uuid]['STDOUT'] = self.agent._getStdout()
# Write to our logfile
self.logfile.write(self.agent.agent_data[agent_uuid])
# Check for any agents sending a stop command. If we find one,
# set some zeroing values, and close that agent's socket.
if self.agent.agent_data[agent_uuid]['STOP']:
self.agent.agent_data[agent_uuid]['MEMORY'] = 0
agent_socket.close()
if agent_socket != self.server_socket:
self.client_connections.remove(agent_socket)
# Go ahead and set our server agent to STOP as well.
# The server will continue recording samples from agents
self.agent.agent_data['server']['STOP'] = True
# If an Agent has made a request for instructions, handle it here
update_client = False
if new_data[agent_uuid]['REQUEST'] != None:
for request in new_data[agent_uuid]['REQUEST'].iteritems():
if new_data[agent_uuid]['REQUEST'][request[0]] == '':
update_client = True
# We only support sending arguments supplied to the server back to the agent
for request_type in dir(self.arguments):
if request[0] == str(request_type):
self.agent.agent_data[agent_uuid]['REQUEST'][request[0]] = getattr(self.arguments, request[0])
# If an Agent needed additional instructions, go ahead and re-send those instructions
if update_client:
reporting_agent.sendData(self.agent.agent_data[agent_uuid])
class Client:
def __init__(self, arguments):
self.arguments = arguments
# Initialize an Agent with a UUID based on our hostname
self.my_agent = Agent(arguments, str(uuid.uuid3(uuid.NAMESPACE_DNS, socket.gethostname())))
# Initialize an AgentConnector
self.remote_server = AgentConnector(self.arguments)
# If client will talk to a server (PBS)
if self.arguments.call_back_host:
# We know by initializing an agent, agent_data contains the necessary message asking for further instructions
self.my_agent.agent_data[self.my_agent.my_uuid] = self.remote_server.sendData(self.my_agent.agent_data)
# Apply new instructions received from server (this basically updates our arguments)
for request in self.my_agent.agent_data[self.my_agent.my_uuid]['REQUEST'].iteritems():
for request_type in dir(self.arguments):
if request[0] == str(request_type):
setattr(self.arguments, request[0], request[1])
# Requests have been satisfied, set to None
self.my_agent.agent_data[self.my_agent.my_uuid]['REQUEST'] = None
# Change to the same directory as the server was when initiated (needed for PBS stuff)
os.chdir(self.arguments.cwd)
# Client will not be talking to a server, save data to a file instead
else:
# Deal with --recover
if self.arguments.recover:
# Do not overwrite the file
self.logfile = WriteCSV(self.arguments.outfile[0], False)
else:
# Overwrite the file
self.logfile = WriteCSV(self.arguments.outfile[0], True)
# Lets begin!
self.startProcess()
# This function handles the starting and stopping of the sampler process.
# We loop until an agent returns a stop command.
def startProcess(self):
AGENTS_ACTIVE = True
# If we know we are the only client, go ahead and start the process we want to track.
if self.arguments.call_back_host == None:
subprocess.Popen(self.arguments.run[-1].split(), stdout=self.my_agent.log, stderr=self.my_agent.log)
# Delay just a bit to keep from recording a possible zero memory usage as the binary starts up
time.sleep(self.arguments.sample_delay[0])
# This is a try so we can handle a keyboard ctrl-c
try:
# Continue to process data until an Agent reports a STOP command
while AGENTS_ACTIVE:
# Take a sample
current_data = self.my_agent.takeSample()
# Handle the data supplied by the Agent.
self._handleData(current_data)
# If an Agent reported a STOP command, go ahead and begin the shutdown phase
if current_data[current_data.keys()[0]]['STOP']:
AGENTS_ACTIVE = False
# Sleep just a bit between samples, as to not saturate the machine
time.sleep(self.arguments.repeat_rate[-1])
# An agent reported a stop command... so let everyone know where the log was saved, and exit!
if self.arguments.call_back_host == None:
print 'Binary has exited. Wrote log:', self.arguments.outfile[0]
# Cancel server operations if ctrl-c was pressed
except KeyboardInterrupt:
self.logfile.close()
print 'Canceled by user. Wrote log:', self.arguments.outfile[0]
sys.exit(0)
# Everything went smooth.
sys.exit(0)
# Figure out what to do with the sampled data
def _handleData(self, data):
# Sending the sampled data to a server
if self.arguments.call_back_host:
self.remote_server.sendData(data)
# Saving the sampled data to a file
else:
# Compute the TOTAL memory usage to be how much our one agent reported
# Because we're the only client doing any work
data[self.my_agent.my_uuid]['TOTAL'] = data[self.my_agent.my_uuid]['MEMORY']
self.logfile.write(data[self.my_agent.my_uuid])
# If the agent has been told to stop, close the database file
if self.my_agent.agent_data[self.my_agent.my_uuid]['STOP'] == True:
self.logfile.close()
class AgentConnector:
"""
Functions used to communicate to and from Client and Server.
Both Client and Server classes use this object.
readData()
sendData('message', socket_connection=None)
if sendData's socket_connection is None, it will create a new connection to the server
based on supplied arguments
"""
def __init__(self, arguments, connection=None):
self.arguments = arguments
self.connection = connection
self.CREATED_CONNECTION = False
# If the connection is None, meaning this object was instanced by a client,
# we must create a connection to the server first
if self.connection == None and self.arguments.call_back_host != None:
self.CREATED_CONNECTION = True
self.connection = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.connection.settimeout(15)
self.connection.connect((self.arguments.call_back_host[0], int(self.arguments.call_back_host[1])))
# read all data sent by an agent
def readData(self):
# Get how much data there is to receive
# The first eight bytes are the data length
data_width = int(self.connection.recv(8))
tmp_received = ''
# We need to receive precisely the amount of data the
# client is trying to send us.
while len(tmp_received) < data_width:
if data_width - len(tmp_received) > 1024:
tmp_received += self.connection.recv(1024)
else:
tmp_received += self.connection.recv(data_width - (len(tmp_received)))
# unpickle the received message
return self._unpickleMessage(tmp_received)
# send data to an agent
def sendData(self, message):
# pickle the data up, and send the message
self.connection.sendall(self._pickleMessage(message))
# If we had to create the socket (connection was none), and this client/agent is requesting
# instructions, go ahead and read the data that _better be there_ sent to us by the server.
if self.CREATED_CONNECTION and message[message.keys()[0]]['REQUEST'] != None:
return self.readData()
# The following two functions pickle up the data for easy socket transport
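# Wire format: an 8-character, left-justified decimal byte count followed by the
# pickled payload, e.g. '147     <pickled bytes>' (the length shown is illustrative)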
def _pickleMessage(self, message):
t = TemporaryFile()
pickle.dump(message, t)
t.seek(0)
str_msg = t.read()
str_len = len(str_msg)
message = "%-8d" % (str_len,) + str_msg
return message
def _unpickleMessage(self, message):
t = TemporaryFile()
t.write(message)
t.seek(0)
try:
return pickle.load(t)
except KeyError:
print 'Socket data was not pickled data: ', message
except:
raise
class WriteCSV:
def __init__(self, logfile, overwrite):
if overwrite:
self.file_object = open(logfile, 'w', 1)
else:
self.file_object = open(logfile, 'a', 1)
self.log_file = csv.writer(self.file_object, delimiter=',', quotechar='|', escapechar='\\', quoting=csv.QUOTE_MINIMAL)
# Close the logfile
def close(self):
self.file_object.close()
# Write a CSV row
def write(self, data):
formatted_string = self._formatString(data)
self.log_file.writerow(formatted_string)
# Format the CSV output
def _formatString(self, data):
# We will be saving this data in CSV format. Before we do, lets format it a bit here
format_order = ['TIMESTAMP', 'TOTAL', 'STDOUT', 'STACK', 'HOSTNAME', 'MEMORY']
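# Each row is written in the order above, e.g. (illustrative values; memory figures come from ps RSS):
# ['1390320400.123456', 2048000, '', '', 'node01', 1024000]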
formatted_text = []
for item in format_order:
# We have to handle python's way of formatting floats to strings specially
if item == 'TIMESTAMP':
formatted_text.append('%.6f' % data[item])
else:
formatted_text.append(data[item])
return formatted_text
class Agent:
"""
Each agent object contains its own sampled log data. The Agent class is responsible for
collecting and storing data. machine_id is used to identify the agent.
machine_id is supplied by the client class. This allows for multiple agents if desired
"""
def __init__(self, arguments, machine_id):
self.arguments = arguments
self.my_uuid = machine_id
self.track_process = ''
# This log object is for stdout purposes
self.log = TemporaryFile()
self.log_position = 0
# Discover if --recover is being used. If so, we need to obtain the
# timestamp of the last entry in the outfile log... a little bulky
# to do... and not a very good place to do it.
if self.arguments.recover:
if os.path.exists(self.arguments.outfile[-1]):
memory_list = []
history_file = open(self.arguments.outfile[-1], 'r')
reader = csv.reader(history_file, delimiter=',', quotechar='|', escapechar='\\', quoting=csv.QUOTE_MINIMAL)
# Get last item in list. Unfortunately, no way to do this until
# we have read the entire file...? Lucky for us, most memory log
# files are in the single digit megabytes
for row in reader:
memory_list.append(row)
history_file.close()
last_entry = float(memory_list[-1][0]) + self.arguments.repeat_rate[-1]
self.delta = (GetTime().now - last_entry)
else:
print 'Recovery options detected, but I could not find your previous memory log file.'
sys.exit(1)
else:
self.delta = 0
# Create the dictionary to which all sampled data will be stored
# NOTE: REQUEST dictionary items are instructions (arguments) we will
# ask the server to provide (if we are running with --pbs)
# Simply add them here. We _can not_ make the arguments match the
# server exactly, this would cause every agent launched to perform
# like a server... bad stuff
# Example: We added repeat_rate (see dictionary below). Now every
# agent would update their repeat_rate according to what the user
# supplied as an argument (--repeat_rate 0.02)
self.agent_data = { self.my_uuid :
{ 'HOSTNAME' : socket.gethostname(),
'STDOUT' : '',
'STACK' : '',
'MEMORY' : 0,
'TIMESTAMP' : GetTime().now - self.delta,
'REQUEST' : { 'run' : '',
'pstack' : '',
'repeat_rate' : '',
'cwd' : ''},
'STOP' : False,
'TOTAL' : 0,
'DEBUG_LOG' : ''
}
}
# NOTE: This is the only function that should be called in this class
def takeSample(self):
if self.arguments.pstack:
self.agent_data[self.my_uuid]['STACK'] = self._getStack()
# Always do the following
self.agent_data[self.my_uuid]['MEMORY'] = self._getMemory()
self.agent_data[self.my_uuid]['STDOUT'] = self._getStdout()
if self.arguments.recover:
self.agent_data[self.my_uuid]['TIMESTAMP'] = GetTime().now - self.delta
else:
self.agent_data[self.my_uuid]['TIMESTAMP'] = GetTime().now
# Return the data to whomever asked for it
return self.agent_data
def _getStdout(self):
self.log.seek(self.log_position)
output = self.log.read()
self.log_position = self.log.tell()
sys.stdout.write(output)
return output
def _getMemory(self):
tmp_pids = self._getPIDs()
memory_usage = 0
if tmp_pids != {}:
for single_pid in tmp_pids.iteritems():
memory_usage += int(single_pid[1][0])
if memory_usage == 0:
# Memory usage hit zero? Then assume the binary being tracked has exited. So let's begin doing the same.
self.agent_data[self.my_uuid]['DEBUG_LOG'] = 'I found the total memory usage of all my processes hit 0. Stopping'
self.agent_data[self.my_uuid]['STOP'] = True
return 0
return int(memory_usage)
# No binary even detected? Let's assume it exited, so we should begin doing the same.
self.agent_data[self.my_uuid]['STOP'] = True
self.agent_data[self.my_uuid]['DEBUG_LOG'] = 'I found no processes running. Stopping'
return 0
def _getStack(self):
if self._darwin() == True:
stack_trace = LLDB()
else:
stack_trace = GDB()
tmp_pids = self._getPIDs()
if tmp_pids != {}:
last_pid = sorted([x for x in tmp_pids.keys()])[-1]
return stack_trace.getStackTrace(str(last_pid))
else:
return ''
def _getPIDs(self):
pid_list = {}
# Determine the binary to sample and store it. Doing the findCommand is a little expensive.
if self.track_process == '':
self.track_process = self._findCommand(''.join(self.arguments.run))
# A quick way to safely check for the availability of needed tools
self._verifyCommand(['ps'])
# If we are tracking a binary
if self.arguments.run:
command = [which('ps'), '-e', '-o', 'pid,rss,user,args']
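# ps reports: PID, RSS (resident memory, typically in kilobytes), USER, ARGS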
tmp_proc = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
all_pids = tmp_proc.communicate()[0].split('\n')
# Figure out what we are allowed to track (strip away mpiexec, processes not owned by us, etc)
for single_pid in all_pids:
if single_pid.find(self.track_process) != -1 and \
single_pid.find(__file__) == -1 and \
single_pid.find('mpirun') == -1 and \
single_pid.find(os.getenv('USER')) != -1 and \
single_pid.find('mpiexec') == -1:
pid_list[int(single_pid.split()[0])] = []
pid_list[int(single_pid.split()[0])].extend([single_pid.split()[1], single_pid.split()[3]])
return pid_list
def _verifyCommand(self, command_list):
for command in command_list:
if which(command) == None:
print 'Command not found:', command
sys.exit(1)
# determine if we are running on a darwin kernel
def _darwin(self):
if platform.platform(0, 1).split('-')[:-1][0].find('Darwin') != -1:
return True
# Determine the command we are going to track
# A few things are happening here; first we strip off any MPI commands
# we then loop through the remaining items until we find a matching path
# exp: mpiexec -n 12 ../../../moose_test-opt -i simple_diffusion.i -r 6
# would first strip off mpiexec, check for the presence of -n in our
# current directory, then 12, then ../../../moose_test-opt <- found. It would
# stop and return the base name (moose_test-opt).
def _findCommand(self, command):
if command.find('mpiexec') == 0 or command.find('mpirun') == 0:
for binary in command.split():
if os.path.exists(binary):
return os.path.split(binary)[1]
elif os.path.exists(command.split()[0]):
return os.path.split(command.split()[0])[1]
class GetTime:
"""A simple formatted time object.
"""
def __init__(self, posix_time=None):
import datetime
if posix_time == None:
self.posix_time = datetime.datetime.now()
else:
self.posix_time = datetime.datetime.fromtimestamp(posix_time)
self.now = float(datetime.datetime.now().strftime('%s.%f'))
self.microsecond = self.posix_time.microsecond
self.second = self.posix_time.second
self.minute = self.posix_time.strftime('%M')
self.hour = self.posix_time.strftime('%H')
self.day = self.posix_time.strftime('%d')
self.month = self.posix_time.strftime('%m')
self.year = self.posix_time.year
self.dayname = self.posix_time.strftime('%a')
self.monthname = self.posix_time.strftime('%b')
class MemoryPlotter:
def __init__(self, arguments):
self.arguments = arguments
self.buildGraph()
def buildPlots(self):
plot_dictionary = {}
for log in self.arguments.plot:
memory_list = []
if os.path.exists(log):
log_file = open(log, 'r')
reader = csv.reader(log_file, delimiter=',', quotechar='|', escapechar='\\', quoting=csv.QUOTE_MINIMAL)
for row in reader:
memory_list.append(row)
log_file.close()
plot_dictionary[log.split('/')[-1:][0]] = memory_list
else:
print 'log not found:', log
sys.exit(1)
return plot_dictionary
def buildGraph(self):
try:
import matplotlib.pyplot as plt
except ImportError:
print 'Error importing matplotlib. Matplotlib not available on this system?'
sys.exit(1)
plot_dictionary = self.buildPlots()
fig = plt.figure()
plot_list = []
tmp_plot = []
tmp_legend = []
self.stdout_msgs = {}
self.pstack_msgs = {}
self.multiples = 1
self.memory_label = 'Memory in Bytes'
# Try and calculate memory sizes, so we can move annotations around a bit more accurately
largest_memory = []
for plot_name, value_list in plot_dictionary.iteritems():
for records in value_list:
largest_memory.append(int(records[1]))
largest_memory.sort()
# Determine the scale of the graph
suffixes = ["Terabytes", "Gigabytes", "Megabytes", "Kilobytes", "Bytes"]
multiplier = 1 << 40
index = 0
while largest_memory[-1] < multiplier and multiplier >= 1:
multiplier = multiplier >> 10
index = index + 1
self.multiples = multiplier
self.memory_label = "Memory in " + suffixes[index-1]
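# Example: samples are RSS values in kilobytes, so a ~3 GB peak (~3.1e6 K)
# leaves multiplier at 1 << 20 and labels the axis 'Memory in Gigabytes'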
# Loop through each log file
for plot_name, value_list in plot_dictionary.iteritems():
plot_list.append(fig.add_subplot(111))
tmp_memory = []
tmp_time = []
tmp_stdout_x = []
tmp_stdout_y = []
tmp_pstack_x = []
tmp_pstack_y = []
stdout_msg = []
pstack_msg = []
# Get the start time, and make this 0
try:
tmp_zero = decimal.Decimal(value_list[0][0])
except:
print 'Could not parse log file:', plot_name, 'is this a valid memory_logger file?'
sys.exit(1)
# Populate the graph
for records in value_list:
tmp_memory.append(decimal.Decimal(records[1]) / self.multiples)
tmp_time.append(str(decimal.Decimal(records[0]) - tmp_zero))
if len(records[2]) > 0 and self.arguments.stdout:
tmp_stdout_x.append(tmp_time[-1])
tmp_stdout_y.append(tmp_memory[-1])
stdout_msg.append(records[2])
if len(records[3]) > 0 and self.arguments.pstack:
tmp_pstack_x.append(tmp_time[-1])
tmp_pstack_y.append(tmp_memory[-1])
pstack_msg.append(records[3])
# Do the actual plotting:
f, = plot_list[-1].plot(tmp_time, tmp_memory)
tmp_plot.append(f)
tmp_legend.append(plot_name)
plot_list[-1].grid(True)
plot_list[-1].set_ylabel(self.memory_label)
plot_list[-1].set_xlabel('Time in Seconds')
# Plot annotations
if self.arguments.stdout:
stdout_line, = plot_list[-1].plot(tmp_stdout_x, tmp_stdout_y, 'x', picker=10, color=f.get_color())
next_index = str(len(plot_list))
stdout_line.set_gid('stdout' + next_index)
self.stdout_msgs[next_index] = stdout_msg
self.buildAnnotation(plot_list[-1], tmp_stdout_x, tmp_stdout_y, stdout_msg, f.get_color())
if self.arguments.pstack:
pstack_line, = plot_list[-1].plot(tmp_pstack_x, tmp_pstack_y, 'o', picker=10, color=f.get_color())
next_index = str(len(plot_list))
pstack_line.set_gid('pstack' + next_index)
self.pstack_msgs[next_index] = pstack_msg
# Make points clickable
fig.canvas.mpl_connect('pick_event', self)
# Create legend
plt.legend(tmp_plot, tmp_legend, loc = 2)
plt.show()
def __call__(self, event):
color_codes = {'RESET':'\033[0m', 'r':'\033[31m','g':'\033[32m','c':'\033[36m','y':'\033[33m', 'b':'\033[34m', 'm':'\033[35m', 'k':'\033[0m', 'w':'\033[0m' }
line = event.artist
ind = event.ind
name = line.get_gid()[:-1]
index = line.get_gid()[-1]
if self.arguments.stdout and name == 'stdout':
if self.arguments.no_color != False:
print color_codes[line.get_color()]
print "stdout -----------------------------------------------------\n"
for id in ind:
print self.stdout_msgs[index][id]
if self.arguments.no_color != False:
print color_codes['RESET']
if self.arguments.pstack and name == 'pstack':
if self.arguments.no_color != False:
print color_codes[line.get_color()]
print "pstack -----------------------------------------------------\n"
for id in ind:
print self.pstack_msgs[index][id]
if self.arguments.no_color != False:
print color_codes['RESET']
def buildAnnotation(self,fig,x,y,msg,c):
for i in range(len(x)):
fig.annotate(str(msg[i].split('\n')[0][:self.arguments.trim_text[-1]]),
xy=(x[i], y[i]),
rotation=self.arguments.rotate_text[-1],
xytext=(decimal.Decimal(x[i]) + decimal.Decimal(self.arguments.move_text[0]), decimal.Decimal(y[i]) + decimal.Decimal(self.arguments.move_text[1])),
color=c, horizontalalignment='center', verticalalignment='bottom',
arrowprops=dict(arrowstyle="->",
connectionstyle="arc3,rad=0.5",
color=c
)
)
class ReadLog:
"""Read a memory_logger log file, and display the results to stdout in an easy to read form.
"""
def __init__(self, arguments):
self.arguments = arguments
history_file = open(self.arguments.read[-1], 'r')
reader = csv.reader(history_file, delimiter=',', quotechar='|', escapechar='\\', quoting=csv.QUOTE_MINIMAL)
self.memory_list = []
for row in reader:
self.memory_list.append(row)
history_file.close()
self.sorted_list = []
self.mem_list = []
self.use_nodes = False
self.printHistory()
def printHistory(self):
RESET = '\033[0m'
BOLD = '\033[1m'
BLACK = '\033[30m'
RED = '\033[31m'
GREEN = '\033[32m'
CYAN = '\033[36m'
YELLOW = '\033[33m'
last_memory = 0.0
(terminal_width, terminal_height) = self.getTerminalSize()
for timestamp in self.memory_list:
to = GetTime(float(timestamp[0]))
total_memory = int(timestamp[1])
log = timestamp[2].split('\n')
pstack = timestamp[3].split('\n')
node_name = str(timestamp[4])
node_memory = int(timestamp[5])
self.mem_list.append(total_memory)
self.sorted_list.append([str(to.day) + ' ' + str(to.monthname) + ' ' + str(to.hour) + ':' + str(to.minute) + ':' + '{:02.0f}'.format(to.second) + '.' + '{:06.0f}'.format(to.microsecond), total_memory, log, pstack, node_name, node_memory])
largest_memory = decimal.Decimal(max(self.mem_list))
if len(set([x[4] for x in self.sorted_list])) > 1:
self.use_nodes = True
print 'Date Stamp' + ' '*int(17) + 'Memory Usage | Percent of MAX memory used: ( ' + str('{:0,.0f}'.format(largest_memory)) + ' K )'
for item in self.sorted_list:
tmp_str = ''
if decimal.Decimal(item[1]) == largest_memory:
tmp_str = self.formatText(largest_memory, item[0], item[1], item[5], item[2], item[3], item[4], RESET, terminal_width)
elif item[1] > last_memory:
tmp_str = self.formatText(largest_memory, item[0], item[1], item[5], item[2], item[3], item[4], RED, terminal_width)
elif item[1] == last_memory:
tmp_str = self.formatText(largest_memory, item[0], item[1], item[5], item[2], item[3], item[4], CYAN, terminal_width)
else:
tmp_str = self.formatText(largest_memory, item[0], item[1], item[5], item[2], item[3], item[4], GREEN, terminal_width)
last_memory = item[1]
sys.stdout.write(tmp_str)
print 'Date Stamp' + ' '*int(17) + 'Memory Usage | Percent of MAX memory used: ( ' + str('{:0,.0f}'.format(largest_memory)) + ' K )'
def formatText(self, largest_memory, date, total_memory, node_memory, log, pstack, reporting_host, color_code, terminal_width):
RESET = '\033[0m'
if decimal.Decimal(total_memory) == largest_memory:
percent = '100'
elif (decimal.Decimal(total_memory) / largest_memory) == 0:
percent = '0'
else:
percent = str(decimal.Decimal(total_memory) / largest_memory)[2:4] + '.' + str(decimal.Decimal(total_memory) / largest_memory)[4:6]
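# e.g. a usage ratio of 0.4567 is rendered as '45.67'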
header = len(date) + 18
footer = len(percent) + 6
additional_correction = 0
max_length = decimal.Decimal(terminal_width - header) / largest_memory
total_position = total_memory * decimal.Decimal(max_length)
node_position = node_memory * decimal.Decimal(max_length)
tmp_log = ''
if self.arguments.stdout:
for single_log in log:
if single_log != '':
tmp_log += ' '*(header - len(' stdout |')) + ' stdout | ' + single_log + '\n'
if self.arguments.pstack:
for single_pstack in pstack:
if single_pstack != '':
tmp_log += ' '*(header - len(' pstack |')) + ' pstack | ' + single_pstack + '\n'
if self.arguments.separate and self.use_nodes != False:
message = '< ' + RESET + reporting_host + ' - ' + '{:10,.0f}'.format(node_memory) + ' K' + color_code + ' >'
additional_correction = len(RESET) + len(color_code)
elif self.use_nodes:
message = '< >'
else:
node_position = 0
message = ''
return date + '{:15,.0f}'.format(total_memory) + ' K | ' + color_code + '-'*int(node_position) + message + '-'*(int(total_position) - (int(node_position) + ((len(message) - additional_correction) + footer))) + RESET + '| ' + percent + '%\n' + tmp_log
def getTerminalSize(self):
"""Quicky to get terminal window size"""
env = os.environ
def ioctl_GWINSZ(fd):
try:
import fcntl, termios, struct, os
cr = struct.unpack('hh', fcntl.ioctl(fd, termios.TIOCGWINSZ, '1234'))
except:
return None
return cr
cr = ioctl_GWINSZ(0) or ioctl_GWINSZ(1) or ioctl_GWINSZ(2)
if not cr:
try:
fd = os.open(os.ctermid(), os.O_RDONLY)
cr = ioctl_GWINSZ(fd)
os.close(fd)
except:
pass
if not cr:
try:
cr = (env['LINES'], env['COLUMNS'])
except:
cr = (25, 80)
return int(cr[1]), int(cr[0])
# A simple which function to return path to program
def which(program):
def is_exe(fpath):
return os.path.exists(fpath) and os.access(fpath, os.X_OK)
fpath, fname = os.path.split(program)
if fpath:
if is_exe(program):
return program
else:
for path in os.environ["PATH"].split(os.pathsep):
exe_file = os.path.join(path, program)
if is_exe(exe_file):
return exe_file
print 'I could not find the following binary:', program
sys.exit(1)
def verifyArgs(args):
option_count = 0
if args.read:
option_count += 1
if args.run:
option_count += 1
if args.plot:
option_count += 1
if option_count != 1 and args.pbs != True:
if args.call_back_host == None:
print 'You must use one of the following: run, read, or plot'
sys.exit(1)
args.cwd = os.getcwd()
# Work with --recover (a MOOSE application specific option)
args.recover = False
if args.run:
if args.run[0].find('--recover') != -1:
args.recover = True
if args.outfile == None and args.run:
# Attempt to build the output file based on input file
if re.findall(r'-i (\w+)', args.run[0]) != []:
args.outfile = [os.getcwd() + '/' + re.findall(r'-i (\w+)', args.run[0])[0] + '_memory.log']
else:
args.outfile = [os.getcwd() + '/' + args.run[0].replace('..', '').replace('/', '').replace(' ', '_') + '.log']
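# e.g. --run '../../../moose_test-opt -i simple_diffusion.i' produces ./simple_diffusion_memory.log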
return args
def parseArguments(args=None):
parser = argparse.ArgumentParser(description='Track and Display memory usage')
rungroup = parser.add_argument_group('Tracking', 'The following options control how the memory logger tracks memory usage')
rungroup.add_argument('--run', nargs=1, metavar='command', help='Run specified command. You must encapsulate the command in quotes\n ')
rungroup.add_argument('--pbs', dest='pbs', metavar='', action='store_const', const=True, default=False, help='Instruct memory logger to tally all launches on all nodes\n ')
rungroup.add_argument('--pbs-delay', dest='pbs_delay', metavar='float', nargs=1, type=float, default=[1.0], help='For larger jobs, you may need to increase the delay before the memory_logger launches the tracking agents\n ')
rungroup.add_argument('--sample-delay', dest='sample_delay', metavar='float', nargs=1, type=float, default=[0.25], help='The time to delay before taking the first sample (when not using pbs)')
rungroup.add_argument('--repeat-rate', nargs=1, metavar='float', type=float, default=[0.25], help='Indicate the sleep delay in float seconds to check memory usage (default 0.25 seconds)\n ')
rungroup.add_argument('--outfile', nargs=1, metavar='file', help='Save log to specified file. (Defaults based on run command)\n ')
readgroup = parser.add_argument_group('Read / Display', 'Options to manipulate or read log files created by the memory_logger')
readgroup.add_argument('--read', nargs=1, metavar='file', help='Read a specified memory log file to stdout\n ')
readgroup.add_argument('--separate', dest='separate', action='store_const', const=True, default=False, help='Display individual node memory usage (read mode only)\n ')
readgroup.add_argument('--plot', nargs="+", metavar='file', help='Display a graphical representation of memory usage (Requires Matplotlib). Specify a single file or a list of files to plot\n ')
commongroup = parser.add_argument_group('Common Options', 'The following options can be used when displaying the results')
commongroup.add_argument('--pstack', dest='pstack', action='store_const', const=True, default=False, help='Display/Record stack trace information (if available)\n ')
commongroup.add_argument('--stdout', dest='stdout', action='store_const', const=True, default=False, help='Display stdout information\n ')
plotgroup = parser.add_argument_group('Plot Options', 'Additional options when using --plot')
plotgroup.add_argument('--rotate-text', nargs=1, metavar='int', type=int, default=[30], help='Rotate stdout/pstack text by this amount (default 30)\n ')
plotgroup.add_argument('--move-text', nargs=2, metavar='int', default=['0', '0'], help='Move text X and Y by this amount (default 0 0)\n ')
plotgroup.add_argument('--trim-text', nargs=1, metavar='int', type=int, default=[15], help='Display this many characters in stdout/pstack (default 15)\n ')
plotgroup.add_argument('--no-color', dest='no_color', metavar='', action='store_const', const=False, help='When printing output to stdout do not use color codes\n ')
internalgroup = parser.add_argument_group('Internal PBS Options', 'The following options are used to control how memory_logger as a tracking agent connects back to the caller. These are set automatically when using PBS and can be ignored.')
internalgroup.add_argument('--call-back-host', nargs=2, help='Server hostname and port that launched memory_logger\n ')
return verifyArgs(parser.parse_args(args))
if __name__ == '__main__':
args = parseArguments()
if args.read:
ReadLog(args)
sys.exit(0)
if args.plot:
MemoryPlotter(args)
sys.exit(0)
Server(args)
|
lgpl-2.1
|
robbwagoner/airflow
|
airflow/www/app.py
|
1
|
63254
|
import copy
from datetime import datetime, timedelta
import dateutil.parser
from functools import wraps
import inspect
import json
import logging
import os
import socket
import sys
from flask import (
Flask, url_for, Markup, Blueprint, redirect,
flash, Response, render_template)
from flask.ext.admin import Admin, BaseView, expose, AdminIndexView
from flask.ext.admin.form import DateTimePickerWidget
from flask.ext.admin import base
from flask.ext.admin.contrib.sqla import ModelView
from flask.ext.cache import Cache
from flask import request
import sqlalchemy as sqla
from wtforms import (
widgets,
Form, DateTimeField, SelectField, TextAreaField, PasswordField)
from pygments import highlight, lexers
from pygments.formatters import HtmlFormatter
import chartkick
import jinja2
import markdown
from sqlalchemy import or_
import airflow
from airflow import jobs, login, models, settings, utils
from airflow.configuration import conf
from airflow.models import State
from airflow.settings import Session
from airflow.utils import AirflowException
from airflow.www import utils as wwwutils
login_required = login.login_required
current_user = login.current_user
logout_user = login.logout_user
from airflow import default_login as login
if conf.getboolean('webserver', 'AUTHENTICATE'):
try:
# Environment specific login
import airflow_login as login
except ImportError:
logging.error(
"authenticate is set to True in airflow.cfg, "
"but airflow_login failed to import")
login_required = login.login_required
current_user = login.current_user
logout_user = login.logout_user
AUTHENTICATE = conf.getboolean('webserver', 'AUTHENTICATE')
if AUTHENTICATE is False:
login_required = lambda x: x
class VisiblePasswordInput(widgets.PasswordInput):
def __init__(self, hide_value=False):
self.hide_value = hide_value
class VisiblePasswordField(PasswordField):
widget = VisiblePasswordInput()
def superuser_required(f):
'''
Decorator for views requiring superuser access
'''
@wraps(f)
def decorated_function(*args, **kwargs):
if (
not AUTHENTICATE or
(not current_user.is_anonymous() and current_user.is_superuser())
):
return f(*args, **kwargs)
else:
flash("This page requires superuser privileges", "error")
return redirect(url_for('admin.index'))
return decorated_function
def data_profiling_required(f):
'''
Decorator for views requiring data profiling access
'''
@wraps(f)
def decorated_function(*args, **kwargs):
if (
not AUTHENTICATE or
(not current_user.is_anonymous() and current_user.data_profiling())
):
return f(*args, **kwargs)
else:
flash("This page requires data profiling privileges", "error")
return redirect(url_for('admin.index'))
return decorated_function
QUERY_LIMIT = 100000
CHART_LIMIT = 200000
def pygment_html_render(s, lexer=lexers.TextLexer):
return highlight(
s,
lexer(),
HtmlFormatter(linenos=True),
)
def wrapped_markdown(s):
return '<div class="rich_doc">' + markdown.markdown(s) + "</div>"
attr_renderer = {
'bash_command': lambda x: pygment_html_render(x, lexers.BashLexer),
'hql': lambda x: pygment_html_render(x, lexers.SqlLexer),
'sql': lambda x: pygment_html_render(x, lexers.SqlLexer),
'doc': lambda x: pygment_html_render(x, lexers.TextLexer),
'doc_json': lambda x: pygment_html_render(x, lexers.JsonLexer),
'doc_rst': lambda x: pygment_html_render(x, lexers.RstLexer),
'doc_yaml': lambda x: pygment_html_render(x, lexers.YamlLexer),
'doc_md': wrapped_markdown,
'python_callable': lambda x: pygment_html_render(
inspect.getsource(x), lexers.PythonLexer),
}
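# attr_renderer maps task attribute names to HTML renderers, e.g.
# attr_renderer['sql']('SELECT 1') returns the statement as Pygments-highlighted HTML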
dagbag = models.DagBag(os.path.expanduser(conf.get('core', 'DAGS_FOLDER')))
utils.pessimistic_connection_handling()
app = Flask(__name__)
app.config['SQLALCHEMY_POOL_RECYCLE'] = 3600
app.secret_key = conf.get('webserver', 'SECRET_KEY')
login.login_manager.init_app(app)
cache = Cache(
app=app, config={'CACHE_TYPE': 'filesystem', 'CACHE_DIR': '/tmp'})
# Init for chartkick, the python wrapper for highcharts
ck = Blueprint(
'ck_page', __name__,
static_folder=chartkick.js(), static_url_path='/static')
app.register_blueprint(ck, url_prefix='/ck')
app.jinja_env.add_extension("chartkick.ext.charts")
@app.context_processor
def jinja_globals():
return {
'hostname': socket.gethostname(),
}
class DateTimeForm(Form):
# Date filter form needed for gantt and graph view
execution_date = DateTimeField(
"Execution date", widget=DateTimePickerWidget())
class GraphForm(Form):
execution_date = DateTimeField(
"Execution date", widget=DateTimePickerWidget())
arrange = SelectField("Layout", choices=(
('LR', "Left->Right"),
('RL', "Right->Left"),
('TB', "Top->Bottom"),
('BT', "Bottom->Top"),
))
@app.route('/')
def index():
return redirect(url_for('admin.index'))
@app.route('/health')
def health():
""" We can add an array of tests here to check the server's health """
content = Markup(markdown.markdown("The server is healthy!"))
return content
@app.teardown_appcontext
def shutdown_session(exception=None):
settings.Session.remove()
def dag_link(v, c, m, p):
url = url_for(
'airflow.graph',
dag_id=m.dag_id)
return Markup(
'<a href="{url}">{m.dag_id}</a>'.format(**locals()))
class DagModelView(wwwutils.SuperUserMixin, ModelView):
column_list = ('dag_id', 'owners')
column_editable_list = ('is_paused',)
form_excluded_columns = ('is_subdag', 'is_active')
column_searchable_list = ('dag_id',)
column_filters = (
'dag_id', 'owners', 'is_paused', 'is_active', 'is_subdag',
'last_scheduler_run', 'last_expired')
form_widget_args = {
'last_scheduler_run': {'disabled': True},
'fileloc': {'disabled': True},
'last_pickled': {'disabled': True},
'pickle_id': {'disabled': True},
'last_loaded': {'disabled': True},
'last_expired': {'disabled': True},
'pickle_size': {'disabled': True},
'scheduler_lock': {'disabled': True},
'owners': {'disabled': True},
}
column_formatters = dict(
dag_id=dag_link,
)
can_delete = False
can_create = False
page_size = 50
list_template = 'airflow/list_dags.html'
named_filter_urls = True
def get_query(self):
"""
Default filters for model
"""
return (
super(DagModelView, self)
.get_query()
.filter(or_(models.DagModel.is_active, models.DagModel.is_paused))
.filter(~models.DagModel.is_subdag)
)
def get_count_query(self):
"""
Default filters for model
"""
return (
super(DagModelView, self)
.get_count_query()
.filter(models.DagModel.is_active)
.filter(~models.DagModel.is_subdag)
)
class HomeView(AdminIndexView):
@expose("/")
@login_required
def index(self):
session = Session()
DM = models.DagModel
qry = session.query(DM).filter(~DM.is_subdag, DM.is_active).all()
orm_dags = {dag.dag_id: dag for dag in qry}
session.expunge_all()
session.commit()
session.close()
dags = dagbag.dags.values()
dags = {dag.dag_id: dag for dag in dags if not dag.parent_dag}
all_dag_ids = sorted(set(orm_dags.keys()) | set(dags.keys()))
return self.render(
'airflow/dags.html',
dags=dags,
orm_dags=orm_dags,
all_dag_ids=all_dag_ids)
admin = Admin(
app,
name="Airflow",
index_view=HomeView(name="DAGs"),
template_mode='bootstrap3')
class Airflow(BaseView):
def is_visible(self):
return False
@expose('/')
@login_required
def index(self):
return self.render('airflow/dags.html')
@expose('/chart_data')
@data_profiling_required
@wwwutils.gzipped
# @cache.cached(timeout=3600, key_prefix=wwwutils.make_cache_key)
def chart_data(self):
session = settings.Session()
chart_id = request.args.get('chart_id')
csv = request.args.get('csv') == "true"
chart = session.query(models.Chart).filter_by(id=chart_id).all()[0]
db = session.query(
models.Connection).filter_by(conn_id=chart.conn_id).all()[0]
session.expunge_all()
session.commit()
session.close()
payload = {}
payload['state'] = 'ERROR'
payload['error'] = ''
# Processing templated fields
try:
args = eval(chart.default_params)
if type(args) is not type(dict()):
raise AirflowException('Not a dict')
except:
args = {}
payload['error'] += (
"Default params is not valid, string has to evaluate as "
"a Python dictionary. ")
request_dict = {k: request.args.get(k) for k in request.args}
from airflow import macros
args.update(request_dict)
args['macros'] = macros
sql = jinja2.Template(chart.sql).render(**args)
label = jinja2.Template(chart.label).render(**args)
payload['sql_html'] = Markup(highlight(
sql,
lexers.SqlLexer(), # Lexer call
HtmlFormatter(noclasses=True))
)
payload['label'] = label
import pandas as pd
pd.set_option('display.max_colwidth', 100)
hook = db.get_hook()
try:
df = hook.get_pandas_df(wwwutils.limit_sql(sql, CHART_LIMIT))
except Exception as e:
payload['error'] += "SQL execution failed. Details: " + str(e)
if csv:
return Response(
response=df.to_csv(index=False),
status=200,
mimetype="application/text")
if not payload['error'] and len(df) == CHART_LIMIT:
payload['warning'] = (
"Data has been truncated to {0}"
" rows. Expect incomplete results.").format(CHART_LIMIT)
def date_handler(obj):
return obj.isoformat() if hasattr(obj, 'isoformat') else obj
if not payload['error'] and len(df) == 0:
payload['error'] += "Empty result set. "
elif (
not payload['error'] and
chart.sql_layout == 'series' and
chart.chart_type != "datatable" and
len(df.columns) < 3):
payload['error'] += "SQL needs to return at least 3 columns. "
elif (
not payload['error'] and
chart.sql_layout == 'columns' and
len(df.columns) < 2):
payload['error'] += "SQL needs to return at least 2 columns. "
elif not payload['error']:
import numpy as np
chart_type = chart.chart_type
data = None
if chart_type == "datatable":
chart.show_datatable = True
if chart.show_datatable:
data = df.to_dict(orient="split")
data['columns'] = [{'title': c} for c in data['columns']]
# Trying to convert time to something Highcharts likes
x_col = 1 if chart.sql_layout == 'series' else 0
if chart.x_is_date:
try:
# From string to datetime
df[df.columns[x_col]] = pd.to_datetime(
df[df.columns[x_col]])
except Exception as e:
raise AirflowException(str(e))
df[df.columns[x_col]] = df[df.columns[x_col]].apply(
lambda x: int(x.strftime("%s")) * 1000)
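# strftime('%s') yields epoch seconds (local time, a GNU extension); x1000 gives
# the epoch milliseconds Highcharts expects, e.g. 2015-01-01 UTC -> 1420070400000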
series = []
colorAxis = None
if chart_type == 'datatable':
payload['data'] = data
payload['state'] = 'SUCCESS'
return Response(
response=json.dumps(
payload, indent=4, default=date_handler),
status=200,
mimetype="application/json")
elif chart_type == 'para':
df.rename(columns={
df.columns[0]: 'name',
df.columns[1]: 'group',
}, inplace=True)
return Response(
response=df.to_csv(index=False),
status=200,
mimetype="application/text")
elif chart_type == 'heatmap':
color_perc_lbound = float(
request.args.get('color_perc_lbound', 0))
color_perc_rbound = float(
request.args.get('color_perc_rbound', 1))
color_scheme = request.args.get('color_scheme', 'blue_red')
if color_scheme == 'blue_red':
stops = [
[color_perc_lbound, '#00D1C1'],
[
color_perc_lbound +
((color_perc_rbound - color_perc_lbound)/2),
'#FFFFCC'
],
[color_perc_rbound, '#FF5A5F']
]
elif color_scheme == 'blue_scale':
stops = [
[color_perc_lbound, '#FFFFFF'],
[color_perc_rbound, '#2222FF']
]
elif color_scheme == 'fire':
diff = float(color_perc_rbound - color_perc_lbound)
stops = [
[color_perc_lbound, '#FFFFFF'],
[color_perc_lbound + 0.33*diff, '#FFFF00'],
[color_perc_lbound + 0.66*diff, '#FF0000'],
[color_perc_rbound, '#000000']
]
else:
stops = [
[color_perc_lbound, '#FFFFFF'],
[
color_perc_lbound +
((color_perc_rbound - color_perc_lbound)/2),
'#888888'
],
[color_perc_rbound, '#000000'],
]
xaxis_label = df.columns[1]
yaxis_label = df.columns[2]
data = []
for row in df.itertuples():
data.append({
'x': row[2],
'y': row[3],
'value': row[4],
})
x_format = '{point.x:%Y-%m-%d}' \
if chart.x_is_date else '{point.x}'
series.append({
'data': data,
'borderWidth': 0,
'colsize': 24 * 36e5,
'turboThreshold': sys.float_info.max,
'tooltip': {
'headerFormat': '',
'pointFormat': (
df.columns[1] + ': ' + x_format + '<br/>' +
df.columns[2] + ': {point.y}<br/>' +
df.columns[3] + ': <b>{point.value}</b>'
),
},
})
colorAxis = {
'stops': stops,
'minColor': '#FFFFFF',
'maxColor': '#000000',
'min': 50,
'max': 2200,
}
else:
if chart.sql_layout == 'series':
# User provides columns (series, x, y)
xaxis_label = df.columns[1]
yaxis_label = df.columns[2]
df[df.columns[2]] = df[df.columns[2]].astype(np.float)
df = df.pivot_table(
index=df.columns[1],
columns=df.columns[0],
values=df.columns[2], aggfunc=np.sum)
else:
# User provides columns (x, y, metric1, metric2, ...)
xaxis_label = df.columns[0]
yaxis_label = 'y'
df.index = df[df.columns[0]]
df = df.sort(df.columns[0])
del df[df.columns[0]]
for col in df.columns:
df[col] = df[col].astype(np.float)
for col in df.columns:
series.append({
'name': col,
'data': [
(i, v)
for i, v in df[col].iteritems() if not np.isnan(v)]
})
series = [serie for serie in sorted(
series, key=lambda s: s['data'][0][1], reverse=True)]
if chart_type == "stacked_area":
stacking = "normal"
chart_type = 'area'
elif chart_type == "percent_area":
stacking = "percent"
chart_type = 'area'
else:
stacking = None
hc = {
'chart': {
'type': chart_type
},
'plotOptions': {
'series': {
'marker': {
'enabled': False
}
},
'area': {'stacking': stacking},
},
'title': {'text': ''},
'xAxis': {
'title': {'text': xaxis_label},
'type': 'datetime' if chart.x_is_date else None,
},
'yAxis': {
'title': {'text': yaxis_label},
},
'colorAxis': colorAxis,
'tooltip': {
'useHTML': True,
'backgroundColor': None,
'borderWidth': 0,
},
'series': series,
}
if chart.y_log_scale:
hc['yAxis']['type'] = 'logarithmic'
hc['yAxis']['minorTickInterval'] = 0.1
if 'min' in hc['yAxis']:
del hc['yAxis']['min']
payload['state'] = 'SUCCESS'
payload['hc'] = hc
payload['data'] = data
payload['request_dict'] = request_dict
return Response(
response=json.dumps(payload, indent=4, default=date_handler),
status=200,
mimetype="application/json")
@expose('/chart')
@data_profiling_required
def chart(self):
session = settings.Session()
chart_id = request.args.get('chart_id')
embed = request.args.get('embed')
chart = session.query(models.Chart).filter_by(id=chart_id).all()[0]
session.expunge_all()
session.commit()
session.close()
if chart.chart_type == 'para':
return self.render('airflow/para/para.html', chart=chart)
sql = ""
if chart.show_sql:
sql = Markup(highlight(
chart.sql,
lexers.SqlLexer(), # Lexer call
HtmlFormatter(noclasses=True))
)
return self.render(
'airflow/highchart.html',
chart=chart,
title="Airflow - Chart",
sql=sql,
label=chart.label,
embed=embed)
@expose('/dag_stats')
@login_required
def dag_stats(self):
states = [
State.SUCCESS,
State.RUNNING,
State.FAILED,
State.UPSTREAM_FAILED,
State.UP_FOR_RETRY,
State.QUEUED,
]
task_ids = []
for dag in dagbag.dags.values():
task_ids += dag.task_ids
TI = models.TaskInstance
session = Session()
qry = (
session.query(TI.dag_id, TI.state, sqla.func.count(TI.task_id))
.filter(TI.task_id.in_(task_ids))
.group_by(TI.dag_id, TI.state)
)
data = {}
for dag_id, state, count in qry:
if dag_id not in data:
data[dag_id] = {}
data[dag_id][state] = count
session.commit()
session.close()
payload = {}
for dag in dagbag.dags.values():
payload[dag.dag_id] = []
for state in states:
try:
count = data[dag.dag_id][state]
except:
count = 0
d = {
'state': state,
'count': count,
'dag_id': dag.dag_id,
'color': State.color(state)
}
payload[dag.dag_id].append(d)
return Response(
response=json.dumps(payload, indent=4),
status=200, mimetype="application/json")
@expose('/code')
@login_required
def code(self):
dag_id = request.args.get('dag_id')
dag = dagbag.get_dag(dag_id)
code = "".join(open(dag.full_filepath, 'r').readlines())
title = dag.filepath
html_code = highlight(
code, lexers.PythonLexer(), HtmlFormatter(linenos=True))
return self.render(
'airflow/dag_code.html', html_code=html_code, dag=dag, title=title,
root=request.args.get('root'),
demo_mode=conf.getboolean('webserver', 'demo_mode'))
@app.errorhandler(404)
def circles(self):
return render_template('airflow/circles.html'), 404
@expose('/sandbox')
@login_required
def sandbox(self):
from airflow import configuration
title = "Sandbox Suggested Configuration"
cfg_loc = configuration.AIRFLOW_CONFIG + '.sandbox'
f = open(cfg_loc, 'r')
config = f.read()
f.close()
code_html = Markup(highlight(
config,
lexers.IniLexer(), # Lexer call
HtmlFormatter(noclasses=True))
)
return self.render(
'airflow/code.html',
code_html=code_html, title=title, subtitle=cfg_loc)
@expose('/noaccess')
def noaccess(self):
return self.render('airflow/noaccess.html')
@expose('/headers')
def headers(self):
d = {k: v for k, v in request.headers}
if hasattr(current_user, 'is_superuser'):
d['is_superuser'] = current_user.is_superuser()
d['data_profiling'] = current_user.data_profiling()
d['is_anonymous'] = current_user.is_anonymous()
d['is_authenticated'] = current_user.is_authenticated()
return Response(
response=json.dumps(d, indent=4),
status=200, mimetype="application/json")
@expose('/login')
def login(self):
return login.login(self, request)
@expose('/logout')
def logout(self):
logout_user()
return redirect('/admin/dagmodel/')
@expose('/rendered')
@login_required
def rendered(self):
dag_id = request.args.get('dag_id')
task_id = request.args.get('task_id')
execution_date = request.args.get('execution_date')
dttm = dateutil.parser.parse(execution_date)
form = DateTimeForm(data={'execution_date': dttm})
dag = dagbag.get_dag(dag_id)
task = copy.copy(dag.get_task(task_id))
ti = models.TaskInstance(task=task, execution_date=dttm)
try:
ti.render_templates()
except Exception as e:
flash("Error rendering template: " + str(e), "error")
title = "Rendered Template"
html_dict = {}
for template_field in task.__class__.template_fields:
content = getattr(task, template_field)
if template_field in attr_renderer:
html_dict[template_field] = attr_renderer[template_field](content)
else:
html_dict[template_field] = (
"<pre><code>" + str(content) + "</pre></code>")
return self.render(
'airflow/ti_code.html',
html_dict=html_dict,
dag=dag,
task_id=task_id,
execution_date=execution_date,
form=form,
title=title,)
@expose('/log')
@login_required
def log(self):
BASE_LOG_FOLDER = os.path.expanduser(
conf.get('core', 'BASE_LOG_FOLDER'))
dag_id = request.args.get('dag_id')
task_id = request.args.get('task_id')
execution_date = request.args.get('execution_date')
dag = dagbag.get_dag(dag_id)
log_relative = "/{dag_id}/{task_id}/{execution_date}".format(
**locals())
loc = BASE_LOG_FOLDER + log_relative
loc = loc.format(**locals())
log = ""
TI = models.TaskInstance
session = Session()
dttm = dateutil.parser.parse(execution_date)
ti = session.query(TI).filter(
TI.dag_id == dag_id, TI.task_id == task_id,
TI.execution_date == dttm).first()
dttm = dateutil.parser.parse(execution_date)
form = DateTimeForm(data={'execution_date': dttm})
if ti:
host = ti.hostname
if socket.gethostname() == host:
try:
f = open(loc)
log += "".join(f.readlines())
f.close()
except:
log = "Log file isn't where expected.\n".format(loc)
else:
WORKER_LOG_SERVER_PORT = \
conf.get('celery', 'WORKER_LOG_SERVER_PORT')
url = (
"http://{host}:{WORKER_LOG_SERVER_PORT}/log"
"{log_relative}").format(**locals())
log += "Log file isn't local.\n"
log += "Fetching here: {url}\n".format(**locals())
try:
import requests
log += requests.get(url).text
except:
log += "Failed to fetch log file.".format(**locals())
session.commit()
session.close()
title = "Log"
return self.render(
'airflow/ti_code.html',
code=log, dag=dag, title=title, task_id=task_id,
execution_date=execution_date, form=form)
@expose('/task')
@login_required
def task(self):
dag_id = request.args.get('dag_id')
task_id = request.args.get('task_id')
# Carrying execution_date through, even though it's irrelevant for
# this context
execution_date = request.args.get('execution_date')
dttm = dateutil.parser.parse(execution_date)
form = DateTimeForm(data={'execution_date': dttm})
dag = dagbag.get_dag(dag_id)
task = dag.get_task(task_id)
task = copy.copy(task)
task.resolve_template_files()
attributes = []
for attr_name in dir(task):
if not attr_name.startswith('_'):
attr = getattr(task, attr_name)
if type(attr) != type(self.task) and \
attr_name not in attr_renderer:
attributes.append((attr_name, str(attr)))
title = "Task Details"
# Color coding the special attributes that are code
special_attrs_rendered = {}
for attr_name in attr_renderer:
if hasattr(task, attr_name):
source = getattr(task, attr_name)
special_attrs_rendered[attr_name] = attr_renderer[attr_name](source)
return self.render(
'airflow/task.html',
attributes=attributes,
task_id=task_id,
execution_date=execution_date,
special_attrs_rendered=special_attrs_rendered,
form=form,
dag=dag, title=title)
@expose('/action')
@login_required
def action(self):
action = request.args.get('action')
dag_id = request.args.get('dag_id')
task_id = request.args.get('task_id')
origin = request.args.get('origin')
dag = dagbag.get_dag(dag_id)
task = dag.get_task(task_id)
execution_date = request.args.get('execution_date')
execution_date = dateutil.parser.parse(execution_date)
confirmed = request.args.get('confirmed') == "true"
upstream = request.args.get('upstream') == "true"
downstream = request.args.get('downstream') == "true"
if action == "run":
from airflow.executors import DEFAULT_EXECUTOR as executor
from airflow.executors import CeleryExecutor
if not isinstance(executor, CeleryExecutor):
flash("Only works with the CeleryExecutor, sorry", "error")
return redirect(origin)
force = request.args.get('force') == "true"
deps = request.args.get('deps') == "true"
ti = models.TaskInstance(task=task, execution_date=execution_date)
executor.start()
executor.queue_task_instance(
ti, force=force, ignore_dependencies=deps)
executor.heartbeat()
flash(
"Sent {} to the message queue, "
"it should start any moment now.".format(ti))
return redirect(origin)
elif action == 'clear':
future = request.args.get('future') == "true"
past = request.args.get('past') == "true"
dag = dag.sub_dag(
task_regex=r"^{0}$".format(task_id),
include_downstream=downstream,
include_upstream=upstream)
end_date = execution_date if not future else None
start_date = execution_date if not past else None
if confirmed:
count = dag.clear(
start_date=start_date,
end_date=end_date)
flash("{0} task instances have been cleared".format(count))
return redirect(origin)
else:
tis = dag.clear(
start_date=start_date,
end_date=end_date,
dry_run=True)
if not tis:
flash("No task instances to clear", 'error')
response = redirect(origin)
else:
details = "\n".join([str(t) for t in tis])
response = self.render(
'airflow/confirm.html',
message=(
"Here's the list of task instances you are about "
"to clear:"),
details=details,)
return response
elif action == 'success':
# Flagging tasks as successful
session = settings.Session()
task_ids = [task_id]
if downstream:
task_ids += [
t.task_id
for t in task.get_flat_relatives(upstream=False)]
if upstream:
task_ids += [
t.task_id
for t in task.get_flat_relatives(upstream=True)]
TI = models.TaskInstance
tis = session.query(TI).filter(
TI.dag_id == dag_id,
TI.execution_date == execution_date,
TI.task_id.in_(task_ids)).all()
if confirmed:
updated_task_ids = []
for ti in tis:
updated_task_ids.append(ti.task_id)
ti.state = State.SUCCESS
session.commit()
to_insert = list(set(task_ids) - set(updated_task_ids))
for task_id in to_insert:
ti = TI(
task=dag.get_task(task_id),
execution_date=execution_date,
state=State.SUCCESS)
session.add(ti)
session.commit()
session.commit()
session.close()
flash("Marked success on {} task instances".format(
len(task_ids)))
return redirect(origin)
else:
if not task_ids:
flash("No task instances to mark as successful", 'error')
response = redirect(origin)
else:
tis = []
for task_id in task_ids:
tis.append(TI(
task=dag.get_task(task_id),
execution_date=execution_date,
state=State.SUCCESS))
details = "\n".join([str(t) for t in tis])
response = self.render(
'airflow/confirm.html',
message=(
"Here's the list of task instances you are about "
"to mark as successful:"),
details=details,)
return response
@expose('/tree')
@login_required
@wwwutils.gzipped
def tree(self):
dag_id = request.args.get('dag_id')
blur = conf.getboolean('webserver', 'demo_mode')
dag = dagbag.get_dag(dag_id)
root = request.args.get('root')
if root:
dag = dag.sub_dag(
task_regex=root,
include_downstream=False,
include_upstream=True)
session = settings.Session()
base_date = request.args.get('base_date')
if not base_date:
base_date = datetime.now()
else:
base_date = dateutil.parser.parse(base_date)
num_runs = request.args.get('num_runs')
num_runs = int(num_runs) if num_runs else 25
from_time = datetime.min.time()
if dag.start_date:
from_time = dag.start_date.time()
from_date = (base_date-(num_runs * dag.schedule_interval)).date()
from_date = datetime.combine(from_date, from_time)
dates = utils.date_range(
from_date, base_date, dag.schedule_interval)
task_instances = {}
for ti in dag.get_task_instances(session, from_date):
task_instances[(ti.task_id, ti.execution_date)] = ti
expanded = []
def recurse_nodes(task):
children = [recurse_nodes(t) for t in task.upstream_list]
# D3 tree uses children vs _children to define what is
# expanded or not. The following block makes it such that
# repeated nodes are collapsed by default.
children_key = 'children'
if task.task_id not in expanded:
expanded.append(task.task_id)
elif children:
children_key = "_children"
return {
'name': task.task_id,
'instances': [
utils.alchemy_to_dict(
task_instances.get((task.task_id, d))) or {
'execution_date': d.isoformat(),
'task_id': task.task_id
}
for d in dates],
children_key: children,
'num_dep': len(task.upstream_list),
'operator': task.task_type,
'retries': task.retries,
'owner': task.owner,
'start_date': task.start_date,
'end_date': task.end_date,
'depends_on_past': task.depends_on_past,
'ui_color': task.ui_color,
}
if len(dag.roots) > 1:
# d3 likes a single root
data = {
'name': 'root',
'instances': [],
'children': [recurse_nodes(t) for t in dag.roots]
}
elif len(dag.roots) == 1:
data = recurse_nodes(dag.roots[0])
else:
flash("No tasks found.", "error")
data = []
data = json.dumps(data, indent=4, default=utils.json_ser)
session.commit()
session.close()
return self.render(
'airflow/tree.html',
operators=sorted(
list(set([op.__class__ for op in dag.tasks])),
key=lambda x: x.__name__
),
root=root,
dag=dag, data=data, blur=blur)
@expose('/graph')
@login_required
def graph(self):
session = settings.Session()
dag_id = request.args.get('dag_id')
blur = conf.getboolean('webserver', 'demo_mode')
arrange = request.args.get('arrange', "LR")
dag = dagbag.get_dag(dag_id)
if dag_id not in dagbag.dags:
flash('DAG "{0}" seems to be missing.'.format(dag_id), "error")
return redirect('/admin/dagmodel/')
root = request.args.get('root')
if root:
dag = dag.sub_dag(
task_regex=root,
include_upstream=True,
include_downstream=False)
nodes = []
edges = []
for task in dag.tasks:
nodes.append({
'id': task.task_id,
'value': {
'label': task.task_id,
'labelStyle': "fill:{0};".format(task.ui_fgcolor),
'style': "fill:{0};".format(task.ui_color),
}
})
def get_upstream(task):
for t in task.upstream_list:
edge = {
'u': t.task_id,
'v': task.task_id,
}
if edge not in edges:
edges.append(edge)
get_upstream(t)
for t in dag.roots:
get_upstream(t)
dttm = request.args.get('execution_date')
if dttm:
dttm = dateutil.parser.parse(dttm)
else:
dttm = dag.latest_execution_date or datetime.now().date()
form = GraphForm(data={'execution_date': dttm, 'arrange': arrange})
task_instances = {
ti.task_id: utils.alchemy_to_dict(ti)
for ti in dag.get_task_instances(session, dttm, dttm)
}
tasks = {
t.task_id: {
'dag_id': t.dag_id,
'task_type': t.task_type,
}
for t in dag.tasks
}
if not tasks:
flash("No tasks found", "error")
session.commit()
session.close()
doc_md = markdown.markdown(dag.doc_md) if hasattr(dag, 'doc_md') else ''
return self.render(
'airflow/graph.html',
dag=dag,
form=form,
width=request.args.get('width', "100%"),
height=request.args.get('height', "800"),
execution_date=dttm.isoformat(),
doc_md=doc_md,
arrange=arrange,
operators=sorted(
list(set([op.__class__ for op in dag.tasks])),
key=lambda x: x.__name__
),
blur=blur,
root=root or '',
task_instances=json.dumps(task_instances, indent=2),
tasks=json.dumps(tasks, indent=2),
nodes=json.dumps(nodes, indent=2),
edges=json.dumps(edges, indent=2),)
@expose('/duration')
@login_required
def duration(self):
session = settings.Session()
dag_id = request.args.get('dag_id')
days = int(request.args.get('days', 30))
dag = dagbag.get_dag(dag_id)
from_date = (datetime.today()-timedelta(days)).date()
from_date = datetime.combine(from_date, datetime.min.time())
root = request.args.get('root')
if root:
dag = dag.sub_dag(
task_regex=root,
include_upstream=True,
include_downstream=False)
all_data = []
for task in dag.tasks:
data = []
for ti in task.get_task_instances(session, from_date):
if ti.duration:
data.append([
ti.execution_date.isoformat(),
float(ti.duration) / (60*60)
])
if data:
all_data.append({'data': data, 'name': task.task_id})
session.commit()
session.close()
return self.render(
'airflow/chart.html',
dag=dag,
data=all_data,
chart_options={'yAxis': {'title': {'text': 'hours'}}},
height="700px",
demo_mode=conf.getboolean('webserver', 'demo_mode'),
root=root,
)
@expose('/landing_times')
@login_required
def landing_times(self):
session = settings.Session()
dag_id = request.args.get('dag_id')
days = int(request.args.get('days', 30))
dag = dagbag.get_dag(dag_id)
from_date = (datetime.today()-timedelta(days)).date()
from_date = datetime.combine(from_date, datetime.min.time())
root = request.args.get('root')
if root:
dag = dag.sub_dag(
task_regex=root,
include_upstream=True,
include_downstream=False)
all_data = []
for task in dag.tasks:
data = []
for ti in task.get_task_instances(session, from_date):
if ti.end_date:
data.append([
ti.execution_date.isoformat(), (
ti.end_date - (
ti.execution_date + task.schedule_interval)
).total_seconds()/(60*60)
])
all_data.append({'data': data, 'name': task.task_id})
session.commit()
session.close()
return self.render(
'airflow/chart.html',
dag=dag,
data=all_data,
height="700px",
chart_options={'yAxis': {'title': {'text': 'hours after 00:00'}}},
demo_mode=conf.getboolean('webserver', 'demo_mode'),
root=root,
)
@expose('/refresh')
@login_required
def refresh(self):
DagModel = models.DagModel
dag_id = request.args.get('dag_id')
session = settings.Session()
orm_dag = session.query(
DagModel).filter(DagModel.dag_id == dag_id).first()
if orm_dag:
orm_dag.last_expired = datetime.now()
session.merge(orm_dag)
session.commit()
session.close()
dagbag.get_dag(dag_id)
flash("DAG [{}] is now fresh as a daisy".format(dag_id))
return redirect('/')
@expose('/refresh_all')
@login_required
def refresh_all(self):
dagbag.collect_dags(only_if_updated=False)
flash("All DAGs are now up to date")
return redirect('/')
@expose('/gantt')
@login_required
def gantt(self):
session = settings.Session()
dag_id = request.args.get('dag_id')
dag = dagbag.get_dag(dag_id)
demo_mode = conf.getboolean('webserver', 'demo_mode')
root = request.args.get('root')
if root:
dag = dag.sub_dag(
task_regex=root,
include_upstream=True,
include_downstream=False)
dttm = request.args.get('execution_date')
if dttm:
dttm = dateutil.parser.parse(dttm)
else:
dttm = dag.latest_execution_date or datetime.now().date()
form = DateTimeForm(data={'execution_date': dttm})
tis = [
ti
for ti in dag.get_task_instances(session, dttm, dttm)
if ti.start_date]
tis = sorted(tis, key=lambda ti: ti.start_date)
tasks = []
data = []
for i, ti in enumerate(tis):
end_date = ti.end_date or datetime.now()
tasks += [ti.task_id]
color = State.color(ti.state)
data.append({
'x': i,
'low': int(ti.start_date.strftime('%s')) * 1000,
'high': int(end_date.strftime('%s')) * 1000,
'color': color,
})
height = (len(tis) * 25) + 50
session.commit()
session.close()
hc = {
'chart': {
'type': 'columnrange',
'inverted': True,
'height': height,
},
'xAxis': {'categories': tasks},
'yAxis': {'type': 'datetime'},
'title': {
'text': None
},
'plotOptions': {
'series': {
'cursor': 'pointer',
'minPointLength': 4,
},
},
'legend': {
'enabled': False
},
'series': [{
'data': data
}]
}
return self.render(
'airflow/gantt.html',
dag=dag,
execution_date=dttm.isoformat(),
form=form,
hc=json.dumps(hc, indent=4),
height=height,
demo_mode=demo_mode,
root=root,
)
@expose('/variables/<form>', methods=["GET", "POST"])
@login_required
def variables(self, form):
try:
if request.method == 'POST':
data = request.json
if data:
session = settings.Session()
var = models.Variable(key=form, val=json.dumps(data))
session.add(var)
session.commit()
return ""
else:
return self.render(
'airflow/variables/{}.html'.format(form)
)
except:
return ("Error: form airflow/variables/{}.html "
"not found.").format(form), 404
admin.add_view(Airflow(name='DAGs'))
class QueryView(wwwutils.DataProfilingMixin, BaseView):
@expose('/')
@wwwutils.gzipped
def query(self):
session = settings.Session()
dbs = session.query(models.Connection).order_by(
models.Connection.conn_id).all()
session.expunge_all()
db_choices = list(
((db.conn_id, db.conn_id) for db in dbs if db.get_hook()))
conn_id_str = request.args.get('conn_id')
csv = request.args.get('csv') == "true"
sql = request.args.get('sql')
class QueryForm(Form):
conn_id = SelectField("Layout", choices=db_choices)
sql = TextAreaField("SQL", widget=wwwutils.AceEditorWidget())
data = {
'conn_id': conn_id_str,
'sql': sql,
}
results = None
has_data = False
error = False
if conn_id_str:
db = [db for db in dbs if db.conn_id == conn_id_str][0]
hook = db.get_hook()
try:
df = hook.get_pandas_df(wwwutils.limit_sql(sql, QUERY_LIMIT))
# df = hook.get_pandas_df(sql)
has_data = len(df) > 0
df = df.fillna('')
results = df.to_html(
classes="table table-bordered table-striped no-wrap",
index=False,
na_rep='',
) if has_data else ''
except Exception as e:
flash(str(e), 'error')
error = True
if has_data and len(df) == QUERY_LIMIT:
flash(
"Query output truncated at " + str(QUERY_LIMIT) +
" rows", 'info')
if not has_data and error:
flash('No data', 'error')
if csv:
return Response(
response=df.to_csv(index=False),
status=200,
mimetype="application/text")
form = QueryForm(request.form, data=data)
session.commit()
session.close()
return self.render(
'airflow/query.html', form=form,
title="Ad Hoc Query",
results=results or '',
has_data=has_data)
admin.add_view(QueryView(name='Ad Hoc Query', category="Data Profiling"))
class AirflowModelView(ModelView):
list_template = 'airflow/model_list.html'
edit_template = 'airflow/model_edit.html'
create_template = 'airflow/model_create.html'
page_size = 500
class ModelViewOnly(wwwutils.LoginMixin, AirflowModelView):
"""
    Modifying the base ModelView class for non-edit, browse-only operations
"""
named_filter_urls = True
can_create = False
can_edit = False
can_delete = False
column_display_pk = True
def log_link(v, c, m, p):
url = url_for(
'airflow.log',
dag_id=m.dag_id,
task_id=m.task_id,
execution_date=m.execution_date.isoformat())
return Markup(
'<a href="{url}">'
' <span class="glyphicon glyphicon-book" aria-hidden="true">'
'</span></a>').format(**locals())
def task_instance_link(v, c, m, p):
url = url_for(
'airflow.task',
dag_id=m.dag_id,
task_id=m.task_id,
execution_date=m.execution_date.isoformat())
url_root = url_for(
'airflow.graph',
dag_id=m.dag_id,
root=m.task_id,
execution_date=m.execution_date.isoformat())
return Markup(
"""
<span style="white-space: nowrap;">
<a href="{url}">{m.task_id}</a>
<a href="{url_root}" title="Filter on this task and upstream">
<span class="glyphicon glyphicon-filter" style="margin-left: 0px;"
aria-hidden="true"></span>
</a>
</span>
""".format(**locals()))
def state_f(v, c, m, p):
color = State.color(m.state)
return Markup(
'<span class="label" style="background-color:{color};">'
'{m.state}</span>'.format(**locals()))
def duration_f(v, c, m, p):
if m.end_date and m.duration:
return timedelta(seconds=m.duration)
def datetime_f(v, c, m, p):
attr = getattr(m, p)
dttm = attr.isoformat() if attr else ''
if datetime.now().isoformat()[:4] == dttm[:4]:
dttm = dttm[5:]
return Markup("<nobr>{}</nobr>".format(dttm))
def nobr_f(v, c, m, p):
return Markup("<nobr>{}</nobr>".format(getattr(m, p)))
class JobModelView(ModelViewOnly):
verbose_name_plural = "jobs"
verbose_name = "job"
column_default_sort = ('start_date', True)
column_filters = (
'job_type', 'dag_id', 'state',
'unixname', 'hostname', 'start_date', 'end_date', 'latest_heartbeat')
column_formatters = dict(
start_date=datetime_f,
end_date=datetime_f,
hostname=nobr_f,
state=state_f,
latest_heartbeat=datetime_f)
mv = JobModelView(jobs.BaseJob, Session, name="Jobs", category="Browse")
admin.add_view(mv)
class LogModelView(ModelViewOnly):
verbose_name_plural = "logs"
verbose_name = "log"
column_default_sort = ('dttm', True)
column_filters = ('dag_id', 'task_id', 'execution_date')
column_formatters = dict(
dttm=datetime_f, execution_date=datetime_f, dag_id=dag_link)
mv = LogModelView(
models.Log, Session, name="Logs", category="Browse")
admin.add_view(mv)
class TaskInstanceModelView(ModelViewOnly):
verbose_name_plural = "task instances"
verbose_name = "task instance"
column_filters = (
'state', 'dag_id', 'task_id', 'execution_date', 'hostname',
'queue', 'pool')
named_filter_urls = True
column_formatters = dict(
log=log_link, task_id=task_instance_link,
hostname=nobr_f,
state=state_f,
execution_date=datetime_f,
start_date=datetime_f,
end_date=datetime_f,
dag_id=dag_link, duration=duration_f)
column_searchable_list = ('dag_id', 'task_id', 'state')
column_default_sort = ('start_date', True)
column_list = (
'state', 'dag_id', 'task_id', 'execution_date',
'start_date', 'end_date', 'duration', 'job_id', 'hostname',
'unixname', 'priority_weight', 'log')
can_delete = True
page_size = 500
mv = TaskInstanceModelView(
models.TaskInstance, Session, name="Task Instances", category="Browse")
admin.add_view(mv)
mv = DagModelView(
models.DagModel, Session, name=None)
admin.add_view(mv)
# Hack to not add this view to the menu
admin._menu = admin._menu[:-1]
class ConnectionModelView(wwwutils.SuperUserMixin, AirflowModelView):
verbose_name = "Connection"
verbose_name_plural = "Connections"
column_default_sort = ('conn_id', False)
column_list = ('conn_id', 'conn_type', 'host', 'port')
form_overrides = dict(password=VisiblePasswordField)
form_choices = {
'conn_type': [
('ftp', 'FTP',),
('hdfs', 'HDFS',),
('http', 'HTTP',),
('hive_cli', 'Hive Client Wrapper',),
('hive_metastore', 'Hive Metastore Thrift',),
('hiveserver2', 'Hive Server 2 Thrift',),
('mysql', 'MySQL',),
('postgres', 'Postgres',),
('oracle', 'Oracle',),
('presto', 'Presto',),
('s3', 'S3',),
('samba', 'Samba',),
('sqlite', 'Sqlite',),
]
}
mv = ConnectionModelView(
models.Connection, Session,
name="Connections", category="Admin")
admin.add_view(mv)
class UserModelView(wwwutils.SuperUserMixin, AirflowModelView):
verbose_name = "User"
verbose_name_plural = "Users"
column_default_sort = 'username'
mv = UserModelView(models.User, Session, name="Users", category="Admin")
admin.add_view(mv)
class ConfigurationView(wwwutils.SuperUserMixin, BaseView):
@expose('/')
def conf(self):
from airflow import configuration
raw = request.args.get('raw') == "true"
title = "Airflow Configuration"
subtitle = configuration.AIRFLOW_CONFIG
f = open(configuration.AIRFLOW_CONFIG, 'r')
config = f.read()
f.close()
if raw:
return Response(
response=config,
status=200,
mimetype="application/text")
else:
code_html = Markup(highlight(
config,
lexers.IniLexer(), # Lexer call
HtmlFormatter(noclasses=True))
)
return self.render(
'airflow/code.html',
pre_subtitle=settings.HEADER + " v" + airflow.__version__,
code_html=code_html, title=title, subtitle=subtitle)
admin.add_view(ConfigurationView(name='Configuration', category="Admin"))
def label_link(v, c, m, p):
try:
default_params = eval(m.default_params)
except:
default_params = {}
url = url_for(
'airflow.chart', chart_id=m.id, iteration_no=m.iteration_no,
**default_params)
return Markup("<a href='{url}'>{m.label}</a>".format(**locals()))
class ChartModelView(wwwutils.DataProfilingMixin, AirflowModelView):
verbose_name = "chart"
verbose_name_plural = "charts"
form_columns = (
'label',
'owner',
'conn_id',
'chart_type',
'show_datatable',
'x_is_date',
'y_log_scale',
'show_sql',
'height',
'sql_layout',
'sql',
'default_params',)
column_list = (
'label', 'conn_id', 'chart_type', 'owner', 'last_modified',)
column_formatters = dict(label=label_link, last_modified=datetime_f)
column_default_sort = ('last_modified', True)
create_template = 'airflow/chart/create.html'
edit_template = 'airflow/chart/edit.html'
column_filters = ('label', 'owner.username', 'conn_id')
column_searchable_list = ('owner.username', 'label', 'sql')
column_descriptions = {
'label': "Can include {{ templated_fields }} and {{ macros }}",
'chart_type': "The type of chart to be displayed",
'sql': "Can include {{ templated_fields }} and {{ macros }}.",
'height': "Height of the chart, in pixels.",
'conn_id': "Source database to run the query against",
'x_is_date': (
"Whether the X axis should be casted as a date field. Expect most "
"intelligible date formats to get casted properly."
),
'owner': (
"The chart's owner, mostly used for reference and filtering in "
"the list view."
),
'show_datatable':
"Whether to display an interactive data table under the chart.",
'default_params': (
'A dictionary of {"key": "values",} that define what the '
'templated fields (parameters) values should be by default. '
'To be valid, it needs to "eval" as a Python dict. '
'The key values will show up in the url\'s querystring '
'and can be altered there.'
),
'show_sql': "Whether to display the SQL statement as a collapsible "
"section in the chart page.",
'y_log_scale': "Whether to use a log scale for the Y axis.",
'sql_layout': (
"Defines the layout of the SQL that the application should "
"expect. Depending on the tables you are sourcing from, it may "
"make more sense to pivot / unpivot the metrics."
),
}
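    # Illustrative default_params value (hypothetical keys; it must eval to a
    # Python dict): {"ds": "2015-01-01", "limit": "100"}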
column_labels = {
'sql': "SQL",
'height': "Chart Height",
'sql_layout': "SQL Layout",
'show_sql': "Display the SQL Statement",
'default_params': "Default Parameters",
}
form_choices = {
'chart_type': [
('line', 'Line Chart'),
('spline', 'Spline Chart'),
('bar', 'Bar Chart'),
('para', 'Parallel Coordinates'),
('column', 'Column Chart'),
('area', 'Overlapping Area Chart'),
('stacked_area', 'Stacked Area Chart'),
('percent_area', 'Percent Area Chart'),
('heatmap', 'Heatmap'),
('datatable', 'No chart, data table only'),
],
'sql_layout': [
('series', 'SELECT series, x, y FROM ...'),
('columns', 'SELECT x, y (series 1), y (series 2), ... FROM ...'),
],
'conn_id': [
(c.conn_id, c.conn_id)
for c in (
Session().query(models.Connection.conn_id)
.group_by(models.Connection.conn_id)
)
]
}
def on_model_change(self, form, model, is_created=True):
if model.iteration_no is None:
model.iteration_no = 0
else:
model.iteration_no += 1
if AUTHENTICATE and not model.user_id and current_user:
model.user_id = current_user.id
model.last_modified = datetime.now()
mv = ChartModelView(
models.Chart, Session,
name="Charts", category="Data Profiling")
admin.add_view(mv)
admin.add_link(
base.MenuLink(
category='Docs',
name='Documentation',
url='http://pythonhosted.org/airflow/'))
admin.add_link(
base.MenuLink(
category='Docs',
name='Github',
url='https://github.com/airbnb/airflow'))
class KnowEventView(wwwutils.DataProfilingMixin, AirflowModelView):
verbose_name = "known event"
verbose_name_plural = "known events"
form_columns = (
'label',
'event_type',
'start_date',
'end_date',
'reported_by',
'description')
column_list = (
'label', 'event_type', 'start_date', 'end_date', 'reported_by')
column_default_sort = ("start_date", True)
mv = KnowEventView(
models.KnownEvent, Session, name="Known Events", category="Data Profiling")
admin.add_view(mv)
class KnowEventTypeView(wwwutils.DataProfilingMixin, AirflowModelView):
pass
'''
# For debugging / troubleshooting
mv = KnowEventTypeView(
models.KnownEventType,
Session, name="Known Event Types", category="Manage")
admin.add_view(mv)
class DagPickleView(SuperUserMixin, ModelView):
pass
mv = DagPickleView(
models.DagPickle,
Session, name="Pickles", category="Manage")
admin.add_view(mv)
'''
class VariableView(wwwutils.LoginMixin, AirflowModelView):
verbose_name = "Variable"
verbose_name_plural = "Variables"
column_list = ('key',)
column_filters = ('key', 'val')
column_searchable_list = ('key', 'val')
form_widget_args = {
'val': {
'rows': 20,
}
}
mv = VariableView(
models.Variable, Session, name="Variables", category="Admin")
admin.add_view(mv)
def pool_link(v, c, m, p):
url = '/admin/taskinstance/?flt1_pool_equals=' + m.pool
return Markup("<a href='{url}'>{m.pool}</a>".format(**locals()))
def fused_slots(v, c, m, p):
url = (
'/admin/taskinstance/' +
'?flt1_pool_equals=' + m.pool +
'&flt2_state_equals=running')
return Markup("<a href='{0}'>{1}</a>".format(url, m.used_slots()))
def fqueued_slots(v, c, m, p):
url = (
'/admin/taskinstance/' +
'?flt1_pool_equals=' + m.pool +
'&flt2_state_equals=queued&sort=10&desc=1')
return Markup("<a href='{0}'>{1}</a>".format(url, m.queued_slots()))
class PoolModelView(wwwutils.SuperUserMixin, AirflowModelView):
column_list = ('pool', 'slots', 'used_slots', 'queued_slots')
column_formatters = dict(
pool=pool_link, used_slots=fused_slots, queued_slots=fqueued_slots)
named_filter_urls = True
mv = PoolModelView(models.Pool, Session, name="Pools", category="Admin")
admin.add_view(mv)
class SlaMissModelView(wwwutils.SuperUserMixin, AirflowModelView):
verbose_name_plural = "SLA misses"
verbose_name = "SLA miss"
column_list = (
'dag_id', 'task_id', 'execution_date', 'email_sent', 'timestamp')
column_formatters = dict(
task_id=task_instance_link,
execution_date=datetime_f,
timestamp=datetime_f,
dag_id=dag_link)
named_filter_urls = True
column_searchable_list = ('dag_id', 'task_id',)
column_filters = (
'dag_id', 'task_id', 'email_sent', 'timestamp', 'execution_date')
form_widget_args = {
'email_sent': {'disabled': True},
'timestamp': {'disabled': True},
}
mv = SlaMissModelView(
models.SlaMiss, Session, name="SLA Misses", category="Browse")
admin.add_view(mv)
def integrate_plugins():
"""Integrate plugins to the context"""
from airflow.plugins_manager import (
admin_views, flask_blueprints, menu_links)
for v in admin_views:
admin.add_view(v)
for bp in flask_blueprints:
print bp
app.register_blueprint(bp)
for ml in menu_links:
admin.add_link(ml)
integrate_plugins()
|
apache-2.0
|
shaunstanislaus/pandashells
|
pandashells/lib/outlier_lib.py
|
7
|
2092
|
#! /usr/bin/env python
# standard library imports
from collections import Counter
from pandashells.lib import module_checker_lib
# import required dependencies
module_checker_lib.check_for_modules(['pandas', 'numpy'])
import pandas as pd
import numpy as np
# disable the chained assignment warning because it raises false alarms here
pd.options.mode.chained_assignment = None
# recursively sigma-edit a series
def sigma_edit_series(sigma_thresh, in_series, iter_counter=None, max_iter=20):
iter_counter = Counter() if iter_counter is None else iter_counter
if in_series.count() == 0:
msg = "Error: No non-NaN values from which to remove outliers"
raise ValueError(msg)
iter_counter.update('n')
if iter_counter['n'] > max_iter:
msg = "Error: Max Number of iterations exceeded in sigma-editing"
raise ValueError(msg)
resid = in_series - in_series.mean()
std = resid.std()
sigma_t = sigma_thresh * std
outside = resid.abs() >= sigma_t
if any(outside):
in_series.loc[outside] = np.NaN
in_series = sigma_edit_series(
sigma_thresh, in_series, iter_counter, max_iter)
return in_series
def ensure_col_exists(df, col, df_name='dataframe'):
if not df.empty and col not in list(df.columns):
msg = 'in sigma_edit: {} does not have column {}'.format(
df_name, repr(col))
raise ValueError(msg)
def sigma_edit_dataframe(sigma_thresh, columns, df, max_iter=20):
"""
:type sigma_thresh: float
:param sigma_thresh: The sigma threshold
:type columns: list
:param columns: a list of columns to sigma edit
:type df: pandas.DataFrame
:param df: The dataframe with columns of data to sigma-edit
:type max_iter: int
:param max_iter: Cap the number of iteration at this number
:rtype: Pandas DataFrame
    :returns: A dataframe with outliers set to NaN
"""
for col in columns:
ensure_col_exists(df, col, 'df')
ser = df[col]
df.loc[:, col] = sigma_edit_series(sigma_thresh, ser, max_iter=max_iter)
return df
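# Hedged usage sketch (not part of the original library): run this module
# directly to see sigma_edit_dataframe replace an obvious outlier with NaN.
# The column name 'x' and the 1.5-sigma threshold are illustrative choices.
if __name__ == '__main__':
    demo_df = pd.DataFrame({'x': [1., 1.1, 0.9, 1.05, 50.]})
    # the extreme value 50. falls outside 1.5 sigma and should become NaN
    print(sigma_edit_dataframe(1.5, ['x'], demo_df))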
|
bsd-2-clause
|
subutai/nupic
|
examples/opf/clients/hotgym/anomaly/one_gym/run.py
|
10
|
4917
|
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Groups together code used for creating a NuPIC model and dealing with IO.
(This is a component of the One Hot Gym Anomaly Tutorial.)
"""
import importlib
import sys
import csv
import datetime
from nupic.data.inference_shifter import InferenceShifter
from nupic.frameworks.opf.model_factory import ModelFactory
import nupic_anomaly_output
DESCRIPTION = (
"Starts a NuPIC model from the model params returned by the swarm\n"
"and pushes each line of input from the gym into the model. Results\n"
"are written to an output file (default) or plotted dynamically if\n"
"the --plot option is specified.\n"
)
GYM_NAME = "rec-center-hourly"
DATA_DIR = "."
MODEL_PARAMS_DIR = "./model_params"
# '7/2/10 0:00'
DATE_FORMAT = "%m/%d/%y %H:%M"
def createModel(modelParams):
"""
Given a model params dictionary, create a CLA Model. Automatically enables
inference for kw_energy_consumption.
:param modelParams: Model params dict
:return: OPF Model object
"""
model = ModelFactory.create(modelParams)
model.enableInference({"predictedField": "kw_energy_consumption"})
return model
def getModelParamsFromName(gymName):
"""
Given a gym name, assumes a matching model params python module exists within
the model_params directory and attempts to import it.
:param gymName: Gym name, used to guess the model params module name.
:return: OPF Model params dictionary
"""
importName = "model_params.%s_model_params" % (
gymName.replace(" ", "_").replace("-", "_")
)
print "Importing model params from %s" % importName
try:
importedModelParams = importlib.import_module(importName).MODEL_PARAMS
except ImportError:
raise Exception("No model params exist for '%s'. Run swarm first!"
% gymName)
return importedModelParams
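# Worked example of the name mapping above (derived from the code, nothing new
# is assumed): gymName "rec-center-hourly" resolves to the module
# "model_params.rec_center_hourly_model_params".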
def runIoThroughNupic(inputData, model, gymName, plot):
"""
Handles looping over the input data and passing each row into the given model
object, as well as extracting the result object and passing it into an output
handler.
:param inputData: file path to input data CSV
:param model: OPF Model object
:param gymName: Gym name, used for output handler naming
:param plot: Whether to use matplotlib or not. If false, uses file output.
"""
inputFile = open(inputData, "rb")
csvReader = csv.reader(inputFile)
# skip header rows
csvReader.next()
csvReader.next()
csvReader.next()
shifter = InferenceShifter()
if plot:
output = nupic_anomaly_output.NuPICPlotOutput(gymName)
else:
output = nupic_anomaly_output.NuPICFileOutput(gymName)
counter = 0
for row in csvReader:
counter += 1
if (counter % 100 == 0):
print "Read %i lines..." % counter
timestamp = datetime.datetime.strptime(row[0], DATE_FORMAT)
consumption = float(row[1])
result = model.run({
"timestamp": timestamp,
"kw_energy_consumption": consumption
})
if plot:
result = shifter.shift(result)
prediction = result.inferences["multiStepBestPredictions"][1]
anomalyScore = result.inferences["anomalyScore"]
output.write(timestamp, consumption, prediction, anomalyScore)
inputFile.close()
output.close()
def runModel(gymName, plot=False):
"""
  Assumes the gymName corresponds to both a like-named model_params file in the
model_params directory, and that the data exists in a like-named CSV file in
the current directory.
:param gymName: Important for finding model params and input CSV file
:param plot: Plot in matplotlib? Don't use this unless matplotlib is
installed.
"""
print "Creating model from %s..." % gymName
model = createModel(getModelParamsFromName(gymName))
inputData = "%s/%s.csv" % (DATA_DIR, gymName.replace(" ", "_"))
runIoThroughNupic(inputData, model, gymName, plot)
if __name__ == "__main__":
print DESCRIPTION
plot = False
args = sys.argv[1:]
if "--plot" in args:
plot = True
runModel(GYM_NAME, plot=plot)
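# Hedged usage note (file names are derived from the constants above; the
# workflow itself is assumed): running ``python run.py`` expects
# ./rec-center-hourly.csv plus a swarm-generated
# model_params/rec_center_hourly_model_params.py; add ``--plot`` to plot with
# matplotlib instead of writing an output file.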
|
agpl-3.0
|
JosmanPS/scikit-learn
|
sklearn/datasets/tests/test_lfw.py
|
230
|
7880
|
"""This test for the LFW require medium-size data dowloading and processing
If the data has not been already downloaded by running the examples,
the tests won't run (skipped).
If the test are run, the first execution will be long (typically a bit
more than a couple of minutes) but as the dataset loader is leveraging
joblib, successive runs will be fast (less than 200ms).
"""
import random
import os
import shutil
import tempfile
import numpy as np
from sklearn.externals import six
try:
try:
from scipy.misc import imsave
except ImportError:
from scipy.misc.pilutil import imsave
except ImportError:
imsave = None
from sklearn.datasets import load_lfw_pairs
from sklearn.datasets import load_lfw_people
from sklearn.datasets import fetch_lfw_pairs
from sklearn.datasets import fetch_lfw_people
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import raises
SCIKIT_LEARN_DATA = tempfile.mkdtemp(prefix="scikit_learn_lfw_test_")
SCIKIT_LEARN_EMPTY_DATA = tempfile.mkdtemp(prefix="scikit_learn_empty_test_")
LFW_HOME = os.path.join(SCIKIT_LEARN_DATA, 'lfw_home')
FAKE_NAMES = [
'Abdelatif_Smith',
'Abhati_Kepler',
'Camara_Alvaro',
'Chen_Dupont',
'John_Lee',
'Lin_Bauman',
'Onur_Lopez',
]
def setup_module():
"""Test fixture run once and common to all tests of this module"""
if imsave is None:
raise SkipTest("PIL not installed.")
if not os.path.exists(LFW_HOME):
os.makedirs(LFW_HOME)
random_state = random.Random(42)
np_rng = np.random.RandomState(42)
# generate some random jpeg files for each person
counts = {}
for name in FAKE_NAMES:
folder_name = os.path.join(LFW_HOME, 'lfw_funneled', name)
if not os.path.exists(folder_name):
os.makedirs(folder_name)
n_faces = np_rng.randint(1, 5)
counts[name] = n_faces
for i in range(n_faces):
file_path = os.path.join(folder_name, name + '_%04d.jpg' % i)
uniface = np_rng.randint(0, 255, size=(250, 250, 3))
try:
imsave(file_path, uniface)
except ImportError:
raise SkipTest("PIL not installed")
# add some random file pollution to test robustness
with open(os.path.join(LFW_HOME, 'lfw_funneled', '.test.swp'), 'wb') as f:
f.write(six.b('Text file to be ignored by the dataset loader.'))
# generate some pairing metadata files using the same format as LFW
with open(os.path.join(LFW_HOME, 'pairsDevTrain.txt'), 'wb') as f:
f.write(six.b("10\n"))
more_than_two = [name for name, count in six.iteritems(counts)
if count >= 2]
for i in range(5):
name = random_state.choice(more_than_two)
first, second = random_state.sample(range(counts[name]), 2)
f.write(six.b('%s\t%d\t%d\n' % (name, first, second)))
for i in range(5):
first_name, second_name = random_state.sample(FAKE_NAMES, 2)
first_index = random_state.choice(np.arange(counts[first_name]))
second_index = random_state.choice(np.arange(counts[second_name]))
f.write(six.b('%s\t%d\t%s\t%d\n' % (first_name, first_index,
second_name, second_index)))
with open(os.path.join(LFW_HOME, 'pairsDevTest.txt'), 'wb') as f:
f.write(six.b("Fake place holder that won't be tested"))
with open(os.path.join(LFW_HOME, 'pairs.txt'), 'wb') as f:
f.write(six.b("Fake place holder that won't be tested"))
def teardown_module():
"""Test fixture (clean up) run once after all tests of this module"""
if os.path.isdir(SCIKIT_LEARN_DATA):
shutil.rmtree(SCIKIT_LEARN_DATA)
if os.path.isdir(SCIKIT_LEARN_EMPTY_DATA):
shutil.rmtree(SCIKIT_LEARN_EMPTY_DATA)
@raises(IOError)
def test_load_empty_lfw_people():
fetch_lfw_people(data_home=SCIKIT_LEARN_EMPTY_DATA, download_if_missing=False)
def test_load_lfw_people_deprecation():
msg = ("Function 'load_lfw_people' has been deprecated in 0.17 and will be "
"removed in 0.19."
"Use fetch_lfw_people(download_if_missing=False) instead.")
assert_warns_message(DeprecationWarning, msg, load_lfw_people,
data_home=SCIKIT_LEARN_DATA)
def test_load_fake_lfw_people():
lfw_people = fetch_lfw_people(data_home=SCIKIT_LEARN_DATA,
min_faces_per_person=3, download_if_missing=False)
    # The data is cropped around the center as a rectangular bounding box
    # around the face. Colors are converted to gray levels:
assert_equal(lfw_people.images.shape, (10, 62, 47))
assert_equal(lfw_people.data.shape, (10, 2914))
    # the target is an array of person integer ids
assert_array_equal(lfw_people.target, [2, 0, 1, 0, 2, 0, 2, 1, 1, 2])
# names of the persons can be found using the target_names array
expected_classes = ['Abdelatif Smith', 'Abhati Kepler', 'Onur Lopez']
assert_array_equal(lfw_people.target_names, expected_classes)
    # It is possible to ask for the original data without any cropping or color
    # conversion and with no limit on the number of pictures per person
lfw_people = fetch_lfw_people(data_home=SCIKIT_LEARN_DATA,
resize=None, slice_=None, color=True, download_if_missing=False)
assert_equal(lfw_people.images.shape, (17, 250, 250, 3))
# the ids and class names are the same as previously
assert_array_equal(lfw_people.target,
[0, 0, 1, 6, 5, 6, 3, 6, 0, 3, 6, 1, 2, 4, 5, 1, 2])
assert_array_equal(lfw_people.target_names,
['Abdelatif Smith', 'Abhati Kepler', 'Camara Alvaro',
'Chen Dupont', 'John Lee', 'Lin Bauman', 'Onur Lopez'])
@raises(ValueError)
def test_load_fake_lfw_people_too_restrictive():
fetch_lfw_people(data_home=SCIKIT_LEARN_DATA, min_faces_per_person=100, download_if_missing=False)
@raises(IOError)
def test_load_empty_lfw_pairs():
fetch_lfw_pairs(data_home=SCIKIT_LEARN_EMPTY_DATA, download_if_missing=False)
def test_load_lfw_pairs_deprecation():
msg = ("Function 'load_lfw_pairs' has been deprecated in 0.17 and will be "
"removed in 0.19."
"Use fetch_lfw_pairs(download_if_missing=False) instead.")
assert_warns_message(DeprecationWarning, msg, load_lfw_pairs,
data_home=SCIKIT_LEARN_DATA)
def test_load_fake_lfw_pairs():
lfw_pairs_train = fetch_lfw_pairs(data_home=SCIKIT_LEARN_DATA, download_if_missing=False)
    # The data is cropped around the center as a rectangular bounding box
    # around the face. Colors are converted to gray levels:
assert_equal(lfw_pairs_train.pairs.shape, (10, 2, 62, 47))
# the target is whether the person is the same or not
assert_array_equal(lfw_pairs_train.target, [1, 1, 1, 1, 1, 0, 0, 0, 0, 0])
# names of the persons can be found using the target_names array
expected_classes = ['Different persons', 'Same person']
assert_array_equal(lfw_pairs_train.target_names, expected_classes)
    # It is possible to ask for the original data without any cropping or color
# conversion
lfw_pairs_train = fetch_lfw_pairs(data_home=SCIKIT_LEARN_DATA,
resize=None, slice_=None, color=True, download_if_missing=False)
assert_equal(lfw_pairs_train.pairs.shape, (10, 2, 250, 250, 3))
# the ids and class names are the same as previously
assert_array_equal(lfw_pairs_train.target, [1, 1, 1, 1, 1, 0, 0, 0, 0, 0])
assert_array_equal(lfw_pairs_train.target_names, expected_classes)
|
bsd-3-clause
|
pprett/scikit-learn
|
benchmarks/bench_plot_nmf.py
|
28
|
15630
|
"""
Benchmarks of Non-Negative Matrix Factorization
"""
# Authors: Tom Dupre la Tour (benchmark)
# Chih-Jen Lin (original projected gradient NMF implementation)
# Anthony Di Franco (projected gradient, Python and NumPy port)
# License: BSD 3 clause
from __future__ import print_function
from time import time
import sys
import warnings
import numbers
import numpy as np
import matplotlib.pyplot as plt
import pandas
from sklearn.utils.testing import ignore_warnings
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.decomposition.nmf import NMF
from sklearn.decomposition.nmf import _initialize_nmf
from sklearn.decomposition.nmf import _beta_divergence
from sklearn.decomposition.nmf import INTEGER_TYPES, _check_init
from sklearn.externals.joblib import Memory
from sklearn.exceptions import ConvergenceWarning
from sklearn.utils.extmath import fast_dot, safe_sparse_dot, squared_norm
from sklearn.utils import check_array
from sklearn.utils.validation import check_is_fitted, check_non_negative
mem = Memory(cachedir='.', verbose=0)
###################
# Start of _PGNMF #
###################
# This class implements a projected gradient solver for the NMF.
# The projected gradient solver was removed from scikit-learn in version 0.19,
# and a simplified copy is used here for comparison purpose only.
# It is not tested, and it may change or disappear without notice.
def _norm(x):
"""Dot product-based Euclidean norm implementation
See: http://fseoane.net/blog/2011/computing-the-vector-norm/
"""
return np.sqrt(squared_norm(x))
def _nls_subproblem(X, W, H, tol, max_iter, alpha=0., l1_ratio=0.,
sigma=0.01, beta=0.1):
"""Non-negative least square solver
Solves a non-negative least squares subproblem using the projected
gradient descent algorithm.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Constant matrix.
W : array-like, shape (n_samples, n_components)
Constant matrix.
H : array-like, shape (n_components, n_features)
Initial guess for the solution.
tol : float
Tolerance of the stopping condition.
max_iter : int
Maximum number of iterations before timing out.
alpha : double, default: 0.
Constant that multiplies the regularization terms. Set it to zero to
have no regularization.
l1_ratio : double, default: 0.
The regularization mixing parameter, with 0 <= l1_ratio <= 1.
For l1_ratio = 0 the penalty is an L2 penalty.
For l1_ratio = 1 it is an L1 penalty.
For 0 < l1_ratio < 1, the penalty is a combination of L1 and L2.
sigma : float
Constant used in the sufficient decrease condition checked by the line
search. Smaller values lead to a looser sufficient decrease condition,
thus reducing the time taken by the line search, but potentially
increasing the number of iterations of the projected gradient
procedure. 0.01 is a commonly used value in the optimization
literature.
beta : float
Factor by which the step size is decreased (resp. increased) until
(resp. as long as) the sufficient decrease condition is satisfied.
Larger values allow to find a better step size but lead to longer line
search. 0.1 is a commonly used value in the optimization literature.
Returns
-------
H : array-like, shape (n_components, n_features)
Solution to the non-negative least squares problem.
grad : array-like, shape (n_components, n_features)
The gradient.
n_iter : int
The number of iterations done by the algorithm.
References
----------
C.-J. Lin. Projected gradient methods for non-negative matrix
factorization. Neural Computation, 19(2007), 2756-2779.
http://www.csie.ntu.edu.tw/~cjlin/nmf/
"""
WtX = safe_sparse_dot(W.T, X)
WtW = fast_dot(W.T, W)
# values justified in the paper (alpha is renamed gamma)
gamma = 1
for n_iter in range(1, max_iter + 1):
grad = np.dot(WtW, H) - WtX
if alpha > 0 and l1_ratio == 1.:
grad += alpha
elif alpha > 0:
grad += alpha * (l1_ratio + (1 - l1_ratio) * H)
# The following multiplication with a boolean array is more than twice
# as fast as indexing into grad.
if _norm(grad * np.logical_or(grad < 0, H > 0)) < tol:
break
Hp = H
for inner_iter in range(20):
# Gradient step.
Hn = H - gamma * grad
# Projection step.
Hn *= Hn > 0
d = Hn - H
gradd = np.dot(grad.ravel(), d.ravel())
dQd = np.dot(np.dot(WtW, d).ravel(), d.ravel())
suff_decr = (1 - sigma) * gradd + 0.5 * dQd < 0
if inner_iter == 0:
decr_gamma = not suff_decr
if decr_gamma:
if suff_decr:
H = Hn
break
else:
gamma *= beta
elif not suff_decr or (Hp == Hn).all():
H = Hp
break
else:
gamma /= beta
Hp = Hn
if n_iter == max_iter:
warnings.warn("Iteration limit reached in nls subproblem.",
ConvergenceWarning)
return H, grad, n_iter
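# Hedged usage sketch (commented out, not part of the original benchmark):
# solve the NLS subproblem min_H ||X - W*H||_F^2 with H >= 0 for small random
# non-negative matrices:
#
#   rng = np.random.RandomState(0)
#   X = np.abs(rng.randn(30, 20))
#   W = np.abs(rng.randn(30, 5))
#   H0 = np.abs(rng.randn(5, 20))
#   H, grad, n_iter = _nls_subproblem(X, W, H0, tol=1e-4, max_iter=200)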
def _fit_projected_gradient(X, W, H, tol, max_iter, nls_max_iter, alpha,
l1_ratio):
gradW = (np.dot(W, np.dot(H, H.T)) -
safe_sparse_dot(X, H.T, dense_output=True))
gradH = (np.dot(np.dot(W.T, W), H) -
safe_sparse_dot(W.T, X, dense_output=True))
init_grad = squared_norm(gradW) + squared_norm(gradH.T)
# max(0.001, tol) to force alternating minimizations of W and H
tolW = max(0.001, tol) * np.sqrt(init_grad)
tolH = tolW
for n_iter in range(1, max_iter + 1):
# stopping condition as discussed in paper
proj_grad_W = squared_norm(gradW * np.logical_or(gradW < 0, W > 0))
proj_grad_H = squared_norm(gradH * np.logical_or(gradH < 0, H > 0))
if (proj_grad_W + proj_grad_H) / init_grad < tol ** 2:
break
# update W
Wt, gradWt, iterW = _nls_subproblem(X.T, H.T, W.T, tolW, nls_max_iter,
alpha=alpha, l1_ratio=l1_ratio)
W, gradW = Wt.T, gradWt.T
if iterW == 1:
tolW = 0.1 * tolW
# update H
H, gradH, iterH = _nls_subproblem(X, W, H, tolH, nls_max_iter,
alpha=alpha, l1_ratio=l1_ratio)
if iterH == 1:
tolH = 0.1 * tolH
H[H == 0] = 0 # fix up negative zeros
if n_iter == max_iter:
Wt, _, _ = _nls_subproblem(X.T, H.T, W.T, tolW, nls_max_iter,
alpha=alpha, l1_ratio=l1_ratio)
W = Wt.T
return W, H, n_iter
class _PGNMF(NMF):
"""Non-Negative Matrix Factorization (NMF) with projected gradient solver.
This class is private and for comparison purpose only.
It may change or disappear without notice.
"""
def __init__(self, n_components=None, solver='pg', init=None,
tol=1e-4, max_iter=200, random_state=None,
alpha=0., l1_ratio=0., nls_max_iter=10):
self.nls_max_iter = nls_max_iter
self.n_components = n_components
self.init = init
self.solver = solver
self.tol = tol
self.max_iter = max_iter
self.random_state = random_state
self.alpha = alpha
self.l1_ratio = l1_ratio
def fit(self, X, y=None, **params):
self.fit_transform(X, **params)
return self
def transform(self, X):
check_is_fitted(self, 'components_')
H = self.components_
W, _, self.n_iter_ = self._fit_transform(X, H=H, update_H=False)
return W
def inverse_transform(self, W):
check_is_fitted(self, 'components_')
return np.dot(W, self.components_)
def fit_transform(self, X, y=None, W=None, H=None):
W, H, self.n_iter = self._fit_transform(X, W=W, H=H, update_H=True)
self.components_ = H
return W
def _fit_transform(self, X, y=None, W=None, H=None, update_H=True):
X = check_array(X, accept_sparse=('csr', 'csc'))
check_non_negative(X, "NMF (input X)")
n_samples, n_features = X.shape
n_components = self.n_components
if n_components is None:
n_components = n_features
if (not isinstance(n_components, INTEGER_TYPES) or
n_components <= 0):
raise ValueError("Number of components must be a positive integer;"
" got (n_components=%r)" % n_components)
if not isinstance(self.max_iter, INTEGER_TYPES) or self.max_iter < 0:
raise ValueError("Maximum number of iterations must be a positive "
"integer; got (max_iter=%r)" % self.max_iter)
if not isinstance(self.tol, numbers.Number) or self.tol < 0:
raise ValueError("Tolerance for stopping criteria must be "
"positive; got (tol=%r)" % self.tol)
# check W and H, or initialize them
if self.init == 'custom' and update_H:
_check_init(H, (n_components, n_features), "NMF (input H)")
_check_init(W, (n_samples, n_components), "NMF (input W)")
elif not update_H:
_check_init(H, (n_components, n_features), "NMF (input H)")
W = np.zeros((n_samples, n_components))
else:
W, H = _initialize_nmf(X, n_components, init=self.init,
random_state=self.random_state)
if update_H: # fit_transform
W, H, n_iter = _fit_projected_gradient(
X, W, H, self.tol, self.max_iter, self.nls_max_iter,
self.alpha, self.l1_ratio)
else: # transform
Wt, _, n_iter = _nls_subproblem(X.T, H.T, W.T, self.tol,
self.nls_max_iter,
alpha=self.alpha,
l1_ratio=self.l1_ratio)
W = Wt.T
if n_iter == self.max_iter and self.tol > 0:
warnings.warn("Maximum number of iteration %d reached. Increase it"
" to improve convergence." % self.max_iter,
ConvergenceWarning)
return W, H, n_iter
#################
# End of _PGNMF #
#################
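# Hedged usage sketch (commented out): _PGNMF follows the scikit-learn
# estimator API, so with a non-negative matrix X it can be benchmarked like
# the public NMF class:
#
#   pg = _PGNMF(n_components=10, tol=1e-4, max_iter=200, random_state=0)
#   W = pg.fit_transform(X)
#   H = pg.components_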
def plot_results(results_df, plot_name):
if results_df is None:
return None
plt.figure(figsize=(16, 6))
colors = 'bgr'
markers = 'ovs'
ax = plt.subplot(1, 3, 1)
for i, init in enumerate(np.unique(results_df['init'])):
plt.subplot(1, 3, i + 1, sharex=ax, sharey=ax)
for j, method in enumerate(np.unique(results_df['method'])):
mask = np.logical_and(results_df['init'] == init,
results_df['method'] == method)
selected_items = results_df[mask]
plt.plot(selected_items['time'], selected_items['loss'],
color=colors[j % len(colors)], ls='-',
marker=markers[j % len(markers)],
label=method)
plt.legend(loc=0, fontsize='x-small')
plt.xlabel("Time (s)")
plt.ylabel("loss")
plt.title("%s" % init)
plt.suptitle(plot_name, fontsize=16)
@ignore_warnings(category=ConvergenceWarning)
# use joblib to cache the results.
# X_shape is specified in the arguments to avoid hashing X
@mem.cache(ignore=['X', 'W0', 'H0'])
def bench_one(name, X, W0, H0, X_shape, clf_type, clf_params, init,
n_components, random_state):
W = W0.copy()
H = H0.copy()
clf = clf_type(**clf_params)
st = time()
W = clf.fit_transform(X, W=W, H=H)
end = time()
H = clf.components_
this_loss = _beta_divergence(X, W, H, 2.0, True)
duration = end - st
return this_loss, duration
def run_bench(X, clfs, plot_name, n_components, tol, alpha, l1_ratio):
start = time()
results = []
for name, clf_type, iter_range, clf_params in clfs:
print("Training %s:" % name)
for rs, init in enumerate(('nndsvd', 'nndsvdar', 'random')):
print(" %s %s: " % (init, " " * (8 - len(init))), end="")
W, H = _initialize_nmf(X, n_components, init, 1e-6, rs)
for max_iter in iter_range:
clf_params['alpha'] = alpha
clf_params['l1_ratio'] = l1_ratio
clf_params['max_iter'] = max_iter
clf_params['tol'] = tol
clf_params['random_state'] = rs
clf_params['init'] = 'custom'
clf_params['n_components'] = n_components
this_loss, duration = bench_one(name, X, W, H, X.shape,
clf_type, clf_params,
init, n_components, rs)
init_name = "init='%s'" % init
results.append((name, this_loss, duration, init_name))
# print("loss: %.6f, time: %.3f sec" % (this_loss, duration))
print(".", end="")
sys.stdout.flush()
print(" ")
    # Use a pandas DataFrame to organize the results
results_df = pandas.DataFrame(results,
columns="method loss time init".split())
print("Total time = %0.3f sec\n" % (time() - start))
# plot the results
plot_results(results_df, plot_name)
return results_df
def load_20news():
print("Loading 20 newsgroups dataset")
print("-----------------------------")
from sklearn.datasets import fetch_20newsgroups
dataset = fetch_20newsgroups(shuffle=True, random_state=1,
remove=('headers', 'footers', 'quotes'))
vectorizer = TfidfVectorizer(max_df=0.95, min_df=2, stop_words='english')
tfidf = vectorizer.fit_transform(dataset.data)
return tfidf
def load_faces():
print("Loading Olivetti face dataset")
print("-----------------------------")
from sklearn.datasets import fetch_olivetti_faces
faces = fetch_olivetti_faces(shuffle=True)
return faces.data
def build_clfs(cd_iters, pg_iters, mu_iters):
clfs = [("Coordinate Descent", NMF, cd_iters, {'solver': 'cd'}),
("Projected Gradient", _PGNMF, pg_iters, {'solver': 'pg'}),
("Multiplicative Update", NMF, mu_iters, {'solver': 'mu'}),
]
return clfs
if __name__ == '__main__':
alpha = 0.
l1_ratio = 0.5
n_components = 10
tol = 1e-15
# first benchmark on 20 newsgroup dataset: sparse, shape(11314, 39116)
plot_name = "20 Newsgroups sparse dataset"
cd_iters = np.arange(1, 30)
pg_iters = np.arange(1, 6)
mu_iters = np.arange(1, 30)
clfs = build_clfs(cd_iters, pg_iters, mu_iters)
X_20news = load_20news()
run_bench(X_20news, clfs, plot_name, n_components, tol, alpha, l1_ratio)
# second benchmark on Olivetti faces dataset: dense, shape(400, 4096)
plot_name = "Olivetti Faces dense dataset"
cd_iters = np.arange(1, 30)
pg_iters = np.arange(1, 12)
mu_iters = np.arange(1, 30)
clfs = build_clfs(cd_iters, pg_iters, mu_iters)
X_faces = load_faces()
run_bench(X_faces, clfs, plot_name, n_components, tol, alpha, l1_ratio,)
plt.show()
|
bsd-3-clause
|
OshynSong/scikit-learn
|
sklearn/decomposition/tests/test_online_lda.py
|
49
|
13124
|
import numpy as np
from scipy.linalg import block_diag
from scipy.sparse import csr_matrix
from scipy.special import psi
from sklearn.decomposition import LatentDirichletAllocation
from sklearn.decomposition._online_lda import (_dirichlet_expectation_1d,
_dirichlet_expectation_2d)
from sklearn.utils.testing import assert_allclose
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_raises_regexp
from sklearn.utils.testing import if_safe_multiprocessing_with_blas
from sklearn.utils.validation import NotFittedError
from sklearn.externals.six.moves import xrange
def _build_sparse_mtx():
    # Create 3 topics and each topic has 3 distinct words.
# (Each word only belongs to a single topic.)
n_topics = 3
block = n_topics * np.ones((3, 3))
blocks = [block] * n_topics
X = block_diag(*blocks)
X = csr_matrix(X)
return (n_topics, X)
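# Worked example of the matrix built above (derived from the code): with
# n_topics = 3 the dense form of X is a 9 x 9 block-diagonal matrix whose
# three 3 x 3 blocks are filled with the value 3, so documents 0-2 only use
# words 0-2, documents 3-5 only use words 3-5, and documents 6-8 only use
# words 6-8.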
def test_lda_default_prior_params():
    # default prior parameter should be `1 / n_topics`
# and verbose params should not affect result
n_topics, X = _build_sparse_mtx()
prior = 1. / n_topics
lda_1 = LatentDirichletAllocation(n_topics=n_topics, doc_topic_prior=prior,
topic_word_prior=prior, random_state=0)
lda_2 = LatentDirichletAllocation(n_topics=n_topics, random_state=0)
topic_distr_1 = lda_1.fit_transform(X)
topic_distr_2 = lda_2.fit_transform(X)
assert_almost_equal(topic_distr_1, topic_distr_2)
def test_lda_fit_batch():
# Test LDA batch learning_offset (`fit` method with 'batch' learning)
rng = np.random.RandomState(0)
n_topics, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_topics=n_topics, evaluate_every=1,
learning_method='batch', random_state=rng)
lda.fit(X)
correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)]
for component in lda.components_:
# Find top 3 words in each LDA component
top_idx = set(component.argsort()[-3:][::-1])
assert_true(tuple(sorted(top_idx)) in correct_idx_grps)
def test_lda_fit_online():
# Test LDA online learning (`fit` method with 'online' learning)
rng = np.random.RandomState(0)
n_topics, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_topics=n_topics, learning_offset=10.,
evaluate_every=1, learning_method='online',
random_state=rng)
lda.fit(X)
correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)]
for component in lda.components_:
# Find top 3 words in each LDA component
top_idx = set(component.argsort()[-3:][::-1])
assert_true(tuple(sorted(top_idx)) in correct_idx_grps)
def test_lda_partial_fit():
# Test LDA online learning (`partial_fit` method)
    # (same checks as test_lda_fit_batch)
rng = np.random.RandomState(0)
n_topics, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_topics=n_topics, learning_offset=10.,
total_samples=100, random_state=rng)
for i in xrange(3):
lda.partial_fit(X)
correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)]
for c in lda.components_:
top_idx = set(c.argsort()[-3:][::-1])
assert_true(tuple(sorted(top_idx)) in correct_idx_grps)
def test_lda_dense_input():
# Test LDA with dense input.
rng = np.random.RandomState(0)
n_topics, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_topics=n_topics, learning_method='batch',
random_state=rng)
lda.fit(X.toarray())
correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)]
for component in lda.components_:
# Find top 3 words in each LDA component
top_idx = set(component.argsort()[-3:][::-1])
assert_true(tuple(sorted(top_idx)) in correct_idx_grps)
def test_lda_transform():
# Test LDA transform.
# Transform result cannot be negative
rng = np.random.RandomState(0)
X = rng.randint(5, size=(20, 10))
n_topics = 3
lda = LatentDirichletAllocation(n_topics=n_topics, random_state=rng)
X_trans = lda.fit_transform(X)
assert_true((X_trans > 0.0).any())
def test_lda_fit_transform():
# Test LDA fit_transform & transform
# fit_transform and transform result should be the same
for method in ('online', 'batch'):
rng = np.random.RandomState(0)
X = rng.randint(10, size=(50, 20))
lda = LatentDirichletAllocation(n_topics=5, learning_method=method,
random_state=rng)
X_fit = lda.fit_transform(X)
X_trans = lda.transform(X)
assert_array_almost_equal(X_fit, X_trans, 4)
def test_lda_partial_fit_dim_mismatch():
# test `n_features` mismatch in `partial_fit`
rng = np.random.RandomState(0)
n_topics = rng.randint(3, 6)
n_col = rng.randint(6, 10)
X_1 = np.random.randint(4, size=(10, n_col))
X_2 = np.random.randint(4, size=(10, n_col + 1))
lda = LatentDirichletAllocation(n_topics=n_topics, learning_offset=5.,
total_samples=20, random_state=rng)
lda.partial_fit(X_1)
assert_raises_regexp(ValueError, r"^The provided data has",
lda.partial_fit, X_2)
def test_invalid_params():
# test `_check_params` method
X = np.ones((5, 10))
invalid_models = (
('n_topics', LatentDirichletAllocation(n_topics=0)),
('learning_method',
LatentDirichletAllocation(learning_method='unknown')),
('total_samples', LatentDirichletAllocation(total_samples=0)),
('learning_offset', LatentDirichletAllocation(learning_offset=-1)),
)
for param, model in invalid_models:
regex = r"^Invalid %r parameter" % param
assert_raises_regexp(ValueError, regex, model.fit, X)
def test_lda_negative_input():
    # test passing a dense matrix with negative values.
X = -np.ones((5, 10))
lda = LatentDirichletAllocation()
regex = r"^Negative values in data passed"
assert_raises_regexp(ValueError, regex, lda.fit, X)
def test_lda_no_component_error():
# test `transform` and `perplexity` before `fit`
rng = np.random.RandomState(0)
X = rng.randint(4, size=(20, 10))
lda = LatentDirichletAllocation()
regex = r"^no 'components_' attribute"
assert_raises_regexp(NotFittedError, regex, lda.transform, X)
assert_raises_regexp(NotFittedError, regex, lda.perplexity, X)
def test_lda_transform_mismatch():
# test `n_features` mismatch in partial_fit and transform
rng = np.random.RandomState(0)
X = rng.randint(4, size=(20, 10))
X_2 = rng.randint(4, size=(10, 8))
n_topics = rng.randint(3, 6)
lda = LatentDirichletAllocation(n_topics=n_topics, random_state=rng)
lda.partial_fit(X)
assert_raises_regexp(ValueError, r"^The provided data has",
lda.partial_fit, X_2)
@if_safe_multiprocessing_with_blas
def test_lda_multi_jobs():
n_topics, X = _build_sparse_mtx()
# Test LDA batch training with multi CPU
for method in ('online', 'batch'):
rng = np.random.RandomState(0)
lda = LatentDirichletAllocation(n_topics=n_topics, n_jobs=2,
learning_method=method,
random_state=rng)
lda.fit(X)
correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)]
for c in lda.components_:
top_idx = set(c.argsort()[-3:][::-1])
assert_true(tuple(sorted(top_idx)) in correct_idx_grps)
@if_safe_multiprocessing_with_blas
def test_lda_partial_fit_multi_jobs():
# Test LDA online training with multi CPU
rng = np.random.RandomState(0)
n_topics, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_topics=n_topics, n_jobs=2,
learning_offset=5., total_samples=30,
random_state=rng)
for i in range(2):
lda.partial_fit(X)
correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)]
for c in lda.components_:
top_idx = set(c.argsort()[-3:][::-1])
assert_true(tuple(sorted(top_idx)) in correct_idx_grps)
def test_lda_perplexity_mismatch():
# test dimension mismatch in `perplexity` method
rng = np.random.RandomState(0)
n_topics = rng.randint(3, 6)
n_samples = rng.randint(6, 10)
X = np.random.randint(4, size=(n_samples, 10))
lda = LatentDirichletAllocation(n_topics=n_topics, learning_offset=5.,
total_samples=20, random_state=rng)
lda.fit(X)
# invalid samples
invalid_n_samples = rng.randint(4, size=(n_samples + 1, n_topics))
assert_raises_regexp(ValueError, r'Number of samples', lda.perplexity, X,
invalid_n_samples)
# invalid topic number
invalid_n_topics = rng.randint(4, size=(n_samples, n_topics + 1))
assert_raises_regexp(ValueError, r'Number of topics', lda.perplexity, X,
invalid_n_topics)
def test_lda_perplexity():
# Test LDA perplexity for batch training
# perplexity should be lower after each iteration
n_topics, X = _build_sparse_mtx()
for method in ('online', 'batch'):
lda_1 = LatentDirichletAllocation(n_topics=n_topics, max_iter=1,
learning_method=method,
total_samples=100, random_state=0)
lda_2 = LatentDirichletAllocation(n_topics=n_topics, max_iter=10,
learning_method=method,
total_samples=100, random_state=0)
distr_1 = lda_1.fit_transform(X)
perp_1 = lda_1.perplexity(X, distr_1, sub_sampling=False)
distr_2 = lda_2.fit_transform(X)
perp_2 = lda_2.perplexity(X, distr_2, sub_sampling=False)
assert_greater_equal(perp_1, perp_2)
perp_1_subsampling = lda_1.perplexity(X, distr_1, sub_sampling=True)
perp_2_subsampling = lda_2.perplexity(X, distr_2, sub_sampling=True)
assert_greater_equal(perp_1_subsampling, perp_2_subsampling)
def test_lda_score():
# Test LDA score for batch training
# score should be higher after each iteration
n_topics, X = _build_sparse_mtx()
for method in ('online', 'batch'):
lda_1 = LatentDirichletAllocation(n_topics=n_topics, max_iter=1,
learning_method=method,
total_samples=100, random_state=0)
lda_2 = LatentDirichletAllocation(n_topics=n_topics, max_iter=10,
learning_method=method,
total_samples=100, random_state=0)
lda_1.fit_transform(X)
score_1 = lda_1.score(X)
lda_2.fit_transform(X)
score_2 = lda_2.score(X)
assert_greater_equal(score_2, score_1)
def test_perplexity_input_format():
# Test LDA perplexity for sparse and dense input
# score should be the same for both dense and sparse input
n_topics, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_topics=n_topics, max_iter=1,
learning_method='batch',
total_samples=100, random_state=0)
distr = lda.fit_transform(X)
perp_1 = lda.perplexity(X)
perp_2 = lda.perplexity(X, distr)
perp_3 = lda.perplexity(X.toarray(), distr)
assert_almost_equal(perp_1, perp_2)
assert_almost_equal(perp_1, perp_3)
def test_lda_score_perplexity():
# Test the relationship between LDA score and perplexity
n_topics, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_topics=n_topics, max_iter=10,
random_state=0)
distr = lda.fit_transform(X)
perplexity_1 = lda.perplexity(X, distr, sub_sampling=False)
score = lda.score(X)
perplexity_2 = np.exp(-1. * (score / np.sum(X.data)))
assert_almost_equal(perplexity_1, perplexity_2)
def test_lda_empty_docs():
"""Test LDA on empty document (all-zero rows)."""
Z = np.zeros((5, 4))
for X in [Z, csr_matrix(Z)]:
lda = LatentDirichletAllocation(max_iter=750).fit(X)
assert_almost_equal(lda.components_.sum(axis=0),
np.ones(lda.components_.shape[1]))
def test_dirichlet_expectation():
"""Test Cython version of Dirichlet expectation calculation."""
x = np.logspace(-100, 10, 10000)
assert_allclose(_dirichlet_expectation_1d(x),
np.exp(psi(x) - psi(np.sum(x))),
atol=1e-19)
x = x.reshape(100, 100)
assert_allclose(_dirichlet_expectation_2d(x),
psi(x) - psi(np.sum(x, axis=1)[:, np.newaxis]),
rtol=1e-11, atol=3e-9)
|
bsd-3-clause
|
cpcloud/dask
|
dask/dataframe/reshape.py
|
1
|
6863
|
from __future__ import absolute_import, division, print_function
import numpy as np
import pandas as pd
from .core import Series, DataFrame, map_partitions, apply_concat_apply
from . import methods
from .utils import is_categorical_dtype, is_scalar, has_known_categories
###############################################################
# Dummies
###############################################################
def get_dummies(data, prefix=None, prefix_sep='_', dummy_na=False,
columns=None, sparse=False, drop_first=False):
"""
Convert categorical variable into dummy/indicator variables. Data must
    have category dtype to infer result's ``columns``.
Parameters
----------
data : Series or DataFrame with category dtype
prefix : string, list of strings, or dict of strings, default None
String to append DataFrame column names
Pass a list with length equal to the number of columns
        when calling get_dummies on a DataFrame. Alternatively, `prefix`
can be a dictionary mapping column names to prefixes.
prefix_sep : string, default '_'
If appending prefix, separator/delimiter to use. Or pass a
        list or dictionary as with `prefix`.
dummy_na : bool, default False
Add a column to indicate NaNs, if False NaNs are ignored.
columns : list-like, default None
Column names in the DataFrame to be encoded.
If `columns` is None then all the columns with
`category` dtype will be converted.
drop_first : bool, default False
Whether to get k-1 dummies out of k categorical levels by removing the
first level.
Returns
-------
dummies : DataFrame
"""
if isinstance(data, (pd.Series, pd.DataFrame)):
return pd.get_dummies(data, prefix=prefix,
prefix_sep=prefix_sep, dummy_na=dummy_na,
columns=columns, sparse=sparse,
drop_first=drop_first)
not_cat_msg = ("`get_dummies` with non-categorical dtypes is not "
"supported. Please use `df.categorize()` beforehand to "
"convert to categorical dtype.")
unknown_cat_msg = ("`get_dummies` with unknown categories is not "
"supported. Please use `column.cat.as_known()` or "
"`df.categorize()` beforehand to ensure known "
"categories")
if isinstance(data, Series):
if not is_categorical_dtype(data):
raise NotImplementedError(not_cat_msg)
if not has_known_categories(data):
raise NotImplementedError(unknown_cat_msg)
elif isinstance(data, DataFrame):
if columns is None:
if (data.dtypes == 'object').any():
raise NotImplementedError(not_cat_msg)
columns = data._meta.select_dtypes(include=['category']).columns
else:
if not all(is_categorical_dtype(data[c]) for c in columns):
raise NotImplementedError(not_cat_msg)
if not all(has_known_categories(data[c]) for c in columns):
raise NotImplementedError(unknown_cat_msg)
if sparse:
raise NotImplementedError('sparse=True is not supported')
return map_partitions(pd.get_dummies, data, prefix=prefix,
prefix_sep=prefix_sep, dummy_na=dummy_na,
columns=columns, sparse=sparse,
drop_first=drop_first)
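# A minimal usage sketch for ``get_dummies`` above (the tiny Series and the
# partition count are illustrative only). The input must already carry a known
# categorical dtype, as enforced by the checks above:
#
# >>> import pandas as pd
# >>> import dask.dataframe as dd
# >>> s = dd.from_pandas(pd.Series(list('abca'), dtype='category'), npartitions=2)
# >>> get_dummies(s).compute()          # one indicator column per category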
###############################################################
# Pivot table
###############################################################
def pivot_table(df, index=None, columns=None,
values=None, aggfunc='mean'):
"""
Create a spreadsheet-style pivot table as a DataFrame. Target ``columns``
must have category dtype to infer result's ``columns``.
``index``, ``columns``, ``values`` and ``aggfunc`` must be all scalar.
Parameters
----------
df : DataFrame
values : scalar
column to aggregate
index : scalar
column to be index
columns : scalar
column to be columns
aggfunc : {'mean', 'sum', 'count'}, default 'mean'
Returns
-------
table : DataFrame
"""
if not is_scalar(index) or index is None:
raise ValueError("'index' must be the name of an existing column")
if not is_scalar(columns) or columns is None:
raise ValueError("'columns' must be the name of an existing column")
if not is_categorical_dtype(df[columns]):
raise ValueError("'columns' must be category dtype")
if not has_known_categories(df[columns]):
raise ValueError("'columns' must have known categories. Please use "
"`df[columns].cat.as_known()` beforehand to ensure "
"known categories")
if not is_scalar(values) or values is None:
raise ValueError("'values' must be the name of an existing column")
if not is_scalar(aggfunc) or aggfunc not in ('mean', 'sum', 'count'):
raise ValueError("aggfunc must be either 'mean', 'sum' or 'count'")
# _emulate can't work for empty data
# the result must have CategoricalIndex columns
new_columns = pd.CategoricalIndex(df[columns].cat.categories, name=columns)
meta = pd.DataFrame(columns=new_columns, dtype=np.float64)
kwargs = {'index': index, 'columns': columns, 'values': values}
pv_sum = apply_concat_apply([df],
chunk=methods.pivot_sum,
aggregate=methods.pivot_agg,
meta=meta,
token='pivot_table_sum',
chunk_kwargs=kwargs)
pv_count = apply_concat_apply([df],
chunk=methods.pivot_count,
aggregate=methods.pivot_agg,
meta=meta,
token='pivot_table_count',
chunk_kwargs=kwargs)
if aggfunc == 'sum':
return pv_sum
elif aggfunc == 'count':
return pv_count
elif aggfunc == 'mean':
return pv_sum / pv_count
else:
raise ValueError
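# A small sketch of ``pivot_table`` usage (the frame below is illustrative
# only). The ``columns`` column must be categorical with known categories so
# the output columns can be inferred without computing:
#
# >>> import pandas as pd
# >>> import dask.dataframe as dd
# >>> pdf = pd.DataFrame({'A': pd.Categorical(['x', 'y', 'x', 'y']),
# ...                     'B': ['a', 'a', 'b', 'b'],
# ...                     'C': [1.0, 2.0, 3.0, 4.0]})
# >>> ddf = dd.from_pandas(pdf, npartitions=2)
# >>> pivot_table(ddf, index='B', columns='A', values='C', aggfunc='sum').compute()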
###############################################################
# Melt
###############################################################
def melt(frame, id_vars=None, value_vars=None, var_name=None,
value_name='value', col_level=None):
from dask.dataframe.core import no_default
return frame.map_partitions(pd.melt, meta=no_default, id_vars=id_vars,
value_vars=value_vars,
var_name=var_name, value_name=value_name,
col_level=col_level, token='melt')
|
bsd-3-clause
|
tylerjereddy/scipy
|
scipy/stats/_distn_infrastructure.py
|
5
|
135768
|
#
# Author: Travis Oliphant 2002-2011 with contributions from
# SciPy Developers 2004-2011
#
from scipy._lib._util import getfullargspec_no_self as _getfullargspec
import sys
import keyword
import re
import types
import warnings
import inspect
from itertools import zip_longest
from scipy._lib import doccer
from scipy._lib._util import _lazywhere
from ._distr_params import distcont, distdiscrete
from scipy._lib._util import check_random_state
from scipy.special import (comb, chndtr, entr, xlogy, ive)
# for root finding for continuous distribution ppf, and max likelihood
# estimation
from scipy import optimize
# for functions of continuous distributions (e.g. moments, entropy, cdf)
from scipy import integrate
# to approximate the pdf of a continuous distribution given its cdf
from scipy.misc import derivative
# for scipy.stats.entropy. Attempts to import just that function or file
# have cause import problems
from scipy import stats
from numpy import (arange, putmask, ravel, ones, shape, ndarray, zeros, floor,
logical_and, log, sqrt, place, argmax, vectorize, asarray,
nan, inf, isinf, NINF, empty)
import numpy as np
from ._constants import _XMAX
# These are the docstring parts used for substitution in specific
# distribution docstrings
docheaders = {'methods': """\nMethods\n-------\n""",
'notes': """\nNotes\n-----\n""",
'examples': """\nExamples\n--------\n"""}
_doc_rvs = """\
rvs(%(shapes)s, loc=0, scale=1, size=1, random_state=None)
Random variates.
"""
_doc_pdf = """\
pdf(x, %(shapes)s, loc=0, scale=1)
Probability density function.
"""
_doc_logpdf = """\
logpdf(x, %(shapes)s, loc=0, scale=1)
Log of the probability density function.
"""
_doc_pmf = """\
pmf(k, %(shapes)s, loc=0, scale=1)
Probability mass function.
"""
_doc_logpmf = """\
logpmf(k, %(shapes)s, loc=0, scale=1)
Log of the probability mass function.
"""
_doc_cdf = """\
cdf(x, %(shapes)s, loc=0, scale=1)
Cumulative distribution function.
"""
_doc_logcdf = """\
logcdf(x, %(shapes)s, loc=0, scale=1)
Log of the cumulative distribution function.
"""
_doc_sf = """\
sf(x, %(shapes)s, loc=0, scale=1)
Survival function (also defined as ``1 - cdf``, but `sf` is sometimes more accurate).
"""
_doc_logsf = """\
logsf(x, %(shapes)s, loc=0, scale=1)
Log of the survival function.
"""
_doc_ppf = """\
ppf(q, %(shapes)s, loc=0, scale=1)
Percent point function (inverse of ``cdf`` --- percentiles).
"""
_doc_isf = """\
isf(q, %(shapes)s, loc=0, scale=1)
Inverse survival function (inverse of ``sf``).
"""
_doc_moment = """\
moment(n, %(shapes)s, loc=0, scale=1)
Non-central moment of order n
"""
_doc_stats = """\
stats(%(shapes)s, loc=0, scale=1, moments='mv')
Mean('m'), variance('v'), skew('s'), and/or kurtosis('k').
"""
_doc_entropy = """\
entropy(%(shapes)s, loc=0, scale=1)
(Differential) entropy of the RV.
"""
_doc_fit = """\
fit(data)
Parameter estimates for generic data.
See `scipy.stats.rv_continuous.fit <https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.rv_continuous.fit.html#scipy.stats.rv_continuous.fit>`__ for detailed documentation of the
keyword arguments.
"""
_doc_expect = """\
expect(func, args=(%(shapes_)s), loc=0, scale=1, lb=None, ub=None, conditional=False, **kwds)
Expected value of a function (of one argument) with respect to the distribution.
"""
_doc_expect_discrete = """\
expect(func, args=(%(shapes_)s), loc=0, lb=None, ub=None, conditional=False)
Expected value of a function (of one argument) with respect to the distribution.
"""
_doc_median = """\
median(%(shapes)s, loc=0, scale=1)
Median of the distribution.
"""
_doc_mean = """\
mean(%(shapes)s, loc=0, scale=1)
Mean of the distribution.
"""
_doc_var = """\
var(%(shapes)s, loc=0, scale=1)
Variance of the distribution.
"""
_doc_std = """\
std(%(shapes)s, loc=0, scale=1)
Standard deviation of the distribution.
"""
_doc_interval = """\
interval(alpha, %(shapes)s, loc=0, scale=1)
Endpoints of the range that contains fraction alpha [0, 1] of the
distribution.
"""
_doc_allmethods = ''.join([docheaders['methods'], _doc_rvs, _doc_pdf,
_doc_logpdf, _doc_cdf, _doc_logcdf, _doc_sf,
_doc_logsf, _doc_ppf, _doc_isf, _doc_moment,
_doc_stats, _doc_entropy, _doc_fit,
_doc_expect, _doc_median,
_doc_mean, _doc_var, _doc_std, _doc_interval])
_doc_default_longsummary = """\
As an instance of the `rv_continuous` class, `%(name)s` object inherits from it
a collection of generic methods (see below for the full list),
and completes them with details specific for this particular distribution.
"""
_doc_default_frozen_note = """
Alternatively, the object may be called (as a function) to fix the shape,
location, and scale parameters returning a "frozen" continuous RV object:
rv = %(name)s(%(shapes)s, loc=0, scale=1)
- Frozen RV object with the same methods but holding the given shape,
location, and scale fixed.
"""
_doc_default_example = """\
Examples
--------
>>> from scipy.stats import %(name)s
>>> import matplotlib.pyplot as plt
>>> fig, ax = plt.subplots(1, 1)
Calculate the first four moments:
%(set_vals_stmt)s
>>> mean, var, skew, kurt = %(name)s.stats(%(shapes)s, moments='mvsk')
Display the probability density function (``pdf``):
>>> x = np.linspace(%(name)s.ppf(0.01, %(shapes)s),
... %(name)s.ppf(0.99, %(shapes)s), 100)
>>> ax.plot(x, %(name)s.pdf(x, %(shapes)s),
... 'r-', lw=5, alpha=0.6, label='%(name)s pdf')
Alternatively, the distribution object can be called (as a function)
to fix the shape, location and scale parameters. This returns a "frozen"
RV object holding the given parameters fixed.
Freeze the distribution and display the frozen ``pdf``:
>>> rv = %(name)s(%(shapes)s)
>>> ax.plot(x, rv.pdf(x), 'k-', lw=2, label='frozen pdf')
Check accuracy of ``cdf`` and ``ppf``:
>>> vals = %(name)s.ppf([0.001, 0.5, 0.999], %(shapes)s)
>>> np.allclose([0.001, 0.5, 0.999], %(name)s.cdf(vals, %(shapes)s))
True
Generate random numbers:
>>> r = %(name)s.rvs(%(shapes)s, size=1000)
And compare the histogram:
>>> ax.hist(r, density=True, histtype='stepfilled', alpha=0.2)
>>> ax.legend(loc='best', frameon=False)
>>> plt.show()
"""
_doc_default_locscale = """\
The probability density above is defined in the "standardized" form. To shift
and/or scale the distribution use the ``loc`` and ``scale`` parameters.
Specifically, ``%(name)s.pdf(x, %(shapes)s, loc, scale)`` is identically
equivalent to ``%(name)s.pdf(y, %(shapes)s) / scale`` with
``y = (x - loc) / scale``. Note that shifting the location of a distribution
does not make it a "noncentral" distribution; noncentral generalizations of
some distributions are available in separate classes.
"""
_doc_default = ''.join([_doc_default_longsummary,
_doc_allmethods,
'\n',
_doc_default_example])
_doc_default_before_notes = ''.join([_doc_default_longsummary,
_doc_allmethods])
docdict = {
'rvs': _doc_rvs,
'pdf': _doc_pdf,
'logpdf': _doc_logpdf,
'cdf': _doc_cdf,
'logcdf': _doc_logcdf,
'sf': _doc_sf,
'logsf': _doc_logsf,
'ppf': _doc_ppf,
'isf': _doc_isf,
'stats': _doc_stats,
'entropy': _doc_entropy,
'fit': _doc_fit,
'moment': _doc_moment,
'expect': _doc_expect,
'interval': _doc_interval,
'mean': _doc_mean,
'std': _doc_std,
'var': _doc_var,
'median': _doc_median,
'allmethods': _doc_allmethods,
'longsummary': _doc_default_longsummary,
'frozennote': _doc_default_frozen_note,
'example': _doc_default_example,
'default': _doc_default,
'before_notes': _doc_default_before_notes,
'after_notes': _doc_default_locscale
}
# Reuse common content between continuous and discrete docs, change some
# minor bits.
docdict_discrete = docdict.copy()
docdict_discrete['pmf'] = _doc_pmf
docdict_discrete['logpmf'] = _doc_logpmf
docdict_discrete['expect'] = _doc_expect_discrete
_doc_disc_methods = ['rvs', 'pmf', 'logpmf', 'cdf', 'logcdf', 'sf', 'logsf',
'ppf', 'isf', 'stats', 'entropy', 'expect', 'median',
'mean', 'var', 'std', 'interval']
for obj in _doc_disc_methods:
docdict_discrete[obj] = docdict_discrete[obj].replace(', scale=1', '')
_doc_disc_methods_err_varname = ['cdf', 'logcdf', 'sf', 'logsf']
for obj in _doc_disc_methods_err_varname:
docdict_discrete[obj] = docdict_discrete[obj].replace('(x, ', '(k, ')
docdict_discrete.pop('pdf')
docdict_discrete.pop('logpdf')
_doc_allmethods = ''.join([docdict_discrete[obj] for obj in _doc_disc_methods])
docdict_discrete['allmethods'] = docheaders['methods'] + _doc_allmethods
docdict_discrete['longsummary'] = _doc_default_longsummary.replace(
'rv_continuous', 'rv_discrete')
_doc_default_frozen_note = """
Alternatively, the object may be called (as a function) to fix the shape and
location parameters returning a "frozen" discrete RV object:
rv = %(name)s(%(shapes)s, loc=0)
- Frozen RV object with the same methods but holding the given shape and
location fixed.
"""
docdict_discrete['frozennote'] = _doc_default_frozen_note
_doc_default_discrete_example = """\
Examples
--------
>>> from scipy.stats import %(name)s
>>> import matplotlib.pyplot as plt
>>> fig, ax = plt.subplots(1, 1)
Calculate the first four moments:
%(set_vals_stmt)s
>>> mean, var, skew, kurt = %(name)s.stats(%(shapes)s, moments='mvsk')
Display the probability mass function (``pmf``):
>>> x = np.arange(%(name)s.ppf(0.01, %(shapes)s),
... %(name)s.ppf(0.99, %(shapes)s))
>>> ax.plot(x, %(name)s.pmf(x, %(shapes)s), 'bo', ms=8, label='%(name)s pmf')
>>> ax.vlines(x, 0, %(name)s.pmf(x, %(shapes)s), colors='b', lw=5, alpha=0.5)
Alternatively, the distribution object can be called (as a function)
to fix the shape and location. This returns a "frozen" RV object holding
the given parameters fixed.
Freeze the distribution and display the frozen ``pmf``:
>>> rv = %(name)s(%(shapes)s)
>>> ax.vlines(x, 0, rv.pmf(x), colors='k', linestyles='-', lw=1,
... label='frozen pmf')
>>> ax.legend(loc='best', frameon=False)
>>> plt.show()
Check accuracy of ``cdf`` and ``ppf``:
>>> prob = %(name)s.cdf(x, %(shapes)s)
>>> np.allclose(x, %(name)s.ppf(prob, %(shapes)s))
True
Generate random numbers:
>>> r = %(name)s.rvs(%(shapes)s, size=1000)
"""
_doc_default_discrete_locscale = """\
The probability mass function above is defined in the "standardized" form.
To shift the distribution use the ``loc`` parameter.
Specifically, ``%(name)s.pmf(k, %(shapes)s, loc)`` is identically
equivalent to ``%(name)s.pmf(k - loc, %(shapes)s)``.
"""
docdict_discrete['example'] = _doc_default_discrete_example
docdict_discrete['after_notes'] = _doc_default_discrete_locscale
_doc_default_before_notes = ''.join([docdict_discrete['longsummary'],
docdict_discrete['allmethods']])
docdict_discrete['before_notes'] = _doc_default_before_notes
_doc_default_disc = ''.join([docdict_discrete['longsummary'],
docdict_discrete['allmethods'],
docdict_discrete['frozennote'],
docdict_discrete['example']])
docdict_discrete['default'] = _doc_default_disc
# clean up all the separate docstring elements, we do not need them anymore
for obj in [s for s in dir() if s.startswith('_doc_')]:
exec('del ' + obj)
del obj
def _moment(data, n, mu=None):
if mu is None:
mu = data.mean()
return ((data - mu)**n).mean()
def _moment_from_stats(n, mu, mu2, g1, g2, moment_func, args):
if (n == 0):
return 1.0
elif (n == 1):
if mu is None:
val = moment_func(1, *args)
else:
val = mu
elif (n == 2):
if mu2 is None or mu is None:
val = moment_func(2, *args)
else:
val = mu2 + mu*mu
elif (n == 3):
if g1 is None or mu2 is None or mu is None:
val = moment_func(3, *args)
else:
mu3 = g1 * np.power(mu2, 1.5) # 3rd central moment
val = mu3+3*mu*mu2+mu*mu*mu # 3rd non-central moment
elif (n == 4):
if g1 is None or g2 is None or mu2 is None or mu is None:
val = moment_func(4, *args)
else:
mu4 = (g2+3.0)*(mu2**2.0) # 4th central moment
mu3 = g1*np.power(mu2, 1.5) # 3rd central moment
val = mu4+4*mu*mu3+6*mu*mu*mu2+mu*mu*mu*mu
else:
val = moment_func(n, *args)
return val
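# A quick numeric check of the identities above against the standard normal
# (mu=0, mu2=1, g1=0, g2=0), whose noncentral moments are E[Y**3] = 0 and
# E[Y**4] = 3; ``moment_func`` is never consulted because all stats are given:
#
# >>> float(_moment_from_stats(3, 0.0, 1.0, 0.0, 0.0, None, ()))
# 0.0
# >>> float(_moment_from_stats(4, 0.0, 1.0, 0.0, 0.0, None, ()))
# 3.0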
def _skew(data):
"""
skew is third central moment / variance**(1.5)
"""
data = np.ravel(data)
mu = data.mean()
m2 = ((data - mu)**2).mean()
m3 = ((data - mu)**3).mean()
return m3 / np.power(m2, 1.5)
def _kurtosis(data):
"""kurtosis is fourth central moment / variance**2 - 3."""
data = np.ravel(data)
mu = data.mean()
m2 = ((data - mu)**2).mean()
m4 = ((data - mu)**4).mean()
return m4 / m2**2 - 3
def _fit_determine_optimizer(optimizer):
if not callable(optimizer) and isinstance(optimizer, str):
if not optimizer.startswith('fmin_'):
optimizer = "fmin_"+optimizer
if optimizer == 'fmin_':
optimizer = 'fmin'
try:
optimizer = getattr(optimize, optimizer)
except AttributeError as e:
raise ValueError("%s is not a valid optimizer" % optimizer) from e
return optimizer
# Frozen RV class
class rv_frozen:
def __init__(self, dist, *args, **kwds):
self.args = args
self.kwds = kwds
# create a new instance
self.dist = dist.__class__(**dist._updated_ctor_param())
shapes, _, _ = self.dist._parse_args(*args, **kwds)
self.a, self.b = self.dist._get_support(*shapes)
@property
def random_state(self):
return self.dist._random_state
@random_state.setter
def random_state(self, seed):
self.dist._random_state = check_random_state(seed)
def pdf(self, x): # raises AttributeError in frozen discrete distribution
return self.dist.pdf(x, *self.args, **self.kwds)
def logpdf(self, x):
return self.dist.logpdf(x, *self.args, **self.kwds)
def cdf(self, x):
return self.dist.cdf(x, *self.args, **self.kwds)
def logcdf(self, x):
return self.dist.logcdf(x, *self.args, **self.kwds)
def ppf(self, q):
return self.dist.ppf(q, *self.args, **self.kwds)
def isf(self, q):
return self.dist.isf(q, *self.args, **self.kwds)
def rvs(self, size=None, random_state=None):
kwds = self.kwds.copy()
kwds.update({'size': size, 'random_state': random_state})
return self.dist.rvs(*self.args, **kwds)
def sf(self, x):
return self.dist.sf(x, *self.args, **self.kwds)
def logsf(self, x):
return self.dist.logsf(x, *self.args, **self.kwds)
def stats(self, moments='mv'):
kwds = self.kwds.copy()
kwds.update({'moments': moments})
return self.dist.stats(*self.args, **kwds)
def median(self):
return self.dist.median(*self.args, **self.kwds)
def mean(self):
return self.dist.mean(*self.args, **self.kwds)
def var(self):
return self.dist.var(*self.args, **self.kwds)
def std(self):
return self.dist.std(*self.args, **self.kwds)
def moment(self, n):
return self.dist.moment(n, *self.args, **self.kwds)
def entropy(self):
return self.dist.entropy(*self.args, **self.kwds)
def pmf(self, k):
return self.dist.pmf(k, *self.args, **self.kwds)
def logpmf(self, k):
return self.dist.logpmf(k, *self.args, **self.kwds)
def interval(self, alpha):
return self.dist.interval(alpha, *self.args, **self.kwds)
def expect(self, func=None, lb=None, ub=None, conditional=False, **kwds):
# expect method only accepts shape parameters as positional args
# hence convert self.args, self.kwds, also loc/scale
# See the .expect method docstrings for the meaning of
# other parameters.
a, loc, scale = self.dist._parse_args(*self.args, **self.kwds)
if isinstance(self.dist, rv_discrete):
return self.dist.expect(func, a, loc, lb, ub, conditional, **kwds)
else:
return self.dist.expect(func, a, loc, scale, lb, ub,
conditional, **kwds)
def support(self):
return self.dist.support(*self.args, **self.kwds)
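# A short sketch of how the frozen wrapper above is typically used (the gamma
# parameters are illustrative only): freezing stores shape/loc/scale once, so
# later method calls take no distribution arguments.
#
# >>> import numpy as np
# >>> from scipy import stats
# >>> frozen = stats.gamma(a=2.0, loc=0.0, scale=3.0)
# >>> np.allclose([frozen.mean(), frozen.var()], [6.0, 18.0])
# True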
def argsreduce(cond, *args):
"""Clean arguments to:
1. Ensure all arguments are iterable (arrays of dimension at least one).
2. If ``cond`` is not all True, keep only the elements of each broadcast
argument where ``ravel(cond)`` is True (as 1-D arrays; size-1 arguments
are passed through unchanged).
Return list of processed arguments.
Examples
--------
>>> rng = np.random.default_rng()
>>> A = rng.random((4, 5))
>>> B = 2
>>> C = rng.random((1, 5))
>>> cond = np.ones(A.shape)
>>> [A1, B1, C1] = argsreduce(cond, A, B, C)
>>> A1.shape
(4, 5)
>>> B1.shape
(1,)
>>> C1.shape
(1, 5)
>>> cond[2,:] = 0
>>> [A1, B1, C1] = argsreduce(cond, A, B, C)
>>> A1.shape
(15,)
>>> B1.shape
(1,)
>>> C1.shape
(15,)
"""
# some distributions assume arguments are iterable.
newargs = np.atleast_1d(*args)
# np.atleast_1d returns an array if only one argument, or a list of arrays
# if more than one argument.
if not isinstance(newargs, list):
newargs = [newargs, ]
if np.all(cond):
# Nothing to do
return newargs
s = cond.shape
# np.extract returns flattened arrays, which are not broadcastable together
# unless they are either the same size or size == 1.
return [(arg if np.size(arg) == 1
else np.extract(cond, np.broadcast_to(arg, s)))
for arg in newargs]
parse_arg_template = """
def _parse_args(self, %(shape_arg_str)s %(locscale_in)s):
return (%(shape_arg_str)s), %(locscale_out)s
def _parse_args_rvs(self, %(shape_arg_str)s %(locscale_in)s, size=None):
return self._argcheck_rvs(%(shape_arg_str)s %(locscale_out)s, size=size)
def _parse_args_stats(self, %(shape_arg_str)s %(locscale_in)s, moments='mv'):
return (%(shape_arg_str)s), %(locscale_out)s, moments
"""
# Both the continuous and discrete distributions depend on ncx2.
# The function name ncx2 is an abbreviation for noncentral chi squared.
def _ncx2_log_pdf(x, df, nc):
# We use (xs**2 + ns**2)/2 = (xs - ns)**2/2 + xs*ns, and include the
# factor of exp(-xs*ns) into the ive function to improve numerical
# stability at large values of xs. See also `rice.pdf`.
df2 = df/2.0 - 1.0
xs, ns = np.sqrt(x), np.sqrt(nc)
res = xlogy(df2/2.0, x/nc) - 0.5*(xs - ns)**2
corr = ive(df2, xs*ns) / 2.0
# Return res + np.log(corr) avoiding np.log(0)
return _lazywhere(
corr > 0,
(res, corr),
f=lambda r, c: r + np.log(c),
fillvalue=-np.inf)
def _ncx2_pdf(x, df, nc):
# Copy of _ncx2_log_pdf avoiding np.log(0) when corr = 0
df2 = df/2.0 - 1.0
xs, ns = np.sqrt(x), np.sqrt(nc)
res = xlogy(df2/2.0, x/nc) - 0.5*(xs - ns)**2
corr = ive(df2, xs*ns) / 2.0
return np.exp(res) * corr
def _ncx2_cdf(x, df, nc):
return chndtr(x, df, nc)
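# Internal consistency sketch for the helpers above (the test values are
# arbitrary): away from the corr == 0 regime, exponentiating _ncx2_log_pdf
# should reproduce _ncx2_pdf.
#
# >>> x, df, nc = np.array([0.5, 2.0, 10.0]), 4.0, 3.0
# >>> np.allclose(np.exp(_ncx2_log_pdf(x, df, nc)), _ncx2_pdf(x, df, nc))
# True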
class rv_generic:
"""Class which encapsulates common functionality between rv_discrete
and rv_continuous.
"""
def __init__(self, seed=None):
super().__init__()
# figure out if _stats signature has 'moments' keyword
sig = _getfullargspec(self._stats)
self._stats_has_moments = ((sig.varkw is not None) or
('moments' in sig.args) or
('moments' in sig.kwonlyargs))
self._random_state = check_random_state(seed)
# For historical reasons, `size` was made an attribute that was read
# inside _rvs(). The code is being changed so that 'size'
# is an argument
# to self._rvs(). However some external (non-SciPy) distributions
# have not
# been updated. Maintain backwards compatibility by checking if
# the self._rvs() signature has the 'size' keyword, or a **kwarg,
# and if not set self._size inside self.rvs()
# before calling self._rvs().
argspec = inspect.getfullargspec(self._rvs)
self._rvs_uses_size_attribute = (argspec.varkw is None and
'size' not in argspec.args and
'size' not in argspec.kwonlyargs)
# Warn on first use only
self._rvs_size_warned = False
@property
def random_state(self):
"""Get or set the generator object for generating random variates.
If `seed` is None (or `np.random`), the `numpy.random.RandomState`
singleton is used.
If `seed` is an int, a new ``RandomState`` instance is used,
seeded with `seed`.
If `seed` is already a ``Generator`` or ``RandomState`` instance then
that instance is used.
"""
return self._random_state
@random_state.setter
def random_state(self, seed):
self._random_state = check_random_state(seed)
def __setstate__(self, state):
try:
self.__dict__.update(state)
# attaches the dynamically created methods on each instance.
# if a subclass overrides rv_generic.__setstate__, or implements
# it's own _attach_methods, then it must make sure that
# _attach_argparser_methods is called.
self._attach_methods()
except ValueError:
# reconstitute an old pickle scipy<1.6, that contains
# (_ctor_param, random_state) as state
self._ctor_param = state[0]
self._random_state = state[1]
self.__init__()
def _attach_methods(self):
"""Attaches dynamically created methods to the rv_* instance.
This method must be overridden by subclasses, and must itself call
_attach_argparser_methods. This method is called in __init__ in
subclasses, and in __setstate__
"""
raise NotImplementedError
def _attach_argparser_methods(self):
"""
Generates the argument-parsing functions dynamically and attaches
them to the instance.
Should be called from `_attach_methods`, typically in __init__ and
during unpickling (__setstate__)
"""
ns = {}
exec(self._parse_arg_template, ns)
# NB: attach to the instance, not class
for name in ['_parse_args', '_parse_args_stats', '_parse_args_rvs']:
setattr(self, name, types.MethodType(ns[name], self))
def _construct_argparser(
self, meths_to_inspect, locscale_in, locscale_out):
"""Construct the parser string for the shape arguments.
This method should be called in __init__ of a class for each
distribution. It creates the `_parse_arg_template` attribute that is
then used by `_attach_argparser_methods` to dynamically create and
attach the `_parse_args`, `_parse_args_stats`, `_parse_args_rvs`
methods to the instance.
If self.shapes is a non-empty string, interprets it as a
comma-separated list of shape parameters.
Otherwise inspects the call signatures of `meths_to_inspect`
and constructs the argument-parsing functions from these.
In this case also sets `shapes` and `numargs`.
"""
if self.shapes:
# sanitize the user-supplied shapes
if not isinstance(self.shapes, str):
raise TypeError('shapes must be a string.')
shapes = self.shapes.replace(',', ' ').split()
for field in shapes:
if keyword.iskeyword(field):
raise SyntaxError('keywords cannot be used as shapes.')
if not re.match('^[_a-zA-Z][_a-zA-Z0-9]*$', field):
raise SyntaxError(
'shapes must be valid python identifiers')
else:
# find out the call signatures (_pdf, _cdf etc), deduce shape
# arguments. Generic methods only have 'self, x', any further args
# are shapes.
shapes_list = []
for meth in meths_to_inspect:
shapes_args = _getfullargspec(meth) # NB does not contain self
args = shapes_args.args[1:] # peel off 'x', too
if args:
shapes_list.append(args)
# *args or **kwargs are not allowed w/automatic shapes
if shapes_args.varargs is not None:
raise TypeError(
'*args are not allowed w/out explicit shapes')
if shapes_args.varkw is not None:
raise TypeError(
'**kwds are not allowed w/out explicit shapes')
if shapes_args.kwonlyargs:
raise TypeError(
'kwonly args are not allowed w/out explicit shapes')
if shapes_args.defaults is not None:
raise TypeError('defaults are not allowed for shapes')
if shapes_list:
shapes = shapes_list[0]
# make sure the signatures are consistent
for item in shapes_list:
if item != shapes:
raise TypeError('Shape arguments are inconsistent.')
else:
shapes = []
# have the arguments, construct the method from template
shapes_str = ', '.join(shapes) + ', ' if shapes else '' # NB: not None
dct = dict(shape_arg_str=shapes_str,
locscale_in=locscale_in,
locscale_out=locscale_out,
)
# this string is used by _attach_argparser_methods
self._parse_arg_template = parse_arg_template % dct
self.shapes = ', '.join(shapes) if shapes else None
if not hasattr(self, 'numargs'):
# allows more general subclassing with *args
self.numargs = len(shapes)
def _construct_doc(self, docdict, shapes_vals=None):
"""Construct the instance docstring with string substitutions."""
tempdict = docdict.copy()
tempdict['name'] = self.name or 'distname'
tempdict['shapes'] = self.shapes or ''
if shapes_vals is None:
shapes_vals = ()
vals = ', '.join('%.3g' % val for val in shapes_vals)
tempdict['vals'] = vals
tempdict['shapes_'] = self.shapes or ''
if self.shapes and self.numargs == 1:
tempdict['shapes_'] += ','
if self.shapes:
tempdict['set_vals_stmt'] = '>>> %s = %s' % (self.shapes, vals)
else:
tempdict['set_vals_stmt'] = ''
if self.shapes is None:
# remove shapes from call parameters if there are none
for item in ['default', 'before_notes']:
tempdict[item] = tempdict[item].replace(
"\n%(shapes)s : array_like\n shape parameters", "")
for i in range(2):
if self.shapes is None:
# necessary because we use %(shapes)s in two forms (w w/o ", ")
self.__doc__ = self.__doc__.replace("%(shapes)s, ", "")
try:
self.__doc__ = doccer.docformat(self.__doc__, tempdict)
except TypeError as e:
raise Exception("Unable to construct docstring for "
"distribution \"%s\": %s" %
(self.name, repr(e))) from e
# correct for empty shapes
self.__doc__ = self.__doc__.replace('(, ', '(').replace(', )', ')')
def _construct_default_doc(self, longname=None, extradoc=None,
docdict=None, discrete='continuous'):
"""Construct instance docstring from the default template."""
if longname is None:
longname = 'A'
if extradoc is None:
extradoc = ''
if extradoc.startswith('\n\n'):
extradoc = extradoc[2:]
self.__doc__ = ''.join(['%s %s random variable.' % (longname, discrete),
'\n\n%(before_notes)s\n', docheaders['notes'],
extradoc, '\n%(example)s'])
self._construct_doc(docdict)
def freeze(self, *args, **kwds):
"""Freeze the distribution for the given arguments.
Parameters
----------
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution. Should include all
the non-optional arguments, may include ``loc`` and ``scale``.
Returns
-------
rv_frozen : rv_frozen instance
The frozen distribution.
"""
return rv_frozen(self, *args, **kwds)
def __call__(self, *args, **kwds):
return self.freeze(*args, **kwds)
__call__.__doc__ = freeze.__doc__
# The actual calculation functions (no basic checking need be done)
# If these are defined, the others won't be looked at.
# Otherwise, the other set can be defined.
def _stats(self, *args, **kwds):
return None, None, None, None
# Noncentral moments (also known as the moment about the origin).
# Expressed in LaTeX, munp would be $\mu'_{n}$, i.e. "mu-sub-n-prime".
# The primed mu is a widely used notation for the noncentral moment.
def _munp(self, n, *args):
# Silence floating point warnings from integration.
with np.errstate(all='ignore'):
vals = self.generic_moment(n, *args)
return vals
def _argcheck_rvs(self, *args, **kwargs):
# Handle broadcasting and size validation of the rvs method.
# Subclasses should not have to override this method.
# The rule is that if `size` is not None, then `size` gives the
# shape of the result (integer values of `size` are treated as
# tuples with length 1; i.e. `size=3` is the same as `size=(3,)`.)
#
# `args` is expected to contain the shape parameters (if any), the
# location and the scale in a flat tuple (e.g. if there are two
# shape parameters `a` and `b`, `args` will be `(a, b, loc, scale)`).
# The only keyword argument expected is 'size'.
size = kwargs.get('size', None)
all_bcast = np.broadcast_arrays(*args)
def squeeze_left(a):
while a.ndim > 0 and a.shape[0] == 1:
a = a[0]
return a
# Eliminate trivial leading dimensions. In the convention
# used by numpy's random variate generators, trivial leading
# dimensions are effectively ignored. In other words, when `size`
# is given, trivial leading dimensions of the broadcast parameters
# in excess of the number of dimensions in size are ignored, e.g.
# >>> np.random.normal([[1, 3, 5]], [[[[0.01]]]], size=3)
# array([ 1.00104267, 3.00422496, 4.99799278])
# If `size` is not given, the exact broadcast shape is preserved:
# >>> np.random.normal([[1, 3, 5]], [[[[0.01]]]])
# array([[[[ 1.00862899, 3.00061431, 4.99867122]]]])
#
all_bcast = [squeeze_left(a) for a in all_bcast]
bcast_shape = all_bcast[0].shape
bcast_ndim = all_bcast[0].ndim
if size is None:
size_ = bcast_shape
else:
size_ = tuple(np.atleast_1d(size))
# Check compatibility of size_ with the broadcast shape of all
# the parameters. This check is intended to be consistent with
# how the numpy random variate generators (e.g. np.random.normal,
# np.random.beta) handle their arguments. The rule is that, if size
# is given, it determines the shape of the output. Broadcasting
# can't change the output size.
# This is the standard broadcasting convention of extending the
# shape with fewer dimensions with enough dimensions of length 1
# so that the two shapes have the same number of dimensions.
ndiff = bcast_ndim - len(size_)
if ndiff < 0:
bcast_shape = (1,)*(-ndiff) + bcast_shape
elif ndiff > 0:
size_ = (1,)*ndiff + size_
# This compatibility test is not standard. In "regular" broadcasting,
# two shapes are compatible if for each dimension, the lengths are the
# same or one of the lengths is 1. Here, the length of a dimension in
# size_ must not be less than the corresponding length in bcast_shape.
ok = all([bcdim == 1 or bcdim == szdim
for (bcdim, szdim) in zip(bcast_shape, size_)])
if not ok:
raise ValueError("size does not match the broadcast shape of "
"the parameters. %s, %s, %s" % (size, size_,
bcast_shape))
param_bcast = all_bcast[:-2]
loc_bcast = all_bcast[-2]
scale_bcast = all_bcast[-1]
return param_bcast, loc_bcast, scale_bcast, size_
# These are the methods you must define (standard form functions)
# NB: generic _pdf, _logpdf, _cdf are different for
# rv_continuous and rv_discrete hence are defined in there
def _argcheck(self, *args):
"""Default check for correct values on args and keywords.
Returns condition array of 1's where arguments are correct and
0's where they are not.
"""
cond = 1
for arg in args:
cond = logical_and(cond, (asarray(arg) > 0))
return cond
def _get_support(self, *args, **kwargs):
"""Return the support of the (unscaled, unshifted) distribution.
*Must* be overridden by distributions which have support dependent
upon the shape parameters of the distribution. Any such override
*must not* set or change any of the class members, as these members
are shared amongst all instances of the distribution.
Parameters
----------
arg1, arg2, ... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
Returns
-------
a, b : numeric (float, or int or +/-np.inf)
end-points of the distribution's support for the specified
shape parameters.
"""
return self.a, self.b
def _support_mask(self, x, *args):
a, b = self._get_support(*args)
with np.errstate(invalid='ignore'):
return (a <= x) & (x <= b)
def _open_support_mask(self, x, *args):
a, b = self._get_support(*args)
with np.errstate(invalid='ignore'):
return (a < x) & (x < b)
def _rvs(self, *args, size=None, random_state=None):
# This method must handle size being a tuple, and it must
# properly broadcast *args and size. size might be
# an empty tuple, which means a scalar random variate is to be
# generated.
# Use basic inverse cdf algorithm for RV generation as default.
U = random_state.uniform(size=size)
Y = self._ppf(U, *args)
return Y
def _logcdf(self, x, *args):
with np.errstate(divide='ignore'):
return log(self._cdf(x, *args))
def _sf(self, x, *args):
return 1.0-self._cdf(x, *args)
def _logsf(self, x, *args):
with np.errstate(divide='ignore'):
return log(self._sf(x, *args))
def _ppf(self, q, *args):
return self._ppfvec(q, *args)
def _isf(self, q, *args):
return self._ppf(1.0-q, *args) # use correct _ppf for subclasses
# These are actually called, and should not be overwritten if you
# want to keep error checking.
def rvs(self, *args, **kwds):
"""Random variates of given type.
Parameters
----------
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
Location parameter (default=0).
scale : array_like, optional
Scale parameter (default=1).
size : int or tuple of ints, optional
Defining number of random variates (default is 1).
random_state : {None, int, `numpy.random.Generator`,
`numpy.random.RandomState`}, optional
If `seed` is None (or `np.random`), the `numpy.random.RandomState`
singleton is used.
If `seed` is an int, a new ``RandomState`` instance is used,
seeded with `seed`.
If `seed` is already a ``Generator`` or ``RandomState`` instance
then that instance is used.
Returns
-------
rvs : ndarray or scalar
Random variates of given `size`.
"""
discrete = kwds.pop('discrete', None)
rndm = kwds.pop('random_state', None)
args, loc, scale, size = self._parse_args_rvs(*args, **kwds)
cond = logical_and(self._argcheck(*args), (scale >= 0))
if not np.all(cond):
raise ValueError("Domain error in arguments.")
if np.all(scale == 0):
return loc*ones(size, 'd')
# extra gymnastics needed for a custom random_state
if rndm is not None:
random_state_saved = self._random_state
random_state = check_random_state(rndm)
else:
random_state = self._random_state
# Maintain backwards compatibility by setting self._size
# for distributions that still need it.
if self._rvs_uses_size_attribute:
if not self._rvs_size_warned:
warnings.warn(
f'The signature of {self._rvs} does not contain '
f'a "size" keyword. Such signatures are deprecated.',
np.VisibleDeprecationWarning)
self._rvs_size_warned = True
self._size = size
self._random_state = random_state
vals = self._rvs(*args)
else:
vals = self._rvs(*args, size=size, random_state=random_state)
vals = vals * scale + loc
# do not forget to restore the _random_state
if rndm is not None:
self._random_state = random_state_saved
# Cast to int if discrete
if discrete:
if size == ():
vals = int(vals)
else:
vals = vals.astype(int)
return vals
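# A usage sketch for ``rvs`` (the distribution and seed are illustrative
# only): passing a seeded Generator via ``random_state`` gives reproducible
# draws without touching the distribution's stored random state.
#
# >>> import numpy as np
# >>> from scipy import stats
# >>> rng = np.random.default_rng(12345)
# >>> sample = stats.norm.rvs(loc=10.0, scale=2.0, size=1000, random_state=rng)
# >>> sample.shape
# (1000,)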
def stats(self, *args, **kwds):
"""Some statistics of the given RV.
Parameters
----------
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional (continuous RVs only)
scale parameter (default=1)
moments : str, optional
composed of letters ['mvsk'] defining which moments to compute:
'm' = mean,
'v' = variance,
's' = (Fisher's) skew,
'k' = (Fisher's) kurtosis.
(default is 'mv')
Returns
-------
stats : sequence
of requested moments.
"""
args, loc, scale, moments = self._parse_args_stats(*args, **kwds)
# scale = 1 by construction for discrete RVs
loc, scale = map(asarray, (loc, scale))
args = tuple(map(asarray, args))
cond = self._argcheck(*args) & (scale > 0) & (loc == loc)
output = []
default = np.full(shape(cond), fill_value=self.badvalue)
# Use only entries that are valid in calculation
if np.any(cond):
goodargs = argsreduce(cond, *(args+(scale, loc)))
scale, loc, goodargs = goodargs[-2], goodargs[-1], goodargs[:-2]
if self._stats_has_moments:
mu, mu2, g1, g2 = self._stats(*goodargs,
**{'moments': moments})
else:
mu, mu2, g1, g2 = self._stats(*goodargs)
if g1 is None:
mu3 = None
else:
if mu2 is None:
mu2 = self._munp(2, *goodargs)
if g2 is None:
# (mu2**1.5) breaks down for nan and inf
mu3 = g1 * np.power(mu2, 1.5)
if 'm' in moments:
if mu is None:
mu = self._munp(1, *goodargs)
out0 = default.copy()
place(out0, cond, mu * scale + loc)
output.append(out0)
if 'v' in moments:
if mu2 is None:
mu2p = self._munp(2, *goodargs)
if mu is None:
mu = self._munp(1, *goodargs)
# if mean is inf then var is also inf
with np.errstate(invalid='ignore'):
mu2 = np.where(np.isfinite(mu), mu2p - mu**2, np.inf)
out0 = default.copy()
place(out0, cond, mu2 * scale * scale)
output.append(out0)
if 's' in moments:
if g1 is None:
mu3p = self._munp(3, *goodargs)
if mu is None:
mu = self._munp(1, *goodargs)
if mu2 is None:
mu2p = self._munp(2, *goodargs)
mu2 = mu2p - mu * mu
with np.errstate(invalid='ignore'):
mu3 = (-mu*mu - 3*mu2)*mu + mu3p
g1 = mu3 / np.power(mu2, 1.5)
out0 = default.copy()
place(out0, cond, g1)
output.append(out0)
if 'k' in moments:
if g2 is None:
mu4p = self._munp(4, *goodargs)
if mu is None:
mu = self._munp(1, *goodargs)
if mu2 is None:
mu2p = self._munp(2, *goodargs)
mu2 = mu2p - mu * mu
if mu3 is None:
mu3p = self._munp(3, *goodargs)
with np.errstate(invalid='ignore'):
mu3 = (-mu * mu - 3 * mu2) * mu + mu3p
with np.errstate(invalid='ignore'):
mu4 = ((-mu**2 - 6*mu2) * mu - 4*mu3)*mu + mu4p
g2 = mu4 / mu2**2.0 - 3.0
out0 = default.copy()
place(out0, cond, g2)
output.append(out0)
else: # no valid args
output = [default.copy() for _ in moments]
if len(output) == 1:
return output[0]
else:
return tuple(output)
def entropy(self, *args, **kwds):
"""Differential entropy of the RV.
Parameters
----------
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
Location parameter (default=0).
scale : array_like, optional (continuous distributions only).
Scale parameter (default=1).
Notes
-----
Entropy is defined base `e`:
>>> drv = rv_discrete(values=((0, 1), (0.5, 0.5)))
>>> np.allclose(drv.entropy(), np.log(2.0))
True
"""
args, loc, scale = self._parse_args(*args, **kwds)
# NB: for discrete distributions scale=1 by construction in _parse_args
loc, scale = map(asarray, (loc, scale))
args = tuple(map(asarray, args))
cond0 = self._argcheck(*args) & (scale > 0) & (loc == loc)
output = zeros(shape(cond0), 'd')
place(output, (1-cond0), self.badvalue)
goodargs = argsreduce(cond0, scale, *args)
goodscale = goodargs[0]
goodargs = goodargs[1:]
place(output, cond0, self.vecentropy(*goodargs) + log(goodscale))
return output
def moment(self, n, *args, **kwds):
"""n-th order non-central moment of distribution.
Parameters
----------
n : int, n >= 1
Order of moment.
arg1, arg2, arg3,... : float
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
"""
args, loc, scale = self._parse_args(*args, **kwds)
if not (self._argcheck(*args) and (scale > 0)):
return nan
if (floor(n) != n):
raise ValueError("Moment must be an integer.")
if (n < 0):
raise ValueError("Moment must be positive.")
mu, mu2, g1, g2 = None, None, None, None
if (n > 0) and (n < 5):
if self._stats_has_moments:
mdict = {'moments': {1: 'm', 2: 'v', 3: 'vs', 4: 'vk'}[n]}
else:
mdict = {}
mu, mu2, g1, g2 = self._stats(*args, **mdict)
val = _moment_from_stats(n, mu, mu2, g1, g2, self._munp, args)
# Convert to transformed X = L + S*Y
# E[X^n] = E[(L+S*Y)^n] = L^n sum(comb(n, k)*(S/L)^k E[Y^k], k=0...n)
if loc == 0:
return scale**n * val
else:
result = 0
fac = float(scale) / float(loc)
for k in range(n):
valk = _moment_from_stats(k, mu, mu2, g1, g2, self._munp, args)
result += comb(n, k, exact=True)*(fac**k) * valk
result += fac**n * val
return result * loc**n
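# Numeric sketch of the loc/scale expansion above (normal distribution and
# values are illustrative): for X = loc + scale*Y with Y standard normal,
# E[X**2] = loc**2 + scale**2, so moment(2, loc=2, scale=3) should equal 13.
#
# >>> import numpy as np
# >>> from scipy import stats
# >>> np.allclose(stats.norm.moment(2, loc=2.0, scale=3.0), 13.0)
# True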
def median(self, *args, **kwds):
"""Median of the distribution.
Parameters
----------
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
Location parameter, Default is 0.
scale : array_like, optional
Scale parameter, Default is 1.
Returns
-------
median : float
The median of the distribution.
See Also
--------
rv_discrete.ppf
Inverse of the CDF
"""
return self.ppf(0.5, *args, **kwds)
def mean(self, *args, **kwds):
"""Mean of the distribution.
Parameters
----------
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
mean : float
the mean of the distribution
"""
kwds['moments'] = 'm'
res = self.stats(*args, **kwds)
if isinstance(res, ndarray) and res.ndim == 0:
return res[()]
return res
def var(self, *args, **kwds):
"""Variance of the distribution.
Parameters
----------
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
var : float
the variance of the distribution
"""
kwds['moments'] = 'v'
res = self.stats(*args, **kwds)
if isinstance(res, ndarray) and res.ndim == 0:
return res[()]
return res
def std(self, *args, **kwds):
"""Standard deviation of the distribution.
Parameters
----------
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
std : float
standard deviation of the distribution
"""
kwds['moments'] = 'v'
res = sqrt(self.stats(*args, **kwds))
return res
def interval(self, alpha, *args, **kwds):
"""Confidence interval with equal areas around the median.
Parameters
----------
alpha : array_like of float
Probability that an rv will be drawn from the returned range.
Each value should be in the range [0, 1].
arg1, arg2, ... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
location parameter, Default is 0.
scale : array_like, optional
scale parameter, Default is 1.
Returns
-------
a, b : ndarray of float
end-points of range that contain ``100 * alpha %`` of the rv's
possible values.
"""
alpha = asarray(alpha)
if np.any((alpha > 1) | (alpha < 0)):
raise ValueError("alpha must be between 0 and 1 inclusive")
q1 = (1.0-alpha)/2
q2 = (1.0+alpha)/2
a = self.ppf(q1, *args, **kwds)
b = self.ppf(q2, *args, **kwds)
return a, b
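# A sketch of ``interval`` for the standard normal (the 0.95 level is
# illustrative): the endpoints are the 2.5% and 97.5% quantiles.
#
# >>> import numpy as np
# >>> from scipy import stats
# >>> np.allclose(stats.norm.interval(0.95), (-1.959964, 1.959964), atol=1e-5)
# True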
def support(self, *args, **kwargs):
"""Support of the distribution.
Parameters
----------
arg1, arg2, ... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
location parameter, Default is 0.
scale : array_like, optional
scale parameter, Default is 1.
Returns
-------
a, b : array_like
end-points of the distribution's support.
"""
args, loc, scale = self._parse_args(*args, **kwargs)
arrs = np.broadcast_arrays(*args, loc, scale)
args, loc, scale = arrs[:-2], arrs[-2], arrs[-1]
cond = self._argcheck(*args) & (scale > 0)
_a, _b = self._get_support(*args)
if cond.all():
return _a * scale + loc, _b * scale + loc
elif cond.ndim == 0:
return self.badvalue, self.badvalue
# promote bounds to at least float to fill in the badvalue
_a, _b = np.asarray(_a).astype('d'), np.asarray(_b).astype('d')
out_a, out_b = _a * scale + loc, _b * scale + loc
place(out_a, 1-cond, self.badvalue)
place(out_b, 1-cond, self.badvalue)
return out_a, out_b
def _get_fixed_fit_value(kwds, names):
"""
Given names such as `['f0', 'fa', 'fix_a']`, check that there is
at most one non-None value in `kwds` associated with those names.
Return that value, or None if none of the names occur in `kwds`.
As a side effect, all occurrences of those names in `kwds` are
removed.
"""
vals = [(name, kwds.pop(name)) for name in names if name in kwds]
if len(vals) > 1:
repeated = [name for name, val in vals]
raise ValueError("fit method got multiple keyword arguments to "
"specify the same fixed parameter: " +
', '.join(repeated))
return vals[0][1] if vals else None
# continuous random variables: implement maybe later
#
# hf --- Hazard Function (PDF / SF)
# chf --- Cumulative hazard function (-log(SF))
# psf --- Probability sparsity function (reciprocal of the pdf) in
# units of percent-point-function (as a function of q).
# Also, the derivative of the percent-point function.
class rv_continuous(rv_generic):
"""A generic continuous random variable class meant for subclassing.
`rv_continuous` is a base class to construct specific distribution classes
and instances for continuous random variables. It cannot be used
directly as a distribution.
Parameters
----------
momtype : int, optional
The type of generic moment calculation to use: 0 for pdf, 1 (default)
for ppf.
a : float, optional
Lower bound of the support of the distribution, default is minus
infinity.
b : float, optional
Upper bound of the support of the distribution, default is plus
infinity.
xtol : float, optional
The tolerance for fixed point calculation for generic ppf.
badvalue : float, optional
The value in result arrays that indicates a value for which
some argument restriction is violated; default is np.nan.
name : str, optional
The name of the instance. This string is used to construct the default
example for distributions.
longname : str, optional
This string is used as part of the first line of the docstring returned
when a subclass has no docstring of its own. Note: `longname` exists
for backwards compatibility, do not use for new subclasses.
shapes : str, optional
The shape of the distribution. For example ``"m, n"`` for a
distribution that takes two integers as the two shape arguments for all
its methods. If not provided, shape parameters will be inferred from
the signature of the private methods, ``_pdf`` and ``_cdf`` of the
instance.
extradoc : str, optional, deprecated
This string is used as the last part of the docstring returned when a
subclass has no docstring of its own. Note: `extradoc` exists for
backwards compatibility, do not use for new subclasses.
seed : {None, int, `numpy.random.Generator`,
`numpy.random.RandomState`}, optional
If `seed` is None (or `np.random`), the `numpy.random.RandomState`
singleton is used.
If `seed` is an int, a new ``RandomState`` instance is used,
seeded with `seed`.
If `seed` is already a ``Generator`` or ``RandomState`` instance then
that instance is used.
Methods
-------
rvs
pdf
logpdf
cdf
logcdf
sf
logsf
ppf
isf
moment
stats
entropy
expect
median
mean
std
var
interval
__call__
fit
fit_loc_scale
nnlf
support
Notes
-----
Public methods of an instance of a distribution class (e.g., ``pdf``,
``cdf``) check their arguments and pass valid arguments to private,
computational methods (``_pdf``, ``_cdf``). For ``pdf(x)``, ``x`` is valid
if it is within the support of the distribution.
Whether a shape parameter is valid is decided by an ``_argcheck`` method
(which defaults to checking that its arguments are strictly positive.)
**Subclassing**
New random variables can be defined by subclassing the `rv_continuous` class
and re-defining at least the ``_pdf`` or the ``_cdf`` method (normalized
to location 0 and scale 1).
If positive argument checking is not correct for your RV
then you will also need to re-define the ``_argcheck`` method.
For most of the scipy.stats distributions, the support interval doesn't
depend on the shape parameters. ``x`` being in the support interval is
equivalent to ``self.a <= x <= self.b``. If either of the endpoints of
the support do depend on the shape parameters, then
i) the distribution must implement the ``_get_support`` method; and
ii) those dependent endpoints must be omitted from the distribution's
call to the ``rv_continuous`` initializer.
Correct, but potentially slow, defaults exist for the remaining
methods; for speed and/or accuracy you can over-ride::
_logpdf, _cdf, _logcdf, _ppf, _rvs, _isf, _sf, _logsf
The default method ``_rvs`` relies on the inverse of the cdf, ``_ppf``,
applied to a uniform random variate. In order to generate random variates
efficiently, either the default ``_ppf`` needs to be overwritten (e.g.
if the inverse cdf can be expressed in an explicit form) or a sampling
method needs to be implemented in a custom ``_rvs`` method.
If possible, you should override ``_isf``, ``_sf`` or ``_logsf``.
The main reason would be to improve numerical accuracy: for example,
the survival function ``_sf`` is computed as ``1 - _cdf`` which can
result in loss of precision if ``_cdf(x)`` is close to one.
**Methods that can be overwritten by subclasses**
::
_rvs
_pdf
_cdf
_sf
_ppf
_isf
_stats
_munp
_entropy
_argcheck
_get_support
There are additional (internal and private) generic methods that can
be useful for cross-checking and for debugging, but they might not work in all
cases when directly called.
A note on ``shapes``: subclasses need not specify them explicitly. In this
case, `shapes` will be automatically deduced from the signatures of the
overridden methods (`pdf`, `cdf` etc).
If, for some reason, you prefer to avoid relying on introspection, you can
specify ``shapes`` explicitly as an argument to the instance constructor.
**Frozen Distributions**
Normally, you must provide shape parameters (and, optionally, location and
scale parameters) to each call of a method of a distribution.
Alternatively, the object may be called (as a function) to fix the shape,
location, and scale parameters returning a "frozen" continuous RV object:
rv = generic(<shape(s)>, loc=0, scale=1)
`rv_frozen` object with the same methods but holding the given shape,
location, and scale fixed
**Statistics**
Statistics are computed using numerical integration by default.
For speed you can redefine this using ``_stats``:
- take shape parameters and return mu, mu2, g1, g2
- If you can't compute one of these, return it as None
- Can also be defined with a keyword argument ``moments``, which is a
string composed of "m", "v", "s", and/or "k".
Only the components appearing in the string should be computed and
returned in the order "m", "v", "s", or "k" with missing values
returned as None.
Alternatively, you can override ``_munp``, which takes ``n`` and shape
parameters and returns the n-th non-central moment of the distribution.
Examples
--------
To create a new Gaussian distribution, we would do the following:
>>> from scipy.stats import rv_continuous
>>> class gaussian_gen(rv_continuous):
... "Gaussian distribution"
... def _pdf(self, x):
... return np.exp(-x**2 / 2.) / np.sqrt(2.0 * np.pi)
>>> gaussian = gaussian_gen(name='gaussian')
``scipy.stats`` distributions are *instances*, so here we subclass
`rv_continuous` and create an instance. With this, we now have
a fully functional distribution with all relevant methods automagically
generated by the framework.
Note that above we defined a standard normal distribution, with zero mean
and unit variance. Shifting and scaling of the distribution can be done
by using ``loc`` and ``scale`` parameters: ``gaussian.pdf(x, loc, scale)``
essentially computes ``y = (x - loc) / scale`` and
``gaussian._pdf(y) / scale``.
"""
def __init__(self, momtype=1, a=None, b=None, xtol=1e-14,
badvalue=None, name=None, longname=None,
shapes=None, extradoc=None, seed=None):
super().__init__(seed)
# save the ctor parameters, cf generic freeze
self._ctor_param = dict(
momtype=momtype, a=a, b=b, xtol=xtol,
badvalue=badvalue, name=name, longname=longname,
shapes=shapes, extradoc=extradoc, seed=seed)
if badvalue is None:
badvalue = nan
if name is None:
name = 'Distribution'
self.badvalue = badvalue
self.name = name
self.a = a
self.b = b
if a is None:
self.a = -inf
if b is None:
self.b = inf
self.xtol = xtol
self.moment_type = momtype
self.shapes = shapes
self.extradoc = extradoc
self._construct_argparser(meths_to_inspect=[self._pdf, self._cdf],
locscale_in='loc=0, scale=1',
locscale_out='loc, scale')
self._attach_methods()
if longname is None:
if name[0] in 'aeiouAEIOU':
hstr = "An "
else:
hstr = "A "
longname = hstr + name
if sys.flags.optimize < 2:
# Skip adding docstrings if interpreter is run with -OO
if self.__doc__ is None:
self._construct_default_doc(longname=longname,
extradoc=extradoc,
docdict=docdict,
discrete='continuous')
else:
dct = dict(distcont)
self._construct_doc(docdict, dct.get(self.name))
def __getstate__(self):
dct = self.__dict__.copy()
# these methods will be remade in __setstate__
# _random_state attribute is taken care of by rv_generic
attrs = ["_parse_args", "_parse_args_stats", "_parse_args_rvs",
"_cdfvec", "_ppfvec", "vecentropy", "generic_moment"]
[dct.pop(attr, None) for attr in attrs]
return dct
def _attach_methods(self):
"""
Attaches dynamically created methods to the rv_continuous instance.
"""
# _attach_methods is responsible for calling _attach_argparser_methods
self._attach_argparser_methods()
# nin correction
self._ppfvec = vectorize(self._ppf_single, otypes='d')
self._ppfvec.nin = self.numargs + 1
self.vecentropy = vectorize(self._entropy, otypes='d')
self._cdfvec = vectorize(self._cdf_single, otypes='d')
self._cdfvec.nin = self.numargs + 1
if self.moment_type == 0:
self.generic_moment = vectorize(self._mom0_sc, otypes='d')
else:
self.generic_moment = vectorize(self._mom1_sc, otypes='d')
# Because of the *args argument of _mom0_sc, vectorize cannot count the
# number of arguments correctly.
self.generic_moment.nin = self.numargs + 1
def _updated_ctor_param(self):
"""Return the current version of _ctor_param, possibly updated by user.
Used by freezing.
Keep this in sync with the signature of __init__.
"""
dct = self._ctor_param.copy()
dct['a'] = self.a
dct['b'] = self.b
dct['xtol'] = self.xtol
dct['badvalue'] = self.badvalue
dct['name'] = self.name
dct['shapes'] = self.shapes
dct['extradoc'] = self.extradoc
return dct
def _ppf_to_solve(self, x, q, *args):
return self.cdf(*(x, )+args)-q
def _ppf_single(self, q, *args):
factor = 10.
left, right = self._get_support(*args)
if np.isinf(left):
left = min(-factor, right)
while self._ppf_to_solve(left, q, *args) > 0.:
left, right = left * factor, left
# left is now such that cdf(left) <= q
# if right has changed, then cdf(right) > q
if np.isinf(right):
right = max(factor, left)
while self._ppf_to_solve(right, q, *args) < 0.:
left, right = right, right * factor
# right is now such that cdf(right) >= q
return optimize.brentq(self._ppf_to_solve,
left, right, args=(q,)+args, xtol=self.xtol)
# moment from definition
def _mom_integ0(self, x, m, *args):
return x**m * self.pdf(x, *args)
def _mom0_sc(self, m, *args):
_a, _b = self._get_support(*args)
return integrate.quad(self._mom_integ0, _a, _b,
args=(m,)+args)[0]
# moment calculated using ppf
def _mom_integ1(self, q, m, *args):
return (self.ppf(q, *args))**m
def _mom1_sc(self, m, *args):
return integrate.quad(self._mom_integ1, 0, 1, args=(m,)+args)[0]
def _pdf(self, x, *args):
return derivative(self._cdf, x, dx=1e-5, args=args, order=5)
# Could also define any of these
def _logpdf(self, x, *args):
return log(self._pdf(x, *args))
def _cdf_single(self, x, *args):
_a, _b = self._get_support(*args)
return integrate.quad(self._pdf, _a, x, args=args)[0]
def _cdf(self, x, *args):
return self._cdfvec(x, *args)
# generic _argcheck, _logcdf, _sf, _logsf, _ppf, _isf, _rvs are defined
# in rv_generic
def pdf(self, x, *args, **kwds):
"""Probability density function at x of the given RV.
Parameters
----------
x : array_like
quantiles
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
pdf : ndarray
Probability density function evaluated at x
"""
args, loc, scale = self._parse_args(*args, **kwds)
x, loc, scale = map(asarray, (x, loc, scale))
args = tuple(map(asarray, args))
dtyp = np.find_common_type([x.dtype, np.float64], [])
x = np.asarray((x - loc)/scale, dtype=dtyp)
cond0 = self._argcheck(*args) & (scale > 0)
cond1 = self._support_mask(x, *args) & (scale > 0)
cond = cond0 & cond1
output = zeros(shape(cond), dtyp)
putmask(output, (1-cond0)+np.isnan(x), self.badvalue)
if np.any(cond):
goodargs = argsreduce(cond, *((x,)+args+(scale,)))
scale, goodargs = goodargs[-1], goodargs[:-1]
place(output, cond, self._pdf(*goodargs) / scale)
if output.ndim == 0:
return output[()]
return output
def logpdf(self, x, *args, **kwds):
"""Log of the probability density function at x of the given RV.
This uses a more numerically accurate calculation if available.
Parameters
----------
x : array_like
quantiles
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
logpdf : array_like
Log of the probability density function evaluated at x
"""
args, loc, scale = self._parse_args(*args, **kwds)
x, loc, scale = map(asarray, (x, loc, scale))
args = tuple(map(asarray, args))
dtyp = np.find_common_type([x.dtype, np.float64], [])
x = np.asarray((x - loc)/scale, dtype=dtyp)
cond0 = self._argcheck(*args) & (scale > 0)
cond1 = self._support_mask(x, *args) & (scale > 0)
cond = cond0 & cond1
output = empty(shape(cond), dtyp)
output.fill(NINF)
putmask(output, (1-cond0)+np.isnan(x), self.badvalue)
if np.any(cond):
goodargs = argsreduce(cond, *((x,)+args+(scale,)))
scale, goodargs = goodargs[-1], goodargs[:-1]
place(output, cond, self._logpdf(*goodargs) - log(scale))
if output.ndim == 0:
return output[()]
return output
def cdf(self, x, *args, **kwds):
"""
Cumulative distribution function of the given RV.
Parameters
----------
x : array_like
quantiles
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
cdf : ndarray
Cumulative distribution function evaluated at `x`
"""
args, loc, scale = self._parse_args(*args, **kwds)
x, loc, scale = map(asarray, (x, loc, scale))
args = tuple(map(asarray, args))
_a, _b = self._get_support(*args)
dtyp = np.find_common_type([x.dtype, np.float64], [])
x = np.asarray((x - loc)/scale, dtype=dtyp)
cond0 = self._argcheck(*args) & (scale > 0)
cond1 = self._open_support_mask(x, *args) & (scale > 0)
cond2 = (x >= np.asarray(_b)) & cond0
cond = cond0 & cond1
output = zeros(shape(cond), dtyp)
place(output, (1-cond0)+np.isnan(x), self.badvalue)
place(output, cond2, 1.0)
if np.any(cond): # call only if at least 1 entry
goodargs = argsreduce(cond, *((x,)+args))
place(output, cond, self._cdf(*goodargs))
if output.ndim == 0:
return output[()]
return output
def logcdf(self, x, *args, **kwds):
"""Log of the cumulative distribution function at x of the given RV.
Parameters
----------
x : array_like
quantiles
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
logcdf : array_like
Log of the cumulative distribution function evaluated at x
"""
args, loc, scale = self._parse_args(*args, **kwds)
x, loc, scale = map(asarray, (x, loc, scale))
args = tuple(map(asarray, args))
_a, _b = self._get_support(*args)
dtyp = np.find_common_type([x.dtype, np.float64], [])
x = np.asarray((x - loc)/scale, dtype=dtyp)
cond0 = self._argcheck(*args) & (scale > 0)
cond1 = self._open_support_mask(x, *args) & (scale > 0)
cond2 = (x >= _b) & cond0
cond = cond0 & cond1
output = empty(shape(cond), dtyp)
output.fill(NINF)
place(output, (1-cond0)*(cond1 == cond1)+np.isnan(x), self.badvalue)
place(output, cond2, 0.0)
if np.any(cond): # call only if at least 1 entry
goodargs = argsreduce(cond, *((x,)+args))
place(output, cond, self._logcdf(*goodargs))
if output.ndim == 0:
return output[()]
return output
def sf(self, x, *args, **kwds):
"""Survival function (1 - `cdf`) at x of the given RV.
Parameters
----------
x : array_like
quantiles
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
sf : array_like
Survival function evaluated at x
"""
args, loc, scale = self._parse_args(*args, **kwds)
x, loc, scale = map(asarray, (x, loc, scale))
args = tuple(map(asarray, args))
_a, _b = self._get_support(*args)
dtyp = np.find_common_type([x.dtype, np.float64], [])
x = np.asarray((x - loc)/scale, dtype=dtyp)
cond0 = self._argcheck(*args) & (scale > 0)
cond1 = self._open_support_mask(x, *args) & (scale > 0)
cond2 = cond0 & (x <= _a)
cond = cond0 & cond1
output = zeros(shape(cond), dtyp)
place(output, (1-cond0)+np.isnan(x), self.badvalue)
place(output, cond2, 1.0)
if np.any(cond):
goodargs = argsreduce(cond, *((x,)+args))
place(output, cond, self._sf(*goodargs))
if output.ndim == 0:
return output[()]
return output
def logsf(self, x, *args, **kwds):
"""Log of the survival function of the given RV.
Returns the log of the "survival function," defined as (1 - `cdf`),
evaluated at `x`.
Parameters
----------
x : array_like
quantiles
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
logsf : ndarray
Log of the survival function evaluated at `x`.
"""
args, loc, scale = self._parse_args(*args, **kwds)
x, loc, scale = map(asarray, (x, loc, scale))
args = tuple(map(asarray, args))
_a, _b = self._get_support(*args)
dtyp = np.find_common_type([x.dtype, np.float64], [])
x = np.asarray((x - loc)/scale, dtype=dtyp)
cond0 = self._argcheck(*args) & (scale > 0)
cond1 = self._open_support_mask(x, *args) & (scale > 0)
cond2 = cond0 & (x <= _a)
cond = cond0 & cond1
output = empty(shape(cond), dtyp)
output.fill(NINF)
place(output, (1-cond0)+np.isnan(x), self.badvalue)
place(output, cond2, 0.0)
if np.any(cond):
goodargs = argsreduce(cond, *((x,)+args))
place(output, cond, self._logsf(*goodargs))
if output.ndim == 0:
return output[()]
return output
def ppf(self, q, *args, **kwds):
"""Percent point function (inverse of `cdf`) at q of the given RV.
Parameters
----------
q : array_like
lower tail probability
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
x : array_like
quantile corresponding to the lower tail probability q.
"""
args, loc, scale = self._parse_args(*args, **kwds)
q, loc, scale = map(asarray, (q, loc, scale))
args = tuple(map(asarray, args))
_a, _b = self._get_support(*args)
cond0 = self._argcheck(*args) & (scale > 0) & (loc == loc)
cond1 = (0 < q) & (q < 1)
cond2 = cond0 & (q == 0)
cond3 = cond0 & (q == 1)
cond = cond0 & cond1
output = np.full(shape(cond), fill_value=self.badvalue)
lower_bound = _a * scale + loc
upper_bound = _b * scale + loc
place(output, cond2, argsreduce(cond2, lower_bound)[0])
place(output, cond3, argsreduce(cond3, upper_bound)[0])
if np.any(cond): # call only if at least 1 entry
goodargs = argsreduce(cond, *((q,)+args+(scale, loc)))
scale, loc, goodargs = goodargs[-2], goodargs[-1], goodargs[:-2]
place(output, cond, self._ppf(*goodargs) * scale + loc)
if output.ndim == 0:
return output[()]
return output
def isf(self, q, *args, **kwds):
"""Inverse survival function (inverse of `sf`) at q of the given RV.
Parameters
----------
q : array_like
upper tail probability
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
x : ndarray or scalar
Quantile corresponding to the upper tail probability q.
"""
args, loc, scale = self._parse_args(*args, **kwds)
q, loc, scale = map(asarray, (q, loc, scale))
args = tuple(map(asarray, args))
_a, _b = self._get_support(*args)
cond0 = self._argcheck(*args) & (scale > 0) & (loc == loc)
cond1 = (0 < q) & (q < 1)
cond2 = cond0 & (q == 1)
cond3 = cond0 & (q == 0)
cond = cond0 & cond1
output = np.full(shape(cond), fill_value=self.badvalue)
lower_bound = _a * scale + loc
upper_bound = _b * scale + loc
place(output, cond2, argsreduce(cond2, lower_bound)[0])
place(output, cond3, argsreduce(cond3, upper_bound)[0])
if np.any(cond):
goodargs = argsreduce(cond, *((q,)+args+(scale, loc)))
scale, loc, goodargs = goodargs[-2], goodargs[-1], goodargs[:-2]
place(output, cond, self._isf(*goodargs) * scale + loc)
if output.ndim == 0:
return output[()]
return output
def _nnlf(self, x, *args):
return -np.sum(self._logpdf(x, *args), axis=0)
def _unpack_loc_scale(self, theta):
try:
loc = theta[-2]
scale = theta[-1]
args = tuple(theta[:-2])
except IndexError as e:
raise ValueError("Not enough input arguments.") from e
return loc, scale, args
def nnlf(self, theta, x):
"""Negative loglikelihood function.
Notes
-----
This is ``-sum(log pdf(x, theta), axis=0)`` where `theta` are the
parameters (including loc and scale).
"""
loc, scale, args = self._unpack_loc_scale(theta)
if not self._argcheck(*args) or scale <= 0:
return inf
x = asarray((x-loc) / scale)
n_log_scale = len(x) * log(scale)
if np.any(~self._support_mask(x, *args)):
return inf
return self._nnlf(x, *args) + n_log_scale
def _nnlf_and_penalty(self, x, args):
cond0 = ~self._support_mask(x, *args)
n_bad = np.count_nonzero(cond0, axis=0)
if n_bad > 0:
x = argsreduce(~cond0, x)[0]
logpdf = self._logpdf(x, *args)
finite_logpdf = np.isfinite(logpdf)
n_bad += np.sum(~finite_logpdf, axis=0)
if n_bad > 0:
penalty = n_bad * log(_XMAX) * 100
return -np.sum(logpdf[finite_logpdf], axis=0) + penalty
return -np.sum(logpdf, axis=0)
def _penalized_nnlf(self, theta, x):
"""Penalized negative loglikelihood function.
i.e., - sum (log pdf(x, theta), axis=0) + penalty
where theta are the parameters (including loc and scale)
"""
loc, scale, args = self._unpack_loc_scale(theta)
if not self._argcheck(*args) or scale <= 0:
return inf
x = asarray((x-loc) / scale)
n_log_scale = len(x) * log(scale)
return self._nnlf_and_penalty(x, args) + n_log_scale
def _fitstart(self, data, args=None):
"""Starting point for fit (shape arguments + loc + scale)."""
if args is None:
args = (1.0,)*self.numargs
loc, scale = self._fit_loc_scale_support(data, *args)
return args + (loc, scale)
def _reduce_func(self, args, kwds, data=None):
"""
Return the (possibly reduced) function to optimize in order to find MLE
estimates for the .fit method.
"""
# Convert fixed shape parameters to the standard numeric form: e.g. for
# stats.beta, shapes='a, b'. To fix `a`, the caller can give a value
# for `f0`, `fa` or 'fix_a'. The following converts the latter two
# into the first (numeric) form.
shapes = []
if self.shapes:
shapes = self.shapes.replace(',', ' ').split()
for j, s in enumerate(shapes):
key = 'f' + str(j)
names = [key, 'f' + s, 'fix_' + s]
val = _get_fixed_fit_value(kwds, names)
if val is not None:
kwds[key] = val
args = list(args)
Nargs = len(args)
fixedn = []
names = ['f%d' % n for n in range(Nargs - 2)] + ['floc', 'fscale']
x0 = []
for n, key in enumerate(names):
if key in kwds:
fixedn.append(n)
args[n] = kwds.pop(key)
else:
x0.append(args[n])
methods = {"mle", "mm"}
method = kwds.pop('method', "mle").lower()
if method == "mm":
n_params = len(shapes) + 2 - len(fixedn)
exponents = (np.arange(1, n_params+1))[:, np.newaxis]
data_moments = np.sum(data[None, :]**exponents/len(data), axis=1)
def objective(theta, x):
return self._moment_error(theta, x, data_moments)
elif method == "mle":
objective = self._penalized_nnlf
else:
raise ValueError("Method '{0}' not available; must be one of {1}"
.format(method, methods))
if len(fixedn) == 0:
func = objective
restore = None
else:
if len(fixedn) == Nargs:
raise ValueError(
"All parameters fixed. There is nothing to optimize.")
def restore(args, theta):
# Replace with theta for all numbers not in fixedn
# This allows the non-fixed values to vary, but
# we still call self.nnlf with all parameters.
i = 0
for n in range(Nargs):
if n not in fixedn:
args[n] = theta[i]
i += 1
return args
def func(theta, x):
newtheta = restore(args[:], theta)
return objective(newtheta, x)
return x0, func, restore, args
def _moment_error(self, theta, x, data_moments):
loc, scale, args = self._unpack_loc_scale(theta)
if not self._argcheck(*args) or scale <= 0:
return inf
dist_moments = np.array([self.moment(i+1, *args, loc=loc, scale=scale)
for i in range(len(data_moments))])
if np.any(np.isnan(dist_moments)):
raise ValueError("Method of moments encountered a non-finite "
"distribution moment and cannot continue. "
"Consider trying method='MLE'.")
return (((data_moments - dist_moments) /
np.maximum(np.abs(data_moments), 1e-8))**2).sum()
def fit(self, data, *args, **kwds):
"""
Return estimates of shape (if applicable), location, and scale
parameters from data. The default estimation method is Maximum
Likelihood Estimation (MLE), but Method of Moments (MM)
is also available.
Starting estimates for
the fit are given by input arguments; for any arguments not provided
with starting estimates, ``self._fitstart(data)`` is called to generate
such.
One can hold some parameters fixed to specific values by passing in
keyword arguments ``f0``, ``f1``, ..., ``fn`` (for shape parameters)
and ``floc`` and ``fscale`` (for location and scale parameters,
respectively).
Parameters
----------
data : array_like
Data to use in estimating the distribution parameters.
arg1, arg2, arg3,... : floats, optional
Starting value(s) for any shape-characterizing arguments (those not
provided will be determined by a call to ``_fitstart(data)``).
No default value.
kwds : floats, optional
- `loc`: initial guess of the distribution's location parameter.
- `scale`: initial guess of the distribution's scale parameter.
Special keyword arguments are recognized as holding certain
parameters fixed:
- f0...fn : hold respective shape parameters fixed.
Alternatively, shape parameters to fix can be specified by name.
For example, if ``self.shapes == "a, b"``, ``fa`` and ``fix_a``
are equivalent to ``f0``, and ``fb`` and ``fix_b`` are
equivalent to ``f1``.
- floc : hold location parameter fixed to specified value.
- fscale : hold scale parameter fixed to specified value.
- optimizer : The optimizer to use.
The optimizer must take ``func``,
and starting position as the first two arguments,
plus ``args`` (for extra arguments to pass to the
function to be optimized) and ``disp=0`` to suppress
output as keyword arguments.
- method : The method to use. The default is "MLE" (Maximum
Likelihood Estimate); "MM" (Method of Moments)
is also available.
Returns
-------
parameter_tuple : tuple of floats
Estimates for any shape parameters (if applicable),
followed by those for location and scale.
            For most random variables, estimates of the shape parameters
            will be returned, but there are exceptions (e.g. ``norm``).
Notes
-----
With ``method="MLE"`` (default), the fit is computed by minimizing
the negative log-likelihood function. A large, finite penalty
(rather than infinite negative log-likelihood) is applied for
observations beyond the support of the distribution.
With ``method="MM"``, the fit is computed by minimizing the L2 norm
of the relative errors between the first *k* raw (about zero) data
moments and the corresponding distribution moments, where *k* is the
number of non-fixed parameters.
More precisely, the objective function is::
(((data_moments - dist_moments)
/ np.maximum(np.abs(data_moments), 1e-8))**2).sum()
where the constant ``1e-8`` avoids division by zero in case of
vanishing data moments. Typically, this error norm can be reduced to
zero.
Note that the standard method of moments can produce parameters for
which some data are outside the support of the fitted distribution;
this implementation does nothing to prevent this.
For either method,
the returned answer is not guaranteed to be globally optimal; it
may only be locally optimal, or the optimization may fail altogether.
If the data contain any of ``np.nan``, ``np.inf``, or ``-np.inf``,
the `fit` method will raise a ``RuntimeError``.
Examples
--------
Generate some data to fit: draw random variates from the `beta`
distribution
>>> from scipy.stats import beta
>>> a, b = 1., 2.
>>> x = beta.rvs(a, b, size=1000)
Now we can fit all four parameters (``a``, ``b``, ``loc``
and ``scale``):
>>> a1, b1, loc1, scale1 = beta.fit(x)
We can also use some prior knowledge about the dataset: let's keep
``loc`` and ``scale`` fixed:
>>> a1, b1, loc1, scale1 = beta.fit(x, floc=0, fscale=1)
>>> loc1, scale1
(0, 1)
We can also keep shape parameters fixed by using ``f``-keywords. To
keep the zero-th shape parameter ``a`` equal 1, use ``f0=1`` or,
equivalently, ``fa=1``:
>>> a1, b1, loc1, scale1 = beta.fit(x, fa=1, floc=0, fscale=1)
>>> a1
1
Not all distributions return estimates for the shape parameters.
``norm`` for example just returns estimates for location and scale:
>>> from scipy.stats import norm
>>> x = norm.rvs(a, b, size=1000, random_state=123)
>>> loc1, scale1 = norm.fit(x)
>>> loc1, scale1
(0.92087172783841631, 2.0015750750324668)
"""
data = np.asarray(data)
method = kwds.get('method', "mle").lower()
        # remember the requested method; used for the method-of-moments check below
Narg = len(args)
if Narg > self.numargs:
raise TypeError("Too many input arguments.")
if not np.isfinite(data).all():
raise RuntimeError("The data contains non-finite values.")
start = [None]*2
if (Narg < self.numargs) or not ('loc' in kwds and
'scale' in kwds):
# get distribution specific starting locations
start = self._fitstart(data)
args += start[Narg:-2]
loc = kwds.pop('loc', start[-2])
scale = kwds.pop('scale', start[-1])
args += (loc, scale)
x0, func, restore, args = self._reduce_func(args, kwds, data=data)
optimizer = kwds.pop('optimizer', optimize.fmin)
# convert string to function in scipy.optimize
optimizer = _fit_determine_optimizer(optimizer)
# by now kwds must be empty, since everybody took what they needed
if kwds:
raise TypeError("Unknown arguments: %s." % kwds)
# In some cases, method of moments can be done with fsolve/root
# instead of an optimizer, but sometimes no solution exists,
# especially when the user fixes parameters. Minimizing the sum
# of squares of the error generalizes to these cases.
vals = optimizer(func, x0, args=(ravel(data),), disp=0)
obj = func(vals, data)
if restore is not None:
vals = restore(args, vals)
vals = tuple(vals)
loc, scale, shapes = self._unpack_loc_scale(vals)
if not (np.all(self._argcheck(*shapes)) and scale > 0):
raise Exception("Optimization converged to parameters that are "
"outside the range allowed by the distribution.")
if method == 'mm':
if not np.isfinite(obj):
raise Exception("Optimization failed: either a data moment "
"or fitted distribution moment is "
"non-finite.")
return vals
def _fit_loc_scale_support(self, data, *args):
"""Estimate loc and scale parameters from data accounting for support.
Parameters
----------
data : array_like
Data to fit.
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
Returns
-------
Lhat : float
Estimated location parameter for the data.
Shat : float
Estimated scale parameter for the data.
"""
data = np.asarray(data)
# Estimate location and scale according to the method of moments.
loc_hat, scale_hat = self.fit_loc_scale(data, *args)
# Compute the support according to the shape parameters.
self._argcheck(*args)
_a, _b = self._get_support(*args)
a, b = _a, _b
support_width = b - a
# If the support is empty then return the moment-based estimates.
if support_width <= 0:
return loc_hat, scale_hat
# Compute the proposed support according to the loc and scale
# estimates.
a_hat = loc_hat + a * scale_hat
b_hat = loc_hat + b * scale_hat
# Use the moment-based estimates if they are compatible with the data.
data_a = np.min(data)
data_b = np.max(data)
if a_hat < data_a and data_b < b_hat:
return loc_hat, scale_hat
# Otherwise find other estimates that are compatible with the data.
data_width = data_b - data_a
rel_margin = 0.1
margin = data_width * rel_margin
# For a finite interval, both the location and scale
# should have interesting values.
if support_width < np.inf:
loc_hat = (data_a - a) - margin
scale_hat = (data_width + 2 * margin) / support_width
return loc_hat, scale_hat
# For a one-sided interval, use only an interesting location parameter.
if a > -np.inf:
return (data_a - a) - margin, 1
elif b < np.inf:
return (data_b - b) + margin, 1
else:
raise RuntimeError
def fit_loc_scale(self, data, *args):
"""
Estimate loc and scale parameters from data using 1st and 2nd moments.
Parameters
----------
data : array_like
Data to fit.
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
Returns
-------
Lhat : float
Estimated location parameter for the data.
Shat : float
Estimated scale parameter for the data.
"""
mu, mu2 = self.stats(*args, **{'moments': 'mv'})
tmp = asarray(data)
muhat = tmp.mean()
mu2hat = tmp.var()
Shat = sqrt(mu2hat / mu2)
Lhat = muhat - Shat*mu
if not np.isfinite(Lhat):
Lhat = 0
if not (np.isfinite(Shat) and (0 < Shat)):
Shat = 1
return Lhat, Shat
def _entropy(self, *args):
def integ(x):
val = self._pdf(x, *args)
return entr(val)
# upper limit is often inf, so suppress warnings when integrating
_a, _b = self._get_support(*args)
with np.errstate(over='ignore'):
h = integrate.quad(integ, _a, _b)[0]
if not np.isnan(h):
return h
else:
# try with different limits if integration problems
low, upp = self.ppf([1e-10, 1. - 1e-10], *args)
if np.isinf(_b):
upper = upp
else:
upper = _b
if np.isinf(_a):
lower = low
else:
lower = _a
return integrate.quad(integ, lower, upper)[0]
def expect(self, func=None, args=(), loc=0, scale=1, lb=None, ub=None,
conditional=False, **kwds):
"""Calculate expected value of a function with respect to the
distribution by numerical integration.
The expected value of a function ``f(x)`` with respect to a
distribution ``dist`` is defined as::
ub
E[f(x)] = Integral(f(x) * dist.pdf(x)),
lb
where ``ub`` and ``lb`` are arguments and ``x`` has the ``dist.pdf(x)``
distribution. If the bounds ``lb`` and ``ub`` correspond to the
support of the distribution, e.g. ``[-inf, inf]`` in the default
case, then the integral is the unrestricted expectation of ``f(x)``.
Also, the function ``f(x)`` may be defined such that ``f(x)`` is ``0``
outside a finite interval in which case the expectation is
calculated within the finite range ``[lb, ub]``.
Parameters
----------
func : callable, optional
Function for which integral is calculated. Takes only one argument.
The default is the identity mapping f(x) = x.
args : tuple, optional
Shape parameters of the distribution.
loc : float, optional
Location parameter (default=0).
scale : float, optional
Scale parameter (default=1).
lb, ub : scalar, optional
Lower and upper bound for integration. Default is set to the
support of the distribution.
conditional : bool, optional
If True, the integral is corrected by the conditional probability
of the integration interval. The return value is the expectation
of the function, conditional on being in the given interval.
Default is False.
Additional keyword arguments are passed to the integration routine.
Returns
-------
expect : float
The calculated expected value.
Notes
-----
The integration behavior of this function is inherited from
`scipy.integrate.quad`. Neither this function nor
`scipy.integrate.quad` can verify whether the integral exists or is
finite. For example ``cauchy(0).mean()`` returns ``np.nan`` and
``cauchy(0).expect()`` returns ``0.0``.
The function is not vectorized.
Examples
--------
To understand the effect of the bounds of integration consider
>>> from scipy.stats import expon
>>> expon(1).expect(lambda x: 1, lb=0.0, ub=2.0)
0.6321205588285578
This is close to
>>> expon(1).cdf(2.0) - expon(1).cdf(0.0)
0.6321205588285577
If ``conditional=True``
>>> expon(1).expect(lambda x: 1, lb=0.0, ub=2.0, conditional=True)
1.0000000000000002
The slight deviation from 1 is due to numerical integration.
"""
lockwds = {'loc': loc,
'scale': scale}
self._argcheck(*args)
_a, _b = self._get_support(*args)
if func is None:
def fun(x, *args):
return x * self.pdf(x, *args, **lockwds)
else:
def fun(x, *args):
return func(x) * self.pdf(x, *args, **lockwds)
if lb is None:
lb = loc + _a * scale
if ub is None:
ub = loc + _b * scale
if conditional:
invfac = (self.sf(lb, *args, **lockwds)
- self.sf(ub, *args, **lockwds))
else:
invfac = 1.0
kwds['args'] = args
# Silence floating point warnings from integration.
with np.errstate(all='ignore'):
vals = integrate.quad(fun, lb, ub, **kwds)[0] / invfac
return vals
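# ----------------------------------------------------------------------------
# Editor's note: the following helper is an illustrative sketch added for
# exposition; it is not part of the original module.  It shows the subclassing
# pattern described in the rv_continuous docstring: supply `_pdf` only and let
# the generic machinery derive cdf (numerical integration) and ppf (brentq
# root finding).  The names `_triangular01_gen` / `tri` are hypothetical.
def _example_rv_continuous_subclass():
    class _triangular01_gen(rv_continuous):
        """Density 2*(1 - x) on the interval [0, 1]."""
        def _pdf(self, x):
            return 2.0 * (1.0 - x)

    tri = _triangular01_gen(a=0.0, b=1.0, name='triangular01')
    # Analytically cdf(x) = 1 - (1 - x)**2, so cdf(0.5) ~= 0.75 and
    # ppf(0.75) ~= 0.5; both values below come from the generic fallbacks.
    return tri.cdf(0.5), tri.ppf(0.75)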
# Helpers for the discrete distributions
def _drv2_moment(self, n, *args):
"""Non-central moment of discrete distribution."""
def fun(x):
return np.power(x, n) * self._pmf(x, *args)
_a, _b = self._get_support(*args)
return _expect(fun, _a, _b, self.ppf(0.5, *args), self.inc)
def _drv2_ppfsingle(self, q, *args): # Use basic bisection algorithm
_a, _b = self._get_support(*args)
b = _b
a = _a
if isinf(b): # Be sure ending point is > q
b = int(max(100*q, 10))
while 1:
if b >= _b:
qb = 1.0
break
qb = self._cdf(b, *args)
if (qb < q):
b += 10
else:
break
else:
qb = 1.0
if isinf(a): # be sure starting point < q
a = int(min(-100*q, -10))
while 1:
if a <= _a:
                qa = 0.0
break
qa = self._cdf(a, *args)
if (qa > q):
a -= 10
else:
break
else:
qa = self._cdf(a, *args)
while 1:
if (qa == q):
return a
if (qb == q):
return b
if b <= a+1:
if qa > q:
return a
else:
return b
c = int((a+b)/2.0)
qc = self._cdf(c, *args)
if (qc < q):
if a != c:
a = c
else:
raise RuntimeError('updating stopped, endless loop')
qa = qc
elif (qc > q):
if b != c:
b = c
else:
raise RuntimeError('updating stopped, endless loop')
qb = qc
else:
return c
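# Editor's note: illustrative sketch (not part of the original module) of the
# bracket-then-brentq inversion used by rv_continuous._ppf_single above,
# written standalone for a logistic cdf so the numeric quantile can be checked
# against the closed-form logit.  All names here are hypothetical.
def _example_bracketed_quantile(q=0.9):
    def cdf(x):
        return 1.0 / (1.0 + np.exp(-x))

    factor = 10.0
    left, right = -1.0, 1.0
    # widen the bracket until cdf(left) <= q <= cdf(right)
    while cdf(left) > q:
        left *= factor
    while cdf(right) < q:
        right *= factor
    root = optimize.brentq(lambda x: cdf(x) - q, left, right, xtol=1e-14)
    exact = np.log(q / (1.0 - q))   # analytic logistic quantile
    return root, exact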
# Must over-ride one of _pmf or _cdf or pass in
# x_k, p(x_k) lists in initialization
class rv_discrete(rv_generic):
"""A generic discrete random variable class meant for subclassing.
`rv_discrete` is a base class to construct specific distribution classes
and instances for discrete random variables. It can also be used
to construct an arbitrary distribution defined by a list of support
points and corresponding probabilities.
Parameters
----------
a : float, optional
Lower bound of the support of the distribution, default: 0
b : float, optional
Upper bound of the support of the distribution, default: plus infinity
moment_tol : float, optional
The tolerance for the generic calculation of moments.
values : tuple of two array_like, optional
``(xk, pk)`` where ``xk`` are integers and ``pk`` are the non-zero
probabilities between 0 and 1 with ``sum(pk) = 1``. ``xk``
and ``pk`` must have the same shape.
inc : integer, optional
Increment for the support of the distribution.
Default is 1. (other values have not been tested)
badvalue : float, optional
        The value in result arrays that indicates that some argument
        restriction is violated; default is np.nan.
name : str, optional
The name of the instance. This string is used to construct the default
example for distributions.
longname : str, optional
This string is used as part of the first line of the docstring returned
when a subclass has no docstring of its own. Note: `longname` exists
for backwards compatibility, do not use for new subclasses.
shapes : str, optional
The shape of the distribution. For example "m, n" for a distribution
that takes two integers as the two shape arguments for all its methods
If not provided, shape parameters will be inferred from
the signatures of the private methods, ``_pmf`` and ``_cdf`` of
the instance.
extradoc : str, optional
This string is used as the last part of the docstring returned when a
subclass has no docstring of its own. Note: `extradoc` exists for
backwards compatibility, do not use for new subclasses.
seed : {None, int, `numpy.random.Generator`,
`numpy.random.RandomState`}, optional
If `seed` is None (or `np.random`), the `numpy.random.RandomState`
singleton is used.
If `seed` is an int, a new ``RandomState`` instance is used,
seeded with `seed`.
If `seed` is already a ``Generator`` or ``RandomState`` instance then
that instance is used.
Methods
-------
rvs
pmf
logpmf
cdf
logcdf
sf
logsf
ppf
isf
moment
stats
entropy
expect
median
mean
std
var
interval
__call__
support
Notes
-----
This class is similar to `rv_continuous`. Whether a shape parameter is
valid is decided by an ``_argcheck`` method (which defaults to checking
that its arguments are strictly positive.)
The main differences are:
- the support of the distribution is a set of integers
- instead of the probability density function, ``pdf`` (and the
corresponding private ``_pdf``), this class defines the
*probability mass function*, `pmf` (and the corresponding
private ``_pmf``.)
- scale parameter is not defined.
To create a new discrete distribution, we would do the following:
>>> from scipy.stats import rv_discrete
>>> class poisson_gen(rv_discrete):
... "Poisson distribution"
... def _pmf(self, k, mu):
... return exp(-mu) * mu**k / factorial(k)
and create an instance::
>>> poisson = poisson_gen(name="poisson")
Note that above we defined the Poisson distribution in the standard form.
Shifting the distribution can be done by providing the ``loc`` parameter
to the methods of the instance. For example, ``poisson.pmf(x, mu, loc)``
delegates the work to ``poisson._pmf(x-loc, mu)``.
**Discrete distributions from a list of probabilities**
Alternatively, you can construct an arbitrary discrete rv defined
on a finite set of values ``xk`` with ``Prob{X=xk} = pk`` by using the
``values`` keyword argument to the `rv_discrete` constructor.
Examples
--------
Custom made discrete distribution:
>>> from scipy import stats
>>> xk = np.arange(7)
>>> pk = (0.1, 0.2, 0.3, 0.1, 0.1, 0.0, 0.2)
>>> custm = stats.rv_discrete(name='custm', values=(xk, pk))
>>>
>>> import matplotlib.pyplot as plt
>>> fig, ax = plt.subplots(1, 1)
>>> ax.plot(xk, custm.pmf(xk), 'ro', ms=12, mec='r')
>>> ax.vlines(xk, 0, custm.pmf(xk), colors='r', lw=4)
>>> plt.show()
Random number generation:
>>> R = custm.rvs(size=100)
"""
def __new__(cls, a=0, b=inf, name=None, badvalue=None,
moment_tol=1e-8, values=None, inc=1, longname=None,
shapes=None, extradoc=None, seed=None):
if values is not None:
# dispatch to a subclass
return super(rv_discrete, cls).__new__(rv_sample)
else:
# business as usual
return super(rv_discrete, cls).__new__(cls)
def __init__(self, a=0, b=inf, name=None, badvalue=None,
moment_tol=1e-8, values=None, inc=1, longname=None,
shapes=None, extradoc=None, seed=None):
super().__init__(seed)
# cf generic freeze
self._ctor_param = dict(
a=a, b=b, name=name, badvalue=badvalue,
moment_tol=moment_tol, values=values, inc=inc,
longname=longname, shapes=shapes, extradoc=extradoc, seed=seed)
if badvalue is None:
badvalue = nan
self.badvalue = badvalue
self.a = a
self.b = b
self.moment_tol = moment_tol
self.inc = inc
self.shapes = shapes
if values is not None:
raise ValueError("rv_discrete.__init__(..., values != None, ...)")
self._construct_argparser(meths_to_inspect=[self._pmf, self._cdf],
locscale_in='loc=0',
# scale=1 for discrete RVs
locscale_out='loc, 1')
self._attach_methods()
self._construct_docstrings(name, longname, extradoc)
def __getstate__(self):
dct = self.__dict__.copy()
# these methods will be remade in __setstate__
attrs = ["_parse_args", "_parse_args_stats", "_parse_args_rvs",
"_cdfvec", "_ppfvec", "generic_moment"]
[dct.pop(attr, None) for attr in attrs]
return dct
def _attach_methods(self):
"""Attaches dynamically created methods to the rv_discrete instance."""
self._cdfvec = vectorize(self._cdf_single, otypes='d')
self.vecentropy = vectorize(self._entropy)
# _attach_methods is responsible for calling _attach_argparser_methods
self._attach_argparser_methods()
# nin correction needs to be after we know numargs
# correct nin for generic moment vectorization
_vec_generic_moment = vectorize(_drv2_moment, otypes='d')
_vec_generic_moment.nin = self.numargs + 2
self.generic_moment = types.MethodType(_vec_generic_moment, self)
# correct nin for ppf vectorization
_vppf = vectorize(_drv2_ppfsingle, otypes='d')
_vppf.nin = self.numargs + 2
self._ppfvec = types.MethodType(_vppf, self)
# now that self.numargs is defined, we can adjust nin
self._cdfvec.nin = self.numargs + 1
def _construct_docstrings(self, name, longname, extradoc):
if name is None:
name = 'Distribution'
self.name = name
self.extradoc = extradoc
# generate docstring for subclass instances
if longname is None:
            if name[0] in 'aeiouAEIOU':
hstr = "An "
else:
hstr = "A "
longname = hstr + name
if sys.flags.optimize < 2:
# Skip adding docstrings if interpreter is run with -OO
if self.__doc__ is None:
self._construct_default_doc(longname=longname,
extradoc=extradoc,
docdict=docdict_discrete,
discrete='discrete')
else:
dct = dict(distdiscrete)
self._construct_doc(docdict_discrete, dct.get(self.name))
# discrete RV do not have the scale parameter, remove it
self.__doc__ = self.__doc__.replace(
'\n scale : array_like, '
'optional\n scale parameter (default=1)', '')
def _updated_ctor_param(self):
"""Return the current version of _ctor_param, possibly updated by user.
Used by freezing.
Keep this in sync with the signature of __init__.
"""
dct = self._ctor_param.copy()
dct['a'] = self.a
dct['b'] = self.b
dct['badvalue'] = self.badvalue
dct['moment_tol'] = self.moment_tol
dct['inc'] = self.inc
dct['name'] = self.name
dct['shapes'] = self.shapes
dct['extradoc'] = self.extradoc
return dct
def _nonzero(self, k, *args):
return floor(k) == k
def _pmf(self, k, *args):
return self._cdf(k, *args) - self._cdf(k-1, *args)
def _logpmf(self, k, *args):
return log(self._pmf(k, *args))
def _cdf_single(self, k, *args):
_a, _b = self._get_support(*args)
m = arange(int(_a), k+1)
return np.sum(self._pmf(m, *args), axis=0)
def _cdf(self, x, *args):
k = floor(x)
return self._cdfvec(k, *args)
# generic _logcdf, _sf, _logsf, _ppf, _isf, _rvs defined in rv_generic
def rvs(self, *args, **kwargs):
"""Random variates of given type.
Parameters
----------
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
Location parameter (default=0).
size : int or tuple of ints, optional
Defining number of random variates (Default is 1). Note that `size`
has to be given as keyword, not as positional argument.
random_state : {None, int, `numpy.random.Generator`,
`numpy.random.RandomState`}, optional
If `seed` is None (or `np.random`), the `numpy.random.RandomState`
singleton is used.
If `seed` is an int, a new ``RandomState`` instance is used,
seeded with `seed`.
If `seed` is already a ``Generator`` or ``RandomState`` instance
then that instance is used.
Returns
-------
rvs : ndarray or scalar
Random variates of given `size`.
"""
kwargs['discrete'] = True
return super().rvs(*args, **kwargs)
def pmf(self, k, *args, **kwds):
"""Probability mass function at k of the given RV.
Parameters
----------
k : array_like
Quantiles.
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
Location parameter (default=0).
Returns
-------
pmf : array_like
Probability mass function evaluated at k
"""
args, loc, _ = self._parse_args(*args, **kwds)
k, loc = map(asarray, (k, loc))
args = tuple(map(asarray, args))
_a, _b = self._get_support(*args)
k = asarray((k-loc))
cond0 = self._argcheck(*args)
cond1 = (k >= _a) & (k <= _b) & self._nonzero(k, *args)
cond = cond0 & cond1
output = zeros(shape(cond), 'd')
place(output, (1-cond0) + np.isnan(k), self.badvalue)
if np.any(cond):
goodargs = argsreduce(cond, *((k,)+args))
place(output, cond, np.clip(self._pmf(*goodargs), 0, 1))
if output.ndim == 0:
return output[()]
return output
def logpmf(self, k, *args, **kwds):
"""Log of the probability mass function at k of the given RV.
Parameters
----------
k : array_like
Quantiles.
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
Location parameter. Default is 0.
Returns
-------
logpmf : array_like
Log of the probability mass function evaluated at k.
"""
args, loc, _ = self._parse_args(*args, **kwds)
k, loc = map(asarray, (k, loc))
args = tuple(map(asarray, args))
_a, _b = self._get_support(*args)
k = asarray((k-loc))
cond0 = self._argcheck(*args)
cond1 = (k >= _a) & (k <= _b) & self._nonzero(k, *args)
cond = cond0 & cond1
output = empty(shape(cond), 'd')
output.fill(NINF)
place(output, (1-cond0) + np.isnan(k), self.badvalue)
if np.any(cond):
goodargs = argsreduce(cond, *((k,)+args))
place(output, cond, self._logpmf(*goodargs))
if output.ndim == 0:
return output[()]
return output
def cdf(self, k, *args, **kwds):
"""Cumulative distribution function of the given RV.
Parameters
----------
k : array_like, int
Quantiles.
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
Location parameter (default=0).
Returns
-------
cdf : ndarray
Cumulative distribution function evaluated at `k`.
"""
args, loc, _ = self._parse_args(*args, **kwds)
k, loc = map(asarray, (k, loc))
args = tuple(map(asarray, args))
_a, _b = self._get_support(*args)
k = asarray((k-loc))
cond0 = self._argcheck(*args)
cond1 = (k >= _a) & (k < _b)
cond2 = (k >= _b)
cond = cond0 & cond1
output = zeros(shape(cond), 'd')
place(output, cond2*(cond0 == cond0), 1.0)
place(output, (1-cond0) + np.isnan(k), self.badvalue)
if np.any(cond):
goodargs = argsreduce(cond, *((k,)+args))
place(output, cond, np.clip(self._cdf(*goodargs), 0, 1))
if output.ndim == 0:
return output[()]
return output
def logcdf(self, k, *args, **kwds):
"""Log of the cumulative distribution function at k of the given RV.
Parameters
----------
k : array_like, int
Quantiles.
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
Location parameter (default=0).
Returns
-------
logcdf : array_like
Log of the cumulative distribution function evaluated at k.
"""
args, loc, _ = self._parse_args(*args, **kwds)
k, loc = map(asarray, (k, loc))
args = tuple(map(asarray, args))
_a, _b = self._get_support(*args)
k = asarray((k-loc))
cond0 = self._argcheck(*args)
cond1 = (k >= _a) & (k < _b)
cond2 = (k >= _b)
cond = cond0 & cond1
output = empty(shape(cond), 'd')
output.fill(NINF)
place(output, (1-cond0) + np.isnan(k), self.badvalue)
place(output, cond2*(cond0 == cond0), 0.0)
if np.any(cond):
goodargs = argsreduce(cond, *((k,)+args))
place(output, cond, self._logcdf(*goodargs))
if output.ndim == 0:
return output[()]
return output
def sf(self, k, *args, **kwds):
"""Survival function (1 - `cdf`) at k of the given RV.
Parameters
----------
k : array_like
Quantiles.
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
Location parameter (default=0).
Returns
-------
sf : array_like
Survival function evaluated at k.
"""
args, loc, _ = self._parse_args(*args, **kwds)
k, loc = map(asarray, (k, loc))
args = tuple(map(asarray, args))
_a, _b = self._get_support(*args)
k = asarray(k-loc)
cond0 = self._argcheck(*args)
cond1 = (k >= _a) & (k < _b)
cond2 = (k < _a) & cond0
cond = cond0 & cond1
output = zeros(shape(cond), 'd')
place(output, (1-cond0) + np.isnan(k), self.badvalue)
place(output, cond2, 1.0)
if np.any(cond):
goodargs = argsreduce(cond, *((k,)+args))
place(output, cond, np.clip(self._sf(*goodargs), 0, 1))
if output.ndim == 0:
return output[()]
return output
def logsf(self, k, *args, **kwds):
"""Log of the survival function of the given RV.
Returns the log of the "survival function," defined as 1 - `cdf`,
evaluated at `k`.
Parameters
----------
k : array_like
Quantiles.
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
Location parameter (default=0).
Returns
-------
logsf : ndarray
Log of the survival function evaluated at `k`.
"""
args, loc, _ = self._parse_args(*args, **kwds)
k, loc = map(asarray, (k, loc))
args = tuple(map(asarray, args))
_a, _b = self._get_support(*args)
k = asarray(k-loc)
cond0 = self._argcheck(*args)
cond1 = (k >= _a) & (k < _b)
cond2 = (k < _a) & cond0
cond = cond0 & cond1
output = empty(shape(cond), 'd')
output.fill(NINF)
place(output, (1-cond0) + np.isnan(k), self.badvalue)
place(output, cond2, 0.0)
if np.any(cond):
goodargs = argsreduce(cond, *((k,)+args))
place(output, cond, self._logsf(*goodargs))
if output.ndim == 0:
return output[()]
return output
def ppf(self, q, *args, **kwds):
"""Percent point function (inverse of `cdf`) at q of the given RV.
Parameters
----------
q : array_like
Lower tail probability.
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
Location parameter (default=0).
Returns
-------
k : array_like
Quantile corresponding to the lower tail probability, q.
"""
args, loc, _ = self._parse_args(*args, **kwds)
q, loc = map(asarray, (q, loc))
args = tuple(map(asarray, args))
_a, _b = self._get_support(*args)
cond0 = self._argcheck(*args) & (loc == loc)
cond1 = (q > 0) & (q < 1)
cond2 = (q == 1) & cond0
cond = cond0 & cond1
output = np.full(shape(cond), fill_value=self.badvalue, dtype='d')
        # output type 'd' to handle nan and inf
place(output, (q == 0)*(cond == cond), _a-1 + loc)
place(output, cond2, _b + loc)
if np.any(cond):
goodargs = argsreduce(cond, *((q,)+args+(loc,)))
loc, goodargs = goodargs[-1], goodargs[:-1]
place(output, cond, self._ppf(*goodargs) + loc)
if output.ndim == 0:
return output[()]
return output
def isf(self, q, *args, **kwds):
"""Inverse survival function (inverse of `sf`) at q of the given RV.
Parameters
----------
q : array_like
Upper tail probability.
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
Location parameter (default=0).
Returns
-------
k : ndarray or scalar
Quantile corresponding to the upper tail probability, q.
"""
args, loc, _ = self._parse_args(*args, **kwds)
q, loc = map(asarray, (q, loc))
args = tuple(map(asarray, args))
_a, _b = self._get_support(*args)
cond0 = self._argcheck(*args) & (loc == loc)
cond1 = (q > 0) & (q < 1)
cond2 = (q == 1) & cond0
cond = cond0 & cond1
# same problem as with ppf; copied from ppf and changed
output = np.full(shape(cond), fill_value=self.badvalue, dtype='d')
        # output type 'd' to handle nan and inf
place(output, (q == 0)*(cond == cond), _b)
place(output, cond2, _a-1)
# call place only if at least 1 valid argument
if np.any(cond):
goodargs = argsreduce(cond, *((q,)+args+(loc,)))
loc, goodargs = goodargs[-1], goodargs[:-1]
# PB same as ticket 766
place(output, cond, self._isf(*goodargs) + loc)
if output.ndim == 0:
return output[()]
return output
def _entropy(self, *args):
if hasattr(self, 'pk'):
return stats.entropy(self.pk)
else:
_a, _b = self._get_support(*args)
return _expect(lambda x: entr(self.pmf(x, *args)),
_a, _b, self.ppf(0.5, *args), self.inc)
def expect(self, func=None, args=(), loc=0, lb=None, ub=None,
conditional=False, maxcount=1000, tolerance=1e-10, chunksize=32):
"""
Calculate expected value of a function with respect to the distribution
for discrete distribution by numerical summation.
Parameters
----------
func : callable, optional
Function for which the expectation value is calculated.
Takes only one argument.
The default is the identity mapping f(k) = k.
args : tuple, optional
Shape parameters of the distribution.
loc : float, optional
Location parameter.
Default is 0.
lb, ub : int, optional
Lower and upper bound for the summation, default is set to the
support of the distribution, inclusive (``lb <= k <= ub``).
conditional : bool, optional
If true then the expectation is corrected by the conditional
probability of the summation interval. The return value is the
expectation of the function, `func`, conditional on being in
the given interval (k such that ``lb <= k <= ub``).
Default is False.
maxcount : int, optional
Maximal number of terms to evaluate (to avoid an endless loop for
an infinite sum). Default is 1000.
tolerance : float, optional
Absolute tolerance for the summation. Default is 1e-10.
chunksize : int, optional
Iterate over the support of a distributions in chunks of this size.
Default is 32.
Returns
-------
expect : float
Expected value.
Notes
-----
        For heavy-tailed distributions, the expected value may or may not
        exist, depending on the function, `func`. If it does exist, but the
        sum converges slowly, the accuracy of the result may be rather low.
        For instance, for ``zipf(4)`` the accuracy of the mean and variance
        estimates is only about 1e-5. Increasing `maxcount` and/or
        `chunksize` may improve the result, but may also make zipf very slow.
The function is not vectorized.
"""
if func is None:
def fun(x):
# loc and args from outer scope
return (x+loc)*self._pmf(x, *args)
else:
def fun(x):
# loc and args from outer scope
return func(x+loc)*self._pmf(x, *args)
        # `pmf` was used here historically because `_pmf` did not check the
        # support (e.g. in randint); `_pmf` now handles this correctly, so it
        # is used directly.
_a, _b = self._get_support(*args)
if lb is None:
lb = _a
else:
lb = lb - loc # convert bound for standardized distribution
if ub is None:
ub = _b
else:
ub = ub - loc # convert bound for standardized distribution
if conditional:
invfac = self.sf(lb-1, *args) - self.sf(ub, *args)
else:
invfac = 1.0
if isinstance(self, rv_sample):
res = self._expect(fun, lb, ub)
return res / invfac
# iterate over the support, starting from the median
x0 = self.ppf(0.5, *args)
res = _expect(fun, lb, ub, x0, self.inc, maxcount, tolerance, chunksize)
return res / invfac
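# Editor's note: illustrative sketch (not part of the original module) of a
# discrete subclass that supplies only `_pmf`; cdf, ppf, rvs and expect all
# fall back to the generic machinery of rv_discrete above.  The class name
# `_geom01_gen` is hypothetical.
def _example_rv_discrete_subclass(p=0.3):
    class _geom01_gen(rv_discrete):
        """Geometric distribution on k = 1, 2, ... with success probability p."""
        def _pmf(self, k, p):
            return p * (1.0 - p) ** (k - 1)

    geom01 = _geom01_gen(a=1, name='geom01')
    # expect() sums k * pmf(k) by chunked summation; the exact mean is 1 / p.
    return geom01.expect(args=(p,)), 1.0 / p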
def _expect(fun, lb, ub, x0, inc, maxcount=1000, tolerance=1e-10,
chunksize=32):
"""Helper for computing the expectation value of `fun`."""
# short-circuit if the support size is small enough
if (ub - lb) <= chunksize:
supp = np.arange(lb, ub+1, inc)
vals = fun(supp)
return np.sum(vals)
# otherwise, iterate starting from x0
if x0 < lb:
x0 = lb
if x0 > ub:
x0 = ub
count, tot = 0, 0.
# iterate over [x0, ub] inclusive
for x in _iter_chunked(x0, ub+1, chunksize=chunksize, inc=inc):
count += x.size
delta = np.sum(fun(x))
tot += delta
if abs(delta) < tolerance * x.size:
break
if count > maxcount:
warnings.warn('expect(): sum did not converge', RuntimeWarning)
return tot
# iterate over [lb, x0)
for x in _iter_chunked(x0-1, lb-1, chunksize=chunksize, inc=-inc):
count += x.size
delta = np.sum(fun(x))
tot += delta
if abs(delta) < tolerance * x.size:
break
if count > maxcount:
warnings.warn('expect(): sum did not converge', RuntimeWarning)
break
return tot
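# Editor's note: illustrative sketch (not part of the original module) driving
# the chunked-summation helper above directly.  It sums k * pmf(k) for a
# Poisson(mu) pmf and compares the result with the exact mean mu.  The
# function name is hypothetical.
def _example_chunked_poisson_mean(mu=4.0):
    import math

    def term(k):
        # k * pmf(k) for Poisson(mu); k arrives as an integer array
        k = np.asarray(k)
        fact = np.array([math.factorial(int(ki)) for ki in k], dtype=float)
        return k * np.exp(-mu) * mu ** k / fact

    approx = _expect(term, lb=0, ub=np.inf, x0=int(mu), inc=1)
    return approx, mu   # chunked sum vs. exact mean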
def _iter_chunked(x0, x1, chunksize=4, inc=1):
"""Iterate from x0 to x1 in chunks of chunksize and steps inc.
x0 must be finite, x1 need not be. In the latter case, the iterator is
infinite.
Handles both x0 < x1 and x0 > x1. In the latter case, iterates downwards
(make sure to set inc < 0.)
>>> [x for x in _iter_chunked(2, 5, inc=2)]
[array([2, 4])]
>>> [x for x in _iter_chunked(2, 11, inc=2)]
[array([2, 4, 6, 8]), array([10])]
>>> [x for x in _iter_chunked(2, -5, inc=-2)]
[array([ 2, 0, -2, -4])]
>>> [x for x in _iter_chunked(2, -9, inc=-2)]
[array([ 2, 0, -2, -4]), array([-6, -8])]
"""
if inc == 0:
raise ValueError('Cannot increment by zero.')
if chunksize <= 0:
raise ValueError('Chunk size must be positive; got %s.' % chunksize)
s = 1 if inc > 0 else -1
stepsize = abs(chunksize * inc)
x = x0
while (x - x1) * inc < 0:
delta = min(stepsize, abs(x - x1))
step = delta * s
supp = np.arange(x, x + step, inc)
x += step
yield supp
class rv_sample(rv_discrete):
"""A 'sample' discrete distribution defined by the support and values.
    The constructor ignores most of its arguments; only the `values`
    argument is required.
"""
def __init__(self, a=0, b=inf, name=None, badvalue=None,
moment_tol=1e-8, values=None, inc=1, longname=None,
shapes=None, extradoc=None, seed=None):
super(rv_discrete, self).__init__(seed)
if values is None:
raise ValueError("rv_sample.__init__(..., values=None,...)")
# cf generic freeze
self._ctor_param = dict(
a=a, b=b, name=name, badvalue=badvalue,
moment_tol=moment_tol, values=values, inc=inc,
longname=longname, shapes=shapes, extradoc=extradoc, seed=seed)
if badvalue is None:
badvalue = nan
self.badvalue = badvalue
self.moment_tol = moment_tol
self.inc = inc
self.shapes = shapes
self.vecentropy = self._entropy
xk, pk = values
if np.shape(xk) != np.shape(pk):
raise ValueError("xk and pk must have the same shape.")
if np.less(pk, 0.0).any():
raise ValueError("All elements of pk must be non-negative.")
if not np.allclose(np.sum(pk), 1):
raise ValueError("The sum of provided pk is not 1.")
indx = np.argsort(np.ravel(xk))
self.xk = np.take(np.ravel(xk), indx, 0)
self.pk = np.take(np.ravel(pk), indx, 0)
self.a = self.xk[0]
self.b = self.xk[-1]
self.qvals = np.cumsum(self.pk, axis=0)
self.shapes = ' ' # bypass inspection
self._construct_argparser(meths_to_inspect=[self._pmf],
locscale_in='loc=0',
# scale=1 for discrete RVs
locscale_out='loc, 1')
self._attach_methods()
self._construct_docstrings(name, longname, extradoc)
def __getstate__(self):
dct = self.__dict__.copy()
# these methods will be remade in rv_generic.__setstate__,
# which calls rv_generic._attach_methods
attrs = ["_parse_args", "_parse_args_stats", "_parse_args_rvs"]
[dct.pop(attr, None) for attr in attrs]
return dct
def _attach_methods(self):
"""Attaches dynamically created argparser methods."""
self._attach_argparser_methods()
def _get_support(self, *args):
"""Return the support of the (unscaled, unshifted) distribution.
Parameters
----------
arg1, arg2, ... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
Returns
-------
a, b : numeric (float, or int or +/-np.inf)
end-points of the distribution's support.
"""
return self.a, self.b
def _pmf(self, x):
return np.select([x == k for k in self.xk],
[np.broadcast_arrays(p, x)[0] for p in self.pk], 0)
def _cdf(self, x):
xx, xxk = np.broadcast_arrays(x[:, None], self.xk)
indx = np.argmax(xxk > xx, axis=-1) - 1
return self.qvals[indx]
def _ppf(self, q):
qq, sqq = np.broadcast_arrays(q[..., None], self.qvals)
indx = argmax(sqq >= qq, axis=-1)
return self.xk[indx]
def _rvs(self, size=None, random_state=None):
# Need to define it explicitly, otherwise .rvs() with size=None
# fails due to explicit broadcasting in _ppf
U = random_state.uniform(size=size)
if size is None:
U = np.array(U, ndmin=1)
Y = self._ppf(U)[0]
else:
Y = self._ppf(U)
return Y
def _entropy(self):
return stats.entropy(self.pk)
def generic_moment(self, n):
n = asarray(n)
return np.sum(self.xk**n[np.newaxis, ...] * self.pk, axis=0)
def _expect(self, fun, lb, ub, *args, **kwds):
# ignore all args, just do a brute force summation
supp = self.xk[(lb <= self.xk) & (self.xk <= ub)]
vals = fun(supp)
return np.sum(vals)
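# Editor's note: illustrative sketch (not part of the original module) of the
# values=(xk, pk) path: rv_discrete.__new__ dispatches such calls to the
# rv_sample class defined above.  The `fair_die` name is hypothetical.
def _example_rv_sample_die():
    xk = np.arange(1, 7)
    pk = np.full(6, 1.0 / 6.0)
    die = rv_discrete(name='fair_die', values=(xk, pk))
    # pmf/cdf/moments are read off the tabulated support:
    #   pmf(3) == 1/6, cdf(3) == 0.5, mean() == 3.5
    return die.pmf(3), die.cdf(3), die.mean()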
def _check_shape(argshape, size):
"""
This is a utility function used by `_rvs()` in the class geninvgauss_gen.
It compares the tuple argshape to the tuple size.
Parameters
----------
argshape : tuple of integers
Shape of the arguments.
size : tuple of integers or integer
Size argument of rvs().
Returns
-------
The function returns two tuples, scalar_shape and bc.
scalar_shape : tuple
Shape to which the 1-d array of random variates returned by
_rvs_scalar() is converted when it is copied into the
output array of _rvs().
bc : tuple of booleans
        bc is a tuple of the same length as size. bc[j] is True if the data
        associated with that index is generated in one call of _rvs_scalar().
"""
scalar_shape = []
bc = []
for argdim, sizedim in zip_longest(argshape[::-1], size[::-1],
fillvalue=1):
if sizedim > argdim or (argdim == sizedim == 1):
scalar_shape.append(sizedim)
bc.append(True)
else:
bc.append(False)
return tuple(scalar_shape[::-1]), tuple(bc[::-1])
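# Editor's note: illustrative sketch (not part of the original module) of a
# concrete _check_shape call.  With argshape (3, 1) broadcast against size
# (2, 3, 5), the axes along which the arguments are scalar-like end up in
# scalar_shape and are flagged True in bc.
def _example_check_shape():
    scalar_shape, bc = _check_shape((3, 1), (2, 3, 5))
    # scalar_shape == (2, 5) and bc == (True, False, True)
    return scalar_shape, bc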
def get_distribution_names(namespace_pairs, rv_base_class):
"""Collect names of statistical distributions and their generators.
Parameters
----------
namespace_pairs : sequence
A snapshot of (name, value) pairs in the namespace of a module.
rv_base_class : class
The base class of random variable generator classes in a module.
Returns
-------
distn_names : list of strings
Names of the statistical distributions.
distn_gen_names : list of strings
Names of the generators of the statistical distributions.
        Note that these are not simply the names of the statistical
        distributions with a ``_gen`` suffix added.
"""
distn_names = []
distn_gen_names = []
for name, value in namespace_pairs:
if name.startswith('_'):
continue
if name.endswith('_gen') and issubclass(value, rv_base_class):
distn_gen_names.append(name)
if isinstance(value, rv_base_class):
distn_names.append(name)
return distn_names, distn_gen_names
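# Editor's note: illustrative sketch (not part of the original module) showing
# how get_distribution_names separates instances from their *_gen classes;
# `pairs` stands in for the (name, value) items of a distributions module.
def _example_get_distribution_names():
    class demo_gen(rv_continuous):
        def _pdf(self, x):
            return np.exp(-x)

    demo = demo_gen(a=0.0, name='demo')
    pairs = [('demo_gen', demo_gen), ('demo', demo), ('np', np)]
    # -> (['demo'], ['demo_gen'])
    return get_distribution_names(pairs, rv_continuous)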
|
bsd-3-clause
|
russellgeoff/blog
|
Control/sim_and_plot.py
|
4
|
7735
|
'''
Copyright (C) 2014 Terry Stewart and Travis DeWolf
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import Controllers.gc as GC
import numpy as np
from matplotlib import pyplot as plt
from matplotlib import animation
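# Editor's note: illustrative sketch (not part of the original file).  The
# Runner class defined below only needs an arm exposing apply_torque() and
# position(), and a shell exposing control() and a controller with a target;
# the minimal stubs here (hypothetical names) are enough to exercise the
# plotting loop, e.g.
#   Runner(box=[-2, 2, -2, 2]).run(_StubArm(), _StubShell())
class _StubArm:
    def __init__(self):
        self.q = np.zeros(2)
    def apply_torque(self, u, dt):
        self.q += u * dt                      # crude single-step integration
    def position(self, q=None, rotate=0.0):
        q = self.q if q is None else q
        x = np.cumsum([0.0, np.cos(q[0]), np.cos(q[0] + q[1])])
        y = np.cumsum([0.0, np.sin(q[0]), np.sin(q[0] + q[1])])
        return np.array([x, y])               # joint x/y coordinates

class _StubShell:
    class _StubController:
        target = np.array([0.5, 0.5])
        def gen_target(self, arm):
            return self.target
    def __init__(self):
        self.controller = self._StubController()
        self.u = np.zeros(2)
    def control(self, arm):
        self.u = 0.1 * np.ones(2)             # constant "torque" signal
        return self.u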
class Runner:
"""
A class for drawing the arm simulation.
NOTE: If you're getting an error along the lines of
'xrange is not an iterator', make sure that you have
the most recent version of matplotlib, from their github.
"""
def __init__(self, title='', dt=1e-4, control_steps=10,
display_steps=100, t_target=1.0,
seed=1, box=[-1,1,-1,1], rotate=0.0,
control_type='', trajectory=None,
infinite_trail=False, mouse_control=False):
self.dt = dt
self.control_steps = control_steps
self.display_steps = display_steps
self.target_steps = int(t_target/float(dt*display_steps))
self.trajectory = trajectory
self.box = box
self.control_type = control_type
self.infinite_trail = infinite_trail
self.mouse_control = mouse_control
self.rotate = rotate
self.title = title
self.sim_step = 0
self.trail_index = 0
def run(self, arm, control_shell, video=None, video_time=None):
self.arm = arm
self.shell = control_shell
fig = plt.figure(figsize=(5.1,5.1), dpi=None)
fig.suptitle(self.title);
# set the padding of the subplot explicitly
fig.subplotpars.left=.1; fig.subplotpars.right=.9
fig.subplotpars.bottom=.1; fig.subplotpars.top=.9
ax = fig.add_subplot(1, 1, 1,
xlim=(self.box[0], self.box[1]),
ylim=(self.box[2], self.box[3]))
ax.xaxis.grid(); ax.yaxis.grid()
# make it a square plot
ax.set_aspect(1)
# set up plot elements
self.trail, = ax.plot([], [], color='#888888', lw=3)
self.arm_line, = ax.plot([], [], 'o-', mew=4, color='b', lw=5)
self.target_line, = ax.plot([], [], 'r-x', mew=4)
self.info = ax.text(self.box[0]+abs(.1*self.box[0]), \
self.box[3]-abs(.1*self.box[3]), \
'', va='top')
self.trail_data = np.ones((self.target_steps, 2), \
dtype='float') * np.NAN
if self.trajectory is not None:
ax.plot(self.trajectory[:,0], self.trajectory[:,1], alpha=.3)
# connect up mouse event if specified
if self.mouse_control:
self.target = self.shell.controller.gen_target(arm)
# get pixel width of fig (-.2 for the padding)
self.fig_width = (fig.get_figwidth() - .2 \
* fig.get_figwidth()) * fig.get_dpi()
def move_target(event):
# get mouse position and scale appropriately to convert to (x,y)
target = ((np.array([event.x, event.y]) - .5 * fig.get_dpi()) /\
self.fig_width) * \
(self.box[1] - self.box[0]) + self.box[0]
# set target for the controller
self.target = \
self.shell.controller.set_target_from_mouse(target)
# hook up function to mouse event
fig.canvas.mpl_connect('motion_notify_event', move_target)
if video_time is None:
frames = 50
else:
frames = int(video_time/(self.dt*self.display_steps))
anim = animation.FuncAnimation(fig, self.anim_animate,
init_func=self.anim_init, frames=frames, interval=0, blit=True)
if video is not None:
anim.save(video, fps=1.0/(self.dt*self.display_steps), dpi=200)
self.anim = anim
def make_info_text(self):
text = []
text.append('t = %1.4g'%(self.sim_step*self.dt))
u_text = ' '.join('%4.3f,'%F for F in self.shell.u)
text.append('u = ['+u_text+']')
if self.control_type.startswith('adaptive'):
theta_text = ' '.join('%4.3f,'%F for F in self.shell.controller.theta)
text.append('theta = ['+theta_text+']')
return '\n'.join(text)
def anim_init(self):
self.info.set_text('')
self.arm_line.set_data([], [])
self.target_line.set_data([], [])
self.trail.set_data([], [])
return self.arm_line, self.target_line, self.info, self.trail
def anim_animate(self, i):
if self.control_type == 'random':
# update target after specified period of time passes
if self.sim_step % (self.target_steps*self.display_steps) == 0:
self.target = self.shell.controller.gen_target(self.arm)
else:
self.target = self.shell.controller.target
# before drawing
for j in range(self.display_steps):
# update control signal
if self.sim_step % self.control_steps == 0 or \
'tau' not in locals():
tau = self.shell.control(self.arm)
# apply control signal and simulate
self.arm.apply_torque(u=tau, dt=self.dt)
self.sim_step +=1
# update figure
self.arm_line.set_data(*self.arm.position(rotate=self.rotate))
self.info.set_text(self.make_info_text())
self.trail.set_data(self.trail_data[:,0], self.trail_data[:,1])
if self.target is not None:
if isinstance(self.shell.controller, GC.Control):
# convert to plottable form if necessary
target = self.arm.position(q=self.target, rotate=self.rotate)
else:
target = self.target
self.target_line.set_data(target)
# update hand trail
if self.shell.pen_down:
if self.infinite_trail:
# if we're writing, keep all pen_down history
self.trail_index += 1
# if we've hit the end of the trail, double it and copy
if self.trail_index >= self.trail_data.shape[0]-1:
trail_data = np.zeros((self.trail_data.shape[0]*2,
self.trail_data.shape[1]))*np.nan
trail_data[:self.trail_index+1] = self.trail_data
self.trail_data = trail_data
self.trail_data[self.trail_index] = \
self.arm_line.get_xydata()[-1]
else:
# else just use a buffer window
self.trail_data[:-1] = self.trail_data[1:]
self.trail_data[-1] = self.arm_line.get_xydata()[-1]
else:
# if pen up add a break in the trail
self.trail_data[self.trail_index] = [np.nan, np.nan]
return self.target_line, self.info, self.trail, self.arm_line
def show(self):
try:
plt.show()
except AttributeError:
pass
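# Minimal usage sketch (hypothetical; `Arm` and `Shell` stand in for the arm
# model and control shell classes defined elsewhere in this repository):
#
#     runner = Runner(title='gc', dt=1e-4, control_type='gc',
#                     box=[-1, 1, -1, 1])
#     runner.run(arm=Arm(), control_shell=Shell())
#     runner.show()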
|
gpl-3.0
|
isomerase/mozziesniff
|
roboskeeter/math/sharri_fit_lognorm.py
|
2
|
2759
|
__author__ = 'richard'
# -*- coding: utf-8 -*-
"""
Created on Tue Apr 7 23:07:31 2015
@author: richard
"""
from scipy import stats
import numpy as np
from matplotlib import pyplot as plt
# load csv values
csv = np.genfromtxt('data/distributions/accelerationmag_raw.csv', delimiter=",")
csv = csv.T
bin_edges = csv[0]
probabilities = csv[4]
# draw a bunch of random samples from the probability distribution
rand_samples = np.random.choice(bin_edges, 1000, p=probabilities/probabilities.sum())
# fit to lognormal
shape, loc, scale = stats.lognorm.fit(rand_samples, floc=0)
mu = np.log(scale) # Mean of log(X)
sigma = shape # Standard deviation of log(X)
geom_mean = np.exp(mu) # Geometric mean == median
geom_stdev = np.exp(sigma) # Geometric standard deviation
scale = 1.
# multiply whole dist by scalar 0.1
#shape/sigma, standard dev of log(X) = 0.932352661694 * 0.5
#Geometric std dev = 2.54047904005
#loc = 0
#scale = 0.666555094117
#mu = log(scale) = Mean of log(X) = -0.405632480939
#Geometric mean / median = 0.666555094117
SCALAR = 0.1
# plot together
plt.plot(bin_edges, probabilities, lw=2, label='sharri data')
plt.plot(bin_edges, stats.lognorm.pdf(bin_edges, shape*0.5, loc=loc, scale=scale), 'r', linewidth=3, label='fit')
#plt.plot(binspots, stats.lognorm.pdf(binspots, shape, loc=0, scale=.1), 'g', linewidth=3, label='test')
plt.legend()
plt.show()
#plt.plot(binspots, stats.lognorm.pdf(binspots, shape, loc=0, scale=.1), 'g', linewidth=3, label='test')
#plt.xlim([0,1])
#plt.show()
# print results
print "shape/sigma, standard dev of log(X) = ", sigma
print "Geometric std dev = ", geom_stdev
print "loc = ", loc
print "scale = ", scale
print "mu = log(scale) = Mean of log(X) = ", mu
print "Geometric mean / median = ", geom_mean
# Sharri results
# multiply whole dist by scalar 0.1
#shape/sigma, standard dev of log(X) = 0.932352661694 * 0.5
#Geometric std dev = 2.54047904005
#loc = 0
#scale = 0.666555094117 * 0.5
#mu = log(scale) = Mean of log(X) = -0.405632480939
#Geometric mean / median = 0.666555094117
#==============================================================================
# Dickinson results:
# shape = 0.719736466122
# loc = 0
# scale = 1.82216219069
# mu = 0.600023812816
# sigma = 0.719736466122
# geom_mean = 1.82216219069
# geom_stdev = 2.05389186923
#==============================================================================
sigma = 0.932352661694 * 0.5
scale = 1.6 * 0.666555094117
# test
x = np.linspace(0,10,200)
pdf = SCALAR * scale * (np.exp(-(np.log(x) - mu)**2 / (2 * sigma**2))
/ (x * sigma * np.sqrt(2 * np.pi)))
#y = np.random.lognormal(mean=mu, sigma=sigma, size=scale)
plt.plot(x, pdf)
#plt.plot(x, y)
#y = np.random.lognormal(mean=mu, sigma=sigma, size=scale)
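# Consistency check (sketch, not part of the original analysis): scipy's
# lognorm with shape sigma and scale exp(mu) should reproduce the hand-written
# pdf above once the ad-hoc SCALAR * scale factor is divided out. x[0] == 0
# is skipped because log(0) makes the explicit formula undefined there.
ref = stats.lognorm.pdf(x[1:], sigma, loc=0, scale=np.exp(mu))
np.testing.assert_allclose(pdf[1:] / (SCALAR * scale), ref, rtol=1e-10)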
|
mit
|
i-maintenance/CM-Adapter
|
adapter.py
|
1
|
6850
|
#! /usr/bin/env python3
import os
import csv
import json
import logging
import threading
from datetime import datetime, timedelta
import sys
import time
import pytz
import requests
import pandas as pd
from kafka import KafkaProducer
from logstash import TCPLogstashHandler
CM_APP_HOST = 'http://192.168.13.101'
KAFKA_TOPIC = 'SensorData'
BOOTSTRAP_SERVERS = ['il061:9092', 'il062:9092', 'il063:9092']
IGNORED_FIELDS = ['Record', 'Wind', 'Temp_Aussen', 'Feuchte_Aussen']
UPDATE_INTERVAL = 1 # in minutes
LOGSTASH_HOST = os.getenv('LOGSTASH_HOST', 'il060')
LOGSTASH_PORT = int(os.getenv('LOGSTASH_PORT', '5000'))
SENSORTHINGS_HOST = os.getenv('SENSORTHINGS_HOST', 'il060')
SENSORTHINGS_PORT = os.getenv('SENSORTHINGS_PORT', '8082')
# setup logging
logger = logging.getLogger('cm-adapter.logging')
logger.setLevel(logging.INFO)
console_logger = logging.StreamHandler(stream=sys.stdout)
console_logger.setFormatter(logging.Formatter(logging.BASIC_FORMAT))
logstash_handler = TCPLogstashHandler(host=LOGSTASH_HOST, port=LOGSTASH_PORT, version=1)
[logger.addHandler(l) for l in [console_logger, logstash_handler]]
logger.info('Sending logstash to %s:%d', logstash_handler.host, logstash_handler.port)
producer = KafkaProducer(bootstrap_servers=BOOTSTRAP_SERVERS,
api_version=(0, 9),
value_serializer=lambda m: json.dumps(m).encode('utf-8'),
acks='all')
def update(last_sent_time=None, id_map=None):
"""
Fetches recent sensor data from the CM setup and forwards new entries to the i-Maintenance messaging bus.
After fetching and updating data a new cycle is scheduled.
:param last_sent_time: Last time of previous update. Used to determine new entries.
If `None`, all entries will be forwarded.
:param id_map: Given mapping of SensorThings IDs. This map is updated during the iteration.
"""
try:
# fetch sensor data
sensor_data = fetch_sensor_data(cm_host=CM_APP_HOST)
if sensor_data.empty:
logger.warning("CM PLC may not be available")
logger.info('Fetched {} sensor entries'.format(len(sensor_data)))
# filter data
sensor_data = sensor_data.ix[sensor_data.index > last_sent_time] if last_sent_time else sensor_data
# fetch id mapping
id_map = fetch_id_mapping(host=SENSORTHINGS_HOST, port=SENSORTHINGS_PORT, fallback=id_map)
# delegate to messaging bus
publish_sensor_data(data=sensor_data, id_map=id_map, topic=KAFKA_TOPIC, ignored=IGNORED_FIELDS)
#logger.info('{}'.format(sensor_data.index))
last_sent_time = sensor_data.index[-1]
logger.info('Published {} new sensor entries till {}'.format(len(sensor_data), last_sent_time))
except Exception as e:
logger.exception(e)
# schedule next update
interval = timedelta(minutes=UPDATE_INTERVAL).total_seconds()
kwargs = {'last_sent_time': last_sent_time, 'id_map': id_map}
threading.Timer(interval=interval, function=update, kwargs=kwargs).start()
logger.info('Scheduled next update at {}'.format(datetime.now() + timedelta(minutes=UPDATE_INTERVAL)))
def fetch_sensor_data(cm_host):
"""
Fetches sensor data from the CM host.
:param cm_host: URL of the CM host.
:return: DataFrame containing the fetched entries, whose columns match the columns in the CSV file.
"""
url = cm_host + '/FileBrowser/Download?Path=/DataLogs/SalzburgResearch_Logging.csv'
headers = {'Referer': cm_host + '/Portal/Portal.mwsl?PriNav=FileBrowser&Path=/DataLogs/"'}
response = requests.get(url, headers=headers)
response.raise_for_status()
csv_data = response.text.splitlines()
# read sensor data from csv file
csv_reader = csv.reader(csv_data)
csv_header = next(csv_reader) # read header information
sensor_data = pd.DataFrame(columns=csv_header)
for row in csv_reader:
if row: # due to blank line at the bottom
sensor_data.loc[row[0]] = list(row) # index is first field (i.e. 'Record')
# convert timestamp
sensor_data['Zeitstempel'] = pd.to_datetime(sensor_data['Zeitstempel'])
sensor_data = sensor_data.set_index(['Zeitstempel']).sort_index()
return sensor_data
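# Note: the CSV is expected to provide a 'Record' column (used as the raw row
# index above) and a 'Zeitstempel' timestamp column, plus one column per
# sensor channel; anything listed in IGNORED_FIELDS is skipped when publishing.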
def publish_sensor_data(data, id_map, topic, ignored=None):
"""
Publishes sensor data to the i-Maintenance messaging bus (Kafka) using the SensorThings format.
:param data: Dataframe containing sensor data.
:param id_map: `dict` mapping columns in the DataFrame to IDs in the SensorThings domain.
:param topic: Kafka topic in which the messages are published.
:param ignored: List of ignored column names. If `None`, rows of all columns are published.
"""
if ignored is None:
ignored = []
for observation_time in data.index:
for sensor in [s for s in data.columns if s not in ignored]:
# This line expects the Siemens PLC (SPS) to report only summer time. Change it if it fails.
obs_time = observation_time + timedelta(seconds=time.altzone) - timedelta(seconds=time.timezone)
message = {'phenomenonTime': obs_time.replace(tzinfo=pytz.UTC).isoformat(),
'resultTime': datetime.utcnow().replace(tzinfo=pytz.UTC).isoformat(),
'result': float(data.loc[observation_time, sensor]),
'Datastream': {'@iot.id': id_map[sensor]}}
# print(message)
producer.send(topic, message, key=str(message['Datastream']['@iot.id']).encode('utf-8'))
# block until all messages are sent
producer.flush()
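# Shape of a single published message (illustrative values only; the
# Datastream id comes from the SensorThings mapping):
#
#     {"phenomenonTime": "2017-06-01T10:15:00+00:00",
#      "resultTime": "2017-06-01T10:15:02.123456+00:00",
#      "result": 21.4,
#      "Datastream": {"@iot.id": 42}}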
def fetch_id_mapping(host, port, fallback):
"""
Fetches IDs from the SensorThings server and creates a dictionary with the proper ID mapping.
:param host: Host of SensorThings server.
:param port: Port of SensorThings server.
:param fallback: Fallback mapping in case of an error. If `None`, the actual error will be raised.
:return: `dict` mapping CM-specific IDs to global SensorThings IDs.
"""
mapping = dict()
try:
url = 'http://{}:{}/v1.0/Datastreams'.format(host, port)
while True:
url = url.replace('localhost', host).replace('8080', port) # replace wrong base url and port
datastreams = requests.get(url=url).json()
mapping.update({d['name']: d['@iot.id'] for d in datastreams['value']})
if '@iot.nextLink' not in datastreams:
break
url = datastreams['@iot.nextLink']
logger.info('Fetched id mapping: %s', mapping, extra={'sensorthings_ids': mapping})
except Exception as e:
if not fallback:
raise e
logger.warning('Could not fetch id mapping...')
return fallback
return mapping
if __name__ == '__main__':
update()
|
apache-2.0
|
zhushun0008/sms-tools
|
lectures/09-Sound-description/plots-code/hpcp.py
|
25
|
1194
|
import numpy as np
import matplotlib.pyplot as plt
import essentia.standard as ess
M = 1024
N = 1024
H = 512
fs = 44100
spectrum = ess.Spectrum(size=N)
window = ess.Windowing(size=M, type='hann')
spectralPeaks = ess.SpectralPeaks()
hpcp = ess.HPCP()
x = ess.MonoLoader(filename = '../../../sounds/cello-double.wav', sampleRate = fs)()
hpcps = []
for frame in ess.FrameGenerator(x, frameSize=M, hopSize=H, startFromZero=True):
mX = spectrum(window(frame))
spectralPeaks_freqs, spectralPeaks_mags = spectralPeaks(mX)
hpcp_vals = hpcp(spectralPeaks_freqs, spectralPeaks_mags)
hpcps.append(hpcp_vals)
hpcps = np.array(hpcps)
plt.figure(1, figsize=(9.5, 7))
plt.subplot(2,1,1)
plt.plot(np.arange(x.size)/float(fs), x, 'b')
plt.axis([0, x.size/float(fs), min(x), max(x)])
plt.ylabel('amplitude')
plt.title('x (cello-double.wav)')
plt.subplot(2,1,2)
numFrames = int(hpcps[:,0].size)
frmTime = H*np.arange(numFrames)/float(fs)
plt.pcolormesh(frmTime, np.arange(12), np.transpose(hpcps))
plt.ylabel('spectral bins')
plt.title('HPCP')
plt.autoscale(tight=True)
plt.tight_layout()
plt.savefig('hpcp.png')
plt.show()
|
agpl-3.0
|
judithfan/pix2svg
|
generative/tests/compare_test/rdm.py
|
2
|
4278
|
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import os
import sys
import json
import numpy as np
from tqdm import tqdm
from collections import defaultdict
import torch
import torch.nn.functional as F
from torch.autograd import Variable
from dataset import ExhaustiveDataset
from train import load_checkpoint
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('model_path', type=str, help='path to trained model')
parser.add_argument('--split', type=str, default='test', help='train|val|test|full')
parser.add_argument('--batch-size', type=int, default=64, help='size of minibatch [default: 64]')
parser.add_argument('--cuda', action='store_true', default=False)
args = parser.parse_args()
args.cuda = args.cuda and torch.cuda.is_available()
model = load_checkpoint(args.model_path, use_cuda=args.cuda)
model.eval()
if args.cuda:
model.cuda()
dataset = ExhaustiveDataset(layer=model.vgg_layer, split=args.split)
loader = torch.utils.data.DataLoader(dataset, batch_size=args.batch_size, shuffle=False)
object_order = dataset.object_order
rdm_further_sums = np.zeros((32, 32))
rdm_closer_sums = np.zeros((32, 32))
rdm_further_cnts = np.zeros(32)
rdm_closer_cnts = np.zeros(32)
pbar = tqdm(total=len(loader))
for batch_idx, (sketch, sketch_object, sketch_context, sketch_path) in enumerate(loader):
batch_size = len(sketch)
sketch = Variable(sketch, volatile=True)
sketch_object_ix = [object_order.index(sketch_object[i]) for i in xrange(batch_size)]
if args.cuda:
sketch = sketch.cuda()
pred_logits = []
photo_generator = dataset.gen_photos()
for photo, _, _ in photo_generator():
photo = Variable(photo)
if args.cuda:
photo = photo.cuda()
if model.vgg_layer == 'fc6':
photo = photo.repeat(batch_size, 1)
else:
photo = photo.repeat(batch_size, 1, 1, 1)
pred_logit = model(photo, sketch)
pred_logits.append(pred_logit)
pred_logits = torch.cat(pred_logits, dim=1)
pred = F.softmax(pred_logits, dim=1)
pred = pred.cpu().data.numpy()
for t in xrange(batch_size):
if sketch_context[0] == 'closer':
rdm_closer_sums[:, sketch_object_ix[t]] += pred[t]
rdm_closer_cnts[sketch_object_ix[t]] += 1
elif sketch_context[0] == 'further':
rdm_further_sums[:, sketch_object_ix[t]] += pred[t]
rdm_further_cnts[sketch_object_ix[t]] += 1
else:
raise Exception('Unrecognized context: %s.' % sketch_context[0])
pbar.update()
pbar.close()
for i in xrange(32):
rdm_further_sums[:, i] /= rdm_further_cnts[i]
rdm_closer_sums[:, i] /= rdm_closer_cnts[i]
import seaborn as sns; sns.set()
import matplotlib.pyplot as plt
plt.switch_backend('Agg')
plt.figure()
ax = sns.heatmap(rdm_further_sums)
fig = ax.get_figure()
fig.savefig('./rdm-further-%s.png' % model.vgg_layer)
plt.figure()
ax = sns.heatmap(rdm_closer_sums)
fig = ax.get_figure()
fig.savefig('./rdm-closer-%s.png' % model.vgg_layer)
plt.figure()
ax = sns.heatmap(rdm_closer_sums - rdm_further_sums)
fig = ax.get_figure()
fig.savefig('./rdm-diff-%s.png' % model.vgg_layer)
rdm_diff_sums = rdm_closer_sums - rdm_further_sums
rdm_diagonals = [rdm_diff_sums[i, i] for i in xrange(32)]
rdm_boxes = [rdm_diff_sums[:8, :8], rdm_diff_sums[8:16, 8:16],
rdm_diff_sums[16:24, 16:24], rdm_diff_sums[24:32, 24:32]]
rdm_off_diagonals = []
for rdm_box in rdm_boxes:
for i in xrange(8):
for j in xrange(8):
if i != j:
rdm_off_diagonals.append(rdm_box[i, j])
plt.figure()
plt.hist(rdm_diagonals)
plt.title('RDM Difference Diagonals')
plt.savefig('./rdm-diagonals.png')
plt.figure()
plt.hist(rdm_off_diagonals)
plt.title('RDM Difference Off-Diagonals')
plt.savefig('./rdm-off-diagonals.png')
|
mit
|
kevin-intel/scikit-learn
|
sklearn/linear_model/tests/test_sgd.py
|
2
|
71496
|
import pickle
import pytest
import numpy as np
from numpy.testing import assert_allclose
import scipy.sparse as sp
import joblib
from sklearn.utils._testing import assert_array_equal
from sklearn.utils._testing import assert_almost_equal
from sklearn.utils._testing import assert_array_almost_equal
from sklearn.utils._testing import ignore_warnings
from sklearn.utils.fixes import parse_version
from sklearn import linear_model, datasets, metrics
from sklearn.base import clone, is_classifier
from sklearn.svm import OneClassSVM
from sklearn.preprocessing import LabelEncoder, scale, MinMaxScaler
from sklearn.preprocessing import StandardScaler
from sklearn.kernel_approximation import Nystroem
from sklearn.pipeline import make_pipeline
from sklearn.exceptions import ConvergenceWarning
from sklearn.model_selection import StratifiedShuffleSplit, ShuffleSplit
from sklearn.linear_model import _sgd_fast as sgd_fast
from sklearn.model_selection import RandomizedSearchCV
def _update_kwargs(kwargs):
if "random_state" not in kwargs:
kwargs["random_state"] = 42
if "tol" not in kwargs:
kwargs["tol"] = None
if "max_iter" not in kwargs:
kwargs["max_iter"] = 5
class _SparseSGDClassifier(linear_model.SGDClassifier):
def fit(self, X, y, *args, **kw):
X = sp.csr_matrix(X)
return super().fit(X, y, *args, **kw)
def partial_fit(self, X, y, *args, **kw):
X = sp.csr_matrix(X)
return super().partial_fit(X, y, *args, **kw)
def decision_function(self, X):
X = sp.csr_matrix(X)
return super().decision_function(X)
def predict_proba(self, X):
X = sp.csr_matrix(X)
return super().predict_proba(X)
class _SparseSGDRegressor(linear_model.SGDRegressor):
def fit(self, X, y, *args, **kw):
X = sp.csr_matrix(X)
return linear_model.SGDRegressor.fit(self, X, y, *args, **kw)
def partial_fit(self, X, y, *args, **kw):
X = sp.csr_matrix(X)
return linear_model.SGDRegressor.partial_fit(self, X, y, *args, **kw)
def decision_function(self, X, *args, **kw):
# XXX untested as of v0.22
X = sp.csr_matrix(X)
return linear_model.SGDRegressor.decision_function(self, X, *args,
**kw)
class _SparseSGDOneClassSVM(linear_model.SGDOneClassSVM):
def fit(self, X, *args, **kw):
X = sp.csr_matrix(X)
return linear_model.SGDOneClassSVM.fit(self, X, *args, **kw)
def partial_fit(self, X, *args, **kw):
X = sp.csr_matrix(X)
return linear_model.SGDOneClassSVM.partial_fit(self, X, *args, **kw)
def decision_function(self, X, *args, **kw):
X = sp.csr_matrix(X)
return linear_model.SGDOneClassSVM.decision_function(self, X, *args,
**kw)
def SGDClassifier(**kwargs):
_update_kwargs(kwargs)
return linear_model.SGDClassifier(**kwargs)
def SGDRegressor(**kwargs):
_update_kwargs(kwargs)
return linear_model.SGDRegressor(**kwargs)
def SGDOneClassSVM(**kwargs):
_update_kwargs(kwargs)
return linear_model.SGDOneClassSVM(**kwargs)
def SparseSGDClassifier(**kwargs):
_update_kwargs(kwargs)
return _SparseSGDClassifier(**kwargs)
def SparseSGDRegressor(**kwargs):
_update_kwargs(kwargs)
return _SparseSGDRegressor(**kwargs)
def SparseSGDOneClassSVM(**kwargs):
_update_kwargs(kwargs)
return _SparseSGDOneClassSVM(**kwargs)
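# The wrappers above exist so that every parametrized test below exercises both
# the dense estimators and their CSR-converting counterparts with identical,
# deterministic defaults (random_state=42, tol=None, max_iter=5 unless a test
# overrides them).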
# Test Data
# test sample 1
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]])
Y = [1, 1, 1, 2, 2, 2]
T = np.array([[-1, -1], [2, 2], [3, 2]])
true_result = [1, 2, 2]
# test sample 2; string class labels
X2 = np.array([[-1, 1], [-0.75, 0.5], [-1.5, 1.5],
[1, 1], [0.75, 0.5], [1.5, 1.5],
[-1, -1], [0, -0.5], [1, -1]])
Y2 = ["one"] * 3 + ["two"] * 3 + ["three"] * 3
T2 = np.array([[-1.5, 0.5], [1, 2], [0, -2]])
true_result2 = ["one", "two", "three"]
# test sample 3
X3 = np.array([[1, 1, 0, 0, 0, 0], [1, 1, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0], [0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 1, 1], [0, 0, 0, 0, 1, 1],
[0, 0, 0, 1, 0, 0], [0, 0, 0, 1, 0, 0]])
Y3 = np.array([1, 1, 1, 1, 2, 2, 2, 2])
# test sample 4 - two more or less redundant feature groups
X4 = np.array([[1, 0.9, 0.8, 0, 0, 0], [1, .84, .98, 0, 0, 0],
[1, .96, .88, 0, 0, 0], [1, .91, .99, 0, 0, 0],
[0, 0, 0, .89, .91, 1], [0, 0, 0, .79, .84, 1],
[0, 0, 0, .91, .95, 1], [0, 0, 0, .93, 1, 1]])
Y4 = np.array([1, 1, 1, 1, 2, 2, 2, 2])
iris = datasets.load_iris()
# test sample 5 - test sample 1 as binary classification problem
X5 = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]])
Y5 = [1, 1, 1, 2, 2, 2]
true_result5 = [0, 1, 1]
###############################################################################
# Common Test Cases for classification and regression
# a simple implementation of ASGD to use for testing
# uses squared loss to find the gradient
def asgd(klass, X, y, eta, alpha, weight_init=None, intercept_init=0.0):
if weight_init is None:
weights = np.zeros(X.shape[1])
else:
weights = weight_init
average_weights = np.zeros(X.shape[1])
intercept = intercept_init
average_intercept = 0.0
decay = 1.0
# sparse data has a fixed decay of .01
if klass in (SparseSGDClassifier, SparseSGDRegressor):
decay = .01
for i, entry in enumerate(X):
p = np.dot(entry, weights)
p += intercept
gradient = p - y[i]
weights *= 1.0 - (eta * alpha)
weights += -(eta * gradient * entry)
intercept += -(eta * gradient) * decay
average_weights *= i
average_weights += weights
average_weights /= i + 1.0
average_intercept *= i
average_intercept += intercept
average_intercept /= i + 1.0
return average_weights, average_intercept
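# Illustrative call (sketch, mirroring how the averaging tests below use this
# reference implementation):
#
#     w_avg, b_avg = asgd(SGDRegressor, X, np.array(Y, dtype=float),
#                         eta=0.01, alpha=0.1)
#
# An estimator fitted with average=True, learning_rate='constant' and
# shuffle=False on the same data is then expected to match (w_avg, b_avg).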
@pytest.mark.parametrize('klass', [SGDClassifier, SparseSGDClassifier,
SGDRegressor, SparseSGDRegressor])
def test_sgd_bad_alpha(klass):
# Check whether expected ValueError on bad alpha
with pytest.raises(ValueError):
klass(alpha=-.1)
@pytest.mark.parametrize('klass', [SGDClassifier, SparseSGDClassifier,
SGDRegressor, SparseSGDRegressor])
def test_sgd_bad_penalty(klass):
# Check whether expected ValueError on bad penalty
with pytest.raises(ValueError):
klass(penalty='foobar', l1_ratio=0.85)
@pytest.mark.parametrize('klass', [SGDClassifier, SparseSGDClassifier,
SGDRegressor, SparseSGDRegressor])
def test_sgd_bad_loss(klass):
# Check whether expected ValueError on bad loss
with pytest.raises(ValueError):
klass(loss="foobar")
def _test_warm_start(klass, X, Y, lr):
# Test that explicit warm restart...
clf = klass(alpha=0.01, eta0=0.01, shuffle=False,
learning_rate=lr)
clf.fit(X, Y)
clf2 = klass(alpha=0.001, eta0=0.01, shuffle=False,
learning_rate=lr)
clf2.fit(X, Y,
coef_init=clf.coef_.copy(),
intercept_init=clf.intercept_.copy())
# ... and implicit warm restart are equivalent.
clf3 = klass(alpha=0.01, eta0=0.01, shuffle=False,
warm_start=True, learning_rate=lr)
clf3.fit(X, Y)
assert clf3.t_ == clf.t_
assert_array_almost_equal(clf3.coef_, clf.coef_)
clf3.set_params(alpha=0.001)
clf3.fit(X, Y)
assert clf3.t_ == clf2.t_
assert_array_almost_equal(clf3.coef_, clf2.coef_)
@pytest.mark.parametrize('klass', [SGDClassifier, SparseSGDClassifier,
SGDRegressor, SparseSGDRegressor])
@pytest.mark.parametrize('lr',
["constant", "optimal", "invscaling", "adaptive"])
def test_warm_start(klass, lr):
_test_warm_start(klass, X, Y, lr)
@pytest.mark.parametrize('klass', [SGDClassifier, SparseSGDClassifier,
SGDRegressor, SparseSGDRegressor])
def test_input_format(klass):
# Input format tests.
clf = klass(alpha=0.01, shuffle=False)
clf.fit(X, Y)
Y_ = np.array(Y)[:, np.newaxis]
Y_ = np.c_[Y_, Y_]
with pytest.raises(ValueError):
clf.fit(X, Y_)
@pytest.mark.parametrize('klass', [SGDClassifier, SparseSGDClassifier,
SGDRegressor, SparseSGDRegressor])
def test_clone(klass):
# Test whether clone works ok.
clf = klass(alpha=0.01, penalty='l1')
clf = clone(clf)
clf.set_params(penalty='l2')
clf.fit(X, Y)
clf2 = klass(alpha=0.01, penalty='l2')
clf2.fit(X, Y)
assert_array_equal(clf.coef_, clf2.coef_)
@pytest.mark.parametrize('klass', [SGDClassifier, SparseSGDClassifier,
SGDRegressor, SparseSGDRegressor,
SGDOneClassSVM, SparseSGDOneClassSVM])
def test_plain_has_no_average_attr(klass):
clf = klass(average=True, eta0=.01)
clf.fit(X, Y)
assert hasattr(clf, '_average_coef')
assert hasattr(clf, '_average_intercept')
assert hasattr(clf, '_standard_intercept')
assert hasattr(clf, '_standard_coef')
clf = klass()
clf.fit(X, Y)
assert not hasattr(clf, '_average_coef')
assert not hasattr(clf, '_average_intercept')
assert not hasattr(clf, '_standard_intercept')
assert not hasattr(clf, '_standard_coef')
@pytest.mark.parametrize('klass', [SGDClassifier, SparseSGDClassifier,
SGDRegressor, SparseSGDRegressor,
SGDOneClassSVM, SparseSGDOneClassSVM])
def test_late_onset_averaging_not_reached(klass):
clf1 = klass(average=600)
clf2 = klass()
for _ in range(100):
if is_classifier(clf1):
clf1.partial_fit(X, Y, classes=np.unique(Y))
clf2.partial_fit(X, Y, classes=np.unique(Y))
else:
clf1.partial_fit(X, Y)
clf2.partial_fit(X, Y)
assert_array_almost_equal(clf1.coef_, clf2.coef_, decimal=16)
if klass in [SGDClassifier, SparseSGDClassifier, SGDRegressor,
SparseSGDRegressor]:
assert_almost_equal(clf1.intercept_, clf2.intercept_, decimal=16)
elif klass in [SGDOneClassSVM, SparseSGDOneClassSVM]:
assert_allclose(clf1.offset_, clf2.offset_)
@pytest.mark.parametrize('klass', [SGDClassifier, SparseSGDClassifier,
SGDRegressor, SparseSGDRegressor])
def test_late_onset_averaging_reached(klass):
eta0 = .001
alpha = .0001
Y_encode = np.array(Y)
Y_encode[Y_encode == 1] = -1.0
Y_encode[Y_encode == 2] = 1.0
clf1 = klass(average=7, learning_rate="constant",
loss='squared_error', eta0=eta0,
alpha=alpha, max_iter=2, shuffle=False)
clf2 = klass(average=0, learning_rate="constant",
loss='squared_error', eta0=eta0,
alpha=alpha, max_iter=1, shuffle=False)
clf1.fit(X, Y_encode)
clf2.fit(X, Y_encode)
average_weights, average_intercept = \
asgd(klass, X, Y_encode, eta0, alpha,
weight_init=clf2.coef_.ravel(),
intercept_init=clf2.intercept_)
assert_array_almost_equal(clf1.coef_.ravel(),
average_weights.ravel(),
decimal=16)
assert_almost_equal(clf1.intercept_, average_intercept, decimal=16)
@pytest.mark.parametrize('klass', [SGDClassifier, SparseSGDClassifier,
SGDRegressor, SparseSGDRegressor])
def test_sgd_bad_alpha_for_optimal_learning_rate(klass):
# Check whether expected ValueError on bad alpha, i.e. 0
# since alpha is used to compute the optimal learning rate
with pytest.raises(ValueError):
klass(alpha=0, learning_rate="optimal")
@pytest.mark.parametrize('klass', [SGDClassifier, SparseSGDClassifier,
SGDRegressor, SparseSGDRegressor])
def test_early_stopping(klass):
X = iris.data[iris.target > 0]
Y = iris.target[iris.target > 0]
for early_stopping in [True, False]:
max_iter = 1000
clf = klass(early_stopping=early_stopping, tol=1e-3,
max_iter=max_iter).fit(X, Y)
assert clf.n_iter_ < max_iter
@pytest.mark.parametrize('klass', [SGDClassifier, SparseSGDClassifier,
SGDRegressor, SparseSGDRegressor])
def test_adaptive_longer_than_constant(klass):
clf1 = klass(learning_rate="adaptive", eta0=0.01, tol=1e-3,
max_iter=100)
clf1.fit(iris.data, iris.target)
clf2 = klass(learning_rate="constant", eta0=0.01, tol=1e-3,
max_iter=100)
clf2.fit(iris.data, iris.target)
assert clf1.n_iter_ > clf2.n_iter_
@pytest.mark.parametrize('klass', [SGDClassifier, SparseSGDClassifier,
SGDRegressor, SparseSGDRegressor])
def test_validation_set_not_used_for_training(klass):
X, Y = iris.data, iris.target
validation_fraction = 0.4
seed = 42
shuffle = False
max_iter = 10
clf1 = klass(early_stopping=True,
random_state=np.random.RandomState(seed),
validation_fraction=validation_fraction,
learning_rate='constant', eta0=0.01,
tol=None, max_iter=max_iter, shuffle=shuffle)
clf1.fit(X, Y)
assert clf1.n_iter_ == max_iter
clf2 = klass(early_stopping=False,
random_state=np.random.RandomState(seed),
learning_rate='constant', eta0=0.01,
tol=None, max_iter=max_iter, shuffle=shuffle)
if is_classifier(clf2):
cv = StratifiedShuffleSplit(test_size=validation_fraction,
random_state=seed)
else:
cv = ShuffleSplit(test_size=validation_fraction,
random_state=seed)
idx_train, idx_val = next(cv.split(X, Y))
idx_train = np.sort(idx_train) # remove shuffling
clf2.fit(X[idx_train], Y[idx_train])
assert clf2.n_iter_ == max_iter
assert_array_equal(clf1.coef_, clf2.coef_)
@pytest.mark.parametrize('klass', [SGDClassifier, SparseSGDClassifier,
SGDRegressor, SparseSGDRegressor])
def test_n_iter_no_change(klass):
X, Y = iris.data, iris.target
# test that n_iter_ increases monotonically with n_iter_no_change
for early_stopping in [True, False]:
n_iter_list = [klass(early_stopping=early_stopping,
n_iter_no_change=n_iter_no_change,
tol=1e-4, max_iter=1000
).fit(X, Y).n_iter_
for n_iter_no_change in [2, 3, 10]]
assert_array_equal(n_iter_list, sorted(n_iter_list))
@pytest.mark.parametrize('klass', [SGDClassifier, SparseSGDClassifier,
SGDRegressor, SparseSGDRegressor])
def test_not_enough_sample_for_early_stopping(klass):
# test an error is raised if the training or validation set is empty
clf = klass(early_stopping=True, validation_fraction=0.99)
with pytest.raises(ValueError):
clf.fit(X3, Y3)
###############################################################################
# Classification Test Case
@pytest.mark.parametrize('klass', [SGDClassifier, SparseSGDClassifier])
def test_sgd_clf(klass):
# Check that SGD gives any results :-)
for loss in ("hinge", "squared_hinge", "log", "modified_huber"):
clf = klass(penalty='l2', alpha=0.01, fit_intercept=True,
loss=loss, max_iter=10, shuffle=True)
clf.fit(X, Y)
# assert_almost_equal(clf.coef_[0], clf.coef_[1], decimal=7)
assert_array_equal(clf.predict(T), true_result)
@pytest.mark.parametrize('klass', [SGDClassifier, SparseSGDClassifier])
def test_sgd_bad_l1_ratio(klass):
# Check whether expected ValueError on bad l1_ratio
with pytest.raises(ValueError):
klass(l1_ratio=1.1)
@pytest.mark.parametrize('klass', [SGDClassifier, SparseSGDClassifier,
SGDOneClassSVM, SparseSGDOneClassSVM])
def test_sgd_bad_learning_rate_schedule(klass):
# Check whether expected ValueError on bad learning_rate
with pytest.raises(ValueError):
klass(learning_rate="<unknown>")
@pytest.mark.parametrize('klass', [SGDClassifier, SparseSGDClassifier,
SGDOneClassSVM, SparseSGDOneClassSVM])
def test_sgd_bad_eta0(klass):
# Check whether expected ValueError on bad eta0
with pytest.raises(ValueError):
klass(eta0=0, learning_rate="constant")
@pytest.mark.parametrize('klass', [SGDClassifier, SparseSGDClassifier,
SGDOneClassSVM, SparseSGDOneClassSVM])
def test_sgd_max_iter_param(klass):
# Test parameter validity check
with pytest.raises(ValueError):
klass(max_iter=-10000)
@pytest.mark.parametrize('klass', [SGDClassifier, SparseSGDClassifier,
SGDOneClassSVM, SparseSGDOneClassSVM])
def test_sgd_shuffle_param(klass):
# Test parameter validity check
with pytest.raises(ValueError):
klass(shuffle="false")
@pytest.mark.parametrize('klass', [SGDClassifier, SparseSGDClassifier])
def test_sgd_early_stopping_param(klass):
# Test parameter validity check
with pytest.raises(ValueError):
klass(early_stopping="false")
@pytest.mark.parametrize('klass', [SGDClassifier, SparseSGDClassifier])
def test_sgd_validation_fraction(klass):
# Test parameter validity check
with pytest.raises(ValueError):
klass(validation_fraction=-.1)
@pytest.mark.parametrize('klass', [SGDClassifier, SparseSGDClassifier])
def test_sgd_n_iter_no_change(klass):
# Test parameter validity check
with pytest.raises(ValueError):
klass(n_iter_no_change=0)
@pytest.mark.parametrize('klass', [SGDClassifier, SparseSGDClassifier,
SGDOneClassSVM, SparseSGDOneClassSVM])
def test_argument_coef(klass):
# Checks coef_init not allowed as model argument (only fit)
# Provided coef_ does not match dataset
with pytest.raises(TypeError):
klass(coef_init=np.zeros((3,)))
@pytest.mark.parametrize('klass', [SGDClassifier, SparseSGDClassifier,
SGDOneClassSVM, SparseSGDOneClassSVM])
def test_provide_coef(klass):
# Checks coef_init shape for the warm starts
# Provided coef_ does not match dataset.
with pytest.raises(ValueError):
klass().fit(X, Y, coef_init=np.zeros((3,)))
@pytest.mark.parametrize('klass', [SGDClassifier, SparseSGDClassifier,
SGDOneClassSVM, SparseSGDOneClassSVM])
def test_set_intercept(klass):
# Checks intercept_ shape for the warm starts
# Provided intercept_ does not match dataset.
if klass in [SGDClassifier, SparseSGDClassifier]:
with pytest.raises(ValueError):
klass().fit(X, Y, intercept_init=np.zeros((3,)))
elif klass in [SGDOneClassSVM, SparseSGDOneClassSVM]:
with pytest.raises(ValueError):
klass().fit(X, Y, offset_init=np.zeros((3,)))
@pytest.mark.parametrize('klass', [SGDClassifier, SparseSGDClassifier])
def test_sgd_early_stopping_with_partial_fit(klass):
# Test parameter validity check
with pytest.raises(ValueError):
klass(early_stopping=True).partial_fit(X, Y)
@pytest.mark.parametrize('klass', [SGDClassifier, SparseSGDClassifier])
def test_set_intercept_binary(klass):
# Checks intercept_ shape for the warm starts in binary case
klass().fit(X5, Y5, intercept_init=0)
@pytest.mark.parametrize('klass', [SGDClassifier, SparseSGDClassifier])
def test_average_binary_computed_correctly(klass):
# Checks the SGDClassifier correctly computes the average weights
eta = .1
alpha = 2.
n_samples = 20
n_features = 10
rng = np.random.RandomState(0)
X = rng.normal(size=(n_samples, n_features))
w = rng.normal(size=n_features)
clf = klass(loss='squared_error',
learning_rate='constant',
eta0=eta, alpha=alpha,
fit_intercept=True,
max_iter=1, average=True, shuffle=False)
# simple linear function without noise
y = np.dot(X, w)
y = np.sign(y)
clf.fit(X, y)
average_weights, average_intercept = asgd(klass, X, y, eta, alpha)
average_weights = average_weights.reshape(1, -1)
assert_array_almost_equal(clf.coef_,
average_weights,
decimal=14)
assert_almost_equal(clf.intercept_, average_intercept, decimal=14)
@pytest.mark.parametrize('klass', [SGDClassifier, SparseSGDClassifier])
def test_set_intercept_to_intercept(klass):
# Checks intercept_ shape consistency for the warm starts
# Inconsistent intercept_ shape.
clf = klass().fit(X5, Y5)
klass().fit(X5, Y5, intercept_init=clf.intercept_)
clf = klass().fit(X, Y)
klass().fit(X, Y, intercept_init=clf.intercept_)
@pytest.mark.parametrize('klass', [SGDClassifier, SparseSGDClassifier])
def test_sgd_at_least_two_labels(klass):
# Target must have at least two labels
clf = klass(alpha=0.01, max_iter=20)
with pytest.raises(ValueError):
clf.fit(X2, np.ones(9))
@pytest.mark.parametrize('klass', [SGDClassifier, SparseSGDClassifier])
def test_partial_fit_weight_class_balanced(klass):
# partial_fit with class_weight='balanced' not supported
regex = (r"class_weight 'balanced' is not supported for "
r"partial_fit\. In order to use 'balanced' weights, "
r"use compute_class_weight\('balanced', classes=classes, y=y\). "
r"In place of y you can us a large enough sample "
r"of the full training set target to properly "
r"estimate the class frequency distributions\. "
r"Pass the resulting weights as the class_weight "
r"parameter\.")
with pytest.raises(ValueError, match=regex):
klass(class_weight='balanced').partial_fit(X, Y, classes=np.unique(Y))
@pytest.mark.parametrize('klass', [SGDClassifier, SparseSGDClassifier])
def test_sgd_multiclass(klass):
# Multi-class test case
clf = klass(alpha=0.01, max_iter=20).fit(X2, Y2)
assert clf.coef_.shape == (3, 2)
assert clf.intercept_.shape == (3,)
assert clf.decision_function([[0, 0]]).shape == (1, 3)
pred = clf.predict(T2)
assert_array_equal(pred, true_result2)
@pytest.mark.parametrize('klass', [SGDClassifier, SparseSGDClassifier])
def test_sgd_multiclass_average(klass):
eta = .001
alpha = .01
# Multi-class average test case
clf = klass(loss='squared_error',
learning_rate='constant',
eta0=eta, alpha=alpha,
fit_intercept=True,
max_iter=1, average=True, shuffle=False)
np_Y2 = np.array(Y2)
clf.fit(X2, np_Y2)
classes = np.unique(np_Y2)
for i, cl in enumerate(classes):
y_i = np.ones(np_Y2.shape[0])
y_i[np_Y2 != cl] = -1
average_coef, average_intercept = asgd(klass, X2, y_i, eta, alpha)
assert_array_almost_equal(average_coef, clf.coef_[i], decimal=16)
assert_almost_equal(average_intercept,
clf.intercept_[i],
decimal=16)
@pytest.mark.parametrize('klass', [SGDClassifier, SparseSGDClassifier])
def test_sgd_multiclass_with_init_coef(klass):
# Multi-class test case
clf = klass(alpha=0.01, max_iter=20)
clf.fit(X2, Y2, coef_init=np.zeros((3, 2)),
intercept_init=np.zeros(3))
assert clf.coef_.shape == (3, 2)
assert clf.intercept_.shape == (3,)
pred = clf.predict(T2)
assert_array_equal(pred, true_result2)
@pytest.mark.parametrize('klass', [SGDClassifier, SparseSGDClassifier])
def test_sgd_multiclass_njobs(klass):
# Multi-class test case with multi-core support
clf = klass(alpha=0.01, max_iter=20, n_jobs=2).fit(X2, Y2)
assert clf.coef_.shape == (3, 2)
assert clf.intercept_.shape == (3,)
assert clf.decision_function([[0, 0]]).shape == (1, 3)
pred = clf.predict(T2)
assert_array_equal(pred, true_result2)
@pytest.mark.parametrize('klass', [SGDClassifier, SparseSGDClassifier])
def test_set_coef_multiclass(klass):
# Checks coef_init and intercept_init shape for multi-class
# problems
# Provided coef_ does not match dataset
clf = klass()
with pytest.raises(ValueError):
clf.fit(X2, Y2, coef_init=np.zeros((2, 2)))
# Provided coef_ does match dataset
clf = klass().fit(X2, Y2, coef_init=np.zeros((3, 2)))
# Provided intercept_ does not match dataset
clf = klass()
with pytest.raises(ValueError):
clf.fit(X2, Y2, intercept_init=np.zeros((1,)))
# Provided intercept_ does match dataset.
clf = klass().fit(X2, Y2, intercept_init=np.zeros((3,)))
# TODO: Remove filterwarnings in v1.2.
@pytest.mark.filterwarnings("ignore:.*squared_loss.*:FutureWarning")
@pytest.mark.parametrize('klass', [SGDClassifier, SparseSGDClassifier])
def test_sgd_predict_proba_method_access(klass):
# Checks that SGDClassifier predict_proba and predict_log_proba methods
# can either be accessed or raise an appropriate error message
# otherwise. See
# https://github.com/scikit-learn/scikit-learn/issues/10938 for more
# details.
for loss in linear_model.SGDClassifier.loss_functions:
clf = SGDClassifier(loss=loss)
if loss in ('log', 'modified_huber'):
assert hasattr(clf, 'predict_proba')
assert hasattr(clf, 'predict_log_proba')
else:
message = ("probability estimates are not "
"available for loss={!r}".format(loss))
assert not hasattr(clf, 'predict_proba')
assert not hasattr(clf, 'predict_log_proba')
with pytest.raises(AttributeError,
match=message):
clf.predict_proba
with pytest.raises(AttributeError,
match=message):
clf.predict_log_proba
@pytest.mark.parametrize('klass', [SGDClassifier, SparseSGDClassifier])
def test_sgd_proba(klass):
# Check SGD.predict_proba
# Hinge loss does not allow for conditional prob estimate.
# We cannot use the factory here, because it defines predict_proba
# anyway.
clf = SGDClassifier(loss="hinge", alpha=0.01,
max_iter=10, tol=None).fit(X, Y)
assert not hasattr(clf, "predict_proba")
assert not hasattr(clf, "predict_log_proba")
# log and modified_huber losses can output probability estimates
# binary case
for loss in ["log", "modified_huber"]:
clf = klass(loss=loss, alpha=0.01, max_iter=10)
clf.fit(X, Y)
p = clf.predict_proba([[3, 2]])
assert p[0, 1] > 0.5
p = clf.predict_proba([[-1, -1]])
assert p[0, 1] < 0.5
p = clf.predict_log_proba([[3, 2]])
assert p[0, 1] > p[0, 0]
p = clf.predict_log_proba([[-1, -1]])
assert p[0, 1] < p[0, 0]
# log loss multiclass probability estimates
clf = klass(loss="log", alpha=0.01, max_iter=10).fit(X2, Y2)
d = clf.decision_function([[.1, -.1], [.3, .2]])
p = clf.predict_proba([[.1, -.1], [.3, .2]])
assert_array_equal(np.argmax(p, axis=1), np.argmax(d, axis=1))
assert_almost_equal(p[0].sum(), 1)
assert np.all(p[0] >= 0)
p = clf.predict_proba([[-1, -1]])
d = clf.decision_function([[-1, -1]])
assert_array_equal(np.argsort(p[0]), np.argsort(d[0]))
lp = clf.predict_log_proba([[3, 2]])
p = clf.predict_proba([[3, 2]])
assert_array_almost_equal(np.log(p), lp)
lp = clf.predict_log_proba([[-1, -1]])
p = clf.predict_proba([[-1, -1]])
assert_array_almost_equal(np.log(p), lp)
# Modified Huber multiclass probability estimates; requires a separate
# test because the hard zero/one probabilities may destroy the
# ordering present in decision_function output.
clf = klass(loss="modified_huber", alpha=0.01, max_iter=10)
clf.fit(X2, Y2)
d = clf.decision_function([[3, 2]])
p = clf.predict_proba([[3, 2]])
if klass != SparseSGDClassifier:
assert np.argmax(d, axis=1) == np.argmax(p, axis=1)
else: # XXX the sparse test gets a different X2 (?)
assert np.argmin(d, axis=1) == np.argmin(p, axis=1)
# the following sample produces decision_function values < -1,
# which would cause naive normalization to fail (see comment
# in SGDClassifier.predict_proba)
x = X.mean(axis=0)
d = clf.decision_function([x])
if np.all(d < -1): # XXX not true in sparse test case (why?)
p = clf.predict_proba([x])
assert_array_almost_equal(p[0], [1 / 3.] * 3)
@pytest.mark.parametrize('klass', [SGDClassifier, SparseSGDClassifier])
def test_sgd_l1(klass):
# Test L1 regularization
n = len(X4)
rng = np.random.RandomState(13)
idx = np.arange(n)
rng.shuffle(idx)
X = X4[idx, :]
Y = Y4[idx]
clf = klass(penalty='l1', alpha=.2, fit_intercept=False,
max_iter=2000, tol=None, shuffle=False)
clf.fit(X, Y)
assert_array_equal(clf.coef_[0, 1:-1], np.zeros((4,)))
pred = clf.predict(X)
assert_array_equal(pred, Y)
# test sparsify with dense inputs
clf.sparsify()
assert sp.issparse(clf.coef_)
pred = clf.predict(X)
assert_array_equal(pred, Y)
# pickle and unpickle with sparse coef_
clf = pickle.loads(pickle.dumps(clf))
assert sp.issparse(clf.coef_)
pred = clf.predict(X)
assert_array_equal(pred, Y)
@pytest.mark.parametrize('klass', [SGDClassifier, SparseSGDClassifier])
def test_class_weights(klass):
# Test class weights.
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y = [1, 1, 1, -1, -1]
clf = klass(alpha=0.1, max_iter=1000, fit_intercept=False,
class_weight=None)
clf.fit(X, y)
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([1]))
# we give a small weights to class 1
clf = klass(alpha=0.1, max_iter=1000, fit_intercept=False,
class_weight={1: 0.001})
clf.fit(X, y)
# now the hyperplane should rotate clock-wise and
# the prediction on this point should shift
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([-1]))
@pytest.mark.parametrize('klass', [SGDClassifier, SparseSGDClassifier])
def test_equal_class_weight(klass):
# Test if equal class weights approx. equals no class weights.
X = [[1, 0], [1, 0], [0, 1], [0, 1]]
y = [0, 0, 1, 1]
clf = klass(alpha=0.1, max_iter=1000, class_weight=None)
clf.fit(X, y)
X = [[1, 0], [0, 1]]
y = [0, 1]
clf_weighted = klass(alpha=0.1, max_iter=1000,
class_weight={0: 0.5, 1: 0.5})
clf_weighted.fit(X, y)
# should be similar up to some epsilon due to learning rate schedule
assert_almost_equal(clf.coef_, clf_weighted.coef_, decimal=2)
@pytest.mark.parametrize('klass', [SGDClassifier, SparseSGDClassifier])
def test_wrong_class_weight_label(klass):
# ValueError due to not existing class label.
clf = klass(alpha=0.1, max_iter=1000, class_weight={0: 0.5})
with pytest.raises(ValueError):
clf.fit(X, Y)
@pytest.mark.parametrize('klass', [SGDClassifier, SparseSGDClassifier])
def test_wrong_class_weight_format(klass):
# ValueError due to wrong class_weight argument type.
clf = klass(alpha=0.1, max_iter=1000, class_weight=[0.5])
with pytest.raises(ValueError):
clf.fit(X, Y)
@pytest.mark.parametrize('klass', [SGDClassifier, SparseSGDClassifier])
def test_weights_multiplied(klass):
# Tests that class_weight and sample_weight are multiplicative
class_weights = {1: .6, 2: .3}
rng = np.random.RandomState(0)
sample_weights = rng.random_sample(Y4.shape[0])
multiplied_together = np.copy(sample_weights)
multiplied_together[Y4 == 1] *= class_weights[1]
multiplied_together[Y4 == 2] *= class_weights[2]
clf1 = klass(alpha=0.1, max_iter=20, class_weight=class_weights)
clf2 = klass(alpha=0.1, max_iter=20)
clf1.fit(X4, Y4, sample_weight=sample_weights)
clf2.fit(X4, Y4, sample_weight=multiplied_together)
assert_almost_equal(clf1.coef_, clf2.coef_)
@pytest.mark.parametrize('klass', [SGDClassifier, SparseSGDClassifier])
def test_balanced_weight(klass):
# Test class weights for imbalanced data
# compute reference metrics on iris dataset that is quite balanced by
# default
X, y = iris.data, iris.target
X = scale(X)
idx = np.arange(X.shape[0])
rng = np.random.RandomState(6)
rng.shuffle(idx)
X = X[idx]
y = y[idx]
clf = klass(alpha=0.0001, max_iter=1000,
class_weight=None, shuffle=False).fit(X, y)
f1 = metrics.f1_score(y, clf.predict(X), average='weighted')
assert_almost_equal(f1, 0.96, decimal=1)
# make the same prediction using balanced class_weight
clf_balanced = klass(alpha=0.0001, max_iter=1000,
class_weight="balanced",
shuffle=False).fit(X, y)
f1 = metrics.f1_score(y, clf_balanced.predict(X), average='weighted')
assert_almost_equal(f1, 0.96, decimal=1)
# Make sure that in the balanced case it does not change anything
# to use "balanced"
assert_array_almost_equal(clf.coef_, clf_balanced.coef_, 6)
# build an very very imbalanced dataset out of iris data
X_0 = X[y == 0, :]
y_0 = y[y == 0]
X_imbalanced = np.vstack([X] + [X_0] * 10)
y_imbalanced = np.concatenate([y] + [y_0] * 10)
# fit a model on the imbalanced data without class weight info
clf = klass(max_iter=1000, class_weight=None, shuffle=False)
clf.fit(X_imbalanced, y_imbalanced)
y_pred = clf.predict(X)
assert metrics.f1_score(y, y_pred, average='weighted') < 0.96
# fit a model with balanced class_weight enabled
clf = klass(max_iter=1000, class_weight="balanced",
shuffle=False)
clf.fit(X_imbalanced, y_imbalanced)
y_pred = clf.predict(X)
assert metrics.f1_score(y, y_pred, average='weighted') > 0.96
@pytest.mark.parametrize('klass', [SGDClassifier, SparseSGDClassifier])
def test_sample_weights(klass):
# Test weights on individual samples
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y = [1, 1, 1, -1, -1]
clf = klass(alpha=0.1, max_iter=1000, fit_intercept=False)
clf.fit(X, y)
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([1]))
# we give a small weights to class 1
clf.fit(X, y, sample_weight=[0.001] * 3 + [1] * 2)
# now the hyperplane should rotate clock-wise and
# the prediction on this point should shift
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([-1]))
@pytest.mark.parametrize('klass', [SGDClassifier, SparseSGDClassifier,
SGDOneClassSVM, SparseSGDOneClassSVM])
def test_wrong_sample_weights(klass):
# Test if ValueError is raised if sample_weight has wrong shape
if klass in [SGDClassifier, SparseSGDClassifier]:
clf = klass(alpha=0.1, max_iter=1000, fit_intercept=False)
elif klass in [SGDOneClassSVM, SparseSGDOneClassSVM]:
clf = klass(nu=0.1, max_iter=1000, fit_intercept=False)
# provided sample_weight too long
with pytest.raises(ValueError):
clf.fit(X, Y, sample_weight=np.arange(7))
@pytest.mark.parametrize('klass', [SGDClassifier, SparseSGDClassifier])
def test_partial_fit_exception(klass):
clf = klass(alpha=0.01)
# classes was not specified
with pytest.raises(ValueError):
clf.partial_fit(X3, Y3)
@pytest.mark.parametrize('klass', [SGDClassifier, SparseSGDClassifier])
def test_partial_fit_binary(klass):
third = X.shape[0] // 3
clf = klass(alpha=0.01)
classes = np.unique(Y)
clf.partial_fit(X[:third], Y[:third], classes=classes)
assert clf.coef_.shape == (1, X.shape[1])
assert clf.intercept_.shape == (1,)
assert clf.decision_function([[0, 0]]).shape == (1, )
id1 = id(clf.coef_.data)
clf.partial_fit(X[third:], Y[third:])
id2 = id(clf.coef_.data)
# check that coef_ hasn't been re-allocated
assert id1 == id2
y_pred = clf.predict(T)
assert_array_equal(y_pred, true_result)
@pytest.mark.parametrize('klass', [SGDClassifier, SparseSGDClassifier])
def test_partial_fit_multiclass(klass):
third = X2.shape[0] // 3
clf = klass(alpha=0.01)
classes = np.unique(Y2)
clf.partial_fit(X2[:third], Y2[:third], classes=classes)
assert clf.coef_.shape == (3, X2.shape[1])
assert clf.intercept_.shape == (3,)
assert clf.decision_function([[0, 0]]).shape == (1, 3)
id1 = id(clf.coef_.data)
clf.partial_fit(X2[third:], Y2[third:])
id2 = id(clf.coef_.data)
# check that coef_ hasn't been re-allocated
assert id1 == id2
@pytest.mark.parametrize('klass', [SGDClassifier, SparseSGDClassifier])
def test_partial_fit_multiclass_average(klass):
third = X2.shape[0] // 3
clf = klass(alpha=0.01, average=X2.shape[0])
classes = np.unique(Y2)
clf.partial_fit(X2[:third], Y2[:third], classes=classes)
assert clf.coef_.shape == (3, X2.shape[1])
assert clf.intercept_.shape == (3,)
clf.partial_fit(X2[third:], Y2[third:])
assert clf.coef_.shape == (3, X2.shape[1])
assert clf.intercept_.shape == (3,)
@pytest.mark.parametrize('klass', [SGDClassifier, SparseSGDClassifier])
def test_fit_then_partial_fit(klass):
# Partial_fit should work after initial fit in the multiclass case.
# Non-regression test for #2496; fit would previously produce a
# Fortran-ordered coef_ that subsequent partial_fit couldn't handle.
clf = klass()
clf.fit(X2, Y2)
clf.partial_fit(X2, Y2) # no exception here
@pytest.mark.parametrize('klass', [SGDClassifier, SparseSGDClassifier])
@pytest.mark.parametrize('lr',
["constant", "optimal", "invscaling", "adaptive"])
def test_partial_fit_equal_fit_classif(klass, lr):
for X_, Y_, T_ in ((X, Y, T), (X2, Y2, T2)):
clf = klass(alpha=0.01, eta0=0.01, max_iter=2,
learning_rate=lr, shuffle=False)
clf.fit(X_, Y_)
y_pred = clf.decision_function(T_)
t = clf.t_
classes = np.unique(Y_)
clf = klass(alpha=0.01, eta0=0.01, learning_rate=lr,
shuffle=False)
for i in range(2):
clf.partial_fit(X_, Y_, classes=classes)
y_pred2 = clf.decision_function(T_)
assert clf.t_ == t
assert_array_almost_equal(y_pred, y_pred2, decimal=2)
@pytest.mark.parametrize('klass', [SGDClassifier, SparseSGDClassifier])
def test_regression_losses(klass):
random_state = np.random.RandomState(1)
clf = klass(alpha=0.01, learning_rate="constant",
eta0=0.1, loss="epsilon_insensitive",
random_state=random_state)
clf.fit(X, Y)
assert 1.0 == np.mean(clf.predict(X) == Y)
clf = klass(alpha=0.01, learning_rate="constant",
eta0=0.1, loss="squared_epsilon_insensitive",
random_state=random_state)
clf.fit(X, Y)
assert 1.0 == np.mean(clf.predict(X) == Y)
clf = klass(alpha=0.01, loss="huber", random_state=random_state)
clf.fit(X, Y)
assert 1.0 == np.mean(clf.predict(X) == Y)
clf = klass(alpha=0.01, learning_rate="constant", eta0=0.01,
loss="squared_error", random_state=random_state)
clf.fit(X, Y)
assert 1.0 == np.mean(clf.predict(X) == Y)
@pytest.mark.parametrize('klass', [SGDClassifier, SparseSGDClassifier])
def test_warm_start_multiclass(klass):
_test_warm_start(klass, X2, Y2, "optimal")
@pytest.mark.parametrize('klass', [SGDClassifier, SparseSGDClassifier])
def test_multiple_fit(klass):
# Test multiple calls of fit w/ different shaped inputs.
clf = klass(alpha=0.01, shuffle=False)
clf.fit(X, Y)
assert hasattr(clf, "coef_")
# Non-regression test: try fitting with a different label set.
y = [["ham", "spam"][i] for i in LabelEncoder().fit_transform(Y)]
clf.fit(X[:, :-1], y)
###############################################################################
# Regression Test Case
@pytest.mark.parametrize('klass', [SGDRegressor, SparseSGDRegressor])
def test_sgd_reg(klass):
# Check that SGD gives any results.
clf = klass(alpha=0.1, max_iter=2, fit_intercept=False)
clf.fit([[0, 0], [1, 1], [2, 2]], [0, 1, 2])
assert clf.coef_[0] == clf.coef_[1]
@pytest.mark.parametrize('klass', [SGDRegressor, SparseSGDRegressor])
def test_sgd_averaged_computed_correctly(klass):
# Tests the average regressor matches the naive implementation
eta = .001
alpha = .01
n_samples = 20
n_features = 10
rng = np.random.RandomState(0)
X = rng.normal(size=(n_samples, n_features))
w = rng.normal(size=n_features)
# simple linear function without noise
y = np.dot(X, w)
clf = klass(loss='squared_error',
learning_rate='constant',
eta0=eta, alpha=alpha,
fit_intercept=True,
max_iter=1, average=True, shuffle=False)
clf.fit(X, y)
average_weights, average_intercept = asgd(klass, X, y, eta, alpha)
assert_array_almost_equal(clf.coef_,
average_weights,
decimal=16)
assert_almost_equal(clf.intercept_, average_intercept, decimal=16)
@pytest.mark.parametrize('klass', [SGDRegressor, SparseSGDRegressor])
def test_sgd_averaged_partial_fit(klass):
# Tests whether the partial fit yields the same average as the fit
eta = .001
alpha = .01
n_samples = 20
n_features = 10
rng = np.random.RandomState(0)
X = rng.normal(size=(n_samples, n_features))
w = rng.normal(size=n_features)
# simple linear function without noise
y = np.dot(X, w)
clf = klass(loss='squared_error',
learning_rate='constant',
eta0=eta, alpha=alpha,
fit_intercept=True,
max_iter=1, average=True, shuffle=False)
clf.partial_fit(X[:int(n_samples / 2)][:], y[:int(n_samples / 2)])
clf.partial_fit(X[int(n_samples / 2):][:], y[int(n_samples / 2):])
average_weights, average_intercept = asgd(klass, X, y, eta, alpha)
assert_array_almost_equal(clf.coef_,
average_weights,
decimal=16)
assert_almost_equal(clf.intercept_[0], average_intercept, decimal=16)
@pytest.mark.parametrize('klass', [SGDRegressor, SparseSGDRegressor])
def test_average_sparse(klass):
# Checks the average weights on data with 0s
eta = .001
alpha = .01
clf = klass(loss='squared_error',
learning_rate='constant',
eta0=eta, alpha=alpha,
fit_intercept=True,
max_iter=1, average=True, shuffle=False)
n_samples = Y3.shape[0]
clf.partial_fit(X3[:int(n_samples / 2)][:], Y3[:int(n_samples / 2)])
clf.partial_fit(X3[int(n_samples / 2):][:], Y3[int(n_samples / 2):])
average_weights, average_intercept = asgd(klass, X3, Y3, eta, alpha)
assert_array_almost_equal(clf.coef_,
average_weights,
decimal=16)
assert_almost_equal(clf.intercept_, average_intercept, decimal=16)
@pytest.mark.parametrize('klass', [SGDRegressor, SparseSGDRegressor])
def test_sgd_least_squares_fit(klass):
xmin, xmax = -5, 5
n_samples = 100
rng = np.random.RandomState(0)
X = np.linspace(xmin, xmax, n_samples).reshape(n_samples, 1)
# simple linear function without noise
y = 0.5 * X.ravel()
clf = klass(loss='squared_error', alpha=0.1, max_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert score > 0.99
# simple linear function with noise
y = 0.5 * X.ravel() + rng.randn(n_samples, 1).ravel()
clf = klass(loss='squared_error', alpha=0.1, max_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert score > 0.5
@pytest.mark.parametrize('klass', [SGDRegressor, SparseSGDRegressor])
def test_sgd_epsilon_insensitive(klass):
xmin, xmax = -5, 5
n_samples = 100
rng = np.random.RandomState(0)
X = np.linspace(xmin, xmax, n_samples).reshape(n_samples, 1)
# simple linear function without noise
y = 0.5 * X.ravel()
clf = klass(loss='epsilon_insensitive', epsilon=0.01,
alpha=0.1, max_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert score > 0.99
# simple linear function with noise
y = 0.5 * X.ravel() + rng.randn(n_samples, 1).ravel()
clf = klass(loss='epsilon_insensitive', epsilon=0.01,
alpha=0.1, max_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert score > 0.5
@pytest.mark.parametrize('klass', [SGDRegressor, SparseSGDRegressor])
def test_sgd_huber_fit(klass):
xmin, xmax = -5, 5
n_samples = 100
rng = np.random.RandomState(0)
X = np.linspace(xmin, xmax, n_samples).reshape(n_samples, 1)
# simple linear function without noise
y = 0.5 * X.ravel()
clf = klass(loss="huber", epsilon=0.1, alpha=0.1, max_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert score > 0.99
# simple linear function with noise
y = 0.5 * X.ravel() + rng.randn(n_samples, 1).ravel()
clf = klass(loss="huber", epsilon=0.1, alpha=0.1, max_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert score > 0.5
@pytest.mark.parametrize('klass', [SGDRegressor, SparseSGDRegressor])
def test_elasticnet_convergence(klass):
# Check that the SGD output is consistent with coordinate descent
n_samples, n_features = 1000, 5
rng = np.random.RandomState(0)
X = rng.randn(n_samples, n_features)
    # ground_truth linear model that generates y from X and to which the
    # models should converge if the regularizer were set to 0.0
ground_truth_coef = rng.randn(n_features)
y = np.dot(X, ground_truth_coef)
# XXX: alpha = 0.1 seems to cause convergence problems
for alpha in [0.01, 0.001]:
for l1_ratio in [0.5, 0.8, 1.0]:
cd = linear_model.ElasticNet(alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=False)
cd.fit(X, y)
sgd = klass(penalty='elasticnet', max_iter=50,
alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=False)
sgd.fit(X, y)
err_msg = ("cd and sgd did not converge to comparable "
"results for alpha=%f and l1_ratio=%f"
% (alpha, l1_ratio))
assert_almost_equal(cd.coef_, sgd.coef_, decimal=2,
err_msg=err_msg)
@ignore_warnings
@pytest.mark.parametrize('klass', [SGDRegressor, SparseSGDRegressor])
def test_partial_fit(klass):
third = X.shape[0] // 3
clf = klass(alpha=0.01)
clf.partial_fit(X[:third], Y[:third])
assert clf.coef_.shape == (X.shape[1], )
assert clf.intercept_.shape == (1,)
assert clf.predict([[0, 0]]).shape == (1, )
    coef_before = clf.coef_
    clf.partial_fit(X[third:], Y[third:])
    # check that coef_ hasn't been re-allocated
    assert np.shares_memory(clf.coef_, coef_before)
@pytest.mark.parametrize('klass', [SGDRegressor, SparseSGDRegressor])
@pytest.mark.parametrize('lr',
["constant", "optimal", "invscaling", "adaptive"])
def test_partial_fit_equal_fit(klass, lr):
clf = klass(alpha=0.01, max_iter=2, eta0=0.01,
learning_rate=lr, shuffle=False)
clf.fit(X, Y)
y_pred = clf.predict(T)
t = clf.t_
clf = klass(alpha=0.01, eta0=0.01,
learning_rate=lr, shuffle=False)
for i in range(2):
clf.partial_fit(X, Y)
y_pred2 = clf.predict(T)
assert clf.t_ == t
assert_array_almost_equal(y_pred, y_pred2, decimal=2)
@pytest.mark.parametrize('klass', [SGDRegressor, SparseSGDRegressor])
def test_loss_function_epsilon(klass):
clf = klass(epsilon=0.9)
clf.set_params(epsilon=0.1)
assert clf.loss_functions['huber'][1] == 0.1
###############################################################################
# SGD One Class SVM Test Case
# a simple implementation of ASGD to use for testing SGDOneClassSVM
def asgd_oneclass(klass, X, eta, nu, coef_init=None, offset_init=0.0):
if coef_init is None:
coef = np.zeros(X.shape[1])
else:
coef = coef_init
average_coef = np.zeros(X.shape[1])
offset = offset_init
intercept = 1 - offset
average_intercept = 0.0
decay = 1.0
# sparse data has a fixed decay of .01
if klass == SparseSGDOneClassSVM:
decay = .01
for i, entry in enumerate(X):
p = np.dot(entry, coef)
p += intercept
if p <= 1.0:
gradient = -1
else:
gradient = 0
coef *= max(0, 1.0 - (eta * nu / 2))
coef += -(eta * gradient * entry)
intercept += -(eta * (nu + gradient)) * decay
average_coef *= i
average_coef += coef
average_coef /= i + 1.0
average_intercept *= i
average_intercept += intercept
average_intercept /= i + 1.0
return average_coef, 1 - average_intercept
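# Illustrative sketch (hypothetical helper, not exercised by the tests): the
# linear one-class decision rule that asgd_oneclass above approximates and
# that test_sgd_oneclass below checks explicitly -- score_samples(x) is the
# raw linear score, decision_function subtracts the learned offset, and
# predict maps non-negative decisions to +1 (inlier) and the rest to -1.
def _oneclass_decision_sketch(coef, offset, X):
    """Illustrative only: scores, decisions and labels of a linear OCSVM."""
    scores = np.dot(X, coef)                    # analogous to score_samples
    decisions = scores - offset                 # analogous to decision_function
    labels = np.where(decisions >= 0, 1, -1)    # analogous to predict
    return scores, decisions, labels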
@pytest.mark.parametrize('klass', [SGDOneClassSVM, SparseSGDOneClassSVM])
@pytest.mark.parametrize('nu', [-0.5, 2])
def test_bad_nu_values(klass, nu):
msg = r"nu must be in \(0, 1]"
with pytest.raises(ValueError, match=msg):
klass(nu=nu)
clf = klass(nu=0.05)
clf2 = clone(clf)
with pytest.raises(ValueError, match=msg):
clf2.set_params(nu=nu)
@pytest.mark.parametrize('klass', [SGDOneClassSVM, SparseSGDOneClassSVM])
def _test_warm_start_oneclass(klass, X, lr):
# Test that explicit warm restart...
clf = klass(nu=0.5, eta0=0.01, shuffle=False,
learning_rate=lr)
clf.fit(X)
clf2 = klass(nu=0.1, eta0=0.01, shuffle=False,
learning_rate=lr)
clf2.fit(X, coef_init=clf.coef_.copy(),
offset_init=clf.offset_.copy())
# ... and implicit warm restart are equivalent.
clf3 = klass(nu=0.5, eta0=0.01, shuffle=False,
warm_start=True, learning_rate=lr)
clf3.fit(X)
assert clf3.t_ == clf.t_
assert_allclose(clf3.coef_, clf.coef_)
clf3.set_params(nu=0.1)
clf3.fit(X)
assert clf3.t_ == clf2.t_
assert_allclose(clf3.coef_, clf2.coef_)
@pytest.mark.parametrize('klass', [SGDOneClassSVM, SparseSGDOneClassSVM])
@pytest.mark.parametrize('lr',
["constant", "optimal", "invscaling", "adaptive"])
def test_warm_start_oneclass(klass, lr):
_test_warm_start_oneclass(klass, X, lr)
@pytest.mark.parametrize('klass', [SGDOneClassSVM, SparseSGDOneClassSVM])
def test_clone_oneclass(klass):
# Test whether clone works ok.
clf = klass(nu=0.5)
clf = clone(clf)
clf.set_params(nu=0.1)
clf.fit(X)
clf2 = klass(nu=0.1)
clf2.fit(X)
assert_array_equal(clf.coef_, clf2.coef_)
@pytest.mark.parametrize('klass', [SGDOneClassSVM, SparseSGDOneClassSVM])
def test_partial_fit_oneclass(klass):
third = X.shape[0] // 3
clf = klass(nu=0.1)
clf.partial_fit(X[:third])
assert clf.coef_.shape == (X.shape[1], )
assert clf.offset_.shape == (1,)
assert clf.predict([[0, 0]]).shape == (1, )
previous_coefs = clf.coef_
clf.partial_fit(X[third:])
    # check that coef_ hasn't been re-allocated
assert clf.coef_ is previous_coefs
# raises ValueError if number of features does not match previous data
with pytest.raises(ValueError):
clf.partial_fit(X[:, 1])
@pytest.mark.parametrize('klass', [SGDOneClassSVM, SparseSGDOneClassSVM])
@pytest.mark.parametrize('lr',
["constant", "optimal", "invscaling", "adaptive"])
def test_partial_fit_equal_fit_oneclass(klass, lr):
clf = klass(nu=0.05, max_iter=2, eta0=0.01,
learning_rate=lr, shuffle=False)
clf.fit(X)
y_scores = clf.decision_function(T)
t = clf.t_
coef = clf.coef_
offset = clf.offset_
clf = klass(nu=0.05, eta0=0.01, max_iter=1,
learning_rate=lr, shuffle=False)
for _ in range(2):
clf.partial_fit(X)
y_scores2 = clf.decision_function(T)
assert clf.t_ == t
assert_allclose(y_scores, y_scores2)
assert_allclose(clf.coef_, coef)
assert_allclose(clf.offset_, offset)
@pytest.mark.parametrize('klass', [SGDOneClassSVM, SparseSGDOneClassSVM])
def test_late_onset_averaging_reached_oneclass(klass):
    # Test that averaging takes effect once the late-onset threshold is reached
eta0 = .001
nu = .05
# 2 passes over the training set but average only at second pass
clf1 = klass(average=7, learning_rate="constant", eta0=eta0,
nu=nu, max_iter=2, shuffle=False)
# 1 pass over the training set with no averaging
clf2 = klass(average=0, learning_rate="constant", eta0=eta0,
nu=nu, max_iter=1, shuffle=False)
clf1.fit(X)
clf2.fit(X)
# Start from clf2 solution, compute averaging using asgd function and
# compare with clf1 solution
average_coef, average_offset = \
asgd_oneclass(klass, X, eta0, nu,
coef_init=clf2.coef_.ravel(),
offset_init=clf2.offset_)
assert_allclose(clf1.coef_.ravel(), average_coef.ravel())
assert_allclose(clf1.offset_, average_offset)
@pytest.mark.parametrize('klass', [SGDOneClassSVM, SparseSGDOneClassSVM])
def test_sgd_averaged_computed_correctly_oneclass(klass):
# Tests the average SGD One-Class SVM matches the naive implementation
eta = .001
nu = .05
n_samples = 20
n_features = 10
rng = np.random.RandomState(0)
X = rng.normal(size=(n_samples, n_features))
clf = klass(learning_rate='constant',
eta0=eta, nu=nu,
fit_intercept=True,
max_iter=1, average=True, shuffle=False)
clf.fit(X)
average_coef, average_offset = asgd_oneclass(klass, X, eta, nu)
assert_allclose(clf.coef_, average_coef)
assert_allclose(clf.offset_, average_offset)
@pytest.mark.parametrize('klass', [SGDOneClassSVM, SparseSGDOneClassSVM])
def test_sgd_averaged_partial_fit_oneclass(klass):
# Tests whether the partial fit yields the same average as the fit
eta = .001
nu = .05
n_samples = 20
n_features = 10
rng = np.random.RandomState(0)
X = rng.normal(size=(n_samples, n_features))
clf = klass(learning_rate='constant',
eta0=eta, nu=nu,
fit_intercept=True,
max_iter=1, average=True, shuffle=False)
clf.partial_fit(X[:int(n_samples / 2)][:])
clf.partial_fit(X[int(n_samples / 2):][:])
average_coef, average_offset = asgd_oneclass(klass, X, eta, nu)
assert_allclose(clf.coef_, average_coef)
assert_allclose(clf.offset_, average_offset)
@pytest.mark.parametrize('klass', [SGDOneClassSVM, SparseSGDOneClassSVM])
def test_average_sparse_oneclass(klass):
# Checks the average coef on data with 0s
eta = .001
nu = .01
clf = klass(learning_rate='constant',
eta0=eta, nu=nu,
fit_intercept=True,
max_iter=1, average=True, shuffle=False)
n_samples = X3.shape[0]
clf.partial_fit(X3[:int(n_samples / 2)])
clf.partial_fit(X3[int(n_samples / 2):])
average_coef, average_offset = asgd_oneclass(klass, X3, eta, nu)
assert_allclose(clf.coef_, average_coef)
assert_allclose(clf.offset_, average_offset)
def test_sgd_oneclass():
# Test fit, decision_function, predict and score_samples on a toy
# dataset
X_train = np.array([[-2, -1], [-1, -1], [1, 1]])
X_test = np.array([[0.5, -2], [2, 2]])
clf = SGDOneClassSVM(nu=0.5, eta0=1, learning_rate='constant',
shuffle=False, max_iter=1)
clf.fit(X_train)
assert_allclose(clf.coef_, np.array([-0.125, 0.4375]))
assert clf.offset_[0] == -0.5
scores = clf.score_samples(X_test)
assert_allclose(scores, np.array([-0.9375, 0.625]))
dec = clf.score_samples(X_test) - clf.offset_
assert_allclose(clf.decision_function(X_test), dec)
pred = clf.predict(X_test)
assert_array_equal(pred, np.array([-1, 1]))
def test_ocsvm_vs_sgdocsvm():
    # Checks that SGDOneClassSVM gives a good approximation of a kernelized
    # One-Class SVM
nu = 0.05
gamma = 2.
random_state = 42
# Generate train and test data
rng = np.random.RandomState(random_state)
X = 0.3 * rng.randn(500, 2)
X_train = np.r_[X + 2, X - 2]
X = 0.3 * rng.randn(100, 2)
X_test = np.r_[X + 2, X - 2]
# One-Class SVM
clf = OneClassSVM(gamma=gamma, kernel='rbf', nu=nu)
clf.fit(X_train)
y_pred_ocsvm = clf.predict(X_test)
dec_ocsvm = clf.decision_function(X_test).reshape(1, -1)
# SGDOneClassSVM using kernel approximation
max_iter = 15
transform = Nystroem(gamma=gamma, random_state=random_state)
clf_sgd = SGDOneClassSVM(nu=nu, shuffle=True, fit_intercept=True,
max_iter=max_iter, random_state=random_state,
tol=-np.inf)
pipe_sgd = make_pipeline(transform, clf_sgd)
pipe_sgd.fit(X_train)
y_pred_sgdocsvm = pipe_sgd.predict(X_test)
dec_sgdocsvm = pipe_sgd.decision_function(X_test).reshape(1, -1)
assert np.mean(y_pred_sgdocsvm == y_pred_ocsvm) >= 0.99
corrcoef = np.corrcoef(np.concatenate((dec_ocsvm, dec_sgdocsvm)))[0, 1]
assert corrcoef >= 0.9
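# Illustrative sketch (hypothetical helper, not part of the original tests):
# the pattern compared above -- a Nystroem kernel approximation followed by a
# linear SGDOneClassSVM -- packaged as a pipeline factory. The parameter
# values are assumptions mirroring the test, not recommended defaults.
def _online_ocsvm_pipeline_sketch(gamma=2., nu=0.05, random_state=42):
    """Illustrative only: approximate a kernelized OCSVM with SGD."""
    transform = Nystroem(gamma=gamma, random_state=random_state)
    clf_sgd = SGDOneClassSVM(nu=nu, shuffle=True, fit_intercept=True,
                             max_iter=15, random_state=random_state,
                             tol=-np.inf)
    return make_pipeline(transform, clf_sgd)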
def test_l1_ratio():
# Test if l1 ratio extremes match L1 and L2 penalty settings.
X, y = datasets.make_classification(n_samples=1000,
n_features=100, n_informative=20,
random_state=1234)
# test if elasticnet with l1_ratio near 1 gives same result as pure l1
est_en = SGDClassifier(alpha=0.001, penalty='elasticnet', tol=None,
max_iter=6, l1_ratio=0.9999999999,
random_state=42).fit(X, y)
est_l1 = SGDClassifier(alpha=0.001, penalty='l1', max_iter=6,
random_state=42, tol=None).fit(X, y)
assert_array_almost_equal(est_en.coef_, est_l1.coef_)
# test if elasticnet with l1_ratio near 0 gives same result as pure l2
est_en = SGDClassifier(alpha=0.001, penalty='elasticnet', tol=None,
max_iter=6, l1_ratio=0.0000000001,
random_state=42).fit(X, y)
est_l2 = SGDClassifier(alpha=0.001, penalty='l2', max_iter=6,
random_state=42, tol=None).fit(X, y)
assert_array_almost_equal(est_en.coef_, est_l2.coef_)
def test_underflow_or_overlow():
with np.errstate(all='raise'):
# Generate some weird data with hugely unscaled features
rng = np.random.RandomState(0)
n_samples = 100
n_features = 10
X = rng.normal(size=(n_samples, n_features))
X[:, :2] *= 1e300
assert np.isfinite(X).all()
# Use MinMaxScaler to scale the data without introducing a numerical
# instability (computing the standard deviation naively is not possible
# on this data)
X_scaled = MinMaxScaler().fit_transform(X)
assert np.isfinite(X_scaled).all()
# Define a ground truth on the scaled data
ground_truth = rng.normal(size=n_features)
y = (np.dot(X_scaled, ground_truth) > 0.).astype(np.int32)
assert_array_equal(np.unique(y), [0, 1])
model = SGDClassifier(alpha=0.1, loss='squared_hinge', max_iter=500)
# smoke test: model is stable on scaled data
model.fit(X_scaled, y)
assert np.isfinite(model.coef_).all()
# model is numerically unstable on unscaled data
msg_regxp = (r"Floating-point under-/overflow occurred at epoch #.*"
" Scaling input data with StandardScaler or MinMaxScaler"
" might help.")
with pytest.raises(ValueError, match=msg_regxp):
model.fit(X, y)
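# Illustrative sketch (hypothetical helper, not part of the original tests) of
# the remedy suggested by the error message checked above: scale the features
# before fitting so the gradients stay within floating-point range.
# MinMaxScaler is used because it is what the test relies on; StandardScaler
# would serve the same purpose on well-behaved data.
def _fit_sgd_on_scaled_data_sketch(X, y):
    """Illustrative only: scale hugely unscaled features, then fit SGD."""
    X_scaled = MinMaxScaler().fit_transform(X)
    model = SGDClassifier(alpha=0.1, loss='squared_hinge', max_iter=500)
    return model.fit(X_scaled, y)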
def test_numerical_stability_large_gradient():
# Non regression test case for numerical stability on scaled problems
# where the gradient can still explode with some losses
model = SGDClassifier(loss='squared_hinge', max_iter=10, shuffle=True,
penalty='elasticnet', l1_ratio=0.3, alpha=0.01,
eta0=0.001, random_state=0, tol=None)
with np.errstate(all='raise'):
model.fit(iris.data, iris.target)
assert np.isfinite(model.coef_).all()
@pytest.mark.parametrize('penalty', ['l2', 'l1', 'elasticnet'])
def test_large_regularization(penalty):
# Non regression tests for numerical stability issues caused by large
# regularization parameters
model = SGDClassifier(alpha=1e5, learning_rate='constant', eta0=0.1,
penalty=penalty, shuffle=False,
tol=None, max_iter=6)
with np.errstate(all='raise'):
model.fit(iris.data, iris.target)
assert_array_almost_equal(model.coef_, np.zeros_like(model.coef_))
def test_tol_parameter():
# Test that the tol parameter behaves as expected
X = StandardScaler().fit_transform(iris.data)
y = iris.target == 1
    # With tol=None, the number of iterations should be equal to max_iter
max_iter = 42
model_0 = SGDClassifier(tol=None, random_state=0, max_iter=max_iter)
model_0.fit(X, y)
assert max_iter == model_0.n_iter_
    # If tol is not None, the number of iterations should be less than max_iter
max_iter = 2000
model_1 = SGDClassifier(tol=0, random_state=0, max_iter=max_iter)
model_1.fit(X, y)
assert max_iter > model_1.n_iter_
assert model_1.n_iter_ > 5
    # A larger tol should yield a smaller number of iterations
model_2 = SGDClassifier(tol=0.1, random_state=0, max_iter=max_iter)
model_2.fit(X, y)
assert model_1.n_iter_ > model_2.n_iter_
assert model_2.n_iter_ > 3
# Strict tolerance and small max_iter should trigger a warning
model_3 = SGDClassifier(max_iter=3, tol=1e-3, random_state=0)
warning_message = (
"Maximum number of iteration reached before "
"convergence. Consider increasing max_iter to "
"improve the fit."
)
with pytest.warns(ConvergenceWarning, match=warning_message):
model_3.fit(X, y)
assert model_3.n_iter_ == 3
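# Illustrative sketch (hypothetical helper, not part of the original tests)
# summarising the stopping behaviour verified above: with tol=None the solver
# always runs max_iter epochs, while a finite tol lets it stop early once the
# objective stops improving by more than tol.
def _epochs_actually_run_sketch(X, y, tol, max_iter=100):
    """Illustrative only: number of epochs SGDClassifier really performed."""
    clf = SGDClassifier(tol=tol, max_iter=max_iter, random_state=0)
    clf.fit(X, y)
    return clf.n_iter_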
def _test_loss_common(loss_function, cases):
# Test the different loss functions
    # cases is a list of (p, y, expected_loss, expected_dloss)
for p, y, expected_loss, expected_dloss in cases:
assert_almost_equal(loss_function.py_loss(p, y), expected_loss)
assert_almost_equal(loss_function.py_dloss(p, y), expected_dloss)
def test_loss_hinge():
# Test Hinge (hinge / perceptron)
# hinge
loss = sgd_fast.Hinge(1.0)
cases = [
# (p, y, expected_loss, expected_dloss)
(1.1, 1.0, 0.0, 0.0), (-2.0, -1.0, 0.0, 0.0),
(1.0, 1.0, 0.0, -1.0), (-1.0, -1.0, 0.0, 1.0), (0.5, 1.0, 0.5, -1.0),
(2.0, -1.0, 3.0, 1.0), (-0.5, -1.0, 0.5, 1.0), (0.0, 1.0, 1, -1.0)
]
_test_loss_common(loss, cases)
# perceptron
loss = sgd_fast.Hinge(0.0)
cases = [
# (p, y, expected_loss, expected_dloss)
(1.0, 1.0, 0.0, 0.0), (-0.1, -1.0, 0.0, 0.0),
(0.0, 1.0, 0.0, -1.0), (0.0, -1.0, 0.0, 1.0), (0.5, -1.0, 0.5, 1.0),
(2.0, -1.0, 2.0, 1.0), (-0.5, 1.0, 0.5, -1.0), (-1.0, 1.0, 1.0, -1.0),
]
_test_loss_common(loss, cases)
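# Illustrative sketch (hypothetical pure-Python reference, not part of the
# original tests) of the loss values asserted above: Hinge(threshold) returns
# max(0, threshold - y * p), so threshold=1.0 is the SVM hinge loss and
# threshold=0.0 is the perceptron criterion.
def _hinge_loss_sketch(p, y, threshold=1.0):
    """Illustrative only: hinge loss for a single (prediction, target) pair."""
    return max(0.0, threshold - p * y)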
def test_gradient_squared_hinge():
# Test SquaredHinge
loss = sgd_fast.SquaredHinge(1.0)
cases = [
# (p, y, expected_loss, expected_dloss)
(1.0, 1.0, 0.0, 0.0), (-2.0, -1.0, 0.0, 0.0), (1.0, -1.0, 4.0, 4.0),
(-1.0, 1.0, 4.0, -4.0), (0.5, 1.0, 0.25, -1.0), (0.5, -1.0, 2.25, 3.0)
]
_test_loss_common(loss, cases)
def test_loss_log():
# Test Log (logistic loss)
loss = sgd_fast.Log()
cases = [
# (p, y, expected_loss, expected_dloss)
(1.0, 1.0, np.log(1.0 + np.exp(-1.0)), -1.0 / (np.exp(1.0) + 1.0)),
(1.0, -1.0, np.log(1.0 + np.exp(1.0)), 1.0 / (np.exp(-1.0) + 1.0)),
(-1.0, -1.0, np.log(1.0 + np.exp(-1.0)), 1.0 / (np.exp(1.0) + 1.0)),
(-1.0, 1.0, np.log(1.0 + np.exp(1.0)), -1.0 / (np.exp(-1.0) + 1.0)),
(0.0, 1.0, np.log(2), -0.5), (0.0, -1.0, np.log(2), 0.5),
(17.9, -1.0, 17.9, 1.0), (-17.9, 1.0, 17.9, -1.0),
]
_test_loss_common(loss, cases)
assert_almost_equal(loss.py_dloss(18.1, 1.0), np.exp(-18.1) * -1.0, 16)
assert_almost_equal(loss.py_loss(18.1, 1.0), np.exp(-18.1), 16)
assert_almost_equal(loss.py_dloss(-18.1, -1.0), np.exp(-18.1) * 1.0, 16)
assert_almost_equal(loss.py_loss(-18.1, 1.0), 18.1, 16)
def test_loss_squared_loss():
# Test SquaredLoss
loss = sgd_fast.SquaredLoss()
cases = [
# (p, y, expected_loss, expected_dloss)
(0.0, 0.0, 0.0, 0.0), (1.0, 1.0, 0.0, 0.0), (1.0, 0.0, 0.5, 1.0),
(0.5, -1.0, 1.125, 1.5), (-2.5, 2.0, 10.125, -4.5)
]
_test_loss_common(loss, cases)
def test_loss_huber():
# Test Huber
loss = sgd_fast.Huber(0.1)
cases = [
# (p, y, expected_loss, expected_dloss)
(0.0, 0.0, 0.0, 0.0), (0.1, 0.0, 0.005, 0.1), (0.0, 0.1, 0.005, -0.1),
(3.95, 4.0, 0.00125, -0.05), (5.0, 2.0, 0.295, 0.1),
(-1.0, 5.0, 0.595, -0.1)
]
_test_loss_common(loss, cases)
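# Illustrative sketch (hypothetical pure-Python reference, not part of the
# original tests) for the Huber values asserted above: quadratic within
# +/- epsilon of the target, linear with slope epsilon outside that band.
def _huber_loss_sketch(p, y, epsilon=0.1):
    """Illustrative only: Huber regression loss for a single prediction."""
    r = abs(p - y)
    if r <= epsilon:
        return 0.5 * r ** 2
    return epsilon * (r - 0.5 * epsilon)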
def test_loss_modified_huber():
    # Test ModifiedHuber
loss = sgd_fast.ModifiedHuber()
cases = [
# (p, y, expected_loss, expected_dloss)
(1.0, 1.0, 0.0, 0.0), (-1.0, -1.0, 0.0, 0.0), (2.0, 1.0, 0.0, 0.0),
(0.0, 1.0, 1.0, -2.0), (-1.0, 1.0, 4.0, -4.0), (0.5, -1.0, 2.25, 3.0),
(-2.0, 1.0, 8, -4.0), (-3.0, 1.0, 12, -4.0)
]
_test_loss_common(loss, cases)
def test_loss_epsilon_insensitive():
# Test EpsilonInsensitive
loss = sgd_fast.EpsilonInsensitive(0.1)
cases = [
# (p, y, expected_loss, expected_dloss)
(0.0, 0.0, 0.0, 0.0), (0.1, 0.0, 0.0, 0.0), (-2.05, -2.0, 0.0, 0.0),
(3.05, 3.0, 0.0, 0.0), (2.2, 2.0, 0.1, 1.0), (2.0, -1.0, 2.9, 1.0),
(2.0, 2.2, 0.1, -1.0), (-2.0, 1.0, 2.9, -1.0)
]
_test_loss_common(loss, cases)
def test_loss_squared_epsilon_insensitive():
# Test SquaredEpsilonInsensitive
loss = sgd_fast.SquaredEpsilonInsensitive(0.1)
cases = [
# (p, y, expected_loss, expected_dloss)
(0.0, 0.0, 0.0, 0.0), (0.1, 0.0, 0.0, 0.0), (-2.05, -2.0, 0.0, 0.0),
(3.05, 3.0, 0.0, 0.0), (2.2, 2.0, 0.01, 0.2), (2.0, -1.0, 8.41, 5.8),
(2.0, 2.2, 0.01, -0.2), (-2.0, 1.0, 8.41, -5.8)
]
_test_loss_common(loss, cases)
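# Illustrative sketch (hypothetical pure-Python reference, not part of the
# original tests) for the two epsilon-insensitive variants above: residuals
# smaller than epsilon cost nothing, larger ones are penalised linearly or
# quadratically on the excess.
def _epsilon_insensitive_loss_sketch(p, y, epsilon=0.1, squared=False):
    """Illustrative only: (squared) epsilon-insensitive regression loss."""
    excess = max(0.0, abs(p - y) - epsilon)
    return excess ** 2 if squared else excess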
def test_multi_thread_multi_class_and_early_stopping():
# This is a non-regression test for a bad interaction between
# early stopping internal attribute and thread-based parallelism.
clf = SGDClassifier(alpha=1e-3, tol=1e-3, max_iter=1000,
early_stopping=True, n_iter_no_change=100,
random_state=0, n_jobs=2)
clf.fit(iris.data, iris.target)
assert clf.n_iter_ > clf.n_iter_no_change
assert clf.n_iter_ < clf.n_iter_no_change + 20
assert clf.score(iris.data, iris.target) > 0.8
def test_multi_core_gridsearch_and_early_stopping():
# This is a non-regression test for a bad interaction between
# early stopping internal attribute and process-based multi-core
# parallelism.
param_grid = {
'alpha': np.logspace(-4, 4, 9),
'n_iter_no_change': [5, 10, 50],
}
clf = SGDClassifier(tol=1e-2, max_iter=1000, early_stopping=True,
random_state=0)
search = RandomizedSearchCV(clf, param_grid, n_iter=3, n_jobs=2,
random_state=0)
search.fit(iris.data, iris.target)
assert search.best_score_ > 0.8
@pytest.mark.parametrize("backend",
["loky", "multiprocessing", "threading"])
def test_SGDClassifier_fit_for_all_backends(backend):
# This is a non-regression smoke test. In the multi-class case,
# SGDClassifier.fit fits each class in a one-versus-all fashion using
# joblib.Parallel. However, each OvA step updates the coef_ attribute of
# the estimator in-place. Internally, SGDClassifier calls Parallel using
# require='sharedmem'. This test makes sure SGDClassifier.fit works
# consistently even when the user asks for a backend that does not provide
# sharedmem semantics.
# We further test a case where memmapping would have been used if
# SGDClassifier.fit was called from a loky or multiprocessing backend. In
# this specific case, in-place modification of clf.coef_ would have caused
# a segmentation fault when trying to write in a readonly memory mapped
# buffer.
if (parse_version(joblib.__version__) < parse_version('0.12')
and backend == 'loky'):
pytest.skip('loky backend does not exist in joblib <0.12')
random_state = np.random.RandomState(42)
# Create a classification problem with 50000 features and 20 classes. Using
    # loky or multiprocessing this makes the clf.coef_ exceed the threshold
    # above which memmapping is used in joblib and loky (1MB as of 2018/11/1).
X = sp.random(500, 2000, density=0.02, format='csr',
random_state=random_state)
y = random_state.choice(20, 500)
# Begin by fitting a SGD classifier sequentially
clf_sequential = SGDClassifier(max_iter=1000, n_jobs=1,
random_state=42)
clf_sequential.fit(X, y)
# Fit a SGDClassifier using the specified backend, and make sure the
# coefficients are equal to those obtained using a sequential fit
clf_parallel = SGDClassifier(max_iter=1000, n_jobs=4,
random_state=42)
with joblib.parallel_backend(backend=backend):
clf_parallel.fit(X, y)
assert_array_almost_equal(clf_sequential.coef_, clf_parallel.coef_)
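# Illustrative sketch (hypothetical helper, not part of the original tests) of
# the usage pattern exercised above: forcing a specific joblib backend around
# a multi-class SGDClassifier fit. The backend name and n_jobs value are
# assumptions for illustration only.
def _fit_with_backend_sketch(X, y, backend="threading", n_jobs=2):
    """Illustrative only: one-vs-all SGD fit under an explicit joblib backend."""
    clf = SGDClassifier(max_iter=1000, n_jobs=n_jobs, random_state=42)
    with joblib.parallel_backend(backend=backend):
        clf.fit(X, y)
    return clf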
# TODO: Remove in v1.2
@pytest.mark.parametrize(
'Estimator',
[linear_model.SGDClassifier, linear_model.SGDRegressor]
)
def test_loss_squared_loss_deprecated(Estimator):
# Note: class BaseSGD calls self._validate_params() in __init__, therefore
    # even instantiation of the class raises FutureWarning for squared_loss.
with pytest.warns(FutureWarning,
match="The loss 'squared_loss' was deprecated"):
est1 = Estimator(loss="squared_loss", random_state=0)
est1.fit(X, Y)
est2 = Estimator(loss="squared_error", random_state=0)
est2.fit(X, Y)
if hasattr(est1, "predict_proba"):
assert_allclose(est1.predict_proba(X), est2.predict_proba(X))
else:
assert_allclose(est1.predict(X), est2.predict(X))
|
bsd-3-clause
|
jlegendary/scikit-learn
|
sklearn/linear_model/tests/test_sgd.py
|
129
|
43401
|
import pickle
import unittest
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import raises
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_false, assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises_regexp
from sklearn import linear_model, datasets, metrics
from sklearn.base import clone
from sklearn.linear_model import SGDClassifier, SGDRegressor
from sklearn.preprocessing import LabelEncoder, scale, MinMaxScaler
class SparseSGDClassifier(SGDClassifier):
def fit(self, X, y, *args, **kw):
X = sp.csr_matrix(X)
return super(SparseSGDClassifier, self).fit(X, y, *args, **kw)
def partial_fit(self, X, y, *args, **kw):
X = sp.csr_matrix(X)
return super(SparseSGDClassifier, self).partial_fit(X, y, *args, **kw)
def decision_function(self, X):
X = sp.csr_matrix(X)
return super(SparseSGDClassifier, self).decision_function(X)
def predict_proba(self, X):
X = sp.csr_matrix(X)
return super(SparseSGDClassifier, self).predict_proba(X)
class SparseSGDRegressor(SGDRegressor):
def fit(self, X, y, *args, **kw):
X = sp.csr_matrix(X)
return SGDRegressor.fit(self, X, y, *args, **kw)
def partial_fit(self, X, y, *args, **kw):
X = sp.csr_matrix(X)
return SGDRegressor.partial_fit(self, X, y, *args, **kw)
def decision_function(self, X, *args, **kw):
X = sp.csr_matrix(X)
return SGDRegressor.decision_function(self, X, *args, **kw)
# Test Data
# test sample 1
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]])
Y = [1, 1, 1, 2, 2, 2]
T = np.array([[-1, -1], [2, 2], [3, 2]])
true_result = [1, 2, 2]
# test sample 2; string class labels
X2 = np.array([[-1, 1], [-0.75, 0.5], [-1.5, 1.5],
[1, 1], [0.75, 0.5], [1.5, 1.5],
[-1, -1], [0, -0.5], [1, -1]])
Y2 = ["one"] * 3 + ["two"] * 3 + ["three"] * 3
T2 = np.array([[-1.5, 0.5], [1, 2], [0, -2]])
true_result2 = ["one", "two", "three"]
# test sample 3
X3 = np.array([[1, 1, 0, 0, 0, 0], [1, 1, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0], [0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 1, 1], [0, 0, 0, 0, 1, 1],
[0, 0, 0, 1, 0, 0], [0, 0, 0, 1, 0, 0]])
Y3 = np.array([1, 1, 1, 1, 2, 2, 2, 2])
# test sample 4 - two more or less redundant feature groups
X4 = np.array([[1, 0.9, 0.8, 0, 0, 0], [1, .84, .98, 0, 0, 0],
[1, .96, .88, 0, 0, 0], [1, .91, .99, 0, 0, 0],
[0, 0, 0, .89, .91, 1], [0, 0, 0, .79, .84, 1],
[0, 0, 0, .91, .95, 1], [0, 0, 0, .93, 1, 1]])
Y4 = np.array([1, 1, 1, 1, 2, 2, 2, 2])
iris = datasets.load_iris()
# test sample 5 - test sample 1 as binary classification problem
X5 = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]])
Y5 = [1, 1, 1, 2, 2, 2]
true_result5 = [0, 1, 1]
# Classification Test Case
class CommonTest(object):
def factory(self, **kwargs):
if "random_state" not in kwargs:
kwargs["random_state"] = 42
return self.factory_class(**kwargs)
# a simple implementation of ASGD to use for testing
# uses squared loss to find the gradient
def asgd(self, X, y, eta, alpha, weight_init=None, intercept_init=0.0):
if weight_init is None:
weights = np.zeros(X.shape[1])
else:
weights = weight_init
average_weights = np.zeros(X.shape[1])
intercept = intercept_init
average_intercept = 0.0
decay = 1.0
# sparse data has a fixed decay of .01
if (isinstance(self, SparseSGDClassifierTestCase) or
isinstance(self, SparseSGDRegressorTestCase)):
decay = .01
for i, entry in enumerate(X):
p = np.dot(entry, weights)
p += intercept
gradient = p - y[i]
weights *= 1.0 - (eta * alpha)
weights += -(eta * gradient * entry)
intercept += -(eta * gradient) * decay
average_weights *= i
average_weights += weights
average_weights /= i + 1.0
average_intercept *= i
average_intercept += intercept
average_intercept /= i + 1.0
return average_weights, average_intercept
def _test_warm_start(self, X, Y, lr):
# Test that explicit warm restart...
clf = self.factory(alpha=0.01, eta0=0.01, n_iter=5, shuffle=False,
learning_rate=lr)
clf.fit(X, Y)
clf2 = self.factory(alpha=0.001, eta0=0.01, n_iter=5, shuffle=False,
learning_rate=lr)
clf2.fit(X, Y,
coef_init=clf.coef_.copy(),
intercept_init=clf.intercept_.copy())
# ... and implicit warm restart are equivalent.
clf3 = self.factory(alpha=0.01, eta0=0.01, n_iter=5, shuffle=False,
warm_start=True, learning_rate=lr)
clf3.fit(X, Y)
assert_equal(clf3.t_, clf.t_)
assert_array_almost_equal(clf3.coef_, clf.coef_)
clf3.set_params(alpha=0.001)
clf3.fit(X, Y)
assert_equal(clf3.t_, clf2.t_)
assert_array_almost_equal(clf3.coef_, clf2.coef_)
def test_warm_start_constant(self):
self._test_warm_start(X, Y, "constant")
def test_warm_start_invscaling(self):
self._test_warm_start(X, Y, "invscaling")
def test_warm_start_optimal(self):
self._test_warm_start(X, Y, "optimal")
def test_input_format(self):
# Input format tests.
clf = self.factory(alpha=0.01, n_iter=5,
shuffle=False)
clf.fit(X, Y)
Y_ = np.array(Y)[:, np.newaxis]
Y_ = np.c_[Y_, Y_]
assert_raises(ValueError, clf.fit, X, Y_)
def test_clone(self):
# Test whether clone works ok.
clf = self.factory(alpha=0.01, n_iter=5, penalty='l1')
clf = clone(clf)
clf.set_params(penalty='l2')
clf.fit(X, Y)
clf2 = self.factory(alpha=0.01, n_iter=5, penalty='l2')
clf2.fit(X, Y)
assert_array_equal(clf.coef_, clf2.coef_)
def test_plain_has_no_average_attr(self):
clf = self.factory(average=True, eta0=.01)
clf.fit(X, Y)
assert_true(hasattr(clf, 'average_coef_'))
assert_true(hasattr(clf, 'average_intercept_'))
assert_true(hasattr(clf, 'standard_intercept_'))
assert_true(hasattr(clf, 'standard_coef_'))
clf = self.factory()
clf.fit(X, Y)
assert_false(hasattr(clf, 'average_coef_'))
assert_false(hasattr(clf, 'average_intercept_'))
assert_false(hasattr(clf, 'standard_intercept_'))
assert_false(hasattr(clf, 'standard_coef_'))
def test_late_onset_averaging_not_reached(self):
clf1 = self.factory(average=600)
clf2 = self.factory()
for _ in range(100):
if isinstance(clf1, SGDClassifier):
clf1.partial_fit(X, Y, classes=np.unique(Y))
clf2.partial_fit(X, Y, classes=np.unique(Y))
else:
clf1.partial_fit(X, Y)
clf2.partial_fit(X, Y)
assert_array_almost_equal(clf1.coef_, clf2.coef_, decimal=16)
assert_almost_equal(clf1.intercept_, clf2.intercept_, decimal=16)
def test_late_onset_averaging_reached(self):
eta0 = .001
alpha = .0001
Y_encode = np.array(Y)
Y_encode[Y_encode == 1] = -1.0
Y_encode[Y_encode == 2] = 1.0
clf1 = self.factory(average=7, learning_rate="constant",
loss='squared_loss', eta0=eta0,
alpha=alpha, n_iter=2, shuffle=False)
clf2 = self.factory(average=0, learning_rate="constant",
loss='squared_loss', eta0=eta0,
alpha=alpha, n_iter=1, shuffle=False)
clf1.fit(X, Y_encode)
clf2.fit(X, Y_encode)
average_weights, average_intercept = \
self.asgd(X, Y_encode, eta0, alpha,
weight_init=clf2.coef_.ravel(),
intercept_init=clf2.intercept_)
assert_array_almost_equal(clf1.coef_.ravel(),
average_weights.ravel(),
decimal=16)
assert_almost_equal(clf1.intercept_, average_intercept, decimal=16)
class DenseSGDClassifierTestCase(unittest.TestCase, CommonTest):
"""Test suite for the dense representation variant of SGD"""
factory_class = SGDClassifier
def test_sgd(self):
# Check that SGD gives any results :-)
for loss in ("hinge", "squared_hinge", "log", "modified_huber"):
clf = self.factory(penalty='l2', alpha=0.01, fit_intercept=True,
loss=loss, n_iter=10, shuffle=True)
clf.fit(X, Y)
# assert_almost_equal(clf.coef_[0], clf.coef_[1], decimal=7)
assert_array_equal(clf.predict(T), true_result)
@raises(ValueError)
def test_sgd_bad_l1_ratio(self):
# Check whether expected ValueError on bad l1_ratio
self.factory(l1_ratio=1.1)
@raises(ValueError)
def test_sgd_bad_learning_rate_schedule(self):
# Check whether expected ValueError on bad learning_rate
self.factory(learning_rate="<unknown>")
@raises(ValueError)
def test_sgd_bad_eta0(self):
# Check whether expected ValueError on bad eta0
self.factory(eta0=0, learning_rate="constant")
@raises(ValueError)
def test_sgd_bad_alpha(self):
# Check whether expected ValueError on bad alpha
self.factory(alpha=-.1)
@raises(ValueError)
def test_sgd_bad_penalty(self):
# Check whether expected ValueError on bad penalty
self.factory(penalty='foobar', l1_ratio=0.85)
@raises(ValueError)
def test_sgd_bad_loss(self):
# Check whether expected ValueError on bad loss
self.factory(loss="foobar")
@raises(ValueError)
def test_sgd_n_iter_param(self):
# Test parameter validity check
self.factory(n_iter=-10000)
@raises(ValueError)
def test_sgd_shuffle_param(self):
# Test parameter validity check
self.factory(shuffle="false")
@raises(TypeError)
def test_argument_coef(self):
# Checks coef_init not allowed as model argument (only fit)
# Provided coef_ does not match dataset.
self.factory(coef_init=np.zeros((3,))).fit(X, Y)
@raises(ValueError)
def test_provide_coef(self):
# Checks coef_init shape for the warm starts
# Provided coef_ does not match dataset.
self.factory().fit(X, Y, coef_init=np.zeros((3,)))
@raises(ValueError)
def test_set_intercept(self):
# Checks intercept_ shape for the warm starts
# Provided intercept_ does not match dataset.
self.factory().fit(X, Y, intercept_init=np.zeros((3,)))
def test_set_intercept_binary(self):
# Checks intercept_ shape for the warm starts in binary case
self.factory().fit(X5, Y5, intercept_init=0)
def test_average_binary_computed_correctly(self):
# Checks the SGDClassifier correctly computes the average weights
eta = .1
alpha = 2.
n_samples = 20
n_features = 10
rng = np.random.RandomState(0)
X = rng.normal(size=(n_samples, n_features))
w = rng.normal(size=n_features)
clf = self.factory(loss='squared_loss',
learning_rate='constant',
eta0=eta, alpha=alpha,
fit_intercept=True,
n_iter=1, average=True, shuffle=False)
# simple linear function without noise
y = np.dot(X, w)
y = np.sign(y)
clf.fit(X, y)
average_weights, average_intercept = self.asgd(X, y, eta, alpha)
average_weights = average_weights.reshape(1, -1)
assert_array_almost_equal(clf.coef_,
average_weights,
decimal=14)
assert_almost_equal(clf.intercept_, average_intercept, decimal=14)
def test_set_intercept_to_intercept(self):
# Checks intercept_ shape consistency for the warm starts
# Inconsistent intercept_ shape.
clf = self.factory().fit(X5, Y5)
self.factory().fit(X5, Y5, intercept_init=clf.intercept_)
clf = self.factory().fit(X, Y)
self.factory().fit(X, Y, intercept_init=clf.intercept_)
@raises(ValueError)
def test_sgd_at_least_two_labels(self):
# Target must have at least two labels
self.factory(alpha=0.01, n_iter=20).fit(X2, np.ones(9))
def test_partial_fit_weight_class_balanced(self):
        # partial_fit with class_weight='balanced' not supported
assert_raises_regexp(ValueError,
"class_weight 'balanced' is not supported for "
"partial_fit. In order to use 'balanced' weights, "
"use compute_class_weight\('balanced', classes, y\). "
"In place of y you can us a large enough sample "
"of the full training set target to properly "
"estimate the class frequency distributions. "
"Pass the resulting weights as the class_weight "
"parameter.",
self.factory(class_weight='balanced').partial_fit,
X, Y, classes=np.unique(Y))
def test_sgd_multiclass(self):
# Multi-class test case
clf = self.factory(alpha=0.01, n_iter=20).fit(X2, Y2)
assert_equal(clf.coef_.shape, (3, 2))
assert_equal(clf.intercept_.shape, (3,))
assert_equal(clf.decision_function([0, 0]).shape, (1, 3))
pred = clf.predict(T2)
assert_array_equal(pred, true_result2)
def test_sgd_multiclass_average(self):
eta = .001
alpha = .01
# Multi-class average test case
clf = self.factory(loss='squared_loss',
learning_rate='constant',
eta0=eta, alpha=alpha,
fit_intercept=True,
n_iter=1, average=True, shuffle=False)
np_Y2 = np.array(Y2)
clf.fit(X2, np_Y2)
classes = np.unique(np_Y2)
for i, cl in enumerate(classes):
y_i = np.ones(np_Y2.shape[0])
y_i[np_Y2 != cl] = -1
average_coef, average_intercept = self.asgd(X2, y_i, eta, alpha)
assert_array_almost_equal(average_coef, clf.coef_[i], decimal=16)
assert_almost_equal(average_intercept,
clf.intercept_[i],
decimal=16)
def test_sgd_multiclass_with_init_coef(self):
# Multi-class test case
clf = self.factory(alpha=0.01, n_iter=20)
clf.fit(X2, Y2, coef_init=np.zeros((3, 2)),
intercept_init=np.zeros(3))
assert_equal(clf.coef_.shape, (3, 2))
        assert_equal(clf.intercept_.shape, (3,))
pred = clf.predict(T2)
assert_array_equal(pred, true_result2)
def test_sgd_multiclass_njobs(self):
# Multi-class test case with multi-core support
clf = self.factory(alpha=0.01, n_iter=20, n_jobs=2).fit(X2, Y2)
assert_equal(clf.coef_.shape, (3, 2))
assert_equal(clf.intercept_.shape, (3,))
assert_equal(clf.decision_function([0, 0]).shape, (1, 3))
pred = clf.predict(T2)
assert_array_equal(pred, true_result2)
def test_set_coef_multiclass(self):
        # Checks coef_init and intercept_init shape for multi-class
# problems
# Provided coef_ does not match dataset
clf = self.factory()
assert_raises(ValueError, clf.fit, X2, Y2, coef_init=np.zeros((2, 2)))
# Provided coef_ does match dataset
clf = self.factory().fit(X2, Y2, coef_init=np.zeros((3, 2)))
# Provided intercept_ does not match dataset
clf = self.factory()
assert_raises(ValueError, clf.fit, X2, Y2,
intercept_init=np.zeros((1,)))
# Provided intercept_ does match dataset.
clf = self.factory().fit(X2, Y2, intercept_init=np.zeros((3,)))
def test_sgd_proba(self):
# Check SGD.predict_proba
# Hinge loss does not allow for conditional prob estimate.
# We cannot use the factory here, because it defines predict_proba
# anyway.
clf = SGDClassifier(loss="hinge", alpha=0.01, n_iter=10).fit(X, Y)
assert_false(hasattr(clf, "predict_proba"))
assert_false(hasattr(clf, "predict_log_proba"))
# log and modified_huber losses can output probability estimates
# binary case
for loss in ["log", "modified_huber"]:
clf = self.factory(loss="modified_huber", alpha=0.01, n_iter=10)
clf.fit(X, Y)
p = clf.predict_proba([3, 2])
assert_true(p[0, 1] > 0.5)
p = clf.predict_proba([-1, -1])
assert_true(p[0, 1] < 0.5)
p = clf.predict_log_proba([3, 2])
assert_true(p[0, 1] > p[0, 0])
p = clf.predict_log_proba([-1, -1])
assert_true(p[0, 1] < p[0, 0])
# log loss multiclass probability estimates
clf = self.factory(loss="log", alpha=0.01, n_iter=10).fit(X2, Y2)
d = clf.decision_function([[.1, -.1], [.3, .2]])
p = clf.predict_proba([[.1, -.1], [.3, .2]])
assert_array_equal(np.argmax(p, axis=1), np.argmax(d, axis=1))
assert_almost_equal(p[0].sum(), 1)
assert_true(np.all(p[0] >= 0))
p = clf.predict_proba([-1, -1])
d = clf.decision_function([-1, -1])
assert_array_equal(np.argsort(p[0]), np.argsort(d[0]))
l = clf.predict_log_proba([3, 2])
p = clf.predict_proba([3, 2])
assert_array_almost_equal(np.log(p), l)
l = clf.predict_log_proba([-1, -1])
p = clf.predict_proba([-1, -1])
assert_array_almost_equal(np.log(p), l)
# Modified Huber multiclass probability estimates; requires a separate
# test because the hard zero/one probabilities may destroy the
# ordering present in decision_function output.
clf = self.factory(loss="modified_huber", alpha=0.01, n_iter=10)
clf.fit(X2, Y2)
d = clf.decision_function([3, 2])
p = clf.predict_proba([3, 2])
if not isinstance(self, SparseSGDClassifierTestCase):
assert_equal(np.argmax(d, axis=1), np.argmax(p, axis=1))
else: # XXX the sparse test gets a different X2 (?)
assert_equal(np.argmin(d, axis=1), np.argmin(p, axis=1))
# the following sample produces decision_function values < -1,
# which would cause naive normalization to fail (see comment
# in SGDClassifier.predict_proba)
x = X.mean(axis=0)
d = clf.decision_function(x)
if np.all(d < -1): # XXX not true in sparse test case (why?)
p = clf.predict_proba(x)
assert_array_almost_equal(p[0], [1 / 3.] * 3)
def test_sgd_l1(self):
# Test L1 regularization
n = len(X4)
rng = np.random.RandomState(13)
idx = np.arange(n)
rng.shuffle(idx)
X = X4[idx, :]
Y = Y4[idx]
clf = self.factory(penalty='l1', alpha=.2, fit_intercept=False,
n_iter=2000, shuffle=False)
clf.fit(X, Y)
assert_array_equal(clf.coef_[0, 1:-1], np.zeros((4,)))
pred = clf.predict(X)
assert_array_equal(pred, Y)
# test sparsify with dense inputs
clf.sparsify()
assert_true(sp.issparse(clf.coef_))
pred = clf.predict(X)
assert_array_equal(pred, Y)
# pickle and unpickle with sparse coef_
clf = pickle.loads(pickle.dumps(clf))
assert_true(sp.issparse(clf.coef_))
pred = clf.predict(X)
assert_array_equal(pred, Y)
def test_class_weights(self):
# Test class weights.
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y = [1, 1, 1, -1, -1]
clf = self.factory(alpha=0.1, n_iter=1000, fit_intercept=False,
class_weight=None)
clf.fit(X, y)
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([1]))
        # we give a small weight to class 1
clf = self.factory(alpha=0.1, n_iter=1000, fit_intercept=False,
class_weight={1: 0.001})
clf.fit(X, y)
# now the hyperplane should rotate clock-wise and
# the prediction on this point should shift
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([-1]))
def test_equal_class_weight(self):
# Test if equal class weights approx. equals no class weights.
X = [[1, 0], [1, 0], [0, 1], [0, 1]]
y = [0, 0, 1, 1]
clf = self.factory(alpha=0.1, n_iter=1000, class_weight=None)
clf.fit(X, y)
X = [[1, 0], [0, 1]]
y = [0, 1]
clf_weighted = self.factory(alpha=0.1, n_iter=1000,
class_weight={0: 0.5, 1: 0.5})
clf_weighted.fit(X, y)
# should be similar up to some epsilon due to learning rate schedule
assert_almost_equal(clf.coef_, clf_weighted.coef_, decimal=2)
@raises(ValueError)
def test_wrong_class_weight_label(self):
        # ValueError due to a non-existing class label.
clf = self.factory(alpha=0.1, n_iter=1000, class_weight={0: 0.5})
clf.fit(X, Y)
@raises(ValueError)
def test_wrong_class_weight_format(self):
# ValueError due to wrong class_weight argument type.
clf = self.factory(alpha=0.1, n_iter=1000, class_weight=[0.5])
clf.fit(X, Y)
def test_weights_multiplied(self):
# Tests that class_weight and sample_weight are multiplicative
class_weights = {1: .6, 2: .3}
sample_weights = np.random.random(Y4.shape[0])
multiplied_together = np.copy(sample_weights)
multiplied_together[Y4 == 1] *= class_weights[1]
multiplied_together[Y4 == 2] *= class_weights[2]
clf1 = self.factory(alpha=0.1, n_iter=20, class_weight=class_weights)
clf2 = self.factory(alpha=0.1, n_iter=20)
clf1.fit(X4, Y4, sample_weight=sample_weights)
clf2.fit(X4, Y4, sample_weight=multiplied_together)
assert_almost_equal(clf1.coef_, clf2.coef_)
def test_balanced_weight(self):
        # Test class weights for imbalanced data
# compute reference metrics on iris dataset that is quite balanced by
# default
X, y = iris.data, iris.target
X = scale(X)
idx = np.arange(X.shape[0])
rng = np.random.RandomState(6)
rng.shuffle(idx)
X = X[idx]
y = y[idx]
clf = self.factory(alpha=0.0001, n_iter=1000,
class_weight=None, shuffle=False).fit(X, y)
assert_almost_equal(metrics.f1_score(y, clf.predict(X), average='weighted'), 0.96,
decimal=1)
# make the same prediction using balanced class_weight
clf_balanced = self.factory(alpha=0.0001, n_iter=1000,
class_weight="balanced",
shuffle=False).fit(X, y)
assert_almost_equal(metrics.f1_score(y, clf_balanced.predict(X), average='weighted'), 0.96,
decimal=1)
# Make sure that in the balanced case it does not change anything
# to use "balanced"
assert_array_almost_equal(clf.coef_, clf_balanced.coef_, 6)
        # build a very imbalanced dataset out of iris data
X_0 = X[y == 0, :]
y_0 = y[y == 0]
X_imbalanced = np.vstack([X] + [X_0] * 10)
y_imbalanced = np.concatenate([y] + [y_0] * 10)
# fit a model on the imbalanced data without class weight info
clf = self.factory(n_iter=1000, class_weight=None, shuffle=False)
clf.fit(X_imbalanced, y_imbalanced)
y_pred = clf.predict(X)
assert_less(metrics.f1_score(y, y_pred, average='weighted'), 0.96)
# fit a model with balanced class_weight enabled
clf = self.factory(n_iter=1000, class_weight="balanced", shuffle=False)
clf.fit(X_imbalanced, y_imbalanced)
y_pred = clf.predict(X)
assert_greater(metrics.f1_score(y, y_pred, average='weighted'), 0.96)
        # fit another model with balanced class_weight and check the result is stable
clf = self.factory(n_iter=1000, class_weight="balanced", shuffle=False)
clf.fit(X_imbalanced, y_imbalanced)
y_pred = clf.predict(X)
assert_greater(metrics.f1_score(y, y_pred, average='weighted'), 0.96)
def test_sample_weights(self):
# Test weights on individual samples
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y = [1, 1, 1, -1, -1]
clf = self.factory(alpha=0.1, n_iter=1000, fit_intercept=False)
clf.fit(X, y)
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([1]))
        # we give a small weight to class 1
clf.fit(X, y, sample_weight=[0.001] * 3 + [1] * 2)
# now the hyperplane should rotate clock-wise and
# the prediction on this point should shift
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([-1]))
@raises(ValueError)
def test_wrong_sample_weights(self):
# Test if ValueError is raised if sample_weight has wrong shape
clf = self.factory(alpha=0.1, n_iter=1000, fit_intercept=False)
# provided sample_weight too long
clf.fit(X, Y, sample_weight=np.arange(7))
@raises(ValueError)
def test_partial_fit_exception(self):
clf = self.factory(alpha=0.01)
# classes was not specified
clf.partial_fit(X3, Y3)
def test_partial_fit_binary(self):
third = X.shape[0] // 3
clf = self.factory(alpha=0.01)
classes = np.unique(Y)
clf.partial_fit(X[:third], Y[:third], classes=classes)
assert_equal(clf.coef_.shape, (1, X.shape[1]))
assert_equal(clf.intercept_.shape, (1,))
assert_equal(clf.decision_function([0, 0]).shape, (1, ))
id1 = id(clf.coef_.data)
clf.partial_fit(X[third:], Y[third:])
id2 = id(clf.coef_.data)
        # check that coef_ hasn't been re-allocated
assert_true(id1, id2)
y_pred = clf.predict(T)
assert_array_equal(y_pred, true_result)
def test_partial_fit_multiclass(self):
third = X2.shape[0] // 3
clf = self.factory(alpha=0.01)
classes = np.unique(Y2)
clf.partial_fit(X2[:third], Y2[:third], classes=classes)
assert_equal(clf.coef_.shape, (3, X2.shape[1]))
assert_equal(clf.intercept_.shape, (3,))
assert_equal(clf.decision_function([0, 0]).shape, (1, 3))
id1 = id(clf.coef_.data)
clf.partial_fit(X2[third:], Y2[third:])
id2 = id(clf.coef_.data)
        # check that coef_ hasn't been re-allocated
assert_true(id1, id2)
def test_fit_then_partial_fit(self):
# Partial_fit should work after initial fit in the multiclass case.
# Non-regression test for #2496; fit would previously produce a
# Fortran-ordered coef_ that subsequent partial_fit couldn't handle.
clf = self.factory()
clf.fit(X2, Y2)
clf.partial_fit(X2, Y2) # no exception here
def _test_partial_fit_equal_fit(self, lr):
for X_, Y_, T_ in ((X, Y, T), (X2, Y2, T2)):
clf = self.factory(alpha=0.01, eta0=0.01, n_iter=2,
learning_rate=lr, shuffle=False)
clf.fit(X_, Y_)
y_pred = clf.decision_function(T_)
t = clf.t_
classes = np.unique(Y_)
clf = self.factory(alpha=0.01, eta0=0.01, learning_rate=lr,
shuffle=False)
for i in range(2):
clf.partial_fit(X_, Y_, classes=classes)
y_pred2 = clf.decision_function(T_)
assert_equal(clf.t_, t)
assert_array_almost_equal(y_pred, y_pred2, decimal=2)
def test_partial_fit_equal_fit_constant(self):
self._test_partial_fit_equal_fit("constant")
def test_partial_fit_equal_fit_optimal(self):
self._test_partial_fit_equal_fit("optimal")
def test_partial_fit_equal_fit_invscaling(self):
self._test_partial_fit_equal_fit("invscaling")
def test_regression_losses(self):
clf = self.factory(alpha=0.01, learning_rate="constant",
eta0=0.1, loss="epsilon_insensitive")
clf.fit(X, Y)
assert_equal(1.0, np.mean(clf.predict(X) == Y))
clf = self.factory(alpha=0.01, learning_rate="constant",
eta0=0.1, loss="squared_epsilon_insensitive")
clf.fit(X, Y)
assert_equal(1.0, np.mean(clf.predict(X) == Y))
clf = self.factory(alpha=0.01, loss="huber")
clf.fit(X, Y)
assert_equal(1.0, np.mean(clf.predict(X) == Y))
clf = self.factory(alpha=0.01, learning_rate="constant", eta0=0.01,
loss="squared_loss")
clf.fit(X, Y)
assert_equal(1.0, np.mean(clf.predict(X) == Y))
def test_warm_start_multiclass(self):
self._test_warm_start(X2, Y2, "optimal")
def test_multiple_fit(self):
# Test multiple calls of fit w/ different shaped inputs.
clf = self.factory(alpha=0.01, n_iter=5,
shuffle=False)
clf.fit(X, Y)
assert_true(hasattr(clf, "coef_"))
# Non-regression test: try fitting with a different label set.
y = [["ham", "spam"][i] for i in LabelEncoder().fit_transform(Y)]
clf.fit(X[:, :-1], y)
class SparseSGDClassifierTestCase(DenseSGDClassifierTestCase):
"""Run exactly the same tests using the sparse representation variant"""
factory_class = SparseSGDClassifier
###############################################################################
# Regression Test Case
class DenseSGDRegressorTestCase(unittest.TestCase, CommonTest):
"""Test suite for the dense representation variant of SGD"""
factory_class = SGDRegressor
def test_sgd(self):
# Check that SGD gives any results.
clf = self.factory(alpha=0.1, n_iter=2,
fit_intercept=False)
clf.fit([[0, 0], [1, 1], [2, 2]], [0, 1, 2])
assert_equal(clf.coef_[0], clf.coef_[1])
@raises(ValueError)
def test_sgd_bad_penalty(self):
# Check whether expected ValueError on bad penalty
self.factory(penalty='foobar', l1_ratio=0.85)
@raises(ValueError)
def test_sgd_bad_loss(self):
# Check whether expected ValueError on bad loss
self.factory(loss="foobar")
def test_sgd_averaged_computed_correctly(self):
# Tests the average regressor matches the naive implementation
eta = .001
alpha = .01
n_samples = 20
n_features = 10
rng = np.random.RandomState(0)
X = rng.normal(size=(n_samples, n_features))
w = rng.normal(size=n_features)
# simple linear function without noise
y = np.dot(X, w)
clf = self.factory(loss='squared_loss',
learning_rate='constant',
eta0=eta, alpha=alpha,
fit_intercept=True,
n_iter=1, average=True, shuffle=False)
clf.fit(X, y)
average_weights, average_intercept = self.asgd(X, y, eta, alpha)
assert_array_almost_equal(clf.coef_,
average_weights,
decimal=16)
assert_almost_equal(clf.intercept_, average_intercept, decimal=16)
def test_sgd_averaged_partial_fit(self):
# Tests whether the partial fit yields the same average as the fit
eta = .001
alpha = .01
n_samples = 20
n_features = 10
rng = np.random.RandomState(0)
X = rng.normal(size=(n_samples, n_features))
w = rng.normal(size=n_features)
# simple linear function without noise
y = np.dot(X, w)
clf = self.factory(loss='squared_loss',
learning_rate='constant',
eta0=eta, alpha=alpha,
fit_intercept=True,
n_iter=1, average=True, shuffle=False)
clf.partial_fit(X[:int(n_samples / 2)][:], y[:int(n_samples / 2)])
clf.partial_fit(X[int(n_samples / 2):][:], y[int(n_samples / 2):])
average_weights, average_intercept = self.asgd(X, y, eta, alpha)
assert_array_almost_equal(clf.coef_,
average_weights,
decimal=16)
assert_almost_equal(clf.intercept_[0], average_intercept, decimal=16)
def test_average_sparse(self):
# Checks the average weights on data with 0s
eta = .001
alpha = .01
clf = self.factory(loss='squared_loss',
learning_rate='constant',
eta0=eta, alpha=alpha,
fit_intercept=True,
n_iter=1, average=True, shuffle=False)
n_samples = Y3.shape[0]
clf.partial_fit(X3[:int(n_samples / 2)][:], Y3[:int(n_samples / 2)])
clf.partial_fit(X3[int(n_samples / 2):][:], Y3[int(n_samples / 2):])
average_weights, average_intercept = self.asgd(X3, Y3, eta, alpha)
assert_array_almost_equal(clf.coef_,
average_weights,
decimal=16)
assert_almost_equal(clf.intercept_, average_intercept, decimal=16)
def test_sgd_least_squares_fit(self):
xmin, xmax = -5, 5
n_samples = 100
rng = np.random.RandomState(0)
X = np.linspace(xmin, xmax, n_samples).reshape(n_samples, 1)
# simple linear function without noise
y = 0.5 * X.ravel()
clf = self.factory(loss='squared_loss', alpha=0.1, n_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert_greater(score, 0.99)
# simple linear function with noise
y = 0.5 * X.ravel() + rng.randn(n_samples, 1).ravel()
clf = self.factory(loss='squared_loss', alpha=0.1, n_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert_greater(score, 0.5)
def test_sgd_epsilon_insensitive(self):
xmin, xmax = -5, 5
n_samples = 100
X = np.linspace(xmin, xmax, n_samples).reshape(n_samples, 1)
# simple linear function without noise
y = 0.5 * X.ravel()
clf = self.factory(loss='epsilon_insensitive', epsilon=0.01,
alpha=0.1, n_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert_true(score > 0.99)
# simple linear function with noise
y = 0.5 * X.ravel() \
+ np.random.randn(n_samples, 1).ravel()
clf = self.factory(loss='epsilon_insensitive', epsilon=0.01,
alpha=0.1, n_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert_true(score > 0.5)
def test_sgd_huber_fit(self):
xmin, xmax = -5, 5
n_samples = 100
rng = np.random.RandomState(0)
X = np.linspace(xmin, xmax, n_samples).reshape(n_samples, 1)
# simple linear function without noise
y = 0.5 * X.ravel()
clf = self.factory(loss="huber", epsilon=0.1, alpha=0.1, n_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert_greater(score, 0.99)
# simple linear function with noise
y = 0.5 * X.ravel() + rng.randn(n_samples, 1).ravel()
clf = self.factory(loss="huber", epsilon=0.1, alpha=0.1, n_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert_greater(score, 0.5)
def test_elasticnet_convergence(self):
# Check that the SGD output is consistent with coordinate descent
n_samples, n_features = 1000, 5
rng = np.random.RandomState(0)
        X = rng.randn(n_samples, n_features)
        # ground_truth linear model that generates y from X and to which the
        # models should converge if the regularizer were set to 0.0
ground_truth_coef = rng.randn(n_features)
y = np.dot(X, ground_truth_coef)
# XXX: alpha = 0.1 seems to cause convergence problems
for alpha in [0.01, 0.001]:
for l1_ratio in [0.5, 0.8, 1.0]:
cd = linear_model.ElasticNet(alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=False)
cd.fit(X, y)
sgd = self.factory(penalty='elasticnet', n_iter=50,
alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=False)
sgd.fit(X, y)
err_msg = ("cd and sgd did not converge to comparable "
"results for alpha=%f and l1_ratio=%f"
% (alpha, l1_ratio))
assert_almost_equal(cd.coef_, sgd.coef_, decimal=2,
err_msg=err_msg)
def test_partial_fit(self):
third = X.shape[0] // 3
clf = self.factory(alpha=0.01)
clf.partial_fit(X[:third], Y[:third])
assert_equal(clf.coef_.shape, (X.shape[1], ))
assert_equal(clf.intercept_.shape, (1,))
assert_equal(clf.decision_function([0, 0]).shape, (1, ))
        coef_before = clf.coef_
        clf.partial_fit(X[third:], Y[third:])
        # check that coef_ hasn't been re-allocated
        assert_true(np.may_share_memory(clf.coef_, coef_before))
def _test_partial_fit_equal_fit(self, lr):
clf = self.factory(alpha=0.01, n_iter=2, eta0=0.01,
learning_rate=lr, shuffle=False)
clf.fit(X, Y)
y_pred = clf.predict(T)
t = clf.t_
clf = self.factory(alpha=0.01, eta0=0.01,
learning_rate=lr, shuffle=False)
for i in range(2):
clf.partial_fit(X, Y)
y_pred2 = clf.predict(T)
assert_equal(clf.t_, t)
assert_array_almost_equal(y_pred, y_pred2, decimal=2)
def test_partial_fit_equal_fit_constant(self):
self._test_partial_fit_equal_fit("constant")
def test_partial_fit_equal_fit_optimal(self):
self._test_partial_fit_equal_fit("optimal")
def test_partial_fit_equal_fit_invscaling(self):
self._test_partial_fit_equal_fit("invscaling")
def test_loss_function_epsilon(self):
clf = self.factory(epsilon=0.9)
clf.set_params(epsilon=0.1)
assert clf.loss_functions['huber'][1] == 0.1
class SparseSGDRegressorTestCase(DenseSGDRegressorTestCase):
# Run exactly the same tests using the sparse representation variant
factory_class = SparseSGDRegressor
def test_l1_ratio():
# Test if l1 ratio extremes match L1 and L2 penalty settings.
X, y = datasets.make_classification(n_samples=1000,
n_features=100, n_informative=20,
random_state=1234)
# test if elasticnet with l1_ratio near 1 gives same result as pure l1
est_en = SGDClassifier(alpha=0.001, penalty='elasticnet',
l1_ratio=0.9999999999, random_state=42).fit(X, y)
est_l1 = SGDClassifier(alpha=0.001, penalty='l1', random_state=42).fit(X, y)
assert_array_almost_equal(est_en.coef_, est_l1.coef_)
# test if elasticnet with l1_ratio near 0 gives same result as pure l2
est_en = SGDClassifier(alpha=0.001, penalty='elasticnet',
l1_ratio=0.0000000001, random_state=42).fit(X, y)
est_l2 = SGDClassifier(alpha=0.001, penalty='l2', random_state=42).fit(X, y)
assert_array_almost_equal(est_en.coef_, est_l2.coef_)
def test_underflow_or_overlow():
with np.errstate(all='raise'):
# Generate some weird data with hugely unscaled features
rng = np.random.RandomState(0)
n_samples = 100
n_features = 10
X = rng.normal(size=(n_samples, n_features))
X[:, :2] *= 1e300
assert_true(np.isfinite(X).all())
# Use MinMaxScaler to scale the data without introducing a numerical
# instability (computing the standard deviation naively is not possible
# on this data)
X_scaled = MinMaxScaler().fit_transform(X)
assert_true(np.isfinite(X_scaled).all())
# Define a ground truth on the scaled data
ground_truth = rng.normal(size=n_features)
y = (np.dot(X_scaled, ground_truth) > 0.).astype(np.int32)
assert_array_equal(np.unique(y), [0, 1])
model = SGDClassifier(alpha=0.1, loss='squared_hinge', n_iter=500)
# smoke test: model is stable on scaled data
model.fit(X_scaled, y)
assert_true(np.isfinite(model.coef_).all())
# model is numerically unstable on unscaled data
msg_regxp = (r"Floating-point under-/overflow occurred at epoch #.*"
" Scaling input data with StandardScaler or MinMaxScaler"
" might help.")
assert_raises_regexp(ValueError, msg_regxp, model.fit, X, y)
def test_numerical_stability_large_gradient():
# Non regression test case for numerical stability on scaled problems
# where the gradient can still explode with some losses
model = SGDClassifier(loss='squared_hinge', n_iter=10, shuffle=True,
penalty='elasticnet', l1_ratio=0.3, alpha=0.01,
eta0=0.001, random_state=0)
with np.errstate(all='raise'):
model.fit(iris.data, iris.target)
assert_true(np.isfinite(model.coef_).all())
def test_large_regularization():
# Non regression tests for numerical stability issues caused by large
# regularization parameters
for penalty in ['l2', 'l1', 'elasticnet']:
model = SGDClassifier(alpha=1e5, learning_rate='constant', eta0=0.1,
n_iter=5, penalty=penalty, shuffle=False)
with np.errstate(all='raise'):
model.fit(iris.data, iris.target)
assert_array_almost_equal(model.coef_, np.zeros_like(model.coef_))
|
bsd-3-clause
|
treycausey/scikit-learn
|
sklearn/dummy.py
|
2
|
13705
|
# Author: Mathieu Blondel <[email protected]>
# Arnaud Joly <[email protected]>
# Maheshakya Wijewardena<[email protected]>
# License: BSD 3 clause
import numpy as np
from .base import BaseEstimator, ClassifierMixin, RegressorMixin
from .externals.six.moves import xrange
from .utils import check_random_state
from .utils.validation import safe_asarray
from sklearn.utils import deprecated
class DummyClassifier(BaseEstimator, ClassifierMixin):
"""
DummyClassifier is a classifier that makes predictions using simple rules.
This classifier is useful as a simple baseline to compare with other
(real) classifiers. Do not use it for real problems.
Parameters
----------
strategy: str
Strategy to use to generate predictions.
* "stratified": generates predictions by respecting the training
set's class distribution.
* "most_frequent": always predicts the most frequent label in the
training set.
* "uniform": generates predictions uniformly at random.
* "constant": always predicts a constant label that is provided by
the user. This is useful for metrics that evaluate a non-majority
          class.
random_state: int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use.
constant: int or str or array of shape = [n_outputs]
The explicit constant as predicted by the "constant" strategy. This
parameter is useful only for the "constant" strategy.
Attributes
----------
`classes_` : array or list of array of shape = [n_classes]
Class labels for each output.
`n_classes_` : array or list of array of shape = [n_classes]
        Number of labels for each output.
`class_prior_` : array or list of array of shape = [n_classes]
Probability of each class for each output.
`n_outputs_` : int,
Number of outputs.
`outputs_2d_` : bool,
True if the output at fit is 2d, else false.
"""
def __init__(self, strategy="stratified", random_state=None,
constant=None):
self.strategy = strategy
self.random_state = random_state
self.constant = constant
def fit(self, X, y):
"""Fit the random classifier.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, shape = [n_samples] or [n_samples, n_outputs]
Target values.
Returns
-------
self : object
Returns self.
"""
if self.strategy not in ("most_frequent", "stratified", "uniform",
"constant"):
raise ValueError("Unknown strategy type.")
y = np.atleast_1d(y)
self.output_2d_ = y.ndim == 2
if y.ndim == 1:
y = np.reshape(y, (-1, 1))
self.n_outputs_ = y.shape[1]
self.classes_ = []
self.n_classes_ = []
self.class_prior_ = []
if self.strategy == "constant":
if self.constant is None:
raise ValueError("Constant target value has to be specified "
"when the constant strategy is used.")
else:
constant = np.reshape(np.atleast_1d(self.constant), (-1, 1))
if constant.shape[0] != self.n_outputs_:
raise ValueError("Constant target value should have "
"shape (%d, 1)." % self.n_outputs_)
for k in xrange(self.n_outputs_):
classes, y_k = np.unique(y[:, k], return_inverse=True)
self.classes_.append(classes)
self.n_classes_.append(classes.shape[0])
self.class_prior_.append(np.bincount(y_k) / float(y_k.shape[0]))
# Checking in case of constant strategy if the constant provided
# by the user is in y.
if self.strategy == "constant":
if constant[k] not in self.classes_[k]:
raise ValueError("The constant target value must be "
"present in training data")
if self.n_outputs_ == 1 and not self.output_2d_:
self.n_classes_ = self.n_classes_[0]
self.classes_ = self.classes_[0]
self.class_prior_ = self.class_prior_[0]
return self
def predict(self, X):
"""
Perform classification on test vectors X.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Input vectors, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
y : array, shape = [n_samples] or [n_samples, n_outputs]
Predicted target values for X.
"""
if not hasattr(self, "classes_"):
raise ValueError("DummyClassifier not fitted.")
X = safe_asarray(X)
# numpy random_state expects Python int and not long as size argument
# under Windows
n_samples = int(X.shape[0])
rs = check_random_state(self.random_state)
n_classes_ = self.n_classes_
classes_ = self.classes_
class_prior_ = self.class_prior_
constant = self.constant
if self.n_outputs_ == 1:
# Get same type even for self.n_outputs_ == 1
n_classes_ = [n_classes_]
classes_ = [classes_]
class_prior_ = [class_prior_]
constant = [constant]
# Compute probability only once
if self.strategy == "stratified":
proba = self.predict_proba(X)
if self.n_outputs_ == 1:
proba = [proba]
y = []
for k in xrange(self.n_outputs_):
if self.strategy == "most_frequent":
ret = np.ones(n_samples, dtype=int) * class_prior_[k].argmax()
elif self.strategy == "stratified":
ret = proba[k].argmax(axis=1)
elif self.strategy == "uniform":
ret = rs.randint(n_classes_[k], size=n_samples)
elif self.strategy == "constant":
ret = np.ones(n_samples, dtype=int) * (
np.where(classes_[k] == constant[k]))
y.append(classes_[k][ret])
y = np.vstack(y).T
if self.n_outputs_ == 1 and not self.output_2d_:
y = np.ravel(y)
return y
def predict_proba(self, X):
"""
Return probability estimates for the test vectors X.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Input vectors, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
        P : array-like or list of array-like of shape = [n_samples, n_classes]
Returns the probability of the sample for each class in
the model, where classes are ordered arithmetically, for each
output.
"""
if not hasattr(self, "classes_"):
raise ValueError("DummyClassifier not fitted.")
X = safe_asarray(X)
# numpy random_state expects Python int and not long as size argument
# under Windows
n_samples = int(X.shape[0])
rs = check_random_state(self.random_state)
n_classes_ = self.n_classes_
classes_ = self.classes_
class_prior_ = self.class_prior_
constant = self.constant
if self.n_outputs_ == 1 and not self.output_2d_:
# Get same type even for self.n_outputs_ == 1
n_classes_ = [n_classes_]
classes_ = [classes_]
class_prior_ = [class_prior_]
constant = [constant]
P = []
for k in xrange(self.n_outputs_):
if self.strategy == "most_frequent":
ind = np.ones(n_samples, dtype=int) * class_prior_[k].argmax()
out = np.zeros((n_samples, n_classes_[k]), dtype=np.float64)
out[:, ind] = 1.0
elif self.strategy == "stratified":
out = rs.multinomial(1, class_prior_[k], size=n_samples)
elif self.strategy == "uniform":
out = np.ones((n_samples, n_classes_[k]), dtype=np.float64)
out /= n_classes_[k]
elif self.strategy == "constant":
ind = np.where(classes_[k] == constant[k])
out = np.zeros((n_samples, n_classes_[k]), dtype=np.float64)
out[:, ind] = 1.0
P.append(out)
if self.n_outputs_ == 1 and not self.output_2d_:
P = P[0]
return P
def predict_log_proba(self, X):
"""
Return log probability estimates for the test vectors X.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Input vectors, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
P : array-like or list of array-like of shape = [n_samples, n_classes]
Returns the log probability of the sample for each class in
the model, where classes are ordered arithmetically for each
output.
"""
proba = self.predict_proba(X)
if self.n_outputs_ == 1:
return np.log(proba)
else:
return [np.log(p) for p in proba]
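# Illustrative usage sketch (added for clarity; not part of the original
# module). A "most_frequent" dummy always predicts the majority class of the
# training targets, which gives a floor against which real classifiers can
# be compared:
#
#     >>> import numpy as np
#     >>> from sklearn.dummy import DummyClassifier
#     >>> X = np.zeros((6, 1))          # features are ignored by the dummy
#     >>> y = np.array([0, 0, 0, 0, 1, 1])
#     >>> clf = DummyClassifier(strategy="most_frequent").fit(X, y)
#     >>> clf.predict(np.zeros((2, 1)))
#     array([0, 0])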
class DummyRegressor(BaseEstimator, RegressorMixin):
"""
DummyRegressor is a regressor that makes predictions using
simple rules.
This regressor is useful as a simple baseline to compare with other
(real) regressors. Do not use it for real problems.
Parameters
----------
strategy: str
Strategy to use to generate predictions.
* "mean": always predicts the mean of the training set
* "median": always predicts the median of the training set
* "constant": always predicts a constant value that is provided by
the user.
constant: int or float or array of shape = [n_outputs]
The explicit constant as predicted by the "constant" strategy. This
parameter is useful only for the "constant" strategy.
Attributes
----------
    `constant_` : float or array of shape [n_outputs]
        Mean or median of the training targets or constant value given by
        the user.
`n_outputs_` : int,
Number of outputs.
`outputs_2d_` : bool,
True if the output at fit is 2d, else false.
"""
def __init__(self, strategy="mean", constant=None):
self.strategy = strategy
self.constant = constant
@property
@deprecated('This will be removed in version 0.17')
def y_mean_(self):
if self.strategy == 'mean':
return self.constant_
raise AttributeError
def fit(self, X, y):
"""Fit the random regressor.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, shape = [n_samples] or [n_samples, n_outputs]
Target values.
Returns
-------
self : object
Returns self.
"""
if self.strategy not in ("mean", "median", "constant"):
raise ValueError("Unknown strategy type: %s, "
"expected 'mean', 'median' or 'constant'"
% self.strategy)
y = safe_asarray(y)
self.output_2d_ = (y.ndim == 2)
if self.strategy == "mean":
self.constant_ = np.reshape(np.mean(y, axis=0), (1, -1))
elif self.strategy == "median":
self.constant_ = np.reshape(np.median(y, axis=0), (1, -1))
elif self.strategy == "constant":
if self.constant is None:
raise TypeError("Constant target value has to be specified "
"when the constant strategy is used.")
self.constant = safe_asarray(self.constant)
if self.output_2d_ and self.constant.shape[0] != y.shape[1]:
raise ValueError(
"Constant target value should have "
"shape (%d, 1)." % y.shape[1])
self.constant_ = np.reshape(self.constant, (1, -1))
self.n_outputs_ = np.size(self.constant_) # y.shape[1] is not safe
return self
def predict(self, X):
"""
        Perform regression on test vectors X.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Input vectors, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
y : array, shape = [n_samples] or [n_samples, n_outputs]
Predicted target values for X.
"""
if not hasattr(self, "constant_"):
raise ValueError("DummyRegressor not fitted.")
X = safe_asarray(X)
n_samples = X.shape[0]
y = np.ones((n_samples, 1)) * self.constant_
if self.n_outputs_ == 1 and not self.output_2d_:
y = np.ravel(y)
return y
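# Illustrative usage sketch (added for clarity; not part of the original
# module). With the default "mean" strategy the regressor returns the mean
# of the training targets for every sample:
#
#     >>> import numpy as np
#     >>> from sklearn.dummy import DummyRegressor
#     >>> X = np.zeros((4, 1))          # features are ignored by the dummy
#     >>> y = np.array([1.0, 2.0, 3.0, 4.0])
#     >>> reg = DummyRegressor(strategy="mean").fit(X, y)
#     >>> float(reg.predict(np.zeros((2, 1)))[0])
#     2.5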
|
bsd-3-clause
|
bykoianko/omim
|
tools/python/transit/transit_graph_generator.py
|
10
|
18195
|
#!/usr/bin/env python3
# Generates transit graph for MWM transit section generator.
# Also shows preview of transit scheme lines.
import argparse
import copy
import json
import math
import numpy as np
import os.path
import bezier_curves
import transit_color_palette
class OsmIdCode:
NODE = 0x4000000000000000
WAY = 0x8000000000000000
RELATION = 0xC000000000000000
RESET = ~(NODE | WAY | RELATION)
TYPE2CODE = {
'n': NODE,
'r': RELATION,
'w': WAY
}
def get_extended_osm_id(osm_id, osm_type):
try:
return str(osm_id | OsmIdCode.TYPE2CODE[osm_type[0]])
except KeyError:
raise ValueError('Unknown OSM type: ' + osm_type)
def get_line_id(road_id, line_index):
return road_id << 4 | line_index
def get_interchange_node_id(min_stop_id):
return 1 << 62 | min_stop_id
def clamp(value, min_value, max_value):
return max(min(value, max_value), min_value)
def get_mercator_point(lat, lon):
lat = clamp(lat, -86.0, 86.0)
sin_x = math.sin(math.radians(lat))
y = math.degrees(0.5 * math.log((1.0 + sin_x) / (1.0 - sin_x)))
y = clamp(y, -180, 180)
return {'x': lon, 'y': y}
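# Worked example for get_mercator_point (added comment; not part of the
# original script): the equator maps to y == 0 and latitudes are clamped to
# [-86, 86] before projecting, so
#
#     >>> get_mercator_point(0.0, 10.0)
#     {'x': 10.0, 'y': 0.0}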
class TransitGraphBuilder:
def __init__(self, input_data, transit_colors, points_per_curve=100, alpha=0.5):
self.palette = transit_color_palette.Palette(transit_colors)
self.input_data = input_data
self.points_per_curve = points_per_curve
self.alpha = alpha
self.networks = []
self.lines = []
self.stops = {}
self.interchange_nodes = set()
self.transfers = {}
self.gates = {}
self.edges = []
self.segments = {}
self.shapes = []
self.transit_graph = None
self.matched_colors = {}
self.stop_names = {}
def __get_average_stops_point(self, stop_ids):
"""Returns an average position of the stops."""
count = len(stop_ids)
if count == 0:
            raise ValueError('Average stops point calculation failed: the list of stop ids is empty.')
average_point = [0, 0]
for stop_id in stop_ids:
point = self.__get_stop(stop_id)['point']
average_point[0] += point['x']
average_point[1] += point['y']
return [average_point[0] / count, average_point[1] / count]
def __add_gate(self, osm_id, is_entrance, is_exit, point, weight, stop_id):
"""Creates a new gate or adds information to the existing with the same weight."""
if (osm_id, weight) in self.gates:
gate_ref = self.gates[(osm_id, weight)]
if stop_id not in gate_ref['stop_ids']:
gate_ref['stop_ids'].append(stop_id)
gate_ref['entrance'] |= is_entrance
gate_ref['exit'] |= is_exit
return
gate = {'osm_id': osm_id,
'point': point,
'weight': weight,
'stop_ids': [stop_id],
'entrance': is_entrance,
'exit': is_exit
}
self.gates[(osm_id, weight)] = gate
def __get_interchange_node(self, stop_id):
"""Returns the existing interchange node or creates a new one."""
for node_stops in self.interchange_nodes:
if stop_id in node_stops:
return node_stops
return (stop_id,)
def __get_stop(self, stop_id):
"""Returns the stop or the interchange node."""
if stop_id in self.stops:
return self.stops[stop_id]
return self.transfers[stop_id]
def __check_line_title(self, line, route_name):
"""Formats correct line name."""
if line['title']:
return
name = route_name if route_name else line['number']
if len(line['stop_ids']) > 1:
first_stop = self.stop_names[line['stop_ids'][0]]
last_stop = self.stop_names[line['stop_ids'][-1]]
if first_stop and last_stop:
line['title'] = u'{0}: {1} - {2}'.format(name, first_stop, last_stop)
return
line['title'] = name
def __read_stops(self):
"""Reads stops, their exits and entrances."""
for stop_item in self.input_data['stops']:
stop = {}
stop['id'] = stop_item['id']
stop['osm_id'] = get_extended_osm_id(stop_item['osm_id'], stop_item['osm_type'])
if 'zone_id' in stop_item:
stop['zone_id'] = stop_item['zone_id']
stop['point'] = get_mercator_point(stop_item['lat'], stop_item['lon'])
stop['line_ids'] = []
# TODO: Save stop names stop_item['name'] and stop_item['int_name'] for text anchors calculation.
stop['title_anchors'] = []
self.stops[stop['id']] = stop
self.stop_names[stop['id']] = stop_item['name']
for entrance_item in stop_item['entrances']:
ex_id = get_extended_osm_id(entrance_item['osm_id'], entrance_item['osm_type'])
point = get_mercator_point(entrance_item['lat'], entrance_item['lon'])
self.__add_gate(ex_id, True, False, point, entrance_item['distance'], stop['id'])
for exit_item in stop_item['exits']:
ex_id = get_extended_osm_id(exit_item['osm_id'], exit_item['osm_type'])
point = get_mercator_point(exit_item['lat'], exit_item['lon'])
self.__add_gate(ex_id, False, True, point, exit_item['distance'], stop['id'])
def __read_transfers(self):
"""Reads transfers between stops."""
for transfer_item in self.input_data['transfers']:
edge = {'stop1_id': transfer_item[0],
'stop2_id': transfer_item[1],
'weight': transfer_item[2],
'transfer': True
}
self.edges.append(copy.deepcopy(edge))
edge['stop1_id'], edge['stop2_id'] = edge['stop2_id'], edge['stop1_id']
self.edges.append(edge)
def __read_networks(self):
"""Reads networks and routes."""
for network_item in self.input_data['networks']:
network_id = network_item['agency_id']
network = {'id': network_id,
'title': network_item['network']}
self.networks.append(network)
for route_item in network_item['routes']:
line_index = 0
# Create a line for each itinerary.
for line_item in route_item['itineraries']:
line_stops = line_item['stops']
line_id = get_line_id(route_item['route_id'], line_index)
line = {'id': line_id,
'title': line_item.get('name', ''),
'type': route_item['type'],
'network_id': network_id,
'number': route_item['ref'],
'interval': line_item['interval'],
'stop_ids': []
}
line['color'] = self.__match_color(route_item.get('colour', ''), route_item.get('casing', ''))
                # TODO: Add processing of line_item['shape'] when this data becomes available.
                # TODO: Add processing of line_item['trip_ids'] when this data becomes available.
# Create an edge for each connection of stops.
for i in range(len(line_stops)):
stop1 = line_stops[i]
line['stop_ids'].append(stop1[0])
self.stops[stop1[0]]['line_ids'].append(line_id)
if i + 1 < len(line_stops):
stop2 = line_stops[i + 1]
edge = {'stop1_id': stop1[0],
'stop2_id': stop2[0],
'weight': stop2[1] - stop1[1],
'transfer': False,
'line_id': line_id,
'shape_ids': []
}
self.edges.append(edge)
self.__check_line_title(line, route_item.get('name', ''))
self.lines.append(line)
line_index += 1
def __match_color(self, color_str, casing_str):
if color_str is None or len(color_str) == 0:
return self.palette.get_default_color()
if casing_str is None:
casing_str = ''
matched_colors_key = color_str + "/" + casing_str
if matched_colors_key in self.matched_colors:
return self.matched_colors[matched_colors_key]
c = self.palette.get_nearest_color(color_str, casing_str, self.matched_colors.values())
if c != self.palette.get_default_color():
self.matched_colors[matched_colors_key] = c
return c
def __generate_transfer_nodes(self):
"""Merges stops into transfer nodes."""
for edge in self.edges:
if edge['transfer']:
node1 = self.__get_interchange_node(edge['stop1_id'])
node2 = self.__get_interchange_node(edge['stop2_id'])
merged_node = tuple(sorted(set(node1 + node2)))
self.interchange_nodes.discard(node1)
self.interchange_nodes.discard(node2)
self.interchange_nodes.add(merged_node)
for node_stop_ids in self.interchange_nodes:
point = self.__get_average_stops_point(node_stop_ids)
transfer = {'id': get_interchange_node_id(self.stops[node_stop_ids[0]]['id']),
'stop_ids': list(node_stop_ids),
'point': {'x': point[0], 'y': point[1]},
'title_anchors': []
}
for stop_id in node_stop_ids:
self.stops[stop_id]['transfer_id'] = transfer['id']
self.transfers[transfer['id']] = transfer
def __collect_segments(self):
"""Prepares collection of segments for shapes generation."""
        # Each line is divided into segments by its stops and transfer nodes.
        # Merge equal segments from different lines into a single one and collect adjacent stops of that segment.
        # Average positions of these stops will be used as guide points for curve generation.
for line in self.lines:
prev_seg = None
prev_id1 = None
for i in range(len(line['stop_ids']) - 1):
node1 = self.stops[line['stop_ids'][i]]
node2 = self.stops[line['stop_ids'][i + 1]]
id1 = node1.get('transfer_id', node1['id'])
id2 = node2.get('transfer_id', node2['id'])
if id1 == id2:
continue
seg = tuple(sorted([id1, id2]))
if seg not in self.segments:
self.segments[seg] = {'guide_points': {id1: set(), id2: set()}}
if prev_seg is not None:
self.segments[seg]['guide_points'][id1].add(prev_id1)
self.segments[prev_seg]['guide_points'][id1].add(id2)
prev_seg = seg
prev_id1 = id1
def __generate_shapes_for_segments(self):
"""Generates a curve for each connection of two stops / transfer nodes."""
for (id1, id2), info in self.segments.items():
point1 = [self.__get_stop(id1)['point']['x'], self.__get_stop(id1)['point']['y']]
point2 = [self.__get_stop(id2)['point']['x'], self.__get_stop(id2)['point']['y']]
if info['guide_points'][id1]:
guide1 = self.__get_average_stops_point(info['guide_points'][id1])
else:
guide1 = [2 * point1[0] - point2[0], 2 * point1[1] - point2[1]]
if info['guide_points'][id2]:
guide2 = self.__get_average_stops_point(info['guide_points'][id2])
else:
guide2 = [2 * point2[0] - point1[0], 2 * point2[1] - point1[1]]
curve_points = bezier_curves.segment_to_Catmull_Rom_curve(guide1, point1, point2, guide2,
self.points_per_curve, self.alpha)
info['curve'] = np.array(curve_points)
polyline = []
for point in curve_points:
polyline.append({'x': point[0], 'y': point[1]})
shape = {'id': {'stop1_id': id1, 'stop2_id': id2},
'polyline': polyline}
self.shapes.append(shape)
def __assign_shapes_to_edges(self):
"""Assigns a shape to each non-transfer edge."""
for edge in self.edges:
if not edge['transfer']:
stop1 = self.stops[edge['stop1_id']]
stop2 = self.stops[edge['stop2_id']]
id1 = stop1.get('transfer_id', stop1['id'])
id2 = stop2.get('transfer_id', stop2['id'])
seg = tuple(sorted([id1, id2]))
if seg in self.segments:
edge['shape_ids'].append({'stop1_id': seg[0], 'stop2_id': seg[1]})
def __create_scheme_shapes(self):
self.__collect_segments()
self.__generate_shapes_for_segments()
self.__assign_shapes_to_edges()
def build(self):
if self.transit_graph is not None:
return self.transit_graph
self.__read_stops()
self.__read_transfers()
self.__read_networks()
self.__generate_transfer_nodes()
self.__create_scheme_shapes()
self.transit_graph = {'networks': self.networks,
'lines': self.lines,
'gates': list(self.gates.values()),
'stops': list(self.stops.values()),
'transfers': list(self.transfers.values()),
'shapes': self.shapes,
'edges': self.edges}
return self.transit_graph
def show_preview(self):
import matplotlib.pyplot as plt
for (s1, s2), info in self.segments.items():
plt.plot(info['curve'][:, 0], info['curve'][:, 1], 'g')
for stop in self.stops.values():
if 'transfer_id' in stop:
point = self.transfers[stop['transfer_id']]['point']
size = 60
color = 'r'
else:
point = stop['point']
if len(stop['line_ids']) > 2:
size = 40
color = 'b'
else:
size = 20
color = 'g'
plt.scatter([point['x']], [point['y']], size, color)
plt.show()
    def show_color_matching_table(self, title, colors_ref_table):
import matplotlib.pyplot as plt
import matplotlib.patches as patches
fig = plt.figure()
ax = fig.add_subplot(111, aspect='equal')
plt.title(title)
sz = 1.0 / (2.0 * len(self.matched_colors))
delta_y = sz * 0.5
for c in self.matched_colors:
tokens = c.split('/')
if len(tokens[1]) == 0:
tokens[1] = tokens[0]
ax.add_patch(patches.Rectangle((sz, delta_y), sz, sz, facecolor="#" + tokens[0], edgecolor="#" + tokens[1]))
rect_title = tokens[0]
if tokens[0] != tokens[1]:
rect_title += "/" + tokens[1]
ax.text(2.5 * sz, delta_y, rect_title + " -> ")
ref_color = colors_ref_table[self.matched_colors[c]]
ax.add_patch(patches.Rectangle((0.3 + sz, delta_y), sz, sz, facecolor="#" + ref_color))
ax.text(0.3 + 2.5 * sz, delta_y, ref_color + " (" + self.matched_colors[c] + ")")
delta_y += sz * 2.0
plt.show()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('input_file', help='input file name of transit data')
parser.add_argument('output_file', nargs='?', help='output file name of generated graph')
default_colors_path = os.path.dirname(os.path.abspath(__file__)) + '/../../../data/transit_colors.txt'
parser.add_argument('-c', '--colors', type=str, default=default_colors_path,
help='transit colors file COLORS_FILE_PATH', metavar='COLORS_FILE_PATH')
parser.add_argument('-p', '--preview', action="store_true", default=False,
help="show preview of the transit scheme")
parser.add_argument('-m', '--matched_colors', action="store_true", default=False,
help="show the matched colors table")
parser.add_argument('-a', '--alpha', type=float, default=0.5, help='the curves generator parameter value ALPHA',
metavar='ALPHA')
parser.add_argument('-n', '--num', type=int, default=100, help='the number NUM of points in a generated curve',
metavar='NUM')
args = parser.parse_args()
with open(args.input_file, 'r') as input_file:
data = json.load(input_file)
with open(args.colors, 'r') as colors_file:
colors = json.load(colors_file)
transit = TransitGraphBuilder(data, colors, args.num, args.alpha)
result = transit.build()
output_file = args.output_file
head, tail = os.path.split(os.path.abspath(args.input_file))
name, extension = os.path.splitext(tail)
if output_file is None:
output_file = os.path.join(head, name + '.transit' + extension)
with open(output_file, 'w') as json_file:
result_data = json.dumps(result, ensure_ascii=False, indent=4, sort_keys=True)
json_file.write(result_data)
print('Transit graph generated:', output_file)
if args.preview:
transit.show_preview()
if args.matched_colors:
colors_ref_table = {}
for color_name, color_info in colors['colors'].items():
colors_ref_table[color_name] = color_info['clear']
        transit.show_color_matching_table(name, colors_ref_table)
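# Example invocation (illustrative only; the input file name below is made
# up):
#
#     python3 transit_graph_generator.py osm_transit.json --preview
#
# This writes osm_transit.transit.json next to the input file and, with
# --preview, opens a matplotlib window showing the generated scheme.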
|
apache-2.0
|
jameshensman/pymc3
|
pymc3/examples/ARM12_6uranium.py
|
14
|
1919
|
import numpy as np
from pymc3 import *
import pandas as pd
data = pd.read_csv(get_data_file('pymc3.examples', 'data/srrs2.dat'))
cty_data = pd.read_csv(get_data_file('pymc3.examples', 'data/cty.dat'))
data = data[data.state == 'MN']
data['fips'] = data.stfips * 1000 + data.cntyfips
cty_data['fips'] = cty_data.stfips * 1000 + cty_data.ctfips
data['lradon'] = np.log(np.where(data.activity == 0, .1, data.activity))
data = data.merge(cty_data, 'inner', on='fips')
unique = data[['fips']].drop_duplicates()
unique['group'] = np.arange(len(unique))
unique.set_index('fips')
data = data.merge(unique, 'inner', on='fips')
obs_means = data.groupby('fips').lradon.mean()
n = len(obs_means)
lradon = np.array(data.lradon)
floor = np.array(data.floor)
group = np.array(data.group)
ufull = np.array(data.Uppm)
model = Model()
with model:
groupmean = Normal('groupmean', 0, 10. ** -2.)
# as recommended by "Prior distributions for variance parameters in
# hierarchical models"
groupsd = Uniform('groupsd', 0, 10.)
sd = Uniform('sd', 0, 10.)
floor_m = Normal('floor_m', 0, 5. ** -2.)
u_m = Normal('u_m', 0, 5. ** -2)
means = Normal('means', groupmean, groupsd ** -2., shape=n)
    lr = Normal('lr', floor * floor_m + means[group] + ufull * u_m,
                sd ** -2., observed=lradon)
def run(n=3000):
if n == "short":
n = 50
with model:
start = Point({
'groupmean': obs_means.mean(),
'groupsd_interval': 0,
'sd_interval': 0,
'means': np.array(obs_means),
'u_m': np.array([.72]),
'floor_m': 0.,
})
start = find_MAP(start, model.vars[:-1])
H = model.fastd2logp()
h = np.diag(H(start))
step = HamiltonianMC(model.vars, h)
trace = sample(n, step, start)
if __name__ == '__main__':
run()
|
apache-2.0
|
HarllanAndrye/nilmtk
|
nilmtk/stats/dropoutrateresults.py
|
8
|
1783
|
import matplotlib.pyplot as plt
from ..results import Results
from ..consts import SECS_PER_DAY
class DropoutRateResults(Results):
"""
Attributes
----------
_data : pd.DataFrame
index is start date for the whole chunk
`end` is end date for the whole chunk
`dropout_rate` is float [0,1]
`n_samples` is int, used for calculating weighted mean
"""
name = "dropout_rate"
def combined(self):
"""Calculates weighted average.
Returns
-------
dropout_rate : float, [0,1]
"""
tot_samples = self._data['n_samples'].sum()
proportion = self._data['n_samples'] / tot_samples
dropout_rate = (self._data['dropout_rate'] * proportion).sum()
return dropout_rate
def unify(self, other):
super(DropoutRateResults, self).unify(other)
for i, row in self._data.iterrows():
# store mean of dropout rate
self._data['dropout_rate'].loc[i] += other._data['dropout_rate'].loc[i]
self._data['dropout_rate'].loc[i] /= 2
self._data['n_samples'].loc[i] += other._data['n_samples'].loc[i]
def to_dict(self):
return {'statistics': {'dropout_rate': self.combined()}}
def plot(self, ax=None):
if ax is None:
ax = plt.gca()
ax.xaxis.axis_date()
for index, row in self._data.iterrows():
length = (row['end'] - index).total_seconds() / SECS_PER_DAY
            rect = plt.Rectangle((index, 0),  # bottom left corner
                                 length,  # width
                                 row['dropout_rate'],  # height
                                 color='b')
ax.add_patch(rect)
ax.autoscale_view()
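# Worked example for combined() (added comment; not part of the original
# class): two chunks with dropout rates 0.1 over 100 samples and 0.3 over
# 300 samples give a weighted mean of (0.1 * 100 + 0.3 * 300) / 400 = 0.25,
# rather than the unweighted mean of 0.2.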
|
apache-2.0
|
cbpygit/pypmj
|
environment_info.py
|
1
|
2793
|
"""Outputs info on the environment in which pypmj runs and other specific
data useful for bug filing to an output text file.
Authors : Carlo Barth
"""
import importlib
import os
import platform
from shutil import rmtree
import sys
# For final clean-up, remember if the logs folder exists in this working
# directory. If so, also remember its content.
LOG_FOLDER_EXISTS = os.path.isdir('logs')
if LOG_FOLDER_EXISTS:
LOG_FOLDER_CONTENTS = os.listdir('logs')
FMT = '{0:20s}: {1}\n'
def fmt(key_, val_):
return FMT.format(key_, val_)
SEP = '\n'+80*'='+'\n'
def main():
fout = open('pypmj_env_info.log', 'w')
fout.write(SEP)
fout.write(' Platform and version data')
fout.write(SEP+'\n')
fout.write(fmt('platform', platform.platform()))
fout.write(fmt('python version', sys.version))
# Check dependency imports and versions
dependencies = ('numpy', 'pandas', 'scipy', 'tables')
dep_versions = {}
missing_dependencies = []
for d in dependencies:
try:
module = importlib.import_module(d)
dep_versions[d] = module.__version__
except ImportError as e:
missing_dependencies.append((d, e))
for d in dep_versions:
fout.write(fmt(d+' version', dep_versions[d]))
if missing_dependencies:
for md in missing_dependencies:
d, e = md
fout.write(fmt(d+'->ImportError', e))
fout.write('\nLeaving.')
fout.close()
return
try:
import pypmj as jpy
except Exception as e:
fout.write(fmt('pypmj->ImportError', e))
fout.write('\nLeaving.')
fout.close()
return
try:
with open(jpy._config.config_file, 'r') as f:
conf_content = f.read()
fout.write(fmt('pypmj version', jpy.__version__))
fout.write(fmt('JCMsuite version', jpy.__jcm_version__))
fout.write(SEP)
fout.write(' The config file')
fout.write(SEP+'\n')
fout.write(conf_content)
fout.write(SEP)
fout.write(' jcm_license_info')
fout.write(SEP)
fout.write('\n{}\n'.format(jpy.jcm_license_info(False,True)))
fout.write(SEP)
fout.close()
    except Exception:
fout.close()
# Clean up
if LOG_FOLDER_EXISTS:
if not os.path.isdir('logs'):
return
contents = os.listdir('logs')
for c in contents:
cpath = os.path.join('logs', c)
            if c not in LOG_FOLDER_CONTENTS:
if os.path.isdir(cpath):
rmtree(os.path.join('logs', c))
else:
os.remove(cpath)
else:
if os.path.isdir('logs'):
rmtree('logs')
if __name__ == '__main__':
main()
|
gpl-3.0
|
vybstat/scikit-learn
|
benchmarks/bench_20newsgroups.py
|
377
|
3555
|
from __future__ import print_function, division
from time import time
import argparse
import numpy as np
from sklearn.dummy import DummyClassifier
from sklearn.datasets import fetch_20newsgroups_vectorized
from sklearn.metrics import accuracy_score
from sklearn.utils.validation import check_array
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import AdaBoostClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import MultinomialNB
ESTIMATORS = {
"dummy": DummyClassifier(),
"random_forest": RandomForestClassifier(n_estimators=100,
max_features="sqrt",
min_samples_split=10),
"extra_trees": ExtraTreesClassifier(n_estimators=100,
max_features="sqrt",
min_samples_split=10),
"logistic_regression": LogisticRegression(),
"naive_bayes": MultinomialNB(),
"adaboost": AdaBoostClassifier(n_estimators=10),
}
###############################################################################
# Data
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('-e', '--estimators', nargs="+", required=True,
choices=ESTIMATORS)
args = vars(parser.parse_args())
data_train = fetch_20newsgroups_vectorized(subset="train")
data_test = fetch_20newsgroups_vectorized(subset="test")
X_train = check_array(data_train.data, dtype=np.float32,
accept_sparse="csc")
X_test = check_array(data_test.data, dtype=np.float32, accept_sparse="csr")
y_train = data_train.target
y_test = data_test.target
print("20 newsgroups")
print("=============")
print("X_train.shape = {0}".format(X_train.shape))
print("X_train.format = {0}".format(X_train.format))
print("X_train.dtype = {0}".format(X_train.dtype))
print("X_train density = {0}"
"".format(X_train.nnz / np.product(X_train.shape)))
print("y_train {0}".format(y_train.shape))
print("X_test {0}".format(X_test.shape))
print("X_test.format = {0}".format(X_test.format))
print("X_test.dtype = {0}".format(X_test.dtype))
print("y_test {0}".format(y_test.shape))
print()
print("Classifier Training")
print("===================")
accuracy, train_time, test_time = {}, {}, {}
for name in sorted(args["estimators"]):
clf = ESTIMATORS[name]
try:
clf.set_params(random_state=0)
except (TypeError, ValueError):
pass
print("Training %s ... " % name, end="")
t0 = time()
clf.fit(X_train, y_train)
train_time[name] = time() - t0
t0 = time()
y_pred = clf.predict(X_test)
test_time[name] = time() - t0
accuracy[name] = accuracy_score(y_test, y_pred)
print("done")
print()
print("Classification performance:")
print("===========================")
print()
print("%s %s %s %s" % ("Classifier ", "train-time", "test-time",
"Accuracy"))
print("-" * 44)
for name in sorted(accuracy, key=accuracy.get):
print("%s %s %s %s" % (name.ljust(16),
("%.4fs" % train_time[name]).center(10),
("%.4fs" % test_time[name]).center(10),
("%.4f" % accuracy[name]).center(10)))
print()
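# Example invocation (illustrative only):
#
#     python bench_20newsgroups.py -e dummy logistic_regression
#
# This downloads the vectorized 20 newsgroups data on first use, then prints
# training time, test time and accuracy for each selected estimator.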
|
bsd-3-clause
|
cwhanse/pvlib-python
|
pvlib/ivtools/utils.py
|
3
|
15067
|
"""
The ``pvlib.ivtools.utils.py`` module contains utility functions related to
working with IV curves, or fitting equations to IV curve data.
"""
import numpy as np
import pandas as pd
# A small number used to decide when a slope is equivalent to zero
EPS = np.finfo('float').eps**(1/3)
def _numdiff(x, f):
"""
Compute first and second order derivative using possibly unequally
spaced data.
Parameters
----------
x : numeric
a numpy array of values of x
f : numeric
a numpy array of values of the function f for which derivatives are to
be computed. Must be the same length as x.
Returns
-------
df : numeric
a numpy array of len(x) containing the first derivative of f at each
point x except at the first 2 and last 2 points
df2 : numeric
a numpy array of len(x) containing the second derivative of f at each
point x except at the first 2 and last 2 points.
Notes
-----
``numdiff`` computes first and second order derivatives using a 5th order
formula that accounts for possibly unequally spaced data [1]_. Because a
5th order centered difference formula is used, ``numdiff`` returns NaNs
for the first 2 and last 2 points in the input vector for x. Ported from
PVLib Matlab [2]_.
References
----------
.. [1] M. K. Bowen, R. Smith, "Derivative formulae and errors for
non-uniformly spaced points", Proceedings of the Royal Society A, vol.
461 pp 1975 - 1997, July 2005. DOI: 10.1098/rpsa.2004.1430
.. [2] PVLib MATLAB https://github.com/sandialabs/MATLAB_PV_LIB
"""
n = len(f)
df = np.zeros(n)
df2 = np.zeros(n)
# first two points are special
df[:2] = float("Nan")
df2[:2] = float("Nan")
# Last two points are special
df[-2:] = float("Nan")
df2[-2:] = float("Nan")
# Rest of points. Take reference point to be the middle of each group of 5
# points. Calculate displacements
ff = np.vstack((f[:-4], f[1:-3], f[2:-2], f[3:-1], f[4:])).T
a0 = (np.vstack((x[:-4], x[1:-3], x[2:-2], x[3:-1], x[4:])).T
- np.tile(x[2:-2], [5, 1]).T)
u1 = np.zeros(a0.shape)
left = np.zeros(a0.shape)
u2 = np.zeros(a0.shape)
u1[:, 0] = (
a0[:, 1] * a0[:, 2] * a0[:, 3] + a0[:, 1] * a0[:, 2] * a0[:, 4]
+ a0[:, 1] * a0[:, 3] * a0[:, 4] + a0[:, 2] * a0[:, 3] * a0[:, 4])
u1[:, 1] = (
a0[:, 0] * a0[:, 2] * a0[:, 3] + a0[:, 0] * a0[:, 2] * a0[:, 4]
+ a0[:, 0] * a0[:, 3] * a0[:, 4] + a0[:, 2] * a0[:, 3] * a0[:, 4])
u1[:, 2] = (
a0[:, 0] * a0[:, 1] * a0[:, 3] + a0[:, 0] * a0[:, 1] * a0[:, 4]
+ a0[:, 0] * a0[:, 3] * a0[:, 4] + a0[:, 1] * a0[:, 3] * a0[:, 4])
u1[:, 3] = (
a0[:, 0] * a0[:, 1] * a0[:, 2] + a0[:, 0] * a0[:, 1] * a0[:, 4]
+ a0[:, 0] * a0[:, 2] * a0[:, 4] + a0[:, 1] * a0[:, 2] * a0[:, 4])
u1[:, 4] = (
a0[:, 0] * a0[:, 1] * a0[:, 2] + a0[:, 0] * a0[:, 1] * a0[:, 3]
+ a0[:, 0] * a0[:, 2] * a0[:, 3] + a0[:, 1] * a0[:, 2] * a0[:, 3])
left[:, 0] = (a0[:, 0] - a0[:, 1]) * (a0[:, 0] - a0[:, 2]) * \
(a0[:, 0] - a0[:, 3]) * (a0[:, 0] - a0[:, 4])
left[:, 1] = (a0[:, 1] - a0[:, 0]) * (a0[:, 1] - a0[:, 2]) * \
(a0[:, 1] - a0[:, 3]) * (a0[:, 1] - a0[:, 4])
left[:, 2] = (a0[:, 2] - a0[:, 0]) * (a0[:, 2] - a0[:, 1]) * \
(a0[:, 2] - a0[:, 3]) * (a0[:, 2] - a0[:, 4])
left[:, 3] = (a0[:, 3] - a0[:, 0]) * (a0[:, 3] - a0[:, 1]) * \
(a0[:, 3] - a0[:, 2]) * (a0[:, 3] - a0[:, 4])
left[:, 4] = (a0[:, 4] - a0[:, 0]) * (a0[:, 4] - a0[:, 1]) * \
(a0[:, 4] - a0[:, 2]) * (a0[:, 4] - a0[:, 3])
df[2:-2] = np.sum(-(u1 / left) * ff, axis=1)
# second derivative
u2[:, 0] = (
a0[:, 1] * a0[:, 2] + a0[:, 1] * a0[:, 3] + a0[:, 1] * a0[:, 4]
+ a0[:, 2] * a0[:, 3] + a0[:, 2] * a0[:, 4] + a0[:, 3] * a0[:, 4])
u2[:, 1] = (
a0[:, 0] * a0[:, 2] + a0[:, 0] * a0[:, 3] + a0[:, 0] * a0[:, 4]
+ a0[:, 2] * a0[:, 3] + a0[:, 2] * a0[:, 4] + a0[:, 3] * a0[:, 4])
u2[:, 2] = (
a0[:, 0] * a0[:, 1] + a0[:, 0] * a0[:, 3] + a0[:, 0] * a0[:, 4]
        + a0[:, 1] * a0[:, 3] + a0[:, 1] * a0[:, 4] + a0[:, 3] * a0[:, 4])
u2[:, 3] = (
a0[:, 0] * a0[:, 1] + a0[:, 0] * a0[:, 2] + a0[:, 0] * a0[:, 4]
+ a0[:, 1] * a0[:, 2] + a0[:, 1] * a0[:, 4] + a0[:, 2] * a0[:, 4])
u2[:, 4] = (
a0[:, 0] * a0[:, 1] + a0[:, 0] * a0[:, 2] + a0[:, 0] * a0[:, 3]
        + a0[:, 1] * a0[:, 2] + a0[:, 1] * a0[:, 3] + a0[:, 2] * a0[:, 3])
    df2[2:-2] = 2. * np.sum((u2 / left) * ff, axis=1)
return df, df2
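# Illustrative check (added comment; not part of the original module): the
# five-point formula is exact for low-order polynomials, so for f(x) = x**2
# the interior first derivative equals 2 * x, while the first and last two
# entries stay NaN by construction.
#
#     >>> x = np.array([0., 1., 2.5, 4., 5., 7.])
#     >>> df, df2 = _numdiff(x, x**2)
#     >>> bool(np.allclose(df[2:-2], 2 * x[2:-2]))
#     True
#     >>> bool(np.isnan(df[:2]).all() and np.isnan(df[-2:]).all())
#     True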
def rectify_iv_curve(voltage, current, decimals=None):
"""
Sort the IV curve data, remove NaNs and negative
values, and combine points with duplicate voltage.
Parameters
----------
voltage : numeric [V]
current : numeric [A]
decimals : int or None, default None
number of decimal places to which voltage is rounded to remove
duplicated points. If None, no rounding is done.
Returns
-------
voltage : numeric [V]
current : numeric [A]
Notes
-----
``rectify_iv_curve`` ensures that the IV curve lies in the first quadrant
of the (voltage, current) plane. The returned IV curve:
* increases in voltage
* contains no negative current or voltage values
* contains no NaNs
* contains no points with duplicate voltage values. Where voltage
values are repeated, a single data point is substituted with current
equal to the average of current at duplicated voltages.
"""
df = pd.DataFrame(data=np.vstack((voltage, current)).T, columns=['v', 'i'])
# restrict to first quadrant
df.dropna(inplace=True)
df = df[(df['v'] >= 0) & (df['i'] >= 0)]
# sort pairs on voltage, then current
df = df.sort_values(by=['v', 'i'], ascending=[True, False])
# eliminate duplicate voltage points
if decimals is not None:
df['v'] = np.round(df['v'], decimals=decimals)
_, inv = np.unique(df['v'], return_inverse=True)
df.index = inv
# average current at each common voltage
df = df.groupby(by=inv).mean()
tmp = np.array(df).T
return tmp[0, ], tmp[1, ]
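# Illustrative example (added comment; not part of the original module):
# NaN and negative points are dropped and currents at duplicated voltages
# are averaged, so six raw points collapse to a clean three-point curve.
#
#     >>> v = np.array([2., 1., 1., 0., -1., np.nan])
#     >>> c = np.array([0.5, 2., 4., 5., 6., 1.])
#     >>> vv, cc = rectify_iv_curve(v, c)
#     >>> bool(np.allclose(vv, [0., 1., 2.]) and np.allclose(cc, [5., 3., 0.5]))
#     True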
def _schumaker_qspline(x, y):
"""
Fit a quadratic spline which preserves monotonicity and
convexity in the data.
Parameters
----------
x : numeric
independent points between which the spline will interpolate.
y : numeric
dependent points between which the spline will interpolate.
Returns
-------
t : array
an ordered vector of knots, i.e., X values where the spline
changes coefficients. All values in x are used as knots.
The algorithm may insert additional knots between data points in x
where changes in convexity are indicated by the (numerical)
derivative. Consequently len(t) >= len(x).
c : array
        an Nx3 matrix of coefficients where the kth row defines the quadratic
        interpolant between t_k and t_(k+1), i.e., y = c[k, 0] *
        (x - t_k)^2 + c[k, 1] * (x - t_k) + c[k, 2]
yhat : array
y values corresponding to the knots in t. Contains the original
data points, y, and also y values estimated from the spline at the
inserted knots.
kflag : array
a vector of len(t) of logicals, which are set to true for
elements of t that are knots inserted by the algorithm.
Notes
-----
Algorithm is taken from [1]_, which relies on prior work described in [2]_.
Ported from PVLib Matlab [3]_.
References
----------
.. [1] L. L. Schumaker, "On Shape Preserving Quadratic Spline
Interpolation", SIAM Journal on Numerical Analysis 20(4), August 1983,
pp 854 - 864
.. [2] M. H. Lam, "Monotone and Convex Quadratic Spline Interpolation",
Virginia Journal of Science 41(1), Spring 1990
.. [3] PVLib MATLAB https://github.com/sandialabs/MATLAB_PV_LIB
"""
# Make sure vectors are 1D arrays
x = x.flatten()
y = y.flatten()
n = x.size
# compute various values used by the algorithm: differences, length of line
# segments between data points, and ratios of differences.
delx = np.diff(x) # delx[i] = x[i + 1] - x[i]
dely = np.diff(y)
delta = dely / delx
# Calculate first derivative at each x value per [3]
s = np.zeros_like(x)
left = np.append(0.0, delta)
right = np.append(delta, 0.0)
pdelta = left * right
u = pdelta > 0
# [3], Eq. 9 for interior points
# fix tuning parameters in [2], Eq 9 at chi = .5 and eta = .5
s[u] = pdelta[u] / (0.5*left[u] + 0.5*right[u])
# [3], Eq. 7 for left endpoint
left_end = 2.0 * delta[0] - s[1]
if delta[0] * left_end > 0:
s[0] = left_end
# [3], Eq. 8 for right endpoint
right_end = 2.0 * delta[-1] - s[-2]
if delta[-1] * right_end > 0:
s[-1] = right_end
# determine knots. Start with initial points x
# [2], Algorithm 4.1 first 'if' condition of step 5 defines intervals
# which won't get internal knots
tests = s[:-1] + s[1:]
u = np.isclose(tests, 2.0 * delta, atol=EPS)
# u = true for an interval which will not get an internal knot
k = n + sum(~u) # total number of knots = original data + inserted knots
# set up output arrays
    # knot locations; the first n - 1 entries and the very last one are original data
xk = np.zeros(k)
yk = np.zeros(k) # function values at knot locations
# logicals that will indicate where additional knots are inserted
flag = np.zeros(k, dtype=bool)
a = np.zeros((k, 3))
    # structures needed to compute coefficients; these have to be maintained in
# association with each knot
tmpx = x[:-1]
tmpy = y[:-1]
tmpx2 = x[1:]
tmps = s[:-1]
tmps2 = s[1:]
diffs = np.diff(s)
# structure to contain information associated with each knot, used to
# calculate coefficients
uu = np.zeros((k, 6))
uu[:(n - 1), :] = np.array([tmpx, tmpx2, tmpy, tmps, tmps2, delta]).T
# [2], Algorithm 4.1 subpart 1 of Step 5
# original x values that are left points of intervals without internal
# knots
    # MATLAB differs from NumPy: boolean indices must be the same size as
    # the indexed array
xk[:(n - 1)][u] = tmpx[u]
yk[:(n - 1)][u] = tmpy[u]
# constant term for each polynomial for intervals without knots
a[:(n - 1), 2][u] = tmpy[u]
a[:(n - 1), 1][u] = s[:-1][u]
a[:(n - 1), 0][u] = 0.5 * diffs[u] / delx[u] # leading coefficients
# [2], Algorithm 4.1 subpart 2 of Step 5
# original x values that are left points of intervals with internal knots
xk[:(n-1)][~u] = tmpx[~u]
yk[:(n-1)][~u] = tmpy[~u]
aa = s[:-1] - delta
b = s[1:] - delta
sbar = np.zeros(k)
eta = np.zeros(k)
# will contain mapping from the left points of intervals containing an
    # added knot to each interval's internal knot value
xi = np.zeros(k)
t0 = aa * b >= 0
# first 'else' in Algorithm 4.1 Step 5
v = np.logical_and(~u, t0) # len(u) == (n - 1) always
q = np.sum(v) # number of this type of knot to add
if q > 0.:
xk[(n - 1):(n + q - 1)] = .5 * (tmpx[v] + tmpx2[v]) # knot location
uu[(n - 1):(n + q - 1), :] = np.array([tmpx[v], tmpx2[v], tmpy[v],
tmps[v], tmps2[v], delta[v]]).T
xi[:(n-1)][v] = xk[(n - 1):(n + q - 1)]
t1 = np.abs(aa) > np.abs(b)
w = np.logical_and(~u, ~v) # second 'else' in Algorithm 4.1 Step 5
w = np.logical_and(w, t1)
r = np.sum(w)
if r > 0.:
xk[(n + q - 1):(n + q + r - 1)] = tmpx2[w] + aa[w] * delx[w] / diffs[w]
uu[(n + q - 1):(n + q + r - 1), :] = np.array([tmpx[w], tmpx2[w],
tmpy[w], tmps[w],
tmps2[w], delta[w]]).T
xi[:(n - 1)][w] = xk[(n + q - 1):(n + q + r - 1)]
z = np.logical_and(~u, ~v) # last 'else' in Algorithm 4.1 Step 5
z = np.logical_and(z, ~w)
ss = np.sum(z)
if ss > 0.:
xk[(n + q + r - 1):(n + q + r + ss - 1)] = \
tmpx[z] + b[z] * delx[z] / diffs[z]
uu[(n + q + r - 1):(n + q + r + ss - 1), :] = \
np.array([tmpx[z], tmpx2[z], tmpy[z], tmps[z], tmps2[z],
delta[z]]).T
xi[:(n-1)][z] = xk[(n + q + r - 1):(n + q + r + ss - 1)]
# define polynomial coefficients for intervals with added knots
ff = ~u
sbar[:(n-1)][ff] = (
(2 * uu[:(n - 1), 5][ff] - uu[:(n-1), 4][ff])
+ (uu[:(n - 1), 4][ff] - uu[:(n-1), 3][ff])
* (xi[:(n - 1)][ff] - uu[:(n-1), 0][ff])
/ (uu[:(n - 1), 1][ff] - uu[:(n-1), 0][ff]))
eta[:(n-1)][ff] = (
(sbar[:(n - 1)][ff] - uu[:(n-1), 3][ff])
/ (xi[:(n - 1)][ff] - uu[:(n-1), 0][ff]))
sbar[(n - 1):(n + q + r + ss - 1)] = \
(2 * uu[(n - 1):(n + q + r + ss - 1), 5] -
uu[(n - 1):(n + q + r + ss - 1), 4]) + \
(uu[(n - 1):(n + q + r + ss - 1), 4] -
uu[(n - 1):(n + q + r + ss - 1), 3]) * \
(xk[(n - 1):(n + q + r + ss - 1)] -
uu[(n - 1):(n + q + r + ss - 1), 0]) / \
(uu[(n - 1):(n + q + r + ss - 1), 1] -
uu[(n - 1):(n + q + r + ss - 1), 0])
eta[(n - 1):(n + q + r + ss - 1)] = \
(sbar[(n - 1):(n + q + r + ss - 1)] -
uu[(n - 1):(n + q + r + ss - 1), 3]) / \
(xk[(n - 1):(n + q + r + ss - 1)] -
uu[(n - 1):(n + q + r + ss - 1), 0])
# constant term for polynomial for intervals with internal knots
a[:(n - 1), 2][~u] = uu[:(n - 1), 2][~u]
a[:(n - 1), 1][~u] = uu[:(n - 1), 3][~u]
a[:(n - 1), 0][~u] = 0.5 * eta[:(n - 1)][~u] # leading coefficient
a[(n - 1):(n + q + r + ss - 1), 2] = \
uu[(n - 1):(n + q + r + ss - 1), 2] + \
uu[(n - 1):(n + q + r + ss - 1), 3] * \
(xk[(n - 1):(n + q + r + ss - 1)] -
uu[(n - 1):(n + q + r + ss - 1), 0]) + \
.5 * eta[(n - 1):(n + q + r + ss - 1)] * \
(xk[(n - 1):(n + q + r + ss - 1)] -
uu[(n - 1):(n + q + r + ss - 1), 0]) ** 2.
a[(n - 1):(n + q + r + ss - 1), 1] = sbar[(n - 1):(n + q + r + ss - 1)]
a[(n - 1):(n + q + r + ss - 1), 0] = \
.5 * (uu[(n - 1):(n + q + r + ss - 1), 4] -
sbar[(n - 1):(n + q + r + ss - 1)]) / \
(uu[(n - 1):(n + q + r + ss - 1), 1] -
uu[(n - 1):(n + q + r + ss - 1), 0])
yk[(n - 1):(n + q + r + ss - 1)] = a[(n - 1):(n + q + r + ss - 1), 2]
xk[n + q + r + ss - 1] = x[n - 1]
yk[n + q + r + ss - 1] = y[n - 1]
flag[(n - 1):(n + q + r + ss - 1)] = True # these are all inserted knots
tmp = np.vstack((xk, a.T, yk, flag)).T
# sort output in terms of increasing x (original plus added knots)
tmp2 = tmp[tmp[:, 0].argsort(kind='mergesort')]
t = tmp2[:, 0]
outn = len(t)
c = tmp2[0:(outn - 1), 1:4]
yhat = tmp2[:, 4]
kflag = tmp2[:, 5]
return t, c, yhat, kflag
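# Illustrative check (added comment; not part of the original module): for
# data that already lies on a line no extra knots are needed, so the knot
# vector equals x and kflag stays all False.
#
#     >>> x = np.array([0., 1., 2., 3.])
#     >>> t, c, yhat, kflag = _schumaker_qspline(x, 2. * x)
#     >>> bool(kflag.any())
#     False
#     >>> bool(np.allclose(yhat, 2. * x))
#     True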
|
bsd-3-clause
|
LogicWang/ml
|
deep/tf/mnist/simple_mnist.py
|
1
|
4276
|
#!/usr/bin/python3
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from sklearn import metrics
import tensorflow as tf
layers = tf.contrib.layers
learn = tf.contrib.learn
def max_pool_2x2(tensor_in):
return tf.nn.max_pool(
tensor_in, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
def conv_model(feature, target, mode):
"""2-layer convolution model."""
# Convert the target to a one-hot tensor of shape (batch_size, 10) and
    # with an on-value of 1 for each one-hot vector of length 10.
target = tf.one_hot(tf.cast(target, tf.int32), 10, 1, 0)
# Reshape feature to 4d tensor with 2nd and 3rd dimensions being
    # image width and height, final dimension being the number of color channels.
feature = tf.reshape(feature, [-1, 28, 28, 1])
# First conv layer will compute 32 features for each 5x5 patch
with tf.variable_scope('conv_layer1'):
h_conv1 = layers.convolution2d(
feature, 32, kernel_size=[5, 5], activation_fn=tf.nn.relu)
h_pool1 = max_pool_2x2(h_conv1)
# Second conv layer will compute 64 features for each 5x5 patch.
with tf.variable_scope('conv_layer2'):
h_conv2 = layers.convolution2d(
h_pool1, 64, kernel_size=[5, 5], activation_fn=tf.nn.relu)
h_pool2 = max_pool_2x2(h_conv2)
# reshape tensor into a batch of vectors
h_pool2_flat = tf.reshape(h_pool2, [-1, 7 * 7 * 64])
# Densely connected layer with 1024 neurons.
h_fc1 = layers.dropout(
layers.fully_connected(h_pool2_flat, 1024, activation_fn=tf.nn.relu),
keep_prob=0.5,
is_training=mode == tf.contrib.learn.ModeKeys.TRAIN)
# Compute logits (1 per class) and compute loss.
logits = layers.fully_connected(h_fc1, 10, activation_fn=None)
loss = tf.losses.softmax_cross_entropy(target, logits)
# Create a tensor for training op.
train_op = layers.optimize_loss(loss,
tf.contrib.framework.get_global_step(),
optimizer='SGD',
learning_rate=0.001)
return tf.argmax(logits, 1), loss, train_op
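# Shape walk-through (added comment; not part of the original script):
# 28x28x1 input -> 5x5 conv (32 maps, SAME padding) -> 2x2 max pool ->
# 14x14x32 -> 5x5 conv (64 maps) -> 2x2 max pool -> 7x7x64, which is why
# the dense layer above flattens to 7 * 7 * 64 = 3136 features per image.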
def main(unused_args):
### Download and load MNIST dataset.
mnist = learn.datasets.load_dataset('mnist')
### Linear classifier.
feature_columns = learn.infer_real_valued_columns_from_input(
mnist.train.images)
classifier = learn.LinearClassifier(
feature_columns=feature_columns, n_classes=10)
classifier.fit(mnist.train.images,
mnist.train.labels.astype(np.int32),
batch_size=100,
steps=1000)
score = metrics.accuracy_score(mnist.test.labels,
list(classifier.predict(mnist.test.images)))
print('Accuracy: {0:f}'.format(score))
### Convolutional network
classifier = learn.Estimator(model_fn=conv_model)
classifier.fit(mnist.train.images,
mnist.train.labels,
batch_size=100,
steps=20000)
score = metrics.accuracy_score(mnist.test.labels,
list(classifier.predict(mnist.test.images)))
print('Accuracy: {0:f}'.format(score))
if __name__ == '__main__':
tf.app.run()
|
apache-2.0
|
synthicity/orca
|
orca/orca.py
|
1
|
60782
|
# Orca
# Copyright (C) 2016 UrbanSim Inc.
# See full license in LICENSE.
from __future__ import print_function
try:
from inspect import getfullargspec as getargspec
except ImportError:
from inspect import getargspec
import logging
import time
import warnings
from collections import namedtuple
try:
from collections.abc import Callable
except ImportError: # Python 2.7
from collections import Callable
from contextlib import contextmanager
from functools import wraps
import pandas as pd
import tables
import tlz as tz
from . import utils
from .utils.logutil import log_start_finish
warnings.filterwarnings('ignore', category=tables.NaturalNameWarning)
logger = logging.getLogger(__name__)
_TABLES = {}
_COLUMNS = {}
_STEPS = {}
_BROADCASTS = {}
_INJECTABLES = {}
_CACHING = True
_TABLE_CACHE = {}
_COLUMN_CACHE = {}
_INJECTABLE_CACHE = {}
_MEMOIZED = {}
_CS_FOREVER = 'forever'
_CS_ITER = 'iteration'
_CS_STEP = 'step'
CacheItem = namedtuple('CacheItem', ['name', 'value', 'scope'])
def clear_all():
"""
Clear any and all stored state from Orca.
"""
_TABLES.clear()
_COLUMNS.clear()
_STEPS.clear()
_BROADCASTS.clear()
_INJECTABLES.clear()
_TABLE_CACHE.clear()
_COLUMN_CACHE.clear()
_INJECTABLE_CACHE.clear()
for m in _MEMOIZED.values():
m.value.clear_cached()
_MEMOIZED.clear()
logger.debug('pipeline state cleared')
def clear_cache(scope=None):
"""
Clear all cached data.
Parameters
----------
scope : {None, 'step', 'iteration', 'forever'}, optional
Clear cached values with a given scope.
By default all cached values are removed.
"""
if not scope:
_TABLE_CACHE.clear()
_COLUMN_CACHE.clear()
_INJECTABLE_CACHE.clear()
for m in _MEMOIZED.values():
m.value.clear_cached()
logger.debug('pipeline cache cleared')
else:
for d in (_TABLE_CACHE, _COLUMN_CACHE, _INJECTABLE_CACHE):
items = tz.valfilter(lambda x: x.scope == scope, d)
for k in items:
del d[k]
for m in tz.filter(lambda x: x.scope == scope, _MEMOIZED.values()):
m.value.clear_cached()
logger.debug('cleared cached values with scope {!r}'.format(scope))
def enable_cache():
"""
Allow caching of registered variables that explicitly have
caching enabled.
"""
global _CACHING
_CACHING = True
def disable_cache():
"""
Turn off caching across Orca, even for registered variables
that have caching enabled.
"""
global _CACHING
_CACHING = False
def cache_on():
"""
Whether caching is currently enabled or disabled.
Returns
-------
on : bool
True if caching is enabled.
"""
return _CACHING
@contextmanager
def cache_disabled():
    turn_back_on = cache_on()
disable_cache()
yield
if turn_back_on:
enable_cache()
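# Illustrative usage sketch (added for clarity; not part of the original
# module; 'households' is a made-up table name assumed to have been
# registered elsewhere with add_table):
#
#     >>> with cache_disabled():
#     ...     frame = get_table('households').to_frame()
#
# Inside the block every table, column and injectable is recomputed; the
# previous caching state is restored on exit.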
# for errors that occur during Orca runs
class OrcaError(Exception):
pass
class DataFrameWrapper(object):
"""
Wraps a DataFrame so it can provide certain columns and handle
computed columns.
Parameters
----------
name : str
Name for the table.
frame : pandas.DataFrame
copy_col : bool, optional
Whether to return copies when evaluating columns.
Attributes
----------
name : str
Table name.
copy_col : bool
Whether to return copies when evaluating columns.
local : pandas.DataFrame
The wrapped DataFrame.
"""
def __init__(self, name, frame, copy_col=True):
self.name = name
self.local = frame
self.copy_col = copy_col
@property
def columns(self):
"""
Columns in this table.
"""
return self.local_columns + list_columns_for_table(self.name)
@property
def local_columns(self):
"""
Columns that are part of the wrapped DataFrame.
"""
return list(self.local.columns)
@property
def index(self):
"""
Table index.
"""
return self.local.index
def to_frame(self, columns=None):
"""
Make a DataFrame with the given columns.
Will always return a copy of the underlying table.
Parameters
----------
columns : sequence or string, optional
Sequence of the column names desired in the DataFrame. A string
can also be passed if only one column is desired.
If None all columns are returned, including registered columns.
Returns
-------
frame : pandas.DataFrame
"""
extra_cols = _columns_for_table(self.name)
if columns is not None:
columns = [columns] if isinstance(columns, str) else columns
columns = set(columns)
set_extra_cols = set(extra_cols)
local_cols = set(self.local.columns) & columns - set_extra_cols
df = self.local[list(local_cols)].copy()
extra_cols = {k: extra_cols[k] for k in (columns & set_extra_cols)}
else:
df = self.local.copy()
with log_start_finish(
'computing {!r} columns for table {!r}'.format(
len(extra_cols), self.name),
logger):
for name, col in extra_cols.items():
with log_start_finish(
'computing column {!r} for table {!r}'.format(
name, self.name),
logger):
df[name] = col()
return df
def update_col(self, column_name, series):
"""
Add or replace a column in the underlying DataFrame.
Parameters
----------
column_name : str
Column to add or replace.
series : pandas.Series or sequence
Column data.
"""
logger.debug('updating column {!r} in table {!r}'.format(
column_name, self.name))
self.local[column_name] = series
def __setitem__(self, key, value):
return self.update_col(key, value)
def get_column(self, column_name):
"""
Returns a column as a Series.
Parameters
----------
column_name : str
Returns
-------
column : pandas.Series
"""
with log_start_finish(
'getting single column {!r} from table {!r}'.format(
column_name, self.name),
logger):
extra_cols = _columns_for_table(self.name)
if column_name in extra_cols:
with log_start_finish(
'computing column {!r} for table {!r}'.format(
column_name, self.name),
logger):
column = extra_cols[column_name]()
else:
column = self.local[column_name]
if self.copy_col:
return column.copy()
else:
return column
def __getitem__(self, key):
return self.get_column(key)
def __getattr__(self, key):
return self.get_column(key)
def column_type(self, column_name):
"""
Report column type as one of 'local', 'series', or 'function'.
Parameters
----------
column_name : str
Returns
-------
col_type : {'local', 'series', 'function'}
'local' means that the column is part of the registered table,
'series' means the column is a registered Pandas Series,
and 'function' means the column is a registered function providing
a Pandas Series.
"""
extra_cols = list_columns_for_table(self.name)
if column_name in extra_cols:
col = _COLUMNS[(self.name, column_name)]
if isinstance(col, _SeriesWrapper):
return 'series'
elif isinstance(col, _ColumnFuncWrapper):
return 'function'
elif column_name in self.local_columns:
return 'local'
raise KeyError('column {!r} not found'.format(column_name))
def update_col_from_series(self, column_name, series, cast=False):
"""
Update existing values in a column from another series.
Index values must match in both column and series. Optionally
casts data type to match the existing column.
Parameters
---------------
column_name : str
series : panas.Series
cast: bool, optional, default False
"""
logger.debug('updating column {!r} in table {!r}'.format(
column_name, self.name))
col_dtype = self.local[column_name].dtype
if series.dtype != col_dtype:
if cast:
series = series.astype(col_dtype)
else:
err_msg = "Data type mismatch, existing:{}, update:{}"
err_msg = err_msg.format(col_dtype, series.dtype)
raise ValueError(err_msg)
self.local.loc[series.index, column_name] = series
def __len__(self):
return len(self.local)
def clear_cached(self):
"""
Remove cached results from this table's computed columns.
"""
_TABLE_CACHE.pop(self.name, None)
for col in _columns_for_table(self.name).values():
col.clear_cached()
logger.debug('cleared cached columns for table {!r}'.format(self.name))
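# Illustrative usage sketch (added for clarity; not part of the original
# module; assumes no extra columns have been registered for 'my_table'):
#
#     >>> import pandas as pd
#     >>> df = pd.DataFrame({'a': [1, 2, 3]}, index=[10, 20, 30])
#     >>> wrapper = DataFrameWrapper('my_table', df, copy_col=True)
#     >>> wrapper.columns
#     ['a']
#     >>> wrapper.get_column('a').tolist()
#     [1, 2, 3]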
class TableFuncWrapper(object):
"""
Wrap a function that provides a DataFrame.
Parameters
----------
name : str
Name for the table.
func : callable
Callable that returns a DataFrame.
cache : bool, optional
Whether to cache the results of calling the wrapped function.
cache_scope : {'step', 'iteration', 'forever'}, optional
Scope for which to cache data. Default is to cache forever
(or until manually cleared). 'iteration' caches data for each
complete iteration of the pipeline, 'step' caches data for
a single step of the pipeline.
copy_col : bool, optional
Whether to return copies when evaluating columns.
Attributes
----------
name : str
Table name.
cache : bool
Whether caching is enabled for this table.
copy_col : bool
Whether to return copies when evaluating columns.
"""
def __init__(
self, name, func, cache=False, cache_scope=_CS_FOREVER,
copy_col=True):
self.name = name
self._func = func
self._argspec = getargspec(func)
self.cache = cache
self.cache_scope = cache_scope
self.copy_col = copy_col
self._columns = []
self._index = None
self._len = 0
@property
def columns(self):
"""
Columns in this table. (May contain only computed columns
if the wrapped function has not been called yet.)
"""
return self._columns + list_columns_for_table(self.name)
@property
def local_columns(self):
"""
Only the columns contained in the DataFrame returned by the
wrapped function. (No registered columns included.)
"""
if self._columns:
return self._columns
else:
self._call_func()
return self._columns
@property
def index(self):
"""
Index of the underlying table. Will be None if that index is
unknown.
"""
return self._index
def _call_func(self):
"""
Call the wrapped function and return the result wrapped by
DataFrameWrapper.
Also updates attributes like columns, index, and length.
"""
if _CACHING and self.cache and self.name in _TABLE_CACHE:
logger.debug('returning table {!r} from cache'.format(self.name))
return _TABLE_CACHE[self.name].value
with log_start_finish(
'call function to get frame for table {!r}'.format(
self.name),
logger):
kwargs = _collect_variables(names=self._argspec.args,
expressions=self._argspec.defaults)
frame = self._func(**kwargs)
self._columns = list(frame.columns)
self._index = frame.index
self._len = len(frame)
wrapped = DataFrameWrapper(self.name, frame, copy_col=self.copy_col)
if self.cache:
_TABLE_CACHE[self.name] = CacheItem(
self.name, wrapped, self.cache_scope)
return wrapped
def __call__(self):
return self._call_func()
def to_frame(self, columns=None):
"""
Make a DataFrame with the given columns.
Will always return a copy of the underlying table.
Parameters
----------
columns : sequence, optional
Sequence of the column names desired in the DataFrame.
If None all columns are returned.
Returns
-------
frame : pandas.DataFrame
"""
return self._call_func().to_frame(columns)
def get_column(self, column_name):
"""
Returns a column as a Series.
Parameters
----------
column_name : str
Returns
-------
column : pandas.Series
"""
frame = self._call_func()
return DataFrameWrapper(self.name, frame,
copy_col=self.copy_col).get_column(column_name)
def __getitem__(self, key):
return self.get_column(key)
def __getattr__(self, key):
return self.get_column(key)
def __len__(self):
return self._len
def column_type(self, column_name):
"""
Report column type as one of 'local', 'series', or 'function'.
Parameters
----------
column_name : str
Returns
-------
col_type : {'local', 'series', 'function'}
'local' means that the column is part of the registered table,
'series' means the column is a registered Pandas Series,
and 'function' means the column is a registered function providing
a Pandas Series.
"""
extra_cols = list_columns_for_table(self.name)
if column_name in extra_cols:
col = _COLUMNS[(self.name, column_name)]
if isinstance(col, _SeriesWrapper):
return 'series'
elif isinstance(col, _ColumnFuncWrapper):
return 'function'
elif column_name in self.local_columns:
return 'local'
raise KeyError('column {!r} not found'.format(column_name))
def clear_cached(self):
"""
Remove this table's cached result and that of associated columns.
"""
_TABLE_CACHE.pop(self.name, None)
for col in _columns_for_table(self.name).values():
col.clear_cached()
logger.debug(
'cleared cached result and cached columns for table {!r}'.format(
self.name))
def func_source_data(self):
"""
Return data about the wrapped function source, including file name,
line number, and source code.
Returns
-------
filename : str
lineno : int
The line number on which the function starts.
source : str
"""
return utils.func_source_data(self._func)
class _ColumnFuncWrapper(object):
"""
Wrap a function that returns a Series.
Parameters
----------
table_name : str
Table with which the column will be associated.
column_name : str
Name for the column.
func : callable
Should return a Series that has an
index matching the table to which it is being added.
cache : bool, optional
Whether to cache the result of calling the wrapped function.
cache_scope : {'step', 'iteration', 'forever'}, optional
Scope for which to cache data. Default is to cache forever
(or until manually cleared). 'iteration' caches data for each
complete iteration of the pipeline, 'step' caches data for
a single step of the pipeline.
Attributes
----------
name : str
Column name.
table_name : str
Name of table this column is associated with.
cache : bool
Whether caching is enabled for this column.
"""
def __init__(
self, table_name, column_name, func, cache=False,
cache_scope=_CS_FOREVER):
self.table_name = table_name
self.name = column_name
self._func = func
self._argspec = getargspec(func)
self.cache = cache
self.cache_scope = cache_scope
def __call__(self):
"""
Evaluate the wrapped function and return the result.
"""
if (_CACHING and
self.cache and
(self.table_name, self.name) in _COLUMN_CACHE):
logger.debug(
'returning column {!r} for table {!r} from cache'.format(
self.name, self.table_name))
return _COLUMN_CACHE[(self.table_name, self.name)].value
with log_start_finish(
('call function to provide column {!r} for table {!r}'
).format(self.name, self.table_name), logger):
kwargs = _collect_variables(names=self._argspec.args,
expressions=self._argspec.defaults)
col = self._func(**kwargs)
if self.cache:
_COLUMN_CACHE[(self.table_name, self.name)] = CacheItem(
(self.table_name, self.name), col, self.cache_scope)
return col
def clear_cached(self):
"""
Remove any cached result of this column.
"""
x = _COLUMN_CACHE.pop((self.table_name, self.name), None)
if x is not None:
logger.debug(
'cleared cached value for column {!r} in table {!r}'.format(
self.name, self.table_name))
def func_source_data(self):
"""
Return data about the wrapped function source, including file name,
line number, and source code.
Returns
-------
filename : str
lineno : int
The line number on which the function starts.
source : str
"""
return utils.func_source_data(self._func)
class _SeriesWrapper(object):
"""
Wrap a Series for the purpose of giving it the same interface as a
`_ColumnFuncWrapper`.
Parameters
----------
table_name : str
Table with which the column will be associated.
column_name : str
Name for the column.
series : pandas.Series
Series with index matching the table to which it is being added.
Attributes
----------
name : str
Column name.
table_name : str
Name of table this column is associated with.
"""
def __init__(self, table_name, column_name, series):
self.table_name = table_name
self.name = column_name
self._column = series
def __call__(self):
return self._column
def clear_cached(self):
"""
Here for compatibility with `_ColumnFuncWrapper`.
"""
pass
class _InjectableFuncWrapper(object):
"""
Wraps a function that will provide an injectable value elsewhere.
Parameters
----------
name : str
func : callable
cache : bool, optional
Whether to cache the result of calling the wrapped function.
cache_scope : {'step', 'iteration', 'forever'}, optional
Scope for which to cache data. Default is to cache forever
(or until manually cleared). 'iteration' caches data for each
complete iteration of the pipeline, 'step' caches data for
a single step of the pipeline.
Attributes
----------
name : str
Name of this injectable.
cache : bool
Whether caching is enabled for this injectable function.
"""
def __init__(self, name, func, cache=False, cache_scope=_CS_FOREVER):
self.name = name
self._func = func
self._argspec = getargspec(func)
self.cache = cache
self.cache_scope = cache_scope
def __call__(self):
if _CACHING and self.cache and self.name in _INJECTABLE_CACHE:
logger.debug(
'returning injectable {!r} from cache'.format(self.name))
return _INJECTABLE_CACHE[self.name].value
with log_start_finish(
'call function to provide injectable {!r}'.format(self.name),
logger):
kwargs = _collect_variables(names=self._argspec.args,
expressions=self._argspec.defaults)
result = self._func(**kwargs)
if self.cache:
_INJECTABLE_CACHE[self.name] = CacheItem(
self.name, result, self.cache_scope)
return result
def clear_cached(self):
"""
Clear a cached result for this injectable.
"""
x = _INJECTABLE_CACHE.pop(self.name, None)
if x:
logger.debug(
'injectable {!r} removed from cache'.format(self.name))
class _StepFuncWrapper(object):
"""
Wrap a step function for argument matching.
Parameters
----------
step_name : str
func : callable
Attributes
----------
name : str
Name of step.
"""
def __init__(self, step_name, func):
self.name = step_name
self._func = func
self._argspec = getargspec(func)
def __call__(self):
with log_start_finish('calling step {!r}'.format(self.name), logger):
kwargs = _collect_variables(names=self._argspec.args,
expressions=self._argspec.defaults)
return self._func(**kwargs)
def _tables_used(self):
"""
Tables injected into the step.
Returns
-------
tables : set of str
"""
args = list(self._argspec.args)
if self._argspec.defaults:
default_args = list(self._argspec.defaults)
else:
default_args = []
# Combine names from argument names and argument default values.
names = args[:len(args) - len(default_args)] + default_args
tables = set()
for name in names:
parent_name = name.split('.')[0]
if is_table(parent_name):
tables.add(parent_name)
return tables
def func_source_data(self):
"""
Return data about a step function's source, including file name,
line number, and source code.
Returns
-------
filename : str
lineno : int
The line number on which the function starts.
source : str
"""
return utils.func_source_data(self._func)
def is_table(name):
"""
Returns whether a given name refers to a registered table.
"""
return name in _TABLES
def list_tables():
"""
List of table names.
"""
return list(_TABLES.keys())
def list_columns():
"""
List of (table name, registered column name) pairs.
"""
return list(_COLUMNS.keys())
def list_steps():
"""
List of registered step names.
"""
return list(_STEPS.keys())
def list_injectables():
"""
List of registered injectables.
"""
return list(_INJECTABLES.keys())
def list_broadcasts():
"""
List of registered broadcasts as (cast table name, onto table name).
"""
return list(_BROADCASTS.keys())
def is_expression(name):
"""
Checks whether a given name is a simple variable name or a compound
variable expression.
Parameters
----------
name : str
Returns
-------
is_expr : bool
"""
return '.' in name
def _collect_variables(names, expressions=None):
"""
Map labels and expressions to registered variables.
Handles argument matching.
Example:
_collect_variables(names=['zones', 'zone_id'],
expressions=['parcels.zone_id'])
Would return a dict representing:
{'zones': <DataFrameWrapper for zones>,
'zone_id': <pandas.Series for parcels.zone_id>}
Parameters
----------
names : list of str
List of registered variable names and/or labels.
If mixing names and labels, labels must come at the end.
expressions : list of str, optional
List of registered variable expressions for labels defined
at end of `names`. Length must match the number of labels.
Returns
-------
variables : dict
Keys match `names`. Values correspond to registered variables,
which may be wrappers or evaluated functions if appropriate.
"""
# Map registered variable labels to expressions.
if not expressions:
expressions = []
offset = len(names) - len(expressions)
labels_map = dict(tz.concatv(
zip(names[:offset], names[:offset]),
zip(names[offset:], expressions)))
all_variables = tz.merge(_INJECTABLES, _TABLES)
variables = {}
for label, expression in labels_map.items():
# In the future, more registered variable expressions could be
# supported. Currently supports names of registered variables
# and references to table columns.
if '.' in expression:
# Registered variable expression refers to column.
table_name, column_name = expression.split('.')
table = get_table(table_name)
variables[label] = table.get_column(column_name)
else:
thing = all_variables[expression]
if isinstance(thing, (_InjectableFuncWrapper, TableFuncWrapper)):
# Registered variable object is function.
variables[label] = thing()
else:
variables[label] = thing
return variables
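# Illustrative sketch (not part of the library): how argument matching
# resolves plain names versus dotted expressions. The 'parcels' table and
# its 'zone_id' column are hypothetical and assumed to be registered.
def _example_collect_variables():  # documentation only, never called
    variables = _collect_variables(
        names=['parcels', 'parcel_zone'],
        expressions=['parcels.zone_id'])
    # variables['parcels'] is the DataFrameWrapper for the 'parcels' table;
    # variables['parcel_zone'] is the pandas.Series parcels.zone_id.
    return variables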
def add_table(
table_name, table, cache=False, cache_scope=_CS_FOREVER,
copy_col=True):
"""
Register a table with Orca.
Parameters
----------
table_name : str
Should be globally unique to this table.
table : pandas.DataFrame or function
If a function, the function should return a DataFrame.
The function's argument names and keyword argument values
will be matched to registered variables when the function
needs to be evaluated by Orca.
cache : bool, optional
Whether to cache the results of a provided callable. Does not
apply if `table` is a DataFrame.
cache_scope : {'step', 'iteration', 'forever'}, optional
Scope for which to cache data. Default is to cache forever
(or until manually cleared). 'iteration' caches data for each
complete iteration of the pipeline, 'step' caches data for
a single step of the pipeline.
copy_col : bool, optional
Whether to return copies when evaluating columns.
Returns
-------
wrapped : `DataFrameWrapper` or `TableFuncWrapper`
"""
if isinstance(table, Callable):
table = TableFuncWrapper(table_name, table, cache=cache,
cache_scope=cache_scope, copy_col=copy_col)
else:
table = DataFrameWrapper(table_name, table, copy_col=copy_col)
# clear any cached data from a previously registered table
table.clear_cached()
logger.debug('registering table {!r}'.format(table_name))
_TABLES[table_name] = table
return table
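# Illustrative sketch (not part of the library): registering both a plain
# DataFrame and a table-providing function. The table names 'households'
# and 'household_stats' are hypothetical.
def _example_add_table():  # documentation only, never called
    df = pd.DataFrame({'income': [50000, 75000]}, index=[1, 2])
    add_table('households', df)

    def household_stats(households):
        # 'households' is injected as a DataFrameWrapper via argument matching
        return households.to_frame().describe()

    add_table('household_stats', household_stats, cache=True)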
def table(
table_name=None, cache=False, cache_scope=_CS_FOREVER, copy_col=True):
"""
Decorates functions that return DataFrames.
Decorator version of `add_table`. Table name defaults to
name of function.
The function's argument names and keyword argument values
will be matched to registered variables when the function
needs to be evaluated by Orca.
The argument name "iter_var" may be used to have the current
iteration variable injected.
"""
def decorator(func):
if table_name:
name = table_name
else:
name = func.__name__
add_table(
name, func, cache=cache, cache_scope=cache_scope,
copy_col=copy_col)
return func
return decorator
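# Illustrative sketch (not part of the library): decorator form of
# add_table with per-iteration caching. The 'buildings' table is
# hypothetical.
def _example_table_decorator():  # documentation only, never called
    @table('buildings', cache=True, cache_scope='iteration')
    def buildings():
        return pd.DataFrame({'zone_id': [1, 1, 2], 'sqft': [900, 1200, 800]})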
def get_raw_table(table_name):
"""
Get a wrapped table by name and don't do anything to it.
Parameters
----------
table_name : str
Returns
-------
table : DataFrameWrapper or TableFuncWrapper
"""
if is_table(table_name):
return _TABLES[table_name]
else:
raise KeyError('table not found: {}'.format(table_name))
def get_table(table_name):
"""
Get a registered table.
Decorated functions will be converted to `DataFrameWrapper`.
Parameters
----------
table_name : str
Returns
-------
table : `DataFrameWrapper`
"""
table = get_raw_table(table_name)
if isinstance(table, TableFuncWrapper):
table = table()
return table
def table_type(table_name):
"""
Returns the type of a registered table.
The type can be either "dataframe" or "function".
Parameters
----------
table_name : str
Returns
-------
table_type : {'dataframe', 'function'}
"""
table = get_raw_table(table_name)
if isinstance(table, DataFrameWrapper):
return 'dataframe'
elif isinstance(table, TableFuncWrapper):
return 'function'
def add_column(
table_name, column_name, column, cache=False, cache_scope=_CS_FOREVER):
"""
Add a new column to a table from a Series or callable.
Parameters
----------
table_name : str
Table with which the column will be associated.
column_name : str
Name for the column.
column : pandas.Series or callable
Series should have an index matching the table to which it
is being added. If a callable, the function's argument
names and keyword argument values will be matched to
registered variables when the function needs to be
evaluated by Orca. The function should return a Series.
cache : bool, optional
Whether to cache the results of a provided callable. Does not
apply if `column` is a Series.
cache_scope : {'step', 'iteration', 'forever'}, optional
Scope for which to cache data. Default is to cache forever
(or until manually cleared). 'iteration' caches data for each
complete iteration of the pipeline, 'step' caches data for
a single step of the pipeline.
"""
if isinstance(column, Callable):
column = \
_ColumnFuncWrapper(
table_name, column_name, column,
cache=cache, cache_scope=cache_scope)
else:
column = _SeriesWrapper(table_name, column_name, column)
# clear any cached data from a previously registered column
column.clear_cached()
logger.debug('registering column {!r} on table {!r}'.format(
column_name, table_name))
_COLUMNS[(table_name, column_name)] = column
return column
def column(table_name, column_name=None, cache=False, cache_scope=_CS_FOREVER):
"""
Decorates functions that return a Series.
Decorator version of `add_column`. Series index must match
the named table. Column name defaults to name of function.
The function's argument names and keyword argument values
will be matched to registered variables when the function
needs to be evaluated by Orca.
The argument name "iter_var" may be used to have the current
iteration variable injected.
The index of the returned Series must match the named table.
"""
def decorator(func):
if column_name:
name = column_name
else:
name = func.__name__
add_column(
table_name, name, func, cache=cache, cache_scope=cache_scope)
return func
return decorator
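# Illustrative sketch (not part of the library): registering a computed
# column on a table. The 'buildings' table and its 'sqft'/'units' columns
# are hypothetical.
def _example_column_decorator():  # documentation only, never called
    @column('buildings', 'sqft_per_unit')
    def sqft_per_unit(buildings):
        frame = buildings.to_frame(columns=['sqft', 'units'])
        return frame.sqft / frame.units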
def list_columns_for_table(table_name):
"""
Return a list of all the extra columns registered for a given table.
Parameters
----------
table_name : str
Returns
-------
columns : list of str
"""
return [cname for tname, cname in _COLUMNS.keys() if tname == table_name]
def _columns_for_table(table_name):
"""
Return all of the columns registered for a given table.
Parameters
----------
table_name : str
Returns
-------
columns : dict of column wrappers
Keys will be column names.
"""
return {cname: col
for (tname, cname), col in _COLUMNS.items()
if tname == table_name}
def column_map(tables, columns):
"""
Take a list of tables and a list of column names and resolve which
columns come from which table.
Parameters
----------
tables : sequence of DataFrameWrapper or TableFuncWrapper
Could also be a sequence of modified pandas.DataFrames; the important
thing is that they have ``.name`` and ``.columns`` attributes.
columns : sequence of str
The column names of interest.
Returns
-------
col_map : dict
Maps table names to lists of column names.
"""
if not columns:
return {t.name: None for t in tables}
columns = set(columns)
colmap = {
t.name: list(set(t.columns).intersection(columns)) for t in tables}
foundcols = tz.reduce(
lambda x, y: x.union(y), (set(v) for v in colmap.values()))
if foundcols != columns:
raise RuntimeError('Not all required columns were found. '
'Missing: {}'.format(list(columns - foundcols)))
return colmap
def get_raw_column(table_name, column_name):
"""
Get a wrapped, registered column.
This function cannot return columns that are part of wrapped
DataFrames, it's only for columns registered directly through Orca.
Parameters
----------
table_name : str
column_name : str
Returns
-------
wrapped : _SeriesWrapper or _ColumnFuncWrapper
"""
try:
return _COLUMNS[(table_name, column_name)]
except KeyError:
raise KeyError('column {!r} not found for table {!r}'.format(
column_name, table_name))
def _memoize_function(f, name, cache_scope=_CS_FOREVER):
"""
Wraps a function for memoization and ties its cache into the
Orca caching system.
Parameters
----------
f : function
name : str
Name of injectable.
cache_scope : {'step', 'iteration', 'forever'}, optional
Scope for which to cache data. Default is to cache forever
(or until manually cleared). 'iteration' caches data for each
complete iteration of the pipeline, 'step' caches data for
a single step of the pipeline.
"""
cache = {}
@wraps(f)
def wrapper(*args, **kwargs):
try:
cache_key = (
args or None, frozenset(kwargs.items()) if kwargs else None)
in_cache = cache_key in cache
except TypeError:
raise TypeError(
'function arguments must be hashable for memoization')
if _CACHING and in_cache:
return cache[cache_key]
else:
result = f(*args, **kwargs)
cache[cache_key] = result
return result
wrapper.__wrapped__ = f
wrapper.cache = cache
wrapper.clear_cached = lambda: cache.clear()
_MEMOIZED[name] = CacheItem(name, wrapper, cache_scope)
return wrapper
def add_injectable(
name, value, autocall=True, cache=False, cache_scope=_CS_FOREVER,
memoize=False):
"""
Add a value that will be injected into other functions.
Parameters
----------
name : str
value
If a callable and `autocall` is True then the function's
argument names and keyword argument values will be matched
to registered variables when the function needs to be
evaluated by Orca. The return value will
be passed to any functions using this injectable. In all other
cases, `value` will be passed through untouched.
autocall : bool, optional
Set to True to have injectable functions automatically called
(with argument matching) and the result injected instead of
the function itself.
cache : bool, optional
Whether to cache the return value of an injectable function.
Only applies when `value` is a callable and `autocall` is True.
cache_scope : {'step', 'iteration', 'forever'}, optional
Scope for which to cache data. Default is to cache forever
(or until manually cleared). 'iteration' caches data for each
complete iteration of the pipeline, 'step' caches data for
a single step of the pipeline.
memoize : bool, optional
If autocall is False it is still possible to cache function results
by setting this flag to True. Cached values are stored in a dictionary
keyed by argument values, so the argument values must be hashable.
Memoized functions have their caches cleared according to the same
rules as universal caching.
"""
if isinstance(value, Callable):
if autocall:
value = _InjectableFuncWrapper(
name, value, cache=cache, cache_scope=cache_scope)
# clear any cached data from a previously registered value
value.clear_cached()
elif not autocall and memoize:
value = _memoize_function(value, name, cache_scope=cache_scope)
logger.debug('registering injectable {!r}'.format(name))
_INJECTABLES[name] = value
def injectable(
name=None, autocall=True, cache=False, cache_scope=_CS_FOREVER,
memoize=False):
"""
Decorates functions that will be injected into other functions.
Decorator version of `add_injectable`. Name defaults to
name of function.
The function's argument names and keyword argument values
will be matched to registered variables when the function
needs to be evaluated by Orca.
The argument name "iter_var" may be used to have the current
iteration variable injected.
"""
def decorator(func):
if name:
n = name
else:
n = func.__name__
add_injectable(
n, func, autocall=autocall, cache=cache, cache_scope=cache_scope,
memoize=memoize)
return func
return decorator
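# Illustrative sketch (not part of the library): a plain value, an
# automatically evaluated injectable, and a memoized function that is
# injected without being called. All names here are hypothetical.
def _example_injectables():  # documentation only, never called
    add_injectable('discount_rate', 0.05)

    @injectable('settings', cache=True)
    def settings(discount_rate):
        return {'rate': discount_rate}

    @injectable('present_value', autocall=False, memoize=True)
    def present_value(amount, years, rate=0.05):
        return amount / (1.0 + rate) ** years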
def is_injectable(name):
"""
Checks whether a given name can be mapped to an injectable.
"""
return name in _INJECTABLES
def get_raw_injectable(name):
"""
Return a raw, possibly wrapped injectable.
Parameters
----------
name : str
Returns
-------
inj : _InjectableFuncWrapper or object
"""
if is_injectable(name):
return _INJECTABLES[name]
else:
raise KeyError('injectable not found: {!r}'.format(name))
def injectable_type(name):
"""
Classify an injectable as either 'variable' or 'function'.
Parameters
----------
name : str
Returns
-------
inj_type : {'variable', 'function'}
If the injectable is an automatically called function or any other
type of callable the type will be 'function'; all other injectables
will have type 'variable'.
"""
inj = get_raw_injectable(name)
if isinstance(inj, (_InjectableFuncWrapper, Callable)):
return 'function'
else:
return 'variable'
def get_injectable(name):
"""
Get an injectable by name. Injectable functions registered with
``autocall=True`` are evaluated and their result returned; all other
values, including functions registered with ``autocall=False``, are
returned without being called.
Parameters
----------
name : str
Returns
-------
injectable
Original value or evaluated value of an _InjectableFuncWrapper.
"""
i = get_raw_injectable(name)
return i() if isinstance(i, _InjectableFuncWrapper) else i
def get_injectable_func_source_data(name):
"""
Return data about an injectable function's source, including file name,
line number, and source code.
Parameters
----------
name : str
Returns
-------
filename : str
lineno : int
The line number on which the function starts.
source : str
"""
if injectable_type(name) != 'function':
raise ValueError('injectable {!r} is not a function'.format(name))
inj = get_raw_injectable(name)
if isinstance(inj, _InjectableFuncWrapper):
return utils.func_source_data(inj._func)
elif hasattr(inj, '__wrapped__'):
return utils.func_source_data(inj.__wrapped__)
else:
return utils.func_source_data(inj)
def add_step(step_name, func):
"""
Add a step function to Orca.
The function's argument names and keyword argument values
will be matched to registered variables when the function
needs to be evaluated by Orca.
The argument name "iter_var" may be used to have the current
iteration variable injected.
Parameters
----------
step_name : str
func : callable
"""
if isinstance(func, Callable):
logger.debug('registering step {!r}'.format(step_name))
_STEPS[step_name] = _StepFuncWrapper(step_name, func)
else:
raise TypeError('func must be a callable')
def step(step_name=None):
"""
Decorates functions that will be called by the `run` function.
Decorator version of `add_step`. step name defaults to
name of function.
The function's argument names and keyword argument values
will be matched to registered variables when the function
needs to be evaluated by Orca.
The argument name "iter_var" may be used to have the current
iteration variable injected.
"""
def decorator(func):
if step_name:
name = step_name
else:
name = func.__name__
add_step(name, func)
return func
return decorator
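# Illustrative sketch (not part of the library): a step that receives a
# registered table and the current iteration variable through argument
# matching. The 'households' table and step name are hypothetical.
def _example_step():  # documentation only, never called
    @step('grow_households')
    def grow_households(households, iter_var):
        df = households.to_frame(columns=['income'])
        households.update_col('income', df.income * 1.01)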
def is_step(step_name):
"""
Check whether a given name refers to a registered step.
"""
return step_name in _STEPS
def get_step(step_name):
"""
Get a wrapped step by name.
Parameters
----------
step_name : str
Returns
-------
wrapped : _StepFuncWrapper
"""
if is_step(step_name):
return _STEPS[step_name]
else:
raise KeyError('no step named {}'.format(step_name))
Broadcast = namedtuple(
'Broadcast',
['cast', 'onto', 'cast_on', 'onto_on', 'cast_index', 'onto_index'])
def broadcast(cast, onto, cast_on=None, onto_on=None,
cast_index=False, onto_index=False):
"""
Register a rule for merging two tables by broadcasting one onto
the other.
Parameters
----------
cast, onto : str
Names of registered tables.
cast_on, onto_on : str, optional
Column names used for merge, equivalent of ``left_on``/``right_on``
parameters of pandas.merge.
cast_index, onto_index : bool, optional
Whether to use table indexes for merge. Equivalent of
``left_index``/``right_index`` parameters of pandas.merge.
"""
logger.debug(
'registering broadcast of table {!r} onto {!r}'.format(cast, onto))
_BROADCASTS[(cast, onto)] = \
Broadcast(cast, onto, cast_on, onto_on, cast_index, onto_index)
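# Illustrative sketch (not part of the library): describing how 'zones'
# rows attach to 'buildings' and 'buildings' rows attach to 'households'.
# Table and column names are hypothetical.
def _example_broadcast():  # documentation only, never called
    broadcast('zones', 'buildings', cast_index=True, onto_on='zone_id')
    broadcast('buildings', 'households',
              cast_index=True, onto_on='building_id')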
def _get_broadcasts(tables):
"""
Get the broadcasts associated with a set of tables.
Parameters
----------
tables : sequence of str
Table names for which broadcasts have been registered.
Returns
-------
casts : dict of `Broadcast`
Keys are tuples of strings like (cast_name, onto_name).
"""
tables = set(tables)
casts = tz.keyfilter(
lambda x: x[0] in tables and x[1] in tables, _BROADCASTS)
if tables - set(tz.concat(casts.keys())):
raise ValueError('Not enough links to merge all tables.')
return casts
def is_broadcast(cast_name, onto_name):
"""
Checks whether a relationship exists for broadcast `cast_name`
onto `onto_name`.
"""
return (cast_name, onto_name) in _BROADCASTS
def get_broadcast(cast_name, onto_name):
"""
Get a single broadcast.
A broadcast stores the information needed to perform a pandas merge.
A Broadcast object is a namedtuple with these attributes:
- cast: the name of the table being broadcast
- onto: the name of the table onto which "cast" is broadcast
- cast_on: The optional name of a column on which to join.
None if the table index will be used instead.
- onto_on: The optional name of a column on which to join.
None if the table index will be used instead.
- cast_index: True if the table index should be used for the join.
- onto_index: True if the table index should be used for the join.
Parameters
----------
cast_name : str
The name of the table being broadcast.
onto_name : str
The name of the table onto which `cast_name` is broadcast.
Returns
-------
broadcast : Broadcast
"""
if is_broadcast(cast_name, onto_name):
return _BROADCASTS[(cast_name, onto_name)]
else:
raise KeyError(
'no rule found for broadcasting {!r} onto {!r}'.format(
cast_name, onto_name))
# utilities for merge_tables
def _all_reachable_tables(t):
"""
A generator that provides all the names of tables that can be
reached via merges starting at the given target table.
"""
for k, v in t.items():
for tname in _all_reachable_tables(v):
yield tname
yield k
def _recursive_getitem(d, key):
"""
Descend into a dict of dicts to return the one that contains
a given key. Every value in the dict must be another dict.
"""
if key in d:
return d
else:
for v in d.values():
return _recursive_getitem(v, key)
else:
raise KeyError('Key not found: {}'.format(key))
def _dict_value_to_pairs(d):
"""
Takes the first value of a dictionary (which itself should be
a dictionary) and turns it into a sequence of {key: value} dicts.
For example, _dict_value_to_pairs({'c': {'a': 1, 'b': 2}}) will yield
{'a': 1} and {'b': 2}.
"""
d = d[tz.first(d)]
for k, v in d.items():
yield {k: v}
def _is_leaf_node(merge_node):
"""
Returns True for dicts like {'a': {}}.
"""
return len(merge_node) == 1 and not next(iter(merge_node.values()))
def _next_merge(merge_node):
"""
Gets a node that has only leaf nodes below it. This table and
the ones below are ready to be merged to make a new leaf node.
"""
if all(_is_leaf_node(d) for d in _dict_value_to_pairs(merge_node)):
return merge_node
else:
for d in tz.remove(_is_leaf_node, _dict_value_to_pairs(merge_node)):
return _next_merge(d)
else:
raise OrcaError('No node found for next merge.')
def merge_tables(target, tables, columns=None, drop_intersection=True):
"""
Merge a number of tables onto a target table. Tables must have
registered merge rules via the `broadcast` function.
Parameters
----------
target : str, DataFrameWrapper, or TableFuncWrapper
Name of the table (or wrapped table) onto which tables will be merged.
tables : list of `DataFrameWrapper`, `TableFuncWrapper`, or str
All of the tables to merge. Should include the target table.
columns : list of str, optional
If given, columns will be mapped to `tables` and only those columns
will be requested from each table. The final merged table will have
only these columns. By default all columns are used from every
table.
drop_intersection : bool
If True, keep the left-most occurrence of any column name if it occurs
on more than one table. This prevents getting back the same column
with suffixes applied by pd.merge. If False, column names will be
suffixed with the table names - e.g. zone_id_buildings and
zone_id_parcels.
Returns
-------
merged : pandas.DataFrame
"""
# allow target to be string or table wrapper
if isinstance(target, (DataFrameWrapper, TableFuncWrapper)):
target = target.name
# allow tables to be strings or table wrappers
tables = [get_table(t)
if not isinstance(t, (DataFrameWrapper, TableFuncWrapper)) else t
for t in tables]
merges = {t.name: {} for t in tables}
tables = {t.name: t for t in tables}
casts = _get_broadcasts(tables.keys())
logger.debug(
'attempting to merge tables {} to target table {}'.format(
tables.keys(), target))
# relate all the tables by registered broadcasts
for table, onto in casts:
merges[onto][table] = merges[table]
merges = {target: merges[target]}
# verify that all the tables can be merged to the target
all_tables = set(_all_reachable_tables(merges))
if all_tables != set(tables.keys()):
raise RuntimeError(
('Not all tables can be merged to target "{}". Unlinked tables: {}'
).format(target, list(set(tables.keys()) - all_tables)))
# add any columns necessary for indexing into other tables
# during merges
if columns:
columns = list(columns)
for c in casts.values():
if c.onto_on:
columns.append(c.onto_on)
if c.cast_on:
columns.append(c.cast_on)
# get column map for which columns go with which table
colmap = column_map(tables.values(), columns)
# get frames
frames = {name: t.to_frame(columns=colmap[name])
for name, t in tables.items()}
past_intersections = set()
# perform merges until there's only one table left
while merges[target]:
nm = _next_merge(merges)
onto = tz.first(nm)
onto_table = frames[onto]
# loop over all the tables that can be broadcast onto
# the onto_table and merge them all in.
for cast in nm[onto]:
cast_table = frames[cast]
bc = casts[(cast, onto)]
with log_start_finish(
'merge tables {} and {}'.format(onto, cast), logger):
intersection = set(onto_table.columns).\
intersection(cast_table.columns)
# intersection is ok if it's the join key
intersection.discard(bc.onto_on)
intersection.discard(bc.cast_on)
# otherwise drop so as not to create conflicts
if drop_intersection:
cast_table = cast_table.drop(intersection, axis=1)
else:
# add suffix to past intersections which wouldn't get
# picked up by the merge - these we have to rename by hand
renames = dict(zip(
past_intersections,
[c+'_'+onto for c in past_intersections]
))
onto_table = onto_table.rename(columns=renames)
# keep track of past intersections in case there's an odd
# number of intersections
past_intersections = past_intersections.union(intersection)
onto_table = pd.merge(
onto_table, cast_table,
suffixes=['_'+onto, '_'+cast],
left_on=bc.onto_on, right_on=bc.cast_on,
left_index=bc.onto_index, right_index=bc.cast_index)
# replace the existing table with the merged one
frames[onto] = onto_table
# free up space by dropping the cast table
del frames[cast]
# mark the onto table as having no more things to broadcast
# onto it.
_recursive_getitem(merges, onto)[onto] = {}
logger.debug('finished merge')
return frames[target]
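# Illustrative sketch (not part of the library): merging broadcast-linked
# tables down to a single DataFrame. Assumes hypothetical 'households',
# 'buildings' and 'zones' tables with the appropriate broadcasts registered.
def _example_merge_tables():  # documentation only, never called
    return merge_tables(
        target='households',
        tables=['households', 'buildings', 'zones'],
        columns=['income', 'sqft', 'zone_id'])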
def get_step_table_names(steps):
"""
Returns a list of table names injected into the provided steps.
Parameters
----------
steps: list of str
Steps to gather table inputs from.
Returns
-------
list of str
"""
table_names = set()
for s in steps:
table_names |= get_step(s)._tables_used()
return list(table_names)
def write_tables(fname, table_names=None, prefix=None, compress=False, local=False):
"""
Writes tables to a pandas.HDFStore file.
Parameters
----------
fname : str
File name for HDFStore. Will be opened in append mode and closed
at the end of this function.
table_names: list of str, optional, default None
List of tables to write. If None, all registered tables will
be written.
prefix: str
If not None, used to prefix the output table names so that
multiple iterations can go in the same file.
compress: boolean
Whether to compress output file using standard HDF5-readable
zlib compression, default False.
local: boolean
If True, write only each table's local columns and skip computed
columns, default False.
"""
if table_names is None:
table_names = list_tables()
tables = (get_table(t) for t in table_names)
key_template = '{}/{{}}'.format(prefix) if prefix is not None else '{}'
# set compression options to zlib level-1 if compress arg is True
complib = 'zlib' if compress else None
complevel = 1 if compress else 0
with pd.HDFStore(fname, mode='a', complib=complib, complevel=complevel) as store:
for t in tables:
# if local arg is True, store only local columns
columns = None
if local is True:
columns = t.local_columns
store[key_template.format(t.name)] = t.to_frame(columns=columns)
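# Illustrative sketch (not part of the library): writing all registered
# tables, local columns only, to a compressed HDF5 file. The file name and
# prefix are hypothetical.
def _example_write_tables():  # documentation only, never called
    write_tables('simulation.h5', prefix='2020', compress=True, local=True)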
iter_step = namedtuple('iter_step', 'step_num,step_name')
def run(steps, iter_vars=None, data_out=None, out_interval=1,
out_base_tables=None, out_run_tables=None, compress=False,
out_base_local=True, out_run_local=True):
"""
Run steps in series, optionally repeatedly over some sequence.
The current iteration variable is set as a global injectable
called ``iter_var``.
Parameters
----------
steps : list of str
List of steps to run identified by their name.
iter_vars : iterable, optional
The values of `iter_vars` will be made available as an injectable
called ``iter_var`` when repeatedly running `steps`.
data_out : str, optional
An optional filename to which all tables injected into any step
in `steps` will be saved every `out_interval` iterations.
File will be a pandas HDF data store.
out_interval : int, optional
Iteration interval on which to save data to `data_out`. For example,
2 will save out every 2 iterations, 5 every 5 iterations.
Default is every iteration.
The results of the first and last iterations are always included.
The input (base) tables are also included and prefixed with `base/`,
these represent the state of the system before any steps have been
executed.
The interval is defined relative to the first iteration. For example,
a run beginning in 2015 with an out_interval of 2 will write out
results for 2015, 2017, etc.
out_base_tables: list of str, optional, default None
List of base tables to write. If not provided, tables injected
into 'steps' will be written.
out_run_tables: list of str, optional, default None
List of run tables to write. If not provided, tables injected
into 'steps' will be written.
compress: boolean, optional, default False
Whether to compress output file using standard HDF5 zlib compression.
Compression yields much smaller files using slightly more CPU.
out_base_local: boolean, optional, default True
For tables in out_base_tables, whether to store only local columns (True)
or both, local and computed columns (False).
out_run_local: boolean, optional, default True
For tables in out_run_tables, whether to store only local columns (True)
or both, local and computed columns (False).
"""
iter_vars = iter_vars or [None]
max_i = len(iter_vars)
# get the tables to write out
if out_base_tables is None or out_run_tables is None:
step_tables = get_step_table_names(steps)
if out_base_tables is None:
out_base_tables = step_tables
if out_run_tables is None:
out_run_tables = step_tables
# write out the base (inputs)
if data_out:
add_injectable('iter_var', iter_vars[0])
write_tables(data_out, out_base_tables, 'base', compress=compress, local=out_base_local)
# run the steps
for i, var in enumerate(iter_vars, start=1):
add_injectable('iter_var', var)
if var is not None:
print('Running iteration {} with iteration value {!r}'.format(
i, var))
logger.debug(
'running iteration {} with iteration value {!r}'.format(
i, var))
t1 = time.time()
for j, step_name in enumerate(steps):
add_injectable('iter_step', iter_step(j, step_name))
print('Running step {!r}'.format(step_name))
with log_start_finish(
'run step {!r}'.format(step_name), logger,
logging.INFO):
step = get_step(step_name)
t2 = time.time()
step()
print("Time to execute step '{}': {:.2f} s".format(
step_name, time.time() - t2))
clear_cache(scope=_CS_STEP)
print(
('Total time to execute iteration {} '
'with iteration value {!r}: '
'{:.2f} s').format(i, var, time.time() - t1))
# write out the results for the current iteration
if data_out:
if (i - 1) % out_interval == 0 or i == max_i:
write_tables(data_out, out_run_tables, var, compress=compress, local=out_run_local)
clear_cache(scope=_CS_ITER)
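# Illustrative sketch (not part of the library): running a hypothetical
# step over three iteration values, writing results every other iteration.
def _example_run():  # documentation only, never called
    run(['grow_households'],
        iter_vars=[2020, 2021, 2022],
        data_out='run.h5',
        out_interval=2)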
@contextmanager
def injectables(**kwargs):
"""
Temporarily add injectables to the pipeline environment.
Takes only keyword arguments.
Injectables will be returned to their original state when the context
manager exits.
"""
global _INJECTABLES
original = _INJECTABLES.copy()
_INJECTABLES.update(kwargs)
yield
_INJECTABLES = original
@contextmanager
def temporary_tables(**kwargs):
"""
Temporarily set DataFrames as registered tables.
Tables will be returned to their original state when the context
manager exits. Caching is not enabled for tables registered via
this function.
"""
global _TABLES
original = _TABLES.copy()
for k, v in kwargs.items():
if not isinstance(v, pd.DataFrame):
raise ValueError('tables only accepts DataFrames')
add_table(k, v)
yield
_TABLES = original
def eval_variable(name, **kwargs):
"""
Execute a single variable function registered with Orca
and return the result. Any keyword arguments are temporarily set
as injectables. This gives the value as would be injected into a function.
Parameters
----------
name : str
Name of variable to evaluate.
Use variable expressions to specify columns.
Returns
-------
object
For injectables and columns this directly returns whatever
object is returned by the registered function.
For tables this returns a DataFrameWrapper as if the table
had been injected into a function.
"""
with injectables(**kwargs):
vars = _collect_variables([name], [name])
return vars[name]
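# Illustrative sketch (not part of the library): evaluating registered
# variables outside of a step, temporarily overriding an injectable.
# The names used here are hypothetical.
def _example_eval_variable():  # documentation only, never called
    income = eval_variable('households.income')
    rate = eval_variable('discount_rate', discount_rate=0.07)
    return income, rate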
def eval_step(name, **kwargs):
"""
Evaluate a step as would be done within the pipeline environment
and return the result. Any keyword arguments are temporarily set
as injectables.
Parameters
----------
name : str
Name of step to run.
Returns
-------
object
Anything returned by a step. (Though note that in Orca runs
return values from steps are ignored.)
"""
with injectables(**kwargs):
return get_step(name)()
|
bsd-3-clause
|
aasensio/hierarchicalQuietSun
|
doPlot.py
|
1
|
3989
|
import numpy as np
import matplotlib.pyplot as pl
import scipy.special as sp
from matplotlib.ticker import MaxNLocator
from scipy.integrate import simps
import scipy.signal as sg
def logNormalAvgPrior(x, mu, sigma):
pf = np.zeros(len(x))
for i in range(len(x)):
logy = -np.log(sigma) - np.log(x[i]) - (np.log(x[i]) - mu)**2 / (2.0*sigma**2)
pf[i] = np.mean(np.exp(logy))
return pf
def betaAvgPrior(x, alpha, beta, left, right):
Beta = sp.beta(alpha, beta)
pf = np.zeros(len(x))
for i in range(len(x)):
ylog = ( (1.0-alpha-beta) * np.log(right-left) - (sp.gammaln(alpha) + sp.gammaln(beta) - sp.gammaln(alpha+beta)) +
(alpha-1.0) * np.log(x[i] - left) + (beta-1.0) * np.log(right - x[i]) )
pf[i] = np.mean(np.exp(ylog))
return pf
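# Illustrative sketch (not part of the original script): the averaged
# priors above are evaluated over arrays of hyperparameter samples. The
# arrays below are synthetic stand-ins, not output of the actual chain.
def _example_avg_priors():  # documentation only, never called
    mu_samples = np.random.normal(4.0, 0.1, size=200)
    sigma_samples = np.abs(np.random.normal(0.5, 0.05, size=200))
    B = np.linspace(0.1, 800, 50)
    return logNormalAvgPrior(B, mu_samples, sigma_samples)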
samples = np.load('samplesHyperPar.npy')
ch = samples.T
ch = ch[:,np.random.permutation(ch.shape[1])]
for i in range(4):
ch[i,:] = sg.medfilt(ch[i,:],kernel_size=7)
pl.close('all')
fig1 = pl.figure(num=1, figsize=(17,8))
pl.clf()
loop = 1
nTicks = 5
labels = [r'$\alpha_B$',r'$\beta_B$',r'$\alpha_\mu$',r'$\beta_\mu$',r'$\alpha_f$',r'$\beta_f$']
bellotOrozco = np.loadtxt('bellot_orozco.dat')
for i in range(4):
ax = fig1.add_subplot(2,5,loop)
ax.plot(ch[i,:], color='#969696')
ax.set_xlabel('Iteration')
ax.set_ylabel(labels[i])
ax.xaxis.set_major_locator(MaxNLocator(nTicks))
loop += 1
ax = fig1.add_subplot(2,5,loop)
ax.hist(ch[i,:], color='#507FED', normed=True, bins=20)
#ax.hist(ch[i,:], color='#507FED', normed=True, cumulative=True, alpha=0.5)
ax.set_xlabel(labels[i])
ax.set_ylabel('p('+labels[i]+'|D)')
ax.xaxis.set_major_locator(MaxNLocator(nTicks))
loop += 1
print np.mean(ch[i,:]), np.std(ch[i,:])
if ((i+1) % 2 == 0):
loop += 1
EX = np.exp(ch[0,:]+ch[1,:]**2/2.0)
EX2 = np.sqrt(np.exp(2.0*ch[0,:]+2.0*ch[1,:]**2))
print "E(B)={0} +- {1}".format(np.mean(EX),np.std(EX))
print "sqrt(E(B^2))={0} +- {1}".format(np.mean(EX2),np.std(EX2))
EX = np.mean((ch[2,:]-ch[3,:])/(ch[2,:]+ch[3,:]))
# Magnetic field strength
B = np.linspace(0.1,800,500)
pB = np.zeros(500)
alpha = ch[0,:]
beta = ch[1,:]
pB = logNormalAvgPrior(B, alpha, beta)
ax = fig1.add_subplot(2,5,5)
ax.plot(B,pB, color='#507FED', linewidth=2)
#pBTypeII = IGAvgPrior(B, np.mean(ch[0,:]), np.mean(ch[1,:]))
#ax.plot(B,pBTypeII, '--', color='#969696', linewidth=2)
ax.set_xlabel(r'B [G]')
ax.set_ylabel(r'$\langle$ p(B|D) $\rangle$')
ax.xaxis.set_major_locator(MaxNLocator(nTicks))
# Inclination
left = -1.0
right = 1.0
mu = np.linspace(left + 1e-2,right - 1e-2,100)
pmu = np.zeros(100)
alpha = ch[2,:]
beta = ch[3,:]
pmu = betaAvgPrior(mu, alpha, beta, left, right)
ax = fig1.add_subplot(2,5,10)
ax.plot(mu,pmu, color='#507FED', linewidth=2)
#pBTypeII = betaAvgPrior(mu, np.mean(ch[2,:]), np.mean(ch[3,:]), left, right)
#ax.plot(mu,pBTypeII, '--', color='#969696', linewidth=2)
ax.set_xlabel(r'$\mu$')
ax.set_ylabel(r'$\langle$ p($\mu$|D) $\rangle$')
ax.xaxis.set_major_locator(MaxNLocator(nTicks))
# Filling factor
#left = 0.0
#right = 1.0
#f = np.linspace(left + 1e-4, right - 1e-4, 100)
#pf = np.zeros(100)
#alpha = ch[4,:]
#beta = ch[5,:]
#pf = betaAvgPrior(f, alpha, beta, left, right)
#ax = fig1.add_subplot(3,5,15)
#ax.plot(f,pf)
#ax.set_xlabel('f')
#ax.set_ylabel('p(f)')
#ax.xaxis.set_major_locator(MaxNLocator(6))
fig1.tight_layout()
fig1.savefig("posteriorHyper.pdf")
pl.close('all')
theta = np.linspace(0,180,100)
fig = pl.figure(num=1)
ax = fig.add_subplot(111)
normaliz = simps(np.sin(theta*np.pi/180.0) * pmu[::-1], x=theta)
ax.plot(theta, np.sin(theta*np.pi/180.0) * pmu[::-1] / normaliz, color='#507FED', linewidth=2)
normaliz = simps(np.sin(theta*np.pi/180.0), x=theta)
ax.plot(theta, np.sin(theta*np.pi/180.0) / normaliz, color='#969696', linewidth=2)
ax.plot(bellotOrozco[:,0], bellotOrozco[:,1], '--', color='#969696', linewidth=2)
ax.set_xlabel(r'$\theta$ [deg]')
ax.set_ylabel(r'$\langle$ p($\theta$|D) $\rangle$')
ax.axvline(90.0,color='k',linestyle='--', linewidth=2)
fig.savefig("posteriorInclination.pdf")
|
mit
|
webmasterraj/FogOrNot
|
flask/lib/python2.7/site-packages/pandas/io/data.py
|
2
|
44585
|
"""
Module contains tools for collecting data from various remote sources
"""
import warnings
import tempfile
import datetime as dt
import time
from collections import defaultdict
import numpy as np
from pandas.compat import(
StringIO, bytes_to_str, range, lmap, zip
)
import pandas.compat as compat
from pandas import Panel, DataFrame, Series, read_csv, concat, to_datetime, DatetimeIndex, DateOffset
from pandas.core.common import is_list_like, PandasError
from pandas.io.common import urlopen, ZipFile, urlencode
from pandas.tseries.offsets import MonthEnd
from pandas.util.testing import _network_error_classes
from pandas.io.html import read_html
class SymbolWarning(UserWarning):
pass
class RemoteDataError(PandasError, IOError):
pass
def DataReader(name, data_source=None, start=None, end=None,
retry_count=3, pause=0.001):
"""
Imports data from a number of online sources.
Currently supports Yahoo! Finance, Google Finance, St. Louis FED (FRED)
and Kenneth French's data library.
Parameters
----------
name : str or list of strs
the name of the dataset. Some data sources (yahoo, google, fred) will
accept a list of names.
data_source: str
the data source ("yahoo", "google", "fred", or "famafrench")
start : {datetime, None}
left boundary for range (defaults to 1/1/2010)
end : {datetime, None}
right boundary for range (defaults to today)
Examples
----------
# Data from Yahoo! Finance
gs = DataReader("GS", "yahoo")
# Data from Google Finance
aapl = DataReader("AAPL", "google")
# Data from FRED
vix = DataReader("VIXCLS", "fred")
# Data from Fama/French
ff = DataReader("F-F_Research_Data_Factors", "famafrench")
ff = DataReader("F-F_Research_Data_Factors_weekly", "famafrench")
ff = DataReader("6_Portfolios_2x3", "famafrench")
ff = DataReader("F-F_ST_Reversal_Factor", "famafrench")
"""
start, end = _sanitize_dates(start, end)
if data_source == "yahoo":
return get_data_yahoo(symbols=name, start=start, end=end,
adjust_price=False, chunksize=25,
retry_count=retry_count, pause=pause)
elif data_source == "google":
return get_data_google(symbols=name, start=start, end=end,
adjust_price=False, chunksize=25,
retry_count=retry_count, pause=pause)
elif data_source == "fred":
return get_data_fred(name, start, end)
elif data_source == "famafrench":
return get_data_famafrench(name)
def _sanitize_dates(start, end):
from pandas.core.datetools import to_datetime
start = to_datetime(start)
end = to_datetime(end)
if start is None:
start = dt.datetime(2010, 1, 1)
if end is None:
end = dt.datetime.today()
return start, end
def _in_chunks(seq, size):
"""
Return sequence in 'chunks' of size defined by size
"""
return (seq[pos:pos + size] for pos in range(0, len(seq), size))
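# Illustrative sketch (not part of the library): _in_chunks yields
# successive slices of the symbol list.
def _example_in_chunks():  # documentation only, never called
    # returns [['GS', 'AAPL'], ['MSFT']]
    return list(_in_chunks(['GS', 'AAPL', 'MSFT'], 2))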
_yahoo_codes = {'symbol': 's', 'last': 'l1', 'change_pct': 'p2', 'PE': 'r',
'time': 't1', 'short_ratio': 's7'}
_YAHOO_QUOTE_URL = 'http://finance.yahoo.com/d/quotes.csv?'
def get_quote_yahoo(symbols):
"""
Get current yahoo quote
Returns a DataFrame
"""
if isinstance(symbols, compat.string_types):
sym_list = symbols
else:
sym_list = '+'.join(symbols)
# for codes see: http://www.gummy-stuff.org/Yahoo-data.htm
request = ''.join(compat.itervalues(_yahoo_codes)) # code request string
header = list(_yahoo_codes.keys())
data = defaultdict(list)
url_str = _YAHOO_QUOTE_URL + 's=%s&f=%s' % (sym_list, request)
with urlopen(url_str) as url:
lines = url.readlines()
for line in lines:
fields = line.decode('utf-8').strip().split(',')
for i, field in enumerate(fields):
if field[-2:] == '%"':
v = float(field.strip('"%'))
elif field[0] == '"':
v = field.strip('"')
else:
try:
v = float(field)
except ValueError:
v = field
data[header[i]].append(v)
idx = data.pop('symbol')
return DataFrame(data, index=idx)
def get_quote_google(symbols):
raise NotImplementedError("Google Finance doesn't have this functionality")
def _retry_read_url(url, retry_count, pause, name):
for _ in range(retry_count):
time.sleep(pause)
# kludge to close the socket ASAP
try:
with urlopen(url) as resp:
lines = resp.read()
except _network_error_classes:
pass
else:
rs = read_csv(StringIO(bytes_to_str(lines)), index_col=0,
parse_dates=True, na_values='-')[::-1]
# Yahoo! Finance sometimes does this awesome thing where they
# return 2 rows for the most recent business day
if len(rs) > 2 and rs.index[-1] == rs.index[-2]: # pragma: no cover
rs = rs[:-1]
#Get rid of unicode characters in index name.
try:
rs.index.name = rs.index.name.decode('unicode_escape').encode('ascii', 'ignore')
except AttributeError:
#Python 3 string has no decode method.
rs.index.name = rs.index.name.encode('ascii', 'ignore').decode()
return rs
raise IOError("after %d tries, %s did not "
"return a 200 for url %r" % (retry_count, name, url))
_HISTORICAL_YAHOO_URL = 'http://ichart.finance.yahoo.com/table.csv?'
def _get_hist_yahoo(sym, start, end, interval, retry_count, pause):
"""
Get historical data for the given name from yahoo.
Date format is datetime
Returns a DataFrame.
"""
start, end = _sanitize_dates(start, end)
url = (_HISTORICAL_YAHOO_URL + 's=%s' % sym +
'&a=%s' % (start.month - 1) +
'&b=%s' % start.day +
'&c=%s' % start.year +
'&d=%s' % (end.month - 1) +
'&e=%s' % end.day +
'&f=%s' % end.year +
'&g=%s' % interval +
'&ignore=.csv')
return _retry_read_url(url, retry_count, pause, 'Yahoo!')
_HISTORICAL_GOOGLE_URL = 'http://www.google.com/finance/historical?'
def _get_hist_google(sym, start, end, interval, retry_count, pause):
"""
Get historical data for the given name from google.
Date format is datetime
Returns a DataFrame.
"""
start, end = _sanitize_dates(start, end)
# www.google.com/finance/historical?q=GOOG&startdate=Jun+9%2C+2011&enddate=Jun+8%2C+2013&output=csv
url = "%s%s" % (_HISTORICAL_GOOGLE_URL,
urlencode({"q": sym,
"startdate": start.strftime('%b %d, ' '%Y'),
"enddate": end.strftime('%b %d, %Y'),
"output": "csv"}))
return _retry_read_url(url, retry_count, pause, 'Google')
def _adjust_prices(hist_data, price_list=None):
"""
Return modified DataFrame or Panel with adjusted prices based on
'Adj Close' price. Adds 'Adj_Ratio' column.
"""
if price_list is None:
price_list = 'Open', 'High', 'Low', 'Close'
adj_ratio = hist_data['Adj Close'] / hist_data['Close']
data = hist_data.copy()
for item in price_list:
data[item] = hist_data[item] * adj_ratio
data['Adj_Ratio'] = adj_ratio
del data['Adj Close']
return data
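# Illustrative sketch (not part of the library): the adjustment rescales
# the OHLC columns by Adj Close / Close. The single row below is synthetic.
def _example_adjust_prices():  # documentation only, never called
    hist = DataFrame({'Open': [10.0], 'High': [11.0], 'Low': [9.0],
                      'Close': [10.0], 'Adj Close': [5.0]})
    # OHLC columns are halved and an 'Adj_Ratio' column of 0.5 is added
    return _adjust_prices(hist)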
def _calc_return_index(price_df):
"""
Return a returns index from an input price df or series. Initial value
(typically NaN) is set to 1.
"""
df = price_df.pct_change().add(1).cumprod()
mask = df.ix[1].notnull() & df.ix[0].isnull()
df.ix[0][mask] = 1
# Check for first stock listings after starting date of index in ret_index
# If True, find first_valid_index and set previous entry to 1.
if (~mask).any():
for sym in mask.index[~mask]:
tstamp = df[sym].first_valid_index()
t_idx = df.index.get_loc(tstamp) - 1
df[sym].ix[t_idx] = 1
return df
_YAHOO_COMPONENTS_URL = 'http://download.finance.yahoo.com/d/quotes.csv?'
def get_components_yahoo(idx_sym):
"""
Returns DataFrame containing list of component information for
index represented in idx_sym from yahoo. Includes component symbol
(ticker), exchange, and name.
Parameters
----------
idx_sym : str
Stock index symbol
Examples:
'^DJI' (Dow Jones Industrial Average)
'^NYA' (NYSE Composite)
'^IXIC' (NASDAQ Composite)
See: http://finance.yahoo.com/indices for other index symbols
Returns
-------
idx_df : DataFrame
"""
stats = 'snx'
# URL of form:
# http://download.finance.yahoo.com/d/quotes.csv?s=@%5EIXIC&f=snxl1d1t1c1ohgv
url = _YAHOO_COMPONENTS_URL + 's={0}&f={1}&e=.csv&h={2}'
idx_mod = idx_sym.replace('^', '@%5E')
url_str = url.format(idx_mod, stats, 1)
idx_df = DataFrame()
mask = [True]
comp_idx = 1
# LOOP across component index structure,
# break when no new components are found
while True in mask:
url_str = url.format(idx_mod, stats, comp_idx)
with urlopen(url_str) as resp:
raw = resp.read()
lines = raw.decode('utf-8').strip().strip('"').split('"\r\n"')
lines = [line.strip().split('","') for line in lines]
temp_df = DataFrame(lines, columns=['ticker', 'name', 'exchange'])
temp_df = temp_df.drop_duplicates()
temp_df = temp_df.set_index('ticker')
mask = ~temp_df.index.isin(idx_df.index)
comp_idx = comp_idx + 50
idx_df = idx_df.append(temp_df[mask])
return idx_df
def _dl_mult_symbols(symbols, start, end, interval, chunksize, retry_count, pause,
method):
stocks = {}
failed = []
for sym_group in _in_chunks(symbols, chunksize):
for sym in sym_group:
try:
stocks[sym] = method(sym, start, end, interval, retry_count, pause)
except IOError:
warnings.warn('Failed to read symbol: {0!r}, replacing with '
'NaN.'.format(sym), SymbolWarning)
failed.append(sym)
try:
if len(stocks) > 0 and len(failed) > 0:
df_na = next(iter(stocks.values())).copy()
df_na[:] = np.nan
for sym in failed:
stocks[sym] = df_na
return Panel(stocks).swapaxes('items', 'minor')
except AttributeError:
# cannot construct a panel with just 1D nans indicating no data
raise RemoteDataError("No data fetched using "
"{0!r}".format(method.__name__))
_source_functions = {'google': _get_hist_google, 'yahoo': _get_hist_yahoo}
def _get_data_from(symbols, start, end, interval, retry_count, pause, adjust_price,
ret_index, chunksize, source):
src_fn = _source_functions[source]
# If a single symbol, (e.g., 'GOOG')
if isinstance(symbols, (compat.string_types, int)):
hist_data = src_fn(symbols, start, end, interval, retry_count, pause)
# Or multiple symbols, (e.g., ['GOOG', 'AAPL', 'MSFT'])
elif isinstance(symbols, DataFrame):
hist_data = _dl_mult_symbols(symbols.index, start, end, interval, chunksize,
retry_count, pause, src_fn)
else:
hist_data = _dl_mult_symbols(symbols, start, end, interval, chunksize,
retry_count, pause, src_fn)
if source.lower() == 'yahoo':
if ret_index:
hist_data['Ret_Index'] = _calc_return_index(hist_data['Adj Close'])
if adjust_price:
hist_data = _adjust_prices(hist_data)
return hist_data
def get_data_yahoo(symbols=None, start=None, end=None, retry_count=3,
pause=0.001, adjust_price=False, ret_index=False,
chunksize=25, interval='d'):
"""
Returns DataFrame/Panel of historical stock prices from symbols, over date
range, start to end. To avoid being penalized by Yahoo! Finance servers,
pauses between downloading 'chunks' of symbols can be specified.
Parameters
----------
symbols : string, array-like object (list, tuple, Series), or DataFrame
Single stock symbol (ticker), array-like object of symbols or
DataFrame with index containing stock symbols.
start : string, (defaults to '1/1/2010')
Starting date, timestamp. Parses many different kinds of date
representations (e.g., 'JAN-01-2010', '1/1/10', 'Jan, 1, 1980')
end : string, (defaults to today)
Ending date, timestamp. Same format as starting date.
retry_count : int, default 3
Number of times to retry query request.
pause : float, default 0.001
Time, in seconds, to pause between consecutive queries of chunks. If
single value given for symbol, represents the pause between retries.
adjust_price : bool, default False
If True, adjusts all prices in hist_data ('Open', 'High', 'Low',
'Close') based on 'Adj Close' price. Adds 'Adj_Ratio' column and drops
'Adj Close'.
ret_index : bool, default False
If True, includes a simple return index 'Ret_Index' in hist_data.
chunksize : int, default 25
Number of symbols to download consecutively before initiating pause.
interval : string, default 'd'
Time interval code, valid values are 'd' for daily, 'w' for weekly,
'm' for monthly and 'v' for dividend.
Returns
-------
hist_data : DataFrame (str) or Panel (array-like object, DataFrame)
"""
if interval not in ['d', 'w', 'm', 'v']:
raise ValueError("Invalid interval: valid values are 'd', 'w', 'm' and 'v'")
return _get_data_from(symbols, start, end, interval, retry_count, pause,
adjust_price, ret_index, chunksize, 'yahoo')
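# Illustrative usage sketch (not executed on import; the tickers and dates
# below are hypothetical examples, not part of this module):
# >>> # a single symbol returns a DataFrame of daily prices
# >>> aapl = get_data_yahoo('AAPL', start='1/1/2014', end='1/1/2015')
# >>> # a list of symbols returns a Panel, downloaded in chunks of `chunksize`
# >>> pnl = get_data_yahoo(['AAPL', 'MSFT'], start='1/1/2014',
# ...                      end='1/1/2015', adjust_price=True, interval='w')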
def get_data_google(symbols=None, start=None, end=None, retry_count=3,
pause=0.001, adjust_price=False, ret_index=False,
chunksize=25):
"""
Returns DataFrame/Panel of historical stock prices from symbols, over date
range, start to end. To avoid being penalized by Google Finance servers,
pauses between downloading 'chunks' of symbols can be specified.
Parameters
----------
symbols : string, array-like object (list, tuple, Series), or DataFrame
Single stock symbol (ticker), array-like object of symbols or
DataFrame with index containing stock symbols.
start : string, (defaults to '1/1/2010')
        Starting date, timestamp. Parses many different kinds of date
representations (e.g., 'JAN-01-2010', '1/1/10', 'Jan, 1, 1980')
end : string, (defaults to today)
Ending date, timestamp. Same format as starting date.
retry_count : int, default 3
Number of times to retry query request.
    pause : float, default 0.001
Time, in seconds, to pause between consecutive queries of chunks. If
single value given for symbol, represents the pause between retries.
chunksize : int, default 25
        Number of symbols to download consecutively before initiating pause.
Returns
-------
hist_data : DataFrame (str) or Panel (array-like object, DataFrame)
"""
return _get_data_from(symbols, start, end, None, retry_count, pause,
adjust_price, ret_index, chunksize, 'google')
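# Illustrative usage sketch for the Google source (symbols and keyword values
# are hypothetical; note that no `interval` argument is accepted here):
# >>> goog = get_data_google('GOOG', start='1/1/2014', end='1/1/2015')
# >>> pnl = get_data_google(['GOOG', 'IBM'], retry_count=5, pause=0.5)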
_FRED_URL = "http://research.stlouisfed.org/fred2/series/"
def get_data_fred(name, start=dt.datetime(2010, 1, 1),
end=dt.datetime.today()):
"""
Get data for the given name from the St. Louis FED (FRED).
Date format is datetime
Returns a DataFrame.
If multiple names are passed for "series" then the index of the
    DataFrame is the outer join of the indices of each series.
"""
start, end = _sanitize_dates(start, end)
if not is_list_like(name):
names = [name]
else:
names = name
urls = [_FRED_URL + '%s' % n + '/downloaddata/%s' % n + '.csv' for
n in names]
def fetch_data(url, name):
with urlopen(url) as resp:
data = read_csv(resp, index_col=0, parse_dates=True,
header=None, skiprows=1, names=["DATE", name],
na_values='.')
try:
return data.truncate(start, end)
except KeyError:
if data.ix[3].name[7:12] == 'Error':
raise IOError("Failed to get the data. Check that {0!r} is "
"a valid FRED series.".format(name))
raise
df = concat([fetch_data(url, n) for url, n in zip(urls, names)],
axis=1, join='outer')
return df
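# Illustrative usage sketch (the series ids are examples of FRED identifiers
# and are assumptions, not part of this module):
# >>> gdp = get_data_fred('GDP', start=dt.datetime(2010, 1, 1))
# >>> # multiple series are outer-joined on their date indices
# >>> rates = get_data_fred(['GS10', 'GS1'])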
_FAMAFRENCH_URL = 'http://mba.tuck.dartmouth.edu/pages/faculty/ken.french/ftp'
def get_data_famafrench(name):
# path of zip files
zip_file_path = '{0}/{1}.zip'.format(_FAMAFRENCH_URL, name)
with urlopen(zip_file_path) as url:
raw = url.read()
with tempfile.TemporaryFile() as tmpf:
tmpf.write(raw)
with ZipFile(tmpf, 'r') as zf:
data = zf.open(zf.namelist()[0]).readlines()
line_lengths = np.array(lmap(len, data))
file_edges = np.where(line_lengths == 2)[0]
datasets = {}
edges = zip(file_edges + 1, file_edges[1:])
for i, (left_edge, right_edge) in enumerate(edges):
dataset = [d.split() for d in data[left_edge:right_edge]]
if len(dataset) > 10:
ncol_raw = np.array(lmap(len, dataset))
ncol = np.median(ncol_raw)
header_index = np.where(ncol_raw == ncol - 1)[0][-1]
header = dataset[header_index]
ds_header = dataset[header_index + 1:]
# to ensure the header is unique
header = ['{0} {1}'.format(j, hj) for j, hj in enumerate(header,
start=1)]
index = np.array([d[0] for d in ds_header], dtype=int)
dataset = np.array([d[1:] for d in ds_header], dtype=float)
datasets[i] = DataFrame(dataset, index, columns=header)
return datasets
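# Illustrative usage sketch (the dataset name is an assumption based on the
# file names published on Ken French's data library page):
# >>> ds = get_data_famafrench('F-F_Research_Data_Factors')
# >>> sorted(ds.keys())  # each sufficiently long table becomes a DataFrame,
# ...                    # keyed by its block index within the zipped file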
# Items needed for options class
CUR_MONTH = dt.datetime.now().month
CUR_YEAR = dt.datetime.now().year
CUR_DAY = dt.datetime.now().day
def _two_char(s):
return '{0:0>2}'.format(s)
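# Worked example: _two_char(7) -> '07' and _two_char(12) -> '12'; used below
# to build the YYMMDD suffix for expiry-specific instance variables.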
class Options(object):
"""
***Experimental***
This class fetches call/put data for a given stock/expiry month.
It is instantiated with a string representing the ticker symbol.
The class has the following methods:
get_options_data:(month, year, expiry)
get_call_data:(month, year, expiry)
get_put_data: (month, year, expiry)
get_near_stock_price(opt_frame, above_below)
get_all_data(call, put)
get_forward_data(months, call, put) (deprecated)
Examples
--------
# Instantiate object with ticker
>>> aapl = Options('aapl', 'yahoo')
# Fetch next expiry call data
>>> calls = aapl.get_call_data()
# Can now access aapl.calls instance variable
>>> aapl.calls
# Fetch next expiry put data
>>> puts = aapl.get_put_data()
# Can now access aapl.puts instance variable
>>> aapl.puts
# cut down the call data to be 3 below and 3 above the stock price.
>>> cut_calls = aapl.get_near_stock_price(call=True, above_below=3)
# Fetch call and put data with expiry from now to 8 months out
>>> forward_data = aapl.get_forward_data(8, call=True, put=True)
# Fetch all call and put data
>>> all_data = aapl.get_all_data()
"""
_TABLE_LOC = {'calls': 1, 'puts': 2}
_OPTIONS_BASE_URL = 'http://finance.yahoo.com/q/op?s={sym}'
_FINANCE_BASE_URL = 'http://finance.yahoo.com'
def __init__(self, symbol, data_source=None):
""" Instantiates options_data with a ticker saved as symbol """
self.symbol = symbol.upper()
if data_source is None:
warnings.warn("Options(symbol) is deprecated, use Options(symbol,"
" data_source) instead", FutureWarning)
data_source = "yahoo"
if data_source != "yahoo":
raise NotImplementedError("currently only yahoo supported")
def get_options_data(self, month=None, year=None, expiry=None):
"""
***Experimental***
Gets call/put data for the stock with the expiration data in the
given month and year
Parameters
----------
month : number, int, optional(default=None)
The month the options expire. This should be either 1 or 2
digits.
year : number, int, optional(default=None)
The year the options expire. This should be a 4 digit int.
expiry : date-like or convertible or list-like object, optional (default=None)
The date (or dates) when options expire (defaults to current month)
Returns
-------
pandas.DataFrame
A DataFrame with requested options data.
Index:
Strike: Option strike, int
Expiry: Option expiry, Timestamp
Type: Call or Put, string
Symbol: Option symbol as reported on Yahoo, string
Columns:
Last: Last option price, float
Chg: Change from prior day, float
Bid: Bid price, float
Ask: Ask price, float
Vol: Volume traded, int64
Open_Int: Open interest, int64
                IsNonstandard: True if the deliverable is not 100 shares, otherwise False
Underlying: Ticker of the underlying security, string
Underlying_Price: Price of the underlying security, float64
Quote_Time: Time of the quote, Timestamp
Notes
-----
Note: Format of returned data frame is dependent on Yahoo and may change.
When called, this function will add instance variables named
calls and puts. See the following example:
>>> aapl = Options('aapl', 'yahoo') # Create object
>>> aapl.calls # will give an AttributeError
        >>> aapl.get_options_data() # Get data and set ivars
>>> aapl.calls # Doesn't throw AttributeError
        Also note that aapl.calls and aapl.puts will always be the calls
and puts for the next expiry. If the user calls this method with
a different expiry, the ivar will be named callsYYMMDD or putsYYMMDD,
where YY, MM and DD are, respectively, two digit representations of
the year, month and day for the expiry of the options.
"""
return concat([f(month, year, expiry)
for f in (self.get_put_data,
self.get_call_data)]).sortlevel()
def _get_option_frames_from_yahoo(self, expiry):
url = self._yahoo_url_from_expiry(expiry)
option_frames = self._option_frames_from_url(url)
frame_name = '_frames' + self._expiry_to_string(expiry)
setattr(self, frame_name, option_frames)
return option_frames
@staticmethod
def _expiry_to_string(expiry):
m1 = _two_char(expiry.month)
d1 = _two_char(expiry.day)
return str(expiry.year)[-2:] + m1 + d1
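    # Worked example: _expiry_to_string(dt.date(2016, 1, 15)) returns '160115',
    # the YYMMDD suffix used for ivars such as calls160115 and puts160115.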
def _yahoo_url_from_expiry(self, expiry):
try:
expiry_links = self._expiry_links
except AttributeError:
_, expiry_links = self._get_expiry_dates_and_links()
return self._FINANCE_BASE_URL + expiry_links[expiry]
def _option_frames_from_url(self, url):
frames = read_html(url)
nframes = len(frames)
frames_req = max(self._TABLE_LOC.values())
if nframes < frames_req:
raise RemoteDataError("%s options tables found (%s expected)" % (nframes, frames_req))
if not hasattr(self, 'underlying_price'):
try:
self.underlying_price, self.quote_time = self._underlying_price_and_time_from_url(url)
except IndexError:
self.underlying_price, self.quote_time = np.nan, np.nan
calls = frames[self._TABLE_LOC['calls']]
puts = frames[self._TABLE_LOC['puts']]
if len(calls) == 0 or len(puts) == 0:
raise RemoteDataError('Received no data from Yahoo at url: %s' % url)
calls = self._process_data(calls, 'call')
puts = self._process_data(puts, 'put')
return {'calls': calls, 'puts': puts}
def _underlying_price_and_time_from_url(self, url):
root = self._parse_url(url)
underlying_price = self._underlying_price_from_root(root)
quote_time = self._quote_time_from_root(root)
return underlying_price, quote_time
@staticmethod
def _underlying_price_from_root(root):
underlying_price = root.xpath('.//*[@class="time_rtq_ticker Fz-30 Fw-b"]')[0]\
.getchildren()[0].text
underlying_price = underlying_price.replace(',', '') #GH11
try:
underlying_price = float(underlying_price)
except ValueError:
underlying_price = np.nan
return underlying_price
@staticmethod
def _quote_time_from_root(root):
#Gets the time of the quote, note this is actually the time of the underlying price.
try:
quote_time_text = root.xpath('.//*[@class="time_rtq Fz-m"]')[0].getchildren()[1].getchildren()[0].text
##TODO: Enable timezone matching when strptime can match EST with %Z
quote_time_text = quote_time_text.split(' ')[0]
quote_time = dt.datetime.strptime(quote_time_text, "%I:%M%p")
quote_time = quote_time.replace(year=CUR_YEAR, month=CUR_MONTH, day=CUR_DAY)
except ValueError:
quote_time = np.nan
return quote_time
def _get_option_data(self, expiry, name):
frame_name = '_frames' + self._expiry_to_string(expiry)
try:
frames = getattr(self, frame_name)
except AttributeError:
frames = self._get_option_frames_from_yahoo(expiry)
option_data = frames[name]
if expiry != self.expiry_dates[0]:
name += self._expiry_to_string(expiry)
setattr(self, name, option_data)
return option_data
def get_call_data(self, month=None, year=None, expiry=None):
"""
***Experimental***
Gets call/put data for the stock with the expiration data in the
given month and year
Parameters
----------
month : number, int, optional(default=None)
The month the options expire. This should be either 1 or 2
digits.
year : number, int, optional(default=None)
The year the options expire. This should be a 4 digit int.
expiry : date-like or convertible or list-like object, optional (default=None)
The date (or dates) when options expire (defaults to current month)
Returns
-------
call_data: pandas.DataFrame
A DataFrame with requested options data.
Index:
Strike: Option strike, int
Expiry: Option expiry, Timestamp
Type: Call or Put, string
Symbol: Option symbol as reported on Yahoo, string
Columns:
Last: Last option price, float
Chg: Change from prior day, float
Bid: Bid price, float
Ask: Ask price, float
Vol: Volume traded, int64
Open_Int: Open interest, int64
                IsNonstandard: True if the deliverable is not 100 shares, otherwise False
Underlying: Ticker of the underlying security, string
Underlying_Price: Price of the underlying security, float64
Quote_Time: Time of the quote, Timestamp
Notes
-----
Note: Format of returned data frame is dependent on Yahoo and may change.
When called, this function will add instance variables named
calls and puts. See the following example:
>>> aapl = Options('aapl', 'yahoo') # Create object
>>> aapl.calls # will give an AttributeError
>>> aapl.get_call_data() # Get data and set ivars
>>> aapl.calls # Doesn't throw AttributeError
Also note that aapl.calls will always be the calls for the next
expiry. If the user calls this method with a different month
or year, the ivar will be named callsYYMMDD where YY, MM and DD are,
respectively, two digit representations of the year, month and day
for the expiry of the options.
"""
expiry = self._try_parse_dates(year, month, expiry)
return self._get_data_in_date_range(expiry, call=True, put=False)
def get_put_data(self, month=None, year=None, expiry=None):
"""
***Experimental***
Gets put data for the stock with the expiration data in the
given month and year
Parameters
----------
month : number, int, optional(default=None)
The month the options expire. This should be either 1 or 2
digits.
year : number, int, optional(default=None)
The year the options expire. This should be a 4 digit int.
expiry : date-like or convertible or list-like object, optional (default=None)
The date (or dates) when options expire (defaults to current month)
Returns
-------
put_data: pandas.DataFrame
A DataFrame with requested options data.
Index:
Strike: Option strike, int
Expiry: Option expiry, Timestamp
Type: Call or Put, string
Symbol: Option symbol as reported on Yahoo, string
Columns:
Last: Last option price, float
Chg: Change from prior day, float
Bid: Bid price, float
Ask: Ask price, float
Vol: Volume traded, int64
Open_Int: Open interest, int64
                IsNonstandard: True if the deliverable is not 100 shares, otherwise False
Underlying: Ticker of the underlying security, string
Underlying_Price: Price of the underlying security, float64
Quote_Time: Time of the quote, Timestamp
Notes
-----
Note: Format of returned data frame is dependent on Yahoo and may change.
When called, this function will add instance variables named
puts. See the following example:
        >>> aapl = Options('aapl', 'yahoo') # Create object
>>> aapl.puts # will give an AttributeError
>>> aapl.get_put_data() # Get data and set ivars
>>> aapl.puts # Doesn't throw AttributeError
Also note that aapl.puts will always be the puts for the next
expiry. If the user calls this method with a different month
or year, the ivar will be named putsYYMMDD where YY, MM and DD are,
respectively, two digit representations of the year, month and day
for the expiry of the options.
"""
expiry = self._try_parse_dates(year, month, expiry)
return self._get_data_in_date_range(expiry, put=True, call=False)
def get_near_stock_price(self, above_below=2, call=True, put=False,
month=None, year=None, expiry=None):
"""
***Experimental***
Returns a data frame of options that are near the current stock price.
Parameters
----------
above_below : number, int, optional (default=2)
The number of strike prices above and below the stock price that
should be taken
call : bool
Tells the function whether or not it should be using calls
put : bool
            Tells the function whether or not it should be using puts
month : number, int, optional(default=None)
The month the options expire. This should be either 1 or 2
digits.
year : number, int, optional(default=None)
The year the options expire. This should be a 4 digit int.
expiry : date-like or convertible or list-like object, optional (default=None)
The date (or dates) when options expire (defaults to current month)
Returns
-------
        chopped: DataFrame
            The resultant DataFrame, chopped down to the 2 * above_below + 1
            rows around the underlying price. If there isn't data as far out
            as the user has asked for, fewer rows are returned.
Note: Format of returned data frame is dependent on Yahoo and may change.
"""
expiry = self._try_parse_dates(year, month, expiry)
data = self._get_data_in_date_range(expiry, call=call, put=put)
return self.chop_data(data, above_below, self.underlying_price)
def chop_data(self, df, above_below=2, underlying_price=None):
"""Returns a data frame only options that are near the current stock price."""
if not underlying_price:
try:
underlying_price = self.underlying_price
except AttributeError:
underlying_price = np.nan
max_strike = max(df.index.get_level_values('Strike'))
min_strike = min(df.index.get_level_values('Strike'))
if not np.isnan(underlying_price) and min_strike < underlying_price < max_strike:
start_index = np.where(df.index.get_level_values('Strike')
> underlying_price)[0][0]
get_range = slice(start_index - above_below,
start_index + above_below + 1)
df = df[get_range].dropna(how='all')
return df
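    # Sketch of the chopping logic above (strike values are hypothetical):
    # with strikes [90, 95, 100, 105, 110, 115], an underlying price of 101 and
    # above_below=2, the first strike greater than 101 is 105, so the slice
    # keeps 95, 100, 105, 110, 115 (2 below, the pivot and 2 above), then drops
    # any all-NaN rows.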
def _try_parse_dates(self, year, month, expiry):
"""
Validates dates provided by user. Ensures the user either provided both a month and a year or an expiry.
Parameters
----------
year : int
Calendar year
month : int
Calendar month
expiry : date-like or convertible, (preferred)
Expiry date
Returns
-------
list of expiry dates (datetime.date)
"""
#Checks if the user gave one of the month or the year but not both and did not provide an expiry:
        if (((month is not None and year is None) or
             (month is None and year is not None)) and expiry is None):
msg = "You must specify either (`year` and `month`) or `expiry` " \
"or none of these options for the next expiry."
raise ValueError(msg)
if expiry is not None:
if hasattr(expiry, '__iter__'):
expiry = [self._validate_expiry(exp) for exp in expiry]
else:
expiry = [self._validate_expiry(expiry)]
if len(expiry) == 0:
raise ValueError('No expiries available for given input.')
elif year is None and month is None:
#No arguments passed, provide next expiry
year = CUR_YEAR
month = CUR_MONTH
expiry = dt.date(year, month, 1)
expiry = [self._validate_expiry(expiry)]
else:
#Year and month passed, provide all expiries in that month
expiry = [expiry for expiry in self.expiry_dates if expiry.year == year and expiry.month == month]
if len(expiry) == 0:
raise ValueError('No expiries available in %s-%s' % (year, month))
return expiry
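    # Worked examples of the parsing rules above (dates are illustrative):
    #   _try_parse_dates(None, None, '2016-01-15') -> [validated 2016-01-15 expiry]
    #   _try_parse_dates(2016, 1, None)            -> all expiries in Jan 2016
    #   _try_parse_dates(None, None, None)         -> [next available expiry]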
def _validate_expiry(self, expiry):
"""Ensures that an expiry date has data available on Yahoo
If the expiry date does not have options that expire on that day, return next expiry"""
expiry_dates = self.expiry_dates
expiry = to_datetime(expiry)
if hasattr(expiry, 'date'):
expiry = expiry.date()
if expiry in expiry_dates:
return expiry
else:
index = DatetimeIndex(expiry_dates).order()
return index[index.date >= expiry][0].date()
def get_forward_data(self, months, call=True, put=False, near=False,
above_below=2):
"""
***Experimental***
Gets either call, put, or both data for months starting in the current
month and going out in the future a specified amount of time.
Parameters
----------
months : number, int
How many months to go out in the collection of the data. This is
inclusive.
call : bool, optional (default=True)
Whether or not to collect data for call options
put : bool, optional (default=False)
Whether or not to collect data for put options.
near : bool, optional (default=False)
Whether this function should get only the data near the
current stock price. Uses Options.get_near_stock_price
above_below : number, int, optional (default=2)
The number of strike prices above and below the stock price that
should be taken if the near option is set to True
Returns
-------
pandas.DataFrame
A DataFrame with requested options data.
Index:
Strike: Option strike, int
Expiry: Option expiry, Timestamp
Type: Call or Put, string
Symbol: Option symbol as reported on Yahoo, string
Columns:
Last: Last option price, float
Chg: Change from prior day, float
Bid: Bid price, float
Ask: Ask price, float
Vol: Volume traded, int64
Open_Int: Open interest, int64
                IsNonstandard: True if the deliverable is not 100 shares, otherwise False
Underlying: Ticker of the underlying security, string
Underlying_Price: Price of the underlying security, float64
Quote_Time: Time of the quote, Timestamp
Note: Format of returned data frame is dependent on Yahoo and may change.
"""
warnings.warn("get_forward_data() is deprecated", FutureWarning)
end_date = dt.date.today() + MonthEnd(months)
dates = (date for date in self.expiry_dates if date <= end_date.date())
data = self._get_data_in_date_range(dates, call=call, put=put)
if near:
data = self.chop_data(data, above_below=above_below)
return data
def get_all_data(self, call=True, put=True):
"""
***Experimental***
Gets either call, put, or both data for all available months starting
in the current month.
Parameters
----------
call : bool, optional (default=True)
Whether or not to collect data for call options
put : bool, optional (default=True)
Whether or not to collect data for put options.
Returns
-------
pandas.DataFrame
A DataFrame with requested options data.
Index:
Strike: Option strike, int
Expiry: Option expiry, Timestamp
Type: Call or Put, string
Symbol: Option symbol as reported on Yahoo, string
Columns:
Last: Last option price, float
Chg: Change from prior day, float
Bid: Bid price, float
Ask: Ask price, float
Vol: Volume traded, int64
Open_Int: Open interest, int64
                IsNonstandard: True if the deliverable is not 100 shares, otherwise False
Underlying: Ticker of the underlying security, string
Underlying_Price: Price of the underlying security, float64
Quote_Time: Time of the quote, Timestamp
Note: Format of returned data frame is dependent on Yahoo and may change.
"""
try:
expiry_dates = self.expiry_dates
except AttributeError:
expiry_dates, _ = self._get_expiry_dates_and_links()
return self._get_data_in_date_range(dates=expiry_dates, call=call, put=put)
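    # Illustrative usage sketch (the ticker is a hypothetical example):
    # >>> aapl = Options('aapl', 'yahoo')
    # >>> all_puts = aapl.get_all_data(call=False, put=True)
    # >>> all_puts.index.get_level_values('Expiry').unique()  # every listed expiry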
def _get_data_in_date_range(self, dates, call=True, put=True):
to_ret = Series({'calls': call, 'puts': put})
to_ret = to_ret[to_ret].index
data = []
for name in to_ret:
for expiry_date in dates:
nam = name + self._expiry_to_string(expiry_date)
try: # Try to access on the instance
frame = getattr(self, nam)
except AttributeError:
frame = self._get_option_data(expiry=expiry_date, name=name)
data.append(frame)
return concat(data).sortlevel()
@property
def expiry_dates(self):
"""
Returns a list of available expiry dates
"""
try:
expiry_dates = self._expiry_dates
except AttributeError:
expiry_dates, _ = self._get_expiry_dates_and_links()
return expiry_dates
def _get_expiry_dates_and_links(self):
"""
Gets available expiry dates.
Returns
-------
Tuple of:
List of datetime.date objects
Dict of datetime.date objects as keys and corresponding links
"""
url = self._OPTIONS_BASE_URL.format(sym=self.symbol)
root = self._parse_url(url)
try:
links = root.xpath('//*[@id="options_menu"]/form/select/option')
except IndexError:
raise RemoteDataError('Expiry dates not available')
expiry_dates = [dt.datetime.strptime(element.text, "%B %d, %Y").date() for element in links]
links = [element.attrib['data-selectbox-link'] for element in links]
if len(expiry_dates) == 0:
raise RemoteDataError('Data not available')
expiry_links = dict(zip(expiry_dates, links))
self._expiry_links = expiry_links
self._expiry_dates = expiry_dates
return expiry_dates, expiry_links
def _parse_url(self, url):
"""
Downloads and parses a URL, returns xml root.
"""
try:
from lxml.html import parse
except ImportError:
raise ImportError("Please install lxml if you want to use the "
"{0!r} class".format(self.__class__.__name__))
try:
doc = parse(url)
except _network_error_classes:
raise RemoteDataError("Unable to parse URL "
"{0!r}".format(url))
else:
root = doc.getroot()
if root is None:
raise RemoteDataError("Parsed URL {0!r} has no root"
"element".format(url))
return root
def _process_data(self, frame, type):
"""
Adds columns for Expiry, IsNonstandard (ie: deliverable is not 100 shares)
and Tag (the tag indicating what is actually deliverable, None if standard).
"""
frame.columns = ['Strike', 'Symbol', 'Last', 'Bid', 'Ask', 'Chg', 'PctChg', 'Vol', 'Open_Int', 'IV']
frame["Rootexp"] = frame.Symbol.str[0:-9]
frame["Root"] = frame.Rootexp.str[0:-6]
frame["Expiry"] = to_datetime(frame.Rootexp.str[-6:])
#Removes dashes in equity ticker to map to option ticker.
#Ex: BRK-B to BRKB140517C00100000
frame["IsNonstandard"] = frame['Root'] != self.symbol.replace('-', '')
del frame["Rootexp"]
frame["Underlying"] = self.symbol
try:
frame['Underlying_Price'] = self.underlying_price
frame["Quote_Time"] = self.quote_time
except AttributeError:
frame['Underlying_Price'] = np.nan
frame["Quote_Time"] = np.nan
frame.rename(columns={'Open Int': 'Open_Int'}, inplace=True)
frame['Type'] = type
frame.set_index(['Strike', 'Expiry', 'Type', 'Symbol'], inplace=True)
return frame
|
gpl-2.0
|
RoboJackets/igvc-software
|
igvc_perception/src/multiclass_segmentation/data_utils/split_data.py
|
1
|
1578
|
# Import dependencies
from sklearn.model_selection import train_test_split
import numpy as np
import argparse
import os
# Set size and seed
test_size = 0.2
random_state = 42
X = np.array([])
Y = np.array([])
# Establish arguments as file paths
ap = argparse.ArgumentParser()
ap.add_argument("-dir", "--dir", required=True, help="igvc_dataset location")
args = vars(ap.parse_args())
# Load images and masks as numpy arrays
for dir_name in os.listdir(args["dir"]):
data_path = os.path.join(args["dir"], dir_name)
for data_name in os.listdir(data_path):
if data_name == "masks.npy":
masks_path = os.path.join(data_path, data_name)
data_npy = np.load(masks_path)
if X.shape[0] == 0:
X = data_npy
else:
X = np.vstack((X, data_npy))
# X = np.concatenate((X,data_npy),axis=0)
elif data_name == "images.npy":
images_path = os.path.join(data_path, data_name)
data_npy = np.load(images_path)
if Y.shape[0] == 0:
Y = data_npy
else:
Y = np.vstack((Y, data_npy))
# Y = np.concatenate((Y,data_npy),axis=0)
# Split the numpy arrays and save as .npy files
labels_train, labels_test, images_train, images_test = train_test_split(
X, Y, test_size=test_size, random_state=random_state
)
np.save("data/train_images.npy", images_train)
np.save("data/test_images.npy", images_test)
np.save("data/train_masks.npy", labels_train)
np.save("data/test_masks.npy", labels_test)
|
mit
|
lina9527/easybi
|
data/__init__.py
|
3
|
35310
|
"""Loads datasets, dashboards and slices in a new superset instance"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import gzip
import json
import os
import textwrap
import datetime
import random
import pandas as pd
from sqlalchemy import String, DateTime, Date, Float, BigInteger
from superset import app, db, utils
from superset.models import core as models
from superset.security import get_or_create_main_db
from superset.connectors.connector_registry import ConnectorRegistry
# Shortcuts
DB = models.Database
Slice = models.Slice
Dash = models.Dashboard
TBL = ConnectorRegistry.sources['table']
config = app.config
DATA_FOLDER = os.path.join(config.get("BASE_DIR"), 'data')
misc_dash_slices = [] # slices assembled in a "Misc Chart" dashboard
def merge_slice(slc):
o = db.session.query(Slice).filter_by(slice_name=slc.slice_name).first()
if o:
db.session.delete(o)
db.session.add(slc)
db.session.commit()
def get_slice_json(defaults, **kwargs):
d = defaults.copy()
d.update(kwargs)
return json.dumps(d, indent=4, sort_keys=True)
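# Worked example of the merge above (values are illustrative):
#   get_slice_json({"viz_type": "table", "row_limit": 50}, viz_type="pie")
# returns the JSON string for {"row_limit": 50, "viz_type": "pie"}, i.e. the
# keyword overrides win over the defaults dict.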
def load_energy():
"""Loads an energy related dataset to use with sankey and graphs"""
tbl_name = 'energy_usage'
with gzip.open(os.path.join(DATA_FOLDER, 'energy.json.gz')) as f:
pdf = pd.read_json(f)
pdf.to_sql(
tbl_name,
db.engine,
if_exists='replace',
chunksize=500,
dtype={
'source': String(255),
'target': String(255),
'value': Float(),
},
index=False)
print("Creating table [wb_health_population] reference")
tbl = db.session.query(TBL).filter_by(table_name=tbl_name).first()
if not tbl:
tbl = TBL(table_name=tbl_name)
tbl.description = "Energy consumption"
tbl.database = get_or_create_main_db()
db.session.merge(tbl)
db.session.commit()
tbl.fetch_metadata()
slc = Slice(
slice_name="Energy Sankey",
viz_type='sankey',
datasource_type='table',
datasource_id=tbl.id,
params=textwrap.dedent("""\
{
"collapsed_fieldsets": "",
"groupby": [
"source",
"target"
],
"having": "",
"metric": "sum__value",
"row_limit": "5000",
"slice_name": "Energy Sankey",
"viz_type": "sankey",
"where": ""
}
""")
)
misc_dash_slices.append(slc.slice_name)
merge_slice(slc)
slc = Slice(
slice_name="Energy Force Layout",
viz_type='directed_force',
datasource_type='table',
datasource_id=tbl.id,
params=textwrap.dedent("""\
{
"charge": "-500",
"collapsed_fieldsets": "",
"groupby": [
"source",
"target"
],
"having": "",
"link_length": "200",
"metric": "sum__value",
"row_limit": "5000",
"slice_name": "Force",
"viz_type": "directed_force",
"where": ""
}
""")
)
misc_dash_slices.append(slc.slice_name)
merge_slice(slc)
slc = Slice(
slice_name="Heatmap",
viz_type='heatmap',
datasource_type='table',
datasource_id=tbl.id,
params=textwrap.dedent("""\
{
"all_columns_x": "source",
"all_columns_y": "target",
"canvas_image_rendering": "pixelated",
"collapsed_fieldsets": "",
"having": "",
"linear_color_scheme": "blue_white_yellow",
"metric": "sum__value",
"normalize_across": "heatmap",
"slice_name": "Heatmap",
"viz_type": "heatmap",
"where": "",
"xscale_interval": "1",
"yscale_interval": "1"
}
""")
)
misc_dash_slices.append(slc.slice_name)
merge_slice(slc)
def load_world_bank_health_n_pop():
"""Loads the world bank health dataset, slices and a dashboard"""
tbl_name = 'wb_health_population'
with gzip.open(os.path.join(DATA_FOLDER, 'countries.json.gz')) as f:
pdf = pd.read_json(f)
pdf.columns = [col.replace('.', '_') for col in pdf.columns]
pdf.year = pd.to_datetime(pdf.year)
pdf.to_sql(
tbl_name,
db.engine,
if_exists='replace',
chunksize=50,
dtype={
'year': DateTime(),
'country_code': String(3),
'country_name': String(255),
'region': String(255),
},
index=False)
print("Creating table [wb_health_population] reference")
tbl = db.session.query(TBL).filter_by(table_name=tbl_name).first()
if not tbl:
tbl = TBL(table_name=tbl_name)
tbl.description = utils.readfile(os.path.join(DATA_FOLDER, 'countries.md'))
tbl.main_dttm_col = 'year'
tbl.database = get_or_create_main_db()
tbl.filter_select_enabled = True
db.session.merge(tbl)
db.session.commit()
tbl.fetch_metadata()
defaults = {
"compare_lag": "10",
"compare_suffix": "o10Y",
"limit": "25",
"granularity": "year",
"groupby": [],
"metric": 'sum__SP_POP_TOTL',
"metrics": ["sum__SP_POP_TOTL"],
"row_limit": config.get("ROW_LIMIT"),
"since": "2014-01-01",
"until": "2014-01-02",
"where": "",
"markup_type": "markdown",
"country_fieldtype": "cca3",
"secondary_metric": "sum__SP_POP_TOTL",
"entity": "country_code",
"show_bubbles": True,
}
print("Creating slices")
slices = [
Slice(
slice_name="Region Filter",
viz_type='filter_box',
datasource_type='table',
datasource_id=tbl.id,
params=get_slice_json(
defaults,
viz_type='filter_box',
groupby=['region', 'country_name'])),
Slice(
slice_name="World's Population",
viz_type='big_number',
datasource_type='table',
datasource_id=tbl.id,
params=get_slice_json(
defaults,
since='2000',
viz_type='big_number',
compare_lag="10",
metric='sum__SP_POP_TOTL',
compare_suffix="over 10Y")),
Slice(
slice_name="Most Populated Countries",
viz_type='table',
datasource_type='table',
datasource_id=tbl.id,
params=get_slice_json(
defaults,
viz_type='table',
metrics=["sum__SP_POP_TOTL"],
groupby=['country_name'])),
Slice(
slice_name="Growth Rate",
viz_type='line',
datasource_type='table',
datasource_id=tbl.id,
params=get_slice_json(
defaults,
viz_type='line',
since="1960-01-01",
metrics=["sum__SP_POP_TOTL"],
num_period_compare="10",
groupby=['country_name'])),
Slice(
slice_name="% Rural",
viz_type='world_map',
datasource_type='table',
datasource_id=tbl.id,
params=get_slice_json(
defaults,
viz_type='world_map',
metric="sum__SP_RUR_TOTL_ZS",
num_period_compare="10")),
Slice(
slice_name="Life Expectancy VS Rural %",
viz_type='bubble',
datasource_type='table',
datasource_id=tbl.id,
params=get_slice_json(
defaults,
viz_type='bubble',
since="2011-01-01",
until="2011-01-02",
series="region",
limit=0,
entity="country_name",
x="sum__SP_RUR_TOTL_ZS",
y="sum__SP_DYN_LE00_IN",
size="sum__SP_POP_TOTL",
max_bubble_size="50",
filters=[{
"col": "country_code",
"val": [
"TCA", "MNP", "DMA", "MHL", "MCO", "SXM", "CYM",
"TUV", "IMY", "KNA", "ASM", "ADO", "AMA", "PLW",
],
"op": "not in"}],
)),
Slice(
slice_name="Rural Breakdown",
viz_type='sunburst',
datasource_type='table',
datasource_id=tbl.id,
params=get_slice_json(
defaults,
viz_type='sunburst',
groupby=["region", "country_name"],
secondary_metric="sum__SP_RUR_TOTL",
since="2011-01-01",
until="2011-01-01",)),
Slice(
slice_name="World's Pop Growth",
viz_type='area',
datasource_type='table',
datasource_id=tbl.id,
params=get_slice_json(
defaults,
since="1960-01-01",
until="now",
viz_type='area',
groupby=["region"],)),
Slice(
slice_name="Box plot",
viz_type='box_plot',
datasource_type='table',
datasource_id=tbl.id,
params=get_slice_json(
defaults,
since="1960-01-01",
until="now",
whisker_options="Min/max (no outliers)",
viz_type='box_plot',
groupby=["region"],)),
Slice(
slice_name="Treemap",
viz_type='treemap',
datasource_type='table',
datasource_id=tbl.id,
params=get_slice_json(
defaults,
since="1960-01-01",
until="now",
viz_type='treemap',
metrics=["sum__SP_POP_TOTL"],
groupby=["region", "country_code"],)),
Slice(
slice_name="Parallel Coordinates",
viz_type='para',
datasource_type='table',
datasource_id=tbl.id,
params=get_slice_json(
defaults,
since="2011-01-01",
until="2011-01-01",
viz_type='para',
limit=100,
metrics=[
"sum__SP_POP_TOTL",
'sum__SP_RUR_TOTL_ZS',
'sum__SH_DYN_AIDS'],
secondary_metric='sum__SP_POP_TOTL',
series="country_name",)),
]
misc_dash_slices.append(slices[-1].slice_name)
for slc in slices:
merge_slice(slc)
print("Creating a World's Health Bank dashboard")
dash_name = "World's Bank Data"
slug = "world_health"
dash = db.session.query(Dash).filter_by(slug=slug).first()
if not dash:
dash = Dash()
js = textwrap.dedent("""\
[
{
"col": 1,
"row": 0,
"size_x": 2,
"size_y": 2,
"slice_id": "1231"
},
{
"col": 1,
"row": 2,
"size_x": 2,
"size_y": 2,
"slice_id": "1232"
},
{
"col": 10,
"row": 0,
"size_x": 3,
"size_y": 7,
"slice_id": "1233"
},
{
"col": 1,
"row": 4,
"size_x": 6,
"size_y": 3,
"slice_id": "1234"
},
{
"col": 3,
"row": 0,
"size_x": 7,
"size_y": 4,
"slice_id": "1235"
},
{
"col": 5,
"row": 7,
"size_x": 8,
"size_y": 4,
"slice_id": "1236"
},
{
"col": 7,
"row": 4,
"size_x": 3,
"size_y": 3,
"slice_id": "1237"
},
{
"col": 1,
"row": 7,
"size_x": 4,
"size_y": 4,
"slice_id": "1238"
},
{
"col": 9,
"row": 11,
"size_x": 4,
"size_y": 4,
"slice_id": "1239"
},
{
"col": 1,
"row": 11,
"size_x": 8,
"size_y": 4,
"slice_id": "1240"
}
]
""")
l = json.loads(js)
for i, pos in enumerate(l):
pos['slice_id'] = str(slices[i].id)
dash.dashboard_title = dash_name
dash.position_json = json.dumps(l, indent=4)
dash.slug = slug
dash.slices = slices[:-1]
db.session.merge(dash)
db.session.commit()
def load_css_templates():
"""Loads 2 css templates to demonstrate the feature"""
print('Creating default CSS templates')
CSS = models.CssTemplate # noqa
obj = db.session.query(CSS).filter_by(template_name='Flat').first()
if not obj:
obj = CSS(template_name="Flat")
css = textwrap.dedent("""\
.gridster div.widget {
transition: background-color 0.5s ease;
background-color: #FAFAFA;
border: 1px solid #CCC;
box-shadow: none;
border-radius: 0px;
}
.gridster div.widget:hover {
border: 1px solid #000;
background-color: #EAEAEA;
}
.navbar {
transition: opacity 0.5s ease;
opacity: 0.05;
}
.navbar:hover {
opacity: 1;
}
.chart-header .header{
font-weight: normal;
font-size: 12px;
}
/*
var bnbColors = [
//rausch hackb kazan babu lima beach tirol
'#ff5a5f', '#7b0051', '#007A87', '#00d1c1', '#8ce071', '#ffb400', '#b4a76c',
'#ff8083', '#cc0086', '#00a1b3', '#00ffeb', '#bbedab', '#ffd266', '#cbc29a',
'#ff3339', '#ff1ab1', '#005c66', '#00b3a5', '#55d12e', '#b37e00', '#988b4e',
];
*/
""")
obj.css = css
db.session.merge(obj)
db.session.commit()
obj = (
db.session.query(CSS).filter_by(template_name='Courier Black').first())
if not obj:
obj = CSS(template_name="Courier Black")
css = textwrap.dedent("""\
.gridster div.widget {
transition: background-color 0.5s ease;
background-color: #EEE;
border: 2px solid #444;
border-radius: 15px;
box-shadow: none;
}
h2 {
color: white;
font-size: 52px;
}
.navbar {
box-shadow: none;
}
.gridster div.widget:hover {
border: 2px solid #000;
background-color: #EAEAEA;
}
.navbar {
transition: opacity 0.5s ease;
opacity: 0.05;
}
.navbar:hover {
opacity: 1;
}
.chart-header .header{
font-weight: normal;
font-size: 12px;
}
.nvd3 text {
font-size: 12px;
font-family: inherit;
}
body{
background: #000;
            font-family: Courier, Monaco, monospace;
}
/*
var bnbColors = [
//rausch hackb kazan babu lima beach tirol
'#ff5a5f', '#7b0051', '#007A87', '#00d1c1', '#8ce071', '#ffb400', '#b4a76c',
'#ff8083', '#cc0086', '#00a1b3', '#00ffeb', '#bbedab', '#ffd266', '#cbc29a',
'#ff3339', '#ff1ab1', '#005c66', '#00b3a5', '#55d12e', '#b37e00', '#988b4e',
];
*/
""")
obj.css = css
db.session.merge(obj)
db.session.commit()
def load_birth_names():
"""Loading birth name dataset from a zip file in the repo"""
with gzip.open(os.path.join(DATA_FOLDER, 'birth_names.json.gz')) as f:
pdf = pd.read_json(f)
pdf.ds = pd.to_datetime(pdf.ds, unit='ms')
pdf.to_sql(
'birth_names',
db.engine,
if_exists='replace',
chunksize=500,
dtype={
'ds': DateTime,
'gender': String(16),
'state': String(10),
'name': String(255),
},
index=False)
l = []
print("Done loading table!")
print("-" * 80)
print("Creating table [birth_names] reference")
obj = db.session.query(TBL).filter_by(table_name='birth_names').first()
if not obj:
obj = TBL(table_name='birth_names')
obj.main_dttm_col = 'ds'
obj.database = get_or_create_main_db()
obj.filter_select_enabled = True
db.session.merge(obj)
db.session.commit()
obj.fetch_metadata()
tbl = obj
defaults = {
"compare_lag": "10",
"compare_suffix": "o10Y",
"limit": "25",
"granularity": "ds",
"groupby": [],
"metric": 'sum__num',
"metrics": ["sum__num"],
"row_limit": config.get("ROW_LIMIT"),
"since": "100 years ago",
"until": "now",
"viz_type": "table",
"where": "",
"markup_type": "markdown",
}
print("Creating some slices")
slices = [
Slice(
slice_name="Girls",
viz_type='table',
datasource_type='table',
datasource_id=tbl.id,
params=get_slice_json(
defaults,
groupby=['name'],
filters=[{
'col': 'gender',
'op': 'in',
'val': ['girl'],
}],
row_limit=50)),
Slice(
slice_name="Boys",
viz_type='table',
datasource_type='table',
datasource_id=tbl.id,
params=get_slice_json(
defaults,
groupby=['name'],
filters=[{
'col': 'gender',
'op': 'in',
'val': ['boy'],
}],
row_limit=50)),
Slice(
slice_name="Participants",
viz_type='big_number',
datasource_type='table',
datasource_id=tbl.id,
params=get_slice_json(
defaults,
viz_type="big_number", granularity="ds",
compare_lag="5", compare_suffix="over 5Y")),
Slice(
slice_name="Genders",
viz_type='pie',
datasource_type='table',
datasource_id=tbl.id,
params=get_slice_json(
defaults,
viz_type="pie", groupby=['gender'])),
Slice(
slice_name="Genders by State",
viz_type='dist_bar',
datasource_type='table',
datasource_id=tbl.id,
params=get_slice_json(
defaults,
filters=[{
'col': 'state',
'op': 'not in',
'val': ['other'],
}],
viz_type="dist_bar",
metrics=['sum__sum_girls', 'sum__sum_boys'],
groupby=['state'])),
Slice(
slice_name="Trends",
viz_type='line',
datasource_type='table',
datasource_id=tbl.id,
params=get_slice_json(
defaults,
viz_type="line", groupby=['name'],
granularity='ds', rich_tooltip=True, show_legend=True)),
Slice(
slice_name="Average and Sum Trends",
viz_type='dual_line',
datasource_type='table',
datasource_id=tbl.id,
params=get_slice_json(
defaults,
viz_type="dual_line", metric='avg__num', metric_2='sum__num',
granularity='ds')),
Slice(
slice_name="Title",
viz_type='markup',
datasource_type='table',
datasource_id=tbl.id,
params=get_slice_json(
defaults,
viz_type="markup", markup_type="html",
code="""\
<div style="text-align:center">
<h1>Birth Names Dashboard</h1>
<p>
The source dataset came from
<a href="https://github.com/hadley/babynames">[here]</a>
</p>
<img src="/static/assets/images/babytux.jpg">
</div>
""")),
Slice(
slice_name="Name Cloud",
viz_type='word_cloud',
datasource_type='table',
datasource_id=tbl.id,
params=get_slice_json(
defaults,
viz_type="word_cloud", size_from="10",
series='name', size_to="70", rotation="square",
limit='100')),
Slice(
slice_name="Pivot Table",
viz_type='pivot_table',
datasource_type='table',
datasource_id=tbl.id,
params=get_slice_json(
defaults,
viz_type="pivot_table", metrics=['sum__num'],
groupby=['name'], columns=['state'])),
Slice(
slice_name="Number of Girls",
viz_type='big_number_total',
datasource_type='table',
datasource_id=tbl.id,
params=get_slice_json(
defaults,
viz_type="big_number_total", granularity="ds",
filters=[{
'col': 'gender',
'op': 'in',
'val': ['girl'],
}],
subheader='total female participants')),
]
for slc in slices:
merge_slice(slc)
print("Creating a dashboard")
dash = db.session.query(Dash).filter_by(dashboard_title="Births").first()
if not dash:
dash = Dash()
js = textwrap.dedent("""\
[
{
"col": 9,
"row": 6,
"size_x": 2,
"size_y": 4,
"slice_id": "1267"
},
{
"col": 11,
"row": 6,
"size_x": 2,
"size_y": 4,
"slice_id": "1268"
},
{
"col": 1,
"row": 0,
"size_x": 2,
"size_y": 2,
"slice_id": "1269"
},
{
"col": 3,
"row": 0,
"size_x": 2,
"size_y": 2,
"slice_id": "1270"
},
{
"col": 5,
"row": 3,
"size_x": 8,
"size_y": 3,
"slice_id": "1271"
},
{
"col": 1,
"row": 6,
"size_x": 8,
"size_y": 4,
"slice_id": "1272"
},
{
"col": 10,
"row": 0,
"size_x": 3,
"size_y": 3,
"slice_id": "1273"
},
{
"col": 5,
"row": 0,
"size_x": 5,
"size_y": 3,
"slice_id": "1274"
},
{
"col": 1,
"row": 2,
"size_x": 4,
"size_y": 4,
"slice_id": "1275"
}
]
""")
l = json.loads(js)
for i, pos in enumerate(l):
pos['slice_id'] = str(slices[i].id)
dash.dashboard_title = "Births"
dash.position_json = json.dumps(l, indent=4)
dash.slug = "births"
dash.slices = slices[:-1]
db.session.merge(dash)
db.session.commit()
def load_unicode_test_data():
"""Loading unicode test dataset from a csv file in the repo"""
df = pd.read_csv(os.path.join(DATA_FOLDER, 'unicode_utf8_unixnl_test.csv'),
encoding="utf-8")
# generate date/numeric data
df['date'] = datetime.datetime.now().date()
df['value'] = [random.randint(1, 100) for _ in range(len(df))]
df.to_sql( # pylint: disable=no-member
'unicode_test',
db.engine,
if_exists='replace',
chunksize=500,
dtype={
'phrase': String(500),
'short_phrase': String(10),
'with_missing': String(100),
'date': Date(),
'value': Float(),
},
index=False)
print("Done loading table!")
print("-" * 80)
print("Creating table [unicode_test] reference")
obj = db.session.query(TBL).filter_by(table_name='unicode_test').first()
if not obj:
obj = TBL(table_name='unicode_test')
obj.main_dttm_col = 'date'
obj.database = get_or_create_main_db()
db.session.merge(obj)
db.session.commit()
obj.fetch_metadata()
tbl = obj
slice_data = {
"granularity": "date",
"groupby": [],
"metric": 'sum__value',
"row_limit": config.get("ROW_LIMIT"),
"since": "100 years ago",
"until": "now",
"where": "",
"viz_type": "word_cloud",
"size_from": "10",
"series": "short_phrase",
"size_to": "70",
"rotation": "square",
"limit": "100",
}
print("Creating a slice")
slc = Slice(
slice_name="Unicode Cloud",
viz_type='word_cloud',
datasource_type='table',
datasource_id=tbl.id,
params=get_slice_json(slice_data),
)
merge_slice(slc)
print("Creating a dashboard")
dash = (
db.session.query(Dash)
.filter_by(dashboard_title="Unicode Test")
.first()
)
if not dash:
dash = Dash()
pos = {
"size_y": 4,
"size_x": 4,
"col": 1,
"row": 1,
"slice_id": slc.id,
}
dash.dashboard_title = "Unicode Test"
dash.position_json = json.dumps([pos], indent=4)
dash.slug = "unicode-test"
dash.slices = [slc]
db.session.merge(dash)
db.session.commit()
def load_random_time_series_data():
"""Loading random time series data from a zip file in the repo"""
with gzip.open(os.path.join(DATA_FOLDER, 'random_time_series.json.gz')) as f:
pdf = pd.read_json(f)
pdf.ds = pd.to_datetime(pdf.ds, unit='s')
pdf.to_sql(
'random_time_series',
db.engine,
if_exists='replace',
chunksize=500,
dtype={
'ds': DateTime,
},
index=False)
print("Done loading table!")
print("-" * 80)
print("Creating table [random_time_series] reference")
obj = db.session.query(TBL).filter_by(table_name='random_time_series').first()
if not obj:
obj = TBL(table_name='random_time_series')
obj.main_dttm_col = 'ds'
obj.database = get_or_create_main_db()
db.session.merge(obj)
db.session.commit()
obj.fetch_metadata()
tbl = obj
slice_data = {
"granularity": "day",
"row_limit": config.get("ROW_LIMIT"),
"since": "1 year ago",
"until": "now",
"metric": "count",
"where": "",
"viz_type": "cal_heatmap",
"domain_granularity": "month",
"subdomain_granularity": "day",
}
print("Creating a slice")
slc = Slice(
slice_name="Calendar Heatmap",
viz_type='cal_heatmap',
datasource_type='table',
datasource_id=tbl.id,
params=get_slice_json(slice_data),
)
merge_slice(slc)
def load_country_map_data():
"""Loading data for map with country map"""
csv_path = os.path.join(DATA_FOLDER, 'birth_france_data_for_country_map.csv')
data = pd.read_csv(csv_path, encoding="utf-8")
data['date'] = datetime.datetime.now().date()
data.to_sql( # pylint: disable=no-member
'birth_france_by_region',
db.engine,
if_exists='replace',
chunksize=500,
dtype={
'DEPT_ID': String(10),
'2003': BigInteger,
'2004': BigInteger,
'2005': BigInteger,
'2006': BigInteger,
'2007': BigInteger,
'2008': BigInteger,
'2009': BigInteger,
'2010': BigInteger,
'2011': BigInteger,
'2012': BigInteger,
'2013': BigInteger,
'2014': BigInteger,
'date': Date()
},
index=False)
print("Done loading table!")
print("-" * 80)
print("Creating table reference")
obj = db.session.query(TBL).filter_by(table_name='birth_france_by_region').first()
if not obj:
obj = TBL(table_name='birth_france_by_region')
obj.main_dttm_col = 'date'
obj.database = get_or_create_main_db()
db.session.merge(obj)
db.session.commit()
obj.fetch_metadata()
tbl = obj
slice_data = {
"granularity": "",
"since": "",
"until": "",
"where": "",
"viz_type": "country_map",
"entity": "DEPT_ID",
"metric": "avg__2004",
"row_limit": 500000,
}
print("Creating a slice")
slc = Slice(
slice_name="Birth in France by department in 2016",
viz_type='country_map',
datasource_type='table',
datasource_id=tbl.id,
params=get_slice_json(slice_data),
)
misc_dash_slices.append(slc.slice_name)
merge_slice(slc)
def load_long_lat_data():
"""Loading lat/long data from a csv file in the repo"""
with gzip.open(os.path.join(DATA_FOLDER, 'san_francisco.csv.gz')) as f:
pdf = pd.read_csv(f, encoding="utf-8")
pdf['date'] = datetime.datetime.now().date()
pdf['occupancy'] = [random.randint(1, 6) for _ in range(len(pdf))]
pdf['radius_miles'] = [random.uniform(1, 3) for _ in range(len(pdf))]
pdf.to_sql( # pylint: disable=no-member
'long_lat',
db.engine,
if_exists='replace',
chunksize=500,
dtype={
'longitude': Float(),
'latitude': Float(),
'number': Float(),
'street': String(100),
'unit': String(10),
'city': String(50),
'district': String(50),
'region': String(50),
'postcode': Float(),
'id': String(100),
'date': Date(),
'occupancy': Float(),
'radius_miles': Float(),
},
index=False)
print("Done loading table!")
print("-" * 80)
print("Creating table reference")
obj = db.session.query(TBL).filter_by(table_name='long_lat').first()
if not obj:
obj = TBL(table_name='long_lat')
obj.main_dttm_col = 'date'
obj.database = get_or_create_main_db()
db.session.merge(obj)
db.session.commit()
obj.fetch_metadata()
tbl = obj
slice_data = {
"granularity": "day",
"since": "2014-01-01",
"until": "now",
"where": "",
"viz_type": "mapbox",
"all_columns_x": "LON",
"all_columns_y": "LAT",
"mapbox_style": "mapbox://styles/mapbox/light-v9",
"all_columns": ["occupancy"],
"row_limit": 500000,
}
print("Creating a slice")
slc = Slice(
slice_name="Mapbox Long/Lat",
viz_type='mapbox',
datasource_type='table',
datasource_id=tbl.id,
params=get_slice_json(slice_data),
)
misc_dash_slices.append(slc.slice_name)
merge_slice(slc)
def load_multiformat_time_series_data():
"""Loading time series data from a zip file in the repo"""
with gzip.open(os.path.join(DATA_FOLDER, 'multiformat_time_series.json.gz')) as f:
pdf = pd.read_json(f)
pdf.ds = pd.to_datetime(pdf.ds, unit='s')
pdf.ds2 = pd.to_datetime(pdf.ds2, unit='s')
pdf.to_sql(
'multiformat_time_series',
db.engine,
if_exists='replace',
chunksize=500,
dtype={
"ds": Date,
'ds2': DateTime,
"epoch_s": BigInteger,
"epoch_ms": BigInteger,
"string0": String(100),
"string1": String(100),
"string2": String(100),
"string3": String(100),
},
index=False)
print("Done loading table!")
print("-" * 80)
print("Creating table [multiformat_time_series] reference")
obj = db.session.query(TBL).filter_by(table_name='multiformat_time_series').first()
if not obj:
obj = TBL(table_name='multiformat_time_series')
obj.main_dttm_col = 'ds'
obj.database = get_or_create_main_db()
dttm_and_expr_dict = {
'ds': [None, None],
'ds2': [None, None],
'epoch_s': ['epoch_s', None],
'epoch_ms': ['epoch_ms', None],
'string2': ['%Y%m%d-%H%M%S', None],
'string1': ['%Y-%m-%d^%H:%M:%S', None],
'string0': ['%Y-%m-%d %H:%M:%S.%f', None],
'string3': ['%Y/%m/%d%H:%M:%S.%f', None],
}
for col in obj.columns:
dttm_and_expr = dttm_and_expr_dict[col.column_name]
col.python_date_format = dttm_and_expr[0]
col.dbatabase_expr = dttm_and_expr[1]
col.is_dttm = True
db.session.merge(obj)
db.session.commit()
obj.fetch_metadata()
tbl = obj
print("Creating some slices")
for i, col in enumerate(tbl.columns):
slice_data = {
"metric": 'count',
"granularity_sqla": col.column_name,
"granularity": "day",
"row_limit": config.get("ROW_LIMIT"),
"since": "1 year ago",
"until": "now",
"where": "",
"viz_type": "cal_heatmap",
"domain_granularity": "month",
"subdomain_granularity": "day",
}
slc = Slice(
slice_name="Calendar Heatmap multiformat " + str(i),
viz_type='cal_heatmap',
datasource_type='table',
datasource_id=tbl.id,
params=get_slice_json(slice_data),
)
merge_slice(slc)
misc_dash_slices.append(slc.slice_name)
def load_misc_dashboard():
"""Loading a dashboard featuring misc charts"""
print("Creating the dashboard")
db.session.expunge_all()
DASH_SLUG = "misc_charts"
dash = db.session.query(Dash).filter_by(slug=DASH_SLUG).first()
if not dash:
dash = Dash()
js = textwrap.dedent("""\
[
{
"col": 1,
"row": 7,
"size_x": 6,
"size_y": 4,
"slice_id": "442"
},
{
"col": 1,
"row": 2,
"size_x": 6,
"size_y": 5,
"slice_id": "443"
},
{
"col": 7,
"row": 2,
"size_x": 6,
"size_y": 4,
"slice_id": "444"
},
{
"col": 9,
"row": 0,
"size_x": 4,
"size_y": 2,
"slice_id": "455"
},
{
"col": 7,
"row": 6,
"size_x": 6,
"size_y": 5,
"slice_id": "467"
},
{
"col": 1,
"row": 0,
"size_x": 8,
"size_y": 2,
"slice_id": "475"
}
]
""")
l = json.loads(js)
slices = (
db.session
.query(Slice)
.filter(Slice.slice_name.in_(misc_dash_slices))
.all()
)
slices = sorted(slices, key=lambda x: x.id)
for i, pos in enumerate(l):
pos['slice_id'] = str(slices[i].id)
dash.dashboard_title = "Misc Charts"
dash.position_json = json.dumps(l, indent=4)
dash.slug = DASH_SLUG
dash.slices = slices
db.session.merge(dash)
db.session.commit()
|
mit
|
adam-m-jcbs/Simmy
|
AMReX/Maestro/SubChandra/subchandra.py
|
1
|
38721
|
# This code implements the various classes from the simmy framework to represent
# a sub-chandra grid of models. The code is based in part on code I previously
# wrote to carry out the models described in
# http://adsabs.harvard.edu/abs/2016ApJ...827...84J
# Author: Adam Jacobs
# Creation date: September 22, 2017
# Usage: load as a module
# Requirements
# + Python 3
# + Common scientific python tools: NumPy, matplotlib
# TODO: fill in other requirements
#TODO Implement intermediate packages, e.g. for Maestro?
###########################################
### Global Imports, Data, and Constants ###
###########################################
import sys
from simmy import SimulationGrid, Machine, Simulation, SimConfig, SimOutput
# Global checks, assertions
if not sys.version_info >= (3,):
#TODO if you can, perhaps make this work for 2.7+ and 3 with from __future__
raise RuntimeError("subchandra requires Python 3")
class SubChandraGrid(SimulationGrid):
"""A grid of Maestro simulations of sub-Chandrasekhar mass CO WDs with
helium shells."""
def __init__(self, label, stage_base, scratch_base):
"""Construct the SubChandraGrid object.
Arguments:
label --> string describing the grid
stage_base --> base directory where simulations will be staged
scratch_base --> base directory for the scratch space
where runs will be executed
"""
        super().__init__(label, stage_base, scratch_base)
def listSimulations(self):
"""Print a list of simulations in this grid.
This will only list 'active' simulations that the user is still
exploring. Simulations that are no longer being explored can be archived
so that they will not pollute the list.
Details listed: label, if it's in scratch space, if it's in the queue.
"""
from simmy import START_GREEN, START_RED, START_BLUE, RESET
from simmy import Machine
from os.path import isfile, isdir, join
from glob import glob
active_sims = self._getActiveSimDirs()
curcomp = Machine.getCurrentMachine()
#Define heading/formatting for simulation list
heading = '{0:29s}|{1:14s}'.format('Label', 'In scratch?')
list_format = '{0:29s}|{1:14s}'
if curcomp.has_queue:
heading = '{0:29s}|{1:14s}|{2:14s}'.format('Label', 'In scratch?', 'In queue?')
list_format = '{0:29s}|{1:14s}|{2:14s}'
yep = START_GREEN + '{0:14s}'.format("Yes!") + RESET
nope = START_RED + '{0:14s}'.format("No!") + RESET
purged = START_BLUE + '{0:14s}'.format("Purged!") + RESET
print(heading)
for s in active_sims:
#Check scratch: is it there, not, or there but purged?
simdir = join(self._scratch_base, s)
sc_str = nope
if isdir(simdir):
sc_str = purged
found_all_expected_files = (
len(glob( join(simdir, 'main.*') )) > 0 and
len(glob( join(simdir, 'inputs*') )) > 0 and
len(glob( join(simdir, 'helm_table.dat') )) > 0
)
if found_all_expected_files:
sc_str = yep
#Check if the simulation is queued and define summary string
if curcomp.has_queue:
#Check queue
# ASSUMPTION: sim directory is same as queue label
q_str = nope
if curcomp.isQueued(s):
q_str = yep
outstr = list_format.format(s, sc_str, q_str)
else:
outstr = list_format.format(s, sc_str)
print(outstr)
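    # Example of the listing produced above (simulation labels are hypothetical,
    # ANSI color codes omitted); column widths come from the 29/14-character
    # format strings:
    #   Label                        |In scratch?   |In queue?
    #   subchandra_1.00M_0.05He      |Yes!          |No!
    #   subchandra_1.10M_0.03He      |Purged!       |No!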
def _getActiveSims(self):
"""Return a list of Simulation objects representing the simulations in this grid."""
ret = []
for d in listdir(self._stage_base):
if d == 'inactive':
continue
#TODO This uses Simulation, which should generally be subclassed.
#Think about if we should do some fancy reflection of some sort to
#use the proper subclass' static factory method
newSimObject = SCSimulation.genFromDir(d)
ret.append(newSimObject)
return ret
class SCSimulation(Simulation):
"""Represents a particular sub-Chandra simulation."""
def __init__(self, label, base_dir):
"""Construct an SCSimulation object using an existing simulation.
Arguments:
label --> label for this simulation, will also be name of dir
where it's stored
base_dir --> Path to the base directory this simulation is stored in.
By convention this will also be the label for the
grid of models this simulation belongs to.
"""
        super().__init__(label, base_dir)
def _genConfig(self, simdir):
"""Generate an SCConfig object for this simulation based on existing
configuration."""
return SCConfig(simdir)
def _genOutput(self, simdir):
"""Generate an SCOutput object for this simulation based on existing
configuration."""
return SCOutput(simdir)
@classmethod
def _buildMe(cls, label, base_dir, **kwargs):
"""Build a new Sub-Chandra simulation.
Required Keyword Arguments:
+ M_tot = Mass of the WD core in M_sol.
+ M_He = Mass of He envelope in M_sol.
+ temp_base = Temperature at the base of the He envelope in K.
+ delta = Transition delta from core to envelope in cm.
+ temp_core = Isothermal core temperature in K.
+ max_levs = Number of levels of refinement.
+ coarse_res = Resolution of the base (coarsest) grid.
+ drdxfac = Factor by which finest grid's resolution is multiplied
to get the base state resolution in spherical geometry.
+ octant = Boolean, .true. means we model an octant, not the full star.
"""
#TODO Add other valid kwargs that one might want to modify, for now we
#start with those most likely to be changed from simulation to
#simulation.
#Get Machine configuration
#Get partially initialized SCConfig
#Use config to build initial model
#Finish SCConfig initialization
#Construct and return new SCSimulation
class SCConfig(SimConfig):
"""Represents all of the configuration needed to specify a sub-Chandra
simulation. This includes inputs files, initial models, and the location of
needed binary files.
The class tries to handle the details and set reasonable defaults for things
that don't change much from simulation to simulation. The user-tunable
properties of the configuration are stored in ConfigRecords."""
#TODO Organize methods: public first, static last, etc
def __init__(self, simdir, config_recs=None):
"""Constructs an SCConfig object using an existing configuration in the
given directory."""
super().__init__(simdir, config_recs)
def _initFromDir(self):
"""Initialize this object using an existing configuration."""
#Should include
# + inputs file data
# + initial model data
# + Xlocation of template files?
# + Xjob configuration
#
#TODO Should fields all be strings or have appropriate type?
# Strings makes it easy to pull them out of and put them back in file form.
config_recs = []
self._initFilesFromDir()
#TODO For now I'm having SCConfig keep a local reference to the
# ConfigRecords. This suggests to me I might want to have subclasses of
# ConfigRecord like InputsRecord and IMRecord. I like the SimConfig super
# class being able to do generic operations by iterating over
# ConfigRecords, so if I do subclass I want to be careful to maintain this
# ability.
self._inputs_rec = self._initInputsRecFromDir()
self._im_rec = self._initIMRecFromDir()
config_recs.append(self._inputs_rec)
config_recs.append(self._im_rec)
return config_recs
def _initFromRecs(self):
"""Initialize this object using the partially initialized ConfigRecords
in self._config_recs.
The minimum non-default field values that need to be defined for this are:
Initial Model: M_tot, M_he, delta, temp_core, temp_base, im_exe
Everything else can be derived.
"""
#TODO Other things can be derived as mentioned above, but maybe I want
#to make sure they can be customized without derivation overwriting?
#Design
# + Use the given parameters to build a first attempt at initial model
# with large radius
# + Based on result, rebuild initial model with a more reasonable
# radius
# + Store initial model and fully initialize im record
# + Use results to fully initialize inputs record
#
# Reference _computeRmacAndCutoffs, _generateInitModel from subchandra.py
#Select out the config records.
# TODO As mentioned elsewhere, this seems
# like I'm working around a poor design. Should consider writing
# ConfigRecord subclasses or develop an otherwise more robust method
# for identifying the records.
for rec in self._config_recs:
if rec._label.count('Inputs Configuration') > 0:
self._inputs_rec = rec
if rec._label.count('Initial Model Configuration') > 0:
self._im_rec = rec
self._initFilePaths()
self._buildIM()
def _buildIM(self):
"""Use the partially initialized self._im_rec and self._inputs_rec to
build an initial model. This will also set derived values in
self._im_rec and self._inputs_rec.
Fields needed from self._im_rec.
Fields with * can reasonably be left to the default:
+ M_tot = Mass of the WD core in M_sol.
+ M_He = Mass of He envelope in M_sol.
+ temp_base = Temperature at the base of the He envelope in K.
+ *delta = Transition delta from core to envelope in cm.
+ *temp_core = Isothermal core temperature in K.
Fields needed from self._inputs_rec:
+ max_levs = Number of levels of refinement.
+ coarse_res = Resolution of the base (coarsest) grid.
+ drdxfac = Factor by which finest grid's resolution is multiplied
to get the base state resolution in spherical geometry.
+ octant = Boolean, .true. means we model an octant, not the full star.
+ *sponge_start_factor = Factor by which to multiply
sponge_center_density. Decides starting density for numerical
sponge.
"""
from subprocess import call, Popen, PIPE, STDOUT
from os.path import join, isfile, dirname
from os import remove
from glob import glob
from shlex import split
        from numpy import loadtxt
        from simmy import TemplateFile
#TODO
# + Add ability to build the initial model exe? As of now, user needs
# to have built it.
#Design
# + Use the given parameters to build a first attempt at initial model
# with large radius
# + Based on result, rebuild initial model with a more reasonable
# radius
#Calculate base state resolution, which should also be the initial
#model's resolution
max_levs = int(self._inputs_rec.getField('max_levs'))
coarse_res = int(self._inputs_rec.getField('coarse_res'))
fine_res = coarse_res*2**(max_levs-1)
drdxfac = int(self._inputs_rec.getField('drdxfac'))
octant = self._inputs_rec.getField('octant')
octant = octant.lower().count('true') > 0
if octant:
base_state_res = drdxfac*fine_res
else:
base_state_res = drdxfac*fine_res/2
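        # Worked example using the defaults from _getInputsDefaults below
        # (max_levs=4, coarse_res=512, drdxfac=5): the finest grid spans
        # 512*2**3 = 4096 cells, so the base state gets 5*4096 = 20480 points
        # for an octant run, or 10240 for a full star (half the domain width).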
#TODO Here I'm forcing the correct im resolution, but users may expect
# any nx they pass to be used. Should design to make it clear users
# can't set nx.
self._im_rec.setField('nx', str(int(base_state_res)))
#For now, choose a pretty huge size. We'll adjust down later.
self._im_rec.setField('xmax', '1.1e9')
#We should now have all the information we need to write a _params file
#for the initial model builder.
im_template_text, lead_space = SCConfig._getIMTempText()
field_dict = self._im_rec.getFieldDict()
im_tempfile = TemplateFile(field_dict, im_template_text, lead_space)
self._im_rec.associateFile(im_tempfile)
self._im_rec.saveFile(self._params_file)
#Execute the initial model builder
#Make sure helmtable is linked
#TODO For now, just checking. Would be nice to link if table not found
im_exe = self._im_rec.getField('im_exe')
helm_path = join(dirname(im_exe), 'helm_table.dat')
if not isfile(helm_path):
raise RuntimeError("No helm_table.dat, cannot execute initial model builder.")
if not isfile('helm_table.dat'):
call(['ln', '-s', helm_path])
#Build the executable command
im_exe_cmd = im_exe + ' ' + self._params_file
print(im_exe_cmd)
#Execute, removing any old IM data files
old_files = glob('sub_chandra.M_WD*')
for f in old_files:
remove(f)
im_proc = Popen(split(im_exe_cmd), stdout=PIPE, stderr=PIPE)
(im_out, im_err) = im_proc.communicate()
if im_err:
print('init1d error: ', im_err)
raise RuntimeError("Initial model builder failed.")
#Read in data, find the radius of peak temperature, choose a maximum
#radius of rpeak + rpeak*rfac
#Set the anelastic cutoff and sponge central density as the density at
#the top of the convective envelope (where temperature levels off)
#divided by sponge_st_fac
#ASSUMPTION: initial model data for exactly one model
# exists in the current directory
imfile = glob('sub_chandra.M_WD*.hse*')[0]
rfac = 0.5
rad, rho, temp = loadtxt(imfile, usecols=(0,1,2), unpack=True)
rmax = 0
ancut = 0.0
te_old = 0.0
        ssfac = float(self._inputs_rec.getField('sponge_start_factor'))
for r, d, t in zip(rad, rho, temp):
dt = t - te_old
if dt < 0.0:
rmax = r
if rmax and dt == 0.0:
                ancut = d_old/ssfac
scd = ancut
break
te_old = t
d_old = d
rmax = rmax + rmax*rfac
rmax_final = round(rmax, -7)
ancut_final = round(ancut, -3)
scd_final = round(scd, -3)
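        # Illustrative (hypothetical) numbers: a temperature peak at
        # r ~ 4.03e8 cm gives rmax = 1.5*4.03e8 = 6.045e8 cm, which
        # round(rmax, -7) turns into 6.0e8 cm; an envelope-top density of
        # ~2.35e4 g/cm**3 with sponge_start_factor=2.0 gives ancut ~ 1.175e4,
        # which round(ancut, -3) turns into 1.2e4.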
#Using data from the model, rebuild with more reasonable radius
#Set values in config records and save files with new data
def _initFilesFromDir(self):
"""Initialize variables with paths to inputs and config files for an
existing simulation.
Initializes self._inputs_file, self._params_file, self._imdata_files
"""
from os.path import join, basename
from glob import glob
#NOTE Convention established here:
# A simulation directory contains a `model` directory with inputs
# file in it. inputs file looks like
# inputs<dim>d.<simulation label>
#TODO Handle multiple inputs files in model dir?
#TODO Use regex instead of glob, it's safer and better-defined
#Input file that is passed to executable as arg
inputs_list = glob(join(self._simdir, 'model', 'inputs*'))
if len(inputs_list) > 1:
raise NotImplementedError("Having multiple inputs files in model dir not currently implemented")
self._inputs_file = inputs_list[0] #NOTE full path
#print('inputs file: {}'.format(self._inputs_file))
#Parameters file describing initial model parameters
params_list = glob(join(self._simdir, 'model', '_params*'))
if len(params_list) > 1:
raise NotImplementedError("Having multiple _params files in model dir not currently implemented")
self._params_file = params_list[0]
#print('_params file: {}'.format(self._params_file))
#Initial model data, pointed to by inputs file
hse_list = glob(join(self._simdir, 'model', 'sub_chandra.*.hse.*'))
if len(hse_list) > 1:
raise NotImplementedError("Having multiple hse initial model files in model dir not currently implemented")
extras_list = glob(join(self._simdir, 'model', 'sub_chandra.*.extras.*'))
        if len(extras_list) > 1:
raise NotImplementedError("Having multiple extras initial model files in model dir not currently implemented")
self._imdata_files = (hse_list[0], extras_list[0])
#print('im data files: {}'.format(self._imdata_files))
def _initFilePaths(self):
"""Initialize variables with paths to inputs and config files to be
written for a new simulation.
Initializes self._inputs_file, self._params_file.
self._imdata_files is initialized after the initial model is built.
"""
from os.path import join, basename
from glob import glob
#NOTE Convention established here:
# A simulation directory contains a `model` directory with inputs
# file in it. inputs file looks like
# inputs<dim>d.<simulation label>
#TODO Handle multiple inputs files in model dir?
#TODO Use regex instead of glob, it's safer and better-defined
#All config files for the model go here
base_dir = join(self._simdir, 'model')
model_label = self._label
#Input file that is passed to executable as arg
inputs_filename = "inputs3d.{}".format(model_label)
self._inputs_file = join(base_dir, inputs_filename)
#print('inputs file: {}'.format(self._inputs_file))
#Parameters file describing initial model parameters
params_filename = "_params.{}".format(model_label)
self._params_file = join(base_dir, params_filename)
#print('_params file: {}'.format(self._params_file))
def _initInputsRecFromDir(self):
"""Initialize a ConfigRecord of inputs variables based on the inputs file."""
from simmy import ConfigRecord, TemplateFile
#An inputs file consists of definitions of the form "var = value".
#Here we convert this into a ConfigRecord that will allow easy programmatic
#access to and manipulation of these variables.
#Get file variables
file_vars = {}
with open(self._inputs_file, 'r') as f:
for line in f:
tokens = line.partition('=')
if tokens[1]: #Only do anything if a '=' was found
key = tokens[0].strip()
strval = tokens[2].strip()
file_vars[key] = strval
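        # For example, a (hypothetical) inputs line "max_levs = 4" yields the
        # key 'max_levs' with string value '4'; lines without an '=' are
        # skipped because tokens[1] is empty.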
#Define fields and initialize ConfigRecord
inputs_rec = SCConfig.genInputsConfigRec()
for key, val in file_vars.items():
try:
inputs_rec.setField(key, val)
except KeyError:
pass
#print('{} is an extra key in the file'.format(key))
#Define TemplateFile
inputs_template_text, lead_space = SCConfig._getInputsTempText()
field_dict = inputs_rec.getFieldDict()
inputs_tempfile = TemplateFile(field_dict, inputs_template_text, lead_space)
#Associate the file and return
inputs_rec.associateFile(inputs_tempfile)
return inputs_rec
def _initIMRecFromDir(self):
"""Initialize an initial model ConfigRecord from the files
found in the simulation directory.
Returns a ConfigRecord representing initial model configuration.
Contains all of the data from initial model files and the _params file
used to generate this data.
"""
from numpy import loadtxt, array
from simmy import ConfigRecord, TemplateFile
#Store initial model parameters from _params file
#TODO I use this logic multiple times, move to helper function?
file_vars = {}
with open(self._params_file, 'r') as f:
for line in f:
tokens = line.partition('=')
if tokens[1]: #Only do anything if a '=' was found
key = tokens[0].strip()
strval = tokens[2].strip()
file_vars[key] = strval
#Define fields and initialize ConfigRecord
im_rec = SCConfig.genIMConfigRec()
for key, val in file_vars.items():
try:
im_rec.setField(key, val)
except KeyError:
pass
#print('{} is an extra key in the file'.format(key))
#Store initial model data
hse_file = self._imdata_files[0]
extras_file = self._imdata_files[1]
#TODO Make sure loadtxt is robust for things like blank lines, bad lines, etc
rad, rho, temp, pressure, Xhe4, Xc12, Xo16, Xfe56 = loadtxt(
hse_file, unpack=True)
rad, cs, ent = loadtxt(extras_file, unpack=True)
im_rec.setField('radius', rad)
im_rec.setField('density', rho)
im_rec.setField('temperature', temp)
im_rec.setField('pressure', pressure)
im_rec.setField('soundspeed', cs)
im_rec.setField('entropy', ent)
self._ihe4, self._ic12, self._io16 = 0, 1, 2
im_rec.setField('species', array([Xhe4, Xc12, Xo16]))
#Define TemplateFile
im_template_text, lead_space = self._getIMTempText()
field_dict = im_rec.getFieldDict()
im_tempfile = TemplateFile(field_dict, im_template_text, lead_space)
#Associate the file and return
im_rec.associateFile(im_tempfile)
return im_rec
@staticmethod
def genInputsConfigRec():
"""Return an inputs ConfigRecord with some default values set.
This provides a baseline for users to fully initialize and then use to
create new SCConfig objects from scratch.
"""
from simmy import ConfigRecord, TemplateFile
#Define fields and initialize ConfigRecord
fields_dict, fieldmap = SCConfig._getInputsFields()
rec_label = 'Inputs Configuration'
rec_desc = """Configuration of the inputs file. This is the file passed
to the Maestro executable that sets various Maestro parameters,
configures the simulation, and provides the location of initial model
data."""
inputs_rec = ConfigRecord(fields_dict, rec_label, rec_desc, fieldmap)
inputs_defaults = SCConfig._getInputsDefaults()
for key, val in inputs_defaults.items():
inputs_rec.setField(key, val)
return inputs_rec
@staticmethod
def genIMConfigRec(im_exe=None):
"""Return an initial model ConfigRecord with some default values set.
This provides a baseline for users to fully initialize and then use to
create new SCConfig objects from scratch.
im_exe is the full path to the executable that builds the initial model.
If you want to build a configuration from scratch, you'll need to
provide this.
"""
from simmy import ConfigRecord
#Define fields and initialize ConfigRecord
fields_dict = SCConfig._getIMFields()
rec_label = "Initial Model Configuration"
rec_desc = """Configures the initial model for this simulation. This
corresponds to the _params file used by init1d to build an initial 1D
model to be mapped to the 3D domain. The data from this model are also
stored."""
im_rec = ConfigRecord(fields_dict, rec_label, rec_desc)
im_defaults = SCConfig._getIMDefaults()
for key, val in im_defaults.items():
im_rec.setField(key, val)
if im_exe is not None:
im_rec.setField('im_exe', im_exe)
return im_rec
@staticmethod
def _getInputsDefaults():
"""Get a dictionary of default values for inputs fields."""
#TODO I'm redundantly setting things that do not make sense to have a
#default of None. Helps me keep track, but maybe should just delete.
inputs_defaults = {}
inputs_defaults['im_file'] = None
inputs_defaults['drdxfac'] = '5'
inputs_defaults['job_name'] = None
inputs_defaults['max_levs'] = '4'
inputs_defaults['coarse_res'] = '512'
inputs_defaults['anelastic_cutoff'] = None
inputs_defaults['sponge_start_factor'] = '2.0'
inputs_defaults['octant'] = ".false."
inputs_defaults['dim'] = '3'
inputs_defaults['physical_size'] = None
inputs_defaults['plot_deltat'] = '5.0'
inputs_defaults['mini_plot_deltat'] = '0.2'
inputs_defaults['chk_int'] = '10'
return inputs_defaults
@staticmethod
def _getIMDefaults():
"""Get a dictionary of default values for initial_model fields."""
#TODO I'm redundantly setting things that do not make sense to have a
#default of None. Helps me keep track, but maybe should just delete.
im_defaults = {}
im_defaults['im_exe'] = None
im_defaults['M_tot'] = None
im_defaults['M_He'] = None
im_defaults['delta'] = None
im_defaults['temp_core'] = None
im_defaults['temp_base'] = None
im_defaults['mixed_co_wd'] = '.false.'
im_defaults['low_density_cutoff'] = '1.d-4'
im_defaults['temp_fluff'] = '7.5d7'
im_defaults['smallt'] = '1.d6'
im_defaults['xmin'] = '0.0'
#The initial model resolution should match Maestro's base state
#resolution. This is derived from inputs. TODO Users are allowed to
#override this, but shouldn't?
#These should be derived:
im_defaults['nx'] = None
im_defaults['xmax'] = None
#The physical size of initial model also can be derived from inputs.
#for octant, it is same as domain size. For full star, half.
#TODO In practice, an initial model is tried to get an idea of the
# physical size of the domain, which is then put into inputs.
# However, below we get IM size from inputs. Would be
# nice to formalize this algorithm here instead of the current method of
# doing it manually.
# The basic algorithm is to do an initial model with the desired
# properties with a relatively huge xmax. Then, redo the initial
# model with xmax set to be the radius of T_peak + 50% of that radius.
# This will also be the size of the domain of the 3D grid.
# This gives reasonable balance of buffer zone between surface of star
# and edge of domain without wasting too many resources on unimportant
# parts of the domain.
im_defaults['radius'] = None
im_defaults['density'] = None
im_defaults['temperature'] = None
im_defaults['pressure'] = None
im_defaults['soundspeed'] = None
im_defaults['entropy'] = None
im_defaults['species'] = None
return im_defaults
def _deriveInputs(self, inputs_rec):
"""Derive inputs fields based on a partially initialized inputs ConfigRecord."""
#TODO Derive job_name, im_file, anelastic cutoff, physical size
pltfile_base = self._label + "_plt"
inputs_rec.setField('plot_base_name', pltfile_base)
miniplt_base = self._label + "_miniplt"
inputs_rec.setField('mini_plot_base_name',miniplt_base)
chkfile_base = self._label + "_chk"
inputs_rec.setField('check_base_name', chkfile_base)
if inputs_rec.getField('octant').lower().count('false') > 0:
inputs_rec.setField('bc_lo', '12')
inputs_rec.setField('bc_hi', '12')
else:
inputs_rec.setField('bc_lo', '13')
inputs_rec.setField('bc_hi', '12')
def _deriveIM(self, im_rec, inputs_rec):
"""Derive initial model fields based on a partially initialized inputs
and im ConfigRecord."""
#TODO Implement this
pass
#def _initInputsDict(self):
@staticmethod
def _getInputsFields():
"""Get a dictionary of inputs fields and their descriptions, as well as
a mapping from file variable names to ConfigRecord field names."""
inputs_fields = {}
fieldmap = {}
inputs_fields['im_file'] = 'Initial model file with data to be read into the Maestro basestate.'
fieldmap['model_file'] = 'im_file'
        inputs_fields['drdxfac'] = "Factor by which the finest grid's resolution is multiplied to get the base state resolution in spherical geometry."
inputs_fields['job_name'] = 'Description of the simulation.'
inputs_fields['max_levs'] = 'Number of levels the AMR will refine to.'
inputs_fields['coarse_res'] = 'Resolution of the base (coarsest) level'
fieldmap['n_cellx'] = 'coarse_res'
fieldmap['n_celly'] = 'coarse_res'
fieldmap['n_cellz'] = 'coarse_res'
inputs_fields['anelastic_cutoff'] = 'Density cutoff below which the Maestro velocity constraint is simplified to the anelastic constraint.'
inputs_fields['sponge_start_factor'] = 'The numerical sponge will start at sponge_center_density*sponge_start_factor.'
inputs_fields['octant'] = "Boolean that sets if an octant or full star should be modeled."
inputs_fields['dim'] = 'Dimensionality of the problem.'
fieldmap['dm_in'] = 'dim'
inputs_fields['physical_size'] = 'Sidelength in cm of the square domain.'
fieldmap['prob_hi_x'] = 'physical_size'
fieldmap['prob_hi_y'] = 'physical_size'
fieldmap['prob_hi_z'] = 'physical_size'
inputs_fields['plot_deltat'] = 'Time interval in s at which to save pltfiles.'
inputs_fields['mini_plot_deltat'] = 'Time interval in s at which to save minipltfiles.'
inputs_fields['chk_int'] = 'Timestep interval at which to save chkpoint files.'
inputs_fields['plot_base_name'] = 'Basename for pltfiles. Pltfiles will be saved with this name plus their timestep.'
inputs_fields['mini_plot_base_name'] = 'Basename for minipltfiles. Minipltfiles will be saved with this name plus their timestep.'
inputs_fields['check_base_name'] = 'Basename for checkpoint files. Chkfiles will be saved with this name plus their timestep.'
inputs_fields['bc_lo'] = 'Integer flag for the lower (x=y=z=0) boundary'
fieldmap['bcx_lo'] = 'bc_lo'
fieldmap['bcy_lo'] = 'bc_lo'
fieldmap['bcz_lo'] = 'bc_lo'
inputs_fields['bc_hi'] = 'Integer flag for the hi (x=y=z=max) boundary'
fieldmap['bcx_hi'] = 'bc_hi'
fieldmap['bcy_hi'] = 'bc_hi'
fieldmap['bcz_hi'] = 'bc_hi'
return inputs_fields, fieldmap
@staticmethod
def _getInputsTempText():
"""Returns the template text and leading space for an inputs file."""
#TODO Currently, programmer should make sure fields here are the same as
#in ConfigRecord. Would be nice to automagically do this.
#TODO Does this make sense as method? Can I just define it as property
#or some such?
#TODO Should decide if it makes sense to have more specific format
#specifiers. For now, assume string.
inputs_template = """&PROBIN
model_file = "{im_file:s}"
drdxfac = 5
job_name = "{job_name:s}"
use_alt_energy_fix = T
ppm_trace_forces = 0
hg_bottom_solver = 4
mg_bottom_solver = 4
max_mg_bottom_nlevels = 2
max_levs = {max_levs:s}
regrid_int = 2
n_cellx = {coarse_res:s}
n_celly = {coarse_res:s}
n_cellz = {coarse_res:s}
stop_time = 30000.
max_step = 100000000
init_iter = 1
init_divu_iter = 3
do_initial_projection = T
max_grid_size_1 = 32
max_grid_size_2 = 64
max_grid_size_3 = 128
the_sfc_threshold = 32768
anelastic_cutoff = {anelastic_cutoff:s}
base_cutoff_density = 10000.0
buoyancy_cutoff_factor = 2.d0
sponge_center_density = {anelastic_cutoff:s}
sponge_start_factor = {sponge_start_factor:s}
sponge_kappa = 10.0d0
spherical_in = 1
octant = {octant:s}
dm_in = {dim:s}
do_sponge = .true.
prob_hi_x = {physical_size:s}
prob_hi_y = {physical_size:s}
prob_hi_z = {physical_size:s}
plot_base_name = "{plot_base_name:s}"
plot_int = -1
plot_deltat = {plot_deltat:s}
mini_plot_base_name = "{mini_plot_base_name:s}"
mini_plot_int = -1
mini_plot_deltat = {mini_plot_deltat:s}
mini_plot_var1 = "species"
mini_plot_var2 = "velocity"
mini_plot_var3 = "temperature"
mini_plot_var4 = "radial_velocity"
check_base_name = "{check_base_name:s}"
chk_int = {chk_int:s}
cflfac = 0.7d0
init_shrink = 0.1d0
max_dt_growth = 1.1d0
use_soundspeed_firstdt = T
use_divu_firstdt = T
bcx_lo = {bc_lo:s}
bcx_hi = {bc_hi:s}
bcy_lo = {bc_lo:s}
bcy_hi = {bc_hi:s}
bcz_lo = {bc_lo:s}
bcz_hi = {bc_hi:s}
verbose = 1
mg_verbose = 1
cg_verbose = 0
do_burning = T
enthalpy_pred_type = 1
evolve_base_state = T
dpdt_factor = 0.0d0
species_pred_type = 1
use_tfromp = T
single_prec_plotfiles = T
use_eos_coulomb = T
plot_trac = F
plot_base = T
velpert_amplitude = 1.d5
velpert_scale = 5.d7
velpert_steep = 1.d7
/
"""
lead_space = 8
return inputs_template, lead_space
@staticmethod
def _getIMFields():
"""Get a dictionary of initial model fields and their descriptions."""
im_fields = {}
#Information about the executable
im_fields['im_exe'] = 'Full path to the initial model builder.'
#Parameters sent to the executable that builds the model
im_fields['nx'] = 'Resolution (number of cells) of the 1D model, should match Maestro base state resolution.'
im_fields['M_tot'] = 'Mass of the WD core in M_sol.'
im_fields['M_He'] = 'Mass of He envelope in M_sol.'
im_fields['delta'] = 'Transition delta from core to envelope in cm.'
im_fields['temp_core'] = 'Isothermal core temperature in K.'
im_fields['temp_base'] = 'Temperature at the base of the He envelope in K.'
im_fields['xmin'] = 'Spatial coordinate in cm the model starts at.'
im_fields['xmax'] = 'Spatial coordinate in cm of the last cell, should match the sidelength of domain in octant simulation, half sidelength for full star.'
im_fields['mixed_co_wd'] = 'Boolean that sets if core is C/O or just C.'
im_fields['low_density_cutoff'] = 'Density floor in the initial model (NOT for the 3D Maestro domain).'
im_fields['temp_fluff'] = 'Temperature floor, will also be temperature when below density floor.'
im_fields['smallt'] = 'An unused parameter that used to be like temp_fluff.'
#Arrays for data from the built initial model
im_fields['radius'] = 'NumPy array of initial model radius in cm.'
im_fields['density'] = 'NumPy array of initial model density in g/cm^3.'
im_fields['temperature'] = 'NumPy array of initial model temperature in K.'
im_fields['pressure'] = 'NumPy array of initial model pressure in dyn/cm^2.'
im_fields['soundspeed'] = 'NumPy array of initial model sound speed in cm/s.'
im_fields['entropy'] = 'NumPy array of initial model specific entropy in erg/(g*K).'
im_fields['species'] = 'NumPy 2D array of initial model species mass fractions.'
return im_fields
@staticmethod
def _getIMTempText():
"""Returns the template text and leading space for an initial model _params file."""
#TODO Currently, programmer should make sure fields here are the same as
#in ConfigRecord. Would be nice to automagically do this.
#TODO Does this make sense as method? Can I just define it as property
#or some such?
im_template = """¶ms
nx = {nx:s}
M_tot = {M_tot:s}
M_He = {M_He:s}
delta = {delta:s}
xmin = {xmin:s}
xmax = {xmax:s}
temp_core = {temp_core:s}
temp_base = {temp_base:s}
mixed_co_wd = {mixed_co_wd:s}
low_density_cutoff = {low_density_cutoff:s}
temp_fluff = {temp_fluff:s}
smallt = 1.d6
/
"""
lead_space = 8
return im_template, lead_space
class SCOutput(SimOutput):
    """Represents the products of a sub-Chandra simulation, such as the diagnostics files,
reduced data from pltfiles, output from the executable, etc..."""
def __init__(self, simdir):
"""Constructs an SCOutput object using an existing configuration in the
given directory."""
        super().__init__(simdir)
def _initFromDir(self, simdir):
"""Initialize this object using an existing configuration."""
raise NotImplementedError("""A subclass of SimOutput did not implement
this method or you're directly instantiating SimOutput. Either way,
NO!""")
#TODO Make driver for tests here (or make an independent test driver?)
# + Verify input info
# + Verify all object construction, generation works
|
bsd-3-clause
|
b1quint/samfp
|
samfp/old/spec_plot.py
|
1
|
4521
|
#!/usr/bin/env python
# -*- coding: utf8 -*-
from __future__ import division, print_function
import numpy as np
from astropy.io import fits as pyfits
from astropy.modeling import models
from astropy.modeling import fitting
from matplotlib import pyplot as plt
from scipy import interpolate
from scipy import signal
__author__ = 'Bruno Quint'
# Input Definition
i = 0
inputs = {
'files': ['/home/bquint/Data/SAMFP/20161123/002/spec.fits',
'/home/bquint/Data/SAMFP/20161123/003/spec.fits',
'/home/bquint/Data/SAMFP/20161123/004/spec.fits',
'/home/bquint/Data/SAMFP/20161123/005/spec.fits',
'/home/bquint/Data/SAMFP/20161123/006/spec.fits',
'/home/bquint/Data/SAMFP/20161123/007/spec.fits'
],
'comments': ['FP Parallel', 'x-fine = 2.0', 'y-fine = 2.6',
'y-fine = 1.6', 'FSR scanned', 'More than FSR scanned'
]
}
title = 'SAMFP SV2016B 2016-11-23\nSpectrum at the center of the rings\n' \
'spec{:03d}.fits - {:s}'
def main():
for i in range(6):
filename = inputs['files'][i]
# Read file
header = pyfits.getheader(filename)
spec = pyfits.getdata(filename)
z = get_z(header)
# Plot the raw data
fig = plt.figure(1)
ax = fig.add_subplot(111)
ax.set_title(title.format(i + 2, inputs['comments'][i]))
ax.set_xlabel('z [bcv]')
ax.set_ylabel('Intensity [counts]')
ax.set_xlim(z[0], z[-1])
ax.set_ylim(
ymin=spec.min() - 0.05 * spec.ptp(),
ymax=spec.max() + 0.15 * spec.ptp()
)
p1, = ax.plot(z, spec, 'o', color='black')
# Find maximum
        foo = np.where(
            spec > spec.min() + 0.75 * spec.max(), spec, 0
        )
maxima = signal.argrelmax(foo, axis=0, order=10)[0]
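        # argrelmax with order=10 only keeps points that exceed their 10
        # neighbours on each side of the thresholded spectrum, so noise
        # spikes right next to a peak are not counted as separate maxima.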
for m in maxima:
ax.axvline(x=z[m], linestyle=':', color='k', alpha=0.5)
# Lorentzian Modeling
new_z = np.linspace(z[0], z[-1], 1000)
lmodel = models.Lorentz1D()
lmodels = []
fitter = fitting.LevMarLSQFitter()
for m in maxima:
lmodel.x_0 = z[m]
lmodel.amplitude = spec[m]
f = fitter(lmodel, z, spec)
p2, = ax.plot(new_z, f(new_z), 'k-', alpha=0.75)
lmodels.append(f)
# Gaussian Modeling
new_z = np.linspace(z[0], z[-1], 1000)
g_model = models.Gaussian1D()
gmodels = []
fitter = fitting.LevMarLSQFitter()
for m in maxima:
g_model.mean = z[m]
g_model.amplitude = spec[m]
f = fitter(g_model, z, spec)
p3, = ax.plot(new_z, f(new_z), 'k--', alpha=0.5)
gmodels.append(f)
# Set labels
lgd = ax.legend(
(p1, p2, p3), ('Science data', 'Lorentzian fit', 'Gaussian fit'),
loc='upper right', bbox_to_anchor=(1.0, -0.1)
)
# Show fit parameters
        if len(maxima) == 1:
foo = 'Lorentzian FWHM: {:0.2f} bcv\n' \
'Gaussian FWHM: {:0.2f} bcv\n' \
'Center at: {:0.2f} bcv'
l_fwhm = lmodels[0].fwhm.value
            # Convert the Gaussian stddev to a FWHM: FWHM = 2*sqrt(2*ln(2))*sigma
            g_fwhm = 2.0 * np.sqrt(2.0 * np.log(2.0)) * gmodels[0].stddev.value
center = lmodels[0].x_0.value
fig.text(
0.1, -0.1, foo.format(l_fwhm, g_fwhm, center),
bbox={'facecolor': 'white', 'pad': 10},
horizontalalignment='left'
)
        elif len(maxima) == 2:
foo = 'Lorentzian FWHM: {:0.2f} bcv\n' \
'Gaussian FWHM: {:0.2f} bcv\n' \
'FSR: {:0.2f} bcv'
l_fwhm = (lmodels[0].fwhm.value + lmodels[1].fwhm.value) * 0.5
            # Convert the Gaussian stddevs to FWHMs: FWHM = 2*sqrt(2*ln(2))*sigma
            g_fwhm = 2.0 * np.sqrt(2.0 * np.log(2.0)) * (gmodels[0].stddev.value + gmodels[1].stddev.value) * 0.5
center = abs(lmodels[0].x_0.value - lmodels[1].x_0.value)
fig.text(
0.1, -0.1, foo.format(l_fwhm, g_fwhm, center),
bbox={'facecolor': 'white', 'pad': 10},
horizontalalignment='left'
)
# Save figure
fig.savefig(
'spec{:03d}'.format(i + 2), bbox_extra_artists=(lgd,),
bbox_inches='tight'
)
fig.clear()
def get_z(h):
"""
Parameters
----------
    h : astropy.io.fits.Header
        FITS header holding the spectral WCS keywords NAXIS1, CRPIX1,
        CDELT1 and CRVAL1.
    Returns
    -------
    z : numpy.ndarray
        Array of z values (in bcv) for each spectral channel.
"""
z = np.arange(h['NAXIS1'])
z = (z + 1 - h['CRPIX1']) * h['CDELT1'] + h['CRVAL1']
return z
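# Sketch of the WCS mapping in get_z with hypothetical header values: for
# CRPIX1=1, CRVAL1=1000.0 and CDELT1=8.0, channel index 0 maps to
# z = (0 + 1 - 1)*8.0 + 1000.0 = 1000.0 bcv and index 1 maps to 1008.0 bcv.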
if __name__ == '__main__':
main()
|
bsd-3-clause
|
mxjl620/scikit-learn
|
examples/linear_model/plot_iris_logistic.py
|
283
|
1678
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Logistic Regression 3-class Classifier
=========================================================
Shown below are the decision boundaries of a logistic-regression classifier on the
`iris <http://en.wikipedia.org/wiki/Iris_flower_data_set>`_ dataset. The
datapoints are colored according to their labels.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model, datasets
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features.
Y = iris.target
h = .02 # step size in the mesh
logreg = linear_model.LogisticRegression(C=1e5)
# Create an instance of the logistic regression classifier and fit the data.
logreg.fit(X, Y)
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
Z = logreg.predict(np.c_[xx.ravel(), yy.ravel()])
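# np.c_[xx.ravel(), yy.ravel()] flattens the mesh into an (n_points, 2) array,
# so predict() returns one class label per grid node before the reshape below.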
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.figure(1, figsize=(4, 3))
plt.pcolormesh(xx, yy, Z, cmap=plt.cm.Paired)
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=Y, edgecolors='k', cmap=plt.cm.Paired)
plt.xlabel('Sepal length')
plt.ylabel('Sepal width')
plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
plt.xticks(())
plt.yticks(())
plt.show()
|
bsd-3-clause
|
hiimivantang/whatsapp-analytics
|
regex_example.py
|
1
|
5206
|
import re
import sys
import dateutil.parser
import pandas as pd
import matplotlib.pyplot as plt
date_patterns = {
    "long_datetime" : r"(?P<datetime>\d{1,2}\s{1}\w{3}(\s{1}|\s{1}\d{4}\s{1})\d{2}:\d{2})",
    "short_datetime" : r"(?P<datetime>\d{2}/\d{2}/\d{4},\s{1}\d{2}:\d{2})"
}
message_pattern = r"\s{1}-\s{1}(?P<name>(.*?)):\s{1}(?P<message>(.*?))$"
action_pattern = r"\s{1}-\s{1}(?P<action>(.*?))$"
action_strings = {
"admin": "admin",
"change_icon": "changed this group's icon",
"change_subject": "changed the subject",
"added": "added",
"left": "left",
"removed": "removed"
}
class ChatElement:
def __init__(self, datetime, name, message, action):
self.datetime = datetime
self.name = name
self.message = message
self.action = action
class Chat:
def __init__(self, filename):
self.filename = filename
def open_file(self):
        with open(self.filename, 'r') as f:
            content = f.read().splitlines()
        return content
class Parser:
def parse_message(self,str):
for pattern in map(lambda x:x+message_pattern, date_patterns.values()):
m = re.match(pattern, str)
if m:
return (m.group('datetime'), m.group('name'), m.group('message'), None)
# if code comes here, message is continuation or action
for pattern in map(lambda x:x+action_pattern, date_patterns.values()):
m = re.match(pattern, str)
if m:
if any(action_string in m.group('action') for action_string in action_strings.values()):
for pattern in map(lambda x: "(?P<name>(.*?))"+x+"(.*?)", action_strings.values()):
m_action = re.match(pattern, m.group('action'))
if m_action:
return (m.group('datetime'), m_action.group('name'), None, m.group('action'))
sys.stderr.write("[failed to capture name from action] - %s\n" %(m.group('action')))
return (m.group('datetime'), None, None, m.group('action'))
#prone to return invalid continuation if above filtering doesn't cover all patterns for messages and actions
return (None, None, str, None)
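    # Example with a hypothetical chat line: "12/03/2016, 21:15 - Alice: hello"
    # matches short_datetime + message_pattern and parses to
    # ('12/03/2016, 21:15', 'Alice', 'hello', None), while a bare continuation
    # line such as "see you tomorrow" falls through to (None, None, 'see you tomorrow', None).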
def process(self, content):
messages = []
for row in content:
parsed = self.parse_message(row)
if parsed[0] is None:
messages[-1].message += parsed[2]
else:
messages.append(ChatElement(*parsed))
j = 1
df = pd.DataFrame(index=range(1, len(messages)+1), columns=['name','message','action','date_string'])
for message in messages:
if message.datetime is None:
sys.stderr.write("[failed to add chatelement to dataframe] - %s, %s, %s, %s\n" %(message.datetime, message.name, message.message, message.action))
else:
                df.loc[j, 'name'] = message.name
                df.loc[j, 'message'] = message.message
                df.loc[j, 'action'] = message.action
                df.loc[j, 'date_string'] = message.datetime
j += 1
df['Time'] = df['date_string'].map(lambda x: dateutil.parser.parse(x))
df['Day'] = df['date_string'].map(lambda x: dateutil.parser.parse(x).strftime("%a"))
df['Date'] = df['date_string'].map(lambda x:dateutil.parser.parse(x).strftime("%x"))
df['Hour'] = df['date_string'].map(lambda x:dateutil.parser.parse(x).strftime("%H"))
df_actions = df[pd.isnull(df['message'])]
df_messages = df[pd.isnull(df['action'])]
return df_messages, df_actions
#def responses(df):
# ## Create Empty Response Matrix
# labels = df['name'].unique()
# responses = pd.DataFrame(0,index=labels,columns=labels)
#
# ## Update values in Response Matrix
# # self.df.sort(columns='Time', inplace = 1)
# x, y = 1, 2
# while y < len(df):
# n1 = df.ix[x]['name']
# n2 = df.ix[y]['name']
# if n1 != n2: #only responses to others are valid
# responses.loc[n1,n2]=responses.loc[n1,n2]+1
# y = y + 1
# x = x + 1
# return responses
def charts(df):
## Create Canvas
fig = plt.figure()
    fig.suptitle("Whatsappening")
ax1 = plt.subplot2grid((4,6), (0,0), rowspan=2, colspan=2)
ax2 = plt.subplot2grid((4,6), (0,2),rowspan=2,colspan=2)
ax3 = plt.subplot2grid((4,6),(0,4), rowspan=2, colspan=2)
ax4 = plt.subplot2grid((4,6),(2,0), rowspan=2, colspan = 6)
## Create Charts
df.groupby('Hour').count().plot(ax=ax1, legend = None, title ="Hour of Day")
df.groupby('Day').count().plot(y="message",ax=ax2, kind='bar', legend = None, title = 'Days')
df.name.value_counts().plot(ax=ax3,kind = 'bar', title = 'Number of messages')
df.groupby('Date').count().plot(y="message",ax=ax4, legend = None, title = 'Message by Date')
plt.subplots_adjust(wspace=0.46, hspace=1)
plt.show()
def main():
chat1 = Chat("chat1.txt")
chat2 = Chat("chat2.txt")
content = chat1.open_file() + chat2.open_file()
parser = Parser()
df_messages, df_actions = parser.process(content)
    charts(df_messages)
if __name__ == '__main__':
main()
|
mit
|
mwv/scikit-learn
|
sklearn/manifold/tests/test_t_sne.py
|
162
|
9771
|
import sys
from sklearn.externals.six.moves import cStringIO as StringIO
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises_regexp
from sklearn.utils import check_random_state
from sklearn.manifold.t_sne import _joint_probabilities
from sklearn.manifold.t_sne import _kl_divergence
from sklearn.manifold.t_sne import _gradient_descent
from sklearn.manifold.t_sne import trustworthiness
from sklearn.manifold.t_sne import TSNE
from sklearn.manifold._utils import _binary_search_perplexity
from scipy.optimize import check_grad
from scipy.spatial.distance import pdist
from scipy.spatial.distance import squareform
def test_gradient_descent_stops():
# Test stopping conditions of gradient descent.
class ObjectiveSmallGradient:
def __init__(self):
self.it = -1
def __call__(self, _):
self.it += 1
return (10 - self.it) / 10.0, np.array([1e-5])
def flat_function(_):
return 0.0, np.ones(1)
# Gradient norm
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
_, error, it = _gradient_descent(
ObjectiveSmallGradient(), np.zeros(1), 0, n_iter=100,
n_iter_without_progress=100, momentum=0.0, learning_rate=0.0,
min_gain=0.0, min_grad_norm=1e-5, min_error_diff=0.0, verbose=2)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert_equal(error, 1.0)
assert_equal(it, 0)
assert("gradient norm" in out)
# Error difference
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
_, error, it = _gradient_descent(
ObjectiveSmallGradient(), np.zeros(1), 0, n_iter=100,
n_iter_without_progress=100, momentum=0.0, learning_rate=0.0,
min_gain=0.0, min_grad_norm=0.0, min_error_diff=0.2, verbose=2)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert_equal(error, 0.9)
assert_equal(it, 1)
assert("error difference" in out)
# Maximum number of iterations without improvement
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
_, error, it = _gradient_descent(
flat_function, np.zeros(1), 0, n_iter=100,
n_iter_without_progress=10, momentum=0.0, learning_rate=0.0,
min_gain=0.0, min_grad_norm=0.0, min_error_diff=-1.0, verbose=2)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert_equal(error, 0.0)
assert_equal(it, 11)
assert("did not make any progress" in out)
# Maximum number of iterations
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
_, error, it = _gradient_descent(
ObjectiveSmallGradient(), np.zeros(1), 0, n_iter=11,
n_iter_without_progress=100, momentum=0.0, learning_rate=0.0,
min_gain=0.0, min_grad_norm=0.0, min_error_diff=0.0, verbose=2)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert_equal(error, 0.0)
assert_equal(it, 10)
assert("Iteration 10" in out)
def test_binary_search():
# Test if the binary search finds Gaussians with desired perplexity.
random_state = check_random_state(0)
distances = random_state.randn(50, 2)
distances = distances.dot(distances.T)
np.fill_diagonal(distances, 0.0)
desired_perplexity = 25.0
P = _binary_search_perplexity(distances, desired_perplexity, verbose=0)
P = np.maximum(P, np.finfo(np.double).eps)
mean_perplexity = np.mean([np.exp(-np.sum(P[i] * np.log(P[i])))
for i in range(P.shape[0])])
assert_almost_equal(mean_perplexity, desired_perplexity, decimal=3)
def test_gradient():
# Test gradient of Kullback-Leibler divergence.
random_state = check_random_state(0)
n_samples = 50
n_features = 2
n_components = 2
alpha = 1.0
distances = random_state.randn(n_samples, n_features)
distances = distances.dot(distances.T)
np.fill_diagonal(distances, 0.0)
X_embedded = random_state.randn(n_samples, n_components)
P = _joint_probabilities(distances, desired_perplexity=25.0,
verbose=0)
fun = lambda params: _kl_divergence(params, P, alpha, n_samples,
n_components)[0]
grad = lambda params: _kl_divergence(params, P, alpha, n_samples,
n_components)[1]
assert_almost_equal(check_grad(fun, grad, X_embedded.ravel()), 0.0,
decimal=5)
def test_trustworthiness():
# Test trustworthiness score.
random_state = check_random_state(0)
# Affine transformation
X = random_state.randn(100, 2)
assert_equal(trustworthiness(X, 5.0 + X / 10.0), 1.0)
# Randomly shuffled
X = np.arange(100).reshape(-1, 1)
X_embedded = X.copy()
random_state.shuffle(X_embedded)
assert_less(trustworthiness(X, X_embedded), 0.6)
# Completely different
X = np.arange(5).reshape(-1, 1)
X_embedded = np.array([[0], [2], [4], [1], [3]])
assert_almost_equal(trustworthiness(X, X_embedded, n_neighbors=1), 0.2)
def test_preserve_trustworthiness_approximately():
# Nearest neighbors should be preserved approximately.
random_state = check_random_state(0)
X = random_state.randn(100, 2)
for init in ('random', 'pca'):
tsne = TSNE(n_components=2, perplexity=10, learning_rate=100.0,
init=init, random_state=0)
X_embedded = tsne.fit_transform(X)
assert_almost_equal(trustworthiness(X, X_embedded, n_neighbors=1), 1.0,
decimal=1)
def test_fit_csr_matrix():
# X can be a sparse matrix.
random_state = check_random_state(0)
X = random_state.randn(100, 2)
X[(np.random.randint(0, 100, 50), np.random.randint(0, 2, 50))] = 0.0
X_csr = sp.csr_matrix(X)
tsne = TSNE(n_components=2, perplexity=10, learning_rate=100.0,
random_state=0)
X_embedded = tsne.fit_transform(X_csr)
assert_almost_equal(trustworthiness(X_csr, X_embedded, n_neighbors=1), 1.0,
decimal=1)
def test_preserve_trustworthiness_approximately_with_precomputed_distances():
# Nearest neighbors should be preserved approximately.
random_state = check_random_state(0)
X = random_state.randn(100, 2)
D = squareform(pdist(X), "sqeuclidean")
tsne = TSNE(n_components=2, perplexity=10, learning_rate=100.0,
metric="precomputed", random_state=0)
X_embedded = tsne.fit_transform(D)
assert_almost_equal(trustworthiness(D, X_embedded, n_neighbors=1,
precomputed=True), 1.0, decimal=1)
def test_early_exaggeration_too_small():
# Early exaggeration factor must be >= 1.
tsne = TSNE(early_exaggeration=0.99)
assert_raises_regexp(ValueError, "early_exaggeration .*",
tsne.fit_transform, np.array([[0.0]]))
def test_too_few_iterations():
# Number of gradient descent iterations must be at least 200.
tsne = TSNE(n_iter=199)
assert_raises_regexp(ValueError, "n_iter .*", tsne.fit_transform,
np.array([[0.0]]))
def test_non_square_precomputed_distances():
# Precomputed distance matrices must be square matrices.
tsne = TSNE(metric="precomputed")
assert_raises_regexp(ValueError, ".* square distance matrix",
tsne.fit_transform, np.array([[0.0], [1.0]]))
def test_init_not_available():
# 'init' must be 'pca' or 'random'.
assert_raises_regexp(ValueError, "'init' must be either 'pca' or 'random'",
TSNE, init="not available")
def test_distance_not_available():
# 'metric' must be valid.
tsne = TSNE(metric="not available")
assert_raises_regexp(ValueError, "Unknown metric not available.*",
tsne.fit_transform, np.array([[0.0], [1.0]]))
def test_pca_initialization_not_compatible_with_precomputed_kernel():
# Precomputed distance matrices must be square matrices.
tsne = TSNE(metric="precomputed", init="pca")
assert_raises_regexp(ValueError, "The parameter init=\"pca\" cannot be "
"used with metric=\"precomputed\".",
tsne.fit_transform, np.array([[0.0], [1.0]]))
def test_verbose():
random_state = check_random_state(0)
tsne = TSNE(verbose=2)
X = random_state.randn(5, 2)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
tsne.fit_transform(X)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert("[t-SNE]" in out)
assert("Computing pairwise distances" in out)
assert("Computed conditional probabilities" in out)
assert("Mean sigma" in out)
assert("Finished" in out)
assert("early exaggeration" in out)
assert("Finished" in out)
def test_chebyshev_metric():
# t-SNE should allow metrics that cannot be squared (issue #3526).
random_state = check_random_state(0)
tsne = TSNE(metric="chebyshev")
X = random_state.randn(5, 2)
tsne.fit_transform(X)
def test_reduction_to_one_component():
# t-SNE should allow reduction to one component (issue #4154).
random_state = check_random_state(0)
tsne = TSNE(n_components=1)
X = random_state.randn(5, 2)
X_embedded = tsne.fit(X).embedding_
assert(np.all(np.isfinite(X_embedded)))
|
bsd-3-clause
|
spallavolu/scikit-learn
|
sklearn/metrics/ranking.py
|
79
|
25426
|
"""Metrics to assess performance on classification task given scores
Functions named as ``*_score`` return a scalar value to maximize: the higher
the better
Functions named as ``*_error`` or ``*_loss`` return a scalar value to minimize:
the lower the better
"""
# Authors: Alexandre Gramfort <[email protected]>
# Mathieu Blondel <[email protected]>
# Olivier Grisel <[email protected]>
# Arnaud Joly <[email protected]>
# Jochen Wersdorfer <[email protected]>
# Lars Buitinck <[email protected]>
# Joel Nothman <[email protected]>
# Noel Dawe <[email protected]>
# License: BSD 3 clause
from __future__ import division
import warnings
import numpy as np
from scipy.sparse import csr_matrix
from ..utils import check_consistent_length
from ..utils import column_or_1d, check_array
from ..utils.multiclass import type_of_target
from ..utils.fixes import isclose
from ..utils.fixes import bincount
from ..utils.stats import rankdata
from ..utils.sparsefuncs import count_nonzero
from .base import _average_binary_score
from .base import UndefinedMetricWarning
def auc(x, y, reorder=False):
"""Compute Area Under the Curve (AUC) using the trapezoidal rule
This is a general function, given points on a curve. For computing the
area under the ROC-curve, see :func:`roc_auc_score`.
Parameters
----------
x : array, shape = [n]
x coordinates.
y : array, shape = [n]
y coordinates.
reorder : boolean, optional (default=False)
If True, assume that the curve is ascending in the case of ties, as for
an ROC curve. If the curve is non-ascending, the result will be wrong.
Returns
-------
auc : float
Examples
--------
>>> import numpy as np
>>> from sklearn import metrics
>>> y = np.array([1, 1, 2, 2])
>>> pred = np.array([0.1, 0.4, 0.35, 0.8])
>>> fpr, tpr, thresholds = metrics.roc_curve(y, pred, pos_label=2)
>>> metrics.auc(fpr, tpr)
0.75
See also
--------
roc_auc_score : Computes the area under the ROC curve
precision_recall_curve :
Compute precision-recall pairs for different probability thresholds
"""
check_consistent_length(x, y)
x = column_or_1d(x)
y = column_or_1d(y)
if x.shape[0] < 2:
raise ValueError('At least 2 points are needed to compute'
' area under curve, but x.shape = %s' % x.shape)
direction = 1
if reorder:
# reorder the data points according to the x axis and using y to
# break ties
order = np.lexsort((y, x))
x, y = x[order], y[order]
else:
dx = np.diff(x)
if np.any(dx < 0):
if np.all(dx <= 0):
direction = -1
else:
raise ValueError("Reordering is not turned on, and "
"the x array is not increasing: %s" % x)
area = direction * np.trapz(y, x)
return area
def average_precision_score(y_true, y_score, average="macro",
sample_weight=None):
"""Compute average precision (AP) from prediction scores
This score corresponds to the area under the precision-recall curve.
Note: this implementation is restricted to the binary classification task
or multilabel classification task.
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : array, shape = [n_samples] or [n_samples, n_classes]
True binary labels in binary label indicators.
y_score : array, shape = [n_samples] or [n_samples, n_classes]
Target scores, can either be probability estimates of the positive
class, confidence values, or binary decisions.
average : string, [None, 'micro', 'macro' (default), 'samples', 'weighted']
If ``None``, the scores for each class are returned. Otherwise,
this determines the type of averaging performed on the data:
``'micro'``:
Calculate metrics globally by considering each element of the label
indicator matrix as a label.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label).
``'samples'``:
Calculate metrics for each instance, and find their average.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
average_precision : float
References
----------
.. [1] `Wikipedia entry for the Average precision
<http://en.wikipedia.org/wiki/Average_precision>`_
See also
--------
roc_auc_score : Area under the ROC curve
precision_recall_curve :
Compute precision-recall pairs for different probability thresholds
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import average_precision_score
>>> y_true = np.array([0, 0, 1, 1])
>>> y_scores = np.array([0.1, 0.4, 0.35, 0.8])
>>> average_precision_score(y_true, y_scores) # doctest: +ELLIPSIS
0.79...
"""
def _binary_average_precision(y_true, y_score, sample_weight=None):
precision, recall, thresholds = precision_recall_curve(
y_true, y_score, sample_weight=sample_weight)
return auc(recall, precision)
return _average_binary_score(_binary_average_precision, y_true, y_score,
average, sample_weight=sample_weight)
def roc_auc_score(y_true, y_score, average="macro", sample_weight=None):
"""Compute Area Under the Curve (AUC) from prediction scores
Note: this implementation is restricted to the binary classification task
or multilabel classification task in label indicator format.
Read more in the :ref:`User Guide <roc_metrics>`.
Parameters
----------
y_true : array, shape = [n_samples] or [n_samples, n_classes]
True binary labels in binary label indicators.
y_score : array, shape = [n_samples] or [n_samples, n_classes]
Target scores, can either be probability estimates of the positive
class, confidence values, or binary decisions.
average : string, [None, 'micro', 'macro' (default), 'samples', 'weighted']
If ``None``, the scores for each class are returned. Otherwise,
this determines the type of averaging performed on the data:
``'micro'``:
Calculate metrics globally by considering each element of the label
indicator matrix as a label.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label).
``'samples'``:
Calculate metrics for each instance, and find their average.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
auc : float
References
----------
.. [1] `Wikipedia entry for the Receiver operating characteristic
<http://en.wikipedia.org/wiki/Receiver_operating_characteristic>`_
See also
--------
average_precision_score : Area under the precision-recall curve
roc_curve : Compute Receiver operating characteristic (ROC)
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import roc_auc_score
>>> y_true = np.array([0, 0, 1, 1])
>>> y_scores = np.array([0.1, 0.4, 0.35, 0.8])
>>> roc_auc_score(y_true, y_scores)
0.75
"""
def _binary_roc_auc_score(y_true, y_score, sample_weight=None):
if len(np.unique(y_true)) != 2:
raise ValueError("Only one class present in y_true. ROC AUC score "
"is not defined in that case.")
        fpr, tpr, thresholds = roc_curve(y_true, y_score,
sample_weight=sample_weight)
return auc(fpr, tpr, reorder=True)
return _average_binary_score(
_binary_roc_auc_score, y_true, y_score, average,
sample_weight=sample_weight)
def _binary_clf_curve(y_true, y_score, pos_label=None, sample_weight=None):
"""Calculate true and false positives per binary classification threshold.
Parameters
----------
y_true : array, shape = [n_samples]
True targets of binary classification
y_score : array, shape = [n_samples]
Estimated probabilities or decision function
pos_label : int, optional (default=None)
The label of the positive class
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
fps : array, shape = [n_thresholds]
A count of false positives, at index i being the number of negative
samples assigned a score >= thresholds[i]. The total number of
negative samples is equal to fps[-1] (thus true negatives are given by
fps[-1] - fps).
tps : array, shape = [n_thresholds <= len(np.unique(y_score))]
An increasing count of true positives, at index i being the number
of positive samples assigned a score >= thresholds[i]. The total
number of positive samples is equal to tps[-1] (thus false negatives
are given by tps[-1] - tps).
thresholds : array, shape = [n_thresholds]
Decreasing score values.
"""
check_consistent_length(y_true, y_score)
y_true = column_or_1d(y_true)
y_score = column_or_1d(y_score)
if sample_weight is not None:
sample_weight = column_or_1d(sample_weight)
# ensure binary classification if pos_label is not specified
classes = np.unique(y_true)
if (pos_label is None and
not (np.all(classes == [0, 1]) or
np.all(classes == [-1, 1]) or
np.all(classes == [0]) or
np.all(classes == [-1]) or
np.all(classes == [1]))):
raise ValueError("Data is not binary and pos_label is not specified")
elif pos_label is None:
pos_label = 1.
# make y_true a boolean vector
y_true = (y_true == pos_label)
# sort scores and corresponding truth values
desc_score_indices = np.argsort(y_score, kind="mergesort")[::-1]
y_score = y_score[desc_score_indices]
y_true = y_true[desc_score_indices]
if sample_weight is not None:
weight = sample_weight[desc_score_indices]
else:
weight = 1.
# y_score typically has many tied values. Here we extract
# the indices associated with the distinct values. We also
# concatenate a value for the end of the curve.
# We need to use isclose to avoid spurious repeated thresholds
# stemming from floating point roundoff errors.
distinct_value_indices = np.where(np.logical_not(isclose(
np.diff(y_score), 0)))[0]
threshold_idxs = np.r_[distinct_value_indices, y_true.size - 1]
# accumulate the true positives with decreasing threshold
tps = (y_true * weight).cumsum()[threshold_idxs]
if sample_weight is not None:
fps = weight.cumsum()[threshold_idxs] - tps
else:
fps = 1 + threshold_idxs - tps
return fps, tps, y_score[threshold_idxs]
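# Worked example with hypothetical inputs: for y_true = [0, 0, 1, 1] and
# y_score = [0.1, 0.4, 0.35, 0.8] with pos_label=1, the descending scores are
# [0.8, 0.4, 0.35, 0.1], giving tps = [1, 1, 2, 2], fps = [0, 1, 1, 2] and
# thresholds = [0.8, 0.4, 0.35, 0.1]; roc_curve and precision_recall_curve
# below then turn these cumulative counts into rates.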
def precision_recall_curve(y_true, probas_pred, pos_label=None,
sample_weight=None):
"""Compute precision-recall pairs for different probability thresholds
Note: this implementation is restricted to the binary classification task.
The precision is the ratio ``tp / (tp + fp)`` where ``tp`` is the number of
true positives and ``fp`` the number of false positives. The precision is
intuitively the ability of the classifier not to label as positive a sample
that is negative.
The recall is the ratio ``tp / (tp + fn)`` where ``tp`` is the number of
true positives and ``fn`` the number of false negatives. The recall is
intuitively the ability of the classifier to find all the positive samples.
The last precision and recall values are 1. and 0. respectively and do not
have a corresponding threshold. This ensures that the graph starts on the
x axis.
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : array, shape = [n_samples]
True targets of binary classification in range {-1, 1} or {0, 1}.
probas_pred : array, shape = [n_samples]
Estimated probabilities or decision function.
pos_label : int, optional (default=None)
The label of the positive class
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
precision : array, shape = [n_thresholds + 1]
Precision values such that element i is the precision of
predictions with score >= thresholds[i] and the last element is 1.
recall : array, shape = [n_thresholds + 1]
Decreasing recall values such that element i is the recall of
predictions with score >= thresholds[i] and the last element is 0.
thresholds : array, shape = [n_thresholds <= len(np.unique(probas_pred))]
Increasing thresholds on the decision function used to compute
precision and recall.
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import precision_recall_curve
>>> y_true = np.array([0, 0, 1, 1])
>>> y_scores = np.array([0.1, 0.4, 0.35, 0.8])
>>> precision, recall, thresholds = precision_recall_curve(
... y_true, y_scores)
>>> precision # doctest: +ELLIPSIS
array([ 0.66..., 0.5 , 1. , 1. ])
>>> recall
array([ 1. , 0.5, 0.5, 0. ])
>>> thresholds
array([ 0.35, 0.4 , 0.8 ])
"""
fps, tps, thresholds = _binary_clf_curve(y_true, probas_pred,
pos_label=pos_label,
sample_weight=sample_weight)
precision = tps / (tps + fps)
recall = tps / tps[-1]
# stop when full recall attained
# and reverse the outputs so recall is decreasing
last_ind = tps.searchsorted(tps[-1])
sl = slice(last_ind, None, -1)
return np.r_[precision[sl], 1], np.r_[recall[sl], 0], thresholds[sl]
def roc_curve(y_true, y_score, pos_label=None, sample_weight=None):
"""Compute Receiver operating characteristic (ROC)
Note: this implementation is restricted to the binary classification task.
Read more in the :ref:`User Guide <roc_metrics>`.
Parameters
----------
y_true : array, shape = [n_samples]
True binary labels in range {0, 1} or {-1, 1}. If labels are not
binary, pos_label should be explicitly given.
y_score : array, shape = [n_samples]
Target scores, can either be probability estimates of the positive
class or confidence values.
pos_label : int
Label considered as positive and others are considered negative.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
fpr : array, shape = [>2]
Increasing false positive rates such that element i is the false
positive rate of predictions with score >= thresholds[i].
tpr : array, shape = [>2]
Increasing true positive rates such that element i is the true
positive rate of predictions with score >= thresholds[i].
thresholds : array, shape = [n_thresholds]
Decreasing thresholds on the decision function used to compute
fpr and tpr. `thresholds[0]` represents no instances being predicted
and is arbitrarily set to `max(y_score) + 1`.
See also
--------
roc_auc_score : Compute Area Under the Curve (AUC) from prediction scores
Notes
-----
Since the thresholds are sorted from low to high values, they
are reversed upon returning them to ensure they correspond to both ``fpr``
and ``tpr``, which are sorted in reversed order during their calculation.
References
----------
.. [1] `Wikipedia entry for the Receiver operating characteristic
<http://en.wikipedia.org/wiki/Receiver_operating_characteristic>`_
Examples
--------
>>> import numpy as np
>>> from sklearn import metrics
>>> y = np.array([1, 1, 2, 2])
>>> scores = np.array([0.1, 0.4, 0.35, 0.8])
>>> fpr, tpr, thresholds = metrics.roc_curve(y, scores, pos_label=2)
>>> fpr
array([ 0. , 0.5, 0.5, 1. ])
>>> tpr
array([ 0.5, 0.5, 1. , 1. ])
>>> thresholds
array([ 0.8 , 0.4 , 0.35, 0.1 ])
"""
fps, tps, thresholds = _binary_clf_curve(
y_true, y_score, pos_label=pos_label, sample_weight=sample_weight)
if tps.size == 0 or fps[0] != 0:
# Add an extra threshold position if necessary
tps = np.r_[0, tps]
fps = np.r_[0, fps]
thresholds = np.r_[thresholds[0] + 1, thresholds]
if fps[-1] <= 0:
warnings.warn("No negative samples in y_true, "
"false positive value should be meaningless",
UndefinedMetricWarning)
fpr = np.repeat(np.nan, fps.shape)
else:
fpr = fps / fps[-1]
if tps[-1] <= 0:
warnings.warn("No positive samples in y_true, "
"true positive value should be meaningless",
UndefinedMetricWarning)
tpr = np.repeat(np.nan, tps.shape)
else:
tpr = tps / tps[-1]
return fpr, tpr, thresholds
def label_ranking_average_precision_score(y_true, y_score):
"""Compute ranking-based average precision
Label ranking average precision (LRAP) is the average over each ground
truth label assigned to each sample, of the ratio of true vs. total
labels with lower score.
This metric is used in multilabel ranking problem, where the goal
is to give better rank to the labels associated to each sample.
The obtained score is always strictly greater than 0 and
the best value is 1.
Read more in the :ref:`User Guide <label_ranking_average_precision>`.
Parameters
----------
y_true : array or sparse matrix, shape = [n_samples, n_labels]
True binary labels in binary indicator format.
y_score : array, shape = [n_samples, n_labels]
Target scores, can either be probability estimates of the positive
class, confidence values, or binary decisions.
Returns
-------
score : float
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import label_ranking_average_precision_score
>>> y_true = np.array([[1, 0, 0], [0, 0, 1]])
>>> y_score = np.array([[0.75, 0.5, 1], [1, 0.2, 0.1]])
>>> label_ranking_average_precision_score(y_true, y_score) \
# doctest: +ELLIPSIS
0.416...
"""
check_consistent_length(y_true, y_score)
y_true = check_array(y_true, ensure_2d=False)
y_score = check_array(y_score, ensure_2d=False)
if y_true.shape != y_score.shape:
raise ValueError("y_true and y_score have different shape")
    # Handle badly formatted array and the degenerate case with one label
y_type = type_of_target(y_true)
if (y_type != "multilabel-indicator" and
not (y_type == "binary" and y_true.ndim == 2)):
raise ValueError("{0} format is not supported".format(y_type))
y_true = csr_matrix(y_true)
y_score = -y_score
n_samples, n_labels = y_true.shape
out = 0.
for i, (start, stop) in enumerate(zip(y_true.indptr, y_true.indptr[1:])):
relevant = y_true.indices[start:stop]
if (relevant.size == 0 or relevant.size == n_labels):
            # If all labels are relevant or irrelevant, the score is also
            # equal to 1. The label ranking has no meaning.
out += 1.
continue
scores_i = y_score[i]
rank = rankdata(scores_i, 'max')[relevant]
L = rankdata(scores_i[relevant], 'max')
out += (L / rank).mean()
return out / n_samples
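# Worked illustration (not part of the original module) of the docstring
# example: in the first sample the only relevant label has score 0.75 and is
# ranked 2nd overall (rank = 2) and 1st among the relevant labels (L = 1),
# contributing 1/2; in the second sample the relevant label has score 0.1 and
# is ranked 3rd overall, contributing 1/3; the average over the two samples is
# (1/2 + 1/3) / 2 = 0.416...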
def coverage_error(y_true, y_score, sample_weight=None):
"""Coverage error measure
Compute how far we need to go through the ranked scores to cover all
true labels. The best value is equal to the average number
of labels in ``y_true`` per sample.
    Ties in ``y_scores`` are broken by giving the maximal rank that would have
    been assigned to all tied values.
Read more in the :ref:`User Guide <coverage_error>`.
Parameters
----------
y_true : array, shape = [n_samples, n_labels]
True binary labels in binary indicator format.
y_score : array, shape = [n_samples, n_labels]
Target scores, can either be probability estimates of the positive
class, confidence values, or binary decisions.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
coverage_error : float
References
----------
.. [1] Tsoumakas, G., Katakis, I., & Vlahavas, I. (2010).
Mining multi-label data. In Data mining and knowledge discovery
handbook (pp. 667-685). Springer US.
"""
y_true = check_array(y_true, ensure_2d=False)
y_score = check_array(y_score, ensure_2d=False)
check_consistent_length(y_true, y_score, sample_weight)
y_type = type_of_target(y_true)
if y_type != "multilabel-indicator":
raise ValueError("{0} format is not supported".format(y_type))
if y_true.shape != y_score.shape:
raise ValueError("y_true and y_score have different shape")
y_score_mask = np.ma.masked_array(y_score, mask=np.logical_not(y_true))
y_min_relevant = y_score_mask.min(axis=1).reshape((-1, 1))
coverage = (y_score >= y_min_relevant).sum(axis=1)
coverage = coverage.filled(0)
return np.average(coverage, weights=sample_weight)
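# Illustrative example (not part of the original module), reusing the inputs of
# the label ranking example above:
#   >>> coverage_error([[1, 0, 0], [0, 0, 1]], [[0.75, 0.5, 1], [1, 0.2, 0.1]])
#   2.5
# For the first sample two labels score at least as high as the relevant one
# (coverage 2); for the second sample all three labels do (coverage 3); the
# average over the two samples is 2.5.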
def label_ranking_loss(y_true, y_score, sample_weight=None):
"""Compute Ranking loss measure
Compute the average number of label pairs that are incorrectly ordered
given y_score weighted by the size of the label set and the number of
labels not in the label set.
This is similar to the error set size, but weighted by the number of
relevant and irrelevant labels. The best performance is achieved with
a ranking loss of zero.
Read more in the :ref:`User Guide <label_ranking_loss>`.
Parameters
----------
y_true : array or sparse matrix, shape = [n_samples, n_labels]
True binary labels in binary indicator format.
y_score : array, shape = [n_samples, n_labels]
Target scores, can either be probability estimates of the positive
class, confidence values, or binary decisions.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
loss : float
References
----------
.. [1] Tsoumakas, G., Katakis, I., & Vlahavas, I. (2010).
Mining multi-label data. In Data mining and knowledge discovery
handbook (pp. 667-685). Springer US.
"""
y_true = check_array(y_true, ensure_2d=False, accept_sparse='csr')
y_score = check_array(y_score, ensure_2d=False)
check_consistent_length(y_true, y_score, sample_weight)
y_type = type_of_target(y_true)
if y_type not in ("multilabel-indicator",):
raise ValueError("{0} format is not supported".format(y_type))
if y_true.shape != y_score.shape:
raise ValueError("y_true and y_score have different shape")
n_samples, n_labels = y_true.shape
y_true = csr_matrix(y_true)
loss = np.zeros(n_samples)
for i, (start, stop) in enumerate(zip(y_true.indptr, y_true.indptr[1:])):
# Sort and bin the label scores
unique_scores, unique_inverse = np.unique(y_score[i],
return_inverse=True)
true_at_reversed_rank = bincount(
unique_inverse[y_true.indices[start:stop]],
minlength=len(unique_scores))
all_at_reversed_rank = bincount(unique_inverse,
minlength=len(unique_scores))
false_at_reversed_rank = all_at_reversed_rank - true_at_reversed_rank
# if the scores are ordered, it's possible to count the number of
        # incorrectly ordered pairs in linear time by cumulatively counting
# how many false labels of a given score have a score higher than the
# accumulated true labels with lower score.
loss[i] = np.dot(true_at_reversed_rank.cumsum(),
false_at_reversed_rank)
n_positives = count_nonzero(y_true, axis=1)
with np.errstate(divide="ignore", invalid="ignore"):
loss /= ((n_labels - n_positives) * n_positives)
    # When there are no positive or no negative labels, those values should
    # be considered correct, i.e. the ranking doesn't matter.
loss[np.logical_or(n_positives == 0, n_positives == n_labels)] = 0.
return np.average(loss, weights=sample_weight)
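# Illustrative example (not part of the original module), with the same inputs:
#   >>> label_ranking_loss([[1, 0, 0], [0, 0, 1]], [[0.75, 0.5, 1], [1, 0.2, 0.1]])
#   0.75
# In the first sample one of the two (relevant, irrelevant) pairs is ordered
# incorrectly (loss 1/2); in the second sample both pairs are (loss 1); the
# average over samples is 0.75.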
|
bsd-3-clause
|
TomAugspurger/pandas
|
pandas/tests/frame/methods/test_head_tail.py
|
2
|
1194
|
import numpy as np
from pandas import DataFrame
import pandas._testing as tm
def test_head_tail(float_frame):
tm.assert_frame_equal(float_frame.head(), float_frame[:5])
tm.assert_frame_equal(float_frame.tail(), float_frame[-5:])
tm.assert_frame_equal(float_frame.head(0), float_frame[0:0])
tm.assert_frame_equal(float_frame.tail(0), float_frame[0:0])
tm.assert_frame_equal(float_frame.head(-1), float_frame[:-1])
tm.assert_frame_equal(float_frame.tail(-1), float_frame[1:])
tm.assert_frame_equal(float_frame.head(1), float_frame[:1])
tm.assert_frame_equal(float_frame.tail(1), float_frame[-1:])
# with a float index
df = float_frame.copy()
df.index = np.arange(len(float_frame)) + 0.1
tm.assert_frame_equal(df.head(), df.iloc[:5])
tm.assert_frame_equal(df.tail(), df.iloc[-5:])
tm.assert_frame_equal(df.head(0), df[0:0])
tm.assert_frame_equal(df.tail(0), df[0:0])
tm.assert_frame_equal(df.head(-1), df.iloc[:-1])
tm.assert_frame_equal(df.tail(-1), df.iloc[1:])
# test empty dataframe
empty_df = DataFrame()
tm.assert_frame_equal(empty_df.tail(), empty_df)
tm.assert_frame_equal(empty_df.head(), empty_df)
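# Illustrative sketch (not part of the original test module): head/tail accept
# a negative n, mirroring slicing from the opposite end, as exercised above.
def _example_negative_n():
    df = DataFrame({"a": range(5)})
    assert df.head(-1).equals(df.iloc[:-1])  # everything but the last row
    assert df.tail(-2).equals(df.iloc[2:])   # everything but the first two rows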
|
bsd-3-clause
|
nomadcube/scikit-learn
|
sklearn/svm/setup.py
|
321
|
3157
|
import os
from os.path import join
import numpy
from sklearn._build_utils import get_blas_info
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration
config = Configuration('svm', parent_package, top_path)
config.add_subpackage('tests')
# Section LibSVM
# we compile both libsvm and libsvm_sparse
config.add_library('libsvm-skl',
sources=[join('src', 'libsvm', 'libsvm_template.cpp')],
depends=[join('src', 'libsvm', 'svm.cpp'),
join('src', 'libsvm', 'svm.h')],
# Force C++ linking in case gcc is picked up instead
# of g++ under windows with some versions of MinGW
extra_link_args=['-lstdc++'],
)
libsvm_sources = ['libsvm.c']
libsvm_depends = [join('src', 'libsvm', 'libsvm_helper.c'),
join('src', 'libsvm', 'libsvm_template.cpp'),
join('src', 'libsvm', 'svm.cpp'),
join('src', 'libsvm', 'svm.h')]
config.add_extension('libsvm',
sources=libsvm_sources,
include_dirs=[numpy.get_include(),
join('src', 'libsvm')],
libraries=['libsvm-skl'],
depends=libsvm_depends,
)
### liblinear module
cblas_libs, blas_info = get_blas_info()
if os.name == 'posix':
cblas_libs.append('m')
liblinear_sources = ['liblinear.c',
join('src', 'liblinear', '*.cpp')]
liblinear_depends = [join('src', 'liblinear', '*.h'),
join('src', 'liblinear', 'liblinear_helper.c')]
config.add_extension('liblinear',
sources=liblinear_sources,
libraries=cblas_libs,
include_dirs=[join('..', 'src', 'cblas'),
numpy.get_include(),
blas_info.pop('include_dirs', [])],
extra_compile_args=blas_info.pop('extra_compile_args',
[]),
depends=liblinear_depends,
# extra_compile_args=['-O0 -fno-inline'],
** blas_info)
## end liblinear module
# this should go *after* libsvm-skl
libsvm_sparse_sources = ['libsvm_sparse.c']
config.add_extension('libsvm_sparse', libraries=['libsvm-skl'],
sources=libsvm_sparse_sources,
include_dirs=[numpy.get_include(),
join("src", "libsvm")],
depends=[join("src", "libsvm", "svm.h"),
join("src", "libsvm",
"libsvm_sparse_helper.c")])
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
|
bsd-3-clause
|
chenkianwee/pyliburo
|
py4design/pyoptimise/__init__.py
|
2
|
8768
|
# ==================================================================================================
#
# Copyright (c) 2016, Chen Kian Wee ([email protected])
#
# This file is part of py4design
#
# py4design is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# py4design is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with py4design. If not, see <http://www.gnu.org/licenses/>.
#
# ==================================================================================================
"""
Pyoptimise
================================================
Documentation is available in the docstrings and online at http://chenkianwee.github.io/py4design/
Submodules
-----------
::
analyse_xml --- Functions for analysing the generated xml from the optimisation.
dependencies: scipy, numpy, scikit-learn, pymf
draw_graph --- Functions for drawing graphs.
dependencies: matplotlib
nsga2 --- Classes and functions for performing NSGA2 optimisation.
"""
import nsga2
import analyse_xml
import draw_graph
def empty_xml_files(xml_filelist):
"""
This function empties all the xml files.
Parameters
----------
xml_filelist : list of str
The list of xml files to be emptied.
"""
for xmlf in xml_filelist:
open(xmlf,"w").close()
def create_nsga2_population_class(gene_dict_list, score_dict_list, mutation_rate,crossover_rate,init_population,
live_file,dead_file):
"""
This function creates a population class.
Parameters
----------
gene_dict_list : list of dictionaries
        Each dictionary contains the parameters for creating a Gene class instance. Each dictionary is in this format: {"type": "int_range", "range": (0,4,1)}.
score_dict_list : list of dictionaries
        Each dictionary contains the parameters for creating a ScoreMeta class instance. Each dictionary is in this format: {"name": "solar", "minmax": "min"}.
mutation_rate : float
The mutation probability, the probability is between 0 to 1. The usual mutation probability is about 0.01.
crossover_rate : float
The crossover rate, the rate is between 0 to 1. The usual crossover rate is about 0.8.
init_population : int
The population size. The usual size is about 100.
live_file : str
The file path of the XML file that documents all the living individuals.
dead_file : str
The file path of the XML file that documents all the dead individuals.
Returns
-------
population : Population class instance
The created population class instance.
"""
#====================================
#initialise the genotype class object
#====================================
gm = nsga2.GenotypeMeta()
#====================================
#get the gene meta setting
#====================================
for gene_dict in gene_dict_list:
gene_type = gene_dict["type"]
gene_range = gene_dict["range"]
gene = nsga2.Gene(gene_type, gene_range)
gm.add_gene(gene)
gm.gene_position()
#====================================
#score meta
#====================================
#initiate the score meta class
score_m_list = []
score_name_list = []
for score_dict in score_dict_list:
score_name = score_dict["name"]
score_name_list.append(score_name)
score_minmax = score_dict["minmax"]
if score_minmax == "min":
score_m_list.append(nsga2.ScoreMeta.MIN)
if score_minmax == "max":
score_m_list.append(nsga2.ScoreMeta.MAX)
sm = nsga2.ScoreMeta(score_name_list, score_m_list)
#====================================
#population class parameters
#====================================
p = nsga2.Population(init_population, gm, sm, live_file , dead_file, mutation_rate, crossover_rate)
return p
def initialise_nsga2(gene_dict_list, score_dict_list, mutation_rate,crossover_rate,init_population,
live_file,dead_file ):
"""
This function initialises the population and writes the xml files for an NSGA2 optimisation process.
Parameters
----------
gene_dict_list : list of dictionaries
        Each dictionary contains the parameters for creating a Gene class instance. Each dictionary is in this format: {"type": "int_range", "range": (0,4,1)}.
score_dict_list : list of dictionaries
        Each dictionary contains the parameters for creating a ScoreMeta class instance. Each dictionary is in this format: {"name": "solar", "minmax": "min"}.
mutation_rate : float
The mutation probability, the probability is between 0 to 1. The usual mutation probability is about 0.01.
crossover_rate : float
The crossover rate, the rate is between 0 to 1. The usual crossover rate is about 0.8.
init_population : int
The population size. The usual size is about 100.
live_file : str
The file path of the XML file that documents all the living individuals.
dead_file : str
The file path of the XML file that documents all the dead individuals.
Returns
-------
population : Population class instance
The created population class instance.
"""
empty_xml_files([live_file, dead_file])
p = create_nsga2_population_class(gene_dict_list, score_dict_list, mutation_rate,crossover_rate,init_population,
live_file,dead_file)
p.randomise()
not_evaluated = p.individuals
for ind in not_evaluated:
ind.add_generation(0)
p.write()
return p
def resume_nsga2(gene_dict_list, score_dict_list, mutation_rate,crossover_rate,init_population,
live_file,dead_file ):
"""
This function resumes a broken NSGA2 optimisation process based on the dead xml file.
Parameters
----------
gene_dict_list : list of dictionaries
        Each dictionary contains the parameters for creating a Gene class instance. Each dictionary is in this format: {"type": "int_range", "range": (0,4,1)}.
score_dict_list : list of dictionaries
        Each dictionary contains the parameters for creating a ScoreMeta class instance. Each dictionary is in this format: {"name": "solar", "minmax": "min"}.
mutation_rate : float
The mutation probability, the probability is between 0 to 1. The usual mutation probability is about 0.01.
crossover_rate : float
The crossover rate, the rate is between 0 to 1. The usual crossover rate is about 0.8.
init_population : int
The population size. The usual size is about 100.
live_file : str
The file path of the XML file that documents all the living individuals.
dead_file : str
The file path of the XML file that documents all the dead individuals.
Returns
-------
population : Population class instance
The resumed population class instance.
"""
p = create_nsga2_population_class(gene_dict_list, score_dict_list, mutation_rate,crossover_rate,init_population,
live_file,dead_file)
p.read()
return p
def feedback_nsga2(population):
"""
This function performs the feedback process of a population.
Parameters
----------
population : Population class instance
The function will perform the reproduction on this population and generate a new generation of individuals.
"""
#===================================
#feedback
#===================================
current_gen = population.individuals[0].generation
population.reproduce(population.individuals, current_gen+1)
population.write()
#====================================
    #separate the evaluated individuals from the unevaluated ones
#====================================
unevaluated = []
for ind in population.individuals:
if ind.live == True:
unevaluated.append(ind)
population.individuals = unevaluated
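#====================================
# Illustrative workflow sketch (not part of the original module). It only wires
# together the helpers defined above; "evaluate_individual" stands for a
# user-supplied scoring routine and is not part of py4design.
#====================================
def _example_nsga2_workflow(gene_dict_list, score_dict_list, live_file, dead_file,
                            evaluate_individual, n_generations=10):
    population = initialise_nsga2(gene_dict_list, score_dict_list, 0.01, 0.8, 100,
                                  live_file, dead_file)
    for _ in range(n_generations):
        # score every live individual with the user-supplied routine
        for ind in population.individuals:
            evaluate_individual(ind)
        # selection and reproduction of the next generation
        feedback_nsga2(population)
    return population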
|
gpl-3.0
|
CroatianMeteorNetwork/RMS
|
Utils/SaturationCorrection.py
|
2
|
5479
|
""" Given the FTPdetectinfo file (assuming FF files are available) and the stddev of Gaussian PSF of the image,
correct the magnitudes and levels in the file for saturation. """
from __future__ import print_function, division, absolute_import
import os
import datetime
import numpy as np
import matplotlib.pyplot as plt
from RMS.Formats.FFfile import validFFName
from RMS.Formats.FFfile import read as readFF
from RMS.Formats.FTPdetectinfo import readFTPdetectinfo, writeFTPdetectinfo
from RMS.Routines.Image import thickLine, loadFlat, applyFlat
from Utils.SaturationSimulation import findUnsaturatedMagnitude
if __name__ == "__main__":
import argparse
### COMMAND LINE ARGUMENTS
# Init the command line arguments parser
arg_parser = argparse.ArgumentParser(description="Correct the magnitudes in the FTPdetectinfo file for saturation.")
arg_parser.add_argument('ftpdetectinfo_path', nargs=1, metavar='DIR_PATH', type=str, \
help='Path to the FTPdetectinfo file.')
arg_parser.add_argument('psf_sigma', nargs=1, metavar='PSF_SIGMA', type=float, \
help='Standard deviation of the Gaussian PSF in pixels.')
arg_parser.add_argument('-s', '--satlvl', metavar='SATURATION_LEVEL', type=int, \
help="Saturation level. 255 by default.", default=255)
arg_parser.add_argument('-f', '--flat', metavar='FLAT', type=str, \
help="Path to the flat frame.")
# Parse the command line arguments
cml_args = arg_parser.parse_args()
#########################
# Read command line arguments
ftpdetectinfo_path = cml_args.ftpdetectinfo_path[0]
dir_path, ftpdetectinfo_name = os.path.split(ftpdetectinfo_path)
gauss_sigma = cml_args.psf_sigma[0]
saturation_lvl = cml_args.satlvl
    # Load meteor data from the FTPdetectinfo file
cam_code, fps, meteor_list = readFTPdetectinfo(dir_path, ftpdetectinfo_name, ret_input_format=True)
# Load the flat, if given
flat = None
if cml_args.flat:
flat = loadFlat(*os.path.split(cml_args.flat))
corrected_meteor_list = []
# Find matching FF files in the directory
for entry in meteor_list:
ftp_ff_name, meteor_No, rho, phi, meteor_meas = entry
        # Find the matching FF file in the directory
for ff_name in sorted(os.listdir(dir_path)):
# Reject all non-FF files
if not validFFName(ff_name):
continue
            # Reject all FF files which do not match the name in the FTPdetectinfo
if ff_name != ftp_ff_name:
continue
print('Correcting for saturation:', ff_name)
# Load the FF file
ff = readFF(dir_path, ff_name)
# Apply the flat to avepixel
if flat:
avepixel = applyFlat(ff.avepixel, flat)
else:
avepixel = ff.avepixel
# Compute angular velocity
first_centroid = meteor_meas[0]
last_centroid = meteor_meas[-1]
frame1, x1, y1 = first_centroid[:3]
frame2, x2, y2 = last_centroid[:3]
px_fm = np.sqrt((x2 - x1)**2 + (y2 - y1)**2)/float(frame2 - frame1)
print('Ang vel:', px_fm*fps, 'px/s')
corrected_meteor_meas = []
print('Frame, App mag, Corr mag, Background')
# Go though all meteor centroids
for line in meteor_meas:
frame_n, x, y, ra, dec, azim, elev, inten, mag = line
# Compute the photometric offset
photom_offset = mag + 2.5*np.log10(inten)
### Compute the background intensity value behind the meteor ###
# Get the mask for the background as a 3 sigma streak around the meteor, but using avepixel
mask = thickLine(avepixel.shape[0], avepixel.shape[1], x, y, px_fm, phi - 90, \
3*gauss_sigma).astype(np.bool)
img = np.ma.masked_array(avepixel, ~mask)
bg_val = np.ma.median(img)
### ###
# Find the unsaturated magnitude
unsaturated_mag = findUnsaturatedMagnitude(mag, photom_offset, bg_val, fps, px_fm*fps,
gauss_sigma, saturation_point=saturation_lvl)
print("{:5.1f}, {:7.2f}, {:8.2f}, {:10.1f}".format(frame_n, mag, unsaturated_mag, bg_val))
# Compute the intensity from unsaturated magnitude
                unsaturated_inten = round(10**((photom_offset - unsaturated_mag)/2.5), 0)
corrected_meteor_meas.append([frame_n, x, y, ra, dec, azim, elev, unsaturated_inten,
unsaturated_mag])
if not corrected_meteor_meas:
corrected_meteor_meas = meteor_meas
corrected_meteor_list.append([ftp_ff_name, meteor_No, rho, phi, corrected_meteor_meas])
# Calibration string to be written to the FTPdetectinfo file
calib_str = "RMS - Saturation corrected on {:s} UTC".format(str(datetime.datetime.utcnow()))
# Write a corrected FTPdetectinfo file
    corrected_ftpdetectinfo_name = ftpdetectinfo_name.replace('.txt', '') + '_saturation_corrected.txt'
print('Saving to:', os.path.join(dir_path, corrected_ftpdetectinfo_name))
writeFTPdetectinfo(corrected_meteor_list, dir_path, corrected_ftpdetectinfo_name, dir_path, cam_code, \
fps, calibration=calib_str, celestial_coords_given=True)
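    # Illustrative numbers (not part of the original script): with a photometric
    # offset of 10.0, an apparent magnitude of 5.0 corresponds to an intensity of
    # 10**((10.0 - 5.0)/2.5) = 100; if the saturation correction brightens the
    # magnitude to 4.5, the corrected intensity becomes 10**((10.0 - 4.5)/2.5),
    # i.e. about 158.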
|
gpl-3.0
|
bnaul/scikit-learn
|
sklearn/preprocessing/tests/test_discretization.py
|
3
|
12382
|
import pytest
import numpy as np
import scipy.sparse as sp
import warnings
from sklearn.preprocessing import KBinsDiscretizer
from sklearn.preprocessing import OneHotEncoder
from sklearn.utils._testing import (
assert_array_almost_equal,
assert_array_equal,
assert_warns_message,
assert_allclose_dense_sparse
)
X = [[-2, 1.5, -4, -1],
[-1, 2.5, -3, -0.5],
[0, 3.5, -2, 0.5],
[1, 4.5, -1, 2]]
@pytest.mark.parametrize(
'strategy, expected',
[('uniform', [[0, 0, 0, 0], [1, 1, 1, 0], [2, 2, 2, 1], [2, 2, 2, 2]]),
('kmeans', [[0, 0, 0, 0], [0, 0, 0, 0], [1, 1, 1, 1], [2, 2, 2, 2]]),
('quantile', [[0, 0, 0, 0], [1, 1, 1, 1], [2, 2, 2, 2], [2, 2, 2, 2]])])
def test_fit_transform(strategy, expected):
est = KBinsDiscretizer(n_bins=3, encode='ordinal', strategy=strategy)
est.fit(X)
assert_array_equal(expected, est.transform(X))
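# Illustrative sketch (not part of the original test module): with the
# 'uniform' strategy the learned edges split each feature into equal-width
# bins, e.g. the first column of X ([-2, -1, 0, 1]) with n_bins=3 yields
# bin_edges_[0] == [-2, -1, 0, 1].
def _example_uniform_bin_edges():
    est = KBinsDiscretizer(n_bins=3, encode='ordinal', strategy='uniform')
    est.fit(X)
    return est.bin_edges_[0]  # array([-2., -1., 0., 1.])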
def test_valid_n_bins():
KBinsDiscretizer(n_bins=2).fit_transform(X)
KBinsDiscretizer(n_bins=np.array([2])[0]).fit_transform(X)
assert KBinsDiscretizer(n_bins=2).fit(X).n_bins_.dtype == np.dtype(int)
def test_invalid_n_bins():
est = KBinsDiscretizer(n_bins=1)
err_msg = ("KBinsDiscretizer received an invalid "
"number of bins. Received 1, expected at least 2.")
with pytest.raises(ValueError, match=err_msg):
est.fit_transform(X)
est = KBinsDiscretizer(n_bins=1.1)
err_msg = ("KBinsDiscretizer received an invalid "
"n_bins type. Received float, expected int.")
with pytest.raises(ValueError, match=err_msg):
est.fit_transform(X)
def test_invalid_n_bins_array():
# Bad shape
n_bins = np.full((2, 4), 2.)
est = KBinsDiscretizer(n_bins=n_bins)
err_msg = r"n_bins must be a scalar or array of shape \(n_features,\)."
with pytest.raises(ValueError, match=err_msg):
est.fit_transform(X)
# Incorrect number of features
n_bins = [1, 2, 2]
est = KBinsDiscretizer(n_bins=n_bins)
err_msg = r"n_bins must be a scalar or array of shape \(n_features,\)."
with pytest.raises(ValueError, match=err_msg):
est.fit_transform(X)
# Bad bin values
n_bins = [1, 2, 2, 1]
est = KBinsDiscretizer(n_bins=n_bins)
err_msg = ("KBinsDiscretizer received an invalid number of bins "
"at indices 0, 3. Number of bins must be at least 2, "
"and must be an int.")
with pytest.raises(ValueError, match=err_msg):
est.fit_transform(X)
# Float bin values
n_bins = [2.1, 2, 2.1, 2]
est = KBinsDiscretizer(n_bins=n_bins)
err_msg = ("KBinsDiscretizer received an invalid number of bins "
"at indices 0, 2. Number of bins must be at least 2, "
"and must be an int.")
with pytest.raises(ValueError, match=err_msg):
est.fit_transform(X)
@pytest.mark.parametrize(
'strategy, expected',
[('uniform', [[0, 0, 0, 0], [0, 1, 1, 0], [1, 2, 2, 1], [1, 2, 2, 2]]),
('kmeans', [[0, 0, 0, 0], [0, 0, 0, 0], [1, 1, 1, 1], [1, 2, 2, 2]]),
('quantile', [[0, 0, 0, 0], [0, 1, 1, 1], [1, 2, 2, 2], [1, 2, 2, 2]])])
def test_fit_transform_n_bins_array(strategy, expected):
est = KBinsDiscretizer(n_bins=[2, 3, 3, 3], encode='ordinal',
strategy=strategy).fit(X)
assert_array_equal(expected, est.transform(X))
# test the shape of bin_edges_
n_features = np.array(X).shape[1]
assert est.bin_edges_.shape == (n_features, )
for bin_edges, n_bins in zip(est.bin_edges_, est.n_bins_):
assert bin_edges.shape == (n_bins + 1, )
def test_invalid_n_features():
est = KBinsDiscretizer(n_bins=3).fit(X)
bad_X = np.arange(25).reshape(5, -1)
err_msg = "Incorrect number of features. Expecting 4, received 5"
with pytest.raises(ValueError, match=err_msg):
est.transform(bad_X)
@pytest.mark.parametrize('strategy', ['uniform', 'kmeans', 'quantile'])
def test_same_min_max(strategy):
warnings.simplefilter("always")
X = np.array([[1, -2],
[1, -1],
[1, 0],
[1, 1]])
est = KBinsDiscretizer(strategy=strategy, n_bins=3, encode='ordinal')
assert_warns_message(UserWarning,
"Feature 0 is constant and will be replaced "
"with 0.", est.fit, X)
assert est.n_bins_[0] == 1
# replace the feature with zeros
Xt = est.transform(X)
assert_array_equal(Xt[:, 0], np.zeros(X.shape[0]))
def test_transform_1d_behavior():
X = np.arange(4)
est = KBinsDiscretizer(n_bins=2)
with pytest.raises(ValueError):
est.fit(X)
est = KBinsDiscretizer(n_bins=2)
est.fit(X.reshape(-1, 1))
with pytest.raises(ValueError):
est.transform(X)
@pytest.mark.parametrize('i', range(1, 9))
def test_numeric_stability(i):
X_init = np.array([2., 4., 6., 8., 10.]).reshape(-1, 1)
Xt_expected = np.array([0, 0, 1, 1, 1]).reshape(-1, 1)
# Test up to discretizing nano units
X = X_init / 10**i
Xt = KBinsDiscretizer(n_bins=2, encode='ordinal').fit_transform(X)
assert_array_equal(Xt_expected, Xt)
def test_invalid_encode_option():
est = KBinsDiscretizer(n_bins=[2, 3, 3, 3], encode='invalid-encode')
err_msg = (r"Valid options for 'encode' are "
r"\('onehot', 'onehot-dense', 'ordinal'\). "
r"Got encode='invalid-encode' instead.")
with pytest.raises(ValueError, match=err_msg):
est.fit(X)
def test_encode_options():
est = KBinsDiscretizer(n_bins=[2, 3, 3, 3],
encode='ordinal').fit(X)
Xt_1 = est.transform(X)
est = KBinsDiscretizer(n_bins=[2, 3, 3, 3],
encode='onehot-dense').fit(X)
Xt_2 = est.transform(X)
assert not sp.issparse(Xt_2)
assert_array_equal(OneHotEncoder(
categories=[np.arange(i) for i in [2, 3, 3, 3]],
sparse=False)
.fit_transform(Xt_1), Xt_2)
est = KBinsDiscretizer(n_bins=[2, 3, 3, 3],
encode='onehot').fit(X)
Xt_3 = est.transform(X)
assert sp.issparse(Xt_3)
assert_array_equal(OneHotEncoder(
categories=[np.arange(i) for i in [2, 3, 3, 3]],
sparse=True)
.fit_transform(Xt_1).toarray(),
Xt_3.toarray())
def test_invalid_strategy_option():
est = KBinsDiscretizer(n_bins=[2, 3, 3, 3], strategy='invalid-strategy')
err_msg = (r"Valid options for 'strategy' are "
r"\('uniform', 'quantile', 'kmeans'\). "
r"Got strategy='invalid-strategy' instead.")
with pytest.raises(ValueError, match=err_msg):
est.fit(X)
@pytest.mark.parametrize(
'strategy, expected_2bins, expected_3bins, expected_5bins',
[('uniform', [0, 0, 0, 0, 1, 1], [0, 0, 0, 0, 2, 2], [0, 0, 1, 1, 4, 4]),
('kmeans', [0, 0, 0, 0, 1, 1], [0, 0, 1, 1, 2, 2], [0, 0, 1, 2, 3, 4]),
('quantile', [0, 0, 0, 1, 1, 1], [0, 0, 1, 1, 2, 2], [0, 1, 2, 3, 4, 4])])
def test_nonuniform_strategies(
strategy, expected_2bins, expected_3bins, expected_5bins):
X = np.array([0, 0.5, 2, 3, 9, 10]).reshape(-1, 1)
# with 2 bins
est = KBinsDiscretizer(n_bins=2, strategy=strategy, encode='ordinal')
Xt = est.fit_transform(X)
assert_array_equal(expected_2bins, Xt.ravel())
# with 3 bins
est = KBinsDiscretizer(n_bins=3, strategy=strategy, encode='ordinal')
Xt = est.fit_transform(X)
assert_array_equal(expected_3bins, Xt.ravel())
# with 5 bins
est = KBinsDiscretizer(n_bins=5, strategy=strategy, encode='ordinal')
Xt = est.fit_transform(X)
assert_array_equal(expected_5bins, Xt.ravel())
@pytest.mark.parametrize(
'strategy, expected_inv',
[('uniform', [[-1.5, 2., -3.5, -0.5], [-0.5, 3., -2.5, -0.5],
[0.5, 4., -1.5, 0.5], [0.5, 4., -1.5, 1.5]]),
('kmeans', [[-1.375, 2.125, -3.375, -0.5625],
[-1.375, 2.125, -3.375, -0.5625],
[-0.125, 3.375, -2.125, 0.5625],
[0.75, 4.25, -1.25, 1.625]]),
('quantile', [[-1.5, 2., -3.5, -0.75], [-0.5, 3., -2.5, 0.],
[0.5, 4., -1.5, 1.25], [0.5, 4., -1.5, 1.25]])])
@pytest.mark.parametrize('encode', ['ordinal', 'onehot', 'onehot-dense'])
def test_inverse_transform(strategy, encode, expected_inv):
kbd = KBinsDiscretizer(n_bins=3, strategy=strategy, encode=encode)
Xt = kbd.fit_transform(X)
Xinv = kbd.inverse_transform(Xt)
assert_array_almost_equal(expected_inv, Xinv)
@pytest.mark.parametrize('strategy', ['uniform', 'kmeans', 'quantile'])
def test_transform_outside_fit_range(strategy):
X = np.array([0, 1, 2, 3])[:, None]
kbd = KBinsDiscretizer(n_bins=4, strategy=strategy, encode='ordinal')
kbd.fit(X)
X2 = np.array([-2, 5])[:, None]
X2t = kbd.transform(X2)
assert_array_equal(X2t.max(axis=0) + 1, kbd.n_bins_)
assert_array_equal(X2t.min(axis=0), [0])
def test_overwrite():
X = np.array([0, 1, 2, 3])[:, None]
X_before = X.copy()
est = KBinsDiscretizer(n_bins=3, encode="ordinal")
Xt = est.fit_transform(X)
assert_array_equal(X, X_before)
Xt_before = Xt.copy()
Xinv = est.inverse_transform(Xt)
assert_array_equal(Xt, Xt_before)
assert_array_equal(Xinv, np.array([[0.5], [1.5], [2.5], [2.5]]))
@pytest.mark.parametrize(
'strategy, expected_bin_edges',
[('quantile', [0, 1, 3]), ('kmeans', [0, 1.5, 3])])
def test_redundant_bins(strategy, expected_bin_edges):
X = [[0], [0], [0], [0], [3], [3]]
kbd = KBinsDiscretizer(n_bins=3, strategy=strategy)
msg = ("Bins whose width are too small (i.e., <= 1e-8) in feature 0 "
"are removed. Consider decreasing the number of bins.")
assert_warns_message(UserWarning, msg, kbd.fit, X)
assert_array_almost_equal(kbd.bin_edges_[0], expected_bin_edges)
def test_percentile_numeric_stability():
X = np.array([0.05, 0.05, 0.95]).reshape(-1, 1)
bin_edges = np.array([0.05, 0.23, 0.41, 0.59, 0.77, 0.95])
Xt = np.array([0, 0, 4]).reshape(-1, 1)
kbd = KBinsDiscretizer(n_bins=10, encode='ordinal',
strategy='quantile')
msg = ("Bins whose width are too small (i.e., <= 1e-8) in feature 0 "
"are removed. Consider decreasing the number of bins.")
assert_warns_message(UserWarning, msg, kbd.fit, X)
assert_array_almost_equal(kbd.bin_edges_[0], bin_edges)
assert_array_almost_equal(kbd.transform(X), Xt)
@pytest.mark.parametrize("in_dtype", [np.float16, np.float32, np.float64])
@pytest.mark.parametrize("out_dtype", [None, np.float16, np.float32,
np.float64])
@pytest.mark.parametrize('encode', ['ordinal', 'onehot', 'onehot-dense'])
def test_consistent_dtype(in_dtype, out_dtype, encode):
X_input = np.array(X, dtype=in_dtype)
kbd = KBinsDiscretizer(n_bins=3, encode=encode, dtype=out_dtype)
    # an error is raised if a wrong dtype is defined for the model
if out_dtype not in [None, np.float32, np.float64]:
with pytest.raises(ValueError, match="Valid options for 'dtype' are"):
kbd.fit(X_input)
else:
kbd.fit(X_input)
# test output dtype
if out_dtype is not None:
expected_dtype = out_dtype
elif out_dtype is None and X_input.dtype == np.float16:
        # unsupported numeric input dtypes are cast to np.float64
expected_dtype = np.float64
else:
expected_dtype = X_input.dtype
Xt = kbd.transform(X_input)
assert Xt.dtype == expected_dtype
@pytest.mark.parametrize('input_dtype', [np.float16, np.float32, np.float64])
@pytest.mark.parametrize('encode', ['ordinal', 'onehot', 'onehot-dense'])
def test_32_equal_64(input_dtype, encode):
# TODO this check is redundant with common checks and can be removed
# once #16290 is merged
X_input = np.array(X, dtype=input_dtype)
# 32 bit output
kbd_32 = KBinsDiscretizer(n_bins=3, encode=encode, dtype=np.float32)
kbd_32.fit(X_input)
Xt_32 = kbd_32.transform(X_input)
# 64 bit output
kbd_64 = KBinsDiscretizer(n_bins=3, encode=encode, dtype=np.float64)
kbd_64.fit(X_input)
Xt_64 = kbd_64.transform(X_input)
assert_allclose_dense_sparse(Xt_32, Xt_64)
|
bsd-3-clause
|
NelisVerhoef/scikit-learn
|
examples/neighbors/plot_digits_kde_sampling.py
|
251
|
2022
|
"""
=========================
Kernel Density Estimation
=========================
This example shows how kernel density estimation (KDE), a powerful
non-parametric density estimation technique, can be used to learn
a generative model for a dataset. With this generative model in place,
new samples can be drawn. These new samples reflect the underlying model
of the data.
"""
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import load_digits
from sklearn.neighbors import KernelDensity
from sklearn.decomposition import PCA
from sklearn.grid_search import GridSearchCV
# load the data
digits = load_digits()
data = digits.data
# project the 64-dimensional data to a lower dimension
pca = PCA(n_components=15, whiten=False)
data = pca.fit_transform(digits.data)
# use grid search cross-validation to optimize the bandwidth
params = {'bandwidth': np.logspace(-1, 1, 20)}
grid = GridSearchCV(KernelDensity(), params)
grid.fit(data)
print("best bandwidth: {0}".format(grid.best_estimator_.bandwidth))
# use the best estimator to compute the kernel density estimate
kde = grid.best_estimator_
# sample 44 new points from the data
new_data = kde.sample(44, random_state=0)
new_data = pca.inverse_transform(new_data)
# turn data into a 4x11 grid
new_data = new_data.reshape((4, 11, -1))
real_data = digits.data[:44].reshape((4, 11, -1))
# plot real digits and resampled digits
fig, ax = plt.subplots(9, 11, subplot_kw=dict(xticks=[], yticks=[]))
for j in range(11):
ax[4, j].set_visible(False)
for i in range(4):
im = ax[i, j].imshow(real_data[i, j].reshape((8, 8)),
cmap=plt.cm.binary, interpolation='nearest')
im.set_clim(0, 16)
im = ax[i + 5, j].imshow(new_data[i, j].reshape((8, 8)),
cmap=plt.cm.binary, interpolation='nearest')
im.set_clim(0, 16)
ax[0, 5].set_title('Selection from the input data')
ax[5, 5].set_title('"New" digits drawn from the kernel density model')
plt.show()
|
bsd-3-clause
|
joernhees/scikit-learn
|
examples/linear_model/plot_logistic.py
|
73
|
1568
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Logistic function
=========================================================
Shown in the plot is how logistic regression would, on this synthetic
dataset, classify samples as either 0 or 1, i.e. as class one or class two,
using the logistic curve.
"""
print(__doc__)
# Code source: Gael Varoquaux
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
# this is our training data, it's just a straight line with some
# Gaussian noise
xmin, xmax = -5, 5
n_samples = 100
np.random.seed(0)
X = np.random.normal(size=n_samples)
y = (X > 0).astype(np.float)
X[X > 0] *= 4
X += .3 * np.random.normal(size=n_samples)
X = X[:, np.newaxis]
# run the classifier
clf = linear_model.LogisticRegression(C=1e5)
clf.fit(X, y)
# and plot the result
plt.figure(1, figsize=(4, 3))
plt.clf()
plt.scatter(X.ravel(), y, color='black', zorder=20)
X_test = np.linspace(-5, 10, 300)
def model(x):
return 1 / (1 + np.exp(-x))
loss = model(X_test * clf.coef_ + clf.intercept_).ravel()
plt.plot(X_test, loss, color='red', linewidth=3)
ols = linear_model.LinearRegression()
ols.fit(X, y)
plt.plot(X_test, ols.coef_ * X_test + ols.intercept_, linewidth=1)
plt.axhline(.5, color='.5')
plt.ylabel('y')
plt.xlabel('X')
plt.xticks(range(-5, 10))
plt.yticks([0, 0.5, 1])
plt.ylim(-.25, 1.25)
plt.xlim(-4, 10)
plt.legend(('Logistic Regression Model', 'Linear Regression Model'),
loc="lower right", fontsize='small')
plt.show()
|
bsd-3-clause
|
liberatorqjw/scikit-learn
|
benchmarks/bench_random_projections.py
|
397
|
8900
|
"""
===========================
Random projection benchmark
===========================
Benchmarks for random projections.
"""
from __future__ import division
from __future__ import print_function
import gc
import sys
import optparse
from datetime import datetime
import collections
import numpy as np
import scipy.sparse as sp
from sklearn import clone
from sklearn.externals.six.moves import xrange
from sklearn.random_projection import (SparseRandomProjection,
GaussianRandomProjection,
johnson_lindenstrauss_min_dim)
def type_auto_or_float(val):
if val == "auto":
return "auto"
else:
return float(val)
def type_auto_or_int(val):
if val == "auto":
return "auto"
else:
return int(val)
def compute_time(t_start, delta):
mu_second = 0.0 + 10 ** 6 # number of microseconds in a second
return delta.seconds + delta.microseconds / mu_second
def bench_scikit_transformer(X, transformer):
    gc.collect()
    clf = clone(transformer)
# start time
t_start = datetime.now()
clf.fit(X)
delta = (datetime.now() - t_start)
# stop time
time_to_fit = compute_time(t_start, delta)
# start time
t_start = datetime.now()
clf.transform(X)
delta = (datetime.now() - t_start)
# stop time
time_to_transform = compute_time(t_start, delta)
return time_to_fit, time_to_transform
# Make some random data with uniformly located non-zero entries and
# Gaussian distributed values
def make_sparse_random_data(n_samples, n_features, n_nonzeros,
random_state=None):
rng = np.random.RandomState(random_state)
data_coo = sp.coo_matrix(
(rng.randn(n_nonzeros),
(rng.randint(n_samples, size=n_nonzeros),
rng.randint(n_features, size=n_nonzeros))),
shape=(n_samples, n_features))
return data_coo.toarray(), data_coo.tocsr()
def print_row(clf_type, time_fit, time_transform):
print("%s | %s | %s" % (clf_type.ljust(30),
("%.4fs" % time_fit).center(12),
("%.4fs" % time_transform).center(12)))
if __name__ == "__main__":
###########################################################################
# Option parser
###########################################################################
op = optparse.OptionParser()
op.add_option("--n-times",
dest="n_times", default=5, type=int,
help="Benchmark results are average over n_times experiments")
op.add_option("--n-features",
dest="n_features", default=10 ** 4, type=int,
help="Number of features in the benchmarks")
op.add_option("--n-components",
dest="n_components", default="auto",
help="Size of the random subspace."
" ('auto' or int > 0)")
op.add_option("--ratio-nonzeros",
dest="ratio_nonzeros", default=10 ** -3, type=float,
help="Number of features in the benchmarks")
op.add_option("--n-samples",
dest="n_samples", default=500, type=int,
help="Number of samples in the benchmarks")
op.add_option("--random-seed",
dest="random_seed", default=13, type=int,
help="Seed used by the random number generators.")
op.add_option("--density",
dest="density", default=1 / 3,
help="Density used by the sparse random projection."
" ('auto' or float (0.0, 1.0]")
op.add_option("--eps",
dest="eps", default=0.5, type=float,
help="See the documentation of the underlying transformers.")
op.add_option("--transformers",
dest="selected_transformers",
default='GaussianRandomProjection,SparseRandomProjection',
type=str,
help="Comma-separated list of transformer to benchmark. "
"Default: %default. Available: "
"GaussianRandomProjection,SparseRandomProjection")
op.add_option("--dense",
dest="dense",
default=False,
action="store_true",
help="Set input space as a dense matrix.")
(opts, args) = op.parse_args()
if len(args) > 0:
op.error("this script takes no arguments.")
sys.exit(1)
opts.n_components = type_auto_or_int(opts.n_components)
opts.density = type_auto_or_float(opts.density)
selected_transformers = opts.selected_transformers.split(',')
###########################################################################
# Generate dataset
###########################################################################
n_nonzeros = int(opts.ratio_nonzeros * opts.n_features)
    print('Dataset statistics')
print("===========================")
print('n_samples \t= %s' % opts.n_samples)
print('n_features \t= %s' % opts.n_features)
if opts.n_components == "auto":
print('n_components \t= %s (auto)' %
johnson_lindenstrauss_min_dim(n_samples=opts.n_samples,
eps=opts.eps))
else:
print('n_components \t= %s' % opts.n_components)
print('n_elements \t= %s' % (opts.n_features * opts.n_samples))
print('n_nonzeros \t= %s per feature' % n_nonzeros)
print('ratio_nonzeros \t= %s' % opts.ratio_nonzeros)
print('')
###########################################################################
# Set transformer input
###########################################################################
transformers = {}
###########################################################################
# Set GaussianRandomProjection input
gaussian_matrix_params = {
"n_components": opts.n_components,
"random_state": opts.random_seed
}
transformers["GaussianRandomProjection"] = \
GaussianRandomProjection(**gaussian_matrix_params)
###########################################################################
# Set SparseRandomProjection input
sparse_matrix_params = {
"n_components": opts.n_components,
"random_state": opts.random_seed,
"density": opts.density,
"eps": opts.eps,
}
transformers["SparseRandomProjection"] = \
SparseRandomProjection(**sparse_matrix_params)
###########################################################################
# Perform benchmark
###########################################################################
time_fit = collections.defaultdict(list)
time_transform = collections.defaultdict(list)
print('Benchmarks')
print("===========================")
print("Generate dataset benchmarks... ", end="")
X_dense, X_sparse = make_sparse_random_data(opts.n_samples,
opts.n_features,
n_nonzeros,
random_state=opts.random_seed)
X = X_dense if opts.dense else X_sparse
print("done")
for name in selected_transformers:
print("Perform benchmarks for %s..." % name)
for iteration in xrange(opts.n_times):
print("\titer %s..." % iteration, end="")
            time_to_fit, time_to_transform = bench_scikit_transformer(
                X, transformers[name])
time_fit[name].append(time_to_fit)
time_transform[name].append(time_to_transform)
print("done")
print("")
###########################################################################
# Print results
###########################################################################
print("Script arguments")
print("===========================")
arguments = vars(opts)
print("%s \t | %s " % ("Arguments".ljust(16),
"Value".center(12),))
print(25 * "-" + ("|" + "-" * 14) * 1)
for key, value in arguments.items():
print("%s \t | %s " % (str(key).ljust(16),
str(value).strip().center(12)))
print("")
print("Transformer performance:")
print("===========================")
print("Results are averaged over %s repetition(s)." % opts.n_times)
print("")
print("%s | %s | %s" % ("Transformer".ljust(30),
"fit".center(12),
"transform".center(12)))
print(31 * "-" + ("|" + "-" * 14) * 2)
for name in sorted(selected_transformers):
print_row(name,
np.mean(time_fit[name]),
np.mean(time_transform[name]))
print("")
print("")
|
bsd-3-clause
|
abhishekkrthakur/AutoML
|
Phase0/digits_main.py
|
1
|
1289
|
"""
AutoML : Round 0
__author__ : abhishek thakur
"""
import numpy as np
from libscores import *
from sklearn import ensemble, linear_model, preprocessing, svm
from sklearn import decomposition, metrics, cross_validation, neighbors
np.set_printoptions(suppress=True)
train_data = np.loadtxt('digits/digits_train.data')
test_data = np.loadtxt('digits/digits_test.data')
valid_data = np.loadtxt('digits/digits_valid.data')
feat_type = np.loadtxt('digits/digits_feat.type', dtype = 'S20')
labels = np.loadtxt('digits/digits_train.solution')
train_data = np.nan_to_num(train_data)
test_data = np.nan_to_num(test_data)
valid_data = np.nan_to_num(valid_data)
pca = decomposition.PCA(n_components = 40, whiten = False)
pca.fit(train_data)
train_data = pca.transform(train_data)
test_data = pca.transform(test_data)
valid_data = pca.transform(valid_data)
mms = preprocessing.MinMaxScaler()
mms.fit(train_data)
train_data = mms.transform(train_data)
test_data = mms.transform(test_data)
valid_data = mms.transform(valid_data)
clf = svm.SVC(C=10, verbose = 2)
clf.fit(train_data, labels)
test_preds = clf.predict(test_data)
valid_preds = clf.predict(valid_data)
np.savetxt('res/digits_test_001.predict', test_preds, '%1.5f')
np.savetxt('res/digits_valid_001.predict', valid_preds, '%1.5f')
|
mit
|
arasmus/ladder
|
run.py
|
2
|
25307
|
#!/usr/bin/env python
import functools
import logging
import os
import subprocess
from argparse import ArgumentParser, Action
from collections import OrderedDict
import sys
from pandas import DataFrame
import numpy
import time
import theano
from theano.tensor.type import TensorType
from blocks.algorithms import GradientDescent, Adam
from blocks.extensions import FinishAfter
from blocks.extensions.monitoring import TrainingDataMonitoring
from blocks.filter import VariableFilter
from blocks.graph import ComputationGraph
from blocks.main_loop import MainLoop
from blocks.model import Model
from blocks.roles import PARAMETER
from fuel.datasets import MNIST, CIFAR10
from fuel.schemes import ShuffledScheme, SequentialScheme
from fuel.streams import DataStream
from fuel.transformers import Transformer
from picklable_itertools import cycle, imap
from itertools import izip, product, tee
logger = logging.getLogger('main')
from utils import ShortPrinting, prepare_dir, load_df, DummyLoop
from utils import SaveExpParams, SaveLog, SaveParams, AttributeDict
from nn import ZCA, ContrastNorm
from nn import ApproxTestMonitoring, FinalTestMonitoring, TestMonitoring
from nn import LRDecay
from ladder import LadderAE
class Whitening(Transformer):
""" Makes a copy of the examples in the underlying dataset and whitens it
if necessary.
"""
def __init__(self, data_stream, iteration_scheme, whiten, cnorm=None,
**kwargs):
super(Whitening, self).__init__(data_stream,
iteration_scheme=iteration_scheme,
**kwargs)
data = data_stream.get_data(slice(data_stream.dataset.num_examples))
self.data = []
for s, d in zip(self.sources, data):
if 'features' == s:
# Fuel provides Cifar in uint8, convert to float32
d = numpy.require(d, dtype=numpy.float32)
if cnorm is not None:
d = cnorm.apply(d)
if whiten is not None:
d = whiten.apply(d)
self.data += [d]
elif 'targets' == s:
d = unify_labels(d)
self.data += [d]
else:
raise Exception("Unsupported Fuel target: %s" % s)
def get_data(self, request=None):
return (s[request] for s in self.data)
class SemiDataStream(Transformer):
""" Combines two datastreams into one such that 'target' source (labels)
is used only from the first one. The second one is renamed
to avoid collision. Upon iteration, the first one is repeated until
the second one depletes.
"""
def __init__(self, data_stream_labeled, data_stream_unlabeled, **kwargs):
super(Transformer, self).__init__(**kwargs)
self.ds_labeled = data_stream_labeled
self.ds_unlabeled = data_stream_unlabeled
# Rename the sources for clarity
self.ds_labeled.sources = ('features_labeled', 'targets_labeled')
# Rename the source for input pixels and hide its labels!
self.ds_unlabeled.sources = ('features_unlabeled',)
@property
def sources(self):
if hasattr(self, '_sources'):
return self._sources
return self.ds_labeled.sources + self.ds_unlabeled.sources
@sources.setter
def sources(self, value):
self._sources = value
def close(self):
self.ds_labeled.close()
self.ds_unlabeled.close()
def reset(self):
self.ds_labeled.reset()
self.ds_unlabeled.reset()
def next_epoch(self):
self.ds_labeled.next_epoch()
self.ds_unlabeled.next_epoch()
def get_epoch_iterator(self, **kwargs):
unlabeled = self.ds_unlabeled.get_epoch_iterator(**kwargs)
labeled = self.ds_labeled.get_epoch_iterator(**kwargs)
assert type(labeled) == type(unlabeled)
return imap(self.mergedicts, cycle(labeled), unlabeled)
def mergedicts(self, x, y):
return dict(list(x.items()) + list(y.items()))
def unify_labels(y):
""" Work-around for Fuel bug where MNIST and Cifar-10
datasets have different dimensionalities for the targets:
e.g. (50000, 1) vs (60000,) """
yshape = y.shape
y = y.flatten()
assert y.shape[0] == yshape[0]
return y
def make_datastream(dataset, indices, batch_size,
n_labeled=None, n_unlabeled=None,
balanced_classes=True, whiten=None, cnorm=None,
scheme=ShuffledScheme):
if n_labeled is None or n_labeled == 0:
n_labeled = len(indices)
if batch_size is None:
batch_size = len(indices)
if n_unlabeled is None:
n_unlabeled = len(indices)
assert n_labeled <= n_unlabeled, 'need less labeled than unlabeled'
if balanced_classes and n_labeled < n_unlabeled:
# Ensure each label is equally represented
logger.info('Balancing %d labels...' % n_labeled)
all_data = dataset.data_sources[dataset.sources.index('targets')]
y = unify_labels(all_data)[indices]
n_classes = y.max() + 1
assert n_labeled % n_classes == 0
n_from_each_class = n_labeled / n_classes
i_labeled = []
for c in range(n_classes):
i = (indices[y == c])[:n_from_each_class]
i_labeled += list(i)
else:
i_labeled = indices[:n_labeled]
# Get unlabeled indices
i_unlabeled = indices[:n_unlabeled]
ds = SemiDataStream(
data_stream_labeled=Whitening(
DataStream(dataset),
iteration_scheme=scheme(i_labeled, batch_size),
whiten=whiten, cnorm=cnorm),
data_stream_unlabeled=Whitening(
DataStream(dataset),
iteration_scheme=scheme(i_unlabeled, batch_size),
whiten=whiten, cnorm=cnorm)
)
return ds
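# Minimal sketch (not part of the original script) of the class balancing done
# above: `y` holds the labels of `indices` (same length), mirroring the branch
# guarded by `balanced_classes and n_labeled < n_unlabeled`.
def _example_balanced_label_pick(y, indices, n_labeled):
    n_classes = y.max() + 1
    n_from_each_class = n_labeled // n_classes
    i_labeled = []
    for c in range(n_classes):
        # keep the first n_from_each_class indices whose label equals c
        i_labeled += list((indices[y == c])[:n_from_each_class])
    return i_labeled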
def setup_model(p):
ladder = LadderAE(p)
# Setup inputs
input_type = TensorType('float32', [False] * (len(p.encoder_layers[0]) + 1))
x_only = input_type('features_unlabeled')
x = input_type('features_labeled')
y = theano.tensor.lvector('targets_labeled')
ladder.apply(x, y, x_only)
# Load parameters if requested
if p.get('load_from'):
with open(p.load_from + '/trained_params.npz') as f:
loaded = numpy.load(f)
cg = ComputationGraph([ladder.costs.total])
current_params = VariableFilter(roles=[PARAMETER])(cg.variables)
logger.info('Loading parameters: %s' % ', '.join(loaded.keys()))
for param in current_params:
assert param.get_value().shape == loaded[param.name].shape
param.set_value(loaded[param.name])
return ladder
def load_and_log_params(cli_params):
cli_params = AttributeDict(cli_params)
if cli_params.get('load_from'):
p = load_df(cli_params.load_from, 'params').to_dict()[0]
p = AttributeDict(p)
for key in cli_params.iterkeys():
if key not in p:
p[key] = None
new_params = cli_params
loaded = True
else:
p = cli_params
new_params = {}
loaded = False
    # Use seed as dseed unless dseed is specified explicitly
if p.get('dseed') is None and p.get('seed') is not None:
p['dseed'] = p['seed']
logger.info('== COMMAND LINE ==')
logger.info(' '.join(sys.argv))
logger.info('== PARAMETERS ==')
for k, v in p.iteritems():
if new_params.get(k) is not None:
p[k] = new_params[k]
replace_str = "<- " + str(new_params.get(k))
else:
replace_str = ""
logger.info(" {:20}: {:<20} {}".format(k, v, replace_str))
return p, loaded
def setup_data(p, test_set=False):
dataset_class, training_set_size = {
'cifar10': (CIFAR10, 40000),
'mnist': (MNIST, 50000),
}[p.dataset]
# Allow overriding the default from command line
if p.get('unlabeled_samples') is not None:
training_set_size = p.unlabeled_samples
train_set = dataset_class(["train"])
    # Make sure the MNIST data is in the right format
if p.dataset == 'mnist':
d = train_set.data_sources[train_set.sources.index('features')]
assert numpy.all(d <= 1.0) and numpy.all(d >= 0.0), \
'Make sure data is in float format and in range 0 to 1'
    # Take all indices and permute them
all_ind = numpy.arange(train_set.num_examples)
if p.get('dseed'):
rng = numpy.random.RandomState(seed=p.dseed)
rng.shuffle(all_ind)
d = AttributeDict()
# Choose the training set
d.train = train_set
d.train_ind = all_ind[:training_set_size]
# Then choose validation set from the remaining indices
d.valid = train_set
d.valid_ind = numpy.setdiff1d(all_ind, d.train_ind)[:p.valid_set_size]
logger.info('Using %d examples for validation' % len(d.valid_ind))
# Only touch test data if requested
if test_set:
d.test = dataset_class(["test"])
d.test_ind = numpy.arange(d.test.num_examples)
# Setup optional whitening, only used for Cifar-10
in_dim = train_set.data_sources[train_set.sources.index('features')].shape[1:]
if len(in_dim) > 1 and p.whiten_zca > 0:
assert numpy.product(in_dim) == p.whiten_zca, \
'Need %d whitening dimensions, not %d' % (numpy.product(in_dim),
p.whiten_zca)
cnorm = ContrastNorm(p.contrast_norm) if p.contrast_norm != 0 else None
def get_data(d, i):
data = d.get_data(request=list(i))[d.sources.index('features')]
# Fuel provides Cifar in uint8, convert to float32
data = numpy.require(data, dtype=numpy.float32)
return data if cnorm is None else cnorm.apply(data)
if p.whiten_zca > 0:
logger.info('Whitening using %d ZCA components' % p.whiten_zca)
whiten = ZCA()
whiten.fit(p.whiten_zca, get_data(d.train, d.train_ind))
else:
whiten = None
return in_dim, d, whiten, cnorm
def get_error(args):
""" Calculate the classification error """
args['data_type'] = args.get('data_type', 'test')
args['no_load'] = 'g_'
targets, acts = analyze(args)
guess = numpy.argmax(acts, axis=1)
correct = numpy.sum(numpy.equal(guess, targets.flatten()))
return (1. - correct / float(len(guess))) * 100.
def analyze(cli_params):
p, _ = load_and_log_params(cli_params)
_, data, whiten, cnorm = setup_data(p, test_set=True)
ladder = setup_model(p)
# Analyze activations
dset, indices, calc_batchnorm = {
'train': (data.train, data.train_ind, False),
'valid': (data.valid, data.valid_ind, True),
'test': (data.test, data.test_ind, True),
}[p.data_type]
if calc_batchnorm:
logger.info('Calculating batch normalization for clean.labeled path')
main_loop = DummyLoop(
extensions=[
FinalTestMonitoring(
[ladder.costs.class_clean, ladder.error.clean]
+ ladder.costs.denois.values(),
make_datastream(data.train, data.train_ind,
# These need to match with the training
p.batch_size,
n_labeled=p.labeled_samples,
n_unlabeled=len(data.train_ind),
cnorm=cnorm,
whiten=whiten, scheme=ShuffledScheme),
make_datastream(data.valid, data.valid_ind,
p.valid_batch_size,
n_labeled=len(data.valid_ind),
n_unlabeled=len(data.valid_ind),
cnorm=cnorm,
whiten=whiten, scheme=ShuffledScheme),
prefix="valid_final", before_training=True),
ShortPrinting({
"valid_final": OrderedDict([
('VF_C_class', ladder.costs.class_clean),
('VF_E', ladder.error.clean),
('VF_C_de', [ladder.costs.denois.get(0),
ladder.costs.denois.get(1),
ladder.costs.denois.get(2),
ladder.costs.denois.get(3)]),
]),
}, after_training=True, use_log=False),
])
main_loop.run()
# Make a datastream that has all the indices in the labeled pathway
ds = make_datastream(dset, indices,
batch_size=p.get('batch_size'),
n_labeled=len(indices),
n_unlabeled=len(indices),
balanced_classes=False,
whiten=whiten,
cnorm=cnorm,
scheme=SequentialScheme)
    # We want the outputs after the softmax
outputs = ladder.act.clean.labeled.h[len(ladder.layers) - 1]
    # Replace the batch normalization parameters with the shared variables
if calc_batchnorm:
outputreplacer = TestMonitoring()
_, _, outputs = outputreplacer._get_bn_params(outputs)
cg = ComputationGraph(outputs)
f = cg.get_theano_function()
it = ds.get_epoch_iterator(as_dict=True)
res = []
inputs = {'features_labeled': [],
'targets_labeled': [],
'features_unlabeled': []}
# Loop over one epoch
for d in it:
# Store all inputs
for k, v in d.iteritems():
inputs[k] += [v]
# Store outputs
res += [f(*[d[str(inp)] for inp in cg.inputs])]
# Concatenate all minibatches
res = [numpy.vstack(minibatches) for minibatches in zip(*res)]
inputs = {k: numpy.vstack(v) for k, v in inputs.iteritems()}
return inputs['targets_labeled'], res[0]
def train(cli_params):
cli_params['save_dir'] = prepare_dir(cli_params['save_to'])
logfile = os.path.join(cli_params['save_dir'], 'log.txt')
# Log also DEBUG to a file
fh = logging.FileHandler(filename=logfile)
fh.setLevel(logging.DEBUG)
logger.addHandler(fh)
logger.info('Logging into %s' % logfile)
p, loaded = load_and_log_params(cli_params)
in_dim, data, whiten, cnorm = setup_data(p, test_set=False)
if not loaded:
# Set the zero layer to match input dimensions
p.encoder_layers = (in_dim,) + p.encoder_layers
ladder = setup_model(p)
# Training
all_params = ComputationGraph([ladder.costs.total]).parameters
logger.info('Found the following parameters: %s' % str(all_params))
# Fetch all batch normalization updates. They are in the clean path.
bn_updates = ComputationGraph([ladder.costs.class_clean]).updates
assert 'counter' in [u.name for u in bn_updates.keys()], \
'No batch norm params in graph - the graph has been cut?'
training_algorithm = GradientDescent(
cost=ladder.costs.total, parameters=all_params,
step_rule=Adam(learning_rate=ladder.lr))
# In addition to actual training, also do BN variable approximations
training_algorithm.add_updates(bn_updates)
short_prints = {
"train": {
'T_C_class': ladder.costs.class_corr,
'T_C_de': ladder.costs.denois.values(),
},
"valid_approx": OrderedDict([
('V_C_class', ladder.costs.class_clean),
('V_E', ladder.error.clean),
('V_C_de', ladder.costs.denois.values()),
]),
"valid_final": OrderedDict([
('VF_C_class', ladder.costs.class_clean),
('VF_E', ladder.error.clean),
('VF_C_de', ladder.costs.denois.values()),
]),
}
main_loop = MainLoop(
training_algorithm,
# Datastream used for training
make_datastream(data.train, data.train_ind,
p.batch_size,
n_labeled=p.labeled_samples,
n_unlabeled=p.unlabeled_samples,
whiten=whiten,
cnorm=cnorm),
model=Model(ladder.costs.total),
extensions=[
FinishAfter(after_n_epochs=p.num_epochs),
# This will estimate the validation error using
# running average estimates of the batch normalization
# parameters, mean and variance
ApproxTestMonitoring(
[ladder.costs.class_clean, ladder.error.clean]
+ ladder.costs.denois.values(),
make_datastream(data.valid, data.valid_ind,
p.valid_batch_size, whiten=whiten, cnorm=cnorm,
scheme=ShuffledScheme),
prefix="valid_approx"),
# This Monitor is slower, but more accurate since it will first
# estimate batch normalization parameters from training data and
# then do another pass to calculate the validation error.
FinalTestMonitoring(
[ladder.costs.class_clean, ladder.error.clean]
+ ladder.costs.denois.values(),
make_datastream(data.train, data.train_ind,
p.batch_size,
n_labeled=p.labeled_samples,
whiten=whiten, cnorm=cnorm,
scheme=ShuffledScheme),
make_datastream(data.valid, data.valid_ind,
p.valid_batch_size,
n_labeled=len(data.valid_ind),
whiten=whiten, cnorm=cnorm,
scheme=ShuffledScheme),
prefix="valid_final",
after_n_epochs=p.num_epochs),
TrainingDataMonitoring(
[ladder.costs.total, ladder.costs.class_corr,
training_algorithm.total_gradient_norm]
+ ladder.costs.denois.values(),
prefix="train", after_epoch=True),
SaveParams(None, all_params, p.save_dir, after_epoch=True),
SaveExpParams(p, p.save_dir, before_training=True),
SaveLog(p.save_dir, after_training=True),
ShortPrinting(short_prints),
LRDecay(ladder.lr, p.num_epochs * p.lrate_decay, p.num_epochs,
after_epoch=True),
])
main_loop.run()
# Get results
df = DataFrame.from_dict(main_loop.log, orient='index')
col = 'valid_final_error_rate_clean'
logger.info('%s %g' % (col, df[col].iloc[-1]))
if main_loop.log.status['epoch_interrupt_received']:
return None
return df
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO)
rep = lambda s: s.replace('-', ',')
chop = lambda s: s.split(',')
to_int = lambda ss: [int(s) for s in ss if s.isdigit()]
to_float = lambda ss: [float(s) for s in ss]
def to_bool(s):
if s.lower() in ['true', 't']:
return True
elif s.lower() in ['false', 'f']:
return False
else:
raise Exception("Unknown bool value %s" % s)
def compose(*funs):
return functools.reduce(lambda f, g: lambda x: f(g(x)), funs)
# Functional parsing logic to allow flexible function compositions
# as actions for ArgumentParser
def funcs(additional_arg):
class customAction(Action):
def __call__(self, parser, args, values, option_string=None):
def process(arg, func_list):
if arg is None:
return None
elif type(arg) is list:
return map(compose(*func_list), arg)
else:
return compose(*func_list)(arg)
setattr(args, self.dest, process(values, additional_arg))
return customAction
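    # For example, funcs([tuple, to_float, chop]) builds an action that turns a
    # command-line string such as "0.1,0.2" into the tuple (0.1, 0.2): chop
    # splits on commas, to_float converts each piece and tuple freezes the list.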
def add_train_params(parser, use_defaults):
a = parser.add_argument
default = lambda x: x if use_defaults else None
# General hyper parameters and settings
a("save_to", help="Destination to save the state and results",
default=default("noname"), nargs="?")
a("--num-epochs", help="Number of training epochs",
type=int, default=default(150))
a("--seed", help="Seed",
type=int, default=default([1]), nargs='+')
a("--dseed", help="Data permutation seed, defaults to 'seed'",
type=int, default=default([None]), nargs='+')
a("--labeled-samples", help="How many supervised samples are used",
type=int, default=default(None), nargs='+')
a("--unlabeled-samples", help="How many unsupervised samples are used",
type=int, default=default(None), nargs='+')
a("--dataset", type=str, default=default(['mnist']), nargs='+',
choices=['mnist', 'cifar10'], help="Which dataset to use")
a("--lr", help="Initial learning rate",
type=float, default=default([0.002]), nargs='+')
a("--lrate-decay", help="When to linearly start decaying lrate (0-1)",
type=float, default=default([0.67]), nargs='+')
a("--batch-size", help="Minibatch size",
type=int, default=default([100]), nargs='+')
a("--valid-batch-size", help="Minibatch size for validation data",
type=int, default=default([100]), nargs='+')
a("--valid-set-size", help="Number of examples in validation set",
type=int, default=default([10000]), nargs='+')
# Hyperparameters controlling supervised path
a("--super-noise-std", help="Noise added to supervised learning path",
type=float, default=default([0.3]), nargs='+')
a("--f-local-noise-std", help="Noise added encoder path",
type=str, default=default([0.3]), nargs='+',
action=funcs([tuple, to_float, chop]))
a("--act", nargs='+', type=str, action=funcs([tuple, chop, rep]),
default=default(["relu"]), help="List of activation functions")
a("--encoder-layers", help="List of layers for f",
type=str, default=default(()), action=funcs([tuple, chop, rep]))
# Hyperparameters controlling unsupervised training
a("--denoising-cost-x", help="Weight of the denoising cost.",
type=str, default=default([(0.,)]), nargs='+',
action=funcs([tuple, to_float, chop]))
a("--decoder-spec", help="List of decoding function types", nargs='+',
type=str, default=default(['sig']), action=funcs([tuple, chop, rep]))
a("--zestbn", type=str, default=default(['bugfix']), nargs='+',
choices=['bugfix', 'no'], help="How to do zest bn")
# Hyperparameters used for Cifar training
a("--contrast-norm", help="Scale of contrast normalization (0=off)",
type=int, default=default([0]), nargs='+')
a("--top-c", help="Have c at softmax?", action=funcs([to_bool]),
default=default([True]), nargs='+')
a("--whiten-zca", help="Whether to whiten the data with ZCA",
type=int, default=default([0]), nargs='+')
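    # Hypothetical invocation combining the options above (script name assumed,
    # not taken from this file):
    #   python run.py train my_run --num-epochs 150 --labeled-samples 100 \
    #       --unlabeled-samples 60000 --dataset mnist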
ap = ArgumentParser("Semisupervised experiment")
subparsers = ap.add_subparsers(dest='cmd', help='sub-command help')
# TRAIN
train_cmd = subparsers.add_parser('train', help='Train a new model')
add_train_params(train_cmd, use_defaults=True)
# EVALUATE
load_cmd = subparsers.add_parser('evaluate', help='Evaluate test error')
load_cmd.add_argument('load_from', type=str,
help="Destination to load the state from")
load_cmd.add_argument('--data-type', type=str, default='test',
help="Data set to evaluate on")
args = ap.parse_args()
subp = subprocess.Popen(['git', 'rev-parse', 'HEAD'],
stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
out, err = subp.communicate()
args.commit = out.strip()
if err.strip():
logger.error('Subprocess returned %s' % err.strip())
t_start = time.time()
if args.cmd == 'evaluate':
for k, v in vars(args).iteritems():
if type(v) is list:
assert len(v) == 1, "should not be a list when loading: %s" % k
logger.info("%s" % str(v[0]))
vars(args)[k] = v[0]
err = get_error(vars(args))
logger.info('Test error: %f' % err)
elif args.cmd == "train":
listdicts = {k: v for k, v in vars(args).iteritems() if type(v) is list}
therest = {k: v for k, v in vars(args).iteritems() if type(v) is not list}
gen1, gen2 = tee(product(*listdicts.itervalues()))
l = len(list(gen1))
for i, d in enumerate(dict(izip(listdicts, x)) for x in gen2):
if l > 1:
logger.info('Training configuration %d / %d' % (i+1, l))
d.update(therest)
if train(d) is None:
break
logger.info('Took %.1f minutes' % ((time.time() - t_start) / 60.))
|
mit
|
wasade/American-Gut
|
scripts/beta_sample_rarefaction.py
|
5
|
4824
|
#!/usr/bin/env python
# File created on 01 Mar 2012
from __future__ import division
from random import sample
import numpy as np
from matplotlib import use
use('Agg') # noqa
from qiime.util import parse_command_line_parameters, make_option
from qiime.parse import parse_distmat
from numpy import mean, std, inf
from matplotlib.pyplot import (figure, subplot, grid, title, axis, savefig,
ylabel, xlabel)
__author__ = "Antonio Gonzalez Pena"
__copyright__ = "Copyright 2011, The QIIME project"
__credits__ = ["Antonio Gonzalez Pena", "Daniel McDonald"]
__license__ = "GPL"
__version__ = "1.4.0-dev"
__maintainer__ = "Antonio Gonzalez Pena"
__email__ = "[email protected]"
__status__ = "Development"
script_info = {}
script_info['brief_description'] = ""
script_info['script_description'] = ""
script_info['script_usage'] = [("", "", "")]
script_info['output_description'] = ""
script_info['required_options'] = [
make_option('-i', '--input_path', type="existing_filepaths",
help='the input distance matrix file(s)'),
make_option('-l', '--labels', type=str,
help='legend labels for the input files'),
make_option('-t', '--title', type=str,
help='plot title'),
make_option('-y', '--ylabel', type=str,
help='ylabel'),
make_option('-o', '--output_path', type="new_filepath",
help='the output file [default: %default]',
default='plot.pdf'),
make_option('-n', '--iterations', type=int,
help="Number of iterations: %default", default=100),
]
script_info['optional_options'] = [
make_option('--y_max', type='float',
help='max y value [default: %default]',
default=None),
]
script_info['version'] = __version__
def main():
option_parser, opts, args = parse_command_line_parameters(**script_info)
input_path = opts.input_path
output_path = opts.output_path
iterations = opts.iterations
verbose = opts.verbose
y_max = opts.y_max
labels = opts.labels.split(',')
results = {}
for idx, input_file in enumerate(input_path):
if verbose:
print input_file
        # Read the distance matrix
samples, distmat = parse_distmat(open(input_file, 'U'))
possible_samples = range(len(distmat[0]))
mask = np.ones(distmat.shape)
n_possible_samples = len(possible_samples)
result_iteration = np.zeros((iterations, n_possible_samples))
for iter_idx, iteration in enumerate(range(iterations)):
iter_vals = np.zeros(n_possible_samples)
for idx, n in enumerate(possible_samples):
if n < 1:
continue
curr_samples = sample(possible_samples, n+1)
                # masked arrays use an inverted convention: mask == 0 means keep the element
mask.fill(1)
mask[curr_samples] = 0
mask[:, curr_samples] = 0
np.fill_diagonal(mask, 1)
masked_array = np.ma.array(distmat, mask=mask)
iter_vals[idx] = masked_array.min()
result_iteration[iter_idx] = iter_vals
results[input_file] = [mean(result_iteration, axis=0),
std(result_iteration, axis=0)]
if verbose:
f = open(output_path + '.txt', 'a')
f.write('\t'.join(map(str, results[input_file][0])))
f.write('\n')
f.write('\t'.join(map(str, results[input_file][1])))
f.write('\n')
f.close()
# generating plot, some parts taken from
# http://stackoverflow.com/questions/4700614
figure()
ax = subplot(111)
max_x, max_y = -inf, -inf
for i, (label, input_file) in enumerate(zip(labels, input_path)):
len_x = len(results[input_file][0])
len_y = max(results[input_file][0])
if max_x < len_x:
max_x = len_x
if max_y < len_y:
max_y = len_y
if i % 2 == 0:
coloring = (215/255.0, 48/255.0, 39/255.0)
else:
coloring = (69/255.0, 177/255.0, 180/255.0)
ax.errorbar(range(1, len_x+1), results[input_file][0],
yerr=results[input_file][1], fmt='o', color=coloring,
label=label)
if y_max:
axis([0, max_x, 0, y_max])
else:
axis([0, max_x, 0, max_y])
# Shrink current axis by 20%
box = ax.get_position()
ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])
# Put a legend to the right of the current axis
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
title(opts.title)
xlabel('Samples')
ylabel(opts.ylabel)
grid(True)
savefig(output_path)
if __name__ == "__main__":
main()
|
bsd-3-clause
|
wzbozon/scikit-learn
|
examples/cluster/plot_agglomerative_clustering.py
|
343
|
2931
|
"""
Agglomerative clustering with and without structure
===================================================
This example shows the effect of imposing a connectivity graph to capture
local structure in the data. The graph is simply the graph of 30 nearest
neighbors.
Two consequences of imposing a connectivity can be seen. First clustering
with a connectivity matrix is much faster.
Second, when using a connectivity matrix, average and complete linkage are
unstable and tend to create a few clusters that grow very quickly. Indeed,
average and complete linkage fight this percolation behavior by considering all
the distances between two clusters when merging them. The connectivity
graph breaks this mechanism. This effect is more pronounced for very
sparse graphs (try decreasing the number of neighbors in
kneighbors_graph) and with complete linkage. In particular, having a very
small number of neighbors in the graph, imposes a geometry that is
close to that of single linkage, which is well known to have this
percolation instability.
"""
# Authors: Gael Varoquaux, Nelle Varoquaux
# License: BSD 3 clause
import time
import matplotlib.pyplot as plt
import numpy as np
from sklearn.cluster import AgglomerativeClustering
from sklearn.neighbors import kneighbors_graph
# Generate sample data
n_samples = 1500
np.random.seed(0)
t = 1.5 * np.pi * (1 + 3 * np.random.rand(1, n_samples))
x = t * np.cos(t)
y = t * np.sin(t)
X = np.concatenate((x, y))
X += .7 * np.random.randn(2, n_samples)
X = X.T
# Create a graph capturing local connectivity. Larger number of neighbors
# will give more homogeneous clusters to the cost of computation
# time. A very large number of neighbors gives more evenly distributed
# cluster sizes, but may not impose the local manifold structure of
# the data
knn_graph = kneighbors_graph(X, 30, include_self=False)
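# As the docstring suggests, a sparser graph accentuates the percolation
# behaviour; this alternative (illustrative only, not part of the original
# example) can be swapped in to observe it:
# knn_graph = kneighbors_graph(X, 5, include_self=False)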
for connectivity in (None, knn_graph):
for n_clusters in (30, 3):
plt.figure(figsize=(10, 4))
for index, linkage in enumerate(('average', 'complete', 'ward')):
plt.subplot(1, 3, index + 1)
model = AgglomerativeClustering(linkage=linkage,
connectivity=connectivity,
n_clusters=n_clusters)
t0 = time.time()
model.fit(X)
elapsed_time = time.time() - t0
plt.scatter(X[:, 0], X[:, 1], c=model.labels_,
cmap=plt.cm.spectral)
plt.title('linkage=%s (time %.2fs)' % (linkage, elapsed_time),
fontdict=dict(verticalalignment='top'))
plt.axis('equal')
plt.axis('off')
plt.subplots_adjust(bottom=0, top=.89, wspace=0,
left=0, right=1)
plt.suptitle('n_cluster=%i, connectivity=%r' %
(n_clusters, connectivity is not None), size=17)
plt.show()
|
bsd-3-clause
|
ishank08/scikit-learn
|
sklearn/manifold/tests/test_locally_linear.py
|
85
|
5600
|
from itertools import product
import numpy as np
from numpy.testing import assert_almost_equal, assert_array_almost_equal
from scipy import linalg
from sklearn import neighbors, manifold
from sklearn.manifold.locally_linear import barycenter_kneighbors_graph
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
eigen_solvers = ['dense', 'arpack']
# ----------------------------------------------------------------------
# Test utility routines
def test_barycenter_kneighbors_graph():
X = np.array([[0, 1], [1.01, 1.], [2, 0]])
A = barycenter_kneighbors_graph(X, 1)
assert_array_almost_equal(
A.toarray(),
[[0., 1., 0.],
[1., 0., 0.],
[0., 1., 0.]])
A = barycenter_kneighbors_graph(X, 2)
    # check that the barycenter weights in each row sum to one
assert_array_almost_equal(np.sum(A.toarray(), 1), np.ones(3))
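    # with weights summing to one, A.dot(X) reconstructs each sample as a
    # barycenter of its neighbors, so the residual below should be small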
pred = np.dot(A.toarray(), X)
assert_less(linalg.norm(pred - X) / X.shape[0], 1)
# ----------------------------------------------------------------------
# Test LLE by computing the reconstruction error on some manifolds.
def test_lle_simple_grid():
# note: ARPACK is numerically unstable, so this test will fail for
# some random seeds. We choose 2 because the tests pass.
rng = np.random.RandomState(2)
# grid of equidistant points in 2D, n_components = n_dim
X = np.array(list(product(range(5), repeat=2)))
X = X + 1e-10 * rng.uniform(size=X.shape)
n_components = 2
clf = manifold.LocallyLinearEmbedding(n_neighbors=5,
n_components=n_components,
random_state=rng)
tol = 0.1
N = barycenter_kneighbors_graph(X, clf.n_neighbors).toarray()
reconstruction_error = linalg.norm(np.dot(N, X) - X, 'fro')
assert_less(reconstruction_error, tol)
for solver in eigen_solvers:
clf.set_params(eigen_solver=solver)
clf.fit(X)
assert_true(clf.embedding_.shape[1] == n_components)
reconstruction_error = linalg.norm(
np.dot(N, clf.embedding_) - clf.embedding_, 'fro') ** 2
assert_less(reconstruction_error, tol)
assert_almost_equal(clf.reconstruction_error_,
reconstruction_error, decimal=1)
# re-embed a noisy version of X using the transform method
noise = rng.randn(*X.shape) / 100
X_reembedded = clf.transform(X + noise)
assert_less(linalg.norm(X_reembedded - clf.embedding_), tol)
def test_lle_manifold():
rng = np.random.RandomState(0)
# similar test on a slightly more complex manifold
X = np.array(list(product(np.arange(18), repeat=2)))
X = np.c_[X, X[:, 0] ** 2 / 18]
X = X + 1e-10 * rng.uniform(size=X.shape)
n_components = 2
for method in ["standard", "hessian", "modified", "ltsa"]:
clf = manifold.LocallyLinearEmbedding(n_neighbors=6,
n_components=n_components,
method=method, random_state=0)
tol = 1.5 if method == "standard" else 3
N = barycenter_kneighbors_graph(X, clf.n_neighbors).toarray()
reconstruction_error = linalg.norm(np.dot(N, X) - X)
assert_less(reconstruction_error, tol)
for solver in eigen_solvers:
clf.set_params(eigen_solver=solver)
clf.fit(X)
assert_true(clf.embedding_.shape[1] == n_components)
reconstruction_error = linalg.norm(
np.dot(N, clf.embedding_) - clf.embedding_, 'fro') ** 2
details = ("solver: %s, method: %s" % (solver, method))
assert_less(reconstruction_error, tol, msg=details)
assert_less(np.abs(clf.reconstruction_error_ -
reconstruction_error),
tol * reconstruction_error, msg=details)
# Test the error raised when parameter passed to lle is invalid
def test_lle_init_parameters():
X = np.random.rand(5, 3)
clf = manifold.LocallyLinearEmbedding(eigen_solver="error")
msg = "unrecognized eigen_solver 'error'"
assert_raise_message(ValueError, msg, clf.fit, X)
clf = manifold.LocallyLinearEmbedding(method="error")
msg = "unrecognized method 'error'"
assert_raise_message(ValueError, msg, clf.fit, X)
def test_pipeline():
# check that LocallyLinearEmbedding works fine as a Pipeline
# only checks that no error is raised.
# TODO check that it actually does something useful
from sklearn import pipeline, datasets
X, y = datasets.make_blobs(random_state=0)
clf = pipeline.Pipeline(
[('filter', manifold.LocallyLinearEmbedding(random_state=0)),
('clf', neighbors.KNeighborsClassifier())])
clf.fit(X, y)
assert_less(.9, clf.score(X, y))
# Test the error raised when the weight matrix is singular
def test_singular_matrix():
M = np.ones((10, 3))
f = ignore_warnings
assert_raises(ValueError, f(manifold.locally_linear_embedding),
M, 2, 1, method='standard', eigen_solver='arpack')
# regression test for #6033
def test_integer_input():
rand = np.random.RandomState(0)
X = rand.randint(0, 100, size=(20, 3))
for method in ["standard", "hessian", "modified", "ltsa"]:
clf = manifold.LocallyLinearEmbedding(method=method, n_neighbors=10)
clf.fit(X) # this previously raised a TypeError
|
bsd-3-clause
|
mbruggmann/luigi
|
examples/pyspark_wc.py
|
21
|
3380
|
# -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import luigi
from luigi.s3 import S3Target
from luigi.contrib.spark import SparkSubmitTask, PySparkTask
class InlinePySparkWordCount(PySparkTask):
"""
This task runs a :py:class:`luigi.contrib.spark.PySparkTask` task
over the target data in :py:meth:`wordcount.input` (a file in S3) and
writes the result into its :py:meth:`wordcount.output` target (a file in S3).
This class uses :py:meth:`luigi.contrib.spark.PySparkTask.main`.
Example luigi configuration::
[spark]
spark-submit: /usr/local/spark/bin/spark-submit
master: spark://spark.example.org:7077
# py-packages: numpy, pandas
"""
driver_memory = '2g'
executor_memory = '3g'
def input(self):
return S3Target("s3n://bucket.example.org/wordcount.input")
def output(self):
return S3Target('s3n://bucket.example.org/wordcount.output')
def main(self, sc, *args):
sc.textFile(self.input().path) \
.flatMap(lambda line: line.split()) \
.map(lambda word: (word, 1)) \
.reduceByKey(lambda a, b: a + b) \
.saveAsTextFile(self.output().path)
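# Example invocation (module path assumed from this example's location, not
# verified here):
#   luigi --module examples.pyspark_wc InlinePySparkWordCount --local-scheduler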
class PySparkWordCount(SparkSubmitTask):
"""
This task is the same as :py:class:`InlinePySparkWordCount` above but uses
an external python driver file specified in :py:meth:`app`
It runs a :py:class:`luigi.contrib.spark.SparkSubmitTask` task
over the target data in :py:meth:`wordcount.input` (a file in S3) and
writes the result into its :py:meth:`wordcount.output` target (a file in S3).
This class uses :py:meth:`luigi.contrib.spark.SparkSubmitTask.run`.
Example luigi configuration::
[spark]
spark-submit: /usr/local/spark/bin/spark-submit
master: spark://spark.example.org:7077
deploy-mode: client
"""
driver_memory = '2g'
executor_memory = '3g'
total_executor_cores = luigi.IntParameter(default=100, significant=False)
name = "PySpark Word Count"
app = 'wordcount.py'
def app_options(self):
# These are passed to the Spark main args in the defined order.
return [self.input().path, self.output().path]
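        # In the wordcount.py driver shown at the bottom of this file these
        # become sys.argv[1] (input path) and sys.argv[2] (output path).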
def input(self):
return S3Target("s3n://bucket.example.org/wordcount.input")
def output(self):
return S3Target('s3n://bucket.example.org/wordcount.output')
'''
// Corresponding example Spark Job, running Word count with Spark's Python API
// This file would have to be saved into wordcount.py
import sys
from pyspark import SparkContext
if __name__ == "__main__":
sc = SparkContext()
sc.textFile(sys.argv[1]) \
.flatMap(lambda line: line.split()) \
.map(lambda word: (word, 1)) \
.reduceByKey(lambda a, b: a + b) \
.saveAsTextFile(sys.argv[2])
'''
|
apache-2.0
|
Samarthjainabout/samarthjainabout.github.io
|
main.py
|
1
|
3628
|
from model import *
from data import *
#os.environ["CUDA_VISIBLE_DEVICES"] = "0"
from PIL import Image
from resizeimage import resizeimage
from skimage import color
from skimage import io
import cv2
from matplotlib import pyplot as plt
import numpy as np
import glob
from array import array
import statistics
from splitter import *
for filename in glob.glob('data/membrane/train/label/*.png'):  # binarize every label mask
#cover.save(filename, im.format)
im = cv2.imread(filename)
ret,thresh1 = cv2.threshold(im,127,255,cv2.THRESH_BINARY)
cv2.imwrite(filename, thresh1)
for filename in glob.glob('data/membrane/train/image/*.png'):  # equalize every training image
#cover.save(filename, im.format)
im = cv2.imread(filename,0)
im = cv2.equalizeHist(im)
cv2.imwrite(filename, im)
for filename in glob.glob('data/membrane/test/*.png'):  # equalize every test image
#cover.save(filename, im.format)
im = cv2.imread(filename,0)
im = cv2.equalizeHist(im)
cv2.imwrite(filename, im)
"""upper is for contrast enhancement of images"""
data_gen_args = dict(rotation_range=0.6,
width_shift_range=0.07,
height_shift_range=0.07,
shear_range=0.09,
zoom_range=0.07,
horizontal_flip=True,
fill_mode='nearest')
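# Augmentation settings, presumably forwarded by trainGenerator to a Keras
# ImageDataGenerator: small rotations, shifts, shears, zooms and horizontal
# flips (an assumption about data.py, which is not shown here).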
target_size=(1024,1024)
myGene = trainGenerator(1, 'data/membrane/train', 'image', 'label', data_gen_args,
                        save_to_dir='data/membrane/train/aug', target_size=target_size)
model = unet()
model_checkpoint = ModelCheckpoint('unet_membrane.hdf5', monitor='loss',verbose=1, save_best_only=True)
model.fit_generator(myGene, steps_per_epoch=10000, epochs=4, callbacks=[model_checkpoint])
#predict using stored model
model.load_weights("unet_membrane.hdf5")
testGene = testGenerator("data/membrane/test",target_size=target_size)
results = model.predict_generator(testGene,23,verbose=1)
saveResult("data/membrane/test",results)
# Binarize (black and white) all predicted masks
for filename in glob.glob('data/membrane/test/*_predict.png'):  # binarize every prediction
#cover.save(filename, im.format)
im = cv2.imread(filename)
ret,thresh1 = cv2.threshold(im,127,255,cv2.THRESH_BINARY)
cv2.imwrite(filename, thresh1)
# Measure the length of the path image
path="data/membrane/test/6"
left=array("i")
right=array("i")
image_in=cv2.imread(path+"_predict.png")
image_in=cv2.cvtColor(image_in,cv2.COLOR_BGR2GRAY)
cv2.imshow('image',image_in)
cv2.waitKey(0)
cv2.destroyWindow('image')
for i in range(image_in.shape[0]):
counter=0
counter2=0
for j in range(image_in.shape[1]):
if image_in[i,j] < 100:
if j>(image_in.shape[1])*.5 and j<(image_in.shape[1])*.75:
counter2 += 1#right pillar
elif j<(image_in.shape[1])*.5 and j>(image_in.shape[1])*.25:
counter += 1#left pillar
right.append(counter2)
left.append(counter)
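# Discard per-row pixel counts that fall more than two standard deviations
# from the mean (a simple outlier filter before averaging the pillar widths)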
elements = np.array(right)
mean = np.mean(elements, axis=0)
sd = np.std(elements, axis=0)
final_list_right = [x for x in right if (x > mean - 2 * sd)]
final_list_right = [x for x in final_list_right if (x < mean + 2 * sd)]
elements = np.array(left)
mean = np.mean(elements, axis=0)
sd = np.std(elements, axis=0)
final_list_left = [x for x in left if (x > mean - 2 * sd)]
final_list_left = [x for x in final_list_left if (x < mean + 2 * sd)]
#print(final_list_left,final_list_right)
print(np.mean(final_list_left)*.5,np.mean(final_list_right)*.5)
#display visual measurements
disp(path,target_size)
|
mit
|