repo_name | path | copies | size | content | license |
---|---|---|---|---|---|
start-jsk/jsk_apc | demos/grasp_fusion/grasp_fusion_lib/image.py | 2 | 12837 | import warnings
import cv2
import matplotlib
import numpy as np
import scipy
import six
import skimage.color
import skimage.transform
import skimage.util
def colorize_depth(depth, min_value=None, max_value=None, dtype=np.uint8):
"""Colorize depth image with JET colormap."""
min_value = np.nanmin(depth) if min_value is None else min_value
max_value = np.nanmax(depth) if max_value is None else max_value
if np.isinf(min_value) or np.isinf(max_value):
warnings.warn('Min or max value for depth colorization is inf.')
if max_value == min_value:
eps = np.finfo(depth.dtype).eps
max_value += eps
min_value -= eps
colorized = depth.copy()
nan_mask = np.isnan(colorized)
colorized[nan_mask] = 0
colorized = 1. * (colorized - min_value) / (max_value - min_value)
colorized = matplotlib.cm.jet(colorized)[:, :, :3]
if dtype == np.uint8:
colorized = (colorized * 255).astype(dtype)
else:
assert np.issubdtype(dtype, np.floating)
colorized = colorized.astype(dtype)
colorized[nan_mask] = (0, 0, 0)
return colorized
def colorize_heatmap(heatmap):
"""Colorize heatmap which ranges 0 to 1.
Parameters
----------
heatmap: numpy.ndarray
Heatmap whose values range from 0 to 1.
"""
if not (0 <= heatmap.min() <= 1):
raise ValueError('Heatmap min value must range from 0 to 1')
if not (0 <= heatmap.max() <= 1):
raise ValueError('Heatmap max value must range from 0 to 1')
return colorize_depth(heatmap, min_value=0, max_value=1)
def overlay_color_on_mono(img_color, img_mono, alpha=0.5):
"""Overlay color image on mono.
Parameters
----------
img_color: numpy.ndarray, (H, W, 3)
img_mono: numpy.ndarray, (H, W, 3) or (H, W)
alpha: float
Alpha value for color.
Returns
-------
dst: numpy.ndarray
Output image.
"""
# RGB -> Gray
if img_mono.ndim == 3:
img_mono = skimage.color.rgb2gray(img_mono)
img_mono = skimage.color.gray2rgb(img_mono)
img_mono = skimage.util.img_as_float(img_mono)
img_color = skimage.util.img_as_float(img_color)
dst = alpha * img_color + (1 - alpha) * img_mono
dst = (dst * 255).astype(np.uint8)
return dst
def label_colormap(n_label=256):
"""Colormap for specified number of labels.
Parameters
----------
n_label: int
Number of labels and colors.
"""
def bitget(byteval, idx):
return ((byteval & (1 << idx)) != 0)
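# Build each color by spreading the bits of the label id across the R, G and B
# channels (the bit-interleaving scheme used by the PASCAL VOC label colormap).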
cmap = np.zeros((n_label, 3))
for i in six.moves.range(0, n_label):
id = i
r, g, b = 0, 0, 0
for j in six.moves.range(0, 8):
r = np.bitwise_or(r, (bitget(id, 0) << 7 - j))
g = np.bitwise_or(g, (bitget(id, 1) << 7 - j))
b = np.bitwise_or(b, (bitget(id, 2) << 7 - j))
id = (id >> 3)
cmap[i, 0] = r
cmap[i, 1] = g
cmap[i, 2] = b
cmap = cmap.astype(np.float32) / 255
return cmap
def centerize(src, shape, margin_color=None, return_mask=False):
"""Centerize image for specified image size
Parameters
----------
src: numpy.ndarray
Image to centerize
shape: tuple of int
Image shape (height, width) or (height, width, channel)
margin_color: numpy.ndarray
Color used to fill the blank margin.
return_mask: bool
If True, also return a boolean mask of the region covered by the centerized image.
"""
if src.shape[:2] == shape[:2]:
if return_mask:
return src, np.ones(shape[:2], dtype=bool)
else:
return src
if len(shape) != src.ndim:
shape = list(shape) + [src.shape[2]]
centerized = np.zeros(shape, dtype=src.dtype)
if margin_color is not None:
centerized[:, :] = margin_color
src_h, src_w = src.shape[:2]
scale_h, scale_w = 1. * shape[0] / src_h, 1. * shape[1] / src_w
scale = min(scale_h, scale_w)
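# use the smaller scale factor so the resized image fits entirely inside the
# target shape while preserving its aspect ratio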
src = cv2.resize(src, None, None, fx=scale, fy=scale)
ph, pw = 0, 0
h, w = src.shape[:2]
dst_h, dst_w = shape[:2]
if h < dst_h:
ph = (dst_h - h) // 2
if w < dst_w:
pw = (dst_w - w) // 2
mask = np.zeros(shape[:2], dtype=bool)
mask[ph:ph + h, pw:pw + w] = True
centerized[ph:ph + h, pw:pw + w] = src
if return_mask:
return centerized, mask
else:
return centerized
def _tile(imgs, shape, dst):
"""Tile images which have same size.
Parameters
----------
imgs: numpy.ndarray
Image list which should be tiled.
shape: tuple of int
Tile shape.
dst:
Image to put the tile on.
"""
y_num, x_num = shape
tile_w = imgs[0].shape[1]
tile_h = imgs[0].shape[0]
if dst is None:
if len(imgs[0].shape) == 3:
dst = np.zeros((tile_h * y_num, tile_w * x_num, 3), dtype=np.uint8)
else:
dst = np.zeros((tile_h * y_num, tile_w * x_num), dtype=np.uint8)
for y in range(y_num):
for x in range(x_num):
i = x + y * x_num
if i < len(imgs):
y1 = y * tile_h
y2 = (y + 1) * tile_h
x1 = x * tile_w
x2 = (x + 1) * tile_w
dst[y1:y2, x1:x2] = imgs[i]
return dst
def _get_tile_shape(num):
import math
x_num = int(math.sqrt(num))
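# choose a near-square grid: floor(sqrt(num)) tiles along one axis and as many
# as needed along the other axis to fit every image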
y_num = 0
while x_num * y_num < num:
y_num += 1
return x_num, y_num
def tile(
imgs,
shape=None,
dst=None,
margin_color=None,
boundary=False,
boundary_color=(255, 255, 255),
boundary_thickness=3,
):
"""Tile images which have different size.
Parameters
----------
imgs:
Image list which should be tiled.
shape:
The tile shape.
dst:
Image to put the tile on.
margin_color: numpy.ndarray
Color to be filled in the blank.
"""
imgs = imgs[:]
if shape is None:
shape = _get_tile_shape(len(imgs))
# tile size: the smallest height and width over all images; each image is centerized to fit it
max_h, max_w = np.inf, np.inf
for img in imgs:
max_h = min(max_h, img.shape[0])
max_w = min(max_w, img.shape[1])
# tile images
is_color = False
for i, img in enumerate(imgs):
if img.ndim >= 3:
is_color = True
if is_color and img.ndim == 2:
img = skimage.color.gray2rgb(img)
if is_color and img.shape[2] == 4:
img = img[:, :, :3]
img = skimage.util.img_as_ubyte(img)
img = centerize(img, (max_h, max_w, 3), margin_color)
if boundary:
cv2.rectangle(img, (1, 1), (img.shape[1] - 1, img.shape[0] - 1),
boundary_color, thickness=boundary_thickness)
imgs[i] = img
return _tile(imgs, shape, dst)
def get_text_color(color):
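# Rec. 601 luma of the background color: black text on bright backgrounds,
# white text on dark ones.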
if color[0] * 0.299 + color[1] * 0.587 + color[2] * 0.114 > 170:
return (0, 0, 0)
return (255, 255, 255)
def label2rgb(lbl, img=None, label_names=None, n_labels=None,
alpha=0.5, thresh_suppress=0):
if label_names is None:
if n_labels is None:
n_labels = lbl.max() + 1 # +1 for bg_label 0
else:
if n_labels is None:
n_labels = len(label_names)
else:
assert n_labels == len(label_names)
cmap = label_colormap(n_labels)
cmap = (cmap * 255).astype(np.uint8)
lbl_viz = cmap[lbl]
if img is not None:
img_gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
img_gray = cv2.cvtColor(img_gray, cv2.COLOR_GRAY2RGB)
lbl_viz = alpha * lbl_viz + (1 - alpha) * img_gray
lbl_viz = lbl_viz.astype(np.uint8)
np.random.seed(1234)
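# unlabeled pixels (label == -1) are filled with random colors; the fixed seed
# keeps those colors reproducible between calls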
mask_unlabeled = lbl == -1
lbl_viz[mask_unlabeled] = \
np.random.random(size=(mask_unlabeled.sum(), 3)) * 255
if label_names is None:
return lbl_viz
for label in np.unique(lbl):
if label == -1:
continue # unlabeled
mask = lbl == label
if 1. * mask.sum() / mask.size < thresh_suppress:
continue
mask = (mask * 255).astype(np.uint8)
y, x = scipy.ndimage.center_of_mass(mask)
y, x = map(int, [y, x])
if lbl[y, x] != label:
Y, X = np.where(mask)
point_index = np.random.randint(0, len(Y))
y, x = Y[point_index], X[point_index]
text = label_names[label]
font_face = cv2.FONT_HERSHEY_SIMPLEX
font_scale = 0.7
thickness = 2
text_size, baseline = cv2.getTextSize(
text, font_face, font_scale, thickness)
color = get_text_color(lbl_viz[y, x])
cv2.putText(lbl_viz, text,
(x - text_size[0] // 2, y),
font_face, font_scale, color, thickness)
return lbl_viz
def mask_to_bbox(mask):
"""Convert binary mask image to bounding box.
Parameters
----------
mask: numpy.ndarray, (H, W), bool
Boolean mask.
Returns
-------
bbox: tuple of int, (4,)
x1, y1, x2, y2.
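Examples
--------
>>> m = np.zeros((5, 5), dtype=bool)
>>> m[1:3, 2:4] = True
>>> mask_to_bbox(m)  # x1, y1, x2, y2
(2, 1, 4, 3)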
"""
warnings.warn(
'mask_to_bbox is deprecated. Use masks_to_bbox '
'which returns array of (y1, x1, y2, x2).'
)
assert mask.dtype == bool
where = np.argwhere(mask)
(y1, x1), (y2, x2) = where.min(0), where.max(0) + 1
return x1, y1, x2, y2
def masks_to_bboxes(masks):
"""Convert binary mask image to bounding box.
Parameters
----------
masks: numpy.ndarray, (N, H, W), bool
Boolean masks.
Returns
-------
bboxes: numpy.ndarray, (N, 4), int32
Each bbox represents (y1, x1, y2, x2).
"""
bboxes = np.zeros((len(masks), 4), dtype=np.int32)
for i, mask in enumerate(masks):
assert mask.dtype == bool
where = np.argwhere(mask)
(y1, x1), (y2, x2) = where.min(0), where.max(0) + 1
bboxes[i] = (y1, x1, y2, x2)
return bboxes
def mask_to_lbl(mask, label):
"""Convert mask to label image."""
lbl = np.empty(mask.shape, dtype=np.int32)
lbl[mask] = label
lbl[~mask] = -1
return lbl
def resize(
image,
height=None,
width=None,
fy=None,
fx=None,
size=None,
interpolation=cv2.INTER_LINEAR,
):
"""Resize image with cv2 resize function.
Parameters
----------
image: numpy.ndarray
Source image.
height, width: None or int
Target height or width.
fy, fx: None or float
Target height or width scale.
size: None or int or float
Target total number of pixels (height * width) of the output image.
interpolation: int
Interpolation flag. (default: cv2.INTER_LINEAR == 1)
"""
hw_ratio = 1. * image.shape[0] / image.shape[1] # h / w
if height is not None or width is not None:
if height is None:
height = int(round(hw_ratio * width))
elif width is None:
width = int(round(1 / hw_ratio * height))
assert fy is None
assert fx is None
assert size is None
return cv2.resize(image, (width, height), interpolation=interpolation)
elif fy is not None or fx is not None:
if fy is None:
fy = fx
elif fx is None:
fx = fy
assert height is None
assert width is None
assert size is None
elif size is not None:
assert height is None
assert width is None
assert fy is None
assert fx is None
fx = fy = np.sqrt(1. * size / (image.shape[0] * image.shape[1]))
else:
raise ValueError
return cv2.resize(
image, None, None, fx=fx, fy=fy, interpolation=interpolation)
def resize_mask(mask, *args, **kwargs):
"""Resize mask in float space.
Parameters
----------
mask: numpy.ndarray
Source mask; shape must be (H, W) and dtype bool.
See grasp_fusion_lib.image.resize for other parameters.
"""
assert mask.dtype == bool
assert mask.ndim == 2
mask = mask.astype(float)
mask = resize(mask, *args, **kwargs)
mask = mask > 0.5
return mask
def resize_lbl(lbl, *args, **kwargs):
"""Resize lbl in channel space.
Parameters
----------
lbl: numpy.ndarray
Source label image; shape must be (H, W) and dtype int32.
See grasp_fusion_lib.image.resize for other parameters.
"""
assert lbl.dtype == np.int32
assert lbl.ndim == 2
# [label -> onehot] -> [resize] -> [onehot -> label]
min_value = lbl.min()
lbl -= min_value # shift to make the min_value to be 0
lbl_score = (np.arange(lbl.max() + 1) == lbl[..., None]).astype(np.float32)
lbl_score = resize(lbl_score, *args, **kwargs)
lbl_score = np.atleast_3d(lbl_score)
lbl = np.argmax(lbl_score, axis=2)
lbl = lbl.astype(np.int32)
lbl += min_value # restore the min_value
return lbl
| bsd-3-clause |
bthirion/scikit-learn | examples/linear_model/plot_sparse_recovery.py | 11 | 7453 | """
============================================================
Sparse recovery: feature selection for sparse linear models
============================================================
Given a small number of observations, we want to recover which features
of X are relevant to explain y. For this :ref:`sparse linear models
<l1_feature_selection>` can outperform standard statistical tests if the
true model is sparse, i.e. if a small fraction of the features are
relevant.
As detailed in :ref:`the compressive sensing notes
<compressive_sensing>`, the ability of L1-based approaches to identify the
relevant variables depends on the sparsity of the ground truth, the
number of samples, the number of features, the conditioning of the
design matrix on the signal subspace, the amount of noise, and the
absolute value of the smallest non-zero coefficient [Wainwright2006]
(http://statistics.berkeley.edu/sites/default/files/tech-reports/709.pdf).
Here we keep all parameters constant and vary the conditioning of the
design matrix. For a well-conditioned design matrix (small mutual
incoherence) we are exactly in compressive sensing conditions (i.i.d
Gaussian sensing matrix), and L1-recovery with the Lasso performs very
well. For an ill-conditioned matrix (high mutual incoherence),
regressors are very correlated, and the Lasso randomly selects one.
However, randomized-Lasso can recover the ground truth well.
In each situation, we first vary the alpha parameter setting the sparsity
of the estimated model and look at the stability scores of the randomized
Lasso. This analysis, knowing the ground truth, shows an optimal regime
in which relevant features stand out from the irrelevant ones. If alpha
is chosen too small, non-relevant variables enter the model. Conversely,
if alpha is selected too large, the Lasso is equivalent to
stepwise regression, and thus brings no advantage over a univariate
F-test.
As a second step, we set alpha and compare the performance of different
feature selection methods, using the area under curve (AUC) of the
precision-recall.
"""
print(__doc__)
# Author: Alexandre Gramfort and Gael Varoquaux
# License: BSD 3 clause
import warnings
import matplotlib.pyplot as plt
import numpy as np
from scipy import linalg
from sklearn.linear_model import (RandomizedLasso, lasso_stability_path,
LassoLarsCV)
from sklearn.feature_selection import f_regression
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import auc, precision_recall_curve
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.exceptions import ConvergenceWarning
def mutual_incoherence(X_relevant, X_irelevant):
"""Mutual incoherence, as defined by formula (26a) of [Wainwright2006].
"""
projector = np.dot(np.dot(X_irelevant.T, X_relevant),
linalg.pinvh(np.dot(X_relevant.T, X_relevant)))
return np.max(np.abs(projector).sum(axis=1))
for conditioning in (1, 1e-4):
###########################################################################
# Simulate regression data with a correlated design
n_features = 501
n_relevant_features = 3
noise_level = .2
coef_min = .2
# The Donoho-Tanner phase transition is around n_samples=25: below we
# will completely fail to recover in the well-conditioned case
n_samples = 25
block_size = n_relevant_features
rng = np.random.RandomState(42)
# The coefficients of our model
coef = np.zeros(n_features)
coef[:n_relevant_features] = coef_min + rng.rand(n_relevant_features)
# The correlation of our design: variables correlated by blocs of 3
corr = np.zeros((n_features, n_features))
for i in range(0, n_features, block_size):
corr[i:i + block_size, i:i + block_size] = 1 - conditioning
corr.flat[::n_features + 1] = 1
corr = linalg.cholesky(corr)
# Our design
X = rng.normal(size=(n_samples, n_features))
X = np.dot(X, corr)
# Keep [Wainwright2006] (26c) constant
X[:n_relevant_features] /= np.abs(
linalg.svdvals(X[:n_relevant_features])).max()
X = StandardScaler().fit_transform(X.copy())
# The output variable
y = np.dot(X, coef)
y /= np.std(y)
# We scale the added noise as a function of the average correlation
# between the design and the output variable
y += noise_level * rng.normal(size=n_samples)
mi = mutual_incoherence(X[:, :n_relevant_features],
X[:, n_relevant_features:])
###########################################################################
# Plot stability selection path, using a high eps for early stopping
# of the path, to save computation time
alpha_grid, scores_path = lasso_stability_path(X, y, random_state=42,
eps=0.05)
plt.figure()
# We plot the path as a function of alpha/alpha_max to the power 1/3: the
# power 1/3 scales the path less brutally than the log, and enables to
# see the progression along the path
hg = plt.plot(alpha_grid[1:] ** .333, scores_path[coef != 0].T[1:], 'r')
hb = plt.plot(alpha_grid[1:] ** .333, scores_path[coef == 0].T[1:], 'k')
ymin, ymax = plt.ylim()
plt.xlabel(r'$(\alpha / \alpha_{max})^{1/3}$')
plt.ylabel('Stability score: proportion of times selected')
plt.title('Stability Scores Path - Mutual incoherence: %.1f' % mi)
plt.axis('tight')
plt.legend((hg[0], hb[0]), ('relevant features', 'irrelevant features'),
loc='best')
###########################################################################
# Plot the estimated stability scores for a given alpha
# Use 6-fold cross-validation rather than the default 3-fold: it leads to
# a better choice of alpha:
# Stop the user warnings outputs- they are not necessary for the example
# as it is specifically set up to be challenging.
with warnings.catch_warnings():
warnings.simplefilter('ignore', UserWarning)
warnings.simplefilter('ignore', ConvergenceWarning)
lars_cv = LassoLarsCV(cv=6).fit(X, y)
# Run the RandomizedLasso: we use a paths going down to .1*alpha_max
# to avoid exploring the regime in which very noisy variables enter
# the model
alphas = np.linspace(lars_cv.alphas_[0], .1 * lars_cv.alphas_[0], 6)
clf = RandomizedLasso(alpha=alphas, random_state=42).fit(X, y)
trees = ExtraTreesRegressor(100).fit(X, y)
# Compare with F-score
F, _ = f_regression(X, y)
plt.figure()
for name, score in [('F-test', F),
('Stability selection', clf.scores_),
('Lasso coefs', np.abs(lars_cv.coef_)),
('Trees', trees.feature_importances_),
]:
precision, recall, thresholds = precision_recall_curve(coef != 0,
score)
plt.semilogy(np.maximum(score / np.max(score), 1e-4),
label="%s. AUC: %.3f" % (name, auc(recall, precision)))
plt.plot(np.where(coef != 0)[0], [2e-4] * n_relevant_features, 'mo',
label="Ground truth")
plt.xlabel("Features")
plt.ylabel("Score")
# Plot only the 100 first coefficients
plt.xlim(0, 100)
plt.legend(loc='best')
plt.title('Feature selection scores - Mutual incoherence: %.1f'
% mi)
plt.show()
| bsd-3-clause |
ammarkhann/FinalSeniorCode | lib/python2.7/site-packages/pandas/tests/indexes/test_frozen.py | 18 | 2435 | import numpy as np
from pandas.util import testing as tm
from pandas.tests.test_base import CheckImmutable, CheckStringMixin
from pandas.core.indexes.frozen import FrozenList, FrozenNDArray
from pandas.compat import u
class TestFrozenList(CheckImmutable, CheckStringMixin):
mutable_methods = ('extend', 'pop', 'remove', 'insert')
unicode_container = FrozenList([u("\u05d0"), u("\u05d1"), "c"])
def setup_method(self, method):
self.lst = [1, 2, 3, 4, 5]
self.container = FrozenList(self.lst)
self.klass = FrozenList
def test_add(self):
result = self.container + (1, 2, 3)
expected = FrozenList(self.lst + [1, 2, 3])
self.check_result(result, expected)
result = (1, 2, 3) + self.container
expected = FrozenList([1, 2, 3] + self.lst)
self.check_result(result, expected)
def test_inplace(self):
q = r = self.container
q += [5]
self.check_result(q, self.lst + [5])
# other shouldn't be mutated
self.check_result(r, self.lst)
class TestFrozenNDArray(CheckImmutable, CheckStringMixin):
mutable_methods = ('put', 'itemset', 'fill')
unicode_container = FrozenNDArray([u("\u05d0"), u("\u05d1"), "c"])
def setup_method(self, method):
self.lst = [3, 5, 7, -2]
self.container = FrozenNDArray(self.lst)
self.klass = FrozenNDArray
def test_shallow_copying(self):
original = self.container.copy()
assert isinstance(self.container.view(), FrozenNDArray)
assert not isinstance(self.container.view(np.ndarray), FrozenNDArray)
assert self.container.view() is not self.container
tm.assert_numpy_array_equal(self.container, original)
# Shallow copy should be the same too
assert isinstance(self.container._shallow_copy(), FrozenNDArray)
# setting should not be allowed
def testit(container):
container[0] = 16
self.check_mutable_error(testit, self.container)
def test_values(self):
original = self.container.view(np.ndarray).copy()
n = original[0] + 15
vals = self.container.values()
tm.assert_numpy_array_equal(original, vals)
assert original is not vals
vals[0] = n
assert isinstance(self.container, FrozenNDArray)
tm.assert_numpy_array_equal(self.container.values(), original)
assert vals[0] == n
| mit |
andnovar/ggplot | ggplot/stats/stat_density.py | 12 | 1690 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
import numpy as np
import pandas as pd
from scipy.stats import gaussian_kde
from ggplot.utils import make_iterable_ntimes
from ggplot.utils.exceptions import GgplotError
from .stat import stat
# TODO: switch to statsmodels kdes
class stat_density(stat):
REQUIRED_AES = {'x'}
DEFAULT_PARAMS = {'geom': 'density', 'position': 'stack',
'kernel': 'gaussian', 'adjust': 1, 'trim': False}
CREATES = {'y'}
def _calculate(self, data):
x = data.pop('x')
try:
float(x.iloc[0])
except:
try:
# try to use it as a pandas.tslib.Timestamp
x = [ts.toordinal() for ts in x]
except:
raise GgplotError("stat_density(): aesthetic x mapping " +
"needs to be convertable to float!")
# TODO: Implement weight
try:
weight = data.pop('weight')
except KeyError:
weight = np.ones(len(x))
# TODO: Get "full" range of densities
# i.e tail off to zero like ggplot2? But there is nothing
# wrong with the current state.
kde = gaussian_kde(x)
bottom = np.min(x)
top = np.max(x)
step = (top - bottom) / 1000.0
x = np.arange(bottom, top, step)
y = kde.evaluate(x)
new_data = pd.DataFrame({'x': x, 'y': y})
# Copy the other aesthetics into the new dataframe
n = len(x)
for ae in data:
new_data[ae] = make_iterable_ntimes(data[ae].iloc[0], n)
return new_data
| bsd-2-clause |
xyjin/Program_trade_system | simulation.py | 1 | 17914 | #/usr/bin/env python
# vim: sw=4: et
LICENSE="""
Copyright (C) 2011 Michael Ihde
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
import imp
import time
import logging
import datetime
import numpy
import os
import sys
import tables
import math
import yaml
#from config import CONFIG
import config
from yahoo import Market
from utils.progress_bar import ProgressBar
from utils.model import *
from utils.market import *
from utils.date import ONE_DAY
from pycommando.commando import command
import matplotlib.pyplot as plt
import matplotlib.mlab as mlab
import matplotlib.ticker as ticker
import matplotlib.dates as dates
import portfolio
import report
import plots
import threading
import threadPool
def initialize_position(CONFIG, portfolio, date):
p = CONFIG['portfolios'][portfolio]
if not type(date) == datetime.datetime:
date = datetime.datetime.strptime(date, "%Y-%m-%d")
# Turn the initial cash value into shares based off the portfolio percentage
position = {'$': 0.0}
market = Market()
for instrument, amt in p.items():
instrument = instrument.strip()
if type(amt) == str:
amt = amt.strip()
if instrument == "$":
position[instrument] += float(amt)
else:
d = date
quote = market[instrument][d]
while quote == None:
# Walk backwards looking for a day that had a close price, but not too far
# because the given instrument may not exist at any time for the given
# date or prior to it
d = d - ONE_DAY
if (date - d) > datetime.timedelta(days=7):
break
quote = market[instrument][d]
if quote == None:
# This occurs if the instrument does not exist in the market
# at the start of the simulation period
position[instrument] = Position(0.0, 0.0)
if type(amt) == str and amt.startswith('$'):
amt = float(amt[1:])
position['$'] += amt
else:
print "Warning. Non-cash value used for instrument that is not available at start of simulation period"
else:
price = quote.adjclose
if type(amt) == str and amt.startswith('$'):
amt = float(amt[1:])
amt = math.floor(amt / price)
position[instrument] = Position(float(amt), price)
return position
def write_position(MARKET, table, position, date):
for instrument, p in position.items():
table.row['date'] = date.date().toordinal()
table.row['date_str'] = str(date.date())
table.row['symbol'] = instrument
if instrument == '$':
table.row['amount'] = 0
table.row['value'] = p
else:
table.row['amount'] = p.amount
table.row['basis'] = p.basis
quote = MARKET[instrument][date]
#price = MARKET[instrument][date].adjclose
if quote:
price = MARKET[instrument][date].adjclose
table.row['value'] = price
else:
table.row['value'] = 0.0
table.row.append()
def write_performance(MARKET, table, position, date):
value = 0.0
for instrument, p in position.items():
if instrument == '$':
value += p
else:
quote = MARKET[instrument][date]
if quote:
price = MARKET[instrument][date].adjclose
value += (price * p.amount)
table.row['date'] = date.date().toordinal()
table.row['date_str'] = str(date.date())
table.row['value'] = value
table.row.append()
def execute_orders(MARKET, table, position, date, orders):
for order in orders:
logging.debug("Executing order %s", order)
if position.has_key(order.symbol):
ticker = MARKET[order.symbol]
if order.order == Order.SELL:
if order.price_type == Order.MARKET_PRICE:
strike_price = ticker[date].adjopen
elif order.price_type == Order.MARKET_ON_CLOSE:
strike_price = ticker[date].adjclose
else:
raise StandardError, "Unsupported price type"
qty = None
if order.quantity == "ALL":
qty = position[order.symbol].amount
else:
qty = order.quantity
print "actually sell: %d" % qty
if qty > position[order.symbol].amount or qty < 1:
logging.warn("Ignoring invalid order %s. Invalid quantity", order)
continue
price_paid = 0.0
table.row['date'] = date.date().toordinal()
table.row['date_str'] = str(date.date())
table.row['order_type'] = order.order
table.row['symbol'] = order.symbol
table.row['order'] = str(order)
table.row['executed_quantity'] = qty
table.row['executed_price'] = strike_price
table.row['basis'] = position[order.symbol].basis
table.row.append()
position[order.symbol].remove(qty, strike_price)
position['$'] += (qty * strike_price)
position['$'] -= 9.99 # TODO make trading cost configurable
elif order.order == Order.BUY:
if order.price_type == Order.MARKET_PRICE:
strike_price = ticker[date].adjopen
elif order.price_type == Order.MARKET_ON_CLOSE:
strike_price = ticker[date].adjclose
if type(order.quantity) == str and order.quantity[0] == "$":
qty = (int(float(order.quantity[1:]) / strike_price)/100)*100
else:
qty = int(order.quantity)
table.row['date'] = date.date().toordinal()
table.row['date_str'] = str(date.date())
table.row['order_type'] = order.order
table.row['symbol'] = order.symbol
table.row['order'] = str(order)
table.row['executed_quantity'] = qty
table.row['executed_price'] = strike_price
table.row['basis'] = 0.0
table.row.append()
position[order.symbol].add(qty, strike_price)
position['$'] -= (qty * strike_price)
position['$'] -= 9.99
def load_strategy(name):
mydir = os.path.abspath(os.path.dirname(sys.argv[0]))
strategydir = os.path.join(mydir, "strategies")
sys.path.insert(0, strategydir)
if name in sys.modules.keys():
reload(sys.modules[name])
else:
__import__(name)
#print sys.modules[name]
clazz = getattr(sys.modules[name], "CLAZZ")
sys.path.pop(0)
return clazz
@command("analyze")
def analyze(strategy_name, portfolio, strategy_params="{}"):
"""Using a given strategy and portfolio, make a trading decision"""
now = datetime.datetime.today()
position = initialize_position(portfolio, now)
# Initialize the strategy
params = yaml.load(strategy_params)
strategy_clazz = load_strategy(strategy_name)
strategy = strategy_clazz(now, now, position, MARKET, params)
orders = strategy.evaluate(now, position, MARKET)
for order in orders:
print order
@command("simulate")
def simulate(MARKET, CONFIG, strategy_name, portfolio, start_date, end_date, output="~/.quant/simulation.h5", strategy_params="{}"):
"""A simple simulator that simulates a strategy that only makes
decisions at closing. Only BUY and SELL orders are supported. Orders
are only good for the next day.
A price type of MARKET is executed at the open price the next day.
A price type of MARKET_ON_CLOSE is executed at the close price the next day.
A price type of LIMIT will be executed at the LIMIT price the next day if LIMIT
is between the low and high prices of the day.
A price type of STOP will be executed at the STOP price the next day if STOP
is between the low and high prices of the day.
A price type of STOP_LIMIT will be executed at the LIMIT price the next day if STOP
is between the low and high prices of the day.
"""
outputFile = openOutputFile(output)
# Get some of the tables from the output file
order_tbl = outputFile.getNode("/Orders")
postion_tbl = outputFile.getNode("/Position")
performance_tbl = outputFile.getNode("/Performance")
start_date = datetime.datetime.strptime(start_date, "%Y-%m-%d")
end_date = datetime.datetime.strptime(end_date, "%Y-%m-%d")
# Start the simulation at closing of the previous trading day
print start_date
now = getPrevTradingDay(MARKET, start_date)
try:
position = initialize_position(CONFIG, portfolio, now)
for instrument, p in position.items():
if instrument != '$':
quote = MARKET[instrument][now]
if quote == None:
return
# Pre-cache some info to make the simulation faster
'''
ticker = MARKET["399001.sz"].updateHistory(start_date, end_date)
for symbol in position.keys():
if symbol != '$':
MARKET[symbol].updateHistory(start=start_date, end=end_date)
'''
days = (end_date - start_date).days
# Initialize the strategy
params = yaml.load(strategy_params)
imp.acquire_lock()
strategy_clazz = load_strategy(strategy_name)
imp.release_lock()
print 'jinxiaoyi'
strategy = strategy_clazz(start_date, end_date, position, MARKET, params, outputFile)
p = ProgressBar(maxValue=days, totalWidth=80)
print "Starting Simulation %s" % portfolio
# Write the initial position to the database
write_position(MARKET,postion_tbl, position, now)
write_performance(MARKET,performance_tbl, position, now)
while now <= end_date:
#print now
# Remember 'now' is after closing, so the strategy
# can use any information from 'now' or earlier
#orders = strategy.evaluate(now, position, MARKET)
# Go to the next day to evaluate the orders
while 1:
orders = strategy.evaluate(now, position, MARKET)
if orders == 'outdate':
outputFile.close()
return
if orders == None:
now += ONE_DAY
p.performWork(1)
else:
break
# Execute orders
execute_orders(MARKET, order_tbl, position, now, orders)
write_position(MARKET, postion_tbl, position, now)
write_performance(MARKET, performance_tbl, position, now)
now += ONE_DAY
# Flush the data to disk
outputFile.flush()
p.performWork(1)
#print p, '\r'
p.updateAmount(p.max)
#print p, '\r',
#print '\r\n' # End the progress bar here before calling finalize
orders = strategy.finalize()
finally:
outputFile.close()
def createHighLow(index):
MARKET = Market()
CONFIG = config._Config(index)
portfolio.delete(CONFIG, index)
portfolio.create(CONFIG, index, 10000, '{%s: $0}' % index)
file_name = './quant/highLow/peak%s.h5' % index.split('.')[0]
simulate(MARKET, CONFIG, 'highlow', index, '2014-01-06', '2014-10-21', file_name, '{short: 18, long: 47, during: 2, win: 0.07, loss: 0.03}')
return index
if __name__ == '__main__':
print datetime.date.today()
#logging.basicConfig(level=logging.ERROR)
'''
china = Market()
china.updateHistory()
'''
#china = Market()
#symbols = china.cache.symbols()
#createHighLow('002391.sz')
#file_name = './quant/highLow/peak002391.h5'
#plots.plot_indicators('002391.sz', 'all', file_name)
symbols = ['002391.sz', '000002.sz', '000004.sz', '600475.ss', '000584.sz', '600720.ss', '002020.sz', '300039.sz', '600468.ss', '300096.sz']
wm = threadPool.WorkerManager(4)
for index in symbols:
wm.add_job(createHighLow, index)
wm.wait_for_complete()
'''
threads = []
count = 0
china = Market()
#createHighLow('000673.sz')
#china.fetchHistory()
symbols = china.cache.symbols()
for index in symbols:
createHighLow(index)
'''
'''
count += 1
t = threading.Thread(target=createHighLow, args=(index,))
threads.append(t)
if count/10 == 0:
for t in threads:
t.start()
for t in threads:
t.join()
threads = []
'''
'''
for i in range(30,50):
portfolio.delete('300172.sz')
portfolio.create('300172.sz', 10000, '{300172.sz: $0}')
file_name = './quant/expma%d.h5' % i
simulate( 'trending_with_ema', '300172.sz', '2014-01-01', '2014-07-01', file_name, '{long: %d, short: 18}' % i)
'''
'''
for i in range(30,50):
file_name = './quant/expma%d.h5' % i
report1 = report.calculate_performance(file_name)
print "EMA: %d" % i
print "Starting Value: $%(starting_value)0.2f" % report1
print "Ending Value: $%(ending_value)0.2f" % report1
print "Return: $%(equity_return)0.2f (%(equity_percent)3.2f%%)" % report1
'''
'''
for i in range(30,50):
file_name = './quant/expma%d.h5' % i
plots.plot_indicators('300172.sz', 'all', file_name)
'''
#index = '300206.sz'
'''
threads = []
indexs = ['000001.sz', '000002.sz', '000004.sz', '000005.sz', '000006.sz', '000008.sz', '000009.sz', '000010.sz', '000011.sz', '000012.sz', '000014.sz', '000016.sz', '000017.sz', '000018.sz', '000019.sz', '000020.sz', '000021.sz', '000022.sz', '000023.sz', '000024.sz', '000025.sz', '000026.sz', '000027.sz', '000028.sz', '000029.sz', '000030.sz', '000031.sz', '000032.sz', '000033.sz', '000034.sz', '000035.sz', '000036.sz', '000037.sz', '000038.sz', '000039.sz', '000040.sz']
for index in indexs:
portfolio.delete(index)
portfolio.create(index, 10000, '{%s: $0}' % index)
file_name = './quant/two_red%s.h5' % index.split('.')[0]
simulate('two_red', index, '2014-01-01', '2014-09-30', file_name, '{short: 18, long: 47, during: 2, win: 0.07, loss: 0.03}')
'''
'''
indexs = ['000001.sz', '000002.sz', '000004.sz', '000005.sz', '000006.sz', '000008.sz', '000009.sz', '000010.sz', '000011.sz', '000012.sz', '000014.sz', '000016.sz', '000017.sz']
for index in indexs:
print "index: %s" % index
file_name = './quant/two_red%s.h5' % index.split('.')[0]
report1 = report.calculate_performance(file_name)
print "Starting Value: $%(starting_value)0.2f" % report1
print "Ending Value: $%(ending_value)0.2f" % report1
print "Return: $%(equity_return)0.2f (%(equity_percent)3.2f%%)" % report1
'''
#plots.plot('./quant/two_red.h5')
#plots.plot_indicators(index, 'all', './quant/two_red.h5')
#plots.show()
#report.report_performance('./quant/trending.h5')
#plots.plot_indicators('300172.sz', 'all', './quant/expma47.h5')
#plots.plot('./quant/expma.h5')
#plots.show()
'''
index = "300065.sz"
portfolio.delete(index)
portfolio.create(index, 10000, '{%s: $0}' % index)
file_name = './quant/peak%s.h5' % index.split('.')[0]
simulate('highlow', index, '2013-01-05', '2014-09-30', file_name, '{short: 18, long: 47, during: 2, win: 0.07, loss: 0.03}')
#report.report_performance(file_name)
plots.plot_indicators(index, 'all', file_name)
#plots.plot(file_name)
plots.show()
'''
'''
index = "600030.ss"
portfolio.delete(index)
portfolio.create(index, 10000, '{%s: $0}' % index)
file_name = './quant/ma%s.h5' % index.split('.')[0]
simulate('averageSystem', index, '2014-01-05', '2014-09-30', file_name, '{short: 18, long: 47, period: 60, win: 0.07, loss: 0.03}')
report.report_performance(file_name)
plots.plot_indicators(index, 'all', file_name)
plots.plot(file_name)
plots.show()
'''
'''
index = "600030.ss"
file_name = './quant/peak%s.h5' % index.split('.')[0]
plots.plot_indicators(index, 'all', file_name)
plots.show()
inputFile = tables.openFile(os.path.expanduser(file_name), "r")
print inputFile.list_nodes("/Indicators/" + index, classname="Table")
try:
for tbl in inputFile.iterNodes("/Indicators/" + index, classname="Table"):
if tbl.name == 'peak':
x_data = tbl.col('date')
y_data = tbl.col('value')
z_data = tbl.col('flag')
print x_data
print y_data
print z_data
finally:
inputFile.close()
print min(y_data)
print max(y_data)
'''
| gpl-2.0 |
yanfriend/pandas | sp500_reg.py | 1 | 1705 | import datetime
import pandas as pd
import pandas.io.data
from pandas import DataFrame
import matplotlib.pyplot as plt
from matplotlib import style
import numpy as np
interested_range = -500
# style.use('ggplot')
sp = pd.read_csv('sp500.csv', index_col='Date', parse_dates=True)
sp_close = sp['Adj Close']
sp_close = sp_close[interested_range:]
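# convert adjusted closes to daily log returns (the same transform is applied to VIX below)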
sp_close = np.log(sp_close / sp_close.shift(1))
print sp_close.tail()
#ax1 = plt.subplot(3,1,1)
#ax1.yaxis.tick_right()
#ax1.plot(sp_close, label='sp500')
#plt.legend(loc='upper left')
vix = pd.read_csv('vix.csv', index_col='Date', parse_dates=True)
vix_close = vix['Adj Close']
vix_close = vix_close[interested_range:]
vix_close = np.log(vix_close / vix_close.shift(1)) # log, normalized
print vix_close.tail()
# finish reading data so far.
xdat = vix_close.shift(20) # shift forward, so that today's sp close corresponds to x day earlier vix data.
# 5, 50 -> positive, 15 is best one. 20 is negative, but generally, useless.
print xdat
ydat = sp_close
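# OLS of S&P 500 daily log returns on VIX log returns lagged by 20 trading days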
model = pd.ols(y=ydat, x=xdat)
print model.beta
plt.plot(xdat, ydat, 'r.')
ax = plt.axis()
x = np.linspace(ax[0], ax[1]+0.01)
plt.plot(x, model.beta[1] + model.beta[0]*x, 'b', lw=2)
plt.axis('tight')
plt.show()
# todo 2, use log, normalized
#ax2 = plt.subplot(3,1,2)
#ax2.yaxis.tick_right()
#ax2.plot(vix_close, label='vix')
#plt.legend(loc='upper left')
#pcr = pd.read_csv('equitypc.csv', index_col='DATE', parse_dates=True, skiprows=2)
#pcr = pcr['P/C Ratio']
#print pcr.tail(13)
#ma = pd.rolling_mean(pcr, 13)
#ma = ma[interested_range:]
#ax3 = plt.subplot(3,1,3, sharex=ax1)
#ax3.yaxis.tick_right()
#ax3.plot(ma, label='pcr ma')
#plt.legend(loc='upper left')
#plt.show()
| bsd-2-clause |
krafczyk/spack | var/spack/repos/builtin/packages/geopm/package.py | 2 | 4734 | ##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Geopm(AutotoolsPackage):
"""GEOPM is an extensible power management framework targeting HPC.
The GEOPM package provides libgeopm, libgeopmpolicy and applications
geopmctl and geopmpolicy, as well as tools for postprocessing.
GEOPM is designed to be extended for new control algorithms and new
hardware power management features via its plugin infrastructure.
Note: GEOPM interfaces with hardware using Model Specific Registers (MSRs).
For proper usage, make sure MSRs are made available directly or via the
msr-safe kernel module by your administrator."""
homepage = "https://geopm.github.io"
url = "https://github.com/geopm/geopm/releases/download/v0.4.0/geopm-0.4.0.tar.gz"
git = "https://github.com/geopm/geopm.git"
# Add additional proper versions and checksums here. "spack checksum geopm"
version('develop', branch='dev')
version('master', branch='master')
version('0.5.0', '61b454bc74d4606fe84818aef16c1be4')
version('0.4.0', 'd4cc8fffe521296dab379857d7e2064d')
version('0.3.0', '568fd37234396fff134f8d57b60f2b83')
# Variants reflecting most ./configure --help options
variant('debug', default=False, description='Enable debug.')
variant('coverage', default=False, description='Enable test coverage support, enables debug by default.')
variant('overhead', default=False, description='Enable GEOPM to calculate and display time spent in GEOPM API calls.')
variant('procfs', default=True, description='Enable procfs (disable for OSes not using procfs).')
variant('mpi', default=True, description='Enable MPI dependent components.')
variant('fortran', default=True, description='Build fortran interface.')
variant('doc', default=True, description='Create man pages with ruby-ronn.')
variant('openmp', default=True, description='Build with OpenMP.')
variant('ompt', default=False, description='Use OpenMP Tools Interface.')
variant('hwloc', default=True, description='Build with hwloc.')
variant('gnu-ld', default=False, description='Assume C compiler uses gnu-ld.')
# Added dependencies.
depends_on('m4', type='build')
depends_on('automake', type='build')
depends_on('autoconf', type='build')
depends_on('libtool', type='build')
depends_on('ruby-ronn', type='build', when='+doc')
depends_on('doxygen', type='build', when='+doc')
depends_on('numactl')
depends_on('mpi', when='+mpi')
# TODO: check if hwloc@specific-version still required with future openmpi
depends_on('[email protected]', when='+hwloc')
depends_on('json-c')
depends_on('py-pandas', type='run')
depends_on('py-numpy', type='run')
depends_on('py-natsort', type='run')
depends_on('py-matplotlib', type='run')
parallel = False
def configure_args(self):
args = []
args.extend(self.enable_or_disable('debug'))
args.extend(self.enable_or_disable('coverage'))
args.extend(self.enable_or_disable('overhead'))
args.extend(self.enable_or_disable('procfs'))
args.extend(self.enable_or_disable('mpi'))
args.extend(self.enable_or_disable('fortran'))
args.extend(self.enable_or_disable('doc'))
args.extend(self.enable_or_disable('openmp'))
args.extend(self.enable_or_disable('ompt'))
args.extend(self.with_or_without('hwloc', activation_value='prefix'))
args.extend(self.with_or_without('gnu-ld'))
return args
| lgpl-2.1 |
Jimmy-Morzaria/scikit-learn | benchmarks/bench_sparsify.py | 323 | 3372 | """
Benchmark SGD prediction time with dense/sparse coefficients.
Invoke with
-----------
$ kernprof.py -l sparsity_benchmark.py
$ python -m line_profiler sparsity_benchmark.py.lprof
Typical output
--------------
input data sparsity: 0.050000
true coef sparsity: 0.000100
test data sparsity: 0.027400
model sparsity: 0.000024
r^2 on test data (dense model) : 0.233651
r^2 on test data (sparse model) : 0.233651
Wrote profile results to sparsity_benchmark.py.lprof
Timer unit: 1e-06 s
File: sparsity_benchmark.py
Function: benchmark_dense_predict at line 51
Total time: 0.532979 s
Line # Hits Time Per Hit % Time Line Contents
==============================================================
51 @profile
52 def benchmark_dense_predict():
53 301 640 2.1 0.1 for _ in range(300):
54 300 532339 1774.5 99.9 clf.predict(X_test)
File: sparsity_benchmark.py
Function: benchmark_sparse_predict at line 56
Total time: 0.39274 s
Line # Hits Time Per Hit % Time Line Contents
==============================================================
56 @profile
57 def benchmark_sparse_predict():
58 1 10854 10854.0 2.8 X_test_sparse = csr_matrix(X_test)
59 301 477 1.6 0.1 for _ in range(300):
60 300 381409 1271.4 97.1 clf.predict(X_test_sparse)
"""
from scipy.sparse.csr import csr_matrix
import numpy as np
from sklearn.linear_model.stochastic_gradient import SGDRegressor
from sklearn.metrics import r2_score
np.random.seed(42)
def sparsity_ratio(X):
return np.count_nonzero(X) / float(n_samples * n_features)
n_samples, n_features = 5000, 300
X = np.random.randn(n_samples, n_features)
inds = np.arange(n_samples)
np.random.shuffle(inds)
X[inds[int(n_features / 1.2):]] = 0 # sparsify input
print("input data sparsity: %f" % sparsity_ratio(X))
coef = 3 * np.random.randn(n_features)
inds = np.arange(n_features)
np.random.shuffle(inds)
coef[inds[n_features/2:]] = 0 # sparsify coef
print("true coef sparsity: %f" % sparsity_ratio(coef))
y = np.dot(X, coef)
# add noise
y += 0.01 * np.random.normal((n_samples,))
# Split data in train set and test set
n_samples = X.shape[0]
X_train, y_train = X[:n_samples / 2], y[:n_samples / 2]
X_test, y_test = X[n_samples / 2:], y[n_samples / 2:]
print("test data sparsity: %f" % sparsity_ratio(X_test))
###############################################################################
clf = SGDRegressor(penalty='l1', alpha=.2, fit_intercept=True, n_iter=2000)
clf.fit(X_train, y_train)
print("model sparsity: %f" % sparsity_ratio(clf.coef_))
def benchmark_dense_predict():
for _ in range(300):
clf.predict(X_test)
def benchmark_sparse_predict():
X_test_sparse = csr_matrix(X_test)
for _ in range(300):
clf.predict(X_test_sparse)
def score(y_test, y_pred, case):
r2 = r2_score(y_test, y_pred)
print("r^2 on test data (%s) : %f" % (case, r2))
score(y_test, clf.predict(X_test), 'dense model')
benchmark_dense_predict()
clf.sparsify()
score(y_test, clf.predict(X_test), 'sparse model')
benchmark_sparse_predict()
| bsd-3-clause |
HolgerPeters/scikit-learn | examples/linear_model/plot_lasso_model_selection.py | 39 | 5425 | """
===================================================
Lasso model selection: Cross-Validation / AIC / BIC
===================================================
Use the Akaike information criterion (AIC), the Bayes Information
criterion (BIC) and cross-validation to select an optimal value
of the regularization parameter alpha of the :ref:`lasso` estimator.
Results obtained with LassoLarsIC are based on AIC/BIC criteria.
Information-criterion based model selection is very fast, but it
relies on a proper estimation of degrees of freedom. The criteria are
derived for large samples (asymptotic results) and assume the model
is correct, i.e. that the data are actually generated by this model.
They also tend to break when the problem is badly conditioned
(more features than samples).
For cross-validation, we use 20-fold with 2 algorithms to compute the
Lasso path: coordinate descent, as implemented by the LassoCV class, and
Lars (least angle regression) as implemented by the LassoLarsCV class.
Both algorithms give roughly the same results. They differ with regards
to their execution speed and sources of numerical errors.
Lars computes a path solution only for each kink in the path. As a
result, it is very efficient when there are only a few kinks, which is
the case if there are few features or samples. Also, it is able to
compute the full path without setting any meta parameter. In
contrast, coordinate descent computes the path points on a pre-specified
grid (here we use the default). Thus it is more efficient if the number
of grid points is smaller than the number of kinks in the path. Such a
strategy can be interesting if the number of features is really large
and there are enough samples to select a large amount. In terms of
numerical errors, for heavily correlated variables, Lars will accumulate
more errors, while the coordinate descent algorithm will only sample the
path on a grid.
Note how the optimal value of alpha varies for each fold. This
illustrates why nested-cross validation is necessary when trying to
evaluate the performance of a method for which a parameter is chosen by
cross-validation: this choice of parameter may not be optimal for unseen
data.
"""
print(__doc__)
# Author: Olivier Grisel, Gael Varoquaux, Alexandre Gramfort
# License: BSD 3 clause
import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LassoCV, LassoLarsCV, LassoLarsIC
from sklearn import datasets
diabetes = datasets.load_diabetes()
X = diabetes.data
y = diabetes.target
rng = np.random.RandomState(42)
X = np.c_[X, rng.randn(X.shape[0], 14)] # add some bad features
# normalize data as done by Lars to allow for comparison
X /= np.sqrt(np.sum(X ** 2, axis=0))
##############################################################################
# LassoLarsIC: least angle regression with BIC/AIC criterion
model_bic = LassoLarsIC(criterion='bic')
t1 = time.time()
model_bic.fit(X, y)
t_bic = time.time() - t1
alpha_bic_ = model_bic.alpha_
model_aic = LassoLarsIC(criterion='aic')
model_aic.fit(X, y)
alpha_aic_ = model_aic.alpha_
def plot_ic_criterion(model, name, color):
alpha_ = model.alpha_
alphas_ = model.alphas_
criterion_ = model.criterion_
plt.plot(-np.log10(alphas_), criterion_, '--', color=color,
linewidth=3, label='%s criterion' % name)
plt.axvline(-np.log10(alpha_), color=color, linewidth=3,
label='alpha: %s estimate' % name)
plt.xlabel('-log(alpha)')
plt.ylabel('criterion')
plt.figure()
plot_ic_criterion(model_aic, 'AIC', 'b')
plot_ic_criterion(model_bic, 'BIC', 'r')
plt.legend()
plt.title('Information-criterion for model selection (training time %.3fs)'
% t_bic)
##############################################################################
# LassoCV: coordinate descent
# Compute paths
print("Computing regularization path using the coordinate descent lasso...")
t1 = time.time()
model = LassoCV(cv=20).fit(X, y)
t_lasso_cv = time.time() - t1
# Display results
m_log_alphas = -np.log10(model.alphas_)
plt.figure()
ymin, ymax = 2300, 3800
plt.plot(m_log_alphas, model.mse_path_, ':')
plt.plot(m_log_alphas, model.mse_path_.mean(axis=-1), 'k',
label='Average across the folds', linewidth=2)
plt.axvline(-np.log10(model.alpha_), linestyle='--', color='k',
label='alpha: CV estimate')
plt.legend()
plt.xlabel('-log(alpha)')
plt.ylabel('Mean square error')
plt.title('Mean square error on each fold: coordinate descent '
'(train time: %.2fs)' % t_lasso_cv)
plt.axis('tight')
plt.ylim(ymin, ymax)
##############################################################################
# LassoLarsCV: least angle regression
# Compute paths
print("Computing regularization path using the Lars lasso...")
t1 = time.time()
model = LassoLarsCV(cv=20).fit(X, y)
t_lasso_lars_cv = time.time() - t1
# Display results
m_log_alphas = -np.log10(model.cv_alphas_)
plt.figure()
plt.plot(m_log_alphas, model.mse_path_, ':')
plt.plot(m_log_alphas, model.mse_path_.mean(axis=-1), 'k',
label='Average across the folds', linewidth=2)
plt.axvline(-np.log10(model.alpha_), linestyle='--', color='k',
label='alpha CV')
plt.legend()
plt.xlabel('-log(alpha)')
plt.ylabel('Mean square error')
plt.title('Mean square error on each fold: Lars (train time: %.2fs)'
% t_lasso_lars_cv)
plt.axis('tight')
plt.ylim(ymin, ymax)
plt.show()
| bsd-3-clause |
fps7806/Graph-CNN | src/graphcnn/experiment_imagenet.py | 1 | 4988 | from graphcnn.helper import *
from graphcnn.network import *
from graphcnn.layers import *
from sklearn.model_selection import KFold
import numpy as np
import tensorflow as tf
import glob
import time
from graphcnn.experiment import GraphCNNExperiment
from tensorflow.python.training import queue_runner
# This function is used to create tf.cond compatible tf.train.batch alternative
def _make_batch_queue(input, capacity, num_threads=1):
queue = tf.PaddingFIFOQueue(capacity=capacity, dtypes=[s.dtype for s in input], shapes=[s.get_shape() for s in input])
tf.summary.scalar("fraction_of_%d_full" % capacity,
tf.cast(queue.size(), tf.float32) *
(1. / capacity))
enqueue_ops = [queue.enqueue(input)]*num_threads
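# one enqueue op per worker thread; the QueueRunner below starts a filler thread for each op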
queue_runner.add_queue_runner(queue_runner.QueueRunner(queue, enqueue_ops))
return queue
# This class is responsible for setting up and running experiments
# Also provides helper functions related to experiments (e.g. get accuracy)
class GraphCNNImageNetExperiment(GraphCNNExperiment):
def __init__(self, dataset_name, model_name, net_constructor):
GraphCNNExperiment.__init__(self, dataset_name, model_name, net_constructor)
self.number_of_classes = 1000
self.image_resize_width = 256
self.image_resize_height = 256
self.image_width = 227
self.image_height = 227
# Create input_producers and batch queues
def create_data(self):
with tf.device("/cpu:0"):
with tf.variable_scope('input') as scope:
# Create the training queue
with tf.variable_scope('train_data') as scope:
self.print_ext('Creating training Tensorflow Tensors')
filenames = []
labels = []
with open(self.train_list_file) as file:
for line in file:
key, value = line[:-1].split()
value = int(value)
if value < self.number_of_classes:
labels.append(value)
filenames.append(key)
training_samples = [np.array(filenames), np.array(labels).astype(np.int64)]
training_samples = self.create_input_variable(training_samples)
single_sample = tf.train.slice_input_producer(training_samples, shuffle=True, capacity=2048)
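# per-sample preprocessing: decode the JPEG, resize, take a random crop at the
# network input size, randomly flip, and scale pixel values to [0, 1]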
single_sample[0] = tf.image.decode_jpeg(tf.read_file(single_sample[0]), channels=3)
single_sample[0] = tf.random_crop(tf.image.resize_images(single_sample[0], [self.image_resize_width, self.image_resize_height]), [self.image_width, self.image_height, 3])
single_sample[0] = tf.image.random_flip_left_right(single_sample[0])
single_sample[0] = tf.cast(single_sample[0], dtype=tf.float32)/255
train_queue = _make_batch_queue(single_sample, capacity=self.train_batch_size*2, num_threads=8)
# Create the test queue
with tf.variable_scope('test_data') as scope:
self.print_ext('Creating test Tensorflow Tensors')
filenames = []
labels = []
with open(self.val_list_file) as file:
for line in file:
key, value = line[:-1].split()
value = int(value)
if value < self.number_of_classes:
labels.append(value)
filenames.append(key)
test_samples = [np.array(filenames), np.array(labels).astype(np.int64)]
test_samples = self.create_input_variable(test_samples)
single_sample = tf.train.slice_input_producer(test_samples, shuffle=True, capacity=128)
single_sample[0] = tf.image.decode_jpeg(tf.read_file(single_sample[0]), channels=3)
single_sample[0] = tf.image.resize_image_with_crop_or_pad(tf.image.resize_images(single_sample[0], [self.image_resize_width, self.image_resize_height]), self.image_width, self.image_height)
single_sample[0].set_shape([self.image_width, self.image_height, 3])
single_sample[0] = tf.cast(single_sample[0], dtype=tf.float32)/255
test_queue = _make_batch_queue(single_sample, capacity=self.test_batch_size*2, num_threads=1)
result = tf.cond(self.net.is_training, lambda: train_queue.dequeue_many(self.train_batch_size), lambda: test_queue.dequeue_many(self.test_batch_size))
# Have to add placeholder for A and mask
result = [result[0], None, result[1], None]
return result | mit |
georgetown-analytics/skidmarks | bin/distributionplt.py | 1 | 1693 | # -*- coding: utf-8 -*-
###############################################################################
# Information
###############################################################################
# Created by Linwood Creekmore
# Skeleton from http://stackoverflow.com/questions/20011494/plot-normal-distribution-with-matplotlib
# In partial fulfillment of the requirements for the Georgetown University Data Analytics Graduate Certificate Program
# April 29, 2015
# https://plus.google.com/+LinwoodCreekmoreIII/
###############################################################################
# Imports
###############################################################################
import pandas as pd
import numpy as np
import scipy.stats as stats
import matplotlib.pyplot as plt
import os
def DisPlt(driver,trip):
path = os.path.abspath(os.getcwd())
pathtocsv = os.path.normpath(os.path.join(path,"output","trip",str(driver)+"_"+str(trip)+".csv"))
df = pd.read_csv(pathtocsv)
investigation = str(raw_input("Enter a variable \n>"))
h = sorted(df[investigation]) # sorted values of the chosen column
fit = stats.norm.pdf(h, np.mean(h), np.std(h)) #this is a fitting indeed
plt.plot(h,fit,'-o')
plt.hist(h,normed=True) #use this to draw histogram of your data
plt.show() #use may also need add this
###############################################################################
# 'Main' Function
###############################################################################
if __name__ == '__main__':
driver = raw_input('Pick a driver. Enter a number between 1-3612:\n')
trip = raw_input('Pick a trip. Enter a number between 1-200:\n')
DisPlt(driver,trip) | mit |
gdsfactory/gdsfactory | docs/conf.py | 1 | 1160 | from recommonmark.transform import AutoStructify
project = "gdsfactory"
version = "2.6.1"
copyright = "2019, PsiQ"
author = "PsiQ"
master_doc = "index"
html_theme = "sphinx_rtd_theme"
source_suffix = {
".rst": "restructuredtext",
".txt": "markdown",
".md": "markdown",
}
html_static_path = ["_static"]
htmlhelp_basename = project
extensions = [
"nbsphinx",
"sphinx.ext.autodoc",
"sphinx.ext.mathjax",
"sphinx.ext.napoleon",
"sphinx.ext.todo",
"sphinx.ext.viewcode",
"matplotlib.sphinxext.plot_directive",
"sphinx_markdown_tables",
"sphinx.ext.doctest",
"recommonmark",
"sphinx_autodoc_typehints",
]
autodoc_member_order = "bysource"
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
exclude_patterns = [
"_build",
"Thumbs.db",
".DS_Store",
"**.ipynb_checkpoints",
"build",
"extra/**",
]
napoleon_use_param = True
def setup(app):
app.add_config_value(
"recommonmark_config",
{"auto_toc_tree_section": "Contents", "enable_eval_rst": True},
True,
)
app.add_transform(AutoStructify)
| mit |
leggitta/mne-python | examples/visualization/plot_clickable_image.py | 6 | 2324 | """
================================================================
Demonstration of how to use ClickableImage / generate_2d_layout.
================================================================
In this example, we open an image file, then use ClickableImage to
return 2D locations of mouse clicks (or load a file already created).
Then, we use generate_2d_layout to turn those xy positions into a layout
for use with plotting topo maps. In this way, you can take arbitrary xy
positions and turn them into a plottable layout.
"""
# Authors: Christopher Holdgraf <[email protected]>
#
# License: BSD (3-clause)
from scipy.ndimage import imread
import numpy as np
from matplotlib import pyplot as plt
from os import path as op
import mne
from mne.viz import ClickableImage, add_background_image # noqa
from mne.channels import generate_2d_layout # noqa
print(__doc__)
# Set parameters and paths
plt.rcParams['image.cmap'] = 'gray'
im_path = op.join(op.dirname(mne.__file__), 'data', 'image', 'mni_brain.gif')
# We've already clicked and exported
layout_path = op.join(op.dirname(mne.__file__), 'data', 'image')
layout_name = 'custom_layout.lout'
###############################################################################
# Load data and click
im = imread(im_path)
plt.imshow(im)
"""
This code opens the image so you can click on it. Commented out
because we've stored the clicks as a layout file already.
# The click coordinates are stored as a list of tuples
click = ClickableImage(im)
click.plot_clicks()
coords = click.coords
# Generate a layout from our clicks and normalize by the image
lt = generate_2d_layout(np.vstack(coords), bg_image=im)
lt.save(layout_path + layout_name) # To save if we want
"""
# We've already got the layout, load it
lt = mne.channels.read_layout(layout_name, path=layout_path, scale=False)
# Create some fake data
nchans = len(lt.pos)
nepochs = 50
sr = 1000
nsec = 5
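# Dummy events array: one (sample index, previous value, event id) row per epoch, as MNE expects.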
events = np.arange(nepochs).reshape([-1, 1])
events = np.hstack([events, np.zeros([nepochs, 2])])
data = np.random.randn(nepochs, nchans, sr * nsec)
info = mne.create_info(nchans, sr, ch_types='eeg')
epochs = mne.EpochsArray(data, info, events)
evoked = epochs.average()
# Using the native plot_topo function with the image plotted in the background
f = evoked.plot_topo(layout=lt, fig_background=im)
| bsd-3-clause |
redroy44/parking-project | analysis.py | 1 | 3614 | # -*- coding: utf-8 -*-
import matplotlib.pyplot as plt
from matplotlib import ticker
import seaborn as sns
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
from keras.layers import Convolution2D, MaxPooling2D
from keras.layers import Activation, Dropout, Flatten, Dense
from keras.callbacks import ModelCheckpoint, Callback, EarlyStopping
# dimensions of our images.
img_width, img_height = 50, 50
train_data_dir = 'data/symlinks/train'
validation_data_dir = 'data/symlinks/validation'
test_data_dir = 'data/symlinks/test'
nb_epoch = 20
batch_size=128
# this is the augmentation configuration we will use for training
train_datagen = ImageDataGenerator(
samplewise_center=True,
rescale=1./255,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True)
# this is the augmentation configuration we will use for testing:
# only rescaling
test_datagen = ImageDataGenerator(
samplewise_center=True,
rescale=1./255)
train_generator = train_datagen.flow_from_directory(
train_data_dir,
target_size=(img_width, img_height),
batch_size=batch_size,
class_mode='binary',
follow_links=True)
validation_generator = test_datagen.flow_from_directory(
validation_data_dir,
target_size=(img_width, img_height),
batch_size=batch_size,
class_mode='binary',
follow_links=True)
test_generator = test_datagen.flow_from_directory(
test_data_dir,
target_size=(img_width, img_height),
batch_size=batch_size,
shuffle=False,
class_mode='binary',
follow_links=True)
def get_model():
model = Sequential()
model.add(Convolution2D(32, (3, 3), input_shape=(img_width, img_height, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Convolution2D(32, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Convolution2D(64, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(64))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(1))
model.add(Activation('sigmoid'))
return model
model = get_model()
## Callback for loss logging per epoch
class LossHistory(Callback):
def on_train_begin(self, logs={}):
self.losses = []
self.val_losses = []
def on_epoch_end(self, batch, logs={}):
self.losses.append(logs.get('loss'))
self.val_losses.append(logs.get('val_loss'))
history = LossHistory()
model.compile(loss='binary_crossentropy',
              optimizer='rmsprop',
              metrics=['accuracy'])
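# steps_per_epoch/validation_steps below are derived from the generator sample counts,
# so each epoch sweeps the corresponding directory-backed generator roughly once.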
model.fit_generator(
train_generator,
steps_per_epoch=train_generator.samples/batch_size,
epochs=nb_epoch,
validation_data=validation_generator,
verbose=1,
validation_steps=validation_generator.samples/batch_size,
callbacks=[history])
test_loss, test_acc = model.evaluate_generator(
test_generator,
steps=test_generator.samples/batch_size)
print("test_loss: %.4f - test_acc: %.4f"%(test_loss, test_acc))
loss = history.losses
val_loss = history.val_losses
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.title('Loss Trend')
plt.plot(loss, 'blue', label='Training Loss')
plt.plot(val_loss, 'green', label='Validation Loss')
plt.xticks(range(0,nb_epoch)[0::2])
plt.legend()
plt.show()
| mit |
yashchandak/GNN | Sample_Run/gcn-multi/utils.py | 1 | 6743 | import numpy as np
import pickle as pkl
import networkx as nx
import scipy.sparse as sp
import scipy.io as sio
from scipy.sparse.linalg.eigen.arpack import eigsh
import sys
from sklearn.preprocessing import OneHotEncoder
def parse_index_file(filename):
"""Parse index file."""
index = []
for line in open(filename):
index.append(int(line.strip()))
return index
def sample_mask(idx, l):
"""Create mask."""
mask = np.zeros(l)
mask[idx] = 1
return np.array(mask, dtype=np.bool)
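# e.g. sample_mask([0, 2], 4) -> array([ True, False,  True, False])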
def load_data(path, percent, fold, label_type):
features = np.load(path + 'features.npy').astype(np.float)
features = sp.csr_matrix(features)
adj = sio.loadmat(path + 'adjmat.mat')['adjmat'].toarray()
adj = nx.adjacency_matrix(nx.from_numpy_matrix(adj))
labels = np.load(path + 'labels.npy')
test_mask = np.load(path + label_type + '/' + str(percent) + '/' + str(fold) + '/test_ids.npy')
train_mask = np.load(path + label_type + '/' + str(percent) + '/' + str(fold) + '/train_ids.npy')
val_mask = np.load(path + label_type +'/' + str(percent) + '/' + str(fold) + '/val_ids.npy')
y_train = np.zeros(labels.shape)
y_val = np.zeros(labels.shape)
y_test = np.zeros(labels.shape)
y_train[train_mask, :] = labels[train_mask, :]
y_val[val_mask, :] = labels[val_mask, :]
y_test[test_mask, :] = labels[test_mask, :]
return adj, features, y_train, y_val, y_test, train_mask, val_mask, test_mask
def load_data2(dataset_str):
"""Load data."""
names = ['x', 'y', 'tx', 'ty', 'allx', 'ally', 'graph']
objects = []
for i in range(len(names)):
print ("data/ind.{}.{}".format(dataset_str, names[i]))
with open("data/ind.{}.{}".format(dataset_str, names[i]), 'rb') as f:
if sys.version_info > (3, 0):
objects.append(pkl.load(f, encoding='latin1'))
else:
objects.append(pkl.load(f))
x, y, tx, ty, allx, ally, graph = tuple(objects)
test_idx_reorder = parse_index_file("data/ind.{}.test.index".format(dataset_str))
test_idx_range = np.sort(test_idx_reorder)
if dataset_str == 'citeseer':
# Fix citeseer dataset (there are some isolated nodes in the graph)
# Find isolated nodes, add them as zero-vecs into the right position
test_idx_range_full = range(min(test_idx_reorder), max(test_idx_reorder)+1)
tx_extended = sp.lil_matrix((len(test_idx_range_full), x.shape[1]))
tx_extended[test_idx_range-min(test_idx_range), :] = tx
tx = tx_extended
ty_extended = np.zeros((len(test_idx_range_full), y.shape[1]))
ty_extended[test_idx_range-min(test_idx_range), :] = ty
ty = ty_extended
features = sp.vstack((allx, tx)).tolil()
features[test_idx_reorder, :] = features[test_idx_range, :]
adj = nx.adjacency_matrix(nx.from_dict_of_lists(graph))
labels = np.vstack((ally, ty))
labels[test_idx_reorder, :] = labels[test_idx_range, :]
idx_test = test_idx_range.tolist()
idx_train = range(len(y))
idx_val = range(len(y), len(y)+500)
train_mask = sample_mask(idx_train, labels.shape[0])
val_mask = sample_mask(idx_val, labels.shape[0])
test_mask = sample_mask(idx_test, labels.shape[0])
np.save('test_ids.npy', idx_test)
np.save('train_ids.npy', idx_train)
np.save('val_ids.npy', idx_val)
#np.save('features.npy', features)
#np.save('lables.npy', labels)
y_train = np.zeros(labels.shape)
y_val = np.zeros(labels.shape)
y_test = np.zeros(labels.shape)
y_train[train_mask, :] = labels[train_mask, :]
y_val[val_mask, :] = labels[val_mask, :]
y_test[test_mask, :] = labels[test_mask, :]
return adj, features, y_train, y_val, y_test, train_mask, val_mask, test_mask
def sparse_to_tuple(sparse_mx):
"""Convert sparse matrix to tuple representation."""
def to_tuple(mx):
if not sp.isspmatrix_coo(mx):
mx = mx.tocoo()
coords = np.vstack((mx.row, mx.col)).transpose()
values = mx.data
shape = mx.shape
return coords, values, shape
if isinstance(sparse_mx, list):
for i in range(len(sparse_mx)):
sparse_mx[i] = to_tuple(sparse_mx[i])
else:
sparse_mx = to_tuple(sparse_mx)
return sparse_mx
def preprocess_features(features):
"""Row-normalize feature matrix and convert to tuple representation"""
rowsum = np.array(features.sum(1))
r_inv = np.power(rowsum, -1).flatten()
r_inv[np.isinf(r_inv)] = 0.
r_mat_inv = sp.diags(r_inv)
features = r_mat_inv.dot(features)
return sparse_to_tuple(features)
def normalize_adj(adj):
"""Symmetrically normalize adjacency matrix."""
adj = sp.coo_matrix(adj)
rowsum = np.array(adj.sum(1))
d_inv_sqrt = np.power(rowsum, -0.5).flatten()
d_inv_sqrt[np.isinf(d_inv_sqrt)] = 0.
d_mat_inv_sqrt = sp.diags(d_inv_sqrt)
return adj.dot(d_mat_inv_sqrt).transpose().dot(d_mat_inv_sqrt).tocoo()
def preprocess_adj(adj):
"""Preprocessing of adjacency matrix for simple GCN model and conversion to tuple representation."""
adj_normalized = normalize_adj(adj + sp.eye(adj.shape[0]))
#adj_normalized = adj + sp.eye(adj.shape[0])
return sparse_to_tuple(adj_normalized)
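# Minimal usage sketch for the two preprocessing helpers above (the toy 2-node graph is
# illustrative only and not part of the dataset loaders in this module):
#   adj = sp.csr_matrix(np.array([[0, 1], [1, 0]]))
#   features = sp.csr_matrix(np.eye(2))
#   support = [preprocess_adj(adj)]        # renormalized adjacency as (coords, values, shape)
#   feats = preprocess_features(features)  # row-normalized features in the same tuple form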
def construct_feed_dict(features, support, labels, labels_mask, placeholders):
"""Construct feed dictionary."""
feed_dict = dict()
feed_dict.update({placeholders['labels']: labels})
feed_dict.update({placeholders['labels_mask']: labels_mask})
feed_dict.update({placeholders['features']: features})
feed_dict.update({placeholders['support'][i]: support[i] for i in range(len(support))})
feed_dict.update({placeholders['num_features_nonzero']: features[1].shape})
return feed_dict
def chebyshev_polynomials(adj, k):
"""Calculate Chebyshev polynomials up to order k. Return a list of sparse matrices (tuple representation)."""
print("Calculating Chebyshev polynomials up to order {}...".format(k))
adj_normalized = normalize_adj(adj)
laplacian = sp.eye(adj.shape[0]) - adj_normalized
largest_eigval, _ = eigsh(laplacian, 1, which='LM')
scaled_laplacian = (2. / largest_eigval[0]) * laplacian - sp.eye(adj.shape[0])
t_k = list()
t_k.append(sp.eye(adj.shape[0]))
t_k.append(scaled_laplacian)
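    # Chebyshev recurrence T_k = 2 * L_scaled . T_{k-1} - T_{k-2}, seeded with T_0 = I and T_1 = L_scaled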
def chebyshev_recurrence(t_k_minus_one, t_k_minus_two, scaled_lap):
s_lap = sp.csr_matrix(scaled_lap, copy=True)
return 2 * s_lap.dot(t_k_minus_one) - t_k_minus_two
for i in range(2, k+1):
t_k.append(chebyshev_recurrence(t_k[-1], t_k[-2], scaled_laplacian))
return sparse_to_tuple(t_k)
if __name__ == '__main__':
    load_data2('citeseer')
print('done!') | mit |
blab/antibody-response-pulse | bcell-array/code/VBMG_vaccination_OAS_mutation.py | 1 | 18650 |
# coding: utf-8
# # Antibody Response Pulse
# https://github.com/blab/antibody-response-pulse
#
# ### B-cells evolution --- cross-reactive antibody response after influenza virus infection or vaccination
# ### Adaptive immune response for sequential vaccination
# In[1]:
'''
author: Alvason Zhenhua Li
date: 04/09/2015
'''
get_ipython().magic(u'matplotlib inline')
import numpy as np
import matplotlib.pyplot as plt
import os
from matplotlib.ticker import FuncFormatter
import alva_machinery_event_bcell_mutation as alva
AlvaFontSize = 23
AlvaFigSize = (15, 5)
numberingFig = 0
# plotting
dir_path = '/Users/al/Desktop/GitHub/antibody-response-pulse/bcell-array/figure'
file_name = 'Vaccine-Bcell-IgM-IgG'
figure_name = '-equation'
file_suffix = '.png'
save_figure = os.path.join(dir_path, file_name + figure_name + file_suffix)
numberingFig = numberingFig + 1
plt.figure(numberingFig, figsize=(12, 5))
plt.axis('off')
plt.title(r'$ Vaccine-Bcell-IgM-IgG \ equations \ (antibody-response \ for \ sequential-vaccination) $'
, fontsize = AlvaFontSize)
plt.text(0, 7.0/9, r'$ \frac{\partial V_n(t)}{\partial t} = +\xi_{v}V_{n}(t)(1 - \frac{V_n(t)}{V_{max}}) - \phi_{m} M_{n}(t) V_{n}(t) - \phi_{g} G_{n}(t) V_{n}(t) $'
, fontsize = 1.2*AlvaFontSize)
plt.text(0, 5.0/9, r'$ \frac{\partial B_n(t)}{\partial t} = +\xi_{b}V_{n}(t)(1 - \frac{V_n(t)}{V_{max}}) + (\beta_{m} + \beta_{g}) V_{n}(t) B_{n}(t) - \mu_{b} B_{n}(t) + m_b V_{n}(t)\frac{B_{n-1}(t) - 2B_n(t) + B_{n+1}(t)}{(\Delta n)^2} $'
, fontsize = 1.2*AlvaFontSize)
plt.text(0, 3.0/9,r'$ \frac{\partial M_n(t)}{\partial t} = +\xi_{m} B_{n}(t) - \phi_{m} M_{n}(t) V_{n}(t) - \mu_{m} M_{n}(t) $'
, fontsize = 1.2*AlvaFontSize)
plt.text(0, 1.0/9,r'$ \frac{\partial G_n(t)}{\partial t} = +\xi_{g} B_{n}(t) - \phi_{g} G_{n}(t) V_{n}(t) - \mu_{g} G_{n}(t) + m_a \frac{G_{n-1}(t) - 2G_n(t) + G_{n+1}(t)}{(\Delta n)^2} $'
, fontsize = 1.2*AlvaFontSize)
plt.savefig(save_figure, dpi = 100)
plt.show()
# define the V-B-M-G partial differential equations
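# Naming guide for the terms below: V = vaccine/virus, B = B-cells, M = IgM, G = IgG;
# xi_* -> inRate*, phi_* -> killRateV*/consumeRate*, mu_* -> outRate*, beta_* -> actRateB*,
# and m_b/m_a -> mutatRateB/mutatRateA (the diffusion-like mutation terms).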
def dVdt_array(VBMGxt = [], *args):
# naming
V = VBMGxt[0]
B = VBMGxt[1]
M = VBMGxt[2]
G = VBMGxt[3]
x_totalPoint = VBMGxt.shape[1]
# there are n dSdt
dV_dt_array = np.zeros(x_totalPoint)
# each dSdt with the same equation form
dV_dt_array[:] = +inRateV*V[:]*(1 - V[:]/maxV) - killRateVm*M[:]*V[:] - killRateVg*G[:]*V[:]
return(dV_dt_array)
def dBdt_array(VBMGxt = [], *args):
# naming
V = VBMGxt[0]
B = VBMGxt[1]
M = VBMGxt[2]
G = VBMGxt[3]
x_totalPoint = VBMGxt.shape[1]
# there are n dSdt
dB_dt_array = np.zeros(x_totalPoint)
# each dSdt with the same equation form
Bcopy = np.copy(B)
centerX = Bcopy[:]
leftX = np.roll(Bcopy[:], 1)
rightX = np.roll(Bcopy[:], -1)
leftX[0] = centerX[0]
rightX[-1] = centerX[-1]
dB_dt_array[:] = +inRateB*V[:]*(1 - V[:]/maxV) + (actRateBm + actRateBg + alva.event_recovered + alva.event_OAS_press + alva.event_recoveredV + alva.event_OAS_pressV)*V[:]*B[:] - (outRateB + alva.event_OAS_slowV)*B[:] + mutatRateB*V[:]*(leftX[:] - 2*centerX[:] + rightX[:])/(dx**2)
return(dB_dt_array)
def dMdt_array(VBMGxt = [], *args):
# naming
V = VBMGxt[0]
B = VBMGxt[1]
M = VBMGxt[2]
G = VBMGxt[3]
x_totalPoint = VBMGxt.shape[1]
# there are n dSdt
dM_dt_array = np.zeros(x_totalPoint)
# each dSdt with the same equation form
dM_dt_array[:] = +inRateM*B[:] - consumeRateM*M[:]*V[:] - outRateM*M[:]
return(dM_dt_array)
def dGdt_array(VBMGxt = [], *args):
# naming
V = VBMGxt[0]
B = VBMGxt[1]
M = VBMGxt[2]
G = VBMGxt[3]
x_totalPoint = VBMGxt.shape[1]
# there are n dSdt
dG_dt_array = np.zeros(x_totalPoint)
# each dSdt with the same equation form
Gcopy = np.copy(G)
centerX = Gcopy[:]
leftX = np.roll(Gcopy[:], 1)
rightX = np.roll(Gcopy[:], -1)
leftX[0] = centerX[0]
rightX[-1] = centerX[-1]
dG_dt_array[:] = +(inRateG + alva.event_OAS_boost + alva.event_OAS_boostV)*B[:] - consumeRateG*G[:]*V[:] - outRateG*G[:] + mutatRateA*(leftX[:] - 2*centerX[:] + rightX[:])/(dx**2)
return(dG_dt_array)
# In[2]:
# setting parameter
timeUnit = 'day'
if timeUnit == 'hour':
hour = float(1)
day = float(24)
elif timeUnit == 'day':
day = float(1)
hour = float(1)/24
elif timeUnit == 'year':
year = float(1)
day = float(1)/365
hour = float(1)/24/365
maxV = float(16) # max vaccine/micro-liter
inRateV = 0.2/hour # in-rate of virus
killRateVm = 0.001/hour # kill-rate of virus by antibody-IgM
killRateVg = killRateVm # kill-rate of virus by antibody-IgG
inRateB = 0.06/hour # in-rate of B-cell
outRateB = inRateB/8 # out-rate of B-cell
actRateBm = killRateVm # activation rate of naive B-cell
actRateBg = killRateVg # activation rate of memory B-cell for 1st-time-infection
inRateM = 0.16/hour # in-rate of antibody-IgM from naive B-cell
outRateM = inRateM/1 # out-rate of antibody-IgM from naive B-cell
consumeRateM = killRateVm # consume-rate of antibody-IgM by cleaning virus
inRateG = inRateM/6 # in-rate of antibody-IgG from memory B-cell
outRateG = outRateM/60 # out-rate of antibody-IgG from memory B-cell
consumeRateG = killRateVg # consume-rate of antibody-IgG by cleaning virus
mutatRateB = 0.00009/hour # Virus mutation rate
mutatRateA = 0.0001/hour # antibody mutation rate
# time boundary and griding condition
minT = float(0)
maxT = float(4*28*day)
totalPoint_T = int(2*10**3 + 1)
gT = np.linspace(minT, maxT, totalPoint_T)
spacingT = np.linspace(minT, maxT, num = totalPoint_T, retstep = True)
gT = spacingT[0]
dt = spacingT[1]
# space boundary and griding condition
minX = float(0)
maxX = float(3)
totalPoint_X = int(maxX - minX + 1)
gX = np.linspace(minX, maxX, totalPoint_X)
gridingX = np.linspace(minX, maxX, num = totalPoint_X, retstep = True)
gX = gridingX[0]
dx = gridingX[1]
gV_array = np.zeros([totalPoint_X, totalPoint_T])
gB_array = np.zeros([totalPoint_X, totalPoint_T])
gM_array = np.zeros([totalPoint_X, totalPoint_T])
gG_array = np.zeros([totalPoint_X, totalPoint_T])
# initial output condition
#gV_array[1, 0] = float(2)
# [viral population, starting time] ---first
origin_virus = int(1)
current_virus = int(2)
infection_period = 1*28*day
viral_population = np.zeros(int(maxX + 1))
viral_population[origin_virus:current_virus + 1] = 0
infection_starting_time = np.arange(int(maxX + 1))*infection_period - 27
event_infect = np.zeros([int(maxX + 1), 2])
event_infect[:, 0] = viral_population
event_infect[:, 1] = infection_starting_time
event_infect[0, 1] = 0
print ('event_infect = {:}'.format(event_infect))
# [viral population, starting time] ---repeated
viral_population = np.zeros(int(maxX + 1))
viral_population[origin_virus:current_virus + 1] = 0
infection_starting_time = np.arange(int(maxX + 1))*0
event_repeated = np.zeros([int(maxX + 1), 2])
event_repeated[:, 0] = viral_population
event_repeated[:, 1] = infection_starting_time
print ('event_repeated = {:}'.format(event_repeated))
# [vaccine population, starting time] ---first
origin_vaccine = int(1)
current_vaccine = int(2)
vaccine_period = 1*28*day
vaccine_population = np.zeros(int(maxX + 1))
vaccine_population[origin_vaccine:current_vaccine + 1] = 8
vaccine_starting_time = np.arange(int(maxX + 1))*vaccine_period - 27
event_vaccine = np.zeros([int(maxX + 1), 2])
event_vaccine[:, 0] = vaccine_population
event_vaccine[:, 1] = vaccine_starting_time
event_vaccine[0, 1] = 0
print ('event_vaccine = {:}'.format(event_vaccine))
# [origin-virus, current-virus, recovered-day, repeated-parameter, OAS+, OAS-]
min_cell = 1.0 # minimum cell
recovered_time = 14*day # recovered time of 1st-time infection
actRateBg_recovered = actRateBg*10 # activation rate of memory B-cell for repeated-infection (same virus)
inRateG_OAS_boost = 5/hour # boosting in-rate of antibody-IgG from memory B-cell for origin-virus
actRateBg_OAS_press = -0.00035/hour # depress act-rate from memory B-cell for non-origin-virus
event_infection_parameter = np.array([origin_virus,
current_virus,
min_cell,
recovered_time,
actRateBg_recovered,
inRateG_OAS_boost,
actRateBg_OAS_press,
0.0])
# vaccination_parameter
min_cell_v = 0.2 # minimum cell
recovered_time_v = 14*day # recovered time of 1st-time infection
actRateBg_recovered_v = actRateBg*9 # activation rate of memory B-cell for repeated-infection (same virus)
inRateG_OAS_boost_v = 1.5/hour # boosting in-rate of antibody-IgG from memory B-cell for origin-virus
actRateBg_OAS_press_v = -0.001/hour # depress act-rate from memory B-cell for non-origin-virus
outRateB_OAS_slow_v = -outRateB/1.4
event_vaccination_parameter = np.array([origin_vaccine,
current_vaccine,
min_cell_v,
recovered_time_v,
actRateBg_recovered_v,
inRateG_OAS_boost_v,
actRateBg_OAS_press_v,
outRateB_OAS_slow_v])
event_parameter = np.array([event_infection_parameter, event_vaccination_parameter])
event_table = np.array([event_parameter, event_infect, event_repeated, event_vaccine])
# Runge Kutta numerical solution
pde_array = np.array([dVdt_array, dBdt_array, dMdt_array, dGdt_array])
initial_Out = np.array([gV_array, gB_array, gM_array, gG_array])
gOut_array = alva.AlvaRungeKutta4XT(pde_array, initial_Out, minX, maxX, totalPoint_X, minT, maxT, totalPoint_T, event_table)
# plotting
gV = gOut_array[0]
gB = gOut_array[1]
gM = gOut_array[2]
gG = gOut_array[3]
# Experimental lab data from OAS paper
gT_lab_fresh = np.array([0, 7, 14, 28])*day
gFM1_lab_fresh = np.array([2**(5 + 1.0/3), 2**7, 2**(8 + 1.0/6), 2**(8 - 1.0/2)])
error_FM1_fresh = gFM1_lab_fresh**(4.0/5)
bar_width = 2
# Experimental lab data from OAS paper
gT_lab = np.array([0, 7, 14, 28])*day + infection_period*origin_virus
gPR8_lab = np.array([2**(7 + 1.0/2), 2**9, 2**(9 + 1.0/4), 2**(9 - 1.0/6)])
error_PR8 = gPR8_lab**(3.0/4)
gFM1_lab = np.array([2**(6 + 2.0/5), 2**(7 - 1.0/5), 2**(7 + 1.0/3), 2**(8 - 1.0/5)])
error_FM1 = gFM1_lab**(3.0/4)
bar_width = 2.0
# Sequential immunization graph
numberingFig = numberingFig + 1
plt.figure(numberingFig, figsize = (12, 6))
plt.subplot(111)
plt.plot(gT, (gM[origin_virus] + gG[origin_virus]), linewidth = 5.0, alpha = 0.5, color = 'black'
, label = r'$ Origin-vaccine $')
plt.plot(gT, (gM[origin_virus + 1] + gG[origin_virus + 1]), linewidth = 5.0, alpha = 0.5, color = 'red'
, label = r'$ Subsequence-vaccine $')
plt.bar(gT_lab - bar_width/2, gPR8_lab, bar_width, alpha = 0.6, color = 'gray', yerr = error_PR8
, error_kw = dict(elinewidth = 1, ecolor = 'black'), label = r'$ PR8-vaccine $')
plt.bar(gT_lab + bar_width/2, gFM1_lab, bar_width, alpha = 0.6, color = 'red', yerr = error_FM1
, error_kw = dict(elinewidth = 1, ecolor = 'black'), label = r'$ FM1-vaccine $')
plt.bar(gT_lab_fresh - bar_width/2, gFM1_lab_fresh, bar_width, alpha = 0.2, color = 'gray', yerr = error_FM1_fresh
, error_kw = dict(elinewidth = 1, ecolor = 'black'), label = r'$ FM1-control $')
plt.grid(True, which = 'both')
plt.title(r'$ Original \ Antigenic \ Sin \ (sequential-vaccination)$', fontsize = AlvaFontSize)
plt.xlabel(r'$time \ (%s)$'%(timeUnit), fontsize = AlvaFontSize)
plt.ylabel(r'$ Neutralization \ \ titer $', fontsize = AlvaFontSize)
plt.xticks(fontsize = AlvaFontSize*0.6)
plt.yticks(fontsize = AlvaFontSize*0.6)
plt.xlim([minT, maxT])
plt.ylim([2**5, 2**10])
plt.yscale('log', basey = 2)
# gca()---GetCurrentAxis and Format the ticklabel to be 2**x
plt.gca().yaxis.set_major_formatter(FuncFormatter(lambda x, pos: int(2**(np.log(x)/np.log(2)))))
#plt.gca().xaxis.set_major_locator(plt.MultipleLocator(7))
plt.legend(loc = (1, 0), fontsize = AlvaFontSize)
plt.show()
# In[3]:
# step by step
numberingFig = numberingFig + 1
for i in range(totalPoint_X):
figure_name = '-response-%i'%(i)
file_suffix = '.png'
save_figure = os.path.join(dir_path, file_name + figure_name + file_suffix)
plt.figure(numberingFig, figsize = AlvaFigSize)
plt.plot(gT, gV[i], color = 'red', label = r'$ V_{%i}(t) $'%(i), linewidth = 3.0, alpha = 0.5)
plt.plot(gT, gB[i], color = 'purple', label = r'$ B_{%i}(t) $'%(i), linewidth = 5.0, alpha = 0.5
, linestyle = '-.')
plt.plot(gT, gM[i], color = 'blue', label = r'$ IgM_{%i}(t) $'%(i), linewidth = 3.0, alpha = 0.5)
plt.plot(gT, gG[i], color = 'green', label = r'$ IgG_{%i}(t) $'%(i), linewidth = 3.0, alpha = 0.5)
plt.plot(gT, gM[i] + gG[i], color = 'black', linewidth = 5.0, alpha = 0.5, linestyle = 'dashed'
, label = r'$ IgM_{%i}(t) + IgG_{%i}(t) $'%(i, i))
plt.grid(True, which = 'both')
plt.title(r'$ Antibody \ from \ Vaccine-{%i} $'%(i), fontsize = AlvaFontSize)
plt.xlabel(r'$time \ (%s)$'%(timeUnit), fontsize = AlvaFontSize)
plt.ylabel(r'$ Neutralization \ \ titer $', fontsize = AlvaFontSize)
plt.xlim([minT, maxT])
plt.xticks(fontsize = AlvaFontSize*0.6)
plt.yticks(fontsize = AlvaFontSize*0.6)
plt.ylim([2**0, 2**10])
plt.yscale('log', basey = 2)
plt.legend(loc = (1,0), fontsize = AlvaFontSize)
plt.savefig(save_figure, dpi = 100)
plt.show()
# In[4]:
# Experimental lab data from OAS paper
gT_lab_fresh = np.array([0, 7, 14, 28])*day
gFM1_lab_fresh = np.array([2**(5 + 1.0/3), 2**7, 2**(8 + 1.0/6), 2**(8 - 1.0/2)])
error_FM1_fresh = gFM1_lab_fresh**(4.0/5)
bar_width = 2
# Experimental lab data from OAS paper
gT_lab = np.array([0, 7, 14, 28])*day + infection_period*origin_virus
gPR8_lab = np.array([2**(7 + 1.0/2), 2**9, 2**(9 + 1.0/4), 2**(9 - 1.0/6)])
error_PR8 = gPR8_lab**(3.0/4)
gFM1_lab = np.array([2**(6 + 2.0/5), 2**(7 - 1.0/5), 2**(7 + 1.0/3), 2**(8 - 1.0/5)])
error_FM1 = gFM1_lab**(3.0/4)
bar_width = 2.0
# Sequential immunization graph
numberingFig = numberingFig + 1
plt.figure(numberingFig, figsize = (12, 6))
plt.subplot(111)
plt.plot(gT, (gM[origin_virus] + gG[origin_virus]), linewidth = 5.0, alpha = 0.5, color = 'black'
, label = r'$ Origin-vaccine $')
plt.plot(gT, (gM[origin_virus + 1] + gG[origin_virus + 1]), linewidth = 5.0, alpha = 0.5, color = 'red'
, label = r'$ Subsequence-vaccine $')
plt.bar(gT_lab - bar_width/2, gPR8_lab, bar_width, alpha = 0.6, color = 'gray', yerr = error_PR8
, error_kw = dict(elinewidth = 1, ecolor = 'black'), label = r'$ PR8-vaccine $')
plt.bar(gT_lab + bar_width/2, gFM1_lab, bar_width, alpha = 0.6, color = 'red', yerr = error_FM1
, error_kw = dict(elinewidth = 1, ecolor = 'black'), label = r'$ FM1-vaccine $')
plt.bar(gT_lab_fresh - bar_width/2, gFM1_lab_fresh, bar_width, alpha = 0.2, color = 'gray', yerr = error_FM1_fresh
, error_kw = dict(elinewidth = 1, ecolor = 'black'), label = r'$ FM1-control $')
plt.grid(True, which = 'both')
plt.title(r'$ Original \ Antigenic \ Sin \ (sequential-vaccination)$', fontsize = AlvaFontSize)
plt.xlabel(r'$time \ (%s)$'%(timeUnit), fontsize = AlvaFontSize)
plt.ylabel(r'$ Neutralization \ \ titer $', fontsize = AlvaFontSize)
plt.xticks(fontsize = AlvaFontSize*0.6)
plt.yticks(fontsize = AlvaFontSize*0.6)
plt.xlim([minT, maxT])
plt.ylim([2**5, 2**10])
plt.yscale('log', basey = 2)
# gca()---GetCurrentAxis and Format the ticklabel to be 2**x
plt.gca().yaxis.set_major_formatter(FuncFormatter(lambda x, pos: int(2**(np.log(x)/np.log(2)))))
#plt.gca().xaxis.set_major_locator(plt.MultipleLocator(7))
plt.legend(loc = (1, 0), fontsize = AlvaFontSize)
plt.show()
# In[5]:
# Experimental lab data from OAS paper
gT_lab_fresh = np.array([0, 7, 14, 28])*day
gFM1_lab_fresh = np.array([2**(5 + 1.0/3), 2**7, 2**(8 + 1.0/6), 2**(8 - 1.0/2)])
error_FM1_fresh = gFM1_lab_fresh**(4.0/5)
bar_width = 1
# Experimental lab data from OAS paper
gT_lab = np.array([0, 7, 14, 28])*day + 28*day
gPR8_lab = np.array([2**(7 + 1.0/2), 2**9, 2**(9 + 1.0/4), 2**(9 - 1.0/6)])
error_PR8 = gPR8_lab**(3.0/4)
gFM1_lab = np.array([2**(6 + 2.0/5), 2**(7 - 1.0/5), 2**(7 + 1.0/3), 2**(8 - 1.0/5)])
error_FM1 = gFM1_lab**(3.0/4)
bar_width = 1.0
# Sequential immunization graph
figure_name = '-Original-Antigenic-Sin-vaccination'
file_suffix = '.png'
save_figure = os.path.join(dir_path, file_name + figure_name + file_suffix)
numberingFig = numberingFig + 1
plt.figure(numberingFig, figsize = (12, 6))
plt.subplot(111)
plt.plot(gT, (gM[origin_virus] + gG[origin_virus]), linewidth = 5.0, alpha = 0.5, color = 'black'
, label = r'$ Origin-vaccine $')
plt.plot(gT, (gM[origin_virus + 1] + gG[origin_virus + 1]), linewidth = 5.0, alpha = 0.5, color = 'red'
, label = r'$ Subsequence-vaccine $')
plt.bar(gT_lab - bar_width/2, gPR8_lab, bar_width, alpha = 0.6, color = 'gray', yerr = error_PR8
, error_kw = dict(elinewidth = 1, ecolor = 'black'), label = r'$ PR8-vaccine $')
plt.bar(gT_lab + bar_width/2, gFM1_lab, bar_width, alpha = 0.6, color = 'red', yerr = error_FM1
, error_kw = dict(elinewidth = 1, ecolor = 'black'), label = r'$ FM1-vaccine $')
plt.bar(gT_lab_fresh - bar_width/2, gFM1_lab_fresh, bar_width, alpha = 0.2, color = 'gray', yerr = error_FM1_fresh
, error_kw = dict(elinewidth = 1, ecolor = 'black'), label = r'$ FM1-control $')
plt.grid(True, which = 'both')
plt.title(r'$ Original \ Antigenic \ Sin \ (sequential-vaccination)$', fontsize = AlvaFontSize)
plt.xlabel(r'$time \ (%s)$'%(timeUnit), fontsize = AlvaFontSize)
plt.ylabel(r'$ Neutralization \ \ titer $', fontsize = AlvaFontSize)
plt.xticks(fontsize = AlvaFontSize*0.7)
plt.yticks(fontsize = AlvaFontSize*0.7)
plt.xlim([minT, 2*30*day])
plt.ylim([2**5, 2**10])
plt.yscale('log', basey = 2)
# gca()---GetCurrentAxis and Format the ticklabel to be 2**x
plt.gca().yaxis.set_major_formatter(FuncFormatter(lambda x, pos: int(2**(np.log(x)/np.log(2)))))
plt.gca().xaxis.set_major_locator(plt.MultipleLocator(7))
plt.legend(loc = (1, 0), fontsize = AlvaFontSize)
plt.savefig(save_figure, dpi = 100, bbox_inches='tight')
plt.show()
# In[ ]:
| gpl-2.0 |
msmbuilder/osprey | osprey/fit_estimator.py | 2 | 6253 | from __future__ import print_function, absolute_import, division
import time
from distutils.version import LooseVersion
import numpy as np
import sklearn
from sklearn.base import is_classifier, clone
from sklearn.metrics.scorer import check_scoring
from sklearn.externals.joblib import Parallel, delayed
from sklearn.model_selection import check_cv
from sklearn.model_selection._validation import _safe_split, _score
from .utils import check_arrays, num_samples
from .utils import short_format_time, is_msmbuilder_estimator
if LooseVersion(sklearn.__version__) < LooseVersion('0.16.1'):
raise ImportError('Please upgrade to the latest version of scikit-learn')
def fit_and_score_estimator(estimator, parameters, cv, X, y=None, scoring=None,
iid=True, n_jobs=1, verbose=1,
pre_dispatch='2*n_jobs'):
"""Fit and score an estimator with cross-validation
This function is basically a copy of sklearn's
model_selection._BaseSearchCV._fit(), which is the core of the GridSearchCV
fit() method. Unfortunately, that class does _not_ return the training
set scores, which we want to save in the database, and because of the
way it's written, you can't change it by subclassing or monkeypatching.
This function uses some undocumented internal sklearn APIs (non-public).
    It was written against sklearn version 0.16.1. Prior versions are likely
    to fail due to changes in the design of the cross_validation module.
Returns
-------
out : dict, with keys 'mean_test_score' 'test_scores', 'train_scores'
The scores on the training and test sets, as well as the mean test set
score.
"""
scorer = check_scoring(estimator, scoring=scoring)
n_samples = num_samples(X)
X, y = check_arrays(X, y, allow_lists=True, sparse_format='csr',
allow_nans=True)
if y is not None:
if len(y) != n_samples:
raise ValueError('Target variable (y) has a different number '
'of samples (%i) than data (X: %i samples)'
% (len(y), n_samples))
cv = check_cv(cv=cv, y=y, classifier=is_classifier(estimator))
out = Parallel(
n_jobs=n_jobs, verbose=verbose, pre_dispatch=pre_dispatch
)(
delayed(_fit_and_score)(clone(estimator), X, y, scorer,
train, test, verbose, parameters,
fit_params=None)
for train, test in cv.split(X, y))
assert len(out) == cv.n_splits
train_scores, test_scores = [], []
n_train_samples, n_test_samples = [], []
for test_score, n_test, train_score, n_train, _ in out:
train_scores.append(train_score)
test_scores.append(test_score)
n_test_samples.append(n_test)
n_train_samples.append(n_train)
train_scores, test_scores = map(list, check_arrays(train_scores,
test_scores,
warn_nans=True,
replace_nans=True))
if iid:
if verbose > 0 and is_msmbuilder_estimator(estimator):
print('[CV] Using MSMBuilder API n_samples averaging')
print('[CV] n_train_samples: %s' % str(n_train_samples))
print('[CV] n_test_samples: %s' % str(n_test_samples))
mean_test_score = np.average(test_scores, weights=n_test_samples)
mean_train_score = np.average(train_scores, weights=n_train_samples)
else:
mean_test_score = np.average(test_scores)
mean_train_score = np.average(train_scores)
grid_scores = {
'mean_test_score': mean_test_score, 'test_scores': test_scores,
'mean_train_score': mean_train_score, 'train_scores': train_scores,
'n_test_samples': n_test_samples, 'n_train_samples': n_train_samples}
return grid_scores
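# Minimal usage sketch (the estimator, data and parameter values here are illustrative only):
#   from sklearn.svm import SVC
#   scores = fit_and_score_estimator(SVC(), {'C': 1.0}, cv=5, X=X, y=y,
#                                    scoring='accuracy', verbose=0)
#   scores['mean_test_score'], scores['mean_train_score']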
def _fit_and_score(estimator, X, y, scorer, train, test, verbose, parameters,
fit_params=None):
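    # Fit `estimator` on a single train/test split and score it with `scorer`, returning
    # (test_score, n_test_samples, train_score, n_train_samples, scoring_time).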
if verbose > 1:
if parameters is None:
msg = "no parameters to be set"
else:
msg = '%s' % (', '.join('%s=%s' % (k, v)
for k, v in parameters.items()))
print("[CV] %s %s" % (msg, (64 - len(msg)) * '.'))
if num_samples(train) == 0 or num_samples(test) == 0:
raise RuntimeError(
'Cross validation error in fit_estimator. The total data set '
'contains %d elements, which were split into a training set '
'of %d elements and a test set of %d elements. Unfortunately, '
'you can\'t have a %s set with 0 elements.' % (
num_samples(X), num_samples(train), num_samples(test),
'training' if num_samples(train) == 0 else 'test'))
# adjust length of sample weights
n_samples = num_samples(X)
fit_params = fit_params if fit_params is not None else {}
fit_params = dict([(k, np.asarray(v)[train]
if hasattr(v, '__len__') and len(v) == n_samples else v)
for k, v in fit_params.items()])
if parameters is not None:
estimator.set_params(**parameters)
# fit and score
start_time = time.time()
X_train, y_train = _safe_split(estimator, X, y, train)
X_test, y_test = _safe_split(estimator, X, y, test, train)
if y_train is None:
estimator.fit(X_train, **fit_params)
else:
estimator.fit(X_train, y_train, **fit_params)
test_score = _score(estimator, X_test, y_test, scorer)
train_score = _score(estimator, X_train, y_train, scorer)
scoring_time = time.time() - start_time
msmbuilder_api = is_msmbuilder_estimator(estimator)
n_samples_test = num_samples(X_test, is_nested=msmbuilder_api)
n_samples_train = num_samples(X_train, is_nested=msmbuilder_api)
if verbose > 2:
msg += ", score=%f" % test_score
if verbose > 1:
end_msg = "%s -%s" % (msg, short_format_time(scoring_time))
print("[CV] %s %s" % ((64 - len(end_msg)) * '.', end_msg))
return (test_score, n_samples_test, train_score, n_samples_train,
scoring_time)
| apache-2.0 |
science-of-imagination/nengo-buffer | Project/mental_translation_training.py | 1 | 8721 | #import matplotlib.pyplot as plt
#%matplotlib inline
import nengo
import numpy as np
import scipy.ndimage
#import matplotlib.animation as animation
#from matplotlib import pylab
from PIL import Image
import nengo.spa as spa
import cPickle
import random
from nengo_extras.data import load_mnist
from nengo_extras.vision import Gabor, Mask
#Encode categorical integer features using a one-hot aka one-of-K scheme.
def one_hot(labels, c=None):
assert labels.ndim == 1
n = labels.shape[0]
c = len(np.unique(labels)) if c is None else c
y = np.zeros((n, c))
y[np.arange(n), labels] = 1
return y
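# e.g. one_hot(np.array([0, 2, 1])) -> [[1, 0, 0], [0, 0, 1], [0, 1, 0]]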
# --- load the data
img_rows, img_cols = 28, 28
(X_train, y_train), (X_test, y_test) = load_mnist()
X_train = 2 * X_train - 1 # normalize to -1 to 1
X_test = 2 * X_test - 1 # normalize to -1 to 1
train_targets = one_hot(y_train, 10)
test_targets = one_hot(y_test, 10)
rng = np.random.RandomState(9)
# --- set up network parameters
#Want to encode and decode the image
n_vis = X_train.shape[1]
n_out = X_train.shape[1]
#number of neurons/dimensions of semantic pointer
n_hid = 5000 #Try with more neurons for more accuracy
#Want the encoding/decoding done on the training images
ens_params = dict(
eval_points=X_train,
neuron_type=nengo.LIF(), #Why not use LIF? originally used LIFRate()
intercepts=nengo.dists.Choice([-0.5]),
max_rates=nengo.dists.Choice([100]),
)
#Least-squares solver with L2 regularization.
solver = nengo.solvers.LstsqL2(reg=0.01)
#solver = nengo.solvers.LstsqL2(reg=0.0001)
solver2 = nengo.solvers.LstsqL2(reg=0.01)
#network that generates the weight matrices between neuron activity and images and the labels
with nengo.Network(seed=3) as model:
a = nengo.Ensemble(n_hid, n_vis, seed=3, **ens_params)
v = nengo.Node(size_in=n_out)
conn = nengo.Connection(
a, v, synapse=None,
eval_points=X_train, function=X_train,#want the same thing out (identity)
solver=solver)
v2 = nengo.Node(size_in=train_targets.shape[1])
conn2 = nengo.Connection(
a, v2, synapse=None,
eval_points=X_train, function=train_targets, #Want to get the labels out
solver=solver2)
# linear filter used for edge detection as encoders, more plausible for human visual system
encoders = Gabor().generate(n_hid, (11, 11), rng=rng)
encoders = Mask((28, 28)).populate(encoders, rng=rng, flatten=True)
#Set the ensembles encoders to this
a.encoders = encoders
#Get the one hot labels for the images
def get_outs(sim, images):
#The activity of the neurons when an image is given as input
_, acts = nengo.utils.ensemble.tuning_curves(a, sim, inputs=images)
#The activity multiplied by the weight matrix (calculated in the network) to give the one-hot labels
return np.dot(acts, sim.data[conn2].weights.T)
#Get the neuron activity of an image or group of images (this is the semantic pointer in this case)
def get_activities(sim, images):
_, acts = nengo.utils.ensemble.tuning_curves(a, sim, inputs=images)
return acts
#Get the representation of the image after it has gone through the encoders (Gabor filters) but before it is in the neurons
#This must be computed to create the weight matrix for translation from neuron activity to this step
# This allows a recurrent connection to be made from the neurons to themselves later
def get_encoder_outputs(sim,images):
#Pass the images through the encoders
outs = np.dot(images,sim.data[a].encoders.T) #before the neurons
return outs
dim =28
#Shift an image
def translate(img,x,y):
newImg = scipy.ndimage.interpolation.shift(np.reshape(img, (dim,dim), 'F'),(x,y), cval=-1)
return newImg.T.ravel()
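# e.g. translate(img, 0, -1) gives the 'translated up' image used below; pixels shifted in
# from outside the frame are filled with cval=-1, the background value after the [-1, 1]
# normalization of the MNIST images.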
#Images to train, starting at random translation
orig_imgs = X_train[:100000].copy()
for img in orig_imgs:
img[:] = translate(img,random.randint(-6,6),random.randint(-6,6))
#Images translated up a fixed amount from the original random translation
translate_up_imgs = orig_imgs.copy()
for img in translate_up_imgs:
img[:] = translate(img,0,-1)
#Images translated down a fixed amount from the original random translation
translate_down_imgs = orig_imgs.copy()
for img in translate_down_imgs:
img[:] = translate(img,0,1)
'''
#Images translated right a fixed amount from the original random translation
translate_right_imgs = orig_imgs.copy()
for img in translate_right_imgs:
img[:] = translate(img,1,0)
#Images translated left a fixed amount from the original random translation
translate_left_imgs = orig_imgs.copy()
for img in translate_left_imgs:
img[:] = translate(img,-1,0)
'''
#Images not used for training, but for testing (all at random translations)
test_imgs = X_test[:1000].copy()
for img in test_imgs:
img[:] = translate(img,random.randint(-4,4),random.randint(-4,4))
with nengo.Simulator(model) as sim:
#Neuron activities of different mnist images
#The semantic pointers
orig_acts = get_activities(sim,orig_imgs)
translate_up_acts = get_activities(sim,translate_up_imgs)
translate_down_acts = get_activities(sim,translate_down_imgs)
'''
translate_left_acts = get_activities(sim,translate_left_imgs)
translate_right_acts = get_activities(sim,translate_right_imgs)
'''
test_acts = get_activities(sim,test_imgs)
X_test_acts = get_activities(sim,X_test)
labels_out = get_outs(sim,X_test)
translate_up_after_encoders = get_encoder_outputs(sim,translate_up_imgs)
translate_down_after_encoders = get_encoder_outputs(sim,translate_down_imgs)
'''
translate_left_after_encoders = get_encoder_outputs(sim,translate_left_imgs)
translate_right_after_encoders = get_encoder_outputs(sim,translate_right_imgs)
'''
#solvers for a learning rule
solver_translate_up = nengo.solvers.LstsqL2(reg=1e-8)
solver_translate_down = nengo.solvers.LstsqL2(reg=1e-8)
'''
solver_translate_left = nengo.solvers.LstsqL2(reg=1e-8)
solver_translate_right = nengo.solvers.LstsqL2(reg=1e-8)
'''
solver_word = nengo.solvers.LstsqL2(reg=1e-8)
solver_translate_up_encoder = nengo.solvers.LstsqL2(reg=1e-8)
solver_translate_down_encoder = nengo.solvers.LstsqL2(reg=1e-8)
'''
solver_translate_left_encoder = nengo.solvers.LstsqL2(reg=1e-8)
solver_translate_right_encoder = nengo.solvers.LstsqL2(reg=1e-8)
'''
#find weight matrix between neuron activity of the original image and the translated image
#the solver returns a tuple that also contains solver diagnostics; we just want the weight matrix
translate_up_weights,_ = solver_translate_up(orig_acts, translate_up_acts)
translate_down_weights,_ = solver_translate_down(orig_acts, translate_down_acts)
'''
translate_left_weights,_ = solver_translate_left(orig_acts, translate_left_acts)
translate_right_weights,_ = solver_translate_right(orig_acts, translate_right_acts)
'''
#find weight matrix between labels and neuron activity
label_weights,_ = solver_word(labels_out,X_test_acts)
translate_up_after_encoder_weights,_ = solver_translate_up_encoder(orig_acts,translate_up_after_encoders)
translate_down_after_encoder_weights,_ = solver_translate_down_encoder(orig_acts,translate_down_after_encoders)
'''
translate_left_after_encoder_weights,_ = solver_translate_left_encoder(orig_acts,translate_left_after_encoders)
translate_right_after_encoder_weights,_ = solver_translate_right_encoder(orig_acts,translate_right_after_encoders)
'''
#Saving
filename = "activity_to_img_weights_translate" + str(n_hid) +".p"
cPickle.dump(sim.data[conn].weights.T, open( filename, "wb" ) )
filename = "translate_up_weights" + str(n_hid) +".p"
cPickle.dump(translate_up_weights, open( filename, "wb" ) )
filename = "translate_down_weights" + str(n_hid) +".p"
cPickle.dump(translate_down_weights, open( filename, "wb" ) )
'''
filename = "translate_left_weights" + str(n_hid) +".p"
cPickle.dump(translate_left_weights, open( filename, "wb" ) )
filename = "translate_right_weights" + str(n_hid) +".p"
cPickle.dump(translate_right_weights, open( filename, "wb" ) )
'''
filename = "translate_up_after_encoder_weights" + str(n_hid) +".p"
cPickle.dump(translate_up_after_encoder_weights, open( filename, "wb" ) )
filename = "translate_down_after_encoder_weights" + str(n_hid) +".p"
cPickle.dump(translate_down_after_encoder_weights, open( filename, "wb" ) )
'''
filename = "translate_left_after_encoder_weights" + str(n_hid) +".p"
cPickle.dump(translate_left_after_encoder_weights, open( filename, "wb" ) )
filename = "translate_right_after_encoder_weights" + str(n_hid) +".p"
cPickle.dump(translate_right_after_encoder_weights, open( filename, "wb" ) )
''' | gpl-3.0 |
festeh/BuildingMachineLearningSystemsWithPython | ch01/analyze_webstats.py | 23 | 5113 | # This code is supporting material for the book
# Building Machine Learning Systems with Python
# by Willi Richert and Luis Pedro Coelho
# published by PACKT Publishing
#
# It is made available under the MIT License
import os
from utils import DATA_DIR, CHART_DIR
import scipy as sp
import matplotlib.pyplot as plt
sp.random.seed(3) # to reproduce the data later on
data = sp.genfromtxt(os.path.join(DATA_DIR, "web_traffic.tsv"), delimiter="\t")
print(data[:10])
print(data.shape)
# all examples will have three classes in this file
colors = ['g', 'k', 'b', 'm', 'r']
linestyles = ['-', '-.', '--', ':', '-']
x = data[:, 0]
y = data[:, 1]
print("Number of invalid entries:", sp.sum(sp.isnan(y)))
x = x[~sp.isnan(y)]
y = y[~sp.isnan(y)]
# plot input data
def plot_models(x, y, models, fname, mx=None, ymax=None, xmin=None):
plt.figure(num=None, figsize=(8, 6))
plt.clf()
plt.scatter(x, y, s=10)
plt.title("Web traffic over the last month")
plt.xlabel("Time")
plt.ylabel("Hits/hour")
plt.xticks(
[w * 7 * 24 for w in range(10)], ['week %i' % w for w in range(10)])
if models:
if mx is None:
mx = sp.linspace(0, x[-1], 1000)
for model, style, color in zip(models, linestyles, colors):
# print "Model:",model
# print "Coeffs:",model.coeffs
plt.plot(mx, model(mx), linestyle=style, linewidth=2, c=color)
plt.legend(["d=%i" % m.order for m in models], loc="upper left")
plt.autoscale(tight=True)
plt.ylim(ymin=0)
if ymax:
plt.ylim(ymax=ymax)
if xmin:
plt.xlim(xmin=xmin)
plt.grid(True, linestyle='-', color='0.75')
plt.savefig(fname)
# first look at the data
plot_models(x, y, None, os.path.join(CHART_DIR, "1400_01_01.png"))
# create and plot models
fp1, res1, rank1, sv1, rcond1 = sp.polyfit(x, y, 1, full=True)
print("Model parameters of fp1: %s" % fp1)
print("Error of the model of fp1:", res1)
f1 = sp.poly1d(fp1)
fp2, res2, rank2, sv2, rcond2 = sp.polyfit(x, y, 2, full=True)
print("Model parameters of fp2: %s" % fp2)
print("Error of the model of fp2:", res2)
f2 = sp.poly1d(fp2)
f3 = sp.poly1d(sp.polyfit(x, y, 3))
f10 = sp.poly1d(sp.polyfit(x, y, 10))
f100 = sp.poly1d(sp.polyfit(x, y, 100))
plot_models(x, y, [f1], os.path.join(CHART_DIR, "1400_01_02.png"))
plot_models(x, y, [f1, f2], os.path.join(CHART_DIR, "1400_01_03.png"))
plot_models(
x, y, [f1, f2, f3, f10, f100], os.path.join(CHART_DIR, "1400_01_04.png"))
# fit and plot a model using the knowledge about inflection point
inflection = 3.5 * 7 * 24
xa = x[:inflection]
ya = y[:inflection]
xb = x[inflection:]
yb = y[inflection:]
fa = sp.poly1d(sp.polyfit(xa, ya, 1))
fb = sp.poly1d(sp.polyfit(xb, yb, 1))
plot_models(x, y, [fa, fb], os.path.join(CHART_DIR, "1400_01_05.png"))
def error(f, x, y):
return sp.sum((f(x) - y) ** 2)
print("Errors for the complete data set:")
for f in [f1, f2, f3, f10, f100]:
print("Error d=%i: %f" % (f.order, error(f, x, y)))
print("Errors for only the time after inflection point")
for f in [f1, f2, f3, f10, f100]:
print("Error d=%i: %f" % (f.order, error(f, xb, yb)))
print("Error inflection=%f" % (error(fa, xa, ya) + error(fb, xb, yb)))
# extrapolating into the future
plot_models(
x, y, [f1, f2, f3, f10, f100],
os.path.join(CHART_DIR, "1400_01_06.png"),
mx=sp.linspace(0 * 7 * 24, 6 * 7 * 24, 100),
ymax=10000, xmin=0 * 7 * 24)
print("Trained only on data after inflection point")
fb1 = fb
fb2 = sp.poly1d(sp.polyfit(xb, yb, 2))
fb3 = sp.poly1d(sp.polyfit(xb, yb, 3))
fb10 = sp.poly1d(sp.polyfit(xb, yb, 10))
fb100 = sp.poly1d(sp.polyfit(xb, yb, 100))
print("Errors for only the time after inflection point")
for f in [fb1, fb2, fb3, fb10, fb100]:
print("Error d=%i: %f" % (f.order, error(f, xb, yb)))
plot_models(
x, y, [fb1, fb2, fb3, fb10, fb100],
os.path.join(CHART_DIR, "1400_01_07.png"),
mx=sp.linspace(0 * 7 * 24, 6 * 7 * 24, 100),
ymax=10000, xmin=0 * 7 * 24)
# separating training from testing data
frac = 0.3
split_idx = int(frac * len(xb))
shuffled = sp.random.permutation(list(range(len(xb))))
test = sorted(shuffled[:split_idx])
train = sorted(shuffled[split_idx:])
fbt1 = sp.poly1d(sp.polyfit(xb[train], yb[train], 1))
fbt2 = sp.poly1d(sp.polyfit(xb[train], yb[train], 2))
print("fbt2(x)= \n%s"%fbt2)
print("fbt2(x)-100,000= \n%s"%(fbt2-100000))
fbt3 = sp.poly1d(sp.polyfit(xb[train], yb[train], 3))
fbt10 = sp.poly1d(sp.polyfit(xb[train], yb[train], 10))
fbt100 = sp.poly1d(sp.polyfit(xb[train], yb[train], 100))
print("Test errors for only the time after inflection point")
for f in [fbt1, fbt2, fbt3, fbt10, fbt100]:
print("Error d=%i: %f" % (f.order, error(f, xb[test], yb[test])))
plot_models(
x, y, [fbt1, fbt2, fbt3, fbt10, fbt100],
os.path.join(CHART_DIR, "1400_01_08.png"),
mx=sp.linspace(0 * 7 * 24, 6 * 7 * 24, 100),
ymax=10000, xmin=0 * 7 * 24)
from scipy.optimize import fsolve
print(fbt2)
print(fbt2 - 100000)
reached_max = fsolve(fbt2 - 100000, x0=800) / (7 * 24)
print("100,000 hits/hour expected at week %f" % reached_max[0])
| mit |
ltiao/scikit-learn | examples/linear_model/plot_omp.py | 385 | 2263 | """
===========================
Orthogonal Matching Pursuit
===========================
Using orthogonal matching pursuit for recovering a sparse signal from a noisy
measurement encoded with a dictionary
"""
print(__doc__)
import matplotlib.pyplot as plt
import numpy as np
from sklearn.linear_model import OrthogonalMatchingPursuit
from sklearn.linear_model import OrthogonalMatchingPursuitCV
from sklearn.datasets import make_sparse_coded_signal
n_components, n_features = 512, 100
n_nonzero_coefs = 17
# generate the data
###################
# y = Xw
# |x|_0 = n_nonzero_coefs
y, X, w = make_sparse_coded_signal(n_samples=1,
n_components=n_components,
n_features=n_features,
n_nonzero_coefs=n_nonzero_coefs,
random_state=0)
idx, = w.nonzero()
# distort the clean signal
##########################
y_noisy = y + 0.05 * np.random.randn(len(y))
# plot the sparse signal
########################
plt.figure(figsize=(7, 7))
plt.subplot(4, 1, 1)
plt.xlim(0, 512)
plt.title("Sparse signal")
plt.stem(idx, w[idx])
# plot the noise-free reconstruction
####################################
omp = OrthogonalMatchingPursuit(n_nonzero_coefs=n_nonzero_coefs)
omp.fit(X, y)
coef = omp.coef_
idx_r, = coef.nonzero()
plt.subplot(4, 1, 2)
plt.xlim(0, 512)
plt.title("Recovered signal from noise-free measurements")
plt.stem(idx_r, coef[idx_r])
# plot the noisy reconstruction
###############################
omp.fit(X, y_noisy)
coef = omp.coef_
idx_r, = coef.nonzero()
plt.subplot(4, 1, 3)
plt.xlim(0, 512)
plt.title("Recovered signal from noisy measurements")
plt.stem(idx_r, coef[idx_r])
# plot the noisy reconstruction with number of non-zeros set by CV
##################################################################
omp_cv = OrthogonalMatchingPursuitCV()
omp_cv.fit(X, y_noisy)
coef = omp_cv.coef_
idx_r, = coef.nonzero()
plt.subplot(4, 1, 4)
plt.xlim(0, 512)
plt.title("Recovered signal from noisy measurements with CV")
plt.stem(idx_r, coef[idx_r])
plt.subplots_adjust(0.06, 0.04, 0.94, 0.90, 0.20, 0.38)
plt.suptitle('Sparse signal recovery with Orthogonal Matching Pursuit',
fontsize=16)
plt.show()
| bsd-3-clause |
edarin/population_simulator | src/main.py | 1 | 3114 | # coding: utf-8
import numpy as np
import pandas as pd
from make_demo import (generate_SexeAge, generate_Handicap)
from make_travail import *
from make_couple import (generate_Couple,
generate_pop_men)
from make_children import (generate_Children,
add_Children
)
sample_size_target = 1000
'''
INDIVIDUALS TABLE
'''
# AgeSexe
effectifs_age_sexe = pd.read_csv("data/demographie/pop_age_sexe_2016.csv")
generation = generate_SexeAge(effectifs_age_sexe, sample_size_target)
effectifs_age_sexe = generation[1]
population = generation[0]
sample_size = len(population)
max_age = effectifs_age_sexe['age'].max()
### Activity (labour force status)
reference_activite = pd.read_csv("data/travail/activite_2015.csv")
population['activite']= generate_Activite(reference_activite, effectifs_age_sexe, population, sample_size)
# Employment: derived from the unemployment rate
reference_emploi = pd.read_csv("data/travail/chomage.csv")
population['emploi'] = generate_Emploi(reference_emploi, population, max_age)
# Salary
reference_salaire = pd.read_csv("data/travail/salaire_brut_horaire.csv")
#population['salaire'] = add_Salaire_fromINSEE(reference_salaire,population, max_age)
salaire = open_json('data/travail/salaire_sexe_age.json')
population['salaire'] = add_SalairefromERFS(population, salaire)
# Retirement
reference_retraite = pd.read_csv("data/travail/retraite_2012.csv")
population['retraite'] = add_Retraite(reference_retraite, population, max_age)
# Students
reference_etudes = pd.read_csv("data/demographie/etudes.csv")
population['etudes'] = generate_Etudiants(reference_etudes, population)
# According to an INSEE study (2016) -> 23%
# http://www.insee.fr/fr/themes/document.asp?reg_id=0&ref_id=ip1603#inter6
#### Disability
reference_handicap = pd.read_csv("data/demographie/handicap_pop.csv")
reference_handicap_jeune = pd.read_csv("data/demographie/handicap_pop_jeune.csv")
population['handicap'] = generate_Handicap(reference_handicap, reference_handicap_jeune, population, effectifs_age_sexe, sample_size)
#### Marital status
reference_marital = dict()
for sexe in ['homme', 'femme']:
reference_marital[sexe] = pd.read_csv("data/menages/statut_marital_{0}.csv".format(sexe))
population['statut_marital'] = generate_Couple(reference_marital.copy(), population)
'''
HOUSEHOLDS TABLE
'''
population_menage = generate_pop_men(population[population['age'] >= 15])
population_menage.reset_index(drop= True, inplace= True)
##### Whether the household has one or more children
reference_typefam = pd.read_csv('data/menages/enfants/type_famille.csv')
population_menage[['type_fam', 'enfant']] = generate_Children(reference_typefam, population_menage)
reference_enfant = pd.read_csv('data/menages/enfants/nbr_enfant.csv')
population_menage['nb_enf'] = add_Children(reference_enfant, population_menage)
print('Final number of households:', len(population_menage))
population_menage.to_csv('population_simulated_men{0}.csv'.format(len(population_menage)))
population.to_csv('population_simulated_ind{0}.csv'.format(len(population)))
| agpl-3.0 |
marmarko/ml101 | tensorflow/examples/skflow/mnist_weights.py | 9 | 4163 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This demonstrates one way to access the weights of a custom skflow model.
It is otherwise identical to the standard MNIST convolutional code.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from sklearn import metrics
import tensorflow as tf
from tensorflow.contrib import learn
### Download and load MNIST data.
mnist = learn.datasets.load_dataset('mnist')
### Linear classifier.
feature_columns = learn.infer_real_valued_columns_from_input(mnist.train.images)
classifier = learn.LinearClassifier(
feature_columns=feature_columns, n_classes=10)
classifier.fit(mnist.train.images, mnist.train.labels, batch_size=100,
steps=1000)
score = metrics.accuracy_score(
mnist.test.labels, classifier.predict(mnist.test.images))
print('Accuracy: {0:f}'.format(score))
### Convolutional network
def max_pool_2x2(tensor_in):
return tf.nn.max_pool(tensor_in, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
padding='SAME')
def conv_model(X, y):
# pylint: disable=invalid-name,missing-docstring
# reshape X to 4d tensor with 2nd and 3rd dimensions being image width and
# height final dimension being the number of color channels
X = tf.reshape(X, [-1, 28, 28, 1])
# first conv layer will compute 32 features for each 5x5 patch
with tf.variable_scope('conv_layer1'):
h_conv1 = learn.ops.conv2d(X, n_filters=32, filter_shape=[5, 5],
bias=True, activation=tf.nn.relu)
h_pool1 = max_pool_2x2(h_conv1)
# second conv layer will compute 64 features for each 5x5 patch
with tf.variable_scope('conv_layer2'):
h_conv2 = learn.ops.conv2d(h_pool1, n_filters=64, filter_shape=[5, 5],
bias=True, activation=tf.nn.relu)
h_pool2 = max_pool_2x2(h_conv2)
# reshape tensor into a batch of vectors
h_pool2_flat = tf.reshape(h_pool2, [-1, 7 * 7 * 64])
# densely connected layer with 1024 neurons
h_fc1 = learn.ops.dnn(
h_pool2_flat, [1024], activation=tf.nn.relu, dropout=0.5)
return learn.models.logistic_regression(h_fc1, y)
# Training and predicting
classifier = learn.TensorFlowEstimator(
model_fn=conv_model, n_classes=10, batch_size=100, steps=20000,
learning_rate=0.001)
classifier.fit(mnist.train.images, mnist.train.labels)
score = metrics.accuracy_score(
mnist.test.labels, classifier.predict(mnist.test.images))
print('Accuracy: {0:f}'.format(score))
# Examining fitted weights
## General usage is classifier.get_tensor_value('foo')
## 'foo' must be the variable scope of the desired tensor followed by the
## graph path.
## To understand the mechanism and figure out the right scope and path, you can
## do logging. Then use TensorBoard or a text editor on the log file to look at
## available strings.
## First Convolutional Layer
print('1st Convolutional Layer weights and Bias')
print(classifier.get_tensor_value('conv_layer1/convolution/filters:0'))
print(classifier.get_tensor_value('conv_layer1/convolution/bias:0'))
## Second Convolutional Layer
print('2nd Convolutional Layer weights and Bias')
print(classifier.get_tensor_value('conv_layer2/convolution/filters:0'))
print(classifier.get_tensor_value('conv_layer2/convolution/bias:0'))
## Densely Connected Layer
print('Densely Connected Layer weights')
print(classifier.get_tensor_value('dnn/layer0/Linear/Matrix:0'))
## Logistic Regression weights
print('Logistic Regression weights')
print(classifier.get_tensor_value('logistic_regression/weights:0'))
| bsd-2-clause |
maxentile/msm-learn | projects/metric-learning/Autograd tinker.py | 1 | 18896 |
# coding: utf-8
# In[2]:
from autograd import grad
import autograd.numpy as np
# In[2]:
from numpy.linalg import det
def BC(X,Y):
return det(np.dot(X.T,Y)) / np.sqrt(det(np.dot(X.T,X)) * det(np.dot(Y.T,Y)))
# In[23]:
np.random.seed(0)
X = np.random.rand(1000)*10
Y = 2*X + np.random.randn(1000)+5
# In[ ]:
# In[3]:
import matplotlib.pyplot as plt
# In[4]:
get_ipython().magic(u'matplotlib inline')
# In[26]:
plt.scatter(X,Y)
# In[104]:
def loss(theta):
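    """Negative sum of squared residuals of the line y = theta[0]*x + theta[1];
    gradient *ascent* on this objective therefore fits the line (see below)."""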
return -np.sum((Y-(theta[0]*X+theta[1]))**2)
# In[105]:
loss((2,5))
# In[106]:
gradient = grad(loss)
# In[107]:
gradient(np.zeros(2))
# In[111]:
n=1000
x = np.zeros((n,2))
for i in range(1,n):
x[i] = x[i-1] + gradient(x[i-1])*0.000001
# In[112]:
x[-1]
# In[113]:
plt.plot(x[:,0],x[:,1])
plt.scatter(x[:,0],x[:,1])
# In[446]:
def autocorrelation(X,k=1):
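    """Lag-k autocorrelation of each column of X (normalized by the per-column
    variance), summed over columns."""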
mu = X.mean(0)
denom=(len(X)-k)*np.std(X,0)**2
s = np.sum((X[:-k]-mu)*(X[k:]-mu),0)
return np.sum(s/denom)
#return np.sum(s/denom)
# In[531]:
def time_lag_corr_cov(X,tau=1):
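    """Symmetrized time-lagged correlation matrix and instantaneous covariance
    matrix of X at lag tau (the pair of matrices used by tICA)."""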
#mu = (X[:-tau].mean(0) + X[tau:].mean(0)) / 2
mu = X.mean(0)
X_ = X-mu
M = len(X) - tau
dim = len(X.T)
corr = np.zeros((dim,dim))
cov = np.zeros((dim,dim))
for i in range(M):
corr += np.outer(X_[i],X_[i+tau]) + np.outer(X_[i+tau],X_[i])
cov += np.outer(X_[i],X_[i]) + np.outer(X_[i+tau],X_[i+tau])
return corr / (2.0*M),cov / (2.0*M)
# In[536]:
def autocorr(X,tau=1):
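    """Symmetrized time-lagged correlation matrix of X at lag tau (the first of
    the two matrices returned by time_lag_corr_cov)."""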
mu = X.mean(0)
X_ = X-mu
M = len(X) - tau
dim = len(X.T)
corr = np.zeros((dim,dim))
for i in range(M):
corr += np.outer(X_[i],X_[i+tau]) + np.outer(X_[i+tau],X_[i])
return corr / (2.0*M)
c = autocorr(X_dihedral[:10000])
plt.imshow(c,interpolation='none')
# In[549]:
plt.hist(c.reshape(np.prod(c.shape)),bins=50);
# In[553]:
for i in range(10):
print(np.sum(np.abs(autocorr(np.random.randn(1000,84)))))
# In[552]:
np.sum(np.abs(autocorr(X_dihedral[:10000])))
# In[532]:
time_lag_corr_cov(X_dihedral)
# In[447]:
np.std(X_dihedral,0).shape
# In[448]:
X_dihedral.mean(0).shape
# In[5]:
from msmbuilder.example_datasets import AlanineDipeptide,FsPeptide
dataset = FsPeptide().get()
fs_trajectories = dataset.trajectories
from msmbuilder import featurizer
dhf = featurizer.DihedralFeaturizer()
dhft = dhf.fit_transform(fs_trajectories)
X_dihedral = np.vstack(dhft)#[0]
# In[508]:
X_dihedral.mean(0).shape
# In[509]:
X_dihedral.shape
# In[510]:
autocorrelation(X_dihedral)
# In[511]:
from sklearn.decomposition import PCA
pca = PCA(2)
autocorrelation(pca.fit_transform(X_dihedral))
# In[513]:
X_ = pca.fit_transform(X_dihedral)
plt.scatter(X_[:,0],X_[:,1],linewidths=0,s=1,
c=np.arange(len(X_)),alpha=0.5)
# In[514]:
A_init = pca.components_.T
A_init.shape
# In[515]:
np.dot(X_dihedral,A_init)
# In[8]:
from msmbuilder.decomposition import tICA
tica = tICA(2,10)
X_tica = tica.fit_transform([X_dihedral])[0]
#autocorrelation(X_tica)
# In[519]:
plt.scatter(X_tica[:,0],X_tica[:,1],linewidths=0,s=4,
c=np.arange(len(X_)),alpha=0.5)
# In[520]:
A_init_tica=tica.components_.T
# In[521]:
def autocorr_loss(A_vec):
A = np.reshape(A_vec,A_init.shape)
X_ = np.dot(X_dihedral,A)
X_ /= (np.max(X_) - np.min(X_))
return autocorrelation(X_)
# In[522]:
autocorr_loss(A_init_tica.reshape(84*2))
# In[523]:
autocorr_grad = grad(autocorr_loss)
# In[524]:
plt.hist(autocorr_grad(A_init_tica.reshape(84*2)));
# In[525]:
plt.hist(A_init_tica.reshape(84*2),bins=50);
# In[411]:
get_ipython().magic(u'timeit autocorr_loss(A_init_tica.reshape(84*2))')
# In[412]:
get_ipython().magic(u'timeit autocorr_grad(A_init_tica.reshape(84*2))')
# In[528]:
n=100
x = np.zeros((n,84*2))
x[0] = A_init_tica.reshape(84*2)
from time import time
t = time()
for i in range(1,n):
x[i] = x[i-1] + autocorr_grad(x[i-1])*10
print(i,time()-t)
# In[560]:
plt.plot(x);
# In[530]:
X_ = np.dot(X_dihedral,x[-1].reshape(84,2))
plt.scatter(X_[:,0],X_[:,1],linewidths=0,s=4,
c=np.arange(len(X_)),alpha=0.5)
# In[561]:
X_dihedral.shape
# In[ ]:
# In[468]:
for i in range(len(x))[::50]:
X_ = np.dot(X_dihedral,x[i].reshape(84,2))
plt.scatter(X_[:,0],X_[:,1],linewidths=0,s=4,
c=np.arange(len(X_)),alpha=0.5)
plt.savefig('{0}.jpg'.format(i))
plt.close()
# In[469]:
autocorr_loss(x[-1]),autocorr_loss(A_init_tica),autocorr_loss(A_init)
# In[478]:
def autocorr_loss_mult(A_vec):
A = np.reshape(A_vec,A_init.shape)
X_ = np.dot(X_dihedral,A)
return autocorrelation(X_,1) + autocorrelation(X_,10)
#s = 0
#for i in range(10):
# s += autocorrelation(X_,1+2*i)
return autocorrelation(X_)
# In[479]:
autocorrelation(X_dihedral,10)
# In[480]:
autocorr_grad_mult = grad(autocorr_loss_mult)
# In[482]:
autocorr_loss_mult(np.ones(84*2))
# In[481]:
autocorr_grad_mult(np.ones(84*2)).shape
# In[486]:
n=1000
x = np.zeros((n,84*2))
x[0] = A_init_tica.reshape(84*2)
for i in range(1,n):
x[i] = x[i-1] + autocorr_grad_mult(x[i-1])
# In[487]:
plt.plot(x);
# In[495]:
X_ = np.dot(X_dihedral,x[100].reshape(84,2))
plt.scatter(X_[:,0],X_[:,1],linewidths=0,s=4,
c=np.arange(len(X_)),alpha=0.5)
# In[490]:
l = [autocorr_loss_mult(x_) for x_ in x]
# In[492]:
l
# In[491]:
plt.plot(l)
# In[ ]:
# idea: tICA requires the specification of
# a single autocorrelation time-- can we consider multiple?
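# Added sketch (not in the original notebook): the multi-lag idea in one place.
# It sums the single-lag score used above over several lag times;
# autocorr_loss_mult is the two-lag special case of this.
def multi_lag_score(X_proj, taus=(1, 10, 100)):
    return sum(autocorrelation(X_proj, tau) for tau in taus)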
# In[563]:
# we want to find an embedding of the dihedral angles that puts kinetically-nearby points near each other in the embedding
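# Sketch of the criterion this implies (added; not in the original notebook):
# for a frame x_t and lags tau_1 << tau_2, a linear embedding f(x) = np.dot(x, A)
# should satisfy  d(f(x_t), f(x_{t+tau_1})) < d(f(x_t), f(x_{t+tau_2})),
# i.e. the kinetically-near frame maps closer than the far one; the triplet
# objectives defined below penalize violations of this inequality.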
# In[564]:
X_dihedral.shape
# In[653]:
pca_w = PCA(whiten=True)
X_dihedral_whitened = pca_w.fit_transform(X_dihedral)
# In[654]:
pca = PCA(n_components=2)
X_pca = pca.fit_transform(X_dihedral_whitened)
# In[655]:
sum(pca.explained_variance_ratio_)
# In[656]:
from scipy.spatial.distance import euclidean
# In[906]:
def d(x,y):
return np.sqrt(np.dot(x-y,x-y))
def scalar_penalize(close_distance,far_distance):
return close_distance-far_distance
def mult_penalize(close_distance,far_distance):
return close_distance/far_distance
def exp_penalize(close_distance,far_distance,scale=10):
return np.exp(scale*(close_distance-far_distance))
def zero_one_penalize(close_distance,far_distance):
return 1.0*(close_distance > far_distance)
def triplet_batch_objective_simple(embedding_points,tau_1=1,tau_2=10,penalize=scalar_penalize):
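    """Average penalty over all (t, t+tau_1, t+tau_2) triplets, comparing the
    embedding distance to the kinetically-near frame (lag tau_1) against the
    distance to the far frame (lag tau_2)."""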
loss = 0.0
n_triplets = len(embedding_points) - tau_2
assert(n_triplets>0)
for i in range(n_triplets):
close = d(embedding_points[i],embedding_points[i+tau_1])
far = d(embedding_points[i],embedding_points[i+tau_2])
loss += penalize(close,far)
#print(close,far)
#print(contribution)
return loss / n_triplets
# In[836]:
triplet_batch_objective_simple(X_dihedral,penalize=zero_one_penalize)
# In[837]:
triplet_batch_objective_simple(X_dihedral,tau_1=5,tau_2=10,penalize=zero_one_penalize)
# In[839]:
triplet_batch_objective_simple(X_dihedral,tau_1=1,tau_2=100,penalize=zero_one_penalize)
# In[840]:
sample = X_dihedral[:10000]
# In[842]:
get_ipython().magic(u'timeit triplet_batch_objective_simple(sample,tau_1=1,tau_2=100,penalize=zero_one_penalize)')
# In[861]:
taus = np.array([1,2,3,4,5,10,20,30,40,50,100,200,300,400,500])
results = np.zeros((len(taus),len(taus)))
for i,tau_1 in enumerate(taus):
for j,tau_2 in enumerate(taus):
if tau_2 > tau_1:
results[i,j] = triplet_batch_objective_simple(sample,tau_1=tau_1,tau_2=tau_2,penalize=zero_one_penalize)
# In[864]:
plt.imshow(results,interpolation='none',cmap='Blues')
plt.colorbar()
# In[718]:
# alternate flow: select random center points along the trajectory....
# In[907]:
def stoch_triplet_objective(transform,full_set,tau_1=1,tau_2=10,batch_size=50,penalize=scalar_penalize):
    '''Monte-Carlo estimate of the triplet objective: sample batch_size random
    center frames, form (t, t+tau_1, t+tau_2) triplets, apply transform, and
    average penalize over the batch. Accepts a single trajectory array or a
    list of trajectories (the list branch assumes equal-length trajectories).'''
if type(full_set)==list:
# it's a list of trajectories, each a numpy array
list_ind = np.random.randint(0,len(full_set),batch_size)
centers = np.random.randint(0,len(full_set[0])-tau_2,batch_size)
triplets = [(full_set[l][c],full_set[l][c+tau_1],full_set[l][c+tau_2]) for l,c in zip(list_ind,centers)]
else:
# it's just one trajectory in a numpy array
centers = np.random.randint(0,len(full_set)-tau_2,batch_size)
triplets = [(full_set[c],full_set[c+tau_1],full_set[c+tau_2]) for c in centers]
triplets = [(transform(a),transform(b),transform(c)) for (a,b,c) in triplets]
loss = 0
for i in range(batch_size):
close = d(triplets[i][0],triplets[i][1])
far = d(triplets[i][0],triplets[i][2])
loss += penalize(close,far)
return loss / batch_size
# In[775]:
get_ipython().magic(u'timeit stoch_triplet_objective(lambda i:i,dhft,batch_size=100)')
# In[787]:
loss(pca.components_.T)
# In[900]:
A.shape,A.sum(0).shape
sum(np.abs(A)),sum(np.abs(A/np.abs(A).sum(0)))
# In[910]:
def loss(weights,batch_size=1000):
transform = lambda x:np.dot(x,weights)
return stoch_triplet_objective(transform,dhft,batch_size=batch_size)
def loss_vec(weights,target_dim=2,batch_size=100,penalize=mult_penalize):
A = np.reshape(weights,(weights.shape[0]/target_dim,target_dim))
#A /= np.sum(A,0)
#A /= np.abs(A).sum(0)
#A = A/np.reshape(np.sum(A**2,1),(weights.shape[0]/target_dim,1))
transform = lambda x:np.dot(x,A)
return stoch_triplet_objective(transform,dhft,batch_size=batch_size,penalize=penalize)
grad_loss = grad(loss)
plt.scatter(grad_loss(np.ones(84)),grad_loss(np.ones(84)))
plt.hlines(0,-0.1,0.1,linestyles='--')
plt.vlines(0,-0.1,0.1,linestyles='--')
print(spearmanr(grad_loss(np.ones(84)),grad_loss(np.ones(84))))
plt.figure()
grad_loss = grad(loss_vec)
plt.scatter(grad_loss(np.ones(84*2)),grad_loss(np.ones(84*2)))
plt.hlines(0,-0.1,0.1,linestyles='--')
plt.vlines(0,-0.1,0.1,linestyles='--')
spearmanr(grad_loss(np.ones(84*2)),grad_loss(np.ones(84*2)))
# In[829]:
from scipy.optimize import minimize
from autograd.convenience_wrappers import hessian_vector_product as hvp
results = minimize(lambda w:loss_vec(w,batch_size=1000),pca.components_.T.flatten(),jac=grad(loss_vec),hessp=hvp(loss_vec,84*2))
# In[825]:
results
# In[826]:
A = results['x'].reshape((84,2))
A = A/np.reshape(np.sum(A**2,1),(84,1))
projected = np.dot(X_dihedral,A)
projected /= np.sum(projected,0)
plt.scatter(projected[:,0],projected[:,1],linewidths=0,s=4,
c=np.arange(len(projected)),alpha=0.5)
# In[809]:
np.sum(A,0).shape
# In[802]:
triplet_batch_objective_simple(projected),triplet_batch_objective_simple(X_pca),triplet_batch_objective_simple(X_tica)
# In[877]:
def adagrad(grad, x, num_iters=100, step_size=0.1, gamma=0.9, eps = 10**-8):
"""Root mean squared prop: See Adagrad paper for details.
Stolen from autograd examples: https://github.com/HIPS/autograd/blob/master/examples/optimizers.py#L21"""
avg_sq_grad = np.ones(len(x))
history = np.zeros((num_iters+1,len(x)))
history[0] = x
for i in xrange(num_iters):
g = grad(x)
avg_sq_grad = avg_sq_grad * gamma + g**2 * (1 - gamma)
x -= step_size * g/(np.sqrt(avg_sq_grad) + eps)
history[i+1] = x
return history
# In[920]:
loss_func = lambda weights:loss_vec(weights,batch_size=1000,penalize=scalar_penalize)
history = adagrad(grad(loss_func),pca.components_.T.flatten(),num_iters=100)
# In[929]:
pca = PCA(n_components=2)
X_pca = pca.fit_transform(X_dihedral)
# In[921]:
normed_history = norm_history(history)
plt.plot(normed_history[:,:10]);
# In[932]:
def rotation_matrix(theta=np.pi/2):
r = np.zeros((2,2))
r[0,0] = r[1,1] = np.cos(theta)
r[0,1] = np.sin(theta)
r[1,0] = -np.sin(theta)
return r
np.dot(projected,rotation_matrix())
# In[935]:
proj_mat = np.reshape(normed_history[-1],(84,2))
projected = np.dot(X_dihedral,proj_mat)
#projected /= np.sum(projected,0)
projected = np.dot(projected,rotation_matrix(np.pi/4))
plt.scatter(projected[:,0],projected[:,1],linewidths=0,s=1,
c=np.arange(len(projected)),alpha=0.5,cmap='rainbow')
plt.title('Triplet-based linear embedding')
plt.figure()
plt.scatter(X_pca[:,0],X_pca[:,1],linewidths=0,s=1,
c=np.arange(len(projected)),alpha=0.5,cmap='rainbow')
plt.title('PCA')
plt.figure()
plt.scatter(X_tica[:,0],X_tica[:,1],linewidths=0,s=1,
c=np.arange(len(projected)),alpha=0.5,cmap='rainbow')
plt.title('tICA')
# In[973]:
from mdp.nodes import XSFANode
xsfa = XSFANode(output_dim=2)
pca = PCA()
X_dihedral_ = pca.fit_transform(X_dihedral)[:,:20]
X_xsfa = xsfa.execute(X_dihedral_)
# In[975]:
len(projected),len(X_xsfa)
# In[963]:
plt.plot(pca.explained_variance_ratio_)
# In[976]:
plt.scatter(X_xsfa[:,0],X_xsfa[:,1],linewidths=0,s=1,
c=np.arange(len(X_xsfa)),alpha=0.5,cmap='rainbow')
# In[ ]:
# now let's test which embedding is best for constructing MSMs...
from msmbuilder.cluster import MiniBatchKMeans
from msmbuilder.msm import MarkovStateModel
from sklearn.pipeline import Pipeline
results = dict()
embeddings = [('tICA',X_tica),('PCA',X_pca),('Triplet',projected),('xSFA',X_xsfa)]
for (name,dataset) in embeddings:
pipeline = Pipeline([
('cluster', MiniBatchKMeans(n_clusters=100)),
('msm', MarkovStateModel(lag_time=10))
])
pipeline.fit([dataset[:100000]])
results[name] = pipeline
print(name,pipeline.score([dataset[100000:]]))
# In[951]:
msm = MarkovStateModel()
msm.fit(np.random.randint(0,10,100000))
print(msm.summarize())
# In[916]:
from time import time
t = time()
grad(loss_vec)(pca.components_.T.flatten())
print(time()-t)
# In[873]:
pca.components_.T.flatten().shape
# In[786]:
from scipy.stats import spearmanr
spearmanr(grad_loss(np.ones(84)),grad_loss(np.ones(84)))
# In[ ]:
# In[749]:
objective_evaluations = np.array([stoch_triplet_objective(lambda i:i,dhft,tau_1=1,tau_2=100,batch_size=100) for _ in range(1000)])
# In[750]:
objective_evaluations.std(),objective_evaluations.mean()
# In[743]:
fs_trajectories
# In[742]:
a = []
type(a)==list
# In[738]:
objective_evaluations = np.array([stoch_triplet_objective(lambda i:i,X_pca,tau_1=1,tau_2=100,batch_size=100) for _ in range(1000)])
# In[739]:
objective_evaluations.std(),objective_evaluations.mean()
# In[658]:
pca.components_.shape
# In[659]:
triplet_batch_objective_simple(X_pca[:11])
# In[660]:
np.dot(X_dihedral,pca.components_.T).shape
# In[661]:
def sgd(objective,dataset,init_point,batch_size=20,n_iter=100,step_size=0.01,seed=0,stoch_select=False):
''' objective takes in a parameter vector and an array of data'''
np.random.seed(seed)
testpoints = np.zeros((n_iter,len(init_point)))
testpoints[0] = init_point
ind=0
for i in range(1,n_iter):
        if stoch_select:
            # Assumed behavior (this branch was left empty in the original):
            # pick a random contiguous window as the mini-batch so that the
            # temporal ordering needed by the triplet objective is preserved.
            ind = np.random.randint(0, len(dataset) - batch_size)
            subset = dataset[ind:ind + batch_size]
        else:
max_ind = ind+batch_size
if max_ind>=len(dataset):
ind = max_ind % len(dataset)
max_ind = ind+batch_size
subset = dataset[ind:max_ind]
ind = (ind + batch_size)
obj_grad = grad(lambda p:objective(p,subset))
raw_grad = obj_grad(testpoints[i-1])
gradient = np.nan_to_num(raw_grad)
#print(gradient,raw_grad)
testpoints[i] = testpoints[i-1] - gradient*step_size
return np.array(testpoints)
# In[662]:
def projection_obj(proj_vec,subset):
# WARNING: CURRENTLY HARD-CODED PROJECTION MATRIX DIMENSIONS...
A = np.reshape(proj_vec,(84,2))
A /= (A**2).sum(0)
projected = np.dot(subset,A)
return triplet_batch_objective_simple(projected)
# In[663]:
(A**2).sum(0)
# In[ ]:
# In[712]:
raw_points = sgd(projection_obj,X_dihedral_whitened,pca.components_.T.flatten(),step_size=0.01,
n_iter=1000,batch_size=20)
# In[881]:
def norm(s):
return s / np.sqrt(np.sum(s**2))
plt.plot(raw_points[:,:10]);
plt.figure()
def norm_history(raw_points):
return np.array([norm(s) for s in raw_points])
normed_points = norm_history(raw_points)
plt.plot(normed_points);
# In[707]:
(pca.components_).flatten().shape
# In[708]:
A = np.reshape(raw_points[-1],(84,2))
projected = np.dot(X_dihedral,A)
# In[709]:
plt.scatter(projected[:,0],projected[:,1],linewidths=0,s=4,
c=np.arange(len(projected)),alpha=0.5)
# In[681]:
plt.plot(np.linalg.eigh(A.dot(A.T))[0][-5:])
# In[672]:
tica = tICA(n_components=2)
X_tica = tica.fit_transform([X_dihedral_whitened])[0]
# In[673]:
plt.scatter(X_tica[:,0],X_tica[:,1],linewidths=0,s=4,
c=np.arange(len(projected)),alpha=0.5)
# In[674]:
triplet_batch_objective_simple(X_pca),triplet_batch_objective_simple(X_tica),triplet_batch_objective_simple(projected)
# In[9]:
tica = tICA(n_components=2)
# In[76]:
from sklearn.preprocessing import PolynomialFeatures
from time import time
t = time()
dhft_poly = []
poly = PolynomialFeatures()
for i in range(len(dhft)):
dhft_poly.append(poly.fit_transform(dhft[i]))
print(i,time()-t)
# In[ ]:
# In[15]:
dhft[0].shape
# In[19]:
get_ipython().magic(u'timeit poly.fit_transform(dhft[0][:1000])')
# In[24]:
dhft_poly_0 = poly.fit_transform(dhft[0])
# In[25]:
dhft_poly_0.shape
# In[48]:
tica=tICA(n_components=2,lag_time=10)
# In[80]:
X_tica_poly = tica.fit_transform(dhft_poly)
# In[81]:
X_tica_poly_vstack = np.vstack(X_tica_poly)
# In[84]:
plt.scatter(X_tica_poly_vstack[:,0],X_tica_poly_vstack[:,1],linewidths=0,s=1,
#c=np.arange(len(X_tica_poly_vstack)),
c=np.vstack([i*np.ones(len(X_tica_poly[0])) for i in range(len(X_tica_poly))]),
alpha=0.5,cmap='rainbow')
plt.title('Nonlinear tICA')
# In[78]:
tica = tICA(n_components=2,lag_time=10)
X_tica = tica.fit_transform(dhft)
# In[79]:
X_tica_vstack = np.vstack(X_tica)
plt.scatter(X_tica_vstack[:,0],X_tica_vstack[:,1],linewidths=0,s=1,
c=np.arange(len(X_tica_vstack)),alpha=0.5,cmap='rainbow')
plt.title('tICA')
# In[87]:
from msmbuilder.cluster import MiniBatchKMeans
from msmbuilder.msm import MarkovStateModel
from sklearn.pipeline import Pipeline
results = dict()
embeddings = [('tICA',X_tica),('Nonlinear tICA',X_tica_poly)]
for (name,dataset) in embeddings:
pipeline = Pipeline([
('cluster', MiniBatchKMeans(n_clusters=100)),
('msm', MarkovStateModel(lag_time=1))
])
#pipeline.fit([dataset[:5000]])
#pipeline.fit(dataset)
pipeline.fit(dataset[:14])
results[name] = pipeline
#print(pipeline.steps[1][1].score_)
print(pipeline.score(dataset[14:]))
#print(name,pipeline.score([dataset[5000:]]))
# In[67]:
msm = MarkovStateModel()
msm.score_
# In[56]:
X_tica_poly.shape
# In[ ]:
| mit |
rbalda/neural_ocr | env/lib/python2.7/site-packages/matplotlib/tests/__init__.py | 6 | 1362 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
from matplotlib.externals import six
import difflib
import os
from matplotlib.testing import setup
_multiprocess_can_split_ = True
# Check that the test directories exist
if not os.path.exists(os.path.join(
os.path.dirname(__file__), 'baseline_images')):
raise IOError(
'The baseline image directory does not exist. '
'This is most likely because the test data is not installed. '
'You may need to install matplotlib from source to get the '
'test data.')
def assert_str_equal(reference_str, test_str,
format_str=('String {str1} and {str2} do not '
'match:\n{differences}')):
"""
Assert the two strings are equal. If not, fail and print their
diffs using difflib.
"""
if reference_str != test_str:
diff = difflib.unified_diff(reference_str.splitlines(1),
test_str.splitlines(1),
'Reference', 'Test result',
'', '', 0)
raise ValueError(format_str.format(str1=reference_str,
str2=test_str,
differences=''.join(diff)))
| mit |
thientu/scikit-learn | examples/manifold/plot_mds.py | 261 | 2616 | """
=========================
Multi-dimensional scaling
=========================
An illustration of the metric and non-metric MDS on generated noisy data.
The reconstructed points using the metric MDS and non metric MDS are slightly
shifted to avoid overlapping.
"""
# Author: Nelle Varoquaux <[email protected]>
# Licence: BSD
print(__doc__)
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.collections import LineCollection
from sklearn import manifold
from sklearn.metrics import euclidean_distances
from sklearn.decomposition import PCA
n_samples = 20
seed = np.random.RandomState(seed=3)
X_true = seed.randint(0, 20, 2 * n_samples).astype(np.float)
X_true = X_true.reshape((n_samples, 2))
# Center the data
X_true -= X_true.mean()
similarities = euclidean_distances(X_true)
# Add noise to the similarities
noise = np.random.rand(n_samples, n_samples)
noise = noise + noise.T
noise[np.arange(noise.shape[0]), np.arange(noise.shape[0])] = 0
similarities += noise
mds = manifold.MDS(n_components=2, max_iter=3000, eps=1e-9, random_state=seed,
dissimilarity="precomputed", n_jobs=1)
pos = mds.fit(similarities).embedding_
nmds = manifold.MDS(n_components=2, metric=False, max_iter=3000, eps=1e-12,
dissimilarity="precomputed", random_state=seed, n_jobs=1,
n_init=1)
npos = nmds.fit_transform(similarities, init=pos)
# Rescale the data
pos *= np.sqrt((X_true ** 2).sum()) / np.sqrt((pos ** 2).sum())
npos *= np.sqrt((X_true ** 2).sum()) / np.sqrt((npos ** 2).sum())
# Rotate the data
clf = PCA(n_components=2)
X_true = clf.fit_transform(X_true)
pos = clf.fit_transform(pos)
npos = clf.fit_transform(npos)
fig = plt.figure(1)
ax = plt.axes([0., 0., 1., 1.])
plt.scatter(X_true[:, 0], X_true[:, 1], c='r', s=20)
plt.scatter(pos[:, 0], pos[:, 1], s=20, c='g')
plt.scatter(npos[:, 0], npos[:, 1], s=20, c='b')
plt.legend(('True position', 'MDS', 'NMDS'), loc='best')
similarities = similarities.max() / similarities * 100
similarities[np.isinf(similarities)] = 0
# Plot the edges
start_idx, end_idx = np.where(pos)
#a sequence of (*line0*, *line1*, *line2*), where::
# linen = (x0, y0), (x1, y1), ... (xm, ym)
segments = [[X_true[i, :], X_true[j, :]]
for i in range(len(pos)) for j in range(len(pos))]
values = np.abs(similarities)
lc = LineCollection(segments,
zorder=0, cmap=plt.cm.hot_r,
norm=plt.Normalize(0, values.max()))
lc.set_array(similarities.flatten())
lc.set_linewidths(0.5 * np.ones(len(segments)))
ax.add_collection(lc)
plt.show()
| bsd-3-clause |
dyoung418/tensorflow | tensorflow/python/estimator/inputs/queues/feeding_queue_runner_test.py | 116 | 5164 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests `FeedingQueueRunner` using arrays and `DataFrames`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.client import session
from tensorflow.python.estimator.inputs.queues import feeding_functions as ff
from tensorflow.python.framework import ops
from tensorflow.python.platform import test
from tensorflow.python.training import coordinator
from tensorflow.python.training import queue_runner_impl
try:
# pylint: disable=g-import-not-at-top
import pandas as pd
HAS_PANDAS = True
except IOError:
# Pandas writes a temporary file during import. If it fails, don't use pandas.
HAS_PANDAS = False
except ImportError:
HAS_PANDAS = False
def get_rows(array, row_indices):
rows = [array[i] for i in row_indices]
return np.vstack(rows)
class FeedingQueueRunnerTestCase(test.TestCase):
"""Tests for `FeedingQueueRunner`."""
def testArrayFeeding(self):
with ops.Graph().as_default():
array = np.arange(32).reshape([16, 2])
q = ff._enqueue_data(array, capacity=100)
batch_size = 3
dq_op = q.dequeue_many(batch_size)
with session.Session() as sess:
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(sess=sess, coord=coord)
for i in range(100):
indices = [
j % array.shape[0]
for j in range(batch_size * i, batch_size * (i + 1))
]
expected_dq = get_rows(array, indices)
dq = sess.run(dq_op)
np.testing.assert_array_equal(indices, dq[0])
np.testing.assert_array_equal(expected_dq, dq[1])
coord.request_stop()
coord.join(threads)
def testArrayFeedingMultiThread(self):
with ops.Graph().as_default():
array = np.arange(256).reshape([128, 2])
q = ff._enqueue_data(array, capacity=128, num_threads=8, shuffle=True)
batch_size = 3
dq_op = q.dequeue_many(batch_size)
with session.Session() as sess:
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(sess=sess, coord=coord)
for _ in range(100):
dq = sess.run(dq_op)
indices = dq[0]
expected_dq = get_rows(array, indices)
np.testing.assert_array_equal(expected_dq, dq[1])
coord.request_stop()
coord.join(threads)
def testPandasFeeding(self):
if not HAS_PANDAS:
return
with ops.Graph().as_default():
array1 = np.arange(32)
array2 = np.arange(32, 64)
df = pd.DataFrame({"a": array1, "b": array2}, index=np.arange(64, 96))
q = ff._enqueue_data(df, capacity=100)
batch_size = 5
dq_op = q.dequeue_many(5)
with session.Session() as sess:
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(sess=sess, coord=coord)
for i in range(100):
indices = [
j % array1.shape[0]
for j in range(batch_size * i, batch_size * (i + 1))
]
expected_df_indices = df.index[indices]
expected_rows = df.iloc[indices]
dq = sess.run(dq_op)
np.testing.assert_array_equal(expected_df_indices, dq[0])
for col_num, col in enumerate(df.columns):
np.testing.assert_array_equal(expected_rows[col].values,
dq[col_num + 1])
coord.request_stop()
coord.join(threads)
def testPandasFeedingMultiThread(self):
if not HAS_PANDAS:
return
with ops.Graph().as_default():
array1 = np.arange(128, 256)
array2 = 2 * array1
df = pd.DataFrame({"a": array1, "b": array2}, index=np.arange(128))
q = ff._enqueue_data(df, capacity=128, num_threads=8, shuffle=True)
batch_size = 5
dq_op = q.dequeue_many(batch_size)
with session.Session() as sess:
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(sess=sess, coord=coord)
for _ in range(100):
dq = sess.run(dq_op)
indices = dq[0]
expected_rows = df.iloc[indices]
for col_num, col in enumerate(df.columns):
np.testing.assert_array_equal(expected_rows[col].values,
dq[col_num + 1])
coord.request_stop()
coord.join(threads)
if __name__ == "__main__":
test.main()
| apache-2.0 |
theoryno3/scikit-learn | examples/ensemble/plot_gradient_boosting_regularization.py | 355 | 2843 | """
================================
Gradient Boosting regularization
================================
Illustration of the effect of different regularization strategies
for Gradient Boosting. The example is taken from Hastie et al 2009.
The loss function used is binomial deviance. Regularization via
shrinkage (``learning_rate < 1.0``) improves performance considerably.
In combination with shrinkage, stochastic gradient boosting
(``subsample < 1.0``) can produce more accurate models by reducing the
variance via bagging.
Subsampling without shrinkage usually does poorly.
Another strategy to reduce the variance is by subsampling the features
analogous to the random splits in Random Forests
(via the ``max_features`` parameter).
.. [1] T. Hastie, R. Tibshirani and J. Friedman, "Elements of Statistical
Learning Ed. 2", Springer, 2009.
"""
print(__doc__)
# Author: Peter Prettenhofer <[email protected]>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import ensemble
from sklearn import datasets
X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
X = X.astype(np.float32)
# map labels from {-1, 1} to {0, 1}
labels, y = np.unique(y, return_inverse=True)
X_train, X_test = X[:2000], X[2000:]
y_train, y_test = y[:2000], y[2000:]
original_params = {'n_estimators': 1000, 'max_leaf_nodes': 4, 'max_depth': None, 'random_state': 2,
'min_samples_split': 5}
plt.figure()
for label, color, setting in [('No shrinkage', 'orange',
{'learning_rate': 1.0, 'subsample': 1.0}),
('learning_rate=0.1', 'turquoise',
{'learning_rate': 0.1, 'subsample': 1.0}),
('subsample=0.5', 'blue',
{'learning_rate': 1.0, 'subsample': 0.5}),
('learning_rate=0.1, subsample=0.5', 'gray',
{'learning_rate': 0.1, 'subsample': 0.5}),
('learning_rate=0.1, max_features=2', 'magenta',
{'learning_rate': 0.1, 'max_features': 2})]:
params = dict(original_params)
params.update(setting)
clf = ensemble.GradientBoostingClassifier(**params)
clf.fit(X_train, y_train)
# compute test set deviance
test_deviance = np.zeros((params['n_estimators'],), dtype=np.float64)
for i, y_pred in enumerate(clf.staged_decision_function(X_test)):
# clf.loss_ assumes that y_test[i] in {0, 1}
test_deviance[i] = clf.loss_(y_test, y_pred)
plt.plot((np.arange(test_deviance.shape[0]) + 1)[::5], test_deviance[::5],
'-', color=color, label=label)
plt.legend(loc='upper left')
plt.xlabel('Boosting Iterations')
plt.ylabel('Test Set Deviance')
plt.show()
| bsd-3-clause |
akuefler/fovea | tests/track_text_test.py | 1 | 2526 | """
Tests for prototype code to track variable values with callbacks
"""
from __future__ import division
import PyDSTool as dst
from PyDSTool.Toolbox import phaseplane as pp
import numpy as np
from matplotlib import pyplot as plt
import fovea
import fovea.graphics as gx
from fovea.graphics import tracker
class LogWrappedFunction(object):
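    """Wraps a function so that each call logs the arguments it was invoked
    with before delegating to the wrapped function."""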
def __init__(self, function):
import inspect
self.function = function
self.args, self.varargs, self.keywords, self.defaults = inspect.getargspec(function)
# ArgSpec(args=['gen', 'subdomain', 'n', 'maxsearch', 'eps', 't', 'jac'], varargs=None, keywords=None, defaults=(None, 5, 1000, 1e-08, 0, None))
    def logAndCall(self, *arguments, **namedArguments):
        print("Calling %s with arguments %s and named arguments %s" %\
              (self.function.func_name, arguments, namedArguments))
        # return the wrapped function's result so the wrapper is transparent
        return self.function.__call__(*arguments, **namedArguments)
def logwrap(function):
return LogWrappedFunction(function).logAndCall
@logwrap
def doSomething(spam, eggs, foo, bar):
print("Doing something totally awesome with %s and %s." % (spam, eggs))
doSomething("beans","rice", foo="wiggity", bar="wack")
# ============================
def track_attribute(calc_con, attr_name):
"""
Create decorator to track named attribute used in a calculation
"""
def decorator(fn):
#obj =
#tracker.track_list = [getattr(obj, attr_name)]
calc_con.workspace
return fn
return decorator
def test_func_noattr(x, eps=1e-8):
"""
mock function that would use a tolerance eps and return a numerical
object that doesn't contain reference to that tolerance
"""
return x
# this doesn't let us get at the defaults unless we re-specify a default value
# in the wrapper (see logger above)
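# A hedged alternative (added; not in the original test): the defaults could be
# read off the wrapped function's signature instead of re-specifying them, e.g.
#
#     import inspect
#     spec = inspect.getargspec(test_func_noattr)
#     defaults = dict(zip(spec.args[-len(spec.defaults):], spec.defaults))
#     # -> {'eps': 1e-08}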
def wrap_test_func_noattr(x, eps=1e-8):
x = test_func_noattr(x, eps)
res = dst.args(x=x, eps=eps)
return res
#@track_attribute(cc, 'eps')
def test_func_attr(x, eps=1e-8):
"""
mock function that would use a tolerance eps and return a numerical
object that does contain reference to that tolerance
"""
res = dst.args(val=x, eps=eps)
return res
x1 = wrap_test_func_noattr(1.0, 1e-8)
x2 = test_func_attr(3.1, 1e-5)
cc = fovea.calc_context(dst.args(tracked_objects=[],
name='saddles'), 'saddles') # None would be sim object
wksp = cc.workspace
wksp.x1 = x1
wksp.x2 = x2
tracker(cc, 1, text_metadata='eps')
tracker.show() | bsd-3-clause |
vivekmishra1991/scikit-learn | sklearn/linear_model/tests/test_logistic.py | 59 | 35368 | import numpy as np
import scipy.sparse as sp
from scipy import linalg, optimize, sparse
import scipy
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import raises
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_raise_message
from sklearn.utils import ConvergenceWarning
from sklearn.linear_model.logistic import (
LogisticRegression,
logistic_regression_path, LogisticRegressionCV,
_logistic_loss_and_grad, _logistic_grad_hess,
_multinomial_grad_hess, _logistic_loss,
)
from sklearn.cross_validation import StratifiedKFold
from sklearn.datasets import load_iris, make_classification
from sklearn.metrics import log_loss
X = [[-1, 0], [0, 1], [1, 1]]
X_sp = sp.csr_matrix(X)
Y1 = [0, 1, 1]
Y2 = [2, 1, 0]
iris = load_iris()
sp_version = tuple([int(s) for s in scipy.__version__.split('.')])
def check_predictions(clf, X, y):
"""Check that the model is able to fit the classification data"""
n_samples = len(y)
classes = np.unique(y)
n_classes = classes.shape[0]
predicted = clf.fit(X, y).predict(X)
assert_array_equal(clf.classes_, classes)
assert_equal(predicted.shape, (n_samples,))
assert_array_equal(predicted, y)
probabilities = clf.predict_proba(X)
assert_equal(probabilities.shape, (n_samples, n_classes))
assert_array_almost_equal(probabilities.sum(axis=1), np.ones(n_samples))
assert_array_equal(probabilities.argmax(axis=1), y)
def test_predict_2_classes():
# Simple sanity check on a 2 classes dataset
# Make sure it predicts the correct result on simple datasets.
check_predictions(LogisticRegression(random_state=0), X, Y1)
check_predictions(LogisticRegression(random_state=0), X_sp, Y1)
check_predictions(LogisticRegression(C=100, random_state=0), X, Y1)
check_predictions(LogisticRegression(C=100, random_state=0), X_sp, Y1)
check_predictions(LogisticRegression(fit_intercept=False,
random_state=0), X, Y1)
check_predictions(LogisticRegression(fit_intercept=False,
random_state=0), X_sp, Y1)
def test_error():
# Test for appropriate exception on errors
msg = "Penalty term must be positive"
assert_raise_message(ValueError, msg,
LogisticRegression(C=-1).fit, X, Y1)
assert_raise_message(ValueError, msg,
LogisticRegression(C="test").fit, X, Y1)
for LR in [LogisticRegression, LogisticRegressionCV]:
msg = "Tolerance for stopping criteria must be positive"
assert_raise_message(ValueError, msg, LR(tol=-1).fit, X, Y1)
assert_raise_message(ValueError, msg, LR(tol="test").fit, X, Y1)
msg = "Maximum number of iteration must be positive"
assert_raise_message(ValueError, msg, LR(max_iter=-1).fit, X, Y1)
assert_raise_message(ValueError, msg, LR(max_iter="test").fit, X, Y1)
def test_predict_3_classes():
check_predictions(LogisticRegression(C=10), X, Y2)
check_predictions(LogisticRegression(C=10), X_sp, Y2)
def test_predict_iris():
# Test logistic regression with the iris dataset
n_samples, n_features = iris.data.shape
target = iris.target_names[iris.target]
# Test that both multinomial and OvR solvers handle
# multiclass data correctly and give good accuracy
# score (>0.95) for the training data.
for clf in [LogisticRegression(C=len(iris.data)),
LogisticRegression(C=len(iris.data), solver='lbfgs',
multi_class='multinomial'),
LogisticRegression(C=len(iris.data), solver='newton-cg',
multi_class='multinomial'),
LogisticRegression(C=len(iris.data), solver='sag', tol=1e-2,
multi_class='ovr', random_state=42)]:
clf.fit(iris.data, target)
assert_array_equal(np.unique(target), clf.classes_)
pred = clf.predict(iris.data)
assert_greater(np.mean(pred == target), .95)
probabilities = clf.predict_proba(iris.data)
assert_array_almost_equal(probabilities.sum(axis=1),
np.ones(n_samples))
pred = iris.target_names[probabilities.argmax(axis=1)]
assert_greater(np.mean(pred == target), .95)
def test_multinomial_validation():
for solver in ['lbfgs', 'newton-cg']:
lr = LogisticRegression(C=-1, solver=solver, multi_class='multinomial')
assert_raises(ValueError, lr.fit, [[0, 1], [1, 0]], [0, 1])
def test_check_solver_option():
X, y = iris.data, iris.target
for LR in [LogisticRegression, LogisticRegressionCV]:
msg = ("Logistic Regression supports only liblinear, newton-cg, lbfgs"
" and sag solvers, got wrong_name")
lr = LR(solver="wrong_name")
assert_raise_message(ValueError, msg, lr.fit, X, y)
msg = "multi_class should be either multinomial or ovr, got wrong_name"
lr = LR(solver='newton-cg', multi_class="wrong_name")
assert_raise_message(ValueError, msg, lr.fit, X, y)
# all solver except 'newton-cg' and 'lfbgs'
for solver in ['liblinear', 'sag']:
msg = ("Solver %s does not support a multinomial backend." %
solver)
lr = LR(solver=solver, multi_class='multinomial')
assert_raise_message(ValueError, msg, lr.fit, X, y)
# all solvers except 'liblinear'
for solver in ['newton-cg', 'lbfgs', 'sag']:
msg = ("Solver %s supports only l2 penalties, got l1 penalty." %
solver)
lr = LR(solver=solver, penalty='l1')
assert_raise_message(ValueError, msg, lr.fit, X, y)
msg = ("Solver %s supports only dual=False, got dual=True" %
solver)
lr = LR(solver=solver, dual=True)
assert_raise_message(ValueError, msg, lr.fit, X, y)
def test_multinomial_binary():
# Test multinomial LR on a binary problem.
target = (iris.target > 0).astype(np.intp)
target = np.array(["setosa", "not-setosa"])[target]
for solver in ['lbfgs', 'newton-cg']:
clf = LogisticRegression(solver=solver, multi_class='multinomial')
clf.fit(iris.data, target)
assert_equal(clf.coef_.shape, (1, iris.data.shape[1]))
assert_equal(clf.intercept_.shape, (1,))
assert_array_equal(clf.predict(iris.data), target)
mlr = LogisticRegression(solver=solver, multi_class='multinomial',
fit_intercept=False)
mlr.fit(iris.data, target)
pred = clf.classes_[np.argmax(clf.predict_log_proba(iris.data),
axis=1)]
assert_greater(np.mean(pred == target), .9)
def test_sparsify():
# Test sparsify and densify members.
n_samples, n_features = iris.data.shape
target = iris.target_names[iris.target]
clf = LogisticRegression(random_state=0).fit(iris.data, target)
pred_d_d = clf.decision_function(iris.data)
clf.sparsify()
assert_true(sp.issparse(clf.coef_))
pred_s_d = clf.decision_function(iris.data)
sp_data = sp.coo_matrix(iris.data)
pred_s_s = clf.decision_function(sp_data)
clf.densify()
pred_d_s = clf.decision_function(sp_data)
assert_array_almost_equal(pred_d_d, pred_s_d)
assert_array_almost_equal(pred_d_d, pred_s_s)
assert_array_almost_equal(pred_d_d, pred_d_s)
def test_inconsistent_input():
# Test that an exception is raised on inconsistent input
rng = np.random.RandomState(0)
X_ = rng.random_sample((5, 10))
y_ = np.ones(X_.shape[0])
y_[0] = 0
clf = LogisticRegression(random_state=0)
# Wrong dimensions for training data
y_wrong = y_[:-1]
assert_raises(ValueError, clf.fit, X, y_wrong)
# Wrong dimensions for test data
assert_raises(ValueError, clf.fit(X_, y_).predict,
rng.random_sample((3, 12)))
def test_write_parameters():
# Test that we can write to coef_ and intercept_
clf = LogisticRegression(random_state=0)
clf.fit(X, Y1)
clf.coef_[:] = 0
clf.intercept_[:] = 0
assert_array_almost_equal(clf.decision_function(X), 0)
@raises(ValueError)
def test_nan():
# Test proper NaN handling.
# Regression test for Issue #252: fit used to go into an infinite loop.
Xnan = np.array(X, dtype=np.float64)
Xnan[0, 1] = np.nan
LogisticRegression(random_state=0).fit(Xnan, Y1)
def test_consistency_path():
# Test that the path algorithm is consistent
rng = np.random.RandomState(0)
X = np.concatenate((rng.randn(100, 2) + [1, 1], rng.randn(100, 2)))
y = [1] * 100 + [-1] * 100
Cs = np.logspace(0, 4, 10)
f = ignore_warnings
# can't test with fit_intercept=True since LIBLINEAR
# penalizes the intercept
for solver in ('lbfgs', 'newton-cg', 'liblinear', 'sag'):
coefs, Cs, _ = f(logistic_regression_path)(
X, y, Cs=Cs, fit_intercept=False, tol=1e-5, solver=solver,
random_state=0)
for i, C in enumerate(Cs):
lr = LogisticRegression(C=C, fit_intercept=False, tol=1e-5,
random_state=0)
lr.fit(X, y)
lr_coef = lr.coef_.ravel()
assert_array_almost_equal(lr_coef, coefs[i], decimal=4,
err_msg="with solver = %s" % solver)
# test for fit_intercept=True
for solver in ('lbfgs', 'newton-cg', 'liblinear', 'sag'):
Cs = [1e3]
coefs, Cs, _ = f(logistic_regression_path)(
X, y, Cs=Cs, fit_intercept=True, tol=1e-6, solver=solver,
intercept_scaling=10000., random_state=0)
lr = LogisticRegression(C=Cs[0], fit_intercept=True, tol=1e-4,
intercept_scaling=10000., random_state=0)
lr.fit(X, y)
lr_coef = np.concatenate([lr.coef_.ravel(), lr.intercept_])
assert_array_almost_equal(lr_coef, coefs[0], decimal=4,
err_msg="with solver = %s" % solver)
def test_liblinear_dual_random_state():
# random_state is relevant for liblinear solver only if dual=True
X, y = make_classification(n_samples=20)
lr1 = LogisticRegression(random_state=0, dual=True, max_iter=1, tol=1e-15)
lr1.fit(X, y)
lr2 = LogisticRegression(random_state=0, dual=True, max_iter=1, tol=1e-15)
lr2.fit(X, y)
lr3 = LogisticRegression(random_state=8, dual=True, max_iter=1, tol=1e-15)
lr3.fit(X, y)
# same result for same random state
assert_array_almost_equal(lr1.coef_, lr2.coef_)
# different results for different random states
msg = "Arrays are not almost equal to 6 decimals"
assert_raise_message(AssertionError, msg,
assert_array_almost_equal, lr1.coef_, lr3.coef_)
def test_logistic_loss_and_grad():
X_ref, y = make_classification(n_samples=20)
n_features = X_ref.shape[1]
X_sp = X_ref.copy()
X_sp[X_sp < .1] = 0
X_sp = sp.csr_matrix(X_sp)
for X in (X_ref, X_sp):
w = np.zeros(n_features)
# First check that our derivation of the grad is correct
loss, grad = _logistic_loss_and_grad(w, X, y, alpha=1.)
approx_grad = optimize.approx_fprime(
w, lambda w: _logistic_loss_and_grad(w, X, y, alpha=1.)[0], 1e-3
)
assert_array_almost_equal(grad, approx_grad, decimal=2)
# Second check that our intercept implementation is good
w = np.zeros(n_features + 1)
loss_interp, grad_interp = _logistic_loss_and_grad(
w, X, y, alpha=1.
)
assert_array_almost_equal(loss, loss_interp)
approx_grad = optimize.approx_fprime(
w, lambda w: _logistic_loss_and_grad(w, X, y, alpha=1.)[0], 1e-3
)
assert_array_almost_equal(grad_interp, approx_grad, decimal=2)
def test_logistic_grad_hess():
rng = np.random.RandomState(0)
n_samples, n_features = 50, 5
X_ref = rng.randn(n_samples, n_features)
y = np.sign(X_ref.dot(5 * rng.randn(n_features)))
X_ref -= X_ref.mean()
X_ref /= X_ref.std()
X_sp = X_ref.copy()
X_sp[X_sp < .1] = 0
X_sp = sp.csr_matrix(X_sp)
for X in (X_ref, X_sp):
w = .1 * np.ones(n_features)
# First check that _logistic_grad_hess is consistent
# with _logistic_loss_and_grad
loss, grad = _logistic_loss_and_grad(w, X, y, alpha=1.)
grad_2, hess = _logistic_grad_hess(w, X, y, alpha=1.)
assert_array_almost_equal(grad, grad_2)
# Now check our hessian along the second direction of the grad
vector = np.zeros_like(grad)
vector[1] = 1
hess_col = hess(vector)
# Computation of the Hessian is particularly fragile to numerical
# errors when doing simple finite differences. Here we compute the
# grad along a path in the direction of the vector and then use a
# least-square regression to estimate the slope
e = 1e-3
d_x = np.linspace(-e, e, 30)
d_grad = np.array([
_logistic_loss_and_grad(w + t * vector, X, y, alpha=1.)[1]
for t in d_x
])
d_grad -= d_grad.mean(axis=0)
approx_hess_col = linalg.lstsq(d_x[:, np.newaxis], d_grad)[0].ravel()
assert_array_almost_equal(approx_hess_col, hess_col, decimal=3)
# Second check that our intercept implementation is good
w = np.zeros(n_features + 1)
loss_interp, grad_interp = _logistic_loss_and_grad(w, X, y, alpha=1.)
loss_interp_2 = _logistic_loss(w, X, y, alpha=1.)
grad_interp_2, hess = _logistic_grad_hess(w, X, y, alpha=1.)
assert_array_almost_equal(loss_interp, loss_interp_2)
assert_array_almost_equal(grad_interp, grad_interp_2)
def test_logistic_cv():
# test for LogisticRegressionCV object
n_samples, n_features = 50, 5
rng = np.random.RandomState(0)
X_ref = rng.randn(n_samples, n_features)
y = np.sign(X_ref.dot(5 * rng.randn(n_features)))
X_ref -= X_ref.mean()
X_ref /= X_ref.std()
lr_cv = LogisticRegressionCV(Cs=[1.], fit_intercept=False,
solver='liblinear')
lr_cv.fit(X_ref, y)
lr = LogisticRegression(C=1., fit_intercept=False)
lr.fit(X_ref, y)
assert_array_almost_equal(lr.coef_, lr_cv.coef_)
assert_array_equal(lr_cv.coef_.shape, (1, n_features))
assert_array_equal(lr_cv.classes_, [-1, 1])
assert_equal(len(lr_cv.classes_), 2)
coefs_paths = np.asarray(list(lr_cv.coefs_paths_.values()))
assert_array_equal(coefs_paths.shape, (1, 3, 1, n_features))
assert_array_equal(lr_cv.Cs_.shape, (1, ))
scores = np.asarray(list(lr_cv.scores_.values()))
assert_array_equal(scores.shape, (1, 3, 1))
def test_logistic_cv_sparse():
X, y = make_classification(n_samples=50, n_features=5,
random_state=0)
X[X < 1.0] = 0.0
csr = sp.csr_matrix(X)
clf = LogisticRegressionCV(fit_intercept=True)
clf.fit(X, y)
clfs = LogisticRegressionCV(fit_intercept=True)
clfs.fit(csr, y)
assert_array_almost_equal(clfs.coef_, clf.coef_)
assert_array_almost_equal(clfs.intercept_, clf.intercept_)
assert_equal(clfs.C_, clf.C_)
def test_intercept_logistic_helper():
n_samples, n_features = 10, 5
X, y = make_classification(n_samples=n_samples, n_features=n_features,
random_state=0)
# Fit intercept case.
alpha = 1.
w = np.ones(n_features + 1)
grad_interp, hess_interp = _logistic_grad_hess(w, X, y, alpha)
loss_interp = _logistic_loss(w, X, y, alpha)
# Do not fit intercept. This can be considered equivalent to adding
# a feature vector of ones, i.e column of one vectors.
X_ = np.hstack((X, np.ones(10)[:, np.newaxis]))
grad, hess = _logistic_grad_hess(w, X_, y, alpha)
loss = _logistic_loss(w, X_, y, alpha)
# In the fit_intercept=False case, the feature vector of ones is
# penalized. This should be taken care of.
assert_almost_equal(loss_interp + 0.5 * (w[-1] ** 2), loss)
# Check gradient.
assert_array_almost_equal(grad_interp[:n_features], grad[:n_features])
assert_almost_equal(grad_interp[-1] + alpha * w[-1], grad[-1])
rng = np.random.RandomState(0)
grad = rng.rand(n_features + 1)
hess_interp = hess_interp(grad)
hess = hess(grad)
assert_array_almost_equal(hess_interp[:n_features], hess[:n_features])
assert_almost_equal(hess_interp[-1] + alpha * grad[-1], hess[-1])
def test_ovr_multinomial_iris():
# Test that OvR and multinomial are correct using the iris dataset.
train, target = iris.data, iris.target
n_samples, n_features = train.shape
# Use pre-defined fold as folds generated for different y
cv = StratifiedKFold(target, 3)
clf = LogisticRegressionCV(cv=cv)
clf.fit(train, target)
clf1 = LogisticRegressionCV(cv=cv)
target_copy = target.copy()
target_copy[target_copy == 0] = 1
clf1.fit(train, target_copy)
assert_array_almost_equal(clf.scores_[2], clf1.scores_[2])
assert_array_almost_equal(clf.intercept_[2:], clf1.intercept_)
assert_array_almost_equal(clf.coef_[2][np.newaxis, :], clf1.coef_)
# Test the shape of various attributes.
assert_equal(clf.coef_.shape, (3, n_features))
assert_array_equal(clf.classes_, [0, 1, 2])
coefs_paths = np.asarray(list(clf.coefs_paths_.values()))
assert_array_almost_equal(coefs_paths.shape, (3, 3, 10, n_features + 1))
assert_equal(clf.Cs_.shape, (10, ))
scores = np.asarray(list(clf.scores_.values()))
assert_equal(scores.shape, (3, 3, 10))
# Test that for the iris data multinomial gives a better accuracy than OvR
for solver in ['lbfgs', 'newton-cg']:
clf_multi = LogisticRegressionCV(
solver=solver, multi_class='multinomial', max_iter=15
)
clf_multi.fit(train, target)
multi_score = clf_multi.score(train, target)
ovr_score = clf.score(train, target)
assert_greater(multi_score, ovr_score)
# Test attributes of LogisticRegressionCV
assert_equal(clf.coef_.shape, clf_multi.coef_.shape)
assert_array_equal(clf_multi.classes_, [0, 1, 2])
coefs_paths = np.asarray(list(clf_multi.coefs_paths_.values()))
assert_array_almost_equal(coefs_paths.shape, (3, 3, 10,
n_features + 1))
assert_equal(clf_multi.Cs_.shape, (10, ))
scores = np.asarray(list(clf_multi.scores_.values()))
assert_equal(scores.shape, (3, 3, 10))
def test_logistic_regression_solvers():
X, y = make_classification(n_features=10, n_informative=5, random_state=0)
ncg = LogisticRegression(solver='newton-cg', fit_intercept=False)
lbf = LogisticRegression(solver='lbfgs', fit_intercept=False)
lib = LogisticRegression(fit_intercept=False)
sag = LogisticRegression(solver='sag', fit_intercept=False,
random_state=42)
ncg.fit(X, y)
lbf.fit(X, y)
sag.fit(X, y)
lib.fit(X, y)
assert_array_almost_equal(ncg.coef_, lib.coef_, decimal=3)
assert_array_almost_equal(lib.coef_, lbf.coef_, decimal=3)
assert_array_almost_equal(ncg.coef_, lbf.coef_, decimal=3)
assert_array_almost_equal(sag.coef_, lib.coef_, decimal=3)
assert_array_almost_equal(sag.coef_, ncg.coef_, decimal=3)
assert_array_almost_equal(sag.coef_, lbf.coef_, decimal=3)
def test_logistic_regression_solvers_multiclass():
X, y = make_classification(n_samples=20, n_features=20, n_informative=10,
n_classes=3, random_state=0)
tol = 1e-6
ncg = LogisticRegression(solver='newton-cg', fit_intercept=False, tol=tol)
lbf = LogisticRegression(solver='lbfgs', fit_intercept=False, tol=tol)
lib = LogisticRegression(fit_intercept=False, tol=tol)
sag = LogisticRegression(solver='sag', fit_intercept=False, tol=tol,
max_iter=1000, random_state=42)
ncg.fit(X, y)
lbf.fit(X, y)
sag.fit(X, y)
lib.fit(X, y)
assert_array_almost_equal(ncg.coef_, lib.coef_, decimal=4)
assert_array_almost_equal(lib.coef_, lbf.coef_, decimal=4)
assert_array_almost_equal(ncg.coef_, lbf.coef_, decimal=4)
assert_array_almost_equal(sag.coef_, lib.coef_, decimal=4)
assert_array_almost_equal(sag.coef_, ncg.coef_, decimal=4)
assert_array_almost_equal(sag.coef_, lbf.coef_, decimal=4)
def test_logistic_regressioncv_class_weights():
X, y = make_classification(n_samples=20, n_features=20, n_informative=10,
n_classes=3, random_state=0)
# Test the liblinear fails when class_weight of type dict is
# provided, when it is multiclass. However it can handle
# binary problems.
clf_lib = LogisticRegressionCV(class_weight={0: 0.1, 1: 0.2},
solver='liblinear')
assert_raises(ValueError, clf_lib.fit, X, y)
y_ = y.copy()
y_[y == 2] = 1
clf_lib.fit(X, y_)
assert_array_equal(clf_lib.classes_, [0, 1])
# Test for class_weight=balanced
X, y = make_classification(n_samples=20, n_features=20, n_informative=10,
random_state=0)
clf_lbf = LogisticRegressionCV(solver='lbfgs', fit_intercept=False,
class_weight='balanced')
clf_lbf.fit(X, y)
clf_lib = LogisticRegressionCV(solver='liblinear', fit_intercept=False,
class_weight='balanced')
clf_lib.fit(X, y)
clf_sag = LogisticRegressionCV(solver='sag', fit_intercept=False,
class_weight='balanced', max_iter=2000)
clf_sag.fit(X, y)
assert_array_almost_equal(clf_lib.coef_, clf_lbf.coef_, decimal=4)
assert_array_almost_equal(clf_sag.coef_, clf_lbf.coef_, decimal=4)
assert_array_almost_equal(clf_lib.coef_, clf_sag.coef_, decimal=4)
def test_logistic_regression_sample_weights():
X, y = make_classification(n_samples=20, n_features=5, n_informative=3,
n_classes=2, random_state=0)
for LR in [LogisticRegression, LogisticRegressionCV]:
# Test that liblinear fails when sample weights are provided
clf_lib = LR(solver='liblinear')
assert_raises(ValueError, clf_lib.fit, X, y,
sample_weight=np.ones(y.shape[0]))
# Test that passing sample_weight as ones is the same as
# not passing them at all (default None)
clf_sw_none = LR(solver='lbfgs', fit_intercept=False)
clf_sw_none.fit(X, y)
clf_sw_ones = LR(solver='lbfgs', fit_intercept=False)
clf_sw_ones.fit(X, y, sample_weight=np.ones(y.shape[0]))
assert_array_almost_equal(clf_sw_none.coef_, clf_sw_ones.coef_, decimal=4)
# Test that sample weights work the same with the lbfgs,
# newton-cg, and 'sag' solvers
clf_sw_lbfgs = LR(solver='lbfgs', fit_intercept=False)
clf_sw_lbfgs.fit(X, y, sample_weight=y + 1)
clf_sw_n = LR(solver='newton-cg', fit_intercept=False)
clf_sw_n.fit(X, y, sample_weight=y + 1)
clf_sw_sag = LR(solver='sag', fit_intercept=False,
max_iter=2000, tol=1e-7)
clf_sw_sag.fit(X, y, sample_weight=y + 1)
assert_array_almost_equal(clf_sw_lbfgs.coef_, clf_sw_n.coef_, decimal=4)
assert_array_almost_equal(clf_sw_lbfgs.coef_, clf_sw_sag.coef_, decimal=4)
# Test that passing class_weight as [1,2] is the same as
# passing class weight = [1,1] but adjusting sample weights
# to be 2 for all instances of class 2
clf_cw_12 = LR(solver='lbfgs', fit_intercept=False,
class_weight={0: 1, 1: 2})
clf_cw_12.fit(X, y)
sample_weight = np.ones(y.shape[0])
sample_weight[y == 1] = 2
clf_sw_12 = LR(solver='lbfgs', fit_intercept=False)
clf_sw_12.fit(X, y, sample_weight=sample_weight)
assert_array_almost_equal(clf_cw_12.coef_, clf_sw_12.coef_, decimal=4)
def test_logistic_regression_convergence_warnings():
# Test that warnings are raised if model does not converge
X, y = make_classification(n_samples=20, n_features=20)
clf_lib = LogisticRegression(solver='liblinear', max_iter=2, verbose=1)
assert_warns(ConvergenceWarning, clf_lib.fit, X, y)
assert_equal(clf_lib.n_iter_, 2)
def test_logistic_regression_multinomial():
# Tests for the multinomial option in logistic regression
# Some basic attributes of Logistic Regression
n_samples, n_features, n_classes = 50, 20, 3
X, y = make_classification(n_samples=n_samples,
n_features=n_features,
n_informative=10,
n_classes=n_classes, random_state=0)
clf_int = LogisticRegression(solver='lbfgs', multi_class='multinomial')
clf_int.fit(X, y)
assert_array_equal(clf_int.coef_.shape, (n_classes, n_features))
clf_wint = LogisticRegression(solver='lbfgs', multi_class='multinomial',
fit_intercept=False)
clf_wint.fit(X, y)
assert_array_equal(clf_wint.coef_.shape, (n_classes, n_features))
# Similar tests for newton-cg solver option
clf_ncg_int = LogisticRegression(solver='newton-cg',
multi_class='multinomial')
clf_ncg_int.fit(X, y)
assert_array_equal(clf_ncg_int.coef_.shape, (n_classes, n_features))
clf_ncg_wint = LogisticRegression(solver='newton-cg', fit_intercept=False,
multi_class='multinomial')
clf_ncg_wint.fit(X, y)
assert_array_equal(clf_ncg_wint.coef_.shape, (n_classes, n_features))
# Compare solutions between lbfgs and newton-cg
assert_almost_equal(clf_int.coef_, clf_ncg_int.coef_, decimal=3)
assert_almost_equal(clf_wint.coef_, clf_ncg_wint.coef_, decimal=3)
assert_almost_equal(clf_int.intercept_, clf_ncg_int.intercept_, decimal=3)
# Test that the path give almost the same results. However since in this
# case we take the average of the coefs after fitting across all the
# folds, it need not be exactly the same.
for solver in ['lbfgs', 'newton-cg']:
clf_path = LogisticRegressionCV(solver=solver,
multi_class='multinomial', Cs=[1.])
clf_path.fit(X, y)
assert_array_almost_equal(clf_path.coef_, clf_int.coef_, decimal=3)
assert_almost_equal(clf_path.intercept_, clf_int.intercept_, decimal=3)
def test_multinomial_grad_hess():
rng = np.random.RandomState(0)
n_samples, n_features, n_classes = 100, 5, 3
X = rng.randn(n_samples, n_features)
w = rng.rand(n_classes, n_features)
Y = np.zeros((n_samples, n_classes))
ind = np.argmax(np.dot(X, w.T), axis=1)
Y[range(0, n_samples), ind] = 1
w = w.ravel()
sample_weights = np.ones(X.shape[0])
grad, hessp = _multinomial_grad_hess(w, X, Y, alpha=1.,
sample_weight=sample_weights)
# extract first column of hessian matrix
vec = np.zeros(n_features * n_classes)
vec[0] = 1
hess_col = hessp(vec)
# Estimate hessian using least squares as done in
# test_logistic_grad_hess
e = 1e-3
d_x = np.linspace(-e, e, 30)
d_grad = np.array([
_multinomial_grad_hess(w + t * vec, X, Y, alpha=1.,
sample_weight=sample_weights)[0]
for t in d_x
])
d_grad -= d_grad.mean(axis=0)
approx_hess_col = linalg.lstsq(d_x[:, np.newaxis], d_grad)[0].ravel()
assert_array_almost_equal(hess_col, approx_hess_col)
def test_liblinear_decision_function_zero():
# Test negative prediction when decision_function values are zero.
# Liblinear predicts the positive class when decision_function values
# are zero. This is a test to verify that we do not do the same.
# See Issue: https://github.com/scikit-learn/scikit-learn/issues/3600
# and the PR https://github.com/scikit-learn/scikit-learn/pull/3623
X, y = make_classification(n_samples=5, n_features=5)
clf = LogisticRegression(fit_intercept=False)
clf.fit(X, y)
# Dummy data such that the decision function becomes zero.
X = np.zeros((5, 5))
assert_array_equal(clf.predict(X), np.zeros(5))
def test_liblinear_logregcv_sparse():
# Test LogRegCV with solver='liblinear' works for sparse matrices
X, y = make_classification(n_samples=10, n_features=5)
clf = LogisticRegressionCV(solver='liblinear')
clf.fit(sparse.csr_matrix(X), y)
def test_logreg_intercept_scaling():
# Test that the right error message is thrown when intercept_scaling <= 0
for i in [-1, 0]:
clf = LogisticRegression(intercept_scaling=i)
msg = ('Intercept scaling is %r but needs to be greater than 0.'
' To disable fitting an intercept,'
' set fit_intercept=False.' % clf.intercept_scaling)
assert_raise_message(ValueError, msg, clf.fit, X, Y1)
def test_logreg_intercept_scaling_zero():
# Test that intercept_scaling is ignored when fit_intercept is False
clf = LogisticRegression(fit_intercept=False)
clf.fit(X, Y1)
assert_equal(clf.intercept_, 0.)
def test_logreg_cv_penalty():
# Test that the correct penalty is passed to the final fit.
X, y = make_classification(n_samples=50, n_features=20, random_state=0)
lr_cv = LogisticRegressionCV(penalty="l1", Cs=[1.0], solver='liblinear')
lr_cv.fit(X, y)
lr = LogisticRegression(penalty="l1", C=1.0, solver='liblinear')
lr.fit(X, y)
assert_equal(np.count_nonzero(lr_cv.coef_), np.count_nonzero(lr.coef_))
def test_logreg_predict_proba_multinomial():
X, y = make_classification(n_samples=10, n_features=20, random_state=0,
n_classes=3, n_informative=10)
# Predicted probabilites using the true-entropy loss should give a
# smaller loss than those using the ovr method.
clf_multi = LogisticRegression(multi_class="multinomial", solver="lbfgs")
clf_multi.fit(X, y)
clf_multi_loss = log_loss(y, clf_multi.predict_proba(X))
clf_ovr = LogisticRegression(multi_class="ovr", solver="lbfgs")
clf_ovr.fit(X, y)
clf_ovr_loss = log_loss(y, clf_ovr.predict_proba(X))
assert_greater(clf_ovr_loss, clf_multi_loss)
    # Predicted probabilities using the soft-max function should give a
# smaller loss than those using the logistic function.
clf_multi_loss = log_loss(y, clf_multi.predict_proba(X))
clf_wrong_loss = log_loss(y, clf_multi._predict_proba_lr(X))
assert_greater(clf_wrong_loss, clf_multi_loss)
@ignore_warnings
def test_max_iter():
    # Test that the maximum number of iterations is reached
X, y_bin = iris.data, iris.target.copy()
y_bin[y_bin == 2] = 0
solvers = ['newton-cg', 'liblinear', 'sag']
# old scipy doesn't have maxiter
if sp_version >= (0, 12):
solvers.append('lbfgs')
for max_iter in range(1, 5):
for solver in solvers:
lr = LogisticRegression(max_iter=max_iter, tol=1e-15,
random_state=0, solver=solver)
lr.fit(X, y_bin)
assert_equal(lr.n_iter_[0], max_iter)
def test_n_iter():
# Test that self.n_iter_ has the correct format.
X, y = iris.data, iris.target
y_bin = y.copy()
y_bin[y_bin == 2] = 0
n_Cs = 4
n_cv_fold = 2
for solver in ['newton-cg', 'liblinear', 'sag', 'lbfgs']:
# OvR case
n_classes = 1 if solver == 'liblinear' else np.unique(y).shape[0]
clf = LogisticRegression(tol=1e-2, multi_class='ovr',
solver=solver, C=1.,
random_state=42, max_iter=100)
clf.fit(X, y)
assert_equal(clf.n_iter_.shape, (n_classes,))
n_classes = np.unique(y).shape[0]
clf = LogisticRegressionCV(tol=1e-2, multi_class='ovr',
solver=solver, Cs=n_Cs, cv=n_cv_fold,
random_state=42, max_iter=100)
clf.fit(X, y)
assert_equal(clf.n_iter_.shape, (n_classes, n_cv_fold, n_Cs))
clf.fit(X, y_bin)
assert_equal(clf.n_iter_.shape, (1, n_cv_fold, n_Cs))
# multinomial case
n_classes = 1
if solver in ('liblinear', 'sag'):
break
clf = LogisticRegression(tol=1e-2, multi_class='multinomial',
solver=solver, C=1.,
random_state=42, max_iter=100)
clf.fit(X, y)
assert_equal(clf.n_iter_.shape, (n_classes,))
clf = LogisticRegressionCV(tol=1e-2, multi_class='multinomial',
solver=solver, Cs=n_Cs, cv=n_cv_fold,
random_state=42, max_iter=100)
clf.fit(X, y)
assert_equal(clf.n_iter_.shape, (n_classes, n_cv_fold, n_Cs))
clf.fit(X, y_bin)
assert_equal(clf.n_iter_.shape, (1, n_cv_fold, n_Cs))
@ignore_warnings
def test_warm_start():
    # A 1-iteration second fit on the same data should give almost the same result
# with warm starting, and quite different result without warm starting.
# Warm starting does not work with liblinear solver.
X, y = iris.data, iris.target
solvers = ['newton-cg', 'sag']
# old scipy doesn't have maxiter
if sp_version >= (0, 12):
solvers.append('lbfgs')
for warm_start in [True, False]:
for fit_intercept in [True, False]:
for solver in solvers:
for multi_class in ['ovr', 'multinomial']:
if solver == 'sag' and multi_class == 'multinomial':
break
clf = LogisticRegression(tol=1e-4, multi_class=multi_class,
warm_start=warm_start,
solver=solver,
random_state=42, max_iter=100,
fit_intercept=fit_intercept)
clf.fit(X, y)
coef_1 = clf.coef_
clf.max_iter = 1
with ignore_warnings():
clf.fit(X, y)
cum_diff = np.sum(np.abs(coef_1 - clf.coef_))
msg = ("Warm starting issue with %s solver in %s mode "
"with fit_intercept=%s and warm_start=%s"
% (solver, multi_class, str(fit_intercept),
str(warm_start)))
if warm_start:
assert_greater(2.0, cum_diff, msg)
else:
assert_greater(cum_diff, 2.0, msg)
| bsd-3-clause |
ashgti/VEE | src/vee/plotter.py | 1 | 2283 | # Copyright John Harrison, 2011
import numpy as np
from matplotlib import use
# use('AGG')
from matplotlib.transforms import Bbox
from matplotlib.path import Path
from matplotlib.patches import Rectangle
from matplotlib.pylab import *
from matplotlib.axes import *
from PySide import QtGui
class Plot(QtGui.QWidget):
def __init__(self, parent=None):
super(Plot, self).__init__(parent)
self.imageLabel = QtGui.QLabel()
self.imageLabel.setBackgroundRole(QtGui.QPalette.Base)
layout = QtGui.QVBoxLayout()
layout.addWidget(self.imageLabel)
self.need_update = False
self.setLayout(layout)
# self.plot()
def plot(self, *args):
"""Pasthrough"""
# x, y = args
a = [0, 0, 1, 1, 0, 0, 1]
b = [0, 0.5, 0.5, 1.5, 1.5, 2.5, 2.5]
plot(b, a)
a = [2, 2, 3, 3, 2, 2, 3]
b = [0, 0.5, 0.5, 1.5, 1.5, 2.5, 2.5]
axis([-0.5, 3.0, -0.5, 3.5])
plot(b, a)
self.need_update = True
self.update()
def plotHeading(self, headings, time):
"""Plots heading"""
print(headings, time)
        plot(headings, time)
self.update()
def paintEvent(self, event):
"Paint event redraws the plot"
        if not self.need_update:
            return
gcf().canvas.draw()
stringBuffer = gcf().canvas.buffer_rgba(0,0)
l, b, w, h = gcf().bbox.bounds
qImage = QtGui.QImage(stringBuffer,
w,
h,
QtGui.QImage.Format_ARGB32)
qImage = qImage.scaled(self.width(), self.height())
self.imageLabel.setPixmap(QtGui.QPixmap.fromImage(qImage))
## Generate a square wave.
def generate_range(start, stop, step=1.0, square=True):
r = []
st = start
while st < stop:
if st % 2 == 0:
r.append((st, 1.0))
r.append((st, 0.0))
else:
r.append((st, 0.0))
r.append((st, 1.0))
st += step
return r
if __name__ == '__main__':
pass
# a = generate_range(1, 10)
#
# (xs, ys) = zip(*a)
# xlim(-0.5, 10.5)
# ylim(-0.5, 1.5)
# plot(xs, ys, label="Blink")
# show()
| mit |
samzhang111/scikit-learn | sklearn/metrics/cluster/__init__.py | 312 | 1322 | """
The :mod:`sklearn.metrics.cluster` submodule contains evaluation metrics for
cluster analysis results. There are two forms of evaluation:
- supervised, which uses a ground truth class values for each sample.
- unsupervised, which does not and measures the 'quality' of the model itself.
"""
from .supervised import adjusted_mutual_info_score
from .supervised import normalized_mutual_info_score
from .supervised import adjusted_rand_score
from .supervised import completeness_score
from .supervised import contingency_matrix
from .supervised import expected_mutual_information
from .supervised import homogeneity_completeness_v_measure
from .supervised import homogeneity_score
from .supervised import mutual_info_score
from .supervised import v_measure_score
from .supervised import entropy
from .unsupervised import silhouette_samples
from .unsupervised import silhouette_score
from .bicluster import consensus_score
__all__ = ["adjusted_mutual_info_score", "normalized_mutual_info_score",
"adjusted_rand_score", "completeness_score", "contingency_matrix",
"expected_mutual_information", "homogeneity_completeness_v_measure",
"homogeneity_score", "mutual_info_score", "v_measure_score",
"entropy", "silhouette_samples", "silhouette_score",
"consensus_score"]
| bsd-3-clause |
alexeyum/scikit-learn | sklearn/setup.py | 24 | 3025 | import os
from os.path import join
import warnings
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration
from numpy.distutils.system_info import get_info, BlasNotFoundError
import numpy
libraries = []
if os.name == 'posix':
libraries.append('m')
config = Configuration('sklearn', parent_package, top_path)
# submodules with build utilities
config.add_subpackage('__check_build')
config.add_subpackage('_build_utils')
# submodules which do not have their own setup.py
# we must manually add sub-submodules & tests
config.add_subpackage('covariance')
config.add_subpackage('covariance/tests')
config.add_subpackage('cross_decomposition')
config.add_subpackage('cross_decomposition/tests')
config.add_subpackage('feature_selection')
config.add_subpackage('feature_selection/tests')
config.add_subpackage('gaussian_process')
config.add_subpackage('gaussian_process/tests')
config.add_subpackage('mixture')
config.add_subpackage('mixture/tests')
config.add_subpackage('model_selection')
config.add_subpackage('model_selection/tests')
config.add_subpackage('neural_network')
config.add_subpackage('neural_network/tests')
config.add_subpackage('preprocessing')
config.add_subpackage('preprocessing/tests')
config.add_subpackage('semi_supervised')
config.add_subpackage('semi_supervised/tests')
# submodules which have their own setup.py
# leave out "linear_model" and "utils" for now; add them after cblas below
config.add_subpackage('cluster')
config.add_subpackage('datasets')
config.add_subpackage('decomposition')
config.add_subpackage('ensemble')
config.add_subpackage('externals')
config.add_subpackage('feature_extraction')
config.add_subpackage('manifold')
config.add_subpackage('metrics')
config.add_subpackage('metrics/cluster')
config.add_subpackage('neighbors')
config.add_subpackage('tree')
config.add_subpackage('svm')
# add cython extension module for isotonic regression
config.add_extension(
'_isotonic',
sources=['_isotonic.c'],
include_dirs=[numpy.get_include()],
libraries=libraries,
)
# some libs needs cblas, fortran-compiled BLAS will not be sufficient
blas_info = get_info('blas_opt', 0)
if (not blas_info) or (
('NO_ATLAS_INFO', 1) in blas_info.get('define_macros', [])):
config.add_library('cblas',
sources=[join('src', 'cblas', '*.c')])
warnings.warn(BlasNotFoundError.__doc__)
# the following packages depend on cblas, so they have to be build
# after the above.
config.add_subpackage('linear_model')
config.add_subpackage('utils')
# add the test directory
config.add_subpackage('tests')
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
| bsd-3-clause |
mbayon/TFG-MachineLearning | venv/lib/python3.6/site-packages/pandas/tests/io/parser/index_col.py | 20 | 5352 | # -*- coding: utf-8 -*-
"""
Tests that the specified index column (a.k.a 'index_col')
is properly handled or inferred during parsing for all of
the parsers defined in parsers.py
"""
import pytest
import pandas.util.testing as tm
from pandas import DataFrame, Index, MultiIndex
from pandas.compat import StringIO
class IndexColTests(object):
def test_index_col_named(self):
no_header = """\
KORD1,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD2,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD3,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD4,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD5,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD6,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000""" # noqa
h = "ID,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir\n" # noqa
data = h + no_header
rs = self.read_csv(StringIO(data), index_col='ID')
xp = self.read_csv(StringIO(data), header=0).set_index('ID')
tm.assert_frame_equal(rs, xp)
pytest.raises(ValueError, self.read_csv, StringIO(no_header),
index_col='ID')
data = """\
1,2,3,4,hello
5,6,7,8,world
9,10,11,12,foo
"""
names = ['a', 'b', 'c', 'd', 'message']
xp = DataFrame({'a': [1, 5, 9], 'b': [2, 6, 10], 'c': [3, 7, 11],
'd': [4, 8, 12]},
index=Index(['hello', 'world', 'foo'], name='message'))
rs = self.read_csv(StringIO(data), names=names, index_col=['message'])
tm.assert_frame_equal(xp, rs)
assert xp.index.name == rs.index.name
rs = self.read_csv(StringIO(data), names=names, index_col='message')
tm.assert_frame_equal(xp, rs)
assert xp.index.name == rs.index.name
def test_index_col_is_true(self):
# see gh-9798
pytest.raises(ValueError, self.read_csv,
StringIO(self.ts_data), index_col=True)
def test_infer_index_col(self):
data = """A,B,C
foo,1,2,3
bar,4,5,6
baz,7,8,9
"""
data = self.read_csv(StringIO(data))
assert data.index.equals(Index(['foo', 'bar', 'baz']))
def test_empty_index_col_scenarios(self):
data = 'x,y,z'
# None, no index
index_col, expected = None, DataFrame([], columns=list('xyz')),
tm.assert_frame_equal(self.read_csv(
StringIO(data), index_col=index_col), expected)
# False, no index
index_col, expected = False, DataFrame([], columns=list('xyz')),
tm.assert_frame_equal(self.read_csv(
StringIO(data), index_col=index_col), expected)
# int, first column
index_col, expected = 0, DataFrame(
[], columns=['y', 'z'], index=Index([], name='x'))
tm.assert_frame_equal(self.read_csv(
StringIO(data), index_col=index_col), expected)
# int, not first column
index_col, expected = 1, DataFrame(
[], columns=['x', 'z'], index=Index([], name='y'))
tm.assert_frame_equal(self.read_csv(
StringIO(data), index_col=index_col), expected)
# str, first column
index_col, expected = 'x', DataFrame(
[], columns=['y', 'z'], index=Index([], name='x'))
tm.assert_frame_equal(self.read_csv(
StringIO(data), index_col=index_col), expected)
# str, not the first column
index_col, expected = 'y', DataFrame(
[], columns=['x', 'z'], index=Index([], name='y'))
tm.assert_frame_equal(self.read_csv(
StringIO(data), index_col=index_col), expected)
# list of int
index_col, expected = [0, 1], DataFrame(
[], columns=['z'], index=MultiIndex.from_arrays(
[[]] * 2, names=['x', 'y']))
tm.assert_frame_equal(self.read_csv(
StringIO(data), index_col=index_col),
expected, check_index_type=False)
# list of str
index_col = ['x', 'y']
expected = DataFrame([], columns=['z'],
index=MultiIndex.from_arrays(
[[]] * 2, names=['x', 'y']))
tm.assert_frame_equal(self.read_csv(StringIO(
data), index_col=index_col),
expected, check_index_type=False)
# list of int, reversed sequence
index_col = [1, 0]
expected = DataFrame([], columns=['z'], index=MultiIndex.from_arrays(
[[]] * 2, names=['y', 'x']))
tm.assert_frame_equal(self.read_csv(
StringIO(data), index_col=index_col),
expected, check_index_type=False)
# list of str, reversed sequence
index_col = ['y', 'x']
expected = DataFrame([], columns=['z'], index=MultiIndex.from_arrays(
[[]] * 2, names=['y', 'x']))
tm.assert_frame_equal(self.read_csv(StringIO(
data), index_col=index_col),
expected, check_index_type=False)
def test_empty_with_index_col_false(self):
# see gh-10413
data = 'x,y'
result = self.read_csv(StringIO(data), index_col=False)
expected = DataFrame([], columns=['x', 'y'])
tm.assert_frame_equal(result, expected)
| mit |
habi/GlobalDiagnostiX | GOTTHARD/PlotGOTTHARD.py | 1 | 11302 | # -*- coding: utf-8 -*-
"""
Plotting GOTTHARD spectra
"""
from __future__ import division
import matplotlib.pylab as plt
import numpy as np
import os
import glob
from scipy import interpolate
FigureSize = [8, 9]
Spectrapath = '/afs/psi.ch/user/h/haberthuer/EssentialMed/Images/' \
'GOTTHARD_and_TIS/GOTTHARD'
Spectra = sorted(glob.glob(os.path.join(Spectrapath, '*.txt')))
FileName = [os.path.basename(item) for item in Spectra]
Data = [np.loadtxt(item) for item in Spectra]
DataName = [open(item).readlines()[0].split()[0][1:-2] for item in Spectra]
# Get Filenames of Spectra and split it up into the desired values like kV, mAs
# and exposure time with some basic string handling.
Modality = [item.split('_')[0] for item in FileName]
Energy = [int(item.split('_')[1][:-2]) for item in FileName]
Current = [int(item.split('_')[2][:-2]) for item in FileName]
mAs = [float(item.split('_')[3][:-3]) for item in FileName]
ExposureTime = [int(item.split('_')[4][:-6]) for item in FileName]
# Generate labels which we can use later on
Label = [Modality[i] + ': ' + str(Energy[i]) + 'kV, ' + str(mAs[i]) + 'mAs,' +
str(ExposureTime[i]) + 'ms' for i in range(len(Spectra))]
Frames = [open(item).readlines()[0].split()[1] for item in Spectra]
BinCenter = [open(item).readlines()[1].split()[0] for item in Spectra]
Photons = [open(item).readlines()[1].split()[1] for item in Spectra]
PhotonsPerFrame = [open(item).readlines()[1].split()[2] for item in Spectra]
# Calculate attenuation in 320 um of Silicon
SiliconAttenuation = np.loadtxt('Si_Attenuation.dat')
SiliconDensity = 2.329 # g/cm³
SiliconThickness = 320 # um
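# The transmission computed below follows the Beer-Lambert law,
# T = exp(-(mu/rho) * rho * t): the second column of Si_Attenuation.dat is
# presumably the mass attenuation coefficient mu/rho of silicon in cm^2/g, and
# dividing the thickness by 10000 converts it from micrometers to centimeters.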
# Plot transmission
plt.figure(figsize=FigureSize)
plt.plot(SiliconAttenuation[:, 0] * 1000,
(np.exp(- (SiliconAttenuation[:, 1] * SiliconDensity *
SiliconThickness / 10000))), 'ro', label='Si Transmission')
plt.xlabel('Photon Energy [keV]')
plt.ylabel('Transmission')
plt.ylim([0, 1])
plt.savefig('1_Si_Transmission.pdf', transparent=True)
# Plot transmission
plt.figure(figsize=FigureSize)
plt.loglog(SiliconAttenuation[:, 0], SiliconAttenuation[:, 1], 'r',
label='Si Transmission')
plt.xlabel('Photon Energy [keV]')
plt.ylabel('Transmission')
plt.savefig('1_Si_Transmission.pdf', transparent=True)
# Plot transmission-zoom
plt.figure(figsize=FigureSize)
for i in reversed(range(5000, 20250, 500)):
plt.plot(SiliconAttenuation[:, 0] * 1000,
(np.exp(- (SiliconAttenuation[:, 1] * SiliconDensity * SiliconThickness / 10000))),
'ro', label='Si Transmission')
plt.xlabel('Photon Energy [keV]')
    plt.ylabel('Transmission')
plt.ylim([0, 1])
plt.xlim(xmin=0)
plt.xlim(xmax=i)
print '%05i' % i
plt.draw()
plt.savefig('anim' + str('%05i' % (20000 - i)) + '.png', transparent=True)
for i in reversed(range(1000, 5000, 200)):
plt.plot(SiliconAttenuation[:, 0] * 1000,
(np.exp(- (SiliconAttenuation[:, 1] * SiliconDensity * SiliconThickness / 10000))),
'ro', label='Si Transmission')
plt.xlabel('Photon Energy [keV]')
    plt.ylabel('Transmission')
plt.ylim([0, 1])
plt.xlim(xmin=0)
plt.xlim(xmax=i)
print '%05i' % i
plt.draw()
plt.savefig('anim' + str('%05i' % (20000 - i)) + '.png', transparent=True)
for i in reversed(range(120, 1000, 50)):
plt.plot(SiliconAttenuation[:, 0] * 1000,
(np.exp(- (SiliconAttenuation[:, 1] * SiliconDensity * SiliconThickness / 10000))),
'ro', label='Si Transmission')
plt.xlabel('Photon Energy [keV]')
    plt.ylabel('Transmission')
plt.ylim([0, 1])
plt.xlim(xmin=0)
plt.xlim(xmax=i)
print '%05i' % i
plt.draw()
plt.savefig('anim' + str('%05i' % (20000 - i)) + '.png', transparent=True)
# Plot transmission with the limits that are interesting for us
plt.figure(figsize=FigureSize)
plt.plot(SiliconAttenuation[:, 0] * 1000,
(np.exp(- (SiliconAttenuation[:, 1] * SiliconDensity *
SiliconThickness / 10000))), 'ro', label='Si Transmission')
plt.xlabel('Photon Energy [keV]')
plt.ylabel('Transmission')
plt.xlim([0, 120])
plt.ylim([0, 1])
plt.savefig('2_Si_Transmission_limits.pdf', transparent=True)
plt.show()
# Plot interpolated transmission
x = SiliconAttenuation[:, 0] * 1000
y = (np.exp(- (SiliconAttenuation[:, 1] * SiliconDensity * SiliconThickness /
10000)))
interpolated = interpolate.interp1d(x, y, kind='cubic')
xnew = np.linspace(1, 120, 1000) # get 1000 steps from 1 to 120
plt.figure(figsize=FigureSize)
plt.plot(SiliconAttenuation[:, 0] * 1000,
(np.exp(- (SiliconAttenuation[:, 1] * SiliconDensity *
SiliconThickness / 10000))), 'ro', label='Si Transmission')
plt.plot(xnew, interpolated(xnew), 'r', label='Interpolated values')
plt.xlabel('Photon Energy [keV]')
plt.ylabel('Transmission')
plt.xlim([0, 120])
plt.ylim([0, 1])
plt.savefig('3_Si_Transmission_limits_interpolated.pdf', transparent=True)
print 'Plotting uncorrected spectra'
for i in range(int(len(Spectra) / 2)):
plt.figure(figsize=FigureSize)
k = i + int(len(Spectra) / 2)
plt.plot(Data[i][:, 0], Data[i][:, 1], label=Label[i], color='k')
plt.plot(Data[k][:, 0], Data[k][:, 1], label=Label[k], color='g')
plt.legend(loc=1)
plt.xlabel('BinCenter [adc]')
plt.ylabel('Photons [count]')
plt.xlim([0, 5000])
plt.ylim(ymin=0)
plt.savefig('4_' + DataName[i] + '.pdf', transparent=True)
print 'Plotting corrected spectra'
for i in range(int(len(Spectra) / 2)):
plt.figure(figsize=FigureSize)
k = i + int(len(Spectra) / 2)
plt.plot(Data[i][:, 0], Data[i][:, 1], color='k', alpha=0.125)
plt.plot(Data[k][:, 0], Data[k][:, 1], color='g', alpha=0.125)
plt.plot(xnew * 5000 / 120, interpolated(xnew) * Data[i][:, 1], color='k',
label=Label[i])
plt.plot(xnew * 5000 / 120, interpolated(xnew) * Data[k][:, 1], color='g',
label=Label[k])
plt.legend(loc=1)
plt.xlabel('BinCenter [adc]')
plt.ylabel('Photons [count]')
plt.xlim([0, 5000])
plt.ylim(ymin=0)
plt.savefig('5_' + DataName[i] + 'corrected.pdf', transparent=True)
print 'Plotting corrected log-spectra'
for i in range(int(len(Spectra) / 2)):
plt.figure(figsize=FigureSize)
k = i + int(len(Spectra) / 2)
plt.semilogy(Data[i][:, 0], Data[i][:, 1], color='k', alpha=0.125)
plt.semilogy(Data[k][:, 0], Data[k][:, 1], color='g', alpha=0.125)
plt.semilogy(xnew * 5000 / 120, interpolated(xnew) * Data[i][:, 1],
color='k', label=Label[i])
plt.semilogy(xnew * 5000 / 120, interpolated(xnew) * Data[k][:, 1],
color='g', label=Label[k])
plt.xlabel('BinCenter [adc]')
plt.ylabel('Photons [count]')
plt.legend(loc=1)
plt.xlim([0, 5000])
plt.ylim(ymin=1)
plt.savefig('6_' + DataName[i] + 'log.pdf', transparent=True)
exit()
print 'Plotting Spectra, Logplot and Difference for'
for i in range(int(len(Spectra) / 2)):
plt.figure(figsize=FigureSize)
k = i + int(len(Spectra) / 2)
print ' * ' + DataName[i], 'vs.', DataName[k]
print ' * for', DataName[i], 'we recorded',\
'%.3e' % int(np.sum(Data[i][:, 1])), 'photons.'
print ' * for', DataName[k], 'we recorded',\
'%.3e' % int(np.sum(Data[k][:, 1])), 'photons.'
print ' * the difference is',\
'%.3e' % int(np.sum(Data[i][:, 1]) - np.sum(Data[k][:, 1])), 'photons'
plt.subplot(1, 3, 1)
plt.plot(Data[i][:, 0], Data[i][:, 1], label=DataName[i], color='k')
plt.plot(Data[k][:, 0], Data[k][:, 1], label=DataName[k], color='g')
plt.xlabel('BinCenter [adc]')
plt.ylabel('Photons [count]')
plt.xlim(xmin=0)
# Plotting left, right and then middle, so we have the legend on top
plt.subplot(1, 3, 3)
plt.plot(Data[i][:, 0], Data[i][:, 1], color='k',
label=' '.join([Modality[i] + ',', str(Energy[i]) + 'kV,',
str(mAs[i]) + 'mAs']))
plt.plot(Data[k][:, 0], Data[k][:, 1], color='g',
label=' '.join([Modality[k][:2] + ' ' + Modality[k][13:] + ',',
str(Energy[k]) + 'kV,', str(mAs[k]) + 'mAs']))
plt.plot(Data[i][:, 0], (Data[i] - Data[k])[:, 1],
label='Difference', color='r')
plt.legend(loc='center', bbox_to_anchor=(0.5, 0.1))
plt.xlabel('BinCenter [adc]')
plt.ylabel('Photons [count]')
plt.title('Difference')
plt.xlim(xmin=0)
plt.subplot(1, 3, 2)
plt.semilogy(Data[i][:, 0], Data[i][:, 1], label=DataName[i], color='k')
plt.semilogy(Data[k][:, 0], Data[k][:, 1], label=DataName[k], color='g')
plt.xlabel('BinCenter [adc]')
plt.ylabel('Photons [log count]')
plt.legend(loc='center', bbox_to_anchor=(0.5, 0.9))
plt.xlim(xmin=0)
plt.savefig(os.path.join('img', 'Full_' + DataName[i] + '.png'),
transparent=True)
plt.savefig(os.path.join('img', 'Full_' + DataName[i] + '.pdf'),
transparent=True)
# plt.show()
exit()
print
print 'Plotting'
for i in range(int(len(Spectra) / 2)):
plt.figure(figsize=FigureSize)
k = i + int(len(Spectra) / 2)
print ' * ' + DataName[i] + '/' + DataName[k]
plt.plot(Data[i][:, 0], Data[i][:, 1], color='k',
label=' '.join([Modality[i] + ',', str(Energy[i]) + 'kV,',
str(mAs[i]) + 'mAs,',
str(ExposureTime[i]) + 'ms']))
plt.plot(Data[k][:, 0], Data[k][:, 1], color='g',
label=' '.join([Modality[k] + ',', str(Energy[k]) + 'kV,',
str(mAs[k]) + 'mAs,',
str(ExposureTime[k]) + 'ms']))
plt.plot(Data[i][:, 0], (Data[i] - Data[k])[:, 1],
label='Difference', color='r')
plt.legend(loc='best')
plt.xlabel('BinCenter [adc]')
plt.xlim(xmin=0)
plt.savefig(os.path.join('img', 'Photons_' + DataName[i] + '.png'),
transparent=True)
plt.savefig(os.path.join('img', 'Photons_' + DataName[i] + '.pdf'),
transparent=True)
# plt.show()
print
print 'Plotting Logplot for every modality'
for i in range(int(len(Spectra) / 2)):
plt.figure(figsize=FigureSize)
k = i + int(len(Spectra) / 2)
print ' * ' + DataName[i] + '/' + DataName[k]
plt.semilogy(Data[i][:, 0], Data[i][:, 1], color='k',
label=' '.join([Modality[i] + ',',
str(Energy[i]) + 'kV,',
str(mAs[i]) + 'mAs,',
str(ExposureTime[i]) + 'ms']))
plt.semilogy(Data[k][:, 0], Data[k][:, 1], color='g',
label=' '.join([Modality[k] + ',',
str(Energy[k]) + 'kV,',
str(mAs[k]) + 'mAs,',
str(ExposureTime[k]) + 'ms']))
plt.semilogy(Data[i][:, 0], (Data[i] - Data[k])[:, 1],
label='Difference', color='r')
plt.legend(loc='best')
plt.xlabel('BinCenter [adc]')
plt.xlim(xmin=0)
plt.savefig(os.path.join('img', 'Log_Photons_' + DataName[i] + '.png'),
transparent=True)
plt.savefig(os.path.join('img', 'Log_Photons_' + DataName[i] + '.pdf'),
transparent=True)
# plt.show()
| unlicense |
andyraib/data-storage | python_scripts/env/lib/python3.6/site-packages/pandas/stats/fama_macbeth.py | 7 | 7274 | from pandas.core.base import StringMixin
from pandas.compat import StringIO, range
import numpy as np
from pandas.core.api import Series, DataFrame
import pandas.stats.common as common
from pandas.util.decorators import cache_readonly
# flake8: noqa
def fama_macbeth(**kwargs):
"""Runs Fama-MacBeth regression.
Parameters
----------
Takes the same arguments as a panel OLS, in addition to:
nw_lags_beta: int
Newey-West adjusts the betas by the given lags
"""
window_type = kwargs.get('window_type')
if window_type is None:
klass = FamaMacBeth
else:
klass = MovingFamaMacBeth
return klass(**kwargs)
class FamaMacBeth(StringMixin):
def __init__(self, y, x, intercept=True, nw_lags=None,
nw_lags_beta=None,
entity_effects=False, time_effects=False, x_effects=None,
cluster=None, dropped_dummies=None, verbose=False):
import warnings
warnings.warn("The pandas.stats.fama_macbeth module is deprecated and will be "
"removed in a future version. We refer to external packages "
"like statsmodels, see here: "
"http://www.statsmodels.org/stable/index.html",
FutureWarning, stacklevel=4)
if dropped_dummies is None:
dropped_dummies = {}
self._nw_lags_beta = nw_lags_beta
from pandas.stats.plm import MovingPanelOLS
self._ols_result = MovingPanelOLS(
y=y, x=x, window_type='rolling', window=1,
intercept=intercept,
nw_lags=nw_lags, entity_effects=entity_effects,
time_effects=time_effects, x_effects=x_effects, cluster=cluster,
dropped_dummies=dropped_dummies, verbose=verbose)
self._cols = self._ols_result._x.columns
@cache_readonly
def _beta_raw(self):
return self._ols_result._beta_raw
@cache_readonly
def _stats(self):
return _calc_t_stat(self._beta_raw, self._nw_lags_beta)
@cache_readonly
def _mean_beta_raw(self):
return self._stats[0]
@cache_readonly
def _std_beta_raw(self):
return self._stats[1]
@cache_readonly
def _t_stat_raw(self):
return self._stats[2]
def _make_result(self, result):
return Series(result, index=self._cols)
@cache_readonly
def mean_beta(self):
return self._make_result(self._mean_beta_raw)
@cache_readonly
def std_beta(self):
return self._make_result(self._std_beta_raw)
@cache_readonly
def t_stat(self):
return self._make_result(self._t_stat_raw)
@cache_readonly
def _results(self):
return {
'mean_beta': self._mean_beta_raw,
'std_beta': self._std_beta_raw,
't_stat': self._t_stat_raw,
}
@cache_readonly
def _coef_table(self):
buffer = StringIO()
buffer.write('%13s %13s %13s %13s %13s %13s\n' %
('Variable', 'Beta', 'Std Err', 't-stat', 'CI 2.5%', 'CI 97.5%'))
template = '%13s %13.4f %13.4f %13.2f %13.4f %13.4f\n'
for i, name in enumerate(self._cols):
if i and not (i % 5):
buffer.write('\n' + common.banner(''))
mean_beta = self._results['mean_beta'][i]
std_beta = self._results['std_beta'][i]
t_stat = self._results['t_stat'][i]
ci1 = mean_beta - 1.96 * std_beta
ci2 = mean_beta + 1.96 * std_beta
values = '(%s)' % name, mean_beta, std_beta, t_stat, ci1, ci2
buffer.write(template % values)
if self._nw_lags_beta is not None:
buffer.write('\n')
buffer.write('*** The Std Err, t-stat are Newey-West '
'adjusted with Lags %5d\n' % self._nw_lags_beta)
return buffer.getvalue()
def __unicode__(self):
return self.summary
@cache_readonly
def summary(self):
template = """
----------------------Summary of Fama-MacBeth Analysis-------------------------
Formula: Y ~ %(formulaRHS)s
# betas : %(nu)3d
----------------------Summary of Estimated Coefficients------------------------
%(coefTable)s
--------------------------------End of Summary---------------------------------
"""
params = {
'formulaRHS': ' + '.join(self._cols),
'nu': len(self._beta_raw),
'coefTable': self._coef_table,
}
return template % params
class MovingFamaMacBeth(FamaMacBeth):
def __init__(self, y, x, window_type='rolling', window=10,
intercept=True, nw_lags=None, nw_lags_beta=None,
entity_effects=False, time_effects=False, x_effects=None,
cluster=None, dropped_dummies=None, verbose=False):
if dropped_dummies is None:
dropped_dummies = {}
self._window_type = common._get_window_type(window_type)
self._window = window
FamaMacBeth.__init__(
self, y=y, x=x, intercept=intercept,
nw_lags=nw_lags, nw_lags_beta=nw_lags_beta,
entity_effects=entity_effects, time_effects=time_effects,
x_effects=x_effects, cluster=cluster,
dropped_dummies=dropped_dummies, verbose=verbose)
self._index = self._ols_result._index
self._T = len(self._index)
@property
def _is_rolling(self):
return self._window_type == 'rolling'
def _calc_stats(self):
mean_betas = []
std_betas = []
t_stats = []
# XXX
mask = self._ols_result._rolling_ols_call[2]
obs_total = mask.astype(int).cumsum()
start = self._window - 1
betas = self._beta_raw
for i in range(start, self._T):
if self._is_rolling:
begin = i - start
else:
begin = 0
B = betas[max(obs_total[begin] - 1, 0): obs_total[i]]
mean_beta, std_beta, t_stat = _calc_t_stat(B, self._nw_lags_beta)
mean_betas.append(mean_beta)
std_betas.append(std_beta)
t_stats.append(t_stat)
return np.array([mean_betas, std_betas, t_stats])
_stats = cache_readonly(_calc_stats)
def _make_result(self, result):
return DataFrame(result, index=self._result_index, columns=self._cols)
@cache_readonly
def _result_index(self):
mask = self._ols_result._rolling_ols_call[2]
# HACK XXX
return self._index[mask.cumsum() >= self._window]
@cache_readonly
def _results(self):
return {
'mean_beta': self._mean_beta_raw[-1],
'std_beta': self._std_beta_raw[-1],
't_stat': self._t_stat_raw[-1],
}
def _calc_t_stat(beta, nw_lags_beta):
N = len(beta)
B = beta - beta.mean(0)
C = np.dot(B.T, B) / N
if nw_lags_beta is not None:
for i in range(nw_lags_beta + 1):
cov = np.dot(B[i:].T, B[:(N - i)]) / N
weight = i / (nw_lags_beta + 1)
C += 2 * (1 - weight) * cov
mean_beta = beta.mean(0)
std_beta = np.sqrt(np.diag(C)) / np.sqrt(N)
t_stat = mean_beta / std_beta
return mean_beta, std_beta, t_stat
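# A minimal usage sketch (hypothetical panel data, left as a comment since this
# module is deprecated in favour of statsmodels): y would be a panel Series of
# returns and x a dict of regressor DataFrames, as in the panel OLS interface.
#
#     result = fama_macbeth(y=y, x=x, nw_lags_beta=2)
#     print(result.mean_beta)
#     print(result.summary)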
| apache-2.0 |
cc7768/RecMacroTheoryFigures | RMT/Chapter20/Chp20Specification.py | 1 | 5586 | """
This file sets up the model parameters and specification
as described in 20.2.
"""
import numpy as np
import matplotlib.pyplot as plt
import quantecon as qe
from numba import jit, vectorize
def get_primitives(beta=0.92, gamma=0.8, ymin=6, ymax=15, ny=10, lamb=0.66):
# Set up ybar
ybar = np.linspace(ymin, ymax, ny)
pi_y = (1-lamb)/(1-lamb**ny) * lamb**(np.arange(ny))
# Set up utility function
u = lambda c: np.exp(-gamma*c)/(-gamma)
uinv = lambda _u: np.log(-gamma*_u) / (-gamma)
up = lambda c: np.exp(-gamma*c)
upinv = lambda _up: np.log(_up) / (-gamma)
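    # Note: u is CARA utility u(c) = -exp(-gamma * c) / gamma; uinv and upinv
    # are the closed-form inverses of u and u', used to back consumption out
    # of promised values (e.g. in Chp20_Sec3_Economy.solve).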
return beta, gamma, ybar, pi_y, u, uinv, up, upinv
class Chp20_Sec3_Economy(object):
"""
    This class builds the economy described by Chapter 20, Section 3, of
    "Recursive Macroeconomic Theory". This economy is one in which there
is a risk-averse agent and a risk-neutral money lender with one-sided
commitment. It is one-sided in the sense that the money lender honors
all of his contracts, but the agent is free to walk away if he so
chooses.
"""
def __init__(self, beta=0.92, gamma=0.2, ymin=6, ymax=15, ny=10, lamb=0.66):
# Generate the primitives of the model
primitives = get_primitives(beta, gamma, ymin, ymax, ny, lamb)
self.beta, self.gamma = primitives[0], primitives[1]
self.ybar, self.pi_y = primitives[2], primitives[3]
self.u, self.uinv = primitives[4], primitives[5]
self.up, self.upinv = primitives[6], primitives[7]
# Get the autarky value for the agent
self.v_aut = (1/(1-beta)) * np.dot(self.u(self.ybar), self.pi_y)
self.c_complete_markets = np.dot(self.pi_y, self.ybar)
def v(self, c, w):
return self.u(c) + self.beta*w
def participation_constraint(self, c, w, s):
"""
        This function evaluates whether the participation constraint is
        satisfied (i.e. staying in the contract is weakly preferred to
        exiting) for a specific state (s), given consumption (c) and a
        continuation promise (w).
"""
# Pull out information we need
ybars = self.ybar[s]
v_aut = self.v_aut
        # Evaluate the value of staying in the contract and the value of exiting
v_stay = self.v(c, w)
v_exit = self.v(ybars, v_aut)
return (v_stay >= v_exit)
def solve(self):
# Unpack some parameters
beta, gamma = self.beta, self.gamma
ybar, pi_y = self.ybar, self.pi_y
nstates = ybar.size
v_aut = self.v_aut
v, u, up = self.v, self.u, self.up
uinv, upinv = self.uinv, self.upinv
# Allocate space for policies when participation constraint binds
# We call this "amnesia"
g1 = np.empty(nstates) # Policy for consumption when hit with amnesia
l1 = np.empty(nstates) # Policy for cont value when hit with amnesia
# First step is to solve equilibrium for an agent who has seen the
# maximum income level
l1[-1] = v(ybar[-1], v_aut)
g1[-1] = uinv((1-beta)*l1[-1])
for s in range(nstates-2, -1, -1):
# Get the value of exit
v_exit = v(ybar[s], v_aut)
# All the j+1 to S terms
jp1_S_terms = np.dot(pi_y[s+1:], v(g1[s+1:], l1[s+1:]))
# Sum of 1 to j values of pi times beta
pi1j = np.sum(pi_y[:s+1])
# Closed form for barc_j and barw_j
l1[s] = pi1j*v_exit + jp1_S_terms
g1[s] = uinv((l1[s]*(1-beta*pi1j) - jp1_S_terms)/(pi1j))
# Allocate space for money lender vf
P = np.empty(nstates)
# Solve for values of money lender
P[-1] = 1/(1-beta) * np.dot(pi_y, ybar - g1[-1])
for s in range(nstates-2, -1, -1):
# Pull out cvalues for current states
ck = g1[s]
# Sum of 1 to j values of pi times beta
pi1j = np.sum(pi_y[:s+1])
# Solve for what happens if you have low/high shocks relative
# to the state you bring into period
low_flow = np.dot(pi_y[:s+1], ybar[:s+1] - ck)
high_flow = np.dot(pi_y[s+1:], ybar[s+1:] - g1[s+1:])
# Give continuation values of high shocks
high_cont = np.dot(pi_y[s+1:], P[s+1:])
P[s] = ((low_flow + high_flow) + beta*high_cont)/(1 - beta*pi1j)
return g1, l1, P
def simulate(self, g1, l1, T):
"""
Given a policy for consumption (g1) and a policy for continuation
values (l1) simulate for T periods.
"""
# Pull out information from class
ybar, pi_y = self.ybar, self.pi_y
ns = self.ybar.size
# Draw random simulation of iid income realizations
d = qe.DiscreteRV(pi_y)
y_indexes = d.draw(T)
# Draw appropriate indexes for policy.
# We do this by making sure that the indexes are weakly increasing
# by changing any values less than the previous max to the previous max
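        # For example, realized income indexes [2, 0, 1, 5, 3] become policy
        # indexes [2, 2, 2, 5, 5].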
pol_indexes = np.empty(T, dtype=int)
fix_indexes(ns, T, y_indexes, pol_indexes)
# Pull off consumption and continuation value sequences
c = g1[pol_indexes]
w = l1[pol_indexes]
y = ybar[y_indexes]
return c, w, y
@jit(nopython=True)
def fix_indexes(nstates, T, realized, fixed):
# Current values of everything
prev_max = 0
# loop through values and "fix" them as described above
for t in range(T):
cind = realized[t]
if cind<=prev_max:
fixed[t] = prev_max
else:
fixed[t] = cind
prev_max = cind
return None
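# A minimal usage sketch (hypothetical driver, not part of the original module):
# build the Chapter 20.3 economy with its default parameters, solve for the
# consumption policy (g1), continuation-value policy (l1) and the money
# lender's values (P), then simulate and plot a single history.
if __name__ == '__main__':
    econ = Chp20_Sec3_Economy()
    g1, l1, P = econ.solve()
    c, w, y = econ.simulate(g1, l1, T=250)
    fig, ax = plt.subplots()
    ax.plot(y, alpha=0.5, label="income")
    ax.plot(c, label="consumption")
    ax.legend()
    plt.show()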
| mit |
emanuele/measure_peaks | measure_peaks.py | 1 | 30947 | """Code to parse HPLC files, to smooth the signal, to detrend, to
detect peaks and to measure their areas. Moreover, given data from
standards and samples, the mole fraction of each peak is estimated by
comparing with the matching one in the standards.
Copyright 2017 Emanuele Olivetti and Yuejiao Yang.
MIT License
"""
import numpy as np
import matplotlib.pyplot as plt
import time
from scipy.signal import savgol_filter, detrend, find_peaks_cwt, argrelextrema
from scipy.interpolate import interp1d
from scipy.integrate import trapz, simps
from peakdetect import peakdetect
from glob import glob
from os.path import join
import sys
# Adapted from: https://stackoverflow.com/questions/616645/how-do-i-duplicate-sys-stdout-to-a-log-file-in-python
class Tee(object):
"""Duplicates stdout on file, like the unix command tee.
"""
def __init__(self, name, mode='w'):
self.file = open(name, mode)
self.stdout = sys.stdout
sys.stdout = self
def __del__(self):
sys.stdout = self.stdout
self.file.close()
def write(self, data):
self.file.write(data)
self.stdout.write(data)
def flush(self):
# this flush method is needed for python 3 compatibility.
# this handles the flush command by doing nothing. you might
# want to specify some extra behavior here.
pass
sys.stdout = Tee('report.txt')
plt.interactive(True)
def load(filename, plot=False):
"""Load and parse CSV-like high-performance liquid chromatography (HLPC)
measurements file.
"""
print("Loading and parsing %s" % filename)
timestamp_format = "%d/%m/%Y %H:%M:%S"
timestamps = []
signal = []
with open(filename) as f:
assert(f.readline().strip() == 'Date, HPLC flow chart')
for line in f:
t, s = line.split(',')
timestamps.append(time.mktime(time.strptime(t, timestamp_format)))
signal.append(float(s))
timestamps = np.array(timestamps)
signal = np.array(signal)
if plot:
plt.figure()
plt.plot(timestamps, signal)
plt.xlabel('Time (sec.)')
plt.title('%s : raw data' % filename)
# plt.figure()
# plt.plot(timestamps, label='timestamps')
# plt.title('%s : sequence of timestamps' % filename)
# plt.legend()
# plt.xlabel('Time (sec.)')
print("Read %s timestamps and %s measures" % (timestamps.size,
signal.size))
return timestamps, signal
def timestamps_signal_fix(timestamps, signal, plot=True):
"""Fix timestamps/signal, because the timestamp resolution of the
instrument is 'seconds' while there are multiple measurements per
second, leading to multiple entries with the same timestamp.
"""
print("Fixing and clipping time information.")
timestamps = timestamps - timestamps[0]
# Issue: there are multiple measurements at the same timestamp.
# Solution 1: resample timestamps using linspace. This is WRONG in
# some cases
# timestamps = timestamps - timestamps[0]
# timestamps_fixed = np.linspace(timestamps[0], timestamps[-1],
# len(timestamps), endpoint=True)
# Solution 2: average all measurements with the same timestamp:
timestamps_fixed = np.unique(timestamps)
signal_fixed = np.zeros(len(timestamps_fixed))
for i, t in enumerate(np.unique(timestamps)):
signal_fixed[i] = signal[np.where(timestamps == t)[0]].mean()
if plot:
plt.figure()
plt.plot(timestamps_fixed, signal_fixed)
plt.xlabel('Time (sec.)')
plt.title('Fixed data')
return timestamps_fixed, signal_fixed
def timestamps_signal_clip(timestamps, signal, timestep_min=980.0,
timestep_max=2000.0, plot=True):
"""Clip the interval of the signal to a given time window.
"""
time_window_idx = np.logical_and(timestamps >= timestep_min,
timestamps <= timestep_max)
timestamps_clipped = timestamps[time_window_idx]
signal_clipped = signal[time_window_idx]
if plot:
plt.figure()
plt.plot(timestamps_clipped, signal_clipped)
plt.xlabel('Time (sec.)')
plt.title('Clipped data')
return timestamps_clipped, signal_clipped
def signal_smoothing(timestamps, signal, smoothing_window=5, smoothing_polyorder=1,
plot=True):
"""Smooth signal to remove high-frequency noise.
"""
print("Smoothing")
signal_smoothed = savgol_filter(signal,
window_length=smoothing_window,
polyorder=smoothing_polyorder)
if plot:
plt.figure()
plt.plot(timestamps, signal_smoothed)
plt.xlabel('Time (sec.)')
plt.title('Smoothed data')
return signal_smoothed
def baseline_correction(timestamps, signal, baseline_order=5,
baseline_threshold=1.0e-2, plot=True):
"""Detrend the signal with a polyline going through some of the lowest
points in the signal.
"""
print("Baseline correction.")
break_points = argrelextrema(signal, np.less, order=baseline_order)[0]
# Remove break_points too far from their linear fit:
linear_fit = np.poly1d(np.polyfit(timestamps[break_points],
signal[break_points], 1))
losses = signal[break_points] - linear_fit(timestamps[break_points])
# print(losses)
break_points = break_points[np.abs(losses) < baseline_threshold]
# Add the first and last measured points as break-points for detrending:
break_points = np.concatenate([[0], break_points, [len(signal) - 1]])
if plot:
plt.figure()
plt.plot(timestamps, signal)
plt.plot(timestamps[break_points], signal[break_points], 'g*')
plt.xlabel('Time (sec.)')
        # linear_fit before removing outliers in breakpoints
# plt.plot(timestamps[break_points],
# linear_fit(timestamps[break_points]), 'm*')
plt.title('Break points for detrending')
# Detrending:
# linear detrend:
# signal_detrended = detrend(signal, type='linear', bp=break_points)
# polyline detrend:
trend = interp1d(timestamps[break_points], signal[break_points])
signal_detrended = signal - trend(timestamps)
if plot:
plt.figure()
plt.plot(timestamps, signal_detrended)
plt.xlabel('Time (sec.)')
plt.title("Detrended data")
return signal_detrended
def load_and_prepare(filename,
timestep_min=920,
timestep_max=2000,
smoothing_window=5,
smoothing_polyorder=1,
baseline_order=10,
baseline_threshold=1.0e-2,
plot=False):
"""Convenience function to load and preprocess HLPC data.
"""
timestamps, signal = load(filename, plot=plot)
timestamps_fixed, signal_fixed = timestamps_signal_fix(timestamps,
signal, plot=plot)
timestamps_clipped, signal_clipped = timestamps_signal_clip(timestamps_fixed,
signal_fixed, timestep_min=timestep_min,
timestep_max=timestep_max, plot=plot)
signal_smoothed = signal_smoothing(timestamps_clipped,
signal_clipped,
smoothing_window=smoothing_window,
smoothing_polyorder=smoothing_polyorder, plot=plot)
signal_detrended = baseline_correction(timestamps_clipped,
signal_smoothed,
baseline_order=baseline_order,
baseline_threshold=baseline_threshold,
plot=plot)
return timestamps_clipped, signal_detrended, signal
def automatic_peak_detection(timestamps,
signal,
peak_lookahead=3,
peak_delta=1.0e-3,
peak_min=0.002,
expected_peaks=None,
plot=True,
offset=0.0):
"""Authomatic peak detection and post-detection filtering.
"""
print("Automatic peak detection.")
# there are a number of options
# See: https://blog.ytotech.com/2015/11/01/findpeaks-in-python/
# or https://github.com/MonsieurV/py-findpeaks
# This does not work well:
# peaks_idx = np.array(find_peaks_cwt(signal, widths=peak_widths),
# dtype=np.int)
# This one works very well:
peaks_idx = np.array(peakdetect(signal, lookahead=peak_lookahead,
delta=peak_delta)[0])[:, 0].astype(np.int)
print("Peaks detected: %s" % len(peaks_idx))
if plot:
if offset == 0.0:
plt.figure()
plt.xlabel('Time (sec.)')
plt.plot(timestamps, signal + offset)
plt.plot(timestamps[peaks_idx], signal[peaks_idx] + offset,
'w*', label='rejected peaks')
print("Peak filtering")
peaks_idx_filtered = peaks_idx[signal[peaks_idx] >= peak_min]
print("Peaks retained after filtering: %s" % len(peaks_idx_filtered))
for i, pif in enumerate(peaks_idx_filtered):
print("Peak %s, time=%0.2f min" % (i+1, timestamps[pif]/60.0))
if expected_peaks is not None:
assert(len(peaks_idx_filtered) == expected_peaks)
if plot:
plt.plot(timestamps[peaks_idx_filtered],
signal[peaks_idx_filtered] + offset, 'r*',
label='confirmed peaks')
plt.title("Detected peaks")
plt.legend()
return peaks_idx_filtered
def plot_peaks_area(timestamps, signal, peaks_idx, beginnings, ends,
offset=0.0):
"""Plot signal, peaks and their areas.
"""
plt.figure()
plt.plot(timestamps, signal + offset)
plt.plot(timestamps[peaks_idx], signal[peaks_idx] + offset, 'r*')
plt.xlabel('Time (sec.)')
for i in range(len(beginnings)):
plt.fill_between(timestamps[beginnings[i]:ends[i]+1],
signal[beginnings[i]:ends[i]+1] + offset, 0)
plt.title("Peaks areas")
return
def compute_peak_beginning_end(timestamps, signal, peaks_idx, be_order=5,
plot=True, offset=0.0):
"""Compute beginning and end of a peak.
"""
print("Detecting beginnings and ends of peaks")
beginnings = np.zeros(len(peaks_idx), dtype=np.int)
ends = np.zeros(len(peaks_idx), dtype=np.int)
# add 0 and last idx
tmp = np.concatenate([[0], peaks_idx, [len(signal) - 1]])
for i in range(len(tmp) - 2):
try: # sometimes argrelextrema does not find any result...
beginnings[i] = tmp[i] + argrelextrema(signal[tmp[i]:tmp[i+1]],
np.less, order=be_order)[0][-1] # better!
except IndexError:
try:
beginnings[i] = tmp[i] + np.argmin(signal[tmp[i]:tmp[i+1]]) # basic
except ValueError:
beginnings[i] = tmp[i] # if everything else fail...
try: # sometimes argrelextrema does not find any result...
ends[i] = tmp[i+1] + argrelextrema(signal[tmp[i+1]:tmp[i+2]], np.less, order=be_order)[0][0] # better!
except IndexError:
try:
ends[i] = tmp[i+1] + np.argmin(signal[tmp[i+1]:tmp[i+2]]) # basic
except ValueError:
ends[i] = tmp[i+1] # if everything else fail...
if plot:
plot_peaks_area(timestamps, signal, peaks_idx, beginnings,
ends, offset=0.0)
return beginnings, ends
def compute_peaks_area(timestamps, signal, beginnings, ends):
"""Compute area of peaks. The area is in the units of timestamps X
signal.
"""
print("Measuring area of peaks.")
peaks_area = np.zeros(len(beginnings))
for i in range(len(beginnings)):
# Several ways to compute a peak's area:
# dx = (timestamps[-1] - timestamps[0]) / len(timestamps)
# peaks_area[i] = signal[beginnings[i]:ends[i]].sum() * dx # basic
# peaks_area[i] = trapz(signal[beginnings[i]:ends[i]], dx=dx) # better
peaks_area[i] = trapz(signal[beginnings[i]:ends[i]+1],
timestamps[beginnings[i]:ends[i]+1]) # even better
for i in range(len(beginnings)):
print("Peak %s, area: %0.2f \t percentage: %0.2f" % (i+1, peaks_area[i], peaks_area[i] / peaks_area.sum() * 100.0))
return peaks_area
def glob_filenames(directory,
filenames=None):
"""mix filenames with directory and retrieve, i.e. glob, filenames if
not available.
"""
mix = False
if (filenames is None) or (filenames == []):
filenames = sorted(glob(join(directory, '*.txt')))
mix = True
elif type(filenames) == str:
filenames = [filenames]
else:
raise Exception
if len(filenames) == 0:
print("NO STANDARDS IN DIRECTORY %s" % directory)
raise Exception
else:
if not mix:
filenames = [join(directory, fn) for fn in filenames]
return filenames
def crunch_standards(standards,
standards_filename=None,
standards_directory='standards',
timestep_min=920,
timestep_max=2000,
smoothing_window=5,
smoothing_polyorder=1,
baseline_order=10,
baseline_threshold=1.0e-2,
peak_lookahead=3,
peak_delta=1.0e-3,
peak_min=0.002,
be_order=5,
plot=True):
"""Load, prepare, find peaks, measure area and compute means of standards.
"""
print("Crunching standards...")
print("Declared standards: %s" % (standards, ))
print("Number of declared standards: %s" % len(standards))
standards_filename = glob_filenames(directory=standards_directory,
filenames=standards_filename)
print("Standards' files: %s" % standards_filename)
peaks_area_standards = np.zeros((len(standards_filename),
len(standards)), dtype=np.float)
for i, filename_standard in enumerate(standards_filename):
timestamps_standard, signal_standard, \
raw_standard = load_and_prepare(filename_standard,
timestep_min=timestep_min,
timestep_max=timestep_max,
smoothing_window=smoothing_window,
smoothing_polyorder=smoothing_polyorder,
baseline_order=baseline_order,
baseline_threshold=baseline_threshold,
plot=plot)
peaks_idx_standard = automatic_peak_detection(timestamps_standard,
signal_standard,
peak_lookahead=peak_lookahead,
peak_delta=peak_delta,
peak_min=peak_min,
expected_peaks=len(standards),
plot=plot)
beginnings_standard, \
ends_standard = compute_peak_beginning_end(timestamps_standard,
signal_standard,
peaks_idx_standard,
be_order=be_order,
plot=plot)
peaks_area_standard = compute_peaks_area(timestamps_standard,
signal_standard,
beginnings_standard,
ends_standard)
peaks_area_standards[i] = peaks_area_standard
print("")
F = 100.0 / peaks_area_standards
print("Standards: F ([pmol/ul]/Area)")
print("| Protein | F (mean) | F (std) |")
print("|--------------------------------|")
for i, st in enumerate(standards):
print("| %s | %8.3f | %7.3f |" % (st, F.mean(0)[i], F.std(0)[i]))
print("|--------------------------------|")
print("")
return peaks_area_standards, timestamps_standard, \
signal_standard, peaks_idx_standard
def plot_matching_peaks(timestamps, signal, peaks_idx,
timestamps_standard, signal_standard,
peaks_idx_standard, matching, standards):
"""Plot sample (above) and standard (below) signals with
markers on peaks and straight lines connecting matching peaks.
"""
plt.figure()
plt.plot(timestamps_standard, signal_standard, label='standard')
plt.plot(timestamps_standard[peaks_idx_standard],
signal_standard[peaks_idx_standard], 'r*')
for i in range(len(peaks_idx_standard)):
plt.text(timestamps_standard[peaks_idx_standard][i],
-signal_standard.max() * 0.1, standards[i], ha='center')
offset = signal_standard.max() * 1.1
plt.plot(timestamps, signal + offset, label='sample')
plt.plot(timestamps[peaks_idx], signal[peaks_idx] + offset, 'r*')
for k in matching.keys():
plt.plot([timestamps_standard[peaks_idx_standard[k]],
timestamps[peaks_idx[matching[k]]]],
[signal_standard[peaks_idx_standard[k]],
signal[peaks_idx[matching[k]]] + offset], 'k-')
plt.xlabel('Time (sec.)')
plt.legend()
plt.title("Matching standard peaks (below) to sample peaks (above)")
plt.legend(loc='upper left')
return
def match_peaks_standard_sample_nn(timestamps, timestamps_standard,
                                   peaks_idx, peaks_idx_standard,
                                   signal, signal_standard, standards,
                                   match_threshold=20.0, plot=True):
"""Standard-to-sample peak matching based on nearest neighbors, with
some additions.
Note: this function is work in progress and may not guarantee unique
matching, at the moment.
"""
distance_matrix = np.abs(np.subtract.outer(timestamps[peaks_idx],
timestamps_standard[peaks_idx_standard]))
match = distance_matrix.argmin(0)
matching = {}
for sample_idx in np.unique(match):
standard_idxs = np.where(match == sample_idx)[0]
# standard's peak with minimum distance from sample peak:
standard_idx = standard_idxs[distance_matrix[sample_idx,
standard_idxs].argmin()]
print("Sample peak %s: candidates = %s , winner: %s , at distance %s" % (sample_idx, standard_idxs, standard_idx, distance_matrix[sample_idx, standard_idx]))
if distance_matrix[sample_idx, standard_idx] <= match_threshold:
matching[standard_idx] = sample_idx
if plot:
plot_matching_peaks(timestamps, signal, peaks_idx,
timestamps_standard, signal_standard,
peaks_idx_standard, matching, standards)
return matching
def greedy_assignment(X):
"""A simple greedy algorithm for the assignment problem.
Note: the X matrix is a benefit function, not a cost function!
Returns a partial permutation matrix.
"""
XX = np.nan_to_num(X.copy())
min = XX.min() - 1.0
P = np.zeros(X.shape)
while (XX > min).any():
row, col = np.unravel_index(XX.argmax(), XX.shape)
P[row, col] = 1.0
XX[row, :] = min
XX[:, col] = min
return P
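# Worked example (hypothetical numbers): for the benefit matrix
#   X = np.array([[1., 2.],
#                 [3., 4.]])
# the largest entry (4.0 at row 1, col 1) is assigned first, then the only
# remaining free entry (1.0 at row 0, col 0), so greedy_assignment(X) returns
# the permutation matrix [[1., 0.], [0., 1.]].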
def match_peaks_standard_sample_lap_greedy(timestamps,
timestamps_standard,
peaks_idx,
peaks_idx_standard,
signal,
signal_standard,
standards,
match_threshold=20.0,
plot=True):
"""Standard-to-sample peaks matching based on greedy LAP, with
threshold. This algorithms works well in practice. The threshold is
the maximum allowed timestamp distance between corresponding peaks.
Returns a dictionary where the key is the index of
peaks_idx_standard and the value is the index of the matching
peaks_idx. Only matching peaks are in the dictionary.
"""
print("Matching standard peaks with sample peaks:")
distance_matrix = np.abs(np.subtract.outer(timestamps[peaks_idx],
timestamps_standard[peaks_idx_standard]))
    P = greedy_assignment(-distance_matrix)  # '-' turns the cost matrix into a benefit matrix
match = P.argmax(0)
distances = distance_matrix[match, range(len(peaks_idx_standard))]
tmp = (distances <= match_threshold)
peaks_sample_match = np.sort(match[tmp]) # sort() ensures temporal order
peaks_standard_match = np.arange(len(peaks_idx_standard),
dtype=np.int)[tmp]
matching = dict(zip(peaks_standard_match, peaks_sample_match))
for peak_idx_standard in matching.keys():
print("Standard peak %s matches sample peak %s (distance: %s)" % (peak_idx_standard+1, matching[peak_idx_standard]+1, distance_matrix[matching[peak_idx_standard], peak_idx_standard]))
if plot:
plot_matching_peaks(timestamps, signal, peaks_idx,
timestamps_standard, signal_standard, peaks_idx_standard,
matching, standards)
return matching
def compute_molar_fraction(standards,
standards_exclude,
peaks_area,
peaks_area_standards,
matching,
filename):
F = 100.0 / peaks_area_standards
print("")
print(filename)
peaks_area_full = np.zeros(len(standards), dtype=np.float)
for psidx in matching.keys():
peaks_area_full[psidx] = peaks_area[matching[psidx]]
Conc = F.mean(0) * peaks_area_full
tmp = np.ones(len(standards), dtype=np.bool)
tmp[[standards.index(se) for se in standards_exclude]] = False # mask to exclude proteins in standards_exclude
Molar_percent_full = -np.ones(len(standards), dtype=np.float)
Molar_percent_full[tmp] = Conc[tmp] / Conc[tmp].sum() * 100.0
print("| Protein | F (stand) | Area | Conc | % Molar |")
print("|--------------------------------------------------|")
for i, st in enumerate(standards):
if st in standards_exclude:
continue
else:
print("| %s | %8.3f | %.3f | %8.3f | %7.2f |" % (st, F.mean(0)[i], peaks_area_full[i], Conc[i], Molar_percent_full[i]))
print("|--------------------------------------------------|")
print("")
return Molar_percent_full
def plot_timelines(timestamps_standard, timestamps, plot=True):
"""Plot comparison of two timelines.
"""
if plot:
plt.figure()
plt.plot(timestamps_standard, label='standard')
plt.plot(timestamps, label='sample')
plt.title('Timestamps')
plt.xlabel('Time (sec.)')
def crunch_samples(standards,
standards_exclude,
timestamps_standard,
signal_standard,
peaks_idx_standard,
peaks_area_standards,
samples_filename=None,
samples_directory='samples',
match_threshold=20,
timestep_min=920,
timestep_max=2000,
smoothing_window=5,
smoothing_polyorder=1,
baseline_order=10,
baseline_threshold=1.0e-2,
peak_lookahead=3,
peak_delta=1.0e-3,
peak_min=0.002,
be_order=5,
plot=True,
savefig=True):
"""Crunch all samples.
"""
print("Crunching samples...")
samples_filename = glob_filenames(directory=samples_directory,
filenames=samples_filename)
print("Standards' files: %s" % samples_filename)
peaks_area_samples = []
matchings = []
timestamps_samples = []
signal_samples = []
molar_fractions = []
for i, sample_filename in enumerate(samples_filename):
timestamps_sample, signal_sample, \
raw_sample = load_and_prepare(sample_filename,
timestep_min=timestep_min,
timestep_max=timestep_max,
smoothing_window=smoothing_window,
smoothing_polyorder=smoothing_polyorder,
baseline_order=baseline_order,
baseline_threshold=baseline_threshold,
plot=plot)
peaks_idx_sample = automatic_peak_detection(timestamps_sample,
signal_sample,
peak_lookahead=peak_lookahead,
peak_delta=peak_delta,
peak_min=peak_min,
plot=plot)
matching = match_peaks_standard_sample_lap_greedy(timestamps_sample,
timestamps_standard,
peaks_idx_sample,
peaks_idx_standard,
signal_sample,
signal_standard,
standards,
match_threshold=match_threshold,
plot=True)
beginnings_sample, \
ends_sample = compute_peak_beginning_end(timestamps_sample,
signal_sample,
peaks_idx_sample,
be_order=be_order,
plot=plot)
peaks_area_sample = compute_peaks_area(timestamps_sample,
signal_sample,
beginnings_sample,
ends_sample)
molar_fraction = compute_molar_fraction(standards,
standards_exclude,
peaks_area_sample,
peaks_area_standards,
matching,
sample_filename)
peaks_area_samples.append(peaks_area_sample)
matchings.append(matching)
timestamps_samples.append(timestamps_sample)
signal_samples.append(signal_sample)
molar_fractions.append(molar_fraction)
if savefig:
tmp = sample_filename[:-4] + '.pdf'
print("Saving %s" % (sample_filename[:-4] + '.pdf'))
plt.savefig(tmp)
print("")
print("")
print('Done.')
return samples_filename, peaks_area_samples, matchings, timestamps_samples, signal_samples, molar_fractions
def merge_two_results_and_compute_molar_fraction(filename_smaller,
filename_larger,
samples_filename,
peaks_area_samples,
matchings, standards,
standards_exclude,
peaks_area_standards,
aminoacids_to_merge):
"""When the device is set at a certain (smaller) scale, all measurements
are more accurate but some clip. By repeating the measurements at
a larger scale those previously clipped measurements are now
correct but all others are less accurate. This function takes the
best measurements from the two sets and compute the results (molar
fraction), that now come from two files.
"""
print("")
print("Merging the results of two sets of measurements at different scales.")
print("%s is at smaller scale" % filename_smaller)
print("%s is at larger scale" % filename_larger)
print("The aminoacids to be used from the larger-scale measures are: %s" % (aminoacids_to_merge,))
idx_smaller = samples_filename.index(filename_smaller)
idx_larger = samples_filename.index(filename_larger)
idx_aminoacids_to_merge = [standards.index(atm) for atm in aminoacids_to_merge]
idx_aminoacids_to_merge_smaller = [matchings[idx_smaller][iatm] for iatm in idx_aminoacids_to_merge]
idx_aminoacids_to_merge_larger = [matchings[idx_larger][iatm] for iatm in idx_aminoacids_to_merge]
peaks_area_sample_merged = peaks_area_samples[idx_smaller].copy()
peaks_area_sample_merged[idx_aminoacids_to_merge_smaller] = peaks_area_samples[idx_larger][idx_aminoacids_to_merge_larger]
molar_fraction = compute_molar_fraction(standards,
standards_exclude,
peaks_area_sample_merged,
peaks_area_standards,
matchings[idx_smaller],
filename='Merge of %s (small scale) with %s (large scale)' % (filename_smaller, filename_larger))
return molar_fraction
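# A minimal end-to-end sketch (hypothetical standard names and directory layout,
# shown only to illustrate the intended call order): crunch the standards first,
# then crunch every sample against them.
if __name__ == '__main__':
    standards = ['ASP', 'GLU', 'SER', 'GLY', 'ARG']  # hypothetical peak order
    standards_exclude = []  # nothing excluded in this sketch
    (peaks_area_standards, timestamps_standard,
     signal_standard, peaks_idx_standard) = crunch_standards(
         standards, standards_directory='standards', plot=False)
    crunch_samples(standards, standards_exclude,
                   timestamps_standard, signal_standard,
                   peaks_idx_standard, peaks_area_standards,
                   samples_directory='samples', plot=False)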
| mit |
hlin117/scikit-learn | sklearn/discriminant_analysis.py | 27 | 26804 | """
Linear Discriminant Analysis and Quadratic Discriminant Analysis
"""
# Authors: Clemens Brunner
# Martin Billinger
# Matthieu Perrot
# Mathieu Blondel
# License: BSD 3-Clause
from __future__ import print_function
import warnings
import numpy as np
from scipy import linalg
from .externals.six import string_types
from .externals.six.moves import xrange
from .base import BaseEstimator, TransformerMixin, ClassifierMixin
from .linear_model.base import LinearClassifierMixin
from .covariance import ledoit_wolf, empirical_covariance, shrunk_covariance
from .utils.multiclass import unique_labels
from .utils import check_array, check_X_y
from .utils.validation import check_is_fitted
from .utils.fixes import bincount
from .utils.multiclass import check_classification_targets
from .preprocessing import StandardScaler
__all__ = ['LinearDiscriminantAnalysis', 'QuadraticDiscriminantAnalysis']
def _cov(X, shrinkage=None):
"""Estimate covariance matrix (using optional shrinkage).
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
shrinkage : string or float, optional
Shrinkage parameter, possible values:
- None or 'empirical': no shrinkage (default).
- 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
- float between 0 and 1: fixed shrinkage parameter.
Returns
-------
s : array, shape (n_features, n_features)
Estimated covariance matrix.
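
    Examples
    --------
    A minimal sketch on synthetic data (the 'auto' shrinkage choice here is
    purely illustrative):

    >>> import numpy as np
    >>> from sklearn.discriminant_analysis import _cov
    >>> X = np.random.RandomState(0).randn(20, 3)
    >>> _cov(X, shrinkage='auto').shape
    (3, 3)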
"""
shrinkage = "empirical" if shrinkage is None else shrinkage
if isinstance(shrinkage, string_types):
if shrinkage == 'auto':
sc = StandardScaler() # standardize features
X = sc.fit_transform(X)
s = ledoit_wolf(X)[0]
# rescale
s = sc.scale_[:, np.newaxis] * s * sc.scale_[np.newaxis, :]
elif shrinkage == 'empirical':
s = empirical_covariance(X)
else:
raise ValueError('unknown shrinkage parameter')
elif isinstance(shrinkage, float) or isinstance(shrinkage, int):
if shrinkage < 0 or shrinkage > 1:
raise ValueError('shrinkage parameter must be between 0 and 1')
s = shrunk_covariance(empirical_covariance(X), shrinkage)
else:
        raise TypeError('shrinkage must be of string or float type')
return s
def _class_means(X, y):
"""Compute class means.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values.
Returns
-------
    means : array-like, shape (n_classes, n_features)
Class means.
"""
means = []
classes = np.unique(y)
for group in classes:
Xg = X[y == group, :]
means.append(Xg.mean(0))
return np.asarray(means)
def _class_cov(X, y, priors=None, shrinkage=None):
"""Compute class covariance matrix.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values.
priors : array-like, shape (n_classes,)
Class priors.
shrinkage : string or float, optional
Shrinkage parameter, possible values:
- None: no shrinkage (default).
- 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
- float between 0 and 1: fixed shrinkage parameter.
Returns
-------
cov : array-like, shape (n_features, n_features)
Class covariance matrix.
"""
classes = np.unique(y)
covs = []
for group in classes:
Xg = X[y == group, :]
covs.append(np.atleast_2d(_cov(Xg, shrinkage)))
return np.average(covs, axis=0, weights=priors)
class LinearDiscriminantAnalysis(BaseEstimator, LinearClassifierMixin,
TransformerMixin):
"""Linear Discriminant Analysis
A classifier with a linear decision boundary, generated by fitting class
conditional densities to the data and using Bayes' rule.
The model fits a Gaussian density to each class, assuming that all classes
share the same covariance matrix.
The fitted model can also be used to reduce the dimensionality of the input
by projecting it to the most discriminative directions.
.. versionadded:: 0.17
*LinearDiscriminantAnalysis*.
Read more in the :ref:`User Guide <lda_qda>`.
Parameters
----------
solver : string, optional
Solver to use, possible values:
- 'svd': Singular value decomposition (default).
Does not compute the covariance matrix, therefore this solver is
recommended for data with a large number of features.
- 'lsqr': Least squares solution, can be combined with shrinkage.
- 'eigen': Eigenvalue decomposition, can be combined with shrinkage.
shrinkage : string or float, optional
Shrinkage parameter, possible values:
- None: no shrinkage (default).
- 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
- float between 0 and 1: fixed shrinkage parameter.
Note that shrinkage works only with 'lsqr' and 'eigen' solvers.
priors : array, optional, shape (n_classes,)
Class priors.
n_components : int, optional
        Number of components (<= n_classes - 1) for dimensionality reduction.
store_covariance : bool, optional
Additionally compute class covariance matrix (default False).
.. versionadded:: 0.17
tol : float, optional
Threshold used for rank estimation in SVD solver.
.. versionadded:: 0.17
Attributes
----------
coef_ : array, shape (n_features,) or (n_classes, n_features)
Weight vector(s).
    intercept_ : array, shape (n_classes,)
Intercept term.
covariance_ : array-like, shape (n_features, n_features)
Covariance matrix (shared by all classes).
explained_variance_ratio_ : array, shape (n_components,)
Percentage of variance explained by each of the selected components.
If ``n_components`` is not set then all components are stored and the
sum of explained variances is equal to 1.0. Only available when eigen
or svd solver is used.
means_ : array-like, shape (n_classes, n_features)
Class means.
priors_ : array-like, shape (n_classes,)
Class priors (sum to 1).
scalings_ : array-like, shape (rank, n_classes - 1)
Scaling of the features in the space spanned by the class centroids.
xbar_ : array-like, shape (n_features,)
Overall mean.
classes_ : array-like, shape (n_classes,)
Unique class labels.
See also
--------
sklearn.discriminant_analysis.QuadraticDiscriminantAnalysis: Quadratic
Discriminant Analysis
Notes
-----
The default solver is 'svd'. It can perform both classification and
transform, and it does not rely on the calculation of the covariance
matrix. This can be an advantage in situations where the number of features
is large. However, the 'svd' solver cannot be used with shrinkage.
The 'lsqr' solver is an efficient algorithm that only works for
classification. It supports shrinkage.
The 'eigen' solver is based on the optimization of the between class
scatter to within class scatter ratio. It can be used for both
classification and transform, and it supports shrinkage. However, the
'eigen' solver needs to compute the covariance matrix, so it might not be
suitable for situations with a high number of features.
Examples
--------
>>> import numpy as np
>>> from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> y = np.array([1, 1, 1, 2, 2, 2])
>>> clf = LinearDiscriminantAnalysis()
>>> clf.fit(X, y)
LinearDiscriminantAnalysis(n_components=None, priors=None, shrinkage=None,
solver='svd', store_covariance=False, tol=0.0001)
>>> print(clf.predict([[-0.8, -1]]))
[1]
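
    A hedged sketch of the shrinkage-enabled solvers described in the Notes
    section (the choice of 'lsqr' with 'auto' shrinkage is illustrative, not
    prescriptive):

    >>> clf_shrink = LinearDiscriminantAnalysis(solver='lsqr', shrinkage='auto')
    >>> _ = clf_shrink.fit(X, y)
    >>> print(clf_shrink.predict([[-0.8, -1]]))
    [1]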
"""
def __init__(self, solver='svd', shrinkage=None, priors=None,
n_components=None, store_covariance=False, tol=1e-4):
self.solver = solver
self.shrinkage = shrinkage
self.priors = priors
self.n_components = n_components
self.store_covariance = store_covariance # used only in svd solver
self.tol = tol # used only in svd solver
def _solve_lsqr(self, X, y, shrinkage):
"""Least squares solver.
The least squares solver computes a straightforward solution of the
optimal decision rule based directly on the discriminant functions. It
can only be used for classification (with optional shrinkage), because
estimation of eigenvectors is not performed. Therefore, dimensionality
reduction with the transform is not supported.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,) or (n_samples, n_classes)
Target values.
shrinkage : string or float, optional
Shrinkage parameter, possible values:
- None: no shrinkage (default).
- 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
- float between 0 and 1: fixed shrinkage parameter.
Notes
-----
This solver is based on [1]_, section 2.6.2, pp. 39-41.
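
        Concretely, with shared covariance :math:`\Sigma`, class means
        :math:`\mu_k` and priors :math:`\pi_k`, the discriminant computed by
        the code below is

        .. math::
            \delta_k(x) = x^T \Sigma^{-1} \mu_k
                          - \mu_k^T \Sigma^{-1} \mu_k / 2 + \log \pi_k.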
References
----------
.. [1] R. O. Duda, P. E. Hart, D. G. Stork. Pattern Classification
(Second Edition). John Wiley & Sons, Inc., New York, 2001. ISBN
0-471-05669-3.
"""
self.means_ = _class_means(X, y)
self.covariance_ = _class_cov(X, y, self.priors_, shrinkage)
self.coef_ = linalg.lstsq(self.covariance_, self.means_.T)[0].T
self.intercept_ = (-0.5 * np.diag(np.dot(self.means_, self.coef_.T)) +
np.log(self.priors_))
def _solve_eigen(self, X, y, shrinkage):
"""Eigenvalue solver.
The eigenvalue solver computes the optimal solution of the Rayleigh
coefficient (basically the ratio of between class scatter to within
class scatter). This solver supports both classification and
dimensionality reduction (with optional shrinkage).
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values.
shrinkage : string or float, optional
Shrinkage parameter, possible values:
- None: no shrinkage (default).
- 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
- float between 0 and 1: fixed shrinkage constant.
Notes
-----
This solver is based on [1]_, section 3.8.3, pp. 121-124.
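
        Concretely, the solver maximizes the Rayleigh quotient
        :math:`w^T S_b w / (w^T S_w w)`, which is equivalent to the
        generalized eigenvalue problem :math:`S_b w = \lambda S_w w`
        solved by ``linalg.eigh(Sb, Sw)`` below.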
References
----------
.. [1] R. O. Duda, P. E. Hart, D. G. Stork. Pattern Classification
(Second Edition). John Wiley & Sons, Inc., New York, 2001. ISBN
0-471-05669-3.
"""
self.means_ = _class_means(X, y)
self.covariance_ = _class_cov(X, y, self.priors_, shrinkage)
Sw = self.covariance_ # within scatter
St = _cov(X, shrinkage) # total scatter
Sb = St - Sw # between scatter
evals, evecs = linalg.eigh(Sb, Sw)
self.explained_variance_ratio_ = np.sort(evals / np.sum(evals)
)[::-1][:self._max_components]
evecs = evecs[:, np.argsort(evals)[::-1]] # sort eigenvectors
# evecs /= np.linalg.norm(evecs, axis=0) # doesn't work with numpy 1.6
evecs /= np.apply_along_axis(np.linalg.norm, 0, evecs)
self.scalings_ = evecs
self.coef_ = np.dot(self.means_, evecs).dot(evecs.T)
self.intercept_ = (-0.5 * np.diag(np.dot(self.means_, self.coef_.T)) +
np.log(self.priors_))
def _solve_svd(self, X, y):
"""SVD solver.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values.
"""
n_samples, n_features = X.shape
n_classes = len(self.classes_)
self.means_ = _class_means(X, y)
if self.store_covariance:
self.covariance_ = _class_cov(X, y, self.priors_)
Xc = []
for idx, group in enumerate(self.classes_):
Xg = X[y == group, :]
Xc.append(Xg - self.means_[idx])
self.xbar_ = np.dot(self.priors_, self.means_)
Xc = np.concatenate(Xc, axis=0)
        # 1) within (univariate) scaling by within-class std-dev
std = Xc.std(axis=0)
# avoid division by zero in normalization
std[std == 0] = 1.
fac = 1. / (n_samples - n_classes)
# 2) Within variance scaling
X = np.sqrt(fac) * (Xc / std)
# SVD of centered (within)scaled data
U, S, V = linalg.svd(X, full_matrices=False)
rank = np.sum(S > self.tol)
if rank < n_features:
warnings.warn("Variables are collinear.")
# Scaling of within covariance is: V' 1/S
scalings = (V[:rank] / std).T / S[:rank]
# 3) Between variance scaling
# Scale weighted centers
X = np.dot(((np.sqrt((n_samples * self.priors_) * fac)) *
(self.means_ - self.xbar_).T).T, scalings)
# Centers are living in a space with n_classes-1 dim (maximum)
# Use SVD to find projection in the space spanned by the
# (n_classes) centers
_, S, V = linalg.svd(X, full_matrices=0)
self.explained_variance_ratio_ = (S**2 / np.sum(
S**2))[:self._max_components]
rank = np.sum(S > self.tol * S[0])
self.scalings_ = np.dot(scalings, V.T[:, :rank])
coef = np.dot(self.means_ - self.xbar_, self.scalings_)
self.intercept_ = (-0.5 * np.sum(coef ** 2, axis=1) +
np.log(self.priors_))
self.coef_ = np.dot(coef, self.scalings_.T)
self.intercept_ -= np.dot(self.xbar_, self.coef_.T)
def fit(self, X, y):
"""Fit LinearDiscriminantAnalysis model according to the given
training data and parameters.
.. versionchanged:: 0.19
*store_covariance* has been moved to main constructor.
.. versionchanged:: 0.19
*tol* has been moved to main constructor.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array, shape (n_samples,)
Target values.
"""
X, y = check_X_y(X, y, ensure_min_samples=2, estimator=self)
self.classes_ = unique_labels(y)
if self.priors is None: # estimate priors from sample
_, y_t = np.unique(y, return_inverse=True) # non-negative ints
self.priors_ = bincount(y_t) / float(len(y))
else:
self.priors_ = np.asarray(self.priors)
if (self.priors_ < 0).any():
raise ValueError("priors must be non-negative")
if self.priors_.sum() != 1:
warnings.warn("The priors do not sum to 1. Renormalizing",
UserWarning)
self.priors_ = self.priors_ / self.priors_.sum()
# Get the maximum number of components
if self.n_components is None:
self._max_components = len(self.classes_) - 1
else:
self._max_components = min(len(self.classes_) - 1,
self.n_components)
if self.solver == 'svd':
if self.shrinkage is not None:
raise NotImplementedError('shrinkage not supported')
self._solve_svd(X, y)
elif self.solver == 'lsqr':
self._solve_lsqr(X, y, shrinkage=self.shrinkage)
elif self.solver == 'eigen':
self._solve_eigen(X, y, shrinkage=self.shrinkage)
else:
raise ValueError("unknown solver {} (valid solvers are 'svd', "
"'lsqr', and 'eigen').".format(self.solver))
if self.classes_.size == 2: # treat binary case as a special case
self.coef_ = np.array(self.coef_[1, :] - self.coef_[0, :], ndmin=2)
self.intercept_ = np.array(self.intercept_[1] - self.intercept_[0],
ndmin=1)
return self
def transform(self, X):
"""Project data to maximize class separation.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
Returns
-------
X_new : array, shape (n_samples, n_components)
Transformed data.
"""
if self.solver == 'lsqr':
raise NotImplementedError("transform not implemented for 'lsqr' "
"solver (use 'svd' or 'eigen').")
check_is_fitted(self, ['xbar_', 'scalings_'], all_or_any=any)
X = check_array(X)
if self.solver == 'svd':
X_new = np.dot(X - self.xbar_, self.scalings_)
elif self.solver == 'eigen':
X_new = np.dot(X, self.scalings_)
return X_new[:, :self._max_components]
def predict_proba(self, X):
"""Estimate probability.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
Returns
-------
C : array, shape (n_samples, n_classes)
Estimated probabilities.
"""
prob = self.decision_function(X)
prob *= -1
np.exp(prob, prob)
prob += 1
np.reciprocal(prob, prob)
if len(self.classes_) == 2: # binary case
return np.column_stack([1 - prob, prob])
else:
# OvR normalization, like LibLinear's predict_probability
prob /= prob.sum(axis=1).reshape((prob.shape[0], -1))
return prob
def predict_log_proba(self, X):
"""Estimate log probability.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
Returns
-------
C : array, shape (n_samples, n_classes)
Estimated log probabilities.
"""
return np.log(self.predict_proba(X))
class QuadraticDiscriminantAnalysis(BaseEstimator, ClassifierMixin):
"""Quadratic Discriminant Analysis
A classifier with a quadratic decision boundary, generated
by fitting class conditional densities to the data
and using Bayes' rule.
The model fits a Gaussian density to each class.
.. versionadded:: 0.17
*QuadraticDiscriminantAnalysis*
Read more in the :ref:`User Guide <lda_qda>`.
Parameters
----------
priors : array, optional, shape = [n_classes]
Priors on classes
reg_param : float, optional
Regularizes the covariance estimate as
``(1-reg_param)*Sigma + reg_param*np.eye(n_features)``
Attributes
----------
covariances_ : list of array-like, shape = [n_features, n_features]
Covariance matrices of each class.
means_ : array-like, shape = [n_classes, n_features]
Class means.
priors_ : array-like, shape = [n_classes]
Class priors (sum to 1).
rotations_ : list of arrays
For each class k an array of shape [n_features, n_k], with
``n_k = min(n_features, number of elements in class k)``
It is the rotation of the Gaussian distribution, i.e. its
principal axis.
scalings_ : list of arrays
For each class k an array of shape [n_k]. It contains the scaling
of the Gaussian distributions along its principal axes, i.e. the
variance in the rotated coordinate system.
store_covariances : boolean
If True the covariance matrices are computed and stored in the
`self.covariances_` attribute.
.. versionadded:: 0.17
tol : float, optional, default 1.0e-4
Threshold used for rank estimation.
.. versionadded:: 0.17
Examples
--------
>>> from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
>>> import numpy as np
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> y = np.array([1, 1, 1, 2, 2, 2])
>>> clf = QuadraticDiscriminantAnalysis()
>>> clf.fit(X, y)
... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
QuadraticDiscriminantAnalysis(priors=None, reg_param=0.0,
store_covariances=False, tol=0.0001)
>>> print(clf.predict([[-0.8, -1]]))
[1]
See also
--------
sklearn.discriminant_analysis.LinearDiscriminantAnalysis: Linear
Discriminant Analysis
"""
def __init__(self, priors=None, reg_param=0., store_covariances=False,
tol=1.0e-4):
self.priors = np.asarray(priors) if priors is not None else None
self.reg_param = reg_param
self.store_covariances = store_covariances
self.tol = tol
def fit(self, X, y):
"""Fit the model according to the given training data and parameters.
.. versionchanged:: 0.19
*store_covariance* has been moved to main constructor.
.. versionchanged:: 0.19
*tol* has been moved to main constructor.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
            Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array, shape = [n_samples]
Target values (integers)
"""
X, y = check_X_y(X, y)
check_classification_targets(y)
self.classes_, y = np.unique(y, return_inverse=True)
n_samples, n_features = X.shape
n_classes = len(self.classes_)
if n_classes < 2:
raise ValueError('y has less than 2 classes')
if self.priors is None:
self.priors_ = bincount(y) / float(n_samples)
else:
self.priors_ = self.priors
cov = None
if self.store_covariances:
cov = []
means = []
scalings = []
rotations = []
for ind in xrange(n_classes):
Xg = X[y == ind, :]
meang = Xg.mean(0)
means.append(meang)
if len(Xg) == 1:
raise ValueError('y has only 1 sample in class %s, covariance '
'is ill defined.' % str(self.classes_[ind]))
Xgc = Xg - meang
# Xgc = U * S * V.T
U, S, Vt = np.linalg.svd(Xgc, full_matrices=False)
rank = np.sum(S > self.tol)
if rank < n_features:
warnings.warn("Variables are collinear")
S2 = (S ** 2) / (len(Xg) - 1)
S2 = ((1 - self.reg_param) * S2) + self.reg_param
if self.store_covariances:
# cov = V * (S^2 / (n-1)) * V.T
cov.append(np.dot(S2 * Vt.T, Vt))
scalings.append(S2)
rotations.append(Vt.T)
if self.store_covariances:
self.covariances_ = cov
self.means_ = np.asarray(means)
self.scalings_ = scalings
self.rotations_ = rotations
return self
def _decision_function(self, X):
check_is_fitted(self, 'classes_')
X = check_array(X)
norm2 = []
for i in range(len(self.classes_)):
R = self.rotations_[i]
S = self.scalings_[i]
Xm = X - self.means_[i]
X2 = np.dot(Xm, R * (S ** (-0.5)))
norm2.append(np.sum(X2 ** 2, 1))
norm2 = np.array(norm2).T # shape = [len(X), n_classes]
u = np.asarray([np.sum(np.log(s)) for s in self.scalings_])
return (-0.5 * (norm2 + u) + np.log(self.priors_))
def decision_function(self, X):
"""Apply decision function to an array of samples.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Array of samples (test vectors).
Returns
-------
C : array, shape = [n_samples, n_classes] or [n_samples,]
Decision function values related to each class, per sample.
In the two-class case, the shape is [n_samples,], giving the
log likelihood ratio of the positive class.
"""
dec_func = self._decision_function(X)
# handle special case of two classes
if len(self.classes_) == 2:
return dec_func[:, 1] - dec_func[:, 0]
return dec_func
def predict(self, X):
"""Perform classification on an array of test vectors X.
The predicted class C for each sample in X is returned.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array, shape = [n_samples]
"""
d = self._decision_function(X)
y_pred = self.classes_.take(d.argmax(1))
return y_pred
def predict_proba(self, X):
"""Return posterior probabilities of classification.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Array of samples/test vectors.
Returns
-------
C : array, shape = [n_samples, n_classes]
Posterior probabilities of classification per class.
"""
values = self._decision_function(X)
# compute the likelihood of the underlying gaussian models
# up to a multiplicative constant.
likelihood = np.exp(values - values.max(axis=1)[:, np.newaxis])
# compute posterior probabilities
return likelihood / likelihood.sum(axis=1)[:, np.newaxis]
def predict_log_proba(self, X):
"""Return posterior probabilities of classification.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Array of samples/test vectors.
Returns
-------
C : array, shape = [n_samples, n_classes]
Posterior log-probabilities of classification per class.
"""
# XXX : can do better to avoid precision overflows
probas_ = self.predict_proba(X)
return np.log(probas_)
| bsd-3-clause |
yanlend/scikit-learn | sklearn/datasets/lfw.py | 141 | 19372 | """Loader for the Labeled Faces in the Wild (LFW) dataset
This dataset is a collection of JPEG pictures of famous people collected
over the internet, all details are available on the official website:
http://vis-www.cs.umass.edu/lfw/
Each picture is centered on a single face. The typical task is called
Face Verification: given a pair of two pictures, a binary classifier
must predict whether the two images are from the same person.
An alternative task, Face Recognition or Face Identification is:
given the picture of the face of an unknown person, identify the name
of the person by referring to a gallery of previously seen pictures of
identified persons.
Both Face Verification and Face Recognition are tasks that are typically
performed on the output of a model trained to perform Face Detection. The
most popular model for Face Detection is called Viola-Jones and is
implemented in the OpenCV library. The LFW faces were extracted by this face
detector from various online websites.
"""
# Copyright (c) 2011 Olivier Grisel <[email protected]>
# License: BSD 3 clause
from os import listdir, makedirs, remove
from os.path import join, exists, isdir
from sklearn.utils import deprecated
import logging
import numpy as np
try:
import urllib.request as urllib # for backwards compatibility
except ImportError:
import urllib
from .base import get_data_home, Bunch
from ..externals.joblib import Memory
from ..externals.six import b
logger = logging.getLogger(__name__)
BASE_URL = "http://vis-www.cs.umass.edu/lfw/"
ARCHIVE_NAME = "lfw.tgz"
FUNNELED_ARCHIVE_NAME = "lfw-funneled.tgz"
TARGET_FILENAMES = [
'pairsDevTrain.txt',
'pairsDevTest.txt',
'pairs.txt',
]
def scale_face(face):
"""Scale back to 0-1 range in case of normalization for plotting"""
scaled = face - face.min()
scaled /= scaled.max()
return scaled
#
# Common private utilities for data fetching from the original LFW website
# local disk caching, and image decoding.
#
def check_fetch_lfw(data_home=None, funneled=True, download_if_missing=True):
"""Helper function to download any missing LFW data"""
data_home = get_data_home(data_home=data_home)
lfw_home = join(data_home, "lfw_home")
if funneled:
archive_path = join(lfw_home, FUNNELED_ARCHIVE_NAME)
data_folder_path = join(lfw_home, "lfw_funneled")
archive_url = BASE_URL + FUNNELED_ARCHIVE_NAME
else:
archive_path = join(lfw_home, ARCHIVE_NAME)
data_folder_path = join(lfw_home, "lfw")
archive_url = BASE_URL + ARCHIVE_NAME
if not exists(lfw_home):
makedirs(lfw_home)
for target_filename in TARGET_FILENAMES:
target_filepath = join(lfw_home, target_filename)
if not exists(target_filepath):
if download_if_missing:
url = BASE_URL + target_filename
logger.warning("Downloading LFW metadata: %s", url)
urllib.urlretrieve(url, target_filepath)
else:
raise IOError("%s is missing" % target_filepath)
if not exists(data_folder_path):
if not exists(archive_path):
if download_if_missing:
logger.warning("Downloading LFW data (~200MB): %s", archive_url)
urllib.urlretrieve(archive_url, archive_path)
else:
raise IOError("%s is missing" % target_filepath)
import tarfile
logger.info("Decompressing the data archive to %s", data_folder_path)
tarfile.open(archive_path, "r:gz").extractall(path=lfw_home)
remove(archive_path)
return lfw_home, data_folder_path
def _load_imgs(file_paths, slice_, color, resize):
"""Internally used to load images"""
# Try to import imread and imresize from PIL. We do this here to prevent
# the whole sklearn.datasets module from depending on PIL.
try:
try:
from scipy.misc import imread
except ImportError:
from scipy.misc.pilutil import imread
from scipy.misc import imresize
except ImportError:
raise ImportError("The Python Imaging Library (PIL)"
" is required to load data from jpeg files")
# compute the portion of the images to load to respect the slice_ parameter
# given by the caller
default_slice = (slice(0, 250), slice(0, 250))
if slice_ is None:
slice_ = default_slice
else:
slice_ = tuple(s or ds for s, ds in zip(slice_, default_slice))
h_slice, w_slice = slice_
h = (h_slice.stop - h_slice.start) // (h_slice.step or 1)
w = (w_slice.stop - w_slice.start) // (w_slice.step or 1)
if resize is not None:
resize = float(resize)
h = int(resize * h)
w = int(resize * w)
# allocate some contiguous memory to host the decoded image slices
n_faces = len(file_paths)
if not color:
faces = np.zeros((n_faces, h, w), dtype=np.float32)
else:
faces = np.zeros((n_faces, h, w, 3), dtype=np.float32)
# iterate over the collected file path to load the jpeg files as numpy
# arrays
for i, file_path in enumerate(file_paths):
if i % 1000 == 0:
logger.info("Loading face #%05d / %05d", i + 1, n_faces)
# Checks if jpeg reading worked. Refer to issue #3594 for more
# details.
img = imread(file_path)
        if img.ndim == 0:
raise RuntimeError("Failed to read the image file %s, "
"Please make sure that libjpeg is installed"
% file_path)
face = np.asarray(img[slice_], dtype=np.float32)
face /= 255.0 # scale uint8 coded colors to the [0.0, 1.0] floats
if resize is not None:
face = imresize(face, resize)
if not color:
            # average the color channels to compute a gray-level
            # representation
face = face.mean(axis=2)
faces[i, ...] = face
return faces
#
# Task #1: Face Identification on picture with names
#
def _fetch_lfw_people(data_folder_path, slice_=None, color=False, resize=None,
min_faces_per_person=0):
"""Perform the actual data loading for the lfw people dataset
This operation is meant to be cached by a joblib wrapper.
"""
    # scan the data folder content to retain people with more than
# `min_faces_per_person` face pictures
person_names, file_paths = [], []
for person_name in sorted(listdir(data_folder_path)):
folder_path = join(data_folder_path, person_name)
if not isdir(folder_path):
continue
paths = [join(folder_path, f) for f in listdir(folder_path)]
n_pictures = len(paths)
if n_pictures >= min_faces_per_person:
person_name = person_name.replace('_', ' ')
person_names.extend([person_name] * n_pictures)
file_paths.extend(paths)
n_faces = len(file_paths)
if n_faces == 0:
raise ValueError("min_faces_per_person=%d is too restrictive" %
min_faces_per_person)
target_names = np.unique(person_names)
target = np.searchsorted(target_names, person_names)
faces = _load_imgs(file_paths, slice_, color, resize)
# shuffle the faces with a deterministic RNG scheme to avoid having
# all faces of the same person in a row, as it would break some
# cross validation and learning algorithms such as SGD and online
# k-means that make an IID assumption
indices = np.arange(n_faces)
np.random.RandomState(42).shuffle(indices)
faces, target = faces[indices], target[indices]
return faces, target, target_names
def fetch_lfw_people(data_home=None, funneled=True, resize=0.5,
min_faces_per_person=0, color=False,
slice_=(slice(70, 195), slice(78, 172)),
download_if_missing=True):
"""Loader for the Labeled Faces in the Wild (LFW) people dataset
This dataset is a collection of JPEG pictures of famous people
collected on the internet, all details are available on the
official website:
http://vis-www.cs.umass.edu/lfw/
Each picture is centered on a single face. Each pixel of each channel
(color in RGB) is encoded by a float in range 0.0 - 1.0.
The task is called Face Recognition (or Identification): given the
picture of a face, find the name of the person given a training set
(gallery).
The original images are 250 x 250 pixels, but the default slice and resize
    arguments reduce them to 62 x 47.
Parameters
----------
data_home : optional, default: None
Specify another download and cache folder for the datasets. By default
all scikit learn data is stored in '~/scikit_learn_data' subfolders.
funneled : boolean, optional, default: True
Download and use the funneled variant of the dataset.
resize : float, optional, default 0.5
        Ratio used to resize each face picture.
    min_faces_per_person : int, optional, default 0
The extracted dataset will only retain pictures of people that have at
least `min_faces_per_person` different pictures.
color : boolean, optional, default False
Keep the 3 RGB channels instead of averaging them to a single
gray level channel. If color is True the shape of the data has
        one more dimension than the shape with color = False.
slice_ : optional
Provide a custom 2D slice (height, width) to extract the
        'interesting' part of the jpeg files and avoid using statistical
correlation from the background
download_if_missing : optional, True by default
If False, raise a IOError if the data is not locally available
instead of trying to download the data from the source site.
Returns
-------
dataset : dict-like object with the following attributes:
dataset.data : numpy array of shape (13233, 2914)
Each row corresponds to a ravelled face image of original size 62 x 47
pixels. Changing the ``slice_`` or resize parameters will change the shape
of the output.
dataset.images : numpy array of shape (13233, 62, 47)
Each row is a face image corresponding to one of the 5749 people in
the dataset. Changing the ``slice_`` or resize parameters will change the shape
of the output.
dataset.target : numpy array of shape (13233,)
Labels associated to each face image. Those labels range from 0-5748
and correspond to the person IDs.
dataset.DESCR : string
Description of the Labeled Faces in the Wild (LFW) dataset.
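
    A hedged usage sketch (the numbers shown are for this particular setting
    and the first call downloads roughly 200MB of data):

    >>> from sklearn.datasets import fetch_lfw_people  # doctest: +SKIP
    >>> lfw_people = fetch_lfw_people(min_faces_per_person=70, resize=0.4)  # doctest: +SKIP
    >>> lfw_people.images.shape  # doctest: +SKIP
    (1288, 50, 37)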
"""
lfw_home, data_folder_path = check_fetch_lfw(
data_home=data_home, funneled=funneled,
download_if_missing=download_if_missing)
logger.info('Loading LFW people faces from %s', lfw_home)
# wrap the loader in a memoizing function that will return memmaped data
# arrays for optimal memory usage
m = Memory(cachedir=lfw_home, compress=6, verbose=0)
load_func = m.cache(_fetch_lfw_people)
# load and memoize the pairs as np arrays
faces, target, target_names = load_func(
data_folder_path, resize=resize,
min_faces_per_person=min_faces_per_person, color=color, slice_=slice_)
# pack the results as a Bunch instance
return Bunch(data=faces.reshape(len(faces), -1), images=faces,
target=target, target_names=target_names,
DESCR="LFW faces dataset")
#
# Task #2: Face Verification on pairs of face pictures
#
def _fetch_lfw_pairs(index_file_path, data_folder_path, slice_=None,
color=False, resize=None):
"""Perform the actual data loading for the LFW pairs dataset
This operation is meant to be cached by a joblib wrapper.
"""
# parse the index file to find the number of pairs to be able to allocate
# the right amount of memory before starting to decode the jpeg files
with open(index_file_path, 'rb') as index_file:
split_lines = [ln.strip().split(b('\t')) for ln in index_file]
pair_specs = [sl for sl in split_lines if len(sl) > 2]
n_pairs = len(pair_specs)
    # iterating over the metadata lines for each pair to find the filename to
# decode and load in memory
target = np.zeros(n_pairs, dtype=np.int)
file_paths = list()
for i, components in enumerate(pair_specs):
if len(components) == 3:
target[i] = 1
pair = (
(components[0], int(components[1]) - 1),
(components[0], int(components[2]) - 1),
)
elif len(components) == 4:
target[i] = 0
pair = (
(components[0], int(components[1]) - 1),
(components[2], int(components[3]) - 1),
)
else:
raise ValueError("invalid line %d: %r" % (i + 1, components))
for j, (name, idx) in enumerate(pair):
try:
person_folder = join(data_folder_path, name)
except TypeError:
person_folder = join(data_folder_path, str(name, 'UTF-8'))
filenames = list(sorted(listdir(person_folder)))
file_path = join(person_folder, filenames[idx])
file_paths.append(file_path)
pairs = _load_imgs(file_paths, slice_, color, resize)
shape = list(pairs.shape)
n_faces = shape.pop(0)
shape.insert(0, 2)
shape.insert(0, n_faces // 2)
pairs.shape = shape
return pairs, target, np.array(['Different persons', 'Same person'])
@deprecated("Function 'load_lfw_people' has been deprecated in 0.17 and will be "
"removed in 0.19."
"Use fetch_lfw_people(download_if_missing=False) instead.")
def load_lfw_people(download_if_missing=False, **kwargs):
"""Alias for fetch_lfw_people(download_if_missing=False)
Check fetch_lfw_people.__doc__ for the documentation and parameter list.
"""
return fetch_lfw_people(download_if_missing=download_if_missing, **kwargs)
def fetch_lfw_pairs(subset='train', data_home=None, funneled=True, resize=0.5,
color=False, slice_=(slice(70, 195), slice(78, 172)),
download_if_missing=True):
"""Loader for the Labeled Faces in the Wild (LFW) pairs dataset
This dataset is a collection of JPEG pictures of famous people
collected on the internet, all details are available on the
official website:
http://vis-www.cs.umass.edu/lfw/
Each picture is centered on a single face. Each pixel of each channel
(color in RGB) is encoded by a float in range 0.0 - 1.0.
The task is called Face Verification: given a pair of two pictures,
a binary classifier must predict whether the two images are from
the same person.
In the official `README.txt`_ this task is described as the
"Restricted" task. As I am not sure as to implement the
"Unrestricted" variant correctly, I left it as unsupported for now.
.. _`README.txt`: http://vis-www.cs.umass.edu/lfw/README.txt
The original images are 250 x 250 pixels, but the default slice and resize
    arguments reduce them to 62 x 47.
Read more in the :ref:`User Guide <labeled_faces_in_the_wild>`.
Parameters
----------
subset : optional, default: 'train'
Select the dataset to load: 'train' for the development training
set, 'test' for the development test set, and '10_folds' for the
official evaluation set that is meant to be used with a 10-folds
cross validation.
data_home : optional, default: None
Specify another download and cache folder for the datasets. By
default all scikit learn data is stored in '~/scikit_learn_data'
subfolders.
funneled : boolean, optional, default: True
Download and use the funneled variant of the dataset.
resize : float, optional, default 0.5
        Ratio used to resize each face picture.
color : boolean, optional, default False
Keep the 3 RGB channels instead of averaging them to a single
gray level channel. If color is True the shape of the data has
        one more dimension than the shape with color = False.
slice_ : optional
Provide a custom 2D slice (height, width) to extract the
        'interesting' part of the jpeg files and avoid using statistical
correlation from the background
download_if_missing : optional, True by default
        If False, raise an IOError if the data is not locally available
instead of trying to download the data from the source site.
Returns
-------
The data is returned as a Bunch object with the following attributes:
data : numpy array of shape (2200, 5828)
Each row corresponds to 2 ravel'd face images of original size 62 x 47
pixels. Changing the ``slice_`` or resize parameters will change the shape
of the output.
pairs : numpy array of shape (2200, 2, 62, 47)
Each row has 2 face images corresponding to same or different person
from the dataset containing 5749 people. Changing the ``slice_`` or resize
parameters will change the shape of the output.
    target : numpy array of shape (2200,)
        Labels associated with each pair of images. The two label values
        correspond to different persons or the same person.
DESCR : string
Description of the Labeled Faces in the Wild (LFW) dataset.
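
    A hedged usage sketch (shapes follow the description above; the archive
    is downloaded on first use):

    >>> from sklearn.datasets import fetch_lfw_pairs  # doctest: +SKIP
    >>> lfw_pairs_train = fetch_lfw_pairs(subset='train')  # doctest: +SKIP
    >>> lfw_pairs_train.pairs.shape  # doctest: +SKIP
    (2200, 2, 62, 47)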
"""
lfw_home, data_folder_path = check_fetch_lfw(
data_home=data_home, funneled=funneled,
download_if_missing=download_if_missing)
logger.info('Loading %s LFW pairs from %s', subset, lfw_home)
# wrap the loader in a memoizing function that will return memmaped data
# arrays for optimal memory usage
m = Memory(cachedir=lfw_home, compress=6, verbose=0)
load_func = m.cache(_fetch_lfw_pairs)
# select the right metadata file according to the requested subset
label_filenames = {
'train': 'pairsDevTrain.txt',
'test': 'pairsDevTest.txt',
'10_folds': 'pairs.txt',
}
if subset not in label_filenames:
raise ValueError("subset='%s' is invalid: should be one of %r" % (
subset, list(sorted(label_filenames.keys()))))
index_file_path = join(lfw_home, label_filenames[subset])
# load and memoize the pairs as np arrays
pairs, target, target_names = load_func(
index_file_path, data_folder_path, resize=resize, color=color,
slice_=slice_)
# pack the results as a Bunch instance
return Bunch(data=pairs.reshape(len(pairs), -1), pairs=pairs,
target=target, target_names=target_names,
DESCR="'%s' segment of the LFW pairs dataset" % subset)
@deprecated("Function 'load_lfw_pairs' has been deprecated in 0.17 and will be "
"removed in 0.19."
"Use fetch_lfw_pairs(download_if_missing=False) instead.")
def load_lfw_pairs(download_if_missing=False, **kwargs):
"""Alias for fetch_lfw_pairs(download_if_missing=False)
Check fetch_lfw_pairs.__doc__ for the documentation and parameter list.
"""
return fetch_lfw_pairs(download_if_missing=download_if_missing, **kwargs)
| bsd-3-clause |
hlin117/scikit-learn | examples/gaussian_process/plot_gpr_noisy_targets.py | 64 | 3706 | """
=========================================================
Gaussian Processes regression: basic introductory example
=========================================================
A simple one-dimensional regression example computed in two different ways:
1. A noise-free case
2. A noisy case with known noise-level per datapoint
In both cases, the kernel's parameters are estimated using the maximum
likelihood principle.
The figures illustrate the interpolating property of the Gaussian Process
model as well as its probabilistic nature in the form of a pointwise 95%
confidence interval.
Note that the parameter ``alpha`` is applied as a Tikhonov
regularization of the assumed covariance between the training points.
"""
print(__doc__)
# Author: Vincent Dubourg <[email protected]>
# Jake Vanderplas <[email protected]>
#         Jan Hendrik Metzen <[email protected]>
# License: BSD 3 clause
import numpy as np
from matplotlib import pyplot as plt
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import RBF, ConstantKernel as C
np.random.seed(1)
def f(x):
"""The function to predict."""
return x * np.sin(x)
# ----------------------------------------------------------------------
# First the noiseless case
X = np.atleast_2d([1., 3., 5., 6., 7., 8.]).T
# Observations
y = f(X).ravel()
# Mesh the input space for evaluations of the real function, the prediction and
# its MSE
x = np.atleast_2d(np.linspace(0, 10, 1000)).T
# Instantiate a Gaussian Process model
kernel = C(1.0, (1e-3, 1e3)) * RBF(10, (1e-2, 1e2))
gp = GaussianProcessRegressor(kernel=kernel, n_restarts_optimizer=9)
# Fit to data using Maximum Likelihood Estimation of the parameters
gp.fit(X, y)
# Make the prediction on the meshed x-axis (ask for MSE as well)
y_pred, sigma = gp.predict(x, return_std=True)
# Plot the function, the prediction and the 95% confidence interval based on
# the MSE
fig = plt.figure()
plt.plot(x, f(x), 'r:', label=u'$f(x) = x\,\sin(x)$')
plt.plot(X, y, 'r.', markersize=10, label=u'Observations')
plt.plot(x, y_pred, 'b-', label=u'Prediction')
plt.fill(np.concatenate([x, x[::-1]]),
np.concatenate([y_pred - 1.9600 * sigma,
(y_pred + 1.9600 * sigma)[::-1]]),
alpha=.5, fc='b', ec='None', label='95% confidence interval')
plt.xlabel('$x$')
plt.ylabel('$f(x)$')
plt.ylim(-10, 20)
plt.legend(loc='upper left')
# ----------------------------------------------------------------------
# now the noisy case
X = np.linspace(0.1, 9.9, 20)
X = np.atleast_2d(X).T
# Observations and noise
y = f(X).ravel()
dy = 0.5 + 1.0 * np.random.random(y.shape)
noise = np.random.normal(0, dy)
y += noise
# Instantiate a Gaussian Process model
gp = GaussianProcessRegressor(kernel=kernel, alpha=dy ** 2,
n_restarts_optimizer=10)
# Fit to data using Maximum Likelihood Estimation of the parameters
gp.fit(X, y)
# Make the prediction on the meshed x-axis (ask for MSE as well)
y_pred, sigma = gp.predict(x, return_std=True)
# Plot the function, the prediction and the 95% confidence interval based on
# the MSE
fig = plt.figure()
plt.plot(x, f(x), 'r:', label=u'$f(x) = x\,\sin(x)$')
plt.errorbar(X.ravel(), y, dy, fmt='r.', markersize=10, label=u'Observations')
plt.plot(x, y_pred, 'b-', label=u'Prediction')
plt.fill(np.concatenate([x, x[::-1]]),
np.concatenate([y_pred - 1.9600 * sigma,
(y_pred + 1.9600 * sigma)[::-1]]),
alpha=.5, fc='b', ec='None', label='95% confidence interval')
plt.xlabel('$x$')
plt.ylabel('$f(x)$')
plt.ylim(-10, 20)
plt.legend(loc='upper left')
plt.show()
| bsd-3-clause |
0asa/scikit-learn | examples/missing_values.py | 233 | 3056 | """
======================================================
Imputing missing values before building an estimator
======================================================
This example shows that imputing the missing values can give better results
than discarding the samples containing any missing value.
Imputing does not always improve the predictions, so please check via cross-validation.
Sometimes dropping rows or using marker values is more effective.
Missing values can be replaced by the mean, the median or the most frequent
value using the ``strategy`` hyper-parameter.
The median is a more robust estimator for data with high magnitude variables
which could dominate results (otherwise known as a 'long tail').
Script output::
Score with the entire dataset = 0.56
Score without the samples containing missing values = 0.48
Score after imputation of the missing values = 0.55
In this case, imputing helps the classifier get close to the original score.
"""
import numpy as np
from sklearn.datasets import load_boston
from sklearn.ensemble import RandomForestRegressor
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import Imputer
from sklearn.cross_validation import cross_val_score
rng = np.random.RandomState(0)
dataset = load_boston()
X_full, y_full = dataset.data, dataset.target
n_samples = X_full.shape[0]
n_features = X_full.shape[1]
# Estimate the score on the entire dataset, with no missing values
estimator = RandomForestRegressor(random_state=0, n_estimators=100)
score = cross_val_score(estimator, X_full, y_full).mean()
print("Score with the entire dataset = %.2f" % score)
# Add missing values in 75% of the lines
missing_rate = 0.75
n_missing_samples = int(np.floor(n_samples * missing_rate))
missing_samples = np.hstack((np.zeros(n_samples - n_missing_samples,
dtype=np.bool),
np.ones(n_missing_samples,
dtype=np.bool)))
rng.shuffle(missing_samples)
missing_features = rng.randint(0, n_features, n_missing_samples)
# Estimate the score without the lines containing missing values
X_filtered = X_full[~missing_samples, :]
y_filtered = y_full[~missing_samples]
estimator = RandomForestRegressor(random_state=0, n_estimators=100)
score = cross_val_score(estimator, X_filtered, y_filtered).mean()
print("Score without the samples containing missing values = %.2f" % score)
# Estimate the score after imputation of the missing values
X_missing = X_full.copy()
X_missing[np.where(missing_samples)[0], missing_features] = 0
y_missing = y_full.copy()
estimator = Pipeline([("imputer", Imputer(missing_values=0,
strategy="mean",
axis=0)),
("forest", RandomForestRegressor(random_state=0,
n_estimators=100))])
score = cross_val_score(estimator, X_missing, y_missing).mean()
print("Score after imputation of the missing values = %.2f" % score)
| bsd-3-clause |
kjung/scikit-learn | sklearn/cluster/tests/test_spectral.py | 262 | 7954 | """Testing for Spectral Clustering methods"""
from sklearn.externals.six.moves import cPickle
dumps, loads = cPickle.dumps, cPickle.loads
import numpy as np
from scipy import sparse
from sklearn.utils import check_random_state
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_warns_message
from sklearn.cluster import SpectralClustering, spectral_clustering
from sklearn.cluster.spectral import spectral_embedding
from sklearn.cluster.spectral import discretize
from sklearn.metrics import pairwise_distances
from sklearn.metrics import adjusted_rand_score
from sklearn.metrics.pairwise import kernel_metrics, rbf_kernel
from sklearn.datasets.samples_generator import make_blobs
def test_spectral_clustering():
S = np.array([[1.0, 1.0, 1.0, 0.2, 0.0, 0.0, 0.0],
[1.0, 1.0, 1.0, 0.2, 0.0, 0.0, 0.0],
[1.0, 1.0, 1.0, 0.2, 0.0, 0.0, 0.0],
[0.2, 0.2, 0.2, 1.0, 1.0, 1.0, 1.0],
[0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0],
[0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0],
[0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0]])
for eigen_solver in ('arpack', 'lobpcg'):
for assign_labels in ('kmeans', 'discretize'):
for mat in (S, sparse.csr_matrix(S)):
model = SpectralClustering(random_state=0, n_clusters=2,
affinity='precomputed',
eigen_solver=eigen_solver,
assign_labels=assign_labels
).fit(mat)
labels = model.labels_
if labels[0] == 0:
labels = 1 - labels
assert_array_equal(labels, [1, 1, 1, 0, 0, 0, 0])
model_copy = loads(dumps(model))
assert_equal(model_copy.n_clusters, model.n_clusters)
assert_equal(model_copy.eigen_solver, model.eigen_solver)
assert_array_equal(model_copy.labels_, model.labels_)
def test_spectral_amg_mode():
# Test the amg mode of SpectralClustering
centers = np.array([
[0., 0., 0.],
[10., 10., 10.],
[20., 20., 20.],
])
X, true_labels = make_blobs(n_samples=100, centers=centers,
cluster_std=1., random_state=42)
D = pairwise_distances(X) # Distance matrix
S = np.max(D) - D # Similarity matrix
S = sparse.coo_matrix(S)
try:
from pyamg import smoothed_aggregation_solver
amg_loaded = True
except ImportError:
amg_loaded = False
if amg_loaded:
labels = spectral_clustering(S, n_clusters=len(centers),
random_state=0, eigen_solver="amg")
# We don't care too much that it's good, just that it *worked*.
# There does have to be some lower limit on the performance though.
assert_greater(np.mean(labels == true_labels), .3)
else:
assert_raises(ValueError, spectral_embedding, S,
n_components=len(centers),
random_state=0, eigen_solver="amg")
def test_spectral_unknown_mode():
# Test that SpectralClustering fails with an unknown mode set.
centers = np.array([
[0., 0., 0.],
[10., 10., 10.],
[20., 20., 20.],
])
X, true_labels = make_blobs(n_samples=100, centers=centers,
cluster_std=1., random_state=42)
D = pairwise_distances(X) # Distance matrix
S = np.max(D) - D # Similarity matrix
S = sparse.coo_matrix(S)
assert_raises(ValueError, spectral_clustering, S, n_clusters=2,
random_state=0, eigen_solver="<unknown>")
def test_spectral_unknown_assign_labels():
# Test that SpectralClustering fails with an unknown assign_labels set.
centers = np.array([
[0., 0., 0.],
[10., 10., 10.],
[20., 20., 20.],
])
X, true_labels = make_blobs(n_samples=100, centers=centers,
cluster_std=1., random_state=42)
D = pairwise_distances(X) # Distance matrix
S = np.max(D) - D # Similarity matrix
S = sparse.coo_matrix(S)
assert_raises(ValueError, spectral_clustering, S, n_clusters=2,
random_state=0, assign_labels="<unknown>")
def test_spectral_clustering_sparse():
X, y = make_blobs(n_samples=20, random_state=0,
centers=[[1, 1], [-1, -1]], cluster_std=0.01)
S = rbf_kernel(X, gamma=1)
S = np.maximum(S - 1e-4, 0)
S = sparse.coo_matrix(S)
labels = SpectralClustering(random_state=0, n_clusters=2,
affinity='precomputed').fit(S).labels_
assert_equal(adjusted_rand_score(y, labels), 1)
def test_affinities():
# Note: in the following, random_state has been selected to have
# a dataset that yields a stable eigen decomposition both when built
# on OSX and Linux
X, y = make_blobs(n_samples=20, random_state=0,
centers=[[1, 1], [-1, -1]], cluster_std=0.01
)
# nearest neighbors affinity
sp = SpectralClustering(n_clusters=2, affinity='nearest_neighbors',
random_state=0)
assert_warns_message(UserWarning, 'not fully connected', sp.fit, X)
assert_equal(adjusted_rand_score(y, sp.labels_), 1)
sp = SpectralClustering(n_clusters=2, gamma=2, random_state=0)
labels = sp.fit(X).labels_
assert_equal(adjusted_rand_score(y, labels), 1)
X = check_random_state(10).rand(10, 5) * 10
kernels_available = kernel_metrics()
for kern in kernels_available:
# Additive chi^2 gives a negative similarity matrix which
# doesn't make sense for spectral clustering
if kern != 'additive_chi2':
sp = SpectralClustering(n_clusters=2, affinity=kern,
random_state=0)
labels = sp.fit(X).labels_
assert_equal((X.shape[0],), labels.shape)
sp = SpectralClustering(n_clusters=2, affinity=lambda x, y: 1,
random_state=0)
labels = sp.fit(X).labels_
assert_equal((X.shape[0],), labels.shape)
def histogram(x, y, **kwargs):
# Histogram kernel implemented as a callable.
assert_equal(kwargs, {}) # no kernel_params that we didn't ask for
return np.minimum(x, y).sum()
sp = SpectralClustering(n_clusters=2, affinity=histogram, random_state=0)
labels = sp.fit(X).labels_
assert_equal((X.shape[0],), labels.shape)
# raise error on unknown affinity
sp = SpectralClustering(n_clusters=2, affinity='<unknown>')
assert_raises(ValueError, sp.fit, X)
def test_discretize(seed=8):
# Test the discretize using a noise assignment matrix
random_state = np.random.RandomState(seed)
for n_samples in [50, 100, 150, 500]:
for n_class in range(2, 10):
# random class labels
y_true = random_state.random_integers(0, n_class, n_samples)
y_true = np.array(y_true, np.float)
# noise class assignment matrix
y_indicator = sparse.coo_matrix((np.ones(n_samples),
(np.arange(n_samples),
y_true)),
shape=(n_samples,
n_class + 1))
y_true_noisy = (y_indicator.toarray()
+ 0.1 * random_state.randn(n_samples,
n_class + 1))
y_pred = discretize(y_true_noisy, random_state)
assert_greater(adjusted_rand_score(y_true, y_pred), 0.8)
| bsd-3-clause |
Open-Power-System-Data/renewable_power_plants | util/visualizer.py | 1 | 4165 | import cartopy.crs as ccrs
import cartopy.feature as cfeature
from cartopy.io import shapereader
import geopandas
import shapely
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.pyplot import figure
import cartopy
def visualize_points(latitudes, longitudes, country, categories=None, eps=0.03):
# Remove the locations not in Europe
european_latitude_mask = np.logical_and(latitudes >= 34, latitudes <= 81)
    european_longitude_mask = np.logical_and(longitudes >= -31, longitudes <= 69)
european_mask = np.logical_and(european_latitude_mask, european_longitude_mask)
latitudes = latitudes[european_mask]
longitudes = longitudes[european_mask]
if categories is not None:
categories = categories[european_mask]
# Determine the coordinates of boundary locations
max_lat = latitudes.max()
min_lat = latitudes.min()
max_lon = longitudes.max()
min_lon = longitudes.min()
# Make the area to show a bit larger
max_lat = max_lat + (max_lat - min_lat) * eps
min_lat = min_lat - (max_lat - min_lat) * eps
max_lon = max_lon + (max_lon - min_lon) * eps
min_lon = min_lon - (max_lon - min_lon) * eps
# Get the shape file for visualizing countries
shp_filename = shapereader.natural_earth('10m', 'cultural', 'admin_0_countries')
df_geo = geopandas.read_file(shp_filename)
polygon = df_geo.loc[df_geo['ADMIN'] == country]['geometry'].values[0]
# Make sure that polygon is technically multi-part
# (see https://github.com/SciTools/cartopy/issues/948)
if type(polygon) == shapely.geometry.polygon.Polygon:
polygon=[polygon]
# Make the figure
figure(num=None, figsize=(8, 6), dpi=100, facecolor='white', edgecolor='k')
ax = plt.axes(projection=ccrs.PlateCarree())
ax.add_geometries(polygon, crs=ccrs.PlateCarree(), facecolor='white', edgecolor='0.5', zorder=1)
ax.set_extent([min_lon, max_lon, min_lat, max_lat], crs=ccrs.PlateCarree())
ax.coastlines(resolution='10m', color='black')
# Plot the locations
if categories is None:
ax.scatter(longitudes, latitudes, s=1.5, zorder=2, c='#123456')
else:
labels = categories.unique()
for label in labels:
category_mask = (categories == label)
latitude_subset = latitudes[category_mask]
longitude_subset = longitudes[category_mask]
ax.scatter(longitude_subset, latitude_subset, s=1.5, zorder=2, label=label)
ax.legend()
# Show the figure
plt.show()
def visualize_countries(countries):
title = "Countries currently covered by the OPSD renewable power plants package:\n" + ", ".join(countries)
figure(num=None, figsize=(8, 8), dpi=1000, facecolor='white')
ax = plt.axes(projection=ccrs.PlateCarree())
ax.add_feature(cartopy.feature.OCEAN, facecolor='#0C8FCE')
ax.coastlines(resolution="10m", color="#FFFFFF")
# Get the shape file for visualizing countries
shp_filename = shapereader.natural_earth("10m", 'cultural', 'admin_0_countries')
df_geo = geopandas.read_file(shp_filename)
wider_european_region = shapely.geometry.Polygon([(-31, 34), (-31, 81), (69, 81), (69, 34)])
df_selected = df_geo[df_geo["geometry"].intersects(wider_european_region) & (df_geo["NAME"].isin(countries))]
df_other = df_geo[df_geo["geometry"].intersects(wider_european_region) & (~df_geo["NAME"].isin(countries))]
for index, row in df_selected.iterrows():
country_polygon = row['geometry']
# Mark selected countries
facecolor = "#173F5F"
edgecolor = "#FFFFFF"
# Make sure that polygon is technically multi-part
# (see https://github.com/SciTools/cartopy/issues/948)
if type(country_polygon) == shapely.geometry.polygon.Polygon:
country_polygon = [country_polygon]
ax.add_geometries(country_polygon, crs=ccrs.PlateCarree(), facecolor=facecolor, edgecolor=edgecolor, zorder=2)
for index, row in df_other.iterrows():
country_polygon = row["geometry"]
facecolor = "#EAF7F3"
edgecolor = "#FFFFFF"
if type(country_polygon) == shapely.geometry.polygon.Polygon:
country_polygon = [country_polygon]
ax.add_geometries(country_polygon, crs=ccrs.PlateCarree(), facecolor=facecolor, edgecolor=edgecolor, zorder=1)
ax.set_extent([-31, 69, 34, 81], crs=ccrs.PlateCarree())
plt.title(title, fontsize=8)
plt.show() | mit |
jrbourbeau/cr-composition | plotting/plot_frac_correct.py | 1 | 7544 | #!/usr/bin/env python
from __future__ import division, print_function
import os
import argparse
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.model_selection import StratifiedKFold
import dask
from dask import delayed, multiprocessing
from dask.diagnostics import ProgressBar
import comptools as comp
color_dict = comp.get_color_dict()
@delayed
def get_frac_correct(df_train, df_test, pipeline_str=None, num_groups=4,
energy_key='MC_log_energy'):
'''Calculates the fraction of correctly identified samples in each energy bin
    for each composition in comp_list. In addition, the statistical error for the
fraction correctly identified is calculated.'''
# Input validation
if energy_key not in ['MC_log_energy', 'reco_log_energy']:
raise ValueError("Invalid energy_key ({}) entered. Must be either "
"'MC_log_energy' or 'reco_log_energy'.".format(energy_key))
if pipeline_str is None:
pipeline_str = 'BDT_comp_IC86.2012_{}-groups'.format(num_groups)
# Fit pipeline and get mask for correctly identified events
feature_list, feature_labels = comp.get_training_features()
pipeline = comp.get_pipeline(pipeline_str)
comp_target_str = 'comp_target_{}'.format(num_groups)
pipeline.fit(df_train[feature_list],
df_train[comp_target_str])
test_predictions = pipeline.predict(df_test[feature_list])
correctly_identified_mask = (test_predictions == df_test[comp_target_str])
data = {}
for composition in comp_list + ['total']:
comp_mask = df_test['comp_group_{}'.format(num_groups)] == composition
# Get number of MC comp in each energy bin
num_MC_energy, _ = np.histogram(df_test.loc[comp_mask, energy_key],
bins=energybins.log_energy_bins)
num_MC_energy_err = np.sqrt(num_MC_energy)
# Get number of correctly identified comp in each energy bin
combined_mask = comp_mask & correctly_identified_mask
num_reco_energy, _ = np.histogram(df_test.loc[combined_mask, energy_key],
bins=energybins.log_energy_bins)
num_reco_energy_err = np.sqrt(num_reco_energy)
# Calculate correctly identified fractions as a function of energy
frac_correct, frac_correct_err = comp.ratio_error(
num_reco_energy, num_reco_energy_err,
num_MC_energy, num_MC_energy_err)
data['frac_correct_{}'.format(composition)] = frac_correct
data['frac_correct_err_{}'.format(composition)] = frac_correct_err
return data
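# Note that get_frac_correct is wrapped in dask.delayed, so calling it only
# builds a lazy task graph; nothing is fit until .compute() is called (as done
# in the __main__ block below). A minimal sketch, with df_a/df_b standing in
# for any train/test split (comp_list and energybins must already be defined
# at module level, as in the __main__ block):
#
#     lazy_result = get_frac_correct(df_a, df_b, num_groups=4)
#     result = lazy_result.compute()   # dict of frac_correct_* arrays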
if __name__ == '__main__':
description='Makes and saves classification accuracy vs. energy plot'
parser = argparse.ArgumentParser(description=description)
parser.add_argument('-c', '--config', dest='config',
choices=comp.simfunctions.get_sim_configs(),
help='Detector configuration')
parser.add_argument('--num_groups', dest='num_groups', type=int,
default=4,
help='Number of composition groups to use.')
    parser.add_argument('--n_splits', dest='n_splits', type=int,
                        default=10,
                        help='Number of cross-validation folds')
    parser.add_argument('--n_jobs', dest='n_jobs', type=int,
                        default=1,
                        help='Number of parallel jobs to run')
parser.add_argument('--energy', dest='energy',
default='MC',
choices=['MC', 'reco'],
help='Energy that should be used.')
args = parser.parse_args()
config = args.config
num_groups = args.num_groups
n_splits = args.n_splits
n_jobs = args.n_jobs
energy_key = 'MC_log_energy' if args.energy == 'MC' else 'reco_log_energy'
energybins = comp.get_energybins(config)
comp_list = comp.get_comp_list(num_groups=num_groups)
feature_list, feature_labels = comp.get_training_features()
# pipeline_str = 'xgboost_comp_{}_{}-groups'.format(config, num_groups)
# pipeline_str = 'BDT_comp_{}_{}-groups'.format(config, num_groups)
pipeline_str = 'SGD_comp_{}_{}-groups'.format(config, num_groups)
df_train, df_test = comp.load_sim(config=config,
log_energy_min=energybins.log_energy_min,
log_energy_max=energybins.log_energy_max,
test_size=0.5)
skf = StratifiedKFold(n_splits=n_splits, shuffle=True, random_state=2)
folds = []
for train_index, test_index in skf.split(df_train, df_train['comp_target_{}'.format(num_groups)]):
df_train_fold = df_train.iloc[train_index]
df_test_fold = df_train.iloc[test_index]
frac_correct = get_frac_correct(df_train_fold, df_test_fold,
pipeline_str=pipeline_str,
num_groups=num_groups,
energy_key=energy_key)
folds.append(frac_correct)
df_cv = delayed(pd.DataFrame.from_records)(folds)
# Run get_frac_correct on each fold in parallel
print('Running {}-fold CV model evaluation...'.format(n_splits))
with ProgressBar():
get = multiprocessing.get if n_jobs > 1 else dask.get
        df_cv = df_cv.compute(get=get, num_workers=n_jobs)
# Plot correctly identified vs. energy for each composition
fig, ax = plt.subplots()
for composition in comp_list:
key = 'frac_correct_{}'.format(composition)
performance_mean = np.mean(df_cv[key].values)
performance_std = np.std(df_cv[key].values)
comp.plot_steps(energybins.log_energy_bins, performance_mean, yerr=performance_std,
ax=ax, color=color_dict[composition], label=composition)
if energy_key == 'MC_log_energy':
        xlabel = r'$\mathrm{\log_{10}(E_{true}/GeV)}$'
else:
        xlabel = r'$\mathrm{\log_{10}(E_{reco}/GeV)}$'
fontsize = 18
ax.set_xlabel(xlabel, fontsize=fontsize)
ax.set_ylabel('Classification accuracy [{:d}-fold CV]'.format(n_splits),
fontsize=fontsize)
plt.xticks(fontsize=fontsize)
plt.yticks(fontsize=fontsize)
ax.set_ylim([0.0, 1.0])
ax.set_xlim(6.4, energybins.log_energy_max)
# ax.set_xlim(energybins.log_energy_min, energybins.log_energy_max)
ax.grid()
leg = plt.legend(loc='upper center', frameon=False,
                     bbox_to_anchor=(0.5, 1.15),
ncol=len(comp_list)+1,
fancybox=False, fontsize=fontsize)
# set the linewidth of each legend object
for legobj in leg.legendHandles:
legobj.set_linewidth(3.0)
# acc = np.nanmean(frac_correct_folds['total'])*100
# acc_err = np.nanstd(frac_correct_folds['total'])*100
# cv_str = 'Total accuracy:\n{}\% (+/- {}\%)'.format(int(acc)+1,
# int(acc_err)+1)
# ax.text(7.4, 0.2, cv_str,
# ha="center", va="center", size=14,
# bbox=dict(boxstyle='round', fc="white", ec="gray", lw=0.8))
outfile = os.path.join(comp.paths.figures_dir, 'model_evaluation',
'frac-correct_{}_{}_{}-groups.png'.format(
energy_key.replace('_', '-'), config, num_groups))
comp.check_output_dir(outfile)
plt.savefig(outfile)
| mit |
shaharkadmiel/seispy | setup.py | 2 | 2383 | import inspect
import os
import re
from setuptools import setup
INSTALL_REQUIRES = [
'numpy',
'scipy',
'matplotlib',
'obspy',
'gdal',
]
SETUP_DIRECTORY = os.path.dirname(os.path.abspath(inspect.getfile(
inspect.currentframe())))
ENTRY_POINTS = {
'console_scripts': [
'pySW4-plot-image = pySW4.cli.plot_image:main',
'pySW4-create-plots = pySW4.cli.create_all_plots:main',
'png2mp4 = pySW4.cli.png2mp4:main']}
def find_packages():
"""
Simple function to find all modules under the current folder.
"""
modules = []
for dirpath, _, filenames in os.walk(
os.path.join(SETUP_DIRECTORY, "pySW4")):
if "__init__.py" in filenames:
modules.append(os.path.relpath(dirpath, SETUP_DIRECTORY))
return [_i.replace(os.sep, ".") for _i in modules]
# get the package version from the main __init__ file.
version_regex_pattern = r"__version__ += +(['\"])([^\1]+)\1"
for line in open(os.path.join(SETUP_DIRECTORY, 'pySW4', '__init__.py')):
if '__version__' in line:
package_version = re.match(version_regex_pattern, line).group(2)
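# For a line like `__version__ = "0.3.0"` the pattern captures the quote
# character in group(1) and the version string in group(2). A quick sanity
# check (illustrative input only):
#
#     import re
#     m = re.match(version_regex_pattern, '__version__ = "0.3.0"')
#     assert m.group(2) == '0.3.0'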
setup(
name="pySW4",
version=package_version,
description="Python routines for interaction with SW4",
author="Shahar Shani-Kadmiel, Omry Volk, Tobias Megies",
author_email="[email protected]",
url="https://github.com/shaharkadmiel/pySW4",
download_url="https://github.com/shaharkadmiel/pySW4.git",
install_requires=INSTALL_REQUIRES,
keywords=["pySW4", "seismology", "SW4"],
packages=find_packages(),
entry_points=ENTRY_POINTS,
classifiers=[
"Programming Language :: Python",
"Development Status :: 4 - Beta",
"Intended Audience :: Science/Research",
"Intended Audience :: Developers",
"License :: OSI Approved :: GNU General Public License v3 (GPLv3)",
"Operating System :: OS Independent",
"Topic :: Software Development :: Libraries :: Python Modules",
],
long_description='pySW4 is an open-source project dedicated to '
'provide a Python framework for working with '
'numerical simulations of seismic-wave propagation '
'with SW4 in all phases of the task (preprocessing, '
'post-processing and runtime visualization).'
)
| gpl-3.0 |
nesterione/scikit-learn | sklearn/lda.py | 72 | 17751 | """
Linear Discriminant Analysis (LDA)
"""
# Authors: Clemens Brunner
# Martin Billinger
# Matthieu Perrot
# Mathieu Blondel
# License: BSD 3-Clause
from __future__ import print_function
import warnings
import numpy as np
from scipy import linalg
from .externals.six import string_types
from .base import BaseEstimator, TransformerMixin
from .linear_model.base import LinearClassifierMixin
from .covariance import ledoit_wolf, empirical_covariance, shrunk_covariance
from .utils.multiclass import unique_labels
from .utils import check_array, check_X_y
from .utils.validation import check_is_fitted
from .utils.fixes import bincount
from .preprocessing import StandardScaler
__all__ = ['LDA']
def _cov(X, shrinkage=None):
"""Estimate covariance matrix (using optional shrinkage).
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
shrinkage : string or float, optional
Shrinkage parameter, possible values:
- None or 'empirical': no shrinkage (default).
- 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
- float between 0 and 1: fixed shrinkage parameter.
Returns
-------
s : array, shape (n_features, n_features)
Estimated covariance matrix.
"""
shrinkage = "empirical" if shrinkage is None else shrinkage
if isinstance(shrinkage, string_types):
if shrinkage == 'auto':
sc = StandardScaler() # standardize features
X = sc.fit_transform(X)
s = ledoit_wolf(X)[0]
s = sc.std_[:, np.newaxis] * s * sc.std_[np.newaxis, :] # rescale
elif shrinkage == 'empirical':
s = empirical_covariance(X)
else:
raise ValueError('unknown shrinkage parameter')
elif isinstance(shrinkage, float) or isinstance(shrinkage, int):
if shrinkage < 0 or shrinkage > 1:
raise ValueError('shrinkage parameter must be between 0 and 1')
s = shrunk_covariance(empirical_covariance(X), shrinkage)
else:
raise TypeError('shrinkage must be of string or int type')
return s
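# A minimal sketch of the three shrinkage modes handled above (toy data,
# illustrative only):
#
#     import numpy as np
#     rng = np.random.RandomState(0)
#     X = rng.randn(50, 10)
#     s_empirical = _cov(X)           # shrinkage=None -> empirical covariance
#     s_ledoit = _cov(X, 'auto')      # Ledoit-Wolf automatic shrinkage
#     s_fixed = _cov(X, 0.3)          # fixed shrinkage between 0 and 1
#
# Each call returns an (n_features, n_features) covariance estimate.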
def _class_means(X, y):
"""Compute class means.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values.
Returns
-------
    means : array-like, shape (n_classes, n_features)
Class means.
"""
means = []
classes = np.unique(y)
for group in classes:
Xg = X[y == group, :]
means.append(Xg.mean(0))
return np.asarray(means)
def _class_cov(X, y, priors=None, shrinkage=None):
"""Compute class covariance matrix.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values.
priors : array-like, shape (n_classes,)
Class priors.
shrinkage : string or float, optional
Shrinkage parameter, possible values:
- None: no shrinkage (default).
- 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
- float between 0 and 1: fixed shrinkage parameter.
Returns
-------
cov : array-like, shape (n_features, n_features)
Class covariance matrix.
"""
classes = np.unique(y)
covs = []
for group in classes:
Xg = X[y == group, :]
covs.append(np.atleast_2d(_cov(Xg, shrinkage)))
return np.average(covs, axis=0, weights=priors)
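# A small worked example for the two helpers above (values chosen so the class
# means are easy to verify by hand):
#
#     X = np.array([[0., 0.], [0., 2.], [4., 0.], [4., 2.]])
#     y = np.array([0, 0, 1, 1])
#     _class_means(X, y)   # -> array([[0., 1.], [4., 1.]])
#     _class_cov(X, y)     # within-class covariance averaged over classes,
#                          # shape (2, 2)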
class LDA(BaseEstimator, LinearClassifierMixin, TransformerMixin):
"""Linear Discriminant Analysis (LDA).
A classifier with a linear decision boundary, generated by fitting class
conditional densities to the data and using Bayes' rule.
The model fits a Gaussian density to each class, assuming that all classes
share the same covariance matrix.
The fitted model can also be used to reduce the dimensionality of the input
by projecting it to the most discriminative directions.
Read more in the :ref:`User Guide <lda_qda>`.
Parameters
----------
solver : string, optional
Solver to use, possible values:
- 'svd': Singular value decomposition (default). Does not compute the
covariance matrix, therefore this solver is recommended for
data with a large number of features.
- 'lsqr': Least squares solution, can be combined with shrinkage.
- 'eigen': Eigenvalue decomposition, can be combined with shrinkage.
shrinkage : string or float, optional
Shrinkage parameter, possible values:
- None: no shrinkage (default).
- 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
- float between 0 and 1: fixed shrinkage parameter.
Note that shrinkage works only with 'lsqr' and 'eigen' solvers.
priors : array, optional, shape (n_classes,)
Class priors.
n_components : int, optional
Number of components (< n_classes - 1) for dimensionality reduction.
store_covariance : bool, optional
Additionally compute class covariance matrix (default False).
tol : float, optional
Threshold used for rank estimation in SVD solver.
Attributes
----------
coef_ : array, shape (n_features,) or (n_classes, n_features)
Weight vector(s).
    intercept_ : array, shape (n_classes,)
Intercept term.
covariance_ : array-like, shape (n_features, n_features)
Covariance matrix (shared by all classes).
means_ : array-like, shape (n_classes, n_features)
Class means.
priors_ : array-like, shape (n_classes,)
Class priors (sum to 1).
scalings_ : array-like, shape (rank, n_classes - 1)
Scaling of the features in the space spanned by the class centroids.
xbar_ : array-like, shape (n_features,)
Overall mean.
classes_ : array-like, shape (n_classes,)
Unique class labels.
See also
--------
sklearn.qda.QDA: Quadratic discriminant analysis
Notes
-----
The default solver is 'svd'. It can perform both classification and
transform, and it does not rely on the calculation of the covariance
matrix. This can be an advantage in situations where the number of features
is large. However, the 'svd' solver cannot be used with shrinkage.
The 'lsqr' solver is an efficient algorithm that only works for
classification. It supports shrinkage.
The 'eigen' solver is based on the optimization of the between class
scatter to within class scatter ratio. It can be used for both
classification and transform, and it supports shrinkage. However, the
'eigen' solver needs to compute the covariance matrix, so it might not be
suitable for situations with a high number of features.
Examples
--------
>>> import numpy as np
>>> from sklearn.lda import LDA
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> y = np.array([1, 1, 1, 2, 2, 2])
>>> clf = LDA()
>>> clf.fit(X, y)
LDA(n_components=None, priors=None, shrinkage=None, solver='svd',
store_covariance=False, tol=0.0001)
>>> print(clf.predict([[-0.8, -1]]))
[1]
"""
def __init__(self, solver='svd', shrinkage=None, priors=None,
n_components=None, store_covariance=False, tol=1e-4):
self.solver = solver
self.shrinkage = shrinkage
self.priors = priors
self.n_components = n_components
self.store_covariance = store_covariance # used only in svd solver
self.tol = tol # used only in svd solver
def _solve_lsqr(self, X, y, shrinkage):
"""Least squares solver.
The least squares solver computes a straightforward solution of the
optimal decision rule based directly on the discriminant functions. It
can only be used for classification (with optional shrinkage), because
estimation of eigenvectors is not performed. Therefore, dimensionality
reduction with the transform is not supported.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,) or (n_samples, n_classes)
Target values.
shrinkage : string or float, optional
Shrinkage parameter, possible values:
- None: no shrinkage (default).
- 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
- float between 0 and 1: fixed shrinkage parameter.
Notes
-----
This solver is based on [1]_, section 2.6.2, pp. 39-41.
References
----------
.. [1] R. O. Duda, P. E. Hart, D. G. Stork. Pattern Classification
(Second Edition). John Wiley & Sons, Inc., New York, 2001. ISBN
0-471-05669-3.
"""
self.means_ = _class_means(X, y)
self.covariance_ = _class_cov(X, y, self.priors_, shrinkage)
self.coef_ = linalg.lstsq(self.covariance_, self.means_.T)[0].T
self.intercept_ = (-0.5 * np.diag(np.dot(self.means_, self.coef_.T))
+ np.log(self.priors_))
def _solve_eigen(self, X, y, shrinkage):
"""Eigenvalue solver.
The eigenvalue solver computes the optimal solution of the Rayleigh
coefficient (basically the ratio of between class scatter to within
class scatter). This solver supports both classification and
dimensionality reduction (with optional shrinkage).
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values.
shrinkage : string or float, optional
Shrinkage parameter, possible values:
- None: no shrinkage (default).
- 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
- float between 0 and 1: fixed shrinkage constant.
Notes
-----
This solver is based on [1]_, section 3.8.3, pp. 121-124.
References
----------
.. [1] R. O. Duda, P. E. Hart, D. G. Stork. Pattern Classification
(Second Edition). John Wiley & Sons, Inc., New York, 2001. ISBN
0-471-05669-3.
"""
self.means_ = _class_means(X, y)
self.covariance_ = _class_cov(X, y, self.priors_, shrinkage)
Sw = self.covariance_ # within scatter
St = _cov(X, shrinkage) # total scatter
Sb = St - Sw # between scatter
evals, evecs = linalg.eigh(Sb, Sw)
evecs = evecs[:, np.argsort(evals)[::-1]] # sort eigenvectors
# evecs /= np.linalg.norm(evecs, axis=0) # doesn't work with numpy 1.6
evecs /= np.apply_along_axis(np.linalg.norm, 0, evecs)
self.scalings_ = evecs
self.coef_ = np.dot(self.means_, evecs).dot(evecs.T)
self.intercept_ = (-0.5 * np.diag(np.dot(self.means_, self.coef_.T))
+ np.log(self.priors_))
def _solve_svd(self, X, y, store_covariance=False, tol=1.0e-4):
"""SVD solver.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values.
store_covariance : bool, optional
Additionally compute class covariance matrix (default False).
tol : float, optional
Threshold used for rank estimation.
"""
n_samples, n_features = X.shape
n_classes = len(self.classes_)
self.means_ = _class_means(X, y)
if store_covariance:
self.covariance_ = _class_cov(X, y, self.priors_)
Xc = []
for idx, group in enumerate(self.classes_):
Xg = X[y == group, :]
Xc.append(Xg - self.means_[idx])
self.xbar_ = np.dot(self.priors_, self.means_)
Xc = np.concatenate(Xc, axis=0)
# 1) within (univariate) scaling by with classes std-dev
std = Xc.std(axis=0)
# avoid division by zero in normalization
std[std == 0] = 1.
fac = 1. / (n_samples - n_classes)
# 2) Within variance scaling
X = np.sqrt(fac) * (Xc / std)
# SVD of centered (within)scaled data
U, S, V = linalg.svd(X, full_matrices=False)
rank = np.sum(S > tol)
if rank < n_features:
warnings.warn("Variables are collinear.")
# Scaling of within covariance is: V' 1/S
scalings = (V[:rank] / std).T / S[:rank]
# 3) Between variance scaling
# Scale weighted centers
X = np.dot(((np.sqrt((n_samples * self.priors_) * fac)) *
(self.means_ - self.xbar_).T).T, scalings)
# Centers are living in a space with n_classes-1 dim (maximum)
# Use SVD to find projection in the space spanned by the
# (n_classes) centers
_, S, V = linalg.svd(X, full_matrices=0)
rank = np.sum(S > tol * S[0])
self.scalings_ = np.dot(scalings, V.T[:, :rank])
coef = np.dot(self.means_ - self.xbar_, self.scalings_)
self.intercept_ = (-0.5 * np.sum(coef ** 2, axis=1)
+ np.log(self.priors_))
self.coef_ = np.dot(coef, self.scalings_.T)
self.intercept_ -= np.dot(self.xbar_, self.coef_.T)
def fit(self, X, y, store_covariance=False, tol=1.0e-4):
"""Fit LDA model according to the given training data and parameters.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array, shape (n_samples,)
Target values.
"""
if store_covariance:
warnings.warn("'store_covariance' was moved to the __init__()"
"method in version 0.16 and will be removed from"
"fit() in version 0.18.", DeprecationWarning)
else:
store_covariance = self.store_covariance
if tol != 1.0e-4:
warnings.warn("'tol' was moved to __init__() method in version"
" 0.16 and will be removed from fit() in 0.18",
DeprecationWarning)
self.tol = tol
X, y = check_X_y(X, y)
self.classes_ = unique_labels(y)
if self.priors is None: # estimate priors from sample
_, y_t = np.unique(y, return_inverse=True) # non-negative ints
self.priors_ = bincount(y_t) / float(len(y))
else:
self.priors_ = self.priors
if self.solver == 'svd':
if self.shrinkage is not None:
raise NotImplementedError('shrinkage not supported')
self._solve_svd(X, y, store_covariance=store_covariance, tol=tol)
elif self.solver == 'lsqr':
self._solve_lsqr(X, y, shrinkage=self.shrinkage)
elif self.solver == 'eigen':
self._solve_eigen(X, y, shrinkage=self.shrinkage)
else:
raise ValueError("unknown solver {} (valid solvers are 'svd', "
"'lsqr', and 'eigen').".format(self.solver))
if self.classes_.size == 2: # treat binary case as a special case
self.coef_ = np.array(self.coef_[1, :] - self.coef_[0, :], ndmin=2)
self.intercept_ = np.array(self.intercept_[1] - self.intercept_[0],
ndmin=1)
return self
def transform(self, X):
"""Project data to maximize class separation.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
Returns
-------
X_new : array, shape (n_samples, n_components)
Transformed data.
"""
if self.solver == 'lsqr':
raise NotImplementedError("transform not implemented for 'lsqr' "
"solver (use 'svd' or 'eigen').")
check_is_fitted(self, ['xbar_', 'scalings_'], all_or_any=any)
X = check_array(X)
if self.solver == 'svd':
X_new = np.dot(X - self.xbar_, self.scalings_)
elif self.solver == 'eigen':
X_new = np.dot(X, self.scalings_)
n_components = X.shape[1] if self.n_components is None \
else self.n_components
return X_new[:, :n_components]
def predict_proba(self, X):
"""Estimate probability.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
Returns
-------
C : array, shape (n_samples, n_classes)
Estimated probabilities.
"""
prob = self.decision_function(X)
prob *= -1
np.exp(prob, prob)
prob += 1
np.reciprocal(prob, prob)
if len(self.classes_) == 2: # binary case
return np.column_stack([1 - prob, prob])
else:
# OvR normalization, like LibLinear's predict_probability
prob /= prob.sum(axis=1).reshape((prob.shape[0], -1))
return prob
def predict_log_proba(self, X):
"""Estimate log probability.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
Returns
-------
C : array, shape (n_samples, n_classes)
Estimated log probabilities.
"""
return np.log(self.predict_proba(X))
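# A minimal sketch of the solver/shrinkage combinations described in the class
# docstring above (same toy data as the docstring example):
#
#     import numpy as np
#     from sklearn.lda import LDA
#     X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
#     y = np.array([1, 1, 1, 2, 2, 2])
#     LDA(solver='lsqr', shrinkage='auto').fit(X, y)   # shrinkage supported
#     LDA(solver='eigen', shrinkage=0.1).fit(X, y)     # shrinkage supported
#     LDA(solver='svd', shrinkage='auto').fit(X, y)    # raises
#                                                      # NotImplementedError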
| bsd-3-clause |
ldirer/scikit-learn | sklearn/semi_supervised/label_propagation.py | 7 | 16693 | # coding=utf8
"""
Label propagation in the context of this module refers to a set of
semi-supervised classification algorithms. At a high level, these algorithms
work by forming a fully-connected graph between all points given and solving
for the steady-state distribution of labels at each point.
These algorithms perform very well in practice. The cost of running can be very
expensive, at approximately O(N^3) where N is the number of (labeled and
unlabeled) points. The theory (why they perform so well) is motivated by
intuitions from random walk algorithms and geometric relationships in the data.
For more information see the references below.
Model Features
--------------
Label clamping:
The algorithm tries to learn distributions of labels over the dataset. In the
"Hard Clamp" mode, the true ground labels are never allowed to change. They
are clamped into position. In the "Soft Clamp" mode, they are allowed some
wiggle room, but some alpha of their original value will always be retained.
Hard clamp is the same as soft clamping with alpha set to 1.
Kernel:
A function which projects a vector into some higher dimensional space. This
implementation supports RBF and KNN kernels. Using the RBF kernel generates
a dense matrix of size O(N^2). KNN kernel will generate a sparse matrix of
size O(k*N) which will run much faster. See the documentation for SVMs for
more info on kernels.
Examples
--------
>>> from sklearn import datasets
>>> from sklearn.semi_supervised import LabelPropagation
>>> label_prop_model = LabelPropagation()
>>> iris = datasets.load_iris()
>>> random_unlabeled_points = np.where(np.random.randint(0, 2,
... size=len(iris.target)))
>>> labels = np.copy(iris.target)
>>> labels[random_unlabeled_points] = -1
>>> label_prop_model.fit(iris.data, labels)
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
LabelPropagation(...)
Notes
-----
References:
[1] Yoshua Bengio, Olivier Delalleau, Nicolas Le Roux. In Semi-Supervised
Learning (2006), pp. 193-216
[2] Olivier Delalleau, Yoshua Bengio, Nicolas Le Roux. Efficient
Non-Parametric Function Induction in Semi-Supervised Learning. AISTAT 2005
"""
# Authors: Clay Woolam <[email protected]>
# License: BSD
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy import sparse
from ..base import BaseEstimator, ClassifierMixin
from ..externals import six
from ..metrics.pairwise import rbf_kernel
from ..neighbors.unsupervised import NearestNeighbors
from ..utils.extmath import safe_sparse_dot
from ..utils.multiclass import check_classification_targets
from ..utils.validation import check_X_y, check_is_fitted, check_array
# Helper functions
def _not_converged(y_truth, y_prediction, tol=1e-3):
"""basic convergence check"""
return np.abs(y_truth - y_prediction).sum() > tol
class BaseLabelPropagation(six.with_metaclass(ABCMeta, BaseEstimator,
ClassifierMixin)):
"""Base class for label propagation module.
Parameters
----------
kernel : {'knn', 'rbf', callable}
String identifier for kernel function to use or the kernel function
itself. Only 'rbf' and 'knn' strings are valid inputs. The function
passed should take two inputs, each of shape [n_samples, n_features],
and return a [n_samples, n_samples] shaped weight matrix
gamma : float
Parameter for rbf kernel
alpha : float
Clamping factor
    max_iter : int
        Maximum number of iterations allowed
tol : float
Convergence tolerance: threshold to consider the system at steady
state
n_neighbors : integer > 0
Parameter for knn kernel
n_jobs : int, optional (default = 1)
The number of parallel jobs to run.
If ``-1``, then the number of jobs is set to the number of CPU cores.
"""
def __init__(self, kernel='rbf', gamma=20, n_neighbors=7,
alpha=1, max_iter=30, tol=1e-3, n_jobs=1):
self.max_iter = max_iter
self.tol = tol
# kernel parameters
self.kernel = kernel
self.gamma = gamma
self.n_neighbors = n_neighbors
# clamping factor
self.alpha = alpha
self.n_jobs = n_jobs
def _get_kernel(self, X, y=None):
if self.kernel == "rbf":
if y is None:
return rbf_kernel(X, X, gamma=self.gamma)
else:
return rbf_kernel(X, y, gamma=self.gamma)
elif self.kernel == "knn":
if self.nn_fit is None:
self.nn_fit = NearestNeighbors(self.n_neighbors,
n_jobs=self.n_jobs).fit(X)
if y is None:
return self.nn_fit.kneighbors_graph(self.nn_fit._fit_X,
self.n_neighbors,
mode='connectivity')
else:
return self.nn_fit.kneighbors(y, return_distance=False)
elif callable(self.kernel):
if y is None:
return self.kernel(X, X)
else:
return self.kernel(X, y)
else:
raise ValueError("%s is not a valid kernel. Only rbf and knn"
" or an explicit function "
" are supported at this time." % self.kernel)
@abstractmethod
def _build_graph(self):
raise NotImplementedError("Graph construction must be implemented"
" to fit a label propagation model.")
def predict(self, X):
"""Performs inductive inference across the model.
Parameters
----------
X : array_like, shape = [n_samples, n_features]
Returns
-------
y : array_like, shape = [n_samples]
Predictions for input data
"""
probas = self.predict_proba(X)
return self.classes_[np.argmax(probas, axis=1)].ravel()
def predict_proba(self, X):
"""Predict probability for each possible outcome.
Compute the probability estimates for each single sample in X
and each possible outcome seen during training (categorical
distribution).
Parameters
----------
X : array_like, shape = [n_samples, n_features]
Returns
-------
probabilities : array, shape = [n_samples, n_classes]
Normalized probability distributions across
class labels
"""
check_is_fitted(self, 'X_')
X_2d = check_array(X, accept_sparse=['csc', 'csr', 'coo', 'dok',
'bsr', 'lil', 'dia'])
weight_matrices = self._get_kernel(self.X_, X_2d)
if self.kernel == 'knn':
probabilities = []
for weight_matrix in weight_matrices:
ine = np.sum(self.label_distributions_[weight_matrix], axis=0)
probabilities.append(ine)
probabilities = np.array(probabilities)
else:
weight_matrices = weight_matrices.T
probabilities = np.dot(weight_matrices, self.label_distributions_)
normalizer = np.atleast_2d(np.sum(probabilities, axis=1)).T
probabilities /= normalizer
return probabilities
def fit(self, X, y):
"""Fit a semi-supervised label propagation model based
All the input data is provided matrix X (labeled and unlabeled)
and corresponding label matrix y with a dedicated marker value for
unlabeled samples.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
A {n_samples by n_samples} size matrix will be created from this
y : array_like, shape = [n_samples]
n_labeled_samples (unlabeled points are marked as -1)
All unlabeled samples will be transductively assigned labels
Returns
-------
self : returns an instance of self.
"""
X, y = check_X_y(X, y)
self.X_ = X
check_classification_targets(y)
# actual graph construction (implementations should override this)
graph_matrix = self._build_graph()
# label construction
# construct a categorical distribution for classification only
classes = np.unique(y)
classes = (classes[classes != -1])
self.classes_ = classes
n_samples, n_classes = len(y), len(classes)
y = np.asarray(y)
unlabeled = y == -1
clamp_weights = np.ones((n_samples, 1))
clamp_weights[unlabeled, 0] = self.alpha
# initialize distributions
self.label_distributions_ = np.zeros((n_samples, n_classes))
for label in classes:
self.label_distributions_[y == label, classes == label] = 1
y_static = np.copy(self.label_distributions_)
if self.alpha > 0.:
y_static *= 1 - self.alpha
y_static[unlabeled] = 0
l_previous = np.zeros((self.X_.shape[0], n_classes))
remaining_iter = self.max_iter
if sparse.isspmatrix(graph_matrix):
graph_matrix = graph_matrix.tocsr()
while (_not_converged(self.label_distributions_, l_previous, self.tol)
and remaining_iter > 1):
l_previous = self.label_distributions_
self.label_distributions_ = safe_sparse_dot(
graph_matrix, self.label_distributions_)
# clamp
self.label_distributions_ = np.multiply(
clamp_weights, self.label_distributions_) + y_static
remaining_iter -= 1
normalizer = np.sum(self.label_distributions_, axis=1)[:, np.newaxis]
self.label_distributions_ /= normalizer
# set the transduction item
transduction = self.classes_[np.argmax(self.label_distributions_,
axis=1)]
self.transduction_ = transduction.ravel()
self.n_iter_ = self.max_iter - remaining_iter
return self
class LabelPropagation(BaseLabelPropagation):
"""Label Propagation classifier
Read more in the :ref:`User Guide <label_propagation>`.
Parameters
----------
kernel : {'knn', 'rbf', callable}
String identifier for kernel function to use or the kernel function
itself. Only 'rbf' and 'knn' strings are valid inputs. The function
passed should take two inputs, each of shape [n_samples, n_features],
and return a [n_samples, n_samples] shaped weight matrix.
gamma : float
Parameter for rbf kernel
n_neighbors : integer > 0
Parameter for knn kernel
alpha : float
Clamping factor
    max_iter : int
        Maximum number of iterations allowed
tol : float
Convergence tolerance: threshold to consider the system at steady
state
Attributes
----------
X_ : array, shape = [n_samples, n_features]
Input array.
classes_ : array, shape = [n_classes]
The distinct labels used in classifying instances.
label_distributions_ : array, shape = [n_samples, n_classes]
Categorical distribution for each item.
transduction_ : array, shape = [n_samples]
Label assigned to each item via the transduction.
n_iter_ : int
Number of iterations run.
Examples
--------
>>> from sklearn import datasets
>>> from sklearn.semi_supervised import LabelPropagation
>>> label_prop_model = LabelPropagation()
>>> iris = datasets.load_iris()
>>> random_unlabeled_points = np.where(np.random.randint(0, 2,
... size=len(iris.target)))
>>> labels = np.copy(iris.target)
>>> labels[random_unlabeled_points] = -1
>>> label_prop_model.fit(iris.data, labels)
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
LabelPropagation(...)
References
----------
Xiaojin Zhu and Zoubin Ghahramani. Learning from labeled and unlabeled data
with label propagation. Technical Report CMU-CALD-02-107, Carnegie Mellon
University, 2002 http://pages.cs.wisc.edu/~jerryzhu/pub/CMU-CALD-02-107.pdf
See Also
--------
LabelSpreading : Alternate label propagation strategy more robust to noise
"""
def _build_graph(self):
"""Matrix representing a fully connected graph between each sample
This basic implementation creates a non-stochastic affinity matrix, so
class distributions will exceed 1 (normalization may be desired).
"""
if self.kernel == 'knn':
self.nn_fit = None
affinity_matrix = self._get_kernel(self.X_)
normalizer = affinity_matrix.sum(axis=0)
if sparse.isspmatrix(affinity_matrix):
affinity_matrix.data /= np.diag(np.array(normalizer))
else:
affinity_matrix /= normalizer[:, np.newaxis]
return affinity_matrix
class LabelSpreading(BaseLabelPropagation):
"""LabelSpreading model for semi-supervised learning
This model is similar to the basic Label Propagation algorithm,
but uses affinity matrix based on the normalized graph Laplacian
and soft clamping across the labels.
Read more in the :ref:`User Guide <label_propagation>`.
Parameters
----------
kernel : {'knn', 'rbf', callable}
String identifier for kernel function to use or the kernel function
itself. Only 'rbf' and 'knn' strings are valid inputs. The function
passed should take two inputs, each of shape [n_samples, n_features],
and return a [n_samples, n_samples] shaped weight matrix
gamma : float
parameter for rbf kernel
n_neighbors : integer > 0
parameter for knn kernel
alpha : float
clamping factor
    max_iter : int
        Maximum number of iterations allowed
tol : float
Convergence tolerance: threshold to consider the system at steady
state
n_jobs : int, optional (default = 1)
The number of parallel jobs to run.
If ``-1``, then the number of jobs is set to the number of CPU cores.
Attributes
----------
X_ : array, shape = [n_samples, n_features]
Input array.
classes_ : array, shape = [n_classes]
The distinct labels used in classifying instances.
label_distributions_ : array, shape = [n_samples, n_classes]
Categorical distribution for each item.
transduction_ : array, shape = [n_samples]
Label assigned to each item via the transduction.
n_iter_ : int
Number of iterations run.
Examples
--------
>>> from sklearn import datasets
>>> from sklearn.semi_supervised import LabelSpreading
>>> label_prop_model = LabelSpreading()
>>> iris = datasets.load_iris()
>>> random_unlabeled_points = np.where(np.random.randint(0, 2,
... size=len(iris.target)))
>>> labels = np.copy(iris.target)
>>> labels[random_unlabeled_points] = -1
>>> label_prop_model.fit(iris.data, labels)
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
LabelSpreading(...)
References
----------
Dengyong Zhou, Olivier Bousquet, Thomas Navin Lal, Jason Weston,
Bernhard Schoelkopf. Learning with local and global consistency (2004)
http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.115.3219
See Also
--------
LabelPropagation : Unregularized graph based semi-supervised learning
"""
def __init__(self, kernel='rbf', gamma=20, n_neighbors=7, alpha=0.2,
max_iter=30, tol=1e-3, n_jobs=1):
# this one has different base parameters
super(LabelSpreading, self).__init__(kernel=kernel, gamma=gamma,
n_neighbors=n_neighbors,
alpha=alpha, max_iter=max_iter,
tol=tol,
n_jobs=n_jobs)
def _build_graph(self):
"""Graph matrix for Label Spreading computes the graph laplacian"""
# compute affinity matrix (or gram matrix)
if self.kernel == 'knn':
self.nn_fit = None
n_samples = self.X_.shape[0]
affinity_matrix = self._get_kernel(self.X_)
laplacian = sparse.csgraph.laplacian(affinity_matrix, normed=True)
laplacian = -laplacian
if sparse.isspmatrix(laplacian):
diag_mask = (laplacian.row == laplacian.col)
laplacian.data[diag_mask] = 0.0
else:
laplacian.flat[::n_samples + 1] = 0.0 # set diag to 0.0
return laplacian
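# The matrix returned above is the off-diagonal part of D^(-1/2) W D^(-1/2):
# csgraph.laplacian(W, normed=True) yields L = I - D^(-1/2) W D^(-1/2), so
# negating it and zeroing the diagonal leaves the symmetric propagation matrix
# of Zhou et al. (2004). BaseLabelPropagation.fit then iterates
#
#     Y <- clamp_weights * graph_matrix.dot(Y) + y_static
#
# where clamp_weights equals alpha for unlabeled points and y_static holds
# (1 - alpha) times the original label distribution of the labeled points,
# which realizes the soft-clamped label spreading update.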
| bsd-3-clause |
waqasbhatti/astrobase | astrobase/varbase/transits.py | 2 | 21449 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# transits.py - Luke Bouma ([email protected]) - Oct 2018
# License: MIT - see the LICENSE file for the full text.
'''
Contains tools for analyzing transits.
'''
#############
## LOGGING ##
#############
import logging
from astrobase import log_sub, log_fmt, log_date_fmt
DEBUG = False
if DEBUG:
level = logging.DEBUG
else:
level = logging.INFO
LOGGER = logging.getLogger(__name__)
logging.basicConfig(
level=level,
style=log_sub,
format=log_fmt,
datefmt=log_date_fmt,
)
LOGDEBUG = LOGGER.debug
LOGINFO = LOGGER.info
LOGWARNING = LOGGER.warning
LOGERROR = LOGGER.error
LOGEXCEPTION = LOGGER.exception
#############
## IMPORTS ##
#############
import numpy as np
from astropy import units as u
from astrobase.periodbase import kbls
from ..lcfit.transits import traptransit_fit_magseries
from ..lcfit.utils import make_fit_plot
#######################
## UTILITY FUNCTIONS ##
#######################
def transit_duration_range(period,
min_radius_hint,
max_radius_hint):
'''This figures out the minimum and max transit duration (q) given a period
and min/max stellar radius hints.
One can get stellar radii from various places:
- GAIA distances and luminosities
- the TESS input catalog
- isochrone fits
The equation used is::
q ~ 0.076 x R**(2/3) x P**(-2/3)
P = period in days
R = stellar radius in solar radii
Parameters
----------
period : float
The orbital period of the transiting planet.
min_radius_hint,max_radius_hint : float
The minimum and maximum radii of the star the planet is orbiting around.
Returns
-------
(min_transit_duration, max_transit_duration) : tuple
The returned tuple contains the minimum and maximum transit durations
allowed for the orbital geometry of this planetary system. These can be
used with the BLS period-search functions in
:py:mod:`astrobase.periodbase.kbls` or
:py:mod:`astrobase.periodbase.abls` to refine the period-search to only
physically possible transit durations.
'''
return (
0.076 * (min_radius_hint**(2./3.)) * (period**(-2./3.)),
0.076 * (max_radius_hint**(2./3.)) * (period**(-2./3.))
)
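# A quick worked example of the relation above (values rounded): for a 10 day
# period and stellar radii between 0.5 and 1.5 Rsun,
#
#     q_min = 0.076 * 0.5**(2./3.) * 10**(-2./3.)  # ~ 0.010
#     q_max = 0.076 * 1.5**(2./3.) * 10**(-2./3.)  # ~ 0.021
#
# i.e. transit_duration_range(10., 0.5, 1.5) ~ (0.010, 0.021), fractional
# durations corresponding to roughly 2.5 to 5 hour transits.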
##############################
## TRANSIT MODEL ASSESSMENT ##
##############################
def get_snr_of_dip(times,
mags,
modeltimes,
modelmags,
atol_normalization=1e-8,
indsforrms=None,
magsarefluxes=False,
verbose=True,
transitdepth=None,
npoints_in_transit=None):
'''Calculate the total SNR of a transit assuming gaussian uncertainties.
`modelmags` gets interpolated onto the cadence of `mags`. The noise is
calculated as the 1-sigma std deviation of the residual (see below).
Following Carter et al. 2009::
Q = sqrt( Γ T ) * δ / σ
for Q the total SNR of the transit in the r->0 limit, where::
r = Rp/Rstar,
T = transit duration,
δ = transit depth,
σ = RMS of the lightcurve in transit.
Γ = sampling rate
Thus Γ * T is roughly the number of points obtained during transit.
(This doesn't correctly account for the SNR during ingress/egress, but this
is a second-order correction).
Note this is the same total SNR as described by e.g., Kovacs et al. 2002,
their Equation 11.
NOTE: this only works with fluxes at the moment.
Parameters
----------
times,mags : np.array
The input flux time-series to process.
modeltimes,modelmags : np.array
A transiting planet model, either from BLS, a trapezoid model, or a
Mandel-Agol model.
atol_normalization : float
The absolute tolerance to which the median of the passed model fluxes
must be equal to 1.
indsforrms : np.array
        An array of bools of `len(mags)` used to select points for the RMS
        measurement. If not passed, the RMS of the entire passed timeseries is
        used as an approximation. Generally, it's best to use out-of-transit
points, so the RMS measurement is not model-dependent.
magsarefluxes : bool
Currently forced to be True because this function only works with
fluxes.
verbose : bool
If True, indicates progress and warns about problems.
transitdepth : float or None
If the transit depth is known, pass it in here. Otherwise, it is
calculated assuming OOT flux is 1.
    npoints_in_transit : int or None
If the number of points in transit is known, pass it in here. Otherwise,
the function will guess at this value.
Returns
-------
(snr, transit_depth, noise) : tuple
The returned tuple contains the calculated SNR, transit depth, and noise
of the residual lightcurve calculated using the relation described
above.
'''
if magsarefluxes:
if not np.isclose(np.nanmedian(modelmags), 1, atol=atol_normalization):
raise AssertionError('snr calculation assumes modelmags are '
'median-normalized')
else:
raise NotImplementedError(
            'need to implement a method for identifying in-transit points when '
'mags are mags, and not fluxes'
)
if not transitdepth:
# calculate transit depth from whatever model magnitudes are passed.
transitdepth = np.abs(np.max(modelmags) - np.min(modelmags))
# generally, mags (data) and modelmags are at different cadence.
# interpolate modelmags onto the cadence of mags.
if not len(mags) == len(modelmags):
from scipy.interpolate import interp1d
fn = interp1d(modeltimes, modelmags, kind='cubic', bounds_error=True,
fill_value=np.nan)
modelmags = fn(times)
if verbose:
LOGINFO('interpolated model timeseries onto the data timeseries')
subtractedmags = mags - modelmags
if isinstance(indsforrms, np.ndarray):
subtractedrms = np.std(subtractedmags[indsforrms])
if verbose:
LOGINFO('using selected points to measure RMS')
else:
subtractedrms = np.std(subtractedmags)
if verbose:
LOGINFO('using all points to measure RMS')
def _get_npoints_in_transit(modelmags):
# assumes median-normalized fluxes are input
if np.nanmedian(modelmags) == 1:
return len(modelmags[(modelmags != 1)])
else:
raise NotImplementedError
if not npoints_in_transit:
npoints_in_transit = _get_npoints_in_transit(modelmags)
snr = np.sqrt(npoints_in_transit) * transitdepth/subtractedrms
if verbose:
LOGINFO('\npoints in transit: {:d}'.format(npoints_in_transit) +
'\ndepth: {:.2e}'.format(transitdepth) +
'\nrms in residual: {:.2e}'.format(subtractedrms) +
'\n\t SNR: {:.2e}'.format(snr))
return snr, transitdepth, subtractedrms
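# Worked example of the Carter et al. relation implemented above: a 1% deep
# transit (depth = 0.01) sampled by 100 in-transit points in a light curve
# whose residual RMS is 0.002 gives
#
#     SNR = sqrt(100) * 0.01 / 0.002 = 50.
#
# (The numbers are illustrative, not from any particular light curve.)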
def estimate_achievable_tmid_precision(snr, t_ingress_min=10,
t_duration_hr=2.14):
'''Using Carter et al. 2009's estimate, calculate the theoretical optimal
precision on mid-transit time measurement possible given a transit of a
particular SNR.
The relation used is::
sigma_tc = Q^{-1} * T * sqrt(θ/2)
Q = SNR of the transit.
T = transit duration, which is 2.14 hours from discovery paper.
θ = τ/T = ratio of ingress to total duration
~= (few minutes [guess]) / 2.14 hours
Parameters
----------
snr : float
The measured signal-to-noise of the transit, e,g. from
:py:func:`astrobase.periodbase.kbls.bls_stats_singleperiod` or from
running the `.compute_stats()` method on an Astropy BoxLeastSquares
object.
t_ingress_min : float
The ingress duration in minutes. This is t_I to t_II in Winn (2010)
nomenclature.
t_duration_hr : float
The transit duration in hours. This is t_I to t_IV in Winn (2010)
nomenclature.
Returns
-------
float
Returns the precision achievable for transit-center time as calculated
from the relation above. This is in days.
'''
t_ingress = t_ingress_min*u.minute
t_duration = t_duration_hr*u.hour
theta = t_ingress/t_duration
sigma_tc = (1/snr * t_duration * np.sqrt(theta/2))
LOGINFO('assuming t_ingress = {:.1f}'.format(t_ingress))
LOGINFO('assuming t_duration = {:.1f}'.format(t_duration))
LOGINFO('measured SNR={:.2f}\n\t'.format(snr) +
'-->theoretical sigma_tc = {:.2e} = {:.2e} = {:.2e}'.format(
sigma_tc.to(u.minute), sigma_tc.to(u.hour), sigma_tc.to(u.day)))
return sigma_tc.to(u.day).value
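# Worked example with the default geometry (t_ingress = 10 min, T = 2.14 hr,
# so theta ~ 0.078): a transit detected at SNR = 10 gives
#
#     sigma_tc = (1/10) * 2.14 hr * sqrt(0.078 / 2) ~ 0.042 hr ~ 2.5 min
#
# i.e. estimate_achievable_tmid_precision(10.) ~ 1.8e-3 (in days).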
def get_transit_times(
blsd,
time,
extra_maskfrac,
trapd=None,
nperiodint=1000
):
'''Given a BLS period, epoch, and transit ingress/egress points (usually
from :py:func:`astrobase.periodbase.kbls.bls_stats_singleperiod`), return
the times within transit durations + `extra_maskfrac` of each transit.
Optionally, can use the (more accurate) trapezoidal fit period and epoch, if
it's passed. Useful for inspecting individual transits, and masking them
out if desired.
Parameters
----------
blsd : dict
This is the dict returned by
:py:func:`astrobase.periodbase.kbls.bls_stats_singleperiod`.
time : np.array
The times from the time-series of transit observations used to calculate
the initial period.
extra_maskfrac : float
This is the separation from in-transit points you desire, in units of
the transit duration. `extra_maskfrac = 0` if you just want points
inside transit (see below).
trapd : dict
This is a dict returned by
:py:func:`astrobase.lcfit.transits.traptransit_fit_magseries` containing
the trapezoid transit model.
nperiodint : int
This indicates how many periods backwards/forwards to try and identify
transits from the epochs reported in `blsd` or `trapd`.
Returns
-------
(tmids_obsd, t_starts, t_ends) : tuple of np.array
The returned items are::
tmids_obsd (np.ndarray): best guess of transit midtimes in
lightcurve. Has length number of transits in lightcurve.
t_starts (np.ndarray): t_Is - extra_maskfrac*tdur, for t_Is transit
first contact point.
t_ends (np.ndarray): t_Is + extra_maskfrac*tdur, for t_Is transit
first contact point.
'''
if trapd:
period = trapd['fitinfo']['finalparams'][0]
t0 = trapd['fitinfo']['fitepoch']
transitduration_phase = trapd['fitinfo']['finalparams'][3]
tdur = period * transitduration_phase
else:
period = blsd['period']
t0 = blsd['epoch']
tdur = (
period *
(blsd['transegressbin']-blsd['transingressbin'])/blsd['nphasebins']
)
if not blsd['transegressbin'] > blsd['transingressbin']:
raise NotImplementedError(
'careful of the width. '
'this edge case must be dealt with separately.'
)
tmids = [t0 + ix*period for ix in range(-nperiodint,nperiodint)]
sel = (tmids > np.nanmin(time)) & (tmids < np.nanmax(time))
tmids_obsd = np.array(tmids)[sel]
t_Is = tmids_obsd - tdur/2
t_IVs = tmids_obsd + tdur/2
# focus on the times around transit
t_starts = t_Is - extra_maskfrac * tdur
t_ends = t_IVs + extra_maskfrac * tdur
return tmids_obsd, t_starts, t_ends
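# For example, with a fitted transit duration tdur = 0.12 d and
# extra_maskfrac = 0.03, each returned window is
#
#     t_start = t_mid - (0.5 + 0.03) * 0.12 ~ t_mid - 0.064 d
#     t_end   = t_mid + (0.5 + 0.03) * 0.12 ~ t_mid + 0.064 d
#
# i.e. the mask extends ~3% of a transit duration beyond each contact point.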
def given_lc_get_transit_tmids_tstarts_tends(
time,
flux,
err_flux,
blsfit_savpath=None,
trapfit_savpath=None,
magsarefluxes=True,
nworkers=1,
sigclip=None,
extra_maskfrac=0.03
):
'''Gets the transit start, middle, and end times for transits in a given
time-series of observations.
Parameters
----------
time,flux,err_flux : np.array
The input flux time-series measurements and their associated measurement
errors
blsfit_savpath : str or None
If provided as a str, indicates the path of the fit plot to make for a
simple BLS model fit to the transit using the obtained period and epoch.
trapfit_savpath : str or None
If provided as a str, indicates the path of the fit plot to make for a
trapezoidal transit model fit to the transit using the obtained period
and epoch.
sigclip : float or int or sequence of two floats/ints or None
If a single float or int, a symmetric sigma-clip will be performed using
the number provided as the sigma-multiplier to cut out from the input
time-series.
If a list of two ints/floats is provided, the function will perform an
'asymmetric' sigma-clip. The first element in this list is the sigma
value to use for fainter flux/mag values; the second element in this
list is the sigma value to use for brighter flux/mag values. For
example, `sigclip=[10., 3.]`, will sigclip out greater than 10-sigma
dimmings and greater than 3-sigma brightenings. Here the meaning of
"dimming" and "brightening" is set by *physics* (not the magnitude
system), which is why the `magsarefluxes` kwarg must be correctly set.
If `sigclip` is None, no sigma-clipping will be performed, and the
time-series (with non-finite elems removed) will be passed through to
the output.
magsarefluxes : bool
This is by default True for this function, since it works on fluxes only
at the moment.
nworkers : int
The number of parallel BLS period-finder workers to use.
extra_maskfrac : float
This is the separation (N) from in-transit points you desire, in units
of the transit duration. `extra_maskfrac = 0` if you just want points
inside transit, otherwise::
t_starts = t_Is - N*tdur, t_ends = t_IVs + N*tdur
Thus setting N=0.03 masks slightly more than the guessed transit
duration.
Returns
-------
(tmids_obsd, t_starts, t_ends) : tuple
The returned items are::
tmids_obsd (np.ndarray): best guess of transit midtimes in
lightcurve. Has length number of transits in lightcurve.
t_starts (np.ndarray): t_Is - extra_maskfrac*tdur, for t_Is transit
first contact point.
t_ends (np.ndarray): t_Is + extra_maskfrac*tdur, for t_Is transit
first contact point.
'''
# first, run BLS to get an initial epoch and period.
endp = 1.05*(np.nanmax(time) - np.nanmin(time))/2
blsdict = kbls.bls_parallel_pfind(time, flux, err_flux,
magsarefluxes=magsarefluxes, startp=0.1,
endp=endp, maxtransitduration=0.3,
nworkers=nworkers, sigclip=sigclip)
blsd = kbls.bls_stats_singleperiod(time, flux, err_flux,
blsdict['bestperiod'],
magsarefluxes=True, sigclip=sigclip,
perioddeltapercent=5)
# plot the BLS model.
if blsfit_savpath:
make_fit_plot(blsd['phases'], blsd['phasedmags'], None,
blsd['blsmodel'], blsd['period'], blsd['epoch'],
blsd['epoch'], blsfit_savpath,
magsarefluxes=magsarefluxes)
ingduration_guess = blsd['transitduration'] * 0.2 # a guesstimate.
transitparams = [
blsd['period'], blsd['epoch'], blsd['transitdepth'],
blsd['transitduration'], ingduration_guess
]
# fit a trapezoidal transit model; plot the resulting phased LC.
    trapd = None
    if trapfit_savpath:
trapd = traptransit_fit_magseries(time, flux, err_flux,
transitparams,
magsarefluxes=magsarefluxes,
sigclip=sigclip,
plotfit=trapfit_savpath)
# use the trapezoidal model's epoch as the guess to identify (roughly) in
# and out of transit points
tmids, t_starts, t_ends = get_transit_times(blsd,
time,
extra_maskfrac,
trapd=trapd)
return tmids, t_starts, t_ends
def _in_out_transit_plot(time, flux, intransit, ootransit, savpath):
import matplotlib.pyplot as plt
f, ax = plt.subplots(nrows=1, ncols=1, sharex=True, figsize=(8,4))
ax.scatter(
time[ootransit],
flux[ootransit],
c='k',
s=1.5,
rasterized=True,
linewidths=0
)
ax.scatter(
time[intransit],
flux[intransit],
c='r',
s=1.5,
rasterized=True,
linewidths=0
)
ax.set_ylabel('relative flux')
ax.set_xlabel('time [days]')
f.tight_layout(h_pad=0, w_pad=0)
f.savefig(savpath, dpi=400, bbox_inches='tight')
def given_lc_get_out_of_transit_points(
time, flux, err_flux,
blsfit_savpath=None,
trapfit_savpath=None,
in_out_transit_savpath=None,
sigclip=None,
magsarefluxes=True,
nworkers=1,
extra_maskfrac=0.03
):
'''This gets the out-of-transit light curve points.
Relevant during iterative masking of transits for multiple planet system
search.
Parameters
----------
time,flux,err_flux : np.array
The input flux time-series measurements and their associated measurement
errors
blsfit_savpath : str or None
If provided as a str, indicates the path of the fit plot to make for a
simple BLS model fit to the transit using the obtained period and epoch.
trapfit_savpath : str or None
If provided as a str, indicates the path of the fit plot to make for a
trapezoidal transit model fit to the transit using the obtained period
and epoch.
in_out_transit_savpath : str or None
If provided as a str, indicates the path of the plot file that will be
made for a plot showing the in-transit points and out-of-transit points
tagged separately.
sigclip : float or int or sequence of two floats/ints or None
If a single float or int, a symmetric sigma-clip will be performed using
the number provided as the sigma-multiplier to cut out from the input
time-series.
If a list of two ints/floats is provided, the function will perform an
'asymmetric' sigma-clip. The first element in this list is the sigma
value to use for fainter flux/mag values; the second element in this
list is the sigma value to use for brighter flux/mag values. For
example, `sigclip=[10., 3.]`, will sigclip out greater than 10-sigma
dimmings and greater than 3-sigma brightenings. Here the meaning of
"dimming" and "brightening" is set by *physics* (not the magnitude
system), which is why the `magsarefluxes` kwarg must be correctly set.
If `sigclip` is None, no sigma-clipping will be performed, and the
time-series (with non-finite elems removed) will be passed through to
the output.
magsarefluxes : bool
This is by default True for this function, since it works on fluxes only
at the moment.
nworkers : int
The number of parallel BLS period-finder workers to use.
extra_maskfrac : float
This is the separation (N) from in-transit points you desire, in units
of the transit duration. `extra_maskfrac = 0` if you just want points
inside transit, otherwise::
t_starts = t_Is - N*tdur, t_ends = t_IVs + N*tdur
Thus setting N=0.03 masks slightly more than the guessed transit
duration.
Returns
-------
(times_oot, fluxes_oot, errs_oot) : tuple of np.array
The `times`, `flux`, `err_flux` values from the input at the time values
out-of-transit are returned.
'''
tmids_obsd, t_starts, t_ends = (
given_lc_get_transit_tmids_tstarts_tends(
time, flux, err_flux, blsfit_savpath=blsfit_savpath,
trapfit_savpath=trapfit_savpath, magsarefluxes=magsarefluxes,
nworkers=nworkers, sigclip=sigclip, extra_maskfrac=extra_maskfrac
)
)
in_transit = np.zeros_like(time).astype(bool)
for t_start, t_end in zip(t_starts, t_ends):
this_transit = ( (time > t_start) & (time < t_end) )
in_transit |= this_transit
out_of_transit = ~in_transit
if in_out_transit_savpath:
_in_out_transit_plot(time, flux, in_transit, out_of_transit,
in_out_transit_savpath)
return time[out_of_transit], flux[out_of_transit], err_flux[out_of_transit]
| mit |
shusenl/scikit-learn | sklearn/datasets/tests/test_svmlight_format.py | 228 | 11221 | from bz2 import BZ2File
import gzip
from io import BytesIO
import numpy as np
import os
import shutil
from tempfile import NamedTemporaryFile
from sklearn.externals.six import b
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import raises
from sklearn.utils.testing import assert_in
import sklearn
from sklearn.datasets import (load_svmlight_file, load_svmlight_files,
dump_svmlight_file)
currdir = os.path.dirname(os.path.abspath(__file__))
datafile = os.path.join(currdir, "data", "svmlight_classification.txt")
multifile = os.path.join(currdir, "data", "svmlight_multilabel.txt")
invalidfile = os.path.join(currdir, "data", "svmlight_invalid.txt")
invalidfile2 = os.path.join(currdir, "data", "svmlight_invalid_order.txt")
def test_load_svmlight_file():
X, y = load_svmlight_file(datafile)
# test X's shape
assert_equal(X.indptr.shape[0], 7)
assert_equal(X.shape[0], 6)
assert_equal(X.shape[1], 21)
assert_equal(y.shape[0], 6)
# test X's non-zero values
for i, j, val in ((0, 2, 2.5), (0, 10, -5.2), (0, 15, 1.5),
(1, 5, 1.0), (1, 12, -3),
(2, 20, 27)):
assert_equal(X[i, j], val)
# tests X's zero values
assert_equal(X[0, 3], 0)
assert_equal(X[0, 5], 0)
assert_equal(X[1, 8], 0)
assert_equal(X[1, 16], 0)
assert_equal(X[2, 18], 0)
# test can change X's values
X[0, 2] *= 2
assert_equal(X[0, 2], 5)
# test y
assert_array_equal(y, [1, 2, 3, 4, 1, 2])
def test_load_svmlight_file_fd():
# test loading from file descriptor
X1, y1 = load_svmlight_file(datafile)
fd = os.open(datafile, os.O_RDONLY)
try:
X2, y2 = load_svmlight_file(fd)
assert_array_equal(X1.data, X2.data)
assert_array_equal(y1, y2)
finally:
os.close(fd)
def test_load_svmlight_file_multilabel():
X, y = load_svmlight_file(multifile, multilabel=True)
assert_equal(y, [(0, 1), (2,), (), (1, 2)])
def test_load_svmlight_files():
X_train, y_train, X_test, y_test = load_svmlight_files([datafile] * 2,
dtype=np.float32)
assert_array_equal(X_train.toarray(), X_test.toarray())
assert_array_equal(y_train, y_test)
assert_equal(X_train.dtype, np.float32)
assert_equal(X_test.dtype, np.float32)
X1, y1, X2, y2, X3, y3 = load_svmlight_files([datafile] * 3,
dtype=np.float64)
assert_equal(X1.dtype, X2.dtype)
assert_equal(X2.dtype, X3.dtype)
assert_equal(X3.dtype, np.float64)
def test_load_svmlight_file_n_features():
X, y = load_svmlight_file(datafile, n_features=22)
# test X'shape
assert_equal(X.indptr.shape[0], 7)
assert_equal(X.shape[0], 6)
assert_equal(X.shape[1], 22)
# test X's non-zero values
for i, j, val in ((0, 2, 2.5), (0, 10, -5.2),
(1, 5, 1.0), (1, 12, -3)):
assert_equal(X[i, j], val)
# 21 features in file
assert_raises(ValueError, load_svmlight_file, datafile, n_features=20)
def test_load_compressed():
X, y = load_svmlight_file(datafile)
with NamedTemporaryFile(prefix="sklearn-test", suffix=".gz") as tmp:
tmp.close() # necessary under windows
with open(datafile, "rb") as f:
shutil.copyfileobj(f, gzip.open(tmp.name, "wb"))
Xgz, ygz = load_svmlight_file(tmp.name)
# because we "close" it manually and write to it,
# we need to remove it manually.
os.remove(tmp.name)
assert_array_equal(X.toarray(), Xgz.toarray())
assert_array_equal(y, ygz)
with NamedTemporaryFile(prefix="sklearn-test", suffix=".bz2") as tmp:
tmp.close() # necessary under windows
with open(datafile, "rb") as f:
shutil.copyfileobj(f, BZ2File(tmp.name, "wb"))
Xbz, ybz = load_svmlight_file(tmp.name)
# because we "close" it manually and write to it,
# we need to remove it manually.
os.remove(tmp.name)
assert_array_equal(X.toarray(), Xbz.toarray())
assert_array_equal(y, ybz)
@raises(ValueError)
def test_load_invalid_file():
load_svmlight_file(invalidfile)
@raises(ValueError)
def test_load_invalid_order_file():
load_svmlight_file(invalidfile2)
@raises(ValueError)
def test_load_zero_based():
f = BytesIO(b("-1 4:1.\n1 0:1\n"))
load_svmlight_file(f, zero_based=False)
def test_load_zero_based_auto():
data1 = b("-1 1:1 2:2 3:3\n")
data2 = b("-1 0:0 1:1\n")
f1 = BytesIO(data1)
X, y = load_svmlight_file(f1, zero_based="auto")
assert_equal(X.shape, (1, 3))
f1 = BytesIO(data1)
f2 = BytesIO(data2)
X1, y1, X2, y2 = load_svmlight_files([f1, f2], zero_based="auto")
assert_equal(X1.shape, (1, 4))
assert_equal(X2.shape, (1, 4))
def test_load_with_qid():
# load svmfile with qid attribute
data = b("""
3 qid:1 1:0.53 2:0.12
2 qid:1 1:0.13 2:0.1
7 qid:2 1:0.87 2:0.12""")
X, y = load_svmlight_file(BytesIO(data), query_id=False)
assert_array_equal(y, [3, 2, 7])
assert_array_equal(X.toarray(), [[.53, .12], [.13, .1], [.87, .12]])
res1 = load_svmlight_files([BytesIO(data)], query_id=True)
res2 = load_svmlight_file(BytesIO(data), query_id=True)
for X, y, qid in (res1, res2):
assert_array_equal(y, [3, 2, 7])
assert_array_equal(qid, [1, 1, 2])
assert_array_equal(X.toarray(), [[.53, .12], [.13, .1], [.87, .12]])
@raises(ValueError)
def test_load_invalid_file2():
load_svmlight_files([datafile, invalidfile, datafile])
@raises(TypeError)
def test_not_a_filename():
# in python 3 integers are valid file opening arguments (taken as unix
# file descriptors)
load_svmlight_file(.42)
@raises(IOError)
def test_invalid_filename():
load_svmlight_file("trou pic nic douille")
def test_dump():
Xs, y = load_svmlight_file(datafile)
Xd = Xs.toarray()
# slicing a csr_matrix can unsort its .indices, so test that we sort
# those correctly
Xsliced = Xs[np.arange(Xs.shape[0])]
for X in (Xs, Xd, Xsliced):
for zero_based in (True, False):
for dtype in [np.float32, np.float64, np.int32]:
f = BytesIO()
# we need to pass a comment to get the version info in;
# LibSVM doesn't grok comments so they're not put in by
# default anymore.
dump_svmlight_file(X.astype(dtype), y, f, comment="test",
zero_based=zero_based)
f.seek(0)
comment = f.readline()
try:
comment = str(comment, "utf-8")
except TypeError: # fails in Python 2.x
pass
assert_in("scikit-learn %s" % sklearn.__version__, comment)
comment = f.readline()
try:
comment = str(comment, "utf-8")
except TypeError: # fails in Python 2.x
pass
assert_in(["one", "zero"][zero_based] + "-based", comment)
X2, y2 = load_svmlight_file(f, dtype=dtype,
zero_based=zero_based)
assert_equal(X2.dtype, dtype)
assert_array_equal(X2.sorted_indices().indices, X2.indices)
if dtype == np.float32:
assert_array_almost_equal(
# allow a rounding error at the last decimal place
Xd.astype(dtype), X2.toarray(), 4)
else:
assert_array_almost_equal(
# allow a rounding error at the last decimal place
Xd.astype(dtype), X2.toarray(), 15)
assert_array_equal(y, y2)
def test_dump_multilabel():
X = [[1, 0, 3, 0, 5],
[0, 0, 0, 0, 0],
[0, 5, 0, 1, 0]]
y = [[0, 1, 0], [1, 0, 1], [1, 1, 0]]
f = BytesIO()
dump_svmlight_file(X, y, f, multilabel=True)
f.seek(0)
# make sure it dumps multilabel correctly
assert_equal(f.readline(), b("1 0:1 2:3 4:5\n"))
assert_equal(f.readline(), b("0,2 \n"))
assert_equal(f.readline(), b("0,1 1:5 3:1\n"))
def test_dump_concise():
one = 1
two = 2.1
three = 3.01
exact = 1.000000000000001
# loses the last decimal place
almost = 1.0000000000000001
X = [[one, two, three, exact, almost],
[1e9, 2e18, 3e27, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0]]
y = [one, two, three, exact, almost]
f = BytesIO()
dump_svmlight_file(X, y, f)
f.seek(0)
# make sure it's using the most concise format possible
assert_equal(f.readline(),
b("1 0:1 1:2.1 2:3.01 3:1.000000000000001 4:1\n"))
assert_equal(f.readline(), b("2.1 0:1000000000 1:2e+18 2:3e+27\n"))
assert_equal(f.readline(), b("3.01 \n"))
assert_equal(f.readline(), b("1.000000000000001 \n"))
assert_equal(f.readline(), b("1 \n"))
f.seek(0)
# make sure it's correct too :)
X2, y2 = load_svmlight_file(f)
assert_array_almost_equal(X, X2.toarray())
assert_array_equal(y, y2)
def test_dump_comment():
X, y = load_svmlight_file(datafile)
X = X.toarray()
f = BytesIO()
ascii_comment = "This is a comment\nspanning multiple lines."
dump_svmlight_file(X, y, f, comment=ascii_comment, zero_based=False)
f.seek(0)
X2, y2 = load_svmlight_file(f, zero_based=False)
assert_array_almost_equal(X, X2.toarray())
assert_array_equal(y, y2)
# XXX we have to update this to support Python 3.x
utf8_comment = b("It is true that\n\xc2\xbd\xc2\xb2 = \xc2\xbc")
f = BytesIO()
assert_raises(UnicodeDecodeError,
dump_svmlight_file, X, y, f, comment=utf8_comment)
unicode_comment = utf8_comment.decode("utf-8")
f = BytesIO()
dump_svmlight_file(X, y, f, comment=unicode_comment, zero_based=False)
f.seek(0)
X2, y2 = load_svmlight_file(f, zero_based=False)
assert_array_almost_equal(X, X2.toarray())
assert_array_equal(y, y2)
f = BytesIO()
assert_raises(ValueError,
dump_svmlight_file, X, y, f, comment="I've got a \0.")
def test_dump_invalid():
X, y = load_svmlight_file(datafile)
f = BytesIO()
y2d = [y]
assert_raises(ValueError, dump_svmlight_file, X, y2d, f)
f = BytesIO()
assert_raises(ValueError, dump_svmlight_file, X, y[:-1], f)
def test_dump_query_id():
# test dumping a file with query_id
X, y = load_svmlight_file(datafile)
X = X.toarray()
query_id = np.arange(X.shape[0]) // 2
f = BytesIO()
dump_svmlight_file(X, y, f, query_id=query_id, zero_based=True)
f.seek(0)
X1, y1, query_id1 = load_svmlight_file(f, query_id=True, zero_based=True)
assert_array_almost_equal(X, X1.toarray())
assert_array_almost_equal(y, y1)
assert_array_almost_equal(query_id, query_id1)
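# ---------------------------------------------------------------------------
# A minimal round-trip sketch, not part of the original test suite: dump a tiny
# dense matrix to the svmlight/libsvm text format and read it back. The matrix,
# labels and underscore-prefixed names below are illustrative only.
if __name__ == "__main__":
    _X = np.array([[1.0, 0.0, 2.5], [0.0, 3.0, 0.0]])
    _y = np.array([1, -1])
    _f = BytesIO()
    dump_svmlight_file(_X, _y, _f, zero_based=True)
    _f.seek(0)
    _X2, _y2 = load_svmlight_file(_f, n_features=3, zero_based=True)
    assert_array_almost_equal(_X, _X2.toarray())
    assert_array_equal(_y, _y2)
    print("svmlight round-trip sketch passed")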
| bsd-3-clause |
amarack/python-rl | pyrl/agents/planners/fitted_qiteration.py | 2 | 8884 |
# Author: Will Dabney
from random import Random
import numpy
import pyrl.basis.fourier as fourier
import pyrl.basis.rbf as rbf
import pyrl.basis.tilecode as tilecode
import pyrl.basis.trivial as trivial
from planner import Planner
from sklearn import linear_model
from sklearn.svm import SVR
from sklearn import tree
class FittedQIteration(Planner):
"""FittedQIteration is an implementation of the Fitted Q-Iteration algorithm of Ernst, Geurts, Wehenkel (2005).
    This class allows a variety of regression algorithms, provided by scikit-learn, to be used for
representing the Q-value function. Additionally, different basis functions can be applied to the features before
being passed to the regressors, including trivial, fourier, tile coding, and radial basis functions.
"""
def __init__(self, model, **kwargs):
"""Inits the Fitted Q-Iteration planner with discount factor, instantiated model learner, and additional parameters.
Args:
model: The model learner object
gamma=1.0: The discount factor for the domain
**kwargs: Additional parameters for use in the class.
"""
Planner.__init__(self, model, **kwargs)
self.fa_name = self.params.setdefault('basis', 'trivial')
self.params.setdefault('iterations', 200)
self.params.setdefault('support_size', 200)
self.basis = None
# Set up regressor
learn_name = self.params.setdefault('regressor', 'linreg')
if learn_name == 'linreg':
self.learner = linear_model.LinearRegression()
elif learn_name == 'ridge':
self.learner = linear_model.Ridge(alpha = self.params.setdefault('l2', 0.5))
elif learn_name == 'tree':
self.learner = tree.DecisionTreeRegressor()
elif learn_name == 'svm':
self.learner = SVR()
else:
self.learner = None
def randomize_parameters(self, **args):
"""Generate parameters randomly, constrained by given named parameters.
Parameters that fundamentally change the algorithm are not randomized over. For
example, basis and softmax fundamentally change the domain and have very few values
to be considered. They are not randomized over.
Basis parameters, on the other hand, have many possible values and ARE randomized.
Args:
**args: Named parameters to fix, which will not be randomly generated
Returns:
List of resulting parameters of the class. Will always be in the same order.
Empty list if parameter free.
"""
self.randParameter('iterations', args, sample=numpy.random.randint(500))
self.randParameter('support_size', args, sample=numpy.random.randint(500))
# Randomize basis parameters
if self.fa_name == 'fourier':
self.randParameter('fourier_order', args, sample=numpy.random.randint(1,5)*2 + 1)
elif self.fa_name == 'rbf':
self.randParameter('rbf_number', args, sample=numpy.random.randint(100))
self.randParameter('rbf_beta', args)
elif self.fa_name == 'tile':
self.randParameter('tile_number', args, sample=numpy.random.randint(200))
self.randParameter('tile_weights', args, sample=2**numpy.random.randint(15))
return super(FittedQIteration,self).randomize_parameters(**args)
def planner_init(self, numDiscStates, contFeatureRanges, numActions, rewardRange):
self.has_plan = False
self.ranges, self.actions = self.model.getStateSpace()
# Set up basis
if self.fa_name == 'fourier':
self.basis = fourier.FourierBasis(len(self.ranges), self.ranges,
order=self.params.setdefault('fourier_order', 3))
elif self.fa_name == 'rbf':
self.basis = rbf.RBFBasis(len(self.ranges), self.ranges,
num_functions=self.params.setdefault('rbf_number', len(self.ranges)),
beta=self.params.setdefault('rbf_beta', 1.0))
elif self.fa_name == 'tile':
self.basis = tilecode.TileCodingBasis(len(self.ranges), self.ranges,
num_tiles=self.params.setdefault('tile_number', 100),
num_weights=self.params.setdefault('tile_weights', 2048))
else:
self.basis = trivial.TrivialBasis(len(self.ranges), self.ranges)
def getStateAction(self, state, action):
"""Returns the basified state feature array for the given state action pair.
Args:
state: The array of state features
action: The action taken from the given state
Returns:
The array containing the result of applying the basis functions to the state-action.
"""
state = self.basis.computeFeatures(state)
stateaction = numpy.zeros((self.actions, len(state)))
stateaction[action,:] = state
return stateaction.flatten()
def predict(self, state, action):
"""Predict the next state, reward, and termination probability for the current state-action.
Args:
state: The array of state features
action: The action taken from the given state
Returns:
Tuple (next_state, reward, termination), where next_state gives the predicted next state,
reward gives the predicted reward for transitioning to that state, and termination
gives the expected probabillity of terminating the episode upon transitioning.
All three are None if no model has been learned for the given action.
"""
if self.model.has_fit[action]:
return self.model.predict(state, action)
else:
return None, None, None
def getValue(self, state):
"""Get the Q-value function value for the greedy action choice at the given state (ie V(state)).
Args:
state: The array of state features
Returns:
The double value for the value function at the given state
"""
if self.has_plan:
return self.learner.predict([self.getStateAction(state, a) for a in range(self.actions)]).max()
else:
return None
def getAction(self, state):
"""Get the action under the current plan policy for the given state.
Args:
state: The array of state features
Returns:
The current greedy action under the planned policy for the given state. If no plan has been formed,
return a random action.
"""
if self.has_plan:
return self.learner.predict([self.getStateAction(state, a) for a in range(self.actions)]).argmax()
else:
return self.randGenerator.randint(0, self.actions-1)
def updatePlan(self):
"""Run Fitted Q-Iteration on samples from the model, and update the plan accordingly."""
for sample_iter in range(self.params.setdefault('resample', 1)):
self.has_plan = False
prev_coef = None
samples = self.model.sampleStateActions(self.params['support_size'])
outcomes = self.model.predictSet(samples)
Xp = []
X = []
R = []
gammas = []
for a in range(self.actions):
Xp += map(lambda k: [self.getStateAction(k, b) for b in range(self.actions)], outcomes[a][0])
X += map(lambda k: self.getStateAction(k, a), samples[a])
R += list(outcomes[a][1])
gammas += list((1.0 - outcomes[a][2]) * self.gamma)
Xp = numpy.array(Xp)
Xp = Xp.reshape(Xp.shape[0]*Xp.shape[1], Xp.shape[2])
X = numpy.array(X)
R = numpy.array(R)
gammas = numpy.array(gammas)
targets = []
Qp = None
error = 1.0
iter2 = 0
threshold = 1.0e-4
while error > threshold and iter2 < self.params['iterations']:
if self.has_plan:
Qprimes = self.learner.predict(Xp).reshape((X.shape[0], self.actions))
targets = R + gammas*Qprimes.max(1)
Qp = Qprimes
else:
targets = R
self.has_plan = True
self.learner.fit(X, targets)
try:
if prev_coef is not None:
error = numpy.linalg.norm(prev_coef - self.learner.coef_)
prev_coef = self.learner.coef_.copy()
except:
pass
iter2 += 1
#print "#?", sample_iter, iter2, error, self.model.exp_index
if error <= threshold:
return
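# ---------------------------------------------------------------------------
# A minimal, self-contained sketch of the generic Fitted Q-Iteration update
# described in the class docstring, not part of the original module: at each
# sweep the regressor is refit on targets r + gamma * max_a' Q(s', a'). The
# synthetic transitions, the one-hot state-action encoding and the regressor
# choice below are illustrative assumptions only.
if __name__ == '__main__':
    rng = numpy.random.RandomState(0)
    n, n_actions, gamma = 500, 3, 0.95
    S = rng.rand(n, 4)                    # states
    A = rng.randint(n_actions, size=n)    # actions
    R = rng.rand(n)                       # rewards
    Sp = rng.rand(n, 4)                   # next states
    onehot = numpy.eye(n_actions)
    X = numpy.hstack([S, onehot[A]])      # state-action features
    targets = R.copy()
    reg = tree.DecisionTreeRegressor(max_depth=5)
    for _ in range(20):
        reg.fit(X, targets)
        # Q(s', a') for every action, then the Bellman backup
        Qp = numpy.column_stack(
            [reg.predict(numpy.hstack([Sp, numpy.tile(onehot[a], (n, 1))]))
             for a in range(n_actions)])
        targets = R + gamma * Qp.max(axis=1)
    print('sample Q estimates: %s' % reg.predict(X[:3]))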
| gpl-3.0 |
lthurlow/Network-Grapher | proj/external/matplotlib-1.2.1/lib/mpl_examples/pylab_examples/colours.py | 12 | 1320 | #!/usr/bin/env python
# -*- noplot -*-
"""
Some simple functions to generate colours.
"""
import numpy as np
from matplotlib.colors import colorConverter
def pastel(colour, weight=2.4):
""" Convert colour into a nice pastel shade"""
rgb = np.asarray(colorConverter.to_rgb(colour))
# scale colour
maxc = max(rgb)
if maxc < 1.0 and maxc > 0:
# scale colour
scale = 1.0 / maxc
rgb = rgb * scale
# now decrease saturation
total = rgb.sum()
slack = 0
for x in rgb:
slack += 1.0 - x
# want to increase weight from total to weight
# pick x s.t. slack * x == weight - total
# x = (weight - total) / slack
x = (weight - total) / slack
rgb = [c + (x * (1.0-c)) for c in rgb]
return rgb
def get_colours(n):
""" Return n pastel colours. """
base = np.asarray([[1,0,0], [0,1,0], [0,0,1]])
if n <= 3:
return base[0:n]
    # how many new colours do we need to insert between
# red and green and between green and blue?
    # use integer division so np.linspace receives an integer count under Python 3
    needed = (((n - 3) + 1) // 2, (n - 3) // 2)
colours = []
for start in (0, 1):
for x in np.linspace(0, 1, needed[start]+2):
colours.append((base[start] * (1.0 - x)) +
(base[start+1] * x))
return [pastel(c) for c in colours[0:n]]
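# Minimal usage sketch, not part of the original example: print a handful of
# pastel colours produced by get_colours; the count below is arbitrary.
if __name__ == '__main__':
    for colour in get_colours(5):
        print(colour)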
| mit |
msbeta/apollo | modules/tools/navigation/planning/navigation_mobileye_debug.py | 4 | 5785 | #!/usr/bin/env python
###############################################################################
# Copyright 2017 The Apollo Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
import rospy
import time
import json
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from std_msgs.msg import String
from numpy.polynomial.polynomial import polyval
from modules.drivers.proto import mobileye_pb2
from modules.canbus.proto import chassis_pb2
from modules.localization.proto import localization_pb2
from path_decider import PathDecider
from speed_decider import SpeedDecider
from trajectory_generator import TrajectoryGenerator
from provider_mobileye import MobileyeProvider
from provider_chassis import ChassisProvider
from provider_localization import LocalizationProvider
from provider_routing import RoutingProvider
path_decider = PathDecider()
speed_decider = SpeedDecider()
traj_generator = TrajectoryGenerator()
mobileye_provider = MobileyeProvider()
chassis_provider = ChassisProvider()
localization_provider = LocalizationProvider()
routing_provider = RoutingProvider()
nx = []
ny = []
local_seg_x = []
local_seg_y = []
local_smooth_seg_x = []
local_smooth_seg_y = []
left_marker_x = []
left_marker_y = []
right_marker_x = []
right_marker_y = []
history_x = []
history_y = []
def routing_callback(routing_str):
routing_provider.update(routing_str)
def localization_callback(localization_pb):
localization_provider.update(localization_pb)
def chassis_callback(chassis_pb):
chassis_provider.update(chassis_pb)
def mobileye_callback(mobileye_pb):
global nx, ny, local_seg_x, local_seg_y
global left_marker_x, left_marker_y
global right_marker_x, right_marker_y
global local_smooth_seg_x, local_smooth_seg_y
global history_x, history_y
mobileye_provider.update(mobileye_pb)
mobileye_provider.process_obstacles()
if localization_provider.localization_pb is None:
return
vx = localization_provider.localization_pb.pose.position.x
vy = localization_provider.localization_pb.pose.position.y
heading = localization_provider.localization_pb.pose.heading
speed = chassis_provider.get_speed_mps()
mobileye_provider.process_history(heading, speed)
hist_x = []
hist_y = []
for line in mobileye_provider.history_left_lines:
if line is None:
continue
x = []
y = []
for p in line.coords:
x.append(p[0])
y.append(-p[1])
hist_x.append(x)
hist_y.append(y)
history_x = hist_x
history_y = hist_y
local_seg_x, local_seg_y = routing_provider.get_local_segment(vx, vy,
heading)
local_smooth_seg_x, local_smooth_seg_y = routing_provider.get_local_segment_spline(
vx, vy, heading)
left_marker_coef = mobileye_provider.left_lm_coef
left_marker_x = []
left_marker_y = []
for x in range(int(mobileye_provider.left_lane_marker_range)):
y = polyval(x, left_marker_coef)
left_marker_x.append(x)
left_marker_y.append(-y)
right_marker_coef = mobileye_provider.right_lm_coef
right_marker_x = []
right_marker_y = []
for x in range(int(mobileye_provider.right_lane_marker_range)):
y = polyval(x, right_marker_coef)
right_marker_x.append(x)
right_marker_y.append(-y)
def add_listener():
rospy.init_node("mobileye_debug", anonymous=True)
rospy.Subscriber('/apollo/sensor/mobileye',
mobileye_pb2.Mobileye,
mobileye_callback)
rospy.Subscriber('/apollo/localization/pose',
localization_pb2.LocalizationEstimate,
localization_callback)
rospy.Subscriber('/apollo/navigation/routing',
String, routing_callback)
rospy.Subscriber('/apollo/canbus/chassis',
chassis_pb2.Chassis,
chassis_callback)
def update(frame_number):
line2.set_xdata(local_seg_x)
line2.set_ydata(local_seg_y)
line1.set_xdata(left_marker_x)
line1.set_ydata(left_marker_y)
line4.set_xdata(right_marker_x)
line4.set_ydata(right_marker_y)
line3.set_xdata(local_smooth_seg_x)
line3.set_ydata(local_smooth_seg_y)
for l in lines:
l.set_xdata([0])
l.set_ydata([0])
for i in range(len(history_x)):
x = history_x[i]
y = history_y[i]
lines[i].set_xdata(x)
lines[i].set_ydata(y)
if __name__ == '__main__':
DEBUG = False
line1 = None
line2 = None
add_listener()
fig = plt.figure()
ax = plt.subplot2grid((1, 1), (0, 0), rowspan=1, colspan=1)
line1, = ax.plot([0], [0], 'r-')
line4, = ax.plot([0], [0], 'k-')
line2, = ax.plot([0], [0], 'b-')
line3, = ax.plot([0], [0], 'g--', lw=3)
lines = []
for i in range(50):
line, = ax.plot([0], [0], '.')
lines.append(line)
ani = animation.FuncAnimation(fig, update, interval=100)
ax.axvline(x=0.0, alpha=0.3)
ax.axhline(y=0.0, alpha=0.3)
ax.set_xlim([-2, 100])
ax.set_ylim([-10, 10])
plt.show()
| apache-2.0 |
Fireblend/scikit-learn | examples/cluster/plot_lena_segmentation.py | 271 | 2444 | """
=========================================
Segmenting the picture of Lena in regions
=========================================
This example uses :ref:`spectral_clustering` on a graph created from
voxel-to-voxel difference on an image to break this image into multiple
partly-homogeneous regions.
This procedure (spectral clustering on an image) is an efficient
approximate solution for finding normalized graph cuts.
There are two options to assign labels:
* with 'kmeans' spectral clustering will cluster samples in the embedding space
using a kmeans algorithm
* whereas 'discrete' will iteratively search for the closest partition
space to the embedding space.
"""
print(__doc__)
# Author: Gael Varoquaux <[email protected]>, Brian Cheung
# License: BSD 3 clause
import time
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
from sklearn.feature_extraction import image
from sklearn.cluster import spectral_clustering
lena = sp.misc.lena()
# Downsample the image by a factor of 4
lena = lena[::2, ::2] + lena[1::2, ::2] + lena[::2, 1::2] + lena[1::2, 1::2]
lena = lena[::2, ::2] + lena[1::2, ::2] + lena[::2, 1::2] + lena[1::2, 1::2]
# Convert the image into a graph with the value of the gradient on the
# edges.
graph = image.img_to_graph(lena)
# Take a decreasing function of the gradient: an exponential
# The smaller beta is, the more independent the segmentation is of the
# actual image. For beta=1, the segmentation is close to a Voronoi partition.
beta = 5
eps = 1e-6
graph.data = np.exp(-beta * graph.data / lena.std()) + eps
# Apply spectral clustering (this step goes much faster if you have pyamg
# installed)
N_REGIONS = 11
###############################################################################
# Visualize the resulting regions
for assign_labels in ('kmeans', 'discretize'):
t0 = time.time()
labels = spectral_clustering(graph, n_clusters=N_REGIONS,
assign_labels=assign_labels,
random_state=1)
t1 = time.time()
labels = labels.reshape(lena.shape)
plt.figure(figsize=(5, 5))
plt.imshow(lena, cmap=plt.cm.gray)
for l in range(N_REGIONS):
plt.contour(labels == l, contours=1,
colors=[plt.cm.spectral(l / float(N_REGIONS)), ])
plt.xticks(())
plt.yticks(())
plt.title('Spectral clustering: %s, %.2fs' % (assign_labels, (t1 - t0)))
plt.show()
| bsd-3-clause |
chrisburr/scikit-learn | examples/covariance/plot_mahalanobis_distances.py | 348 | 6232 | r"""
================================================================
Robust covariance estimation and Mahalanobis distances relevance
================================================================
An example to show covariance estimation with the Mahalanobis
distances on Gaussian distributed data.
For Gaussian distributed data, the distance of an observation
:math:`x_i` to the mode of the distribution can be computed using its
Mahalanobis distance: :math:`d_{(\mu,\Sigma)}(x_i)^2 = (x_i -
\mu)'\Sigma^{-1}(x_i - \mu)` where :math:`\mu` and :math:`\Sigma` are
the location and the covariance of the underlying Gaussian
distribution.
In practice, :math:`\mu` and :math:`\Sigma` are replaced by some
estimates. The usual covariance maximum likelihood estimate is very
sensitive to the presence of outliers in the data set and, therefore, so
are the corresponding Mahalanobis distances. One should instead
use a robust estimator of covariance to guarantee that the estimation is
resistant to "erroneous" observations in the data set and that the
associated Mahalanobis distances accurately reflect the true
organisation of the observations.
The Minimum Covariance Determinant estimator is a robust,
high-breakdown point (i.e. it can be used to estimate the covariance
matrix of highly contaminated datasets, up to
:math:`\frac{n_\text{samples}-n_\text{features}-1}{2}` outliers)
estimator of covariance. The idea is to find
:math:`\frac{n_\text{samples}+n_\text{features}+1}{2}`
observations whose empirical covariance has the smallest determinant,
yielding a "pure" subset of observations from which to compute
standards estimates of location and covariance.
The Minimum Covariance Determinant estimator (MCD) has been introduced
by P. J. Rousseeuw in [1].
This example illustrates how the Mahalanobis distances are affected by
outlying data: observations drawn from a contaminating distribution
are not distinguishable from the observations coming from the real,
Gaussian distribution that one may want to work with. Using MCD-based
Mahalanobis distances, the two populations become
distinguishable. Associated applications are outliers detection,
observations ranking, clustering, ...
For visualization purposes, the cube root of the Mahalanobis distances
is represented in the boxplot, as Wilson and Hilferty suggest [2]
[1] P. J. Rousseeuw. Least median of squares regression. J. Am
Stat Ass, 79:871, 1984.
[2] Wilson, E. B., & Hilferty, M. M. (1931). The distribution of chi-square.
Proceedings of the National Academy of Sciences of the United States
of America, 17, 684-688.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.covariance import EmpiricalCovariance, MinCovDet
n_samples = 125
n_outliers = 25
n_features = 2
# generate data
gen_cov = np.eye(n_features)
gen_cov[0, 0] = 2.
X = np.dot(np.random.randn(n_samples, n_features), gen_cov)
# add some outliers
outliers_cov = np.eye(n_features)
outliers_cov[np.arange(1, n_features), np.arange(1, n_features)] = 7.
X[-n_outliers:] = np.dot(np.random.randn(n_outliers, n_features), outliers_cov)
# fit a Minimum Covariance Determinant (MCD) robust estimator to data
robust_cov = MinCovDet().fit(X)
# compare estimators learnt from the full data set with true parameters
emp_cov = EmpiricalCovariance().fit(X)
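# Hedged illustration, not part of the original example: the squared
# Mahalanobis distance d^2 = (x - mu)' Sigma^{-1} (x - mu) quoted in the
# docstring can be computed by hand for one observation and cross-checked
# against scipy.spatial.distance.mahalanobis (which returns the unsquared
# distance). The underscore-prefixed names are introduced only for this check.
from scipy.spatial.distance import mahalanobis as _scipy_mahalanobis
_mu = emp_cov.location_
_prec = emp_cov.get_precision()
_d2 = np.dot(np.dot(X[0] - _mu, _prec), X[0] - _mu)
assert np.isclose(np.sqrt(_d2), _scipy_mahalanobis(X[0], _mu, _prec))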
###############################################################################
# Display results
fig = plt.figure()
plt.subplots_adjust(hspace=-.1, wspace=.4, top=.95, bottom=.05)
# Show data set
subfig1 = plt.subplot(3, 1, 1)
inlier_plot = subfig1.scatter(X[:, 0], X[:, 1],
color='black', label='inliers')
outlier_plot = subfig1.scatter(X[:, 0][-n_outliers:], X[:, 1][-n_outliers:],
color='red', label='outliers')
subfig1.set_xlim(subfig1.get_xlim()[0], 11.)
subfig1.set_title("Mahalanobis distances of a contaminated data set:")
# Show contours of the distance functions
xx, yy = np.meshgrid(np.linspace(plt.xlim()[0], plt.xlim()[1], 100),
np.linspace(plt.ylim()[0], plt.ylim()[1], 100))
zz = np.c_[xx.ravel(), yy.ravel()]
mahal_emp_cov = emp_cov.mahalanobis(zz)
mahal_emp_cov = mahal_emp_cov.reshape(xx.shape)
emp_cov_contour = subfig1.contour(xx, yy, np.sqrt(mahal_emp_cov),
cmap=plt.cm.PuBu_r,
linestyles='dashed')
mahal_robust_cov = robust_cov.mahalanobis(zz)
mahal_robust_cov = mahal_robust_cov.reshape(xx.shape)
robust_contour = subfig1.contour(xx, yy, np.sqrt(mahal_robust_cov),
cmap=plt.cm.YlOrBr_r, linestyles='dotted')
subfig1.legend([emp_cov_contour.collections[1], robust_contour.collections[1],
inlier_plot, outlier_plot],
['MLE dist', 'robust dist', 'inliers', 'outliers'],
loc="upper right", borderaxespad=0)
plt.xticks(())
plt.yticks(())
# Plot the scores for each point
emp_mahal = emp_cov.mahalanobis(X - np.mean(X, 0)) ** (0.33)
subfig2 = plt.subplot(2, 2, 3)
subfig2.boxplot([emp_mahal[:-n_outliers], emp_mahal[-n_outliers:]], widths=.25)
subfig2.plot(1.26 * np.ones(n_samples - n_outliers),
emp_mahal[:-n_outliers], '+k', markeredgewidth=1)
subfig2.plot(2.26 * np.ones(n_outliers),
emp_mahal[-n_outliers:], '+k', markeredgewidth=1)
subfig2.axes.set_xticklabels(('inliers', 'outliers'), size=15)
subfig2.set_ylabel(r"$\sqrt[3]{\rm{(Mahal. dist.)}}$", size=16)
subfig2.set_title("1. from non-robust estimates\n(Maximum Likelihood)")
plt.yticks(())
robust_mahal = robust_cov.mahalanobis(X - robust_cov.location_) ** (0.33)
subfig3 = plt.subplot(2, 2, 4)
subfig3.boxplot([robust_mahal[:-n_outliers], robust_mahal[-n_outliers:]],
widths=.25)
subfig3.plot(1.26 * np.ones(n_samples - n_outliers),
robust_mahal[:-n_outliers], '+k', markeredgewidth=1)
subfig3.plot(2.26 * np.ones(n_outliers),
robust_mahal[-n_outliers:], '+k', markeredgewidth=1)
subfig3.axes.set_xticklabels(('inliers', 'outliers'), size=15)
subfig3.set_ylabel(r"$\sqrt[3]{\rm{(Mahal. dist.)}}$", size=16)
subfig3.set_title("2. from robust estimates\n(Minimum Covariance Determinant)")
plt.yticks(())
plt.show()
| bsd-3-clause |
idlead/scikit-learn | examples/ensemble/plot_gradient_boosting_regularization.py | 355 | 2843 | """
================================
Gradient Boosting regularization
================================
Illustration of the effect of different regularization strategies
for Gradient Boosting. The example is taken from Hastie et al 2009.
The loss function used is binomial deviance. Regularization via
shrinkage (``learning_rate < 1.0``) improves performance considerably.
In combination with shrinkage, stochastic gradient boosting
(``subsample < 1.0``) can produce more accurate models by reducing the
variance via bagging.
Subsampling without shrinkage usually does poorly.
Another strategy to reduce the variance is by subsampling the features
analogous to the random splits in Random Forests
(via the ``max_features`` parameter).
.. [1] T. Hastie, R. Tibshirani and J. Friedman, "Elements of Statistical
Learning Ed. 2", Springer, 2009.
"""
print(__doc__)
# Author: Peter Prettenhofer <[email protected]>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import ensemble
from sklearn import datasets
X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
X = X.astype(np.float32)
# map labels from {-1, 1} to {0, 1}
labels, y = np.unique(y, return_inverse=True)
X_train, X_test = X[:2000], X[2000:]
y_train, y_test = y[:2000], y[2000:]
original_params = {'n_estimators': 1000, 'max_leaf_nodes': 4, 'max_depth': None, 'random_state': 2,
'min_samples_split': 5}
plt.figure()
for label, color, setting in [('No shrinkage', 'orange',
{'learning_rate': 1.0, 'subsample': 1.0}),
('learning_rate=0.1', 'turquoise',
{'learning_rate': 0.1, 'subsample': 1.0}),
('subsample=0.5', 'blue',
{'learning_rate': 1.0, 'subsample': 0.5}),
('learning_rate=0.1, subsample=0.5', 'gray',
{'learning_rate': 0.1, 'subsample': 0.5}),
('learning_rate=0.1, max_features=2', 'magenta',
{'learning_rate': 0.1, 'max_features': 2})]:
params = dict(original_params)
params.update(setting)
clf = ensemble.GradientBoostingClassifier(**params)
clf.fit(X_train, y_train)
# compute test set deviance
test_deviance = np.zeros((params['n_estimators'],), dtype=np.float64)
for i, y_pred in enumerate(clf.staged_decision_function(X_test)):
# clf.loss_ assumes that y_test[i] in {0, 1}
test_deviance[i] = clf.loss_(y_test, y_pred)
plt.plot((np.arange(test_deviance.shape[0]) + 1)[::5], test_deviance[::5],
'-', color=color, label=label)
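    # Hedged aside, not part of the original example: also report the final
    # test-set deviance for this setting, so the effect of each regularization
    # strategy is visible numerically as well as in the plot.
    print("%s: final test deviance = %.4f" % (label, test_deviance[-1]))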
plt.legend(loc='upper left')
plt.xlabel('Boosting Iterations')
plt.ylabel('Test Set Deviance')
plt.show()
| bsd-3-clause |
trabucayre/gnuradio | gr-filter/examples/channelize.py | 1 | 6224 | #!/usr/bin/env python
#
# Copyright 2009,2012,2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# SPDX-License-Identifier: GPL-3.0-or-later
#
#
from gnuradio import gr
from gnuradio import blocks
from gnuradio import filter
import sys, time
import numpy
try:
from gnuradio import analog
except ImportError:
sys.stderr.write("Error: Program requires gr-analog.\n")
sys.exit(1)
try:
import pylab
from pylab import mlab
except ImportError:
sys.stderr.write("Error: Program requires matplotlib (see: matplotlib.sourceforge.net).\n")
sys.exit(1)
class pfb_top_block(gr.top_block):
def __init__(self):
gr.top_block.__init__(self)
self._N = 2000000 # number of samples to use
self._fs = 1000 # initial sampling rate
self._M = M = 9 # Number of channels to channelize
self._ifs = M*self._fs # initial sampling rate
# Create a set of taps for the PFB channelizer
self._taps = filter.firdes.low_pass_2(1, self._ifs, 475.50, 50,
attenuation_dB=100,
window=filter.firdes.WIN_BLACKMAN_hARRIS)
# Calculate the number of taps per channel for our own information
tpc = numpy.ceil(float(len(self._taps)) / float(self._M))
print("Number of taps: ", len(self._taps))
print("Number of channels: ", self._M)
print("Taps per channel: ", tpc)
# Create a set of signals at different frequencies
# freqs lists the frequencies of the signals that get stored
# in the list "signals", which then get summed together
self.signals = list()
self.add = blocks.add_cc()
freqs = [-70, -50, -30, -10, 10, 20, 40, 60, 80]
for i in range(len(freqs)):
            # use integer division so the tone offsets stay on exact channel centers under Python 3
            f = freqs[i] + (M // 2 - M + i + 1)*self._fs
self.signals.append(analog.sig_source_c(self._ifs, analog.GR_SIN_WAVE, f, 1))
self.connect(self.signals[i], (self.add,i))
self.head = blocks.head(gr.sizeof_gr_complex, self._N)
# Construct the channelizer filter
self.pfb = filter.pfb.channelizer_ccf(self._M, self._taps, 1)
# Construct a vector sink for the input signal to the channelizer
self.snk_i = blocks.vector_sink_c()
# Connect the blocks
self.connect(self.add, self.head, self.pfb)
self.connect(self.add, self.snk_i)
# Use this to play with the channel mapping
#self.pfb.set_channel_map([5,6,7,8,0,1,2,3,4])
# Create a vector sink for each of M output channels of the filter and connect it
self.snks = list()
for i in range(self._M):
self.snks.append(blocks.vector_sink_c())
self.connect((self.pfb, i), self.snks[i])
def main():
tstart = time.time()
tb = pfb_top_block()
tb.run()
tend = time.time()
print("Run time: %f" % (tend - tstart))
if 1:
fig_in = pylab.figure(1, figsize=(16,9), facecolor="w")
fig1 = pylab.figure(2, figsize=(16,9), facecolor="w")
fig2 = pylab.figure(3, figsize=(16,9), facecolor="w")
Ns = 1000
Ne = 10000
fftlen = 8192
winfunc = numpy.blackman
fs = tb._ifs
# Plot the input signal on its own figure
d = tb.snk_i.data()[Ns:Ne]
spin_f = fig_in.add_subplot(2, 1, 1)
        X, freq = mlab.psd(d, NFFT=fftlen, noverlap=fftlen // 4, Fs=fs,
window = lambda d: d*winfunc(fftlen),
scale_by_freq=True)
X_in = 10.0*numpy.log10(abs(X))
f_in = numpy.arange(-fs / 2.0, fs / 2.0, fs / float(X_in.size))
pin_f = spin_f.plot(f_in, X_in, "b")
spin_f.set_xlim([min(f_in), max(f_in)+1])
spin_f.set_ylim([-200.0, 50.0])
spin_f.set_title("Input Signal", weight="bold")
spin_f.set_xlabel("Frequency (Hz)")
spin_f.set_ylabel("Power (dBW)")
Ts = 1.0 / fs
Tmax = len(d)*Ts
t_in = numpy.arange(0, Tmax, Ts)
x_in = numpy.array(d)
spin_t = fig_in.add_subplot(2, 1, 2)
pin_t = spin_t.plot(t_in, x_in.real, "b")
pin_t = spin_t.plot(t_in, x_in.imag, "r")
spin_t.set_xlabel("Time (s)")
spin_t.set_ylabel("Amplitude")
Ncols = int(numpy.floor(numpy.sqrt(tb._M)))
Nrows = int(numpy.floor(tb._M / Ncols))
if(tb._M % Ncols != 0):
Nrows += 1
# Plot each of the channels outputs. Frequencies on Figure 2 and
# time signals on Figure 3
fs_o = tb._fs
Ts_o = 1.0 / fs_o
Tmax_o = len(d)*Ts_o
for i in range(len(tb.snks)):
# remove issues with the transients at the beginning
# also remove some corruption at the end of the stream
# this is a bug, probably due to the corner cases
d = tb.snks[i].data()[Ns:Ne]
sp1_f = fig1.add_subplot(Nrows, Ncols, 1+i)
            X, freq = mlab.psd(d, NFFT=fftlen, noverlap=fftlen // 4, Fs=fs_o,
window = lambda d: d*winfunc(fftlen),
scale_by_freq=True)
X_o = 10.0*numpy.log10(abs(X))
f_o = numpy.arange(-fs_o / 2.0, fs_o / 2.0, fs_o / float(X_o.size))
p2_f = sp1_f.plot(f_o, X_o, "b")
sp1_f.set_xlim([min(f_o), max(f_o)+1])
sp1_f.set_ylim([-200.0, 50.0])
sp1_f.set_title(("Channel %d" % i), weight="bold")
sp1_f.set_xlabel("Frequency (Hz)")
sp1_f.set_ylabel("Power (dBW)")
x_o = numpy.array(d)
t_o = numpy.arange(0, Tmax_o, Ts_o)
sp2_o = fig2.add_subplot(Nrows, Ncols, 1+i)
p2_o = sp2_o.plot(t_o, x_o.real, "b")
p2_o = sp2_o.plot(t_o, x_o.imag, "r")
sp2_o.set_xlim([min(t_o), max(t_o)+1])
sp2_o.set_ylim([-2, 2])
sp2_o.set_title(("Channel %d" % i), weight="bold")
sp2_o.set_xlabel("Time (s)")
sp2_o.set_ylabel("Amplitude")
pylab.show()
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
pass
| gpl-3.0 |
yanlend/scikit-learn | examples/mixture/plot_gmm_pdf.py | 284 | 1528 | """
=============================================
Density Estimation for a mixture of Gaussians
=============================================
Plot the density estimation of a mixture of two Gaussians. Data is
generated from two Gaussians with different centers and covariance
matrices.
"""
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm
from sklearn import mixture
n_samples = 300
# generate random sample, two components
np.random.seed(0)
# generate spherical data centered on (20, 20)
shifted_gaussian = np.random.randn(n_samples, 2) + np.array([20, 20])
# generate zero centered stretched Gaussian data
C = np.array([[0., -0.7], [3.5, .7]])
stretched_gaussian = np.dot(np.random.randn(n_samples, 2), C)
# concatenate the two datasets into the final training set
X_train = np.vstack([shifted_gaussian, stretched_gaussian])
# fit a Gaussian Mixture Model with two components
clf = mixture.GMM(n_components=2, covariance_type='full')
clf.fit(X_train)
# display predicted scores by the model as a contour plot
x = np.linspace(-20.0, 30.0)
y = np.linspace(-20.0, 40.0)
X, Y = np.meshgrid(x, y)
XX = np.array([X.ravel(), Y.ravel()]).T
Z = -clf.score_samples(XX)[0]
Z = Z.reshape(X.shape)
CS = plt.contour(X, Y, Z, norm=LogNorm(vmin=1.0, vmax=1000.0),
levels=np.logspace(0, 3, 10))
CB = plt.colorbar(CS, shrink=0.8, extend='both')
plt.scatter(X_train[:, 0], X_train[:, 1], .8)
plt.title('Negative log-likelihood predicted by a GMM')
plt.axis('tight')
plt.show()
| bsd-3-clause |
dingocuster/scikit-learn | sklearn/cluster/mean_shift_.py | 96 | 15434 | """Mean shift clustering algorithm.
Mean shift clustering aims to discover *blobs* in a smooth density of
samples. It is a centroid based algorithm, which works by updating candidates
for centroids to be the mean of the points within a given region. These
candidates are then filtered in a post-processing stage to eliminate
near-duplicates to form the final set of centroids.
Seeding is performed using a binning technique for scalability.
"""
# Authors: Conrad Lee <[email protected]>
# Alexandre Gramfort <[email protected]>
# Gael Varoquaux <[email protected]>
# Martino Sorbaro <[email protected]>
import numpy as np
import warnings
from collections import defaultdict
from ..externals import six
from ..utils.validation import check_is_fitted
from ..utils import extmath, check_random_state, gen_batches, check_array
from ..base import BaseEstimator, ClusterMixin
from ..neighbors import NearestNeighbors
from ..metrics.pairwise import pairwise_distances_argmin
from ..externals.joblib import Parallel
from ..externals.joblib import delayed
def estimate_bandwidth(X, quantile=0.3, n_samples=None, random_state=0):
"""Estimate the bandwidth to use with the mean-shift algorithm.
    Note that this function takes time at least quadratic in n_samples. For
    large datasets, it is wise to set the n_samples parameter to a small value.
Parameters
----------
X : array-like, shape=[n_samples, n_features]
Input points.
quantile : float, default 0.3
should be between [0, 1]
0.5 means that the median of all pairwise distances is used.
n_samples : int, optional
The number of samples to use. If not given, all samples are used.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
Returns
-------
bandwidth : float
The bandwidth parameter.
"""
random_state = check_random_state(random_state)
if n_samples is not None:
idx = random_state.permutation(X.shape[0])[:n_samples]
X = X[idx]
nbrs = NearestNeighbors(n_neighbors=int(X.shape[0] * quantile))
nbrs.fit(X)
bandwidth = 0.
for batch in gen_batches(len(X), 500):
d, _ = nbrs.kneighbors(X[batch, :], return_distance=True)
bandwidth += np.max(d, axis=1).sum()
return bandwidth / X.shape[0]
# separate function for each seed's iterative loop
def _mean_shift_single_seed(my_mean, X, nbrs, max_iter):
# For each seed, climb gradient until convergence or max_iter
bandwidth = nbrs.get_params()['radius']
stop_thresh = 1e-3 * bandwidth # when mean has converged
completed_iterations = 0
while True:
# Find mean of points within bandwidth
i_nbrs = nbrs.radius_neighbors([my_mean], bandwidth,
return_distance=False)[0]
points_within = X[i_nbrs]
if len(points_within) == 0:
break # Depending on seeding strategy this condition may occur
my_old_mean = my_mean # save the old mean
my_mean = np.mean(points_within, axis=0)
# If converged or at max_iter, adds the cluster
if (extmath.norm(my_mean - my_old_mean) < stop_thresh or
completed_iterations == max_iter):
return tuple(my_mean), len(points_within)
completed_iterations += 1
def mean_shift(X, bandwidth=None, seeds=None, bin_seeding=False,
min_bin_freq=1, cluster_all=True, max_iter=300,
max_iterations=None, n_jobs=1):
"""Perform mean shift clustering of data using a flat kernel.
Read more in the :ref:`User Guide <mean_shift>`.
Parameters
----------
X : array-like, shape=[n_samples, n_features]
Input data.
bandwidth : float, optional
Kernel bandwidth.
If bandwidth is not given, it is determined using a heuristic based on
the median of all pairwise distances. This will take quadratic time in
the number of samples. The sklearn.cluster.estimate_bandwidth function
can be used to do this more efficiently.
seeds : array-like, shape=[n_seeds, n_features] or None
Point used as initial kernel locations. If None and bin_seeding=False,
each data point is used as a seed. If None and bin_seeding=True,
see bin_seeding.
bin_seeding : boolean, default=False
If true, initial kernel locations are not locations of all
points, but rather the location of the discretized version of
points, where points are binned onto a grid whose coarseness
corresponds to the bandwidth. Setting this option to True will speed
up the algorithm because fewer seeds will be initialized.
Ignored if seeds argument is not None.
min_bin_freq : int, default=1
To speed up the algorithm, accept only those bins with at least
min_bin_freq points as seeds.
cluster_all : boolean, default True
If true, then all points are clustered, even those orphans that are
not within any kernel. Orphans are assigned to the nearest kernel.
If false, then orphans are given cluster label -1.
max_iter : int, default 300
Maximum number of iterations, per seed point before the clustering
        operation terminates (for that seed point), if it has not converged yet.
n_jobs : int
The number of jobs to use for the computation. This works by computing
each of the n_init runs in parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
Returns
-------
cluster_centers : array, shape=[n_clusters, n_features]
Coordinates of cluster centers.
labels : array, shape=[n_samples]
Cluster labels for each point.
Notes
-----
See examples/cluster/plot_meanshift.py for an example.
"""
# FIXME To be removed in 0.18
if max_iterations is not None:
warnings.warn("The `max_iterations` parameter has been renamed to "
"`max_iter` from version 0.16. The `max_iterations` "
"parameter will be removed in 0.18", DeprecationWarning)
max_iter = max_iterations
if bandwidth is None:
bandwidth = estimate_bandwidth(X)
elif bandwidth <= 0:
raise ValueError("bandwidth needs to be greater than zero or None,\
got %f" % bandwidth)
if seeds is None:
if bin_seeding:
seeds = get_bin_seeds(X, bandwidth, min_bin_freq)
else:
seeds = X
n_samples, n_features = X.shape
center_intensity_dict = {}
nbrs = NearestNeighbors(radius=bandwidth).fit(X)
# execute iterations on all seeds in parallel
all_res = Parallel(n_jobs=n_jobs)(
delayed(_mean_shift_single_seed)
(seed, X, nbrs, max_iter) for seed in seeds)
# copy results in a dictionary
for i in range(len(seeds)):
if all_res[i] is not None:
center_intensity_dict[all_res[i][0]] = all_res[i][1]
if not center_intensity_dict:
# nothing near seeds
raise ValueError("No point was within bandwidth=%f of any seed."
" Try a different seeding strategy \
or increase the bandwidth."
% bandwidth)
# POST PROCESSING: remove near duplicate points
# If the distance between two kernels is less than the bandwidth,
# then we have to remove one because it is a duplicate. Remove the
# one with fewer points.
sorted_by_intensity = sorted(center_intensity_dict.items(),
key=lambda tup: tup[1], reverse=True)
sorted_centers = np.array([tup[0] for tup in sorted_by_intensity])
unique = np.ones(len(sorted_centers), dtype=np.bool)
nbrs = NearestNeighbors(radius=bandwidth).fit(sorted_centers)
for i, center in enumerate(sorted_centers):
if unique[i]:
neighbor_idxs = nbrs.radius_neighbors([center],
return_distance=False)[0]
unique[neighbor_idxs] = 0
unique[i] = 1 # leave the current point as unique
cluster_centers = sorted_centers[unique]
# ASSIGN LABELS: a point belongs to the cluster that it is closest to
nbrs = NearestNeighbors(n_neighbors=1).fit(cluster_centers)
labels = np.zeros(n_samples, dtype=np.int)
distances, idxs = nbrs.kneighbors(X)
if cluster_all:
labels = idxs.flatten()
else:
labels.fill(-1)
bool_selector = distances.flatten() <= bandwidth
labels[bool_selector] = idxs.flatten()[bool_selector]
return cluster_centers, labels
def get_bin_seeds(X, bin_size, min_bin_freq=1):
"""Finds seeds for mean_shift.
Finds seeds by first binning data onto a grid whose lines are
spaced bin_size apart, and then choosing those bins with at least
min_bin_freq points.
Parameters
----------
X : array-like, shape=[n_samples, n_features]
Input points, the same points that will be used in mean_shift.
bin_size : float
Controls the coarseness of the binning. Smaller values lead
to more seeding (which is computationally more expensive). If you're
not sure how to set this, set it to the value of the bandwidth used
in clustering.mean_shift.
min_bin_freq : integer, optional
Only bins with at least min_bin_freq will be selected as seeds.
Raising this value decreases the number of seeds found, which
makes mean_shift computationally cheaper.
Returns
-------
bin_seeds : array-like, shape=[n_samples, n_features]
Points used as initial kernel positions in clustering.mean_shift.
"""
# Bin points
bin_sizes = defaultdict(int)
for point in X:
binned_point = np.round(point / bin_size)
bin_sizes[tuple(binned_point)] += 1
# Select only those bins as seeds which have enough members
bin_seeds = np.array([point for point, freq in six.iteritems(bin_sizes) if
freq >= min_bin_freq], dtype=np.float32)
if len(bin_seeds) == len(X):
warnings.warn("Binning data failed with provided bin_size=%f,"
" using data points as seeds." % bin_size)
return X
bin_seeds = bin_seeds * bin_size
return bin_seeds
class MeanShift(BaseEstimator, ClusterMixin):
"""Mean shift clustering using a flat kernel.
Mean shift clustering aims to discover "blobs" in a smooth density of
samples. It is a centroid-based algorithm, which works by updating
candidates for centroids to be the mean of the points within a given
region. These candidates are then filtered in a post-processing stage to
eliminate near-duplicates to form the final set of centroids.
Seeding is performed using a binning technique for scalability.
Read more in the :ref:`User Guide <mean_shift>`.
Parameters
----------
bandwidth : float, optional
Bandwidth used in the RBF kernel.
If not given, the bandwidth is estimated using
sklearn.cluster.estimate_bandwidth; see the documentation for that
function for hints on scalability (see also the Notes, below).
seeds : array, shape=[n_samples, n_features], optional
Seeds used to initialize kernels. If not set,
the seeds are calculated by clustering.get_bin_seeds
with bandwidth as the grid size and default values for
other parameters.
bin_seeding : boolean, optional
If true, initial kernel locations are not locations of all
points, but rather the location of the discretized version of
points, where points are binned onto a grid whose coarseness
corresponds to the bandwidth. Setting this option to True will speed
up the algorithm because fewer seeds will be initialized.
default value: False
Ignored if seeds argument is not None.
min_bin_freq : int, optional
To speed up the algorithm, accept only those bins with at least
min_bin_freq points as seeds. If not defined, set to 1.
cluster_all : boolean, default True
If true, then all points are clustered, even those orphans that are
not within any kernel. Orphans are assigned to the nearest kernel.
If false, then orphans are given cluster label -1.
n_jobs : int
The number of jobs to use for the computation. This works by computing
each of the n_init runs in parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
Attributes
----------
cluster_centers_ : array, [n_clusters, n_features]
Coordinates of cluster centers.
labels_ :
Labels of each point.
Notes
-----
Scalability:
Because this implementation uses a flat kernel and
    a Ball Tree to look up members of each kernel, the complexity tends
    towards O(T*n*log(n)) in lower dimensions, with n the number of samples
and T the number of points. In higher dimensions the complexity will
tend towards O(T*n^2).
Scalability can be boosted by using fewer seeds, for example by using
a higher value of min_bin_freq in the get_bin_seeds function.
Note that the estimate_bandwidth function is much less scalable than the
mean shift algorithm and will be the bottleneck if it is used.
References
----------
Dorin Comaniciu and Peter Meer, "Mean Shift: A robust approach toward
feature space analysis". IEEE Transactions on Pattern Analysis and
Machine Intelligence. 2002. pp. 603-619.
"""
def __init__(self, bandwidth=None, seeds=None, bin_seeding=False,
min_bin_freq=1, cluster_all=True, n_jobs=1):
self.bandwidth = bandwidth
self.seeds = seeds
self.bin_seeding = bin_seeding
self.cluster_all = cluster_all
self.min_bin_freq = min_bin_freq
self.n_jobs = n_jobs
def fit(self, X, y=None):
"""Perform clustering.
Parameters
-----------
X : array-like, shape=[n_samples, n_features]
Samples to cluster.
"""
X = check_array(X)
self.cluster_centers_, self.labels_ = \
mean_shift(X, bandwidth=self.bandwidth, seeds=self.seeds,
min_bin_freq=self.min_bin_freq,
bin_seeding=self.bin_seeding,
cluster_all=self.cluster_all, n_jobs=self.n_jobs)
return self
def predict(self, X):
"""Predict the closest cluster each sample in X belongs to.
Parameters
----------
X : {array-like, sparse matrix}, shape=[n_samples, n_features]
New data to predict.
Returns
-------
labels : array, shape [n_samples,]
Index of the cluster each sample belongs to.
"""
check_is_fitted(self, "cluster_centers_")
return pairwise_distances_argmin(X, self.cluster_centers_)
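# ---------------------------------------------------------------------------
# A minimal usage sketch, not part of the original module, guarded so it never
# runs on import. It assumes sklearn.datasets.make_blobs is importable; the
# data and parameter values below are arbitrary and only illustrative.
if __name__ == "__main__":
    from sklearn.datasets import make_blobs

    X_demo, _ = make_blobs(n_samples=300, centers=3, cluster_std=0.6,
                           random_state=0)
    bw = estimate_bandwidth(X_demo, quantile=0.2, n_samples=200)
    ms = MeanShift(bandwidth=bw, bin_seeding=True).fit(X_demo)
    print("estimated bandwidth: %.3f" % bw)
    print("number of clusters found: %d" % len(np.unique(ms.labels_)))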
| bsd-3-clause |
kiyoto/statsmodels | statsmodels/tsa/statespace/sarimax.py | 6 | 82403 | """
SARIMAX Model
Author: Chad Fulton
License: Simplified-BSD
"""
from __future__ import division, absolute_import, print_function
from warnings import warn
import numpy as np
import pandas as pd
from .kalman_filter import KalmanFilter, FilterResults
from .mlemodel import MLEModel, MLEResults, MLEResultsWrapper
from .tools import (
companion_matrix, diff, is_invertible, constrain_stationary_univariate,
unconstrain_stationary_univariate
)
from scipy.linalg import solve_discrete_lyapunov
from statsmodels.tools.tools import Bunch
from statsmodels.tools.data import _is_using_pandas
from statsmodels.tsa.tsatools import lagmat
from statsmodels.tools.decorators import cache_readonly
import statsmodels.base.wrapper as wrap
class SARIMAX(MLEModel):
r"""
Seasonal AutoRegressive Integrated Moving Average with eXogenous regressors
model
Parameters
----------
endog : array_like
The observed time-series process :math:`y`
exog : array_like, optional
Array of exogenous regressors, shaped nobs x k.
order : iterable or iterable of iterables, optional
The (p,d,q) order of the model for the number of AR parameters,
differences, and MA parameters. `d` must be an integer
indicating the integration order of the process, while
        `p` and `q` may either be integers indicating the AR and MA
orders (so that all lags up to those orders are included) or else
iterables giving specific AR and / or MA lags to include. Default is
an AR(1) model: (1,0,0).
seasonal_order : iterable, optional
The (P,D,Q,s) order of the seasonal component of the model for the
AR parameters, differences, MA parameters, and periodicity.
        `D` must be an integer indicating the integration order of the process,
        while `P` and `Q` may either be integers indicating the AR and MA
orders (so that all lags up to those orders are included) or else
iterables giving specific AR and / or MA lags to include. `s` is an
integer giving the periodicity (number of periods in season), often it
is 4 for quarterly data or 12 for monthly data. Default is no seasonal
effect.
trend : str{'n','c','t','ct'} or iterable, optional
Parameter controlling the deterministic trend polynomial :math:`A(t)`.
Can be specified as a string where 'c' indicates a constant (i.e. a
degree zero component of the trend polynomial), 't' indicates a
linear trend with time, and 'ct' is both. Can also be specified as an
iterable defining the polynomial as in `numpy.poly1d`, where
`[1,1,0,1]` would denote :math:`a + bt + ct^3`. Default is to not
include a trend component.
measurement_error : boolean, optional
Whether or not to assume the endogenous observations `endog` were
measured with error. Default is False.
time_varying_regression : boolean, optional
        Used when explanatory variables, `exog`, are provided, to select
        whether or not coefficients on the exogenous regressors are
allowed to vary over time. Default is False.
mle_regression : boolean, optional
        Whether or not to estimate the regression coefficients for the
exogenous variables as part of maximum likelihood estimation or through
the Kalman filter (i.e. recursive least squares). If
`time_varying_regression` is True, this must be set to False. Default
is True.
simple_differencing : boolean, optional
Whether or not to use partially conditional maximum likelihood
estimation. If True, differencing is performed prior to estimation,
        which discards the first :math:`s D + d` initial rows but results in a
smaller state-space formulation. If False, the full SARIMAX model is
put in state-space form so that all datapoints can be used in
estimation. Default is False.
enforce_stationarity : boolean, optional
Whether or not to transform the AR parameters to enforce stationarity
in the autoregressive component of the model. Default is True.
enforce_invertibility : boolean, optional
Whether or not to transform the MA parameters to enforce invertibility
in the moving average component of the model. Default is True.
hamilton_representation : boolean, optional
Whether or not to use the Hamilton representation of an ARMA process
(if True) or the Harvey representation (if False). Default is False.
**kwargs
Keyword arguments may be used to provide default values for state space
matrices or for Kalman filtering options. See `Representation`, and
`KalmanFilter` for more details.
Attributes
----------
measurement_error : boolean
Whether or not to assume the endogenous
observations `endog` were measured with error.
state_error : boolean
Whether or not the transition equation has an error component.
mle_regression : boolean
Whether or not the regression coefficients for
the exogenous variables were estimated via maximum
likelihood estimation.
state_regression : boolean
Whether or not the regression coefficients for
the exogenous variables are included as elements
of the state space and estimated via the Kalman
filter.
time_varying_regression : boolean
Whether or not coefficients on the exogenous
regressors are allowed to vary over time.
simple_differencing : boolean
Whether or not to use partially conditional maximum likelihood
estimation.
enforce_stationarity : boolean
Whether or not to transform the AR parameters
to enforce stationarity in the autoregressive
component of the model.
enforce_invertibility : boolean
Whether or not to transform the MA parameters
to enforce invertibility in the moving average
component of the model.
hamilton_representation : boolean
Whether or not to use the Hamilton representation of an ARMA process.
trend : str{'n','c','t','ct'} or iterable
Parameter controlling the deterministic
trend polynomial :math:`A(t)`. See the class
parameter documentation for more information.
polynomial_ar : array
Array containing autoregressive lag polynomial
coefficients, ordered from lowest degree to highest.
Initialized with ones, unless a coefficient is
constrained to be zero (in which case it is zero).
polynomial_ma : array
Array containing moving average lag polynomial
coefficients, ordered from lowest degree to highest.
Initialized with ones, unless a coefficient is
constrained to be zero (in which case it is zero).
polynomial_seasonal_ar : array
Array containing seasonal moving average lag
polynomial coefficients, ordered from lowest degree
to highest. Initialized with ones, unless a
coefficient is constrained to be zero (in which
case it is zero).
polynomial_seasonal_ma : array
Array containing seasonal moving average lag
polynomial coefficients, ordered from lowest degree
to highest. Initialized with ones, unless a
coefficient is constrained to be zero (in which
case it is zero).
polynomial_trend : array
Array containing trend polynomial coefficients,
ordered from lowest degree to highest. Initialized
with ones, unless a coefficient is constrained to be
zero (in which case it is zero).
k_ar : int
Highest autoregressive order in the model, zero-indexed.
k_ar_params : int
Number of autoregressive parameters to be estimated.
k_diff : int
        Order of integration.
k_ma : int
Highest moving average order in the model, zero-indexed.
k_ma_params : int
Number of moving average parameters to be estimated.
k_seasons : int
Number of periods in a season.
k_seasonal_ar : int
Highest seasonal autoregressive order in the model, zero-indexed.
k_seasonal_ar_params : int
Number of seasonal autoregressive parameters to be estimated.
k_seasonal_diff : int
        Order of seasonal integration.
k_seasonal_ma : int
Highest seasonal moving average order in the model, zero-indexed.
k_seasonal_ma_params : int
Number of seasonal moving average parameters to be estimated.
k_trend : int
Order of the trend polynomial plus one (i.e. the constant polynomial
would have `k_trend=1`).
k_exog : int
Number of exogenous regressors.
Notes
-----
The SARIMA model is specified :math:`(p, d, q) \times (P, D, Q)_s`.
.. math::
\phi_p (L) \tilde \phi_P (L^s) \Delta^d \Delta_s^D y_t = A(t) +
\theta_q (L) \tilde \theta_Q (L^s) \zeta_t
In terms of a univariate structural model, this can be represented as
.. math::
y_t & = u_t + \eta_t \\
\phi_p (L) \tilde \phi_P (L^s) \Delta^d \Delta_s^D u_t & = A(t) +
\theta_q (L) \tilde \theta_Q (L^s) \zeta_t
where :math:`\eta_t` is only applicable in the case of measurement error
(although it is also used in the case of a pure regression model, i.e. if
p=q=0).
In terms of this model, regression with SARIMA errors can be represented
easily as
.. math::
y_t & = \beta_t x_t + u_t \\
\phi_p (L) \tilde \phi_P (L^s) \Delta^d \Delta_s^D u_t & = A(t) +
\theta_q (L) \tilde \theta_Q (L^s) \zeta_t
this model is the one used when exogenous regressors are provided.
Note that the reduced form lag polynomials will be written as:
.. math::
\Phi (L) \equiv \phi_p (L) \tilde \phi_P (L^s) \\
\Theta (L) \equiv \theta_q (L) \tilde \theta_Q (L^s)
If `mle_regression` is True, regression coefficients are treated as
additional parameters to be estimated via maximum likelihood. Otherwise
they are included as part of the state with a diffuse initialization.
In this case, however, with approximate diffuse initialization, results
can be sensitive to the initial variance.
This class allows two different underlying representations of ARMA models
as state space models: that of Hamilton and that of Harvey. Both are
equivalent in the sense that they are analytical representations of the
ARMA model, but the state vectors of each have different meanings. For
this reason, maximum likelihood does not result in identical parameter
estimates and even the same set of parameters will result in different
loglikelihoods.
The Harvey representation is convenient because it allows integrating
differencing into the state vector to allow using all observations for
estimation.
In this implementation of differenced models, the Hamilton representation
    is not able to accommodate differencing in the state vector, so
`simple_differencing` (which performs differencing prior to estimation so
that the first d + sD observations are lost) must be used.
Many other packages use the Hamilton representation, so that tests against
Stata and R require using it along with simple differencing (as Stata
does).
Detailed information about state space models can be found in [1]_. Some
specific references are:
- Chapter 3.4 describes ARMA and ARIMA models in state space form (using
the Harvey representation), and gives references for basic seasonal
models and models with a multiplicative form (for example the airline
model). It also shows a state space model for a full ARIMA process (this
is what is done here if `simple_differencing=False`).
- Chapter 3.6 describes estimating regression effects via the Kalman filter
(this is performed if `mle_regression` is False), regression with
time-varying coefficients, and regression with ARMA errors (recall from
above that if regression effects are present, the model estimated by this
class is regression with SARIMA errors).
- Chapter 8.4 describes the application of an ARMA model to an example
dataset. A replication of this section is available in an example
IPython notebook in the documentation.
References
----------
.. [1] Durbin, James, and Siem Jan Koopman. 2012.
Time Series Analysis by State Space Methods: Second Edition.
Oxford University Press.
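    Examples
    --------
    Illustrative sketch only; ``endog`` is assumed to be a univariate time
    series (e.g. a pandas Series or 1-d ndarray) defined elsewhere, and the
    orders below are arbitrary:
    >>> mod = SARIMAX(endog, order=(1, 1, 1), seasonal_order=(1, 0, 0, 12))
    >>> res = mod.fit()
    >>> print(res.summary())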
"""
def __init__(self, endog, exog=None, order=(1, 0, 0),
seasonal_order=(0, 0, 0, 0), trend=None,
measurement_error=False, time_varying_regression=False,
mle_regression=True, simple_differencing=False,
enforce_stationarity=True, enforce_invertibility=True,
hamilton_representation=False, **kwargs):
# Model parameters
self.k_seasons = seasonal_order[3]
self.measurement_error = measurement_error
self.time_varying_regression = time_varying_regression
self.mle_regression = mle_regression
self.simple_differencing = simple_differencing
self.enforce_stationarity = enforce_stationarity
self.enforce_invertibility = enforce_invertibility
self.hamilton_representation = hamilton_representation
# Save given orders
self.order = order
self.seasonal_order = seasonal_order
# Enforce non-MLE coefficients if time varying coefficients is
# specified
if self.time_varying_regression and self.mle_regression:
raise ValueError('Models with time-varying regression coefficients'
' must integrate the coefficients as part of the'
' state vector, so that `mle_regression` must'
' be set to False.')
# Lag polynomials
# Assume that they are given from lowest degree to highest, that all
# degrees except for the constant are included, and that they are
# boolean vectors (0 for not included, 1 for included).
if isinstance(order[0], int):
self.polynomial_ar = np.r_[1., np.ones(order[0])]
else:
self.polynomial_ar = np.r_[1., order[0]]
if isinstance(order[2], int):
self.polynomial_ma = np.r_[1., np.ones(order[2])]
else:
self.polynomial_ma = np.r_[1., order[2]]
# Assume that they are given from lowest degree to highest, that the
# degrees correspond to (1*s, 2*s, ..., P*s), and that they are
# boolean vectors (0 for not included, 1 for included).
if isinstance(seasonal_order[0], int):
self.polynomial_seasonal_ar = np.r_[
1., # constant
([0] * (self.k_seasons - 1) + [1]) * seasonal_order[0]
]
else:
self.polynomial_seasonal_ar = np.r_[
1., [0] * self.k_seasons * len(seasonal_order[0])
]
for i in range(len(seasonal_order[0])):
self.polynomial_seasonal_ar[(i + 1) * self.k_seasons] = (
seasonal_order[0][i]
)
if isinstance(seasonal_order[2], int):
self.polynomial_seasonal_ma = np.r_[
1., # constant
([0] * (self.k_seasons - 1) + [1]) * seasonal_order[2]
]
else:
self.polynomial_seasonal_ma = np.r_[
1., [0] * self.k_seasons * len(seasonal_order[2])
]
for i in range(len(seasonal_order[2])):
self.polynomial_seasonal_ma[(i + 1) * self.k_seasons] = (
seasonal_order[2][i]
)
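        # For example (illustrative values only): with seasonal_order=(1, 0, 0, 4)
        # the seasonal AR polynomial constructed above is [1, 0, 0, 0, 1], i.e.
        # the constant term plus a single coefficient slot at lag 4.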
# Deterministic trend polynomial
self.trend = trend
if trend is None or trend == 'n':
self.polynomial_trend = np.ones((0))
elif trend == 'c':
self.polynomial_trend = np.r_[1]
elif trend == 't':
self.polynomial_trend = np.r_[0, 1]
elif trend == 'ct':
self.polynomial_trend = np.r_[1, 1]
else:
self.polynomial_trend = (np.array(trend) > 0).astype(int)
# Model orders
# Note: k_ar, k_ma, k_seasonal_ar, k_seasonal_ma do not include the
# constant term, so they may be zero.
        # Note: for a typical ARMA(p,q) model, p = k_ar_params = k_ar and
        # q = k_ma_params = k_ma, although this may not be true for models
        # with arbitrary lag polynomials.
self.k_ar = int(self.polynomial_ar.shape[0] - 1)
self.k_ar_params = int(np.sum(self.polynomial_ar) - 1)
self.k_diff = int(order[1])
self.k_ma = int(self.polynomial_ma.shape[0] - 1)
self.k_ma_params = int(np.sum(self.polynomial_ma) - 1)
self.k_seasonal_ar = int(self.polynomial_seasonal_ar.shape[0] - 1)
self.k_seasonal_ar_params = (
int(np.sum(self.polynomial_seasonal_ar) - 1)
)
self.k_seasonal_diff = int(seasonal_order[1])
self.k_seasonal_ma = int(self.polynomial_seasonal_ma.shape[0] - 1)
self.k_seasonal_ma_params = (
int(np.sum(self.polynomial_seasonal_ma) - 1)
)
# Make internal copies of the differencing orders because if we use
# simple differencing, then we will need to internally use zeros after
# the simple differencing has been performed
self._k_diff = self.k_diff
self._k_seasonal_diff = self.k_seasonal_diff
# We can only use the Hamilton representation if differencing is not
# performed as a part of the state space
if (self.hamilton_representation and not (self.simple_differencing or
self._k_diff == self._k_seasonal_diff == 0)):
raise ValueError('The Hamilton representation is only available'
' for models in which there is no differencing'
' integrated into the state vector. Set'
' `simple_differencing` to True or set'
' `hamilton_representation` to False')
# Note: k_trend is not the degree of the trend polynomial, because e.g.
# k_trend = 1 corresponds to the degree zero polynomial (with only a
# constant term).
self.k_trend = int(np.sum(self.polynomial_trend))
# Model order
# (this is used internally in a number of locations)
self._k_order = max(self.k_ar + self.k_seasonal_ar,
self.k_ma + self.k_seasonal_ma + 1)
if self._k_order == 1 and self.k_ar + self.k_seasonal_ar == 0:
self._k_order = 0
# Exogenous data
self.k_exog = 0
if exog is not None:
exog_is_using_pandas = _is_using_pandas(exog, None)
if not exog_is_using_pandas:
exog = np.asarray(exog)
# Make sure we have 2-dimensional array
if exog.ndim == 1:
if not exog_is_using_pandas:
exog = exog[:, None]
else:
exog = pd.DataFrame(exog)
self.k_exog = exog.shape[1]
# Redefine mle_regression to be true only if it was previously set to
# true and there are exogenous regressors
self.mle_regression = (
self.mle_regression and exog is not None and self.k_exog > 0
)
        # State regression is regression with coefficients estimated within
# the state vector
self.state_regression = (
not self.mle_regression and exog is not None and self.k_exog > 0
)
# If all we have is a regression (so k_ar = k_ma = 0), then put the
# error term as measurement error
if self.state_regression and self._k_order == 0:
self.measurement_error = True
# Number of states
k_states = self._k_order
if not self.simple_differencing:
k_states += self.k_seasons * self._k_seasonal_diff + self._k_diff
if self.state_regression:
k_states += self.k_exog
# Number of diffuse states
k_diffuse_states = k_states
if self.enforce_stationarity:
k_diffuse_states -= self._k_order
# Number of positive definite elements of the state covariance matrix
k_posdef = int(self._k_order > 0)
# Only have an error component to the states if k_posdef > 0
self.state_error = k_posdef > 0
if self.state_regression and self.time_varying_regression:
k_posdef += self.k_exog
        # Diffuse initialization can be more sensitive to the variance value
# in the case of state regression, so set a higher than usual default
# variance
if self.state_regression:
kwargs.setdefault('initial_variance', 1e10)
# Number of parameters
self.k_params = (
self.k_ar_params + self.k_ma_params +
            self.k_seasonal_ar_params + self.k_seasonal_ma_params +
self.k_trend +
self.measurement_error + 1
)
if self.mle_regression:
self.k_params += self.k_exog
# We need to have an array or pandas at this point
self.orig_endog = endog
self.orig_exog = exog
if not _is_using_pandas(endog, None):
endog = np.asanyarray(endog)
# Update the differencing dimensions if simple differencing is applied
self.orig_k_diff = self._k_diff
self.orig_k_seasonal_diff = self._k_seasonal_diff
if (self.simple_differencing and
(self._k_diff > 0 or self._k_seasonal_diff > 0)):
self._k_diff = 0
self._k_seasonal_diff = 0
# Internally used in several locations
self._k_states_diff = (
self._k_diff + self.k_seasons * self._k_seasonal_diff
)
# Set some model variables now so they will be available for the
# initialize() method, below
self.nobs = len(endog)
self.k_states = k_states
self.k_posdef = k_posdef
# By default, do not calculate likelihood while it is controlled by
# diffuse initial conditions.
kwargs.setdefault('loglikelihood_burn', k_diffuse_states)
# Initialize the statespace
super(SARIMAX, self).__init__(
endog, exog=exog, k_states=k_states, k_posdef=k_posdef, **kwargs
)
# Handle kwargs specified initialization
if self.ssm.initialization is not None:
self._manual_initialization = True
# Initialize the fixed components of the statespace model
self.ssm.design = self.initial_design
self.ssm.state_intercept = self.initial_state_intercept
self.ssm.transition = self.initial_transition
self.ssm.selection = self.initial_selection
# If we are estimating a simple ARMA model, then we can use a faster
# initialization method (unless initialization was already specified).
if k_diffuse_states == 0 and not self._manual_initialization:
self.initialize_stationary()
# update _init_keys attached by super
self._init_keys += ['order', 'seasonal_order', 'trend',
'measurement_error', 'time_varying_regression',
'mle_regression', 'simple_differencing',
'enforce_stationarity', 'enforce_invertibility',
'hamilton_representation'] + list(kwargs.keys())
        # TODO: I think the kwargs are not attached; need to recover them from ???
def _get_init_kwds(self):
kwds = super(SARIMAX, self)._get_init_kwds()
for key, value in kwds.items():
if value is None and hasattr(self.ssm, key):
kwds[key] = getattr(self.ssm, key)
return kwds
def prepare_data(self):
endog, exog = super(SARIMAX, self).prepare_data()
# Perform simple differencing if requested
if (self.simple_differencing and
(self.orig_k_diff > 0 or self.orig_k_seasonal_diff > 0)):
# Perform simple differencing
endog = diff(endog.copy(), self.orig_k_diff,
self.orig_k_seasonal_diff, self.k_seasons)
if exog is not None:
exog = diff(exog.copy(), self.orig_k_diff,
self.orig_k_seasonal_diff, self.k_seasons)
# Reset the ModelData datasets
self.data.endog, self.data.exog = (
self.data._convert_endog_exog(endog, exog))
# Reset the nobs
self.nobs = endog.shape[0]
# Cache the arrays for calculating the intercept from the trend
# components
time_trend = np.arange(1, self.nobs + 1)
self._trend_data = np.zeros((self.nobs, self.k_trend))
i = 0
for k in self.polynomial_trend.nonzero()[0]:
if k == 0:
self._trend_data[:, i] = np.ones(self.nobs,)
else:
self._trend_data[:, i] = time_trend**k
i += 1
return endog, exog
def initialize(self):
"""
Initialize the SARIMAX model.
Notes
-----
These initialization steps must occur following the parent class
__init__ function calls.
"""
super(SARIMAX, self).initialize()
# Internal flag for whether the default mixed approximate diffuse /
# stationary initialization has been overridden with a user-supplied
# initialization
self._manual_initialization = False
# Cache the indexes of included polynomial orders (for update below)
# (but we do not want the index of the constant term, so exclude the
# first index)
self._polynomial_ar_idx = np.nonzero(self.polynomial_ar)[0][1:]
self._polynomial_ma_idx = np.nonzero(self.polynomial_ma)[0][1:]
self._polynomial_seasonal_ar_idx = np.nonzero(
self.polynomial_seasonal_ar
)[0][1:]
self._polynomial_seasonal_ma_idx = np.nonzero(
self.polynomial_seasonal_ma
)[0][1:]
# Save the indices corresponding to the reduced form lag polynomial
# parameters in the transition and selection matrices so that they
# don't have to be recalculated for each update()
start_row = self._k_states_diff
end_row = start_row + self.k_ar + self.k_seasonal_ar
col = self._k_states_diff
if not self.hamilton_representation:
self.transition_ar_params_idx = (
np.s_['transition', start_row:end_row, col]
)
else:
self.transition_ar_params_idx = (
np.s_['transition', col, start_row:end_row]
)
start_row += 1
end_row = start_row + self.k_ma + self.k_seasonal_ma
col = 0
if not self.hamilton_representation:
self.selection_ma_params_idx = (
np.s_['selection', start_row:end_row, col]
)
else:
self.design_ma_params_idx = (
np.s_['design', col, start_row:end_row]
)
# Cache indices for exog variances in the state covariance matrix
if self.state_regression and self.time_varying_regression:
idx = np.diag_indices(self.k_posdef)
self._exog_variance_idx = ('state_cov', idx[0][-self.k_exog:],
idx[1][-self.k_exog:])
def initialize_known(self, initial_state, initial_state_cov):
self._manual_initialization = True
self.ssm.initialize_known(initial_state, initial_state_cov)
initialize_known.__doc__ = KalmanFilter.initialize_known.__doc__
def initialize_approximate_diffuse(self, variance=None):
self._manual_initialization = True
self.ssm.initialize_approximate_diffuse(variance)
initialize_approximate_diffuse.__doc__ = (
KalmanFilter.initialize_approximate_diffuse.__doc__
)
def initialize_stationary(self):
self._manual_initialization = True
self.ssm.initialize_stationary()
initialize_stationary.__doc__ = (
KalmanFilter.initialize_stationary.__doc__
)
def initialize_state(self, variance=None):
"""
Initialize state and state covariance arrays in preparation for the
Kalman filter.
Parameters
----------
variance : float, optional
The variance for approximating diffuse initial conditions. Default
can be found in the Representation class documentation.
Notes
-----
Initializes the ARMA component of the state space to the typical
stationary values and the other components as approximate diffuse.
Can be overridden be calling one of the other initialization methods
before fitting the model.
"""
# Check if a manual initialization has already been specified
if self._manual_initialization:
return
# If we're not enforcing stationarity, then we can't initialize a
# stationary component
if not self.enforce_stationarity:
self.initialize_approximate_diffuse(variance)
return
# Otherwise, create the initial state and state covariance matrix
# as from a combination of diffuse and stationary components
# Create initialized non-stationary components
if variance is None:
variance = self.ssm.initial_variance
dtype = self.ssm.transition.dtype
initial_state = np.zeros(self.k_states, dtype=dtype)
initial_state_cov = np.eye(self.k_states, dtype=dtype) * variance
# Get the offsets (from the bottom or bottom right of the vector /
# matrix) for the stationary component.
if self.state_regression:
start = -(self.k_exog + self._k_order)
end = -self.k_exog if self.k_exog > 0 else None
else:
start = -self._k_order
end = None
# Add in the initialized stationary components
if self._k_order > 0:
selection_stationary = self.ssm.selection[start:end, :, 0]
selected_state_cov_stationary = np.dot(
np.dot(selection_stationary, self.ssm.state_cov[:, :, 0]),
selection_stationary.T
)
initial_state_cov_stationary = solve_discrete_lyapunov(
self.ssm.transition[start:end, start:end, 0],
selected_state_cov_stationary
)
initial_state_cov[start:end, start:end] = (
initial_state_cov_stationary
)
self.ssm.initialize_known(initial_state, initial_state_cov)
@property
def initial_design(self):
"""Initial design matrix"""
# Basic design matrix
design = np.r_[
[1] * self._k_diff,
([0] * (self.k_seasons - 1) + [1]) * self._k_seasonal_diff,
[1] * self.state_error, [0] * (self._k_order - 1)
]
# If we have exogenous regressors included as part of the state vector
# then the exogenous data is incorporated as a time-varying component
# of the design matrix
if self.state_regression:
if self._k_order > 0:
design = np.c_[
np.reshape(
np.repeat(design, self.nobs),
(design.shape[0], self.nobs)
).T,
self.exog
].T[None, :, :]
else:
design = self.exog.T[None, :, :]
return design
@property
def initial_state_intercept(self):
"""Initial state intercept vector"""
# TODO make this self.k_trend > 1 and adjust the update to take
# into account that if the trend is a constant, it is not time-varying
if self.k_trend > 0:
state_intercept = np.zeros((self.k_states, self.nobs))
else:
state_intercept = np.zeros((self.k_states,))
return state_intercept
@property
def initial_transition(self):
"""Initial transition matrix"""
transition = np.zeros((self.k_states, self.k_states))
# Exogenous regressors component
if self.state_regression:
start = -self.k_exog
# T_\beta
transition[start:, start:] = np.eye(self.k_exog)
# Autoregressive component
start = -(self.k_exog + self._k_order)
end = -self.k_exog if self.k_exog > 0 else None
else:
# Autoregressive component
start = -self._k_order
end = None
# T_c
transition[start:end, start:end] = companion_matrix(self._k_order)
if self.hamilton_representation:
transition[start:end, start:end] = np.transpose(
companion_matrix(self._k_order)
)
# Seasonal differencing component
# T^*
if self._k_seasonal_diff > 0:
seasonal_companion = companion_matrix(self.k_seasons).T
seasonal_companion[0, -1] = 1
for d in range(self._k_seasonal_diff):
start = self._k_diff + d * self.k_seasons
end = self._k_diff + (d + 1) * self.k_seasons
# T_c^*
transition[start:end, start:end] = seasonal_companion
# i
for i in range(d + 1, self._k_seasonal_diff):
transition[start, end + self.k_seasons - 1] = 1
# \iota
transition[start, self._k_states_diff] = 1
# Differencing component
if self._k_diff > 0:
idx = np.triu_indices(self._k_diff)
# T^**
transition[idx] = 1
# [0 1]
if self.k_seasons > 0:
start = self._k_diff
end = self._k_states_diff
transition[:self._k_diff, start:end] = (
([0] * (self.k_seasons - 1) + [1]) * self._k_seasonal_diff
)
# [1 0]
column = self._k_states_diff
transition[:self._k_diff, column] = 1
return transition
@property
def initial_selection(self):
"""Initial selection matrix"""
if not (self.state_regression and self.time_varying_regression):
if self.k_posdef > 0:
selection = np.r_[
[0] * (self._k_states_diff),
[1] * (self._k_order > 0), [0] * (self._k_order - 1),
[0] * ((1 - self.mle_regression) * self.k_exog)
][:, None]
else:
selection = np.zeros((self.k_states, 0))
else:
selection = np.zeros((self.k_states, self.k_posdef))
# Typical state variance
if self._k_order > 0:
selection[0, 0] = 1
# Time-varying regression coefficient variances
for i in range(self.k_exog, 0, -1):
selection[-i, -i] = 1
return selection
def filter(self, params, transformed=True, cov_type=None, return_ssm=False,
**kwargs):
params = np.array(params, ndmin=1)
# Transform parameters if necessary
if not transformed:
params = self.transform_params(params)
transformed = True
# Get the state space output
result = super(SARIMAX, self).filter(params, transformed, cov_type,
return_ssm=True, **kwargs)
# Wrap in a results object
if not return_ssm:
result_kwargs = {}
if cov_type is not None:
result_kwargs['cov_type'] = cov_type
result = SARIMAXResultsWrapper(
SARIMAXResults(self, params, result, **result_kwargs)
)
return result
def smooth(self, params, transformed=True, cov_type=None, return_ssm=False,
**kwargs):
params = np.array(params, ndmin=1)
# Transform parameters if necessary
if not transformed:
params = self.transform_params(params)
transformed = True
# Get the state space output
result = super(SARIMAX, self).smooth(params, transformed, cov_type,
return_ssm=True, **kwargs)
# Wrap in a results object
if not return_ssm:
result_kwargs = {}
if cov_type is not None:
result_kwargs['cov_type'] = cov_type
result = SARIMAXResultsWrapper(
SARIMAXResults(self, params, result, **result_kwargs)
)
return result
@staticmethod
def _conditional_sum_squares(endog, k_ar, polynomial_ar, k_ma,
polynomial_ma, k_trend=0, trend_data=None):
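        """
        Compute conditional sum of squares starting parameter estimates.
        This roughly follows a Hannan-Rissanen style two-step approach:
        residuals from a long AR regression are used as proxies for the
        innovations, and the trend, AR, and MA coefficients are then
        obtained from a single least squares regression on lagged data
        and lagged residuals.
        """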
k = 2 * k_ma
r = max(k + k_ma, k_ar)
k_params_ar = 0 if k_ar == 0 else len(polynomial_ar.nonzero()[0]) - 1
k_params_ma = 0 if k_ma == 0 else len(polynomial_ma.nonzero()[0]) - 1
residuals = None
if k_ar + k_ma + k_trend > 0:
# If we have MA terms, get residuals from an AR(k) model to use
# as data for conditional sum of squares estimates of the MA
# parameters
if k_ma > 0:
Y = endog[k:]
X = lagmat(endog, k, trim='both')
params_ar = np.linalg.pinv(X).dot(Y)
residuals = Y - np.dot(X, params_ar)
# Run an ARMA(p,q) model using the just computed residuals as data
Y = endog[r:]
X = np.empty((Y.shape[0], 0))
if k_trend > 0:
if trend_data is None:
raise ValueError('Trend data must be provided if'
' `k_trend` > 0.')
X = np.c_[X, trend_data[:(-r if r > 0 else None), :]]
if k_ar > 0:
cols = polynomial_ar.nonzero()[0][1:] - 1
X = np.c_[X, lagmat(endog, k_ar)[r:, cols]]
if k_ma > 0:
cols = polynomial_ma.nonzero()[0][1:] - 1
X = np.c_[X, lagmat(residuals, k_ma)[r-k:, cols]]
# Get the array of [ar_params, ma_params]
params = np.linalg.pinv(X).dot(Y)
residuals = Y - np.dot(X, params)
# Default output
params_trend = []
params_ar = []
params_ma = []
params_variance = []
# Get the params
offset = 0
if k_trend > 0:
params_trend = params[offset:k_trend + offset]
offset += k_trend
if k_ar > 0:
params_ar = params[offset:k_params_ar + offset]
offset += k_params_ar
if k_ma > 0:
params_ma = params[offset:k_params_ma + offset]
offset += k_params_ma
if residuals is not None:
params_variance = (residuals[k_params_ma:]**2).mean()
return (params_trend, params_ar, params_ma,
params_variance)
@property
def start_params(self):
"""
Starting parameters for maximum likelihood estimation
"""
# Perform differencing if necessary (i.e. if simple differencing is
# false so that the state-space model will use the entire dataset)
trend_data = self._trend_data
if not self.simple_differencing and (
self._k_diff > 0 or self._k_seasonal_diff > 0):
endog = diff(self.endog, self._k_diff,
self._k_seasonal_diff, self.k_seasons)
if self.exog is not None:
exog = diff(self.exog, self._k_diff,
self._k_seasonal_diff, self.k_seasons)
else:
exog = None
trend_data = trend_data[:endog.shape[0], :]
else:
endog = self.endog.copy()
exog = self.exog.copy() if self.exog is not None else None
endog = endog.squeeze()
# Although the Kalman filter can deal with missing values in endog,
# conditional sum of squares cannot
        if np.any(np.isnan(endog)):
            mask = ~np.isnan(endog)
            endog = endog[mask]
            if exog is not None:
                exog = exog[mask]
            if trend_data is not None:
                trend_data = trend_data[mask]
# Regression effects via OLS
params_exog = []
if self.k_exog > 0:
params_exog = np.linalg.pinv(exog).dot(endog)
endog -= np.dot(exog, params_exog)
if self.state_regression:
params_exog = []
# Non-seasonal ARMA component and trend
(params_trend, params_ar, params_ma,
params_variance) = self._conditional_sum_squares(
endog, self.k_ar, self.polynomial_ar, self.k_ma,
self.polynomial_ma, self.k_trend, trend_data
)
# If we have estimated non-stationary start parameters but enforce
# stationarity is on, raise an error
invalid_ar = (
self.k_ar > 0 and
self.enforce_stationarity and
not is_invertible(np.r_[1, -params_ar])
)
if invalid_ar:
raise ValueError('Non-stationary starting autoregressive'
' parameters found with `enforce_stationarity`'
' set to True.')
# If we have estimated non-invertible start parameters but enforce
# invertibility is on, raise an error
invalid_ma = (
self.k_ma > 0 and
self.enforce_invertibility and
not is_invertible(np.r_[1, params_ma])
)
if invalid_ma:
raise ValueError('non-invertible starting MA parameters found'
' with `enforce_invertibility` set to True.')
# Seasonal Parameters
_, params_seasonal_ar, params_seasonal_ma, params_seasonal_variance = (
self._conditional_sum_squares(
endog, self.k_seasonal_ar, self.polynomial_seasonal_ar,
self.k_seasonal_ma, self.polynomial_seasonal_ma
)
)
# If we have estimated non-stationary start parameters but enforce
# stationarity is on, raise an error
invalid_seasonal_ar = (
self.k_seasonal_ar > 0 and
self.enforce_stationarity and
not is_invertible(np.r_[1, -params_seasonal_ar])
)
if invalid_seasonal_ar:
raise ValueError('Non-stationary starting autoregressive'
' parameters found with `enforce_stationarity`'
' set to True.')
# If we have estimated non-invertible start parameters but enforce
# invertibility is on, raise an error
invalid_seasonal_ma = (
self.k_seasonal_ma > 0 and
self.enforce_invertibility and
not is_invertible(np.r_[1, params_seasonal_ma])
)
if invalid_seasonal_ma:
raise ValueError('non-invertible starting seasonal moving average'
' parameters found with `enforce_invertibility`'
' set to True.')
# Variances
params_exog_variance = []
if self.state_regression and self.time_varying_regression:
# TODO how to set the initial variance parameters?
params_exog_variance = [1] * self.k_exog
if self.state_error and params_variance == []:
if not params_seasonal_variance == []:
params_variance = params_seasonal_variance
elif self.k_exog > 0:
params_variance = np.dot(endog, endog)
else:
params_variance = 1
params_measurement_variance = 1 if self.measurement_error else []
# Combine all parameters
return np.r_[
params_trend,
params_exog,
params_ar,
params_ma,
params_seasonal_ar,
params_seasonal_ma,
params_exog_variance,
params_measurement_variance,
params_variance
]
@property
def endog_names(self, latex=False):
"""Names of endogenous variables"""
diff = ''
if self.k_diff > 0:
if self.k_diff == 1:
diff = '\Delta' if latex else 'D'
else:
diff = ('\Delta^%d' if latex else 'D%d') % self.k_diff
seasonal_diff = ''
if self.k_seasonal_diff > 0:
if self.k_seasonal_diff == 1:
seasonal_diff = (('\Delta_%d' if latex else 'DS%d') %
(self.k_seasons))
else:
seasonal_diff = (('\Delta_%d^%d' if latex else 'D%dS%d') %
(self.k_seasonal_diff, self.k_seasons))
endog_diff = self.simple_differencing
if endog_diff and self.k_diff > 0 and self.k_seasonal_diff > 0:
return (('%s%s %s' if latex else '%s.%s.%s') %
(diff, seasonal_diff, self.data.ynames))
elif endog_diff and self.k_diff > 0:
return (('%s %s' if latex else '%s.%s') %
(diff, self.data.ynames))
elif endog_diff and self.k_seasonal_diff > 0:
return (('%s %s' if latex else '%s.%s') %
(seasonal_diff, self.data.ynames))
else:
return self.data.ynames
params_complete = [
'trend', 'exog', 'ar', 'ma', 'seasonal_ar', 'seasonal_ma',
'exog_variance', 'measurement_variance', 'variance'
]
@property
def param_terms(self):
"""
List of parameters actually included in the model, in sorted order.
TODO Make this an OrderedDict with slice or indices as the values.
"""
model_orders = self.model_orders
# Get basic list from model orders
params = [
order for order in self.params_complete
if model_orders[order] > 0
]
# k_exog may be positive without associated parameters if it is in the
# state vector
if 'exog' in params and not self.mle_regression:
params.remove('exog')
return params
@property
def param_names(self):
"""
List of human readable parameter names (for parameters actually
included in the model).
"""
params_sort_order = self.param_terms
model_names = self.model_names
return [
name for param in params_sort_order for name in model_names[param]
]
@property
def model_orders(self):
"""
The orders of each of the polynomials in the model.
"""
return {
'trend': self.k_trend,
'exog': self.k_exog,
'ar': self.k_ar,
'ma': self.k_ma,
'seasonal_ar': self.k_seasonal_ar,
'seasonal_ma': self.k_seasonal_ma,
'reduced_ar': self.k_ar + self.k_seasonal_ar,
'reduced_ma': self.k_ma + self.k_seasonal_ma,
'exog_variance': self.k_exog if (
self.state_regression and self.time_varying_regression) else 0,
'measurement_variance': int(self.measurement_error),
'variance': int(self.state_error),
}
@property
def model_names(self):
"""
The plain text names of all possible model parameters.
"""
return self._get_model_names(latex=False)
@property
def model_latex_names(self):
"""
The latex names of all possible model parameters.
"""
return self._get_model_names(latex=True)
def _get_model_names(self, latex=False):
names = {
'trend': None,
'exog': None,
'ar': None,
'ma': None,
'seasonal_ar': None,
'seasonal_ma': None,
'reduced_ar': None,
'reduced_ma': None,
'exog_variance': None,
'measurement_variance': None,
'variance': None,
}
# Trend
if self.k_trend > 0:
trend_template = 't_%d' if latex else 'trend.%d'
names['trend'] = []
for i in self.polynomial_trend.nonzero()[0]:
if i == 0:
names['trend'].append('intercept')
elif i == 1:
names['trend'].append('drift')
else:
names['trend'].append(trend_template % i)
# Exogenous coefficients
if self.k_exog > 0:
names['exog'] = self.exog_names
# Autoregressive
if self.k_ar > 0:
ar_template = '$\\phi_%d$' if latex else 'ar.L%d'
names['ar'] = []
for i in self.polynomial_ar.nonzero()[0][1:]:
names['ar'].append(ar_template % i)
# Moving Average
if self.k_ma > 0:
ma_template = '$\\theta_%d$' if latex else 'ma.L%d'
names['ma'] = []
for i in self.polynomial_ma.nonzero()[0][1:]:
names['ma'].append(ma_template % i)
# Seasonal Autoregressive
if self.k_seasonal_ar > 0:
seasonal_ar_template = (
'$\\tilde \\phi_%d$' if latex else 'ar.S.L%d'
)
names['seasonal_ar'] = []
for i in self.polynomial_seasonal_ar.nonzero()[0][1:]:
names['seasonal_ar'].append(seasonal_ar_template % i)
# Seasonal Moving Average
if self.k_seasonal_ma > 0:
seasonal_ma_template = (
'$\\tilde \\theta_%d$' if latex else 'ma.S.L%d'
)
names['seasonal_ma'] = []
for i in self.polynomial_seasonal_ma.nonzero()[0][1:]:
names['seasonal_ma'].append(seasonal_ma_template % i)
# Reduced Form Autoregressive
if self.k_ar > 0 or self.k_seasonal_ar > 0:
            reduced_polynomial_ar = -np.polymul(
self.polynomial_ar, self.polynomial_seasonal_ar
)
ar_template = '$\\Phi_%d$' if latex else 'ar.R.L%d'
names['reduced_ar'] = []
for i in reduced_polynomial_ar.nonzero()[0][1:]:
names['reduced_ar'].append(ar_template % i)
# Reduced Form Moving Average
if self.k_ma > 0 or self.k_seasonal_ma > 0:
reduced_polynomial_ma = np.polymul(
self.polynomial_ma, self.polynomial_seasonal_ma
)
ma_template = '$\\Theta_%d$' if latex else 'ma.R.L%d'
names['reduced_ma'] = []
for i in reduced_polynomial_ma.nonzero()[0][1:]:
names['reduced_ma'].append(ma_template % i)
# Exogenous variances
if self.state_regression and self.time_varying_regression:
exog_var_template = '$\\sigma_\\text{%s}^2$' if latex else 'var.%s'
names['exog_variance'] = [
exog_var_template % exog_name for exog_name in self.exog_names
]
# Measurement error variance
if self.measurement_error:
meas_var_tpl = (
'$\\sigma_\\eta^2$' if latex else 'var.measurement_error'
)
names['measurement_variance'] = [meas_var_tpl]
# State variance
if self.state_error:
var_tpl = '$\\sigma_\\zeta^2$' if latex else 'sigma2'
names['variance'] = [var_tpl]
return names
def transform_params(self, unconstrained):
"""
Transform unconstrained parameters used by the optimizer to constrained
parameters used in likelihood evaluation.
Used primarily to enforce stationarity of the autoregressive lag
polynomial, invertibility of the moving average lag polynomial, and
positive variance parameters.
Parameters
----------
unconstrained : array_like
Unconstrained parameters used by the optimizer.
Returns
-------
constrained : array_like
Constrained parameters used in likelihood evaluation.
Notes
-----
If the lag polynomial has non-consecutive powers (so that the
coefficient is zero on some element of the polynomial), then the
constraint function is not onto the entire space of invertible
polynomials, although it only excludes a very small portion very close
to the invertibility boundary.
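        Examples
        --------
        Illustrative sketch only (assumes ``mod = SARIMAX(endog, order=(1, 0, 1))``
        with no trend or exogenous regressors, so the parameter vector is
        ``[ar.L1, ma.L1, sigma2]``; the last element is squared to guarantee a
        positive variance):
        >>> constrained = mod.transform_params([0.5, -0.2, 2.0])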
"""
unconstrained = np.array(unconstrained, ndmin=1)
constrained = np.zeros(unconstrained.shape, unconstrained.dtype)
start = end = 0
# Retain the trend parameters
if self.k_trend > 0:
end += self.k_trend
constrained[start:end] = unconstrained[start:end]
start += self.k_trend
# Retain any MLE regression coefficients
if self.mle_regression:
end += self.k_exog
constrained[start:end] = unconstrained[start:end]
start += self.k_exog
# Transform the AR parameters (phi) to be stationary
if self.k_ar_params > 0:
end += self.k_ar_params
if self.enforce_stationarity:
constrained[start:end] = (
constrain_stationary_univariate(unconstrained[start:end])
)
else:
constrained[start:end] = unconstrained[start:end]
start += self.k_ar_params
# Transform the MA parameters (theta) to be invertible
if self.k_ma_params > 0:
end += self.k_ma_params
if self.enforce_invertibility:
constrained[start:end] = (
constrain_stationary_univariate(unconstrained[start:end])
)
else:
constrained[start:end] = unconstrained[start:end]
start += self.k_ma_params
# Transform the seasonal AR parameters (\tilde phi) to be stationary
if self.k_seasonal_ar > 0:
end += self.k_seasonal_ar_params
if self.enforce_stationarity:
constrained[start:end] = (
constrain_stationary_univariate(unconstrained[start:end])
)
else:
constrained[start:end] = unconstrained[start:end]
start += self.k_seasonal_ar_params
# Transform the seasonal MA parameters (\tilde theta) to be invertible
if self.k_seasonal_ma_params > 0:
end += self.k_seasonal_ma_params
if self.enforce_invertibility:
constrained[start:end] = (
constrain_stationary_univariate(unconstrained[start:end])
)
else:
constrained[start:end] = unconstrained[start:end]
start += self.k_seasonal_ma_params
# Transform the standard deviation parameters to be positive
if self.state_regression and self.time_varying_regression:
end += self.k_exog
constrained[start:end] = unconstrained[start:end]**2
start += self.k_exog
if self.measurement_error:
constrained[start] = unconstrained[start]**2
start += 1
end += 1
if self.state_error:
constrained[start] = unconstrained[start]**2
# start += 1
# end += 1
return constrained
def untransform_params(self, constrained):
"""
Transform constrained parameters used in likelihood evaluation
to unconstrained parameters used by the optimizer
Used primarily to reverse enforcement of stationarity of the
autoregressive lag polynomial and invertibility of the moving average
lag polynomial.
Parameters
----------
constrained : array_like
Constrained parameters used in likelihood evaluation.
Returns
-------
        unconstrained : array_like
Unconstrained parameters used by the optimizer.
Notes
-----
If the lag polynomial has non-consecutive powers (so that the
coefficient is zero on some element of the polynomial), then the
constraint function is not onto the entire space of invertible
polynomials, although it only excludes a very small portion very close
to the invertibility boundary.
"""
constrained = np.array(constrained, ndmin=1)
unconstrained = np.zeros(constrained.shape, constrained.dtype)
start = end = 0
# Retain the trend parameters
if self.k_trend > 0:
end += self.k_trend
unconstrained[start:end] = constrained[start:end]
start += self.k_trend
# Retain any MLE regression coefficients
if self.mle_regression:
end += self.k_exog
unconstrained[start:end] = constrained[start:end]
start += self.k_exog
# Transform the AR parameters (phi) to be stationary
if self.k_ar_params > 0:
end += self.k_ar_params
if self.enforce_stationarity:
unconstrained[start:end] = (
unconstrain_stationary_univariate(constrained[start:end])
)
else:
unconstrained[start:end] = constrained[start:end]
start += self.k_ar_params
# Transform the MA parameters (theta) to be invertible
if self.k_ma_params > 0:
end += self.k_ma_params
if self.enforce_invertibility:
unconstrained[start:end] = (
unconstrain_stationary_univariate(constrained[start:end])
)
else:
unconstrained[start:end] = constrained[start:end]
start += self.k_ma_params
# Transform the seasonal AR parameters (\tilde phi) to be stationary
if self.k_seasonal_ar > 0:
end += self.k_seasonal_ar_params
if self.enforce_stationarity:
unconstrained[start:end] = (
unconstrain_stationary_univariate(constrained[start:end])
)
else:
unconstrained[start:end] = constrained[start:end]
start += self.k_seasonal_ar_params
# Transform the seasonal MA parameters (\tilde theta) to be invertible
if self.k_seasonal_ma_params > 0:
end += self.k_seasonal_ma_params
if self.enforce_invertibility:
unconstrained[start:end] = (
unconstrain_stationary_univariate(constrained[start:end])
)
else:
unconstrained[start:end] = constrained[start:end]
start += self.k_seasonal_ma_params
# Untransform the standard deviation
if self.state_regression and self.time_varying_regression:
end += self.k_exog
unconstrained[start:end] = constrained[start:end]**0.5
start += self.k_exog
if self.measurement_error:
unconstrained[start] = constrained[start]**0.5
start += 1
end += 1
if self.state_error:
unconstrained[start] = constrained[start]**0.5
# start += 1
# end += 1
return unconstrained
def update(self, params, transformed=True):
"""
Update the parameters of the model
Updates the representation matrices to fill in the new parameter
values.
Parameters
----------
params : array_like
Array of new parameters.
transformed : boolean, optional
Whether or not `params` is already transformed. If set to False,
            `transform_params` is called. Default is True.
Returns
-------
params : array_like
Array of parameters.
"""
params = super(SARIMAX, self).update(params, transformed)
params_trend = None
params_exog = None
params_ar = None
params_ma = None
params_seasonal_ar = None
params_seasonal_ma = None
params_exog_variance = None
params_measurement_variance = None
params_variance = None
# Extract the parameters
start = end = 0
end += self.k_trend
params_trend = params[start:end]
start += self.k_trend
if self.mle_regression:
end += self.k_exog
params_exog = params[start:end]
start += self.k_exog
end += self.k_ar_params
params_ar = params[start:end]
start += self.k_ar_params
end += self.k_ma_params
params_ma = params[start:end]
start += self.k_ma_params
end += self.k_seasonal_ar_params
params_seasonal_ar = params[start:end]
start += self.k_seasonal_ar_params
end += self.k_seasonal_ma_params
params_seasonal_ma = params[start:end]
start += self.k_seasonal_ma_params
if self.state_regression and self.time_varying_regression:
end += self.k_exog
params_exog_variance = params[start:end]
start += self.k_exog
if self.measurement_error:
params_measurement_variance = params[start]
start += 1
end += 1
if self.state_error:
params_variance = params[start]
# start += 1
# end += 1
# Update lag polynomials
if self.k_ar > 0:
if self.polynomial_ar.dtype == params.dtype:
self.polynomial_ar[self._polynomial_ar_idx] = -params_ar
else:
polynomial_ar = self.polynomial_ar.real.astype(params.dtype)
polynomial_ar[self._polynomial_ar_idx] = -params_ar
self.polynomial_ar = polynomial_ar
if self.k_ma > 0:
if self.polynomial_ma.dtype == params.dtype:
self.polynomial_ma[self._polynomial_ma_idx] = params_ma
else:
polynomial_ma = self.polynomial_ma.real.astype(params.dtype)
polynomial_ma[self._polynomial_ma_idx] = params_ma
self.polynomial_ma = polynomial_ma
if self.k_seasonal_ar > 0:
idx = self._polynomial_seasonal_ar_idx
if self.polynomial_seasonal_ar.dtype == params.dtype:
self.polynomial_seasonal_ar[idx] = -params_seasonal_ar
else:
polynomial_seasonal_ar = (
self.polynomial_seasonal_ar.real.astype(params.dtype)
)
polynomial_seasonal_ar[idx] = -params_seasonal_ar
self.polynomial_seasonal_ar = polynomial_seasonal_ar
if self.k_seasonal_ma > 0:
idx = self._polynomial_seasonal_ma_idx
if self.polynomial_seasonal_ma.dtype == params.dtype:
self.polynomial_seasonal_ma[idx] = params_seasonal_ma
else:
polynomial_seasonal_ma = (
self.polynomial_seasonal_ma.real.astype(params.dtype)
)
polynomial_seasonal_ma[idx] = params_seasonal_ma
self.polynomial_seasonal_ma = polynomial_seasonal_ma
# Get the reduced form lag polynomial terms by multiplying the regular
# and seasonal lag polynomials
        # Note: the numpy np.polymul examples assume coefficients are ordered
        # from highest degree to lowest, whereas ours are ordered from lowest
        # to highest; this does not matter for the product coefficients.
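        # For example (illustrative values only):
        #   np.polymul([1., -0.5], [1., 0., 0., 0., -0.3])
        #   -> [1., -0.5, 0., 0., -0.3, 0.15]
        # i.e. the same convolution whichever ordering convention is read.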
if self.k_seasonal_ar > 0:
reduced_polynomial_ar = -np.polymul(
self.polynomial_ar, self.polynomial_seasonal_ar
)
else:
reduced_polynomial_ar = -self.polynomial_ar
if self.k_seasonal_ma > 0:
reduced_polynomial_ma = np.polymul(
self.polynomial_ma, self.polynomial_seasonal_ma
)
else:
reduced_polynomial_ma = self.polynomial_ma
# Observation intercept
# Exogenous data with MLE estimation of parameters enters through a
        # time-varying observation intercept (this is equivalent to simply
# subtracting it out of the endogenous variable first)
if self.mle_regression:
self.ssm['obs_intercept'] = np.dot(self.exog, params_exog)[None, :]
# State intercept (Harvey) or additional observation intercept
# (Hamilton)
        # SARIMA trend enters through a time-varying state intercept,
# associated with the first row of the stationary component of the
# state vector (i.e. the first element of the state vector following
# any differencing elements)
if self.k_trend > 0:
data = np.dot(self._trend_data, params_trend).astype(params.dtype)
if not self.hamilton_representation:
self.ssm['state_intercept', self._k_states_diff, :] = data
else:
# The way the trend enters in the Hamilton representation means
# that the parameter is not an ``intercept'' but instead the
# mean of the process. The trend values in `data` are meant for
# an intercept, and so must be transformed to represent the
# mean instead
if self.hamilton_representation:
data /= np.sum(-reduced_polynomial_ar)
# If we already set the observation intercept for MLE
# regression, just add to it
if self.mle_regression:
self.ssm.obs_intercept += data[None, :]
# Otherwise set it directly
else:
self.ssm.obs_intercept = data[None, :]
# Observation covariance matrix
if self.measurement_error:
self.ssm['obs_cov', 0, 0] = params_measurement_variance
# Transition matrix
if self.k_ar > 0 or self.k_seasonal_ar > 0:
self.ssm[self.transition_ar_params_idx] = reduced_polynomial_ar[1:]
elif not self.ssm.transition.dtype == params.dtype:
# This is required if the transition matrix is not really in use
            # (e.g. for an MA(q) process), since its dtype would otherwise
            # never change as the parameters' dtype changes; change it manually.
self.ssm.transition = self.ssm.transition.real.astype(params.dtype)
# Selection matrix (Harvey) or Design matrix (Hamilton)
if self.k_ma > 0 or self.k_seasonal_ma > 0:
if not self.hamilton_representation:
self.ssm[self.selection_ma_params_idx] = (
reduced_polynomial_ma[1:]
)
else:
self.ssm[self.design_ma_params_idx] = reduced_polynomial_ma[1:]
# State covariance matrix
if self.k_posdef > 0:
self.ssm['state_cov', 0, 0] = params_variance
if self.state_regression and self.time_varying_regression:
self.ssm[self._exog_variance_idx] = params_exog_variance
# Initialize
if not self._manual_initialization:
self.initialize_state()
return params
class SARIMAXResults(MLEResults):
"""
Class to hold results from fitting an SARIMAX model.
Parameters
----------
model : SARIMAX instance
The fitted model instance
Attributes
----------
specification : dictionary
Dictionary including all attributes from the SARIMAX model instance.
polynomial_ar : array
Array containing autoregressive lag polynomial coefficients,
ordered from lowest degree to highest. Initialized with ones, unless
a coefficient is constrained to be zero (in which case it is zero).
polynomial_ma : array
Array containing moving average lag polynomial coefficients,
ordered from lowest degree to highest. Initialized with ones, unless
a coefficient is constrained to be zero (in which case it is zero).
polynomial_seasonal_ar : array
Array containing seasonal autoregressive lag polynomial coefficients,
ordered from lowest degree to highest. Initialized with ones, unless
a coefficient is constrained to be zero (in which case it is zero).
polynomial_seasonal_ma : array
Array containing seasonal moving average lag polynomial coefficients,
ordered from lowest degree to highest. Initialized with ones, unless
a coefficient is constrained to be zero (in which case it is zero).
polynomial_trend : array
Array containing trend polynomial coefficients, ordered from lowest
degree to highest. Initialized with ones, unless a coefficient is
constrained to be zero (in which case it is zero).
model_orders : list of int
The orders of each of the polynomials in the model.
param_terms : list of str
List of parameters actually included in the model, in sorted order.
See Also
--------
statsmodels.tsa.statespace.kalman_filter.FilterResults
statsmodels.tsa.statespace.mlemodel.MLEResults
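    Examples
    --------
    Illustrative sketch only (assumes ``mod`` is a ``SARIMAX`` instance
    constructed on data defined elsewhere):
    >>> res = mod.fit()
    >>> ar_roots = res.arroots
    >>> summary = res.summary()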
"""
def __init__(self, model, params, filter_results, cov_type='opg', **kwargs):
super(SARIMAXResults, self).__init__(model, params, filter_results,
cov_type, **kwargs)
self.df_resid = np.inf # attribute required for wald tests
# Save _init_kwds
self._init_kwds = self.model._get_init_kwds()
# Save model specification
self.specification = Bunch(**{
# Set additional model parameters
'k_seasons': self.model.k_seasons,
'measurement_error': self.model.measurement_error,
'time_varying_regression': self.model.time_varying_regression,
'mle_regression': self.model.mle_regression,
'simple_differencing': self.model.simple_differencing,
'enforce_stationarity': self.model.enforce_stationarity,
'enforce_invertibility': self.model.enforce_invertibility,
'hamilton_representation': self.model.hamilton_representation,
'order': self.model.order,
'seasonal_order': self.model.seasonal_order,
# Model order
'k_diff': self.model.k_diff,
'k_seasonal_diff': self.model.k_seasonal_diff,
'k_ar': self.model.k_ar,
'k_ma': self.model.k_ma,
'k_seasonal_ar': self.model.k_seasonal_ar,
'k_seasonal_ma': self.model.k_seasonal_ma,
# Param Numbers
'k_ar_params': self.model.k_ar_params,
'k_ma_params': self.model.k_ma_params,
# Trend / Regression
'trend': self.model.trend,
'k_trend': self.model.k_trend,
'k_exog': self.model.k_exog,
'mle_regression': self.model.mle_regression,
'state_regression': self.model.state_regression,
})
# Polynomials
self.polynomial_trend = self.model.polynomial_trend
self.polynomial_ar = self.model.polynomial_ar
self.polynomial_ma = self.model.polynomial_ma
self.polynomial_seasonal_ar = self.model.polynomial_seasonal_ar
self.polynomial_seasonal_ma = self.model.polynomial_seasonal_ma
self.polynomial_reduced_ar = np.polymul(
self.polynomial_ar, self.polynomial_seasonal_ar
)
self.polynomial_reduced_ma = np.polymul(
self.polynomial_ma, self.polynomial_seasonal_ma
)
# Distinguish parameters
self.model_orders = self.model.model_orders
self.param_terms = self.model.param_terms
start = end = 0
for name in self.param_terms:
end += self.model_orders[name]
setattr(self, '_params_%s' % name, self.params[start:end])
start += self.model_orders[name]
@cache_readonly
def arroots(self):
"""
(array) Roots of the reduced form autoregressive lag polynomial
"""
return np.roots(self.polynomial_reduced_ar)**-1
@cache_readonly
def maroots(self):
"""
(array) Roots of the reduced form moving average lag polynomial
"""
return np.roots(self.polynomial_reduced_ma)**-1
@cache_readonly
def arfreq(self):
"""
(array) Frequency of the roots of the reduced form autoregressive
lag polynomial
"""
z = self.arroots
if not z.size:
return
return np.arctan2(z.imag, z.real) / (2 * np.pi)
@cache_readonly
def mafreq(self):
"""
(array) Frequency of the roots of the reduced form moving average
lag polynomial
"""
z = self.maroots
if not z.size:
return
return np.arctan2(z.imag, z.real) / (2 * np.pi)
@cache_readonly
def arparams(self):
"""
(array) Autoregressive parameters actually estimated in the model.
Does not include parameters whose values are constrained to be zero.
"""
return self._params_ar
@cache_readonly
def maparams(self):
"""
(array) Moving average parameters actually estimated in the model.
Does not include parameters whose values are constrained to be zero.
"""
return self._params_ma
def predict(self, start=None, end=None, exog=None, dynamic=False,
**kwargs):
"""
In-sample prediction and out-of-sample forecasting
Parameters
----------
start : int, str, or datetime, optional
            Zero-indexed observation number at which to start forecasting, i.e.,
            the first forecast is start. Can also be a date string to
            parse or a datetime type. Default is the zeroth observation.
        end : int, str, or datetime, optional
            Zero-indexed observation number at which to end forecasting, i.e.,
            the last forecast is end. Can also be a date string to
parse or a datetime type. However, if the dates index does not
have a fixed frequency, end must be an integer index if you
want out of sample prediction. Default is the last observation in
the sample.
exog : array_like, optional
If the model includes exogenous regressors, you must provide
exactly enough out-of-sample values for the exogenous variables if
end is beyond the last observation in the sample.
dynamic : boolean, int, str, or datetime, optional
Integer offset relative to `start` at which to begin dynamic
prediction. Can also be an absolute date string to parse or a
datetime type (these are not interpreted as offsets).
Prior to this observation, true endogenous values will be used for
prediction; starting with this observation and continuing through
the end of prediction, forecasted endogenous values will be used
instead.
full_results : boolean, optional
If True, returns a FilterResults instance; if False returns a
tuple with forecasts, the forecast errors, and the forecast error
covariance matrices. Default is False.
**kwargs
            Additional arguments may be required for forecasting beyond the end
of the sample. See `FilterResults.predict` for more details.
Returns
-------
forecast : array
Array of out of sample forecasts.
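        Examples
        --------
        Illustrative sketch only (assumes ``res`` is a fitted results object
        for a model without exogenous regressors, estimated on a sample of
        120 observations; the indices are arbitrary):
        >>> in_sample = res.predict(start=0, end=119)
        >>> out_of_sample = res.predict(start=120, end=131)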
"""
if start is None:
start = 0
# Handle end (e.g. date)
_start = self.model._get_predict_start(start)
_end, _out_of_sample = self.model._get_predict_end(end)
# Handle exogenous parameters
if _out_of_sample and (self.model.k_exog + self.model.k_trend > 0):
# Create a new faux SARIMAX model for the extended dataset
nobs = self.model.data.orig_endog.shape[0] + _out_of_sample
endog = np.zeros((nobs, self.model.k_endog))
if self.model.k_exog > 0:
if exog is None:
raise ValueError('Out-of-sample forecasting in a model'
' with a regression component requires'
' additional exogenous values via the'
' `exog` argument.')
exog = np.array(exog)
required_exog_shape = (_out_of_sample, self.model.k_exog)
if not exog.shape == required_exog_shape:
raise ValueError('Provided exogenous values are not of the'
' appropriate shape. Required %s, got %s.'
% (str(required_exog_shape),
str(exog.shape)))
exog = np.c_[self.model.data.orig_exog.T, exog.T].T
model_kwargs = self._init_kwds.copy()
model_kwargs['exog'] = exog
model = SARIMAX(endog, **model_kwargs)
model.update(self.params)
            # Set the kwargs with the updated time-varying state space
# representation matrices
for name in self.filter_results.shapes.keys():
if name == 'obs':
continue
mat = getattr(model.ssm, name)
if mat.shape[-1] > 1:
if len(mat.shape) == 2:
kwargs[name] = mat[:, -_out_of_sample:]
else:
kwargs[name] = mat[:, :, -_out_of_sample:]
elif self.model.k_exog == 0 and exog is not None:
warn('Exogenous array provided to predict, but additional data not'
' required. `exog` argument ignored.')
return super(SARIMAXResults, self).predict(
start=start, end=end, exog=exog, dynamic=dynamic, **kwargs
)
def forecast(self, steps=1, exog=None, **kwargs):
"""
Out-of-sample forecasts
Parameters
----------
steps : int, optional
The number of out of sample forecasts from the end of the
sample. Default is 1.
exog : array_like, optional
If the model includes exogenous regressors, you must provide
exactly enough out-of-sample values for the exogenous variables for
each step forecasted.
**kwargs
            Additional arguments may be required for forecasting beyond the end
of the sample. See `FilterResults.predict` for more details.
Returns
-------
forecast : array
Array of out of sample forecasts.
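        Examples
        --------
        Illustrative sketch only (assumes ``res`` is a fitted results object
        for a model without exogenous regressors):
        >>> forecasts = res.forecast(steps=12)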
"""
return super(SARIMAXResults, self).forecast(steps, exog=exog, **kwargs)
def summary(self, alpha=.05, start=None):
# Create the model name
# See if we have an ARIMA component
order = ''
if self.model.k_ar + self.model.k_diff + self.model.k_ma > 0:
if self.model.k_ar == self.model.k_ar_params:
order_ar = self.model.k_ar
else:
order_ar = tuple(self.polynomial_ar.nonzero()[0][1:])
if self.model.k_ma == self.model.k_ma_params:
order_ma = self.model.k_ma
else:
order_ma = tuple(self.polynomial_ma.nonzero()[0][1:])
# If there is simple differencing, then that is reflected in the
# dependent variable name
k_diff = 0 if self.model.simple_differencing else self.model.k_diff
order = '(%s, %d, %s)' % (order_ar, k_diff, order_ma)
# See if we have an SARIMA component
seasonal_order = ''
has_seasonal = (
self.model.k_seasonal_ar +
self.model.k_seasonal_diff +
self.model.k_seasonal_ma
) > 0
if has_seasonal:
            if self.model.k_seasonal_ar == self.model.k_seasonal_ar_params:
order_seasonal_ar = (
int(self.model.k_seasonal_ar / self.model.k_seasons)
)
else:
order_seasonal_ar = (
tuple(self.polynomial_seasonal_ar.nonzero()[0][1:])
)
            if self.model.k_seasonal_ma == self.model.k_seasonal_ma_params:
order_seasonal_ma = (
int(self.model.k_seasonal_ma / self.model.k_seasons)
)
else:
order_seasonal_ma = (
tuple(self.polynomial_seasonal_ma.nonzero()[0][1:])
)
# If there is simple differencing, then that is reflected in the
# dependent variable name
k_seasonal_diff = self.model.k_seasonal_diff
if self.model.simple_differencing:
k_seasonal_diff = 0
seasonal_order = ('(%s, %d, %s, %d)' %
(str(order_seasonal_ar), k_seasonal_diff,
str(order_seasonal_ma), self.model.k_seasons))
if not order == '':
order += 'x'
model_name = (
'%s%s%s' % (self.model.__class__.__name__, order, seasonal_order)
)
return super(SARIMAXResults, self).summary(
alpha=alpha, start=start, model_name=model_name
)
summary.__doc__ = MLEResults.summary.__doc__
class SARIMAXResultsWrapper(MLEResultsWrapper):
_attrs = {}
_wrap_attrs = wrap.union_dicts(MLEResultsWrapper._wrap_attrs,
_attrs)
_methods = {}
_wrap_methods = wrap.union_dicts(MLEResultsWrapper._wrap_methods,
_methods)
wrap.populate_wrapper(SARIMAXResultsWrapper, SARIMAXResults)
| bsd-3-clause |
sauloal/cnidaria | scripts/venv/lib/python2.7/site-packages/matplotlib/afm.py | 10 | 16200 | """
This is a python interface to Adobe Font Metrics Files. Although a
number of other python implementations exist, and may be more complete
than this, it was decided not to go with them because they were
either:
1) copyrighted or used a non-BSD compatible license
2) had too many dependencies and a free standing lib was needed
3) did more than needed and it was easier to write afresh rather than
figure out how to get just what was needed.
It is pretty easy to use, and requires only built-in python libs:
>>> from matplotlib import rcParams
>>> import os.path
>>> afm_fname = os.path.join(rcParams['datapath'],
... 'fonts', 'afm', 'ptmr8a.afm')
>>>
>>> from matplotlib.afm import AFM
>>> afm = AFM(open(afm_fname))
>>> afm.string_width_height('What the heck?')
(6220.0, 694)
>>> afm.get_fontname()
'Times-Roman'
>>> afm.get_kern_dist('A', 'f')
0
>>> afm.get_kern_dist('A', 'y')
-92.0
>>> afm.get_bbox_char('!')
[130, -9, 238, 676]
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from six.moves import map
import sys
import os
import re
from ._mathtext_data import uni2type1
# Convert string to a python type
# some afm files have floats where we are expecting ints -- there is
# probably a better way to handle this (support floats, round rather
# than truncate). But I don't know what the best approach is now and
# this change to _to_int should at least prevent mpl from crashing on
# these JDH (2009-11-06)
def _to_int(x):
return int(float(x))
_to_float = float
if six.PY3:
def _to_str(x):
return x.decode('utf8')
else:
_to_str = str
def _to_list_of_ints(s):
s = s.replace(b',', b' ')
return [_to_int(val) for val in s.split()]
def _to_list_of_floats(s):
return [_to_float(val) for val in s.split()]
def _to_bool(s):
if s.lower().strip() in (b'false', b'0', b'no'):
return False
else:
return True
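# Illustrative behaviour of the converters above (a sketch, not part of the
# original module); the raw values read from an AFM file are bytes:
#   _to_int(b'437.0')                        -> 437  (float text truncated)
#   _to_list_of_ints(b'-168,-218 1000 898')  -> [-168, -218, 1000, 898]
#   _to_bool(b'false')                       -> False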
def _sanity_check(fh):
"""
Check if the file at least looks like AFM.
If not, raise :exc:`RuntimeError`.
"""
# Remember the file position in case the caller wants to
# do something else with the file.
pos = fh.tell()
try:
line = fh.readline()
finally:
fh.seek(pos, 0)
# AFM spec, Section 4: The StartFontMetrics keyword [followed by a
# version number] must be the first line in the file, and the
# EndFontMetrics keyword must be the last non-empty line in the
# file. We just check the first line.
if not line.startswith(b'StartFontMetrics'):
raise RuntimeError('Not an AFM file')
def _parse_header(fh):
"""
Reads the font metrics header (up to the char metrics) and returns
a dictionary mapping *key* to *val*. *val* will be converted to the
appropriate python type as necessary; e.g.:
* 'False'->False
* '0'->0
* '-168 -218 1000 898'-> [-168, -218, 1000, 898]
Dictionary keys are
StartFontMetrics, FontName, FullName, FamilyName, Weight,
ItalicAngle, IsFixedPitch, FontBBox, UnderlinePosition,
UnderlineThickness, Version, Notice, EncodingScheme, CapHeight,
XHeight, Ascender, Descender, StartCharMetrics
"""
headerConverters = {
b'StartFontMetrics': _to_float,
b'FontName': _to_str,
b'FullName': _to_str,
b'FamilyName': _to_str,
b'Weight': _to_str,
b'ItalicAngle': _to_float,
b'IsFixedPitch': _to_bool,
b'FontBBox': _to_list_of_ints,
b'UnderlinePosition': _to_int,
b'UnderlineThickness': _to_int,
b'Version': _to_str,
b'Notice': _to_str,
b'EncodingScheme': _to_str,
b'CapHeight': _to_float, # Is the second version a mistake, or
b'Capheight': _to_float, # do some AFM files contain 'Capheight'? -JKS
b'XHeight': _to_float,
b'Ascender': _to_float,
b'Descender': _to_float,
b'StdHW': _to_float,
b'StdVW': _to_float,
b'StartCharMetrics': _to_int,
b'CharacterSet': _to_str,
b'Characters': _to_int,
}
d = {}
while 1:
line = fh.readline()
if not line:
break
line = line.rstrip()
if line.startswith(b'Comment'):
continue
lst = line.split(b' ', 1)
#print '%-s\t%-d line :: %-s' % ( fh.name, len(lst), line )
key = lst[0]
if len(lst) == 2:
val = lst[1]
else:
val = b''
#key, val = line.split(' ', 1)
try:
d[key] = headerConverters[key](val)
except ValueError:
print('Value error parsing header in AFM:',
key, val, file=sys.stderr)
continue
except KeyError:
print('Found an unknown keyword in AFM header (was %s)' % key,
file=sys.stderr)
continue
if key == b'StartCharMetrics':
return d
raise RuntimeError('Bad parse')
def _parse_char_metrics(fh):
"""
Return a character metric dictionary. Keys are the ASCII num of
the character, values are a (*wx*, *name*, *bbox*) tuple, where
*wx* is the character width, *name* is the postscript language
name, and *bbox* is a (*llx*, *lly*, *urx*, *ury*) tuple.
This function is incomplete per the standard, but thus far parses
all the sample afm files tried.
"""
ascii_d = {}
name_d = {}
while 1:
line = fh.readline()
if not line:
break
line = line.rstrip()
if line.startswith(b'EndCharMetrics'):
return ascii_d, name_d
vals = line.split(b';')[:4]
if len(vals) != 4:
raise RuntimeError('Bad char metrics line: %s' % line)
num = _to_int(vals[0].split()[1])
wx = _to_float(vals[1].split()[1])
name = vals[2].split()[1]
name = name.decode('ascii')
bbox = _to_list_of_floats(vals[3][2:])
bbox = list(map(int, bbox))
# Workaround: If the character name is 'Euro', give it the
# corresponding character code, according to WinAnsiEncoding (see PDF
# Reference).
if name == 'Euro':
num = 128
if num != -1:
ascii_d[num] = (wx, name, bbox)
name_d[name] = (wx, bbox)
raise RuntimeError('Bad parse')
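# Example of what _parse_char_metrics produces (illustrative values): a line
#   C 63 ; WX 444 ; N question ; B 68 -8 414 676 ;
# is parsed into
#   ascii_d[63]        == (444.0, 'question', [68, -8, 414, 676])
#   name_d['question'] == (444.0, [68, -8, 414, 676])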
def _parse_kern_pairs(fh):
"""
Return a kern pairs dictionary; keys are (*char1*, *char2*) tuples and
values are the kern pair value. For example, a kern pairs line like
``KPX A y -50``
will be represented as::
d[ ('A', 'y') ] = -50
"""
line = fh.readline()
if not line.startswith(b'StartKernPairs'):
raise RuntimeError('Bad start of kern pairs data: %s' % line)
d = {}
while 1:
line = fh.readline()
if not line:
break
line = line.rstrip()
if len(line) == 0:
continue
if line.startswith(b'EndKernPairs'):
fh.readline() # EndKernData
return d
vals = line.split()
if len(vals) != 4 or vals[0] != b'KPX':
raise RuntimeError('Bad kern pairs line: %s' % line)
c1, c2, val = _to_str(vals[1]), _to_str(vals[2]), _to_float(vals[3])
d[(c1, c2)] = val
raise RuntimeError('Bad kern pairs parse')
def _parse_composites(fh):
"""
Return a composites dictionary. Keys are the names of the
composites. Values are a num parts list of composite information,
with each element being a (*name*, *dx*, *dy*) tuple. Thus a
composites line reading:
CC Aacute 2 ; PCC A 0 0 ; PCC acute 160 170 ;
will be represented as::
d['Aacute'] = [ ('A', 0, 0), ('acute', 160, 170) ]
"""
d = {}
while 1:
line = fh.readline()
if not line:
break
line = line.rstrip()
if len(line) == 0:
continue
if line.startswith(b'EndComposites'):
return d
vals = line.split(b';')
cc = vals[0].split()
name, numParts = cc[1], _to_int(cc[2])
pccParts = []
for s in vals[1:-1]:
pcc = s.split()
name, dx, dy = pcc[1], _to_float(pcc[2]), _to_float(pcc[3])
pccParts.append((name, dx, dy))
d[name] = pccParts
raise RuntimeError('Bad composites parse')
def _parse_optional(fh):
"""
    Parse the optional fields for kern pair data and composites.
    The return value is a (*kernDict*, *compositeDict*) tuple; these are the
    return values from :func:`_parse_kern_pairs` and
    :func:`_parse_composites` if the data exists, or empty dicts
    otherwise.
"""
optional = {
b'StartKernData': _parse_kern_pairs,
b'StartComposites': _parse_composites,
}
d = {b'StartKernData': {}, b'StartComposites': {}}
while 1:
line = fh.readline()
if not line:
break
line = line.rstrip()
if len(line) == 0:
continue
key = line.split()[0]
if key in optional:
d[key] = optional[key](fh)
l = (d[b'StartKernData'], d[b'StartComposites'])
return l
def parse_afm(fh):
"""
    Parse the Adobe Font Metrics file in file handle *fh*. Return value
    is a (*dhead*, *dcmetrics_ascii*, *dcmetrics_name*, *dkernpairs*,
    *dcomposite*) tuple, where *dhead* is a :func:`_parse_header` dict,
    *dcmetrics_ascii* and *dcmetrics_name* are the two dicts returned by
    :func:`_parse_char_metrics`, *dkernpairs* is a
    :func:`_parse_kern_pairs` dict (possibly {}), and *dcomposite* is a
    :func:`_parse_composites` dict (possibly {})
"""
_sanity_check(fh)
dhead = _parse_header(fh)
dcmetrics_ascii, dcmetrics_name = _parse_char_metrics(fh)
doptional = _parse_optional(fh)
return dhead, dcmetrics_ascii, dcmetrics_name, doptional[0], doptional[1]
class AFM(object):
def __init__(self, fh):
"""
Parse the AFM file in file object *fh*
"""
(dhead, dcmetrics_ascii, dcmetrics_name, dkernpairs, dcomposite) = \
parse_afm(fh)
self._header = dhead
self._kern = dkernpairs
self._metrics = dcmetrics_ascii
self._metrics_by_name = dcmetrics_name
self._composite = dcomposite
def get_bbox_char(self, c, isord=False):
if not isord:
c = ord(c)
wx, name, bbox = self._metrics[c]
return bbox
def string_width_height(self, s):
"""
Return the string width (including kerning) and string height
as a (*w*, *h*) tuple.
"""
if not len(s):
return 0, 0
totalw = 0
namelast = None
miny = 1e9
maxy = 0
for c in s:
if c == '\n':
continue
wx, name, bbox = self._metrics[ord(c)]
l, b, w, h = bbox
# find the width with kerning
try:
kp = self._kern[(namelast, name)]
except KeyError:
kp = 0
totalw += wx + kp
# find the max y
thismax = b + h
if thismax > maxy:
maxy = thismax
# find the min y
thismin = b
if thismin < miny:
miny = thismin
namelast = name
return totalw, maxy - miny
def get_str_bbox_and_descent(self, s):
"""
        Return the string bounding box and the maximal descent
"""
if not len(s):
            return 0, 0, 0, 0, 0
totalw = 0
namelast = None
miny = 1e9
maxy = 0
left = 0
if not isinstance(s, six.text_type):
s = s.decode('ascii')
for c in s:
if c == '\n':
continue
name = uni2type1.get(ord(c), 'question')
try:
wx, bbox = self._metrics_by_name[name]
except KeyError:
name = 'question'
wx, bbox = self._metrics_by_name[name]
l, b, w, h = bbox
if l < left:
left = l
# find the width with kerning
try:
kp = self._kern[(namelast, name)]
except KeyError:
kp = 0
totalw += wx + kp
# find the max y
thismax = b + h
if thismax > maxy:
maxy = thismax
# find the min y
thismin = b
if thismin < miny:
miny = thismin
namelast = name
return left, miny, totalw, maxy - miny, -miny
def get_str_bbox(self, s):
"""
Return the string bounding box
"""
return self.get_str_bbox_and_descent(s)[:4]
def get_name_char(self, c, isord=False):
"""
Get the name of the character, i.e., ';' is 'semicolon'
"""
if not isord:
c = ord(c)
wx, name, bbox = self._metrics[c]
return name
def get_width_char(self, c, isord=False):
"""
Get the width of the character from the character metric WX
field
"""
if not isord:
c = ord(c)
wx, name, bbox = self._metrics[c]
return wx
def get_width_from_char_name(self, name):
"""
Get the width of the character from a type1 character name
"""
wx, bbox = self._metrics_by_name[name]
return wx
def get_height_char(self, c, isord=False):
"""
Get the height of character *c* from the bounding box. This
is the ink height (space is 0)
"""
if not isord:
c = ord(c)
wx, name, bbox = self._metrics[c]
return bbox[-1]
def get_kern_dist(self, c1, c2):
"""
Return the kerning pair distance (possibly 0) for chars *c1*
and *c2*
"""
name1, name2 = self.get_name_char(c1), self.get_name_char(c2)
return self.get_kern_dist_from_name(name1, name2)
def get_kern_dist_from_name(self, name1, name2):
"""
Return the kerning pair distance (possibly 0) for chars
*name1* and *name2*
"""
try:
return self._kern[(name1, name2)]
        except KeyError:
return 0
def get_fontname(self):
"Return the font name, e.g., 'Times-Roman'"
return self._header[b'FontName']
def get_fullname(self):
"Return the font full name, e.g., 'Times-Roman'"
name = self._header.get(b'FullName')
if name is None: # use FontName as a substitute
name = self._header[b'FontName']
return name
def get_familyname(self):
"Return the font family name, e.g., 'Times'"
name = self._header.get(b'FamilyName')
if name is not None:
return name
# FamilyName not specified so we'll make a guess
name = self.get_fullname()
extras = br'(?i)([ -](regular|plain|italic|oblique|bold|semibold|light|ultralight|extra|condensed))+$'
return re.sub(extras, '', name)
def get_weight(self):
"Return the font weight, e.g., 'Bold' or 'Roman'"
return self._header[b'Weight']
def get_angle(self):
"Return the fontangle as float"
return self._header[b'ItalicAngle']
def get_capheight(self):
"Return the cap height as float"
return self._header[b'CapHeight']
def get_xheight(self):
"Return the xheight as float"
return self._header[b'XHeight']
def get_underline_thickness(self):
"Return the underline thickness as float"
return self._header[b'UnderlineThickness']
def get_horizontal_stem_width(self):
"""
Return the standard horizontal stem width as float, or *None* if
not specified in AFM file.
"""
return self._header.get(b'StdHW', None)
def get_vertical_stem_width(self):
"""
Return the standard vertical stem width as float, or *None* if
not specified in AFM file.
"""
return self._header.get(b'StdVW', None)
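# Minimal usage sketch (added for illustration; it mirrors the module docstring
# and assumes matplotlib's bundled Times Roman AFM file can be found under
# rcParams['datapath']).
if __name__ == '__main__':
    from matplotlib import rcParams
    afm_fname = os.path.join(rcParams['datapath'], 'fonts', 'afm', 'ptmr8a.afm')
    with open(afm_fname, 'rb') as fh:  # binary mode: the parser works on bytes
        afm = AFM(fh)
    print(afm.get_fontname())
    print(afm.string_width_height('What the heck?'))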
| mit |
zaxtax/scikit-learn | sklearn/utils/tests/test_testing.py | 107 | 4210 | import warnings
import unittest
import sys
from nose.tools import assert_raises
from sklearn.utils.testing import (
_assert_less,
_assert_greater,
assert_less_equal,
assert_greater_equal,
assert_warns,
assert_no_warnings,
assert_equal,
set_random_state,
assert_raise_message)
from sklearn.tree import DecisionTreeClassifier
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
try:
from nose.tools import assert_less
def test_assert_less():
# Check that the nose implementation of assert_less gives the
# same thing as the scikit's
assert_less(0, 1)
_assert_less(0, 1)
assert_raises(AssertionError, assert_less, 1, 0)
assert_raises(AssertionError, _assert_less, 1, 0)
except ImportError:
pass
try:
from nose.tools import assert_greater
def test_assert_greater():
        # Check that the nose implementation of assert_greater gives the
# same thing as the scikit's
assert_greater(1, 0)
_assert_greater(1, 0)
assert_raises(AssertionError, assert_greater, 0, 1)
assert_raises(AssertionError, _assert_greater, 0, 1)
except ImportError:
pass
def test_assert_less_equal():
assert_less_equal(0, 1)
assert_less_equal(1, 1)
assert_raises(AssertionError, assert_less_equal, 1, 0)
def test_assert_greater_equal():
assert_greater_equal(1, 0)
assert_greater_equal(1, 1)
assert_raises(AssertionError, assert_greater_equal, 0, 1)
def test_set_random_state():
lda = LinearDiscriminantAnalysis()
tree = DecisionTreeClassifier()
# Linear Discriminant Analysis doesn't have random state: smoke test
set_random_state(lda, 3)
set_random_state(tree, 3)
assert_equal(tree.random_state, 3)
def test_assert_raise_message():
def _raise_ValueError(message):
raise ValueError(message)
def _no_raise():
pass
assert_raise_message(ValueError, "test",
_raise_ValueError, "test")
assert_raises(AssertionError,
assert_raise_message, ValueError, "something else",
_raise_ValueError, "test")
assert_raises(ValueError,
assert_raise_message, TypeError, "something else",
_raise_ValueError, "test")
assert_raises(AssertionError,
assert_raise_message, ValueError, "test",
_no_raise)
# multiple exceptions in a tuple
assert_raises(AssertionError,
assert_raise_message, (ValueError, AttributeError),
"test", _no_raise)
# This class is inspired from numpy 1.7 with an alteration to check
# the reset warning filters after calls to assert_warns.
# This assert_warns behavior is specific to scikit-learn because
#`clean_warning_registry()` is called internally by assert_warns
# and clears all previous filters.
class TestWarns(unittest.TestCase):
def test_warn(self):
def f():
warnings.warn("yo")
return 3
# Test that assert_warns is not impacted by externally set
# filters and is reset internally.
# This is because `clean_warning_registry()` is called internally by
# assert_warns and clears all previous filters.
warnings.simplefilter("ignore", UserWarning)
assert_equal(assert_warns(UserWarning, f), 3)
# Test that the warning registry is empty after assert_warns
assert_equal(sys.modules['warnings'].filters, [])
assert_raises(AssertionError, assert_no_warnings, f)
assert_equal(assert_no_warnings(lambda x: x, 1), 1)
def test_warn_wrong_warning(self):
def f():
warnings.warn("yo", DeprecationWarning)
failed = False
filters = sys.modules['warnings'].filters[:]
try:
try:
# Should raise an AssertionError
assert_warns(UserWarning, f)
failed = True
except AssertionError:
pass
finally:
sys.modules['warnings'].filters = filters
if failed:
raise AssertionError("wrong warning caught by assert_warn")
| bsd-3-clause |
doncat99/StockRecommendSystem | Source/FetchData/Fetch_Data_Stock_US_Monthly.py | 1 | 9587 | import sys, os, io, time, datetime, requests, warnings, configparser
import pandas as pd
import numpy as np
import pandas_datareader as pdr
from pandas.tseries.holiday import USFederalHolidayCalendar
import concurrent.futures
from tqdm import tqdm
cur_path = os.path.dirname(os.path.abspath(__file__))
for _ in range(2):
root_path = cur_path[0:cur_path.rfind('/', 0, len(cur_path))]
cur_path = root_path
sys.path.append(root_path + "/" + 'Source/DataBase/')
sys.path.append(root_path + "/" + 'Source/Utility/')
from Fetch_Data_Stock_US_StockList import getStocksList_US
from DB_API import queryStock, storeStock, queryStockList, storeStockList, queryStockPublishDay, storePublishDay
import fix_yahoo_finance as yf
def getSingleStock(symbol, from_date, till_date):
repeat_times = 1
message = ""
df = pd.DataFrame()
if len(symbol) == 0: return df, message
for _ in range(repeat_times):
try:
data = yf.download(symbol, start=from_date, end=till_date, interval='1mo')
#data = pdr.get_data_yahoo(symbol, start=from_date, end=till_date, interval='d')
data = data.rename(columns = {'Date':'date', 'Open':'open', 'High':'high', 'Low':'low', 'Close':'close', "Adj Close":'adj_close', 'Volume':'volume'})
data.index.name = 'date'
data.sort_index()
return data, ""
except Exception as e:
message = symbol + " fetch exception: " + str(e)
continue
return df, message
def judgeOpenDaysInRange(from_date, to_date):
cal = USFederalHolidayCalendar()
holidays = cal.holidays(from_date, to_date)
duedays = pd.bdate_range(from_date, to_date)
df = pd.DataFrame()
df['date'] = duedays
df['holiday'] = duedays.isin(holidays)
opendays = df[df['holiday'] == False]
return opendays
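# Illustrative example (a sketch, not used by the pipeline): for the first week
# of July 2017 the helper drops the weekend and the July 4th federal holiday, so
#   judgeOpenDaysInRange("2017-07-01", "2017-07-07")
# keeps only 2017-07-03, 2017-07-05, 2017-07-06 and 2017-07-07 as open days.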
def judgeNeedPreDownload(root_path, symbol, first_date, from_date, to_date):
publishDay = pd.Timestamp(queryStockPublishDay(root_path, "DB_STOCK", "SHEET_US", symbol))
if pd.isnull(publishDay) == False and publishDay == first_date:
return False
dateList = judgeOpenDaysInRange(from_date, to_date)
if len(dateList) > 0:
lastDay = pd.Timestamp(dateList['date'].index[-1])
if pd.isnull(publishDay) or lastDay > publishDay:
return True
return False
def judgeNeedPostDownload(now_date, from_date, to_date):
start_date = pd.Timestamp(from_date)
end_date = pd.Timestamp(to_date)
if start_date >= now_date: return False
if end_date > now_date: to_date = now_date
dateList = judgeOpenDaysInRange(from_date, to_date)
if len(dateList) > 0: return True
return False
def updateSingleStockData(root_path, symbol, from_date, till_date, force_check):
startTime = time.time()
message = ""
if len(symbol) == 0: return startTime, message
now_date = pd.Timestamp((datetime.datetime.now()).strftime("%Y-%m-%d"))
start_date = pd.Timestamp(from_date)
end_date = pd.Timestamp(till_date)
if end_date == now_date:
end_date = end_date - datetime.timedelta(days=1)
stockData, lastUpdateTime = queryStock(root_path, "DB_STOCK", "SHEET_US", "_MONTHLY", symbol, "monthly_update")
if stockData.empty:
stockData, message = getSingleStock(symbol, from_date, till_date)
if stockData.empty == False:
storeStock(root_path, "DB_STOCK", "SHEET_US", "_MONTHLY", symbol, stockData, "monthly_update")
first_date = pd.Timestamp(stockData.index[0])
to_date = (first_date - datetime.timedelta(days=1)).strftime("%Y-%m-%d")
if judgeNeedPreDownload(root_path, symbol, first_date, from_date, to_date):
storePublishDay(root_path, "DB_STOCK", "SHEET_US", symbol, first_date.strftime("%Y-%m-%d"))
message = message + ", database updated"
else:
print("get stock from network failed", symbol)
return startTime, message
modified = False
savePublishDay = False
first_date = pd.Timestamp(stockData.index[0])
last_date = pd.Timestamp(stockData.index[-1])
if start_date < first_date:
to_date = (first_date - datetime.timedelta(days=1)).strftime("%Y-%m-%d")
if judgeNeedPreDownload(root_path, symbol, first_date, from_date, to_date):
message = message + ", download pre data from " + from_date + " to " + to_date
moreStockData, tempMessage = getSingleStock(symbol, from_date, to_date)
message = message + tempMessage
if len(moreStockData) > 0:
if isinstance(moreStockData.index, pd.DatetimeIndex):
moreStockData.index = moreStockData.index.strftime("%Y-%m-%d")
modified = True
stockData = pd.concat([moreStockData, stockData])
stockData.index.name = 'date'
else:
savePublishDay = True
storePublishDay(root_path, "DB_STOCK", "SHEET_US", symbol, first_date.strftime("%Y-%m-%d"))
message = message + ", save stock publish(IPO) day, next time won't check it again"
updateOnce = now_date > lastUpdateTime
if (end_date > last_date) and (updateOnce or force_check):
to_date = (last_date + datetime.timedelta(days=1)).strftime("%Y-%m-%d")
if judgeNeedPostDownload(now_date, to_date, till_date):
message = message + ", download post data from " + to_date + " to " + till_date
moreStockData, tempMessage = getSingleStock(symbol, to_date, till_date)
message = message + tempMessage
if len(moreStockData) > 0:
if isinstance(moreStockData.index, pd.DatetimeIndex):
moreStockData.index = moreStockData.index.strftime("%Y-%m-%d")
modified = True
stockData = pd.concat([stockData, moreStockData])
stockData.index.name = 'date'
if modified:
stockData = stockData[~stockData.index.duplicated(keep='first')]
storeStock(root_path, "DB_STOCK", "SHEET_US", "_MONTHLY", symbol, stockData, "monthly_update")
elif updateOnce:
now_date = datetime.datetime.now().strftime("%Y-%m-%d")
stockList = queryStockList(root_path, "DB_STOCK", "SHEET_US")
if stockList[stockList.index == symbol]['monthly_update'][0] != now_date:
stockList.set_value(symbol, 'monthly_update', now_date)
storeStockList(root_path, "DB_STOCK", "SHEET_US", stockList, symbol)
elif savePublishDay == False:
message = ""
return startTime, message
def updateStockData_US_Monthly(root_path, from_date, till_date, storeType, force_check = False):
symbols = getStocksList_US(root_path).index
pbar = tqdm(total=len(symbols))
if storeType == 2:# or storeType == 1:
# count = 10
for stock in symbols:
startTime, message = updateSingleStockData(root_path, stock, from_date, till_date, force_check)
outMessage = '%-*s fetched in: %.4s seconds' % (6, stock, (time.time() - startTime))
pbar.set_description(outMessage)
pbar.update(1)
# count = count - 1
# if count == 0: break
if storeType == 1:
log_errors = []
log_update = []
# Parallel mode is not suitable in CSV storage mode, since no lock is added to limit csv file IO.
with concurrent.futures.ThreadPoolExecutor(max_workers=8) as executor:
# Start the load operations and mark each future with its URL
future_to_stock = {executor.submit(updateSingleStockData, root_path, symbol, from_date, till_date, force_check): symbol for symbol in symbols}
for future in concurrent.futures.as_completed(future_to_stock):
stock = future_to_stock[future]
try:
startTime, message = future.result()
except Exception as exc:
startTime = time.time()
log_errors.append('%r generated an exception: %s' % (stock, exc))
len_errors = len(log_errors)
if len_errors % 5 == 0: print(log_errors[(len_errors-5):])
else:
if len(message) > 0: log_update.append(message)
outMessage = '%-*s fetched in: %.4s seconds' % (6, stock, (time.time() - startTime))
pbar.set_description(outMessage)
pbar.update(1)
if len(log_errors) > 0: print(log_errors)
# if len(log_update) > 0: print(log_update)
pbar.close()
return symbols
if __name__ == "__main__":
pd.set_option('precision', 3)
pd.set_option('display.width',1000)
warnings.filterwarnings('ignore', category=pd.io.pytables.PerformanceWarning)
now = datetime.datetime.now().strftime("%Y-%m-%d")
config = configparser.ConfigParser()
config.read(root_path + "/" + "config.ini")
storeType = int(config.get('Setting', 'StoreType'))
# if storeType == 1:
# from Start_DB_Server import StartServer, ShutdownServer
# # start database server (async)
# thread = StartServer(root_path)
# # wait for db start, the standard procedure should listen to
# # the completed event of function "StartServer"
# time.sleep(5)
updateStockData_US_Monthly(root_path, "2014-01-01", now, storeType)
# if storeType == 1:
# # stop database server (sync)
# time.sleep(5)
# ShutdownServer()
| mit |
CforED/Machine-Learning | examples/neighbors/plot_regression.py | 349 | 1402 | """
============================
Nearest Neighbors regression
============================
Demonstrate the resolution of a regression problem
using a k-Nearest Neighbor and the interpolation of the
target using both barycenter and constant weights.
"""
print(__doc__)
# Author: Alexandre Gramfort <[email protected]>
# Fabian Pedregosa <[email protected]>
#
# License: BSD 3 clause (C) INRIA
###############################################################################
# Generate sample data
import numpy as np
import matplotlib.pyplot as plt
from sklearn import neighbors
np.random.seed(0)
X = np.sort(5 * np.random.rand(40, 1), axis=0)
T = np.linspace(0, 5, 500)[:, np.newaxis]
y = np.sin(X).ravel()
# Add noise to targets
y[::5] += 1 * (0.5 - np.random.rand(8))
###############################################################################
# Fit regression model
n_neighbors = 5
for i, weights in enumerate(['uniform', 'distance']):
knn = neighbors.KNeighborsRegressor(n_neighbors, weights=weights)
y_ = knn.fit(X, y).predict(T)
plt.subplot(2, 1, i + 1)
plt.scatter(X, y, c='k', label='data')
plt.plot(T, y_, c='g', label='prediction')
plt.axis('tight')
plt.legend()
plt.title("KNeighborsRegressor (k = %i, weights = '%s')" % (n_neighbors,
weights))
plt.show()
| bsd-3-clause |
mazalet/losslessh264 | plot_prior_misses.py | 40 | 1124 | # Run h264dec on a single file compiled with PRIOR_STATS and then run this script
# Outputs timeseries plot at /tmp/misses.pdf
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
import os
def temporal_misses(key):
values = data[key]
numbins = 100
binsize = len(values) // numbins
bins = [[]]
for v in values:
if len(bins[-1]) >= binsize:
bins.append([])
bins[-1].append(v)
x = range(len(bins))
total_misses = float(sum(values))
y = [100 * float(sum(b)) / total_misses for b in bins]
return plt.plot(x, y, label=key)[0]
paths = filter(lambda s: 'misses.log' in s, os.listdir('/tmp/'))
data = {p.split('_misses.')[0]: map(lambda c: c == '0', open('/tmp/' + p).read()) for p in paths}
handles = []
plt.figure(figsize=(20,10))
keys = data.keys()
for k in keys:
handles.append(temporal_misses(k))
plt.axis((0, 100, 0, 2))
plt.xlabel('temporal %')
plt.ylabel('% total misses')
plt.legend(handles, keys, bbox_to_anchor=(1, 1), bbox_transform=plt.gcf().transFigure)
out = PdfPages('/tmp/misses.pdf')
out.savefig()
out.close()
| bsd-2-clause |
junwucs/h2o-3 | h2o-py/tests/utils/ipynb_demo_runner.py | 4 | 1616 | import json
import os
def ipy_notebook_exec(path,save_and_norun=False):
notebook = json.load(open(path))
program = ''
for block in ipy_code_blocks(notebook):
for line in ipy_valid_lines(block):
if "h2o.init" not in line:
program += line if '\n' in line else line + '\n'
if save_and_norun:
with open(os.path.basename(path).split('ipynb')[0]+'py',"w") as f:
f.write(program)
else:
d={}
exec program in d # safe, but horrible (exec is horrible)
def ipy_blocks(notebook):
if 'worksheets' in notebook.keys():
return notebook['worksheets'][0]['cells'] # just take the first worksheet
elif 'cells' in notebook.keys():
return notebook['cells']
else:
raise NotImplementedError, "ipython notebook cell/block json format not handled"
def ipy_code_blocks(notebook):
return [cell for cell in ipy_blocks(notebook) if cell['cell_type'] == 'code']
def ipy_lines(block):
if 'source' in block.keys():
return block['source']
elif 'input' in block.keys():
return block['input']
else:
raise NotImplementedError, "ipython notebook source/line json format not handled"
def ipy_valid_lines(block):
# remove ipython magic functions
lines = [line for line in ipy_lines(block) if not line.startswith('%')]
# (clunky) matplotlib handling
for line in lines:
if "import matplotlib.pyplot as plt" in line:
import matplotlib
matplotlib.use('Agg', warn=False)
return [line for line in lines if not "plt.show()" in line]
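# Illustrative usage (a sketch; the notebook path below is hypothetical): convert
# a demo notebook into a plain .py file in the current working directory instead
# of executing it:
#   ipy_notebook_exec("../h2o-py/demos/some_demo.ipynb", save_and_norun=True)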
| apache-2.0 |
gengliangwang/spark | python/pyspark/ml/clustering.py | 15 | 62447 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
import warnings
from pyspark import since, keyword_only
from pyspark.ml.param.shared import HasMaxIter, HasFeaturesCol, HasSeed, HasPredictionCol, \
HasAggregationDepth, HasWeightCol, HasTol, HasProbabilityCol, HasDistanceMeasure, \
HasCheckpointInterval, Param, Params, TypeConverters
from pyspark.ml.util import JavaMLWritable, JavaMLReadable, GeneralJavaMLWritable, \
HasTrainingSummary, SparkContext
from pyspark.ml.wrapper import JavaEstimator, JavaModel, JavaParams, JavaWrapper
from pyspark.ml.common import inherit_doc, _java2py
from pyspark.ml.stat import MultivariateGaussian
from pyspark.sql import DataFrame
__all__ = ['BisectingKMeans', 'BisectingKMeansModel', 'BisectingKMeansSummary',
'KMeans', 'KMeansModel', 'KMeansSummary',
'GaussianMixture', 'GaussianMixtureModel', 'GaussianMixtureSummary',
'LDA', 'LDAModel', 'LocalLDAModel', 'DistributedLDAModel', 'PowerIterationClustering']
class ClusteringSummary(JavaWrapper):
"""
Clustering results for a given model.
.. versionadded:: 2.1.0
"""
@property
@since("2.1.0")
def predictionCol(self):
"""
Name for column of predicted clusters in `predictions`.
"""
return self._call_java("predictionCol")
@property
@since("2.1.0")
def predictions(self):
"""
DataFrame produced by the model's `transform` method.
"""
return self._call_java("predictions")
@property
@since("2.1.0")
def featuresCol(self):
"""
Name for column of features in `predictions`.
"""
return self._call_java("featuresCol")
@property
@since("2.1.0")
def k(self):
"""
The number of clusters the model was trained with.
"""
return self._call_java("k")
@property
@since("2.1.0")
def cluster(self):
"""
DataFrame of predicted cluster centers for each training data point.
"""
return self._call_java("cluster")
@property
@since("2.1.0")
def clusterSizes(self):
"""
Size of (number of data points in) each cluster.
"""
return self._call_java("clusterSizes")
@property
@since("2.4.0")
def numIter(self):
"""
Number of iterations.
"""
return self._call_java("numIter")
@inherit_doc
class _GaussianMixtureParams(HasMaxIter, HasFeaturesCol, HasSeed, HasPredictionCol,
HasProbabilityCol, HasTol, HasAggregationDepth, HasWeightCol):
"""
Params for :py:class:`GaussianMixture` and :py:class:`GaussianMixtureModel`.
.. versionadded:: 3.0.0
"""
k = Param(Params._dummy(), "k", "Number of independent Gaussians in the mixture model. " +
"Must be > 1.", typeConverter=TypeConverters.toInt)
def __init__(self, *args):
super(_GaussianMixtureParams, self).__init__(*args)
self._setDefault(k=2, tol=0.01, maxIter=100, aggregationDepth=2)
@since("2.0.0")
def getK(self):
"""
Gets the value of `k`
"""
return self.getOrDefault(self.k)
class GaussianMixtureModel(JavaModel, _GaussianMixtureParams, JavaMLWritable, JavaMLReadable,
HasTrainingSummary):
"""
Model fitted by GaussianMixture.
.. versionadded:: 2.0.0
"""
@since("3.0.0")
def setFeaturesCol(self, value):
"""
Sets the value of :py:attr:`featuresCol`.
"""
return self._set(featuresCol=value)
@since("3.0.0")
def setPredictionCol(self, value):
"""
Sets the value of :py:attr:`predictionCol`.
"""
return self._set(predictionCol=value)
@since("3.0.0")
def setProbabilityCol(self, value):
"""
Sets the value of :py:attr:`probabilityCol`.
"""
return self._set(probabilityCol=value)
@property
@since("2.0.0")
def weights(self):
"""
Weight for each Gaussian distribution in the mixture.
This is a multinomial probability distribution over the k Gaussians,
where weights[i] is the weight for Gaussian i, and weights sum to 1.
"""
return self._call_java("weights")
@property
@since("3.0.0")
def gaussians(self):
"""
Array of :py:class:`MultivariateGaussian` where gaussians[i] represents
the Multivariate Gaussian (Normal) Distribution for Gaussian i
"""
sc = SparkContext._active_spark_context
jgaussians = self._java_obj.gaussians()
return [
MultivariateGaussian(_java2py(sc, jgaussian.mean()), _java2py(sc, jgaussian.cov()))
for jgaussian in jgaussians]
@property
@since("2.0.0")
def gaussiansDF(self):
"""
Retrieve Gaussian distributions as a DataFrame.
Each row represents a Gaussian Distribution.
The DataFrame has two columns: mean (Vector) and cov (Matrix).
"""
return self._call_java("gaussiansDF")
@property
@since("2.1.0")
def summary(self):
"""
Gets summary (cluster assignments, cluster sizes) of the model trained on the
training set. An exception is thrown if no summary exists.
"""
if self.hasSummary:
return GaussianMixtureSummary(super(GaussianMixtureModel, self).summary)
else:
raise RuntimeError("No training summary available for this %s" %
self.__class__.__name__)
@since("3.0.0")
def predict(self, value):
"""
Predict label for the given features.
"""
return self._call_java("predict", value)
@since("3.0.0")
def predictProbability(self, value):
"""
Predict probability for the given features.
"""
return self._call_java("predictProbability", value)
@inherit_doc
class GaussianMixture(JavaEstimator, _GaussianMixtureParams, JavaMLWritable, JavaMLReadable):
"""
GaussianMixture clustering.
This class performs expectation maximization for multivariate Gaussian
Mixture Models (GMMs). A GMM represents a composite distribution of
independent Gaussian distributions with associated "mixing" weights
specifying each's contribution to the composite.
Given a set of sample points, this class will maximize the log-likelihood
for a mixture of k Gaussians, iterating until the log-likelihood changes by
less than convergenceTol, or until it has reached the max number of iterations.
While this process is generally guaranteed to converge, it is not guaranteed
to find a global optimum.
.. versionadded:: 2.0.0
Notes
-----
For high-dimensional data (with many features), this algorithm may perform poorly.
This is due to high-dimensional data (a) making it difficult to cluster at all
(based on statistical/theoretical arguments) and (b) numerical issues with
Gaussian distributions.
Examples
--------
>>> from pyspark.ml.linalg import Vectors
>>> data = [(Vectors.dense([-0.1, -0.05 ]),),
... (Vectors.dense([-0.01, -0.1]),),
... (Vectors.dense([0.9, 0.8]),),
... (Vectors.dense([0.75, 0.935]),),
... (Vectors.dense([-0.83, -0.68]),),
... (Vectors.dense([-0.91, -0.76]),)]
>>> df = spark.createDataFrame(data, ["features"])
>>> gm = GaussianMixture(k=3, tol=0.0001, seed=10)
>>> gm.getMaxIter()
100
>>> gm.setMaxIter(30)
GaussianMixture...
>>> gm.getMaxIter()
30
>>> model = gm.fit(df)
>>> model.getAggregationDepth()
2
>>> model.getFeaturesCol()
'features'
>>> model.setPredictionCol("newPrediction")
GaussianMixtureModel...
>>> model.predict(df.head().features)
2
>>> model.predictProbability(df.head().features)
DenseVector([0.0, 0.0, 1.0])
>>> model.hasSummary
True
>>> summary = model.summary
>>> summary.k
3
>>> summary.clusterSizes
[2, 2, 2]
>>> weights = model.weights
>>> len(weights)
3
>>> gaussians = model.gaussians
>>> len(gaussians)
3
>>> gaussians[0].mean
DenseVector([0.825, 0.8675])
>>> gaussians[0].cov
DenseMatrix(2, 2, [0.0056, -0.0051, -0.0051, 0.0046], 0)
>>> gaussians[1].mean
DenseVector([-0.87, -0.72])
>>> gaussians[1].cov
DenseMatrix(2, 2, [0.0016, 0.0016, 0.0016, 0.0016], 0)
>>> gaussians[2].mean
DenseVector([-0.055, -0.075])
>>> gaussians[2].cov
DenseMatrix(2, 2, [0.002, -0.0011, -0.0011, 0.0006], 0)
>>> model.gaussiansDF.select("mean").head()
Row(mean=DenseVector([0.825, 0.8675]))
>>> model.gaussiansDF.select("cov").head()
Row(cov=DenseMatrix(2, 2, [0.0056, -0.0051, -0.0051, 0.0046], False))
>>> transformed = model.transform(df).select("features", "newPrediction")
>>> rows = transformed.collect()
>>> rows[4].newPrediction == rows[5].newPrediction
True
>>> rows[2].newPrediction == rows[3].newPrediction
True
>>> gmm_path = temp_path + "/gmm"
>>> gm.save(gmm_path)
>>> gm2 = GaussianMixture.load(gmm_path)
>>> gm2.getK()
3
>>> model_path = temp_path + "/gmm_model"
>>> model.save(model_path)
>>> model2 = GaussianMixtureModel.load(model_path)
>>> model2.hasSummary
False
>>> model2.weights == model.weights
True
>>> model2.gaussians[0].mean == model.gaussians[0].mean
True
>>> model2.gaussians[0].cov == model.gaussians[0].cov
True
>>> model2.gaussians[1].mean == model.gaussians[1].mean
True
>>> model2.gaussians[1].cov == model.gaussians[1].cov
True
>>> model2.gaussians[2].mean == model.gaussians[2].mean
True
>>> model2.gaussians[2].cov == model.gaussians[2].cov
True
>>> model2.gaussiansDF.select("mean").head()
Row(mean=DenseVector([0.825, 0.8675]))
>>> model2.gaussiansDF.select("cov").head()
Row(cov=DenseMatrix(2, 2, [0.0056, -0.0051, -0.0051, 0.0046], False))
>>> model.transform(df).take(1) == model2.transform(df).take(1)
True
>>> gm2.setWeightCol("weight")
GaussianMixture...
"""
@keyword_only
def __init__(self, *, featuresCol="features", predictionCol="prediction", k=2,
probabilityCol="probability", tol=0.01, maxIter=100, seed=None,
aggregationDepth=2, weightCol=None):
"""
__init__(self, \\*, featuresCol="features", predictionCol="prediction", k=2, \
probabilityCol="probability", tol=0.01, maxIter=100, seed=None, \
aggregationDepth=2, weightCol=None)
"""
super(GaussianMixture, self).__init__()
self._java_obj = self._new_java_obj("org.apache.spark.ml.clustering.GaussianMixture",
self.uid)
kwargs = self._input_kwargs
self.setParams(**kwargs)
def _create_model(self, java_model):
return GaussianMixtureModel(java_model)
@keyword_only
@since("2.0.0")
def setParams(self, *, featuresCol="features", predictionCol="prediction", k=2,
probabilityCol="probability", tol=0.01, maxIter=100, seed=None,
aggregationDepth=2, weightCol=None):
"""
setParams(self, \\*, featuresCol="features", predictionCol="prediction", k=2, \
probabilityCol="probability", tol=0.01, maxIter=100, seed=None, \
aggregationDepth=2, weightCol=None)
Sets params for GaussianMixture.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
@since("2.0.0")
def setK(self, value):
"""
Sets the value of :py:attr:`k`.
"""
return self._set(k=value)
@since("2.0.0")
def setMaxIter(self, value):
"""
Sets the value of :py:attr:`maxIter`.
"""
return self._set(maxIter=value)
@since("2.0.0")
def setFeaturesCol(self, value):
"""
Sets the value of :py:attr:`featuresCol`.
"""
return self._set(featuresCol=value)
@since("2.0.0")
def setPredictionCol(self, value):
"""
Sets the value of :py:attr:`predictionCol`.
"""
return self._set(predictionCol=value)
@since("2.0.0")
def setProbabilityCol(self, value):
"""
Sets the value of :py:attr:`probabilityCol`.
"""
return self._set(probabilityCol=value)
@since("3.0.0")
def setWeightCol(self, value):
"""
Sets the value of :py:attr:`weightCol`.
"""
return self._set(weightCol=value)
@since("2.0.0")
def setSeed(self, value):
"""
Sets the value of :py:attr:`seed`.
"""
return self._set(seed=value)
@since("2.0.0")
def setTol(self, value):
"""
Sets the value of :py:attr:`tol`.
"""
return self._set(tol=value)
@since("3.0.0")
def setAggregationDepth(self, value):
"""
Sets the value of :py:attr:`aggregationDepth`.
"""
return self._set(aggregationDepth=value)
class GaussianMixtureSummary(ClusteringSummary):
"""
Gaussian mixture clustering results for a given model.
.. versionadded:: 2.1.0
"""
@property
@since("2.1.0")
def probabilityCol(self):
"""
Name for column of predicted probability of each cluster in `predictions`.
"""
return self._call_java("probabilityCol")
@property
@since("2.1.0")
def probability(self):
"""
DataFrame of probabilities of each cluster for each training data point.
"""
return self._call_java("probability")
@property
@since("2.2.0")
def logLikelihood(self):
"""
Total log-likelihood for this model on the given data.
"""
return self._call_java("logLikelihood")
class KMeansSummary(ClusteringSummary):
"""
Summary of KMeans.
.. versionadded:: 2.1.0
"""
@property
@since("2.4.0")
def trainingCost(self):
"""
K-means cost (sum of squared distances to the nearest centroid for all points in the
training dataset). This is equivalent to sklearn's inertia.
"""
return self._call_java("trainingCost")
@inherit_doc
class _KMeansParams(HasMaxIter, HasFeaturesCol, HasSeed, HasPredictionCol, HasTol,
HasDistanceMeasure, HasWeightCol):
"""
Params for :py:class:`KMeans` and :py:class:`KMeansModel`.
.. versionadded:: 3.0.0
"""
k = Param(Params._dummy(), "k", "The number of clusters to create. Must be > 1.",
typeConverter=TypeConverters.toInt)
initMode = Param(Params._dummy(), "initMode",
"The initialization algorithm. This can be either \"random\" to " +
"choose random points as initial cluster centers, or \"k-means||\" " +
"to use a parallel variant of k-means++",
typeConverter=TypeConverters.toString)
initSteps = Param(Params._dummy(), "initSteps", "The number of steps for k-means|| " +
"initialization mode. Must be > 0.", typeConverter=TypeConverters.toInt)
def __init__(self, *args):
super(_KMeansParams, self).__init__(*args)
self._setDefault(k=2, initMode="k-means||", initSteps=2, tol=1e-4, maxIter=20,
distanceMeasure="euclidean")
@since("1.5.0")
def getK(self):
"""
Gets the value of `k`
"""
return self.getOrDefault(self.k)
@since("1.5.0")
def getInitMode(self):
"""
Gets the value of `initMode`
"""
return self.getOrDefault(self.initMode)
@since("1.5.0")
def getInitSteps(self):
"""
Gets the value of `initSteps`
"""
return self.getOrDefault(self.initSteps)
class KMeansModel(JavaModel, _KMeansParams, GeneralJavaMLWritable, JavaMLReadable,
HasTrainingSummary):
"""
Model fitted by KMeans.
.. versionadded:: 1.5.0
"""
@since("3.0.0")
def setFeaturesCol(self, value):
"""
Sets the value of :py:attr:`featuresCol`.
"""
return self._set(featuresCol=value)
@since("3.0.0")
def setPredictionCol(self, value):
"""
Sets the value of :py:attr:`predictionCol`.
"""
return self._set(predictionCol=value)
@since("1.5.0")
def clusterCenters(self):
"""Get the cluster centers, represented as a list of NumPy arrays."""
return [c.toArray() for c in self._call_java("clusterCenters")]
@property
@since("2.1.0")
def summary(self):
"""
Gets summary (cluster assignments, cluster sizes) of the model trained on the
training set. An exception is thrown if no summary exists.
"""
if self.hasSummary:
return KMeansSummary(super(KMeansModel, self).summary)
else:
raise RuntimeError("No training summary available for this %s" %
self.__class__.__name__)
@since("3.0.0")
def predict(self, value):
"""
Predict label for the given features.
"""
return self._call_java("predict", value)
@inherit_doc
class KMeans(JavaEstimator, _KMeansParams, JavaMLWritable, JavaMLReadable):
"""
K-means clustering with a k-means++ like initialization mode
(the k-means|| algorithm by Bahmani et al).
.. versionadded:: 1.5.0
Examples
--------
>>> from pyspark.ml.linalg import Vectors
>>> data = [(Vectors.dense([0.0, 0.0]), 2.0), (Vectors.dense([1.0, 1.0]), 2.0),
... (Vectors.dense([9.0, 8.0]), 2.0), (Vectors.dense([8.0, 9.0]), 2.0)]
>>> df = spark.createDataFrame(data, ["features", "weighCol"])
>>> kmeans = KMeans(k=2)
>>> kmeans.setSeed(1)
KMeans...
>>> kmeans.setWeightCol("weighCol")
KMeans...
>>> kmeans.setMaxIter(10)
KMeans...
>>> kmeans.getMaxIter()
10
>>> kmeans.clear(kmeans.maxIter)
>>> model = kmeans.fit(df)
>>> model.getDistanceMeasure()
'euclidean'
>>> model.setPredictionCol("newPrediction")
KMeansModel...
>>> model.predict(df.head().features)
0
>>> centers = model.clusterCenters()
>>> len(centers)
2
>>> transformed = model.transform(df).select("features", "newPrediction")
>>> rows = transformed.collect()
>>> rows[0].newPrediction == rows[1].newPrediction
True
>>> rows[2].newPrediction == rows[3].newPrediction
True
>>> model.hasSummary
True
>>> summary = model.summary
>>> summary.k
2
>>> summary.clusterSizes
[2, 2]
>>> summary.trainingCost
4.0
>>> kmeans_path = temp_path + "/kmeans"
>>> kmeans.save(kmeans_path)
>>> kmeans2 = KMeans.load(kmeans_path)
>>> kmeans2.getK()
2
>>> model_path = temp_path + "/kmeans_model"
>>> model.save(model_path)
>>> model2 = KMeansModel.load(model_path)
>>> model2.hasSummary
False
>>> model.clusterCenters()[0] == model2.clusterCenters()[0]
array([ True, True], dtype=bool)
>>> model.clusterCenters()[1] == model2.clusterCenters()[1]
array([ True, True], dtype=bool)
>>> model.transform(df).take(1) == model2.transform(df).take(1)
True
"""
@keyword_only
def __init__(self, *, featuresCol="features", predictionCol="prediction", k=2,
initMode="k-means||", initSteps=2, tol=1e-4, maxIter=20, seed=None,
distanceMeasure="euclidean", weightCol=None):
"""
__init__(self, \\*, featuresCol="features", predictionCol="prediction", k=2, \
initMode="k-means||", initSteps=2, tol=1e-4, maxIter=20, seed=None, \
distanceMeasure="euclidean", weightCol=None)
"""
super(KMeans, self).__init__()
self._java_obj = self._new_java_obj("org.apache.spark.ml.clustering.KMeans", self.uid)
kwargs = self._input_kwargs
self.setParams(**kwargs)
def _create_model(self, java_model):
return KMeansModel(java_model)
@keyword_only
@since("1.5.0")
def setParams(self, *, featuresCol="features", predictionCol="prediction", k=2,
initMode="k-means||", initSteps=2, tol=1e-4, maxIter=20, seed=None,
distanceMeasure="euclidean", weightCol=None):
"""
setParams(self, \\*, featuresCol="features", predictionCol="prediction", k=2, \
initMode="k-means||", initSteps=2, tol=1e-4, maxIter=20, seed=None, \
distanceMeasure="euclidean", weightCol=None)
Sets params for KMeans.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
@since("1.5.0")
def setK(self, value):
"""
Sets the value of :py:attr:`k`.
"""
return self._set(k=value)
@since("1.5.0")
def setInitMode(self, value):
"""
Sets the value of :py:attr:`initMode`.
"""
return self._set(initMode=value)
@since("1.5.0")
def setInitSteps(self, value):
"""
Sets the value of :py:attr:`initSteps`.
"""
return self._set(initSteps=value)
@since("2.4.0")
def setDistanceMeasure(self, value):
"""
Sets the value of :py:attr:`distanceMeasure`.
"""
return self._set(distanceMeasure=value)
@since("1.5.0")
def setMaxIter(self, value):
"""
Sets the value of :py:attr:`maxIter`.
"""
return self._set(maxIter=value)
@since("1.5.0")
def setFeaturesCol(self, value):
"""
Sets the value of :py:attr:`featuresCol`.
"""
return self._set(featuresCol=value)
@since("1.5.0")
def setPredictionCol(self, value):
"""
Sets the value of :py:attr:`predictionCol`.
"""
return self._set(predictionCol=value)
@since("1.5.0")
def setSeed(self, value):
"""
Sets the value of :py:attr:`seed`.
"""
return self._set(seed=value)
@since("1.5.0")
def setTol(self, value):
"""
Sets the value of :py:attr:`tol`.
"""
return self._set(tol=value)
@since("3.0.0")
def setWeightCol(self, value):
"""
Sets the value of :py:attr:`weightCol`.
"""
return self._set(weightCol=value)
@inherit_doc
class _BisectingKMeansParams(HasMaxIter, HasFeaturesCol, HasSeed, HasPredictionCol,
HasDistanceMeasure, HasWeightCol):
"""
Params for :py:class:`BisectingKMeans` and :py:class:`BisectingKMeansModel`.
.. versionadded:: 3.0.0
"""
k = Param(Params._dummy(), "k", "The desired number of leaf clusters. Must be > 1.",
typeConverter=TypeConverters.toInt)
minDivisibleClusterSize = Param(Params._dummy(), "minDivisibleClusterSize",
"The minimum number of points (if >= 1.0) or the minimum " +
"proportion of points (if < 1.0) of a divisible cluster.",
typeConverter=TypeConverters.toFloat)
def __init__(self, *args):
super(_BisectingKMeansParams, self).__init__(*args)
self._setDefault(maxIter=20, k=4, minDivisibleClusterSize=1.0)
@since("2.0.0")
def getK(self):
"""
Gets the value of `k` or its default value.
"""
return self.getOrDefault(self.k)
@since("2.0.0")
def getMinDivisibleClusterSize(self):
"""
Gets the value of `minDivisibleClusterSize` or its default value.
"""
return self.getOrDefault(self.minDivisibleClusterSize)
class BisectingKMeansModel(JavaModel, _BisectingKMeansParams, JavaMLWritable, JavaMLReadable,
HasTrainingSummary):
"""
Model fitted by BisectingKMeans.
.. versionadded:: 2.0.0
"""
@since("3.0.0")
def setFeaturesCol(self, value):
"""
Sets the value of :py:attr:`featuresCol`.
"""
return self._set(featuresCol=value)
@since("3.0.0")
def setPredictionCol(self, value):
"""
Sets the value of :py:attr:`predictionCol`.
"""
return self._set(predictionCol=value)
@since("2.0.0")
def clusterCenters(self):
"""Get the cluster centers, represented as a list of NumPy arrays."""
return [c.toArray() for c in self._call_java("clusterCenters")]
@since("2.0.0")
def computeCost(self, dataset):
"""
Computes the sum of squared distances between the input points
and their corresponding cluster centers.
.. deprecated:: 3.0.0
It will be removed in future versions. Use :py:class:`ClusteringEvaluator` instead.
You can also get the cost on the training dataset in the summary.
"""
warnings.warn("Deprecated in 3.0.0. It will be removed in future versions. Use "
"ClusteringEvaluator instead. You can also get the cost on the training "
"dataset in the summary.", FutureWarning)
return self._call_java("computeCost", dataset)
@property
@since("2.1.0")
def summary(self):
"""
Gets summary (cluster assignments, cluster sizes) of the model trained on the
training set. An exception is thrown if no summary exists.
"""
if self.hasSummary:
return BisectingKMeansSummary(super(BisectingKMeansModel, self).summary)
else:
raise RuntimeError("No training summary available for this %s" %
self.__class__.__name__)
@since("3.0.0")
def predict(self, value):
"""
Predict label for the given features.
"""
return self._call_java("predict", value)
@inherit_doc
class BisectingKMeans(JavaEstimator, _BisectingKMeansParams, JavaMLWritable, JavaMLReadable):
"""
A bisecting k-means algorithm based on the paper "A comparison of document clustering
techniques" by Steinbach, Karypis, and Kumar, with modification to fit Spark.
The algorithm starts from a single cluster that contains all points.
Iteratively it finds divisible clusters on the bottom level and bisects each of them using
k-means, until there are `k` leaf clusters in total or no leaf clusters are divisible.
The bisecting steps of clusters on the same level are grouped together to increase parallelism.
If bisecting all divisible clusters on the bottom level would result more than `k` leaf
clusters, larger clusters get higher priority.
.. versionadded:: 2.0.0
Examples
--------
>>> from pyspark.ml.linalg import Vectors
>>> data = [(Vectors.dense([0.0, 0.0]), 2.0), (Vectors.dense([1.0, 1.0]), 2.0),
... (Vectors.dense([9.0, 8.0]), 2.0), (Vectors.dense([8.0, 9.0]), 2.0)]
>>> df = spark.createDataFrame(data, ["features", "weighCol"])
>>> bkm = BisectingKMeans(k=2, minDivisibleClusterSize=1.0)
>>> bkm.setMaxIter(10)
BisectingKMeans...
>>> bkm.getMaxIter()
10
>>> bkm.clear(bkm.maxIter)
>>> bkm.setSeed(1)
BisectingKMeans...
>>> bkm.setWeightCol("weighCol")
BisectingKMeans...
>>> bkm.getSeed()
1
>>> bkm.clear(bkm.seed)
>>> model = bkm.fit(df)
>>> model.getMaxIter()
20
>>> model.setPredictionCol("newPrediction")
BisectingKMeansModel...
>>> model.predict(df.head().features)
0
>>> centers = model.clusterCenters()
>>> len(centers)
2
>>> model.computeCost(df)
2.0
>>> model.hasSummary
True
>>> summary = model.summary
>>> summary.k
2
>>> summary.clusterSizes
[2, 2]
>>> summary.trainingCost
4.000...
>>> transformed = model.transform(df).select("features", "newPrediction")
>>> rows = transformed.collect()
>>> rows[0].newPrediction == rows[1].newPrediction
True
>>> rows[2].newPrediction == rows[3].newPrediction
True
>>> bkm_path = temp_path + "/bkm"
>>> bkm.save(bkm_path)
>>> bkm2 = BisectingKMeans.load(bkm_path)
>>> bkm2.getK()
2
>>> bkm2.getDistanceMeasure()
'euclidean'
>>> model_path = temp_path + "/bkm_model"
>>> model.save(model_path)
>>> model2 = BisectingKMeansModel.load(model_path)
>>> model2.hasSummary
False
>>> model.clusterCenters()[0] == model2.clusterCenters()[0]
array([ True, True], dtype=bool)
>>> model.clusterCenters()[1] == model2.clusterCenters()[1]
array([ True, True], dtype=bool)
>>> model.transform(df).take(1) == model2.transform(df).take(1)
True
"""
@keyword_only
def __init__(self, *, featuresCol="features", predictionCol="prediction", maxIter=20,
seed=None, k=4, minDivisibleClusterSize=1.0, distanceMeasure="euclidean",
weightCol=None):
"""
__init__(self, \\*, featuresCol="features", predictionCol="prediction", maxIter=20, \
seed=None, k=4, minDivisibleClusterSize=1.0, distanceMeasure="euclidean", \
weightCol=None)
"""
super(BisectingKMeans, self).__init__()
self._java_obj = self._new_java_obj("org.apache.spark.ml.clustering.BisectingKMeans",
self.uid)
kwargs = self._input_kwargs
self.setParams(**kwargs)
@keyword_only
@since("2.0.0")
def setParams(self, *, featuresCol="features", predictionCol="prediction", maxIter=20,
seed=None, k=4, minDivisibleClusterSize=1.0, distanceMeasure="euclidean",
weightCol=None):
"""
setParams(self, \\*, featuresCol="features", predictionCol="prediction", maxIter=20, \
seed=None, k=4, minDivisibleClusterSize=1.0, distanceMeasure="euclidean", \
weightCol=None)
Sets params for BisectingKMeans.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
@since("2.0.0")
def setK(self, value):
"""
Sets the value of :py:attr:`k`.
"""
return self._set(k=value)
@since("2.0.0")
def setMinDivisibleClusterSize(self, value):
"""
Sets the value of :py:attr:`minDivisibleClusterSize`.
"""
return self._set(minDivisibleClusterSize=value)
@since("2.4.0")
def setDistanceMeasure(self, value):
"""
Sets the value of :py:attr:`distanceMeasure`.
"""
return self._set(distanceMeasure=value)
@since("2.0.0")
def setMaxIter(self, value):
"""
Sets the value of :py:attr:`maxIter`.
"""
return self._set(maxIter=value)
@since("2.0.0")
def setFeaturesCol(self, value):
"""
Sets the value of :py:attr:`featuresCol`.
"""
return self._set(featuresCol=value)
@since("2.0.0")
def setPredictionCol(self, value):
"""
Sets the value of :py:attr:`predictionCol`.
"""
return self._set(predictionCol=value)
@since("2.0.0")
def setSeed(self, value):
"""
Sets the value of :py:attr:`seed`.
"""
return self._set(seed=value)
@since("3.0.0")
def setWeightCol(self, value):
"""
Sets the value of :py:attr:`weightCol`.
"""
return self._set(weightCol=value)
def _create_model(self, java_model):
return BisectingKMeansModel(java_model)
class BisectingKMeansSummary(ClusteringSummary):
"""
Bisecting KMeans clustering results for a given model.
.. versionadded:: 2.1.0
"""
@property
@since("3.0.0")
def trainingCost(self):
"""
Sum of squared distances to the nearest centroid for all points in the training dataset.
This is equivalent to sklearn's inertia.
"""
return self._call_java("trainingCost")
@inherit_doc
class _LDAParams(HasMaxIter, HasFeaturesCol, HasSeed, HasCheckpointInterval):
"""
Params for :py:class:`LDA` and :py:class:`LDAModel`.
.. versionadded:: 3.0.0
"""
k = Param(Params._dummy(), "k", "The number of topics (clusters) to infer. Must be > 1.",
typeConverter=TypeConverters.toInt)
optimizer = Param(Params._dummy(), "optimizer",
"Optimizer or inference algorithm used to estimate the LDA model. "
"Supported: online, em", typeConverter=TypeConverters.toString)
learningOffset = Param(Params._dummy(), "learningOffset",
"A (positive) learning parameter that downweights early iterations."
" Larger values make early iterations count less",
typeConverter=TypeConverters.toFloat)
    learningDecay = Param(Params._dummy(), "learningDecay", "Learning rate, set as an "
"exponential decay rate. This should be between (0.5, 1.0] to "
"guarantee asymptotic convergence.", typeConverter=TypeConverters.toFloat)
subsamplingRate = Param(Params._dummy(), "subsamplingRate",
"Fraction of the corpus to be sampled and used in each iteration "
"of mini-batch gradient descent, in range (0, 1].",
typeConverter=TypeConverters.toFloat)
optimizeDocConcentration = Param(Params._dummy(), "optimizeDocConcentration",
"Indicates whether the docConcentration (Dirichlet parameter "
"for document-topic distribution) will be optimized during "
"training.", typeConverter=TypeConverters.toBoolean)
docConcentration = Param(Params._dummy(), "docConcentration",
"Concentration parameter (commonly named \"alpha\") for the "
"prior placed on documents' distributions over topics (\"theta\").",
typeConverter=TypeConverters.toListFloat)
topicConcentration = Param(Params._dummy(), "topicConcentration",
"Concentration parameter (commonly named \"beta\" or \"eta\") for "
"the prior placed on topic' distributions over terms.",
typeConverter=TypeConverters.toFloat)
topicDistributionCol = Param(Params._dummy(), "topicDistributionCol",
"Output column with estimates of the topic mixture distribution "
"for each document (often called \"theta\" in the literature). "
"Returns a vector of zeros for an empty document.",
typeConverter=TypeConverters.toString)
keepLastCheckpoint = Param(Params._dummy(), "keepLastCheckpoint",
"(For EM optimizer) If using checkpointing, this indicates whether"
" to keep the last checkpoint. If false, then the checkpoint will be"
" deleted. Deleting the checkpoint can cause failures if a data"
" partition is lost, so set this bit with care.",
TypeConverters.toBoolean)
def __init__(self, *args):
super(_LDAParams, self).__init__(*args)
self._setDefault(maxIter=20, checkpointInterval=10,
k=10, optimizer="online", learningOffset=1024.0, learningDecay=0.51,
subsamplingRate=0.05, optimizeDocConcentration=True,
topicDistributionCol="topicDistribution", keepLastCheckpoint=True)
@since("2.0.0")
def getK(self):
"""
Gets the value of :py:attr:`k` or its default value.
"""
return self.getOrDefault(self.k)
@since("2.0.0")
def getOptimizer(self):
"""
Gets the value of :py:attr:`optimizer` or its default value.
"""
return self.getOrDefault(self.optimizer)
@since("2.0.0")
def getLearningOffset(self):
"""
Gets the value of :py:attr:`learningOffset` or its default value.
"""
return self.getOrDefault(self.learningOffset)
@since("2.0.0")
def getLearningDecay(self):
"""
Gets the value of :py:attr:`learningDecay` or its default value.
"""
return self.getOrDefault(self.learningDecay)
@since("2.0.0")
def getSubsamplingRate(self):
"""
Gets the value of :py:attr:`subsamplingRate` or its default value.
"""
return self.getOrDefault(self.subsamplingRate)
@since("2.0.0")
def getOptimizeDocConcentration(self):
"""
Gets the value of :py:attr:`optimizeDocConcentration` or its default value.
"""
return self.getOrDefault(self.optimizeDocConcentration)
@since("2.0.0")
def getDocConcentration(self):
"""
Gets the value of :py:attr:`docConcentration` or its default value.
"""
return self.getOrDefault(self.docConcentration)
@since("2.0.0")
def getTopicConcentration(self):
"""
Gets the value of :py:attr:`topicConcentration` or its default value.
"""
return self.getOrDefault(self.topicConcentration)
@since("2.0.0")
def getTopicDistributionCol(self):
"""
Gets the value of :py:attr:`topicDistributionCol` or its default value.
"""
return self.getOrDefault(self.topicDistributionCol)
@since("2.0.0")
def getKeepLastCheckpoint(self):
"""
Gets the value of :py:attr:`keepLastCheckpoint` or its default value.
"""
return self.getOrDefault(self.keepLastCheckpoint)
@inherit_doc
class LDAModel(JavaModel, _LDAParams):
"""
Latent Dirichlet Allocation (LDA) model.
    This abstraction permits different underlying representations,
including local and distributed data structures.
.. versionadded:: 2.0.0
"""
@since("3.0.0")
def setFeaturesCol(self, value):
"""
Sets the value of :py:attr:`featuresCol`.
"""
return self._set(featuresCol=value)
@since("3.0.0")
def setSeed(self, value):
"""
Sets the value of :py:attr:`seed`.
"""
return self._set(seed=value)
@since("3.0.0")
def setTopicDistributionCol(self, value):
"""
Sets the value of :py:attr:`topicDistributionCol`.
"""
return self._set(topicDistributionCol=value)
@since("2.0.0")
def isDistributed(self):
"""
Indicates whether this instance is of type DistributedLDAModel
"""
return self._call_java("isDistributed")
@since("2.0.0")
def vocabSize(self):
"""Vocabulary size (number of terms or words in the vocabulary)"""
return self._call_java("vocabSize")
@since("2.0.0")
def topicsMatrix(self):
"""
Inferred topics, where each topic is represented by a distribution over terms.
This is a matrix of size vocabSize x k, where each column is a topic.
No guarantees are given about the ordering of the topics.
.. warning:: If this model is actually a :py:class:`DistributedLDAModel`
instance produced by the Expectation-Maximization ("em") `optimizer`,
then this method could involve collecting a large amount of data
to the driver (on the order of vocabSize x k).
"""
return self._call_java("topicsMatrix")
@since("2.0.0")
def logLikelihood(self, dataset):
"""
Calculates a lower bound on the log likelihood of the entire corpus.
See Equation (16) in the Online LDA paper (Hoffman et al., 2010).
.. warning:: If this model is an instance of :py:class:`DistributedLDAModel` (produced when
:py:attr:`optimizer` is set to "em"), this involves collecting a large
:py:func:`topicsMatrix` to the driver. This implementation may be changed in the future.
"""
return self._call_java("logLikelihood", dataset)
@since("2.0.0")
def logPerplexity(self, dataset):
"""
Calculate an upper bound on perplexity. (Lower is better.)
See Equation (16) in the Online LDA paper (Hoffman et al., 2010).
.. warning:: If this model is an instance of :py:class:`DistributedLDAModel` (produced when
:py:attr:`optimizer` is set to "em"), this involves collecting a large
:py:func:`topicsMatrix` to the driver. This implementation may be changed in the future.
"""
return self._call_java("logPerplexity", dataset)
@since("2.0.0")
def describeTopics(self, maxTermsPerTopic=10):
"""
Return the topics described by their top-weighted terms.
"""
return self._call_java("describeTopics", maxTermsPerTopic)
@since("2.0.0")
def estimatedDocConcentration(self):
"""
Value for :py:attr:`LDA.docConcentration` estimated from data.
If Online LDA was used and :py:attr:`LDA.optimizeDocConcentration` was set to false,
then this returns the fixed (given) value for the :py:attr:`LDA.docConcentration` parameter.
"""
return self._call_java("estimatedDocConcentration")
@inherit_doc
class DistributedLDAModel(LDAModel, JavaMLReadable, JavaMLWritable):
"""
Distributed model fitted by :py:class:`LDA`.
This type of model is currently only produced by Expectation-Maximization (EM).
This model stores the inferred topics, the full training dataset, and the topic distribution
for each training document.
.. versionadded:: 2.0.0
"""
@since("2.0.0")
def toLocal(self):
"""
Convert this distributed model to a local representation. This discards info about the
training dataset.
.. warning:: This involves collecting a large :py:func:`topicsMatrix` to the driver.
"""
model = LocalLDAModel(self._call_java("toLocal"))
# SPARK-10931: Temporary fix to be removed once LDAModel defines Params
model._create_params_from_java()
model._transfer_params_from_java()
return model
@since("2.0.0")
def trainingLogLikelihood(self):
"""
Log likelihood of the observed tokens in the training set,
given the current parameter estimates:
log P(docs | topics, topic distributions for docs, Dirichlet hyperparameters)
Notes
-----
- This excludes the prior; for that, use :py:func:`logPrior`.
- Even with :py:func:`logPrior`, this is NOT the same as the data log likelihood given
the hyperparameters.
- This is computed from the topic distributions computed during training. If you call
:py:func:`logLikelihood` on the same training dataset, the topic distributions
will be computed again, possibly giving different results.
"""
return self._call_java("trainingLogLikelihood")
@since("2.0.0")
def logPrior(self):
"""
Log probability of the current parameter estimate:
log P(topics, topic distributions for docs | alpha, eta)
"""
return self._call_java("logPrior")
def getCheckpointFiles(self):
"""
If using checkpointing and :py:attr:`LDA.keepLastCheckpoint` is set to true, then there may
be saved checkpoint files. This method is provided so that users can manage those files.
.. versionadded:: 2.0.0
Returns
-------
list
List of checkpoint files from training
Notes
-----
Removing the checkpoints can cause failures if a partition is lost and is needed
by certain :py:class:`DistributedLDAModel` methods. Reference counting will clean up
the checkpoints when this model and derivative data go out of scope.
"""
return self._call_java("getCheckpointFiles")
@inherit_doc
class LocalLDAModel(LDAModel, JavaMLReadable, JavaMLWritable):
"""
Local (non-distributed) model fitted by :py:class:`LDA`.
This model stores the inferred topics only; it does not store info about the training dataset.
.. versionadded:: 2.0.0
"""
pass
@inherit_doc
class LDA(JavaEstimator, _LDAParams, JavaMLReadable, JavaMLWritable):
"""
Latent Dirichlet Allocation (LDA), a topic model designed for text documents.
Terminology:
- "term" = "word": an element of the vocabulary
- "token": instance of a term appearing in a document
- "topic": multinomial distribution over terms representing some concept
- "document": one piece of text, corresponding to one row in the input data
Original LDA paper (journal version):
Blei, Ng, and Jordan. "Latent Dirichlet Allocation." JMLR, 2003.
Input data (featuresCol):
LDA is given a collection of documents as input data, via the featuresCol parameter.
Each document is specified as a :py:class:`Vector` of length vocabSize, where each entry is the
count for the corresponding term (word) in the document. Feature transformers such as
:py:class:`pyspark.ml.feature.Tokenizer` and :py:class:`pyspark.ml.feature.CountVectorizer`
can be useful for converting text to word count vectors.
.. versionadded:: 2.0.0
Examples
--------
>>> from pyspark.ml.linalg import Vectors, SparseVector
>>> from pyspark.ml.clustering import LDA
>>> df = spark.createDataFrame([[1, Vectors.dense([0.0, 1.0])],
... [2, SparseVector(2, {0: 1.0})],], ["id", "features"])
>>> lda = LDA(k=2, seed=1, optimizer="em")
>>> lda.setMaxIter(10)
LDA...
>>> lda.getMaxIter()
10
>>> lda.clear(lda.maxIter)
>>> model = lda.fit(df)
>>> model.setSeed(1)
DistributedLDAModel...
>>> model.getTopicDistributionCol()
'topicDistribution'
>>> model.isDistributed()
True
>>> localModel = model.toLocal()
>>> localModel.isDistributed()
False
>>> model.vocabSize()
2
>>> model.describeTopics().show()
+-----+-----------+--------------------+
|topic|termIndices| termWeights|
+-----+-----------+--------------------+
| 0| [1, 0]|[0.50401530077160...|
| 1| [0, 1]|[0.50401530077160...|
+-----+-----------+--------------------+
...
>>> model.topicsMatrix()
DenseMatrix(2, 2, [0.496, 0.504, 0.504, 0.496], 0)
>>> lda_path = temp_path + "/lda"
>>> lda.save(lda_path)
>>> sameLDA = LDA.load(lda_path)
>>> distributed_model_path = temp_path + "/lda_distributed_model"
>>> model.save(distributed_model_path)
>>> sameModel = DistributedLDAModel.load(distributed_model_path)
>>> local_model_path = temp_path + "/lda_local_model"
>>> localModel.save(local_model_path)
>>> sameLocalModel = LocalLDAModel.load(local_model_path)
>>> model.transform(df).take(1) == sameLocalModel.transform(df).take(1)
True
"""
@keyword_only
def __init__(self, *, featuresCol="features", maxIter=20, seed=None, checkpointInterval=10,
k=10, optimizer="online", learningOffset=1024.0, learningDecay=0.51,
subsamplingRate=0.05, optimizeDocConcentration=True,
docConcentration=None, topicConcentration=None,
topicDistributionCol="topicDistribution", keepLastCheckpoint=True):
"""
__init__(self, \\*, featuresCol="features", maxIter=20, seed=None, checkpointInterval=10,\
k=10, optimizer="online", learningOffset=1024.0, learningDecay=0.51,\
subsamplingRate=0.05, optimizeDocConcentration=True,\
docConcentration=None, topicConcentration=None,\
topicDistributionCol="topicDistribution", keepLastCheckpoint=True)
"""
super(LDA, self).__init__()
self._java_obj = self._new_java_obj("org.apache.spark.ml.clustering.LDA", self.uid)
kwargs = self._input_kwargs
self.setParams(**kwargs)
def _create_model(self, java_model):
if self.getOptimizer() == "em":
return DistributedLDAModel(java_model)
else:
return LocalLDAModel(java_model)
@keyword_only
@since("2.0.0")
def setParams(self, *, featuresCol="features", maxIter=20, seed=None, checkpointInterval=10,
k=10, optimizer="online", learningOffset=1024.0, learningDecay=0.51,
subsamplingRate=0.05, optimizeDocConcentration=True,
docConcentration=None, topicConcentration=None,
topicDistributionCol="topicDistribution", keepLastCheckpoint=True):
"""
setParams(self, \\*, featuresCol="features", maxIter=20, seed=None, checkpointInterval=10,\
k=10, optimizer="online", learningOffset=1024.0, learningDecay=0.51,\
subsamplingRate=0.05, optimizeDocConcentration=True,\
docConcentration=None, topicConcentration=None,\
topicDistributionCol="topicDistribution", keepLastCheckpoint=True)
Sets params for LDA.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
@since("2.0.0")
def setCheckpointInterval(self, value):
"""
Sets the value of :py:attr:`checkpointInterval`.
"""
return self._set(checkpointInterval=value)
@since("2.0.0")
def setSeed(self, value):
"""
Sets the value of :py:attr:`seed`.
"""
return self._set(seed=value)
@since("2.0.0")
def setK(self, value):
"""
Sets the value of :py:attr:`k`.
>>> algo = LDA().setK(10)
>>> algo.getK()
10
"""
return self._set(k=value)
@since("2.0.0")
def setOptimizer(self, value):
"""
Sets the value of :py:attr:`optimizer`.
Currently only support 'em' and 'online'.
Examples
--------
>>> algo = LDA().setOptimizer("em")
>>> algo.getOptimizer()
'em'
"""
return self._set(optimizer=value)
@since("2.0.0")
def setLearningOffset(self, value):
"""
Sets the value of :py:attr:`learningOffset`.
Examples
--------
>>> algo = LDA().setLearningOffset(100)
>>> algo.getLearningOffset()
100.0
"""
return self._set(learningOffset=value)
@since("2.0.0")
def setLearningDecay(self, value):
"""
Sets the value of :py:attr:`learningDecay`.
Examples
--------
>>> algo = LDA().setLearningDecay(0.1)
>>> algo.getLearningDecay()
0.1...
"""
return self._set(learningDecay=value)
@since("2.0.0")
def setSubsamplingRate(self, value):
"""
Sets the value of :py:attr:`subsamplingRate`.
Examples
--------
>>> algo = LDA().setSubsamplingRate(0.1)
>>> algo.getSubsamplingRate()
0.1...
"""
return self._set(subsamplingRate=value)
@since("2.0.0")
def setOptimizeDocConcentration(self, value):
"""
Sets the value of :py:attr:`optimizeDocConcentration`.
Examples
--------
>>> algo = LDA().setOptimizeDocConcentration(True)
>>> algo.getOptimizeDocConcentration()
True
"""
return self._set(optimizeDocConcentration=value)
@since("2.0.0")
def setDocConcentration(self, value):
"""
Sets the value of :py:attr:`docConcentration`.
Examples
--------
>>> algo = LDA().setDocConcentration([0.1, 0.2])
>>> algo.getDocConcentration()
[0.1..., 0.2...]
"""
return self._set(docConcentration=value)
@since("2.0.0")
def setTopicConcentration(self, value):
"""
Sets the value of :py:attr:`topicConcentration`.
Examples
--------
>>> algo = LDA().setTopicConcentration(0.5)
>>> algo.getTopicConcentration()
0.5...
"""
return self._set(topicConcentration=value)
@since("2.0.0")
def setTopicDistributionCol(self, value):
"""
Sets the value of :py:attr:`topicDistributionCol`.
Examples
--------
>>> algo = LDA().setTopicDistributionCol("topicDistributionCol")
>>> algo.getTopicDistributionCol()
'topicDistributionCol'
"""
return self._set(topicDistributionCol=value)
@since("2.0.0")
def setKeepLastCheckpoint(self, value):
"""
Sets the value of :py:attr:`keepLastCheckpoint`.
Examples
--------
>>> algo = LDA().setKeepLastCheckpoint(False)
>>> algo.getKeepLastCheckpoint()
False
"""
return self._set(keepLastCheckpoint=value)
@since("2.0.0")
def setMaxIter(self, value):
"""
Sets the value of :py:attr:`maxIter`.
"""
return self._set(maxIter=value)
@since("2.0.0")
def setFeaturesCol(self, value):
"""
Sets the value of :py:attr:`featuresCol`.
"""
return self._set(featuresCol=value)
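# Editor's sketch (not part of the original module): the LDA docstring above points
# to Tokenizer and CountVectorizer for building the word-count vectors LDA expects.
# A minimal pipeline; the SparkSession handle, column names and toy documents are
# assumptions for illustration only.
def _example_lda_pipeline(spark):
    from pyspark.ml import Pipeline
    from pyspark.ml.feature import CountVectorizer, Tokenizer
    docs = spark.createDataFrame(
        [(0, "spark lda topic model"), (1, "lda infers topics from word counts")],
        ["id", "text"])
    tokenizer = Tokenizer(inputCol="text", outputCol="words")
    vectorizer = CountVectorizer(inputCol="words", outputCol="features")
    lda = LDA(k=2, maxIter=5, seed=1)
    pipeline_model = Pipeline(stages=[tokenizer, vectorizer, lda]).fit(docs)
    return pipeline_model.stages[-1].describeTopics(3)  # top 3 terms per topic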
@inherit_doc
class _PowerIterationClusteringParams(HasMaxIter, HasWeightCol):
"""
Params for :py:class:`PowerIterationClustering`.
.. versionadded:: 3.0.0
"""
k = Param(Params._dummy(), "k",
"The number of clusters to create. Must be > 1.",
typeConverter=TypeConverters.toInt)
initMode = Param(Params._dummy(), "initMode",
"The initialization algorithm. This can be either " +
"'random' to use a random vector as vertex properties, or 'degree' to use " +
"a normalized sum of similarities with other vertices. Supported options: " +
"'random' and 'degree'.",
typeConverter=TypeConverters.toString)
srcCol = Param(Params._dummy(), "srcCol",
"Name of the input column for source vertex IDs.",
typeConverter=TypeConverters.toString)
dstCol = Param(Params._dummy(), "dstCol",
"Name of the input column for destination vertex IDs.",
typeConverter=TypeConverters.toString)
def __init__(self, *args):
super(_PowerIterationClusteringParams, self).__init__(*args)
self._setDefault(k=2, maxIter=20, initMode="random", srcCol="src", dstCol="dst")
@since("2.4.0")
def getK(self):
"""
Gets the value of :py:attr:`k` or its default value.
"""
return self.getOrDefault(self.k)
@since("2.4.0")
def getInitMode(self):
"""
Gets the value of :py:attr:`initMode` or its default value.
"""
return self.getOrDefault(self.initMode)
@since("2.4.0")
def getSrcCol(self):
"""
Gets the value of :py:attr:`srcCol` or its default value.
"""
return self.getOrDefault(self.srcCol)
@since("2.4.0")
def getDstCol(self):
"""
Gets the value of :py:attr:`dstCol` or its default value.
"""
return self.getOrDefault(self.dstCol)
@inherit_doc
class PowerIterationClustering(_PowerIterationClusteringParams, JavaParams, JavaMLReadable,
JavaMLWritable):
"""
Power Iteration Clustering (PIC), a scalable graph clustering algorithm developed by
`Lin and Cohen <http://www.cs.cmu.edu/~frank/papers/icml2010-pic-final.pdf>`_. From the
abstract: PIC finds a very low-dimensional embedding of a dataset using truncated power
iteration on a normalized pair-wise similarity matrix of the data.
This class is not yet an Estimator/Transformer, use :py:func:`assignClusters` method
to run the PowerIterationClustering algorithm.
.. versionadded:: 2.4.0
Notes
-----
See `Wikipedia on Spectral clustering <http://en.wikipedia.org/wiki/Spectral_clustering>`_
Examples
--------
>>> data = [(1, 0, 0.5),
... (2, 0, 0.5), (2, 1, 0.7),
... (3, 0, 0.5), (3, 1, 0.7), (3, 2, 0.9),
... (4, 0, 0.5), (4, 1, 0.7), (4, 2, 0.9), (4, 3, 1.1),
... (5, 0, 0.5), (5, 1, 0.7), (5, 2, 0.9), (5, 3, 1.1), (5, 4, 1.3)]
>>> df = spark.createDataFrame(data).toDF("src", "dst", "weight").repartition(1)
>>> pic = PowerIterationClustering(k=2, weightCol="weight")
>>> pic.setMaxIter(40)
PowerIterationClustering...
>>> assignments = pic.assignClusters(df)
>>> assignments.sort(assignments.id).show(truncate=False)
+---+-------+
|id |cluster|
+---+-------+
|0 |0 |
|1 |0 |
|2 |0 |
|3 |0 |
|4 |0 |
|5 |1 |
+---+-------+
...
>>> pic_path = temp_path + "/pic"
>>> pic.save(pic_path)
>>> pic2 = PowerIterationClustering.load(pic_path)
>>> pic2.getK()
2
>>> pic2.getMaxIter()
40
>>> pic2.assignClusters(df).take(6) == assignments.take(6)
True
"""
@keyword_only
def __init__(self, *, k=2, maxIter=20, initMode="random", srcCol="src", dstCol="dst",
weightCol=None):
"""
__init__(self, \\*, k=2, maxIter=20, initMode="random", srcCol="src", dstCol="dst",\
weightCol=None)
"""
super(PowerIterationClustering, self).__init__()
self._java_obj = self._new_java_obj(
"org.apache.spark.ml.clustering.PowerIterationClustering", self.uid)
kwargs = self._input_kwargs
self.setParams(**kwargs)
@keyword_only
@since("2.4.0")
def setParams(self, *, k=2, maxIter=20, initMode="random", srcCol="src", dstCol="dst",
weightCol=None):
"""
setParams(self, \\*, k=2, maxIter=20, initMode="random", srcCol="src", dstCol="dst",\
weightCol=None)
Sets params for PowerIterationClustering.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
@since("2.4.0")
def setK(self, value):
"""
Sets the value of :py:attr:`k`.
"""
return self._set(k=value)
@since("2.4.0")
def setInitMode(self, value):
"""
Sets the value of :py:attr:`initMode`.
"""
return self._set(initMode=value)
@since("2.4.0")
def setSrcCol(self, value):
"""
Sets the value of :py:attr:`srcCol`.
"""
return self._set(srcCol=value)
@since("2.4.0")
def setDstCol(self, value):
"""
Sets the value of :py:attr:`dstCol`.
"""
return self._set(dstCol=value)
@since("2.4.0")
def setMaxIter(self, value):
"""
Sets the value of :py:attr:`maxIter`.
"""
return self._set(maxIter=value)
@since("2.4.0")
def setWeightCol(self, value):
"""
Sets the value of :py:attr:`weightCol`.
"""
return self._set(weightCol=value)
@since("2.4.0")
def assignClusters(self, dataset):
"""
Run the PIC algorithm and returns a cluster assignment for each input vertex.
Parameters
----------
dataset : :py:class:`pyspark.sql.DataFrame`
            A dataset with columns src, dst, weight representing the affinity matrix,
            which is the matrix A in the PIC paper. Suppose the src column value is i,
            the dst column value is j, and the weight column value is the similarity
            s_ij, which must be nonnegative. This is a symmetric matrix and hence
            s_ij = s_ji. For any (i, j) with nonzero similarity, there should be
            either (i, j, s_ij) or (j, i, s_ji) in the input. Rows with i = j are
            ignored, because we assume s_ij = 0.0.
Returns
-------
:py:class:`pyspark.sql.DataFrame`
A dataset that contains columns of vertex id and the corresponding cluster for
the id. The schema of it will be:
- id: Long
- cluster: Int
"""
self._transfer_params_to_java()
jdf = self._java_obj.assignClusters(dataset._jdf)
return DataFrame(jdf, dataset.sql_ctx)
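# Editor's sketch (not part of the original module): building the affinity input for
# assignClusters. Only one of (i, j, s_ij) or (j, i, s_ij) is needed per pair and
# self-edges are ignored; the toy similarities and the SparkSession handle `spark`
# are assumptions for illustration only.
def _example_pic_assign(spark):
    pairs = [(0, 1, 1.0), (0, 2, 1.0), (1, 2, 1.0), (3, 4, 1.0)]
    affinity = spark.createDataFrame(pairs, ["src", "dst", "weight"])
    pic = PowerIterationClustering(k=2, maxIter=10, weightCol="weight")
    return pic.assignClusters(affinity)  # DataFrame with columns id and cluster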
if __name__ == "__main__":
import doctest
import numpy
import pyspark.ml.clustering
from pyspark.sql import SparkSession
try:
        # Numpy 1.14+ changed its string format.
numpy.set_printoptions(legacy='1.13')
except TypeError:
pass
globs = pyspark.ml.clustering.__dict__.copy()
# The small batch size here ensures that we see multiple batches,
# even in these small test examples:
spark = SparkSession.builder\
.master("local[2]")\
.appName("ml.clustering tests")\
.getOrCreate()
sc = spark.sparkContext
globs['sc'] = sc
globs['spark'] = spark
import tempfile
temp_path = tempfile.mkdtemp()
globs['temp_path'] = temp_path
try:
(failure_count, test_count) = doctest.testmod(globs=globs, optionflags=doctest.ELLIPSIS)
spark.stop()
finally:
from shutil import rmtree
try:
rmtree(temp_path)
except OSError:
pass
if failure_count:
sys.exit(-1)
| apache-2.0 |
kellerberrin/OSM-QSAR | OSMKerasRegress.py | 1 | 8794 | # MIT License
#
# Copyright (c) 2017
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NON INFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
#
# Python 2 and Python 3 compatibility imports.
from __future__ import absolute_import, division, print_function, unicode_literals
from six import with_metaclass
import copy
import sys
import os
import numpy as np
from sklearn.utils import shuffle
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Dropout
from keras.layers import normalization, BatchNormalization
from keras.regularizers import l2, l1l2, activity_l2
from keras.models import load_model
from keras.constraints import maxnorm
from keras.optimizers import SGD, Adam, Adagrad, Adadelta
from keras.utils import np_utils
#from keras.utils.visualize_util import plot
import keras.backend as backend
from OSMBase import ModelMetaClass # The virtual model class.
from OSMRegression import OSMRegression # Display and save regression results.
from OSMGraphics import OSMSimilarityMap
from OSMModelData import OSMModelData
from OSMIterative import OSMIterative
# ===============================================================================
# Base class for the Keras neural network regression models.
# ===============================================================================
class KerasRegression(OSMRegression):
def __init__(self, args, log):
super(KerasRegression, self).__init__(args, log)
self.default_epochs = 1000
self.iterative = OSMIterative(self)
def model_write(self):
self.iterative.write()
def model_read(self):
return self.iterative.read()
def model_train(self):
self.iterative.train(self.default_epochs)
def epoch_write(self, epoch):
file_name = self.args.saveFilename + "_" + "{}".format(epoch) + ".krs"
self.log.info("KERAS - Saving Trained %s Model in File: %s", self.model_name(), file_name)
self.model.save(file_name)
def epoch_read(self, epoch):
file_name = self.args.loadFilename + "_" + "{}".format(epoch) + ".krs"
self.log.info("KERAS - Loading Trained %s Model in File: %s", self.model_name(), file_name)
model = load_model(file_name)
return model
def model_epochs(self):
return self.iterative.trained_epochs()
def model_graphics(self):
def keras_probability(fp, predict_func):
int_list = []
for arr in fp:
int_list.append(arr)
shape = []
shape.append(int_list)
fp_floats = np.array(shape, dtype=float)
prediction = predict_func(fp_floats, verbose=0)[0][0] # returns a prediction (not probability)
return prediction
func = lambda x: keras_probability(x, self.model.predict)
if self.args.checkPoint < 0 or self.args.extendFlag:
OSMSimilarityMap(self, self.data.testing(), func).maps(self.args.testDirectory)
if self.args.extendFlag:
OSMSimilarityMap(self, self.data.training(), func).maps(self.args.trainDirectory)
# ===============================================================================
# The sequential neural net class developed by Vito Spadavecchio.
# ===============================================================================
class SequentialModel(with_metaclass(ModelMetaClass, KerasRegression)):
def __init__(self, args, log):
super(SequentialModel, self).__init__(args, log)
# Define the model data view.
# Define the model variable types here. Documented in "OSMModelData.py".
self.arguments = { "DEPENDENT" : { "VARIABLE" : "pEC50", "SHAPE" : [1], "TYPE": OSMModelData.FLOAT64 }
, "INDEPENDENT" : [ { "VARIABLE" : "MORGAN1024", "SHAPE": [1024], "TYPE": OSMModelData.FLOAT64 } ] }
    # These functions need to be re-defined in all regression model classes.
def model_name(self):
return "Sequential"
def model_postfix(self): # Must be unique for each model.
return "seq"
def model_description(self):
return ("A KERAS (TensorFlow) based Neural Network classifier developed by Vito Spadavecchio.\n"
"The classifier uses 1024 bit Morgan molecular fingerprints in a single layer fully connected NN.")
def model_define(self):
model = Sequential()
model.add(Dense(1024, input_dim=1024, init="uniform", activation="relu"))
model.add(Dropout(0.2, input_shape=(1024,)))
model.add(Dense(1, init="normal"))
model.compile(loss="mean_absolute_error", optimizer="Adam", metrics=["accuracy"])
return model
def model_prediction(self, data):
predictions = self.model.predict(data.input_data(), verbose=0)
predictions_array = predictions.flatten()
return {"prediction": predictions_array, "actual": data.target_data() }
def train_epoch(self, epoch):
self.model.fit(self.data.training().input_data(), self.data.training().target_data()
, nb_epoch=epoch, batch_size=45, verbose=1)
# ===============================================================================
# Modified sequential class is a multi layer neural network.
# ===============================================================================
class ModifiedSequential(with_metaclass(ModelMetaClass, KerasRegression)):
def __init__(self, args, log):
super(ModifiedSequential, self).__init__(args, log)
self.default_epochs = 200
# Define the model data view.
# Define the model variable types here. Documented in "OSMModelData.py".
self.arguments = { "DEPENDENT" : { "VARIABLE" : "pEC50", "SHAPE" : [1], "TYPE": OSMModelData.FLOAT64 }
, "INDEPENDENT" : [ { "VARIABLE" : "MORGAN2048", "SHAPE": [2048], "TYPE": OSMModelData.FLOAT64 } ] }
    # These functions need to be re-defined in all regression model classes.
def model_name(self):
return "Modified Sequential"
def model_postfix(self): # Must be unique for each model.
return "mod"
def model_description(self):
return ("A KERAS (TensorFlow) multi-layer Neural Network classification model. \n"
"This classifier analyzes 2048 bit Morgan molecular fingerprints.")
def model_define(self): # Defines the modified sequential class with regularizers defined.
model = Sequential()
# model.add(Dense(2048, input_dim=2048, init='uniform', activation='relu',W_regularizer=l2(0.01), activity_regularizer=activity_l2(0.01)))
model.add(Dense(2048, input_dim=2048, init="uniform", activation="relu", W_constraint=maxnorm(3)))
model.add(Dropout(0.3, input_shape=(2048,)))
model.add(Dense(30, init="normal", activation="relu", W_constraint=maxnorm(3)))
model.add(Dropout(0.3, input_shape=(30,)))
model.add(Dense(1, init="normal", activation="tanh"))
model.add(Dense(1, init="normal", activation="linear"))
sgd = SGD(lr=0.1, momentum=0.9, decay=0.0, nesterov=False)
model.compile(loss='mean_absolute_error', optimizer="Adam", metrics=['accuracy'])
# model.compile(loss='binary_crossentropy', optimizer=sgd, metrics=['accuracy'])
# model.compile(loss='mean_absolute_error', optimizer='Adam', metrics=['accuracy'])
return model
def model_prediction(self,data):
predictions = self.model.predict(data.input_data(), verbose=0)
predictions_array = predictions.flatten()
return {"prediction": predictions_array, "actual": data.target_data()}
def train_epoch(self, epoch):
self.model.fit(self.data.training().input_data(), self.data.training().target_data()
, nb_epoch=epoch, batch_size=100, verbose=1)
| mit |
ktaneishi/deepchem | deepchem/dock/binding_pocket.py | 2 | 11569 | """
Computes putative binding pockets on protein.
"""
from __future__ import division
from __future__ import unicode_literals
__author__ = "Bharath Ramsundar"
__copyright__ = "Copyright 2017, Stanford University"
__license__ = "MIT"
import os
import logging
import tempfile
import numpy as np
from subprocess import call
from scipy.spatial import ConvexHull
from deepchem.feat.binding_pocket_features import BindingPocketFeaturizer
from deepchem.feat.fingerprints import CircularFingerprint
from deepchem.models.sklearn_models import SklearnModel
from deepchem.utils import rdkit_util
logger = logging.getLogger(__name__)
def extract_active_site(protein_file, ligand_file, cutoff=4):
"""Extracts a box for the active site."""
protein_coords = rdkit_util.load_molecule(
protein_file, add_hydrogens=False)[0]
ligand_coords = rdkit_util.load_molecule(
ligand_file, add_hydrogens=True, calc_charges=True)[0]
num_ligand_atoms = len(ligand_coords)
num_protein_atoms = len(protein_coords)
pocket_inds = []
pocket_atoms = set([])
for lig_atom_ind in range(num_ligand_atoms):
lig_atom = ligand_coords[lig_atom_ind]
for protein_atom_ind in range(num_protein_atoms):
protein_atom = protein_coords[protein_atom_ind]
if np.linalg.norm(lig_atom - protein_atom) < cutoff:
if protein_atom_ind not in pocket_atoms:
pocket_atoms = pocket_atoms.union(set([protein_atom_ind]))
# Should be an array of size (n_pocket_atoms, 3)
pocket_atoms = list(pocket_atoms)
n_pocket_atoms = len(pocket_atoms)
pocket_coords = np.zeros((n_pocket_atoms, 3))
for ind, pocket_ind in enumerate(pocket_atoms):
pocket_coords[ind] = protein_coords[pocket_ind]
x_min = int(np.floor(np.amin(pocket_coords[:, 0])))
x_max = int(np.ceil(np.amax(pocket_coords[:, 0])))
y_min = int(np.floor(np.amin(pocket_coords[:, 1])))
y_max = int(np.ceil(np.amax(pocket_coords[:, 1])))
z_min = int(np.floor(np.amin(pocket_coords[:, 2])))
z_max = int(np.ceil(np.amax(pocket_coords[:, 2])))
return (((x_min, x_max), (y_min, y_max), (z_min, z_max)), pocket_atoms,
pocket_coords)
def compute_overlap(mapping, box1, box2):
"""Computes overlap between the two boxes.
Overlap is defined as % atoms of box1 in box2. Note that
overlap is not a symmetric measurement.
"""
atom1 = set(mapping[box1])
atom2 = set(mapping[box2])
return len(atom1.intersection(atom2)) / float(len(atom1))
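# Editor's sketch (not part of the original module): compute_overlap is directional.
# With box "a" holding atoms {0, 1, 2} and box "b" holding {1, 2}, two of a's three
# atoms fall inside b, so overlap(a, b) = 2/3 while overlap(b, a) = 1.0.
def _example_compute_overlap():
  mapping = {"a": [0, 1, 2], "b": [1, 2]}
  return compute_overlap(mapping, "a", "b"), compute_overlap(mapping, "b", "a")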
def get_all_boxes(coords, pad=5):
"""Get all pocket boxes for protein coords.
We pad all boxes the prescribed number of angstroms.
TODO(rbharath): It looks like this may perhaps be non-deterministic?
"""
hull = ConvexHull(coords)
boxes = []
for triangle in hull.simplices:
# coords[triangle, 0] gives the x-dimension of all triangle points
# Take transpose to make sure rows correspond to atoms.
points = np.array(
[coords[triangle, 0], coords[triangle, 1], coords[triangle, 2]]).T
# We voxelize so all grids have integral coordinates (convenience)
x_min, x_max = np.amin(points[:, 0]), np.amax(points[:, 0])
x_min, x_max = int(np.floor(x_min)) - pad, int(np.ceil(x_max)) + pad
y_min, y_max = np.amin(points[:, 1]), np.amax(points[:, 1])
y_min, y_max = int(np.floor(y_min)) - pad, int(np.ceil(y_max)) + pad
z_min, z_max = np.amin(points[:, 2]), np.amax(points[:, 2])
z_min, z_max = int(np.floor(z_min)) - pad, int(np.ceil(z_max)) + pad
boxes.append(((x_min, x_max), (y_min, y_max), (z_min, z_max)))
return boxes
def boxes_to_atoms(atom_coords, boxes):
"""Maps each box to a list of atoms in that box.
TODO(rbharath): This does a num_atoms x num_boxes computations. Is
there a reasonable heuristic we can use to speed this up?
"""
mapping = {}
for box_ind, box in enumerate(boxes):
box_atoms = []
(x_min, x_max), (y_min, y_max), (z_min, z_max) = box
logger.info("Handing box %d/%d" % (box_ind, len(boxes)))
for atom_ind in range(len(atom_coords)):
atom = atom_coords[atom_ind]
x_cont = x_min <= atom[0] and atom[0] <= x_max
y_cont = y_min <= atom[1] and atom[1] <= y_max
z_cont = z_min <= atom[2] and atom[2] <= z_max
if x_cont and y_cont and z_cont:
box_atoms.append(atom_ind)
mapping[box] = box_atoms
return mapping
def merge_boxes(box1, box2):
"""Merges two boxes."""
(x_min1, x_max1), (y_min1, y_max1), (z_min1, z_max1) = box1
(x_min2, x_max2), (y_min2, y_max2), (z_min2, z_max2) = box2
x_min = min(x_min1, x_min2)
y_min = min(y_min1, y_min2)
z_min = min(z_min1, z_min2)
x_max = max(x_max1, x_max2)
y_max = max(y_max1, y_max2)
z_max = max(z_max1, z_max2)
return ((x_min, x_max), (y_min, y_max), (z_min, z_max))
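# Editor's sketch (not part of the original module): merging two axis-aligned boxes
# yields the smallest box that contains both; the coordinates below are toy values.
def _example_merge_boxes():
  box1 = ((0, 2), (0, 2), (0, 2))
  box2 = ((1, 4), (-1, 3), (0, 5))
  return merge_boxes(box1, box2)  # ((0, 4), (-1, 3), (0, 5))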
def merge_overlapping_boxes(mapping, boxes, threshold=.8):
"""Merge boxes which have an overlap greater than threshold.
TODO(rbharath): This merge code is terribly inelegant. It's also quadratic
in number of boxes. It feels like there ought to be an elegant divide and
conquer approach here. Figure out later...
"""
num_boxes = len(boxes)
outputs = []
for i in range(num_boxes):
box = boxes[0]
new_boxes = []
new_mapping = {}
# If overlap of box with previously generated output boxes, return
contained = False
for output_box in outputs:
# Carry forward mappings
new_mapping[output_box] = mapping[output_box]
if compute_overlap(mapping, box, output_box) == 1:
contained = True
if contained:
continue
# We know that box has at least one atom not in outputs
unique_box = True
for merge_box in boxes[1:]:
overlap = compute_overlap(mapping, box, merge_box)
if overlap < threshold:
new_boxes.append(merge_box)
new_mapping[merge_box] = mapping[merge_box]
else:
# Current box has been merged into box further down list.
# No need to output current box
unique_box = False
merged = merge_boxes(box, merge_box)
new_boxes.append(merged)
new_mapping[merged] = list(
set(mapping[box]).union(set(mapping[merge_box])))
if unique_box:
outputs.append(box)
new_mapping[box] = mapping[box]
boxes = new_boxes
mapping = new_mapping
return outputs, mapping
class BindingPocketFinder(object):
"""Abstract superclass for binding pocket detectors"""
def find_pockets(self, protein_file, ligand_file):
"""Finds potential binding pockets in proteins."""
raise NotImplementedError
class ConvexHullPocketFinder(BindingPocketFinder):
"""Implementation that uses convex hull of protein to find pockets.
Based on https://www.ncbi.nlm.nih.gov/pmc/articles/PMC4112621/pdf/1472-6807-14-18.pdf
"""
def __init__(self, pad=5):
self.pad = pad
def find_all_pockets(self, protein_file):
"""Find list of binding pockets on protein."""
# protein_coords is (N, 3) tensor
coords = rdkit_util.load_molecule(protein_file)[0]
return get_all_boxes(coords, self.pad)
def find_pockets(self, protein_file, ligand_file):
"""Find list of suitable binding pockets on protein."""
protein_coords = rdkit_util.load_molecule(
protein_file, add_hydrogens=False, calc_charges=False)[0]
ligand_coords = rdkit_util.load_molecule(
ligand_file, add_hydrogens=False, calc_charges=False)[0]
boxes = get_all_boxes(protein_coords, self.pad)
mapping = boxes_to_atoms(protein_coords, boxes)
pockets, pocket_atoms_map = merge_overlapping_boxes(mapping, boxes)
pocket_coords = []
for pocket in pockets:
atoms = pocket_atoms_map[pocket]
coords = np.zeros((len(atoms), 3))
for ind, atom in enumerate(atoms):
coords[ind] = protein_coords[atom]
pocket_coords.append(coords)
return pockets, pocket_atoms_map, pocket_coords
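# Editor's sketch (not part of the original module): typical use of the convex-hull
# finder. The file names are placeholders; any protein/ligand files that
# rdkit_util.load_molecule can parse will do.
def _example_convex_hull_pockets(protein_file="protein.pdb", ligand_file="ligand.sdf"):
  finder = ConvexHullPocketFinder(pad=5)
  pockets, pocket_atoms_map, pocket_coords = finder.find_pockets(protein_file, ligand_file)
  # Each pocket is an ((x_min, x_max), (y_min, y_max), (z_min, z_max)) box;
  # pocket_atoms_map gives the protein atom indices inside it and pocket_coords
  # their coordinates.
  return len(pockets)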
class RFConvexHullPocketFinder(BindingPocketFinder):
"""Uses pre-trained RF model + ConvexHulPocketFinder to select pockets."""
def __init__(self, pad=5):
self.pad = pad
self.convex_finder = ConvexHullPocketFinder(pad)
# Load binding pocket model
self.base_dir = tempfile.mkdtemp()
logger.info("About to download trained model.")
# TODO(rbharath): Shift refined to full once trained.
call((
"wget -nv -c http://deepchem.io.s3-website-us-west-1.amazonaws.com/trained_models/pocket_random_refined_RF.tar.gz"
).split())
call(("tar -zxvf pocket_random_refined_RF.tar.gz").split())
call(("mv pocket_random_refined_RF %s" % (self.base_dir)).split())
self.model_dir = os.path.join(self.base_dir, "pocket_random_refined_RF")
# Fit model on dataset
self.model = SklearnModel(model_dir=self.model_dir)
self.model.reload()
# Create featurizers
self.pocket_featurizer = BindingPocketFeaturizer()
self.ligand_featurizer = CircularFingerprint(size=1024)
def find_pockets(self, protein_file, ligand_file):
"""Compute features for a given complex
TODO(rbharath): This has a log of code overlap with
compute_binding_pocket_features in
examples/binding_pockets/binding_pocket_datasets.py. Find way to refactor
to avoid code duplication.
"""
# if not ligand_file.endswith(".sdf"):
# raise ValueError("Only .sdf ligand files can be featurized.")
# ligand_basename = os.path.basename(ligand_file).split(".")[0]
# ligand_mol2 = os.path.join(
# self.base_dir, ligand_basename + ".mol2")
#
# # Write mol2 file for ligand
# obConversion = ob.OBConversion()
# conv_out = obConversion.SetInAndOutFormats(str("sdf"), str("mol2"))
# ob_mol = ob.OBMol()
# obConversion.ReadFile(ob_mol, str(ligand_file))
# obConversion.WriteFile(ob_mol, str(ligand_mol2))
#
# # Featurize ligand
# mol = Chem.MolFromMol2File(str(ligand_mol2), removeHs=False)
# if mol is None:
# return None, None
# # Default for CircularFingerprint
# n_ligand_features = 1024
# ligand_features = self.ligand_featurizer.featurize([mol])
#
# # Featurize pocket
# pockets, pocket_atoms_map, pocket_coords = self.convex_finder.find_pockets(
# protein_file, ligand_file)
# n_pockets = len(pockets)
# n_pocket_features = BindingPocketFeaturizer.n_features
#
# features = np.zeros((n_pockets, n_pocket_features+n_ligand_features))
# pocket_features = self.pocket_featurizer.featurize(
# protein_file, pockets, pocket_atoms_map, pocket_coords)
# # Note broadcast operation
# features[:, :n_pocket_features] = pocket_features
# features[:, n_pocket_features:] = ligand_features
# dataset = NumpyDataset(X=features)
# pocket_preds = self.model.predict(dataset)
# pocket_pred_proba = np.squeeze(self.model.predict_proba(dataset))
#
# # Find pockets which are active
# active_pockets = []
# active_pocket_atoms_map = {}
# active_pocket_coords = []
# for pocket_ind in range(len(pockets)):
# #################################################### DEBUG
# # TODO(rbharath): For now, using a weak cutoff. Fix later.
# #if pocket_preds[pocket_ind] == 1:
# if pocket_pred_proba[pocket_ind][1] > .15:
# #################################################### DEBUG
# pocket = pockets[pocket_ind]
# active_pockets.append(pocket)
# active_pocket_atoms_map[pocket] = pocket_atoms_map[pocket]
# active_pocket_coords.append(pocket_coords[pocket_ind])
# return active_pockets, active_pocket_atoms_map, active_pocket_coords
# # TODO(LESWING)
raise ValueError("Karl Implement")
| mit |
lmallin/coverage_test | python_venv/lib/python2.7/site-packages/pandas/io/pickle.py | 6 | 3578 | """ pickle compat """
import numpy as np
from numpy.lib.format import read_array, write_array
from pandas.compat import BytesIO, cPickle as pkl, pickle_compat as pc, PY3
from pandas.core.dtypes.common import is_datetime64_dtype, _NS_DTYPE
from pandas.io.common import _get_handle, _infer_compression
def to_pickle(obj, path, compression='infer'):
"""
Pickle (serialize) object to input file path
Parameters
----------
obj : any object
path : string
File path
compression : {'infer', 'gzip', 'bz2', 'xz', None}, default 'infer'
a string representing the compression to use in the output file
.. versionadded:: 0.20.0
"""
inferred_compression = _infer_compression(path, compression)
f, fh = _get_handle(path, 'wb',
compression=inferred_compression,
is_text=False)
try:
pkl.dump(obj, f, protocol=pkl.HIGHEST_PROTOCOL)
finally:
for _f in fh:
_f.close()
def read_pickle(path, compression='infer'):
"""
Load pickled pandas object (or any other pickled object) from the specified
file path
Warning: Loading pickled data received from untrusted sources can be
unsafe. See: http://docs.python.org/2.7/library/pickle.html
Parameters
----------
path : string
File path
compression : {'infer', 'gzip', 'bz2', 'xz', 'zip', None}, default 'infer'
For on-the-fly decompression of on-disk data. If 'infer', then use
gzip, bz2, xz or zip if path is a string ending in '.gz', '.bz2', 'xz',
or 'zip' respectively, and no decompression otherwise.
Set to None for no decompression.
.. versionadded:: 0.20.0
Returns
-------
unpickled : type of object stored in file
"""
inferred_compression = _infer_compression(path, compression)
def read_wrapper(func):
# wrapper file handle open/close operation
f, fh = _get_handle(path, 'rb',
compression=inferred_compression,
is_text=False)
try:
return func(f)
finally:
for _f in fh:
_f.close()
def try_read(path, encoding=None):
        # try with cPickle
        # try with current pickle, if we have a TypeError then
        # try with the compat pickle to handle subclass changes
        # pass encoding only if it's not None as py2 doesn't handle
        # the param
        # cpickle
        # GH 6899
try:
return read_wrapper(lambda f: pkl.load(f))
except Exception:
# reg/patched pickle
try:
return read_wrapper(
lambda f: pc.load(f, encoding=encoding, compat=False))
# compat pickle
except:
return read_wrapper(
lambda f: pc.load(f, encoding=encoding, compat=True))
try:
return try_read(path)
except:
if PY3:
return try_read(path, encoding='latin1')
raise
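# Editor's sketch (not part of the original module): round-tripping a DataFrame
# through the helpers above; the temporary path and gzip compression are illustrative.
def _example_pickle_round_trip(path='frame.pkl.gz'):
    import pandas as pd
    df = pd.DataFrame({'a': [1, 2, 3]})
    to_pickle(df, path, compression='infer')  # '.gz' suffix -> gzip
    return read_pickle(path)  # reconstructs the original DataFrame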
# compat with sparse pickle / unpickle
def _pickle_array(arr):
arr = arr.view(np.ndarray)
buf = BytesIO()
write_array(buf, arr)
return buf.getvalue()
def _unpickle_array(bytes):
arr = read_array(BytesIO(bytes))
# All datetimes should be stored as M8[ns]. When unpickling with
    # numpy 1.6, it will read these as M8[us]. So this ensures all
    # datetime64 types are read as M8[ns].
if is_datetime64_dtype(arr):
arr = arr.view(_NS_DTYPE)
return arr
| mit |
bradleypallen/keras-quora-question-pairs | keras-quora-question-pairs.py | 1 | 8028 | from __future__ import print_function
import numpy as np
import csv, datetime, time, json
from zipfile import ZipFile
from os.path import expanduser, exists
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.models import Model
from keras.layers import Input, TimeDistributed, Dense, Lambda, concatenate, Dropout, BatchNormalization
from keras.layers.embeddings import Embedding
from keras.regularizers import l2
from keras.callbacks import Callback, ModelCheckpoint
from keras.utils.data_utils import get_file
from keras import backend as K
from sklearn.model_selection import train_test_split
# Initialize global variables
KERAS_DATASETS_DIR = expanduser('~/.keras/datasets/')
QUESTION_PAIRS_FILE_URL = 'http://qim.ec.quoracdn.net/quora_duplicate_questions.tsv'
QUESTION_PAIRS_FILE = 'quora_duplicate_questions.tsv'
GLOVE_ZIP_FILE_URL = 'http://nlp.stanford.edu/data/glove.840B.300d.zip'
GLOVE_ZIP_FILE = 'glove.840B.300d.zip'
GLOVE_FILE = 'glove.840B.300d.txt'
Q1_TRAINING_DATA_FILE = 'q1_train.npy'
Q2_TRAINING_DATA_FILE = 'q2_train.npy'
LABEL_TRAINING_DATA_FILE = 'label_train.npy'
WORD_EMBEDDING_MATRIX_FILE = 'word_embedding_matrix.npy'
NB_WORDS_DATA_FILE = 'nb_words.json'
MAX_NB_WORDS = 200000
MAX_SEQUENCE_LENGTH = 25
EMBEDDING_DIM = 300
MODEL_WEIGHTS_FILE = 'question_pairs_weights.h5'
VALIDATION_SPLIT = 0.1
TEST_SPLIT = 0.1
RNG_SEED = 13371447
NB_EPOCHS = 25
DROPOUT = 0.1
BATCH_SIZE = 32
OPTIMIZER = 'adam'
# If the dataset, embedding matrix and word count exist in the local directory
if exists(Q1_TRAINING_DATA_FILE) and exists(Q2_TRAINING_DATA_FILE) and exists(LABEL_TRAINING_DATA_FILE) and exists(NB_WORDS_DATA_FILE) and exists(WORD_EMBEDDING_MATRIX_FILE):
# Then load them
q1_data = np.load(open(Q1_TRAINING_DATA_FILE, 'rb'))
q2_data = np.load(open(Q2_TRAINING_DATA_FILE, 'rb'))
labels = np.load(open(LABEL_TRAINING_DATA_FILE, 'rb'))
word_embedding_matrix = np.load(open(WORD_EMBEDDING_MATRIX_FILE, 'rb'))
with open(NB_WORDS_DATA_FILE, 'r') as f:
nb_words = json.load(f)['nb_words']
else:
# Else download and extract questions pairs data
if not exists(KERAS_DATASETS_DIR + QUESTION_PAIRS_FILE):
get_file(QUESTION_PAIRS_FILE, QUESTION_PAIRS_FILE_URL)
print("Processing", QUESTION_PAIRS_FILE)
question1 = []
question2 = []
is_duplicate = []
with open(KERAS_DATASETS_DIR + QUESTION_PAIRS_FILE, encoding='utf-8') as csvfile:
reader = csv.DictReader(csvfile, delimiter='\t')
for row in reader:
question1.append(row['question1'])
question2.append(row['question2'])
is_duplicate.append(row['is_duplicate'])
print('Question pairs: %d' % len(question1))
# Build tokenized word index
questions = question1 + question2
tokenizer = Tokenizer(num_words=MAX_NB_WORDS)
tokenizer.fit_on_texts(questions)
question1_word_sequences = tokenizer.texts_to_sequences(question1)
question2_word_sequences = tokenizer.texts_to_sequences(question2)
word_index = tokenizer.word_index
print("Words in index: %d" % len(word_index))
# Download and process GloVe embeddings
if not exists(KERAS_DATASETS_DIR + GLOVE_ZIP_FILE):
zipfile = ZipFile(get_file(GLOVE_ZIP_FILE, GLOVE_ZIP_FILE_URL))
zipfile.extract(GLOVE_FILE, path=KERAS_DATASETS_DIR)
print("Processing", GLOVE_FILE)
embeddings_index = {}
with open(KERAS_DATASETS_DIR + GLOVE_FILE, encoding='utf-8') as f:
for line in f:
values = line.split(' ')
word = values[0]
embedding = np.asarray(values[1:], dtype='float32')
embeddings_index[word] = embedding
print('Word embeddings: %d' % len(embeddings_index))
# Prepare word embedding matrix
nb_words = min(MAX_NB_WORDS, len(word_index))
word_embedding_matrix = np.zeros((nb_words + 1, EMBEDDING_DIM))
for word, i in word_index.items():
if i > MAX_NB_WORDS:
continue
embedding_vector = embeddings_index.get(word)
if embedding_vector is not None:
word_embedding_matrix[i] = embedding_vector
print('Null word embeddings: %d' % np.sum(np.sum(word_embedding_matrix, axis=1) == 0))
# Prepare training data tensors
q1_data = pad_sequences(question1_word_sequences, maxlen=MAX_SEQUENCE_LENGTH)
q2_data = pad_sequences(question2_word_sequences, maxlen=MAX_SEQUENCE_LENGTH)
labels = np.array(is_duplicate, dtype=int)
print('Shape of question1 data tensor:', q1_data.shape)
print('Shape of question2 data tensor:', q2_data.shape)
print('Shape of label tensor:', labels.shape)
# Persist training and configuration data to files
np.save(open(Q1_TRAINING_DATA_FILE, 'wb'), q1_data)
np.save(open(Q2_TRAINING_DATA_FILE, 'wb'), q2_data)
np.save(open(LABEL_TRAINING_DATA_FILE, 'wb'), labels)
np.save(open(WORD_EMBEDDING_MATRIX_FILE, 'wb'), word_embedding_matrix)
with open(NB_WORDS_DATA_FILE, 'w') as f:
json.dump({'nb_words': nb_words}, f)
# Partition the dataset into train and test sets
X = np.stack((q1_data, q2_data), axis=1)
y = labels
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=TEST_SPLIT, random_state=RNG_SEED)
Q1_train = X_train[:,0]
Q2_train = X_train[:,1]
Q1_test = X_test[:,0]
Q2_test = X_test[:,1]
# Define the model
question1 = Input(shape=(MAX_SEQUENCE_LENGTH,))
question2 = Input(shape=(MAX_SEQUENCE_LENGTH,))
q1 = Embedding(nb_words + 1,
EMBEDDING_DIM,
weights=[word_embedding_matrix],
input_length=MAX_SEQUENCE_LENGTH,
trainable=False)(question1)
q1 = TimeDistributed(Dense(EMBEDDING_DIM, activation='relu'))(q1)
q1 = Lambda(lambda x: K.max(x, axis=1), output_shape=(EMBEDDING_DIM, ))(q1)
q2 = Embedding(nb_words + 1,
EMBEDDING_DIM,
weights=[word_embedding_matrix],
input_length=MAX_SEQUENCE_LENGTH,
trainable=False)(question2)
q2 = TimeDistributed(Dense(EMBEDDING_DIM, activation='relu'))(q2)
q2 = Lambda(lambda x: K.max(x, axis=1), output_shape=(EMBEDDING_DIM, ))(q2)
merged = concatenate([q1,q2])
merged = Dense(200, activation='relu')(merged)
merged = Dropout(DROPOUT)(merged)
merged = BatchNormalization()(merged)
merged = Dense(200, activation='relu')(merged)
merged = Dropout(DROPOUT)(merged)
merged = BatchNormalization()(merged)
merged = Dense(200, activation='relu')(merged)
merged = Dropout(DROPOUT)(merged)
merged = BatchNormalization()(merged)
merged = Dense(200, activation='relu')(merged)
merged = Dropout(DROPOUT)(merged)
merged = BatchNormalization()(merged)
is_duplicate = Dense(1, activation='sigmoid')(merged)
model = Model(inputs=[question1,question2], outputs=is_duplicate)
model.compile(loss='binary_crossentropy', optimizer=OPTIMIZER, metrics=['accuracy'])
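# Editor's note (added commentary, not in the original script): each question passes
# through its own Embedding -> TimeDistributed(Dense) -> max-over-time Lambda tower;
# the two 300-d summaries are concatenated and fed through four
# Dense(200)/Dropout/BatchNormalization blocks before the sigmoid duplicate score.
# Uncomment the next line to print the layer/parameter breakdown:
# model.summary()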
# Train the model, checkpointing weights with best validation accuracy
print("Starting training at", datetime.datetime.now())
t0 = time.time()
callbacks = [ModelCheckpoint(MODEL_WEIGHTS_FILE, monitor='val_acc', save_best_only=True)]
history = model.fit([Q1_train, Q2_train],
y_train,
epochs=NB_EPOCHS,
validation_split=VALIDATION_SPLIT,
verbose=2,
batch_size=BATCH_SIZE,
callbacks=callbacks)
t1 = time.time()
print("Training ended at", datetime.datetime.now())
print("Minutes elapsed: %f" % ((t1 - t0) / 60.))
# Print best validation accuracy and epoch
max_val_acc, idx = max((val, idx) for (idx, val) in enumerate(history.history['val_acc']))
print('Maximum validation accuracy = {0:.4f} (epoch {1:d})'.format(max_val_acc, idx+1))
# Evaluate the model with best validation accuracy on the test partition
model.load_weights(MODEL_WEIGHTS_FILE)
loss, accuracy = model.evaluate([Q1_test, Q2_test], y_test, verbose=0)
print('Test loss = {0:.4f}, test accuracy = {1:.4f}'.format(loss, accuracy))
| mit |
rlowrance/re-avm | chart06_make_chart_hi.py | 1 | 27390 | from __future__ import division
import collections
import math
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pdb
from ColumnsTable import ColumnsTable
from columns_contain import columns_contain
import errors
from Month import Month
from Report import Report
cc = columns_contain
class ChartHReport(object):
def __init__(self, k, validation_month, ensemble_weighting, column_definitions, test):
self._column_definitions = column_definitions
self._report = Report()
self._test = test
self._header(k, validation_month, ensemble_weighting)
cd = self._column_definitions.defs_for_columns(
'description',
'mae_validation',
'mae_query',
'mare_validation',
'mare_query',
)
self._ct = ColumnsTable(columns=cd, verbose=True)
def write(self, path):
self._ct.append_legend()
for line in self._ct.iterlines():
self._report.append(line)
if self._test:
self._report.append('** TESTING: DISCARD')
self._report.write(path)
def detail_line(self, **kwds):
with_spaces = {
k: (None if self._column_definitions.replace_by_spaces(k, v) else v)
for k, v in kwds.iteritems()
}
self._ct.append_detail(**with_spaces)
def preformatted_line(self, line):
print line
self._ct.append_line(line)
def _header(self, k, validation_month, ensemble_weighting):
self._report.append('Performance of Best Models Separately and as an Ensemble')
self._report.append(' ')
self._report.append('Considering Best K = %d models' % k)
self._report.append('For validation month %s' % validation_month)
self._report.append('Ensemble weighting: %s' % ensemble_weighting)
class ChartIReport(object):
def __init__(self, column_definitions, test):
self._column_definitions = column_definitions
self._report = Report()
self._header()
self._test = test
self._appended = []
cd = self._column_definitions.defs_for_columns(
'validation_month',
'k',
'oracle_less_best',
'oracle_less_ensemble',
)
self._ct = ColumnsTable(columns=cd, verbose=True)
def write(self, path):
self._ct.append_legend()
for line in self._ct.iterlines():
self._report.append(line)
for line in self._appended:
self._report.append(line)
if self._test:
self._report.append('** TESTING: DISCARD')
self._report.write(path)
def append(self, line):
self._ct.append_line(line)
def detail_line(self, **kwds):
with_spaces = {
k: (None if self._column_definitions.replace_by_spaces(k, v) else v)
for k, v in kwds.iteritems()
}
self._ct.append_detail(**with_spaces)
def _header(self):
self._report.append('Performance of Best and Ensemble Models Relative to the Oracle')
self._report.append(' ')
# return string describing key features of the model
def short_model_description(model_description):
# build model decsription
model = model_description.model
if model == 'gb':
description = '%s(%d, %d, %s, %d, %3.2f)' % (
model,
model_description.n_months_back,
model_description.n_estimators,
model_description.max_features,
model_description.max_depth,
model_description.learning_rate,
)
elif model == 'rf':
description = '%s(%d, %d, %s, %d)' % (
model,
model_description.n_months_back,
model_description.n_estimators,
model_description.max_features,
model_description.max_depth,
)
else:
assert model == 'en', model_description
description = '%s(%f, %f)' % (
model,
model_description.alpha,
model_description.l1_ratio,
)
return description
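# Editor's sketch (not part of the original pipeline): short_model_description only
# reads attributes, so any object with the matching fields works; the namedtuple and
# hyperparameter values below are assumptions for illustration.
def _example_short_model_description():
    GBDescription = collections.namedtuple(
        'GBDescription',
        'model n_months_back n_estimators max_features max_depth learning_rate')
    return short_model_description(GBDescription('gb', 2, 100, 'sqrt', 3, 0.10))
    # -> 'gb(2, 100, sqrt, 3, 0.10)'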
def make_confidence_intervals(df, regret_column_name, ci):
'return ndarrays (ks, lower, upper) of confidence intervals for each value of df.k'
trace = False
if trace:
pdb.set_trace()
n_resamples = 10000
lower_percentile = 100 - ci
upper_percentile = ci
k_uniques = sorted(set(df.k))
ks = np.zeros((len(k_uniques),))
lower = np.zeros((len(k_uniques),))
upper = np.zeros((len(k_uniques),))
for i, k in enumerate(k_uniques):
for_k = df[df.k == k]
values = np.abs(for_k[regret_column_name])
sample = np.random.choice(
values,
size=n_resamples,
replace=True,
)
ks[i] = k
lower[i] = np.percentile(sample, lower_percentile)
upper[i] = np.percentile(sample, upper_percentile)
if trace:
print 'lower', lower
print 'upper', upper
pdb.set_trace()
return (ks, lower, upper)
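# Editor's sketch (not part of the original pipeline): the percentile bootstrap used
# above, reduced to a single group of regrets. The toy values are assumptions;
# n_resamples mirrors make_confidence_intervals and ci=90 mirrors its caller below.
def _example_percentile_bootstrap(ci=90, n_resamples=10000):
    values = np.abs(np.array([-120.0, 80.0, 95.0, -60.0, 110.0]))
    sample = np.random.choice(values, size=n_resamples, replace=True)
    return np.percentile(sample, 100 - ci), np.percentile(sample, ci)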
def add_regret(df_caller, confidence_interval=None):
'mutate plt object by adding 2 regret lines'
# confindence_interval in {'only', 'add', 'skip'}
# we are not interested in the direction of the regret, just its magnitude
df = df_caller.assign( # create new data frame
abs_oracle_less_best=np.abs(df_caller.oracle_less_best),
abs_oracle_less_ensemble=np.abs(df_caller.oracle_less_ensemble),
index=df_caller.index,
)
def maybe_adjust_y_value(series, max_y):
'replace an all-zeroes value with one that plots just above the x axis'
if sum(series == 0.0) == len(series):
# all the values are zero and hence will plot on top of the x axis and will be invisible
# subsitute small positive value
fraction = ( # put the line just above the x axis
.02 if max_y < 3000 else
.01 if max_y < 6000 else
.01
)
substitute_value = fraction * max_y
return pd.Series([substitute_value] * len(series))
else:
return series
def plot_regrets(markers=True, lines=True):
'mutate plt by adding 2 regret lines and one scatter plot'
if markers:
plt.plot(
df.k,
df.abs_oracle_less_ensemble,
'b.', # blue point markers
label='abs(oracle_less_ensemble)',
)
if lines:
plt.plot(
df.k,
pd.Series([np.mean(df.abs_oracle_less_ensemble)] * len(df)),
'b-', # blue line marker
label='mean(abs(oracle_less_ensemble))',
)
plt.plot(
df.k,
maybe_adjust_y_value(df.abs_oracle_less_best, max(np.max(df.abs_oracle_less_best), np.max(df.abs_oracle_less_ensemble))),
'r-', # red line marker
label='abs(mean(oracle_less_best))',
)
def plot_confidence_intervals():
'mutate plt by adding confidence intervals for all K values for oracle_less_ensemble'
trace = False
ci = 90
ks, lower, upper = make_confidence_intervals(df, 'abs_oracle_less_ensemble', ci)
if trace:
print 'lower', lower
print 'upper', upper
print 'ks', ks
            print len(ks), len(lower), len(upper)
            assert len(ks) == len(lower) == len(upper)
pdb.set_trace()
plt.plot(
ks,
lower,
'bv', # blue triangle-down marker
label='%d%% ci lower bound' % ci,
)
plt.plot(
ks,
upper,
                'b^', # blue triangle-up marker
label='%d%% ci upper bound' % ci,
)
# main code starts here
plt.autoscale(
enable=True,
axis='both',
tight=False, # let locator and margins expand the view limits
)
if confidence_interval == 'add':
plot_regrets(markers=True, lines=True)
plot_confidence_intervals()
elif confidence_interval == 'only':
plot_regrets(markers=False, lines=True)
plot_confidence_intervals()
elif confidence_interval == 'skip':
plot_regrets(markers=True, lines=True)
else:
print 'bad confidence_interval', confidence_interval
pdb.set_trace()
def add_title(s):
'mutate plt'
plt.title(
s,
loc='right',
fontdict={
'fontsize': 'xx-small',
'style': 'italic',
},
)
def add_labels():
'mutate plt'
plt.xlabel('K')
        plt.ylabel('abs(regret)')
def add_legend():
'mutate plt'
plt.legend(
loc='best',
fontsize=5,
)
def set_layout():
'mutate plt'
plt.tight_layout(
pad=0.4,
w_pad=0.5,
h_pad=1.0,
)
def make_i_plt_1(df, confidence_interval=None):
'return plt, a 1-up figure with one subplot for all the validation months'
plt.subplot(1, 1, 1) # 1 x 1 grid, draw first subplot
first_month = '200612'
last_month = '200711'
add_regret(
df[np.logical_and(
df.validation_month >= first_month,
df.validation_month <= last_month)],
confidence_interval=confidence_interval,
)
add_title('yr mnth %s through yr mnth %s' % (first_month, last_month))
add_labels()
add_legend()
set_layout()
return plt
def make_i_plt_12(i_df, confidence_interval=None):
'return plt, a 12-up figure with one subplot for each validation month'
# make the figure; imitate make_chart_a
def make_subplot(validation_month): # TODO: remove this dead code
'mutate plt by adding an axes with the two regret lines for the validation_month'
in_month = i_df[i_df.validation_month == validation_month]
oracle_less_ensemble_x = in_month.k
oracle_less_ensemble_y = in_month.oracle_less_ensemble
plt.autoscale(enable=True, axis='both', tight=True)
plt.plot(
oracle_less_ensemble_x,
oracle_less_ensemble_y,
'b.', # blue point markers
label='oracle less ensemble',
)
oracle_less_best_x = in_month.k
oracle_less_best_y = np.abs(in_month.oracle_less_best) # always the same value
if False and sum(oracle_less_best_y == 0.0) == len(oracle_less_best_y):
# all the values are zero
reset_value = 10.0 # replace 0 values with this value, so that the y value is not plotted on the x axis
xx = pd.Series([reset_value] * len(oracle_less_best_y))
oracle_less_best_y = xx
plt.plot(
oracle_less_best_x,
oracle_less_best_y,
'r-', # red with solid line
label='oracle less best',
)
plt.title(
'yr mnth %s' % validation_month,
loc='right',
fontdict={
'fontsize': 'xx-small',
'style': 'italic',
},
)
axes_number = 0
validation_months = (
'200612', '200701', '200702', '200703', '200704', '200705',
'200706', '200707', '200708', '200709', '200710', '200711',
)
row_seq = (1, 2, 3, 4)
col_seq = (1, 2, 3)
for row in row_seq:
for col in col_seq:
validation_month = validation_months[axes_number]
axes_number += 1 # count across rows
plt.subplot(len(row_seq), len(col_seq), axes_number)
add_regret(
i_df[i_df.validation_month == validation_month],
confidence_interval=confidence_interval,
)
add_title('yr mnth %s' % validation_month)
# make_subplot(validation_month)
# annotate the bottom row only
if row == 4 and col == 1:
add_labels()
if row == 4 and col == 3:
add_legend()
set_layout()
return plt
# write report files for all K values and validation months for the year 2007
def make_chart_hi(reduction, actuals, median_prices, control):
'return None'
def make_dispersion_lines(report=None, tag=None, actuals=None, estimates=None):
# append lines to report
def quartile_median(low, hi):
'return median error of actuals s.t. low <= actuals <= hi, return count of number of values in range'
mask = np.array(np.logical_and(actuals >= low, actuals <= hi), dtype=bool)
q_actuals = actuals[mask]
if len(q_actuals) == 0:
print 'no elements selected by mask', low, hi, sum(mask)
                return 0.0, 1.0, sum(mask) # return 1.0 (not 0.0) for the median value because it is used as a divisor later
q_estimates = estimates[mask]
q_abs_errors = np.abs(q_actuals - q_estimates)
q_median_error = np.median(q_abs_errors)
try:
print 'q_actuals:', q_actuals
q_median_value = np.percentile(q_actuals, 50)
except Exception as e:
pdb.set_trace()
print type(e)
print e.args
print e
pdb.set_trace()
q_median_value = 0
return q_median_error, q_median_value, sum(mask)
actuals_quartiles = np.percentile(actuals, (0, 25, 50, 75, 100))
report.preformatted_line('\nMedian Error by Price Quartile for %s\n' % tag)
for q in (0, 1, 2, 3):
q_median_error, q_median_value, count = quartile_median(
actuals_quartiles[q] + (0 if q == 0 else 1),
actuals_quartiles[q + 1] - (1 if q == 3 else 0),
)
report.preformatted_line('quartile %d (prices %8.0f to %8.0f N=%5d): median price: %8.0f median error: %8.0f error / price: %6.4f' % (
q + 1,
actuals_quartiles[q] + (0 if q == 0 else 1),
actuals_quartiles[q + 1] - (1 if q == 3 else 0),
count,
q_median_value,
q_median_error,
q_median_error / q_median_value,
))
def mae(actuals, predictions):
        'return the MAE element of the named tuple produced by errors.errors'
e = errors.errors(actuals, predictions)
mae_index = 1
return e[mae_index]
def chart_h(reduction, median_prices, actuals, k, validation_month):
'return (Report, oracle_less_best, oracle_less_ensemble)'
def median_price(month_str):
return median_prices[Month(month_str)]
print 'chart_h', k, validation_month
if k == 2 and False:
pdb.set_trace()
h = ChartHReport(k, validation_month, 'exp(-MAE/$100000)', control.column_definitions, control.test)
query_month = Month(validation_month).increment(1).as_str()
# write results for each of the k best models in the validation month
cum_weight = None
eta = 1.0
weight_scale = 200000.0 # to get weight < 1
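        # Each expert is weighted by exp(-eta * validation MAE / weight_scale), so experts
        # with lower validation error receive exponentially larger weights; the ensemble
        # prediction is later the weight-normalized sum of the experts' predictions.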
for index in xrange(k):
# write detail line for this expert
try:
expert_key = reduction[validation_month].keys()[index]
except IndexError as e:
h.preformatted_line('IndexError: %s' % str(e))
h.preformatted_line('index: %d' % index)
h.preformatted_line('giving up on completing the chart')
return h, 1, 1
expert_results_validation_month = reduction[validation_month][expert_key]
if expert_key not in reduction[query_month]:
h.preformatted_line('expert_key not in query month')
h.preformatted_line('expert key: %s' % str(expert_key))
h.preformatted_line('query_month: %s' % query_month)
h.preformatted_line('index: %d' % index)
h.preformatted_line('giving up on completing the chart')
return h, 1, 1
expert_results_query_month = reduction[query_month][expert_key]
h.detail_line(
description='expert ranked %d: %s' % (index + 1, short_model_description(expert_key)),
mae_validation=expert_results_validation_month.mae,
mae_query=expert_results_query_month.mae,
mare_validation=expert_results_validation_month.mae / median_price(validation_month),
mare_query=expert_results_query_month.mae / median_price(query_month),
)
# computing running ensemble model prediction
weight = math.exp(- eta * expert_results_validation_month.mae / weight_scale)
if not (weight < 1):
print weight, eta, expert_results_validation_month.mae, weight_scale
pdb.set_trace()
assert weight < 1, (eta, expert_results_validation_month.mae, weight_scale)
incremental_ensemble_predictions_query = weight * expert_results_query_month.predictions
incremental_ensemble_predictions_validation = weight * expert_results_validation_month.predictions
if cum_weight is None:
cum_ensemble_predictions_query = incremental_ensemble_predictions_query
cum_ensemble_predictions_validation = incremental_ensemble_predictions_validation
cum_weight = weight
else:
cum_ensemble_predictions_query += incremental_ensemble_predictions_query
cum_ensemble_predictions_validation += incremental_ensemble_predictions_validation
cum_weight += weight
# write detail line for the ensemble
# pdb.set_trace()
h.detail_line(
description=' ',
)
if k == 10 and validation_month == '200705' and False:
print k, validation_month
pdb.set_trace()
ensemble_predictions_query = cum_ensemble_predictions_query / cum_weight
ensemble_predictions_validation = cum_ensemble_predictions_validation / cum_weight
ensemble_errors_query_mae = mae(actuals[query_month], ensemble_predictions_query)
ensemble_errors_validation_mae = mae(actuals[validation_month], ensemble_predictions_validation)
h.detail_line(
description='ensemble of best %d experts' % k,
mae_validation=ensemble_errors_validation_mae,
mae_query=ensemble_errors_query_mae,
mare_validation=ensemble_errors_validation_mae / median_price(validation_month),
mare_query=ensemble_errors_query_mae / median_price(query_month),
)
# write detail line for the oracle's model
oracle_key = reduction[query_month].keys()[0]
if oracle_key not in reduction[validation_month]:
h.preformatted_line('validation month %s missing %s' % (validation_month, str(oracle_key)))
h.preformatted_line('skipping remainder of report')
return (h, 1.0, 1.0)
oracle_results_validation_month = reduction[validation_month][oracle_key]
oracle_results_query_month = reduction[query_month][oracle_key]
h.detail_line(
description='oracle: %s' % short_model_description(oracle_key),
mae_validation=oracle_results_validation_month.mae,
mae_query=oracle_results_query_month.mae,
mare_validation=oracle_results_validation_month.mae / median_price(validation_month),
mare_query=oracle_results_query_month.mae / median_price(query_month),
)
# report differences from oracle
best_key = reduction[validation_month].keys()[0]
best_results_query_month = reduction[query_month][best_key]
mpquery = median_price(query_month)
oracle_less_best_query_month = oracle_results_query_month.mae - best_results_query_month.mae
oracle_less_ensemble_query_month = oracle_results_query_month.mae - ensemble_errors_query_mae
def iszero(name, value):
print name, type(value), value
if value == 0:
print 'zero divisor:', name, type(value), value
return True
else:
return False
h.detail_line(
description=' ',
)
h.detail_line(
description='oracle - expert ranked 1',
mae_query=oracle_less_best_query_month,
mare_query=oracle_results_query_month.mae / mpquery - best_results_query_month.mae / mpquery,
)
h.detail_line(
description='oracle - ensemble model',
mae_query=oracle_less_ensemble_query_month,
mare_query=oracle_results_query_month.mae / mpquery - ensemble_errors_query_mae / mpquery,
)
h.detail_line(
description=' ',
)
if oracle_results_query_month.mae == 0.0:
h.detail_line(description='relative regrets are infinite because oracle MAE is 0')
h.detail_line(
description='100*(oracle - expert ranked 1)/oracle',
)
h.detail_line(
description='100*(oracle - ensemble model)/oracle',
)
else:
h.detail_line(
description='100*(oracle - expert ranked 1)/oracle',
mae_query=100 * (oracle_less_best_query_month / oracle_results_query_month.mae),
)
h.detail_line(
description='100*(oracle - ensemble model)/oracle',
mae_query=100 * (oracle_less_ensemble_query_month / oracle_results_query_month.mae),
)
# dispersion of errors relative to prices
make_dispersion_lines(
report=h,
tag='ensemble',
actuals=actuals[query_month],
estimates=ensemble_predictions_query,
)
return h, oracle_less_best_query_month, oracle_less_ensemble_query_month
def median_value(value_list):
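        # NOTE: despite its name, this returns the arithmetic mean of value_list, not the median.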
sum = 0.0
for value in value_list:
sum += value
return sum / len(value_list)
def make_hi(reduction, median_prices, actuals):
'return (dict[(k, validation_month)]Report, Report)'
# make chart h
hs = {}
comparison = {}
i_df = None
for k in control.all_k_values:
for validation_month in control.validation_months:
h, oracle_less_best, oracle_less_ensemble = chart_h(reduction, median_prices, actuals, k, validation_month)
hs[(k, validation_month)] = h
comparison[(k, validation_month)] = (oracle_less_best, oracle_less_ensemble)
new_i_df = pd.DataFrame(
data={
'k': k,
'validation_month': validation_month,
'oracle_less_best': oracle_less_best,
'oracle_less_ensemble': oracle_less_ensemble,
},
index=['%03d-%s' % (k, validation_month)],
)
i_df = new_i_df if i_df is None else i_df.append(new_i_df, verify_integrity=True)
        # report I is grouped in the inverted order relative to chart h (validation month outer, k inner)
# make graphical report to help select the best value of k
if control.arg.locality == 'global':
def write_i_plot_12(df, path):
i_plt = make_i_plt_12(df, confidence_interval='skip')
i_plt.savefig(path)
i_plt.close()
def write_i_plot_1(df, path, confidence_interval=None):
                # replace df.oracle_less_best with its mean value
copied = df.copy()
new_value = np.mean(df.oracle_less_best)
copied.oracle_less_best = pd.Series([new_value] * len(df), index=df.index)
i_plt = make_i_plt_1(copied, confidence_interval=confidence_interval)
i_plt.savefig(path)
i_plt.close()
write_i_plot_1(i_df, control.path_out_i_all_1_skip_pdf, confidence_interval='skip')
write_i_plot_1(i_df, control.path_out_i_all_1_only_pdf, confidence_interval='only')
write_i_plot_12(i_df, control.path_out_i_all_12_pdf)
write_i_plot_12(i_df[i_df.k <= 50], control.path_out_i_le_50_12_pdf)
# create text report (this can be deleted later)
i = ChartIReport(control.column_definitions, control.test)
count = 0
sum_abs_oracle_less_best = 0
sum_abs_oracle_less_ensemble = 0
oracle_less_ensemble_by_k = collections.defaultdict(list)
for validation_month in control.validation_months:
for k in control.all_k_values:
oracle_less_best, oracle_less_ensemble = comparison[(k, validation_month)]
i.detail_line(
validation_month=validation_month,
k=k,
oracle_less_best=oracle_less_best,
oracle_less_ensemble=oracle_less_ensemble,
)
oracle_less_ensemble_by_k[k].append(oracle_less_ensemble)
count += 1
sum_abs_oracle_less_best += abs(oracle_less_best)
sum_abs_oracle_less_ensemble += abs(oracle_less_ensemble)
# make chart i part 2 (TODO: create separate chart)
i.append(' ')
i.append('Median (oracle - ensemble)')
for k in sorted(oracle_less_ensemble_by_k.keys()):
value_list = oracle_less_ensemble_by_k[k]
i.detail_line(
k=k,
oracle_less_ensemble=median_value(value_list),
)
i.append(' ')
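            # NOTE: the two summary lines below report the mean of the absolute regrets (sum / count), not the median.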
i.append('median absolute oracle less best : %f' % (sum_abs_oracle_less_best / count))
i.append('median absolute oracle less ensemble: %f' % (sum_abs_oracle_less_ensemble / count))
return hs, i
control.timer.lap('start charts h and i')
if control.arg.locality == 'global':
hs, i = make_hi(reduction, median_prices, actuals)
# write the reports (the order of writing does not matter)
for key, report in hs.iteritems():
k, validation_month = key
report.write(control.path_out_h_template % (k, validation_month))
i.write(control.path_out_i_template)
return
elif control.arg.locality == 'city':
cities = reduction.keys() if control.arg.all else control.selected_cities
for city in cities:
city_reduction = reduction[city]
city_median_prices = median_prices[city]
city_actuals = actuals[city]
print city, len(city_reduction), city_median_prices
print 'city:', city
hs, i = make_hi(city_reduction, city_median_prices, city_actuals)
# write the reports (the order of writing does not matter)
if hs is None:
print 'no h report for city', city
continue
for key, report in hs.iteritems():
k, validation_month = key
report.write(control.path_out_h_template % (city, k, validation_month))
i.write(control.path_out_i_template % city)
return
else:
print control.arg.locality
print 'bad locality'
pdb.set_trace()
| bsd-3-clause |
jordancheah/aas | ch11-neuro/fish.py | 16 | 3438 | # coding=utf-8
# Copyright 2015 Sanford Ryza, Uri Laserson, Sean Owen and Joshua Wills
#
# See LICENSE file for further information.
# this code assumes you are working from an interactive Thunder (PySpark) shell
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
plt.ion()
##################
# data exploration
##################
# load some zebrafish brain data
path_to_images = ('path/to/thunder/python/thunder/utils/data/fish/tif-stack')
imagesRDD = tsc.loadImages(path_to_images, inputformat='tif-stack')
# explore the resulting object
print imagesRDD
print imagesRDD.rdd
print imagesRDD.first()
print imagesRDD.first()[1].shape
print imagesRDD.dims
print imagesRDD.nimages
# plot the raw data
img = imagesRDD.values().first()
plt.imshow(img[:, :, 0], interpolation='nearest', aspect='equal', cmap='gray')
# plot subsampled data
subsampled = imagesRDD.subsample((5, 5, 1))
plt.imshow(subsampled.first()[1][:, :, 0], interpolation='nearest', aspect='equal', cmap='gray')
print subsampled.dims
# reshuffle data to series representation
seriesRDD = imagesRDD.toSeries()
print seriesRDD.dims
print seriesRDD.index
print seriesRDD.count()
print seriesRDD.rdd.takeSample(False, 1, 0)[0]
print seriesRDD.max()
# distributed computation of stats
stddevRDD = seriesRDD.seriesStdev()
print stddevRDD.take(3)
print stddevRDD.dims
# collecting data locally and repacking it
repacked = stddevRDD.pack()
plt.imshow(repacked[:,:,0], interpolation='nearest', cmap='gray', aspect='equal')
print type(repacked)
print repacked.shape
# plot some of the time series themselves
plt.plot(seriesRDD.center().subset(50).T)
# distributed computation of custom statistics
seriesRDD.apply(lambda x: x.argmin())
###############################
# Clustering fish brain regions
###############################
import numpy as np
import seaborn as sns  # needed for the cluster color palette used below
from thunder import KMeans
seriesRDD = tsc.loadSeries('path/to/thunder/python/thunder/utils/data/fish/bin')
print seriesRDD.dims
print seriesRDD.index
normalizedRDD = seriesRDD.normalize(baseline='mean')
stddevs = (normalizedRDD
.seriesStdev()
.values()
.sample(False, 0.1, 0)
.collect())
plt.hist(stddevs, bins=20)
plt.plot(normalizedRDD.subset(50, thresh=0.1, stat='std').T)
# perform k-means on the normalized series
ks = [5, 10, 15, 20, 30, 50, 100, 200]
models = []
for k in ks:
models.append(KMeans(k=k).fit(normalizedRDD))
# define a couple functions to score the clustering quality
def model_error_1(model):
def series_error(series):
cluster_id = model.predict(series)
center = model.centers[cluster_id]
diff = center - series
return diff.dot(diff) ** 0.5
return normalizedRDD.apply(series_error).sum()
def model_error_2(model):
return 1. / model.similarity(normalizedRDD).sum()
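# model_error_1 sums each series' Euclidean distance to its assigned cluster center;
# model_error_2 takes the reciprocal of the model's summed similarity to the data.
# Both are mapped over the fitted models below to compare clusterings across k.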
# compute the error metrics for the different resulting clusterings
errors_1 = np.asarray(map(model_error_1, models))
errors_2 = np.asarray(map(model_error_2, models))
plt.plot(
ks, errors_1 / errors_1.sum(), 'k-o',
ks, errors_2 / errors_2.sum(), 'b:v')
# plot the best performing model
model20 = models[3]
plt.plot(model20.centers.T)
# finally, plot each brain region according to its characteristic behavior
by_cluster = model20.predict(normalizedRDD).pack()
cmap_cat = ListedColormap(sns.color_palette("hls", 10), name='from_list')
plt.imshow(by_cluster[:, :, 0], interpolation='nearest', aspect='equal', cmap='gray')
| apache-2.0 |
krzychb/rtd-test-bed | tools/tiny-test-fw/Utility/LineChart.py | 1 | 1742 | # Copyright 2015-2017 Espressif Systems (Shanghai) PTE LTD
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import matplotlib
# fix can't draw figure with docker
matplotlib.use('Agg')
import matplotlib.pyplot as plt # noqa: E402 - matplotlib.use('Agg') needs to be before this
# candidate line styles (color, line, and marker combinations)
LINE_STYLE_CANDIDATE = ['b-o', 'r-o', 'k-o', 'm-o', 'c-o', 'g-o', 'y-o',
'b-s', 'r-s', 'k-s', 'm-s', 'c-s', 'g-s', 'y-s']
def draw_line_chart(file_name, title, x_label, y_label, data_list):
"""
draw line chart and save to file.
:param file_name: abs/relative file name to save chart figure
:param title: chart title
:param x_label: x-axis label
:param y_label: y-axis label
:param data_list: a list of line data.
each line is a dict of ("x-axis": list, "y-axis": list, "label": string)
"""
plt.figure(figsize=(12, 6))
plt.grid(True)
for i, data in enumerate(data_list):
plt.plot(data["x-axis"], data["y-axis"], LINE_STYLE_CANDIDATE[i], label=data["label"])
plt.xlabel(x_label)
plt.ylabel(y_label)
plt.legend(fontsize=12)
plt.title(title)
plt.tight_layout(pad=3, w_pad=3, h_pad=3)
plt.savefig(file_name)
plt.close()
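# A minimal usage sketch added for illustration; the output file name and the data
# below are hypothetical and not part of the original utility.
if __name__ == '__main__':
    example_data = [
        {"x-axis": [1, 2, 3, 4], "y-axis": [10.0, 12.5, 11.8, 14.2], "label": "series A"},
        {"x-axis": [1, 2, 3, 4], "y-axis": [9.5, 11.0, 13.3, 13.9], "label": "series B"},
    ]
    draw_line_chart("example_chart.png", "Example Chart", "x value", "y value", example_data)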
| apache-2.0 |
glenngillen/dotfiles | .vscode/extensions/ms-python.python-2021.5.842923320/pythonFiles/lib/python/debugpy/_vendored/pydevd/pydevd.py | 1 | 142180 | '''
Entry point module (keep at root):
This module starts the debugger.
'''
import sys # @NoMove
if sys.version_info[:2] < (2, 6):
raise RuntimeError('The PyDev.Debugger requires Python 2.6 onwards to be run. If you need to use an older Python version, use an older version of the debugger.')
import atexit
from collections import defaultdict
from contextlib import contextmanager
from functools import partial
import itertools
import os
import traceback
import weakref
import getpass as getpass_mod
import functools
try:
import pydevd_file_utils
except ImportError:
# On the first import of a pydevd module, add pydevd itself to the PYTHONPATH
# if its dependencies cannot be imported.
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
import pydevd_file_utils
from _pydev_bundle import pydev_imports, pydev_log
from _pydev_bundle._pydev_filesystem_encoding import getfilesystemencoding
from _pydev_bundle.pydev_is_thread_alive import is_thread_alive
from _pydev_bundle.pydev_override import overrides
from _pydev_imps._pydev_saved_modules import thread
from _pydev_imps._pydev_saved_modules import threading
from _pydev_imps._pydev_saved_modules import time
from _pydevd_bundle import pydevd_extension_utils, pydevd_frame_utils, pydevd_constants
from _pydevd_bundle.pydevd_filtering import FilesFiltering, glob_matches_path
from _pydevd_bundle import pydevd_io, pydevd_vm_type
from _pydevd_bundle import pydevd_utils
from _pydev_bundle.pydev_console_utils import DebugConsoleStdIn
from _pydevd_bundle.pydevd_additional_thread_info import set_additional_thread_info
from _pydevd_bundle.pydevd_breakpoints import ExceptionBreakpoint, get_exception_breakpoint
from _pydevd_bundle.pydevd_comm_constants import (CMD_THREAD_SUSPEND, CMD_STEP_INTO, CMD_SET_BREAK,
CMD_STEP_INTO_MY_CODE, CMD_STEP_OVER, CMD_SMART_STEP_INTO, CMD_RUN_TO_LINE,
CMD_SET_NEXT_STATEMENT, CMD_STEP_RETURN, CMD_ADD_EXCEPTION_BREAK, CMD_STEP_RETURN_MY_CODE,
CMD_STEP_OVER_MY_CODE, constant_to_str, CMD_STEP_INTO_COROUTINE)
from _pydevd_bundle.pydevd_constants import (IS_JYTH_LESS25, get_thread_id, get_current_thread_id,
dict_keys, dict_iter_items, DebugInfoHolder, PYTHON_SUSPEND, STATE_SUSPEND, STATE_RUN, get_frame,
clear_cached_thread_id, INTERACTIVE_MODE_AVAILABLE, SHOW_DEBUG_INFO_ENV, IS_PY34_OR_GREATER, IS_PY2, NULL,
NO_FTRACE, IS_IRONPYTHON, JSON_PROTOCOL, IS_CPYTHON, HTTP_JSON_PROTOCOL, USE_CUSTOM_SYS_CURRENT_FRAMES_MAP, call_only_once,
ForkSafeLock, IGNORE_BASENAMES_STARTING_WITH, EXCEPTION_TYPE_UNHANDLED)
from _pydevd_bundle.pydevd_defaults import PydevdCustomization # Note: import alias used on pydev_monkey.
from _pydevd_bundle.pydevd_custom_frames import CustomFramesContainer, custom_frames_container_init
from _pydevd_bundle.pydevd_dont_trace_files import DONT_TRACE, PYDEV_FILE, LIB_FILE, DONT_TRACE_DIRS
from _pydevd_bundle.pydevd_extension_api import DebuggerEventHandler
from _pydevd_bundle.pydevd_frame_utils import add_exception_to_frame, remove_exception_from_frame
from _pydevd_bundle.pydevd_net_command_factory_xml import NetCommandFactory
from _pydevd_bundle.pydevd_trace_dispatch import (
trace_dispatch as _trace_dispatch, global_cache_skips, global_cache_frame_skips, fix_top_level_trace_and_get_trace_func)
from _pydevd_bundle.pydevd_utils import save_main_module, is_current_thread_main_thread
from _pydevd_frame_eval.pydevd_frame_eval_main import (
frame_eval_func, dummy_trace_dispatch)
import pydev_ipython # @UnusedImport
from _pydevd_bundle.pydevd_source_mapping import SourceMapping
from pydevd_concurrency_analyser.pydevd_concurrency_logger import ThreadingLogger, AsyncioLogger, send_concurrency_message, cur_time
from pydevd_concurrency_analyser.pydevd_thread_wrappers import wrap_threads
from pydevd_file_utils import get_abs_path_real_path_and_base_from_frame, NORM_PATHS_AND_BASE_CONTAINER
from pydevd_file_utils import get_fullname, get_package_dir
from os.path import abspath as os_path_abspath
import pydevd_tracing
from _pydevd_bundle.pydevd_comm import (InternalThreadCommand, InternalThreadCommandForAnyThread,
create_server_socket, FSNotifyThread)
from _pydevd_bundle.pydevd_comm import (InternalConsoleExec,
_queue, ReaderThread, GetGlobalDebugger, get_global_debugger,
set_global_debugger, WriterThread,
start_client, start_server, InternalGetBreakpointException, InternalSendCurrExceptionTrace,
InternalSendCurrExceptionTraceProceeded)
from _pydevd_bundle.pydevd_daemon_thread import PyDBDaemonThread, mark_as_pydevd_daemon_thread
from _pydevd_bundle.pydevd_process_net_command_json import PyDevJsonCommandProcessor
from _pydevd_bundle.pydevd_process_net_command import process_net_command
from _pydevd_bundle.pydevd_net_command import NetCommand
from _pydevd_bundle.pydevd_breakpoints import stop_on_unhandled_exception
from _pydevd_bundle.pydevd_collect_bytecode_info import collect_try_except_info, collect_return_info
from _pydevd_bundle.pydevd_suspended_frames import SuspendedFramesManager
from socket import SHUT_RDWR
from _pydevd_bundle.pydevd_api import PyDevdAPI
from _pydevd_bundle.pydevd_timeout import TimeoutTracker
from _pydevd_bundle.pydevd_thread_lifecycle import suspend_all_threads, mark_thread_suspended
if USE_CUSTOM_SYS_CURRENT_FRAMES_MAP:
from _pydevd_bundle.pydevd_constants import constructed_tid_to_last_frame
__version_info__ = (2, 1, 0)
__version_info_str__ = []
for v in __version_info__:
__version_info_str__.append(str(v))
__version__ = '.'.join(__version_info_str__)
# IMPORTANT: pydevd_constants must be the 1st thing defined because it'll keep a reference to the original sys._getframe
def install_breakpointhook(pydevd_breakpointhook=None):
if pydevd_breakpointhook is None:
def pydevd_breakpointhook(*args, **kwargs):
hookname = os.getenv('PYTHONBREAKPOINT')
if (
hookname is not None
and len(hookname) > 0
and hasattr(sys, '__breakpointhook__')
and sys.__breakpointhook__ != pydevd_breakpointhook
):
sys.__breakpointhook__(*args, **kwargs)
else:
settrace(*args, **kwargs)
if sys.version_info[0:2] >= (3, 7):
# There are some choices on how to provide the breakpoint hook. Namely, we can provide a
# PYTHONBREAKPOINT which provides the import path for a method to be executed or we
# can override sys.breakpointhook.
# pydevd overrides sys.breakpointhook instead of providing an environment variable because
# it's possible that the debugger starts the user program but is not available in the
# PYTHONPATH (and would thus fail to be imported if PYTHONBREAKPOINT was set to pydevd.settrace).
# Note that the implementation still takes PYTHONBREAKPOINT in account (so, if it was provided
# by someone else, it'd still work).
sys.breakpointhook = pydevd_breakpointhook
else:
if sys.version_info[0] >= 3:
import builtins as __builtin__ # Py3 noqa
else:
import __builtin__ # noqa
# In older versions, breakpoint() isn't really available, so, install the hook directly
# in the builtins.
__builtin__.breakpoint = pydevd_breakpointhook
sys.__breakpointhook__ = pydevd_breakpointhook
# Install the breakpoint hook at import time.
install_breakpointhook()
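# With the hook installed, a bare breakpoint() call in user code (Python 3.7+) dispatches
# to pydevd's settrace() unless the PYTHONBREAKPOINT environment variable points at a
# different hook; on older interpreters the builtin breakpoint itself is patched in above.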
SUPPORT_PLUGINS = not IS_JYTH_LESS25
PluginManager = None
if SUPPORT_PLUGINS:
from _pydevd_bundle.pydevd_plugin_utils import PluginManager
threadingEnumerate = threading.enumerate
threadingCurrentThread = threading.currentThread
try:
'dummy'.encode('utf-8') # Added because otherwise Jython 2.2.1 wasn't finding the encoding (if it wasn't loaded in the main thread).
except:
pass
_global_redirect_stdout_to_server = False
_global_redirect_stderr_to_server = False
file_system_encoding = getfilesystemencoding()
_CACHE_FILE_TYPE = {}
pydev_log.debug('Using GEVENT_SUPPORT: %s', pydevd_constants.SUPPORT_GEVENT)
pydev_log.debug('pydevd __file__: %s', os.path.abspath(__file__))
#=======================================================================================================================
# PyDBCommandThread
#=======================================================================================================================
class PyDBCommandThread(PyDBDaemonThread):
def __init__(self, py_db):
PyDBDaemonThread.__init__(self, py_db)
self._py_db_command_thread_event = py_db._py_db_command_thread_event
self.setName('pydevd.CommandThread')
@overrides(PyDBDaemonThread._on_run)
def _on_run(self):
# Delay a bit this initialization to wait for the main program to start.
self._py_db_command_thread_event.wait(0.3)
if self._kill_received:
return
try:
while not self._kill_received:
try:
self.py_db.process_internal_commands()
except:
pydev_log.info('Finishing debug communication...(2)')
self._py_db_command_thread_event.clear()
self._py_db_command_thread_event.wait(0.3)
except:
try:
pydev_log.debug(sys.exc_info()[0])
except:
# In interpreter shutdown many things can go wrong (any module variables may
# be None, streams can be closed, etc).
pass
# only got this error in interpreter shutdown
# pydev_log.info('Finishing debug communication...(3)')
@overrides(PyDBDaemonThread.do_kill_pydev_thread)
def do_kill_pydev_thread(self):
PyDBDaemonThread.do_kill_pydev_thread(self)
# Set flag so that it can exit before the usual timeout.
self._py_db_command_thread_event.set()
#=======================================================================================================================
# CheckAliveThread
# Non-daemon thread: guarantees that all data is written even if program is finished
#=======================================================================================================================
class CheckAliveThread(PyDBDaemonThread):
def __init__(self, py_db):
PyDBDaemonThread.__init__(self, py_db)
self.setName('pydevd.CheckAliveThread')
self.daemon = False
self._wait_event = threading.Event()
@overrides(PyDBDaemonThread._on_run)
def _on_run(self):
py_db = self.py_db
def can_exit():
with py_db._main_lock:
# Note: it's important to get the lock besides checking that it's empty (this
# means that we're not in the middle of some command processing).
writer = py_db.writer
writer_empty = writer is not None and writer.empty()
return not py_db.has_user_threads_alive() and writer_empty
try:
while not self._kill_received:
self._wait_event.wait(0.3)
if can_exit():
break
py_db.check_output_redirect()
if can_exit():
pydev_log.debug("No threads alive, finishing debug session")
py_db.dispose_and_kill_all_pydevd_threads()
except:
pydev_log.exception()
def join(self, timeout=None):
# If someone tries to join this thread, mark it to be killed.
# This is the case for CherryPy when auto-reload is turned on.
self.do_kill_pydev_thread()
PyDBDaemonThread.join(self, timeout=timeout)
@overrides(PyDBDaemonThread.do_kill_pydev_thread)
def do_kill_pydev_thread(self):
PyDBDaemonThread.do_kill_pydev_thread(self)
# Set flag so that it can exit before the usual timeout.
self._wait_event.set()
class AbstractSingleNotificationBehavior(object):
'''
The basic usage should be:
# Increment the request time for the suspend.
single_notification_behavior.increment_suspend_time()
# Notify that this is a pause request (when a pause, not a breakpoint).
single_notification_behavior.on_pause()
# Mark threads to be suspended.
set_suspend(...)
# On do_wait_suspend, use notify_thread_suspended:
def do_wait_suspend(...):
with single_notification_behavior.notify_thread_suspended(thread_id):
...
'''
__slots__ = [
'_last_resume_notification_time',
'_last_suspend_notification_time',
'_lock',
'_next_request_time',
'_suspend_time_request',
'_suspended_thread_ids',
'_pause_requested',
'_py_db',
]
NOTIFY_OF_PAUSE_TIMEOUT = .5
def __init__(self, py_db):
self._py_db = weakref.ref(py_db)
self._next_request_time = partial(next, itertools.count())
self._last_suspend_notification_time = -1
self._last_resume_notification_time = -1
self._suspend_time_request = self._next_request_time()
self._lock = thread.allocate_lock()
self._suspended_thread_ids = set()
self._pause_requested = False
def send_suspend_notification(self, thread_id, stop_reason):
raise AssertionError('abstract: subclasses must override.')
def send_resume_notification(self, thread_id):
raise AssertionError('abstract: subclasses must override.')
def increment_suspend_time(self):
with self._lock:
self._suspend_time_request = self._next_request_time()
def on_pause(self):
# Upon a pause, we should force sending new suspend notifications
# if no notification is sent after some time and there's some thread already stopped.
with self._lock:
self._pause_requested = True
global_suspend_time = self._suspend_time_request
py_db = self._py_db()
if py_db is not None:
py_db.timeout_tracker.call_on_timeout(
self.NOTIFY_OF_PAUSE_TIMEOUT,
self._notify_after_timeout,
kwargs={'global_suspend_time': global_suspend_time}
)
def _notify_after_timeout(self, global_suspend_time):
with self._lock:
if self._suspended_thread_ids:
if global_suspend_time > self._last_suspend_notification_time:
self._last_suspend_notification_time = global_suspend_time
# Notify about any thread which is currently suspended.
pydev_log.info('Sending suspend notification after timeout.')
self.send_suspend_notification(next(iter(self._suspended_thread_ids)), CMD_THREAD_SUSPEND)
def on_thread_suspend(self, thread_id, stop_reason):
with self._lock:
pause_requested = self._pause_requested
if pause_requested:
# When a suspend notification is sent, reset the pause flag.
self._pause_requested = False
self._suspended_thread_ids.add(thread_id)
# CMD_THREAD_SUSPEND should always be a side-effect of a break, so, only
# issue for a CMD_THREAD_SUSPEND if a pause is pending.
if stop_reason != CMD_THREAD_SUSPEND or pause_requested:
if self._suspend_time_request > self._last_suspend_notification_time:
pydev_log.info('Sending suspend notification.')
self._last_suspend_notification_time = self._suspend_time_request
self.send_suspend_notification(thread_id, stop_reason)
else:
pydev_log.info(
                        'Suspend not sent (it was already sent). Last suspend %s <= Last resume %s',
self._last_suspend_notification_time,
self._last_resume_notification_time,
)
else:
pydev_log.info(
'Suspend not sent because stop reason is thread suspend and pause was not requested.',
)
def on_thread_resume(self, thread_id):
# on resume (step, continue all):
with self._lock:
self._suspended_thread_ids.remove(thread_id)
if self._last_resume_notification_time < self._last_suspend_notification_time:
pydev_log.info('Sending resume notification.')
self._last_resume_notification_time = self._last_suspend_notification_time
self.send_resume_notification(thread_id)
else:
pydev_log.info(
'Resume not sent (it was already sent). Last resume %s >= Last suspend %s',
self._last_resume_notification_time,
self._last_suspend_notification_time,
)
@contextmanager
def notify_thread_suspended(self, thread_id, stop_reason):
self.on_thread_suspend(thread_id, stop_reason)
try:
yield # At this point the thread must be actually suspended.
finally:
self.on_thread_resume(thread_id)
class ThreadsSuspendedSingleNotification(AbstractSingleNotificationBehavior):
__slots__ = AbstractSingleNotificationBehavior.__slots__ + [
'multi_threads_single_notification', '_callbacks', '_callbacks_lock']
def __init__(self, py_db):
AbstractSingleNotificationBehavior.__init__(self, py_db)
# If True, pydevd will send a single notification when all threads are suspended/resumed.
self.multi_threads_single_notification = False
self._callbacks_lock = threading.Lock()
self._callbacks = []
def add_on_resumed_callback(self, callback):
with self._callbacks_lock:
self._callbacks.append(callback)
@overrides(AbstractSingleNotificationBehavior.send_resume_notification)
def send_resume_notification(self, thread_id):
py_db = self._py_db()
if py_db is not None:
py_db.writer.add_command(py_db.cmd_factory.make_thread_resume_single_notification(thread_id))
with self._callbacks_lock:
callbacks = self._callbacks
self._callbacks = []
for callback in callbacks:
callback()
@overrides(AbstractSingleNotificationBehavior.send_suspend_notification)
def send_suspend_notification(self, thread_id, stop_reason):
py_db = self._py_db()
if py_db is not None:
py_db.writer.add_command(py_db.cmd_factory.make_thread_suspend_single_notification(py_db, thread_id, stop_reason))
@overrides(AbstractSingleNotificationBehavior.notify_thread_suspended)
@contextmanager
def notify_thread_suspended(self, thread_id, stop_reason):
if self.multi_threads_single_notification:
with AbstractSingleNotificationBehavior.notify_thread_suspended(self, thread_id, stop_reason):
yield
else:
yield
class _Authentication(object):
__slots__ = ['access_token', 'client_access_token', '_authenticated', '_wrong_attempts']
def __init__(self):
# A token to be send in the command line or through the settrace api -- when such token
# is given, the first message sent to the IDE must pass the same token to authenticate.
# Note that if a disconnect is sent, the same message must be resent to authenticate.
self.access_token = None
# This token is the one that the client requires to accept a connection from pydevd
# (it's stored here and just passed back when required, it's not used internally
# for anything else).
self.client_access_token = None
self._authenticated = None
self._wrong_attempts = 0
def is_authenticated(self):
if self._authenticated is None:
return self.access_token is None
return self._authenticated
def login(self, access_token):
if self._wrong_attempts >= 10: # A user can fail to authenticate at most 10 times.
return
self._authenticated = access_token == self.access_token
if not self._authenticated:
self._wrong_attempts += 1
else:
self._wrong_attempts = 0
def logout(self):
self._authenticated = None
self._wrong_attempts = 0
class PyDB(object):
""" Main debugging class
Lots of stuff going on here:
PyDB starts two threads on startup that connect to remote debugger (RDB)
The threads continuously read & write commands to RDB.
PyDB communicates with these threads through command queues.
Every RDB command is processed by calling process_net_command.
Every PyDB net command is sent to the net by posting NetCommand to WriterThread queue
Some commands need to be executed on the right thread (suspend/resume & friends)
These are placed on the internal command queue.
"""
# Direct child pids which should not be terminated when terminating processes.
# Note: class instance because it should outlive PyDB instances.
dont_terminate_child_pids = set()
def __init__(self, set_as_global=True):
if set_as_global:
pydevd_tracing.replace_sys_set_trace_func()
self.authentication = _Authentication()
self.reader = None
self.writer = None
self._fsnotify_thread = None
self.created_pydb_daemon_threads = {}
self._waiting_for_connection_thread = None
self._on_configuration_done_event = threading.Event()
self.check_alive_thread = None
self.py_db_command_thread = None
self.quitting = None
self.cmd_factory = NetCommandFactory()
self._cmd_queue = defaultdict(_queue.Queue) # Key is thread id or '*', value is Queue
self.suspended_frames_manager = SuspendedFramesManager()
self._files_filtering = FilesFiltering()
self.timeout_tracker = TimeoutTracker(self)
# Note: when the source mapping is changed we also have to clear the file types cache
# (because if a given file is a part of the project or not may depend on it being
# defined in the source mapping).
self.source_mapping = SourceMapping(on_source_mapping_changed=self._clear_filters_caches)
# Determines whether we should terminate child processes when asked to terminate.
self.terminate_child_processes = True
# These are the breakpoints received by the PyDevdAPI. They are meant to store
# the breakpoints in the api -- its actual contents are managed by the api.
self.api_received_breakpoints = {}
# These are the breakpoints meant to be consumed during runtime.
self.breakpoints = {}
# Set communication protocol
PyDevdAPI().set_protocol(self, 0, PydevdCustomization.DEFAULT_PROTOCOL)
self.variable_presentation = PyDevdAPI.VariablePresentation()
# mtime to be raised when breakpoints change
self.mtime = 0
self.file_to_id_to_line_breakpoint = {}
self.file_to_id_to_plugin_breakpoint = {}
# Note: breakpoints dict should not be mutated: a copy should be created
# and later it should be assigned back (to prevent concurrency issues).
self.break_on_uncaught_exceptions = {}
self.break_on_caught_exceptions = {}
self.break_on_user_uncaught_exceptions = {}
self.ready_to_run = False
self._main_lock = thread.allocate_lock()
self._lock_running_thread_ids = thread.allocate_lock()
self._lock_create_fs_notify = thread.allocate_lock()
self._py_db_command_thread_event = threading.Event()
if set_as_global:
CustomFramesContainer._py_db_command_thread_event = self._py_db_command_thread_event
self.pydb_disposed = False
self._wait_for_threads_to_finish_called = False
self._wait_for_threads_to_finish_called_lock = thread.allocate_lock()
self._wait_for_threads_to_finish_called_event = threading.Event()
self.terminate_requested = False
self._disposed_lock = thread.allocate_lock()
self.signature_factory = None
self.SetTrace = pydevd_tracing.SetTrace
self.skip_on_exceptions_thrown_in_same_context = False
self.ignore_exceptions_thrown_in_lines_with_ignore_exception = True
# Suspend debugger even if breakpoint condition raises an exception.
# May be changed with CMD_PYDEVD_JSON_CONFIG.
self.skip_suspend_on_breakpoint_exception = () # By default suspend on any Exception.
self.skip_print_breakpoint_exception = () # By default print on any Exception.
# By default user can step into properties getter/setter/deleter methods
self.disable_property_trace = False
self.disable_property_getter_trace = False
self.disable_property_setter_trace = False
self.disable_property_deleter_trace = False
# this is a dict of thread ids pointing to thread ids. Whenever a command is passed to the java end that
# acknowledges that a thread was created, the thread id should be passed here -- and if at some time we do not
# find that thread alive anymore, we must remove it from this list and make the java side know that the thread
# was killed.
self._running_thread_ids = {}
# Note: also access '_enable_thread_notifications' with '_lock_running_thread_ids'
self._enable_thread_notifications = False
self._set_breakpoints_with_id = False
# This attribute holds the file-> lines which have an @IgnoreException.
self.filename_to_lines_where_exceptions_are_ignored = {}
# working with plugins (lazily initialized)
self.plugin = None
self.has_plugin_line_breaks = False
self.has_plugin_exception_breaks = False
self.thread_analyser = None
self.asyncio_analyser = None
# matplotlib support in debugger and debug console
self.mpl_in_use = False
self.mpl_hooks_in_debug_console = False
self.mpl_modules_for_patching = {}
self._filename_to_not_in_scope = {}
self.first_breakpoint_reached = False
self._exclude_filters_enabled = self._files_filtering.use_exclude_filters()
self._is_libraries_filter_enabled = self._files_filtering.use_libraries_filter()
self.is_files_filter_enabled = self._exclude_filters_enabled or self._is_libraries_filter_enabled
self.show_return_values = False
self.remove_return_values_flag = False
self.redirect_output = False
# this flag disables frame evaluation even if it's available
self.use_frame_eval = True
# If True, pydevd will send a single notification when all threads are suspended/resumed.
self._threads_suspended_single_notification = ThreadsSuspendedSingleNotification(self)
# If True a step command will do a step in one thread and will also resume all other threads.
self.stepping_resumes_all_threads = False
self._local_thread_trace_func = threading.local()
self._server_socket_ready_event = threading.Event()
self._server_socket_name = None
# Bind many locals to the debugger because upon teardown those names may become None
# in the namespace (and thus can't be relied upon unless the reference was previously
# saved).
if IS_IRONPYTHON:
# A partial() cannot be used in IronPython for sys.settrace.
def new_trace_dispatch(frame, event, arg):
return _trace_dispatch(self, frame, event, arg)
self.trace_dispatch = new_trace_dispatch
else:
self.trace_dispatch = partial(_trace_dispatch, self)
self.fix_top_level_trace_and_get_trace_func = fix_top_level_trace_and_get_trace_func
self.frame_eval_func = frame_eval_func
self.dummy_trace_dispatch = dummy_trace_dispatch
# Note: this is different from pydevd_constants.thread_get_ident because we want Jython
# to be None here because it also doesn't have threading._active.
try:
self.threading_get_ident = threading.get_ident # Python 3
self.threading_active = threading._active
except:
try:
self.threading_get_ident = threading._get_ident # Python 2 noqa
self.threading_active = threading._active
except:
self.threading_get_ident = None # Jython
self.threading_active = None
self.threading_current_thread = threading.currentThread
self.set_additional_thread_info = set_additional_thread_info
self.stop_on_unhandled_exception = stop_on_unhandled_exception
self.collect_try_except_info = collect_try_except_info
self.collect_return_info = collect_return_info
self.get_exception_breakpoint = get_exception_breakpoint
self._dont_trace_get_file_type = DONT_TRACE.get
self._dont_trace_dirs_get_file_type = DONT_TRACE_DIRS.get
self.PYDEV_FILE = PYDEV_FILE
self.LIB_FILE = LIB_FILE
self._in_project_scope_cache = {}
self._exclude_by_filter_cache = {}
self._apply_filter_cache = {}
self._ignore_system_exit_codes = set()
# DAP related
self._dap_messages_listeners = []
if set_as_global:
# Set as the global instance only after it's initialized.
set_global_debugger(self)
# Stop the tracing as the last thing before the actual shutdown for a clean exit.
atexit.register(stoptrace)
def setup_auto_reload_watcher(self, enable_auto_reload, watch_dirs, poll_target_time, exclude_patterns, include_patterns):
try:
with self._lock_create_fs_notify:
# When setting up, dispose of the previous one (if any).
if self._fsnotify_thread is not None:
self._fsnotify_thread.do_kill_pydev_thread()
self._fsnotify_thread = None
if not enable_auto_reload:
return
exclude_patterns = tuple(exclude_patterns)
include_patterns = tuple(include_patterns)
def accept_directory(absolute_filename, cache={}):
try:
return cache[absolute_filename]
except:
if absolute_filename and absolute_filename[-1] not in ('/', '\\'):
# I.e.: for directories we always end with '/' or '\\' so that
# we match exclusions such as "**/node_modules/**"
absolute_filename += os.path.sep
# First include what we want
for include_pattern in include_patterns:
if glob_matches_path(absolute_filename, include_pattern):
cache[absolute_filename] = True
return True
# Then exclude what we don't want
for exclude_pattern in exclude_patterns:
if glob_matches_path(absolute_filename, exclude_pattern):
cache[absolute_filename] = False
return False
# By default track all directories not excluded.
cache[absolute_filename] = True
return True
def accept_file(absolute_filename, cache={}):
try:
return cache[absolute_filename]
except:
# First include what we want
for include_pattern in include_patterns:
if glob_matches_path(absolute_filename, include_pattern):
cache[absolute_filename] = True
return True
# Then exclude what we don't want
for exclude_pattern in exclude_patterns:
if glob_matches_path(absolute_filename, exclude_pattern):
cache[absolute_filename] = False
return False
# By default don't track files not included.
cache[absolute_filename] = False
return False
self._fsnotify_thread = FSNotifyThread(self, PyDevdAPI(), watch_dirs)
watcher = self._fsnotify_thread.watcher
watcher.accept_directory = accept_directory
watcher.accept_file = accept_file
watcher.target_time_for_single_scan = poll_target_time
watcher.target_time_for_notification = poll_target_time
self._fsnotify_thread.start()
except:
pydev_log.exception('Error setting up auto-reload.')
def get_arg_ppid(self):
try:
setup = SetupHolder.setup
if setup:
return int(setup.get('ppid', 0))
except:
pydev_log.exception('Error getting ppid.')
return 0
def wait_for_ready_to_run(self):
while not self.ready_to_run:
# busy wait until we receive run command
self.process_internal_commands()
self._py_db_command_thread_event.clear()
self._py_db_command_thread_event.wait(0.1)
def on_initialize(self):
'''
Note: only called when using the DAP (Debug Adapter Protocol).
'''
self._on_configuration_done_event.clear()
def on_configuration_done(self):
'''
Note: only called when using the DAP (Debug Adapter Protocol).
'''
self._on_configuration_done_event.set()
self._py_db_command_thread_event.set()
def is_attached(self):
return self._on_configuration_done_event.is_set()
def on_disconnect(self):
'''
Note: only called when using the DAP (Debug Adapter Protocol).
'''
self.authentication.logout()
self._on_configuration_done_event.clear()
def set_ignore_system_exit_codes(self, ignore_system_exit_codes):
assert isinstance(ignore_system_exit_codes, (list, tuple, set))
self._ignore_system_exit_codes = set(ignore_system_exit_codes)
def ignore_system_exit_code(self, system_exit_exc):
if hasattr(system_exit_exc, 'code'):
return system_exit_exc.code in self._ignore_system_exit_codes
else:
return system_exit_exc in self._ignore_system_exit_codes
def block_until_configuration_done(self, cancel=None):
if cancel is None:
cancel = NULL
while not cancel.is_set():
if self._on_configuration_done_event.is_set():
cancel.set() # Set cancel to prevent reuse
return
self.process_internal_commands()
self._py_db_command_thread_event.clear()
self._py_db_command_thread_event.wait(1 / 15.)
def add_fake_frame(self, thread_id, frame_id, frame):
self.suspended_frames_manager.add_fake_frame(thread_id, frame_id, frame)
def handle_breakpoint_condition(self, info, pybreakpoint, new_frame):
condition = pybreakpoint.condition
try:
if pybreakpoint.handle_hit_condition(new_frame):
return True
if not condition:
return False
return eval(condition, new_frame.f_globals, new_frame.f_locals)
except Exception as e:
if IS_PY2:
# Must be bytes on py2.
if isinstance(condition, unicode): # noqa
condition = condition.encode('utf-8')
if not isinstance(e, self.skip_print_breakpoint_exception):
sys.stderr.write('Error while evaluating expression: %s\n' % (condition,))
etype, value, tb = sys.exc_info()
traceback.print_exception(etype, value, tb.tb_next)
if not isinstance(e, self.skip_suspend_on_breakpoint_exception):
try:
# add exception_type and stacktrace into thread additional info
etype, value, tb = sys.exc_info()
error = ''.join(traceback.format_exception_only(etype, value))
stack = traceback.extract_stack(f=tb.tb_frame.f_back)
# On self.set_suspend(thread, CMD_SET_BREAK) this info will be
# sent to the client.
info.conditional_breakpoint_exception = \
('Condition:\n' + condition + '\n\nError:\n' + error, stack)
except:
pydev_log.exception()
return True
return False
finally:
etype, value, tb = None, None, None
def handle_breakpoint_expression(self, pybreakpoint, info, new_frame):
try:
try:
val = eval(pybreakpoint.expression, new_frame.f_globals, new_frame.f_locals)
except:
val = sys.exc_info()[1]
finally:
if val is not None:
info.pydev_message = str(val)
def _internal_get_file_type(self, abs_real_path_and_basename):
basename = abs_real_path_and_basename[-1]
if (
basename.startswith(IGNORE_BASENAMES_STARTING_WITH) or
abs_real_path_and_basename[0].startswith(IGNORE_BASENAMES_STARTING_WITH)
):
# Note: these are the files that are completely ignored (they aren't shown to the user
# as user nor library code as it's usually just noise in the frame stack).
return self.PYDEV_FILE
file_type = self._dont_trace_get_file_type(basename)
if file_type is not None:
return file_type
if basename.startswith('__init__.py'):
# i.e.: ignore the __init__ files inside pydevd (the other
# files are ignored just by their name).
abs_path = abs_real_path_and_basename[0]
i = max(abs_path.rfind('/'), abs_path.rfind('\\'))
if i:
abs_path = abs_path[0:i]
i = max(abs_path.rfind('/'), abs_path.rfind('\\'))
if i:
dirname = abs_path[i + 1:]
# At this point, something as:
# "my_path\_pydev_runfiles\__init__.py"
# is now "_pydev_runfiles".
return self._dont_trace_dirs_get_file_type(dirname)
return None
def dont_trace_external_files(self, abs_path):
'''
:param abs_path:
The result from get_abs_path_real_path_and_base_from_file or
get_abs_path_real_path_and_base_from_frame.
:return
True :
If files should NOT be traced.
False:
If files should be traced.
'''
# By default all external files are traced. Note: this function is expected to
# be changed for another function in PyDevdAPI.set_dont_trace_start_end_patterns.
return False
def get_file_type(self, frame, abs_real_path_and_basename=None, _cache_file_type=_CACHE_FILE_TYPE):
'''
:param abs_real_path_and_basename:
The result from get_abs_path_real_path_and_base_from_file or
get_abs_path_real_path_and_base_from_frame.
:return
_pydevd_bundle.pydevd_dont_trace_files.PYDEV_FILE:
If it's a file internal to the debugger which shouldn't be
traced nor shown to the user.
_pydevd_bundle.pydevd_dont_trace_files.LIB_FILE:
If it's a file in a library which shouldn't be traced.
None:
If it's a regular user file which should be traced.
'''
if abs_real_path_and_basename is None:
try:
# Make fast path faster!
abs_real_path_and_basename = NORM_PATHS_AND_BASE_CONTAINER[frame.f_code.co_filename]
except:
abs_real_path_and_basename = get_abs_path_real_path_and_base_from_frame(frame)
# Note 1: we have to take into account that we may have files as '<string>', and that in
# this case the cache key can't rely only on the filename. With the current cache, there's
# still a potential miss if 2 functions which have exactly the same content are compiled
# with '<string>', but in practice as we only separate the one from python -c from the rest
# this shouldn't be a problem in practice.
# Note 2: firstlineno added to make misses faster in the first comparison.
# Note 3: this cache key is repeated in pydevd_frame_evaluator.pyx:get_func_code_info (for
# speedups).
cache_key = (frame.f_code.co_firstlineno, abs_real_path_and_basename[0], frame.f_code)
try:
return _cache_file_type[cache_key]
except:
if abs_real_path_and_basename[0] == '<string>':
# Consider it an untraceable file unless there's no back frame (ignoring
# internal files and runpy.py).
f = frame.f_back
while f is not None:
if (self.get_file_type(f) != self.PYDEV_FILE and
pydevd_file_utils.basename(f.f_code.co_filename) not in ('runpy.py', '<string>')):
# We found some back frame that's not internal, which means we must consider
# this a library file.
# This is done because we only want to trace files as <string> if they don't
# have any back frame (which is the case for python -c ...), for all other
# cases we don't want to trace them because we can't show the source to the
# user (at least for now...).
# Note that we return as a LIB_FILE and not PYDEV_FILE because we still want
# to show it in the stack.
_cache_file_type[cache_key] = LIB_FILE
return LIB_FILE
f = f.f_back
else:
# This is a top-level file (used in python -c), so, trace it as usual... we
# still won't be able to show the sources, but some tests require this to work.
_cache_file_type[cache_key] = None
return None
file_type = self._internal_get_file_type(abs_real_path_and_basename)
if file_type is None:
if self.dont_trace_external_files(abs_real_path_and_basename[0]):
file_type = PYDEV_FILE
_cache_file_type[cache_key] = file_type
return file_type
def is_cache_file_type_empty(self):
return not _CACHE_FILE_TYPE
def get_cache_file_type(self, _cache=_CACHE_FILE_TYPE): # i.e.: Make it local.
return _cache
def get_thread_local_trace_func(self):
try:
thread_trace_func = self._local_thread_trace_func.thread_trace_func
except AttributeError:
thread_trace_func = self.trace_dispatch
return thread_trace_func
def enable_tracing(self, thread_trace_func=None, apply_to_all_threads=False):
'''
Enables tracing.
If in regular mode (tracing), will set the tracing function to the tracing
function for this thread -- by default it's `PyDB.trace_dispatch`, but after
`PyDB.enable_tracing` is called with a `thread_trace_func`, the given function will
be the default for the given thread.
:param bool apply_to_all_threads:
If True we'll set the tracing function in all threads, not only in the current thread.
If False only the tracing for the current function should be changed.
In general apply_to_all_threads should only be true if this is the first time
this function is called on a multi-threaded program (either programmatically or attach
to pid).
'''
if self.frame_eval_func is not None:
self.frame_eval_func()
pydevd_tracing.SetTrace(self.dummy_trace_dispatch)
if IS_CPYTHON and apply_to_all_threads:
pydevd_tracing.set_trace_to_threads(self.dummy_trace_dispatch)
return
if apply_to_all_threads:
# If applying to all threads, don't use the local thread trace function.
assert thread_trace_func is not None
else:
if thread_trace_func is None:
thread_trace_func = self.get_thread_local_trace_func()
else:
self._local_thread_trace_func.thread_trace_func = thread_trace_func
pydevd_tracing.SetTrace(thread_trace_func)
if IS_CPYTHON and apply_to_all_threads:
pydevd_tracing.set_trace_to_threads(thread_trace_func)
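# Usage sketch (illustrative only, mirrors how this file itself calls it): the first connection
# on a multi-threaded program applies tracing everywhere, later calls touch only the current
# thread --
#
#   py_db.enable_tracing(py_db.trace_dispatch, apply_to_all_threads=True)  # first setup
#   ...
#   py_db.enable_tracing()  # afterwards: current thread only, using the thread-local trace func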
def disable_tracing(self):
pydevd_tracing.SetTrace(None)
def on_breakpoints_changed(self, removed=False):
'''
When breakpoints change, we have to re-evaluate all the assumptions we've made so far.
'''
if not self.ready_to_run:
# No need to do anything if we're still not running.
return
self.mtime += 1
if not removed:
# When removing breakpoints we can leave tracing as was, but if a breakpoint was added
# we have to reset the tracing for the existing functions to be re-evaluated.
self.set_tracing_for_untraced_contexts()
def set_tracing_for_untraced_contexts(self):
# Enable the tracing for existing threads (because there may be frames being executed that
# are currently untraced).
if IS_CPYTHON:
# Note: use sys._current_frames instead of threading.enumerate() because this way
# we also see C/C++ threads, not only the ones visible to the threading module.
tid_to_frame = sys._current_frames()
ignore_thread_ids = set(
t.ident for t in threadingEnumerate()
if getattr(t, 'is_pydev_daemon_thread', False) or getattr(t, 'pydev_do_not_trace', False)
)
for thread_id, frame in tid_to_frame.items():
if thread_id not in ignore_thread_ids:
self.set_trace_for_frame_and_parents(frame)
else:
try:
threads = threadingEnumerate()
for t in threads:
if getattr(t, 'is_pydev_daemon_thread', False) or getattr(t, 'pydev_do_not_trace', False):
continue
additional_info = set_additional_thread_info(t)
frame = additional_info.get_topmost_frame(t)
try:
if frame is not None:
self.set_trace_for_frame_and_parents(frame)
finally:
frame = None
finally:
frame = None
t = None
threads = None
additional_info = None
@property
def multi_threads_single_notification(self):
return self._threads_suspended_single_notification.multi_threads_single_notification
@multi_threads_single_notification.setter
def multi_threads_single_notification(self, notify):
self._threads_suspended_single_notification.multi_threads_single_notification = notify
@property
def threads_suspended_single_notification(self):
return self._threads_suspended_single_notification
def get_plugin_lazy_init(self):
if self.plugin is None and SUPPORT_PLUGINS:
self.plugin = PluginManager(self)
return self.plugin
def in_project_scope(self, frame, absolute_filename=None):
'''
Note: in general this method should not be used (apply_files_filter should be used
in most cases as it also handles the project scope check).
:param frame:
The frame we want to check.
:param absolute_filename:
Must be the result from get_abs_path_real_path_and_base_from_frame(frame)[0] (can
be used to speed this function a bit if it's already available to the caller, but
in general it's not needed).
'''
try:
if absolute_filename is None:
try:
# Make fast path faster!
abs_real_path_and_basename = NORM_PATHS_AND_BASE_CONTAINER[frame.f_code.co_filename]
except:
abs_real_path_and_basename = get_abs_path_real_path_and_base_from_frame(frame)
absolute_filename = abs_real_path_and_basename[0]
cache_key = (frame.f_code.co_firstlineno, absolute_filename, frame.f_code)
return self._in_project_scope_cache[cache_key]
except KeyError:
cache = self._in_project_scope_cache
try:
abs_real_path_and_basename # If we've gotten it previously, use it again.
except NameError:
abs_real_path_and_basename = get_abs_path_real_path_and_base_from_frame(frame)
# pydevd files are never considered to be in the project scope.
file_type = self.get_file_type(frame, abs_real_path_and_basename)
if file_type == self.PYDEV_FILE:
cache[cache_key] = False
elif absolute_filename == '<string>':
# Special handling for '<string>'
if file_type == self.LIB_FILE:
cache[cache_key] = False
else:
cache[cache_key] = True
elif self.source_mapping.has_mapping_entry(absolute_filename):
cache[cache_key] = True
else:
cache[cache_key] = self._files_filtering.in_project_roots(absolute_filename)
return cache[cache_key]
def in_project_roots_filename_uncached(self, absolute_filename):
return self._files_filtering.in_project_roots(absolute_filename)
def _clear_filters_caches(self):
self._in_project_scope_cache.clear()
self._exclude_by_filter_cache.clear()
self._apply_filter_cache.clear()
self._exclude_filters_enabled = self._files_filtering.use_exclude_filters()
self._is_libraries_filter_enabled = self._files_filtering.use_libraries_filter()
self.is_files_filter_enabled = self._exclude_filters_enabled or self._is_libraries_filter_enabled
def clear_dont_trace_start_end_patterns_caches(self):
# When start/end patterns are changed we must clear all caches which would be
# affected by a change in get_file_type() and reset the tracing function
# as places which were traced may no longer need to be traced and vice-versa.
self.on_breakpoints_changed()
_CACHE_FILE_TYPE.clear()
self._clear_filters_caches()
self._clear_skip_caches()
def _exclude_by_filter(self, frame, absolute_filename):
'''
:return: True if it should be excluded, False if it should be included and None
if no rule matched the given file.
:note: it'll be normalized as needed inside of this method.
'''
cache_key = (absolute_filename, frame.f_code.co_name, frame.f_code.co_firstlineno)
try:
return self._exclude_by_filter_cache[cache_key]
except KeyError:
cache = self._exclude_by_filter_cache
# pydevd files are always filtered out
if self.get_file_type(frame) == self.PYDEV_FILE:
cache[cache_key] = True
else:
module_name = None
if self._files_filtering.require_module:
module_name = frame.f_globals.get('__name__', '')
cache[cache_key] = self._files_filtering.exclude_by_filter(absolute_filename, module_name)
return cache[cache_key]
def apply_files_filter(self, frame, original_filename, force_check_project_scope):
'''
Should only be called if `self.is_files_filter_enabled == True` or `force_check_project_scope == True`.
Note that it covers both the filter by specific path includes/excludes and
the check which filters out libraries when not in the project scope.
:param original_filename:
Note can either be the original filename or the absolute version of that filename.
:param force_check_project_scope:
Check that the file is in the project scope even if the global setting
is off.
:return bool:
True if it should be excluded when stepping and False if it should be
included.
'''
cache_key = (frame.f_code.co_firstlineno, original_filename, force_check_project_scope, frame.f_code)
try:
return self._apply_filter_cache[cache_key]
except KeyError:
if self.plugin is not None and (self.has_plugin_line_breaks or self.has_plugin_exception_breaks):
# If it's explicitly needed by some plugin, we can't skip it.
if not self.plugin.can_skip(self, frame):
pydev_log.debug_once('File traced (included by plugins): %s', original_filename)
self._apply_filter_cache[cache_key] = False
return False
if self._exclude_filters_enabled:
absolute_filename = pydevd_file_utils.absolute_path(original_filename)
exclude_by_filter = self._exclude_by_filter(frame, absolute_filename)
if exclude_by_filter is not None:
if exclude_by_filter:
# ignore files matching stepping filters
pydev_log.debug_once('File not traced (excluded by filters): %s', original_filename)
self._apply_filter_cache[cache_key] = True
return True
else:
pydev_log.debug_once('File traced (explicitly included by filters): %s', original_filename)
self._apply_filter_cache[cache_key] = False
return False
if (self._is_libraries_filter_enabled or force_check_project_scope) and not self.in_project_scope(frame):
# ignore library files while stepping
self._apply_filter_cache[cache_key] = True
if force_check_project_scope:
pydev_log.debug_once('File not traced (not in project - force_check_project_scope): %s', original_filename)
else:
pydev_log.debug_once('File not traced (not in project): %s', original_filename)
return True
if force_check_project_scope:
pydev_log.debug_once('File traced: %s (force_check_project_scope)', original_filename)
else:
pydev_log.debug_once('File traced: %s', original_filename)
self._apply_filter_cache[cache_key] = False
return False
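# Summary of the decision order above (reference only, mirrors the code): plugin requirements
# win first, then explicit include/exclude filters, then the project-scope (library) check;
# anything left over is traced. Illustrative call (hypothetical frame variable):
#
#   excluded = py_db.apply_files_filter(frame, frame.f_code.co_filename,
#                                       force_check_project_scope=False)
#   # True  -> skip this frame while stepping
#   # False -> step into / show this frame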
def exclude_exception_by_filter(self, exception_breakpoint, trace):
if not exception_breakpoint.ignore_libraries and not self._exclude_filters_enabled:
return False
if trace is None:
return True
ignore_libraries = exception_breakpoint.ignore_libraries
exclude_filters_enabled = self._exclude_filters_enabled
if (ignore_libraries and not self.in_project_scope(trace.tb_frame)) \
or (exclude_filters_enabled and self._exclude_by_filter(
trace.tb_frame,
pydevd_file_utils.absolute_path(trace.tb_frame.f_code.co_filename))):
return True
return False
def set_project_roots(self, project_roots):
self._files_filtering.set_project_roots(project_roots)
self._clear_skip_caches()
self._clear_filters_caches()
def set_exclude_filters(self, exclude_filters):
self._files_filtering.set_exclude_filters(exclude_filters)
self._clear_skip_caches()
self._clear_filters_caches()
def set_use_libraries_filter(self, use_libraries_filter):
self._files_filtering.set_use_libraries_filter(use_libraries_filter)
self._clear_skip_caches()
self._clear_filters_caches()
def get_use_libraries_filter(self):
return self._files_filtering.use_libraries_filter()
def get_require_module_for_filters(self):
return self._files_filtering.require_module
def has_user_threads_alive(self):
for t in pydevd_utils.get_non_pydevd_threads():
if isinstance(t, PyDBDaemonThread):
pydev_log.error_once(
'Error in debugger: Found PyDBDaemonThread not marked with is_pydev_daemon_thread=True.\n')
if is_thread_alive(t):
if not t.isDaemon() or hasattr(t, "__pydevd_main_thread"):
return True
return False
def initialize_network(self, sock, terminate_on_socket_close=True):
assert sock is not None
try:
sock.settimeout(None) # infinite, no timeouts from now on - jython does not have it
except:
pass
curr_reader = getattr(self, 'reader', None)
curr_writer = getattr(self, 'writer', None)
if curr_reader:
curr_reader.do_kill_pydev_thread()
if curr_writer:
curr_writer.do_kill_pydev_thread()
self.writer = WriterThread(sock, self, terminate_on_socket_close=terminate_on_socket_close)
self.reader = ReaderThread(
sock,
self,
PyDevJsonCommandProcessor=PyDevJsonCommandProcessor,
process_net_command=process_net_command,
terminate_on_socket_close=terminate_on_socket_close
)
self.writer.start()
self.reader.start()
time.sleep(0.1) # give threads time to start
def connect(self, host, port):
if host:
s = start_client(host, port)
else:
s = start_server(port)
self.initialize_network(s)
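# Usage sketch (illustrative only): `host` decides the direction of the connection -- a
# non-empty host makes pydevd connect out to the IDE, an empty/None host makes it listen:
#
#   py_db.connect('127.0.0.1', 5678)  # client mode: connect to an IDE listening on 5678
#   py_db.connect(None, 5678)         # server mode: wait for the IDE to connect to us
#
# Either way, initialize_network() installs fresh reader/writer threads for the new socket.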
def create_wait_for_connection_thread(self):
if self._waiting_for_connection_thread is not None:
raise AssertionError('There is already another thread waiting for a connection.')
self._server_socket_ready_event.clear()
self._waiting_for_connection_thread = self._WaitForConnectionThread(self)
self._waiting_for_connection_thread.start()
def set_server_socket_ready(self):
self._server_socket_ready_event.set()
def wait_for_server_socket_ready(self):
self._server_socket_ready_event.wait()
@property
def dap_messages_listeners(self):
return self._dap_messages_listeners
def add_dap_messages_listener(self, listener):
self._dap_messages_listeners.append(listener)
class _WaitForConnectionThread(PyDBDaemonThread):
def __init__(self, py_db):
PyDBDaemonThread.__init__(self, py_db)
self._server_socket = None
def run(self):
host = SetupHolder.setup['client']
port = SetupHolder.setup['port']
self._server_socket = create_server_socket(host=host, port=port)
self.py_db._server_socket_name = self._server_socket.getsockname()
self.py_db.set_server_socket_ready()
while not self._kill_received:
try:
s = self._server_socket
if s is None:
return
s.listen(1)
new_socket, _addr = s.accept()
if self._kill_received:
pydev_log.info("Connection (from wait_for_attach) accepted but ignored as kill was already received.")
return
pydev_log.info("Connection (from wait_for_attach) accepted.")
reader = getattr(self.py_db, 'reader', None)
if reader is not None:
# This is needed if a new connection is done without the client properly
# sending a disconnect for the previous connection.
api = PyDevdAPI()
api.request_disconnect(self.py_db, resume_threads=False)
self.py_db.initialize_network(new_socket, terminate_on_socket_close=False)
except:
if DebugInfoHolder.DEBUG_TRACE_LEVEL > 0:
pydev_log.exception()
pydev_log.debug("Exiting _WaitForConnectionThread: %s\n", port)
def do_kill_pydev_thread(self):
PyDBDaemonThread.do_kill_pydev_thread(self)
s = self._server_socket
if s is not None:
try:
s.close()
except:
pass
self._server_socket = None
def get_internal_queue(self, thread_id):
""" returns internal command queue for a given thread.
if new queue is created, notify the RDB about it """
if thread_id.startswith('__frame__'):
thread_id = thread_id[thread_id.rfind('|') + 1:]
return self._cmd_queue[thread_id]
def post_method_as_internal_command(self, thread_id, method, *args, **kwargs):
if thread_id == '*':
internal_cmd = InternalThreadCommandForAnyThread(thread_id, method, *args, **kwargs)
else:
internal_cmd = InternalThreadCommand(thread_id, method, *args, **kwargs)
self.post_internal_command(internal_cmd, thread_id)
if thread_id == '*':
# Notify so that the command is handled as soon as possible.
self._py_db_command_thread_event.set()
def post_internal_command(self, int_cmd, thread_id):
""" if thread_id is *, post to the '*' queue"""
queue = self.get_internal_queue(thread_id)
queue.put(int_cmd)
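# Usage sketch (illustrative only; `some_callable` and its arguments are placeholders, not a
# real pydevd API): most callers go through post_method_as_internal_command, which wraps the
# target in an InternalThreadCommand bound to a thread id ('*' means any thread may run it):
#
#   py_db.post_method_as_internal_command('*', some_callable, arg1, arg2)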
def enable_output_redirection(self, redirect_stdout, redirect_stderr):
global _global_redirect_stdout_to_server
global _global_redirect_stderr_to_server
_global_redirect_stdout_to_server = redirect_stdout
_global_redirect_stderr_to_server = redirect_stderr
self.redirect_output = redirect_stdout or redirect_stderr
if _global_redirect_stdout_to_server:
_init_stdout_redirect()
if _global_redirect_stderr_to_server:
_init_stderr_redirect()
def check_output_redirect(self):
global _global_redirect_stdout_to_server
global _global_redirect_stderr_to_server
if _global_redirect_stdout_to_server:
_init_stdout_redirect()
if _global_redirect_stderr_to_server:
_init_stderr_redirect()
def init_matplotlib_in_debug_console(self):
# import hook and patches for matplotlib support in debug console
from _pydev_bundle.pydev_import_hook import import_hook_manager
if is_current_thread_main_thread():
for module in dict_keys(self.mpl_modules_for_patching):
import_hook_manager.add_module_name(module, self.mpl_modules_for_patching.pop(module))
def init_matplotlib_support(self):
# prepare debugger for integration with matplotlib GUI event loop
from pydev_ipython.matplotlibtools import activate_matplotlib, activate_pylab, activate_pyplot, do_enable_gui
# enable_gui_function in activate_matplotlib should be called in main thread. Unlike integrated console,
# in the debug console we have no interpreter instance with exec_queue, but we run this code in the main
# thread and can call it directly.
class _MatplotlibHelper:
_return_control_osc = False
def return_control():
# Some of the input hooks (e.g. Qt4Agg) check return control without doing
# a single operation, so we don't return True on every
# call when the debug hook is in place to allow the GUI to run
_MatplotlibHelper._return_control_osc = not _MatplotlibHelper._return_control_osc
return _MatplotlibHelper._return_control_osc
from pydev_ipython.inputhook import set_return_control_callback
set_return_control_callback(return_control)
self.mpl_modules_for_patching = {"matplotlib": lambda: activate_matplotlib(do_enable_gui),
"matplotlib.pyplot": activate_pyplot,
"pylab": activate_pylab }
def _activate_mpl_if_needed(self):
if len(self.mpl_modules_for_patching) > 0:
if is_current_thread_main_thread(): # Note that we call only in the main thread.
for module in dict_keys(self.mpl_modules_for_patching):
if module in sys.modules:
activate_function = self.mpl_modules_for_patching.pop(module, None)
if activate_function is not None:
activate_function()
self.mpl_in_use = True
def _call_mpl_hook(self):
try:
from pydev_ipython.inputhook import get_inputhook
inputhook = get_inputhook()
if inputhook:
inputhook()
except:
pass
def notify_skipped_step_in_because_of_filters(self, frame):
self.writer.add_command(self.cmd_factory.make_skipped_step_in_because_of_filters(self, frame))
def notify_thread_created(self, thread_id, thread, use_lock=True):
if self.writer is None:
# Protect against threads being created before the communication structure is in place
# (note that they will appear later on anyway as pydevd does reconcile live/dead threads
# when processing internal commands, although it may take longer; in general this should
# not be common as it's expected that the debugger is live before other threads are created).
return
with self._lock_running_thread_ids if use_lock else NULL:
if not self._enable_thread_notifications:
return
if thread_id in self._running_thread_ids:
return
additional_info = set_additional_thread_info(thread)
if additional_info.pydev_notify_kill:
# After we notify that it should be killed, make sure we don't notify that it's alive (in a race
# condition this could happen as we may notify before the thread is stopped internally).
return
self._running_thread_ids[thread_id] = thread
self.writer.add_command(self.cmd_factory.make_thread_created_message(thread))
def notify_thread_not_alive(self, thread_id, use_lock=True):
""" if thread is not alive, cancel trace_dispatch processing """
if self.writer is None:
return
with self._lock_running_thread_ids if use_lock else NULL:
if not self._enable_thread_notifications:
return
thread = self._running_thread_ids.pop(thread_id, None)
if thread is None:
return
additional_info = set_additional_thread_info(thread)
was_notified = additional_info.pydev_notify_kill
if not was_notified:
additional_info.pydev_notify_kill = True
self.writer.add_command(self.cmd_factory.make_thread_killed_message(thread_id))
def set_enable_thread_notifications(self, enable):
with self._lock_running_thread_ids:
if self._enable_thread_notifications != enable:
self._enable_thread_notifications = enable
if enable:
# As it was previously disabled, we have to notify about existing threads again
# (so, clear the cache related to that).
self._running_thread_ids = {}
def process_internal_commands(self):
'''
This function processes internal commands.
'''
# If this method is being called before the debugger is ready to run we should not notify
# about threads and should only process commands sent to all threads.
ready_to_run = self.ready_to_run
dispose = False
with self._main_lock:
program_threads_alive = {}
if ready_to_run:
self.check_output_redirect()
all_threads = threadingEnumerate()
program_threads_dead = []
with self._lock_running_thread_ids:
reset_cache = not self._running_thread_ids
for t in all_threads:
if getattr(t, 'is_pydev_daemon_thread', False):
pass # I.e.: skip the DummyThreads created from pydev daemon threads
elif isinstance(t, PyDBDaemonThread):
pydev_log.error_once('Error in debugger: Found PyDBDaemonThread not marked with is_pydev_daemon_thread=True.')
elif is_thread_alive(t):
if reset_cache:
# Fix multiprocessing debug with breakpoints in both main and child processes
# (https://youtrack.jetbrains.com/issue/PY-17092) When the new process is created, the main
# thread in the new process already has the attribute 'pydevd_id', so the new thread doesn't
# get a new id with its process number and the debugger loses access to both threads.
# Therefore we should update thread_id for every main thread in the new process.
clear_cached_thread_id(t)
thread_id = get_thread_id(t)
program_threads_alive[thread_id] = t
self.notify_thread_created(thread_id, t, use_lock=False)
# Compute and notify about threads which are no longer alive.
thread_ids = list(self._running_thread_ids.keys())
for thread_id in thread_ids:
if thread_id not in program_threads_alive:
program_threads_dead.append(thread_id)
for thread_id in program_threads_dead:
self.notify_thread_not_alive(thread_id, use_lock=False)
cmds_to_execute = []
# Without self._lock_running_thread_ids
if len(program_threads_alive) == 0 and ready_to_run:
dispose = True
else:
# Actually process the commands now (make sure we don't have a lock for _lock_running_thread_ids
# acquired at this point as it could lead to a deadlock if some command evaluated tried to
# create a thread and wait for it -- which would try to notify about it getting that lock).
curr_thread_id = get_current_thread_id(threadingCurrentThread())
if ready_to_run:
process_thread_ids = (curr_thread_id, '*')
else:
process_thread_ids = ('*',)
for thread_id in process_thread_ids:
queue = self.get_internal_queue(thread_id)
# some commands must be processed by the thread itself... if that's the case,
# we will re-add the commands to the queue after executing.
cmds_to_add_back = []
try:
while True:
int_cmd = queue.get(False)
if not self.mpl_hooks_in_debug_console and isinstance(int_cmd, InternalConsoleExec):
# add import hooks for matplotlib patches only if the debug console was started
try:
self.init_matplotlib_in_debug_console()
self.mpl_in_use = True
except:
pydev_log.debug("Matplotlib support in debug console failed", traceback.format_exc())
self.mpl_hooks_in_debug_console = True
if int_cmd.can_be_executed_by(curr_thread_id):
cmds_to_execute.append(int_cmd)
else:
pydev_log.verbose("NOT processing internal command: %s ", int_cmd)
cmds_to_add_back.append(int_cmd)
except _queue.Empty: # @UndefinedVariable
# this is how we exit
for int_cmd in cmds_to_add_back:
queue.put(int_cmd)
if dispose:
# Note: must be called without the main lock to avoid deadlocks.
self.dispose_and_kill_all_pydevd_threads()
else:
# Actually execute the commands without the main lock!
for int_cmd in cmds_to_execute:
pydev_log.verbose("processing internal command: %s", int_cmd)
try:
int_cmd.do_it(self)
except:
pydev_log.exception('Error processing internal command.')
def consolidate_breakpoints(self, canonical_normalized_filename, id_to_breakpoint, breakpoints):
break_dict = {}
for _breakpoint_id, pybreakpoint in dict_iter_items(id_to_breakpoint):
break_dict[pybreakpoint.line] = pybreakpoint
breakpoints[canonical_normalized_filename] = break_dict
self._clear_skip_caches()
def _clear_skip_caches(self):
global_cache_skips.clear()
global_cache_frame_skips.clear()
def add_break_on_exception(
self,
exception,
condition,
expression,
notify_on_handled_exceptions,
notify_on_unhandled_exceptions,
notify_on_user_unhandled_exceptions,
notify_on_first_raise_only,
ignore_libraries=False
):
try:
eb = ExceptionBreakpoint(
exception,
condition,
expression,
notify_on_handled_exceptions,
notify_on_unhandled_exceptions,
notify_on_user_unhandled_exceptions,
notify_on_first_raise_only,
ignore_libraries
)
except ImportError:
pydev_log.critical("Error unable to add break on exception for: %s (exception could not be imported).", exception)
return None
if eb.notify_on_unhandled_exceptions:
cp = self.break_on_uncaught_exceptions.copy()
cp[exception] = eb
if DebugInfoHolder.DEBUG_TRACE_BREAKPOINTS > 0:
pydev_log.critical("Exceptions to hook on terminate: %s.", cp)
self.break_on_uncaught_exceptions = cp
if eb.notify_on_handled_exceptions:
cp = self.break_on_caught_exceptions.copy()
cp[exception] = eb
if DebugInfoHolder.DEBUG_TRACE_BREAKPOINTS > 0:
pydev_log.critical("Exceptions to hook always: %s.", cp)
self.break_on_caught_exceptions = cp
if eb.notify_on_user_unhandled_exceptions:
cp = self.break_on_user_uncaught_exceptions.copy()
cp[exception] = eb
if DebugInfoHolder.DEBUG_TRACE_BREAKPOINTS > 0:
pydev_log.critical("Exceptions to hook on user uncaught code: %s.", cp)
self.break_on_user_uncaught_exceptions = cp
return eb
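# Note (summary of the branches above): a single ExceptionBreakpoint may be registered in up to
# three dictionaries -- break_on_uncaught_exceptions, break_on_caught_exceptions and
# break_on_user_uncaught_exceptions -- depending on which notify_* flags were passed.
# Illustrative call (argument values are examples only):
#
#   eb = py_db.add_break_on_exception(
#       'ValueError', condition=None, expression=None,
#       notify_on_handled_exceptions=1, notify_on_unhandled_exceptions=1,
#       notify_on_user_unhandled_exceptions=0, notify_on_first_raise_only=1)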
def set_suspend(self, thread, stop_reason, suspend_other_threads=False, is_pause=False, original_step_cmd=-1):
'''
:param thread:
The thread which should be suspended.
:param stop_reason:
Reason why the thread was suspended.
:param suspend_other_threads:
Whether to force other threads to be suspended (i.e.: when hitting a breakpoint
with a suspend all threads policy).
:param is_pause:
If this is a pause to suspend all threads, any thread can be considered as the 'main'
thread paused.
:param original_step_cmd:
If given we may change the stop reason to this.
'''
self._threads_suspended_single_notification.increment_suspend_time()
if is_pause:
self._threads_suspended_single_notification.on_pause()
info = mark_thread_suspended(thread, stop_reason, original_step_cmd=original_step_cmd)
if is_pause:
# Must set tracing after setting the state to suspend.
frame = info.get_topmost_frame(thread)
if frame is not None:
try:
self.set_trace_for_frame_and_parents(frame)
finally:
frame = None
# If conditional breakpoint raises any exception during evaluation send the details to the client.
if stop_reason == CMD_SET_BREAK and info.conditional_breakpoint_exception is not None:
conditional_breakpoint_exception_tuple = info.conditional_breakpoint_exception
info.conditional_breakpoint_exception = None
self._send_breakpoint_condition_exception(thread, conditional_breakpoint_exception_tuple)
if not suspend_other_threads and self.multi_threads_single_notification:
# In the mode which gives a single notification when all threads are
# stopped, stop all threads whenever a set_suspend is issued.
suspend_other_threads = True
if suspend_other_threads:
# Suspend all except the current one (which we're currently suspending already).
suspend_all_threads(self, except_thread=thread)
def _send_breakpoint_condition_exception(self, thread, conditional_breakpoint_exception_tuple):
"""If conditional breakpoint raises an exception during evaluation
send exception details to java
"""
thread_id = get_thread_id(thread)
# conditional_breakpoint_exception_tuple - should contain 2 values (exception_type, stacktrace)
if conditional_breakpoint_exception_tuple and len(conditional_breakpoint_exception_tuple) == 2:
exc_type, stacktrace = conditional_breakpoint_exception_tuple
int_cmd = InternalGetBreakpointException(thread_id, exc_type, stacktrace)
self.post_internal_command(int_cmd, thread_id)
def send_caught_exception_stack(self, thread, arg, curr_frame_id):
"""Sends details on the exception which was caught (and where we stopped) to the java side.
arg is: exception type, description, traceback object
"""
thread_id = get_thread_id(thread)
int_cmd = InternalSendCurrExceptionTrace(thread_id, arg, curr_frame_id)
self.post_internal_command(int_cmd, thread_id)
def send_caught_exception_stack_proceeded(self, thread):
"""Sends that some thread was resumed and is no longer showing an exception trace.
"""
thread_id = get_thread_id(thread)
int_cmd = InternalSendCurrExceptionTraceProceeded(thread_id)
self.post_internal_command(int_cmd, thread_id)
self.process_internal_commands()
def send_process_created_message(self):
"""Sends a message that a new process has been created.
"""
if self.writer is None or self.cmd_factory is None:
return
cmd = self.cmd_factory.make_process_created_message()
self.writer.add_command(cmd)
def set_next_statement(self, frame, event, func_name, next_line):
stop = False
response_msg = ""
old_line = frame.f_lineno
if event == 'line' or event == 'exception':
# If we're already in the correct context, we have to stop it now, because we can act only on
# line events -- if a return was the next statement it wouldn't work (so, we have this code
# repeated at pydevd_frame).
curr_func_name = frame.f_code.co_name
# global context is set with an empty name
if curr_func_name in ('?', '<module>'):
curr_func_name = ''
if func_name == '*' or curr_func_name == func_name:
line = next_line
frame.f_trace = self.trace_dispatch
frame.f_lineno = line
stop = True
else:
response_msg = "jump is available only within the bottom frame"
return stop, old_line, response_msg
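# Usage sketch (illustrative only): callers inspect the returned tuple to decide whether the
# jump happened and what to report back to the client:
#
#   stop, old_line, response_msg = py_db.set_next_statement(frame, 'line', func_name, next_line)
#   # stop=True  -> frame.f_lineno was changed and the thread should stay suspended at the new line
#   # stop=False -> jump rejected (wrong frame/event); response_msg explains why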
def cancel_async_evaluation(self, thread_id, frame_id):
with self._main_lock:
try:
all_threads = threadingEnumerate()
for t in all_threads:
if getattr(t, 'is_pydev_daemon_thread', False) and hasattr(t, 'cancel_event') and t.thread_id == thread_id and \
t.frame_id == frame_id:
t.cancel_event.set()
except:
pydev_log.exception()
def find_frame(self, thread_id, frame_id):
""" returns a frame on the thread that has a given frame_id """
return self.suspended_frames_manager.find_frame(thread_id, frame_id)
def do_wait_suspend(self, thread, frame, event, arg, exception_type=None): # @UnusedVariable
""" busy waits until the thread state changes to RUN
it expects thread's state as attributes of the thread.
Upon running, processes any outstanding Stepping commands.
:param exception_type:
If pausing due to an exception, its type.
"""
if USE_CUSTOM_SYS_CURRENT_FRAMES_MAP:
constructed_tid_to_last_frame[thread.ident] = sys._getframe()
self.process_internal_commands()
thread_id = get_current_thread_id(thread)
# print('do_wait_suspend %s %s %s %s %s %s (%s)' % (frame.f_lineno, frame.f_code.co_name, frame.f_code.co_filename, event, arg, constant_to_str(thread.additional_info.pydev_step_cmd), constant_to_str(thread.additional_info.pydev_original_step_cmd)))
# Send the suspend message
message = thread.additional_info.pydev_message
suspend_type = thread.additional_info.trace_suspend_type
thread.additional_info.trace_suspend_type = 'trace' # Reset to trace mode for next call.
stop_reason = thread.stop_reason
frames_list = None
if arg is not None and event == 'exception':
# arg must be the exception info (tuple(exc_type, exc, traceback))
exc_type, exc_desc, trace_obj = arg
if trace_obj is not None:
frames_list = pydevd_frame_utils.create_frames_list_from_traceback(trace_obj, frame, exc_type, exc_desc, exception_type=exception_type)
if frames_list is None:
frames_list = pydevd_frame_utils.create_frames_list_from_frame(frame)
if DebugInfoHolder.DEBUG_TRACE_LEVEL > 2:
pydev_log.debug(
'PyDB.do_wait_suspend\nname: %s (line: %s)\n file: %s\n event: %s\n arg: %s\n step: %s (original step: %s)\n thread: %s, thread id: %s, id(thread): %s',
frame.f_code.co_name,
frame.f_lineno,
frame.f_code.co_filename,
event,
arg,
constant_to_str(thread.additional_info.pydev_step_cmd),
constant_to_str(thread.additional_info.pydev_original_step_cmd),
thread,
thread_id,
id(thread),
)
for f in frames_list:
pydev_log.debug(' Stack: %s, %s, %s', f.f_code.co_filename, f.f_code.co_name, f.f_lineno)
with self.suspended_frames_manager.track_frames(self) as frames_tracker:
frames_tracker.track(thread_id, frames_list)
cmd = frames_tracker.create_thread_suspend_command(thread_id, stop_reason, message, suspend_type)
self.writer.add_command(cmd)
with CustomFramesContainer.custom_frames_lock: # @UndefinedVariable
from_this_thread = []
for frame_custom_thread_id, custom_frame in dict_iter_items(CustomFramesContainer.custom_frames):
if custom_frame.thread_id == thread.ident:
frames_tracker.track(thread_id, pydevd_frame_utils.create_frames_list_from_frame(custom_frame.frame), frame_custom_thread_id=frame_custom_thread_id)
# print('Frame created as thread: %s' % (frame_custom_thread_id,))
self.writer.add_command(self.cmd_factory.make_custom_frame_created_message(
frame_custom_thread_id, custom_frame.name))
self.writer.add_command(
frames_tracker.create_thread_suspend_command(frame_custom_thread_id, CMD_THREAD_SUSPEND, "", suspend_type))
from_this_thread.append(frame_custom_thread_id)
with self._threads_suspended_single_notification.notify_thread_suspended(thread_id, stop_reason):
keep_suspended = self._do_wait_suspend(thread, frame, event, arg, suspend_type, from_this_thread, frames_tracker)
frames_list = None
if keep_suspended:
# This means that we should pause again after a set next statement.
self._threads_suspended_single_notification.increment_suspend_time()
self.do_wait_suspend(thread, frame, event, arg, exception_type)
if DebugInfoHolder.DEBUG_TRACE_LEVEL > 2:
pydev_log.debug('Leaving PyDB.do_wait_suspend: %s (%s) %s', thread, thread_id, id(thread))
def _do_wait_suspend(self, thread, frame, event, arg, suspend_type, from_this_thread, frames_tracker):
info = thread.additional_info
info.step_in_initial_location = None
keep_suspended = False
with self._main_lock: # Use lock to check if suspended state changed
activate_matplotlib = info.pydev_state == STATE_SUSPEND and not self.pydb_disposed
in_main_thread = is_current_thread_main_thread()
if activate_matplotlib and in_main_thread:
# before every stop check if matplotlib modules were imported inside script code
self._activate_mpl_if_needed()
while True:
with self._main_lock: # Use lock to check if suspended state changed
if info.pydev_state != STATE_SUSPEND or (self.pydb_disposed and not self.terminate_requested):
# Note: we can't exit here if terminate was requested while a breakpoint was hit.
break
if in_main_thread and self.mpl_in_use:
# call input hooks only if matplotlib is in use
self._call_mpl_hook()
self.process_internal_commands()
time.sleep(0.01)
self.cancel_async_evaluation(get_current_thread_id(thread), str(id(frame)))
# process any stepping instructions
if info.pydev_step_cmd in (CMD_STEP_INTO, CMD_STEP_INTO_MY_CODE):
info.step_in_initial_location = (frame, frame.f_lineno)
if frame.f_code.co_flags & 0x80: # CO_COROUTINE = 0x80
# When in a coroutine we switch to CMD_STEP_INTO_COROUTINE.
info.pydev_step_cmd = CMD_STEP_INTO_COROUTINE
info.pydev_step_stop = frame
self.set_trace_for_frame_and_parents(frame)
else:
info.pydev_step_stop = None
self.set_trace_for_frame_and_parents(frame)
elif info.pydev_step_cmd in (CMD_STEP_OVER, CMD_STEP_OVER_MY_CODE, CMD_SMART_STEP_INTO):
info.pydev_step_stop = frame
self.set_trace_for_frame_and_parents(frame)
elif info.pydev_step_cmd == CMD_RUN_TO_LINE or info.pydev_step_cmd == CMD_SET_NEXT_STATEMENT:
info.pydev_step_stop = None
self.set_trace_for_frame_and_parents(frame)
stop = False
response_msg = ""
try:
stop, _old_line, response_msg = self.set_next_statement(frame, event, info.pydev_func_name, info.pydev_next_line)
except ValueError as e:
response_msg = "%s" % e
finally:
seq = info.pydev_message
cmd = self.cmd_factory.make_set_next_stmnt_status_message(seq, stop, response_msg)
self.writer.add_command(cmd)
info.pydev_message = ''
if stop:
# Uninstall the current frames tracker before running it.
frames_tracker.untrack_all()
cmd = self.cmd_factory.make_thread_run_message(get_current_thread_id(thread), info.pydev_step_cmd)
self.writer.add_command(cmd)
info.pydev_state = STATE_SUSPEND
thread.stop_reason = CMD_SET_NEXT_STATEMENT
keep_suspended = True
else:
# Set next did not work...
info.pydev_original_step_cmd = -1
info.pydev_step_cmd = -1
info.pydev_state = STATE_SUSPEND
thread.stop_reason = CMD_THREAD_SUSPEND
# return to the suspend state and wait for other command (without sending any
# additional notification to the client).
return self._do_wait_suspend(thread, frame, event, arg, suspend_type, from_this_thread, frames_tracker)
elif info.pydev_step_cmd in (CMD_STEP_RETURN, CMD_STEP_RETURN_MY_CODE):
back_frame = frame.f_back
force_check_project_scope = info.pydev_step_cmd == CMD_STEP_RETURN_MY_CODE
if force_check_project_scope or self.is_files_filter_enabled:
while back_frame is not None:
if self.apply_files_filter(back_frame, back_frame.f_code.co_filename, force_check_project_scope):
frame = back_frame
back_frame = back_frame.f_back
else:
break
if back_frame is not None:
# steps back to the same frame (in a return call it will stop in the 'back frame' for the user)
info.pydev_step_stop = frame
self.set_trace_for_frame_and_parents(frame)
else:
# No back frame?!? -- this happens in jython when we have some frame created from an awt event
# (the previous frame would be the awt event, but that frame is part of 'java', not of 'jython')
# so, if we're doing a step return in this situation, it's the same as just making it run
info.pydev_step_stop = None
info.pydev_original_step_cmd = -1
info.pydev_step_cmd = -1
info.pydev_state = STATE_RUN
del frame
cmd = self.cmd_factory.make_thread_run_message(get_current_thread_id(thread), info.pydev_step_cmd)
self.writer.add_command(cmd)
with CustomFramesContainer.custom_frames_lock:
# The ones that remained on last_running must now be removed.
for frame_id in from_this_thread:
# print('Removing created frame: %s' % (frame_id,))
self.writer.add_command(self.cmd_factory.make_thread_killed_message(frame_id))
return keep_suspended
def do_stop_on_unhandled_exception(self, thread, frame, frames_byid, arg):
pydev_log.debug("We are stopping in unhandled exception.")
try:
add_exception_to_frame(frame, arg)
self.send_caught_exception_stack(thread, arg, id(frame))
try:
self.set_suspend(thread, CMD_ADD_EXCEPTION_BREAK)
self.do_wait_suspend(thread, frame, 'exception', arg, EXCEPTION_TYPE_UNHANDLED)
except:
self.send_caught_exception_stack_proceeded(thread)
except:
pydev_log.exception("We've got an error while stopping in unhandled exception: %s.", arg[0])
finally:
remove_exception_from_frame(frame)
frame = None
def set_trace_for_frame_and_parents(self, frame, **kwargs):
disable = kwargs.pop('disable', False)
assert not kwargs
while frame is not None:
# Don't change the tracing on debugger-related files
file_type = self.get_file_type(frame)
if file_type is None:
if disable:
pydev_log.debug('Disable tracing of frame: %s - %s', frame.f_code.co_filename, frame.f_code.co_name)
if frame.f_trace is not None and frame.f_trace is not NO_FTRACE:
frame.f_trace = NO_FTRACE
elif frame.f_trace is not self.trace_dispatch:
pydev_log.debug('Set tracing of frame: %s - %s', frame.f_code.co_filename, frame.f_code.co_name)
frame.f_trace = self.trace_dispatch
else:
pydev_log.debug('SKIP set tracing of frame: %s - %s', frame.f_code.co_filename, frame.f_code.co_name)
frame = frame.f_back
del frame
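# Note (summary of the loop above): this walks the f_back chain and installs (or, with
# disable=True, removes) the debugger's trace function on every non-pydevd frame, so frames that
# were already running when a breakpoint was added become traceable. Illustrative calls:
#
#   py_db.set_trace_for_frame_and_parents(sys._getframe())      # enable tracing on current stack
#   py_db.set_trace_for_frame_and_parents(frame, disable=True)  # strip tracing instead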
def _create_pydb_command_thread(self):
curr_pydb_command_thread = self.py_db_command_thread
if curr_pydb_command_thread is not None:
curr_pydb_command_thread.do_kill_pydev_thread()
new_pydb_command_thread = self.py_db_command_thread = PyDBCommandThread(self)
new_pydb_command_thread.start()
def _create_check_output_thread(self):
curr_output_checker_thread = self.check_alive_thread
if curr_output_checker_thread is not None:
curr_output_checker_thread.do_kill_pydev_thread()
check_alive_thread = self.check_alive_thread = CheckAliveThread(self)
check_alive_thread.start()
def start_auxiliary_daemon_threads(self):
self._create_pydb_command_thread()
self._create_check_output_thread()
def __wait_for_threads_to_finish(self, timeout):
try:
with self._wait_for_threads_to_finish_called_lock:
wait_for_threads_to_finish_called = self._wait_for_threads_to_finish_called
self._wait_for_threads_to_finish_called = True
if wait_for_threads_to_finish_called:
# Make sure that we wait for the previous call to be finished.
self._wait_for_threads_to_finish_called_event.wait(timeout=timeout)
else:
try:
def get_pydb_daemon_threads_to_wait():
pydb_daemon_threads = set(dict_keys(self.created_pydb_daemon_threads))
pydb_daemon_threads.discard(self.check_alive_thread)
pydb_daemon_threads.discard(threading.current_thread())
return pydb_daemon_threads
pydev_log.debug("PyDB.dispose_and_kill_all_pydevd_threads waiting for pydb daemon threads to finish")
started_at = time.time()
# Note: we wait for all except the check_alive_thread (which is not really a daemon
# thread and it can call this method itself).
while time.time() < started_at + timeout:
if len(get_pydb_daemon_threads_to_wait()) == 0:
break
time.sleep(1 / 10.)
else:
thread_names = [t.getName() for t in get_pydb_daemon_threads_to_wait()]
if thread_names:
pydev_log.debug("The following pydb threads may not have finished correctly: %s",
', '.join(thread_names))
finally:
self._wait_for_threads_to_finish_called_event.set()
except:
pydev_log.exception()
def dispose_and_kill_all_pydevd_threads(self, wait=True, timeout=.5):
'''
When this method is called we finish the debug session, terminate threads
and if this was registered as the global instance, unregister it -- afterwards
it should be possible to create a new instance and set as global to start
a new debug session.
:param bool wait:
If True we'll wait for the threads to be actually finished before proceeding
(based on the available timeout).
Note that this must be thread-safe and if one thread is waiting the other thread should
also wait.
'''
try:
back_frame = sys._getframe().f_back
pydev_log.debug(
'PyDB.dispose_and_kill_all_pydevd_threads (called from: File "%s", line %s, in %s)',
back_frame.f_code.co_filename, back_frame.f_lineno, back_frame.f_code.co_name
)
back_frame = None
with self._disposed_lock:
disposed = self.pydb_disposed
self.pydb_disposed = True
if disposed:
if wait:
pydev_log.debug("PyDB.dispose_and_kill_all_pydevd_threads (already disposed - wait)")
self.__wait_for_threads_to_finish(timeout)
else:
pydev_log.debug("PyDB.dispose_and_kill_all_pydevd_threads (already disposed - no wait)")
return
pydev_log.debug("PyDB.dispose_and_kill_all_pydevd_threads (first call)")
# Wait until a time when there are no commands being processed to kill the threads.
started_at = time.time()
while time.time() < started_at + timeout:
with self._main_lock:
writer = self.writer
if writer is None or writer.empty():
pydev_log.debug("PyDB.dispose_and_kill_all_pydevd_threads no commands being processed.")
break
else:
pydev_log.debug("PyDB.dispose_and_kill_all_pydevd_threads timed out waiting for writer to be empty.")
pydb_daemon_threads = set(dict_keys(self.created_pydb_daemon_threads))
for t in pydb_daemon_threads:
if hasattr(t, 'do_kill_pydev_thread'):
pydev_log.debug("PyDB.dispose_and_kill_all_pydevd_threads killing thread: %s", t)
t.do_kill_pydev_thread()
if wait:
self.__wait_for_threads_to_finish(timeout)
else:
pydev_log.debug("PyDB.dispose_and_kill_all_pydevd_threads: no wait")
py_db = get_global_debugger()
if py_db is self:
set_global_debugger(None)
except:
pydev_log.debug("PyDB.dispose_and_kill_all_pydevd_threads: exception")
try:
if DebugInfoHolder.DEBUG_TRACE_LEVEL >= 3:
pydev_log.exception()
except:
pass
finally:
pydev_log.debug("PyDB.dispose_and_kill_all_pydevd_threads: finished")
def prepare_to_run(self):
''' Shared code to prepare debugging by installing traces and registering threads '''
self.patch_threads()
self.start_auxiliary_daemon_threads()
def patch_threads(self):
try:
# not available in jython!
threading.settrace(self.trace_dispatch) # for all future threads
except:
pass
from _pydev_bundle.pydev_monkey import patch_thread_modules
patch_thread_modules()
def run(self, file, globals=None, locals=None, is_module=False, set_trace=True):
module_name = None
entry_point_fn = ''
if is_module:
# When launching with `python -m <module>`, python automatically adds
# an empty path to the PYTHONPATH which resolves files in the current
# directory, so, depending on how pydevd itself is launched, we may need
# to manually add such an entry to properly resolve modules in the
# current directory (see: https://github.com/Microsoft/ptvsd/issues/1010).
if '' not in sys.path:
sys.path.insert(0, '')
file, _, entry_point_fn = file.partition(':')
module_name = file
filename = get_fullname(file)
if filename is None:
mod_dir = get_package_dir(module_name)
if mod_dir is None:
sys.stderr.write("No module named %s\n" % file)
return
else:
filename = get_fullname("%s.__main__" % module_name)
if filename is None:
sys.stderr.write("No module named %s\n" % file)
return
else:
file = filename
else:
file = filename
mod_dir = os.path.dirname(filename)
main_py = os.path.join(mod_dir, '__main__.py')
main_pyc = os.path.join(mod_dir, '__main__.pyc')
if filename.endswith('__init__.pyc'):
if os.path.exists(main_pyc):
filename = main_pyc
elif os.path.exists(main_py):
filename = main_py
elif filename.endswith('__init__.py'):
if os.path.exists(main_pyc) and not os.path.exists(main_py):
filename = main_pyc
elif os.path.exists(main_py):
filename = main_py
sys.argv[0] = filename
if os.path.isdir(file):
new_target = os.path.join(file, '__main__.py')
if os.path.isfile(new_target):
file = new_target
m = None
if globals is None:
m = save_main_module(file, 'pydevd')
globals = m.__dict__
try:
globals['__builtins__'] = __builtins__
except NameError:
pass # Not there on Jython...
if locals is None:
locals = globals
# Predefined (writable) attributes: __name__ is the module's name;
# __doc__ is the module's documentation string, or None if unavailable;
# __file__ is the pathname of the file from which the module was loaded,
# if it was loaded from a file. The __file__ attribute is not present for
# C modules that are statically linked into the interpreter; for extension modules
# loaded dynamically from a shared library, it is the pathname of the shared library file.
# I think this is an ugly hack, but it works (seems to) for the issue that says that sys.path should be the same in
# debug and run.
if sys.path[0] != '' and m is not None and m.__file__.startswith(sys.path[0]):
# print >> sys.stderr, 'Deleting: ', sys.path[0]
del sys.path[0]
if not is_module:
# now, the local directory has to be added to the pythonpath
# sys.path.insert(0, os.getcwd())
# Changed: it's not the local directory, but the directory of the file launched
# The file being run must be in the pythonpath (even if it was not before)
sys.path.insert(0, os.path.split(os_path_abspath(file))[0])
if set_trace:
self.wait_for_ready_to_run()
# call prepare_to_run when we already have all information about breakpoints
self.prepare_to_run()
t = threadingCurrentThread()
thread_id = get_current_thread_id(t)
if self.thread_analyser is not None:
wrap_threads()
self.thread_analyser.set_start_time(cur_time())
send_concurrency_message("threading_event", 0, t.getName(), thread_id, "thread", "start", file, 1, None, parent=thread_id)
if self.asyncio_analyser is not None:
# we don't have a main thread in the asyncio graph, so we should add a fake event
send_concurrency_message("asyncio_event", 0, "Task", "Task", "thread", "stop", file, 1, frame=None, parent=None)
try:
if INTERACTIVE_MODE_AVAILABLE:
self.init_matplotlib_support()
except:
sys.stderr.write("Matplotlib support in debugger failed\n")
pydev_log.exception()
if hasattr(sys, 'exc_clear'):
# we should clean exception information in Python 2, before user's code execution
sys.exc_clear()
# Notify that the main thread is created.
self.notify_thread_created(thread_id, t)
# Note: important: set the tracing right before calling _exec.
if set_trace:
self.enable_tracing()
return self._exec(is_module, entry_point_fn, module_name, file, globals, locals)
def _exec(self, is_module, entry_point_fn, module_name, file, globals, locals):
'''
This function should have frames tracked by unhandled exceptions (the `_exec` name is important).
'''
if not is_module:
pydev_imports.execfile(file, globals, locals) # execute the script
else:
# treat ':' as a separator between module and entry point function
# if there is no entry point we run the same as with the -m switch. Otherwise we perform
# an import and execute the entry point
if entry_point_fn:
mod = __import__(module_name, level=0, fromlist=[entry_point_fn], globals=globals, locals=locals)
func = getattr(mod, entry_point_fn)
func()
else:
# Run with the -m switch
import runpy
if hasattr(runpy, '_run_module_as_main'):
# Newer versions of Python actually use this when the -m switch is used.
if sys.version_info[:2] <= (2, 6):
runpy._run_module_as_main(module_name, set_argv0=False)
else:
runpy._run_module_as_main(module_name, alter_argv=False)
else:
runpy.run_module(module_name)
return globals
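# Behaviour sketch (illustrative only, placeholder module/function names): when run() is called
# with is_module=True and file='mypkg.tool:main', the branch above is roughly equivalent to
#
#   mod = __import__('mypkg.tool', fromlist=['main'])
#   mod.main()
#
# whereas without an entry point it defers to runpy, i.e. the same semantics as
# `python -m mypkg.tool`.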
def wait_for_commands(self, globals):
self._activate_mpl_if_needed()
thread = threading.currentThread()
from _pydevd_bundle import pydevd_frame_utils
frame = pydevd_frame_utils.Frame(None, -1, pydevd_frame_utils.FCode("Console",
os.path.abspath(os.path.dirname(__file__))), globals, globals)
thread_id = get_current_thread_id(thread)
self.add_fake_frame(thread_id, id(frame), frame)
cmd = self.cmd_factory.make_show_console_message(self, thread_id, frame)
if self.writer is not None:
self.writer.add_command(cmd)
while True:
if self.mpl_in_use:
# call input hooks only if matplotlib is in use
self._call_mpl_hook()
self.process_internal_commands()
time.sleep(0.01)
class IDAPMessagesListener(object):
def before_send(self, message_as_dict):
'''
Called just before a message is sent to the IDE.
:type message_as_dict: dict
'''
def after_receive(self, message_as_dict):
'''
Called just after a message is received from the IDE.
:type message_as_dict: dict
'''
def add_dap_messages_listener(dap_messages_listener):
'''
Adds a listener for the DAP (debug adapter protocol) messages.
:type dap_messages_listener: IDAPMessagesListener
:note: messages from the xml backend are not notified through this API.
:note: the notifications are sent from threads and they are not synchronized (so,
it's possible that a message is sent and received from different threads at the same time).
'''
py_db = get_global_debugger()
if py_db is None:
raise AssertionError('PyDB is still not setup.')
py_db.add_dap_messages_listener(dap_messages_listener)
def send_json_message(msg):
'''
API to send some custom json message.
:param dict|pydevd_schema.BaseSchema msg:
The custom message to be sent.
:return bool:
True if the message was added to the queue to be sent and False otherwise.
'''
py_db = get_global_debugger()
if py_db is None:
return False
writer = py_db.writer
if writer is None:
return False
cmd = NetCommand(-1, 0, msg, is_json=True)
writer.add_command(cmd)
return True
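# Usage sketch (illustrative only; the event name and payload below are made up and not part of
# any protocol): code embedding pydevd can push extra JSON to the IDE once a connection exists:
#
#   import pydevd
#   sent = pydevd.send_json_message({'type': 'event', 'event': 'myCustomEvent', 'body': {'x': 1}})
#   # sent is False when the debugger (or its writer) is not set up yet.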
def set_debug(setup):
setup['DEBUG_RECORD_SOCKET_READS'] = True
setup['DEBUG_TRACE_BREAKPOINTS'] = 1
setup['DEBUG_TRACE_LEVEL'] = 3
def enable_qt_support(qt_support_mode):
from _pydev_bundle import pydev_monkey_qt
pydev_monkey_qt.patch_qt(qt_support_mode)
def start_dump_threads_thread(filename_template, timeout, recurrent):
'''
Helper to dump threads after a timeout.
:param filename_template:
A template filename, such as 'c:/temp/thread_dump_%s.txt', where the %s will
be replaced by the time for the dump.
:param timeout:
The timeout (in seconds) for the dump.
:param recurrent:
If True we'll keep on doing thread dumps.
'''
assert filename_template.count('%s') == 1, \
'Expected one %%s to appear in: %s' % (filename_template,)
def _threads_on_timeout():
try:
while True:
time.sleep(timeout)
filename = filename_template % (time.time(),)
try:
os.makedirs(os.path.dirname(filename))
except Exception:
pass
with open(filename, 'w') as stream:
dump_threads(stream)
if not recurrent:
return
except Exception:
pydev_log.exception()
t = threading.Thread(target=_threads_on_timeout)
mark_as_pydevd_daemon_thread(t)
t.start()
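# Usage sketch (illustrative only, hypothetical path): dump all thread stacks every 30 seconds
# until the process exits:
#
#   start_dump_threads_thread('/tmp/thread_dump_%s.txt', timeout=30, recurrent=True)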
def dump_threads(stream=None):
'''
Helper to dump thread info (default is printing to stderr).
'''
pydevd_utils.dump_threads(stream)
def usage(doExit=0):
sys.stdout.write('Usage:\n')
sys.stdout.write('pydevd.py --port N [(--client hostname) | --server] --file executable [file_options]\n')
if doExit:
sys.exit(0)
def _init_stdout_redirect():
pydevd_io.redirect_stream_to_pydb_io_messages(std='stdout')
def _init_stderr_redirect():
pydevd_io.redirect_stream_to_pydb_io_messages(std='stderr')
def _enable_attach(
address,
dont_trace_start_patterns=(),
dont_trace_end_patterns=(),
patch_multiprocessing=False,
access_token=None,
client_access_token=None,
):
'''
Starts accepting connections at the given host/port. The debugger will not be initialized nor
configured, it'll only start accepting connections (and will have the tracing setup in this
thread).
Meant to be used with the DAP (Debug Adapter Protocol) with _wait_for_attach().
:param address: (host, port)
:type address: tuple(str, int)
'''
host = address[0]
port = int(address[1])
if SetupHolder.setup is not None:
if port != SetupHolder.setup['port']:
raise AssertionError('Unable to listen in port: %s (already listening in port: %s)' % (port, SetupHolder.setup['port']))
settrace(
host=host,
port=port,
suspend=False,
wait_for_ready_to_run=False,
block_until_connected=False,
dont_trace_start_patterns=dont_trace_start_patterns,
dont_trace_end_patterns=dont_trace_end_patterns,
patch_multiprocessing=patch_multiprocessing,
access_token=access_token,
client_access_token=client_access_token,
)
py_db = get_global_debugger()
py_db.wait_for_server_socket_ready()
return py_db._server_socket_name
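# Usage sketch (illustrative only): start listening without blocking the program, then later
# block until a client attaches and finishes its DAP configuration:
#
#   _enable_attach(('127.0.0.1', 5678))
#   ...  # run until the point where the program must wait for the IDE
#   _wait_for_attach()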
def _wait_for_attach(cancel=None):
'''
Meant to be called after _enable_attach() -- the current thread will only unblock after a
connection is in place and the DAP (Debug Adapter Protocol) sends the ConfigurationDone
request.
'''
py_db = get_global_debugger()
if py_db is None:
raise AssertionError('Debugger still not created. Please use _enable_attach() before using _wait_for_attach().')
py_db.block_until_configuration_done(cancel=cancel)
def _is_attached():
'''
Can be called any time to check if the connection was established and the DAP (Debug Adapter Protocol) has sent
the ConfigurationDone request.
'''
py_db = get_global_debugger()
return (py_db is not None) and py_db.is_attached()
#=======================================================================================================================
# settrace
#=======================================================================================================================
def settrace(
host=None,
stdout_to_server=False,
stderr_to_server=False,
port=5678,
suspend=True,
trace_only_current_thread=False,
overwrite_prev_trace=False,
patch_multiprocessing=False,
stop_at_frame=None,
block_until_connected=True,
wait_for_ready_to_run=True,
dont_trace_start_patterns=(),
dont_trace_end_patterns=(),
access_token=None,
client_access_token=None,
notify_stdin=True,
**kwargs
):
'''Sets the tracing function with the pydev debug function and initializes needed facilities.
:param host: the user may specify another host, if the debug server is not in the same machine (default is the local
host)
:param stdout_to_server: when this is true, the stdout is passed to the debug server
:param stderr_to_server: when this is true, the stderr is passed to the debug server
so that they are printed in its console and not in this process console.
:param port: specifies which port to use for communicating with the server (note that the server must be started
in the same port). @note: currently it's hard-coded at 5678 in the client
:param suspend: whether a breakpoint should be emulated as soon as this function is called.
:param trace_only_current_thread: determines if only the current thread will be traced or all current and future
threads will also have the tracing enabled.
:param overwrite_prev_trace: deprecated
:param patch_multiprocessing: if True we'll patch the functions which create new processes so that launched
processes are debugged.
:param stop_at_frame: if passed it'll stop at the given frame, otherwise it'll stop in the function which
called this method.
:param wait_for_ready_to_run: if True settrace will block until the ready_to_run flag is set to True,
otherwise, it'll set ready_to_run to True and this function won't block.
Note that if wait_for_ready_to_run == False, there are no guarantees that the debugger is synchronized
with what's configured in the client (IDE), the only guarantee is that when leaving this function
the debugger will be already connected.
:param dont_trace_start_patterns: if set, then any path that starts with one of the patterns in the collection
will not be traced
:param dont_trace_end_patterns: if set, then any path that ends with one of the patterns in the collection
will not be traced
:param access_token: token to be sent from the client (i.e.: IDE) to the debugger when a connection
is established (verified by the debugger).
:param client_access_token: token to be sent from the debugger to the client (i.e.: IDE) when
a connection is established (verified by the client).
:param notify_stdin:
If True sys.stdin will be patched to notify the client when a message is requested
from the IDE. This is done so that when reading the stdin the client is notified.
Clients may need this to know when something that is being written should be interpreted
as an input to the process or as a command to be evaluated.
Note that parallel-python has issues with this (because it tries to assert that sys.stdin
is of a given type instead of just checking that it has what it needs).
'''
stdout_to_server = stdout_to_server or kwargs.get('stdoutToServer', False) # Backward compatibility
stderr_to_server = stderr_to_server or kwargs.get('stderrToServer', False) # Backward compatibility
# Internal use (may be used to set the setup info directly for subprocesses).
__setup_holder__ = kwargs.get('__setup_holder__')
with _set_trace_lock:
_locked_settrace(
host,
stdout_to_server,
stderr_to_server,
port,
suspend,
trace_only_current_thread,
patch_multiprocessing,
stop_at_frame,
block_until_connected,
wait_for_ready_to_run,
dont_trace_start_patterns,
dont_trace_end_patterns,
access_token,
client_access_token,
__setup_holder__=__setup_holder__,
notify_stdin=notify_stdin,
)
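# Usage sketch (illustrative only): the most common embedding is a single call near the top of
# the user's program, connecting back to an IDE that is already listening:
#
#   import pydevd
#   pydevd.settrace('localhost', port=5678, stdout_to_server=True, stderr_to_server=True,
#                   suspend=False)
#
# With suspend=True (the default) the program pauses right after the connection is established,
# as if a breakpoint had been hit at the caller's frame.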
_set_trace_lock = ForkSafeLock()
def _locked_settrace(
host,
stdout_to_server,
stderr_to_server,
port,
suspend,
trace_only_current_thread,
patch_multiprocessing,
stop_at_frame,
block_until_connected,
wait_for_ready_to_run,
dont_trace_start_patterns,
dont_trace_end_patterns,
access_token,
client_access_token,
__setup_holder__,
notify_stdin,
):
if patch_multiprocessing:
try:
from _pydev_bundle import pydev_monkey
except:
pass
else:
pydev_monkey.patch_new_process_functions()
if host is None:
from _pydev_bundle import pydev_localhost
host = pydev_localhost.get_localhost()
global _global_redirect_stdout_to_server
global _global_redirect_stderr_to_server
py_db = get_global_debugger()
if __setup_holder__:
SetupHolder.setup = __setup_holder__
if py_db is None:
py_db = PyDB()
pydevd_vm_type.setup_type()
if SetupHolder.setup is None:
setup = {
'client': host, # dispatch expects client to be set to the host address when server is False
'server': False,
'port': int(port),
'multiprocess': patch_multiprocessing,
'skip-notify-stdin': not notify_stdin,
}
SetupHolder.setup = setup
if access_token is not None:
py_db.authentication.access_token = access_token
SetupHolder.setup['access-token'] = access_token
if client_access_token is not None:
py_db.authentication.client_access_token = client_access_token
SetupHolder.setup['client-access-token'] = client_access_token
if block_until_connected:
py_db.connect(host, port) # Note: connect can raise error.
else:
# Create a dummy writer and wait for the real connection.
py_db.writer = WriterThread(NULL, py_db, terminate_on_socket_close=False)
py_db.create_wait_for_connection_thread()
if dont_trace_start_patterns or dont_trace_end_patterns:
PyDevdAPI().set_dont_trace_start_end_patterns(py_db, dont_trace_start_patterns, dont_trace_end_patterns)
_global_redirect_stdout_to_server = stdout_to_server
_global_redirect_stderr_to_server = stderr_to_server
if _global_redirect_stdout_to_server:
_init_stdout_redirect()
if _global_redirect_stderr_to_server:
_init_stderr_redirect()
if notify_stdin:
patch_stdin()
t = threadingCurrentThread()
additional_info = set_additional_thread_info(t)
if not wait_for_ready_to_run:
py_db.ready_to_run = True
py_db.wait_for_ready_to_run()
py_db.start_auxiliary_daemon_threads()
if trace_only_current_thread:
py_db.enable_tracing()
else:
# Trace future threads.
py_db.patch_threads()
py_db.enable_tracing(py_db.trace_dispatch, apply_to_all_threads=True)
# As this is the first connection, also set tracing for any untraced threads
py_db.set_tracing_for_untraced_contexts()
py_db.set_trace_for_frame_and_parents(get_frame().f_back)
with CustomFramesContainer.custom_frames_lock: # @UndefinedVariable
for _frameId, custom_frame in dict_iter_items(CustomFramesContainer.custom_frames):
py_db.set_trace_for_frame_and_parents(custom_frame.frame)
else:
# ok, we're already in debug mode with everything set up, so let's just set the break
if access_token is not None:
py_db.authentication.access_token = access_token
if client_access_token is not None:
py_db.authentication.client_access_token = client_access_token
py_db.set_trace_for_frame_and_parents(get_frame().f_back)
t = threadingCurrentThread()
additional_info = set_additional_thread_info(t)
if trace_only_current_thread:
py_db.enable_tracing()
else:
# Trace future threads.
py_db.patch_threads()
py_db.enable_tracing(py_db.trace_dispatch, apply_to_all_threads=True)
# Suspend as the last thing after all tracing is in place.
if suspend:
if stop_at_frame is not None:
# If the step was set we have to go to run state and
# set the proper frame for it to stop.
additional_info.pydev_state = STATE_RUN
additional_info.pydev_original_step_cmd = CMD_STEP_OVER
additional_info.pydev_step_cmd = CMD_STEP_OVER
additional_info.pydev_step_stop = stop_at_frame
additional_info.suspend_type = PYTHON_SUSPEND
else:
# Ask to break as soon as possible.
py_db.set_suspend(t, CMD_SET_BREAK)
def stoptrace():
pydev_log.debug("pydevd.stoptrace()")
pydevd_tracing.restore_sys_set_trace_func()
sys.settrace(None)
try:
# not available in jython!
threading.settrace(None) # for all future threads
except:
pass
from _pydev_bundle.pydev_monkey import undo_patch_thread_modules
undo_patch_thread_modules()
# Either or both standard streams can be closed at this point,
# in which case flush() will fail.
try:
sys.stdout.flush()
except:
pass
try:
sys.stderr.flush()
except:
pass
py_db = get_global_debugger()
if py_db is not None:
py_db.dispose_and_kill_all_pydevd_threads()
class Dispatcher(object):
def __init__(self):
self.port = None
def connect(self, host, port):
self.host = host
self.port = port
self.client = start_client(self.host, self.port)
self.reader = DispatchReader(self)
self.reader.pydev_do_not_trace = False # we run the reader in the same thread, so we don't want to lose tracing
self.reader.run()
def close(self):
try:
self.reader.do_kill_pydev_thread()
except:
pass
class DispatchReader(ReaderThread):
def __init__(self, dispatcher):
self.dispatcher = dispatcher
ReaderThread.__init__(
self,
get_global_debugger(),
self.dispatcher.client,
PyDevJsonCommandProcessor=PyDevJsonCommandProcessor,
process_net_command=process_net_command,
)
@overrides(ReaderThread._on_run)
def _on_run(self):
dummy_thread = threading.currentThread()
dummy_thread.is_pydev_daemon_thread = False
return ReaderThread._on_run(self)
@overrides(PyDBDaemonThread.do_kill_pydev_thread)
def do_kill_pydev_thread(self):
if not self._kill_received:
ReaderThread.do_kill_pydev_thread(self)
try:
self.sock.shutdown(SHUT_RDWR)
except:
pass
try:
self.sock.close()
except:
pass
def process_command(self, cmd_id, seq, text):
if cmd_id == 99:
self.dispatcher.port = int(text)
self._kill_received = True
DISPATCH_APPROACH_NEW_CONNECTION = 1 # Used by PyDev
DISPATCH_APPROACH_EXISTING_CONNECTION = 2 # Used by PyCharm
DISPATCH_APPROACH = DISPATCH_APPROACH_NEW_CONNECTION
def dispatch():
setup = SetupHolder.setup
host = setup['client']
port = setup['port']
if DISPATCH_APPROACH == DISPATCH_APPROACH_EXISTING_CONNECTION:
dispatcher = Dispatcher()
try:
dispatcher.connect(host, port)
port = dispatcher.port
finally:
dispatcher.close()
return host, port
def settrace_forked(setup_tracing=True):
'''
When creating a fork from a process in the debugger, we need to reset the whole debugger environment!
'''
from _pydevd_bundle.pydevd_constants import GlobalDebuggerHolder
py_db = GlobalDebuggerHolder.global_dbg
if py_db is not None:
py_db.created_pydb_daemon_threads = {} # Just making sure we won't touch those (paused) threads.
py_db = None
GlobalDebuggerHolder.global_dbg = None
threading.current_thread().additional_info = None
# Make sure that we keep the same access tokens for subprocesses started through fork.
setup = SetupHolder.setup
if setup is None:
setup = {}
else:
# i.e.: Get the ppid at this point as it just changed.
# If we later do an exec() it should remain the same ppid.
setup[pydevd_constants.ARGUMENT_PPID] = PyDevdAPI().get_ppid()
access_token = setup.get('access-token')
client_access_token = setup.get('client-access-token')
if setup_tracing:
from _pydevd_frame_eval.pydevd_frame_eval_main import clear_thread_local_info
host, port = dispatch()
import pydevd_tracing
pydevd_tracing.restore_sys_set_trace_func()
if setup_tracing:
if port is not None:
custom_frames_container_init()
if clear_thread_local_info is not None:
clear_thread_local_info()
settrace(
host,
port=port,
suspend=False,
trace_only_current_thread=False,
overwrite_prev_trace=True,
patch_multiprocessing=True,
access_token=access_token,
client_access_token=client_access_token,
)
@contextmanager
def skip_subprocess_arg_patch():
'''
May be used to skip the monkey-patching that pydevd applies to the arguments of
newly created processes (the patching that embeds the debugger into child processes).
i.e.:
with pydevd.skip_subprocess_arg_patch():
subprocess.call(...)
'''
from _pydev_bundle import pydev_monkey
with pydev_monkey.skip_subprocess_arg_patch():
yield
def add_dont_terminate_child_pid(pid):
'''
May be used to ask pydevd to skip the termination of some process
when it's asked to terminate (debug adapter protocol only).
:param int pid:
The pid to be ignored.
i.e.:
process = subprocess.Popen(...)
pydevd.add_dont_terminate_child_pid(process.pid)
'''
py_db = get_global_debugger()
if py_db is not None:
py_db.dont_terminate_child_pids.add(pid)
class SetupHolder:
setup = None
def apply_debugger_options(setup_options):
"""
:type setup_options: dict[str, bool]
"""
default_options = {'save-signatures': False, 'qt-support': ''}
default_options.update(setup_options)
setup_options = default_options
debugger = get_global_debugger()
if setup_options['save-signatures']:
if pydevd_vm_type.get_vm_type() == pydevd_vm_type.PydevdVmType.JYTHON:
sys.stderr.write("Collecting run-time type information is not supported for Jython\n")
else:
# Only import it if we're going to use it!
from _pydevd_bundle.pydevd_signature import SignatureFactory
debugger.signature_factory = SignatureFactory()
if setup_options['qt-support']:
enable_qt_support(setup_options['qt-support'])
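# Illustrative sketch (not part of the original module, never invoked):
# apply_debugger_options() expects a plain dict of option flags; any key that
# is missing falls back to the defaults declared above. The values here are
# only an example.
def _example_apply_debugger_options_usage():
    apply_debugger_options({'save-signatures': False, 'qt-support': ''})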
@call_only_once
def patch_stdin():
_internal_patch_stdin(None, sys, getpass_mod)
def _internal_patch_stdin(py_db=None, sys=None, getpass_mod=None):
'''
Note: don't use this function directly, use `patch_stdin()` instead.
(this function is only meant to be used on test-cases to avoid patching the actual globals).
'''
# Patch stdin so that we notify when readline() is called.
original_sys_stdin = sys.stdin
debug_console_stdin = DebugConsoleStdIn(py_db, original_sys_stdin)
sys.stdin = debug_console_stdin
_original_getpass = getpass_mod.getpass
@functools.wraps(_original_getpass)
def getpass(*args, **kwargs):
with DebugConsoleStdIn.notify_input_requested(debug_console_stdin):
try:
curr_stdin = sys.stdin
if curr_stdin is debug_console_stdin:
sys.stdin = original_sys_stdin
return _original_getpass(*args, **kwargs)
finally:
sys.stdin = curr_stdin
getpass_mod.getpass = getpass
# Dispatch on_debugger_modules_loaded here, after all primary py_db modules are loaded
for handler in pydevd_extension_utils.extensions_of_type(DebuggerEventHandler):
handler.on_debugger_modules_loaded(debugger_version=__version__)
#=======================================================================================================================
# main
#=======================================================================================================================
def main():
# Parse the command line. --file is the last required argument.
pydev_log.debug("Initial arguments: %s", (sys.argv,))
pydev_log.debug("Current pid: %s", os.getpid())
try:
from _pydevd_bundle.pydevd_command_line_handling import process_command_line
setup = process_command_line(sys.argv)
SetupHolder.setup = setup
except ValueError:
pydev_log.exception()
usage(1)
if setup['print-in-debugger-startup']:
try:
pid = ' (pid: %s)' % os.getpid()
except:
pid = ''
sys.stderr.write("pydev debugger: starting%s\n" % pid)
pydev_log.debug("Executing file %s", setup['file'])
pydev_log.debug("arguments: %s", (sys.argv,))
pydevd_vm_type.setup_type(setup.get('vm_type', None))
if SHOW_DEBUG_INFO_ENV:
set_debug(setup)
DebugInfoHolder.DEBUG_RECORD_SOCKET_READS = setup.get('DEBUG_RECORD_SOCKET_READS', DebugInfoHolder.DEBUG_RECORD_SOCKET_READS)
DebugInfoHolder.DEBUG_TRACE_BREAKPOINTS = setup.get('DEBUG_TRACE_BREAKPOINTS', DebugInfoHolder.DEBUG_TRACE_BREAKPOINTS)
DebugInfoHolder.DEBUG_TRACE_LEVEL = setup.get('DEBUG_TRACE_LEVEL', DebugInfoHolder.DEBUG_TRACE_LEVEL)
port = setup['port']
host = setup['client']
f = setup['file']
fix_app_engine_debug = False
debugger = get_global_debugger()
if debugger is None:
debugger = PyDB()
try:
from _pydev_bundle import pydev_monkey
except:
pass # Not usable on jython 2.1
else:
if setup['multiprocess']: # PyDev
pydev_monkey.patch_new_process_functions()
elif setup['multiproc']: # PyCharm
pydev_log.debug("Started in multiproc mode\n")
global DISPATCH_APPROACH
DISPATCH_APPROACH = DISPATCH_APPROACH_EXISTING_CONNECTION
dispatcher = Dispatcher()
try:
dispatcher.connect(host, port)
if dispatcher.port is not None:
port = dispatcher.port
pydev_log.debug("Received port %d\n", port)
pydev_log.info("pydev debugger: process %d is connecting\n" % os.getpid())
try:
pydev_monkey.patch_new_process_functions()
except:
pydev_log.exception("Error patching process functions.")
else:
pydev_log.critical("pydev debugger: couldn't get port for new debug process.")
finally:
dispatcher.close()
else:
try:
pydev_monkey.patch_new_process_functions_with_warning()
except:
pydev_log.exception("Error patching process functions.")
# Only do this patching if we're not running with multiprocess turned on.
if f.find('dev_appserver.py') != -1:
if os.path.basename(f).startswith('dev_appserver.py'):
appserver_dir = os.path.dirname(f)
version_file = os.path.join(appserver_dir, 'VERSION')
if os.path.exists(version_file):
try:
stream = open(version_file, 'r')
try:
for line in stream.read().splitlines():
line = line.strip()
if line.startswith('release:'):
line = line[8:].strip()
version = line.replace('"', '')
version = version.split('.')
if int(version[0]) > 1:
fix_app_engine_debug = True
elif int(version[0]) == 1:
if int(version[1]) >= 7:
# Only fix from 1.7 onwards
fix_app_engine_debug = True
break
finally:
stream.close()
except:
pydev_log.exception()
try:
# In the default run (i.e.: run directly on debug mode), we try to patch stackless as soon as possible
# on a run where we have a remote debug, we may have to be more careful because patching stackless means
# that if the user already had a stackless.set_schedule_callback installed, they'd lose it and would need
# to call it again (because stackless provides no way of getting the last function which was registered
# in set_schedule_callback).
#
# So, ideally, if there's an application using stackless and the application wants to use the remote debugger
# and benefit from stackless debugging, the application itself must call:
#
# import pydevd_stackless
# pydevd_stackless.patch_stackless()
#
# itself to be able to benefit from seeing the tasklets created before the remote debugger is attached.
from _pydevd_bundle import pydevd_stackless
pydevd_stackless.patch_stackless()
except:
# It's ok not having stackless there...
try:
if hasattr(sys, 'exc_clear'):
sys.exc_clear() # the exception information should be cleaned in Python 2
except:
pass
is_module = setup['module']
if not setup['skip-notify-stdin']:
patch_stdin()
if setup[pydevd_constants.ARGUMENT_JSON_PROTOCOL]:
PyDevdAPI().set_protocol(debugger, 0, JSON_PROTOCOL)
elif setup[pydevd_constants.ARGUMENT_HTTP_JSON_PROTOCOL]:
PyDevdAPI().set_protocol(debugger, 0, HTTP_JSON_PROTOCOL)
elif setup[pydevd_constants.ARGUMENT_HTTP_PROTOCOL]:
PyDevdAPI().set_protocol(debugger, 0, pydevd_constants.HTTP_PROTOCOL)
elif setup[pydevd_constants.ARGUMENT_QUOTED_LINE_PROTOCOL]:
PyDevdAPI().set_protocol(debugger, 0, pydevd_constants.QUOTED_LINE_PROTOCOL)
access_token = setup['access-token']
if access_token:
debugger.authentication.access_token = access_token
client_access_token = setup['client-access-token']
if client_access_token:
debugger.authentication.client_access_token = client_access_token
if fix_app_engine_debug:
sys.stderr.write("pydev debugger: google app engine integration enabled\n")
curr_dir = os.path.dirname(__file__)
app_engine_startup_file = os.path.join(curr_dir, 'pydev_app_engine_debug_startup.py')
sys.argv.insert(1, '--python_startup_script=' + app_engine_startup_file)
import json
setup['pydevd'] = __file__
sys.argv.insert(2, '--python_startup_args=%s' % json.dumps(setup),)
sys.argv.insert(3, '--automatic_restart=no')
sys.argv.insert(4, '--max_module_instances=1')
# Run the dev_appserver
debugger.run(setup['file'], None, None, is_module, set_trace=False)
else:
if setup['save-threading']:
debugger.thread_analyser = ThreadingLogger()
if setup['save-asyncio']:
if IS_PY34_OR_GREATER:
debugger.asyncio_analyser = AsyncioLogger()
apply_debugger_options(setup)
try:
debugger.connect(host, port)
except:
sys.stderr.write("Could not connect to %s: %s\n" % (host, port))
pydev_log.exception()
sys.exit(1)
globals = debugger.run(setup['file'], None, None, is_module)
if setup['cmd-line']:
debugger.wait_for_commands(globals)
if __name__ == '__main__':
main()
| mit |
datapythonista/pandas | pandas/_testing/asserters.py | 1 | 47744 | from __future__ import annotations
from typing import cast
import warnings
import numpy as np
from pandas._libs.lib import (
NoDefault,
no_default,
)
from pandas._libs.missing import is_matching_na
import pandas._libs.testing as _testing
from pandas.core.dtypes.common import (
is_bool,
is_categorical_dtype,
is_extension_array_dtype,
is_interval_dtype,
is_number,
is_numeric_dtype,
needs_i8_conversion,
)
from pandas.core.dtypes.dtypes import PandasDtype
from pandas.core.dtypes.missing import array_equivalent
import pandas as pd
from pandas import (
Categorical,
DataFrame,
DatetimeIndex,
Index,
IntervalIndex,
MultiIndex,
PeriodIndex,
Series,
TimedeltaIndex,
)
from pandas.core.algorithms import (
safe_sort,
take_nd,
)
from pandas.core.arrays import (
DatetimeArray,
ExtensionArray,
IntervalArray,
PeriodArray,
TimedeltaArray,
)
from pandas.core.arrays.datetimelike import DatetimeLikeArrayMixin
from pandas.io.formats.printing import pprint_thing
def assert_almost_equal(
left,
right,
check_dtype: bool | str = "equiv",
check_less_precise: bool | int | NoDefault = no_default,
rtol: float = 1.0e-5,
atol: float = 1.0e-8,
**kwargs,
):
"""
Check that the left and right objects are approximately equal.
By approximately equal, we refer to objects that are numbers or that
contain numbers which may be equivalent to specific levels of precision.
Parameters
----------
left : object
right : object
check_dtype : bool or {'equiv'}, default 'equiv'
Check dtype if both a and b are the same type. If 'equiv' is passed in,
then `RangeIndex` and `Int64Index` are also considered equivalent
when doing type checking.
check_less_precise : bool or int, default False
Specify comparison precision. 5 digits (False) or 3 digits (True)
after decimal points are compared. If int, then specify the number
of digits to compare.
When comparing two numbers, if the first number has magnitude less
than 1e-5, we compare the two numbers directly and check whether
they are equivalent within the specified precision. Otherwise, we
compare the **ratio** of the second number to the first number and
check whether it is equivalent to 1 within the specified precision.
.. deprecated:: 1.1.0
Use `rtol` and `atol` instead to define relative/absolute
tolerance, respectively. Similar to :func:`math.isclose`.
rtol : float, default 1e-5
Relative tolerance.
.. versionadded:: 1.1.0
atol : float, default 1e-8
Absolute tolerance.
.. versionadded:: 1.1.0
"""
if check_less_precise is not no_default:
warnings.warn(
"The 'check_less_precise' keyword in testing.assert_*_equal "
"is deprecated and will be removed in a future version. "
"You can stop passing 'check_less_precise' to silence this warning.",
FutureWarning,
stacklevel=2,
)
# error: Argument 1 to "_get_tol_from_less_precise" has incompatible
# type "Union[bool, int, NoDefault]"; expected "Union[bool, int]"
rtol = atol = _get_tol_from_less_precise(
check_less_precise # type: ignore[arg-type]
)
if isinstance(left, Index):
assert_index_equal(
left,
right,
check_exact=False,
exact=check_dtype,
rtol=rtol,
atol=atol,
**kwargs,
)
elif isinstance(left, Series):
assert_series_equal(
left,
right,
check_exact=False,
check_dtype=check_dtype,
rtol=rtol,
atol=atol,
**kwargs,
)
elif isinstance(left, DataFrame):
assert_frame_equal(
left,
right,
check_exact=False,
check_dtype=check_dtype,
rtol=rtol,
atol=atol,
**kwargs,
)
else:
# Other sequences.
if check_dtype:
if is_number(left) and is_number(right):
# Do not compare numeric classes, like np.float64 and float.
pass
elif is_bool(left) and is_bool(right):
# Do not compare bool classes, like np.bool_ and bool.
pass
else:
if isinstance(left, np.ndarray) or isinstance(right, np.ndarray):
obj = "numpy array"
else:
obj = "Input"
assert_class_equal(left, right, obj=obj)
# if we have "equiv", this becomes True
check_dtype = bool(check_dtype)
_testing.assert_almost_equal(
left, right, check_dtype=check_dtype, rtol=rtol, atol=atol, **kwargs
)
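# Illustrative sketch (not part of the original module, never invoked):
# assert_almost_equal dispatches on the input type, so scalars, Index, Series
# and DataFrame can all be compared through the same entry point. The numbers
# below are arbitrary example values.
def _example_assert_almost_equal_usage():
    assert_almost_equal(0.300000001, 0.3)                 # scalars, default rtol/atol
    assert_almost_equal(Series([0.1, 0.2]), Series([0.1, 0.2 + 1e-9]))
    assert_almost_equal(1.0, 1.05, rtol=0.1)              # widen the relative tolerance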
def _get_tol_from_less_precise(check_less_precise: bool | int) -> float:
"""
Return the tolerance equivalent to the deprecated `check_less_precise`
parameter.
Parameters
----------
check_less_precise : bool or int
Returns
-------
float
Tolerance to be used as relative/absolute tolerance.
Examples
--------
>>> # Using check_less_precise as a bool:
>>> _get_tol_from_less_precise(False)
0.5e-5
>>> _get_tol_from_less_precise(True)
0.5e-3
>>> # Using check_less_precise as an int representing the decimal
>>> # tolerance intended:
>>> _get_tol_from_less_precise(2)
0.5e-2
>>> _get_tol_from_less_precise(8)
0.5e-8
"""
if isinstance(check_less_precise, bool):
if check_less_precise:
# 3-digit tolerance
return 0.5e-3
else:
# 5-digit tolerance
return 0.5e-5
else:
# Equivalent to setting checking_less_precise=<decimals>
return 0.5 * 10 ** -check_less_precise
def _check_isinstance(left, right, cls):
"""
Helper method for our assert_* methods that ensures that
the two objects being compared have the right type before
proceeding with the comparison.
Parameters
----------
left : The first object being compared.
right : The second object being compared.
cls : The class type to check against.
Raises
------
AssertionError : Either `left` or `right` is not an instance of `cls`.
"""
cls_name = cls.__name__
if not isinstance(left, cls):
raise AssertionError(
f"{cls_name} Expected type {cls}, found {type(left)} instead"
)
if not isinstance(right, cls):
raise AssertionError(
f"{cls_name} Expected type {cls}, found {type(right)} instead"
)
def assert_dict_equal(left, right, compare_keys: bool = True):
_check_isinstance(left, right, dict)
_testing.assert_dict_equal(left, right, compare_keys=compare_keys)
def assert_index_equal(
left: Index,
right: Index,
exact: bool | str = "equiv",
check_names: bool = True,
check_less_precise: bool | int | NoDefault = no_default,
check_exact: bool = True,
check_categorical: bool = True,
check_order: bool = True,
rtol: float = 1.0e-5,
atol: float = 1.0e-8,
obj: str = "Index",
) -> None:
"""
Check that left and right Index are equal.
Parameters
----------
left : Index
right : Index
exact : bool or {'equiv'}, default 'equiv'
Whether to check the Index class, dtype and inferred_type
are identical. If 'equiv', then RangeIndex can be substituted for
Int64Index as well.
check_names : bool, default True
Whether to check the names attribute.
check_less_precise : bool or int, default False
Specify comparison precision. Only used when check_exact is False.
5 digits (False) or 3 digits (True) after decimal points are compared.
If int, then specify the digits to compare.
.. deprecated:: 1.1.0
Use `rtol` and `atol` instead to define relative/absolute
tolerance, respectively. Similar to :func:`math.isclose`.
check_exact : bool, default True
Whether to compare number exactly.
check_categorical : bool, default True
Whether to compare internal Categorical exactly.
check_order : bool, default True
Whether to compare the order of index entries as well as their values.
If True, both indexes must contain the same elements, in the same order.
If False, both indexes must contain the same elements, but in any order.
.. versionadded:: 1.2.0
rtol : float, default 1e-5
Relative tolerance. Only used when check_exact is False.
.. versionadded:: 1.1.0
atol : float, default 1e-8
Absolute tolerance. Only used when check_exact is False.
.. versionadded:: 1.1.0
obj : str, default 'Index'
Specify object name being compared, internally used to show appropriate
assertion message.
Examples
--------
>>> from pandas.testing import assert_index_equal
>>> a = pd.Index([1, 2, 3])
>>> b = pd.Index([1, 2, 3])
>>> assert_index_equal(a, b)
"""
__tracebackhide__ = True
def _check_types(left, right, obj="Index"):
if not exact:
return
assert_class_equal(left, right, exact=exact, obj=obj)
# Skip exact dtype checking when `check_categorical` is False
if check_categorical:
assert_attr_equal("dtype", left, right, obj=obj)
if is_categorical_dtype(left.dtype) and is_categorical_dtype(right.dtype):
assert_index_equal(left.categories, right.categories, exact=exact)
# allow string-like to have different inferred_types
if left.inferred_type in ("string"):
assert right.inferred_type in ("string")
else:
assert_attr_equal("inferred_type", left, right, obj=obj)
def _get_ilevel_values(index, level):
# accept level number only
unique = index.levels[level]
level_codes = index.codes[level]
filled = take_nd(unique._values, level_codes, fill_value=unique._na_value)
return unique._shallow_copy(filled, name=index.names[level])
if check_less_precise is not no_default:
warnings.warn(
"The 'check_less_precise' keyword in testing.assert_*_equal "
"is deprecated and will be removed in a future version. "
"You can stop passing 'check_less_precise' to silence this warning.",
FutureWarning,
stacklevel=2,
)
# error: Argument 1 to "_get_tol_from_less_precise" has incompatible
# type "Union[bool, int, NoDefault]"; expected "Union[bool, int]"
rtol = atol = _get_tol_from_less_precise(
check_less_precise # type: ignore[arg-type]
)
# instance validation
_check_isinstance(left, right, Index)
# class / dtype comparison
_check_types(left, right, obj=obj)
# level comparison
if left.nlevels != right.nlevels:
msg1 = f"{obj} levels are different"
msg2 = f"{left.nlevels}, {left}"
msg3 = f"{right.nlevels}, {right}"
raise_assert_detail(obj, msg1, msg2, msg3)
# length comparison
if len(left) != len(right):
msg1 = f"{obj} length are different"
msg2 = f"{len(left)}, {left}"
msg3 = f"{len(right)}, {right}"
raise_assert_detail(obj, msg1, msg2, msg3)
# If order doesn't matter then sort the index entries
if not check_order:
left = Index(safe_sort(left))
right = Index(safe_sort(right))
# MultiIndex gets a special comparison to produce more helpful error messages
if left.nlevels > 1:
left = cast(MultiIndex, left)
right = cast(MultiIndex, right)
for level in range(left.nlevels):
# cannot use get_level_values here because it can change dtype
llevel = _get_ilevel_values(left, level)
rlevel = _get_ilevel_values(right, level)
lobj = f"MultiIndex level [{level}]"
assert_index_equal(
llevel,
rlevel,
exact=exact,
check_names=check_names,
check_exact=check_exact,
rtol=rtol,
atol=atol,
obj=lobj,
)
# get_level_values may change dtype
_check_types(left.levels[level], right.levels[level], obj=obj)
# skip exact index checking when `check_categorical` is False
if check_exact and check_categorical:
if not left.equals(right):
diff = (
np.sum((left._values != right._values).astype(int)) * 100.0 / len(left)
)
msg = f"{obj} values are different ({np.round(diff, 5)} %)"
raise_assert_detail(obj, msg, left, right)
else:
# if we have "equiv", this becomes True
exact_bool = bool(exact)
_testing.assert_almost_equal(
left.values,
right.values,
rtol=rtol,
atol=atol,
check_dtype=exact_bool,
obj=obj,
lobj=left,
robj=right,
)
# metadata comparison
if check_names:
assert_attr_equal("names", left, right, obj=obj)
if isinstance(left, PeriodIndex) or isinstance(right, PeriodIndex):
assert_attr_equal("freq", left, right, obj=obj)
if isinstance(left, IntervalIndex) or isinstance(right, IntervalIndex):
assert_interval_array_equal(left._values, right._values)
if check_categorical:
if is_categorical_dtype(left.dtype) or is_categorical_dtype(right.dtype):
assert_categorical_equal(left._values, right._values, obj=f"{obj} category")
def assert_class_equal(left, right, exact: bool | str = True, obj="Input"):
"""
Checks classes are equal.
"""
__tracebackhide__ = True
def repr_class(x):
if isinstance(x, Index):
# return Index as it is to include values in the error message
return x
return type(x).__name__
if exact == "equiv":
if type(left) != type(right):
# allow equivalence of Int64Index/RangeIndex
types = {type(left).__name__, type(right).__name__}
if len(types - {"Int64Index", "RangeIndex"}):
msg = f"{obj} classes are not equivalent"
raise_assert_detail(obj, msg, repr_class(left), repr_class(right))
elif exact:
if type(left) != type(right):
msg = f"{obj} classes are different"
raise_assert_detail(obj, msg, repr_class(left), repr_class(right))
def assert_attr_equal(attr: str, left, right, obj: str = "Attributes"):
"""
Check attributes are equal. Both objects must have the attribute.
Parameters
----------
attr : str
Attribute name being compared.
left : object
right : object
obj : str, default 'Attributes'
Specify object name being compared, internally used to show appropriate
assertion message
"""
__tracebackhide__ = True
left_attr = getattr(left, attr)
right_attr = getattr(right, attr)
if left_attr is right_attr:
return True
elif is_matching_na(left_attr, right_attr):
# e.g. both np.nan, both NaT, both pd.NA, ...
return True
try:
result = left_attr == right_attr
except TypeError:
# datetimetz on rhs may raise TypeError
result = False
if (left_attr is pd.NA) ^ (right_attr is pd.NA):
result = False
elif not isinstance(result, bool):
result = result.all()
if result:
return True
else:
msg = f'Attribute "{attr}" are different'
raise_assert_detail(obj, msg, left_attr, right_attr)
def assert_is_valid_plot_return_object(objs):
import matplotlib.pyplot as plt
if isinstance(objs, (Series, np.ndarray)):
for el in objs.ravel():
msg = (
"one of 'objs' is not a matplotlib Axes instance, "
f"type encountered {repr(type(el).__name__)}"
)
assert isinstance(el, (plt.Axes, dict)), msg
else:
msg = (
"objs is neither an ndarray of Artist instances nor a single "
"ArtistArtist instance, tuple, or dict, 'objs' is a "
f"{repr(type(objs).__name__)}"
)
assert isinstance(objs, (plt.Artist, tuple, dict)), msg
def assert_is_sorted(seq):
"""Assert that the sequence is sorted."""
if isinstance(seq, (Index, Series)):
seq = seq.values
# sorting does not change precisions
assert_numpy_array_equal(seq, np.sort(np.array(seq)))
def assert_categorical_equal(
left, right, check_dtype=True, check_category_order=True, obj="Categorical"
):
"""
Test that Categoricals are equivalent.
Parameters
----------
left : Categorical
right : Categorical
check_dtype : bool, default True
Check that integer dtype of the codes are the same
check_category_order : bool, default True
Whether the order of the categories should be compared, which
implies identical integer codes. If False, only the resulting
values are compared. The ordered attribute is
checked regardless.
obj : str, default 'Categorical'
Specify object name being compared, internally used to show appropriate
assertion message
"""
_check_isinstance(left, right, Categorical)
if check_category_order:
assert_index_equal(left.categories, right.categories, obj=f"{obj}.categories")
assert_numpy_array_equal(
left.codes, right.codes, check_dtype=check_dtype, obj=f"{obj}.codes"
)
else:
try:
lc = left.categories.sort_values()
rc = right.categories.sort_values()
except TypeError:
# e.g. '<' not supported between instances of 'int' and 'str'
lc, rc = left.categories, right.categories
assert_index_equal(lc, rc, obj=f"{obj}.categories")
assert_index_equal(
left.categories.take(left.codes),
right.categories.take(right.codes),
obj=f"{obj}.values",
)
assert_attr_equal("ordered", left, right, obj=obj)
def assert_interval_array_equal(left, right, exact="equiv", obj="IntervalArray"):
"""
Test that two IntervalArrays are equivalent.
Parameters
----------
left, right : IntervalArray
The IntervalArrays to compare.
exact : bool or {'equiv'}, default 'equiv'
Whether to check the Index class, dtype and inferred_type
are identical. If 'equiv', then RangeIndex can be substituted for
Int64Index as well.
obj : str, default 'IntervalArray'
Specify object name being compared, internally used to show appropriate
assertion message
"""
_check_isinstance(left, right, IntervalArray)
kwargs = {}
if left._left.dtype.kind in ["m", "M"]:
# We have a DatetimeArray or TimedeltaArray
kwargs["check_freq"] = False
assert_equal(left._left, right._left, obj=f"{obj}.left", **kwargs)
assert_equal(left._right, right._right, obj=f"{obj}.right", **kwargs)
assert_attr_equal("closed", left, right, obj=obj)
def assert_period_array_equal(left, right, obj="PeriodArray"):
_check_isinstance(left, right, PeriodArray)
assert_numpy_array_equal(left._data, right._data, obj=f"{obj}._data")
assert_attr_equal("freq", left, right, obj=obj)
def assert_datetime_array_equal(left, right, obj="DatetimeArray", check_freq=True):
__tracebackhide__ = True
_check_isinstance(left, right, DatetimeArray)
assert_numpy_array_equal(left._data, right._data, obj=f"{obj}._data")
if check_freq:
assert_attr_equal("freq", left, right, obj=obj)
assert_attr_equal("tz", left, right, obj=obj)
def assert_timedelta_array_equal(left, right, obj="TimedeltaArray", check_freq=True):
__tracebackhide__ = True
_check_isinstance(left, right, TimedeltaArray)
assert_numpy_array_equal(left._data, right._data, obj=f"{obj}._data")
if check_freq:
assert_attr_equal("freq", left, right, obj=obj)
def raise_assert_detail(obj, message, left, right, diff=None, index_values=None):
__tracebackhide__ = True
msg = f"""{obj} are different
{message}"""
if isinstance(index_values, np.ndarray):
msg += f"\n[index]: {pprint_thing(index_values)}"
if isinstance(left, np.ndarray):
left = pprint_thing(left)
elif is_categorical_dtype(left) or isinstance(left, PandasDtype):
left = repr(left)
if isinstance(right, np.ndarray):
right = pprint_thing(right)
elif is_categorical_dtype(right) or isinstance(right, PandasDtype):
right = repr(right)
msg += f"""
[left]: {left}
[right]: {right}"""
if diff is not None:
msg += f"\n[diff]: {diff}"
raise AssertionError(msg)
def assert_numpy_array_equal(
left,
right,
strict_nan=False,
check_dtype=True,
err_msg=None,
check_same=None,
obj="numpy array",
index_values=None,
):
"""
Check that 'np.ndarray' is equivalent.
Parameters
----------
left, right : numpy.ndarray or iterable
The two arrays to be compared.
strict_nan : bool, default False
If True, consider NaN and None to be different.
check_dtype : bool, default True
Check dtype if both a and b are np.ndarray.
err_msg : str, default None
If provided, used as assertion message.
check_same : None|'copy'|'same', default None
Ensure left and right refer/do not refer to the same memory area.
obj : str, default 'numpy array'
Specify object name being compared, internally used to show appropriate
assertion message.
index_values : numpy.ndarray, default None
optional index (shared by both left and right), used in output.
"""
__tracebackhide__ = True
# instance validation
# Show a detailed error message when classes are different
assert_class_equal(left, right, obj=obj)
# both classes must be an np.ndarray
_check_isinstance(left, right, np.ndarray)
def _get_base(obj):
return obj.base if getattr(obj, "base", None) is not None else obj
left_base = _get_base(left)
right_base = _get_base(right)
if check_same == "same":
if left_base is not right_base:
raise AssertionError(f"{repr(left_base)} is not {repr(right_base)}")
elif check_same == "copy":
if left_base is right_base:
raise AssertionError(f"{repr(left_base)} is {repr(right_base)}")
def _raise(left, right, err_msg):
if err_msg is None:
if left.shape != right.shape:
raise_assert_detail(
obj, f"{obj} shapes are different", left.shape, right.shape
)
diff = 0
for left_arr, right_arr in zip(left, right):
# count up differences
if not array_equivalent(left_arr, right_arr, strict_nan=strict_nan):
diff += 1
diff = diff * 100.0 / left.size
msg = f"{obj} values are different ({np.round(diff, 5)} %)"
raise_assert_detail(obj, msg, left, right, index_values=index_values)
raise AssertionError(err_msg)
# compare shape and values
if not array_equivalent(left, right, strict_nan=strict_nan):
_raise(left, right, err_msg)
if check_dtype:
if isinstance(left, np.ndarray) and isinstance(right, np.ndarray):
assert_attr_equal("dtype", left, right, obj=obj)
def assert_extension_array_equal(
left,
right,
check_dtype=True,
index_values=None,
check_less_precise=no_default,
check_exact=False,
rtol: float = 1.0e-5,
atol: float = 1.0e-8,
):
"""
Check that left and right ExtensionArrays are equal.
Parameters
----------
left, right : ExtensionArray
The two arrays to compare.
check_dtype : bool, default True
Whether to check if the ExtensionArray dtypes are identical.
index_values : numpy.ndarray, default None
Optional index (shared by both left and right), used in output.
check_less_precise : bool or int, default False
Specify comparison precision. Only used when check_exact is False.
5 digits (False) or 3 digits (True) after decimal points are compared.
If int, then specify the digits to compare.
.. deprecated:: 1.1.0
Use `rtol` and `atol` instead to define relative/absolute
tolerance, respectively. Similar to :func:`math.isclose`.
check_exact : bool, default False
Whether to compare number exactly.
rtol : float, default 1e-5
Relative tolerance. Only used when check_exact is False.
.. versionadded:: 1.1.0
atol : float, default 1e-8
Absolute tolerance. Only used when check_exact is False.
.. versionadded:: 1.1.0
Notes
-----
Missing values are checked separately from valid values.
A mask of missing values is computed for each and checked to match.
The remaining all-valid values are cast to object dtype and checked.
Examples
--------
>>> from pandas.testing import assert_extension_array_equal
>>> a = pd.Series([1, 2, 3, 4])
>>> b, c = a.array, a.array
>>> assert_extension_array_equal(b, c)
"""
if check_less_precise is not no_default:
warnings.warn(
"The 'check_less_precise' keyword in testing.assert_*_equal "
"is deprecated and will be removed in a future version. "
"You can stop passing 'check_less_precise' to silence this warning.",
FutureWarning,
stacklevel=2,
)
rtol = atol = _get_tol_from_less_precise(check_less_precise)
assert isinstance(left, ExtensionArray), "left is not an ExtensionArray"
assert isinstance(right, ExtensionArray), "right is not an ExtensionArray"
if check_dtype:
assert_attr_equal("dtype", left, right, obj="ExtensionArray")
if (
isinstance(left, DatetimeLikeArrayMixin)
and isinstance(right, DatetimeLikeArrayMixin)
and type(right) == type(left)
):
# Avoid slow object-dtype comparisons
# np.asarray for case where we have a np.MaskedArray
assert_numpy_array_equal(
np.asarray(left.asi8), np.asarray(right.asi8), index_values=index_values
)
return
left_na = np.asarray(left.isna())
right_na = np.asarray(right.isna())
assert_numpy_array_equal(
left_na, right_na, obj="ExtensionArray NA mask", index_values=index_values
)
left_valid = np.asarray(left[~left_na].astype(object))
right_valid = np.asarray(right[~right_na].astype(object))
if check_exact:
assert_numpy_array_equal(
left_valid, right_valid, obj="ExtensionArray", index_values=index_values
)
else:
_testing.assert_almost_equal(
left_valid,
right_valid,
check_dtype=check_dtype,
rtol=rtol,
atol=atol,
obj="ExtensionArray",
index_values=index_values,
)
# This could be refactored to use the NDFrame.equals method
def assert_series_equal(
left,
right,
check_dtype=True,
check_index_type="equiv",
check_series_type=True,
check_less_precise=no_default,
check_names=True,
check_exact=False,
check_datetimelike_compat=False,
check_categorical=True,
check_category_order=True,
check_freq=True,
check_flags=True,
rtol=1.0e-5,
atol=1.0e-8,
obj="Series",
*,
check_index=True,
):
"""
Check that left and right Series are equal.
Parameters
----------
left : Series
right : Series
check_dtype : bool, default True
Whether to check the Series dtype is identical.
check_index_type : bool or {'equiv'}, default 'equiv'
Whether to check the Index class, dtype and inferred_type
are identical.
check_series_type : bool, default True
Whether to check the Series class is identical.
check_less_precise : bool or int, default False
Specify comparison precision. Only used when check_exact is False.
5 digits (False) or 3 digits (True) after decimal points are compared.
If int, then specify the digits to compare.
When comparing two numbers, if the first number has magnitude less
than 1e-5, we compare the two numbers directly and check whether
they are equivalent within the specified precision. Otherwise, we
compare the **ratio** of the second number to the first number and
check whether it is equivalent to 1 within the specified precision.
.. deprecated:: 1.1.0
Use `rtol` and `atol` instead to define relative/absolute
tolerance, respectively. Similar to :func:`math.isclose`.
check_names : bool, default True
Whether to check the Series and Index names attribute.
check_exact : bool, default False
Whether to compare number exactly.
check_datetimelike_compat : bool, default False
Compare datetime-like which is comparable ignoring dtype.
check_categorical : bool, default True
Whether to compare internal Categorical exactly.
check_category_order : bool, default True
Whether to compare category order of internal Categoricals.
.. versionadded:: 1.0.2
check_freq : bool, default True
Whether to check the `freq` attribute on a DatetimeIndex or TimedeltaIndex.
.. versionadded:: 1.1.0
check_flags : bool, default True
Whether to check the `flags` attribute.
.. versionadded:: 1.2.0
rtol : float, default 1e-5
Relative tolerance. Only used when check_exact is False.
.. versionadded:: 1.1.0
atol : float, default 1e-8
Absolute tolerance. Only used when check_exact is False.
.. versionadded:: 1.1.0
obj : str, default 'Series'
Specify object name being compared, internally used to show appropriate
assertion message.
check_index : bool, default True
Whether to check index equivalence. If False, then compare only values.
.. versionadded:: 1.3.0
Examples
--------
>>> from pandas.testing import assert_series_equal
>>> a = pd.Series([1, 2, 3, 4])
>>> b = pd.Series([1, 2, 3, 4])
>>> assert_series_equal(a, b)
"""
__tracebackhide__ = True
if check_less_precise is not no_default:
warnings.warn(
"The 'check_less_precise' keyword in testing.assert_*_equal "
"is deprecated and will be removed in a future version. "
"You can stop passing 'check_less_precise' to silence this warning.",
FutureWarning,
stacklevel=2,
)
rtol = atol = _get_tol_from_less_precise(check_less_precise)
# instance validation
_check_isinstance(left, right, Series)
if check_series_type:
assert_class_equal(left, right, obj=obj)
# length comparison
if len(left) != len(right):
msg1 = f"{len(left)}, {left.index}"
msg2 = f"{len(right)}, {right.index}"
raise_assert_detail(obj, "Series length are different", msg1, msg2)
if check_flags:
assert left.flags == right.flags, f"{repr(left.flags)} != {repr(right.flags)}"
if check_index:
# GH #38183
assert_index_equal(
left.index,
right.index,
exact=check_index_type,
check_names=check_names,
check_exact=check_exact,
check_categorical=check_categorical,
rtol=rtol,
atol=atol,
obj=f"{obj}.index",
)
if check_freq and isinstance(left.index, (DatetimeIndex, TimedeltaIndex)):
lidx = left.index
ridx = right.index
assert lidx.freq == ridx.freq, (lidx.freq, ridx.freq)
if check_dtype:
# We want to skip exact dtype checking when `check_categorical`
# is False. We'll still raise if only one is a `Categorical`,
# regardless of `check_categorical`
if (
is_categorical_dtype(left.dtype)
and is_categorical_dtype(right.dtype)
and not check_categorical
):
pass
else:
assert_attr_equal("dtype", left, right, obj=f"Attributes of {obj}")
if check_exact and is_numeric_dtype(left.dtype) and is_numeric_dtype(right.dtype):
left_values = left._values
right_values = right._values
# Only check exact if dtype is numeric
if isinstance(left_values, ExtensionArray) and isinstance(
right_values, ExtensionArray
):
assert_extension_array_equal(
left_values,
right_values,
check_dtype=check_dtype,
index_values=np.asarray(left.index),
)
else:
assert_numpy_array_equal(
left_values,
right_values,
check_dtype=check_dtype,
obj=str(obj),
index_values=np.asarray(left.index),
)
elif check_datetimelike_compat and (
needs_i8_conversion(left.dtype) or needs_i8_conversion(right.dtype)
):
# we want to check only if we have compat dtypes
# e.g. integer and M|m are NOT compat, but we can simply check
# the values in that case
# datetimelike may have different objects (e.g. datetime.datetime
# vs Timestamp) but will compare equal
if not Index(left._values).equals(Index(right._values)):
msg = (
f"[datetimelike_compat=True] {left._values} "
f"is not equal to {right._values}."
)
raise AssertionError(msg)
elif is_interval_dtype(left.dtype) and is_interval_dtype(right.dtype):
assert_interval_array_equal(left.array, right.array)
elif is_categorical_dtype(left.dtype) or is_categorical_dtype(right.dtype):
_testing.assert_almost_equal(
left._values,
right._values,
rtol=rtol,
atol=atol,
check_dtype=check_dtype,
obj=str(obj),
index_values=np.asarray(left.index),
)
elif is_extension_array_dtype(left.dtype) and is_extension_array_dtype(right.dtype):
assert_extension_array_equal(
left._values,
right._values,
check_dtype=check_dtype,
index_values=np.asarray(left.index),
)
elif is_extension_array_dtype_and_needs_i8_conversion(
left.dtype, right.dtype
) or is_extension_array_dtype_and_needs_i8_conversion(right.dtype, left.dtype):
assert_extension_array_equal(
left._values,
right._values,
check_dtype=check_dtype,
index_values=np.asarray(left.index),
)
elif needs_i8_conversion(left.dtype) and needs_i8_conversion(right.dtype):
# DatetimeArray or TimedeltaArray
assert_extension_array_equal(
left._values,
right._values,
check_dtype=check_dtype,
index_values=np.asarray(left.index),
)
else:
_testing.assert_almost_equal(
left._values,
right._values,
rtol=rtol,
atol=atol,
check_dtype=check_dtype,
obj=str(obj),
index_values=np.asarray(left.index),
)
# metadata comparison
if check_names:
assert_attr_equal("name", left, right, obj=obj)
if check_categorical:
if is_categorical_dtype(left.dtype) or is_categorical_dtype(right.dtype):
assert_categorical_equal(
left._values,
right._values,
obj=f"{obj} category",
check_category_order=check_category_order,
)
# This could be refactored to use the NDFrame.equals method
def assert_frame_equal(
left,
right,
check_dtype=True,
check_index_type="equiv",
check_column_type="equiv",
check_frame_type=True,
check_less_precise=no_default,
check_names=True,
by_blocks=False,
check_exact=False,
check_datetimelike_compat=False,
check_categorical=True,
check_like=False,
check_freq=True,
check_flags=True,
rtol=1.0e-5,
atol=1.0e-8,
obj="DataFrame",
):
"""
Check that left and right DataFrame are equal.
This function is intended to compare two DataFrames and output any
differences. It is mostly intended for use in unit tests.
Additional parameters allow varying the strictness of the
equality checks performed.
Parameters
----------
left : DataFrame
First DataFrame to compare.
right : DataFrame
Second DataFrame to compare.
check_dtype : bool, default True
Whether to check the DataFrame dtype is identical.
check_index_type : bool or {'equiv'}, default 'equiv'
Whether to check the Index class, dtype and inferred_type
are identical.
check_column_type : bool or {'equiv'}, default 'equiv'
Whether to check the columns class, dtype and inferred_type
are identical. Is passed as the ``exact`` argument of
:func:`assert_index_equal`.
check_frame_type : bool, default True
Whether to check the DataFrame class is identical.
check_less_precise : bool or int, default False
Specify comparison precision. Only used when check_exact is False.
5 digits (False) or 3 digits (True) after decimal points are compared.
If int, then specify the digits to compare.
When comparing two numbers, if the first number has magnitude less
than 1e-5, we compare the two numbers directly and check whether
they are equivalent within the specified precision. Otherwise, we
compare the **ratio** of the second number to the first number and
check whether it is equivalent to 1 within the specified precision.
.. deprecated:: 1.1.0
Use `rtol` and `atol` instead to define relative/absolute
tolerance, respectively. Similar to :func:`math.isclose`.
check_names : bool, default True
Whether to check that the `names` attribute for both the `index`
and `column` attributes of the DataFrame is identical.
by_blocks : bool, default False
Specify how to compare internal data. If False, compare by columns.
If True, compare by blocks.
check_exact : bool, default False
Whether to compare number exactly.
check_datetimelike_compat : bool, default False
Compare datetime-like which is comparable ignoring dtype.
check_categorical : bool, default True
Whether to compare internal Categorical exactly.
check_like : bool, default False
If True, ignore the order of index & columns.
Note: index labels must match their respective rows
(same as in columns) - same labels must be with the same data.
check_freq : bool, default True
Whether to check the `freq` attribute on a DatetimeIndex or TimedeltaIndex.
.. versionadded:: 1.1.0
check_flags : bool, default True
Whether to check the `flags` attribute.
rtol : float, default 1e-5
Relative tolerance. Only used when check_exact is False.
.. versionadded:: 1.1.0
atol : float, default 1e-8
Absolute tolerance. Only used when check_exact is False.
.. versionadded:: 1.1.0
obj : str, default 'DataFrame'
Specify object name being compared, internally used to show appropriate
assertion message.
See Also
--------
assert_series_equal : Equivalent method for asserting Series equality.
DataFrame.equals : Check DataFrame equality.
Examples
--------
This example shows comparing two DataFrames that are equal
but with columns of differing dtypes.
>>> from pandas._testing import assert_frame_equal
>>> df1 = pd.DataFrame({'a': [1, 2], 'b': [3, 4]})
>>> df2 = pd.DataFrame({'a': [1, 2], 'b': [3.0, 4.0]})
df1 equals itself.
>>> assert_frame_equal(df1, df1)
df1 differs from df2 as column 'b' is of a different type.
>>> assert_frame_equal(df1, df2)
Traceback (most recent call last):
...
AssertionError: Attributes of DataFrame.iloc[:, 1] (column name="b") are different
Attribute "dtype" are different
[left]: int64
[right]: float64
Ignore differing dtypes in columns with check_dtype.
>>> assert_frame_equal(df1, df2, check_dtype=False)
"""
__tracebackhide__ = True
if check_less_precise is not no_default:
warnings.warn(
"The 'check_less_precise' keyword in testing.assert_*_equal "
"is deprecated and will be removed in a future version. "
"You can stop passing 'check_less_precise' to silence this warning.",
FutureWarning,
stacklevel=2,
)
rtol = atol = _get_tol_from_less_precise(check_less_precise)
# instance validation
_check_isinstance(left, right, DataFrame)
if check_frame_type:
assert isinstance(left, type(right))
# assert_class_equal(left, right, obj=obj)
# shape comparison
if left.shape != right.shape:
raise_assert_detail(
obj, f"{obj} shape mismatch", f"{repr(left.shape)}", f"{repr(right.shape)}"
)
if check_flags:
assert left.flags == right.flags, f"{repr(left.flags)} != {repr(right.flags)}"
# index comparison
assert_index_equal(
left.index,
right.index,
exact=check_index_type,
check_names=check_names,
check_exact=check_exact,
check_categorical=check_categorical,
check_order=not check_like,
rtol=rtol,
atol=atol,
obj=f"{obj}.index",
)
# column comparison
assert_index_equal(
left.columns,
right.columns,
exact=check_column_type,
check_names=check_names,
check_exact=check_exact,
check_categorical=check_categorical,
check_order=not check_like,
rtol=rtol,
atol=atol,
obj=f"{obj}.columns",
)
if check_like:
left, right = left.reindex_like(right), right
# compare by blocks
if by_blocks:
rblocks = right._to_dict_of_blocks()
lblocks = left._to_dict_of_blocks()
for dtype in list(set(list(lblocks.keys()) + list(rblocks.keys()))):
assert dtype in lblocks
assert dtype in rblocks
assert_frame_equal(
lblocks[dtype], rblocks[dtype], check_dtype=check_dtype, obj=obj
)
# compare by columns
else:
for i, col in enumerate(left.columns):
assert col in right
lcol = left.iloc[:, i]
rcol = right.iloc[:, i]
# GH #38183
# use check_index=False, because we do not want to run
# assert_index_equal for each column,
# as we already checked it for the whole dataframe before.
assert_series_equal(
lcol,
rcol,
check_dtype=check_dtype,
check_index_type=check_index_type,
check_exact=check_exact,
check_names=check_names,
check_datetimelike_compat=check_datetimelike_compat,
check_categorical=check_categorical,
check_freq=check_freq,
obj=f'{obj}.iloc[:, {i}] (column name="{col}")',
rtol=rtol,
atol=atol,
check_index=False,
)
def assert_equal(left, right, **kwargs):
"""
Wrapper for tm.assert_*_equal to dispatch to the appropriate test function.
Parameters
----------
left, right : Index, Series, DataFrame, ExtensionArray, or np.ndarray
The two items to be compared.
**kwargs
All keyword arguments are passed through to the underlying assert method.
"""
__tracebackhide__ = True
if isinstance(left, Index):
assert_index_equal(left, right, **kwargs)
if isinstance(left, (DatetimeIndex, TimedeltaIndex)):
assert left.freq == right.freq, (left.freq, right.freq)
elif isinstance(left, Series):
assert_series_equal(left, right, **kwargs)
elif isinstance(left, DataFrame):
assert_frame_equal(left, right, **kwargs)
elif isinstance(left, IntervalArray):
assert_interval_array_equal(left, right, **kwargs)
elif isinstance(left, PeriodArray):
assert_period_array_equal(left, right, **kwargs)
elif isinstance(left, DatetimeArray):
assert_datetime_array_equal(left, right, **kwargs)
elif isinstance(left, TimedeltaArray):
assert_timedelta_array_equal(left, right, **kwargs)
elif isinstance(left, ExtensionArray):
assert_extension_array_equal(left, right, **kwargs)
elif isinstance(left, np.ndarray):
assert_numpy_array_equal(left, right, **kwargs)
elif isinstance(left, str):
assert kwargs == {}
assert left == right
else:
raise NotImplementedError(type(left))
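# Illustrative sketch (not part of the original module, never invoked): the
# assert_equal wrapper picks the appropriate assert_*_equal helper based on
# the type of its first argument.
def _example_assert_equal_usage():
    assert_equal(Index([1, 2, 3]), Index([1, 2, 3]))
    assert_equal(Series([1.0, 2.0]), Series([1.0, 2.0]))
    assert_equal(np.array([1, 2]), np.array([1, 2]))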
def assert_sp_array_equal(left, right):
"""
Check that the left and right SparseArray are equal.
Parameters
----------
left : SparseArray
right : SparseArray
"""
_check_isinstance(left, right, pd.arrays.SparseArray)
assert_numpy_array_equal(left.sp_values, right.sp_values)
# SparseIndex comparison
assert isinstance(left.sp_index, pd._libs.sparse.SparseIndex)
assert isinstance(right.sp_index, pd._libs.sparse.SparseIndex)
left_index = left.sp_index
right_index = right.sp_index
if not left_index.equals(right_index):
raise_assert_detail(
"SparseArray.index", "index are not equal", left_index, right_index
)
else:
# Indexes match; nothing further to check.
pass
assert_attr_equal("fill_value", left, right)
assert_attr_equal("dtype", left, right)
assert_numpy_array_equal(left.to_dense(), right.to_dense())
def assert_contains_all(iterable, dic):
for k in iterable:
assert k in dic, f"Did not contain item: {repr(k)}"
def assert_copy(iter1, iter2, **eql_kwargs):
"""
iter1, iter2: iterables that produce elements
comparable with assert_almost_equal
Checks that the elements are equal, but not
the same object. (Does not check that items
in sequences are also not the same object)
"""
for elem1, elem2 in zip(iter1, iter2):
assert_almost_equal(elem1, elem2, **eql_kwargs)
msg = (
f"Expected object {repr(type(elem1))} and object {repr(type(elem2))} to be "
"different objects, but they were the same object."
)
assert elem1 is not elem2, msg
def is_extension_array_dtype_and_needs_i8_conversion(left_dtype, right_dtype) -> bool:
"""
Checks that we have the combination of an ExtensionArray dtype and
a dtype that should be converted to int64
Returns
-------
bool
Related to issue #37609
"""
return is_extension_array_dtype(left_dtype) and needs_i8_conversion(right_dtype)
| bsd-3-clause |
tedunderwood/biographies | topicmodel/interpret/condense_rolethemes.py | 1 | 2854 | # condense_rolethemes.py
# This script plays the same role for my custom topic model
# that condense_doctopics plays for MALLET:
# we're creating a portable subset of the doc-topic table
# that answers preregistered hypotheses.
# There are differences here because my doctopic file has a
# slightly different format, and especially because I haven't
# normalized the vectors yet. Also, I add authors to the output.
import sys, csv, os
import numpy as np
import pandas as pd
def getdoc(anid):
'''
Gets the docid part of a character id
'''
if '|' in anid:
thedoc = anid.split('|')[0]
else:
print('error', anid)
thedoc = anid
return thedoc
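# Illustrative sketch (not part of the original script, never invoked):
# character ids are expected to look like '<docid>|<character>', so getdoc()
# keeps only the part before the first '|'. The id below is made up for the
# example.
def _example_getdoc_usage():
    assert getdoc('htid12345|protagonist') == 'htid12345'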
# MAIN starts here
args = sys.argv
doctopic_path = args[1]
themect = input("How many themes? ")
outpath = doctopic_path.replace('_doctopics.tsv', '_' + str(themect) + 'themes.tsv')
print(outpath)
if os.path.isfile(outpath):
print(outpath, ' already exists')
user = input('Ok to overwrite (y for yes): ')
if user != 'y':
sys.exit(0)
# Read metadata in order to create lists of documents linked
# by an author or by a year.
print('Reading metadata and hypotheses.')
meta = pd.read_csv('../../metadata/filtered_fiction_plus_18c.tsv', sep = '\t', index_col = 'docid')
meta = meta[~meta.index.duplicated(keep = 'first')]
docsbyauthor = dict()
groupedbyauthor = meta.groupby('author')
for auth, group in groupedbyauthor:
docsbyauthor[auth] = group.index.tolist()
docsbyyear = dict()
groupedbyyear = meta.groupby('inferreddate')
for yr, group in groupedbyyear:
docsbyyear[yr] = group.index.tolist()
significant_vols = set()
with open('../../evaluation/hypotheses.tsv', encoding = 'utf-8') as f:
reader = csv.DictReader(f, delimiter = '\t')
for row in reader:
ids = [row['firstsim'], row['secondsim'], row['distractor']]
for anid in ids:
docid = getdoc(anid)
significant_vols.add(docid)
print()
print('Reading the doctopics file.')
outlines = []
vectorsbydoc = dict()
significant_authors = set()
with open(doctopic_path, encoding = 'utf-8') as f:
for line in f:
fields = line.strip().split('\t')
if fields[0] != 'char':
continue
charid = fields[1]
docid = getdoc(charid)
if docid in significant_vols:
vector = np.array(fields[3 : ], dtype = 'float32')
total = np.sum(vector)
if total < 1:
continue
vector = vector / total
author = meta.loc[docid, 'author']
line = [author, charid]
line.extend([str(x) for x in vector])
outlines.append(line)
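# Quick sanity sketch of the normalization above (illustrative numbers only):
#   v = np.array([2., 6., 2.], dtype='float32')
#   v / np.sum(v)    # -> array([0.2, 0.6, 0.2], dtype=float32)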
print()
print('Writing condensed volumes.')
with open(outpath, mode = 'w', encoding = 'utf-8') as f:
for line in outlines:
f.write('\t'.join(line) + '\n')
| mit |
Barmaley-exe/scikit-learn | examples/manifold/plot_compare_methods.py | 259 | 4031 | """
=========================================
Comparison of Manifold Learning methods
=========================================
An illustration of dimensionality reduction on the S-curve dataset
with various manifold learning methods.
For a discussion and comparison of these algorithms, see the
:ref:`manifold module page <manifold>`
For a similar example, where the methods are applied to a
sphere dataset, see :ref:`example_manifold_plot_manifold_sphere.py`
Note that the purpose of MDS is to find a low-dimensional
representation of the data (here 2D) in which the distances respect well
the distances in the original high-dimensional space. Unlike other
manifold-learning algorithms, it does not seek an isotropic
representation of the data in the low-dimensional space.
"""
# Author: Jake Vanderplas -- <[email protected]>
print(__doc__)
from time import time
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.ticker import NullFormatter
from sklearn import manifold, datasets
# Next line to silence pyflakes. This import is needed.
Axes3D
n_points = 1000
X, color = datasets.samples_generator.make_s_curve(n_points, random_state=0)
n_neighbors = 10
n_components = 2
fig = plt.figure(figsize=(15, 8))
plt.suptitle("Manifold Learning with %i points, %i neighbors"
% (1000, n_neighbors), fontsize=14)
try:
# compatibility matplotlib < 1.0
ax = fig.add_subplot(251, projection='3d')
ax.scatter(X[:, 0], X[:, 1], X[:, 2], c=color, cmap=plt.cm.Spectral)
ax.view_init(4, -72)
except:
ax = fig.add_subplot(251, projection='3d')
plt.scatter(X[:, 0], X[:, 2], c=color, cmap=plt.cm.Spectral)
methods = ['standard', 'ltsa', 'hessian', 'modified']
labels = ['LLE', 'LTSA', 'Hessian LLE', 'Modified LLE']
for i, method in enumerate(methods):
t0 = time()
Y = manifold.LocallyLinearEmbedding(n_neighbors, n_components,
eigen_solver='auto',
method=method).fit_transform(X)
t1 = time()
print("%s: %.2g sec" % (methods[i], t1 - t0))
ax = fig.add_subplot(252 + i)
plt.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral)
plt.title("%s (%.2g sec)" % (labels[i], t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
t0 = time()
Y = manifold.Isomap(n_neighbors, n_components).fit_transform(X)
t1 = time()
print("Isomap: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(257)
plt.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral)
plt.title("Isomap (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
t0 = time()
mds = manifold.MDS(n_components, max_iter=100, n_init=1)
Y = mds.fit_transform(X)
t1 = time()
print("MDS: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(258)
plt.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral)
plt.title("MDS (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
t0 = time()
se = manifold.SpectralEmbedding(n_components=n_components,
n_neighbors=n_neighbors)
Y = se.fit_transform(X)
t1 = time()
print("SpectralEmbedding: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(259)
plt.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral)
plt.title("SpectralEmbedding (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
t0 = time()
tsne = manifold.TSNE(n_components=n_components, init='pca', random_state=0)
Y = tsne.fit_transform(X)
t1 = time()
print("t-SNE: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(2, 5, 10)
plt.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral)
plt.title("t-SNE (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
plt.show()
| bsd-3-clause |
BlueBrain/NEST | topology/doc/user_manual_scripts/layers.py | 13 | 10381 | # -*- coding: utf-8 -*-
#
# layers.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
# Run as python layers.py > layers.log
import matplotlib.pyplot as plt
import nest
import numpy as np
def beautify_layer(l, fig=plt.gcf(), xlabel=None, ylabel=None,
xlim=None, ylim=None, xticks=None, yticks=None, dx=0, dy=0):
"""Assume either x and ylims/ticks given or none"""
top = nest.GetStatus(l)[0]['topology']
ctr = top['center']
ext = top['extent']
if xticks is None:
if 'rows' in top:
dx = float(ext[0]) / top['columns']
dy = float(ext[1]) / top['rows']
xticks = ctr[0]-ext[0]/2.+dx/2. + dx*np.arange(top['columns'])
yticks = ctr[1]-ext[1]/2.+dy/2. + dy*np.arange(top['rows'])
if xlim is None:
xlim = [ctr[0]-ext[0]/2.-dx/2., ctr[0]+ext[0]/2.+dx/2.] # extra space so extent is visible
ylim = [ctr[1]-ext[1]/2.-dy/2., ctr[1]+ext[1]/2.+dy/2.]
else:
ext = [xlim[1]-xlim[0], ylim[1]-ylim[0]]
ax = fig.gca()
ax.set_xlim(xlim)
ax.set_ylim(ylim)
ax.set_aspect('equal', 'box')
ax.set_xticks(xticks)
ax.set_yticks(yticks)
ax.grid(True)
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
return
# --------------------------------------------------
nest.ResetKernel()
#{ layer1 #}
import nest.topology as tp
l = tp.CreateLayer({'rows' : 5,
'columns' : 5,
'elements': 'iaf_neuron'})
#{ end #}
fig = tp.PlotLayer(l, nodesize=50)
beautify_layer(l, fig, xlabel='x-axis (columns)', ylabel='y-axis (rows)')
ax = fig.gca()
tx=[]
for r in range(5):
tx.append(ax.text( 0.65, 0.4-r*0.2, str(r),
horizontalalignment='center',verticalalignment='center'))
tx.append(ax.text(-0.4+r*0.2, 0.65, str(r),
horizontalalignment='center',verticalalignment='center'))
# For bbox_extra_artists, see http://old.nabble.com/bbox_inches%3D'tight'-issue-with-text-outside-axes-td28767991.html
plt.savefig('../user_manual_figures/layer1.png', bbox_inches='tight', bbox_extra_artists=tx)
print("#{ layer1s.log #}")
#{ layer1s #}
print(nest.GetStatus(l)[0]['topology'])
#{ end #}
print("#{ end.log #}")
print("#{ layer1p.log #}")
#{ layer1p #}
nest.PrintNetwork(depth=2)
#{ end #}
print("#{ end.log #}")
# --------------------------------------------------
nest.ResetKernel()
#{ layer2 #}
l = tp.CreateLayer({'rows' : 5,
'columns' : 5,
'extent' : [2.0, 0.5],
'elements': 'iaf_neuron'})
#{ end #}
fig = tp.PlotLayer(l, nodesize=50)
beautify_layer(l, fig, xlabel='x-axis (columns)', ylabel='y-axis (rows)')
ax = fig.gca()
tx = []
for r in range(5):
tx.append(fig.gca().text( 1.25, 0.2-r*0.1, str(r),
horizontalalignment='center',verticalalignment='center'))
tx.append(fig.gca().text(-0.8+r*0.4, 0.35, str(r),
horizontalalignment='center',verticalalignment='center'))
# for bbox_extra_artists, see http://old.nabble.com/bbox_inches%3D'tight'-issue-with-text-outside-axes-td28767991.html
plt.savefig('../user_manual_figures/layer2.png', bbox_inches='tight', bbox_extra_artists=tx)
# --------------------------------------------------
nest.ResetKernel()
#{ layer3 #}
l1 = tp.CreateLayer({'rows': 5, 'columns' : 5, 'elements': 'iaf_neuron'})
l2 = tp.CreateLayer({'rows': 5, 'columns' : 5, 'elements': 'iaf_neuron',
'center': [-1.,1.]})
l3 = tp.CreateLayer({'rows': 5, 'columns' : 5, 'elements': 'iaf_neuron',
'center': [1.5,0.5]})
#{ end #}
fig = tp.PlotLayer(l1, nodesize=50)
tp.PlotLayer(l2, nodesize=50, nodecolor='g', fig=fig)
tp.PlotLayer(l3, nodesize=50, nodecolor='r', fig=fig)
beautify_layer(l1, fig, xlabel='x-axis (columns)', ylabel='y-axis (rows)',
xlim=[-1.6, 2.1], ylim=[-0.6,1.6],
xticks=np.arange(-1.4,2.05,0.2),
yticks=np.arange(-0.4,1.45,0.2))
plt.savefig('../user_manual_figures/layer3.png', bbox_inches='tight')
# --------------------------------------------------
nest.ResetKernel()
#{ layer3a #}
nc, nr = 5, 3
d = 0.1
l = tp.CreateLayer({'columns': nc, 'rows': nr, 'elements': 'iaf_neuron',
'extent': [nc*d, nr*d], 'center': [nc*d/2., 0.]})
#{ end #}
fig = tp.PlotLayer(l, nodesize=100)
plt.plot(0,0,'x',markersize=20, c='k', mew=3)
plt.plot(nc*d/2,0,'o',markersize=20, c='k', mew=3,mfc='none',
zorder=100)
beautify_layer(l, fig, xlabel='x-axis (columns)', ylabel='y-axis (rows)',
xticks=np.arange(0.,0.501,0.05),
yticks=np.arange(-0.15,0.151,0.05),
xlim=[-0.05, 0.55], ylim=[-0.2,0.2])
plt.savefig('../user_manual_figures/layer3a.png', bbox_inches='tight')
# --------------------------------------------------
nest.ResetKernel()
#{ layer4 #}
import numpy as np
pos = [[np.random.uniform(-0.5,0.5),np.random.uniform(-0.5,0.5)]
for j in range(50)]
l = tp.CreateLayer({'positions': pos,
'elements': 'iaf_neuron'})
#{ end #}
fig = tp.PlotLayer(l, nodesize=50)
beautify_layer(l, fig, xlabel='x-axis (columns)', ylabel='y-axis (rows)',
xlim=[-0.55,0.55],ylim=[-0.55,0.55],
xticks=[-0.5,0.,0.5],yticks=[-0.5,0.,0.5])
plt.savefig('../user_manual_figures/layer4.png', bbox_inches='tight')
# --------------------------------------------------
nest.ResetKernel()
#{ layer4_3d #}
import numpy as np
pos = [[np.random.uniform(-0.5,0.5),np.random.uniform(-0.5,0.5),
np.random.uniform(-0.5,0.5)] for j in range(200)]
l = tp.CreateLayer({'positions': pos,
'elements': 'iaf_neuron'})
#{ end #}
fig = tp.PlotLayer(l, nodesize=50)
plt.savefig('../user_manual_figures/layer4_3d.png', bbox_inches='tight')
# --------------------------------------------------
nest.ResetKernel()
#{ player #}
lp = tp.CreateLayer({'rows': 1, 'columns': 5, 'extent': [5., 1.],
'elements': 'iaf_neuron',
'edge_wrap': True})
#{ end #}
# fake plot with layer on line and circle
import matplotlib.cm as cm
clist = [(0,0,1),(0.35,0,1),(0.6,0,1),(0.8,0,1),(1.0,0,1)]
fig = plt.figure()
ax1 = fig.add_subplot(221)
ax1.plot([0.5, 5.5], [0,0], 'k-', lw=2)
ax1.scatter(range(1,6), [0]*5, s=200, c=clist) #=range(1,6), cmap=cm.jet)
#c=np.array([(w,w,1) for w in np.arange(0,0.95,0.18)]))
ax1.set_xlim([0,6])
ax1.set_ylim([-0.5, 1.25])
ax1.set_aspect('equal', 'box')
ax1.set_xticks([])
ax1.set_yticks([])
for j in range(1,6):
ax1.text(j, 0.5, str('(%d,0)'%(j-3)),
horizontalalignment='center',verticalalignment='bottom')
ax1a = fig.add_subplot(223)
ax1a.plot([0.5, 5.5], [0,0], 'k-', lw=2)
ax1a.scatter(range(1,6), [0]*5, s=200, c=[clist[0],clist[1],clist[2],clist[2],clist[1]])
#c=np.array([(w,w,1) for w in np.arange(0,0.95,0.18)]))
ax1a.set_xlim([0,6])
ax1a.set_ylim([-0.5, 1.25])
ax1a.set_aspect('equal', 'box')
ax1a.set_xticks([])
ax1a.set_yticks([])
for j in range(1,6):
ax1a.text(j, 0.5, str('(%d,0)'%(j-3)),
horizontalalignment='center',verticalalignment='bottom')
ax2 = fig.add_subplot(122)
phic = np.arange(0., 2*np.pi+0.5, 0.1)
r = 5. / (2*np.pi)
ax2.plot(r*np.cos(phic), r*np.sin(phic), 'k-', lw=2)
phin = np.arange(0., 4.1, 1.) * 2*np.pi/5
ax2.scatter(r*np.sin(phin), r*np.cos(phin), s=200, c=[clist[0],clist[1],clist[2],clist[2],clist[1]])
#[1,2,3,3,2], cmap=cm.jet)
# c=np.array([(w,w,1) for w in [0., 0.18, 0.36, 0.36, 0.18]]))
ax2.set_xlim([-1.3,1.3])
ax2.set_ylim([-1.2,1.2])
ax2.set_aspect('equal', 'box')
ax2.set_xticks([])
ax2.set_yticks([])
for j in range(5):
ax2.text(1.4*r*np.sin(phin[j]), 1.4*r*np.cos(phin[j]), str('(%d,0)' % (j+1-3)),
horizontalalignment='center',verticalalignment='center')
plt.savefig('../user_manual_figures/player.png', bbox_inches='tight')
# --------------------------------------------------
nest.ResetKernel()
#{ layer6 #}
l = tp.CreateLayer({'rows': 1, 'columns': 2,
'elements': ['iaf_cond_alpha', 'poisson_generator']})
#{ end #}
print("#{ layer6 #}")
nest.PrintNetwork(depth=3)
print("#{ end #}")
# --------------------------------------------------
nest.ResetKernel()
#{ layer7 #}
l = tp.CreateLayer({'rows': 1, 'columns': 2,
'elements': ['iaf_cond_alpha', 10, 'poisson_generator',
'noise_generator', 2]})
#{ end #}
print("#{ layer7 #}")
nest.PrintNetwork(depth=3)
print("#{ end #}")
# --------------------------------------------------
nest.ResetKernel()
#{ layer10 #}
for lyr in ['L23', 'L4', 'L56']:
nest.CopyModel('iaf_neuron', lyr+'pyr')
nest.CopyModel('iaf_neuron', lyr+'in', {'V_th': -52.})
l = tp.CreateLayer({'rows': 20, 'columns' : 20, 'extent': [0.5, 0.5],
'elements': ['L23pyr', 3, 'L23in',
'L4pyr', 3, 'L4in',
'L56pyr', 3, 'L56in']})
#{ end #}
# --------------------------------------------------
nest.ResetKernel()
#{ vislayer #}
l = tp.CreateLayer({'rows': 21, 'columns' : 21,
'elements': 'iaf_neuron'})
conndict = {'connection_type': 'divergent',
'mask' : {'circular': {'radius': 0.4}},
'kernel': {'gaussian': {'p_center': 1.0, 'sigma': 0.15}}}
tp.ConnectLayers(l, l, conndict)
fig = tp.PlotLayer(l, nodesize=80)
ctr = tp.FindCenterElement(l)
tp.PlotTargets(ctr, l, fig=fig,
mask=conndict['mask'], kernel=conndict['kernel'],
src_size=250, tgt_color='red', tgt_size=20,
kernel_color='green')
#{ end #}
plt.savefig('../user_manual_figures/vislayer.png', bbox_inches='tight')
| gpl-2.0 |
josemao/nilmtk | nilmtk/electric.py | 1 | 36233 | from __future__ import print_function, division
import pandas as pd
import numpy as np
from collections import Counter
from itertools import izip
from warnings import warn
import scipy.spatial as ss
from scipy import fft
from pandas.tools.plotting import lag_plot, autocorrelation_plot
from scipy.special import digamma,gamma
from math import log,pi
import numpy.random as nr
import matplotlib.pyplot as plt
import numpy as np
from datetime import timedelta
import gc
import pytz
from .timeframe import TimeFrame
from .measurement import select_best_ac_type
from .utils import (offset_alias_to_seconds, convert_to_timestamp,
flatten_2d_list, append_or_extend_list,
timedelta64_to_secs, safe_resample)
from .plots import plot_series
from .preprocessing import Apply
from nilmtk.stats.histogram import histogram_from_generator
from nilmtk.appliance import DEFAULT_ON_POWER_THRESHOLD
MAX_SIZE_ENTROPY = 10000
class Electric(object):
"""Common implementations of methods shared by ElecMeter and MeterGroup.
"""
def when_on(self, on_power_threshold=None, **load_kwargs):
"""Are the connected appliances appliance is on (True) or off (False)?
Uses `self.on_power_threshold()` if `on_power_threshold` not provided.
Parameters
----------
on_power_threshold : number, optional
Defaults to self.on_power_threshold()
**load_kwargs : key word arguments
Passed to self.power_series()
Returns
-------
generator of pd.Series
index is the same as for chunk returned by `self.power_series()`
values are booleans
"""
if on_power_threshold is None:
on_power_threshold = self.on_power_threshold()
for chunk in self.power_series(**load_kwargs):
yield chunk >= on_power_threshold
def on_power_threshold(self):
"""Returns the minimum `on_power_threshold` across all appliances
immediately downstream of this meter. If any appliance
does not have an `on_power_threshold` then default to 10 watts."""
if not self.appliances:
return DEFAULT_ON_POWER_THRESHOLD
on_power_thresholds = [a.on_power_threshold() for a in self.appliances]
return min(on_power_thresholds)
def min_on_duration(self):
return self._aggregate_metadata_attribute('min_on_duration')
def min_off_duration(self):
return self._aggregate_metadata_attribute('min_off_duration')
def _aggregate_metadata_attribute(self, attr, agg_func=np.max,
default_value=0,
from_type_metadata=True):
attr_values = []
for a in self.appliances:
if from_type_metadata:
attr_value = a.type.get(attr)
else:
attr_value = a.metadata.get(attr)
if attr_value is not None:
attr_values.append(attr_value)
if len(attr_values) == 0:
return default_value
else:
return agg_func(attr_values)
def matches_appliances(self, key):
"""
Parameters
----------
key : dict
Returns
-------
True if all key:value pairs in `key` match any appliance
in `self.appliances`.
"""
for appliance in self.appliances:
if appliance.matches(key):
return True
return False
def power_series_all_data(self, **kwargs):
chunks = []
for series in self.power_series(**kwargs):
if len(series) > 0:
chunks.append(series)
if chunks:
# Get rid of overlapping indices
prev_end = None
for i, chunk in enumerate(chunks):
if i > 0:
if chunk.index[0] <= prev_end:
chunks[i] = chunk.iloc[1:]
prev_end = chunk.index[-1]
all_data = pd.concat(chunks)
else:
all_data = None
return all_data
def _prep_kwargs_for_sample_period_and_resample(self, sample_period=None,
resample=False,
resample_kwargs=None,
**kwargs):
if 'preprocessing' in kwargs:
warn("If you are using `preprocessing` to resample then please"
" do not! Instead, please use the `sample_period` parameter"
" and set `resample=True`.")
if sample_period is None:
sample_period = self.sample_period()
elif resample != False:
resample = True
sample_period = int(round(sample_period))
if resample:
if resample_kwargs is None:
resample_kwargs = {}
def resample_func(df):
resample_kwargs['rule'] = '{:d}S'.format(sample_period)
return safe_resample(df, **resample_kwargs)
kwargs.setdefault('preprocessing', []).append(
Apply(func=resample_func))
return kwargs
def _replace_none_with_meter_timeframe(self, start=None, end=None):
if start is None or end is None:
timeframe_for_meter = self.get_timeframe()
if start is None:
start = timeframe_for_meter.start
if end is None:
end = timeframe_for_meter.end
return start, end
def plot(self, ax=None, timeframe=None, plot_legend=True, unit='W',
plot_kwargs=None, **kwargs):
"""
Parameters
----------
width : int, optional
Number of points on the x axis required
ax : matplotlib.axes, optional
plot_legend : boolean, optional
Defaults to True. Set to False to not plot legend.
unit : {'W', 'kW'}
**kwargs
"""
# Get start and end times for the plot
timeframe = self.get_timeframe() if timeframe is None else timeframe
if not timeframe:
return ax
kwargs['sections'] = [timeframe]
kwargs = self._set_sample_period(timeframe, **kwargs)
power_series = self.power_series_all_data(**kwargs)
if power_series is None or power_series.empty:
return ax
if unit == 'kW':
power_series /= 1000
if plot_kwargs is None:
plot_kwargs = {}
plot_kwargs.setdefault('label', self.label())
# Pandas 0.16.1 has a bug where 'label' isn't respected. See:
# https://github.com/nilmtk/nilmtk/issues/407
# The following line is a work around for this bug.
power_series.name = self.label()
ax = power_series.plot(ax=ax, **plot_kwargs)
ax.set_ylabel('Power ({})'.format(unit))
if plot_legend:
plt.legend()
return ax
def _set_sample_period(self, timeframe, width=800, **kwargs):
# Calculate the resolution for the x axis
duration = timeframe.timedelta.total_seconds()
secs_per_pixel = int(round(duration / width))
kwargs.update({'sample_period': secs_per_pixel, 'resample': True})
return kwargs
def proportion_of_upstream(self, **load_kwargs):
"""Returns a value in the range [0,1] specifying the proportion of
the upstream meter's total energy used by this meter.
"""
upstream = self.upstream_meter()
upstream_good_sects = upstream.good_sections(**load_kwargs)
proportion_of_energy = (self.total_energy(sections=upstream_good_sects) /
upstream.total_energy(sections=upstream_good_sects))
if isinstance(proportion_of_energy, pd.Series):
best_ac_type = select_best_ac_type(proportion_of_energy.keys())
return proportion_of_energy[best_ac_type]
else:
return proportion_of_energy
def vampire_power(self, **load_kwargs):
# TODO: this might be a naive approach to calculating vampire power.
power_series = self.power_series_all_data(**load_kwargs)
return get_vampire_power(power_series)
def uptime(self, **load_kwargs):
"""
Returns
-------
timedelta: total duration of all good sections.
"""
good_sections = self.good_sections(**load_kwargs)
return good_sections.uptime()
def average_energy_per_period(self, offset_alias='D', use_uptime=True, **load_kwargs):
"""Calculate the average energy per period. e.g. the average
energy per day.
Parameters
----------
offset_alias : str
A Pandas `offset alias`. See:
pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases
use_uptime : bool
Returns
-------
pd.Series
Keys are AC types.
Values are energy in kWh per period.
"""
if 'sections' in load_kwargs:
raise RuntimeError("Please do not pass in 'sections' into"
" 'average_energy_per_period'. Instead"
" use 'use_uptime' param.")
if use_uptime:
td = self.uptime(**load_kwargs)
else:
td = self.get_timeframe().timedelta
if not td:
return np.NaN
uptime_secs = td.total_seconds()
periods = uptime_secs / offset_alias_to_seconds(offset_alias)
energy = self.total_energy(**load_kwargs)
return energy / periods
def proportion_of_energy(self, other, **loader_kwargs):
"""Compute the proportion of energy of self compared to `other`.
By default, only uses other.good_sections(). You may want to set
`sections=self.good_sections().intersection(other.good_sections())`
Parameters
----------
other : nilmtk.MeteGroup or ElecMeter
Typically this will be mains.
Returns
-------
float [0,1] or NaN if other.total_energy == 0
"""
good_other_sections = other.good_sections(**loader_kwargs)
loader_kwargs.setdefault('sections', good_other_sections)
# TODO test effect of setting `sections` for other
other_total_energy = other.total_energy(**loader_kwargs)
if other_total_energy.sum() == 0:
return np.NaN
total_energy = self.total_energy(**loader_kwargs)
if total_energy.empty:
return 0.0
other_ac_types = other_total_energy.keys()
self_ac_types = total_energy.keys()
shared_ac_types = set(other_ac_types).intersection(self_ac_types)
n_shared_ac_types = len(shared_ac_types)
if n_shared_ac_types > 1:
return (total_energy[shared_ac_types] /
other_total_energy[shared_ac_types]).mean()
elif n_shared_ac_types == 0:
ac_type = select_best_ac_type(self_ac_types)
other_ac_type = select_best_ac_type(other_ac_types)
warn("No shared AC types. Using '{:s}' for submeter"
" and '{:s}' for other.".format(ac_type, other_ac_type))
elif n_shared_ac_types == 1:
ac_type = list(shared_ac_types)[0]
other_ac_type = ac_type
return total_energy[ac_type] / other_total_energy[other_ac_type]
def correlation(self, other, **load_kwargs):
"""
Finds the correlation between the two ElecMeters. Both the ElecMeters
should be perfectly aligned
Adapted from:
http://www.johndcook.com/blog/2008/11/05/how-to-calculate-pearson-correlation-accurately/
Parameters
----------
other : an ElecMeter or MeterGroup object
Returns
-------
float : [-1, 1]
"""
sample_period = max(self.sample_period(), other.sample_period())
load_kwargs.setdefault('sample_period', sample_period)
def sum_and_count(electric):
n = 0
cumulator = 0.0
for power in electric.power_series(**load_kwargs):
n += len(power.index)
cumulator += power.sum()
return n, cumulator
x_n, x_sum = sum_and_count(self)
if x_n <= 1:
return np.NaN
y_n, y_sum = sum_and_count(other)
if y_n <= 1:
return np.NaN
# we're using Python 3's division (which returns a float)
x_bar = x_sum / x_n
y_bar = y_sum / y_n
# Second pass is used to find x_s and y_s (std.devs)
def stdev(electric, mean, n):
s_square_sum = 0.0
for power in electric.power_series(**load_kwargs):
s_square_sum += ((power - mean) * (power - mean)).sum()
s_square = s_square_sum / (n - 1)
return np.sqrt(s_square)
x_s = stdev(self, x_bar, x_n)
y_s = stdev(other, y_bar, y_n)
numerator = 0.0
for (x_power, y_power) in izip(self.power_series(**load_kwargs),
other.power_series(**load_kwargs)):
xi_minus_xbar = x_power - x_bar
del x_power
gc.collect()
yi_minus_ybar = y_power - y_bar
del y_power
gc.collect()
numerator += (xi_minus_xbar * yi_minus_ybar).sum()
del xi_minus_xbar
del yi_minus_ybar
gc.collect()
denominator = (x_n - 1) * x_s * y_s
corr = numerator / denominator
return corr
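# Worked form of the two-pass estimate above (notation follows the code):
#   x_bar = x_sum / x_n,   y_bar = y_sum / y_n
#   x_s = sqrt( sum((x_i - x_bar)^2) / (x_n - 1) )   (and likewise y_s)
#   corr = sum((x_i - x_bar) * (y_i - y_bar)) / ((x_n - 1) * x_s * y_s)
# i.e. the standard Pearson r, accumulated chunk by chunk.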
def plot_lag(self, lag=1, ax=None):
"""
Plots a lag plot of power data
http://www.itl.nist.gov/div898/handbook/eda/section3/lagplot.htm
Returns
-------
matplotlib.axis
"""
if ax is None:
ax = plt.gca()
for power in self.power_series():
lag_plot(power, lag, ax=ax)
return ax
def plot_spectrum(self, ax=None):
"""
Plots spectral plot of power data
http://www.itl.nist.gov/div898/handbook/eda/section3/spectrum.htm
Code borrowed from:
http://glowingpython.blogspot.com/2011/08/how-to-plot-frequency-spectrum-with.html
Returns
-------
matplotlib.axis
"""
if ax is None:
ax = plt.gca()
Fs = 1.0/self.sample_period()
for power in self.power_series():
n = len(power.values) # length of the signal
k = np.arange(n)
T = n/Fs
frq = k/T # two sides frequency range
frq = frq[range(n//2)] # one side frequency range
Y = fft(power)/n # fft computing and normalization
Y = Y[range(n//2)]
ax.plot(frq,abs(Y)) # plotting the spectrum
ax.set_xlabel('Freq (Hz)')
ax.set_ylabel('|Y(freq)|')
return ax
def plot_autocorrelation(self, ax=None):
"""
Plots autocorrelation of power data
Reference:
http://www.itl.nist.gov/div898/handbook/eda/section3/autocopl.htm
Returns
-------
matplotlib.axis
"""
if ax is None:
ax = plt.gca()
for power in self.power_series():
autocorrelation_plot(power, ax=ax)
return ax
def plot_power_histogram(self, ax=None, load_kwargs=None,
plot_kwargs=None, range=None, **hist_kwargs):
"""
Parameters
----------
ax : axes
load_kwargs : dict
plot_kwargs : dict
range : None or tuple
if range=(None, x) then on_power_threshold will be used as minimum.
**hist_kwargs
Returns
-------
ax
"""
if ax is None:
ax = plt.gca()
if load_kwargs is None:
load_kwargs = {}
if plot_kwargs is None:
plot_kwargs = {}
generator = self.power_series(**load_kwargs)
# set range
if range is None or range[0] is None:
maximum = None if range is None else range[1]
range = (self.on_power_threshold(), maximum)
hist, bins = histogram_from_generator(generator, range=range,
**hist_kwargs)
# Plot
plot_kwargs.setdefault('linewidth', 0.1)
ax.fill_between(bins[:-1], 0, hist, **plot_kwargs)
first_bin_width = bins[1] - bins[0]
ax.set_xlim([bins[0]-(first_bin_width/2), bins[-1]])
ax.set_xlabel('Power (watts)')
ax.set_ylabel('Count')
return ax
def switch_times(self, threshold=40):
"""
Returns a list of pd.DateTime timestamps at which a switch occurs, as defined by threshold
Parameters
----------
threshold: int, threshold in Watts between successive readings
to count as an appliance state change
"""
datetime_switches = []
for power in self.power_series():
delta_power = power.diff()
delta_power_absolute = delta_power.abs()
datetime_switches.append(delta_power_absolute[(delta_power_absolute>threshold)].index.values.tolist())
return flatten_2d_list(datetime_switches)
def entropy(self, k=3, base=2):
"""
This implementation is provided courtesy of the NPEET toolbox;
the authors kindly allowed us to use their code directly.
As a courtesy, you may wish to cite their paper
if you use this function.
This fails if there is a large number of records; we need to
ask the authors how best to handle that case.
The classic K-L k-nearest neighbor continuous entropy estimator
x should be a list of vectors, e.g. x = [[1.3],[3.7],[5.1],[2.4]]
if x is a one-dimensional scalar and we have four samples
"""
def kdtree_entropy(z):
assert k <= len(z)-1, "Set k smaller than num. samples - 1"
d = len(z[0])
N = len(z)
#small noise to break degeneracy, see doc.
intens = 1e-10
z = [list(p + intens*nr.rand(len(z[0]))) for p in z]
tree = ss.cKDTree(z)
nn = [tree.query(point, k+1, p=float('inf'))[0][k] for point in z]
const = digamma(N)-digamma(k) + d*log(2)
return (const + d*np.mean(map(log, nn)))/log(base)
out = []
for power in self.power_series():
x = power.values
num_elements = len(x)
x = x.reshape((num_elements, 1))
if num_elements > MAX_SIZE_ENTROPY:
splits = num_elements // MAX_SIZE_ENTROPY + 1
y = np.array_split(x, splits)
for z in y:
out.append(kdtree_entropy(z))
else:
out.append(kdtree_entropy(x))
return sum(out)/len(out)
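# For reference, kdtree_entropy above computes the Kozachenko-Leonenko k-NN
# estimator (max-norm form used by NPEET):
#   H ~= [ psi(N) - psi(k) + d*log(2) + (d/N) * sum_i log(eps_i) ] / log(base)
# where eps_i is the distance from sample i to its k-th nearest neighbour,
# d is the dimensionality (1 for a power series) and psi is the digamma function.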
def mutual_information(self, other, k=3, base=2):
"""
Mutual information of two ElecMeters
x,y should be a list of vectors, e.g. x = [[1.3],[3.7],[5.1],[2.4]]
if x is a one-dimensional scalar and we have four samples
Parameters
----------
other : ElecMeter or MeterGroup
"""
def kdtree_mi(x, y, k, base):
intens = 1e-10 #small noise to break degeneracy, see doc.
x = [list(p + intens*nr.rand(len(x[0]))) for p in x]
y = [list(p + intens*nr.rand(len(y[0]))) for p in y]
points = zip2(x,y)
# Find nearest neighbors in joint space, p=inf means max-norm
tree = ss.cKDTree(points)
dvec = [tree.query(point, k+1, p=float('inf'))[0][k] for point in points]
a, b, c, d = avgdigamma(x, dvec), avgdigamma(y, dvec), digamma(k), digamma(len(x))
return (-a-b+c+d)/log(base)
def zip2(*args):
# zip2(x,y) takes the lists of vectors and makes it a list of vectors in a joint space
# E.g. zip2([[1],[2],[3]],[[4],[5],[6]]) = [[1,4],[2,5],[3,6]]
return [sum(sublist, []) for sublist in zip(*args)]
def avgdigamma(points, dvec):
#This part finds number of neighbors in some radius in the marginal space
#returns expectation value of <psi(nx)>
N = len(points)
tree = ss.cKDTree(points)
avg = 0.
for i in range(N):
dist = dvec[i]
#subtlety, we don't include the boundary point,
#but we are implicitly adding 1 to kraskov def bc center point is included
num_points = len(tree.query_ball_point(points[i],dist-1e-15,p=float('inf')))
avg += digamma(num_points)/N
return avg
out = []
for power_x, power_y in izip(self.power_series(), other.power_series()):
power_x_val = power_x.values
power_y_val = power_y.values
num_elements = len(power_x_val)
power_x_val = power_x_val.reshape((num_elements, 1))
power_y_val = power_y_val.reshape((num_elements, 1))
if num_elements>MAX_SIZE_ENTROPY:
splits = num_elements // MAX_SIZE_ENTROPY + 1
x_split = np.array_split(power_x_val, splits)
y_split = np.array_split(power_y_val, splits)
for x, y in izip(x_split, y_split):
out.append(kdtree_mi(x, y, k, base))
else:
out.append(kdtree_mi(power_x_val, power_y_val, k, base))
return sum(out)/len(out)
def available_power_ac_types(self):
"""Finds available alternating current types from power measurements.
Returns
-------
list of strings e.g. ['apparent', 'active']
.. note:: Deprecated in NILMTK v0.3
`available_power_ac_types` should not be used. Instead please
use `available_ac_types('power').`
"""
warn("`available_power_ac_types` is deprecated. Please use"
" `available_ac_types('power')` instead.", DeprecationWarning)
return self.available_ac_types('power')
def load_series(self, **kwargs):
"""
Parameters
----------
ac_type : str
physical_quantity : str
We sum across ac_types of this physical quantity.
**kwargs : passed through to load().
Returns
-------
generator of pd.Series. If a single ac_type is found for the
physical_quantity then the series.name will be a normal tuple.
If more than 1 ac_type is found then the ac_type will be a string
of the ac_types with '+' in between. e.g. 'active+apparent'.
"""
# Pull data through preprocessing pipeline
physical_quantity = kwargs['physical_quantity']
generator = self.load(**kwargs)
for chunk in generator:
if chunk.empty:
yield chunk
continue
chunk_to_yield = chunk[physical_quantity].sum(axis=1)
ac_types = '+'.join(chunk[physical_quantity].columns)
chunk_to_yield.name = (physical_quantity, ac_types)
chunk_to_yield.timeframe = getattr(chunk, 'timeframe', None)
chunk_to_yield.look_ahead = getattr(chunk, 'look_ahead', None)
yield chunk_to_yield
def power_series(self, **kwargs):
"""Get power Series.
Parameters
----------
ac_type : str, defaults to 'best'
**kwargs :
Any other key word arguments are passed to self.load()
Returns
-------
generator of pd.Series of power measurements.
"""
# Select power column:
kwargs['physical_quantity'] = 'power'
kwargs.setdefault('ac_type', 'best')
return self.load_series(**kwargs)
def activity_histogram(self, period='D', bin_duration='H', **kwargs):
"""Return a histogram vector showing when activity occurs.
e.g. to see when, over the course of an average day, activity occurs
then use `bin_duration='H'` and `period='D'`.
Parameters
----------
period : str. Pandas period alias.
bin_duration : str. Pandas period alias e.g. 'H' = hourly; 'D' = daily.
Width of each bin of the histogram. `bin_duration` must exactly
divide the chosen `period`.
Returns
-------
hist : np.ndarray
length will be `period / bin_duration`
"""
n_bins = (offset_alias_to_seconds(period) /
offset_alias_to_seconds(bin_duration))
if n_bins != int(n_bins):
raise ValueError('`bin_duration` must exactly divide the'
' chosen `period`')
n_bins = int(n_bins)
# Resample to `bin_duration` and load
kwargs['sample_period'] = offset_alias_to_seconds(bin_duration)
kwargs['resample_kwargs'] = {'how': 'max'}
when_on = self.when_on(**kwargs)
# Calculate histogram...
hist = np.zeros(n_bins, dtype=int)
for on_chunk in when_on:
if len(on_chunk) < 5:
continue
on_chunk = on_chunk.fillna(0).astype(int)
# Trick using resample to 'normalise' start and end dates
# to natural boundaries of 'period'.
resampled_to_period = on_chunk.iloc[[0, -1]].resample(period).index
start = resampled_to_period[0]
end = resampled_to_period[-1] + 1
# Reindex chunk
new_index = pd.date_range(start, end, freq=bin_duration, closed='left')
on_chunk = on_chunk.reindex(new_index, fill_value=0, copy=False)
# reshape
matrix = on_chunk.reshape((-1, n_bins))
# histogram
hist += matrix.sum(axis=0)
return hist
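# Example of the bin arithmetic above (not executed here): with period='D' and
# bin_duration='H', n_bins = 86400 / 3600 = 24, so hist[13] counts on how many
# days the appliance was active at some point between 13:00 and 14:00.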
def plot_activity_histogram(self, ax=None, period='D', bin_duration='H',
plot_kwargs=None, **kwargs):
if ax is None:
ax = plt.gca()
hist = self.activity_histogram(bin_duration=bin_duration,
period=period, **kwargs)
if plot_kwargs is None:
plot_kwargs = {}
n_bins = len(hist)
plot_kwargs.setdefault('align', 'center')
plot_kwargs.setdefault('linewidth', 0)
ax.bar(range(n_bins), hist, np.ones(n_bins), **plot_kwargs)
ax.set_xlim([-0.5, n_bins])
ax.set_title('Activity distribution')
ax.set_xlabel(bin_duration + ' of ' + period)
ax.set_ylabel('Count')
return ax
def activation_series(self, *args, **kwargs):
"""Returns runs of an appliance.
Most appliances spend a lot of their time off. This function finds
periods when the appliance is on.
Parameters
----------
min_off_duration : int
If min_off_duration > 0 then ignore 'off' periods less than
min_off_duration seconds of sub-threshold power consumption
(e.g. a washing machine might draw no power for a short
period while the clothes soak.) Defaults value from metadata or,
if metadata absent, defaults to 0.
min_on_duration : int
Any activation lasting less seconds than min_on_duration will be
ignored. Defaults value from metadata or, if metadata absent,
defaults to 0.
border : int
Number of rows to include before and after the detected activation
on_power_threshold : int or float
Defaults to self.on_power_threshold()
**kwargs : kwargs for self.power_series()
Returns
-------
list of pd.Series. Each series contains one activation.
.. note:: Deprecated
`activation_series` will be removed in NILMTK v0.3.
Please use `get_activations` instead.
"""
warn("`activation_series()` is deprecated."
" Please use `get_activations()` instead!", DeprecationWarning)
return self.get_activations(*args, **kwargs)
def get_activations(self, min_off_duration=None, min_on_duration=None,
border=1, on_power_threshold=None, **kwargs):
"""Returns runs of an appliance.
Most appliances spend a lot of their time off. This function finds
periods when the appliance is on.
Parameters
----------
min_off_duration : int
If min_off_duration > 0 then ignore 'off' periods less than
min_off_duration seconds of sub-threshold power consumption
(e.g. a washing machine might draw no power for a short
period while the clothes soak.) Defaults value from metadata or,
if metadata absent, defaults to 0.
min_on_duration : int
Any activation lasting less seconds than min_on_duration will be
ignored. Defaults value from metadata or, if metadata absent,
defaults to 0.
border : int
Number of rows to include before and after the detected activation
on_power_threshold : int or float
Defaults to self.on_power_threshold()
**kwargs : kwargs for self.power_series()
Returns
-------
list of pd.Series. Each series contains one activation.
"""
if on_power_threshold is None:
on_power_threshold = self.on_power_threshold()
if min_off_duration is None:
min_off_duration = self.min_off_duration()
if min_on_duration is None:
min_on_duration = self.min_on_duration()
activations = []
kwargs.setdefault('resample', True)
for chunk in self.power_series(**kwargs):
activations_for_chunk = get_activations(
chunk=chunk, min_off_duration=min_off_duration,
min_on_duration=min_on_duration, border=border,
on_power_threshold=on_power_threshold)
activations.extend(activations_for_chunk)
return activations
def align_two_meters(master, slave, func='power_series'):
"""Returns a generator of 2-column pd.DataFrames. The first column is from
`master`, the second from `slave`.
Takes the sample rate and good_periods of `master` and applies to `slave`.
Parameters
----------
master, slave : ElecMeter or MeterGroup instances
"""
sample_period = master.sample_period()
period_alias = '{:d}S'.format(sample_period)
sections = master.good_sections()
master_generator = getattr(master, func)(sections=sections)
for master_chunk in master_generator:
if len(master_chunk) < 2:
return
chunk_timeframe = TimeFrame(master_chunk.index[0],
master_chunk.index[-1])
slave_generator = getattr(slave, func)(sections=[chunk_timeframe])
slave_chunk = next(slave_generator)
# TODO: do this resampling in the pipeline?
slave_chunk = slave_chunk.resample(period_alias)
if slave_chunk.empty:
continue
master_chunk = master_chunk.resample(period_alias)
yield pd.DataFrame({'master': master_chunk, 'slave': slave_chunk})
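# Hedged usage sketch (assumes a DataSet has already been loaded and that the
# building actually contains a fridge submeter):
#
#   elec = dataset.buildings[1].elec
#   for df in align_two_meters(elec.mains(), elec['fridge']):
#       # df has 'master' and 'slave' columns, resampled onto the mains clock
#       print(df.head())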
def activation_series_for_chunk(*args, **kwargs):
"""Returns runs of an appliance.
Most appliances spend a lot of their time off. This function finds
periods when the appliance is on.
Parameters
----------
chunk : pd.Series
min_off_duration : int
If min_off_duration > 0 then ignore 'off' periods less than
min_off_duration seconds of sub-threshold power consumption
(e.g. a washing machine might draw no power for a short
period while the clothes soak.) Defaults to 0.
min_on_duration : int
Any activation lasting less seconds than min_on_duration will be
ignored. Defaults to 0.
border : int
Number of rows to include before and after the detected activation
on_power_threshold : int or float
Watts
Returns
-------
list of pd.Series. Each series contains one activation.
.. note:: Deprecated
`activation_series` will be removed in NILMTK v0.3.
Please use `get_activations` instead.
"""
warn("`activation_series_for_chunk()` is deprecated."
" Please use `get_activations()` instead!", DeprecationWarning)
return get_activations(*args, **kwargs)
def get_activations(chunk, min_off_duration=0, min_on_duration=0,
border=1, on_power_threshold=5):
"""Returns runs of an appliance.
Most appliances spend a lot of their time off. This function finds
periods when the appliance is on.
Parameters
----------
chunk : pd.Series
min_off_duration : int
If min_off_duration > 0 then ignore 'off' periods less than
min_off_duration seconds of sub-threshold power consumption
(e.g. a washing machine might draw no power for a short
period while the clothes soak.) Defaults to 0.
min_on_duration : int
Any activation lasting less seconds than min_on_duration will be
ignored. Defaults to 0.
border : int
Number of rows to include before and after the detected activation
on_power_threshold : int or float
Watts
Returns
-------
list of pd.Series. Each series contains one activation.
"""
when_on = chunk >= on_power_threshold
# Find state changes
state_changes = when_on.astype(np.int8).diff()
del when_on
switch_on_events = np.where(state_changes == 1)[0]
switch_off_events = np.where(state_changes == -1)[0]
del state_changes
if len(switch_on_events) == 0 or len(switch_off_events) == 0:
return []
# Make sure events align
if switch_off_events[0] < switch_on_events[0]:
switch_off_events = switch_off_events[1:]
if len(switch_off_events) == 0:
return []
if switch_on_events[-1] > switch_off_events[-1]:
switch_on_events = switch_on_events[:-1]
if len(switch_on_events) == 0:
return []
assert len(switch_on_events) == len(switch_off_events)
# Smooth over off-durations less than min_off_duration
if min_off_duration > 0:
off_durations = (chunk.index[switch_on_events[1:]].values -
chunk.index[switch_off_events[:-1]].values)
off_durations = timedelta64_to_secs(off_durations)
above_threshold_off_durations = np.where(
off_durations >= min_off_duration)[0]
# Now remove off_events and on_events
switch_off_events = switch_off_events[
np.concatenate([above_threshold_off_durations,
[len(switch_off_events)-1]])]
switch_on_events = switch_on_events[
np.concatenate([[0], above_threshold_off_durations+1])]
assert len(switch_on_events) == len(switch_off_events)
activations = []
for on, off in zip(switch_on_events, switch_off_events):
duration = (chunk.index[off] - chunk.index[on]).total_seconds()
if duration < min_on_duration:
continue
on -= 1 + border
if on < 0:
on = 0
off += border
activation = chunk.iloc[on:off]
# throw away any activation with any NaN values
if not activation.isnull().values.any():
activations.append(activation)
return activations
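def _example_get_activations():
    """Illustrative sketch, not part of NILMTK: exercise `get_activations()` on a
    tiny synthetic power series. The appliance below switches on twice, so the
    function returns two activations."""
    index = pd.date_range('2015-01-01', periods=12, freq='6S')
    power = pd.Series([0, 0, 2000, 2000, 2000, 0, 0, 2000, 2000, 0, 0, 0],
                      index=index, dtype=float)
    return get_activations(power, on_power_threshold=5)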
def get_vampire_power(power_series):
# This is a very simple function at the moment but
# in the future we may want to implement a more
# sophisticated vampire power function, so it is worth
# calling `get_vampire_power` instead of just doing
# power_series.min() in case we get round to building
# a better vampire power function!
return power_series.min()
| apache-2.0 |
amandersillinois/landlab | landlab/grid/hex.py | 3 | 21190 | #! /usr/env/python
"""Python implementation of HexModelGrid, a grid class used to create and
manage structured Voronoi-Delaunay grids for 2D numerical models.
Do NOT add new documentation here. Grid documentation is now built in a
semi- automated fashion. To modify the text seen on the web, edit the
files `docs/text_for_[gridfile].py.txt`.
"""
import numpy
import xarray as xr
from ..core.utils import as_id_array
from ..graph import DualHexGraph
from .base import ModelGrid
class HexModelGrid(DualHexGraph, ModelGrid):
"""A grid of hexagonal cells.
This inherited class implements a regular 2D grid with hexagonal cells and
triangular patches. It is a special type of VoronoiDelaunay grid in which
the initial set of points is arranged in a triangular/hexagonal lattice.
Examples
--------
Create a hex grid with 3 rows of nodes. The first and third rows will
have 2 nodes, and the second row will have 3 nodes.
>>> from landlab import HexModelGrid
>>> grid = HexModelGrid((3, 2), spacing=1.0)
>>> grid.number_of_nodes
7
>>> grid = HexModelGrid((3, 3), node_layout="rect", spacing=2.0)
>>> grid.status_at_node
array([1, 1, 1, 1, 0, 1, 1, 1, 1], dtype=uint8)
>>> grid = HexModelGrid((3, 3), node_layout="rect", orientation="vertical")
>>> grid.status_at_node
array([1, 1, 1, 1, 1, 0, 1, 1, 1], dtype=uint8)
>>> grid = HexModelGrid((4, 4), node_layout='rect', orientation="vertical")
>>> grid.status_at_node
array([1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1], dtype=uint8)
>>> grid.boundary_nodes
array([ 0, 1, 2, 3, 4, 7, 8, 11, 12, 13, 14, 15])
>>> grid = HexModelGrid((3, 4), node_layout="rect")
>>> grid.status_at_node
array([1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1], dtype=uint8)
"""
def __init__(
self,
shape,
spacing=1.0,
xy_of_lower_left=(0.0, 0.0),
orientation="horizontal",
node_layout="hex",
reorient_links=True,
xy_of_reference=(0.0, 0.0),
xy_axis_name=("x", "y"),
xy_axis_units="-",
):
"""Create a grid of hexagonal cells.
Create a regular 2D grid with hexagonal cells and triangular patches.
It is a special type of VoronoiDelaunay grid in which the initial set
of points is arranged in a triangular/hexagonal lattice.
Parameters
----------
shape : tuple of int
Number of rows and columns of nodes.
spacing : float, optional
Node spacing.
xy_of_lower_left : tuple, optional
Minimum x-of-node and y-of-node values. Depending on the grid
no node may be present at this coordinate. Default is (0., 0.).
xy_of_reference : tuple, optional
Coordinate value in projected space of the reference point,
`xy_of_lower_left`. Default is (0., 0.)
orientation : string, optional
One of the 3 cardinal directions in the grid, either 'horizontal'
(default) or 'vertical'
node_layout : {"hex", "rect"}
The grid layout of nodes.
reorient_links : bool, optional
Whether or not to re-orient all links to point between -45 deg
and +135 deg clockwise from "north" (i.e., along y axis). default
is True.
Returns
-------
HexModelGrid
A newly-created grid.
Examples
--------
Create a hex grid with 3 rows of nodes. The first and third rows will
have 2 nodes, and the second row will have 3 nodes.
>>> from landlab import HexModelGrid
>>> hmg = HexModelGrid((3, 2), spacing=1.0)
>>> hmg.number_of_nodes
7
"""
self._xy_of_lower_left = tuple(numpy.asfarray(xy_of_lower_left))
DualHexGraph.__init__(
self,
shape,
spacing=spacing,
xy_of_lower_left=self.xy_of_lower_left,
orientation=orientation,
node_layout=node_layout,
sort=True,
)
ModelGrid.__init__(
self,
xy_axis_name=xy_axis_name,
xy_axis_units=xy_axis_units,
xy_of_reference=xy_of_reference,
)
self._node_status = numpy.full(
self.number_of_nodes, self.BC_NODE_IS_CORE, dtype=numpy.uint8
)
self._node_status[self.perimeter_nodes] = self.BC_NODE_IS_FIXED_VALUE
@classmethod
def from_dict(cls, kwds):
args = (kwds.pop("shape"),)
return cls(*args, **kwds)
@classmethod
def from_dataset(cls, dataset):
return cls(
tuple(dataset["shape"].values),
spacing=dataset["spacing"],
xy_of_lower_left=dataset["xy_of_lower_left"],
orientation=dataset.attrs["orientation"],
node_layout=dataset.attrs["node_layout"],
)
def as_dataset(self, include="*", exclude=None):
dataset = xr.Dataset(
{
"shape": (("dim",), list(self.shape)),
"spacing": self.spacing,
"xy_of_lower_left": (("dim",), list(self.xy_of_lower_left)),
},
attrs={
"grid_type": "triangular",
"node_layout": self.node_layout,
"orientation": self.orientation,
},
)
return dataset.update(
super(HexModelGrid, self).as_dataset(include=include, exclude=exclude)
)
@property
def xy_of_lower_left(self):
"""Return (x, y) of the reference point."""
return self._xy_of_lower_left
@xy_of_lower_left.setter
def xy_of_lower_left(self, xy_of_lower_left):
"""Set a new value for the xy_of_lower_left."""
dx = self.xy_of_lower_left[0] - xy_of_lower_left[0]
dy = self.xy_of_lower_left[1] - xy_of_lower_left[1]
# self._xy_of_node -= (dx, dy)
with self.thawed():
self.x_of_node[:] -= dx
self.y_of_node[:] -= dy
self._xy_of_lower_left = tuple(xy_of_lower_left)
@property
def number_of_node_columns(self):
"""Number of node columns hex grid.
Number of node columns in a rectangular-shaped and/or
vertically oriented hex grid.
Returns the number of columns, including boundaries.
Notes
-----
Will generate an error if called with a hex-shaped, horizontally
aligned grid.
Examples
--------
>>> from landlab import HexModelGrid
>>> grid = HexModelGrid((5, 5), node_layout="rect")
>>> grid.number_of_node_columns
5
LLCATS: GINF NINF
"""
return self.shape[1]
@property
def number_of_node_rows(self):
"""Number of node rows in a rectangular-shaped and/or horizontally
oriented hex grid.
Returns the number of rows, including boundaries.
Notes
-----
Will generate an error if called with a hex-shaped, vertically
aligned grid.
Examples
--------
>>> from landlab import HexModelGrid
>>> grid = HexModelGrid((5, 5), node_layout="rect")
>>> grid.number_of_node_rows
5
LLCATS: GINF NINF
"""
return self._shape[0]
def node_row_and_column(self, node_id):
"""Row and column from node ID, FOR VERT RECT CONFIGURATION ONLY.
Examples
--------
>>> from landlab import HexModelGrid
>>> grid = HexModelGrid((3, 4), node_layout='rect', orientation="vertical")
>>> grid.node_row_and_column(5)
(1, 2)
>>> grid = HexModelGrid((3, 5), node_layout='rect', orientation="vertical")
>>> grid.node_row_and_column(13)
(2, 1)
"""
assert self.orientation[0] == "v", "grid orientation must be vertical"
try:
(nr, nc) = self._shape
except AttributeError:
raise AttributeError(
"Only rectangular Hex grids have defined rows and columns."
)
row = node_id // nc
n_mod_nc = node_id % nc
half_nc = (nc + 1) // 2
col = 2 * (n_mod_nc % half_nc) + n_mod_nc // half_nc
return (row, col)
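# Worked example of the column mapping above, matching the (3, 5) doctest:
# node 13 with nc = 5 gives row = 13 // 5 = 2, n_mod_nc = 13 % 5 = 3,
# half_nc = (5 + 1) // 2 = 3, col = 2 * (3 % 3) + 3 // 3 = 1, i.e. (2, 1).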
def _configure_hexplot(self, data, data_label=None, color_map=None):
"""Sets up necessary information for making plots of the hexagonal grid
colored by a given data element.
Parameters
----------
data : str OR node array (1d numpy array with number_of_nodes entries)
Data field to be colored
data_label : str, optional
Label for colorbar
color_map : matplotlib colormap object, None
Color map to apply (defaults to "jet")
Returns
-------
(none)
Notes
-----
Creates and stores a PatchCollection representing the hexagons. Also
stores a handle to the current plotting axis. Both of these are then
used by hexplot().
"""
import matplotlib
from matplotlib.collections import PatchCollection
from matplotlib.patches import Polygon
from numpy import array, sqrt, zeros
# color
if color_map is None:
color_map = matplotlib.cm.jet
# geometry
apothem = self.spacing / 2.0
# distance from node to each hexagon cell vertex
radius = 2.0 * apothem / sqrt(3.0)
# offsets from node x,y position
offsets = zeros((6, 2))
poly_verts = zeros((6, 2))
# Figure out whether the orientation is horizontal or vertical
if self.orientation[0] == "h": # horizontal
offsets[:, 0] = array([0.0, apothem, apothem, 0.0, -apothem, -apothem])
offsets[:, 1] = array(
[
radius,
radius / 2.0,
-radius / 2.0,
-radius,
-radius / 2.0,
radius / 2.0,
]
)
else: # vertical
offsets[:, 0] = array(
[
radius / 2.0,
radius,
radius / 2.0,
-radius / 2.0,
-radius,
-radius / 2.0,
]
)
offsets[:, 1] = array([apothem, 0.0, -apothem, -apothem, 0.0, apothem])
patches = []
for i in range(self.number_of_nodes):
poly_verts[:, 0] = self.node_x[i] + offsets[:, 0]
poly_verts[:, 1] = self.node_y[i] + offsets[:, 1]
p = Polygon(poly_verts, True)
patches.append(p)
self._hexplot_pc = PatchCollection(
patches, cmap=color_map, edgecolor="none", linewidth=0.0
)
self._hexplot_configured = True
def hexplot(self, data, data_label=None, color_map=None):
"""Create a plot of the grid elements.
Creates a plot of the grid and one node-data field, showing hexagonal
cells colored by values in the field.
Parameters
----------
data : str or node array (1d numpy array with number_of_nodes entries)
Data field to be colored.
data_label : str, optional
Label for colorbar.
color_map : matplotlib colormap object, None
Color map to apply (defaults to "jet")
See also
--------
plot.imshow_grid
Another Landlab function capable of producing hexplots, with a
fuller-featured set of options.
LLCATS: GINF
"""
import copy
import matplotlib.pyplot as plt
from numpy import amax, amin, array
try:
self._hexplot_configured
except AttributeError:
self._configure_hexplot(data, data_label, color_map)
else:
if self._hexplot_pc.cmap != color_map:
self._configure_hexplot(data, data_label, color_map)
# Handle *data*: if it's a numpy array, then we consider it the
# data to be plotted. If it's a string, we consider it the name of the
# node-field to plot, and we fetch it.
if type(data) is str:
data_label = data
data = self.at_node[data]
ax = plt.gca()
self._hexplot_pc.set_array(array(data))
copy_of_pc = copy.copy(self._hexplot_pc)
ax.add_collection(copy_of_pc)
plt.xlim([amin(self.node_x) - self.spacing, amax(self.node_x) + self.spacing])
plt.ylim([amin(self.node_y) - self.spacing, amax(self.node_y) + self.spacing])
return ax
def set_watershed_boundary_condition_outlet_id(
self, outlet_id, node_data, nodata_value=-9999.0
):
"""Set the boundary conditions for a watershed on a HexModelGrid.
All nodes with nodata_value are set to BC_NODE_IS_CLOSED.
All nodes with data values are set to BC_NODE_IS_CORE, with the
exception that the outlet node is set to a BC_NODE_IS_FIXED_VALUE.
Note that the outer ring of the HexModelGrid is set to BC_NODE_IS_CLOSED, even
if there are nodes that have values. The only exception to this would
be if the outlet node is on the boundary, which is acceptable.
Assumes that the id of the outlet is already known.
This assumes that the grid has a single watershed. If this is not
the case this will not work.
Parameters
----------
outlet_id : integer
id of the outlet node
node_data : field name or ndarray
At-node field name or at-node data values to use for identifying
watershed location.
nodata_value : float, optional
Value that indicates an invalid value.
Examples
--------
The example will use a *HexModelGrid* with node data values
as illustrated::
1. , 2. , 3. , 4. ,
0.5, 1.5, 2.5, 3.5, 4.5,
0. , 1. , 2. , 3. , 4. , 5.,
0.5, 1.5, 2.5, 3.5, 4.5,
1. , 2. , 3. , 4.
>>> from landlab import HexModelGrid
>>> hmg = HexModelGrid((5, 4))
>>> z = hmg.add_zeros("topographic__elevation", at="node")
>>> z += hmg.x_of_node + 1.0
>>> hmg.status_at_node
array([1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1,
1], dtype=uint8)
>>> outlet = hmg.set_watershed_boundary_condition_outlet_id(9, z, -9999.)
>>> hmg.status_at_node
array([4, 4, 4, 4, 4, 0, 0, 0, 4, 1, 0, 0, 0, 0, 4, 4, 0, 0, 0, 4, 4, 4, 4,
4], dtype=uint8)
LLCATS: BC
"""
# get node_data if a field name
node_data = self.return_array_or_field_values("node", node_data)
# make ring of no data nodes
self.status_at_node[self.boundary_nodes] = self.BC_NODE_IS_CLOSED
# set no data nodes to inactive boundaries
self.set_nodata_nodes_to_closed(node_data, nodata_value)
# set the boundary condition (fixed value) at the outlet_node
self.status_at_node[outlet_id] = self.BC_NODE_IS_FIXED_VALUE
def set_watershed_boundary_condition(
self, node_data, nodata_value=-9999.0, return_outlet_id=False
):
"""Finds the node adjacent to a boundary node with the smallest value.
This node is set as the outlet. The outlet node must have a data
value. Can return the outlet id as a one element numpy array if
return_outlet_id is set to True.
All nodes with nodata_value are set to `NodeStatus.CLOSED`
(grid.status_at_node == 4). All nodes with data values are set to
`NodeStatus.CORE` (grid.status_at_node == 0), with the exception that the
outlet node is set to a `NodeStatus.FIXED_VALUE` (grid.status_at_node == 1).
Note that the outer ring (perimeter) of the grid is set to
`NodeStatus.CLOSED`, even if there are nodes that have values. The only
exception to this would be if the outlet node is on the perimeter, which
is acceptable.
This routine assumes that all of the nodata_values are on the outside of
the data values. In other words, there are no islands of nodata_values
surrounded by nodes with data.
This also assumes that the grid has a single watershed (that is a single
outlet node).
Parameters
----------
node_data : field name or ndarray
At-node field name or at-node data values to use for identifying
watershed location.
nodata_value : float, optional
Value that indicates an invalid value.
return_outlet_id : boolean, optional
Indicates whether or not to return the id of the found outlet
Examples
--------
The example will use a HexModelGrid with node data values
as illustrated::
1. , 2. , 3. , 4. ,
0.5, 1.5, 2.5, 3.5, 4.5,
0. , 1. , 2. , 3. , 4. , 5.,
0.5, 1.5, 2.5, 3.5, 4.5,
1. , 2. , 3. , 4.
>>> from landlab import HexModelGrid
>>> hmg = HexModelGrid((5, 4))
>>> z = hmg.add_zeros("topographic__elevation", at="node")
>>> z += hmg.x_of_node + 1.0
>>> out_id = hmg.set_watershed_boundary_condition(z, -9999., True)
>>> out_id
array([9])
>>> hmg.status_at_node
array([4, 4, 4, 4, 4, 0, 0, 0, 4, 1, 0, 0, 0, 0, 4, 4, 0, 0, 0, 4, 4, 4, 4,
4], dtype=uint8)
LLCATS: BC
"""
# get node_data if a field name
node_data = self.return_array_or_field_values("node", node_data)
# make ring of no data nodes
self.status_at_node[self.boundary_nodes] = self.BC_NODE_IS_CLOSED
# set no data nodes to inactive boundaries
self.set_nodata_nodes_to_closed(node_data, nodata_value)
# locs is a list that contains locations where
# node data is not equal to the nodata value
locs = numpy.where(node_data != nodata_value)
if len(locs) < 1:
raise ValueError("All data values are no_data values")
# now find minimum of the data values
min_val = numpy.min(node_data[locs])
# now find where minimum values are
min_locs = numpy.where(node_data == min_val)[0]
# check all the locations with the minimum value to see if one
# of them is adjacent to a boundary node
not_found = True
while not_found:
# now check the min locations to see if any are next to
# a boundary node
local_not_found = True
# check all nodes rather than selecting the first node that meets
# the criteria
next_to_boundary = self.node_has_boundary_neighbor()[(min_locs,)]
# if any of those nodes were adjacent to the boundary, check
# that there is only one. If only one, set as outlet loc, else,
# raise a value error
if numpy.any(next_to_boundary):
local_not_found = False
if sum(next_to_boundary) > 1:
potential_locs = min_locs[
numpy.where(numpy.asarray(next_to_boundary))[0]
]
raise ValueError(
(
"Grid has multiple potential outlet nodes. "
"They have the following node IDs: \n"
+ str(potential_locs)
+ "\nUse the method set_watershed_boundary_condition_outlet_id "
"to explicitly select one of these "
"IDs as the outlet node."
)
)
else:
outlet_loc = min_locs[numpy.where(next_to_boundary)[0][0]]
# checked all of the current minimum-value nodes and none of them
# were outlet candidates
if local_not_found:
# need to find the next largest minimum value
# first find the locations of all values greater
# than the old minimum
# not done with outer while
locs = numpy.where((node_data > min_val) & (node_data != nodata_value))
# now find new minimum of these values
min_val = numpy.min(node_data[locs])
min_locs = numpy.where(node_data == min_val)[0]
else:
# if locally found, it is also globally found
# so done with outer while
not_found = False
# set outlet boundary condition
self.status_at_node[outlet_loc] = self.BC_NODE_IS_FIXED_VALUE
if return_outlet_id:
return as_id_array(numpy.array([outlet_loc]))
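# Illustrative sketch (not part of landlab): the outlet-selection loop above,
# restated with plain NumPy on 1-D arrays. `node_data` holds node values and
# `has_boundary_neighbor` is a boolean mask marking nodes that touch the grid
# perimeter; both names are hypothetical stand-ins for the grid machinery.
def _outlet_selection_sketch(node_data, has_boundary_neighbor,
                             nodata_value=-9999.0):
    """Return the id of the smallest-valued node adjacent to the boundary."""
    import numpy as np
    valid = node_data != nodata_value
    for val in np.unique(node_data[valid]):  # ascending candidate values
        locs = np.where(node_data == val)[0]
        adjacent = locs[has_boundary_neighbor[locs]]
        if adjacent.size == 1:
            return adjacent[0]
        if adjacent.size > 1:
            raise ValueError("multiple potential outlet nodes: %s" % adjacent)
    raise ValueError("no valid outlet node found")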
| mit |
ravindrapanda/tensorflow | tensorflow/contrib/losses/python/metric_learning/metric_loss_ops.py | 24 | 40534 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implements various metric learning losses."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import logging_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import script_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.summary import summary
try:
# pylint: disable=g-import-not-at-top
from sklearn import metrics
HAS_SKLEARN = True
except ImportError:
HAS_SKLEARN = False
def pairwise_distance(feature, squared=False):
"""Computes the pairwise distance matrix with numerical stability.
output[i, j] = || feature[i, :] - feature[j, :] ||_2
Args:
feature: 2-D Tensor of size [number of data, feature dimension].
squared: Boolean, whether or not to square the pairwise distances.
Returns:
pairwise_distances: 2-D Tensor of size [number of data, number of data].
"""
pairwise_distances_squared = math_ops.add(
math_ops.reduce_sum(
math_ops.square(feature),
axis=[1],
keep_dims=True),
math_ops.reduce_sum(
math_ops.square(
array_ops.transpose(feature)),
axis=[0],
keep_dims=True)) - 2.0 * math_ops.matmul(
feature, array_ops.transpose(feature))
# Deal with numerical inaccuracies. Set small negatives to zero.
pairwise_distances_squared = math_ops.maximum(pairwise_distances_squared, 0.0)
# Get the mask where the zero distances are at.
error_mask = math_ops.less_equal(pairwise_distances_squared, 0.0)
# Optionally take the sqrt.
if squared:
pairwise_distances = pairwise_distances_squared
else:
pairwise_distances = math_ops.sqrt(
pairwise_distances_squared + math_ops.to_float(error_mask) * 1e-16)
# Undo conditionally adding 1e-16.
pairwise_distances = math_ops.multiply(
pairwise_distances, math_ops.to_float(math_ops.logical_not(error_mask)))
num_data = array_ops.shape(feature)[0]
# Explicitly set diagonals to zero.
mask_offdiagonals = array_ops.ones_like(pairwise_distances) - array_ops.diag(
array_ops.ones([num_data]))
pairwise_distances = math_ops.multiply(pairwise_distances, mask_offdiagonals)
return pairwise_distances
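# Illustrative sketch (not part of this module): the same identity
# ||a - b||^2 = ||a||^2 + ||b||^2 - 2 a.b evaluated eagerly with NumPy.
# `_pairwise_distance_numpy` is a hypothetical helper for reference only.
def _pairwise_distance_numpy(feature, squared=False):
    import numpy as np
    sq_norms = np.sum(np.square(feature), axis=1)
    dist_sq = sq_norms[:, None] + sq_norms[None, :] - 2.0 * feature.dot(feature.T)
    dist_sq = np.maximum(dist_sq, 0.0)  # clip small negatives from round-off
    np.fill_diagonal(dist_sq, 0.0)      # exact zeros on the diagonal
    return dist_sq if squared else np.sqrt(dist_sq)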
def contrastive_loss(labels, embeddings_anchor, embeddings_positive,
margin=1.0):
"""Computes the contrastive loss.
This loss encourages the embeddings to be close to each other for
samples of the same label and to be at least the margin constant apart
for samples of different labels.
See: http://yann.lecun.com/exdb/publis/pdf/hadsell-chopra-lecun-06.pdf
Args:
labels: 1-D tf.int32 `Tensor` with shape [batch_size] of
binary labels indicating positive vs negative pair.
embeddings_anchor: 2-D float `Tensor` of embedding vectors for the anchor
images. Embeddings should be l2 normalized.
embeddings_positive: 2-D float `Tensor` of embedding vectors for the
positive images. Embeddings should be l2 normalized.
margin: margin term in the loss definition.
Returns:
contrastive_loss: tf.float32 scalar.
"""
# Get per pair distances
distances = math_ops.sqrt(
math_ops.reduce_sum(
math_ops.square(embeddings_anchor - embeddings_positive), 1))
# Add contrastive loss for the siamese network.
# label here is {0,1} for neg, pos.
return math_ops.reduce_mean(
math_ops.to_float(labels) * math_ops.square(distances) +
(1. - math_ops.to_float(labels)) *
math_ops.square(math_ops.maximum(margin - distances, 0.)),
name='contrastive_loss')
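# Illustrative sketch (not part of this module): the contrastive loss above,
# L = mean(y * d^2 + (1 - y) * max(margin - d, 0)^2), written with NumPy.
# `_contrastive_loss_numpy` is a hypothetical helper for reference only.
def _contrastive_loss_numpy(labels, emb_anchor, emb_positive, margin=1.0):
    import numpy as np
    d = np.sqrt(np.sum(np.square(emb_anchor - emb_positive), axis=1))
    y = np.asarray(labels, dtype=np.float64)
    return np.mean(y * d ** 2 + (1.0 - y) * np.maximum(margin - d, 0.0) ** 2)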
def masked_maximum(data, mask, dim=1):
"""Computes the axis wise maximum over chosen elements.
Args:
data: 2-D float `Tensor` of size [n, m].
mask: 2-D Boolean `Tensor` of size [n, m].
dim: The dimension over which to compute the maximum.
Returns:
masked_maximums: N-D `Tensor`.
The maximized dimension is of size 1 after the operation.
"""
axis_minimums = math_ops.reduce_min(data, dim, keep_dims=True)
masked_maximums = math_ops.reduce_max(
math_ops.multiply(
data - axis_minimums, mask), dim, keep_dims=True) + axis_minimums
return masked_maximums
def masked_minimum(data, mask, dim=1):
"""Computes the axis wise minimum over chosen elements.
Args:
data: 2-D float `Tensor` of size [n, m].
mask: 2-D Boolean `Tensor` of size [n, m].
dim: The dimension over which to compute the minimum.
Returns:
masked_minimums: N-D `Tensor`.
The minimized dimension is of size 1 after the operation.
"""
axis_maximums = math_ops.reduce_max(data, dim, keep_dims=True)
masked_minimums = math_ops.reduce_min(
math_ops.multiply(
data - axis_maximums, mask), dim, keep_dims=True) + axis_maximums
return masked_minimums
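# Illustrative note (not part of this module): the shift trick used by
# masked_maximum/masked_minimum. Subtracting the row extreme first makes every
# entry non-negative (or non-positive), so multiplying by the 0/1 mask can
# never promote a masked-out entry past a valid one; adding the extreme back
# restores the original scale. A hypothetical NumPy equivalent of
# masked_maximum:
def _masked_maximum_numpy(data, mask):
    import numpy as np
    row_min = data.min(axis=1, keepdims=True)
    return ((data - row_min) * mask).max(axis=1, keepdims=True) + row_min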
def triplet_semihard_loss(labels, embeddings, margin=1.0):
"""Computes the triplet loss with semi-hard negative mining.
The loss encourages the positive distance (between a pair of embeddings with
the same label) to be smaller, by at least the margin constant, than the
smallest negative distance that already exceeds the positive distance
(the so-called semi-hard negative) in the mini-batch. If no such negative
exists, the largest negative distance is used instead.
See: https://arxiv.org/abs/1503.03832.
Args:
labels: 1-D tf.int32 `Tensor` with shape [batch_size] of
multiclass integer labels.
embeddings: 2-D float `Tensor` of embedding vectors. Embeddings should
be l2 normalized.
margin: Float, margin term in the loss definition.
Returns:
triplet_loss: tf.float32 scalar.
"""
# Reshape [batch_size] label tensor to a [batch_size, 1] label tensor.
lshape = array_ops.shape(labels)
assert lshape.shape == 1
labels = array_ops.reshape(labels, [lshape[0], 1])
# Build pairwise squared distance matrix.
pdist_matrix = pairwise_distance(embeddings, squared=True)
# Build pairwise binary adjacency matrix.
adjacency = math_ops.equal(labels, array_ops.transpose(labels))
# Invert so we can select negatives only.
adjacency_not = math_ops.logical_not(adjacency)
batch_size = array_ops.size(labels)
# Compute the mask.
pdist_matrix_tile = array_ops.tile(pdist_matrix, [batch_size, 1])
mask = math_ops.logical_and(
array_ops.tile(adjacency_not, [batch_size, 1]),
math_ops.greater(
pdist_matrix_tile, array_ops.reshape(
array_ops.transpose(pdist_matrix), [-1, 1])))
mask_final = array_ops.reshape(
math_ops.greater(
math_ops.reduce_sum(
math_ops.cast(
mask, dtype=dtypes.float32), 1, keep_dims=True),
0.0), [batch_size, batch_size])
mask_final = array_ops.transpose(mask_final)
adjacency_not = math_ops.cast(adjacency_not, dtype=dtypes.float32)
mask = math_ops.cast(mask, dtype=dtypes.float32)
# negatives_outside: smallest D_an where D_an > D_ap.
negatives_outside = array_ops.reshape(
masked_minimum(pdist_matrix_tile, mask), [batch_size, batch_size])
negatives_outside = array_ops.transpose(negatives_outside)
# negatives_inside: largest D_an.
negatives_inside = array_ops.tile(
masked_maximum(pdist_matrix, adjacency_not), [1, batch_size])
semi_hard_negatives = array_ops.where(
mask_final, negatives_outside, negatives_inside)
loss_mat = math_ops.add(margin, pdist_matrix - semi_hard_negatives)
mask_positives = math_ops.cast(
adjacency, dtype=dtypes.float32) - array_ops.diag(
array_ops.ones([batch_size]))
# In lifted-struct, the authors multiply by 0.5 for the upper triangular part;
# in semihard, they take all positive pairs except the diagonal.
num_positives = math_ops.reduce_sum(mask_positives)
triplet_loss = math_ops.truediv(
math_ops.reduce_sum(
math_ops.maximum(
math_ops.multiply(loss_mat, mask_positives), 0.0)),
num_positives,
name='triplet_semihard_loss')
return triplet_loss
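# Illustrative sketch (not part of this module): the semi-hard selection rule
# described in the triplet_semihard_loss docstring, for one anchor-positive
# pair, in NumPy. `d` holds distances from the anchor to every sample,
# `is_negative` marks samples with a different label and `d_ap` is the
# anchor-positive distance; all three names are hypothetical.
def _semi_hard_negative_numpy(d, is_negative, d_ap):
    import numpy as np
    negatives = np.asarray(d)[np.asarray(is_negative)]
    outside = negatives[negatives > d_ap]  # negatives farther than the positive
    return outside.min() if outside.size else negatives.max()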
# pylint: disable=line-too-long
def npairs_loss(labels, embeddings_anchor, embeddings_positive,
reg_lambda=0.002, print_losses=False):
"""Computes the npairs loss.
Npairs loss expects paired data where a pair is composed of samples from the
same label and each pair in the minibatch has a different label. The loss
has two components. The first component is the L2 regularizer on the
embedding vectors. The second component is the sum of cross entropy loss
which takes each row of the pair-wise similarity matrix as logits and
the remapped one-hot labels as labels.
See: http://www.nec-labs.com/uploads/images/Department-Images/MediaAnalytics/papers/nips16_npairmetriclearning.pdf
Args:
labels: 1-D tf.int32 `Tensor` of shape [batch_size/2].
embeddings_anchor: 2-D Tensor of shape [batch_size/2, embedding_dim] for the
embedding vectors for the anchor images. Embeddings should not be
l2 normalized.
embeddings_positive: 2-D Tensor of shape [batch_size/2, embedding_dim] for the
embedding vectors for the positive images. Embeddings should not be
l2 normalized.
reg_lambda: Float. L2 regularization term on the embedding vectors.
print_losses: Boolean. Option to print the xent and l2loss.
Returns:
npairs_loss: tf.float32 scalar.
"""
# pylint: enable=line-too-long
# Add the regularizer on the embedding.
reg_anchor = math_ops.reduce_mean(
math_ops.reduce_sum(math_ops.square(embeddings_anchor), 1))
reg_positive = math_ops.reduce_mean(
math_ops.reduce_sum(math_ops.square(embeddings_positive), 1))
l2loss = math_ops.multiply(
0.25 * reg_lambda, reg_anchor + reg_positive, name='l2loss')
# Get per pair similarities.
similarity_matrix = math_ops.matmul(
embeddings_anchor, embeddings_positive, transpose_a=False,
transpose_b=True)
# Reshape [batch_size] label tensor to a [batch_size, 1] label tensor.
lshape = array_ops.shape(labels)
assert lshape.shape == 1
labels = array_ops.reshape(labels, [lshape[0], 1])
labels_remapped = math_ops.to_float(
math_ops.equal(labels, array_ops.transpose(labels)))
labels_remapped /= math_ops.reduce_sum(labels_remapped, 1, keep_dims=True)
# Add the softmax loss.
xent_loss = nn.softmax_cross_entropy_with_logits(
logits=similarity_matrix, labels=labels_remapped)
xent_loss = math_ops.reduce_mean(xent_loss, name='xentropy')
if print_losses:
xent_loss = logging_ops.Print(
xent_loss, ['cross entropy:', xent_loss, 'l2loss:', l2loss])
return l2loss + xent_loss
def _build_multilabel_adjacency(sparse_labels):
"""Builds multilabel adjacency matrix.
As of March 14th, 2017, there's no op for the dot product between
two sparse tensors in TF. However, there is `sparse_minimum` op which is
equivalent to an AND op between two sparse boolean tensors.
This computes the dot product between two sparse boolean inputs.
Args:
sparse_labels: List of 1-D boolean sparse tensors.
Returns:
adjacency_matrix: 2-D dense `Tensor`.
"""
num_pairs = len(sparse_labels)
adjacency_matrix = array_ops.zeros([num_pairs, num_pairs])
for i in range(num_pairs):
for j in range(num_pairs):
sparse_dot_product = math_ops.to_float(
sparse_ops.sparse_reduce_sum(sparse_ops.sparse_minimum(
sparse_labels[i], sparse_labels[j])))
sparse_dot_product = array_ops.expand_dims(sparse_dot_product, 0)
sparse_dot_product = array_ops.expand_dims(sparse_dot_product, 1)
one_hot_matrix = array_ops.pad(sparse_dot_product,
[[i, num_pairs-i-1],
[j, num_pairs-j-1]], 'CONSTANT')
adjacency_matrix += one_hot_matrix
return adjacency_matrix
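# Illustrative sketch (not part of this module): the same label-intersection
# count written with ordinary Python sets instead of sparse tensor ops.
# `label_sets` (a list of sets of class ids, one per sample) is hypothetical.
def _multilabel_adjacency_sketch(label_sets):
    import numpy as np
    n = len(label_sets)
    adjacency = np.zeros((n, n))
    for i in range(n):
        for j in range(n):
            adjacency[i, j] = len(label_sets[i] & label_sets[j])
    return adjacency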
def npairs_loss_multilabel(sparse_labels, embeddings_anchor,
embeddings_positive, reg_lambda=0.002,
print_losses=False):
r"""Computes the npairs loss with multilabel data.
Npairs loss expects paired data where a pair is composed of samples from the
same label and each pair in the minibatch has a different label. The loss
has two components. The first component is the L2 regularizer on the
embedding vectors. The second component is the sum of cross entropy loss
which takes each row of the pair-wise similarity matrix as logits and
the remapped one-hot labels as labels. Here, the similarity is defined by the
dot product between two embedding vectors. S_{i,j} = f(x_i)^T f(x_j)
To deal with multilabel inputs, we use the count of label intersection
i.e. L_{i,j} = | set_of_labels_for(i) \cap set_of_labels_for(j) |
Then we normalize each row of the count-based label matrix so that each row
sums to one.
Args:
sparse_labels: List of 1-D Boolean `SparseTensor` of dense_shape
[batch_size/2, num_classes] labels for the anchor-pos pairs.
embeddings_anchor: 2-D `Tensor` of shape [batch_size/2, embedding_dim] for
the embedding vectors for the anchor images. Embeddings should not be
l2 normalized.
embeddings_positive: 2-D `Tensor` of shape [batch_size/2, embedding_dim] for
the embedding vectors for the positive images. Embeddings should not be
l2 normalized.
reg_lambda: Float. L2 regularization term on the embedding vectors.
print_losses: Boolean. Option to print the xent and l2loss.
Returns:
npairs_loss: tf.float32 scalar.
Raises:
TypeError: When the specified sparse_labels is not a `SparseTensor`.
"""
if not all(isinstance(l, sparse_tensor.SparseTensor) for l in sparse_labels):
raise TypeError(
'sparse_labels must be a list of SparseTensors, but got %s' % str(
sparse_labels))
with ops.name_scope('NpairsLossMultiLabel'):
# Add the regularizer on the embedding.
reg_anchor = math_ops.reduce_mean(
math_ops.reduce_sum(math_ops.square(embeddings_anchor), 1))
reg_positive = math_ops.reduce_mean(
math_ops.reduce_sum(math_ops.square(embeddings_positive), 1))
l2loss = math_ops.multiply(0.25 * reg_lambda,
reg_anchor + reg_positive, name='l2loss')
# Get per pair similarities.
similarity_matrix = math_ops.matmul(
embeddings_anchor, embeddings_positive, transpose_a=False,
transpose_b=True)
# TODO(coreylynch): need to check the sparse values
# TODO(coreylynch): are composed only of 0's and 1's.
multilabel_adjacency_matrix = _build_multilabel_adjacency(sparse_labels)
labels_remapped = math_ops.to_float(multilabel_adjacency_matrix)
labels_remapped /= math_ops.reduce_sum(labels_remapped, 1, keep_dims=True)
# Add the softmax loss.
xent_loss = nn.softmax_cross_entropy_with_logits(
logits=similarity_matrix, labels=labels_remapped)
xent_loss = math_ops.reduce_mean(xent_loss, name='xentropy')
if print_losses:
xent_loss = logging_ops.Print(
xent_loss, ['cross entropy:', xent_loss, 'l2loss:', l2loss])
return l2loss + xent_loss
def lifted_struct_loss(labels, embeddings, margin=1.0):
"""Computes the lifted structured loss.
The loss encourages the positive distances (between a pair of embeddings
with the same labels) to be smaller than any negative distances (between a
pair of embeddings with different labels) in the mini-batch in a way
that is differentiable with respect to the embedding vectors.
See: https://arxiv.org/abs/1511.06452.
Args:
labels: 1-D tf.int32 `Tensor` with shape [batch_size] of
multiclass integer labels.
embeddings: 2-D float `Tensor` of embedding vectors. Embeddings should not
be l2 normalized.
margin: Float, margin term in the loss definition.
Returns:
lifted_loss: tf.float32 scalar.
"""
# Reshape [batch_size] label tensor to a [batch_size, 1] label tensor.
lshape = array_ops.shape(labels)
assert lshape.shape == 1
labels = array_ops.reshape(labels, [lshape[0], 1])
# Build pairwise squared distance matrix.
pairwise_distances = pairwise_distance(embeddings)
# Build pairwise binary adjacency matrix.
adjacency = math_ops.equal(labels, array_ops.transpose(labels))
# Invert so we can select negatives only.
adjacency_not = math_ops.logical_not(adjacency)
batch_size = array_ops.size(labels)
diff = margin - pairwise_distances
mask = math_ops.cast(adjacency_not, dtype=dtypes.float32)
# Safe maximum: Temporarily shift negative distances
# above zero before taking max.
# this is to take the max only among negatives.
row_minimums = math_ops.reduce_min(diff, 1, keep_dims=True)
row_negative_maximums = math_ops.reduce_max(
math_ops.multiply(
diff - row_minimums, mask), 1, keep_dims=True) + row_minimums
# Compute the loss.
# Keep track of matrix of maximums where M_ij = max(m_i, m_j)
# where m_i is the max of alpha - negative D_i's.
# This matches the Caffe loss layer implementation at:
# https://github.com/rksltnl/Caffe-Deep-Metric-Learning-CVPR16/blob/0efd7544a9846f58df923c8b992198ba5c355454/src/caffe/layers/lifted_struct_similarity_softmax_layer.cpp # pylint: disable=line-too-long
max_elements = math_ops.maximum(
row_negative_maximums, array_ops.transpose(row_negative_maximums))
diff_tiled = array_ops.tile(diff, [batch_size, 1])
mask_tiled = array_ops.tile(mask, [batch_size, 1])
max_elements_vect = array_ops.reshape(
array_ops.transpose(max_elements), [-1, 1])
loss_exp_left = array_ops.reshape(
math_ops.reduce_sum(math_ops.multiply(
math_ops.exp(
diff_tiled - max_elements_vect),
mask_tiled), 1, keep_dims=True), [batch_size, batch_size])
loss_mat = max_elements + math_ops.log(
loss_exp_left + array_ops.transpose(loss_exp_left))
# Add the positive distance.
loss_mat += pairwise_distances
mask_positives = math_ops.cast(
adjacency, dtype=dtypes.float32) - array_ops.diag(
array_ops.ones([batch_size]))
# *0.5 for upper triangular, and another *0.5 for 1/2 factor for loss^2.
num_positives = math_ops.reduce_sum(mask_positives) / 2.0
lifted_loss = math_ops.truediv(
0.25 * math_ops.reduce_sum(
math_ops.square(
math_ops.maximum(
math_ops.multiply(loss_mat, mask_positives), 0.0))),
num_positives,
name='liftedstruct_loss')
return lifted_loss
def update_1d_tensor(y, index, value):
"""Updates 1d tensor y so that y[index] = value.
Args:
y: 1-D Tensor.
index: index of y to modify.
value: new value to write at y[index].
Returns:
y_mod: 1-D Tensor. Tensor y after the update.
"""
value = array_ops.squeeze(value)
# modify the 1D tensor x at index with value.
# ex) chosen_ids = update_1D_tensor(chosen_ids, cluster_idx, best_medoid)
y_before = array_ops.slice(y, [0], [index])
y_after = array_ops.slice(y, [index + 1], [-1])
y_mod = array_ops.concat([y_before, [value], y_after], 0)
return y_mod
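# Illustrative note (not part of this module): the slice/concat dance above is
# needed because TF 1.x tensors cannot be assigned to in place. The NumPy
# equivalent is a one-line indexed assignment on a copy (hypothetical helper):
def _update_1d_array_numpy(y, index, value):
    import numpy as np
    y_mod = np.array(y, copy=True)
    y_mod[index] = value
    return y_mod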
def get_cluster_assignment(pairwise_distances, centroid_ids):
"""Assign data points to the neareset centroids.
Tensorflow has numerical instability and doesn't always choose
the data point with theoretically zero distance as it's nearest neighbor.
Thus, for each centroid in centroid_ids, explicitly assign
the centroid itself as the nearest centroid.
This is done through the mask tensor and the constraint_vect tensor.
Args:
pairwise_distances: 2-D Tensor of pairwise distances.
centroid_ids: 1-D Tensor of centroid indices.
Returns:
y_fixed: 1-D tensor of cluster assignment.
"""
predictions = math_ops.argmin(
array_ops.gather(pairwise_distances, centroid_ids), dimension=0)
batch_size = array_ops.shape(pairwise_distances)[0]
# Deal with numerical instability
mask = math_ops.reduce_any(array_ops.one_hot(
centroid_ids, batch_size, True, False, axis=-1, dtype=dtypes.bool),
axis=0)
constraint_one_hot = math_ops.multiply(
array_ops.one_hot(centroid_ids,
batch_size,
array_ops.constant(1, dtype=dtypes.int64),
array_ops.constant(0, dtype=dtypes.int64),
axis=0,
dtype=dtypes.int64),
math_ops.to_int64(math_ops.range(array_ops.shape(centroid_ids)[0])))
constraint_vect = math_ops.reduce_sum(
array_ops.transpose(constraint_one_hot), axis=0)
y_fixed = array_ops.where(mask, constraint_vect, predictions)
return y_fixed
def compute_facility_energy(pairwise_distances, centroid_ids):
"""Compute the average travel distance to the assigned centroid.
Args:
pairwise_distances: 2-D Tensor of pairwise distances.
centroid_ids: 1-D Tensor of indices.
Returns:
facility_energy: dtypes.float32 scalar.
"""
return -1.0 * math_ops.reduce_sum(
math_ops.reduce_min(
array_ops.gather(pairwise_distances, centroid_ids), axis=0))
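# Illustrative sketch (not part of this module): the facility energy above in
# NumPy -- the negative sum, over all points, of the distance to the closest
# chosen centroid. Hypothetical helper for reference only.
def _facility_energy_numpy(pairwise_distances, centroid_ids):
    import numpy as np
    return -1.0 * np.sum(pairwise_distances[centroid_ids, :].min(axis=0))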
def compute_clustering_score(labels, predictions, margin_type):
"""Computes the clustering score via sklearn.metrics functions.
There are various ways to compute the clustering score. Intuitively,
we want to measure the agreement of two clustering assignments (labels vs
predictions) ignoring the permutations and output a score from zero to one.
(where the values close to one indicate significant agreement).
This code supports following scoring functions:
nmi: normalized mutual information
ami: adjusted mutual information
ari: adjusted Rand index
vmeasure: v-measure
const: indicator checking whether the two clusterings are the same.
See http://scikit-learn.org/stable/modules/classes.html#clustering-metrics
for the detailed descriptions.
Args:
labels: 1-D Tensor. ground truth cluster assignment.
predictions: 1-D Tensor. predicted cluster assignment.
margin_type: Type of structured margin to use. Default is nmi.
Returns:
clustering_score: dtypes.float32 scalar.
The possible valid values are from zero to one.
Zero means the worst clustering and one means the perfect clustering.
Raises:
ValueError: margin_type is not recognized.
"""
margin_type_to_func = {
'nmi': _compute_nmi_score,
'ami': _compute_ami_score,
'ari': _compute_ari_score,
'vmeasure': _compute_vmeasure_score,
'const': _compute_zeroone_score
}
if margin_type not in margin_type_to_func:
raise ValueError('Unrecognized margin_type: %s' % margin_type)
clustering_score_fn = margin_type_to_func[margin_type]
return array_ops.squeeze(clustering_score_fn(labels, predictions))
def _compute_nmi_score(labels, predictions):
return math_ops.to_float(
script_ops.py_func(
metrics.normalized_mutual_info_score, [labels, predictions],
[dtypes.float64],
name='nmi'))
def _compute_ami_score(labels, predictions):
ami_score = math_ops.to_float(
script_ops.py_func(
metrics.adjusted_mutual_info_score, [labels, predictions],
[dtypes.float64],
name='ami'))
return math_ops.maximum(0.0, ami_score)
def _compute_ari_score(labels, predictions):
ari_score = math_ops.to_float(
script_ops.py_func(
metrics.adjusted_rand_score, [labels, predictions], [dtypes.float64],
name='ari'))
# ari score can go below 0
# http://scikit-learn.org/stable/modules/clustering.html#adjusted-rand-score
return math_ops.maximum(0.0, ari_score)
def _compute_vmeasure_score(labels, predictions):
vmeasure_score = math_ops.to_float(
script_ops.py_func(
metrics.v_measure_score, [labels, predictions], [dtypes.float64],
name='vmeasure'))
return math_ops.maximum(0.0, vmeasure_score)
def _compute_zeroone_score(labels, predictions):
zeroone_score = math_ops.to_float(
math_ops.equal(
math_ops.reduce_sum(
math_ops.to_int32(math_ops.equal(labels, predictions))),
array_ops.shape(labels)[0]))
return zeroone_score
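# Illustrative sketch (not part of this module): the underlying sklearn calls
# evaluated eagerly on plain lists. The two assignments below describe the
# same partition up to relabeling, so both scores come out as 1.0.
def _clustering_scores_example():
    from sklearn import metrics
    labels = [0, 0, 1, 1]
    predictions = [1, 1, 0, 0]
    nmi = metrics.normalized_mutual_info_score(labels, predictions)
    ari = metrics.adjusted_rand_score(labels, predictions)
    return nmi, ari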
def _find_loss_augmented_facility_idx(pairwise_distances, labels, chosen_ids,
candidate_ids, margin_multiplier,
margin_type):
"""Find the next centroid that maximizes the loss augmented inference.
This function is a subroutine called from compute_augmented_facility_locations
Args:
pairwise_distances: 2-D Tensor of pairwise distances.
labels: 1-D Tensor of ground truth cluster assignment.
chosen_ids: 1-D Tensor of current centroid indices.
candidate_ids: 1-D Tensor of candidate indices.
margin_multiplier: multiplication constant.
margin_type: Type of structured margin to use. Default is nmi.
Returns:
integer index.
"""
num_candidates = array_ops.shape(candidate_ids)[0]
pairwise_distances_chosen = array_ops.gather(pairwise_distances, chosen_ids)
pairwise_distances_candidate = array_ops.gather(
pairwise_distances, candidate_ids)
pairwise_distances_chosen_tile = array_ops.tile(
pairwise_distances_chosen, [1, num_candidates])
candidate_scores = -1.0 * math_ops.reduce_sum(
array_ops.reshape(
math_ops.reduce_min(
array_ops.concat([
pairwise_distances_chosen_tile,
array_ops.reshape(pairwise_distances_candidate, [1, -1])
], 0),
axis=0,
keep_dims=True), [num_candidates, -1]),
axis=1)
nmi_scores = array_ops.zeros([num_candidates])
iteration = array_ops.constant(0)
def func_cond(iteration, nmi_scores):
del nmi_scores # Unused in func_cond()
return iteration < num_candidates
def func_body(iteration, nmi_scores):
predictions = get_cluster_assignment(
pairwise_distances,
array_ops.concat([chosen_ids, [candidate_ids[iteration]]], 0))
nmi_score_i = compute_clustering_score(labels, predictions, margin_type)
pad_before = array_ops.zeros([iteration])
pad_after = array_ops.zeros([num_candidates - 1 - iteration])
# return 1 - NMI score as the structured loss.
# because NMI is higher the better [0,1].
return iteration + 1, nmi_scores + array_ops.concat(
[pad_before, [1.0 - nmi_score_i], pad_after], 0)
_, nmi_scores = control_flow_ops.while_loop(
func_cond, func_body, [iteration, nmi_scores])
candidate_scores = math_ops.add(
candidate_scores, margin_multiplier * nmi_scores)
argmax_index = math_ops.to_int32(
math_ops.argmax(candidate_scores, dimension=0))
return candidate_ids[argmax_index]
def compute_augmented_facility_locations(pairwise_distances, labels, all_ids,
margin_multiplier, margin_type):
"""Computes the centroid locations.
Args:
pairwise_distances: 2-D Tensor of pairwise distances.
labels: 1-D Tensor of ground truth cluster assignment.
all_ids: 1-D Tensor of all data indices.
margin_multiplier: multiplication constant.
margin_type: Type of structured margin to use. Default is nmi.
Returns:
chosen_ids: 1-D Tensor of chosen centroid indices.
"""
def func_cond_augmented(iteration, chosen_ids):
del chosen_ids # Unused argument in func_cond_augmented.
return iteration < num_classes
def func_body_augmented(iteration, chosen_ids):
# find a new facility location to add
# based on the clustering score and the NMI score
candidate_ids = array_ops.setdiff1d(all_ids, chosen_ids)[0]
new_chosen_idx = _find_loss_augmented_facility_idx(pairwise_distances,
labels, chosen_ids,
candidate_ids,
margin_multiplier,
margin_type)
chosen_ids = array_ops.concat([chosen_ids, [new_chosen_idx]], 0)
return iteration + 1, chosen_ids
num_classes = array_ops.size(array_ops.unique(labels)[0])
chosen_ids = array_ops.constant(0, dtype=dtypes.int32, shape=[0])
# num_classes get determined at run time based on the sampled batch.
iteration = array_ops.constant(0)
_, chosen_ids = control_flow_ops.while_loop(
func_cond_augmented,
func_body_augmented, [iteration, chosen_ids],
shape_invariants=[iteration.get_shape(), tensor_shape.TensorShape(
[None])])
return chosen_ids
def update_medoid_per_cluster(pairwise_distances, pairwise_distances_subset,
labels, chosen_ids, cluster_member_ids,
cluster_idx, margin_multiplier, margin_type):
"""Updates the cluster medoid per cluster.
Args:
pairwise_distances: 2-D Tensor of pairwise distances.
pairwise_distances_subset: 2-D Tensor of pairwise distances for one cluster.
labels: 1-D Tensor of ground truth cluster assignment.
chosen_ids: 1-D Tensor of cluster centroid indices.
cluster_member_ids: 1-D Tensor of cluster member indices for one cluster.
cluster_idx: Index of this one cluster.
margin_multiplier: multiplication constant.
margin_type: Type of structured margin to use. Default is nmi.
Returns:
chosen_ids: Updated 1-D Tensor of cluster centroid indices.
"""
def func_cond(iteration, scores_margin):
del scores_margin # Unused variable scores_margin.
return iteration < num_candidates
def func_body(iteration, scores_margin):
# swap the current medoid with the candidate cluster member
candidate_medoid = math_ops.to_int32(cluster_member_ids[iteration])
tmp_chosen_ids = update_1d_tensor(chosen_ids, cluster_idx, candidate_medoid)
predictions = get_cluster_assignment(pairwise_distances, tmp_chosen_ids)
metric_score = compute_clustering_score(labels, predictions, margin_type)
pad_before = array_ops.zeros([iteration])
pad_after = array_ops.zeros([num_candidates - 1 - iteration])
return iteration + 1, scores_margin + array_ops.concat(
[pad_before, [1.0 - metric_score], pad_after], 0)
# pairwise_distances_subset is of size [p, 1, 1, p],
# the intermediate dummy dimensions at
# [1, 2] makes this code work in the edge case where p=1.
# this happens if the cluster size is one.
scores_fac = -1.0 * math_ops.reduce_sum(
array_ops.squeeze(pairwise_distances_subset, [1, 2]), axis=0)
iteration = array_ops.constant(0)
num_candidates = array_ops.size(cluster_member_ids)
scores_margin = array_ops.zeros([num_candidates])
_, scores_margin = control_flow_ops.while_loop(func_cond, func_body,
[iteration, scores_margin])
candidate_scores = math_ops.add(scores_fac, margin_multiplier * scores_margin)
argmax_index = math_ops.to_int32(
math_ops.argmax(candidate_scores, dimension=0))
best_medoid = math_ops.to_int32(cluster_member_ids[argmax_index])
chosen_ids = update_1d_tensor(chosen_ids, cluster_idx, best_medoid)
return chosen_ids
def update_all_medoids(pairwise_distances, predictions, labels, chosen_ids,
margin_multiplier, margin_type):
"""Updates all cluster medoids a cluster at a time.
Args:
pairwise_distances: 2-D Tensor of pairwise distances.
predictions: 1-D Tensor of predicted cluster assignment.
labels: 1-D Tensor of ground truth cluster assignment.
chosen_ids: 1-D Tensor of cluster centroid indices.
margin_multiplier: multiplication constant.
margin_type: Type of structured margin to use. Default is nmi.
Returns:
chosen_ids: Updated 1-D Tensor of cluster centroid indices.
"""
def func_cond_augmented_pam(iteration, chosen_ids):
del chosen_ids # Unused argument.
return iteration < num_classes
def func_body_augmented_pam(iteration, chosen_ids):
"""Call the update_medoid_per_cluster subroutine."""
mask = math_ops.equal(
math_ops.to_int64(predictions), math_ops.to_int64(iteration))
this_cluster_ids = array_ops.where(mask)
pairwise_distances_subset = array_ops.transpose(
array_ops.gather(
array_ops.transpose(
array_ops.gather(pairwise_distances, this_cluster_ids)),
this_cluster_ids))
chosen_ids = update_medoid_per_cluster(pairwise_distances,
pairwise_distances_subset, labels,
chosen_ids, this_cluster_ids,
iteration, margin_multiplier,
margin_type)
return iteration + 1, chosen_ids
unique_class_ids = array_ops.unique(labels)[0]
num_classes = array_ops.size(unique_class_ids)
iteration = array_ops.constant(0)
_, chosen_ids = control_flow_ops.while_loop(
func_cond_augmented_pam, func_body_augmented_pam, [iteration, chosen_ids])
return chosen_ids
def compute_augmented_facility_locations_pam(pairwise_distances,
labels,
margin_multiplier,
margin_type,
chosen_ids,
pam_max_iter=5):
"""Refine the cluster centroids with PAM local search.
For fixed iterations, alternate between updating the cluster assignment
and updating cluster medoids.
Args:
pairwise_distances: 2-D Tensor of pairwise distances.
labels: 1-D Tensor of ground truth cluster assignment.
margin_multiplier: multiplication constant.
margin_type: Type of structured margin to use. Default is nmi.
chosen_ids: 1-D Tensor of initial estimate of cluster centroids.
pam_max_iter: Number of refinement iterations.
Returns:
chosen_ids: Updated 1-D Tensor of cluster centroid indices.
"""
for _ in range(pam_max_iter):
# update the cluster assignment given the chosen_ids (S_pred)
predictions = get_cluster_assignment(pairwise_distances, chosen_ids)
# update the medoids per each cluster
chosen_ids = update_all_medoids(pairwise_distances, predictions, labels,
chosen_ids, margin_multiplier, margin_type)
return chosen_ids
def compute_gt_cluster_score(pairwise_distances, labels):
"""Compute ground truth facility location score.
Loop over each unique class and compute the average travel distance.
Args:
pairwise_distances: 2-D Tensor of pairwise distances.
labels: 1-D Tensor of ground truth cluster assignment.
Returns:
gt_cluster_score: dtypes.float32 score.
"""
unique_class_ids = array_ops.unique(labels)[0]
num_classes = array_ops.size(unique_class_ids)
iteration = array_ops.constant(0)
gt_cluster_score = array_ops.constant(0.0, dtype=dtypes.float32)
def func_cond(iteration, gt_cluster_score):
del gt_cluster_score # Unused argument.
return iteration < num_classes
def func_body(iteration, gt_cluster_score):
"""Per each cluster, compute the average travel distance."""
mask = math_ops.equal(labels, unique_class_ids[iteration])
this_cluster_ids = array_ops.where(mask)
pairwise_distances_subset = array_ops.transpose(
array_ops.gather(
array_ops.transpose(
array_ops.gather(pairwise_distances, this_cluster_ids)),
this_cluster_ids))
this_cluster_score = -1.0 * math_ops.reduce_min(
math_ops.reduce_sum(
pairwise_distances_subset, axis=0))
return iteration + 1, gt_cluster_score + this_cluster_score
_, gt_cluster_score = control_flow_ops.while_loop(
func_cond, func_body, [iteration, gt_cluster_score])
return gt_cluster_score
def cluster_loss(labels,
embeddings,
margin_multiplier,
enable_pam_finetuning=True,
margin_type='nmi',
print_losses=False):
"""Computes the clustering loss.
The following structured margins are supported:
nmi: normalized mutual information
ami: adjusted mutual information
ari: adjusted Rand index
vmeasure: v-measure
const: indicator checking whether the two clusterings are the same.
Args:
labels: 2-D Tensor of labels of shape [batch size, 1]
embeddings: 2-D Tensor of embeddings of shape
[batch size, embedding dimension]. Embeddings should be l2 normalized.
margin_multiplier: float32 scalar. Multiplier on the structured margin term.
See section 3.2 of the paper for discussion.
enable_pam_finetuning: Boolean, Whether to run local pam refinement.
See section 3.4 of paper for discussion.
margin_type: Type of structured margin to use. See section 3.2 of
paper for discussion. Can be 'nmi', 'ami', 'ari', 'vmeasure', 'const'.
print_losses: Boolean. Option to print the loss.
Paper: https://arxiv.org/abs/1612.01213.
Returns:
clustering_loss: A float32 scalar `Tensor`.
Raises:
ImportError: If sklearn dependency is not installed.
"""
if not HAS_SKLEARN:
raise ImportError('Cluster loss depends on sklearn.')
pairwise_distances = pairwise_distance(embeddings)
labels = array_ops.squeeze(labels)
all_ids = math_ops.range(array_ops.shape(embeddings)[0])
# Compute the loss augmented inference and get the cluster centroids.
chosen_ids = compute_augmented_facility_locations(pairwise_distances, labels,
all_ids, margin_multiplier,
margin_type)
# Given the predicted centroids, compute the facility energy.
score_pred = compute_facility_energy(pairwise_distances, chosen_ids)
# Branch whether to use PAM finetuning.
if enable_pam_finetuning:
# Initialize with augmented facility solution.
chosen_ids = compute_augmented_facility_locations_pam(pairwise_distances,
labels,
margin_multiplier,
margin_type,
chosen_ids)
score_pred = compute_facility_energy(pairwise_distances, chosen_ids)
# Given the predicted centroids, compute the cluster assignments.
predictions = get_cluster_assignment(pairwise_distances, chosen_ids)
# Compute the clustering (i.e. NMI) score between the two assignments.
clustering_score_pred = compute_clustering_score(labels, predictions,
margin_type)
# Compute the clustering score from labels.
score_gt = compute_gt_cluster_score(pairwise_distances, labels)
# Compute the hinge loss.
clustering_loss = math_ops.maximum(
score_pred + margin_multiplier * (1.0 - clustering_score_pred) - score_gt,
0.0,
name='clustering_loss')
clustering_loss.set_shape([])
if print_losses:
clustering_loss = logging_ops.Print(
clustering_loss,
['clustering_loss: ', clustering_loss, array_ops.shape(
clustering_loss)])
# Clustering specific summary.
summary.scalar('losses/score_pred', score_pred)
summary.scalar('losses/' + margin_type, clustering_score_pred)
summary.scalar('losses/score_gt', score_gt)
return clustering_loss
| apache-2.0 |
trachelr/mne-python | examples/preprocessing/plot_find_eog_artifacts.py | 19 | 1219 | """
==================
Find EOG artifacts
==================
Locate peaks of EOG to spot blinks and general EOG artifacts.
"""
# Authors: Alexandre Gramfort <[email protected]>
#
# License: BSD (3-clause)
import numpy as np
import matplotlib.pyplot as plt
import mne
from mne import io
from mne.datasets import sample
print(__doc__)
data_path = sample.data_path()
###############################################################################
# Set parameters
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
# Setup for reading the raw data
raw = io.Raw(raw_fname)
event_id = 998
eog_events = mne.preprocessing.find_eog_events(raw, event_id)
# Read epochs
picks = mne.pick_types(raw.info, meg=False, eeg=False, stim=False, eog=True,
exclude='bads')
tmin, tmax = -0.2, 0.2
epochs = mne.Epochs(raw, eog_events, event_id, tmin, tmax, picks=picks)
data = epochs.get_data()
print("Number of detected EOG artifacts : %d" % len(data))
###############################################################################
# Plot EOG artifacts
plt.plot(1e3 * epochs.times, np.squeeze(data).T)
plt.xlabel('Times (ms)')
plt.ylabel('EOG (muV)')
plt.show()
| bsd-3-clause |
kaushik94/pymc | pymc/examples/ARM12_6uranium.py | 2 | 1933 | import numpy as np
from pymc import *
import pandas as pd
data = pd.read_csv(get_data_file('pymc.examples', 'data/srrs2.dat'))
cty_data = pd.read_csv(get_data_file('pymc.examples', 'data/cty.dat'))
data = data[data.state == 'MN']
data['fips'] = data.stfips * 1000 + data.cntyfips
cty_data['fips'] = cty_data.stfips * 1000 + cty_data.ctfips
data['lradon'] = np.log(np.where(data.activity == 0, .1, data.activity))
data = data.merge(cty_data, 'inner', on='fips')
unique = data[['fips']].drop_duplicates()
unique['group'] = np.arange(len(unique))
unique.set_index('fips')
data = data.merge(unique, 'inner', on='fips')
obs_means = data.groupby('fips').lradon.mean()
n = len(obs_means)
lradon = np.array(data.lradon)
floor = np.array(data.floor)
group = np.array(data.group)
ufull = np.array(data.Uppm)
model = Model()
with model:
groupmean = Normal('groupmean', 0, 10. ** -2.)
# as recommended by "Prior distributions for variance parameters in
# hierarchical models"
groupsd = Uniform('groupsd', 0, 10.)
sd = Uniform('sd', 0, 10.)
floor_m = Normal('floor_m', 0, 5. ** -2.)
u_m = Normal('u_m', 0, 5. ** -2)
means = Normal('means', groupmean, groupsd ** -2., shape=n)
lr = Normal('lr', floor * floor_m + means[group] + ufull * u_m, sd ** -
2., observed=lradon)
def run(n=3000):
if n == "short":
n = 50
with model:
start = {'groupmean': obs_means.mean(),
'groupsd': obs_means.std(),
'sd': data.groupby('group').lradon.std().mean(),
'means': np.array(obs_means),
'u_m': np.array([.72]),
'floor_m': 0.,
}
start = find_MAP(start, model.vars[:-1])
H = model.fastd2logp()
h = np.diag(H(start))
step = HamiltonianMC(model.vars, h)
trace = sample(n, step, start)
if __name__ == '__main__':
run()
| apache-2.0 |
mbelmadani/motifgp | motifgp/fortin2013/nsga2.py | 1 | 5211 | # This file is part of DEAP.
#
# DEAP is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 3 of
# the License, or (at your option) any later version.
#
# DEAP is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with DEAP. If not, see <http://www.gnu.org/licenses/>.
import array
import random
import json
import numpy
# import article module
import fortin2013
from math import sqrt
from deap import algorithms
from deap import base
from deap import benchmarks
from deap.benchmarks.tools import diversity, convergence
from deap import creator
from deap import tools
creator.create("FitnessMin", base.Fitness, weights=(-1.0, -1.0))
creator.create("Individual", array.array, typecode='d', fitness=creator.FitnessMin)
toolbox = base.Toolbox()
# Problem definition
# Functions zdt1, zdt2, zdt3, zdt6 have bounds [0, 1]
BOUND_LOW, BOUND_UP = 0.0, 1.0
# Function zdt4 has bounds x1 = [0, 1], xn = [-5, 5], with n = 2, ..., 10
# BOUND_LOW, BOUND_UP = [0.0] + [-5.0]*9, [1.0] + [5.0]*9
# Functions zdt1, zdt2, zdt3 have 30 dimensions, zdt4 and zdt6 have 10
NDIM = 30
def uniform(low, up, size=None):
try:
return [random.uniform(a, b) for a, b in zip(low, up)]
except TypeError:
return [random.uniform(a, b) for a, b in zip([low] * size, [up] * size)]
toolbox.register("attr_float", uniform, BOUND_LOW, BOUND_UP, NDIM)
toolbox.register("individual", tools.initIterate, creator.Individual, toolbox.attr_float)
toolbox.register("population", tools.initRepeat, list, toolbox.individual)
toolbox.register("evaluate", benchmarks.zdt1)
toolbox.register("mate", tools.cxSimulatedBinaryBounded, low=BOUND_LOW, up=BOUND_UP, eta=20.0)
toolbox.register("mutate", tools.mutPolynomialBounded, low=BOUND_LOW, up=BOUND_UP, eta=20.0, indpb=1.0/NDIM)
toolbox.register("preselect", fortin2013.selTournamentFitnessDCD)
toolbox.register("select", fortin2013.selNSGA2)
def main(seed=None):
random.seed(seed)
NGEN = 250
MU = 100
CXPB = 0.9
stats = tools.Statistics(lambda ind: ind.fitness.values)
stats.register("avg", numpy.mean, axis=0)
stats.register("std", numpy.std, axis=0)
stats.register("min", numpy.min, axis=0)
stats.register("max", numpy.max, axis=0)
logbook = tools.Logbook()
logbook.header = "gen", "evals", "std", "min", "avg", "max"
pop = toolbox.population(n=MU)
# Evaluate the individuals with an invalid fitness
invalid_ind = [ind for ind in pop if not ind.fitness.valid]
fitnesses = toolbox.map(toolbox.evaluate, invalid_ind)
for ind, fit in zip(invalid_ind, fitnesses):
ind.fitness.values = fit
# This is just to assign the crowding distance to the individuals
# no actual selection is done
pop = toolbox.select(pop, len(pop))
record = stats.compile(pop)
logbook.record(gen=0, evals=len(invalid_ind), **record)
print(logbook.stream)
# Begin the generational process
for gen in range(1, NGEN):
# Vary the population
offspring = toolbox.preselect(pop, len(pop))
offspring = [toolbox.clone(ind) for ind in offspring]
for ind1, ind2 in zip(offspring[::2], offspring[1::2]):
if random.random() <= CXPB:
toolbox.mate(ind1, ind2)
toolbox.mutate(ind1)
toolbox.mutate(ind2)
del ind1.fitness.values, ind2.fitness.values
# Evaluate the individuals with an invalid fitness
invalid_ind = [ind for ind in offspring if not ind.fitness.valid]
fitnesses = toolbox.map(toolbox.evaluate, invalid_ind)
for ind, fit in zip(invalid_ind, fitnesses):
ind.fitness.values = fit
# Select the next generation population
pop = toolbox.select(pop + offspring, MU)
record = stats.compile(pop)
logbook.record(gen=gen, evals=len(invalid_ind), **record)
print(logbook.stream)
return pop, logbook
if __name__ == "__main__":
with open("zdt1_front.json") as optimal_front_data:
optimal_front = json.load(optimal_front_data)
# Use 500 of the 1000 points in the json file
optimal_front = sorted(optimal_front[i] for i in range(0, len(optimal_front), 2))
pop, stats = main()
pop.sort(key=lambda x: x.fitness.values)
print(stats)
print("Convergence: ", convergence(pop, optimal_front))
print("Diversity: ", diversity(pop, optimal_front[0], optimal_front[-1]))
# import matplotlib.pyplot as plt
# import numpy
#
# front = numpy.array([ind.fitness.values for ind in pop])
# optimal_front = numpy.array(optimal_front)
# plt.scatter(optimal_front[:,0], optimal_front[:,1], c="r")
# plt.scatter(front[:,0], front[:,1], c="b")
# plt.axis("tight")
# plt.show()
| lgpl-3.0 |
strets123/rdkit | rdkit/Chem/Draw/SimilarityMaps.py | 3 | 14368 | # $Id$
#
# Copyright (c) 2013, Novartis Institutes for BioMedical Research Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Novartis Institutes for BioMedical Research Inc.
# nor the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Created by Sereina Riniker, Aug 2013
from rdkit import Chem
from rdkit import RDConfig
from rdkit import DataStructs
from rdkit.Chem import rdMolDescriptors as rdMD
from rdkit.Chem import rdmolops
from rdkit.Chem import Draw
from rdkit.six import iteritems
import numpy
import math
import copy
from matplotlib import cm
def GetAtomicWeightsForFingerprint(refMol, probeMol, fpFunction, metric=DataStructs.DiceSimilarity):
"""
Calculates the atomic weights for the probe molecule
based on a fingerprint function and a metric.
Parameters:
refMol -- the reference molecule
probeMol -- the probe molecule
fpFunction -- the fingerprint function
metric -- the similarity metric
Note:
If fpFunction needs additional parameters, use a lambda construct
"""
if hasattr(probeMol, '_fpInfo'): delattr(probeMol, '_fpInfo')
if hasattr(refMol, '_fpInfo'): delattr(refMol, '_fpInfo')
refFP = fpFunction(refMol, -1)
probeFP = fpFunction(probeMol, -1)
baseSimilarity = metric(refFP, probeFP)
# loop over atoms
weights = []
for atomId in range(probeMol.GetNumAtoms()):
newFP = fpFunction(probeMol, atomId)
newSimilarity = metric(refFP, newFP)
weights.append(baseSimilarity - newSimilarity)
if hasattr(probeMol, '_fpInfo'): delattr(probeMol, '_fpInfo')
if hasattr(refMol, '_fpInfo'): delattr(refMol, '_fpInfo')
return weights
def GetAtomicWeightsForModel(probeMol, fpFunction, predictionFunction):
"""
Calculates the atomic weights for the probe molecule based on
a fingerprint function and the prediction function of a ML model.
Parameters:
probeMol -- the probe molecule
fpFunction -- the fingerprint function
predictionFunction -- the prediction function of the ML model
"""
if hasattr(probeMol, '_fpInfo'): delattr(probeMol, '_fpInfo')
probeFP = fpFunction(probeMol, -1)
baseProba = predictionFunction(probeFP)
# loop over atoms
weights = []
for atomId in range(probeMol.GetNumAtoms()):
newFP = fpFunction(probeMol, atomId)
newProba = predictionFunction(newFP)
weights.append(baseProba - newProba)
if hasattr(probeMol, '_fpInfo'): delattr(probeMol, '_fpInfo')
return weights
def GetStandardizedWeights(weights):
"""
Normalizes the weights,
such that the absolute maximum weight equals 1.0.
Parameters:
weights -- the list with the atomic weights
"""
tmp = [math.fabs(w) for w in weights]
currentMax = max(tmp)
if currentMax > 0:
return [w/currentMax for w in weights], currentMax
else:
return weights, currentMax
def GetSimilarityMapFromWeights(mol, weights, colorMap=cm.PiYG, scale=-1, size=(250, 250), sigma=None, #@UndefinedVariable #pylint: disable=E1101
coordScale=1.5, step=0.01, colors='k', contourLines=10, alpha=0.5, **kwargs):
"""
Generates the similarity map for a molecule given the atomic weights.
Parameters:
mol -- the molecule of interest
colorMap -- the matplotlib color map scheme
scale -- the scaling: scale < 0 -> the absolute maximum weight is used as maximum scale
scale = double -> this is the maximum scale
size -- the size of the figure
sigma -- the sigma for the Gaussians
coordScale -- scaling factor for the coordinates
step -- the step for calcAtomGaussian
colors -- color of the contour lines
contourLines -- if integer number N: N contour lines are drawn
if list(numbers): contour lines at these numbers are drawn
alpha -- the alpha blending value for the contour lines
kwargs -- additional arguments for drawing
"""
if mol.GetNumAtoms() < 2: raise ValueError("too few atoms")
fig = Draw.MolToMPL(mol, coordScale=coordScale, size=size, **kwargs)
if sigma is None:
if mol.GetNumBonds() > 0:
bond = mol.GetBondWithIdx(0)
idx1 = bond.GetBeginAtomIdx()
idx2 = bond.GetEndAtomIdx()
sigma = 0.3 * math.sqrt(sum([(mol._atomPs[idx1][i]-mol._atomPs[idx2][i])**2 for i in range(2)]))
else:
sigma = 0.3 * math.sqrt(sum([(mol._atomPs[0][i]-mol._atomPs[1][i])**2 for i in range(2)]))
sigma = round(sigma, 2)
x, y, z = Draw.calcAtomGaussians(mol, sigma, weights=weights, step=step)
# scaling
if scale <= 0.0: maxScale = max(math.fabs(numpy.min(z)), math.fabs(numpy.max(z)))
else: maxScale = scale
# coloring
fig.axes[0].imshow(z, cmap=colorMap, interpolation='bilinear', origin='lower', extent=(0,1,0,1), vmin=-maxScale, vmax=maxScale)
# contour lines
# only draw them when at least one weight is not zero
if len([w for w in weights if w != 0.0]):
fig.axes[0].contour(x, y, z, contourLines, colors=colors, alpha=alpha, **kwargs)
return fig
def GetSimilarityMapForFingerprint(refMol, probeMol, fpFunction, metric=DataStructs.DiceSimilarity, **kwargs):
"""
Generates the similarity map for a given reference and probe molecule,
fingerprint function and similarity metric.
Parameters:
refMol -- the reference molecule
probeMol -- the probe molecule
fpFunction -- the fingerprint function
metric -- the similarity metric.
kwargs -- additional arguments for drawing
"""
weights = GetAtomicWeightsForFingerprint(refMol, probeMol, fpFunction, metric)
weights, maxWeight = GetStandardizedWeights(weights)
fig = GetSimilarityMapFromWeights(probeMol, weights, **kwargs)
return fig, maxWeight
def GetSimilarityMapForModel(probeMol, fpFunction, predictionFunction, **kwargs):
"""
Generates the similarity map for a given ML model and probe molecule,
and fingerprint function.
Parameters:
probeMol -- the probe molecule
fpFunction -- the fingerprint function
predictionFunction -- the prediction function of the ML model
kwargs -- additional arguments for drawing
"""
weights = GetAtomicWeightsForModel(probeMol, fpFunction, predictionFunction)
weights, maxWeight = GetStandardizedWeights(weights)
fig = GetSimilarityMapFromWeights(probeMol, weights, **kwargs)
return fig, maxWeight
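# Illustrative usage sketch (not executed at import time): comparing two
# molecules with the Morgan fingerprint and drawing the similarity map. The
# SMILES strings are arbitrary examples, not taken from this module.
def _example_similarity_map():
    refmol = Chem.MolFromSmiles('c1ccccc1O')    # phenol
    probemol = Chem.MolFromSmiles('c1ccccc1N')  # aniline
    fig, max_weight = GetSimilarityMapForFingerprint(
        refmol, probemol, GetMorganFingerprint)
    return fig, max_weight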
apDict = {}
apDict['normal'] = lambda m, bits, minl, maxl, bpe, ia: rdMD.GetAtomPairFingerprint(m, minLength=minl, maxLength=maxl, ignoreAtoms=ia)
apDict['hashed'] = lambda m, bits, minl, maxl, bpe, ia: rdMD.GetHashedAtomPairFingerprint(m, nBits=bits, minLength=minl, maxLength=maxl, ignoreAtoms=ia)
apDict['bv'] = lambda m, bits, minl, maxl, bpe, ia: rdMD.GetHashedAtomPairFingerprintAsBitVect(m, nBits=bits, minLength=minl, maxLength=maxl, nBitsPerEntry=bpe, ignoreAtoms=ia)
# usage: lambda m,i: GetAPFingerprint(m, i, fpType, nBits, minLength, maxLength, nBitsPerEntry)
def GetAPFingerprint(mol, atomId=-1, fpType='normal', nBits=2048, minLength=1, maxLength=30, nBitsPerEntry=4):
"""
Calculates the atom-pairs fingerprint with the pairs of atomId removed.
Parameters:
mol -- the molecule of interest
atomId -- the atom to remove the pairs for (if -1, no pair is removed)
fpType -- the type of AP fingerprint ('normal', 'hashed', 'bv')
nBits -- the size of the bit vector (only for fpType='bv')
minLength -- the minimum path length for an atom pair
maxLength -- the maximum path length for an atom pair
nBitsPerEntry -- the number of bits available for each pair
"""
if fpType not in ['normal', 'hashed', 'bv']: raise ValueError("Unknown Atom pairs fingerprint type")
if atomId < 0:
return apDict[fpType](mol, nBits, minLength, maxLength, nBitsPerEntry, 0)
if atomId >= mol.GetNumAtoms(): raise ValueError("atom index greater than number of atoms")
return apDict[fpType](mol, nBits, minLength, maxLength, nBitsPerEntry, [atomId])
ttDict = {}
ttDict['normal'] = lambda m, bits, ts, bpe, ia: rdMD.GetTopologicalTorsionFingerprint(m, targetSize=ts, ignoreAtoms=ia)
ttDict['hashed'] = lambda m, bits, ts, bpe, ia: rdMD.GetHashedTopologicalTorsionFingerprint(m, nBits=bits, targetSize=ts, ignoreAtoms=ia)
ttDict['bv'] = lambda m, bits, ts, bpe, ia: rdMD.GetHashedTopologicalTorsionFingerprintAsBitVect(m, nBits=bits, targetSize=ts, nBitsPerEntry=bpe, ignoreAtoms=ia)
# usage: lambda m,i: GetTTFingerprint(m, i, fpType, nBits, targetSize)
def GetTTFingerprint(mol, atomId=-1, fpType='normal', nBits=2048, targetSize=4, nBitsPerEntry=4):
"""
Calculates the topological torsion fingerprint with the torsions of atomId removed.
Parameters:
mol -- the molecule of interest
atomId -- the atom to remove the torsions for (if -1, no torsion is removed)
fpType -- the type of TT fingerprint ('normal', 'hashed', 'bv')
nBits -- the size of the bit vector (only for fpType='bv')
targetSize -- the number of atoms in a torsion
nBitsPerEntry -- the number of bits available for each torsion
"""
if fpType not in ['normal', 'hashed', 'bv']: raise ValueError("Unknown Topological torsion fingerprint type")
if atomId < 0:
return ttDict[fpType](mol, nBits, targetSize, nBitsPerEntry, 0)
if atomId >= mol.GetNumAtoms(): raise ValueError("atom index greater than number of atoms")
return ttDict[fpType](mol, nBits, targetSize, nBitsPerEntry, [atomId])
# usage: lambda m,i: GetMorganFingerprint(m, i, radius, fpType, nBits, useFeatures)
def GetMorganFingerprint(mol, atomId=-1, radius=2, fpType='bv', nBits=2048, useFeatures=False):
"""
Calculates the Morgan fingerprint with the environments of atomId removed.
Parameters:
mol -- the molecule of interest
radius -- the maximum radius
fpType -- the type of Morgan fingerprint: 'count' or 'bv'
atomId -- the atom to remove the environments for (if -1, no environment is removed)
nBits -- the size of the bit vector (only for fpType = 'bv')
useFeatures -- if false: ConnectivityMorgan, if true: FeatureMorgan
"""
if fpType not in ['bv', 'count']: raise ValueError("Unknown Morgan fingerprint type")
if not hasattr(mol, '_fpInfo'):
info = {}
# get the fingerprint
if fpType == 'bv': molFp = rdMD.GetMorganFingerprintAsBitVect(mol, radius, nBits=nBits, useFeatures=useFeatures, bitInfo=info)
else: molFp = rdMD.GetMorganFingerprint(mol, radius, useFeatures=useFeatures, bitInfo=info)
# construct the bit map
if fpType == 'bv': bitmap = [DataStructs.ExplicitBitVect(nBits) for x in range(mol.GetNumAtoms())]
else: bitmap = [[] for x in range(mol.GetNumAtoms())]
for bit, es in iteritems(info):
for at1, rad in es:
if rad == 0: # for radius 0
if fpType == 'bv': bitmap[at1][bit] = 1
else: bitmap[at1].append(bit)
else: # for radii > 0
env = Chem.FindAtomEnvironmentOfRadiusN(mol, rad, at1)
amap = {}
submol = Chem.PathToSubmol(mol, env, atomMap=amap)
for at2 in amap.keys():
if fpType == 'bv': bitmap[at2][bit] = 1
else: bitmap[at2].append(bit)
mol._fpInfo = (molFp, bitmap)
if atomId < 0:
return mol._fpInfo[0]
else: # remove the bits of atomId
if atomId >= mol.GetNumAtoms(): raise ValueError("atom index greater than number of atoms")
if len(mol._fpInfo) != 2: raise ValueError("_fpInfo not set")
if fpType == 'bv':
molFp = mol._fpInfo[0] ^ mol._fpInfo[1][atomId] # xor
else: # count
molFp = copy.deepcopy(mol._fpInfo[0])
# delete the bits with atomId
for bit in mol._fpInfo[1][atomId]:
molFp[bit] -= 1
return molFp
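# Sketch of the per-atom weighting pattern these helpers are built for (molecule
# choices below are illustrative assumptions; DataStructs is already imported in
# this module):
#   >>> from rdkit import Chem
#   >>> ref = Chem.MolFromSmiles('c1ccccc1O')
#   >>> probe = Chem.MolFromSmiles('c1ccccc1N')
#   >>> fp_ref = GetMorganFingerprint(ref)
#   >>> base_sim = DataStructs.TanimotoSimilarity(fp_ref, GetMorganFingerprint(probe))
#   >>> weights = [base_sim - DataStructs.TanimotoSimilarity(
#   ...                fp_ref, GetMorganFingerprint(probe, atomId=i))
#   ...            for i in range(probe.GetNumAtoms())]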
# usage: lambda m,i: GetRDKFingerprint(m, i, fpType, nBits, minPath, maxPath, nBitsPerHash)
def GetRDKFingerprint(mol, atomId=-1, fpType='bv', nBits=2048, minPath=1, maxPath=5, nBitsPerHash=2):
"""
Calculates the RDKit fingerprint with the paths of atomId removed.
Parameters:
mol -- the molecule of interest
atomId -- the atom to remove the paths for (if -1, no path is removed)
fpType -- the type of RDKit fingerprint: 'bv'
nBits -- the size of the bit vector
minPath -- minimum path length
maxPath -- maximum path length
    nBitsPerHash -- number of bits to set per path
"""
if fpType not in ['bv', '']: raise ValueError("Unknown RDKit fingerprint type")
fpType = 'bv'
if not hasattr(mol, '_fpInfo'):
info = [] # list with bits for each atom
# get the fingerprint
molFp = Chem.RDKFingerprint(mol, fpSize=nBits, minPath=minPath, maxPath=maxPath, nBitsPerHash=nBitsPerHash, atomBits=info)
mol._fpInfo = (molFp, info)
if atomId < 0:
return mol._fpInfo[0]
else: # remove the bits of atomId
if atomId >= mol.GetNumAtoms(): raise ValueError("atom index greater than number of atoms")
if len(mol._fpInfo) != 2: raise ValueError("_fpInfo not set")
molFp = copy.deepcopy(mol._fpInfo[0])
molFp.UnSetBitsFromList(mol._fpInfo[1][atomId])
return molFp
| bsd-3-clause |
chrsrds/scikit-learn | sklearn/linear_model/tests/test_least_angle.py | 1 | 27908 | import warnings
from distutils.version import LooseVersion
import numpy as np
import pytest
from scipy import linalg
from sklearn.model_selection import train_test_split
from sklearn.utils.testing import assert_allclose
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import TempMemmap
from sklearn.exceptions import ConvergenceWarning
from sklearn import linear_model, datasets
from sklearn.linear_model.least_angle import _lars_path_residues, LassoLarsIC
# TODO: use another dataset that has multiple drops
diabetes = datasets.load_diabetes()
X, y = diabetes.data, diabetes.target
G = np.dot(X.T, X)
Xy = np.dot(X.T, y)
n_samples = y.size
def test_simple():
# Principle of Lars is to keep covariances tied and decreasing
# also test verbose output
from io import StringIO
import sys
old_stdout = sys.stdout
try:
sys.stdout = StringIO()
_, _, coef_path_ = linear_model.lars_path(
X, y, method='lar', verbose=10)
sys.stdout = old_stdout
for i, coef_ in enumerate(coef_path_.T):
res = y - np.dot(X, coef_)
cov = np.dot(X.T, res)
C = np.max(abs(cov))
eps = 1e-3
ocur = len(cov[C - eps < abs(cov)])
if i < X.shape[1]:
assert ocur == i + 1
else:
# no more than max_pred variables can go into the active set
assert ocur == X.shape[1]
finally:
sys.stdout = old_stdout
def test_simple_precomputed():
# The same, with precomputed Gram matrix
_, _, coef_path_ = linear_model.lars_path(
X, y, Gram=G, method='lar')
for i, coef_ in enumerate(coef_path_.T):
res = y - np.dot(X, coef_)
cov = np.dot(X.T, res)
C = np.max(abs(cov))
eps = 1e-3
ocur = len(cov[C - eps < abs(cov)])
if i < X.shape[1]:
assert ocur == i + 1
else:
# no more than max_pred variables can go into the active set
assert ocur == X.shape[1]
def _assert_same_lars_path_result(output1, output2):
assert len(output1) == len(output2)
for o1, o2 in zip(output1, output2):
assert_allclose(o1, o2)
@pytest.mark.parametrize('method', ['lar', 'lasso'])
@pytest.mark.parametrize('return_path', [True, False])
def test_lars_path_gram_equivalent(method, return_path):
_assert_same_lars_path_result(
linear_model.lars_path_gram(
Xy=Xy, Gram=G, n_samples=n_samples, method=method,
return_path=return_path),
linear_model.lars_path(
X, y, Gram=G, method=method,
return_path=return_path))
def test_x_none_gram_none_raises_value_error():
# Test that lars_path with no X and Gram raises exception
Xy = np.dot(X.T, y)
assert_raises(ValueError, linear_model.lars_path, None, y, Gram=None,
Xy=Xy)
def test_all_precomputed():
# Test that lars_path with precomputed Gram and Xy gives the right answer
G = np.dot(X.T, X)
Xy = np.dot(X.T, y)
for method in 'lar', 'lasso':
output = linear_model.lars_path(X, y, method=method)
output_pre = linear_model.lars_path(X, y, Gram=G, Xy=Xy,
method=method)
for expected, got in zip(output, output_pre):
assert_array_almost_equal(expected, got)
@pytest.mark.filterwarnings('ignore: `rcond` parameter will change')
# numpy deprecation
def test_lars_lstsq():
# Test that Lars gives least square solution at the end
# of the path
X1 = 3 * X # use un-normalized dataset
clf = linear_model.LassoLars(alpha=0.)
clf.fit(X1, y)
# Avoid FutureWarning about default value change when numpy >= 1.14
rcond = None if LooseVersion(np.__version__) >= '1.14' else -1
coef_lstsq = np.linalg.lstsq(X1, y, rcond=rcond)[0]
assert_array_almost_equal(clf.coef_, coef_lstsq)
@pytest.mark.filterwarnings('ignore:`rcond` parameter will change')
# numpy deprecation
def test_lasso_gives_lstsq_solution():
# Test that Lars Lasso gives least square solution at the end
# of the path
_, _, coef_path_ = linear_model.lars_path(X, y, method='lasso')
coef_lstsq = np.linalg.lstsq(X, y)[0]
assert_array_almost_equal(coef_lstsq, coef_path_[:, -1])
def test_collinearity():
# Check that lars_path is robust to collinearity in input
X = np.array([[3., 3., 1.],
[2., 2., 0.],
[1., 1., 0]])
y = np.array([1., 0., 0])
rng = np.random.RandomState(0)
f = ignore_warnings
_, _, coef_path_ = f(linear_model.lars_path)(X, y, alpha_min=0.01)
assert not np.isnan(coef_path_).any()
residual = np.dot(X, coef_path_[:, -1]) - y
assert (residual ** 2).sum() < 1. # just make sure it's bounded
n_samples = 10
X = rng.rand(n_samples, 5)
y = np.zeros(n_samples)
_, _, coef_path_ = linear_model.lars_path(X, y, Gram='auto', copy_X=False,
copy_Gram=False, alpha_min=0.,
method='lasso', verbose=0,
max_iter=500)
assert_array_almost_equal(coef_path_, np.zeros_like(coef_path_))
def test_no_path():
# Test that the ``return_path=False`` option returns the correct output
alphas_, _, coef_path_ = linear_model.lars_path(
X, y, method='lar')
alpha_, _, coef = linear_model.lars_path(
X, y, method='lar', return_path=False)
assert_array_almost_equal(coef, coef_path_[:, -1])
assert alpha_ == alphas_[-1]
def test_no_path_precomputed():
# Test that the ``return_path=False`` option with Gram remains correct
alphas_, _, coef_path_ = linear_model.lars_path(
X, y, method='lar', Gram=G)
alpha_, _, coef = linear_model.lars_path(
X, y, method='lar', Gram=G, return_path=False)
assert_array_almost_equal(coef, coef_path_[:, -1])
assert alpha_ == alphas_[-1]
def test_no_path_all_precomputed():
# Test that the ``return_path=False`` option with Gram and Xy remains
# correct
X, y = 3 * diabetes.data, diabetes.target
G = np.dot(X.T, X)
Xy = np.dot(X.T, y)
alphas_, _, coef_path_ = linear_model.lars_path(
X, y, method='lasso', Xy=Xy, Gram=G, alpha_min=0.9)
alpha_, _, coef = linear_model.lars_path(
X, y, method='lasso', Gram=G, Xy=Xy, alpha_min=0.9, return_path=False)
assert_array_almost_equal(coef, coef_path_[:, -1])
assert alpha_ == alphas_[-1]
@pytest.mark.parametrize(
'classifier',
[linear_model.Lars, linear_model.LarsCV, linear_model.LassoLarsIC])
def test_lars_precompute(classifier):
# Check for different values of precompute
G = np.dot(X.T, X)
clf = classifier(precompute=G)
output_1 = ignore_warnings(clf.fit)(X, y).coef_
for precompute in [True, False, 'auto', None]:
clf = classifier(precompute=precompute)
output_2 = clf.fit(X, y).coef_
assert_array_almost_equal(output_1, output_2, decimal=8)
def test_singular_matrix():
# Test when input is a singular matrix
X1 = np.array([[1, 1.], [1., 1.]])
y1 = np.array([1, 1])
_, _, coef_path = linear_model.lars_path(X1, y1)
assert_array_almost_equal(coef_path.T, [[0, 0], [1, 0]])
def test_rank_deficient_design():
# consistency test that checks that LARS Lasso is handling rank
# deficient input data (with n_features < rank) in the same way
# as coordinate descent Lasso
y = [5, 0, 5]
for X in (
[[5, 0],
[0, 5],
[10, 10]],
[[10, 10, 0],
[1e-32, 0, 0],
[0, 0, 1]]
):
# To be able to use the coefs to compute the objective function,
# we need to turn off normalization
lars = linear_model.LassoLars(.1, normalize=False)
coef_lars_ = lars.fit(X, y).coef_
obj_lars = (1. / (2. * 3.)
* linalg.norm(y - np.dot(X, coef_lars_)) ** 2
+ .1 * linalg.norm(coef_lars_, 1))
coord_descent = linear_model.Lasso(.1, tol=1e-6, normalize=False)
coef_cd_ = coord_descent.fit(X, y).coef_
obj_cd = ((1. / (2. * 3.)) * linalg.norm(y - np.dot(X, coef_cd_)) ** 2
+ .1 * linalg.norm(coef_cd_, 1))
assert obj_lars < obj_cd * (1. + 1e-8)
def test_lasso_lars_vs_lasso_cd():
# Test that LassoLars and Lasso using coordinate descent give the
# same results.
X = 3 * diabetes.data
alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso')
lasso_cd = linear_model.Lasso(fit_intercept=False, tol=1e-8)
for c, a in zip(lasso_path.T, alphas):
if a == 0:
continue
lasso_cd.alpha = a
lasso_cd.fit(X, y)
error = linalg.norm(c - lasso_cd.coef_)
assert error < 0.01
# similar test, with the classifiers
for alpha in np.linspace(1e-2, 1 - 1e-2, 20):
clf1 = linear_model.LassoLars(alpha=alpha, normalize=False).fit(X, y)
clf2 = linear_model.Lasso(alpha=alpha, tol=1e-8,
normalize=False).fit(X, y)
err = linalg.norm(clf1.coef_ - clf2.coef_)
assert err < 1e-3
# same test, with normalized data
X = diabetes.data
alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso')
lasso_cd = linear_model.Lasso(fit_intercept=False, normalize=True,
tol=1e-8)
for c, a in zip(lasso_path.T, alphas):
if a == 0:
continue
lasso_cd.alpha = a
lasso_cd.fit(X, y)
error = linalg.norm(c - lasso_cd.coef_)
assert error < 0.01
def test_lasso_lars_vs_lasso_cd_early_stopping():
# Test that LassoLars and Lasso using coordinate descent give the
# same results when early stopping is used.
# (test : before, in the middle, and in the last part of the path)
alphas_min = [10, 0.9, 1e-4]
for alpha_min in alphas_min:
alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso',
alpha_min=alpha_min)
lasso_cd = linear_model.Lasso(fit_intercept=False, tol=1e-8)
lasso_cd.alpha = alphas[-1]
lasso_cd.fit(X, y)
error = linalg.norm(lasso_path[:, -1] - lasso_cd.coef_)
assert error < 0.01
# same test, with normalization
for alpha_min in alphas_min:
alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso',
alpha_min=alpha_min)
lasso_cd = linear_model.Lasso(fit_intercept=True, normalize=True,
tol=1e-8)
lasso_cd.alpha = alphas[-1]
lasso_cd.fit(X, y)
error = linalg.norm(lasso_path[:, -1] - lasso_cd.coef_)
assert error < 0.01
def test_lasso_lars_path_length():
# Test that the path length of the LassoLars is right
lasso = linear_model.LassoLars()
lasso.fit(X, y)
lasso2 = linear_model.LassoLars(alpha=lasso.alphas_[2])
lasso2.fit(X, y)
assert_array_almost_equal(lasso.alphas_[:3], lasso2.alphas_)
# Also check that the sequence of alphas is always decreasing
assert np.all(np.diff(lasso.alphas_) < 0)
def test_lasso_lars_vs_lasso_cd_ill_conditioned():
# Test lasso lars on a very ill-conditioned design, and check that
# it does not blow up, and stays somewhat close to a solution given
# by the coordinate descent solver
# Also test that lasso_path (using lars_path output style) gives
# the same result as lars_path and previous lasso output style
# under these conditions.
rng = np.random.RandomState(42)
# Generate data
n, m = 70, 100
k = 5
X = rng.randn(n, m)
w = np.zeros((m, 1))
i = np.arange(0, m)
rng.shuffle(i)
supp = i[:k]
w[supp] = np.sign(rng.randn(k, 1)) * (rng.rand(k, 1) + 1)
y = np.dot(X, w)
sigma = 0.2
y += sigma * rng.rand(*y.shape)
y = y.squeeze()
lars_alphas, _, lars_coef = linear_model.lars_path(X, y, method='lasso')
_, lasso_coef2, _ = linear_model.lasso_path(X, y,
alphas=lars_alphas,
tol=1e-6,
fit_intercept=False)
assert_array_almost_equal(lars_coef, lasso_coef2, decimal=1)
def test_lasso_lars_vs_lasso_cd_ill_conditioned2():
# Create an ill-conditioned situation in which the LARS has to go
# far in the path to converge, and check that LARS and coordinate
# descent give the same answers
    # Note: it used to be the case that Lars had to use the drop-for-good
    # strategy for this, but this is no longer the case with the
    # equality_tolerance checks
X = [[1e20, 1e20, 0],
[-1e-32, 0, 0],
[1, 1, 1]]
y = [10, 10, 1]
alpha = .0001
def objective_function(coef):
return (1. / (2. * len(X)) * linalg.norm(y - np.dot(X, coef)) ** 2
+ alpha * linalg.norm(coef, 1))
lars = linear_model.LassoLars(alpha=alpha, normalize=False)
assert_warns(ConvergenceWarning, lars.fit, X, y)
lars_coef_ = lars.coef_
lars_obj = objective_function(lars_coef_)
coord_descent = linear_model.Lasso(alpha=alpha, tol=1e-4, normalize=False)
cd_coef_ = coord_descent.fit(X, y).coef_
cd_obj = objective_function(cd_coef_)
assert lars_obj < cd_obj * (1. + 1e-8)
def test_lars_add_features():
# assure that at least some features get added if necessary
# test for 6d2b4c
# Hilbert matrix
n = 5
H = 1. / (np.arange(1, n + 1) + np.arange(n)[:, np.newaxis])
clf = linear_model.Lars(fit_intercept=False).fit(
H, np.arange(n))
assert np.all(np.isfinite(clf.coef_))
def test_lars_n_nonzero_coefs(verbose=False):
lars = linear_model.Lars(n_nonzero_coefs=6, verbose=verbose)
lars.fit(X, y)
assert len(lars.coef_.nonzero()[0]) == 6
# The path should be of length 6 + 1 in a Lars going down to 6
# non-zero coefs
assert len(lars.alphas_) == 7
@ignore_warnings
def test_multitarget():
# Assure that estimators receiving multidimensional y do the right thing
Y = np.vstack([y, y ** 2]).T
n_targets = Y.shape[1]
estimators = [
linear_model.LassoLars(),
linear_model.Lars(),
# regression test for gh-1615
linear_model.LassoLars(fit_intercept=False),
linear_model.Lars(fit_intercept=False),
]
for estimator in estimators:
estimator.fit(X, Y)
Y_pred = estimator.predict(X)
alphas, active, coef, path = (estimator.alphas_, estimator.active_,
estimator.coef_, estimator.coef_path_)
for k in range(n_targets):
estimator.fit(X, Y[:, k])
y_pred = estimator.predict(X)
assert_array_almost_equal(alphas[k], estimator.alphas_)
assert_array_almost_equal(active[k], estimator.active_)
assert_array_almost_equal(coef[k], estimator.coef_)
assert_array_almost_equal(path[k], estimator.coef_path_)
assert_array_almost_equal(Y_pred[:, k], y_pred)
def test_lars_cv():
# Test the LassoLarsCV object by checking that the optimal alpha
# increases as the number of samples increases.
# This property is not actually guaranteed in general and is just a
# property of the given dataset, with the given steps chosen.
old_alpha = 0
lars_cv = linear_model.LassoLarsCV()
for length in (400, 200, 100):
X = diabetes.data[:length]
y = diabetes.target[:length]
lars_cv.fit(X, y)
np.testing.assert_array_less(old_alpha, lars_cv.alpha_)
old_alpha = lars_cv.alpha_
assert not hasattr(lars_cv, 'n_nonzero_coefs')
def test_lars_cv_max_iter(recwarn):
warnings.simplefilter('always')
with np.errstate(divide='raise', invalid='raise'):
X = diabetes.data
y = diabetes.target
rng = np.random.RandomState(42)
x = rng.randn(len(y))
X = diabetes.data
X = np.c_[X, x, x] # add correlated features
lars_cv = linear_model.LassoLarsCV(max_iter=5, cv=5)
lars_cv.fit(X, y)
# Check that there is no warning in general and no ConvergenceWarning
# in particular.
# Materialize the string representation of the warning to get a more
# informative error message in case of AssertionError.
recorded_warnings = [str(w) for w in recwarn]
assert recorded_warnings == []
def test_lasso_lars_ic():
# Test the LassoLarsIC object by checking that
# - some good features are selected.
# - alpha_bic > alpha_aic
# - n_nonzero_bic < n_nonzero_aic
lars_bic = linear_model.LassoLarsIC('bic')
lars_aic = linear_model.LassoLarsIC('aic')
rng = np.random.RandomState(42)
X = diabetes.data
X = np.c_[X, rng.randn(X.shape[0], 5)] # add 5 bad features
lars_bic.fit(X, y)
lars_aic.fit(X, y)
nonzero_bic = np.where(lars_bic.coef_)[0]
nonzero_aic = np.where(lars_aic.coef_)[0]
assert lars_bic.alpha_ > lars_aic.alpha_
assert len(nonzero_bic) < len(nonzero_aic)
assert np.max(nonzero_bic) < diabetes.data.shape[1]
# test error on unknown IC
lars_broken = linear_model.LassoLarsIC('<unknown>')
assert_raises(ValueError, lars_broken.fit, X, y)
def test_lars_path_readonly_data():
# When using automated memory mapping on large input, the
# fold data is in read-only mode
# This is a non-regression test for:
# https://github.com/scikit-learn/scikit-learn/issues/4597
splitted_data = train_test_split(X, y, random_state=42)
with TempMemmap(splitted_data) as (X_train, X_test, y_train, y_test):
# The following should not fail despite copy=False
_lars_path_residues(X_train, y_train, X_test, y_test, copy=False)
def test_lars_path_positive_constraint():
# this is the main test for the positive parameter on the lars_path method
# the estimator classes just make use of this function
# we do the test on the diabetes dataset
# ensure that we get negative coefficients when positive=False
# and all positive when positive=True
# for method 'lar' (default) and lasso
err_msg = "Positive constraint not supported for 'lar' coding method."
with pytest.raises(ValueError, match=err_msg):
linear_model.lars_path(diabetes['data'], diabetes['target'],
method='lar', positive=True)
method = 'lasso'
_, _, coefs = \
linear_model.lars_path(X, y, return_path=True, method=method,
positive=False)
assert coefs.min() < 0
_, _, coefs = \
linear_model.lars_path(X, y, return_path=True, method=method,
positive=True)
assert coefs.min() >= 0
# now we gonna test the positive option for all estimator classes
default_parameter = {'fit_intercept': False}
estimator_parameter_map = {'LassoLars': {'alpha': 0.1},
'LassoLarsCV': {},
'LassoLarsIC': {}}
def test_estimatorclasses_positive_constraint():
# testing the transmissibility for the positive option of all estimator
# classes in this same function here
default_parameter = {'fit_intercept': False}
estimator_parameter_map = {'LassoLars': {'alpha': 0.1},
'LassoLarsCV': {},
'LassoLarsIC': {}}
for estname in estimator_parameter_map:
params = default_parameter.copy()
params.update(estimator_parameter_map[estname])
estimator = getattr(linear_model, estname)(positive=False, **params)
estimator.fit(X, y)
assert estimator.coef_.min() < 0
estimator = getattr(linear_model, estname)(positive=True, **params)
estimator.fit(X, y)
assert min(estimator.coef_) >= 0
def test_lasso_lars_vs_lasso_cd_positive():
# Test that LassoLars and Lasso using coordinate descent give the
# same results when using the positive option
# This test is basically a copy of the above with additional positive
    # option. However, for the middle part (the comparison of coefficient values
    # for a range of alphas), we had to make an adaptation. See below.
# not normalized data
X = 3 * diabetes.data
alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso',
positive=True)
lasso_cd = linear_model.Lasso(fit_intercept=False, tol=1e-8, positive=True)
for c, a in zip(lasso_path.T, alphas):
if a == 0:
continue
lasso_cd.alpha = a
lasso_cd.fit(X, y)
error = linalg.norm(c - lasso_cd.coef_)
assert error < 0.01
# The range of alphas chosen for coefficient comparison here is restricted
# as compared with the above test without the positive option. This is due
# to the circumstance that the Lars-Lasso algorithm does not converge to
# the least-squares-solution for small alphas, see 'Least Angle Regression'
# by Efron et al 2004. The coefficients are typically in congruence up to
# the smallest alpha reached by the Lars-Lasso algorithm and start to
# diverge thereafter. See
# https://gist.github.com/michigraber/7e7d7c75eca694c7a6ff
for alpha in np.linspace(6e-1, 1 - 1e-2, 20):
clf1 = linear_model.LassoLars(fit_intercept=False, alpha=alpha,
normalize=False, positive=True).fit(X, y)
clf2 = linear_model.Lasso(fit_intercept=False, alpha=alpha, tol=1e-8,
normalize=False, positive=True).fit(X, y)
err = linalg.norm(clf1.coef_ - clf2.coef_)
assert err < 1e-3
# normalized data
X = diabetes.data
alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso',
positive=True)
lasso_cd = linear_model.Lasso(fit_intercept=False, normalize=True,
tol=1e-8, positive=True)
for c, a in zip(lasso_path.T[:-1], alphas[:-1]): # don't include alpha=0
lasso_cd.alpha = a
lasso_cd.fit(X, y)
error = linalg.norm(c - lasso_cd.coef_)
assert error < 0.01
def test_lasso_lars_vs_R_implementation():
# Test that sklearn LassoLars implementation agrees with the LassoLars
# implementation available in R (lars library) under the following
# scenarios:
# 1) fit_intercept=False and normalize=False
# 2) fit_intercept=True and normalize=True
# Let's generate the data used in the bug report 7778
y = np.array([-6.45006793, -3.51251449, -8.52445396, 6.12277822,
-19.42109366])
x = np.array([[0.47299829, 0, 0, 0, 0],
[0.08239882, 0.85784863, 0, 0, 0],
[0.30114139, -0.07501577, 0.80895216, 0, 0],
[-0.01460346, -0.1015233, 0.0407278, 0.80338378, 0],
[-0.69363927, 0.06754067, 0.18064514, -0.0803561,
0.40427291]])
X = x.T
###########################################################################
# Scenario 1: Let's compare R vs sklearn when fit_intercept=False and
# normalize=False
###########################################################################
#
# The R result was obtained using the following code:
#
# library(lars)
# model_lasso_lars = lars(X, t(y), type="lasso", intercept=FALSE,
# trace=TRUE, normalize=FALSE)
# r = t(model_lasso_lars$beta)
#
r = np.array([[0, 0, 0, 0, 0, -79.810362809499026, -83.528788732782829,
-83.777653739190711, -83.784156932888934,
-84.033390591756657],
[0, 0, 0, 0, -0.476624256777266, 0, 0, 0, 0,
0.025219751009936],
[0, -3.577397088285891, -4.702795355871871,
-7.016748621359461, -7.614898471899412, -0.336938391359179,
0, 0, 0.001213370600853, 0.048162321585148],
[0, 0, 0, 2.231558436628169, 2.723267514525966,
2.811549786389614, 2.813766976061531, 2.817462468949557,
2.817368178703816, 2.816221090636795],
[0, 0, -1.218422599914637, -3.457726183014808,
-4.021304522060710, -45.827461592423745,
-47.776608869312305,
-47.911561610746404, -47.914845922736234,
-48.039562334265717]])
model_lasso_lars = linear_model.LassoLars(alpha=0, fit_intercept=False,
normalize=False)
model_lasso_lars.fit(X, y)
skl_betas = model_lasso_lars.coef_path_
assert_array_almost_equal(r, skl_betas, decimal=12)
###########################################################################
###########################################################################
# Scenario 2: Let's compare R vs sklearn when fit_intercept=True and
# normalize=True
#
# Note: When normalize is equal to True, R returns the coefficients in
# their original units, that is, they are rescaled back, whereas sklearn
# does not do that, therefore, we need to do this step before comparing
# their results.
###########################################################################
#
# The R result was obtained using the following code:
#
# library(lars)
# model_lasso_lars2 = lars(X, t(y), type="lasso", intercept=TRUE,
# trace=TRUE, normalize=TRUE)
# r2 = t(model_lasso_lars2$beta)
r2 = np.array([[0, 0, 0, 0, 0],
[0, 0, 0, 8.371887668009453, 19.463768371044026],
[0, 0, 0, 0, 9.901611055290553],
[0, 7.495923132833733, 9.245133544334507,
17.389369207545062, 26.971656815643499],
[0, 0, -1.569380717440311, -5.924804108067312,
-7.996385265061972]])
model_lasso_lars2 = linear_model.LassoLars(alpha=0, fit_intercept=True,
normalize=True)
model_lasso_lars2.fit(X, y)
skl_betas2 = model_lasso_lars2.coef_path_
# Let's rescale back the coefficients returned by sklearn before comparing
# against the R result (read the note above)
temp = X - np.mean(X, axis=0)
normx = np.sqrt(np.sum(temp ** 2, axis=0))
skl_betas2 /= normx[:, np.newaxis]
assert_array_almost_equal(r2, skl_betas2, decimal=12)
###########################################################################
@pytest.mark.parametrize('copy_X', [True, False])
def test_lasso_lars_copyX_behaviour(copy_X):
"""
Test that user input regarding copy_X is not being overridden (it was until
at least version 0.21)
"""
lasso_lars = LassoLarsIC(copy_X=copy_X, precompute=False)
rng = np.random.RandomState(0)
X = rng.normal(0, 1, (100, 5))
X_copy = X.copy()
y = X[:, 2]
lasso_lars.fit(X, y)
assert copy_X == np.array_equal(X, X_copy)
@pytest.mark.parametrize('copy_X', [True, False])
def test_lasso_lars_fit_copyX_behaviour(copy_X):
"""
Test that user input to .fit for copy_X overrides default __init__ value
"""
lasso_lars = LassoLarsIC(precompute=False)
rng = np.random.RandomState(0)
X = rng.normal(0, 1, (100, 5))
X_copy = X.copy()
y = X[:, 2]
lasso_lars.fit(X, y, copy_X=copy_X)
assert copy_X == np.array_equal(X, X_copy)
| bsd-3-clause |
mattilyra/scikit-learn | sklearn/neural_network/tests/test_rbm.py | 225 | 6278 | import sys
import re
import numpy as np
from scipy.sparse import csc_matrix, csr_matrix, lil_matrix
from sklearn.utils.testing import (assert_almost_equal, assert_array_equal,
assert_true)
from sklearn.datasets import load_digits
from sklearn.externals.six.moves import cStringIO as StringIO
from sklearn.neural_network import BernoulliRBM
from sklearn.utils.validation import assert_all_finite
np.seterr(all='warn')
Xdigits = load_digits().data
Xdigits -= Xdigits.min()
Xdigits /= Xdigits.max()
def test_fit():
X = Xdigits.copy()
rbm = BernoulliRBM(n_components=64, learning_rate=0.1,
batch_size=10, n_iter=7, random_state=9)
rbm.fit(X)
assert_almost_equal(rbm.score_samples(X).mean(), -21., decimal=0)
# in-place tricks shouldn't have modified X
assert_array_equal(X, Xdigits)
def test_partial_fit():
X = Xdigits.copy()
rbm = BernoulliRBM(n_components=64, learning_rate=0.1,
batch_size=20, random_state=9)
n_samples = X.shape[0]
n_batches = int(np.ceil(float(n_samples) / rbm.batch_size))
batch_slices = np.array_split(X, n_batches)
for i in range(7):
for batch in batch_slices:
rbm.partial_fit(batch)
assert_almost_equal(rbm.score_samples(X).mean(), -21., decimal=0)
assert_array_equal(X, Xdigits)
def test_transform():
X = Xdigits[:100]
rbm1 = BernoulliRBM(n_components=16, batch_size=5,
n_iter=5, random_state=42)
rbm1.fit(X)
Xt1 = rbm1.transform(X)
Xt2 = rbm1._mean_hiddens(X)
assert_array_equal(Xt1, Xt2)
def test_small_sparse():
# BernoulliRBM should work on small sparse matrices.
X = csr_matrix(Xdigits[:4])
BernoulliRBM().fit(X) # no exception
def test_small_sparse_partial_fit():
for sparse in [csc_matrix, csr_matrix]:
X_sparse = sparse(Xdigits[:100])
X = Xdigits[:100].copy()
rbm1 = BernoulliRBM(n_components=64, learning_rate=0.1,
batch_size=10, random_state=9)
rbm2 = BernoulliRBM(n_components=64, learning_rate=0.1,
batch_size=10, random_state=9)
rbm1.partial_fit(X_sparse)
rbm2.partial_fit(X)
assert_almost_equal(rbm1.score_samples(X).mean(),
rbm2.score_samples(X).mean(),
decimal=0)
def test_sample_hiddens():
rng = np.random.RandomState(0)
X = Xdigits[:100]
rbm1 = BernoulliRBM(n_components=2, batch_size=5,
n_iter=5, random_state=42)
rbm1.fit(X)
h = rbm1._mean_hiddens(X[0])
hs = np.mean([rbm1._sample_hiddens(X[0], rng) for i in range(100)], 0)
assert_almost_equal(h, hs, decimal=1)
def test_fit_gibbs():
# Gibbs on the RBM hidden layer should be able to recreate [[0], [1]]
# from the same input
rng = np.random.RandomState(42)
X = np.array([[0.], [1.]])
rbm1 = BernoulliRBM(n_components=2, batch_size=2,
n_iter=42, random_state=rng)
    # you need that many iterations
rbm1.fit(X)
assert_almost_equal(rbm1.components_,
np.array([[0.02649814], [0.02009084]]), decimal=4)
assert_almost_equal(rbm1.gibbs(X), X)
return rbm1
def test_fit_gibbs_sparse():
# Gibbs on the RBM hidden layer should be able to recreate [[0], [1]] from
# the same input even when the input is sparse, and test against non-sparse
rbm1 = test_fit_gibbs()
rng = np.random.RandomState(42)
from scipy.sparse import csc_matrix
X = csc_matrix([[0.], [1.]])
rbm2 = BernoulliRBM(n_components=2, batch_size=2,
n_iter=42, random_state=rng)
rbm2.fit(X)
assert_almost_equal(rbm2.components_,
np.array([[0.02649814], [0.02009084]]), decimal=4)
assert_almost_equal(rbm2.gibbs(X), X.toarray())
assert_almost_equal(rbm1.components_, rbm2.components_)
def test_gibbs_smoke():
# Check if we don't get NaNs sampling the full digits dataset.
# Also check that sampling again will yield different results.
X = Xdigits
rbm1 = BernoulliRBM(n_components=42, batch_size=40,
n_iter=20, random_state=42)
rbm1.fit(X)
X_sampled = rbm1.gibbs(X)
assert_all_finite(X_sampled)
X_sampled2 = rbm1.gibbs(X)
assert_true(np.all((X_sampled != X_sampled2).max(axis=1)))
def test_score_samples():
# Test score_samples (pseudo-likelihood) method.
# Assert that pseudo-likelihood is computed without clipping.
# See Fabian's blog, http://bit.ly/1iYefRk
rng = np.random.RandomState(42)
X = np.vstack([np.zeros(1000), np.ones(1000)])
rbm1 = BernoulliRBM(n_components=10, batch_size=2,
n_iter=10, random_state=rng)
rbm1.fit(X)
assert_true((rbm1.score_samples(X) < -300).all())
# Sparse vs. dense should not affect the output. Also test sparse input
# validation.
rbm1.random_state = 42
d_score = rbm1.score_samples(X)
rbm1.random_state = 42
s_score = rbm1.score_samples(lil_matrix(X))
assert_almost_equal(d_score, s_score)
# Test numerical stability (#2785): would previously generate infinities
# and crash with an exception.
with np.errstate(under='ignore'):
rbm1.score_samples([np.arange(1000) * 100])
def test_rbm_verbose():
rbm = BernoulliRBM(n_iter=2, verbose=10)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
rbm.fit(Xdigits)
finally:
sys.stdout = old_stdout
def test_sparse_and_verbose():
# Make sure RBM works with sparse input when verbose=True
old_stdout = sys.stdout
sys.stdout = StringIO()
from scipy.sparse import csc_matrix
X = csc_matrix([[0.], [1.]])
rbm = BernoulliRBM(n_components=2, batch_size=2, n_iter=1,
random_state=42, verbose=True)
try:
rbm.fit(X)
s = sys.stdout.getvalue()
# make sure output is sound
assert_true(re.match(r"\[BernoulliRBM\] Iteration 1,"
r" pseudo-likelihood = -?(\d)+(\.\d+)?,"
r" time = (\d|\.)+s",
s))
finally:
sys.stdout = old_stdout
| bsd-3-clause |
google/active-learning | sampling_methods/kcenter_greedy.py | 1 | 4339 | # Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Returns points that minimizes the maximum distance of any point to a center.
Implements the k-Center-Greedy method in
Ozan Sener and Silvio Savarese. A Geometric Approach to Active Learning for
Convolutional Neural Networks. https://arxiv.org/abs/1708.00489 2017
Distance metric defaults to l2 distance. Features used to calculate distance
are either raw features or, if a model has a transform method, the output
of model.transform(X).
Can be extended to a robust k centers algorithm that ignores a certain number of
outlier datapoints. Resulting centers are the solution to a mixed integer program.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from sklearn.metrics import pairwise_distances
from sampling_methods.sampling_def import SamplingMethod
class kCenterGreedy(SamplingMethod):
def __init__(self, X, y, seed, metric='euclidean'):
self.X = X
self.y = y
self.flat_X = self.flatten_X()
self.name = 'kcenter'
self.features = self.flat_X
self.metric = metric
self.min_distances = None
self.n_obs = self.X.shape[0]
self.already_selected = []
def update_distances(self, cluster_centers, only_new=True, reset_dist=False):
"""Update min distances given cluster centers.
Args:
cluster_centers: indices of cluster centers
only_new: only calculate distance for newly selected points and update
min_distances.
      reset_dist: whether to reset min_distances.
"""
if reset_dist:
self.min_distances = None
if only_new:
cluster_centers = [d for d in cluster_centers
if d not in self.already_selected]
if cluster_centers:
# Update min_distances for all examples given new cluster center.
x = self.features[cluster_centers]
dist = pairwise_distances(self.features, x, metric=self.metric)
if self.min_distances is None:
self.min_distances = np.min(dist, axis=1).reshape(-1,1)
else:
self.min_distances = np.minimum(self.min_distances, dist)
def select_batch_(self, model, already_selected, N, **kwargs):
"""
Diversity promoting active learning method that greedily forms a batch
to minimize the maximum distance to a cluster center among all unlabeled
datapoints.
Args:
model: model with scikit-like API with decision_function implemented
already_selected: index of datapoints already selected
N: batch size
Returns:
indices of points selected to minimize distance to cluster centers
"""
try:
# Assumes that the transform function takes in original data and not
# flattened data.
print('Getting transformed features...')
self.features = model.transform(self.X)
print('Calculating distances...')
self.update_distances(already_selected, only_new=False, reset_dist=True)
except:
print('Using flat_X as features.')
self.update_distances(already_selected, only_new=True, reset_dist=False)
new_batch = []
for _ in range(N):
if self.already_selected is None:
# Initialize centers with a randomly selected datapoint
ind = np.random.choice(np.arange(self.n_obs))
else:
ind = np.argmax(self.min_distances)
# New examples should not be in already selected since those points
# should have min_distance of zero to a cluster center.
assert ind not in already_selected
self.update_distances([ind], only_new=True, reset_dist=False)
new_batch.append(ind)
print('Maximum distance from cluster centers is %0.2f'
% max(self.min_distances))
self.already_selected = already_selected
return new_batch
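# Minimal usage sketch (illustrative assumptions: random features and no model with
# a .transform method, so selection falls back to the flattened raw features):
#   >>> import numpy as np
#   >>> X_demo = np.random.rand(100, 8)
#   >>> sampler = kCenterGreedy(X_demo, y=None, seed=0)
#   >>> batch = sampler.select_batch_(model=None, already_selected=[0, 1], N=5)
#   >>> len(batch)
#   5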
| apache-2.0 |
joegomes/deepchem | deepchem/data/data_loader.py | 1 | 8305 | """
Process an input dataset into a format suitable for machine learning.
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import os
import gzip
import pandas as pd
import numpy as np
import csv
import numbers
import tempfile
from rdkit.Chem import rdmolfiles
from rdkit.Chem import rdmolops
from rdkit import Chem
import time
import sys
from deepchem.utils.save import log
from deepchem.utils.save import load_csv_files
from deepchem.utils.save import load_sdf_files
from deepchem.feat import UserDefinedFeaturizer
from deepchem.data import DiskDataset
def convert_df_to_numpy(df, tasks, verbose=False):
"""Transforms a dataframe containing deepchem input into numpy arrays"""
n_samples = df.shape[0]
n_tasks = len(tasks)
time1 = time.time()
y = np.hstack(
[np.reshape(np.array(df[task].values), (n_samples, 1)) for task in tasks])
time2 = time.time()
w = np.ones((n_samples, n_tasks))
missing = np.zeros_like(y).astype(int)
feature_shape = None
for ind in range(n_samples):
for task in range(n_tasks):
if y[ind, task] == "":
missing[ind, task] = 1
# ids = df[id_field].values
# Set missing data to have weight zero
for ind in range(n_samples):
for task in range(n_tasks):
if missing[ind, task]:
y[ind, task] = 0.
w[ind, task] = 0.
return y.astype(float), w.astype(float)
def featurize_smiles_df(df, featurizer, field, log_every_N=1000, verbose=True):
"""Featurize individual compounds in dataframe.
Given a featurizer that operates on individual chemical compounds
or macromolecules, compute & add features for that compound to the
features dataframe
"""
sample_elems = df[field].tolist()
features = []
for ind, elem in enumerate(sample_elems):
mol = Chem.MolFromSmiles(elem)
# TODO (ytz) this is a bandage solution to reorder the atoms so
# that they're always in the same canonical order. Presumably this
# should be correctly implemented in the future for graph mols.
if mol:
new_order = rdmolfiles.CanonicalRankAtoms(mol)
mol = rdmolops.RenumberAtoms(mol, new_order)
if ind % log_every_N == 0:
log("Featurizing sample %d" % ind, verbose)
features.append(featurizer.featurize([mol]))
valid_inds = np.array(
[1 if elt.size > 0 else 0 for elt in features], dtype=bool)
features = [elt for (is_valid, elt) in zip(valid_inds, features) if is_valid]
return np.squeeze(np.array(features)), valid_inds
def get_user_specified_features(df, featurizer, verbose=True):
"""Extract and merge user specified features.
Merge features included in dataset provided by user
into final features dataframe
Three types of featurization here:
1) Molecule featurization
-) Smiles string featurization
-) Rdkit MOL featurization
2) Complex featurization
-) PDB files for interacting molecules.
3) User specified featurizations.
"""
time1 = time.time()
df[featurizer.feature_fields] = df[featurizer.feature_fields].apply(
pd.to_numeric)
X_shard = df.as_matrix(columns=featurizer.feature_fields)
time2 = time.time()
log("TIMING: user specified processing took %0.3f s" % (time2 - time1),
verbose)
return X_shard
def featurize_mol_df(df, featurizer, field, verbose=True, log_every_N=1000):
"""Featurize individual compounds in dataframe.
  Featurizes .sdf files. Since the 3-D structure should be preserved,
  we use the rdkit "mol" object created from .sdf instead of the smiles
  string. Some featurizers such as CoulombMatrix also require a 3-D
  structure. Featurizing from .sdf is currently the only way to
  perform CM featurization.
"""
sample_elems = df[field].tolist()
features = []
for ind, mol in enumerate(sample_elems):
if ind % log_every_N == 0:
log("Featurizing sample %d" % ind, verbose)
features.append(featurizer.featurize([mol]))
valid_inds = np.array(
[1 if elt.size > 0 else 0 for elt in features], dtype=bool)
features = [elt for (is_valid, elt) in zip(valid_inds, features) if is_valid]
return np.squeeze(np.array(features)), valid_inds
class DataLoader(object):
"""
Handles loading/featurizing of chemical samples (datapoints).
Currently knows how to load csv-files/pandas-dataframes/SDF-files. Writes a
dataframe object to disk as output.
"""
def __init__(self,
tasks,
smiles_field=None,
id_field=None,
mol_field=None,
featurizer=None,
verbose=True,
log_every_n=1000):
"""Extracts data from input as Pandas data frame"""
if not isinstance(tasks, list):
raise ValueError("tasks must be a list.")
self.verbose = verbose
self.tasks = tasks
self.smiles_field = smiles_field
if id_field is None:
self.id_field = smiles_field
else:
self.id_field = id_field
self.mol_field = mol_field
self.user_specified_features = None
if isinstance(featurizer, UserDefinedFeaturizer):
self.user_specified_features = featurizer.feature_fields
self.featurizer = featurizer
self.log_every_n = log_every_n
def featurize(self, input_files, data_dir=None, shard_size=8192):
"""Featurize provided files and write to specified location."""
log("Loading raw samples now.", self.verbose)
log("shard_size: %d" % shard_size, self.verbose)
if not isinstance(input_files, list):
input_files = [input_files]
def shard_generator():
for shard_num, shard in enumerate(
self.get_shards(input_files, shard_size)):
time1 = time.time()
X, valid_inds = self.featurize_shard(shard)
ids = shard[self.id_field].values
ids = ids[valid_inds]
if len(self.tasks) > 0:
# Featurize task results iff they exist.
          y, w = convert_df_to_numpy(shard, self.tasks)
# Filter out examples where featurization failed.
y, w = (y[valid_inds], w[valid_inds])
assert len(X) == len(ids) == len(y) == len(w)
else:
# For prospective data where results are unknown, it makes
# no sense to have y values or weights.
y, w = (None, None)
assert len(X) == len(ids)
time2 = time.time()
log("TIMING: featurizing shard %d took %0.3f s" %
(shard_num, time2 - time1), self.verbose)
yield X, y, w, ids
return DiskDataset.create_dataset(
shard_generator(), data_dir, self.tasks, verbose=self.verbose)
def get_shards(self, input_files, shard_size):
"""Stub for children classes."""
raise NotImplementedError
def featurize_shard(self, shard):
"""Featurizes a shard of an input dataframe."""
raise NotImplementedError
class CSVLoader(DataLoader):
"""
Handles loading of CSV files.
"""
def get_shards(self, input_files, shard_size, verbose=True):
"""Defines a generator which returns data for each shard"""
return load_csv_files(input_files, shard_size, verbose=verbose)
def featurize_shard(self, shard):
"""Featurizes a shard of an input dataframe."""
return featurize_smiles_df(shard, self.featurizer, field=self.smiles_field)
class UserCSVLoader(DataLoader):
"""
Handles loading of CSV files with user-defined featurizers.
"""
def get_shards(self, input_files, shard_size):
"""Defines a generator which returns data for each shard"""
return load_csv_files(input_files, shard_size)
def featurize_shard(self, shard):
"""Featurizes a shard of an input dataframe."""
assert isinstance(self.featurizer, UserDefinedFeaturizer)
X = get_user_specified_features(shard, self.featurizer)
return (X, np.ones(len(X), dtype=bool))
class SDFLoader(DataLoader):
"""
Handles loading of SDF files.
"""
def get_shards(self, input_files, shard_size):
"""Defines a generator which returns data for each shard"""
return load_sdf_files(input_files)
def featurize_shard(self, shard):
"""Featurizes a shard of an input dataframe."""
log("Currently featurizing feature_type: %s" %
self.featurizer.__class__.__name__, self.verbose)
return featurize_mol_df(shard, self.featurizer, field=self.mol_field)
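# Minimal usage sketch (hypothetical file name and featurizer choice; assumes a CSV
# with 'smiles' and 'task1' columns and that deepchem's featurizers are importable):
#   >>> import deepchem as dc
#   >>> loader = CSVLoader(tasks=['task1'], smiles_field='smiles',
#   ...                    featurizer=dc.feat.CircularFingerprint(size=1024))
#   >>> dataset = loader.featurize('assay.csv', shard_size=8192)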
| mit |
stkubr/zipline | zipline/utils/munge.py | 29 | 2299 | #
# Copyright 2015 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pandas.core.common as com
def _interpolate(values, method, axis=None):
if values.ndim == 1:
axis = 0
elif values.ndim == 2:
axis = 1
else:
raise Exception("Cannot interpolate array with more than 2 dims")
values = values.copy()
values = interpolate_2d(values, method, axis=axis)
return values
def interpolate_2d(values, method='pad', axis=0, limit=None, fill_value=None):
"""
Copied from the 0.15.2. This did not exist in 0.12.0.
Differences:
- Don't depend on pad_2d and backfill_2d to return values
- Removed dtype kwarg. 0.12.0 did not have this option.
"""
transf = (lambda x: x) if axis == 0 else (lambda x: x.T)
# reshape a 1 dim if needed
ndim = values.ndim
if values.ndim == 1:
if axis != 0: # pragma: no cover
raise AssertionError("cannot interpolate on a ndim == 1 with "
"axis != 0")
values = values.reshape(tuple((1,) + values.shape))
if fill_value is None:
mask = None
else: # todo create faster fill func without masking
mask = com.mask_missing(transf(values), fill_value)
# Note: pad_2d and backfill_2d work inplace in 0.12.0 and 0.15.2
# in 0.15.2 they also return a reference to values
if method == 'pad':
com.pad_2d(transf(values), limit=limit, mask=mask)
else:
com.backfill_2d(transf(values), limit=limit, mask=mask)
# reshape back
if ndim == 1:
values = values[0]
return values
def ffill(values, axis=None):
return _interpolate(values, 'pad', axis=axis)
def bfill(values, axis=None):
return _interpolate(values, 'bfill', axis=axis)
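# Minimal usage sketch (toy values; assumes a pandas version old enough to provide
# pandas.core.common.pad_2d / backfill_2d, as this module requires):
#   >>> import numpy as np
#   >>> a = np.array([1.0, np.nan, np.nan, 4.0])
#   >>> ffill(a)
#   array([ 1.,  1.,  1.,  4.])
#   >>> bfill(a)
#   array([ 1.,  4.,  4.,  4.])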
| apache-2.0 |
jmargeta/scikit-learn | sklearn/linear_model/tests/test_least_angle.py | 3 | 15808 | import warnings
from nose.tools import assert_equal
import numpy as np
from scipy import linalg
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn import linear_model, datasets
diabetes = datasets.load_diabetes()
X, y = diabetes.data, diabetes.target
# TODO: use another dataset that has multiple drops
def test_simple():
"""
Principle of Lars is to keep covariances tied and decreasing
"""
# also test verbose output
from sklearn.externals.six.moves import cStringIO as StringIO
import sys
old_stdout = sys.stdout
sys.stdout = StringIO()
alphas_, active, coef_path_ = linear_model.lars_path(
diabetes.data, diabetes.target, method="lar", verbose=10)
sys.stdout = old_stdout
for (i, coef_) in enumerate(coef_path_.T):
res = y - np.dot(X, coef_)
cov = np.dot(X.T, res)
C = np.max(abs(cov))
eps = 1e-3
ocur = len(cov[C - eps < abs(cov)])
if i < X.shape[1]:
assert_true(ocur == i + 1)
else:
# no more than max_pred variables can go into the active set
assert_true(ocur == X.shape[1])
def test_simple_precomputed():
"""
The same, with precomputed Gram matrix
"""
G = np.dot(diabetes.data.T, diabetes.data)
alphas_, active, coef_path_ = linear_model.lars_path(
diabetes.data, diabetes.target, Gram=G, method="lar")
for i, coef_ in enumerate(coef_path_.T):
res = y - np.dot(X, coef_)
cov = np.dot(X.T, res)
C = np.max(abs(cov))
eps = 1e-3
ocur = len(cov[C - eps < abs(cov)])
if i < X.shape[1]:
assert_true(ocur == i + 1)
else:
# no more than max_pred variables can go into the active set
assert_true(ocur == X.shape[1])
def test_all_precomputed():
"""
Test that lars_path with precomputed Gram and Xy gives the right answer
"""
X, y = diabetes.data, diabetes.target
G = np.dot(X.T, X)
Xy = np.dot(X.T, y)
for method in 'lar', 'lasso':
output = linear_model.lars_path(X, y, method=method)
output_pre = linear_model.lars_path(X, y, Gram=G, Xy=Xy, method=method)
for expected, got in zip(output, output_pre):
assert_array_almost_equal(expected, got)
def test_lars_lstsq():
"""
Test that Lars gives least square solution at the end
of the path
"""
X1 = 3 * diabetes.data # use un-normalized dataset
clf = linear_model.LassoLars(alpha=0.)
clf.fit(X1, y)
coef_lstsq = np.linalg.lstsq(X1, y)[0]
assert_array_almost_equal(clf.coef_, coef_lstsq)
def test_lasso_gives_lstsq_solution():
"""
Test that Lars Lasso gives least square solution at the end
of the path
"""
alphas_, active, coef_path_ = linear_model.lars_path(X, y, method="lasso")
coef_lstsq = np.linalg.lstsq(X, y)[0]
assert_array_almost_equal(coef_lstsq, coef_path_[:, -1])
def test_collinearity():
"""Check that lars_path is robust to collinearity in input"""
X = np.array([[3., 3., 1.],
[2., 2., 0.],
[1., 1., 0]])
y = np.array([1., 0., 0])
_, _, coef_path_ = linear_model.lars_path(X, y, alpha_min=0.01)
assert_true(not np.isnan(coef_path_).any())
residual = np.dot(X, coef_path_[:, -1]) - y
assert_less((residual ** 2).sum(), 1.) # just make sure it's bounded
n_samples = 10
X = np.random.rand(n_samples, 5)
y = np.zeros(n_samples)
_, _, coef_path_ = linear_model.lars_path(X, y, Gram='auto', copy_X=False,
copy_Gram=False, alpha_min=0.,
method='lasso', verbose=0,
max_iter=500)
assert_array_almost_equal(coef_path_, np.zeros_like(coef_path_))
def test_no_path():
"""
Test that the ``return_path=False`` option returns the correct output
"""
alphas_, active_, coef_path_ = linear_model.lars_path(
diabetes.data, diabetes.target, method="lar")
alpha_, active, coef = linear_model.lars_path(
diabetes.data, diabetes.target, method="lar", return_path=False)
assert_array_almost_equal(coef, coef_path_[:, -1])
assert_true(alpha_ == alphas_[-1])
def test_no_path_precomputed():
"""
Test that the ``return_path=False`` option with Gram remains correct
"""
G = np.dot(diabetes.data.T, diabetes.data)
alphas_, active_, coef_path_ = linear_model.lars_path(
diabetes.data, diabetes.target, method="lar", Gram=G)
alpha_, active, coef = linear_model.lars_path(
diabetes.data, diabetes.target, method="lar", Gram=G,
return_path=False)
assert_array_almost_equal(coef, coef_path_[:, -1])
assert_true(alpha_ == alphas_[-1])
def test_no_path_all_precomputed():
"""
Test that the ``return_path=False`` option with Gram and Xy remains correct
"""
X, y = 3 * diabetes.data, diabetes.target
G = np.dot(X.T, X)
Xy = np.dot(X.T, y)
alphas_, active_, coef_path_ = linear_model.lars_path(
X, y, method="lasso", Gram=G, Xy=Xy, alpha_min=0.9)
print("---")
alpha_, active, coef = linear_model.lars_path(
X, y, method="lasso", Gram=G, Xy=Xy, alpha_min=0.9, return_path=False)
assert_array_almost_equal(coef, coef_path_[:, -1])
assert_true(alpha_ == alphas_[-1])
def test_singular_matrix():
# Test when input is a singular matrix
# In this test the "drop for good strategy" of lars_path is necessary
# to give a good answer
X1 = np.array([[1, 1.], [1., 1.]])
y1 = np.array([1, 1])
with warnings.catch_warnings(record=True) as warning_list:
warnings.simplefilter("always", UserWarning)
alphas, active, coef_path = linear_model.lars_path(X1, y1)
assert_true(len(warning_list) > 0)
assert_true('Dropping a regressor' in warning_list[0].message.args[0])
assert_array_almost_equal(coef_path.T, [[0, 0], [1, 0]])
def test_rank_deficient_design():
# consistency test that checks that LARS Lasso is handling rank
# deficient input data (with n_features < rank) in the same way
# as coordinate descent Lasso
y = [5, 0, 5]
for X in ([[5, 0],
[0, 5],
[10, 10]],
[[10, 10, 0],
[1e-32, 0, 0],
[0, 0, 1]],
):
# To be able to use the coefs to compute the objective function,
# we need to turn off normalization
lars = linear_model.LassoLars(.1, normalize=False)
coef_lars_ = lars.fit(X, y).coef_
obj_lars = (1. / (2. * 3.)
* linalg.norm(y - np.dot(X, coef_lars_)) ** 2
+ .1 * linalg.norm(coef_lars_, 1))
coord_descent = linear_model.Lasso(.1, tol=1e-6, normalize=False)
coef_cd_ = coord_descent.fit(X, y).coef_
obj_cd = ((1. / (2. * 3.)) * linalg.norm(y - np.dot(X, coef_cd_)) ** 2
+ .1 * linalg.norm(coef_cd_, 1))
assert_array_almost_equal(obj_lars, obj_cd)
def test_lasso_lars_vs_lasso_cd(verbose=False):
"""
Test that LassoLars and Lasso using coordinate descent give the
same results
"""
X = 3 * diabetes.data
alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso')
lasso_cd = linear_model.Lasso(fit_intercept=False, tol=1e-8)
for c, a in zip(lasso_path.T, alphas):
if a == 0:
continue
lasso_cd.alpha = a
lasso_cd.fit(X, y)
error = np.linalg.norm(c - lasso_cd.coef_)
assert_less(error, 0.01)
# similar test, with the classifiers
for alpha in np.linspace(1e-2, 1 - 1e-2):
clf1 = linear_model.LassoLars(alpha=alpha, normalize=False).fit(X, y)
clf2 = linear_model.Lasso(alpha=alpha, tol=1e-8,
normalize=False).fit(X, y)
err = np.linalg.norm(clf1.coef_ - clf2.coef_)
assert_less(err, 1e-3)
# same test, with normalized data
X = diabetes.data
alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso')
lasso_cd = linear_model.Lasso(fit_intercept=False, normalize=True,
tol=1e-8)
for c, a in zip(lasso_path.T, alphas):
if a == 0:
continue
lasso_cd.alpha = a
lasso_cd.fit(X, y)
error = np.linalg.norm(c - lasso_cd.coef_)
assert_less(error, 0.01)
def test_lasso_lars_vs_lasso_cd_early_stopping(verbose=False):
"""
Test that LassoLars and Lasso using coordinate descent give the
same results when early stopping is used.
(test : before, in the middle, and in the last part of the path)
"""
alphas_min = [10, 0.9, 1e-4]
for alphas_min in alphas_min:
alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso',
alpha_min=0.9)
lasso_cd = linear_model.Lasso(fit_intercept=False, tol=1e-8)
lasso_cd.alpha = alphas[-1]
lasso_cd.fit(X, y)
error = np.linalg.norm(lasso_path[:, -1] - lasso_cd.coef_)
assert_less(error, 0.01)
alphas_min = [10, 0.9, 1e-4]
# same test, with normalization
for alphas_min in alphas_min:
alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso',
alpha_min=0.9)
lasso_cd = linear_model.Lasso(fit_intercept=True, normalize=True,
tol=1e-8)
lasso_cd.alpha = alphas[-1]
lasso_cd.fit(X, y)
error = np.linalg.norm(lasso_path[:, -1] - lasso_cd.coef_)
assert_less(error, 0.01)
def test_lasso_lars_path_length():
# Test that the path length of the LassoLars is right
lasso = linear_model.LassoLars()
lasso.fit(X, y)
lasso2 = linear_model.LassoLars(alpha=lasso.alphas_[2])
lasso2.fit(X, y)
np.testing.assert_array_equal(lasso.alphas_[:3], lasso2.alphas_)
# Also check that the sequence of alphas is always decreasing
assert_true(np.all(np.diff(lasso.alphas_) < 0))
def test_lasso_lars_vs_lasso_cd_ill_conditioned():
# Test lasso lars on a very ill-conditioned design, and check that
# it does not blow up, and stays somewhat close to a solution given
# by the coordinate descent solver
rng = np.random.RandomState(42)
# Generate data
n, m = 80, 100
k = 5
X = rng.randn(n, m)
w = np.zeros((m, 1))
i = np.arange(0, m)
rng.shuffle(i)
supp = i[:k]
w[supp] = np.sign(rng.randn(k, 1)) * (rng.rand(k, 1) + 1)
y = np.dot(X, w)
sigma = 0.2
y += sigma * rng.rand(*y.shape)
y = y.squeeze()
with warnings.catch_warnings(record=True) as warning_list:
warnings.simplefilter("always", UserWarning)
lars_alphas, _, lars_coef = linear_model.lars_path(X, y,
method='lasso')
assert_true(len(warning_list) > 0)
assert_true(('Dropping a regressor' in warning_list[0].message.args[0])
or ('Early stopping' in warning_list[0].message.args[0]))
lasso_coef = np.zeros((w.shape[0], len(lars_alphas)))
for i, model in enumerate(linear_model.lasso_path(X, y, alphas=lars_alphas,
tol=1e-6)):
lasso_coef[:, i] = model.coef_
np.testing.assert_array_almost_equal(lars_coef, lasso_coef, decimal=1)
def test_lars_drop_for_good():
    # Create an ill-conditioned situation in which the LARS has to go
# far in the path to converge, and check that LARS and coordinate
# descent give the same answers
X = [[10, 10, 0],
[-1e-32, 0, 0],
[1, 1, 1]]
y = [100, -100, 1]
lars = linear_model.LassoLars(.001, normalize=False)
lars_coef_ = lars.fit(X, y).coef_
lars_obj = ((1. / (2. * 3.)) * linalg.norm(y - np.dot(X, lars_coef_)) ** 2
+ .1 * linalg.norm(lars_coef_, 1))
coord_descent = linear_model.Lasso(.001, tol=1e-10, normalize=False)
cd_coef_ = coord_descent.fit(X, y).coef_
cd_obj = ((1. / (2. * 3.)) * linalg.norm(y - np.dot(X, cd_coef_)) ** 2
+ .1 * linalg.norm(cd_coef_, 1))
assert_array_almost_equal(lars_obj / cd_obj, 1.0, decimal=3)
def test_lars_add_features():
"""
assure that at least some features get added if necessary
test for 6d2b4c
"""
# Hilbert matrix
n = 5
H = 1. / (np.arange(1, n + 1) + np.arange(n)[:, np.newaxis])
clf = linear_model.Lars(fit_intercept=False).fit(
H, np.arange(n))
assert_true(np.all(np.isfinite(clf.coef_)))
def test_lars_n_nonzero_coefs(verbose=False):
lars = linear_model.Lars(n_nonzero_coefs=6, verbose=verbose)
lars.fit(X, y)
assert_equal(len(lars.coef_.nonzero()[0]), 6)
# The path should be of length 6 + 1 in a Lars going down to 6
# non-zero coefs
assert_equal(len(lars.alphas_), 7)
def test_multitarget():
"""
Assure that estimators receiving multidimensional y do the right thing
"""
X = diabetes.data
Y = np.vstack([diabetes.target, diabetes.target ** 2]).T
n_targets = Y.shape[1]
for estimator in (linear_model.LassoLars(), linear_model.Lars()):
estimator.fit(X, Y)
Y_pred = estimator.predict(X)
Y_dec = estimator.decision_function(X)
assert_array_almost_equal(Y_pred, Y_dec)
alphas, active, coef, path = (estimator.alphas_, estimator.active_,
estimator.coef_, estimator.coef_path_)
for k in range(n_targets):
estimator.fit(X, Y[:, k])
y_pred = estimator.predict(X)
assert_array_almost_equal(alphas[k], estimator.alphas_)
assert_array_almost_equal(active[k], estimator.active_)
assert_array_almost_equal(coef[k], estimator.coef_)
assert_array_almost_equal(path[k], estimator.coef_path_)
assert_array_almost_equal(Y_pred[:, k], y_pred)
def test_lars_cv():
""" Test the LassoLarsCV object by checking that the optimal alpha
increases as the number of samples increases.
    This property is not actually guaranteed in general and is just a
property of the given dataset, with the given steps chosen.
"""
old_alpha = 0
lars_cv = linear_model.LassoLarsCV()
for length in (400, 200, 100):
X = diabetes.data[:length]
y = diabetes.target[:length]
lars_cv.fit(X, y)
np.testing.assert_array_less(old_alpha, lars_cv.alpha_)
old_alpha = lars_cv.alpha_
def test_lasso_lars_ic():
""" Test the LassoLarsIC object by checking that
- some good features are selected.
- alpha_bic > alpha_aic
- n_nonzero_bic < n_nonzero_aic
"""
lars_bic = linear_model.LassoLarsIC('bic')
lars_aic = linear_model.LassoLarsIC('aic')
rng = np.random.RandomState(42)
X = diabetes.data
y = diabetes.target
X = np.c_[X, rng.randn(X.shape[0], 4)] # add 4 bad features
lars_bic.fit(X, y)
lars_aic.fit(X, y)
nonzero_bic = np.where(lars_bic.coef_)[0]
nonzero_aic = np.where(lars_aic.coef_)[0]
assert_greater(lars_bic.alpha_, lars_aic.alpha_)
assert_less(len(nonzero_bic), len(nonzero_aic))
assert_less(np.max(nonzero_bic), diabetes.data.shape[1])
# test error on unknown IC
lars_broken = linear_model.LassoLarsIC('<unknown>')
assert_raises(ValueError, lars_broken.fit, X, y)
if __name__ == '__main__':
import nose
nose.runmodule()
| bsd-3-clause |
marcsans/cnn-physics-perception | phy/lib/python2.7/site-packages/matplotlib/tests/__init__.py | 6 | 1362 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
from matplotlib.externals import six
import difflib
import os
from matplotlib.testing import setup
_multiprocess_can_split_ = True
# Check that the test directories exist
if not os.path.exists(os.path.join(
os.path.dirname(__file__), 'baseline_images')):
raise IOError(
'The baseline image directory does not exist. '
'This is most likely because the test data is not installed. '
'You may need to install matplotlib from source to get the '
'test data.')
def assert_str_equal(reference_str, test_str,
format_str=('String {str1} and {str2} do not '
'match:\n{differences}')):
"""
Assert the two strings are equal. If not, fail and print their
diffs using difflib.
"""
if reference_str != test_str:
diff = difflib.unified_diff(reference_str.splitlines(1),
test_str.splitlines(1),
'Reference', 'Test result',
'', '', 0)
raise ValueError(format_str.format(str1=reference_str,
str2=test_str,
differences=''.join(diff)))
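# Illustrative usage sketch (added note, not part of the original module): with
# two differing inputs, e.g.
#   assert_str_equal("line one\nline two\n", "line one\nline 2\n")
# the helper raises a ValueError whose message embeds the unified diff produced
# by difflib; identical strings pass silently.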
| mit |
zyoohv/zyoohv.github.io | code_repository/tencent_ad_contest/tencent_contest/arrange_dataset/csv2libffm.py | 1 | 1856 | #! /usr/bin/python3
import pandas as pd
import numpy as np
from scipy.sparse import csr_matrix
import scipy
from tqdm import tqdm
import argparse
root_path = '../../tencent_dataset/preliminary_contest_data/'
def trainpred_pair(index):
train_ary = scipy.sparse.load_npz(root_path + 'train_{}.npz'.format(index))
train_ary = train_ary.tocsr()
label_ary = pd.read_csv(root_path + 'label_{}.csv'.format(index))
label_ary = label_ary.loc[:, 'label'].values
train_ffm = []
with tqdm(total=train_ary.shape[0]) as pbar:
for l, line in zip(label_ary, train_ary):
now = str(l)
line = line.toarray()[0]
for i, val in enumerate(line):
if float(val) != 0.:
now = now + ' {}:{}:{}'.format(1, i + 1, val)
train_ffm.append(now)
pbar.update(1)
with open(root_path + 'ffm/train_{}.ffm'.format(index), 'w') as fout:
for line in train_ffm:
fout.write(line + '\n')
pred_ary = scipy.sparse.load_npz(root_path + 'pred_{}.npz'.format(index))
pred_ary = pred_ary.tocsr()
pred_ffm = []
with tqdm(total=pred_ary.shape[0]) as pbar:
for line in pred_ary:
line = line.toarray()[0]
now = ''
for i, val in enumerate(line):
if float(val) != 0.:
now = now + '{}:{}:{} '.format(1, i + 1, val)
pred_ffm.append(now)
pbar.update(1)
with open(root_path + 'ffm/pred_{}.ffm'.format(index), 'w') as fout:
for line in pred_ffm:
fout.write(line + '\n')
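# Illustrative note (added for clarity, not part of the original script): each
# line written above follows the libffm convention "label field:feature:value".
# The loops hard-code the field id to 1 and shift feature indices by one
# ("i + 1"), so a training row with label 1 and non-zero entries at columns 3
# and 17 would come out roughly as
#   1 1:4:0.5 1:18:1.2
# while the prediction file carries the same "field:feature:value" triples
# without a leading label.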
def main():
parser = argparse.ArgumentParser()
    parser.add_argument('--index', type=int, help='index of the train/pred file pair to process')
args = parser.parse_args()
trainpred_pair(args.index)
if __name__ == '__main__':
    main()
| mit |
UltracoldAtomsLab/labhardware | projects/signaltransfer2/signalmeasure.py | 2 | 4174 | """
Transferring data from Stanford Research SR785 Signal analyzer
"""
import ConfigParser
import numpy as np
import sys
from time import strftime, sleep, time
## For Windows:
import matplotlib
matplotlib.rcParams['backend'] = 'wx'
import matplotlib.pylab as pl
# Own modules
sys.path.append("../../")
sys.path.append("../../drivers/")
import sr760
if __name__ == "__main__":
# Load configuration
try:
configfile = sys.argv[1] # first argument is configuration file name
config = ConfigParser.ConfigParser()
config.readfp(open(configfile))
except:
print "Cannot find configuration file."
sys.exit(1)
runparams = [None, None, None, None]
if len(sys.argv) >= 2:
try:
runparams[0] = int(sys.argv[2]) # should be span
runparams[1] = int(sys.argv[3]) # should be multipier
runparams[2] = int(sys.argv[4]) # should be averaging
runparams[3] = sys.argv[5]
except (ValueError, IndexError):
pass
# Get Configuration
GPIB = config.getint('Setup', 'GPIB')
basename = config.get('Setup', 'Basename')
# Connect to device
try:
device = sr760.StanfordSR760(GPIB)
except (IOError):
print("Couldn't find things on GPIB channel %d, exiting" %(GPIB))
sys.exit(1)
# Setting up the output filename
if not runparams[3]:
name = raw_input("Output basename? (default: %s) " %basename)
if len(name) > 0:
basename = name
else:
basename = runparams[3]
outname = "%s_%s" %(basename, strftime("%y%m%d_%H%M%S"))
print "0: 191mHz\t1: 382mHz\t2:763mHz\t3:1.5Hz"
print "4: 3.1Hz\t5: 6.1Hz\t6: 12.2Hz\t7: 24.4Hz"
print "8: 48.75Hz\t9: 97.5Hz\t10: 195Hz\t11: 390Hz"
print "12: 780Hz\t13: 1.56kHz\t14: 3.125kHz\t15: 6.25kHz"
print "16: 12.5kHz\t17: 25kHz\t18: 50kHz\t19: 100kHz"
span = -1 if runparams[0] is None else runparams[0]
while span not in range(20):
try:
span = int(raw_input("Frequency span? (s = 0-19) "))
except ValueError:
pass
if span == 0:
multiplier = 0
else:
multiplier = -1 if runparams[1] is None else runparams[1]
while multiplier not in range(span+1):
try:
multiplier = int(raw_input("Multiplier? (m = 0-%d), meaning 2^m better resolution " %(span)))
except ValueError:
pass
avgnum = 0 if runparams[2] is None else runparams[2]
while avgnum < 1:
try:
avgnum = int(raw_input("Averaging number? "))
except ValueError:
pass
print "Output filename: %s" %(outname)
realspan = span - multiplier
ranges = 2 ** multiplier
device.write("SPAN %d" %(realspan))
device.write("ICPL 0") # set AC coupling
device.write("MEAS 0 1") # select PSD measurement
device.write("DISP 0 0") # display log magnitude
startfreq = 0
basefreq = device.basefreq
freqstep = basefreq / 2**(19 - realspan)
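    # Clarifying note (added; assumes basefreq is ~100 kHz, consistent with the
    # span table printed above): realspan = span - multiplier selects a window
    # 2**multiplier times narrower than the requested span, and the loop below
    # measures 2**multiplier adjacent windows of width freqstep. For example,
    # span=12 (780 Hz) with multiplier=2 gives four ~195 Hz windows whose start
    # frequencies step by freqstep, covering the requested span at 4x the
    # frequency resolution.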
start = time()
for i in range(ranges):
device.write("STRF %f" %(startfreq))
device.write("AVGO 1")
device.write("NAVG %d" %avgnum)
device.write("AVGT 0")
device.write("AVGM 0")
device.write("OVLP 0")
sleep(0.05)
device.write("STRT")
sleep(0.05)
ready = False
while not ready:
val = int(device.ask('*STB?'))
ready = (val & 1)
sleep(0.1)
data = device.pulldata()
if i == 0:
vals = data
else:
vals = np.append(vals, data, axis=0)
print "Done %d/%d" %(i+1, ranges)
startfreq += freqstep
print "Total time: %.1fs" %(time()-start)
    # Save the measured data
np.savetxt("%s.csv" %(outname), vals, delimiter=",")
# Data plotting
xval = vals[:, 0]
pl.figure(figsize=(11.69, 8.27)) # landscape alignment A4
yval = vals[:, 1]
pl.subplot(111)
pl.plot(xval, yval, '-')
pl.ylabel('Y')
pl.xlabel('Hz')
pl.xlim([xval[0], xval[-1]])
pl.savefig("%s.png" %(outname))
device.write("LOCL 0")
pl.show()
| mit |
stephanieleevillanueva/pandasql | examples/demo.py | 3 | 2025 | from sklearn.datasets import load_iris
import pandas as pd
from pandasql import sqldf
from pandasql import load_meat, load_births
import re
births = load_births()
meat = load_meat()
iris = load_iris()
iris_df = pd.DataFrame(iris.data, columns=iris.feature_names)
iris_df['species'] = pd.Categorical(iris.target, levels=iris.target_names)
iris_df.columns = [re.sub("[() ]", "", col) for col in iris_df.columns]
print sqldf("select * from iris_df limit 10;", locals())
print sqldf("select sepalwidthcm, species from iris_df limit 10;", locals())
q = """
select
species
, avg(sepalwidthcm)
, min(sepalwidthcm)
, max(sepalwidthcm)
from
iris_df
group by
species;
"""
print "*"*80
print "aggregation"
print "-"*80
print q
print sqldf(q, locals())
def pysqldf(q):
"add this to your script if you get tired of calling locals()"
return sqldf(q, globals())
print "*"*80
print "calling from a helper function"
print '''def pysqldf(q):
"add this to your script if you get tired of calling locals()"
return sqldf(q, globals())'''
print "-"*80
print q
print pysqldf(q)
q = """
select
a.*
from
iris_df a
inner join
iris_df b
on a.species = b.species
limit 10;
"""
print "*"*80
print "joins"
print "-"*80
print q
print pysqldf(q)
q = """
select
*
from
iris_df
where
species = 'virginica'
and sepallengthcm > 7.7;
"""
print "*"*80
print "where clause"
print "-"*80
print q
print pysqldf(q)
iris_df['id'] = range(len(iris_df))
q = """
select
*
from
iris_df
where
id in (select id from iris_df where sepalwidthcm*sepallengthcm > 25);
"""
print "*"*80
print "subqueries"
print "-"*80
print q
print pysqldf(q)
q = """
SELECT
m.*
, b.births
FROM
meat m
INNER JOIN
births b
on m.date = b.date
ORDER BY
m.date;
"""
print pysqldf(q).head()
| mit |
rubikloud/scikit-learn | sklearn/pipeline.py | 61 | 21271 | """
The :mod:`sklearn.pipeline` module implements utilities to build a composite
estimator, as a chain of transforms and estimators.
"""
# Author: Edouard Duchesnay
# Gael Varoquaux
# Virgile Fritsch
# Alexandre Gramfort
# Lars Buitinck
# Licence: BSD
from collections import defaultdict
from warnings import warn
import numpy as np
from scipy import sparse
from .base import BaseEstimator, TransformerMixin
from .externals.joblib import Parallel, delayed
from .externals import six
from .utils import tosequence
from .utils.metaestimators import if_delegate_has_method
from .externals.six import iteritems
__all__ = ['Pipeline', 'FeatureUnion']
class Pipeline(BaseEstimator):
"""Pipeline of transforms with a final estimator.
Sequentially apply a list of transforms and a final estimator.
Intermediate steps of the pipeline must be 'transforms', that is, they
must implement fit and transform methods.
The final estimator only needs to implement fit.
The purpose of the pipeline is to assemble several steps that can be
cross-validated together while setting different parameters.
For this, it enables setting parameters of the various steps using their
names and the parameter name separated by a '__', as in the example below.
Read more in the :ref:`User Guide <pipeline>`.
Parameters
----------
steps : list
List of (name, transform) tuples (implementing fit/transform) that are
chained, in the order in which they are chained, with the last object
an estimator.
Attributes
----------
named_steps : dict
Read-only attribute to access any step parameter by user given name.
Keys are step names and values are steps parameters.
Examples
--------
>>> from sklearn import svm
>>> from sklearn.datasets import samples_generator
>>> from sklearn.feature_selection import SelectKBest
>>> from sklearn.feature_selection import f_regression
>>> from sklearn.pipeline import Pipeline
>>> # generate some data to play with
>>> X, y = samples_generator.make_classification(
... n_informative=5, n_redundant=0, random_state=42)
>>> # ANOVA SVM-C
>>> anova_filter = SelectKBest(f_regression, k=5)
>>> clf = svm.SVC(kernel='linear')
>>> anova_svm = Pipeline([('anova', anova_filter), ('svc', clf)])
>>> # You can set the parameters using the names issued
>>> # For instance, fit using a k of 10 in the SelectKBest
>>> # and a parameter 'C' of the svm
>>> anova_svm.set_params(anova__k=10, svc__C=.1).fit(X, y)
... # doctest: +ELLIPSIS
Pipeline(steps=[...])
>>> prediction = anova_svm.predict(X)
>>> anova_svm.score(X, y) # doctest: +ELLIPSIS
0.77...
>>> # getting the selected features chosen by anova_filter
>>> anova_svm.named_steps['anova'].get_support()
... # doctest: +NORMALIZE_WHITESPACE
array([ True, True, True, False, False, True, False, True, True, True,
False, False, True, False, True, False, False, False, False,
True], dtype=bool)
"""
# BaseEstimator interface
def __init__(self, steps):
names, estimators = zip(*steps)
if len(dict(steps)) != len(steps):
raise ValueError("Provided step names are not unique: %s" % (names,))
# shallow copy of steps
self.steps = tosequence(steps)
transforms = estimators[:-1]
estimator = estimators[-1]
for t in transforms:
if (not (hasattr(t, "fit") or hasattr(t, "fit_transform")) or not
hasattr(t, "transform")):
raise TypeError("All intermediate steps of the chain should "
"be transforms and implement fit and transform"
" '%s' (type %s) doesn't)" % (t, type(t)))
if not hasattr(estimator, "fit"):
raise TypeError("Last step of chain should implement fit "
"'%s' (type %s) doesn't)"
% (estimator, type(estimator)))
@property
def _estimator_type(self):
return self.steps[-1][1]._estimator_type
def get_params(self, deep=True):
if not deep:
return super(Pipeline, self).get_params(deep=False)
else:
out = self.named_steps
for name, step in six.iteritems(self.named_steps):
for key, value in six.iteritems(step.get_params(deep=True)):
out['%s__%s' % (name, key)] = value
out.update(super(Pipeline, self).get_params(deep=False))
return out
@property
def named_steps(self):
return dict(self.steps)
@property
def _final_estimator(self):
return self.steps[-1][1]
# Estimator interface
def _pre_transform(self, X, y=None, **fit_params):
fit_params_steps = dict((step, {}) for step, _ in self.steps)
for pname, pval in six.iteritems(fit_params):
step, param = pname.split('__', 1)
fit_params_steps[step][param] = pval
Xt = X
for name, transform in self.steps[:-1]:
if hasattr(transform, "fit_transform"):
Xt = transform.fit_transform(Xt, y, **fit_params_steps[name])
else:
Xt = transform.fit(Xt, y, **fit_params_steps[name]) \
.transform(Xt)
return Xt, fit_params_steps[self.steps[-1][0]]
def fit(self, X, y=None, **fit_params):
"""Fit all the transforms one after the other and transform the
data, then fit the transformed data using the final estimator.
Parameters
----------
X : iterable
Training data. Must fulfill input requirements of first step of the
pipeline.
y : iterable, default=None
Training targets. Must fulfill label requirements for all steps of
the pipeline.
"""
Xt, fit_params = self._pre_transform(X, y, **fit_params)
self.steps[-1][-1].fit(Xt, y, **fit_params)
return self
def fit_transform(self, X, y=None, **fit_params):
"""Fit all the transforms one after the other and transform the
data, then use fit_transform on transformed data using the final
estimator.
Parameters
----------
X : iterable
Training data. Must fulfill input requirements of first step of the
pipeline.
y : iterable, default=None
Training targets. Must fulfill label requirements for all steps of
the pipeline.
"""
Xt, fit_params = self._pre_transform(X, y, **fit_params)
if hasattr(self.steps[-1][-1], 'fit_transform'):
return self.steps[-1][-1].fit_transform(Xt, y, **fit_params)
else:
return self.steps[-1][-1].fit(Xt, y, **fit_params).transform(Xt)
@if_delegate_has_method(delegate='_final_estimator')
def predict(self, X):
"""Applies transforms to the data, and the predict method of the
final estimator. Valid only if the final estimator implements
predict.
Parameters
----------
X : iterable
Data to predict on. Must fulfill input requirements of first step of
the pipeline.
"""
Xt = X
for name, transform in self.steps[:-1]:
Xt = transform.transform(Xt)
return self.steps[-1][-1].predict(Xt)
@if_delegate_has_method(delegate='_final_estimator')
def fit_predict(self, X, y=None, **fit_params):
"""Applies fit_predict of last step in pipeline after transforms.
Applies fit_transforms of a pipeline to the data, followed by the
fit_predict method of the final estimator in the pipeline. Valid
only if the final estimator implements fit_predict.
Parameters
----------
X : iterable
Training data. Must fulfill input requirements of first step of
the pipeline.
y : iterable, default=None
Training targets. Must fulfill label requirements for all steps
of the pipeline.
"""
Xt, fit_params = self._pre_transform(X, y, **fit_params)
return self.steps[-1][-1].fit_predict(Xt, y, **fit_params)
@if_delegate_has_method(delegate='_final_estimator')
def predict_proba(self, X):
"""Applies transforms to the data, and the predict_proba method of the
final estimator. Valid only if the final estimator implements
predict_proba.
Parameters
----------
X : iterable
Data to predict on. Must fulfill input requirements of first step of
the pipeline.
"""
Xt = X
for name, transform in self.steps[:-1]:
Xt = transform.transform(Xt)
return self.steps[-1][-1].predict_proba(Xt)
@if_delegate_has_method(delegate='_final_estimator')
def decision_function(self, X):
"""Applies transforms to the data, and the decision_function method of
the final estimator. Valid only if the final estimator implements
decision_function.
Parameters
----------
X : iterable
Data to predict on. Must fulfill input requirements of first step of
the pipeline.
"""
Xt = X
for name, transform in self.steps[:-1]:
Xt = transform.transform(Xt)
return self.steps[-1][-1].decision_function(Xt)
@if_delegate_has_method(delegate='_final_estimator')
def predict_log_proba(self, X):
"""Applies transforms to the data, and the predict_log_proba method of
the final estimator. Valid only if the final estimator implements
predict_log_proba.
Parameters
----------
X : iterable
Data to predict on. Must fulfill input requirements of first step of
the pipeline.
"""
Xt = X
for name, transform in self.steps[:-1]:
Xt = transform.transform(Xt)
return self.steps[-1][-1].predict_log_proba(Xt)
@if_delegate_has_method(delegate='_final_estimator')
def transform(self, X):
"""Applies transforms to the data, and the transform method of the
final estimator. Valid only if the final estimator implements
transform.
Parameters
----------
X : iterable
Data to predict on. Must fulfill input requirements of first step of
the pipeline.
"""
Xt = X
for name, transform in self.steps:
Xt = transform.transform(Xt)
return Xt
@if_delegate_has_method(delegate='_final_estimator')
def inverse_transform(self, X):
"""Applies inverse transform to the data.
Starts with the last step of the pipeline and applies ``inverse_transform`` in
inverse order of the pipeline steps.
Valid only if all steps of the pipeline implement inverse_transform.
Parameters
----------
X : iterable
Data to inverse transform. Must fulfill output requirements of the
last step of the pipeline.
"""
if X.ndim == 1:
warn("From version 0.19, a 1d X will not be reshaped in"
" pipeline.inverse_transform any more.", FutureWarning)
X = X[None, :]
Xt = X
for name, step in self.steps[::-1]:
Xt = step.inverse_transform(Xt)
return Xt
@if_delegate_has_method(delegate='_final_estimator')
def score(self, X, y=None):
"""Applies transforms to the data, and the score method of the
final estimator. Valid only if the final estimator implements
score.
Parameters
----------
X : iterable
Data to score. Must fulfill input requirements of first step of the
pipeline.
y : iterable, default=None
Targets used for scoring. Must fulfill label requirements for all steps of
the pipeline.
"""
Xt = X
for name, transform in self.steps[:-1]:
Xt = transform.transform(Xt)
return self.steps[-1][-1].score(Xt, y)
@property
def classes_(self):
return self.steps[-1][-1].classes_
@property
def _pairwise(self):
# check if first estimator expects pairwise input
return getattr(self.steps[0][1], '_pairwise', False)
def _name_estimators(estimators):
"""Generate names for estimators."""
names = [type(estimator).__name__.lower() for estimator in estimators]
namecount = defaultdict(int)
for est, name in zip(estimators, names):
namecount[name] += 1
for k, v in list(six.iteritems(namecount)):
if v == 1:
del namecount[k]
for i in reversed(range(len(estimators))):
name = names[i]
if name in namecount:
names[i] += "-%d" % namecount[name]
namecount[name] -= 1
return list(zip(names, estimators))
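# Illustrative example (added note derived from the helper above, not part of
# the original source): duplicate estimator types receive numbered suffixes,
# e.g. a hypothetical call
#   _name_estimators([PCA(), StandardScaler(), StandardScaler()])
# would yield the name/estimator pairs
#   [('pca', ...), ('standardscaler-1', ...), ('standardscaler-2', ...)]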
def make_pipeline(*steps):
"""Construct a Pipeline from the given estimators.
This is a shorthand for the Pipeline constructor; it does not require, and
does not permit, naming the estimators. Instead, they will be given names
automatically based on their types.
Examples
--------
>>> from sklearn.naive_bayes import GaussianNB
>>> from sklearn.preprocessing import StandardScaler
>>> make_pipeline(StandardScaler(), GaussianNB()) # doctest: +NORMALIZE_WHITESPACE
Pipeline(steps=[('standardscaler',
StandardScaler(copy=True, with_mean=True, with_std=True)),
('gaussiannb', GaussianNB())])
Returns
-------
p : Pipeline
"""
return Pipeline(_name_estimators(steps))
def _fit_one_transformer(transformer, X, y):
return transformer.fit(X, y)
def _transform_one(transformer, name, X, transformer_weights):
if transformer_weights is not None and name in transformer_weights:
        # if we have a weight for this transformer, multiply output
return transformer.transform(X) * transformer_weights[name]
return transformer.transform(X)
def _fit_transform_one(transformer, name, X, y, transformer_weights,
**fit_params):
if transformer_weights is not None and name in transformer_weights:
        # if we have a weight for this transformer, multiply output
if hasattr(transformer, 'fit_transform'):
X_transformed = transformer.fit_transform(X, y, **fit_params)
return X_transformed * transformer_weights[name], transformer
else:
X_transformed = transformer.fit(X, y, **fit_params).transform(X)
return X_transformed * transformer_weights[name], transformer
if hasattr(transformer, 'fit_transform'):
X_transformed = transformer.fit_transform(X, y, **fit_params)
return X_transformed, transformer
else:
X_transformed = transformer.fit(X, y, **fit_params).transform(X)
return X_transformed, transformer
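# Usage sketch (added for illustration, not part of the original source): the
# ``transformer_weights`` consumed by the two helpers above map transformer
# names to multiplicative factors, e.g. a hypothetical
#   FeatureUnion([('pca', PCA(n_components=2)), ('kbest', SelectKBest(k=1))],
#                transformer_weights={'pca': 2.0})
# would double the PCA block of the stacked output while leaving the
# SelectKBest block unscaled.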
class FeatureUnion(BaseEstimator, TransformerMixin):
"""Concatenates results of multiple transformer objects.
This estimator applies a list of transformer objects in parallel to the
input data, then concatenates the results. This is useful to combine
several feature extraction mechanisms into a single transformer.
Read more in the :ref:`User Guide <feature_union>`.
Parameters
----------
transformer_list: list of (string, transformer) tuples
List of transformer objects to be applied to the data. The first
half of each tuple is the name of the transformer.
n_jobs: int, optional
Number of jobs to run in parallel (default 1).
transformer_weights: dict, optional
Multiplicative weights for features per transformer.
Keys are transformer names, values the weights.
"""
def __init__(self, transformer_list, n_jobs=1, transformer_weights=None):
self.transformer_list = transformer_list
self.n_jobs = n_jobs
self.transformer_weights = transformer_weights
def get_feature_names(self):
"""Get feature names from all transformers.
Returns
-------
feature_names : list of strings
Names of the features produced by transform.
"""
feature_names = []
for name, trans in self.transformer_list:
if not hasattr(trans, 'get_feature_names'):
raise AttributeError("Transformer %s does not provide"
" get_feature_names." % str(name))
feature_names.extend([name + "__" + f for f in
trans.get_feature_names()])
return feature_names
def fit(self, X, y=None):
"""Fit all transformers using X.
Parameters
----------
X : array-like or sparse matrix, shape (n_samples, n_features)
Input data, used to fit transformers.
"""
transformers = Parallel(n_jobs=self.n_jobs)(
delayed(_fit_one_transformer)(trans, X, y)
for name, trans in self.transformer_list)
self._update_transformer_list(transformers)
return self
def fit_transform(self, X, y=None, **fit_params):
"""Fit all transformers using X, transform the data and concatenate
results.
Parameters
----------
X : array-like or sparse matrix, shape (n_samples, n_features)
Input data to be transformed.
Returns
-------
X_t : array-like or sparse matrix, shape (n_samples, sum_n_components)
hstack of results of transformers. sum_n_components is the
sum of n_components (output dimension) over transformers.
"""
result = Parallel(n_jobs=self.n_jobs)(
delayed(_fit_transform_one)(trans, name, X, y,
self.transformer_weights, **fit_params)
for name, trans in self.transformer_list)
Xs, transformers = zip(*result)
self._update_transformer_list(transformers)
if any(sparse.issparse(f) for f in Xs):
Xs = sparse.hstack(Xs).tocsr()
else:
Xs = np.hstack(Xs)
return Xs
def transform(self, X):
"""Transform X separately by each transformer, concatenate results.
Parameters
----------
X : array-like or sparse matrix, shape (n_samples, n_features)
Input data to be transformed.
Returns
-------
X_t : array-like or sparse matrix, shape (n_samples, sum_n_components)
hstack of results of transformers. sum_n_components is the
sum of n_components (output dimension) over transformers.
"""
Xs = Parallel(n_jobs=self.n_jobs)(
delayed(_transform_one)(trans, name, X, self.transformer_weights)
for name, trans in self.transformer_list)
if any(sparse.issparse(f) for f in Xs):
Xs = sparse.hstack(Xs).tocsr()
else:
Xs = np.hstack(Xs)
return Xs
def get_params(self, deep=True):
if not deep:
return super(FeatureUnion, self).get_params(deep=False)
else:
out = dict(self.transformer_list)
for name, trans in self.transformer_list:
for key, value in iteritems(trans.get_params(deep=True)):
out['%s__%s' % (name, key)] = value
out.update(super(FeatureUnion, self).get_params(deep=False))
return out
def _update_transformer_list(self, transformers):
self.transformer_list[:] = [
(name, new)
for ((name, old), new) in zip(self.transformer_list, transformers)
]
# XXX it would be nice to have a keyword-only n_jobs argument to this function,
# but that's not allowed in Python 2.x.
def make_union(*transformers):
"""Construct a FeatureUnion from the given transformers.
This is a shorthand for the FeatureUnion constructor; it does not require,
and does not permit, naming the transformers. Instead, they will be given
names automatically based on their types. It also does not allow weighting.
Examples
--------
>>> from sklearn.decomposition import PCA, TruncatedSVD
>>> make_union(PCA(), TruncatedSVD()) # doctest: +NORMALIZE_WHITESPACE
FeatureUnion(n_jobs=1,
transformer_list=[('pca', PCA(copy=True, n_components=None,
whiten=False)),
('truncatedsvd',
TruncatedSVD(algorithm='randomized',
n_components=2, n_iter=5,
random_state=None, tol=0.0))],
transformer_weights=None)
Returns
-------
f : FeatureUnion
"""
return FeatureUnion(_name_estimators(transformers))
| bsd-3-clause |
janezhango/BigDataMachineLearning | py/testdir_single_jvm/test_GLM2_hastie.py | 2 | 3613 |
## Dataset created from this:
#
# from sklearn.datasets import make_hastie_10_2
# import numpy as np
# i = 1000000
# f = 10
# (X,y) = make_hastie_10_2(n_samples=i,random_state=None)
# y.shape = (i,1)
# Y = np.hstack((X,y))
# np.savetxt('./1mx' + str(f) + '_hastie_10_2.data', Y, delimiter=',', fmt='%.2f');
import unittest, time, sys, copy
sys.path.extend(['.','..','py'])
import h2o, h2o_cmd, h2o_glm, h2o_util, h2o_hosts, h2o_import as h2i
def glm_doit(self, csvFilename, bucket, csvPathname, timeoutSecs=30):
print "\nStarting GLM of", csvFilename
parseResult = h2i.import_parse(bucket=bucket, path=csvPathname, hex_key=csvFilename + ".hex", schema='put', timeoutSecs=30)
y = 10
    # Took n_folds out because GLM doesn't include n_folds time and it's slow;
    # we want to compare the GLM time to our measured time
# hastie has two values 1,-1. need to specify case
kwargs = {'response': y, 'alpha': 0, 'family': 'binomial'}
# ToInt2.html?src_key=Twitter2DB.hex&column_index=2
# ToEnum2.html?src_key=Twitter2DB.hex&column_index=2
start = time.time()
# change the 1/-1 to enums
h2o.nodes[0].to_enum(src_key=parseResult['destination_key'], column_index=y+1)
glm = h2o_cmd.runGLM(parseResult=parseResult, timeoutSecs=timeoutSecs, **kwargs)
print "GLM in", (time.time() - start), "secs (python)"
h2o_glm.simpleCheckGLM(self, glm, "C8", **kwargs)
# compare this glm to the first one. since the files are replications, the results
# should be similar?
validation = glm['glm_model']['submodels'][0]['validation']
if self.validation1:
h2o_glm.compareToFirstGlm(self, 'auc', validation, self.validation1)
else:
self.validation1 = copy.deepcopy(validation)
class Basic(unittest.TestCase):
def tearDown(self):
h2o.check_sandbox_for_errors()
@classmethod
def setUpClass(cls):
global localhost
localhost = h2o.decide_if_localhost()
if (localhost):
h2o.build_cloud(1)
else:
h2o_hosts.build_cloud_with_hosts(1)
global SYNDATASETS_DIR
SYNDATASETS_DIR = h2o.make_syn_dir()
@classmethod
def tearDownClass(cls):
h2o.tear_down_cloud()
validation1 = {}
def test_GLM2_hastie(self):
h2o.beta_features = True
# gunzip it and cat it to create 2x and 4x replications in SYNDATASETS_DIR
# FIX! eventually we'll compare the 1x, 2x and 4x results like we do
# in other tests. (catdata?)
bucket = 'home-0xdiag-datasets'
csvFilename = "1mx10_hastie_10_2.data.gz"
csvPathname = 'standard' + '/' + csvFilename
glm_doit(self, csvFilename, bucket, csvPathname, timeoutSecs=75)
fullPathname = h2i.find_folder_and_filename(bucket, csvPathname, returnFullPath=True)
filename1x = "hastie_1x.data"
pathname1x = SYNDATASETS_DIR + '/' + filename1x
h2o_util.file_gunzip(fullPathname, pathname1x)
filename2x = "hastie_2x.data"
pathname2x = SYNDATASETS_DIR + '/' + filename2x
h2o_util.file_cat(pathname1x,pathname1x,pathname2x)
glm_doit(self,filename2x, None, pathname2x, timeoutSecs=75)
filename4x = "hastie_4x.data"
pathname4x = SYNDATASETS_DIR + '/' + filename4x
h2o_util.file_cat(pathname2x,pathname2x,pathname4x)
print "Iterating 3 times on this last one for perf compare"
for i in range(3):
print "\nTrial #", i, "of", filename4x
glm_doit(self, filename4x, None, pathname4x, timeoutSecs=150)
if __name__ == '__main__':
h2o.unit_main()
| apache-2.0 |
obarquero/intro_machine_learning_udacity | Projects/ud120-projects-master/tools/startup.py | 9 | 1161 | #!/usr/bin/python
print
print "checking for nltk"
try:
import nltk
except ImportError:
print "you should install nltk before continuing"
print "checking for numpy"
try:
import numpy
except ImportError:
print "you should install numpy before continuing"
print "checking for scipy"
try:
import scipy
except:
print "you should install scipy before continuing"
print "checking for sklearn"
try:
import sklearn
except:
print "you should install sklearn before continuing"
print
print "downloading the Enron dataset (this may take a while)"
print "to check on progress, you can cd up one level, then execute <ls -lthr>"
print "Enron dataset should be last item on the list, along with its current size"
print "download will complete at about 423 MB"
import urllib
url = "https://www.cs.cmu.edu/~./enron/enron_mail_20150507.tgz"
urllib.urlretrieve(url, filename="../enron_mail_20150507.tgz")
print "download complete!"
print
print "unzipping Enron dataset (this may take a while)"
import tarfile
import os
os.chdir("..")
tfile = tarfile.open("enron_mail_20150507.tgz", "r:gz")
tfile.extractall(".")
print "you're ready to go!"
| gpl-2.0 |
IvS-KULeuven/ComboCode | cc/ivs/sed/testSED.py | 3 | 45593 | """
Unit test covering sed.fit.py
@author: Joris Vos
"""
import numpy as np
from numpy import inf, array
from cc.ivs import sigproc
from cc.ivs.sed import fit, model, builder, filters
from cc.ivs.units import constants
from cc.ivs.catalogs import sesame
from cc.ivs.aux import loggers
from cc.ivs.units import constants
from matplotlib import mlab
import unittest
try:
import mock
from mock import patch
noMock = False
except Exception:
noMock = True
# Set at True to skip integration tests.
noIntegration = False
class SEDTestCase(unittest.TestCase):
"""Add some extra usefull assertion methods to the testcase class"""
def create_patch(self, name, method, **kwargs):
patcher = patch.object(name, method, **kwargs)
thing = patcher.start()
self.addCleanup(patcher.stop)
return thing
def assertArrayEqual(self, l1, l2, msg=None):
msg_ = "Array is not equal to expected array: %s != %s"%(l1,l2)
if msg != None: msg_ = msg
self.assertEqual(len(l1), len(l2), msg=msg_)
self.assertEqual(l1.dtype, l2.dtype, msg=msg_)
for f1, f2 in zip(l1,l2):
self.assertEqual(f1,f2, msg=msg_)
def assertArrayAlmostEqual(self, l1,l2,places=None,delta=None, msg=None):
msg_ = "assertArrayAlmostEqual Failed: %s != %s"%(str(l1),str(l2))
if msg != None: msg_ = msg_ + msg
for f1, f2 in zip(l1, l2):
self.assertAlmostEqual(f1,f2,places=places, delta=delta, msg=msg_)
def assertAllBetween(self, l, low, high, msg=None):
self.assertGreaterEqual(min(l), low, msg=msg)
self.assertLessEqual(max(l), high, msg=msg)
def assertAllBetweenDiff(self, l, low, high, places=4, msg=None):
self.assertGreaterEqual(min(l), low, msg=msg)
self.assertLessEqual(max(l), high, msg=msg)
lr = np.round(l, decimals=places)
self.assertTrue(len(np.unique(lr)) > 1, msg="All elements are equal: %s"%(l))
def assertInList(self, el, lst, msg=None):
msg_ = "The given element %s is not in list %s"%(el, lst)
if msg != None: msg_=msg
res = []
for l in lst:
try:
res.append(all(el == l))
except Exception:
res.append(el == l)
self.assertTrue(any(res), msg=msg_)
def assert_mock_args_in_last_call(self, mck, args=None, kwargs=None, msg=None):
args_, kwargs_ = mck.call_args
if args != None:
for arg in args:
msg_ = msg if msg != None else \
'Argument %s was not in call to %s (called with args: %s)'%(arg, mck, args_)
self.assertInList(arg, args_, msg=msg_)
if kwargs != None:
for key in kwargs.keys():
msg_ = msg if msg != None else \
'Key Word Argument %s=%s was not in call to %s (called with args: %s)'% \
(key, kwargs[key], mck, kwargs_)
self.assertTrue(key in kwargs_, msg_)
try:
self.assertEqual(kwargs[key], kwargs_[key], msg_)
except Exception:
self.assertArrayAlmostEqual(kwargs[key], kwargs_[key], places=5)
class ModelTestCase(SEDTestCase):
@classmethod
def setUpClass(ModelTestCase):
""" Setup the tmap grid as it is smaller and thus faster as kurucz"""
model.set_defaults(grid='kurucztest')
grid1 = dict(grid='tmaptest')
grid2 = dict(grid='tmaptest')
model.set_defaults_multiple(grid1,grid2)
model.copy2scratch(z='*', Rv='*')
def setUp(self):
self.photbands = ['STROMGREN.U', '2MASS.H']
def testGetItablePixSingle(self):
""" model.get_itable_pix() single case """
bgrid = {'teff': array([ 5500., 6874., 9645., 7234., 5932.]),
'logg': array([ 3.57, 4.00, 4.21, 3.84, 3.25]),
'ebv': array([ 0.0018, 0.0077, 0.0112, 0.0046, 0.0110]),
'rv': array([ 2.20, 2.40, 2.60, 2.80, 3.00]),
'z': array([ -0.5, -0.4, -0.3, -0.2, 0.0])}
flux_,Labs_ = model.get_itable_pix(photbands=self.photbands, **bgrid)
flux = [[3255462., 13286738., 53641850., 16012786., 4652578.69189492],
[967634., 1262321., 1789486., 1336016., 1066763.]]
Labs = [0.819679, 1.997615, 7.739527, 2.450019, 1.107916]
self.assertArrayAlmostEqual(flux_[0],flux[0],delta=100)
self.assertArrayAlmostEqual(flux_[1],flux[1],delta=100)
self.assertArrayAlmostEqual(Labs_,Labs,places=3)
def testGetItablePixBinary(self):
""" model.get_itable_pix() multiple case """
bgrid = {'teff': array([ 22674., 21774., 22813., 29343., 28170.]),
'logg': array([ 5.75, 6.07, 6.03 , 6.38, 5.97]),
'ebv': array([ 0.0018, 0.0077, 0.0112, 0.0046, 0.0110]),
'rad': array([ 4.96, 7.34, 4.56, 3.30, 8.54]),
'teff2': array([ 38779., 36447., 32099. , 31392., 35893.]),
'logg2': array([ 4.67, 4.92, 5.46, 4.96, 4.85]),
'ebv2': array([ 0.0018, 0.0077, 0.0112, 0.0046, 0.0110]),
'rad2': array([ 6.99, 9.61, 9.55, 6.55, 3.99])}
flux_,Labs_ = model.get_itable_pix(photbands=self.photbands, **bgrid)
flux = [[1.69858e+11, 2.88402e+11, 1.98190e+11, 1.00088e+11, 1.39618e+11],
[5.78019e+08, 1.04221e+09, 7.42561e+08, 3.63444e+08, 5.52846e+08]]
Labs = [67046.5663, 115877.0362, 81801.3768, 39791.7467, 56369.3989]
self.assertArrayAlmostEqual(flux_[0],flux[0],delta=0.01e+11)
self.assertArrayAlmostEqual(flux_[1],flux[1],delta=0.01e+09)
self.assertArrayAlmostEqual(Labs_,Labs,places=3)
def testGetItableSingle(self):
""" model.get_itable() single case """
flux_,Labs_ = model.get_itable(photbands=self.photbands, teff=6874, logg=4.21,
ebv=0.0077, z=-0.2)
flux = [13428649.32576484, 1271090.21316342]
Labs = 1.99865981674
self.assertAlmostEqual(flux_[0],flux[0], delta=100)
self.assertAlmostEqual(flux_[1],flux[1], delta=100)
self.assertAlmostEqual(Labs_,Labs, delta=100)
def testGetItableBinary(self):
""" model.get_itable() multiple case """
flux_,Labs_ = model.get_itable(photbands=self.photbands, teff=25000,
logg=5.12, ebv=0.001, teff2=33240, logg2=5.86, ebv2=0.001)
flux = [3.31834622e+09, 1.25032866e+07]
Labs = 1277.54498257
self.assertAlmostEqual(flux_[0],flux[0], delta=100)
self.assertAlmostEqual(flux_[1],flux[1], delta=100)
self.assertAlmostEqual(Labs_,Labs, delta=100)
def testGetTable(self):
""" model.get_table() single case """
wave, flux = model.get_table(teff=6874, logg=4.21, ebv=0.0)
self.assertEqual(len(wave), len(flux))
self.assertEqual(len(wave), 1221)
self.assertAlmostEqual(wave[0], 90.9, delta=0.1)
self.assertAlmostEqual(wave[-1], 1600000.0, delta=0.1)
self.assertAlmostEqual(wave[400], 4370.0, delta=0.1)
self.assertAlmostEqual(wave[800], 15925.0, delta=0.1)
self.assertAlmostEqual(flux[0], 0.0, delta=0.0001)
self.assertAlmostEqual(flux[-1], 0.020199, delta=0.0001)
self.assertAlmostEqual(flux[400], 24828659.5845, delta=0.0001)
self.assertAlmostEqual(flux[800], 1435461.60457, delta=0.0001)
def testGetTableBinary(self):
""" model.get_table() multiple case """
wave,flux = model.get_table(teff=25000, logg=5.12, ebv=0.001,
teff2=33240, logg2=5.86, ebv2=0.001)
self.assertEqual(len(wave), len(flux))
self.assertEqual(len(wave), 123104)
self.assertAlmostEqual(wave[0], 1000, delta=0.1)
self.assertAlmostEqual(wave[-1], 24999.8, delta=0.1)
self.assertAlmostEqual(wave[40000], 8671.20117188, delta=0.001)
self.assertAlmostEqual(wave[80000], 16509.4003906, delta=0.001)
self.assertAlmostEqual(flux[0], 170415154318.0, delta=0.5)
self.assertAlmostEqual(flux[-1], 2457826.26898, delta=0.0001)
self.assertAlmostEqual(flux[40000], 141915936.111, delta=0.001)
self.assertAlmostEqual(flux[80000], 12450102.801, delta=0.001)
class PixFitTestCase(SEDTestCase):
@classmethod
def setUpClass(PixFitTestCase):
""" Setup the tmap grid as it is smaller and thus faster as kurucz"""
model.set_defaults(grid='kurucztest')
grid1 = dict(grid='kurucztest')
grid2 = dict(grid='tmaptest')
model.set_defaults_multiple(grid1,grid2)
model.copy2scratch(z='*', Rv='*')
def setUp(self):
self.photbands = ['STROMGREN.U', '2MASS.H']
def testGenerateGridPixSingle(self):
""" fit.generate_grid_pix() single case """
grid = fit.generate_grid_single_pix(self.photbands,teffrange=(5000,10000),
loggrange=(3.50,4.50),ebvrange=(0.0, 0.012),zrange=(-0.5,0.0),
rvrange=(-inf,inf),points=50)
self.assertTrue('teff' in grid)
self.assertTrue('logg' in grid)
self.assertTrue('ebv' in grid)
self.assertTrue('rv' in grid)
self.assertTrue('z' in grid)
self.assertFalse('teff2' in grid)
self.assertFalse('rad' in grid)
self.assertAllBetweenDiff(grid['teff'], 5000, 10000, places=-2)
self.assertAllBetweenDiff(grid['logg'], 3.5, 4.5, places=1)
self.assertAllBetweenDiff(grid['ebv'], 0.0, 0.012, places=3)
self.assertAllBetweenDiff(grid['rv'], 2.1, 3.1, places=1)
self.assertAllBetweenDiff(grid['z'], -0.5, 0.0, places=1)
def testGenerateGridPixBinary(self):
""" fit.generate_grid_pix() binary case """
grid = fit.generate_grid_pix(self.photbands,teffrange=(5000,10000),
teff2range=(30000, 40000), loggrange=(3.5,4.50),
logg2range=(4.50, 6.50), ebvrange=(0.0, 0.012),
rvrange=(2.1,3.1), rv2range=(2.1,3.1),
masses=[0.47*1.63, 0.47], points=50)
self.assertTrue('teff' in grid)
self.assertTrue('logg' in grid)
self.assertTrue('teff2' in grid)
self.assertTrue('logg2' in grid)
self.assertTrue('ebv' in grid)
self.assertTrue('ebv2' in grid)
self.assertTrue('rad' in grid)
self.assertTrue('rad2' in grid)
self.assertFalse('z' in grid)
self.assertFalse('z2' in grid)
self.assertListEqual(grid['ebv'].tolist(), grid['ebv2'].tolist(),
msg="ebv should be the same for both components.")
self.assertListEqual(grid['rv'].tolist(), grid['rv2'].tolist(),
msg="Rv should be the same for both components.")
self.assertAllBetweenDiff(grid['teff'], 5000, 10000, places=-2)
self.assertAllBetweenDiff(grid['logg'], 3.5, 4.5, places=1)
self.assertAllBetweenDiff(grid['ebv'], 0.0, 0.012, places=3)
self.assertAllBetweenDiff(grid['rv'], 2.1, 3.1, places=1)
self.assertAllBetweenDiff(grid['teff2'], 30000, 40000, places=-2)
self.assertAllBetweenDiff(grid['logg2'], 4.5, 6.5, places=1)
G, Msol, Rsol = constants.GG_cgs, constants.Msol_cgs, constants.Rsol_cgs
rad = np.sqrt(G*0.47*1.63*Msol/10**grid['logg'])/Rsol
rad2 = np.sqrt(G*0.47*Msol/10**grid['logg2'])/Rsol
self.assertListEqual(rad.tolist(), grid['rad'].tolist())
self.assertListEqual(rad2.tolist(), grid['rad2'].tolist())
def testGenerateGridPixMultiple(self):
""" fit.generate_grid_pix() multiple case """
grid = fit.generate_grid_pix(self.photbands,teffrange=(5000,10000),
teff2range=(20000, 40000), loggrange=(3.5,4.5),radrange=(0.1,1.0),
logg2range=(4.50, 6.50), ebvrange=(0.0, 0.012),rad2range=(1.0,10.0),
points=50)
self.assertTrue('teff' in grid)
self.assertTrue('logg' in grid)
self.assertTrue('teff2' in grid)
self.assertTrue('logg2' in grid)
self.assertTrue('ebv' in grid)
self.assertTrue('ebv2' in grid)
self.assertTrue('rad' in grid)
self.assertTrue('rad2' in grid)
self.assertFalse('z' in grid)
self.assertFalse('z2' in grid)
self.assertFalse('rv' in grid)
self.assertFalse('rv2' in grid)
self.assertListEqual(grid['ebv'].tolist(), grid['ebv2'].tolist())
self.assertAllBetweenDiff(grid['teff'], 5000, 10000, places=-2)
self.assertAllBetweenDiff(grid['logg'], 3.5, 4.5, places=1)
self.assertAllBetweenDiff(grid['ebv'], 0.0, 0.012, places=3)
self.assertAllBetweenDiff(grid['rad'], 0.1, 1.0, places=1)
self.assertAllBetweenDiff(grid['teff2'], 20000, 40000, places=-2)
self.assertAllBetweenDiff(grid['logg2'], 4.5, 6.5, places=1)
self.assertAllBetweenDiff(grid['rad2'], 1.0, 10.0, places=0)
@unittest.skipIf(noMock, "Mock not installed")
def testiGridSearch(self):
""" fit.igrid_search_pix() """
meas = array([3.64007e-13, 2.49267e-13, 9.53516e-14] )
emeas = array([3.64007e-14, 2.49267e-14, 9.53516e-15])
photbands = ['STROMGREN.U', 'STROMGREN.B', 'STROMGREN.V']
grid = {'teff': array([ 22674., 21774., 22813., 29343., 28170.]),
'logg': array([ 5.75, 6.07, 6.03 , 6.38, 5.97]),
'ebv': array([ 0.0018, 0.0077, 0.0112, 0.0046, 0.0110]),
'z': array([0,0,0,0,0]),
'rv': array([ 2.20, 2.40, 2.60, 2.80, 3.00])}
syn_flux = array([[8.0218e+08, 7.2833e+08, 8.1801e+08, 1.6084e+09, 1.4178e+09],
[4.3229e+08, 4.0536e+08, 4.3823e+08, 7.0594e+08, 6.4405e+08],
[6.2270e+08, 5.7195e+08, 6.2482e+08, 1.0415e+09, 9.5594e+08]])
lumis = array([232.5337, 200.0878, 238.7946, 625.3935, 533.8251])
chisqs = array([260.1680, 255.4640, 251.1565, 221.6586, 233.4854])
scales = array([3.9450e-22, 4.2714e-22, 3.8880e-22, 2.2365e-22, 2.4783e-22])
e_scales = array([1.7789e-22, 1.9005e-22, 1.7449e-22, 1.0679e-22, 1.1745e-22])
mock_model = self.create_patch(model, 'get_itable_pix', return_value=(syn_flux, lumis))
mock_color = self.create_patch(filters, 'is_color', return_value=False)
mock_stat = self.create_patch(fit, 'stat_chi2', return_value=(chisqs,scales,e_scales))
chisqs_,scales_,e_scales_,lumis_ = fit.igrid_search_pix(meas, emeas, photbands, model_func=model.get_itable_pix, stat_func=fit.stat_chi2, **grid)
self.assertListEqual(chisqs.tolist(), chisqs_.tolist())
self.assertListEqual(scales_.tolist(), scales.tolist())
self.assertListEqual(e_scales_.tolist(), e_scales.tolist())
self.assertListEqual(lumis_.tolist(), lumis.tolist())
self.assert_mock_args_in_last_call(mock_model, kwargs=grid)
self.assert_mock_args_in_last_call(mock_model, kwargs={'photbands':photbands})
mock_stat.assert_called()
def testCreateParameterDict(self):
""" fit.create_parameter_dict() """
pars = dict(teff_value=5000, teff_min=4000, teff_max=6000,
logg_value=4.5, logg_min=3.5, logg_max=5.5,
ebv_value=0.001, ebv_min=0.0, ebv_max=0.01, ebv_vary=False,
z_value=0, z_vary=False,
rad_expr='G*sqrt(2*logg)/m')
parameters = fit.create_parameter_dict(**pars)
self.assertTrue('value' in parameters)
self.assertTrue('min' in parameters)
self.assertTrue('max' in parameters)
self.assertTrue('vary' in parameters)
self.assertTrue('expr' in parameters)
names = parameters['names']
self.assertTrue('teff' in names)
self.assertTrue('logg' in names)
self.assertTrue('ebv' in names)
self.assertTrue('z' in names)
self.assertTrue('rad' in names)
for key in parameters.keys():
self.assertTrue(type(parameters[key]) == np.ndarray)
self.assertEqual(parameters['value'][names == 'teff'], 5000)
self.assertEqual(parameters['value'][names == 'logg'], 4.5)
self.assertEqual(parameters['value'][names == 'ebv'], 0.001)
self.assertEqual(parameters['value'][names == 'z'], 0)
self.assertEqual(parameters['min'][names == 'teff'], 4000)
self.assertEqual(parameters['min'][names == 'logg'], 3.5)
self.assertEqual(parameters['min'][names == 'ebv'], 0.0)
self.assertEqual(parameters['max'][names == 'teff'], 6000)
self.assertEqual(parameters['max'][names == 'logg'], 5.5)
self.assertEqual(parameters['max'][names == 'ebv'], 0.01)
self.assertEqual(parameters['vary'][names == 'ebv'], False)
self.assertEqual(parameters['expr'][names == 'rad'], 'G*sqrt(2*logg)/m')
class MinimizeFitTestCase(SEDTestCase):
def testCreateParameterDict(self):
""" fit.create_parameter_dict() """
pars = {'logg_value': 4.0, 'vrad_vary': False, 'z_value': -0.3, 'vrad_value': 0,
'logg_vary': True, 'rv_min': 2.1, 'vrad_min': 0, 'vrad_max': 0, 'z_max': 0.0,
'ebv_max': 0.015, 'teff_value': 6000, 'z_vary': True, 'rv_value': 2.4,
'teff_vary': True, 'logg_min': 3.5, 'rv_max': 3.1, 'z_min': -0.5,
'ebv_min': 0.005, 'teff_min': 5000, 'logg_max': 4.5, 'ebv_value': 0.007,
'teff_max': 7000, 'rv_vary': True, 'ebv_vary': True}
exp = {'max': array([3.1, 7000, 4.5, 0.015, 0, 0.0], dtype=object),
'vary': array([True, True, True, True, False, True], dtype=object),
'names': array(['rv', 'teff', 'logg', 'ebv', 'vrad', 'z'], dtype='|S4'),
'value': array([2.4, 6000, 4.0, 0.007, 0, -0.3], dtype=object),
'min': array([2.1, 5000, 3.5, 0.005, 0, -0.5], dtype=object)}
res = fit.create_parameter_dict(**pars)
self.assertArrayEqual(res['max'], exp['max'])
self.assertArrayEqual(res['vary'], exp['vary'])
self.assertArrayEqual(res['min'], exp['min'])
self.assertArrayEqual(res['names'], exp['names'])
self.assertArrayEqual(res['value'], exp['value'])
@unittest.skipIf(noMock, "Mock not installed")
def testiMinimize(self):
""" fit.iminimize() normal mocked """
gm_return = (['minimizer'], ['startpars'], ['newmodels'], ['chisqrs'])
gifm_return = (['chisqr'], ['nfev'], ['scale'], ['labs'], ['grid'])
cpd_return = dict(names=['teff'], value=[5000], min=[4000], max=[6000], vary=[True])
mock_sfit_m = self.create_patch(sigproc.fit, 'minimize', return_value='minimizer')
mock_sfit_gm = self.create_patch(sigproc.fit, 'grid_minimize', return_value=gm_return)
mock_sfit_sp = self.create_patch(sigproc.fit.Function, 'setup_parameters')
mock_fit_gifm = self.create_patch(fit, '_get_info_from_minimizer', return_value=gifm_return)
mock_fit_cpd = self.create_patch(fit, 'create_parameter_dict', return_value=cpd_return)
meas = array([1.25e9, 2.34e8])
emeas = array([1.25e7, 2.34e6])
photbands = array(['J', 'C'])
#-- 1 point
grid, chisqr, nfev, scale, lumis = fit.iminimize(meas,emeas,photbands, points=None, epsfcn=0.01 )
self.assert_mock_args_in_last_call(mock_sfit_sp, kwargs=cpd_return)
self.assert_mock_args_in_last_call(mock_sfit_m, args=[photbands, meas], kwargs=dict(epsfcn=0.01, weights=1/emeas))
self.assert_mock_args_in_last_call(mock_fit_gifm, args=[['minimizer'], photbands, meas, emeas])
self.assertListEqual(grid,['grid'])
self.assertListEqual(chisqr,['chisqr'])
self.assertListEqual(nfev,['nfev'])
self.assertListEqual(scale,['scale'])
self.assertListEqual(lumis,['labs'])
@unittest.skipIf(noMock, "Mock not installed")
def testiMinimizeGrid(self):
""" fit.iminimize() grid mocked """
gm_return = (['minimizer'], ['startpars'], ['newmodels'], ['chisqrs'])
gifm_return = (['chisqr'], ['nfev'], ['scale'], ['labs'], ['grid'])
cpd_return = dict(names=['teff'], value=[5000], min=[4000], max=[6000], vary=[True])
mock_sfit_m = self.create_patch(sigproc.fit, 'minimize', return_value='minimizer')
mock_sfit_gm = self.create_patch(sigproc.fit, 'grid_minimize', return_value=gm_return)
mock_sfit_sp = self.create_patch(sigproc.fit.Function, 'setup_parameters')
mock_fit_gifm = self.create_patch(fit, '_get_info_from_minimizer', return_value=gifm_return)
mock_fit_cpd = self.create_patch(fit, 'create_parameter_dict', return_value=cpd_return)
meas = array([1.25e9, 2.34e8])
emeas = array([1.25e7, 2.34e6])
photbands = array(['J', 'C'])
#-- 10 point
grid, chisqr, nfev, scale, lumis = fit.iminimize(meas,emeas,photbands, points=10, epsfcn=0.01 )
self.assert_mock_args_in_last_call(mock_sfit_sp, kwargs=cpd_return)
self.assert_mock_args_in_last_call(mock_sfit_gm, args=[photbands, meas], kwargs=dict(epsfcn=0.01, weights=1/emeas, points=10))
self.assert_mock_args_in_last_call(mock_fit_gifm, args=[['minimizer'], photbands, meas, emeas])
self.assertListEqual(grid,['grid'])
self.assertListEqual(chisqr,['chisqr'])
self.assertListEqual(nfev,['nfev'])
self.assertListEqual(scale,['scale'])
self.assertListEqual(lumis,['labs'])
class BuilderTestCase(SEDTestCase):
@classmethod
def setUpClass(BuilderTestCase):
if not noMock:
sesame.search = mock.Mock(return_value={'plx':(0.0,0.0)})
builder.SED.load_photometry = mock.Mock(return_value=None)
def setUp(self):
self.sed = builder.SED(ID='TEST',load_fits=False)
self.sed.master = {}
self.sed.results = {'igrid_search':{}}
def testInit(self):
""" builder.sed.__init__() ToDo """
self.assertTrue(self.sed.ID == 'TEST')
def testCollectResults(self):
""" builder.sed.collect_results() """
bgrid = {'teff': array([ 22674., 21774., 22813., 29343., 28170.]),
'logg': array([ 5.75, 6.07, 6.03, 6.38, 5.97]),
'ebv': array([ 0.0018, 0.0077, 0.0112, 0.0046, 0.0110]),
'rv': array([ 2.20, 2.40, 2.60, 2.80, 3.00]),
'z': array([0,0,0,0,0])}
fgrid = {'chisq': array([1.,3.,2.,0.1,np.nan])}
self.sed.collect_results(grid=bgrid, fitresults=fgrid, mtype='igrid_search', selfact='chisq')
res = self.sed.results['igrid_search']['grid']
self.assertListEqual(res['chisq'].tolist(), [3.0, 2.0, 1.0, 0.1])
self.assertListEqual(res['teff'].tolist(), [21774.,22813.,22674.,29343.])
self.assertListEqual(res['logg'].tolist(), [6.07,6.03,5.75,6.38])
self.assertListEqual(res['ebv'].tolist(), [0.0077,0.0112,0.0018,0.0046])
self.assertListEqual(res['rv'].tolist(), [2.40,2.60,2.20,2.80])
self.assertListEqual(res['z'].tolist(), [0,0,0,0])
self.assertListEqual(res['ci_raw'].tolist(), [0.,0.,0.,0.])
self.assertListEqual(res['ci_red'].tolist(), [0.,0.,0.,0.])
def testCalculateDegreesOfFreedom(self):
""" builder.sed.calculateDF() """
ranges = {'teffrange': (5000,6000), 'teff2range': (20000,50000), 'loggrange': (4.5,4.5),
'logg2range': (4.5,5.5), 'ebvrange': (0.0,0.01), 'ebv2range': (0.0,0.01),
'zrange': (-0.5,0.5)}
df, dfinfo = self.sed.calculateDF(**ranges)
self.assertEqual(df, 6)
self.assertTrue('ebv' in dfinfo)
self.assertFalse('ebv2' in dfinfo)
@unittest.skipIf(noMock, "Mock not installed")
def testCalculateStatistics(self):
""" builder.sed.calculate_statistics()"""
dtypes = [('teff','f8'), ('logg','f8'),('ebv','f8'),('rv','f8'),('z','f8'), ('chisq','f8')]
grid = [array([ 22674., 21774., 22813., 29343., 28170.]),
array([ 5.75, 6.07, 6.03, 6.38, 5.97]),
array([ 0.0018, 0.0077, 0.0112, 0.0046, 0.0110]),
array([ 2.20, 2.40, 2.60, 2.80, 3.00]),
array([0,0,0,0,0]),
array([1.,3.,2.,0.1,10.0])]
master = np.rec.fromarrays(grid,dtype=dtypes)
master = mlab.rec_append_fields(master, 'ci_raw', np.zeros(len(master)))
master = mlab.rec_append_fields(master, 'ci_red', np.zeros(len(master)))
self.sed.results['igrid_search']['grid'] = master
self.sed.master['include'] = [True,True,False,True,True]
with mock.patch.object(builder.SED, 'calculateDF', return_value=5) as mock_method:
self.sed.calculate_statistics(df=5)
res = self.sed.results['igrid_search']
raw = [0.6826894, 0.9167354, 0.8427007, 0.2481703, 0.9984345]
red = [0.2481703, 0.4161175, 0.3452791, 0.0796556, 0.6826894]
self.assertFalse(mock_method.called)
self.assertArrayAlmostEqual(res['grid']['ci_raw'].tolist(), raw, places=5)
self.assertArrayAlmostEqual(res['grid']['ci_red'].tolist(), red, places=5)
self.assertEqual(res['factor'], 10.0)
def testCalculateConfidenceIntervals(self):
""" builder.sed.calculate_confidence_intervals() ToDo """
pass
def testGenerateRanges(self):
""" builder.sed.generate_ranges() ToDo """
pass
@unittest.skipIf(noMock, "Mock not installed")
def testiGridSearch(self):
""" builder.sed.igrid_search() mocked """
ranges = {'teffrange':(20000,30000), 'loggrange':(5.5,6.5)}
grid = {'teff': array([ 22674., 21774., 22813., 29343., 28170.]),
'logg': array([ 5.75, 6.07, 6.03, 6.38, 5.97])}
fitres = [[1.,3.,2.,0.1,10.0],[1.,3.,2.,0.1,10.0],[1.,3.,2.,0.1,10.0],[1.,3.,2.,0.1,10.0]]
ci = dict(name=['teff', 'logg'], value=[30000, 5.5], cilow=[29000, 5.0], cihigh=[31000, 6.0])
mock_sed_gr = self.create_patch(builder.SED, 'generate_ranges', return_value=ranges)
mock_fit_ggp = self.create_patch(fit, 'generate_grid_pix', return_value=grid)
mock_fit_isp = self.create_patch(fit, 'igrid_search_pix', return_value=fitres)
mock_sed_cr = self.create_patch(builder.SED, 'collect_results')
mock_sed_cs = self.create_patch(builder.SED, 'calculate_statistics')
mock_sed_cci = self.create_patch(builder.SED, 'calculate_confidence_intervals', return_value=ci)
mock_sed_sci = self.create_patch(builder.SED, 'store_confidence_intervals')
mock_sed_sbm = self.create_patch(builder.SED, 'set_best_model')
mock_p2s = self.create_patch(builder, 'photometry2str', return_value='TEST')
self.sed.master = {'include':0, 0:0, 'cmeas':[0,0], 'e_cmeas':[0,0], 'photband':[0,0]}
self.sed.igrid_search(teffrange=(10,20), loggrange=(4.5,4.5), ebvrange=(0,0.1), zrange=(0,0),rvrange=(3.1,3.1),vradrange=(0,0), df=4, set_model=True)
mock_sed_gr.assert_called_with(teffrange=(10,20), loggrange=(4.5,4.5), ebvrange=(0,0.1), zrange=(0,0),rvrange=(3.1,3.1),vradrange=(0,0))
mock_fit_ggp.assert_called()
mock_fit_isp.assert_called()
mock_sed_cr.assert_called()
self.assert_mock_args_in_last_call(mock_sed_cr, kwargs={'grid':grid})
mock_sed_cs.assert_called()
self.assert_mock_args_in_last_call(mock_sed_cs, kwargs={'df':4})
mock_sed_sci.assert_called()
self.assert_mock_args_in_last_call(mock_sed_sci, kwargs=ci)
mock_sed_cci.assert_called()
mock_sed_sbm.assert_called()
class XIntegrationTestCase(SEDTestCase):
photbands = ['STROMGREN.U', 'STROMGREN.B', 'STROMGREN.V', 'STROMGREN.Y',
'2MASS.H', '2MASS.J', '2MASS.KS']
measHot = None
measCold = None
measBin = None
@classmethod
def setUpClass(cls):
if not noMock:
sesame.search = mock.Mock(return_value={'plx':(0.0,0.0)})
if not noIntegration:
# ==== COLD model ====
model.set_defaults(grid='kurucztest')
model.copy2scratch(z='*', Rv='*')
measCold = model.get_itable_pix(photbands=cls.photbands, teff=array([6000]), \
logg=array([4.0]),ebv=array([0.01]), rv=array([2.8]), z=array([-0.25]))[0][:,0]
np.random.seed(111)
cls.measCold = measCold
# ==== HOT model ====
model.set_defaults(grid='tmaptest')
model.copy2scratch(z='*', Rv='*')
measHot = model.get_itable_pix(photbands=cls.photbands, teff=array([30000]), \
logg=array([5.5]),ebv=array([0.01]), rv=3.1, z=0.0)[0][:,0]
np.random.seed(111)
cls.measHot = measHot
# ==== BINARY model ====
grid1 = dict(grid='kurucztest')
grid2 = dict(grid='tmaptest')
model.set_defaults_multiple(grid1,grid2)
model.clean_scratch(z='*', Rv='*')
model.copy2scratch(z='*', Rv='*')
G, Msol, Rsol = constants.GG_cgs, constants.Msol_cgs, constants.Rsol_cgs
masses = [0.85, 0.50]
rad = array([np.sqrt(G*masses[0]*Msol/10**4.0)/Rsol])
rad2 = array([np.sqrt(G*masses[1]*Msol/10**5.5)/Rsol])
measBin = model.get_itable_pix(photbands=cls.photbands, teff=array([6000]), \
logg=array([4.0]),ebv=array([0.01]), teff2=array([30000]),
logg2=array([5.5]), ebv2=array([0.01]), rad=rad,
rad2=rad2)[0][:,0]
np.random.seed(111)
cls.measBin = measBin
cls.masses = masses
@unittest.skipIf(noIntegration, "Integration tests are skipped.")
def testiGrid_searchSingleCold(self):
""" INTEGRATION igrid_search single star (kurucz)"""
sed = builder.SED(ID='TEST', load_fits=False)
np.random.seed(111)
meas = self.measCold + np.random.uniform(0, 0.01, size=len(self.measCold)) * self.measCold
emeas = meas / 100.0
units = ['erg/s/cm2/AA' for i in meas]
source = ['SYNTH' for i in meas]
sed.add_photometry_fromarrays(meas, emeas, units, self.photbands, source)
model.set_defaults(grid='kurucztest')
model.copy2scratch(z='*', Rv='*')
np.random.seed(111)
sed.igrid_search(points=100000,teffrange=(5000, 7000),loggrange=(3.5, 4.5),
ebvrange=(0.005, 0.015),zrange=(-0.5,0.0),rvrange=(2.1,3.1),
vradrange=(0,0),df=None,CI_limit=0.95,set_model=True)
self.assertAlmostEqual(sed.results['igrid_search']['CI']['teff'], 6000, delta=50)
self.assertAlmostEqual(sed.results['igrid_search']['CI']['logg'], 3.98, delta=0.1)
self.assertAlmostEqual(sed.results['igrid_search']['CI']['ebv'], 0.011, delta=0.02)
self.assertAlmostEqual(sed.results['igrid_search']['CI']['rv'], 2.13, delta=0.5)
self.assertAlmostEqual(sed.results['igrid_search']['CI']['z'], -0.28, delta=0.1)
self.assertAlmostEqual(sed.results['igrid_search']['CI']['teff_l'], 5949, delta=50)
self.assertAlmostEqual(sed.results['igrid_search']['CI']['logg_l'], 3.62, delta=0.1)
self.assertAlmostEqual(sed.results['igrid_search']['CI']['ebv_l'], 0.005, delta=0.02)
self.assertAlmostEqual(sed.results['igrid_search']['CI']['rv_l'], 2.1, delta=0.1)
self.assertAlmostEqual(sed.results['igrid_search']['CI']['z_l'], -0.46, delta=0.1)
self.assertAlmostEqual(sed.results['igrid_search']['CI']['teff_u'], 6060, delta=50)
self.assertAlmostEqual(sed.results['igrid_search']['CI']['logg_u'], 4.5, delta=0.1)
self.assertAlmostEqual(sed.results['igrid_search']['CI']['ebv_u'], 0.015, delta=0.02)
self.assertAlmostEqual(sed.results['igrid_search']['CI']['rv_u'], 3.1, delta=0.1)
self.assertAlmostEqual(sed.results['igrid_search']['CI']['z_u'], -0.05, delta=0.1)
# check that the best model is stored
self.assertTrue('model' in sed.results['igrid_search'])
self.assertTrue('synflux' in sed.results['igrid_search'])
self.assertTrue('chi2' in sed.results['igrid_search'])
        self.assertEqual(len(sed.results['igrid_search']['model']), 3, msg='stored model has wrong number of columns (should be 3)')
        self.assertEqual(len(sed.results['igrid_search']['synflux']), 3, msg='stored synflux has wrong number of columns (should be 3)')

    @unittest.skipIf(noIntegration, "Integration tests are skipped.")
def testiGrid_searchSingleHot(self):
""" INTEGRATION igrid_search single star (tmap) """
sed = builder.SED(ID='TEST', load_fits=False)
np.random.seed(111)
meas = self.measHot + np.random.uniform(0, 0.04, size=len(self.measHot)) * self.measHot
emeas = meas / 100.0
units = ['erg/s/cm2/AA' for i in meas]
source = ['SYNTH' for i in meas]
sed.add_photometry_fromarrays(meas, emeas, units, self.photbands, source)
model.set_defaults(grid='tmaptest')
model.copy2scratch(z='*', Rv='*')
np.random.seed(111)
sed.igrid_search(points=100000,teffrange=(25000, 35000),loggrange=(5.0, 6.0),
ebvrange=(0.005, 0.015),zrange=(0,0),rvrange=(3.1,3.1),
vradrange=(0,0),df=None,CI_limit=0.95,set_model=True)
self.assertAlmostEqual(sed.results['igrid_search']['CI']['teff'], 30200, delta=250)
self.assertAlmostEqual(sed.results['igrid_search']['CI']['logg'], 5.67, delta=0.1)
self.assertAlmostEqual(sed.results['igrid_search']['CI']['ebv'], 0.0078, delta=0.02)
self.assertAlmostEqual(sed.results['igrid_search']['CI']['teff_l'], 29337, delta=250)
self.assertAlmostEqual(sed.results['igrid_search']['CI']['logg_l'], 5.0, delta=0.1)
self.assertAlmostEqual(sed.results['igrid_search']['CI']['ebv_l'], 0.005, delta=0.02)
self.assertAlmostEqual(sed.results['igrid_search']['CI']['teff_u'], 31623, delta=250)
self.assertAlmostEqual(sed.results['igrid_search']['CI']['logg_u'], 6.0, delta=0.1)
self.assertAlmostEqual(sed.results['igrid_search']['CI']['ebv_u'], 0.015, delta=0.02)
# check that the best model is stored
self.assertTrue('model' in sed.results['igrid_search'])
self.assertTrue('synflux' in sed.results['igrid_search'])
self.assertTrue('chi2' in sed.results['igrid_search'])
        self.assertEqual(len(sed.results['igrid_search']['model']), 3, msg='stored model has wrong number of columns (should be 3)')
        self.assertEqual(len(sed.results['igrid_search']['synflux']), 3, msg='stored synflux has wrong number of columns (should be 3)')

    @unittest.skipIf(noIntegration, "Integration tests are skipped.")
def testiGrid_searchBinary(self):
""" INTEGRATION igrid_search binary star (kurucz-tmap) """
sed = builder.BinarySED(ID='TEST', load_fits=False)
np.random.seed(111)
meas = self.measBin + np.random.uniform(0, 0.04, size=len(self.measBin)) * self.measBin
emeas = meas / 100.0
units = ['erg/s/cm2/AA' for i in meas]
source = ['SYNTH' for i in meas]
sed.add_photometry_fromarrays(meas, emeas, units, self.photbands, source)
grid1 = dict(grid='kurucztest')
grid2 = dict(grid='tmaptest')
model.set_defaults_multiple(grid1,grid2)
np.random.seed(111)
sed.igrid_search(points=100000,teffrange=[(5000, 7000),(25000, 35000)],
loggrange=[(3.5, 4.5),(5.0, 6.0)],
ebvrange=[(0.005, 0.015), (0.005, 0.015)],
zrange=[(0,0),(0,0)],
radrange=[(0,10),(0,10)],
masses=self.masses,
df=None,CI_limit=0.95,set_model=False)
teff = sed.results['igrid_search']['CI']['teff']
logg = sed.results['igrid_search']['CI']['logg']
ebv = sed.results['igrid_search']['CI']['ebv']
teff2 = sed.results['igrid_search']['CI']['teff2']
logg2 = sed.results['igrid_search']['CI']['logg2']
ebv2 = sed.results['igrid_search']['CI']['ebv2']
self.assertAlmostEqual(sed.results['igrid_search']['CI']['teff'], 6134, delta=50)
self.assertAlmostEqual(sed.results['igrid_search']['CI']['logg'], 4.38, delta=0.25)
self.assertAlmostEqual(sed.results['igrid_search']['CI']['ebv'], 0.008, delta=0.002)
self.assertAlmostEqual(sed.results['igrid_search']['CI']['teff_l'], 5826, delta=50)
self.assertAlmostEqual(sed.results['igrid_search']['CI']['logg_l'], 4.35, delta=0.1)
self.assertAlmostEqual(sed.results['igrid_search']['CI']['ebv_l'], 0.005, delta=0.002)
self.assertAlmostEqual(sed.results['igrid_search']['CI']['teff_u'], 6291, delta=50)
self.assertAlmostEqual(sed.results['igrid_search']['CI']['logg_u'], 4.40, delta=0.1)
self.assertAlmostEqual(sed.results['igrid_search']['CI']['ebv_u'], 0.015, delta=0.002)
self.assertAlmostEqual(sed.results['igrid_search']['CI']['teff2'], 32195, delta=250)
self.assertAlmostEqual(sed.results['igrid_search']['CI']['logg2'], 5.97, delta=0.25)
self.assertAlmostEqual(sed.results['igrid_search']['CI']['ebv2'], 0.008, delta=0.002)
self.assertAlmostEqual(sed.results['igrid_search']['CI']['teff2_l'], 26820, delta=250)
self.assertAlmostEqual(sed.results['igrid_search']['CI']['logg2_l'], 5.70, delta=0.1)
self.assertAlmostEqual(sed.results['igrid_search']['CI']['ebv2_l'], 0.005, delta=0.002)
self.assertAlmostEqual(sed.results['igrid_search']['CI']['teff2_u'], 33025, delta=250)
self.assertAlmostEqual(sed.results['igrid_search']['CI']['logg2_u'], 5.99, delta=0.1)
self.assertAlmostEqual(sed.results['igrid_search']['CI']['ebv2_u'], 0.015, delta=0.002)

    @unittest.skipIf(noIntegration, "Integration tests are skipped.")
def testiMinimizeSingleCold(self):
""" INTEGRATION iminimize single star (kurucz) """
sed = builder.SED(ID='TEST', load_fits=False)
np.random.seed(111)
meas = self.measCold + np.random.uniform(0, 0.04, size=len(self.measCold)) * self.measCold
emeas = meas / 100.0
units = ['erg/s/cm2/AA' for i in meas]
source = ['SYNTH' for i in meas]
sed.add_photometry_fromarrays(meas, emeas, units, self.photbands, source)
model.set_defaults(grid='kurucztest')
model.copy2scratch(z='*', Rv='*')
np.random.seed(111)
sed.iminimize(teff=6000, logg=4.0, ebv=0.007, z=-0.3, rv=2.4, vrad=0,
teffrange=(5000, 7000),loggrange=(3.5, 4.5),zrange=(-0.5,0.0),
ebvrange=(0.005, 0.015), rvrange=(2.1,3.1),vradrange=(0,0),
points=None,df=None,CI_limit=0.60,calc_ci=True, set_model=True)
self.assertAlmostEqual(sed.results['iminimize']['CI']['teff'], 6036, delta=50)
self.assertAlmostEqual(sed.results['iminimize']['CI']['logg'], 4.19, delta=0.1)
self.assertAlmostEqual(sed.results['iminimize']['CI']['ebv'], 0.015, delta=0.02)
self.assertAlmostEqual(sed.results['iminimize']['CI']['z'], -0.21, delta=0.1)
self.assertAlmostEqual(sed.results['iminimize']['CI']['rv'], 2.1, delta=0.3)
self.assertAlmostEqual(sed.results['iminimize']['CI']['scale'], 1, delta=0.5)
self.assertAlmostEqual(sed.results['iminimize']['CI']['teff_l'], 6025, delta=50)
self.assertAlmostEqual(sed.results['iminimize']['CI']['teff_u'], 6036, delta=50)
self.assertAlmostEqual(sed.results['iminimize']['CI']['scale_l'], 1, delta=0.5)
self.assertAlmostEqual(sed.results['iminimize']['CI']['scale_u'], 1, delta=0.5)
self.assertEqual(sed.results['iminimize']['grid']['teffstart'][0], 6000)
self.assertEqual(sed.results['iminimize']['grid']['loggstart'][0], 4.0)
self.assertEqual(sed.results['iminimize']['grid']['ebvstart'][0], 0.007)
self.assertEqual(sed.results['iminimize']['grid']['zstart'][0], -0.3)
self.assertEqual(sed.results['iminimize']['grid']['rvstart'][0], 2.4)
self.assertAlmostEqual(sed.results['iminimize']['grid']['chisq'][0], 3.9, delta=1)
self.assertTrue('model' in sed.results['iminimize'])
self.assertTrue('synflux' in sed.results['iminimize'])
self.assertTrue('chi2' in sed.results['iminimize'])
        self.assertEqual(len(sed.results['iminimize']['model']), 3, msg='stored model has wrong number of columns (should be 3)')
        self.assertEqual(len(sed.results['iminimize']['synflux']), 3, msg='stored synflux has wrong number of columns (should be 3)')

    @unittest.skipIf(noIntegration, "Integration tests are skipped.")
def testiMinimizeSingleHot(self):
""" INTEGRATION iminimize single star (tmap) """
sed = builder.SED(ID='TEST', load_fits=False)
np.random.seed(111)
meas = self.measHot + np.random.uniform(0, 0.04, size=len(self.measHot)) * self.measHot
emeas = meas / 100.0
units = ['erg/s/cm2/AA' for i in meas]
source = ['SYNTH' for i in meas]
sed.add_photometry_fromarrays(meas, emeas, units, self.photbands, source)
model.set_defaults(grid='tmaptest')
model.copy2scratch(z='*', Rv='*')
np.random.seed(111)
sed.iminimize(teff=27000, logg=5.1, ebv=0.01, z=0, rv=3.1, vrad=0,
teffrange=(25000, 35000),loggrange=(5.0, 6.0),
ebvrange=(0.005, 0.015),zrange=(0,0),rvrange=(3.1,3.1),
vradrange=(0,0),df=None,CI_limit=0.95,set_model=False)
self.assertAlmostEqual(sed.results['iminimize']['CI']['teff'], 30250, delta=100)
self.assertAlmostEqual(sed.results['iminimize']['CI']['logg'], 5.66, delta=0.1)
self.assertAlmostEqual(sed.results['iminimize']['CI']['ebv'], 0.008, delta=0.02)
self.assertAlmostEqual(sed.results['iminimize']['grid']['chisq'][0], 3.8, delta=1)

    @unittest.skipIf(noIntegration, "Integration tests are skipped.")
def testiMinimizeBinary(self):
""" INTEGRATION iminimize binary star (kurucz-tmap) """
sed = builder.BinarySED(ID='TEST', load_fits=False)
np.random.seed(111)
meas = self.measBin + np.random.uniform(0, 0.01, size=len(self.measBin)) * self.measBin
emeas = meas / 100.0
units = ['erg/s/cm2/AA' for i in meas]
source = ['SYNTH' for i in meas]
sed.add_photometry_fromarrays(meas, emeas, units, self.photbands, source)
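        # 'abs' scheme: fit absolute fluxes rather than colour indices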
sed.set_photometry_scheme('abs')
np.random.seed(111)
sed.iminimize(teff=(5300,29000), logg=(4.0,5.3), ebv=(0.01,0.01), z=(0,0), rv=(3.1,3.1),
vrad=(0,0),teffrange=[(5000,7000),(25000, 35000)],loggrange=[(3.5,4.5),
(5.0, 6.0)], ebvrange=[(0.01,0.01),(0.01, 0.01)] ,zrange=[(0,0),(0,0)],
rvrange=[(3.1,3.1),(3.1,3.1)], vradrange=[(0,0),(0,0)], df=2,
CI_limit=None, masses = self.masses,calc_ci=False, set_model=True)
self.assertAlmostEqual(sed.results['iminimize']['CI']['teff'], 6023, delta=100)
self.assertAlmostEqual(sed.results['iminimize']['CI']['logg'], 3.95, delta=0.1)
self.assertAlmostEqual(sed.results['iminimize']['CI']['ebv'], 0.01, delta=0.02)
self.assertAlmostEqual(sed.results['iminimize']['CI']['teff2'], 30506, delta=100)
self.assertAlmostEqual(sed.results['iminimize']['CI']['logg2'], 5.47, delta=0.1)
self.assertAlmostEqual(sed.results['iminimize']['CI']['scale'], 0.9, delta=0.5)
self.assertTrue('model' in sed.results['iminimize'])
self.assertTrue('synflux' in sed.results['iminimize'])
self.assertTrue('chi2' in sed.results['iminimize'])
        self.assertEqual(len(sed.results['iminimize']['model']), 3, msg='stored model has wrong number of columns (should be 3)')
        self.assertEqual(len(sed.results['iminimize']['synflux']), 3, msg='stored synflux has wrong number of columns (should be 3)')
| gpl-3.0 |
richardliaw/ray | python/ray/serve/examples/doc/tutorial_sklearn.py | 2 | 2522 | # yapf: disable
import ray
# __doc_import_begin__
from ray import serve
import pickle
import json
import numpy as np
import requests
import os
import tempfile
from sklearn.datasets import load_iris
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.metrics import mean_squared_error
# __doc_import_end__
# yapf: enable

# __doc_train_model_begin__
# Load data
iris_dataset = load_iris()
data, target, target_names = iris_dataset["data"], iris_dataset[
"target"], iris_dataset["target_names"]
# Instantiate model
model = GradientBoostingClassifier()
# Training and validation split
# Shuffle features and labels together so they stay aligned
shuffle_idx = np.random.permutation(len(data))
data, target = data[shuffle_idx], target[shuffle_idx]
train_x, train_y = data[:100], target[:100]
val_x, val_y = data[100:], target[100:]
# Train and evaluate models
model.fit(train_x, train_y)
print("MSE:", mean_squared_error(model.predict(val_x), val_y))
# Save the model and label to file
MODEL_PATH = os.path.join(tempfile.gettempdir(),
"iris_model_logistic_regression.pkl")
LABEL_PATH = os.path.join(tempfile.gettempdir(), "iris_labels.json")
with open(MODEL_PATH, "wb") as f:
pickle.dump(model, f)
with open(LABEL_PATH, "w") as f:
json.dump(target_names.tolist(), f)
# __doc_train_model_end__

# __doc_define_servable_begin__
class BoostingModel:
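    # Loads the pickled classifier and label names once per backend replica;
    # __call__ then serves one prediction per HTTP request.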
def __init__(self):
with open(MODEL_PATH, "rb") as f:
self.model = pickle.load(f)
with open(LABEL_PATH) as f:
self.label_list = json.load(f)
def __call__(self, flask_request):
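        # Each request body is JSON carrying the four iris feature values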
payload = flask_request.json
print("Worker: received flask request with data", payload)
input_vector = [
payload["sepal length"],
payload["sepal width"],
payload["petal length"],
payload["petal width"],
]
prediction = self.model.predict([input_vector])[0]
human_name = self.label_list[prediction]
return {"result": human_name}
# __doc_define_servable_end__

ray.init(num_cpus=8)
# __doc_deploy_begin__
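# Start Serve, register the model class as a backend, and expose it over HTTP at /regressor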
client = serve.start()
client.create_backend("lr:v1", BoostingModel)
client.create_endpoint("iris_classifier", backend="lr:v1", route="/regressor")
# __doc_deploy_end__

# __doc_query_begin__
sample_request_input = {
"sepal length": 1.2,
"sepal width": 1.0,
"petal length": 1.1,
"petal width": 0.9,
}
response = requests.get(
"http://localhost:8000/regressor", json=sample_request_input)
print(response.text)
# Result:
# {
# "result": "versicolor"
# }
# __doc_query_end__
| apache-2.0 |