repo_name (stringlengths 6-92) | path (stringlengths 4-191) | copies (stringclasses, 322 values) | size (stringlengths 4-6) | content (stringlengths 821-753k) | license (stringclasses, 15 values) |
---|---|---|---|---|---|
Eniac-Xie/faster-rcnn-resnet | tools/train_svms.py | 16 | 13480 | #!/usr/bin/env python
# --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
"""
Train post-hoc SVMs using the algorithm and hyper-parameters from
traditional R-CNN.
"""
import _init_paths
from fast_rcnn.config import cfg, cfg_from_file
from datasets.factory import get_imdb
from fast_rcnn.test import im_detect
from utils.timer import Timer
import caffe
import argparse
import pprint
import numpy as np
import numpy.random as npr
import cv2
from sklearn import svm
import os, sys
class SVMTrainer(object):
"""
Trains post-hoc detection SVMs for all classes using the algorithm
and hyper-parameters of traditional R-CNN.
"""
def __init__(self, net, imdb):
self.imdb = imdb
self.net = net
self.layer = 'fc7'
self.hard_thresh = -1.0001
self.neg_iou_thresh = 0.3
dim = net.params['cls_score'][0].data.shape[1]
scale = self._get_feature_scale()
print('Feature dim: {}'.format(dim))
print('Feature scale: {:.3f}'.format(scale))
self.trainers = [SVMClassTrainer(cls, dim, feature_scale=scale)
for cls in imdb.classes]
def _get_feature_scale(self, num_images=100):
TARGET_NORM = 20.0 # Magic value from traditional R-CNN
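# The multiplier returned at the end of this method rescales pooled features
# so that their average L2 norm over the sampled images equals TARGET_NORM,
# mirroring the feature scaling used for SVM training in traditional R-CNN.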
_t = Timer()
roidb = self.imdb.roidb
total_norm = 0.0
count = 0.0
inds = npr.choice(xrange(self.imdb.num_images), size=num_images,
replace=False)
for i_, i in enumerate(inds):
im = cv2.imread(self.imdb.image_path_at(i))
if roidb[i]['flipped']:
im = im[:, ::-1, :]
_t.tic()
scores, boxes = im_detect(self.net, im, roidb[i]['boxes'])
_t.toc()
feat = self.net.blobs[self.layer].data
total_norm += np.sqrt((feat ** 2).sum(axis=1)).sum()
count += feat.shape[0]
print('{}/{}: avg feature norm: {:.3f}'.format(i_ + 1, num_images,
total_norm / count))
return TARGET_NORM * 1.0 / (total_norm / count)
def _get_pos_counts(self):
counts = np.zeros((len(self.imdb.classes)), dtype=np.int)
roidb = self.imdb.roidb
for i in xrange(len(roidb)):
for j in xrange(1, self.imdb.num_classes):
I = np.where(roidb[i]['gt_classes'] == j)[0]
counts[j] += len(I)
for j in xrange(1, self.imdb.num_classes):
print('class {:s} has {:d} positives'.
format(self.imdb.classes[j], counts[j]))
return counts
def get_pos_examples(self):
counts = self._get_pos_counts()
for i in xrange(len(counts)):
self.trainers[i].alloc_pos(counts[i])
_t = Timer()
roidb = self.imdb.roidb
num_images = len(roidb)
# num_images = 100
for i in xrange(num_images):
im = cv2.imread(self.imdb.image_path_at(i))
if roidb[i]['flipped']:
im = im[:, ::-1, :]
gt_inds = np.where(roidb[i]['gt_classes'] > 0)[0]
gt_boxes = roidb[i]['boxes'][gt_inds]
_t.tic()
scores, boxes = im_detect(self.net, im, gt_boxes)
_t.toc()
feat = self.net.blobs[self.layer].data
for j in xrange(1, self.imdb.num_classes):
cls_inds = np.where(roidb[i]['gt_classes'][gt_inds] == j)[0]
if len(cls_inds) > 0:
cls_feat = feat[cls_inds, :]
self.trainers[j].append_pos(cls_feat)
print('get_pos_examples: {:d}/{:d} {:.3f}s'
.format(i + 1, len(roidb), _t.average_time))
def initialize_net(self):
# Start all SVM parameters at zero
self.net.params['cls_score'][0].data[...] = 0
self.net.params['cls_score'][1].data[...] = 0
# Initialize SVMs in a smart way. Not doing this because it's such
# a good initialization that we might not learn something close to
# the SVM solution.
# # subtract background weights and biases for the foreground classes
# w_bg = self.net.params['cls_score'][0].data[0, :]
# b_bg = self.net.params['cls_score'][1].data[0]
# self.net.params['cls_score'][0].data[1:, :] -= w_bg
# self.net.params['cls_score'][1].data[1:] -= b_bg
# # set the background weights and biases to 0 (where they shall remain)
# self.net.params['cls_score'][0].data[0, :] = 0
# self.net.params['cls_score'][1].data[0] = 0
def update_net(self, cls_ind, w, b):
self.net.params['cls_score'][0].data[cls_ind, :] = w
self.net.params['cls_score'][1].data[cls_ind] = b
def train_with_hard_negatives(self):
_t = Timer()
roidb = self.imdb.roidb
num_images = len(roidb)
# num_images = 100
for i in xrange(num_images):
im = cv2.imread(self.imdb.image_path_at(i))
if roidb[i]['flipped']:
im = im[:, ::-1, :]
_t.tic()
scores, boxes = im_detect(self.net, im, roidb[i]['boxes'])
_t.toc()
feat = self.net.blobs[self.layer].data
for j in xrange(1, self.imdb.num_classes):
hard_inds = \
np.where((scores[:, j] > self.hard_thresh) &
(roidb[i]['gt_overlaps'][:, j].toarray().ravel() <
self.neg_iou_thresh))[0]
if len(hard_inds) > 0:
hard_feat = feat[hard_inds, :].copy()
new_w_b = \
self.trainers[j].append_neg_and_retrain(feat=hard_feat)
if new_w_b is not None:
self.update_net(j, new_w_b[0], new_w_b[1])
print(('train_with_hard_negatives: '
'{:d}/{:d} {:.3f}s').format(i + 1, len(roidb),
_t.average_time))
def train(self):
# Initialize SVMs using
# a. w_i = fc8_w_i - fc8_w_0
# b. b_i = fc8_b_i - fc8_b_0
# c. Install SVMs into net
self.initialize_net()
# Pass over roidb to count num positives for each class
# a. Pre-allocate arrays for positive feature vectors
# Pass over roidb, computing features for positives only
self.get_pos_examples()
# Pass over roidb
# a. Compute cls_score with forward pass
# b. For each class
# i. Select hard negatives
# ii. Add them to cache
# c. For each class
# i. If SVM retrain criteria met, update SVM
# ii. Install new SVM into net
self.train_with_hard_negatives()
# One final SVM retraining for each class
# Install SVMs into net
for j in xrange(1, self.imdb.num_classes):
new_w_b = self.trainers[j].append_neg_and_retrain(force=True)
self.update_net(j, new_w_b[0], new_w_b[1])
class SVMClassTrainer(object):
"""Manages post-hoc SVM training for a single object class."""
def __init__(self, cls, dim, feature_scale=1.0,
C=0.001, B=10.0, pos_weight=2.0):
self.pos = np.zeros((0, dim), dtype=np.float32)
self.neg = np.zeros((0, dim), dtype=np.float32)
self.B = B
self.C = C
self.cls = cls
self.pos_weight = pos_weight
self.dim = dim
self.feature_scale = feature_scale
self.svm = svm.LinearSVC(C=C, class_weight={1: 2, -1: 1},
intercept_scaling=B, verbose=1,
penalty='l2', loss='l1',
random_state=cfg.RNG_SEED, dual=True)
self.pos_cur = 0
self.num_neg_added = 0
self.retrain_limit = 2000
self.evict_thresh = -1.1
self.loss_history = []
def alloc_pos(self, count):
self.pos_cur = 0
self.pos = np.zeros((count, self.dim), dtype=np.float32)
def append_pos(self, feat):
num = feat.shape[0]
self.pos[self.pos_cur:self.pos_cur + num, :] = feat
self.pos_cur += num
def train(self):
print('>>> Updating {} detector <<<'.format(self.cls))
num_pos = self.pos.shape[0]
num_neg = self.neg.shape[0]
print('Cache holds {} pos examples and {} neg examples'.
format(num_pos, num_neg))
X = np.vstack((self.pos, self.neg)) * self.feature_scale
y = np.hstack((np.ones(num_pos),
-np.ones(num_neg)))
self.svm.fit(X, y)
w = self.svm.coef_
b = self.svm.intercept_[0]
scores = self.svm.decision_function(X)
pos_scores = scores[:num_pos]
neg_scores = scores[num_pos:]
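# The cached-example objective tracked below is
#   0.5 * (||w||^2 + b^2) + C * (pos_weight * sum_i max(0, 1 - s_i) + sum_j max(0, 1 + s_j)),
# where s_i / s_j are the decision values of the cached positives / negatives.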
pos_loss = (self.C * self.pos_weight *
np.maximum(0, 1 - pos_scores).sum())
neg_loss = self.C * np.maximum(0, 1 + neg_scores).sum()
reg_loss = 0.5 * np.dot(w.ravel(), w.ravel()) + 0.5 * b ** 2
tot_loss = pos_loss + neg_loss + reg_loss
self.loss_history.append((tot_loss, pos_loss, neg_loss, reg_loss))
for i, losses in enumerate(self.loss_history):
print((' {:d}: obj val: {:.3f} = {:.3f} '
'(pos) + {:.3f} (neg) + {:.3f} (reg)').format(i, *losses))
# Sanity check
scores_ret = (
X * 1.0 / self.feature_scale).dot(w.T * self.feature_scale) + b
assert np.allclose(scores, scores_ret[:, 0], atol=1e-5), \
"Scores from returned model don't match decision function"
return ((w * self.feature_scale, b), pos_scores, neg_scores)
def append_neg_and_retrain(self, feat=None, force=False):
if feat is not None:
num = feat.shape[0]
self.neg = np.vstack((self.neg, feat))
self.num_neg_added += num
if self.num_neg_added > self.retrain_limit or force:
self.num_neg_added = 0
new_w_b, pos_scores, neg_scores = self.train()
# scores = np.dot(self.neg, new_w_b[0].T) + new_w_b[1]
# easy_inds = np.where(neg_scores < self.evict_thresh)[0]
not_easy_inds = np.where(neg_scores >= self.evict_thresh)[0]
if len(not_easy_inds) > 0:
self.neg = self.neg[not_easy_inds, :]
# self.neg = np.delete(self.neg, easy_inds)
print(' Pruning easy negatives')
print(' Cache holds {} pos examples and {} neg examples'.
format(self.pos.shape[0], self.neg.shape[0]))
print(' {} pos support vectors'.format((pos_scores <= 1).sum()))
print(' {} neg support vectors'.format((neg_scores >= -1).sum()))
return new_w_b
else:
return None
def parse_args():
"""
Parse input arguments
"""
parser = argparse.ArgumentParser(description='Train SVMs (old skool)')
parser.add_argument('--gpu', dest='gpu_id', help='GPU device id to use [0]',
default=0, type=int)
parser.add_argument('--def', dest='prototxt',
help='prototxt file defining the network',
default=None, type=str)
parser.add_argument('--net', dest='caffemodel',
help='model to test',
default=None, type=str)
parser.add_argument('--cfg', dest='cfg_file',
help='optional config file', default=None, type=str)
parser.add_argument('--imdb', dest='imdb_name',
help='dataset to train on',
default='voc_2007_trainval', type=str)
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
args = parser.parse_args()
return args
if __name__ == '__main__':
# Must turn this off to prevent issues when digging into the net blobs to
# pull out features (tricky!)
cfg.DEDUP_BOXES = 0
# Must turn this on because we use the test im_detect() method to harvest
# hard negatives
cfg.TEST.SVM = True
args = parse_args()
print('Called with args:')
print(args)
if args.cfg_file is not None:
cfg_from_file(args.cfg_file)
print('Using config:')
pprint.pprint(cfg)
# fix the random seed for reproducibility
np.random.seed(cfg.RNG_SEED)
# set up caffe
caffe.set_mode_gpu()
if args.gpu_id is not None:
caffe.set_device(args.gpu_id)
net = caffe.Net(args.prototxt, args.caffemodel, caffe.TEST)
net.name = os.path.splitext(os.path.basename(args.caffemodel))[0]
out = os.path.splitext(os.path.basename(args.caffemodel))[0] + '_svm'
out_dir = os.path.dirname(args.caffemodel)
imdb = get_imdb(args.imdb_name)
print('Loaded dataset `{:s}` for training'.format(imdb.name))
# enhance roidb to contain flipped examples
if cfg.TRAIN.USE_FLIPPED:
print('Appending horizontally-flipped training examples...')
imdb.append_flipped_images()
print('done')
SVMTrainer(net, imdb).train()
filename = '{}/{}.caffemodel'.format(out_dir, out)
net.save(filename)
print('Wrote svm model to: {:s}'.format(filename))
| mit |
lepy/phuzzy | docs/examples/ssb/ssb_approx.py | 1 | 4225 | # -*- coding: utf-8 -*-
import phuzzy as ph
import phuzzy.approx.doe
import phuzzy.mpl.plots
from phuzzy.mpl import mix_mpl
import matplotlib.pyplot as plt
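# Midspan deflection of a simply supported beam under a central point load P:
# w = P * L^3 / (48 * E * I), with I = W * H^3 / 12 the second moment of area
# of the rectangular W x H cross-section. calc_w() evaluates this closed form.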
def calc_w(P, L, W, H, E):
I = W * H** 3 / 12.
w = P * L ** 3 / (48 * E * I)
return w
number_of_alpha_levels = 31
# load P
P0 = 5000. # N
dP = 0.01 * P0 # N
P = ph.Triangle(alpha0=[P0 - dP, P0 + dP], alpha1=[P0], name="P", number_of_alpha_levels=number_of_alpha_levels)
# dimensions L, W, H
W0 = 50 # mm
H0 = 100 # mm
L0 = 2000 # mm
dW = 0.01 * W0 # mm
dH = 0.01 * H0 # mm
dL = 0.01 * L0 # mm
L = ph.Triangle(alpha0=[L0 - dL, L0 + dL], alpha1=[L0], name="L", number_of_alpha_levels=number_of_alpha_levels)
W = ph.Triangle(alpha0=[W0 - dW, W0 + dW], alpha1=[W0], name="W", number_of_alpha_levels=number_of_alpha_levels)
H = ph.Triangle(alpha0=[H0 - dH, H0 + dH], alpha1=[H0], name="H", number_of_alpha_levels=number_of_alpha_levels)
# material
E0 = 30000. # N/mm2
dE = 0.1 * E0 # N/mm2
E = ph.TruncNorm(alpha0=[E0 - dE, E0 + dE], alpha1=[E0], name="E", number_of_alpha_levels=number_of_alpha_levels)
I0 = W0 * H0 ** 3 / 12.
w0 = P0 * L0 ** 3 / (48 * E0 * I0)
print("I0 = {:.4g} mm^4".format(I0))
# I0 = 4.167e+06 mm^4
print("w0 = {:.4g} mm".format(w0))
# w0 = 6.667 mm
I = W * H** 3 / 12.
I.name = "I"
w = P * L ** 3 / (48 * E * I)
w.name = r"P L^3 / (48 EI)"
print("I = {} mm^4".format(I))
# I = FuzzyNumber(W*H^3/12.0:[[4002483.375, 4335850.041666667], [4166666.6666666665, 4166666.6666666665]]) mm^4
print("w = {} mm".format(w))
# w = FuzzyNumber(P*L^3/E*48*W*H^3/12.0:[[5.594629603627992, 8.024370049019725], [6.666666666666667, 6.666666666666667]]) mm
w_mean = w.mean()
dw_l = w_mean - w.min()
dw_r = w.max() - w_mean
print("w = {:.4g} mm (- {:.4g}|+ {:.4g})".format(w_mean, dw_l, dw_r))
# w = 6.703 mm (- 1.109|+ 1.321)
print("w = {:.4g} mm [{:.4g},{:.4g}]".format(w_mean, w.min(), w.max()))
# w = 6.703 mm [5.595,8.024]
# create Expression object
expr = phuzzy.approx.doe.Expression(designvars=[P, L, W, H, E],
function=calc_w,
name="w_a")
# expr.generate_training_doe(name="train", n=30, method="lhs")
expr.generate_training_doe(name="train", n=30, method="cc")
expr.eval()
w_a = expr.get_fuzzynumber_from_results(name="w_a")
expr.fit_model(model="knn")
print(expr.model)
expr.generate_prediction_doe(name="prediction", n=100000, method="lhs")
# expr.generate_prediction_doe(name="prediction", n=100, method="meshgrid")
w_b = expr.predict(name="w_b")
print(w_b)
mix_mpl(w_b)
fig, axs = phuzzy.mpl.plots.plot_xyz(w, w_a, w_b)
mix_mpl(w)
w.plot(axs[1], labels=False)
w.plot(axs[2], labels=False)
samples = expr.results_training
axs[1].scatter(samples.res, samples.alpha, s=3)
axs[1].set_title("%d training samples" % len(expr.results_training))
axs[2].scatter(samples.res, samples.alpha, s=3)
axs[2].set_title("%d training samples" % len(expr.results_training))
fig.tight_layout()
plt.show()
mix_mpl(I)
mix_mpl(w)
H_ = 100. # mm
B_ = 300. # mm
fig, axs = plt.subplots(1, 2, dpi=90, facecolor='w', edgecolor='k', figsize=(B_ / 25.4, H_ / 25.4))
axs[0].axvline(I0, lw=2, alpha=.4, c="r", label="$I_0$")
axs[1].axvline(w0, lw=2, alpha=.4, c="r", label="$w_0 = {:.4g}\,mm$".format(w0))
I.plot(ax=axs[0])
w.plot(ax=axs[1])
axs[0].set_title("area moment of inertia $I$")
axs[1].set_title("deflection $w$")
axs[0].set_xlabel(r"area moment of inertia $I=\frac{WH^3}{12}$")
axs[1].set_xlabel(r"deflection $w=\frac{PL^3}{48EI}$" + "$ = {:.4g}\,mm\,[{:.4g},{:.4g}]$".format(w_mean, w.min(), w.max()))
axs[0].legend()
axs[1].legend()
fig.tight_layout(pad=1.18, h_pad=1.1)
fig.savefig("ssb.png")
H_ = 250. # mm
B_ = 300. # mm
fig, axs = plt.subplots(3, 2, dpi=90, facecolor='w', edgecolor='k', figsize=(B_ / 25.4, H_ / 25.4))
A = W * H
ys = [P, L,
W, H,
E, A]
P.title = r"load $P$"
L.title = r"length $L$"
W.title = r"width $W$"
H.title = r"height $H$"
E.title = r"young's modulus $E$"
A.title = r"cross section area $A$"
for i, y in enumerate(ys):
mix_mpl(y)
ax = axs.ravel()[i]
y.plot(ax=ax)
if hasattr(y, "title"):
ax.set_title(y.title)
fig.tight_layout()
fig.savefig("ssb_parameter.png")
plt.show()
| mit |
kerrpy/kerrpy | kerrpy/geodesics.py | 1 | 5257 | from .utils.draw import drawScene, drawGeodesic
from math import gcd
import numpy as np
from matplotlib import pyplot as plt
SPHERE = 0
DISK = 1
HORIZON = 2
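# Status codes reported for each traced ray: it either escaped to the background
# sphere, hit the disk, or crossed the horizon. Geodesic.__init__ uses them to
# truncate the trajectory at the first collision and to pick a plotting colour.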
class Geodesic:
def __init__(self, status, coordinates, colour='royalBlue'):
self.status = SPHERE
self.coordinates = coordinates
# Detect if the ray collided with the disk, remove the following steps
# and change its colour
indicesDisk = np.where(status == DISK)[0]
if indicesDisk.size > 0:
self.status = DISK
firstCollision = indicesDisk[0]
self.coordinates = coordinates[:firstCollision, :]
# Detect if the ray entered the horizon, remove the following steps
# and change its colour
indicesCollision = np.where(status == HORIZON)[0]
if indicesCollision.size > 0:
self.status = HORIZON
firstCollision = indicesCollision[0]
self.coordinates = coordinates[:firstCollision, :]
# Set colour
if self.status == SPHERE:
self.colour = colour
elif self.status == HORIZON:
self.colour = 'maroon'
else:
self.colour = 'darkolivegreen'
def plot(self, ax=None):
showPlot = False
if not ax:
showPlot = True
# Start figure
fig = plt.figure()
# Start 3D plot
ax = fig.gca(projection='3d')
ax.set_axis_off()
# Set axes limits
ax.set_xlim3d(-25, 25)
ax.set_ylim3d(-25, 25)
ax.set_zlim3d(-25, 25)
# Draw the scene
drawScene(ax)
drawGeodesic(ax, self.coordinates, self.colour)
if showPlot:
# Show the plot
plt.show()
class CongruenceSnapshot:
def __init__(self, status, coordinates, texels=None):
self.status = status
self.coordinates = coordinates
self.texels = texels
self.congruenceMatrixRows = self.status.shape[0]
self.congruenceMatrixCols = self.status.shape[1]
self.dpi = gcd(self.status.shape[0], self.status.shape[1])
self.imageSize = (self.status.shape[0] / self.dpi, self.status.shape[1] / self.dpi)
self.numPixels = self.congruenceMatrixRows * self.congruenceMatrixCols
self.colors = [
[1, 1, 1], # Sphere
[1, 0, 0], # Disk
[0, 0, 0] # Horizon
]
def plot(self):
fig = plt.figure(frameon=False)
fig.set_size_inches(self.congruenceMatrixCols, self.congruenceMatrixRows)
ax = plt.Axes(fig, [0., 0., 1., 1.])
ax.set_axis_off()
fig.add_axes(ax)
if self.texels is None:
image = np.empty((self.congruenceMatrixRows, self.congruenceMatrixCols, 3))
for row in range(self.congruenceMatrixRows):
for col in range(self.congruenceMatrixCols):
status = self.status[row, col]
image[row, col, :] = self.colors[status]
ax.imshow(image)
else:
ax.imshow(self.texels)
plt.show()
def save(self, path):
fig = plt.figure(frameon=False)
fig.set_size_inches(self.imageSize[1], self.imageSize[0])
ax = plt.Axes(fig, [0., 0., 1., 1.])
ax.set_axis_off()
fig.add_axes(ax)
if self.texels is None:
image = np.empty((self.congruenceMatrixRows, self.congruenceMatrixCols, 3))
for row in range(self.congruenceMatrixRows):
for col in range(self.congruenceMatrixCols):
status = self.status[row, col]
image[row, col, :] = self.colors[status]
ax.imshow(image)
else:
ax.imshow(self.texels)
fig.savefig(path, dpi=self.dpi)
plt.close(fig)
class Congruence:
def __init__(self, status, coordinates):
self.status = status
self.coordinates = coordinates
self.congruenceMatrixRows = status.shape[0]
self.congruenceMatrixCols = status.shape[1]
self.numPixels = self.congruenceMatrixRows * self.congruenceMatrixCols
self.numSlices = status.shape[2]
self.colors = [
[1, 1, 1], # Sphere
[1, 0, 0], # Disk
[0, 0, 0] # Horizon
]
def snapshot(self, instant):
return CongruenceSnapshot(self.status[:, :, instant], self.coordinates[:, :, :, instant])
def geodesic(self, row, col):
return Geodesic(self.status[row, col, :], np.transpose(self.coordinates[row, col, :, :]))
def plot(self):
# Start figure
fig = plt.figure()
# Start 3D plot
ax = fig.gca(projection='3d')
ax.set_axis_off()
# Set axes limits
ax.set_xlim3d(-25, 25)
ax.set_ylim3d(-25, 25)
ax.set_zlim3d(-25, 25)
# Draw the scene
drawScene(ax)
# Draw the rays
for row in range(0, self.congruenceMatrixRows):
for col in range(0, self.congruenceMatrixCols):
self.geodesic(row, col).plot(ax)
# Add a legend
# ax.legend()
# Show the plot
plt.show()
| gpl-3.0 |
mjgrav2001/scikit-learn | examples/ensemble/plot_gradient_boosting_oob.py | 230 | 4762 | """
======================================
Gradient Boosting Out-of-Bag estimates
======================================
Out-of-bag (OOB) estimates can be a useful heuristic to estimate
the "optimal" number of boosting iterations.
OOB estimates are almost identical to cross-validation estimates but
they can be computed on-the-fly without the need for repeated model
fitting.
OOB estimates are only available for Stochastic Gradient Boosting
(i.e. ``subsample < 1.0``); the estimates are derived from the improvement
in loss based on the examples not included in the bootstrap sample
(the so-called out-of-bag examples).
The OOB estimator is a pessimistic estimator of the true
test loss, but remains a fairly good approximation for a small number of trees.
The figure shows the cumulative sum of the negative OOB improvements
as a function of the boosting iteration. As you can see, it tracks the test
loss for the first hundred iterations but then diverges in a
pessimistic way.
The figure also shows the performance of 3-fold cross validation which
usually gives a better estimate of the test loss
but is computationally more demanding.
"""
print(__doc__)
# Author: Peter Prettenhofer <[email protected]>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import ensemble
from sklearn.cross_validation import KFold
from sklearn.cross_validation import train_test_split
# Generate data (adapted from G. Ridgeway's gbm example)
n_samples = 1000
random_state = np.random.RandomState(13)
x1 = random_state.uniform(size=n_samples)
x2 = random_state.uniform(size=n_samples)
x3 = random_state.randint(0, 4, size=n_samples)
p = 1 / (1.0 + np.exp(-(np.sin(3 * x1) - 4 * x2 + x3)))
y = random_state.binomial(1, p, size=n_samples)
X = np.c_[x1, x2, x3]
X = X.astype(np.float32)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.5,
random_state=9)
# Fit classifier with out-of-bag estimates
params = {'n_estimators': 1200, 'max_depth': 3, 'subsample': 0.5,
'learning_rate': 0.01, 'min_samples_leaf': 1, 'random_state': 3}
clf = ensemble.GradientBoostingClassifier(**params)
clf.fit(X_train, y_train)
acc = clf.score(X_test, y_test)
print("Accuracy: {:.4f}".format(acc))
n_estimators = params['n_estimators']
x = np.arange(n_estimators) + 1
def heldout_score(clf, X_test, y_test):
"""compute deviance scores on ``X_test`` and ``y_test``. """
score = np.zeros((n_estimators,), dtype=np.float64)
for i, y_pred in enumerate(clf.staged_decision_function(X_test)):
score[i] = clf.loss_(y_test, y_pred)
return score
def cv_estimate(n_folds=3):
cv = KFold(n=X_train.shape[0], n_folds=n_folds)
cv_clf = ensemble.GradientBoostingClassifier(**params)
val_scores = np.zeros((n_estimators,), dtype=np.float64)
for train, test in cv:
cv_clf.fit(X_train[train], y_train[train])
val_scores += heldout_score(cv_clf, X_train[test], y_train[test])
val_scores /= n_folds
return val_scores
# Estimate best n_estimator using cross-validation
cv_score = cv_estimate(3)
# Compute best n_estimator for test data
test_score = heldout_score(clf, X_test, y_test)
# negative cumulative sum of oob improvements
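# oob_improvement_[i] is the reduction in loss on the out-of-bag samples at
# iteration i relative to the previous iteration, so its negative cumulative
# sum reconstructs the OOB loss curve up to an additive constant, comparable
# to the shifted test and CV curves computed below.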
cumsum = -np.cumsum(clf.oob_improvement_)
# min loss according to OOB
oob_best_iter = x[np.argmin(cumsum)]
# min loss according to test (normalize such that first loss is 0)
test_score -= test_score[0]
test_best_iter = x[np.argmin(test_score)]
# min loss according to cv (normalize such that first loss is 0)
cv_score -= cv_score[0]
cv_best_iter = x[np.argmin(cv_score)]
# color brew for the three curves
oob_color = list(map(lambda x: x / 256.0, (190, 174, 212)))
test_color = list(map(lambda x: x / 256.0, (127, 201, 127)))
cv_color = list(map(lambda x: x / 256.0, (253, 192, 134)))
# plot curves and vertical lines for best iterations
plt.plot(x, cumsum, label='OOB loss', color=oob_color)
plt.plot(x, test_score, label='Test loss', color=test_color)
plt.plot(x, cv_score, label='CV loss', color=cv_color)
plt.axvline(x=oob_best_iter, color=oob_color)
plt.axvline(x=test_best_iter, color=test_color)
plt.axvline(x=cv_best_iter, color=cv_color)
# add three vertical lines to xticks
xticks = plt.xticks()
xticks_pos = np.array(xticks[0].tolist() +
[oob_best_iter, cv_best_iter, test_best_iter])
xticks_label = np.array(list(map(lambda t: int(t), xticks[0])) +
['OOB', 'CV', 'Test'])
ind = np.argsort(xticks_pos)
xticks_pos = xticks_pos[ind]
xticks_label = xticks_label[ind]
plt.xticks(xticks_pos, xticks_label)
plt.legend(loc='upper right')
plt.ylabel('normalized loss')
plt.xlabel('number of iterations')
plt.show()
| bsd-3-clause |
fengzhyuan/scikit-learn | examples/semi_supervised/plot_label_propagation_digits_active_learning.py | 294 | 3417 | """
========================================
Label Propagation digits active learning
========================================
Demonstrates an active learning technique to learn handwritten digits
using label propagation.
We start by training a label propagation model with only 10 labeled points,
then we select the top five most uncertain points to label. Next, we train
with 15 labeled points (original 10 + 5 new ones). We repeat this process
four times to have a model trained with 30 labeled examples.
A plot will appear showing the top 5 most uncertain digits for each iteration
of training. These may or may not contain mistakes, but we will train the next
model with their true labels.
"""
print(__doc__)
# Authors: Clay Woolam <[email protected]>
# Licence: BSD
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
from sklearn import datasets
from sklearn.semi_supervised import label_propagation
from sklearn.metrics import classification_report, confusion_matrix
digits = datasets.load_digits()
rng = np.random.RandomState(0)
indices = np.arange(len(digits.data))
rng.shuffle(indices)
X = digits.data[indices[:330]]
y = digits.target[indices[:330]]
images = digits.images[indices[:330]]
n_total_samples = len(y)
n_labeled_points = 10
unlabeled_indices = np.arange(n_total_samples)[n_labeled_points:]
f = plt.figure()
for i in range(5):
y_train = np.copy(y)
y_train[unlabeled_indices] = -1
lp_model = label_propagation.LabelSpreading(gamma=0.25, max_iter=5)
lp_model.fit(X, y_train)
predicted_labels = lp_model.transduction_[unlabeled_indices]
true_labels = y[unlabeled_indices]
cm = confusion_matrix(true_labels, predicted_labels,
labels=lp_model.classes_)
print('Iteration %i %s' % (i, 70 * '_'))
print("Label Spreading model: %d labeled & %d unlabeled (%d total)"
% (n_labeled_points, n_total_samples - n_labeled_points, n_total_samples))
print(classification_report(true_labels, predicted_labels))
print("Confusion matrix")
print(cm)
# compute the entropies of transduced label distributions
pred_entropies = stats.distributions.entropy(
lp_model.label_distributions_.T)
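# pred_entropies[i] is the entropy of the transduced label distribution of
# sample i; high entropy marks the digits the model is least certain about,
# and the five largest values are selected for labeling in the next round.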
# select five digit examples that the classifier is most uncertain about
uncertainty_index = np.argsort(pred_entropies)[-5:]
# keep track of indices that we get labels for
delete_indices = np.array([])
f.text(.05, (1 - (i + 1) * .183),
"model %d\n\nfit with\n%d labels" % ((i + 1), i * 5 + 10), size=10)
for index, image_index in enumerate(uncertainty_index):
image = images[image_index]
sub = f.add_subplot(5, 5, index + 1 + (5 * i))
sub.imshow(image, cmap=plt.cm.gray_r)
sub.set_title('predict: %i\ntrue: %i' % (
lp_model.transduction_[image_index], y[image_index]), size=10)
sub.axis('off')
# labeling 5 points, remote from labeled set
delete_index, = np.where(unlabeled_indices == image_index)
delete_indices = np.concatenate((delete_indices, delete_index))
unlabeled_indices = np.delete(unlabeled_indices, delete_indices)
n_labeled_points += 5
f.suptitle("Active learning with Label Propagation.\nRows show 5 most "
"uncertain labels to learn with the next model.")
plt.subplots_adjust(0.12, 0.03, 0.9, 0.8, 0.2, 0.45)
plt.show()
| bsd-3-clause |
equialgo/scikit-learn | sklearn/svm/tests/test_sparse.py | 63 | 13366 | import numpy as np
from scipy import sparse
from numpy.testing import (assert_array_almost_equal, assert_array_equal,
assert_equal)
from sklearn import datasets, svm, linear_model, base
from sklearn.datasets import make_classification, load_digits, make_blobs
from sklearn.svm.tests import test_svm
from sklearn.exceptions import ConvergenceWarning
from sklearn.utils.extmath import safe_sparse_dot
from sklearn.utils.testing import (assert_raises, assert_true, assert_false,
assert_warns, assert_raise_message,
ignore_warnings)
# test sample 1
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]])
X_sp = sparse.lil_matrix(X)
Y = [1, 1, 1, 2, 2, 2]
T = np.array([[-1, -1], [2, 2], [3, 2]])
true_result = [1, 2, 2]
# test sample 2
X2 = np.array([[0, 0, 0], [1, 1, 1], [2, 0, 0, ],
[0, 0, 2], [3, 3, 3]])
X2_sp = sparse.dok_matrix(X2)
Y2 = [1, 2, 2, 2, 3]
T2 = np.array([[-1, -1, -1], [1, 1, 1], [2, 2, 2]])
true_result2 = [1, 2, 3]
iris = datasets.load_iris()
# permute
rng = np.random.RandomState(0)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
# sparsify
iris.data = sparse.csr_matrix(iris.data)
def check_svm_model_equal(dense_svm, sparse_svm, X_train, y_train, X_test):
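# Fit the same estimator on dense and sparse versions of the training data and
# check that support vectors, dual coefficients, predictions, decision values
# and (for SVC) probabilities agree; also check that predicting sparse input
# with a dense-trained model raises an informative error.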
dense_svm.fit(X_train.toarray(), y_train)
if sparse.isspmatrix(X_test):
X_test_dense = X_test.toarray()
else:
X_test_dense = X_test
sparse_svm.fit(X_train, y_train)
assert_true(sparse.issparse(sparse_svm.support_vectors_))
assert_true(sparse.issparse(sparse_svm.dual_coef_))
assert_array_almost_equal(dense_svm.support_vectors_,
sparse_svm.support_vectors_.toarray())
assert_array_almost_equal(dense_svm.dual_coef_, sparse_svm.dual_coef_.toarray())
if dense_svm.kernel == "linear":
assert_true(sparse.issparse(sparse_svm.coef_))
assert_array_almost_equal(dense_svm.coef_, sparse_svm.coef_.toarray())
assert_array_almost_equal(dense_svm.support_, sparse_svm.support_)
assert_array_almost_equal(dense_svm.predict(X_test_dense), sparse_svm.predict(X_test))
assert_array_almost_equal(dense_svm.decision_function(X_test_dense),
sparse_svm.decision_function(X_test))
assert_array_almost_equal(dense_svm.decision_function(X_test_dense),
sparse_svm.decision_function(X_test_dense))
if isinstance(dense_svm, svm.OneClassSVM):
msg = "cannot use sparse input in 'OneClassSVM' trained on dense data"
else:
assert_array_almost_equal(dense_svm.predict_proba(X_test_dense),
sparse_svm.predict_proba(X_test), 4)
msg = "cannot use sparse input in 'SVC' trained on dense data"
if sparse.isspmatrix(X_test):
assert_raise_message(ValueError, msg, dense_svm.predict, X_test)
def test_svc():
"""Check that sparse SVC gives the same result as SVC"""
# many class dataset:
X_blobs, y_blobs = make_blobs(n_samples=100, centers=10, random_state=0)
X_blobs = sparse.csr_matrix(X_blobs)
datasets = [[X_sp, Y, T], [X2_sp, Y2, T2],
[X_blobs[:80], y_blobs[:80], X_blobs[80:]],
[iris.data, iris.target, iris.data]]
kernels = ["linear", "poly", "rbf", "sigmoid"]
for dataset in datasets:
for kernel in kernels:
clf = svm.SVC(kernel=kernel, probability=True, random_state=0,
decision_function_shape='ovo')
sp_clf = svm.SVC(kernel=kernel, probability=True, random_state=0,
decision_function_shape='ovo')
check_svm_model_equal(clf, sp_clf, *dataset)
def test_unsorted_indices():
# test that the result with sorted and unsorted indices in csr is the same
# we use a subset of digits as iris, blobs or make_classification didn't
# show the problem
digits = load_digits()
X, y = digits.data[:50], digits.target[:50]
X_test = sparse.csr_matrix(digits.data[50:100])
X_sparse = sparse.csr_matrix(X)
coef_dense = svm.SVC(kernel='linear', probability=True,
random_state=0).fit(X, y).coef_
sparse_svc = svm.SVC(kernel='linear', probability=True,
random_state=0).fit(X_sparse, y)
coef_sorted = sparse_svc.coef_
# make sure dense and sparse SVM give the same result
assert_array_almost_equal(coef_dense, coef_sorted.toarray())
X_sparse_unsorted = X_sparse[np.arange(X.shape[0])]
X_test_unsorted = X_test[np.arange(X_test.shape[0])]
# make sure we scramble the indices
assert_false(X_sparse_unsorted.has_sorted_indices)
assert_false(X_test_unsorted.has_sorted_indices)
unsorted_svc = svm.SVC(kernel='linear', probability=True,
random_state=0).fit(X_sparse_unsorted, y)
coef_unsorted = unsorted_svc.coef_
# make sure unsorted indices give same result
assert_array_almost_equal(coef_unsorted.toarray(), coef_sorted.toarray())
assert_array_almost_equal(sparse_svc.predict_proba(X_test_unsorted),
sparse_svc.predict_proba(X_test))
def test_svc_with_custom_kernel():
kfunc = lambda x, y: safe_sparse_dot(x, y.T)
clf_lin = svm.SVC(kernel='linear').fit(X_sp, Y)
clf_mylin = svm.SVC(kernel=kfunc).fit(X_sp, Y)
assert_array_equal(clf_lin.predict(X_sp), clf_mylin.predict(X_sp))
def test_svc_iris():
# Test the sparse SVC with the iris dataset
for k in ('linear', 'poly', 'rbf'):
sp_clf = svm.SVC(kernel=k).fit(iris.data, iris.target)
clf = svm.SVC(kernel=k).fit(iris.data.toarray(), iris.target)
assert_array_almost_equal(clf.support_vectors_,
sp_clf.support_vectors_.toarray())
assert_array_almost_equal(clf.dual_coef_, sp_clf.dual_coef_.toarray())
assert_array_almost_equal(
clf.predict(iris.data.toarray()), sp_clf.predict(iris.data))
if k == 'linear':
assert_array_almost_equal(clf.coef_, sp_clf.coef_.toarray())
def test_sparse_decision_function():
#Test decision_function
#Sanity check, test that decision_function implemented in python
#returns the same as the one in libsvm
# multi class:
svc = svm.SVC(kernel='linear', C=0.1, decision_function_shape='ovo')
clf = svc.fit(iris.data, iris.target)
dec = safe_sparse_dot(iris.data, clf.coef_.T) + clf.intercept_
assert_array_almost_equal(dec, clf.decision_function(iris.data))
# binary:
clf.fit(X, Y)
dec = np.dot(X, clf.coef_.T) + clf.intercept_
prediction = clf.predict(X)
assert_array_almost_equal(dec.ravel(), clf.decision_function(X))
assert_array_almost_equal(
prediction,
clf.classes_[(clf.decision_function(X) > 0).astype(np.int).ravel()])
expected = np.array([-1., -0.66, -1., 0.66, 1., 1.])
assert_array_almost_equal(clf.decision_function(X), expected, 2)
def test_error():
# Test that it gives proper exception on deficient input
# impossible value of C
assert_raises(ValueError, svm.SVC(C=-1).fit, X, Y)
# impossible value of nu
clf = svm.NuSVC(nu=0.0)
assert_raises(ValueError, clf.fit, X_sp, Y)
Y2 = Y[:-1] # wrong dimensions for labels
assert_raises(ValueError, clf.fit, X_sp, Y2)
clf = svm.SVC()
clf.fit(X_sp, Y)
assert_array_equal(clf.predict(T), true_result)
def test_linearsvc():
# Similar to test_SVC
clf = svm.LinearSVC(random_state=0).fit(X, Y)
sp_clf = svm.LinearSVC(random_state=0).fit(X_sp, Y)
assert_true(sp_clf.fit_intercept)
assert_array_almost_equal(clf.coef_, sp_clf.coef_, decimal=4)
assert_array_almost_equal(clf.intercept_, sp_clf.intercept_, decimal=4)
assert_array_almost_equal(clf.predict(X), sp_clf.predict(X_sp))
clf.fit(X2, Y2)
sp_clf.fit(X2_sp, Y2)
assert_array_almost_equal(clf.coef_, sp_clf.coef_, decimal=4)
assert_array_almost_equal(clf.intercept_, sp_clf.intercept_, decimal=4)
def test_linearsvc_iris():
# Test the sparse LinearSVC with the iris dataset
sp_clf = svm.LinearSVC(random_state=0).fit(iris.data, iris.target)
clf = svm.LinearSVC(random_state=0).fit(iris.data.toarray(), iris.target)
assert_equal(clf.fit_intercept, sp_clf.fit_intercept)
assert_array_almost_equal(clf.coef_, sp_clf.coef_, decimal=1)
assert_array_almost_equal(clf.intercept_, sp_clf.intercept_, decimal=1)
assert_array_almost_equal(
clf.predict(iris.data.toarray()), sp_clf.predict(iris.data))
# check decision_function
pred = np.argmax(sp_clf.decision_function(iris.data), 1)
assert_array_almost_equal(pred, clf.predict(iris.data.toarray()))
# sparsify the coefficients on both models and check that they still
# produce the same results
clf.sparsify()
assert_array_equal(pred, clf.predict(iris.data))
sp_clf.sparsify()
assert_array_equal(pred, sp_clf.predict(iris.data))
def test_weight():
# Test class weights
X_, y_ = make_classification(n_samples=200, n_features=100,
weights=[0.833, 0.167], random_state=0)
X_ = sparse.csr_matrix(X_)
for clf in (linear_model.LogisticRegression(),
svm.LinearSVC(random_state=0),
svm.SVC()):
clf.set_params(class_weight={0: 5})
clf.fit(X_[:180], y_[:180])
y_pred = clf.predict(X_[180:])
assert_true(np.sum(y_pred == y_[180:]) >= 11)
def test_sample_weights():
# Test weights on individual samples
clf = svm.SVC()
clf.fit(X_sp, Y)
assert_array_equal(clf.predict([X[2]]), [1.])
sample_weight = [.1] * 3 + [10] * 3
clf.fit(X_sp, Y, sample_weight=sample_weight)
assert_array_equal(clf.predict([X[2]]), [2.])
def test_sparse_liblinear_intercept_handling():
# Test that sparse liblinear honours intercept_scaling param
test_svm.test_dense_liblinear_intercept_handling(svm.LinearSVC)
def test_sparse_oneclasssvm():
"""Check that sparse OneClassSVM gives the same result as dense OneClassSVM"""
# many class dataset:
X_blobs, _ = make_blobs(n_samples=100, centers=10, random_state=0)
X_blobs = sparse.csr_matrix(X_blobs)
datasets = [[X_sp, None, T], [X2_sp, None, T2],
[X_blobs[:80], None, X_blobs[80:]],
[iris.data, None, iris.data]]
kernels = ["linear", "poly", "rbf", "sigmoid"]
for dataset in datasets:
for kernel in kernels:
clf = svm.OneClassSVM(kernel=kernel, random_state=0)
sp_clf = svm.OneClassSVM(kernel=kernel, random_state=0)
check_svm_model_equal(clf, sp_clf, *dataset)
def test_sparse_realdata():
# Test on a subset from the 20newsgroups dataset.
# This catches some bugs if input is not correctly converted into
# sparse format or weights are not correctly initialized.
data = np.array([0.03771744, 0.1003567, 0.01174647, 0.027069])
indices = np.array([6, 5, 35, 31])
indptr = np.array(
[0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 4, 4, 4])
X = sparse.csr_matrix((data, indices, indptr))
y = np.array(
[1., 0., 2., 2., 1., 1., 1., 2., 2., 0., 1., 2., 2.,
0., 2., 0., 3., 0., 3., 0., 1., 1., 3., 2., 3., 2.,
0., 3., 1., 0., 2., 1., 2., 0., 1., 0., 2., 3., 1.,
3., 0., 1., 0., 0., 2., 0., 1., 2., 2., 2., 3., 2.,
0., 3., 2., 1., 2., 3., 2., 2., 0., 1., 0., 1., 2.,
3., 0., 0., 2., 2., 1., 3., 1., 1., 0., 1., 2., 1.,
1., 3.])
clf = svm.SVC(kernel='linear').fit(X.toarray(), y)
sp_clf = svm.SVC(kernel='linear').fit(sparse.coo_matrix(X), y)
assert_array_equal(clf.support_vectors_, sp_clf.support_vectors_.toarray())
assert_array_equal(clf.dual_coef_, sp_clf.dual_coef_.toarray())
def test_sparse_svc_clone_with_callable_kernel():
# Test that the "dense_fit" is called even though we use sparse input
# meaning that everything works fine.
a = svm.SVC(C=1, kernel=lambda x, y: x * y.T, probability=True,
random_state=0)
b = base.clone(a)
b.fit(X_sp, Y)
pred = b.predict(X_sp)
b.predict_proba(X_sp)
dense_svm = svm.SVC(C=1, kernel=lambda x, y: np.dot(x, y.T),
probability=True, random_state=0)
pred_dense = dense_svm.fit(X, Y).predict(X)
assert_array_equal(pred_dense, pred)
# b.decision_function(X_sp) # XXX : should be supported
def test_timeout():
sp = svm.SVC(C=1, kernel=lambda x, y: x * y.T, probability=True,
random_state=0, max_iter=1)
assert_warns(ConvergenceWarning, sp.fit, X_sp, Y)
def test_consistent_proba():
a = svm.SVC(probability=True, max_iter=1, random_state=0)
with ignore_warnings(category=ConvergenceWarning):
proba_1 = a.fit(X, Y).predict_proba(X)
a = svm.SVC(probability=True, max_iter=1, random_state=0)
with ignore_warnings(category=ConvergenceWarning):
proba_2 = a.fit(X, Y).predict_proba(X)
assert_array_almost_equal(proba_1, proba_2)
| bsd-3-clause |
xuewei4d/scikit-learn | examples/linear_model/plot_sparse_logistic_regression_20newsgroups.py | 18 | 4240 | """
======================================================
Multiclass sparse logistic regression on 20newsgroups
======================================================
Comparison of multinomial logistic L1 vs one-versus-rest L1 logistic regression
to classify documents from the 20 newsgroups dataset. Multinomial logistic
regression yields more accurate results and is faster to train on the larger
scale dataset.
Here we use the l1 penalty, which trims the weights of uninformative
features to zero. This is good if the goal is to extract the strongly
discriminative vocabulary of each class. If the goal is to get the best
predictive accuracy, it is better to use the non sparsity-inducing l2 penalty
instead.
A more traditional (and possibly better) way to predict on a sparse subset of
input features would be to use univariate feature selection followed by a
traditional (l2-penalised) logistic regression model.
"""
import timeit
import warnings
import matplotlib.pyplot as plt
import numpy as np
from sklearn.datasets import fetch_20newsgroups_vectorized
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.exceptions import ConvergenceWarning
print(__doc__)
# Author: Arthur Mensch
warnings.filterwarnings("ignore", category=ConvergenceWarning,
module="sklearn")
t0 = timeit.default_timer()
# We use SAGA solver
solver = 'saga'
# Turn down for faster run time
n_samples = 10000
X, y = fetch_20newsgroups_vectorized(subset='all', return_X_y=True)
X = X[:n_samples]
y = y[:n_samples]
X_train, X_test, y_train, y_test = train_test_split(X, y,
random_state=42,
stratify=y,
test_size=0.1)
train_samples, n_features = X_train.shape
n_classes = np.unique(y).shape[0]
print('Dataset 20newsgroup, train_samples=%i, n_features=%i, n_classes=%i'
% (train_samples, n_features, n_classes))
models = {'ovr': {'name': 'One versus Rest', 'iters': [1, 2, 4]},
'multinomial': {'name': 'Multinomial', 'iters': [1, 3, 7]}}
for model in models:
# Add initial chance-level values for plotting purpose
accuracies = [1 / n_classes]
times = [0]
densities = [1]
model_params = models[model]
# Small number of epochs for fast runtime
for this_max_iter in model_params['iters']:
print('[model=%s, solver=%s] Number of epochs: %s' %
(model_params['name'], solver, this_max_iter))
lr = LogisticRegression(solver=solver,
multi_class=model,
penalty='l1',
max_iter=this_max_iter,
random_state=42,
)
t1 = timeit.default_timer()
lr.fit(X_train, y_train)
train_time = timeit.default_timer() - t1
y_pred = lr.predict(X_test)
accuracy = np.sum(y_pred == y_test) / y_test.shape[0]
density = np.mean(lr.coef_ != 0, axis=1) * 100
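# density: percentage of non-zero coefficients per class, i.e. how sparse the
# l1-penalised weight vectors are after this number of epochs.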
accuracies.append(accuracy)
densities.append(density)
times.append(train_time)
models[model]['times'] = times
models[model]['densities'] = densities
models[model]['accuracies'] = accuracies
print('Test accuracy for model %s: %.4f' % (model, accuracies[-1]))
print('%% non-zero coefficients for model %s, '
'per class:\n %s' % (model, densities[-1]))
print('Run time (%i epochs) for model %s:'
'%.2f' % (model_params['iters'][-1], model, times[-1]))
fig = plt.figure()
ax = fig.add_subplot(111)
for model in models:
name = models[model]['name']
times = models[model]['times']
accuracies = models[model]['accuracies']
ax.plot(times, accuracies, marker='o',
label='Model: %s' % name)
ax.set_xlabel('Train time (s)')
ax.set_ylabel('Test accuracy')
ax.legend()
fig.suptitle('Multinomial vs One-vs-Rest Logistic L1\n'
'Dataset %s' % '20newsgroups')
fig.tight_layout()
fig.subplots_adjust(top=0.85)
run_time = timeit.default_timer() - t0
print('Example run in %.3f s' % run_time)
plt.show()
| bsd-3-clause |
madjelan/scikit-learn | examples/applications/plot_outlier_detection_housing.py | 243 | 5577 | """
====================================
Outlier detection on a real data set
====================================
This example illustrates the need for robust covariance estimation
on a real data set. It is useful both for outlier detection and for
a better understanding of the data structure.
We selected two sets of two variables from the Boston housing data set
as an illustration of what kind of analysis can be done with several
outlier detection tools. For the purpose of visualization, we are working
with two-dimensional examples, but one should be aware that things are
not so trivial in high-dimension, as it will be pointed out.
In both examples below, the main result is that the empirical covariance
estimate, as a non-robust one, is highly influenced by the heterogeneous
structure of the observations. Although the robust covariance estimate is
able to focus on the main mode of the data distribution, it sticks to the
assumption that the data should be Gaussian distributed, yielding a somewhat
biased estimate of the data structure that is nevertheless accurate to some
extent. The One-Class SVM algorithm does not assume any parametric form for
the data distribution and can therefore model the complex shape of the data
much better.
First example
-------------
The first example illustrates how robust covariance estimation can help
concentrating on a relevant cluster when another one exists. Here, many
observations are confounded into one and break down the empirical covariance
estimation.
Of course, some screening tools would have pointed out the presence of two
clusters (Support Vector Machines, Gaussian Mixture Models, univariate
outlier detection, ...). But had it been a high-dimensional example, none
of these could be applied that easily.
Second example
--------------
The second example shows the ability of the Minimum Covariance Determinant
robust estimator of covariance to concentrate on the main mode of the data
distribution: the location seems to be well estimated, although the covariance
is hard to estimate due to the banana-shaped distribution. Anyway, we can
get rid of some outlying observations.
The One-Class SVM is able to capture the real data structure, but the
difficulty is to adjust its kernel bandwidth parameter so as to obtain
a good compromise between the shape of the data scatter matrix and the
risk of over-fitting the data.
"""
print(__doc__)
# Author: Virgile Fritsch <[email protected]>
# License: BSD 3 clause
import numpy as np
from sklearn.covariance import EllipticEnvelope
from sklearn.svm import OneClassSVM
import matplotlib.pyplot as plt
import matplotlib.font_manager
from sklearn.datasets import load_boston
# Get data
X1 = load_boston()['data'][:, [8, 10]] # two clusters
X2 = load_boston()['data'][:, [5, 12]] # "banana"-shaped
# Define "classifiers" to be used
classifiers = {
"Empirical Covariance": EllipticEnvelope(support_fraction=1.,
contamination=0.261),
"Robust Covariance (Minimum Covariance Determinant)":
EllipticEnvelope(contamination=0.261),
"OCSVM": OneClassSVM(nu=0.261, gamma=0.05)}
colors = ['m', 'g', 'b']
legend1 = {}
legend2 = {}
# Learn a frontier for outlier detection with several classifiers
xx1, yy1 = np.meshgrid(np.linspace(-8, 28, 500), np.linspace(3, 40, 500))
xx2, yy2 = np.meshgrid(np.linspace(3, 10, 500), np.linspace(-5, 45, 500))
for i, (clf_name, clf) in enumerate(classifiers.items()):
plt.figure(1)
clf.fit(X1)
Z1 = clf.decision_function(np.c_[xx1.ravel(), yy1.ravel()])
Z1 = Z1.reshape(xx1.shape)
legend1[clf_name] = plt.contour(
xx1, yy1, Z1, levels=[0], linewidths=2, colors=colors[i])
plt.figure(2)
clf.fit(X2)
Z2 = clf.decision_function(np.c_[xx2.ravel(), yy2.ravel()])
Z2 = Z2.reshape(xx2.shape)
legend2[clf_name] = plt.contour(
xx2, yy2, Z2, levels=[0], linewidths=2, colors=colors[i])
legend1_values_list = list( legend1.values() )
legend1_keys_list = list( legend1.keys() )
# Plot the results (= shape of the data points cloud)
plt.figure(1) # two clusters
plt.title("Outlier detection on a real data set (boston housing)")
plt.scatter(X1[:, 0], X1[:, 1], color='black')
bbox_args = dict(boxstyle="round", fc="0.8")
arrow_args = dict(arrowstyle="->")
plt.annotate("several confounded points", xy=(24, 19),
xycoords="data", textcoords="data",
xytext=(13, 10), bbox=bbox_args, arrowprops=arrow_args)
plt.xlim((xx1.min(), xx1.max()))
plt.ylim((yy1.min(), yy1.max()))
plt.legend((legend1_values_list[0].collections[0],
legend1_values_list[1].collections[0],
legend1_values_list[2].collections[0]),
(legend1_keys_list[0], legend1_keys_list[1], legend1_keys_list[2]),
loc="upper center",
prop=matplotlib.font_manager.FontProperties(size=12))
plt.ylabel("accessibility to radial highways")
plt.xlabel("pupil-teacher ratio by town")
legend2_values_list = list( legend2.values() )
legend2_keys_list = list( legend2.keys() )
plt.figure(2) # "banana" shape
plt.title("Outlier detection on a real data set (boston housing)")
plt.scatter(X2[:, 0], X2[:, 1], color='black')
plt.xlim((xx2.min(), xx2.max()))
plt.ylim((yy2.min(), yy2.max()))
plt.legend((legend2_values_list[0].collections[0],
legend2_values_list[1].collections[0],
legend2_values_list[2].collections[0]),
(legend2_keys_list[0], legend2_keys_list[1], legend2_keys_list[2]),
loc="upper center",
prop=matplotlib.font_manager.FontProperties(size=12))
plt.ylabel("% lower status of the population")
plt.xlabel("average number of rooms per dwelling")
plt.show()
| bsd-3-clause |
nrhine1/scikit-learn | sklearn/linear_model/tests/test_theil_sen.py | 234 | 9928 | """
Testing for Theil-Sen module (sklearn.linear_model.theil_sen)
"""
# Author: Florian Wilhelm <[email protected]>
# License: BSD 3 clause
from __future__ import division, print_function, absolute_import
import os
import sys
from contextlib import contextmanager
import numpy as np
from numpy.testing import assert_array_equal, assert_array_less
from numpy.testing import assert_array_almost_equal, assert_warns
from scipy.linalg import norm
from scipy.optimize import fmin_bfgs
from nose.tools import raises, assert_almost_equal
from sklearn.utils import ConvergenceWarning
from sklearn.linear_model import LinearRegression, TheilSenRegressor
from sklearn.linear_model.theil_sen import _spatial_median, _breakdown_point
from sklearn.linear_model.theil_sen import _modified_weiszfeld_step
from sklearn.utils.testing import assert_greater, assert_less
@contextmanager
def no_stdout_stderr():
old_stdout = sys.stdout
old_stderr = sys.stderr
sys.stdout = open(os.devnull, 'w')
sys.stderr = open(os.devnull, 'w')
yield
sys.stdout.flush()
sys.stderr.flush()
sys.stdout = old_stdout
sys.stderr = old_stderr
def gen_toy_problem_1d(intercept=True):
random_state = np.random.RandomState(0)
# Linear model y = 3*x + N(2, 0.1**2)
w = 3.
if intercept:
c = 2.
n_samples = 50
else:
c = 0.1
n_samples = 100
x = random_state.normal(size=n_samples)
noise = 0.1 * random_state.normal(size=n_samples)
y = w * x + c + noise
# Add some outliers
if intercept:
x[42], y[42] = (-2, 4)
x[43], y[43] = (-2.5, 8)
x[33], y[33] = (2.5, 1)
x[49], y[49] = (2.1, 2)
else:
x[42], y[42] = (-2, 4)
x[43], y[43] = (-2.5, 8)
x[53], y[53] = (2.5, 1)
x[60], y[60] = (2.1, 2)
x[72], y[72] = (1.8, -7)
return x[:, np.newaxis], y, w, c
def gen_toy_problem_2d():
random_state = np.random.RandomState(0)
n_samples = 100
# Linear model y = 5*x_1 + 10*x_2 + N(1, 0.1**2)
X = random_state.normal(size=(n_samples, 2))
w = np.array([5., 10.])
c = 1.
noise = 0.1 * random_state.normal(size=n_samples)
y = np.dot(X, w) + c + noise
# Add some outliers
n_outliers = n_samples // 10
ix = random_state.randint(0, n_samples, size=n_outliers)
y[ix] = 50 * random_state.normal(size=n_outliers)
return X, y, w, c
def gen_toy_problem_4d():
random_state = np.random.RandomState(0)
n_samples = 10000
# Linear model y = 5*x_1 + 10*x_2 + 42*x_3 + 7*x_4 + N(1, 0.1**2)
X = random_state.normal(size=(n_samples, 4))
w = np.array([5., 10., 42., 7.])
c = 1.
noise = 0.1 * random_state.normal(size=n_samples)
y = np.dot(X, w) + c + noise
# Add some outliers
n_outliers = n_samples // 10
ix = random_state.randint(0, n_samples, size=n_outliers)
y[ix] = 50 * random_state.normal(size=n_outliers)
return X, y, w, c
def test_modweiszfeld_step_1d():
X = np.array([1., 2., 3.]).reshape(3, 1)
# Check startvalue is element of X and solution
median = 2.
new_y = _modified_weiszfeld_step(X, median)
assert_array_almost_equal(new_y, median)
# Check startvalue is not the solution
y = 2.5
new_y = _modified_weiszfeld_step(X, y)
assert_array_less(median, new_y)
assert_array_less(new_y, y)
# Check startvalue is not the solution but element of X
y = 3.
new_y = _modified_weiszfeld_step(X, y)
assert_array_less(median, new_y)
assert_array_less(new_y, y)
# Check that a single vector is identity
X = np.array([1., 2., 3.]).reshape(1, 3)
y = X[0, ]
new_y = _modified_weiszfeld_step(X, y)
assert_array_equal(y, new_y)
def test_modweiszfeld_step_2d():
X = np.array([0., 0., 1., 1., 0., 1.]).reshape(3, 2)
y = np.array([0.5, 0.5])
# Check first two iterations
new_y = _modified_weiszfeld_step(X, y)
assert_array_almost_equal(new_y, np.array([1 / 3, 2 / 3]))
new_y = _modified_weiszfeld_step(X, new_y)
assert_array_almost_equal(new_y, np.array([0.2792408, 0.7207592]))
# Check fix point
y = np.array([0.21132505, 0.78867497])
new_y = _modified_weiszfeld_step(X, y)
assert_array_almost_equal(new_y, y)
def test_spatial_median_1d():
X = np.array([1., 2., 3.]).reshape(3, 1)
true_median = 2.
_, median = _spatial_median(X)
assert_array_almost_equal(median, true_median)
# Test larger problem and for exact solution in 1d case
random_state = np.random.RandomState(0)
X = random_state.randint(100, size=(1000, 1))
true_median = np.median(X.ravel())
_, median = _spatial_median(X)
assert_array_equal(median, true_median)
def test_spatial_median_2d():
X = np.array([0., 0., 1., 1., 0., 1.]).reshape(3, 2)
_, median = _spatial_median(X, max_iter=100, tol=1.e-6)
def cost_func(y):
dists = np.array([norm(x - y) for x in X])
return np.sum(dists)
# Check if median is solution of the Fermat-Weber location problem
fermat_weber = fmin_bfgs(cost_func, median, disp=False)
assert_array_almost_equal(median, fermat_weber)
# Check when maximum iteration is exceeded a warning is emitted
assert_warns(ConvergenceWarning, _spatial_median, X, max_iter=30, tol=0.)
def test_theil_sen_1d():
X, y, w, c = gen_toy_problem_1d()
# Check that Least Squares fails
lstq = LinearRegression().fit(X, y)
assert_greater(np.abs(lstq.coef_ - w), 0.9)
# Check that Theil-Sen works
theil_sen = TheilSenRegressor(random_state=0).fit(X, y)
assert_array_almost_equal(theil_sen.coef_, w, 1)
assert_array_almost_equal(theil_sen.intercept_, c, 1)
def test_theil_sen_1d_no_intercept():
X, y, w, c = gen_toy_problem_1d(intercept=False)
# Check that Least Squares fails
lstq = LinearRegression(fit_intercept=False).fit(X, y)
assert_greater(np.abs(lstq.coef_ - w - c), 0.5)
# Check that Theil-Sen works
theil_sen = TheilSenRegressor(fit_intercept=False,
random_state=0).fit(X, y)
assert_array_almost_equal(theil_sen.coef_, w + c, 1)
assert_almost_equal(theil_sen.intercept_, 0.)
def test_theil_sen_2d():
X, y, w, c = gen_toy_problem_2d()
# Check that Least Squares fails
lstq = LinearRegression().fit(X, y)
assert_greater(norm(lstq.coef_ - w), 1.0)
# Check that Theil-Sen works
theil_sen = TheilSenRegressor(max_subpopulation=1e3,
random_state=0).fit(X, y)
assert_array_almost_equal(theil_sen.coef_, w, 1)
assert_array_almost_equal(theil_sen.intercept_, c, 1)
def test_calc_breakdown_point():
bp = _breakdown_point(1e10, 2)
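# For n_samples -> inf with n_subsamples=2, the Theil-Sen breakdown point
# approaches 1 - 1/sqrt(2) (about 29.3%), which is what the assertion checks.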
assert_less(np.abs(bp - 1 + 1/(np.sqrt(2))), 1.e-6)
@raises(ValueError)
def test_checksubparams_negative_subpopulation():
X, y, w, c = gen_toy_problem_1d()
TheilSenRegressor(max_subpopulation=-1, random_state=0).fit(X, y)
@raises(ValueError)
def test_checksubparams_too_few_subsamples():
X, y, w, c = gen_toy_problem_1d()
TheilSenRegressor(n_subsamples=1, random_state=0).fit(X, y)
@raises(ValueError)
def test_checksubparams_too_many_subsamples():
X, y, w, c = gen_toy_problem_1d()
TheilSenRegressor(n_subsamples=101, random_state=0).fit(X, y)
@raises(ValueError)
def test_checksubparams_n_subsamples_if_less_samples_than_features():
random_state = np.random.RandomState(0)
n_samples, n_features = 10, 20
X = random_state.normal(size=(n_samples, n_features))
y = random_state.normal(size=n_samples)
TheilSenRegressor(n_subsamples=9, random_state=0).fit(X, y)
def test_subpopulation():
X, y, w, c = gen_toy_problem_4d()
theil_sen = TheilSenRegressor(max_subpopulation=250,
random_state=0).fit(X, y)
assert_array_almost_equal(theil_sen.coef_, w, 1)
assert_array_almost_equal(theil_sen.intercept_, c, 1)
def test_subsamples():
X, y, w, c = gen_toy_problem_4d()
theil_sen = TheilSenRegressor(n_subsamples=X.shape[0],
random_state=0).fit(X, y)
lstq = LinearRegression().fit(X, y)
# Check for exact the same results as Least Squares
assert_array_almost_equal(theil_sen.coef_, lstq.coef_, 9)
def test_verbosity():
X, y, w, c = gen_toy_problem_1d()
# Check that Theil-Sen can be verbose
with no_stdout_stderr():
TheilSenRegressor(verbose=True, random_state=0).fit(X, y)
TheilSenRegressor(verbose=True,
max_subpopulation=10,
random_state=0).fit(X, y)
def test_theil_sen_parallel():
X, y, w, c = gen_toy_problem_2d()
# Check that Least Squares fails
lstq = LinearRegression().fit(X, y)
assert_greater(norm(lstq.coef_ - w), 1.0)
# Check that Theil-Sen works
theil_sen = TheilSenRegressor(n_jobs=-1,
random_state=0,
max_subpopulation=2e3).fit(X, y)
assert_array_almost_equal(theil_sen.coef_, w, 1)
assert_array_almost_equal(theil_sen.intercept_, c, 1)
def test_less_samples_than_features():
random_state = np.random.RandomState(0)
n_samples, n_features = 10, 20
X = random_state.normal(size=(n_samples, n_features))
y = random_state.normal(size=n_samples)
# Check that Theil-Sen falls back to Least Squares if fit_intercept=False
theil_sen = TheilSenRegressor(fit_intercept=False,
random_state=0).fit(X, y)
lstq = LinearRegression(fit_intercept=False).fit(X, y)
assert_array_almost_equal(theil_sen.coef_, lstq.coef_, 12)
# Check fit_intercept=True case. This will not be equal to the Least
# Squares solution since the intercept is calculated differently.
theil_sen = TheilSenRegressor(fit_intercept=True, random_state=0).fit(X, y)
y_pred = theil_sen.predict(X)
assert_array_almost_equal(y_pred, y, 12)
| bsd-3-clause |
dingocuster/scikit-learn | sklearn/datasets/mlcomp.py | 289 | 3855 | # Copyright (c) 2010 Olivier Grisel <[email protected]>
# License: BSD 3 clause
"""Glue code to load http://mlcomp.org data as a scikit.learn dataset"""
import os
import numbers
from sklearn.datasets.base import load_files
def _load_document_classification(dataset_path, metadata, set_=None, **kwargs):
if set_ is not None:
dataset_path = os.path.join(dataset_path, set_)
return load_files(dataset_path, metadata.get('description'), **kwargs)
LOADERS = {
'DocumentClassification': _load_document_classification,
# TODO: implement the remaining domain formats
}
def load_mlcomp(name_or_id, set_="raw", mlcomp_root=None, **kwargs):
"""Load a datasets as downloaded from http://mlcomp.org
Parameters
----------
    name_or_id : the integer id or the metadata name of the MLComp
                 dataset to load
set_ : select the portion to load: 'train', 'test' or 'raw'
mlcomp_root : the filesystem path to the root folder where MLComp datasets
are stored, if mlcomp_root is None, the MLCOMP_DATASETS_HOME
environment variable is looked up instead.
**kwargs : domain specific kwargs to be passed to the dataset loader.
Read more in the :ref:`User Guide <datasets>`.
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
        'filenames', the files holding the raw data to learn, 'target', the
classification labels (integer index), 'target_names',
the meaning of the labels, and 'DESCR', the full description of the
dataset.
    Note on the lookup process: depending on the type of name_or_id, the
    loader will choose between an integer id lookup or a metadata name lookup,
    by looking at the unzipped archives and metadata file.
TODO: implement zip dataset loading too
"""
if mlcomp_root is None:
try:
mlcomp_root = os.environ['MLCOMP_DATASETS_HOME']
except KeyError:
raise ValueError("MLCOMP_DATASETS_HOME env variable is undefined")
mlcomp_root = os.path.expanduser(mlcomp_root)
mlcomp_root = os.path.abspath(mlcomp_root)
mlcomp_root = os.path.normpath(mlcomp_root)
if not os.path.exists(mlcomp_root):
raise ValueError("Could not find folder: " + mlcomp_root)
# dataset lookup
if isinstance(name_or_id, numbers.Integral):
# id lookup
dataset_path = os.path.join(mlcomp_root, str(name_or_id))
else:
# assume name based lookup
dataset_path = None
expected_name_line = "name: " + name_or_id
for dataset in os.listdir(mlcomp_root):
metadata_file = os.path.join(mlcomp_root, dataset, 'metadata')
if not os.path.exists(metadata_file):
continue
with open(metadata_file) as f:
for line in f:
if line.strip() == expected_name_line:
dataset_path = os.path.join(mlcomp_root, dataset)
break
if dataset_path is None:
raise ValueError("Could not find dataset with metadata line: " +
expected_name_line)
# loading the dataset metadata
metadata = dict()
metadata_file = os.path.join(dataset_path, 'metadata')
if not os.path.exists(metadata_file):
raise ValueError(dataset_path + ' is not a valid MLComp dataset')
with open(metadata_file) as f:
for line in f:
if ":" in line:
key, value = line.split(":", 1)
metadata[key.strip()] = value.strip()
    format = metadata.get('format', 'unknown')
loader = LOADERS.get(format)
if loader is None:
raise ValueError("No loader implemented for format: " + format)
return loader(dataset_path, metadata, set_=set_, **kwargs)
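def _example_load_mlcomp():
    """Usage sketch (not part of the original module).

    Assumes MLCOMP_DATASETS_HOME points at a folder containing an unzipped
    MLComp archive; the dataset name '20news-18828' below is only an
    illustrative guess and must match a metadata 'name:' entry on disk.
    """
    news_train = load_mlcomp('20news-18828', 'train')
    return news_train.filenames, news_train.target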
| bsd-3-clause |
coolralf/KaggleTraining | Titanic/titanic.py | 1 | 3377 | #%%
import pandas as pd
import numpy as np
import os
from pandas import Series,DataFrame
path = os.getcwd()
data_train = pd.read_csv(r"Titanic/train.csv")
#data_info...
from sklearn.ensemble import RandomForestRegressor
def set_missing_ages(df):
age_df = df[['Age','Fare','Parch','SibSp','Pclass']]
known_age = age_df[age_df.Age.notnull()].as_matrix()
unknown_age = age_df[age_df.Age.isnull()].as_matrix()
y = known_age[:,0]
X = known_age[:,1:]
rfr = RandomForestRegressor(random_state=0,n_estimators=2000,n_jobs=-1)
rfr.fit(X,y)
predictedAges = rfr.predict(unknown_age[:,1::])
df.loc[(df.Age.isnull()),'Age'] = predictedAges
return df,rfr
def set_Cabin_type(df):
df.loc[(df.Cabin.notnull()),'Cabin'] = 'Yes'
df.loc[(df.Cabin.isnull()),'Cabin'] = 'No'
return df
data_train, rfr = set_missing_ages(data_train)
data_train = set_Cabin_type(data_train)
dummies_Cabin = pd.get_dummies(data_train["Cabin"],prefix='Cabin')
dummies_Embarked = pd.get_dummies(data_train['Embarked'],prefix='Embarked')
dummies_Sex = pd.get_dummies(data_train['Sex'],prefix='Sex')
dummies_Pclass = pd.get_dummies(data_train['Pclass'],prefix='Pclass')
df = pd.concat([data_train,dummies_Cabin,dummies_Embarked,dummies_Pclass,dummies_Sex],axis = 1)
df.drop(['Cabin','Pclass','Sex','Name','Ticket','Embarked'],axis = 1,inplace=True)
#scaling
import sklearn.preprocessing as preprocessing
scaler = preprocessing.StandardScaler()
age_scale_param = scaler.fit(df['Age'])
df['Age_scaled'] = scaler.fit_transform(df['Age'],age_scale_param)
fare_scale_param = scaler.fit(df['Fare'])
df['Fare_scaled']= scaler.fit_transform(df['Fare'],fare_scale_param)
#modelling
from sklearn import linear_model
train_df = df.filter(regex='Survived|Age_.*|SibSp|Parch|Fare_.*|Cabin_.*|Embarked_.*|Sex_.*|Pclass_.*')
train_np = train_df.as_matrix()
y = train_np[:,0]
X = train_np[:,1:]
clf = linear_model.LogisticRegression(C=1.0,penalty='l2',tol=1e-4)
clf.fit(X,y)
# model save
from sklearn.externals import joblib
joblib.dump(clf,"Titanic/clf.model")
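# Illustration (not part of the original script): the persisted classifier can
# be restored later and reused without retraining, assuming the same relative
# path that was passed to joblib.dump above.
clf_restored = joblib.load("Titanic/clf.model")
restored_matches = (clf_restored.predict(X[:5]) == clf.predict(X[:5])).all()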
#processing test_data
data_test= pd.read_csv(r"Titanic/test.csv")
data_test.loc[(data_test.Fare.isnull()),'Fare']= 0
tmp_df = data_test[['Age','Fare','Parch','SibSp','Pclass']]
null_age = tmp_df[data_test.Age.isnull()].as_matrix()
X = null_age[:,1:]
predictedAges = rfr.predict(X)
data_test.loc[(data_test.Age.isnull()),'Age'] = predictedAges
data_test = set_Cabin_type(data_test)
dummies_Cabin = pd.get_dummies(data_test["Cabin"],prefix='Cabin')
dummies_Embarked = pd.get_dummies(data_test['Embarked'],prefix='Embarked')
dummies_Sex = pd.get_dummies(data_test['Sex'],prefix='Sex')
dummies_Pclass = pd.get_dummies(data_test['Pclass'],prefix='Pclass')
df_test = pd.concat([data_test,dummies_Cabin,dummies_Embarked,dummies_Pclass,dummies_Sex],axis = 1)
df_test.drop(['Cabin','Pclass','Sex','Name','Ticket','Embarked'],axis = 1,inplace=True)
df_test['Age_scaled'] = scaler.fit_transform(df_test['Age'],age_scale_param)
df_test['Fare_scaled']= scaler.fit_transform(df_test['Fare'],fare_scale_param)
test = df_test.filter(regex='Survived|Age_.*|SibSp|Parch|Fare_.*|Cabin_.*|Embarked_.*|Sex_.*|Pclass_.*')
predictions = clf.predict(test)
result = pd.DataFrame({'PassengerId':data_test['PassengerId'].as_matrix(),'Survived':predictions.astype(np.int32)})
result.to_csv("Titanic/result.csv",index=False) | mit |
alexeyum/scikit-learn | benchmarks/bench_sample_without_replacement.py | 397 | 8008 | """
Benchmarks for sampling without replacement of integers.
"""
from __future__ import division
from __future__ import print_function
import gc
import sys
import optparse
from datetime import datetime
import operator
import matplotlib.pyplot as plt
import numpy as np
import random
from sklearn.externals.six.moves import xrange
from sklearn.utils.random import sample_without_replacement
def compute_time(t_start, delta):
mu_second = 0.0 + 10 ** 6 # number of microseconds in a second
return delta.seconds + delta.microseconds / mu_second
def bench_sample(sampling, n_population, n_samples):
gc.collect()
# start time
t_start = datetime.now()
sampling(n_population, n_samples)
delta = (datetime.now() - t_start)
# stop time
time = compute_time(t_start, delta)
return time
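# Small usage sketch (not part of the original benchmark): a direct call to
# sample_without_replacement, the utility being benchmarked below. The helper
# name is illustrative only.
def example_sample_without_replacement():
    # Draw 10 distinct integers from range(100); method="auto" lets the
    # implementation pick a suitable algorithm for the requested ratio.
    return sample_without_replacement(n_population=100, n_samples=10,
                                      method="auto", random_state=0)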
if __name__ == "__main__":
###########################################################################
# Option parser
###########################################################################
op = optparse.OptionParser()
op.add_option("--n-times",
dest="n_times", default=5, type=int,
help="Benchmark results are average over n_times experiments")
op.add_option("--n-population",
dest="n_population", default=100000, type=int,
help="Size of the population to sample from.")
op.add_option("--n-step",
dest="n_steps", default=5, type=int,
help="Number of step interval between 0 and n_population.")
default_algorithms = "custom-tracking-selection,custom-auto," \
"custom-reservoir-sampling,custom-pool,"\
"python-core-sample,numpy-permutation"
op.add_option("--algorithm",
dest="selected_algorithm",
default=default_algorithms,
type=str,
help="Comma-separated list of transformer to benchmark. "
"Default: %default. \nAvailable: %default")
# op.add_option("--random-seed",
# dest="random_seed", default=13, type=int,
# help="Seed used by the random number generators.")
(opts, args) = op.parse_args()
if len(args) > 0:
op.error("this script takes no arguments.")
sys.exit(1)
selected_algorithm = opts.selected_algorithm.split(',')
for key in selected_algorithm:
if key not in default_algorithms.split(','):
raise ValueError("Unknown sampling algorithm \"%s\" not in (%s)."
% (key, default_algorithms))
###########################################################################
# List sampling algorithm
###########################################################################
    # We assume that the sampling algorithm has the following signature:
# sample(n_population, n_sample)
#
sampling_algorithm = {}
###########################################################################
# Set Python core input
sampling_algorithm["python-core-sample"] = \
lambda n_population, n_sample: \
random.sample(xrange(n_population), n_sample)
###########################################################################
# Set custom automatic method selection
sampling_algorithm["custom-auto"] = \
lambda n_population, n_samples, random_state=None: \
sample_without_replacement(n_population,
n_samples,
method="auto",
random_state=random_state)
###########################################################################
# Set custom tracking based method
sampling_algorithm["custom-tracking-selection"] = \
lambda n_population, n_samples, random_state=None: \
sample_without_replacement(n_population,
n_samples,
method="tracking_selection",
random_state=random_state)
###########################################################################
# Set custom reservoir based method
sampling_algorithm["custom-reservoir-sampling"] = \
lambda n_population, n_samples, random_state=None: \
sample_without_replacement(n_population,
n_samples,
method="reservoir_sampling",
random_state=random_state)
###########################################################################
    # Set custom pool based method
sampling_algorithm["custom-pool"] = \
lambda n_population, n_samples, random_state=None: \
sample_without_replacement(n_population,
n_samples,
method="pool",
random_state=random_state)
###########################################################################
# Numpy permutation based
sampling_algorithm["numpy-permutation"] = \
lambda n_population, n_sample: \
np.random.permutation(n_population)[:n_sample]
###########################################################################
# Remove unspecified algorithm
sampling_algorithm = dict((key, value)
for key, value in sampling_algorithm.items()
if key in selected_algorithm)
###########################################################################
# Perform benchmark
###########################################################################
time = {}
n_samples = np.linspace(start=0, stop=opts.n_population,
num=opts.n_steps).astype(np.int)
ratio = n_samples / opts.n_population
print('Benchmarks')
print("===========================")
for name in sorted(sampling_algorithm):
print("Perform benchmarks for %s..." % name, end="")
time[name] = np.zeros(shape=(opts.n_steps, opts.n_times))
for step in xrange(opts.n_steps):
for it in xrange(opts.n_times):
time[name][step, it] = bench_sample(sampling_algorithm[name],
opts.n_population,
n_samples[step])
print("done")
print("Averaging results...", end="")
for name in sampling_algorithm:
time[name] = np.mean(time[name], axis=1)
print("done\n")
# Print results
###########################################################################
print("Script arguments")
print("===========================")
arguments = vars(opts)
print("%s \t | %s " % ("Arguments".ljust(16),
"Value".center(12),))
print(25 * "-" + ("|" + "-" * 14) * 1)
for key, value in arguments.items():
print("%s \t | %s " % (str(key).ljust(16),
str(value).strip().center(12)))
print("")
print("Sampling algorithm performance:")
print("===============================")
print("Results are averaged over %s repetition(s)." % opts.n_times)
print("")
fig = plt.figure('scikit-learn sample w/o replacement benchmark results')
plt.title("n_population = %s, n_times = %s" %
(opts.n_population, opts.n_times))
ax = fig.add_subplot(111)
for name in sampling_algorithm:
ax.plot(ratio, time[name], label=name)
ax.set_xlabel('ratio of n_sample / n_population')
ax.set_ylabel('Time (s)')
ax.legend()
# Sort legend labels
handles, labels = ax.get_legend_handles_labels()
hl = sorted(zip(handles, labels), key=operator.itemgetter(1))
handles2, labels2 = zip(*hl)
ax.legend(handles2, labels2, loc=0)
plt.show()
| bsd-3-clause |
EPAENERGYSTAR/epathermostat | docs/conf.py | 1 | 9869 | # -*- coding: utf-8 -*-
#
# thermostat documentation build configuration file, created by
# sphinx-quickstart on Mon May 18 15:25:28 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
from mock import Mock as MagicMock
class Mock(MagicMock):
@classmethod
def __getattr__(cls, name):
return Mock()
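# The Mock subclass above stubs out the heavy scientific dependencies listed in
# MOCK_MODULES below so that sphinx.ext.autodoc can import the thermostat
# package on build hosts where pandas/scipy/eemeter are not installed.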
MOCK_MODULES = [
'pandas',
'eemeter',
'eemeter.consumption',
'eemeter.weather',
'eemeter.weather.location',
'eemeter.location',
'eemeter.evaluation',
'eeweather.cache',
'scipy',
'scipy.optimize',
'scipy.stats',
]
sys.modules.update((mod_name, Mock()) for mod_name in MOCK_MODULES)
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.todo',
'sphinx.ext.mathjax',
'sphinx.ext.viewcode',
'sphinx.ext.napoleon',
'sphinx.ext.autosectionlabel',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'thermostat'
copyright = u'2015-2016, Open Energy Efficiency, Inc.'
author = u'Open Energy Efficiency, Inc.'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.7'
# The full version, including alpha/beta/rc tags.
release = '1.7.3'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'thermostatdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'thermostat.tex', u'thermostat Documentation',
u'Open Energy Efficiency, Inc.', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'thermostat', u'thermostat Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'thermostat', u'thermostat Documentation',
author, 'thermostat', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| mit |
sunzhxjs/JobGIS | lib/python2.7/site-packages/pandas/msgpack/__init__.py | 9 | 1180 | # coding: utf-8
from pandas.msgpack._version import version
from pandas.msgpack.exceptions import *
from collections import namedtuple
class ExtType(namedtuple('ExtType', 'code data')):
"""ExtType represents ext type in msgpack."""
def __new__(cls, code, data):
if not isinstance(code, int):
raise TypeError("code must be int")
if not isinstance(data, bytes):
raise TypeError("data must be bytes")
if not 0 <= code <= 127:
raise ValueError("code must be 0~127")
return super(ExtType, cls).__new__(cls, code, data)
import os
from pandas.msgpack._packer import Packer
from pandas.msgpack._unpacker import unpack, unpackb, Unpacker
def pack(o, stream, **kwargs):
"""
Pack object `o` and write it to `stream`
See :class:`Packer` for options.
"""
packer = Packer(**kwargs)
stream.write(packer.pack(o))
def packb(o, **kwargs):
"""
Pack object `o` and return packed bytes
See :class:`Packer` for options.
"""
return Packer(**kwargs).pack(o)
# alias for compatibility to simplejson/marshal/pickle.
load = unpack
loads = unpackb
dump = pack
dumps = packb
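def _example_roundtrip():
    """Usage sketch (not part of the original module): pack a small object
    into msgpack bytes with packb and decode it again with unpackb."""
    packed = packb({b'key': [1, 2, 3]})
    return unpackb(packed)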
| mit |
leungmanhin/opencog | opencog/python/attic/spatiotemporal/temporal_events/__init__.py | 33 | 9273 | from scipy.stats.distributions import rv_frozen
from spatiotemporal.temporal_events.relation_formulas import FormulaCreator, RelationFormulaGeometricMean, BaseRelationFormula, RelationFormulaConvolution
from spatiotemporal.temporal_events.util import calculate_bounds_of_probability_distribution
from spatiotemporal.time_intervals import check_is_time_interval, TimeInterval
from spatiotemporal.temporal_events.membership_function import MembershipFunction, ProbabilityDistributionPiecewiseLinear
from spatiotemporal.unix_time import UnixTime
from utility.generic import convert_dict_to_sorted_lists
from utility.functions import FunctionPiecewiseLinear, FUNCTION_ZERO, integral
__author__ = 'keyvan'
class TemporalEvent(list, TimeInterval):
_distribution_beginning = None
_distribution_ending = None
_beginning = None
_ending = None
_dict = None
def __init__(self, distribution_beginning, distribution_ending,
bins=50, relation_formula=None):
if not isinstance(distribution_beginning, rv_frozen):
raise TypeError("'distribution_beginning' should be a scipy frozen distribution")
if not isinstance(distribution_ending, rv_frozen):
raise TypeError("'distribution_ending' should be a scipy frozen distribution")
self._distribution_beginning = distribution_beginning
self._distribution_ending = distribution_ending
a, beginning = calculate_bounds_of_probability_distribution(distribution_beginning)
ending, b = calculate_bounds_of_probability_distribution(distribution_ending)
self._beginning = UnixTime(beginning)
self._ending = UnixTime(ending)
self.membership_function = MembershipFunction(self)
bins_beginning = bins / 2
bins_ending = bins - bins_beginning
self.interval_beginning = TimeInterval(a, beginning, bins_beginning)
self.interval_ending = TimeInterval(ending, b, bins_ending)
list.__init__(self, self.interval_beginning + self.interval_ending)
TimeInterval.__init__(self, a, b, bins)
if relation_formula is None:
relation_formula = RelationFormulaConvolution()
elif not isinstance(relation_formula, BaseRelationFormula):
raise TypeError("'relation_formula' should be of type 'BaseRelationFormula'")
relation_formula.bounds[distribution_beginning] = self.a, self.beginning
relation_formula.bounds[distribution_ending] = self.ending, self.b
self._formula_creator = FormulaCreator(relation_formula)
def degree(self, time_step=None, a=None, b=None, interval=None):
"""
usage: provide 'time_step' or 'a' and 'b' or 'interval'
"""
if time_step is not None:
return self.membership_function(time_step)
if interval is None:
if (a, b) == (None, None):
interval = self
else:
interval = TimeInterval(a, b)
else:
check_is_time_interval(interval)
return integral(self.membership_function, interval.a, interval.b)
def temporal_relations_with(self, other):
return self._formula_creator.temporal_relations_between(self, other)
def instance(self):
return TemporalInstance(self.distribution_beginning.rvs(), self.distribution_ending.rvs())
def to_dict(self):
if self._dict is None:
self._dict = {}
for time_step in self.to_list():
self._dict[time_step] = self.membership_function(time_step)
return self._dict
# def plot(self, show_distributions=False):
# import matplotlib.pyplot as plt
# plt.plot(self.to_datetime_list(), self.membership_function())
# if show_distributions:
# if hasattr(self.distribution_beginning, 'plot'):
# self.distribution_beginning.plot()
# else:
# plt.plot(self.interval_beginning.to_datetime_list(),
# self.distribution_beginning.pdf(self.interval_beginning))
# if hasattr(self.distribution_ending, 'plot'):
# self.distribution_ending.plot()
# else:
# plt.plot(self.interval_ending.to_datetime_list(),
# self.distribution_ending.pdf(self.interval_ending))
# return plt
def plot(self, plt=None, show_distributions=False):
if plt is None:
import matplotlib.pyplot as plt
plt.plot(self.to_float_list(), self.membership_function())
if show_distributions:
if hasattr(self.distribution_beginning, 'plot'):
self.distribution_beginning.plot()
else:
plt.plot(self.interval_beginning.to_float_list(),
self.distribution_beginning.pdf(self.interval_beginning))
if hasattr(self.distribution_ending, 'plot'):
self.distribution_ending.plot()
else:
plt.plot(self.interval_ending.to_float_list(),
self.distribution_ending.pdf(self.interval_ending))
return plt
@property
def distribution_beginning(self):
return self._distribution_beginning
@property
def distribution_ending(self):
return self._distribution_ending
@property
def beginning(self):
return self._beginning
@property
def ending(self):
return self._ending
def __getitem__(self, portion_index):
if portion_index not in [0, 1]:
raise IndexError("TemporalEvent object only accepts '0' or '1' as index")
if portion_index == 0:
return self.distribution_beginning
return self.distribution_ending
def __mul__(self, other):
return self.temporal_relations_with(other)
def __str__(self):
return repr(self)
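# Usage sketch (not part of the original module): the equivalent ways of
# calling TemporalEvent.degree() described in its docstring, using an event
# built from two scipy frozen normal distributions as in the __main__ demo
# below. The helper name is illustrative only.
def _example_degree_usage():
    from scipy.stats import norm
    event = TemporalEvent(norm(loc=10, scale=2), norm(loc=30, scale=2), bins=100)
    d_point = event.degree(time_step=12)          # membership at a single time
    d_interval = event.degree(a=5, b=15)          # integral over [a, b]
    d_same = event.degree(interval=TimeInterval(5, 15))
    return d_point, d_interval, d_same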
# use TemporalEventTrapezium instead
class TemporalEventPiecewiseLinear(TemporalEvent):
def __init__(self, dictionary_beginning, dictionary_ending, bins=50):
input_list_beginning, output_list_beginning = convert_dict_to_sorted_lists(dictionary_beginning)
for i in xrange(1, len(input_list_beginning)):
if not dictionary_beginning[input_list_beginning[i]] > dictionary_beginning[input_list_beginning[i - 1]]:
raise TypeError("values of 'dictionary_beginning' should be increasing in time")
input_list_ending, output_list_ending = convert_dict_to_sorted_lists(dictionary_ending)
for i in xrange(1, len(input_list_ending)):
if not dictionary_ending[input_list_ending[i]] < dictionary_ending[input_list_ending[i - 1]]:
raise TypeError("values of 'dictionary_ending' should be decreasing in time")
dictionary_ending = {}
for i, time_step in enumerate(input_list_ending):
dictionary_ending[time_step] = output_list_ending[len(input_list_ending) - i - 1]
input_list_ending, output_list_ending = convert_dict_to_sorted_lists(dictionary_ending)
distribution_beginning = ProbabilityDistributionPiecewiseLinear(dictionary_beginning)
distribution_ending = ProbabilityDistributionPiecewiseLinear(dictionary_ending)
TemporalEvent.__init__(self, distribution_beginning, distribution_ending, bins=bins)
self._list = sorted(set(input_list_beginning + input_list_ending))
self.membership_function = FunctionPiecewiseLinear(self.to_dict(), FUNCTION_ZERO)
def __getitem__(self, index):
return self._list.__getitem__(index)
def __len__(self):
return len(self._list)
def __iter__(self):
return iter(self._list)
def __reversed__(self):
return reversed(self._list)
def __repr__(self):
pairs = ['{0}: {1}'.format(self[i], self.membership_function[i]) for i in xrange(len(self))]
return '{0}({1})'.format(self.__class__.__name__, ', '.join(pairs))
class TemporalInstance(TimeInterval):
def __init__(self, a, b):
TimeInterval.__init__(self, a, b, 1)
def plot(self):
import matplotlib.pyplot as plt
from spatiotemporal.unix_time import UnixTime
plt.plot([UnixTime(self.a).to_datetime(), UnixTime(self.b).to_datetime()], [1, 1])
return plt
if __name__ == '__main__':
from utility.functions import integral
from scipy.stats import norm
import matplotlib.pyplot as plt
#event = TemporalInstance(datetime(2010, 1, 1), datetime(2011, 2, 1))
#plt = event.plot()
#plt.show()
events = [
# TemporalEvent(norm(loc=10, scale=2), norm(loc=30, scale=2), 100),
# TemporalEvent(norm(loc=5, scale=2), norm(loc=15, scale=4), 100),
TemporalEventPiecewiseLinear({1: 0, 2: 0.1, 3: 0.3, 4: 0.7, 5: 1}, {6: 1, 7: 0.9, 8: 0.6, 9: 0.1, 10: 0}),
TemporalEventPiecewiseLinear({1: 0, 2: 0.1, 3: 0.3, 4: 0.7, 5: 1}, {3.5: 1, 4.5: 0.9, 8: 0.6, 9: 0.1, 10: 0})
]
print type(events[0])
print events[0] * events[1]
for event in events:
plt = event.plot()
print integral(event.distribution_beginning.pdf, event.a, event.beginning)
print event.distribution_beginning.rvs(10)
plt.ylim(ymax=1.1)
#plt.figure()
plt.show()
| agpl-3.0 |
mlyundin/scikit-learn | sklearn/neighbors/tests/test_kd_tree.py | 159 | 7852 | import numpy as np
from numpy.testing import assert_array_almost_equal
from sklearn.neighbors.kd_tree import (KDTree, NeighborsHeap,
simultaneous_sort, kernel_norm,
nodeheap_sort, DTYPE, ITYPE)
from sklearn.neighbors.dist_metrics import DistanceMetric
from sklearn.utils.testing import SkipTest, assert_allclose
V = np.random.random((3, 3))
V = np.dot(V, V.T)
DIMENSION = 3
METRICS = {'euclidean': {},
'manhattan': {},
'chebyshev': {},
'minkowski': dict(p=3)}
def brute_force_neighbors(X, Y, k, metric, **kwargs):
D = DistanceMetric.get_metric(metric, **kwargs).pairwise(Y, X)
ind = np.argsort(D, axis=1)[:, :k]
dist = D[np.arange(Y.shape[0])[:, None], ind]
return dist, ind
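# Small usage sketch (not part of the original tests): the KDTree query API
# that brute_force_neighbors above is checked against.
def example_kd_tree_query():
    rng = np.random.RandomState(0)
    X = rng.random_sample((40, DIMENSION))
    kdt = KDTree(X, leaf_size=2)
    dist, ind = kdt.query(X[:1], k=3)  # distances/indices of 3 nearest points
    return dist, ind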
def test_kd_tree_query():
np.random.seed(0)
X = np.random.random((40, DIMENSION))
Y = np.random.random((10, DIMENSION))
def check_neighbors(dualtree, breadth_first, k, metric, kwargs):
kdt = KDTree(X, leaf_size=1, metric=metric, **kwargs)
dist1, ind1 = kdt.query(Y, k, dualtree=dualtree,
breadth_first=breadth_first)
dist2, ind2 = brute_force_neighbors(X, Y, k, metric, **kwargs)
# don't check indices here: if there are any duplicate distances,
# the indices may not match. Distances should not have this problem.
assert_array_almost_equal(dist1, dist2)
for (metric, kwargs) in METRICS.items():
for k in (1, 3, 5):
for dualtree in (True, False):
for breadth_first in (True, False):
yield (check_neighbors,
dualtree, breadth_first,
k, metric, kwargs)
def test_kd_tree_query_radius(n_samples=100, n_features=10):
np.random.seed(0)
X = 2 * np.random.random(size=(n_samples, n_features)) - 1
query_pt = np.zeros(n_features, dtype=float)
eps = 1E-15 # roundoff error can cause test to fail
kdt = KDTree(X, leaf_size=5)
rad = np.sqrt(((X - query_pt) ** 2).sum(1))
for r in np.linspace(rad[0], rad[-1], 100):
ind = kdt.query_radius([query_pt], r + eps)[0]
i = np.where(rad <= r + eps)[0]
ind.sort()
i.sort()
assert_array_almost_equal(i, ind)
def test_kd_tree_query_radius_distance(n_samples=100, n_features=10):
np.random.seed(0)
X = 2 * np.random.random(size=(n_samples, n_features)) - 1
query_pt = np.zeros(n_features, dtype=float)
eps = 1E-15 # roundoff error can cause test to fail
kdt = KDTree(X, leaf_size=5)
rad = np.sqrt(((X - query_pt) ** 2).sum(1))
for r in np.linspace(rad[0], rad[-1], 100):
ind, dist = kdt.query_radius([query_pt], r + eps, return_distance=True)
ind = ind[0]
dist = dist[0]
d = np.sqrt(((query_pt - X[ind]) ** 2).sum(1))
assert_array_almost_equal(d, dist)
def compute_kernel_slow(Y, X, kernel, h):
d = np.sqrt(((Y[:, None, :] - X) ** 2).sum(-1))
norm = kernel_norm(h, X.shape[1], kernel)
if kernel == 'gaussian':
return norm * np.exp(-0.5 * (d * d) / (h * h)).sum(-1)
elif kernel == 'tophat':
return norm * (d < h).sum(-1)
elif kernel == 'epanechnikov':
return norm * ((1.0 - (d * d) / (h * h)) * (d < h)).sum(-1)
elif kernel == 'exponential':
return norm * (np.exp(-d / h)).sum(-1)
elif kernel == 'linear':
return norm * ((1 - d / h) * (d < h)).sum(-1)
elif kernel == 'cosine':
return norm * (np.cos(0.5 * np.pi * d / h) * (d < h)).sum(-1)
else:
raise ValueError('kernel not recognized')
def test_kd_tree_kde(n_samples=100, n_features=3):
np.random.seed(0)
X = np.random.random((n_samples, n_features))
Y = np.random.random((n_samples, n_features))
kdt = KDTree(X, leaf_size=10)
for kernel in ['gaussian', 'tophat', 'epanechnikov',
'exponential', 'linear', 'cosine']:
for h in [0.01, 0.1, 1]:
dens_true = compute_kernel_slow(Y, X, kernel, h)
def check_results(kernel, h, atol, rtol, breadth_first):
dens = kdt.kernel_density(Y, h, atol=atol, rtol=rtol,
kernel=kernel,
breadth_first=breadth_first)
assert_allclose(dens, dens_true, atol=atol,
rtol=max(rtol, 1e-7))
for rtol in [0, 1E-5]:
for atol in [1E-6, 1E-2]:
for breadth_first in (True, False):
yield (check_results, kernel, h, atol, rtol,
breadth_first)
def test_gaussian_kde(n_samples=1000):
# Compare gaussian KDE results to scipy.stats.gaussian_kde
from scipy.stats import gaussian_kde
np.random.seed(0)
x_in = np.random.normal(0, 1, n_samples)
x_out = np.linspace(-5, 5, 30)
for h in [0.01, 0.1, 1]:
kdt = KDTree(x_in[:, None])
try:
gkde = gaussian_kde(x_in, bw_method=h / np.std(x_in))
except TypeError:
raise SkipTest("Old scipy, does not accept explicit bandwidth.")
dens_kdt = kdt.kernel_density(x_out[:, None], h) / n_samples
dens_gkde = gkde.evaluate(x_out)
assert_array_almost_equal(dens_kdt, dens_gkde, decimal=3)
def test_kd_tree_two_point(n_samples=100, n_features=3):
np.random.seed(0)
X = np.random.random((n_samples, n_features))
Y = np.random.random((n_samples, n_features))
r = np.linspace(0, 1, 10)
kdt = KDTree(X, leaf_size=10)
D = DistanceMetric.get_metric("euclidean").pairwise(Y, X)
counts_true = [(D <= ri).sum() for ri in r]
def check_two_point(r, dualtree):
counts = kdt.two_point_correlation(Y, r=r, dualtree=dualtree)
assert_array_almost_equal(counts, counts_true)
for dualtree in (True, False):
yield check_two_point, r, dualtree
def test_kd_tree_pickle():
import pickle
np.random.seed(0)
X = np.random.random((10, 3))
kdt1 = KDTree(X, leaf_size=1)
ind1, dist1 = kdt1.query(X)
def check_pickle_protocol(protocol):
s = pickle.dumps(kdt1, protocol=protocol)
kdt2 = pickle.loads(s)
ind2, dist2 = kdt2.query(X)
assert_array_almost_equal(ind1, ind2)
assert_array_almost_equal(dist1, dist2)
for protocol in (0, 1, 2):
yield check_pickle_protocol, protocol
def test_neighbors_heap(n_pts=5, n_nbrs=10):
heap = NeighborsHeap(n_pts, n_nbrs)
for row in range(n_pts):
d_in = np.random.random(2 * n_nbrs).astype(DTYPE)
i_in = np.arange(2 * n_nbrs, dtype=ITYPE)
for d, i in zip(d_in, i_in):
heap.push(row, d, i)
ind = np.argsort(d_in)
d_in = d_in[ind]
i_in = i_in[ind]
d_heap, i_heap = heap.get_arrays(sort=True)
assert_array_almost_equal(d_in[:n_nbrs], d_heap[row])
assert_array_almost_equal(i_in[:n_nbrs], i_heap[row])
def test_node_heap(n_nodes=50):
vals = np.random.random(n_nodes).astype(DTYPE)
i1 = np.argsort(vals)
vals2, i2 = nodeheap_sort(vals)
assert_array_almost_equal(i1, i2)
assert_array_almost_equal(vals[i1], vals2)
def test_simultaneous_sort(n_rows=10, n_pts=201):
dist = np.random.random((n_rows, n_pts)).astype(DTYPE)
ind = (np.arange(n_pts) + np.zeros((n_rows, 1))).astype(ITYPE)
dist2 = dist.copy()
ind2 = ind.copy()
# simultaneous sort rows using function
simultaneous_sort(dist, ind)
# simultaneous sort rows using numpy
i = np.argsort(dist2, axis=1)
row_ind = np.arange(n_rows)[:, None]
dist2 = dist2[row_ind, i]
ind2 = ind2[row_ind, i]
assert_array_almost_equal(dist, dist2)
assert_array_almost_equal(ind, ind2)
| bsd-3-clause |
mfrey/baltimore | plot/overheadplot.py | 1 | 3419 | #!/usr/bin/env python2.7
import os
import re
import csv
import numpy as np
import matplotlib.pyplot as plt
import json
import requests
import matplotlib
class OverheadPlot:
def __init__(self):
self.title = "Overhead ($\mu$ and $\pm \sigma$ interval)"
self.ylabel = "Packets [%]"
self.xlabel = "Pause Time [s]"
self.xlist = []
self.mu = []
self.sigma = []
#self.yticks = [2, 3, 4, 6, 7, 8, 9, 10, 12, 15]
#self.yticks = [0.2, 0.3, 0.4, 0.6, 0.7, 0.8, 0.9, 1.0]
self.yticks = [2, 3, 4, 6, 7, 8, 9, 10]
self.labels = []
self.markers = ['s','^','v','2','*','3','d']
self.legend_location = 4
def draw(self, filename):
figure, axis = plt.subplots(1)
axis.plot(self.xlist, self.mu, lw=2, label='ARA', color='#348ABD')
# axis.plot(t, mu1, lw=2, label='mean population 2', color='yellow')
#axis.fill_between(self.xlist, self.mu + self.sigma, self.mu - self.sigma, facecolor='blue', alpha=0.5)
axis.fill_between(self.xlist, [i + j for i, j in zip(self.mu, self.sigma)], [i - j for i, j in zip(self.mu, self.sigma)], facecolor='#348ABD', alpha=0.5)
# axis.fill_between(t, mu2+sigma2, mu2-sigma2, facecolor='yellow', alpha=0.5)
axis.set_title(self.title)
axis.legend(loc=self.legend_location)
axis.set_xlabel(self.xlabel)
axis.set_ylabel(self.ylabel)
axis.grid()
figure.savefig(filename)
if __name__ == "__main__":
csv_location = '/home/michael/Desktop/Projekte/remote/jupiter/Desktop/TechReport'
overhead = {}
scenarios = ['ARA0', 'ARA100', 'ARA300', 'ARA500', 'ARA700', 'ARA900', 'ARA1000']
scenario_files = {}
for root, _, files in os.walk(csv_location):
for name in files:
if name.endswith('csv'):
scenario = name.split("/")[-1].split("_")[0]
if scenario in scenarios:
if scenario not in scenario_files:
scenario_files[scenario] = []
scenario_files[scenario].append(os.path.join(root, name))
for scenario in scenario_files:
for csv_file in scenario_files[scenario]:
if csv_file.endswith("overhead_raw.csv"):
overhead[scenario] = []
with open(csv_file, 'rt') as csvfile:
reader = csv.reader(csvfile, delimiter=',', quotechar='"')
next(reader)
for row in reader:
if len(row) > 1:
overhead[scenario].append(float(row[1]) * 100)
# print(overhead[scenario])
# print(" ")
# print(overhead)
s = requests.get("https://raw.github.com/CamDavidsonPilon/Probabilistic-Programming-and-Bayesian-Methods-for-Hackers/master/styles/bmh_matplotlibrc.json").json()
matplotlib.rcParams.update(s)
plot = OverheadPlot()
plot.xlist = [0, 100, 300, 500, 700, 900, 1000]
convert = lambda text: int(text) if text.isdigit() else text
alphanum_key = lambda key: [ convert(c) for c in re.split('([0-9]+)', key) ]
mean = []
std = []
for scenario in sorted(overhead, key = alphanum_key):
mean.append(np.mean(overhead[scenario]))
std.append(np.std(overhead[scenario]))
plot.mu = mean
plot.sigma = std
plot.draw("test.pdf")
| gpl-3.0 |
dimarkov/seaborn | seaborn/tests/test_utils.py | 11 | 11537 | """Tests for plotting utilities."""
import warnings
import tempfile
import shutil
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import nose
import nose.tools as nt
from nose.tools import assert_equal, raises
import numpy.testing as npt
import pandas.util.testing as pdt
from distutils.version import LooseVersion
pandas_has_categoricals = LooseVersion(pd.__version__) >= "0.15"
from pandas.util.testing import network
from ..utils import get_dataset_names, load_dataset
try:
from bs4 import BeautifulSoup
except ImportError:
BeautifulSoup = None
from .. import utils, rcmod
a_norm = np.random.randn(100)
def test_pmf_hist_basics():
"""Test the function to return barplot args for pmf hist."""
out = utils.pmf_hist(a_norm)
assert_equal(len(out), 3)
x, h, w = out
assert_equal(len(x), len(h))
# Test simple case
a = np.arange(10)
x, h, w = utils.pmf_hist(a, 10)
nose.tools.assert_true(np.all(h == h[0]))
def test_pmf_hist_widths():
"""Test histogram width is correct."""
x, h, w = utils.pmf_hist(a_norm)
assert_equal(x[1] - x[0], w)
def test_pmf_hist_normalization():
"""Test that output data behaves like a PMF."""
x, h, w = utils.pmf_hist(a_norm)
nose.tools.assert_almost_equal(sum(h), 1)
nose.tools.assert_less_equal(h.max(), 1)
def test_pmf_hist_bins():
"""Test bin specification."""
x, h, w = utils.pmf_hist(a_norm, 20)
assert_equal(len(x), 20)
def test_ci_to_errsize():
"""Test behavior of ci_to_errsize."""
cis = [[.5, .5],
[1.25, 1.5]]
heights = [1, 1.5]
actual_errsize = np.array([[.5, 1],
[.25, 0]])
test_errsize = utils.ci_to_errsize(cis, heights)
npt.assert_array_equal(actual_errsize, test_errsize)
def test_desaturate():
"""Test color desaturation."""
out1 = utils.desaturate("red", .5)
assert_equal(out1, (.75, .25, .25))
out2 = utils.desaturate("#00FF00", .5)
assert_equal(out2, (.25, .75, .25))
out3 = utils.desaturate((0, 0, 1), .5)
assert_equal(out3, (.25, .25, .75))
out4 = utils.desaturate("red", .5)
assert_equal(out4, (.75, .25, .25))
@raises(ValueError)
def test_desaturation_prop():
"""Test that pct outside of [0, 1] raises exception."""
utils.desaturate("blue", 50)
def test_saturate():
"""Test performance of saturation function."""
out = utils.saturate((.75, .25, .25))
assert_equal(out, (1, 0, 0))
def test_iqr():
"""Test the IQR function."""
a = np.arange(5)
iqr = utils.iqr(a)
assert_equal(iqr, 2)
class TestSpineUtils(object):
sides = ["left", "right", "bottom", "top"]
outer_sides = ["top", "right"]
inner_sides = ["left", "bottom"]
offset = 10
original_position = ("outward", 0)
offset_position = ("outward", offset)
def test_despine(self):
f, ax = plt.subplots()
for side in self.sides:
nt.assert_true(ax.spines[side].get_visible())
utils.despine()
for side in self.outer_sides:
            nt.assert_true(not ax.spines[side].get_visible())
for side in self.inner_sides:
nt.assert_true(ax.spines[side].get_visible())
utils.despine(**dict(zip(self.sides, [True] * 4)))
for side in self.sides:
            nt.assert_true(not ax.spines[side].get_visible())
plt.close("all")
def test_despine_specific_axes(self):
f, (ax1, ax2) = plt.subplots(2, 1)
utils.despine(ax=ax2)
for side in self.sides:
nt.assert_true(ax1.spines[side].get_visible())
for side in self.outer_sides:
            nt.assert_true(not ax2.spines[side].get_visible())
for side in self.inner_sides:
nt.assert_true(ax2.spines[side].get_visible())
plt.close("all")
def test_despine_with_offset(self):
f, ax = plt.subplots()
for side in self.sides:
nt.assert_equal(ax.spines[side].get_position(),
self.original_position)
utils.despine(ax=ax, offset=self.offset)
for side in self.sides:
is_visible = ax.spines[side].get_visible()
new_position = ax.spines[side].get_position()
if is_visible:
nt.assert_equal(new_position, self.offset_position)
else:
nt.assert_equal(new_position, self.original_position)
plt.close("all")
def test_despine_with_offset_specific_axes(self):
f, (ax1, ax2) = plt.subplots(2, 1)
utils.despine(offset=self.offset, ax=ax2)
for side in self.sides:
nt.assert_equal(ax1.spines[side].get_position(),
self.original_position)
if ax2.spines[side].get_visible():
nt.assert_equal(ax2.spines[side].get_position(),
self.offset_position)
else:
nt.assert_equal(ax2.spines[side].get_position(),
self.original_position)
plt.close("all")
def test_despine_trim_spines(self):
f, ax = plt.subplots()
ax.plot([1, 2, 3], [1, 2, 3])
ax.set_xlim(.75, 3.25)
utils.despine(trim=True)
for side in self.inner_sides:
bounds = ax.spines[side].get_bounds()
nt.assert_equal(bounds, (1, 3))
plt.close("all")
def test_despine_trim_inverted(self):
f, ax = plt.subplots()
ax.plot([1, 2, 3], [1, 2, 3])
ax.set_ylim(.85, 3.15)
ax.invert_yaxis()
utils.despine(trim=True)
for side in self.inner_sides:
bounds = ax.spines[side].get_bounds()
nt.assert_equal(bounds, (1, 3))
plt.close("all")
def test_despine_trim_noticks(self):
f, ax = plt.subplots()
ax.plot([1, 2, 3], [1, 2, 3])
ax.set_yticks([])
utils.despine(trim=True)
nt.assert_equal(ax.get_yticks().size, 0)
def test_offset_spines_warns(self):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always", category=UserWarning)
f, ax = plt.subplots()
utils.offset_spines(offset=self.offset)
nt.assert_true('deprecated' in str(w[0].message))
nt.assert_true(issubclass(w[0].category, UserWarning))
plt.close('all')
def test_offset_spines(self):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always", category=UserWarning)
f, ax = plt.subplots()
for side in self.sides:
nt.assert_equal(ax.spines[side].get_position(),
self.original_position)
utils.offset_spines(offset=self.offset)
for side in self.sides:
nt.assert_equal(ax.spines[side].get_position(),
self.offset_position)
plt.close("all")
def test_offset_spines_specific_axes(self):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always", category=UserWarning)
f, (ax1, ax2) = plt.subplots(2, 1)
utils.offset_spines(offset=self.offset, ax=ax2)
for side in self.sides:
nt.assert_equal(ax1.spines[side].get_position(),
self.original_position)
nt.assert_equal(ax2.spines[side].get_position(),
self.offset_position)
plt.close("all")
def test_ticklabels_overlap():
rcmod.set()
f, ax = plt.subplots(figsize=(2, 2))
f.tight_layout() # This gets the Agg renderer working
assert not utils.axis_ticklabels_overlap(ax.get_xticklabels())
big_strings = "abcdefgh", "ijklmnop"
ax.set_xlim(-.5, 1.5)
ax.set_xticks([0, 1])
ax.set_xticklabels(big_strings)
assert utils.axis_ticklabels_overlap(ax.get_xticklabels())
x, y = utils.axes_ticklabels_overlap(ax)
assert x
assert not y
def test_categorical_order():
x = ["a", "c", "c", "b", "a", "d"]
y = [3, 2, 5, 1, 4]
order = ["a", "b", "c", "d"]
out = utils.categorical_order(x)
nt.assert_equal(out, ["a", "c", "b", "d"])
out = utils.categorical_order(x, order)
nt.assert_equal(out, order)
out = utils.categorical_order(x, ["b", "a"])
nt.assert_equal(out, ["b", "a"])
out = utils.categorical_order(np.array(x))
nt.assert_equal(out, ["a", "c", "b", "d"])
out = utils.categorical_order(pd.Series(x))
nt.assert_equal(out, ["a", "c", "b", "d"])
out = utils.categorical_order(y)
nt.assert_equal(out, [1, 2, 3, 4, 5])
out = utils.categorical_order(np.array(y))
nt.assert_equal(out, [1, 2, 3, 4, 5])
out = utils.categorical_order(pd.Series(y))
nt.assert_equal(out, [1, 2, 3, 4, 5])
if pandas_has_categoricals:
x = pd.Categorical(x, order)
out = utils.categorical_order(x)
nt.assert_equal(out, list(x.categories))
x = pd.Series(x)
out = utils.categorical_order(x)
nt.assert_equal(out, list(x.cat.categories))
out = utils.categorical_order(x, ["b", "a"])
nt.assert_equal(out, ["b", "a"])
x = ["a", np.nan, "c", "c", "b", "a", "d"]
out = utils.categorical_order(x)
nt.assert_equal(out, ["a", "c", "b", "d"])
if LooseVersion(pd.__version__) >= "0.15":
def check_load_dataset(name):
ds = load_dataset(name, cache=False)
assert(isinstance(ds, pd.DataFrame))
def check_load_cached_dataset(name):
        # Test the caching using a temporary file.
# With Python 3.2+, we could use the tempfile.TemporaryDirectory()
# context manager instead of this try...finally statement
tmpdir = tempfile.mkdtemp()
try:
# download and cache
ds = load_dataset(name, cache=True, data_home=tmpdir)
# use cached version
ds2 = load_dataset(name, cache=True, data_home=tmpdir)
pdt.assert_frame_equal(ds, ds2)
finally:
shutil.rmtree(tmpdir)
@network(url="https://github.com/mwaskom/seaborn-data")
def test_get_dataset_names():
if not BeautifulSoup:
raise nose.SkipTest("No BeautifulSoup available for parsing html")
names = get_dataset_names()
assert(len(names) > 0)
assert(u"titanic" in names)
@network(url="https://github.com/mwaskom/seaborn-data")
def test_load_datasets():
if not BeautifulSoup:
raise nose.SkipTest("No BeautifulSoup available for parsing html")
# Heavy test to verify that we can load all available datasets
for name in get_dataset_names():
# unfortunately @network somehow obscures this generator so it
        # does not take effect, so we need to call it explicitly
# yield check_load_dataset, name
check_load_dataset(name)
@network(url="https://github.com/mwaskom/seaborn-data")
def test_load_cached_datasets():
if not BeautifulSoup:
raise nose.SkipTest("No BeautifulSoup available for parsing html")
# Heavy test to verify that we can load all available datasets
for name in get_dataset_names():
# unfortunately @network somehow obscures this generator so it
        # does not take effect, so we need to call it explicitly
# yield check_load_dataset, name
check_load_cached_dataset(name)
| bsd-3-clause |
abonaca/gary | gary/dynamics/plot.py | 1 | 8987 | # coding: utf-8
""" ...explain... """
from __future__ import division, print_function
__author__ = "adrn <[email protected]>"
# Third-party
import numpy as np
__all__ = ['plot_orbits', 'three_panel']
def _get_axes(dim, axes=None, triangle=False, subplots_kwargs=dict()):
"""
Parameters
----------
dim : int
Dimensionality of the orbit.
axes : array_like (optional)
Array of matplotlib Axes objects.
triangle : bool (optional)
Make a triangle plot instead of plotting all projections in a single row.
subplots_kwargs : dict (optional)
Dictionary of kwargs passed to the matplotlib `subplots()` call.
"""
import matplotlib.pyplot as plt
if dim == 3:
if triangle and axes is None:
figsize = subplots_kwargs.pop('figsize', (12,12))
sharex = subplots_kwargs.pop('sharex', True)
sharey = subplots_kwargs.pop('sharey', True)
fig,axes = plt.subplots(2,2,figsize=figsize, sharex=sharex, sharey=sharey,
**subplots_kwargs)
axes[0,1].set_visible(False)
axes = axes.flat
axes = [axes[0],axes[2],axes[3]]
elif triangle and axes is not None:
try:
axes = axes.flat
except:
pass
if len(axes) == 4:
axes = [axes[0],axes[2],axes[3]]
elif not triangle and axes is None:
figsize = subplots_kwargs.pop('figsize', (14,5))
fig,axes = plt.subplots(1, 3, figsize=figsize, **subplots_kwargs)
elif dim <= 2:
if axes is not None:
try:
len(axes)
except TypeError: # single axes object
axes = [axes]
else:
if dim ==1:
figsize = subplots_kwargs.pop('figsize', (14,8))
elif dim == 2:
figsize = subplots_kwargs.pop('figsize', (8,8))
fig,axes = plt.subplots(1, 1, figsize=figsize, **subplots_kwargs)
axes = [axes]
else:
raise ValueError("Orbit must have dimensions <= 3.")
return axes
def plot_orbits(x, t=None, ix=None, axes=None, triangle=False,
subplots_kwargs=dict(), labels=("$x$", "$y$", "$z$"), **kwargs):
"""
Given time series of positions, `x`, make nice plots of the orbit in
cartesian projections.
Parameters
----------
x : array_like
Array of positions. The last axis (`axis=-1`) is assumed
to be the dimensionality, e.g., `x.shape[-1]`. The first axis
(`axis=0`) is assumed to be the time axis.
t : array_like (optional)
        Array of times. Only used if the input orbit is 1 dimensional.
ix : int, array_like (optional)
Index or array of indices of orbits to plot. For example, if `x` is an
array of shape (1024,32,6) -- 1024 timesteps for 32 orbits in 6D
phase-space -- `ix` would specify which of the 32 orbits to plot.
axes : array_like (optional)
Array of matplotlib Axes objects.
triangle : bool (optional)
Make a triangle plot instead of plotting all projections in a single row.
subplots_kwargs : dict (optional)
Dictionary of kwargs passed to the matplotlib `subplots()` call.
labels : iterable (optional)
        List or iterable of axis labels as strings. They should correspond to
        the dimensions of the input orbit; for example, if the input orbit has
        shape (1000,1,3), then labels should have length 3.
Other Parameters
----------------
kwargs
All other keyword arguments are passed to the matplotlib `plot()` call.
You can pass in any of the usual style kwargs like `color=...`,
`marker=...`, etc.
"""
if x.ndim == 2:
x = x[:,np.newaxis]
# dimensionality of input orbit
dim = x.shape[-1]
if dim > 3:
# if orbit has more than 3 dimensions, only use the first 3
dim = 3
# hack in some defaults to subplots kwargs so by default share x and y axes
if 'sharex' not in subplots_kwargs:
subplots_kwargs['sharex'] = True
if 'sharey' not in subplots_kwargs:
subplots_kwargs['sharey'] = True
axes = _get_axes(dim=dim, axes=axes, triangle=triangle,
subplots_kwargs=subplots_kwargs)
if ix is not None:
ixs = np.atleast_1d(ix)
else:
ixs = range(x.shape[1])
if dim == 3:
for ii in ixs:
axes[0].plot(x[:,ii,0], x[:,ii,1], **kwargs)
axes[1].plot(x[:,ii,0], x[:,ii,2], **kwargs)
axes[2].plot(x[:,ii,1], x[:,ii,2], **kwargs)
if triangle:
# HACK: until matplotlib 1.4 comes out, need this
axes[0].set_ylim(axes[0].get_xlim())
axes[2].set_xlim(axes[0].get_ylim())
axes[0].set_ylabel(labels[1])
axes[1].set_xlabel(labels[0])
axes[1].set_ylabel(labels[2])
axes[2].set_xlabel(labels[1])
else:
axes[0].set_xlabel(labels[0])
axes[0].set_ylabel(labels[1])
axes[1].set_xlabel(labels[0])
axes[1].set_ylabel(labels[2])
axes[2].set_xlabel(labels[1])
axes[2].set_ylabel(labels[2])
if not triangle:
axes[0].figure.tight_layout()
elif dim == 2:
for ii in ixs:
axes[0].plot(x[:,ii,0], x[:,ii,1], **kwargs)
axes[0].set_xlabel(labels[0])
axes[0].set_ylabel(labels[1])
axes[0].figure.tight_layout()
elif dim == 1:
if t is None:
t = np.arange(len(x))
for ii in ixs:
axes[0].plot(t, x[:,ii,0], **kwargs)
axes[0].set_xlabel("$t$")
axes[0].set_ylabel(labels[0])
axes[0].figure.tight_layout()
return axes[0].figure
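# Usage sketch (not part of the original module): plotting two toy 3D orbits
# with plot_orbits. The array shape follows the convention documented above,
# (n_timesteps, n_orbits, dim); the helper name is illustrative only.
def _example_plot_orbits():
    t = np.linspace(0., 10., 500)
    w = np.zeros((len(t), 2, 3))
    w[:, 0, 0] = np.cos(t)
    w[:, 0, 1] = np.sin(t)
    w[:, 0, 2] = 0.1 * t
    w[:, 1, 0] = 2 * np.cos(t)
    w[:, 1, 1] = np.sin(2 * t)
    w[:, 1, 2] = -0.1 * t
    return plot_orbits(w, triangle=False, color='k', alpha=0.5)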
def three_panel(q, relative_to=None, symbol=None, autolim=True,
axes=None, triangle=False, subplots_kwargs=dict(), **kwargs):
"""
Given 3D quantities, `q`, (not astropy quantities...), make nice three-panel or
triangle plots of projections of the values.
Parameters
----------
q : array_like
Array of values. The last axis (`axis=-1`) is assumed
to be the dimensionality, e.g., `q.shape[-1]`.
    relative_to : array_like (optional)
Plot the values relative to this value or values.
symbol : str (optional)
Symbol to represent the quantity for axis labels. Can be Latex.
autolim : bool (optional)
Automatically set the plot limits to be something sensible.
axes : array_like (optional)
Array of matplotlib Axes objects.
triangle : bool (optional)
Make a triangle plot instead of plotting all projections in a single row.
subplots_kwargs : dict (optional)
Dictionary of kwargs passed to the matplotlib `subplots()` call.
Other Parameters
----------------
kwargs
All other keyword arguments are passed to the matplotlib `scatter()` call.
You can pass in any of the usual style kwargs like `color=...`,
`marker=...`, etc.
"""
# don't propagate changes back...
q = q.copy()
# change default marker
marker = kwargs.pop('marker', '.')
# get axes object from arguments
axes = _get_axes(dim=3, axes=axes, triangle=triangle, subplots_kwargs=subplots_kwargs)
# if the quantities are relative
label = None
if relative_to is not None:
q -= relative_to
if symbol is not None:
label = r"$\Delta {sym}_{{ix}}/{sym}^{{{{(0)}}}}_{{ix}}$".format(sym=symbol)
else:
if symbol is not None:
label = r"${sym}_{{ix}}$".format(sym=symbol)
axes[0].scatter(q[:,0], q[:,1], marker=marker, **kwargs)
axes[1].scatter(q[:,0], q[:,2], marker=marker, **kwargs)
axes[2].scatter(q[:,1], q[:,2], marker=marker, **kwargs)
if label is not None:
if triangle:
axes[0].set_ylabel(label.format(ix=2))
axes[1].set_xlabel(label.format(ix=1))
axes[1].set_ylabel(label.format(ix=3))
axes[2].set_xlabel(label.format(ix=1))
else:
axes[0].set_xlabel(label.format(ix=1))
axes[0].set_ylabel(label.format(ix=2))
axes[1].set_xlabel(label.format(ix=1))
axes[1].set_ylabel(label.format(ix=3))
axes[2].set_xlabel(label.format(ix=2))
axes[2].set_ylabel(label.format(ix=3))
if autolim:
lims = []
for i in range(3):
mx,mi = q[:,i].max(), q[:,i].min()
delta = mx-mi
lims.append((mi-delta*0.05, mx+delta*0.05))
axes[0].set_xlim(lims[0])
axes[0].set_ylim(lims[1])
axes[1].set_xlim(lims[0])
axes[1].set_ylim(lims[2])
axes[2].set_xlim(lims[1])
axes[2].set_ylim(lims[2])
if not triangle:
axes[0].figure.tight_layout()
return axes[0].figure
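# Illustrative usage sketch for ``three_panel`` (hypothetical helper name and random
# data; only meant to show the call signature, and the style kwarg ``alpha`` is simply
# forwarded to matplotlib's ``scatter()``).
def _example_three_panel():
    import numpy as np
    q = np.random.uniform(-1., 1., size=(100, 3))  # 100 points in 3 dimensions
    # all pairwise projections in a single row, axes labeled $x_1$, $x_2$, $x_3$
    fig = three_panel(q, symbol='x', autolim=True, alpha=0.5)
    return fig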
| mit |
withsmilo/lezhin_data_challenge_pyconkr_2017 | src/features.py | 1 | 9421 | import loader
import numpy as np
import pandas as pd
from pathlib import Path
from definitions import *
def get_features():
def generate_user_features(df):
print('Generate user features')
if Path(name__features_user).exists():
            print('- {} already exists. Let\'s load it'.format(name__features_user))
return pd.read_pickle(name__features_user)
        print('- {} does not exist. Let\'s generate it'.format(name__features_user))
# Step #1. Grouped by USER_ID_1, USER_ID_2, USER_ID_3
usr1 = df.groupby([df.USER_ID_1, df.USER_ID_2, df.USER_ID_3]).agg({'ORDERED': 'sum',
'SESSION_CNT': ['sum', 'mean']
})
usr1.columns = usr1.columns.droplevel(0)
usr1.columns = ['USR_ORDERED_SUM',
'USR_SESSION_CNT_SUM', 'USR_SESSION_CNT_MEAN'
]
usr1.reset_index(inplace=True)
# Step #2. Grouped by USER_ID_1, USER_ID_2
usr2 = df.groupby([df.USER_ID_1, df.USER_ID_2]).agg({'TENDENCY_1': ['sum', 'mean'],
'TENDENCY_2': ['sum', 'mean'],
'TENDENCY_3': ['sum', 'mean'],
'TENDENCY_4': ['sum', 'mean'],
'TENDENCY_5': ['sum', 'mean'],
'TENDENCY_6': ['sum', 'mean'],
'TENDENCY_7': ['sum', 'mean'],
'TENDENCY_8': ['sum', 'mean'],
'TENDENCY_9': ['sum', 'mean'],
'TENDENCY_10': ['sum', 'mean'],
'TENDENCY_11': ['sum', 'mean'],
'TENDENCY_12': ['sum', 'mean'],
'TENDENCY_13': ['sum', 'mean'],
'TENDENCY_14': ['sum', 'mean'],
'TENDENCY_15': ['sum', 'mean'],
'TENDENCY_16': ['sum', 'mean']})
usr2.columns = usr2.columns.droplevel(0)
usr2.columns = ['USR_TENDENCY_1_SUM', 'USR_TENDENCY_1_MEAN',
'USR_TENDENCY_2_SUM', 'USR_TENDENCY_2_MEAN',
'USR_TENDENCY_3_SUM', 'USR_TENDENCY_3_MEAN',
'USR_TENDENCY_4_SUM', 'USR_TENDENCY_4_MEAN',
'USR_TENDENCY_5_SUM', 'USR_TENDENCY_5_MEAN',
'USR_TENDENCY_6_SUM', 'USR_TENDENCY_6_MEAN',
'USR_TENDENCY_7_SUM', 'USR_TENDENCY_7_MEAN',
'USR_TENDENCY_8_SUM', 'USR_TENDENCY_8_MEAN',
'USR_TENDENCY_9_SUM', 'USR_TENDENCY_9_MEAN',
'USR_TENDENCY_10_SUM', 'USR_TENDENCY_10_MEAN',
'USR_TENDENCY_11_SUM', 'USR_TENDENCY_11_MEAN',
'USR_TENDENCY_12_SUM', 'USR_TENDENCY_12_MEAN',
'USR_TENDENCY_13_SUM', 'USR_TENDENCY_13_MEAN',
'USR_TENDENCY_14_SUM', 'USR_TENDENCY_14_MEAN',
'USR_TENDENCY_15_SUM', 'USR_TENDENCY_15_MEAN',
'USR_TENDENCY_16_SUM', 'USR_TENDENCY_16_MEAN']
usr2.reset_index(inplace=True)
# Step #3. Merged usr1 with usr2
usr = usr1.merge(usr2, on=['USER_ID_1', 'USER_ID_2'])
print('- Saving...')
usr.to_pickle(name__features_user)
print('- Saved {}'.format(name__features_user))
return usr
def generate_product_features(df):
print('Generate product features')
if Path(name__features_product).exists():
            print('- {} already exists. Let\'s load it'.format(name__features_product))
return pd.read_pickle(name__features_product)
        print('- {} does not exist. Let\'s generate it'.format(name__features_product))
# Grouped by PRODUCT_ID
prd = df.groupby([df.PRODUCT_ID]).agg({'ORDERED' : 'sum',
# 'LAST_EPISODE': ['sum', 'mean'],
# 'START_DATE': ['sum', 'mean'],
# 'TOTAL_EPISODE_CNT': ['sum', 'mean']
})
# prd.columns = prd.columns.droplevel(0)
prd.columns = ['PRD_ORDERED_SUM',
# 'PRD_LAST_EPISODE_SUM', 'PRD_LAST_EPISODE_MEAN',
# 'PRD_START_DATE_SUM', 'PRD_START_DATE_MEAN',
# 'PRD_TOTAL_EPISODE_CNT_SUM', 'PRD_TOTAL_EPISODE_CNT_MEAN'
]
prd.reset_index(inplace=True)
print('- Saving...')
prd.to_pickle(name__features_product)
print('- Saved {}'.format(name__features_product))
return prd
def generate_user_product_features(df):
print('Generate user_product features')
if Path(name__features_user_product).exists():
            print('- {} already exists. Let\'s load it'.format(name__features_user_product))
return pd.read_pickle(name__features_user_product)
        print('- {} does not exist. Let\'s generate it'.format(name__features_user_product))
# Grouped by USER_ID_1, USER_ID_2, USER_ID_3, PRODUCT_ID
usr_prd = df.groupby([df.USER_ID_1, df.USER_ID_2, df.USER_ID_3, df.PRODUCT_ID])\
.agg({'USER_ID_1': 'size',
'ORDERED': 'sum'})
# usr_prd.columns = usr_prd.columns.droplevel(0)
usr_prd.columns = ['UP_VIEW_CNT', 'UP_ORDERED_SUM']
usr_prd['UP_ORDERED_RATIO'] = pd.Series(usr_prd.UP_ORDERED_SUM / usr_prd.UP_VIEW_CNT).astype(np.float32)
usr_prd.reset_index(inplace=True)
print('- Saving...')
usr_prd.to_pickle(name__features_user_product)
print('- Saved {}'.format(name__features_user_product))
return usr_prd
def generate_features(dtrain, dtest):
# We do not use user_features because they took my cv score down!
# usr = generate_user_features(dtrain)
prd = generate_product_features(dtrain)
usr_prd = generate_user_product_features(dtrain)
# Merge usr_prd with original data
dtrain = dtrain.merge(usr_prd, on=['USER_ID_1', 'USER_ID_2', 'USER_ID_3', 'PRODUCT_ID'], how='left') \
.merge(prd, on=['PRODUCT_ID'], how='left')
dtest = dtest.merge(usr_prd, on=['USER_ID_1', 'USER_ID_2', 'USER_ID_3', 'PRODUCT_ID'], how='left') \
.merge(prd, on=['PRODUCT_ID'], how='left')
# Concatenate USER_ID
dtrain['USER_ID'] = dtrain[['USER_ID_1', 'USER_ID_2', 'USER_ID_3']].apply(
lambda x: '{}_{}_{}'.format(x[0], x[1], x[2]), axis=1)
dtrain.drop(['USER_ID_1', 'USER_ID_2', 'USER_ID_3'], axis=1, inplace=True)
dtest['USER_ID'] = dtest[['USER_ID_1', 'USER_ID_2', 'USER_ID_3']].apply(
lambda x: '{}_{}_{}'.format(x[0], x[1], x[2]), axis=1)
dtest.drop(['USER_ID_1', 'USER_ID_2', 'USER_ID_3'], axis=1, inplace=True)
# Add combined features
dtrain['UP_PRD_ORDERED_RATIO'] = (dtrain.UP_ORDERED_SUM / dtrain.PRD_ORDERED_SUM).astype(np.float32)
dtest['UP_PRD_ORDERED_RATIO'] = (dtest.UP_ORDERED_SUM / dtest.PRD_ORDERED_SUM).astype(np.float32)
# Remove some 'less important' initial features
drop_column_list = ['BUY_PRODUCT_31', 'BUY_PRODUCT_47', 'BUY_PRODUCT_54',
'BUY_PRODUCT_63', 'BUY_PRODUCT_64', 'BUY_PRODUCT_69',
'BUY_PRODUCT_78', 'BUY_PRODUCT_85', 'BUY_PRODUCT_91', 'BUY_PRODUCT_97',
'BUY_PRODUCT_58',
'SCHEDULE_4', 'SCHEDULE_10',
'GENRE_5', 'GENRE_7', 'GENRE_10', 'GENRE_11', 'GENRE_12', 'GENRE_13', 'GENRE_17', 'GENRE_18',
'TAG_3', 'TAG_4', 'TAG_5',
'TENDENCY_9']
dtrain.drop(drop_column_list, axis=1, inplace=True)
dtest.drop(drop_column_list, axis=1, inplace=True)
print('Train Features {}: [{}]'.format(dtrain.shape, ', '.join(dtrain.columns)))
print('Test Features {}: [{}]'.format(dtest.shape, ', '.join(dtest.columns)))
return dtrain, dtest
if Path(name__features_train).exists() and Path(name__features_test).exists():
        print('{} {} already exist. Let\'s load them'.format(name__features_train, name__features_test))
return pd.read_pickle(name__features_train), pd.read_pickle(name__features_test)
# Load target data
train, test = loader.load()
# Generate features
f_train, f_test = generate_features(train, test)
print('Saving...')
f_train.to_pickle(name__features_train)
f_test.to_pickle(name__features_test)
print('Saved {} {}'.format(name__features_train, name__features_test))
return f_train, f_test
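# Illustrative sketch of the user/product aggregation pattern used in
# generate_user_product_features() above. The helper name and the toy values are
# hypothetical; only the column names mirror the real pipeline.
def _example_user_product_ratio():
    import numpy as np
    import pandas as pd
    toy = pd.DataFrame({'USER_ID_1': [1, 1, 2, 2],
                        'PRODUCT_ID': [10, 10, 10, 20],
                        'ORDERED': [1, 0, 1, 1]})
    # count views and sum orders per (user, product) pair
    grouped = toy.groupby([toy.USER_ID_1, toy.PRODUCT_ID]).agg({'USER_ID_1': 'size',
                                                                'ORDERED': 'sum'})
    grouped = grouped.rename(columns={'USER_ID_1': 'UP_VIEW_CNT',
                                      'ORDERED': 'UP_ORDERED_SUM'})
    # conversion ratio, as in the real feature generation
    grouped['UP_ORDERED_RATIO'] = (grouped.UP_ORDERED_SUM /
                                   grouped.UP_VIEW_CNT).astype(np.float32)
    return grouped.reset_index()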
| apache-2.0 |
Titan-C/scikit-learn | sklearn/cluster/tests/test_affinity_propagation.py | 341 | 2620 | """
Testing for Clustering methods
"""
import numpy as np
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.cluster.affinity_propagation_ import AffinityPropagation
from sklearn.cluster.affinity_propagation_ import affinity_propagation
from sklearn.datasets.samples_generator import make_blobs
from sklearn.metrics import euclidean_distances
n_clusters = 3
centers = np.array([[1, 1], [-1, -1], [1, -1]]) + 10
X, _ = make_blobs(n_samples=60, n_features=2, centers=centers,
cluster_std=0.4, shuffle=True, random_state=0)
def test_affinity_propagation():
# Affinity Propagation algorithm
# Compute similarities
S = -euclidean_distances(X, squared=True)
preference = np.median(S) * 10
# Compute Affinity Propagation
cluster_centers_indices, labels = affinity_propagation(
S, preference=preference)
n_clusters_ = len(cluster_centers_indices)
assert_equal(n_clusters, n_clusters_)
af = AffinityPropagation(preference=preference, affinity="precomputed")
labels_precomputed = af.fit(S).labels_
af = AffinityPropagation(preference=preference, verbose=True)
labels = af.fit(X).labels_
assert_array_equal(labels, labels_precomputed)
cluster_centers_indices = af.cluster_centers_indices_
n_clusters_ = len(cluster_centers_indices)
assert_equal(np.unique(labels).size, n_clusters_)
assert_equal(n_clusters, n_clusters_)
# Test also with no copy
_, labels_no_copy = affinity_propagation(S, preference=preference,
copy=False)
assert_array_equal(labels, labels_no_copy)
# Test input validation
assert_raises(ValueError, affinity_propagation, S[:, :-1])
assert_raises(ValueError, affinity_propagation, S, damping=0)
af = AffinityPropagation(affinity="unknown")
assert_raises(ValueError, af.fit, X)
def test_affinity_propagation_predict():
# Test AffinityPropagation.predict
af = AffinityPropagation(affinity="euclidean")
labels = af.fit_predict(X)
labels2 = af.predict(X)
assert_array_equal(labels, labels2)
def test_affinity_propagation_predict_error():
# Test exception in AffinityPropagation.predict
# Not fitted.
af = AffinityPropagation(affinity="euclidean")
assert_raises(ValueError, af.predict, X)
# Predict not supported when affinity="precomputed".
S = np.dot(X, X.T)
af = AffinityPropagation(affinity="precomputed")
af.fit(S)
assert_raises(ValueError, af.predict, X)
| bsd-3-clause |
laurent-george/bokeh | bokeh/models/sources.py | 13 | 10604 | from __future__ import absolute_import
from ..plot_object import PlotObject
from ..properties import HasProps
from ..properties import Any, Int, String, Instance, List, Dict, Either, Bool, Enum
from ..validation.errors import COLUMN_LENGTHS
from .. import validation
from ..util.serialization import transform_column_source_data
from .actions import Callback
class DataSource(PlotObject):
""" A base class for data source types. ``DataSource`` is
not generally useful to instantiate on its own.
"""
column_names = List(String, help="""
An list of names for all the columns in this DataSource.
""")
selected = Dict(String, Dict(String, Any), default={
'0d': {'flag': False, 'indices': []},
'1d': {'indices': []},
'2d': {'indices': []}
}, help="""
A dict to indicate selected indices on different dimensions on this DataSource. Keys are:
    - 0d: indicates whether a Line or Patch glyph has been hit. Value is a
dict with the following keys:
        - flag (boolean): true if glyph was hit, false otherwise
- indices (list): indices hit (if applicable)
    - 1d: indicates whether any other glyph (except [multi]line or
patches) was hit:
- indices (list): indices that were hit/selected
    - 2d: indicates whether a [multi]line or patches glyph was hit:
- indices (list(list)): indices of the lines/patches that were
hit/selected
""")
callback = Instance(Callback, help="""
A callback to run in the browser whenever the selection is changed.
""")
def columns(self, *columns):
""" Returns a ColumnsRef object for a column or set of columns
on this data source.
Args:
*columns
Returns:
ColumnsRef
"""
return ColumnsRef(source=self, columns=list(columns))
class ColumnsRef(HasProps):
""" A utility object to allow referring to a collection of columns
from a specified data source, all together.
"""
source = Instance(DataSource, help="""
A data source to reference.
""")
columns = List(String, help="""
A list of column names to reference from ``source``.
""")
class ColumnDataSource(DataSource):
""" Maps names of columns to sequences or arrays.
If the ColumnDataSource initializer is called with a single
argument that is a dict, that argument is used as the value for
the "data" attribute. For example::
ColumnDataSource(mydict) # same as ColumnDataSource(data=mydict)
.. note::
        There is an implicit assumption that all the columns in a
        given ColumnDataSource have the same length.
"""
data = Dict(String, Any, help="""
Mapping of column names to sequences of data. The data can be, e.g,
Python lists or tuples, NumPy arrays, etc.
""")
def __init__(self, *args, **kw):
""" If called with a single argument that is a dict, treat
that implicitly as the "data" attribute.
"""
if len(args) == 1 and "data" not in kw:
kw["data"] = args[0]
# TODO (bev) invalid to pass args and "data", check and raise exception
raw_data = kw.pop("data", {})
if not isinstance(raw_data, dict):
import pandas as pd
if isinstance(raw_data, pd.DataFrame):
raw_data = self.from_df(raw_data)
else:
raise ValueError("expected a dict or pandas.DataFrame, got %s" % raw_data)
for name, data in raw_data.items():
self.add(data, name)
super(ColumnDataSource, self).__init__(**kw)
# TODO: (bev) why not just return a ColumnDataSource?
@classmethod
def from_df(cls, data):
""" Create a ``dict`` of columns from a Pandas DataFrame,
suitable for creating a ColumnDataSource.
Args:
data (DataFrame) : data to convert
Returns:
dict(str, list)
"""
index = data.index
new_data = {}
for colname in data:
new_data[colname] = data[colname].tolist()
if index.name:
new_data[index.name] = index.tolist()
elif index.names and not all([x is None for x in index.names]):
new_data["_".join(index.names)] = index.tolist()
else:
new_data["index"] = index.tolist()
return new_data
def to_df(self):
""" Convert this data source to pandas dataframe.
If ``column_names`` is set, use those. Otherwise let Pandas
infer the column names. The ``column_names`` property can be
used both to order and filter the columns.
Returns:
DataFrame
"""
import pandas as pd
if self.column_names:
return pd.DataFrame(self.data, columns=self.column_names)
else:
return pd.DataFrame(self.data)
def add(self, data, name=None):
""" Appends a new column of data to the data source.
Args:
data (seq) : new data to add
name (str, optional) : column name to use.
                If not supplied, generate a name of the form "Series ####"
Returns:
str: the column name used
"""
if name is None:
n = len(self.data)
while "Series %d"%n in self.data:
n += 1
name = "Series %d"%n
self.column_names.append(name)
self.data[name] = data
return name
def vm_serialize(self, changed_only=True):
attrs = super(ColumnDataSource, self).vm_serialize(changed_only=changed_only)
if 'data' in attrs:
attrs['data'] = transform_column_source_data(attrs['data'])
return attrs
def remove(self, name):
""" Remove a column of data.
Args:
name (str) : name of the column to remove
Returns:
None
.. note::
If the column name does not exist, a warning is issued.
"""
try:
self.column_names.remove(name)
del self.data[name]
except (ValueError, KeyError):
import warnings
warnings.warn("Unable to find column '%s' in data source" % name)
def push_notebook(self):
""" Update date for a plot in the IPthon notebook in place.
This function can be be used to update data in plot data sources
in the IPython notebook, without having to use the Bokeh server.
Returns:
None
.. warning::
The current implementation leaks memory in the IPython notebook,
due to accumulating JS code. This function typically works well
with light UI interactions, but should not be used for continuously
updating data. See :bokeh-issue:`1732` for more details and to
track progress on potential fixes.
"""
from IPython.core import display
from bokeh.protocol import serialize_json
id = self.ref['id']
model = self.ref['type']
json = serialize_json(self.vm_serialize())
js = """
var ds = Bokeh.Collections('{model}').get('{id}');
var data = {json};
ds.set(data);
""".format(model=model, id=id, json=json)
display.display_javascript(js, raw=True)
@validation.error(COLUMN_LENGTHS)
def _check_column_lengths(self):
lengths = set(len(x) for x in self.data.values())
if len(lengths) > 1:
return str(self)
class RemoteSource(DataSource):
data_url = String(help="""
The URL to the endpoint for the data.
""")
data = Dict(String, Any, help="""
Additional data to include directly in this data source object. The
columns provided here are merged with those from the Bokeh server.
""")
polling_interval = Int(help="""
polling interval for updating data source in milliseconds
""")
class AjaxDataSource(RemoteSource):
method = Enum('POST', 'GET', help="http method - GET or POST")
mode = Enum("replace", "append", help="""
Whether to append new data to existing data (up to ``max_size``),
or to replace existing data entirely.
""")
max_size = Int(help="""
    Maximum size of the data array being kept after each pull request.
    Beyond that size, the data will be right-shifted.
""")
if_modified = Bool(False, help="""
Whether to include an ``If-Modified-Since`` header in AJAX requests
to the server. If this header is supported by the server, then only
new data since the last request will be returned.
""")
class BlazeDataSource(RemoteSource):
#blaze parts
expr = Dict(String, Any(), help="""
blaze expression graph in json form
""")
namespace = Dict(String, Any(), help="""
namespace in json form for evaluating blaze expression graph
""")
local = Bool(help="""
Whether this data source is hosted by the bokeh server or not.
""")
def from_blaze(self, remote_blaze_obj, local=True):
from blaze.server import to_tree
# only one Client object, can hold many datasets
assert len(remote_blaze_obj._leaves()) == 1
leaf = remote_blaze_obj._leaves()[0]
blaze_client = leaf.data
json_expr = to_tree(remote_blaze_obj, {leaf : ':leaf'})
self.data_url = blaze_client.url + "/compute.json"
self.local = local
self.expr = json_expr
def to_blaze(self):
from blaze.server.client import Client
from blaze.server import from_tree
from blaze import Data
# hacky - blaze urls have `compute.json` in it, but we need to strip it off
# to feed it into the blaze client lib
c = Client(self.data_url.rsplit('compute.json', 1)[0])
d = Data(c)
return from_tree(self.expr, {':leaf' : d})
class ServerDataSource(BlazeDataSource):
""" A data source that referes to data located on a Bokeh server.
The data from the server is loaded on-demand by the client.
"""
    # Parameters of data transformation operations
    # The 'Any' is used to pass primitives around.
# TODO: (jc) Find/create a property type for 'any primitive/atomic value'
transform = Dict(String,Either(Instance(PlotObject), Any), help="""
    Parameters of the data transformation operations.
    The associated value is minimally a tag that says which downsample routine
to use. For some downsamplers, parameters are passed this way too.
""")
| bsd-3-clause |
guillermo-carrasco/bcbio-nextgen-vm | setup.py | 2 | 2841 | #!/usr/bin/env python
import os
import shutil
import sys
from setuptools import setup, find_packages
version = "0.1.0a"
def write_version_py():
version_py = os.path.join(os.path.dirname(__file__), "bcbiovm", "version.py")
try:
import subprocess
p = subprocess.Popen(["git", "rev-parse", "--short", "HEAD"],
stdout=subprocess.PIPE)
githash = p.stdout.read().strip()
except:
githash = ""
with open(version_py, "w") as out_handle:
out_handle.write("\n".join(['__version__ = "%s"' % version,
'__git_revision__ = "%s"' % githash]))
write_version_py()
if "--record=/dev/null" in sys.argv: # conda build
install_requires = []
else:
install_requires = [
"matplotlib", "pandas", "paramiko", "six", "PyYAML",
"pythonpy", "bcbio-nextgen"]
setup(name="bcbio-nextgen-vm",
version=version,
author="Brad Chapman and bcbio-nextgen contributors",
description="Run bcbio-nextgen genomic sequencing analyses using isolated containers and virtual machines",
license="MIT",
url="https://github.com/chapmanb/bcbio-nextgen-vm",
packages=find_packages(),
scripts=["scripts/bcbio_vm.py"],
install_requires=install_requires)
def ansible_pb_files(ansible_pb_dir):
"""Retrieve ansible files for installation. Derived from elasticluster setup.
"""
ansible_data = []
for (dirname, dirnames, filenames) in os.walk(ansible_pb_dir):
tmp = []
for fname in filenames:
if fname.startswith(".git"): continue
tmp.append(os.path.join(dirname, fname))
ansible_data.append((os.path.join("share", "bcbio-vm", dirname), tmp))
return ansible_data
def elasticluster_config_files(base_dir):
"""Retrieve example elasticluster config files for installation.
"""
return [(os.path.join("share", "bcbio-vm", base_dir),
[os.path.join(base_dir, x) for x in os.listdir(base_dir)])]
if __name__ == "__main__":
"""Install ansible playbooks and other associated data files.
"""
if sys.argv[1] in ["develop", "install"]:
for dirname, fnames in ansible_pb_files("ansible") + elasticluster_config_files("elasticluster"):
dirname = os.path.join(os.path.abspath(sys.prefix), dirname)
if not os.path.exists(dirname):
os.makedirs(dirname)
for fname in fnames:
if sys.argv[1] == "develop":
link_path = os.path.join(dirname, os.path.basename(fname))
if not os.path.exists(link_path):
link_target = os.path.join(os.getcwd(), fname)
os.symlink(link_target, link_path)
else:
shutil.copy(fname, dirname)
| mit |
mindriot101/matplotlib-pngrenderer | testing/test_png_output.py | 1 | 2075 | from __future__ import print_function
import os
import sys
sys.path.insert(0, '.')
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
import zipfile
from pngrenderer.core import PNGRenderer
from pngrenderer.context import png_render
def build_plot():
renderer = PNGRenderer(os.path.join(
os.path.dirname(__file__), "out"))
xdata = np.arange(10)
ydata = xdata ** 2
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(xdata, ydata, 'r-')
out_fname = os.path.join(os.path.dirname(__file__), "out.zip")
return (fig, ax, renderer, out_fname)
def setup_function(function):
print("Setup")
fig, ax, renderer, out_fname = build_plot()
function.func_globals['fig'] = fig
function.func_globals['ax'] = ax
function.func_globals['renderer'] = renderer
function.func_globals['out_fname'] = out_fname
def teardown_function(function):
print("Teardown")
try:
os.remove(function.func_globals['out_fname'])
except OSError:
pass
def zip_contents(fname, test_contents):
with zipfile.ZipFile(fname) as infile:
return infile.namelist() == test_contents
def test_single_page():
renderer.save_page("first.png", fig)
renderer.render()
assert os.path.isfile(out_fname)
assert zip_contents(out_fname, ['first.png'])
def test_multiple_pages():
renderer.save_page("first.png", fig)
renderer.save_page("second.png", fig)
renderer.render()
assert os.path.isfile(out_fname)
assert zip_contents(out_fname, ['first.png', 'second.png'])
def test_without_explicit_figure():
renderer.save_page("first.png")
renderer.render()
assert zip_contents(out_fname, ['first.png'])
def test_context_manager():
stub = os.path.join(os.path.dirname(__file__), "out")
with png_render(stub) as png_renderer:
build_plot()
png_renderer.save_page("first.png")
assert zip_contents(stub + '.zip', ['first.png'])
def test_savefig_alias():
assert renderer.save_page == renderer.savefig
| mit |
PatrickOReilly/scikit-learn | sklearn/utils/tests/test_estimator_checks.py | 69 | 3894 | import scipy.sparse as sp
import numpy as np
import sys
from sklearn.externals.six.moves import cStringIO as StringIO
from sklearn.base import BaseEstimator, ClassifierMixin
from sklearn.utils.testing import assert_raises_regex, assert_true
from sklearn.utils.estimator_checks import check_estimator
from sklearn.utils.estimator_checks import check_estimators_unfitted
from sklearn.ensemble import AdaBoostClassifier
from sklearn.linear_model import MultiTaskElasticNet
from sklearn.utils.validation import check_X_y, check_array
class CorrectNotFittedError(ValueError):
"""Exception class to raise if estimator is used before fitting.
Like NotFittedError, it inherits from ValueError, but not from
AttributeError. Used for testing only.
"""
class BaseBadClassifier(BaseEstimator, ClassifierMixin):
def fit(self, X, y):
return self
def predict(self, X):
return np.ones(X.shape[0])
class NoCheckinPredict(BaseBadClassifier):
def fit(self, X, y):
X, y = check_X_y(X, y)
return self
class NoSparseClassifier(BaseBadClassifier):
def fit(self, X, y):
X, y = check_X_y(X, y, accept_sparse=['csr', 'csc'])
if sp.issparse(X):
raise ValueError("Nonsensical Error")
return self
def predict(self, X):
X = check_array(X)
return np.ones(X.shape[0])
class CorrectNotFittedErrorClassifier(BaseBadClassifier):
def fit(self, X, y):
X, y = check_X_y(X, y)
self.coef_ = np.ones(X.shape[1])
return self
def predict(self, X):
if not hasattr(self, 'coef_'):
raise CorrectNotFittedError("estimator is not fitted yet")
X = check_array(X)
return np.ones(X.shape[0])
def test_check_estimator():
# tests that the estimator actually fails on "bad" estimators.
# not a complete test of all checks, which are very extensive.
# check that we have a set_params and can clone
msg = "it does not implement a 'get_params' methods"
assert_raises_regex(TypeError, msg, check_estimator, object)
# check that we have a fit method
msg = "object has no attribute 'fit'"
assert_raises_regex(AttributeError, msg, check_estimator, BaseEstimator)
# check that fit does input validation
msg = "TypeError not raised by fit"
assert_raises_regex(AssertionError, msg, check_estimator, BaseBadClassifier)
# check that predict does input validation (doesn't accept dicts in input)
msg = "Estimator doesn't check for NaN and inf in predict"
assert_raises_regex(AssertionError, msg, check_estimator, NoCheckinPredict)
# check for sparse matrix input handling
name = NoSparseClassifier.__name__
msg = "Estimator " + name + " doesn't seem to fail gracefully on sparse data"
# the check for sparse input handling prints to the stdout,
# instead of raising an error, so as not to remove the original traceback.
# that means we need to jump through some hoops to catch it.
old_stdout = sys.stdout
string_buffer = StringIO()
sys.stdout = string_buffer
try:
check_estimator(NoSparseClassifier)
except:
pass
finally:
sys.stdout = old_stdout
assert_true(msg in string_buffer.getvalue())
# doesn't error on actual estimator
check_estimator(AdaBoostClassifier)
check_estimator(MultiTaskElasticNet)
def test_check_estimators_unfitted():
# check that a ValueError/AttributeError is raised when calling predict
# on an unfitted estimator
msg = "AttributeError or ValueError not raised by predict"
assert_raises_regex(AssertionError, msg, check_estimators_unfitted,
"estimator", NoSparseClassifier)
# check that CorrectNotFittedError inherit from either ValueError
# or AttributeError
check_estimators_unfitted("estimator", CorrectNotFittedErrorClassifier)
| bsd-3-clause |
henry-ngo/VIP | vip_hci/pca/pca_fullfr.py | 1 | 48009 | #! /usr/bin/env python
"""
PCA algorithm performed on full frame for ADI, RDI or SDI (IFS data).
"""
from __future__ import division, print_function
__author__ = 'C. Gomez @ ULg'
__all__ = ['pca',
'pca_incremental',
'pca_optimize_snr']
import numpy as np
import pandas as pd
import pyprind
from skimage import draw
from astropy.io import fits
from matplotlib import pyplot as plt
from sklearn.decomposition import IncrementalPCA
from .svd import svd_wrapper
from .pca_local import find_indices, compute_pa_thresh
from .utils_pca import (prepare_matrix, reshape_matrix, pca_annulus,
scale_cube_for_pca)
from ..preproc import (cube_derotate, cube_collapse, check_PA_vector,
check_scal_vector)
from ..conf import timing, time_ini, check_enough_memory, get_available_memory
from ..var import frame_center, dist
from ..stats import descriptive_stats
from .. import phot
import warnings
warnings.filterwarnings("ignore", category=Warning)
def pca(cube, angle_list=None, cube_ref=None, scale_list=None, ncomp=1, ncomp2=1,
svd_mode='lapack', scaling=None, mask_center_px=None, source_xy=None,
delta_rot=1, fwhm=4, collapse='median', check_mem=True,
full_output=False, verbose=True, debug=False):
""" Algorithm where the reference PSF and the quasi-static speckle pattern
are modeled using Principal Component Analysis. Depending on the input
parameters this PCA function can work in ADI, RDI or SDI (IFS data) mode.
ADI:
if neither a reference cube or a scaling vector are provided, the target
cube itself is used to learn the PCs and to obtain a low-rank approximation
reference PSF (star + speckles).
RDI + ADI:
if a reference cube is provided (triggered by *cube_ref* parameter), its
PCs are used to project the target frames and to obtain the reference PSF
(star + speckles).
SDI (IFS data):
if a scaling vector is provided (triggered by *scale_list* parameter) and
    the cube is a 3d array, it's assumed it contains 1 frame at multiple
    spectral channels. The frames are re-scaled to match the longest wavelength.
Then PCA is applied on this re-scaled cube where the planet will move
radially.
SDI (IFS data) + ADI:
if a scaling vector is provided (triggered by *scale_list* parameter) and
the cube is a 4d array [# channels, # adi-frames, Y, X], its assumed it
contains several multi-spectral ADI frames. A double PCA is performed, first
on each ADI multi-spectral frame (using ``ncomp`` PCs), then using each ADI
residual to exploit the rotation (using ``ncomp2`` PCs). If ``ncomp2`` is
None only one PCA is performed on the ADI multi-spectral frames and then
the resulting frames are de-rotated and combined.
    Several SVD libraries can be used, with almost the same result (randsvd
    stands for randomized SVD) but different computing times.
References
----------
KLIP: http://arxiv.org/abs/1207.4197
pynpoint: http://arxiv.org/abs/1207.6637
IFS data: http://arxiv.org/abs/1409.6388
Parameters
----------
cube : array_like, 3d
Input cube.
angle_list : array_like, 1d
Corresponding parallactic angle for each frame.
cube_ref : array_like, 3d, optional
Reference library cube. For Reference Star Differential Imaging.
    scale_list : array_like, 1d, optional
Scaling factors in case of IFS data. Normally, the scaling factors are
the central channel wavelength divided by the shortest wavelength in the
cube. More thorough approaches can be used to get the scaling factors.
ncomp : int, optional
How many PCs are used as a lower-dimensional subspace to project the
target frames. In ADI ncomp is the number of PCs from the target data,
in RDI ncomp is the number of PCs from the reference data and in IFS
ncomp is the number of PCs from the library of spectral channels.
ncomp2 : int, optional
How many PCs are used for IFS+ADI datacubes in the second stage PCA.
ncomp2 goes up to the number of multi-spectral frames.
svd_mode : {'lapack', 'arpack', 'eigen', 'randsvd', 'cupy', 'eigencupy', 'randcupy'}, str
Switch for the SVD method/library to be used. ``lapack`` uses the LAPACK
linear algebra library through Numpy and it is the most conventional way
of computing the SVD (deterministic result computed on CPU). ``arpack``
uses the ARPACK Fortran libraries accessible through Scipy (computation
on CPU). ``eigen`` computes the singular vectors through the
eigendecomposition of the covariance M.M' (computation on CPU).
``randsvd`` uses the randomized_svd algorithm implemented in Sklearn
(computation on CPU). ``cupy`` uses the Cupy library for GPU computation
of the SVD as in the LAPACK version. ``eigencupy`` offers the same
method as with the ``eigen`` option but on GPU (through Cupy).
        ``randcupy`` is an adaptation of the randomized_svd algorithm, where all
the computations are done on a GPU.
scaling : {None, 'temp-mean', 'spat-mean', 'temp-standard', 'spat-standard'}
With None, no scaling is performed on the input data before SVD. With
"temp-mean" then temporal px-wise mean subtraction is done, with
"spat-mean" then the spatial mean is subtracted, with "temp-standard"
temporal mean centering plus scaling to unit variance is done and with
"spat-standard" spatial mean centering plus scaling to unit variance is
performed.
mask_center_px : None or int
If None, no masking is done. If an integer > 1 then this value is the
radius of the circular mask.
source_xy : tuple of int, optional
For ADI PCA, this triggers a frame rejection in the PCA library.
source_xy are the coordinates X,Y of the center of the annulus where the
PA criterion will be used to reject frames from the library.
fwhm : float, optional
        Known size of the FWHM in pixels to be used. Default value is 4.
delta_rot : int, optional
Factor for increasing the parallactic angle threshold, expressed in FWHM.
        Default is 1 (excludes 1 FWHM on each side of the considered frame).
collapse : {'median', 'mean', 'sum', 'trimmean'}, str optional
Sets the way of collapsing the frames for producing a final image.
check_mem : {True, False}, bool optional
        If True, it checks that the input cube(s) are smaller than the available
system memory.
full_output: boolean, optional
Whether to return the final median combined image only or with other
intermediate arrays.
verbose : {True, False}, bool optional
If True prints intermediate info and timing.
debug : {False, True}, bool optional
Whether to print debug information or not.
Returns
-------
frame : array_like, 2d
Median combination of the de-rotated/re-scaled residuals cube.
    If full_output is True then it returns: pcs, recon, residuals_cube,
residuals_cube_ and frame. The PCs are not returned when a PA rejection
criterion is applied (when *source_xy* is entered).
pcs : array_like, 3d
Cube with the selected principal components.
recon : array_like, 3d
Reconstruction of frames using the selected PCs.
    residuals_cube : array_like, 3d
Cube of residuals.
    residuals_cube_ : array_like, 3d
Cube of residuals after de-rotation/re-scaling.
    In the case of IFS+ADI data it returns: residuals_cube_channels,
    residuals_cube_channels_ and frame.
residuals_cube_channels : array_like
Cube with the residuals of the first stage PCA.
residuals_cube_channels_ : array_like
Cube with the residuals of the second stage PCA after de-rotation.
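    Examples
    --------
    Minimal, purely illustrative calls on synthetic data (array sizes, angles,
    scaling factors and numbers of PCs below are hypothetical placeholders):
    >>> import numpy as np
    >>> cube = np.random.randn(30, 101, 101)      # 30 ADI frames of 101x101 px
    >>> angs = np.linspace(0, 40, 30)             # synthetic parallactic angles
    >>> fr_adi = pca(cube, angle_list=angs, ncomp=5, verbose=False)
    For RDI+ADI, a reference cube with frames of the same size is passed through
    ``cube_ref``:
    >>> ref = np.random.randn(20, 101, 101)
    >>> fr_rdi = pca(cube, angle_list=angs, cube_ref=ref, ncomp=5, verbose=False)
    For SDI on a single multi-spectral (IFS) frame, a vector of scaling factors
    is passed through ``scale_list`` instead of the angles:
    >>> cube_ifs = np.random.randn(39, 101, 101)  # 39 spectral channels
    >>> scal = np.linspace(1.3, 1.0, 39)          # hypothetical scaling factors
    >>> fr_sdi = pca(cube_ifs, scale_list=scal, ncomp=5, verbose=False)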
"""
#***************************************************************************
# Helping function
#***************************************************************************
def subtract_projection(cube, cube_ref, ncomp, scaling, mask_center_px,
debug, svd_mode, verbose, full_output, indices=None,
frame=None):
""" Subtracts the reference PSF after PCA projection. Returns the cube
of residuals.
"""
_, y, x = cube.shape
if indices is not None and frame is not None:
matrix = prepare_matrix(cube, scaling, mask_center_px, mode='fullfr',
verbose=False)
else:
matrix = prepare_matrix(cube, scaling, mask_center_px,
mode='fullfr', verbose=verbose)
if cube_ref is not None:
ref_lib = prepare_matrix(cube_ref, scaling, mask_center_px,
mode = 'fullfr', verbose=verbose)
else:
ref_lib = matrix
if indices is not None and frame is not None: # one row (frame) at a time
ref_lib = ref_lib[indices]
if ref_lib.shape[0] <= 10:
msg = 'Too few frames left in the PCA library (<10). '
msg += 'Try decreasing the parameter delta_rot'
raise RuntimeError(msg)
curr_frame = matrix[frame] # current frame
V = svd_wrapper(ref_lib, svd_mode, ncomp, False, False)
transformed = np.dot(curr_frame, V.T)
reconstructed = np.dot(transformed.T, V)
residuals = curr_frame - reconstructed
if full_output:
return ref_lib.shape[0], residuals, reconstructed
else:
return ref_lib.shape[0], residuals
else: # the whole matrix
V = svd_wrapper(ref_lib, svd_mode, ncomp, debug, verbose)
if verbose: timing(start_time)
transformed = np.dot(V, matrix.T)
reconstructed = np.dot(transformed.T, V)
residuals = matrix - reconstructed
residuals_res = reshape_matrix(residuals,y,x)
if full_output:
return residuals_res, reconstructed, V
else:
return residuals_res
#***************************************************************************
# Validation of input parameters
#***************************************************************************
if not cube.ndim>2:
raise TypeError('Input array is not a 3d or 4d array')
if angle_list is not None:
if (cube.ndim==3 and not (cube.shape[0] == angle_list.shape[0])) or \
(cube.ndim==4 and not (cube.shape[1] == angle_list.shape[0])):
msg = "Angle list vector has wrong length. It must equal the number"
msg += " frames in the cube."
raise TypeError(msg)
    if source_xy is not None and (delta_rot is None or fwhm is None):
msg = 'Delta_rot or fwhm parameters missing. They are needed for the '
msg += 'PA-based rejection of frames from the library'
raise TypeError(msg)
if cube_ref is not None:
if not cube_ref.ndim==3:
raise TypeError('Input reference array is not a cube or 3d array')
if not cube_ref.shape[1]==cube.shape[1]:
msg = 'Frames in reference cube and target cube have different size'
raise TypeError(msg)
if scale_list is not None:
raise RuntimeError('RDI + SDI (IFS) is not a valid mode')
n, y, x = cube_ref.shape
else:
if scale_list is not None:
if cube.ndim==3:
z, y_in, x_in = cube.shape
if cube.ndim==4:
z, n, y_in, x_in = cube.shape
else:
n, y, x = cube.shape
if angle_list is None and scale_list is None:
        msg = 'Either the angle list or the scale factor list must be provided'
raise ValueError(msg)
if scale_list is not None:
if np.array(scale_list).ndim>1:
raise TypeError('Wrong scaling factors list. Must be a vector')
if not scale_list.shape[0]==cube.shape[0]:
raise TypeError('Scaling factors vector has wrong length')
if verbose: start_time = time_ini()
if check_mem:
input_bytes = cube.nbytes
if cube_ref is not None:
input_bytes += cube_ref.nbytes
if not check_enough_memory(input_bytes, 1.5, False):
msgerr = 'Input cubes are larger than available system memory. '
msgerr += 'Set check_mem=False to override this memory check or '
msgerr += 'use the incremental PCA (for ADI)'
raise RuntimeError(msgerr)
if angle_list is not None: angle_list = check_PA_vector(angle_list)
#***************************************************************************
# scale_list triggers SDI(IFS)
#***************************************************************************
if scale_list is not None:
if ncomp > z:
ncomp = min(ncomp, z)
msg = 'Number of PCs too high (max PCs={}), using instead {:} PCs.'
print(msg.format(z, ncomp))
scale_list = check_scal_vector(scale_list)
#***********************************************************************
# RDI (IFS): case of 3d cube with multiple spectral channels
#***********************************************************************
if cube.ndim==3:
if verbose:
print('{:} spectral channels in IFS cube'.format(z))
# cube has been re-scaled to have the planets moving radially
cube, _, y, x, _, _ = scale_cube_for_pca(cube, scale_list)
residuals_result = subtract_projection(cube, None, ncomp, scaling,
mask_center_px,debug,svd_mode,
verbose, full_output)
if full_output:
residuals_cube = residuals_result[0]
reconstructed = residuals_result[1]
V = residuals_result[2]
pcs = reshape_matrix(V, y, x)
recon = reshape_matrix(reconstructed, y, x)
residuals_cube_,frame,_,_,_,_ = scale_cube_for_pca(residuals_cube,
scale_list,
full_output=full_output,
inverse=True, y_in=y_in,
x_in=x_in)
else:
residuals_cube = residuals_result
frame = scale_cube_for_pca(residuals_cube, scale_list,
full_output=full_output,
inverse=True, y_in=y_in, x_in=x_in)
if verbose:
print('Done re-scaling and combining')
timing(start_time)
#***********************************************************************
# SDI (IFS) + ADI: cube with multiple spectral channels + rotation
# shape of cube: [# channels, # adi-frames, Y, X]
#***********************************************************************
elif cube.ndim==4 and angle_list is not None:
if verbose:
print('{:} spectral channels in IFS cube'.format(z))
print('{:} ADI frames in all channels'.format(n))
residuals_cube_channels = np.zeros((n, y_in, x_in))
bar = pyprind.ProgBar(n, stream=1,
title='Looping through ADI frames')
for i in range(n):
cube_res, _, y, x, _, _ = scale_cube_for_pca(cube[:,i,:,:],
scale_list)
residuals_result = subtract_projection(cube_res, None, ncomp,
scaling, mask_center_px,
debug, svd_mode, False,
full_output)
if full_output:
residuals_cube = residuals_result[0]
_,frame,_,_,_,_ = scale_cube_for_pca(residuals_cube,
scale_list,
full_output=full_output,
inverse=True, y_in=y_in,
x_in=x_in)
else:
residuals_cube = residuals_result
frame = scale_cube_for_pca(residuals_cube, scale_list,
full_output=full_output,
inverse=True,y_in=y_in,x_in=x_in)
residuals_cube_channels[i] = frame
bar.update()
# de-rotation of the PCA processed channels
            if ncomp2 is not None and ncomp2 > z:
ncomp2 = min(ncomp2, z)
msg = 'Number of PCs too high (max PCs={}), using instead {:} PCs.'
print(msg.format(n, ncomp2))
            if ncomp2 is None:
residuals_cube_channels_ = cube_derotate(residuals_cube_channels,
angle_list)
frame = cube_collapse(residuals_cube_channels_, mode=collapse)
if verbose:
print('De-rotating and combining')
timing(start_time)
else:
res_ifs_adi = subtract_projection(residuals_cube_channels, None,
ncomp2, scaling, mask_center_px,
debug, svd_mode, False,
full_output)
residuals_cube_channels_ = cube_derotate(res_ifs_adi, angle_list)
frame = cube_collapse(residuals_cube_channels_, mode=collapse)
if verbose:
msg = 'Done PCA per ADI multi-spectral frame, de-rotating '
msg += 'and combining'
print(msg)
timing(start_time)
#***************************************************************************
# cube_ref triggers RDI+ADI
#***************************************************************************
elif cube_ref is not None:
if ncomp > n:
ncomp = min(ncomp,n)
msg = 'Number of PCs too high (max PCs={}), using instead {:} PCs.'
print(msg.format(n, ncomp))
residuals_result = subtract_projection(cube, cube_ref, ncomp, scaling,
mask_center_px, debug, svd_mode,
verbose, full_output)
if full_output:
residuals_cube = residuals_result[0]
reconstructed = residuals_result[1]
V = residuals_result[2]
pcs = reshape_matrix(V, y, x)
recon = reshape_matrix(reconstructed, y, x)
else:
residuals_cube = residuals_result
residuals_cube_ = cube_derotate(residuals_cube, angle_list)
frame = cube_collapse(residuals_cube_, mode=collapse)
if verbose:
print('Done de-rotating and combining')
timing(start_time)
#***************************************************************************
# normal ADI PCA
#***************************************************************************
else:
if ncomp > n:
ncomp = min(ncomp,n)
msg = 'Number of PCs too high (max PCs={}), using instead {:} PCs.'
print(msg.format(n, ncomp))
if source_xy is None:
residuals_result = subtract_projection(cube, None, ncomp, scaling,
mask_center_px, debug,
svd_mode,verbose,full_output)
if full_output:
residuals_cube = residuals_result[0]
reconstructed = residuals_result[1]
V = residuals_result[2]
pcs = reshape_matrix(V, y, x)
recon = reshape_matrix(reconstructed, y, x)
else:
residuals_cube = residuals_result
else:
nfrslib = []
residuals_cube = np.zeros_like(cube)
recon_cube = np.zeros_like(cube)
yc, xc = frame_center(cube[0], False)
x1, y1 = source_xy
ann_center = dist(yc, xc, y1, x1)
pa_thr = compute_pa_thresh(ann_center, fwhm, delta_rot)
mid_range = np.abs(np.amax(angle_list) - np.amin(angle_list))/2
if pa_thr >= mid_range - mid_range * 0.1:
new_pa_th = float(mid_range - mid_range * 0.1)
if verbose:
msg = 'PA threshold {:.2f} is too big, will be set to {:.2f}'
print(msg.format(pa_thr, new_pa_th))
pa_thr = new_pa_th
for frame in range(n):
if ann_center > fwhm*3: # TODO: 3 optimal value? new parameter?
ind = find_indices(angle_list, frame, pa_thr, True)
else:
ind = find_indices(angle_list, frame, pa_thr, False)
res_result = subtract_projection(cube, None, ncomp, scaling,
mask_center_px, debug,
svd_mode,verbose,full_output,
ind, frame)
if full_output:
nfrslib.append(res_result[0])
residual_frame = res_result[1]
recon_frame = res_result[2]
residuals_cube[frame] = residual_frame.reshape(cube[0].shape)
recon_cube[frame] = recon_frame.reshape(cube[0].shape)
else:
nfrslib.append(res_result[0])
residual_frame = res_result[1]
residuals_cube[frame] = residual_frame.reshape(cube[0].shape)
# number of frames in library printed for each annular quadrant
if verbose:
descriptive_stats(nfrslib, verbose=verbose, label='Size LIB: ')
residuals_cube_ = cube_derotate(residuals_cube, angle_list)
frame = cube_collapse(residuals_cube_, mode=collapse)
if verbose:
print('Done de-rotating and combining')
timing(start_time)
if full_output and cube.ndim<4:
if source_xy is not None:
return recon_cube, residuals_cube, residuals_cube_, frame
else:
return pcs, recon, residuals_cube, residuals_cube_, frame
elif full_output and cube.ndim==4:
return residuals_cube_channels, residuals_cube_channels_, frame
else:
return frame
def pca_optimize_snr(cube, angle_list, source_xy, fwhm, cube_ref=None,
mode='fullfr', annulus_width=20, range_pcs=None,
svd_mode='lapack', scaling=None, mask_center_px=None,
fmerit='px', min_snr=0, collapse='median', verbose=True,
full_output=False, debug=False, plot=True, save_plot=None,
plot_title=None):
""" Optimizes the number of principal components by doing a simple grid
search measuring the SNR for a given position in the frame (ADI, RDI).
The metric used could be the given pixel's SNR, the maximum SNR in a FWHM
circular aperture centered on the given coordinates or the mean SNR in the
same circular aperture. They yield slightly different results.
Parameters
----------
cube : array_like, 3d
Input cube.
angle_list : array_like, 1d
Corresponding parallactic angle for each frame.
source_xy : tuple of floats
X and Y coordinates of the pixel where the source is located and whose
SNR is going to be maximized.
fwhm : float
Size of the PSF's FWHM in pixels.
cube_ref : array_like, 3d, optional
Reference library cube. For Reference Star Differential Imaging.
mode : {'fullfr', 'annular'}, optional
Mode for PCA processing (full-frame or just in an annulus). There is a
catch: the optimal number of PCs in full-frame may not coincide with the
one in annular mode. This is due to the fact that the annulus matrix is
smaller (less noisy, probably not containing the central star) and also
        its intrinsic rank (smaller than in the full-frame case).
annulus_width : float, optional
Width in pixels of the annulus in the case of the "annular" mode.
range_pcs : tuple, optional
The interval of PCs to be tried. If None then the algorithm will find
a clever way to sample from 1 to 200 PCs. If a range is entered (as
(PC_INI, PC_MAX)) a sequential grid will be evaluated between PC_INI
and PC_MAX with step of 1. If a range is entered (as
(PC_INI, PC_MAX, STEP)) a grid will be evaluated between PC_INI and
PC_MAX with the given STEP.
svd_mode : {'lapack', 'arpack', 'eigen', 'randsvd', 'cupy', 'eigencupy', 'randcupy'}, str
Switch for the SVD method/library to be used. ``lapack`` uses the LAPACK
linear algebra library through Numpy and it is the most conventional way
of computing the SVD (deterministic result computed on CPU). ``arpack``
uses the ARPACK Fortran libraries accessible through Scipy (computation
on CPU). ``eigen`` computes the singular vectors through the
eigendecomposition of the covariance M.M' (computation on CPU).
``randsvd`` uses the randomized_svd algorithm implemented in Sklearn
(computation on CPU). ``cupy`` uses the Cupy library for GPU computation
of the SVD as in the LAPACK version. ``eigencupy`` offers the same
method as with the ``eigen`` option but on GPU (through Cupy).
        ``randcupy`` is an adaptation of the randomized_svd algorithm, where all
the computations are done on a GPU.
scaling : {None, 'temp-mean', 'spat-mean', 'temp-standard', 'spat-standard'}
With None, no scaling is performed on the input data before SVD. With
"temp-mean" then temporal px-wise mean subtraction is done, with
"spat-mean" then the spatial mean is subtracted, with "temp-standard"
temporal mean centering plus scaling to unit variance is done and with
"spat-standard" spatial mean centering plus scaling to unit variance is
performed.
mask_center_px : None or int, optional
If None, no masking is done. If an integer > 1 then this value is the
radius of the circular mask.
fmerit : {'px', 'max', 'mean'}
The function of merit to be maximized. 'px' is *source_xy* pixel's SNR,
'max' the maximum SNR in a FWHM circular aperture centered on
*source_xy* and 'mean' is the mean SNR in the same circular aperture.
min_snr : float
Value for the minimum acceptable SNR. Setting this value higher will
reduce the steps.
collapse : {'median', 'mean', 'sum', 'trimmean'}, str optional
Sets the way of collapsing the frames for producing a final image.
verbose : {True, False}, bool optional
If True prints intermediate info and timing.
full_output : {False, True} bool optional
If True it returns the optimal number of PCs, the final PCA frame for
the optimal PCs and a cube with all the final frames for each number
of PC that was tried.
debug : {False, True}, bool optional
Whether to print debug information or not.
plot : {True, False}, optional
Whether to plot the SNR and flux as functions of PCs and final PCA
frame or not.
save_plot: string
If provided, the pc optimization plot will be saved to that path.
plot_title: string
        If provided, the plot is titled with this string.
Returns
-------
opt_npc : int
Optimal number of PCs for given source.
If full_output is True, the final processed frame, and a cube with all the
PCA frames are returned along with the optimal number of PCs.
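    Examples
    --------
    A purely illustrative call on synthetic data (the coordinates, FWHM and PC
    range below are hypothetical placeholders, not recommended values):
    >>> import numpy as np
    >>> cube = np.random.randn(30, 101, 101)
    >>> angs = np.linspace(0, 40, 30)
    >>> opt_npc = pca_optimize_snr(cube, angs, source_xy=(63, 62), fwhm=4,
    ...                            range_pcs=(1, 10), plot=False, verbose=False)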
"""
def truncate_svd_get_finframe(matrix, angle_list, ncomp, V):
""" Projection, subtraction, derotation plus combination in one frame.
Only for full-frame"""
transformed = np.dot(V[:ncomp], matrix.T)
reconstructed = np.dot(transformed.T, V[:ncomp])
residuals = matrix - reconstructed
frsize = int(np.sqrt(matrix.shape[1])) # only for square frames
residuals_res = reshape_matrix(residuals, frsize, frsize)
residuals_res_der = cube_derotate(residuals_res, angle_list)
frame = cube_collapse(residuals_res_der, mode=collapse)
return frame
def truncate_svd_get_finframe_ann(matrix, indices, angle_list, ncomp, V):
""" Projection, subtraction, derotation plus combination in one frame.
Only for annular mode"""
transformed = np.dot(V[:ncomp], matrix.T)
reconstructed = np.dot(transformed.T, V[:ncomp])
residuals_ann = matrix - reconstructed
residuals_res = np.zeros_like(cube)
residuals_res[:,indices[0],indices[1]] = residuals_ann
residuals_res_der = cube_derotate(residuals_res, angle_list)
frame = cube_collapse(residuals_res_der, mode=collapse)
return frame
def get_snr(matrix, angle_list, y, x, mode, V, fwhm, ncomp, fmerit,
full_output):
if mode=='fullfr':
frame = truncate_svd_get_finframe(matrix, angle_list, ncomp, V)
elif mode=='annular':
frame = truncate_svd_get_finframe_ann(matrix, annind, angle_list,
ncomp, V)
else:
raise RuntimeError('Wrong mode. Choose either full or annular')
if fmerit=='max':
yy, xx = draw.circle(y, x, fwhm/2.)
res = [phot.snr_ss(frame, (x_,y_), fwhm, plot=False, verbose=False,
full_output=True) for y_, x_ in zip(yy, xx)]
snr_pixels = np.array(res)[:,-1]
fluxes = np.array(res)[:,2]
argm = np.argmax(snr_pixels)
if full_output:
# integrated fluxes for the max snr
return np.max(snr_pixels), fluxes[argm], frame
else:
return np.max(snr_pixels), fluxes[argm]
elif fmerit=='px':
res = phot.snr_ss(frame, (x,y), fwhm, plot=False, verbose=False,
full_output=True)
snrpx = res[-1]
fluxpx = np.array(res)[2]
if full_output:
# integrated fluxes for the given px
return snrpx, fluxpx, frame
else:
return snrpx, fluxpx
elif fmerit=='mean':
yy, xx = draw.circle(y, x, fwhm/2.)
res = [phot.snr_ss(frame, (x_,y_), fwhm, plot=False, verbose=False,
full_output=True) for y_, x_ in zip(yy, xx)]
snr_pixels = np.array(res)[:,-1]
fluxes = np.array(res)[:,2]
if full_output:
# mean of the integrated fluxes (shifting the aperture)
return np.mean(snr_pixels), np.mean(fluxes), frame
else:
return np.mean(snr_pixels), np.mean(fluxes)
def grid(matrix, angle_list, y, x, mode, V, fwhm, fmerit, step, inti, intf,
debug, full_output, truncate=True):
nsteps = 0
snrlist = []
pclist = []
fluxlist = []
if full_output: frlist = []
counter = 0
if debug:
print('Step current grid:', step)
print('PCs | SNR')
for pc in range(inti, intf+1, step):
if full_output:
snr, flux, frame = get_snr(matrix, angle_list, y, x, mode, V,
fwhm, pc, fmerit, full_output)
else:
snr, flux = get_snr(matrix, angle_list, y, x, mode, V, fwhm, pc,
fmerit, full_output)
if np.isnan(snr): snr=0
if nsteps>1 and snr<snrlist[-1]: counter += 1
snrlist.append(snr)
pclist.append(pc)
fluxlist.append(flux)
if full_output: frlist.append(frame)
nsteps += 1
if truncate and nsteps>2 and snr<min_snr:
if debug: print('SNR too small')
break
if debug: print('{} {:.3f}'.format(pc, snr))
if truncate and counter==5: break
argm = np.argmax(snrlist)
if len(pclist)==2: pclist.append(pclist[-1]+1)
if debug:
print('Finished current stage')
try:
pclist[argm+1]
print('Interval for next grid: ', pclist[argm-1], 'to',
pclist[argm+1])
except:
print('The optimal SNR seems to be outside of the given PC range')
print()
if argm==0: argm = 1
if full_output:
return argm, pclist, snrlist, fluxlist, frlist
else:
return argm, pclist, snrlist, fluxlist
#---------------------------------------------------------------------------
if not cube.ndim==3:
raise TypeError('Input array is not a cube or 3d array')
if verbose: start_time = time_ini()
n = cube.shape[0]
x, y = source_xy
if range_pcs is not None:
if len(range_pcs)==2:
pcmin, pcmax = range_pcs
pcmax = min(pcmax, n)
step = 1
elif len(range_pcs)==3:
pcmin, pcmax, step = range_pcs
pcmax = min(pcmax, n)
else:
msg = 'Range_pcs tuple must be entered as (PC_INI, PC_MAX, STEP) '
msg += 'or (PC_INI, PC_MAX)'
raise TypeError(msg)
else:
pcmin = 1
pcmax = 200
pcmax = min(pcmax, n)
# Getting `pcmax` principal components a single time
if mode=='fullfr':
matrix = prepare_matrix(cube, scaling, mask_center_px, verbose=False)
if cube_ref is not None:
ref_lib = prepare_matrix(cube_ref, scaling, mask_center_px,
verbose=False)
else:
ref_lib = matrix
elif mode=='annular':
y_cent, x_cent = frame_center(cube[0])
ann_radius = dist(y_cent, x_cent, y, x)
matrix, annind = prepare_matrix(cube, scaling, None, mode='annular',
annulus_radius=ann_radius,
annulus_width=annulus_width,
verbose=False)
if cube_ref is not None:
ref_lib, _ = prepare_matrix(cube_ref, scaling, mask_center_px,
mode='annular', annulus_radius=ann_radius,
annulus_width=annulus_width, verbose=False)
else:
ref_lib = matrix
else:
raise RuntimeError('Wrong mode. Choose either fullfr or annular')
V = svd_wrapper(ref_lib, svd_mode, pcmax, False, verbose)
# sequential grid
if range_pcs is not None:
grid1 = grid(matrix, angle_list, y, x, mode, V, fwhm, fmerit, step,
pcmin, pcmax, debug, full_output, False)
if full_output: argm, pclist, snrlist, fluxlist, frlist = grid1
else: argm, pclist, snrlist, fluxlist = grid1
opt_npc = pclist[argm]
if verbose:
print('Number of steps', len(pclist))
msg = 'Optimal number of PCs = {}, for SNR={}'
print(msg.format(opt_npc, snrlist[argm]))
print()
timing(start_time)
if full_output:
cubeout = np.array((frlist))
# Plot of SNR as function of PCs
if plot:
plt.figure(figsize=(8,4))
ax1 = plt.subplot(211)
ax1.plot(pclist, snrlist, '-', alpha=0.5)
ax1.plot(pclist, snrlist, 'o', alpha=0.5, color='blue')
ax1.set_xlim(np.array(pclist).min(), np.array(pclist).max())
ax1.set_ylim(0, np.array(snrlist).max()+1)
ax1.set_ylabel('SNR')
ax1.minorticks_on()
ax1.grid('on', 'major', linestyle='solid', alpha=0.4)
ax2 = plt.subplot(212)
ax2.plot(pclist, fluxlist, '-', alpha=0.5, color='green')
ax2.plot(pclist, fluxlist, 'o', alpha=0.5, color='green')
ax2.set_xlim(np.array(pclist).min(), np.array(pclist).max())
ax2.set_ylim(0, np.array(fluxlist).max()+1)
ax2.set_xlabel('Principal components')
ax2.set_ylabel('Flux in FWHM ap. [ADUs]')
ax2.minorticks_on()
ax2.grid('on', 'major', linestyle='solid', alpha=0.4)
print()
# automatic "clever" grid
else:
grid1 = grid(matrix, angle_list, y, x, mode, V, fwhm, fmerit,
max(int(pcmax*0.1),1), pcmin, pcmax, debug, full_output)
if full_output: argm, pclist, snrlist, fluxlist, frlist1 = grid1
else: argm, pclist, snrlist, fluxlist = grid1
grid2 = grid(matrix, angle_list, y, x, mode, V, fwhm, fmerit,
max(int(pcmax*0.05),1), pclist[argm-1], pclist[argm+1], debug,
full_output)
if full_output: argm2, pclist2, snrlist2, fluxlist2, frlist2 = grid2
else: argm2, pclist2, snrlist2, fluxlist2 = grid2
grid3 = grid(matrix, angle_list, y, x, mode, V, fwhm, fmerit, 1,
pclist2[argm2-1], pclist2[argm2+1], debug, full_output,
False)
if full_output: _, pclist3, snrlist3, fluxlist3, frlist3 = grid3
else: _, pclist3, snrlist3, fluxlist3 = grid3
argm = np.argmax(snrlist3)
opt_npc = pclist3[argm]
dfr = pd.DataFrame(np.array((pclist+pclist2+pclist3,
snrlist+snrlist2+snrlist3,
fluxlist+fluxlist2+fluxlist3)).T)
dfrs = dfr.sort_values(0)
dfrsrd = dfrs.drop_duplicates()
ind = np.array(dfrsrd.index)
if verbose:
print('Number of evaluated steps', ind.shape[0])
msg = 'Optimal number of PCs = {}, for SNR={}'
print(msg.format(opt_npc, snrlist3[argm]), '\n')
timing(start_time)
if full_output:
cubefrs = np.array((frlist1+frlist2+frlist3))
cubeout = cubefrs[ind]
# Plot of SNR as function of PCs
if plot:
alpha = 0.4
lw = 2
plt.figure(figsize=(6,4))
ax1 = plt.subplot(211)
ax1.plot(np.array(dfrsrd.loc[:,0]), np.array(dfrsrd.loc[:,1]), '-',
alpha=alpha, color='blue', lw=lw)
ax1.plot(np.array(dfrsrd.loc[:,0]), np.array(dfrsrd.loc[:,1]), 'o',
alpha=alpha/2, color='blue')
ax1.set_xlim(np.array(dfrsrd.loc[:,0]).min(), np.array(dfrsrd.loc[:,0]).max())
ax1.set_ylim(0, np.array(dfrsrd.loc[:,1]).max()+1)
#ax1.set_xlabel('')
ax1.set_ylabel('S/N')
ax1.minorticks_on()
ax1.grid('on', 'major', linestyle='solid', alpha=0.2)
if plot_title is not None:
ax1.set_title('Optimal pc: ' + str(opt_npc) + ' for ' + plot_title)
ax2 = plt.subplot(212)
ax2.plot(np.array(dfrsrd.loc[:,0]), np.array(dfrsrd.loc[:,2]), '-',
alpha=alpha, color='green', lw=lw)
ax2.plot(np.array(dfrsrd.loc[:,0]), np.array(dfrsrd.loc[:,2]), 'o',
alpha=alpha/2, color='green')
ax2.set_xlim(np.array(pclist).min(), np.array(pclist).max())
#ax2.set_ylim(0, np.array(fluxlist).max()+1)
ax2.set_xlabel('Principal components')
ax2.set_ylabel('Flux in FWHM aperture')
ax2.minorticks_on()
ax2.set_yscale('log')
ax2.grid('on', 'major', linestyle='solid', alpha=0.2)
#plt.savefig('figure.pdf', dpi=300, bbox_inches='tight')
print()
# Optionally, save the S/N vs. PCs plot
if save_plot is not None:
plt.savefig(save_plot, dpi=100, bbox_inches='tight')
if mode == 'fullfr':
finalfr = pca(cube, angle_list, cube_ref=cube_ref, ncomp=opt_npc,
svd_mode=svd_mode, mask_center_px=mask_center_px,
scaling=scaling, collapse=collapse, verbose=False)
elif mode == 'annular':
finalfr = pca_annulus(cube, angle_list, ncomp=opt_npc,
annulus_width=annulus_width, r_guess=ann_radius,
cube_ref=cube_ref, svd_mode=svd_mode,
scaling=scaling, collapse=collapse)
_ = phot.frame_quick_report(finalfr, fwhm, (x,y), verbose=verbose)
if full_output:
return opt_npc, finalfr, cubeout
else:
return opt_npc
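# --- Illustrative sketch (not part of the original module) ------------------
# The search above refines the number of PCs in three passes: a coarse grid
# with a step of ~10% of pcmax, a finer grid around the best value with a ~5%
# step, and a final pass with step 1. The helper below shows the same
# coarse-to-fine idea on a generic 1-d objective; the function name and exact
# step sizes are assumptions made only for illustration.
def _coarse_to_fine_argmax_sketch(objective, nmin, nmax):
    """Return the integer in [nmin, nmax] maximizing ``objective`` with a
    coarse-to-fine grid search analogous to the logic above."""
    step = max((nmax - nmin) // 10, 1)
    best = max(range(nmin, nmax + 1, step), key=objective)
    # refine around the coarse optimum with a smaller step
    step2 = max((nmax - nmin) // 20, 1)
    lo, hi = max(nmin, best - step), min(nmax, best + step)
    best = max(range(lo, hi + 1, step2), key=objective)
    # final pass with unit step in the neighbourhood of the refined optimum
    lo, hi = max(nmin, best - step2), min(nmax, best + step2)
    return max(range(lo, hi + 1), key=objective)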
def pca_incremental(cubepath, angle_list=None, n=0, batch_size=None,
batch_ratio=0.1, ncomp=10, verbose=True, full_output=False):
""" Computes the full-frame PCA-ADI algorithm in batches, for processing
fits files larger than the available system memory. It uses the incremental
PCA algorithm from scikit-learn.
Parameters
----------
cubepath : str
String with the path to the fits file to be opened in memmap mode.
angle_list : array_like, 1d
Corresponding parallactic angle for each frame. If None the parallactic
angles are obtained from the same fits file (extension).
n : int, optional
The index of the HDUList containing the data/cube.
batch_size : int, optional
The number of frames in each batch. If None, the batch size is
computed with respect to the available memory in the system.
batch_ratio : float, optional
If batch_size is None, batch_ratio indicates the fraction of the
available memory that should be used by each batch.
ncomp : int, optional
How many PCs are used as a lower-dimensional subspace to project the
target frames.
verbose : {True, False}, bool optional
If True prints intermediate info and timing.
full_output: boolean, optional
Whether to return the final median combined image only or with other
intermediate arrays.
Returns
-------
If full_output is True the algorithm returns the incremental PCA model of
scikit-learn, the PCs reshaped into images, the median of the derotated
residuals for each batch, and the final frame. If full_output is False then
the final frame is returned.
"""
if verbose: start = time_ini()
if not isinstance(cubepath, str):
msgerr = 'Cubepath must be a string with the full path of your fits file'
raise TypeError(msgerr)
fitsfilename = cubepath
hdulist = fits.open(fitsfilename, memmap=True)
if not hdulist[n].data.ndim>2:
raise TypeError('Input array is not a 3d or 4d array')
n_frames = hdulist[n].data.shape[0]
y = hdulist[n].data.shape[1]
x = hdulist[n].data.shape[2]
if angle_list is None:
try:
angle_list = hdulist[n+1].data
except Exception:
raise RuntimeError('Parallactic angles were not provided')
if not n_frames == angle_list.shape[0]:
msg = ('Angle list vector has wrong length. It must equal the number '
'of frames in the cube.')
raise TypeError(msg)
ipca = IncrementalPCA(n_components=ncomp)
if batch_size is None:
aval_mem = get_available_memory(verbose)
total_size = hdulist[n].data.nbytes
batch_size = int(n_frames/(total_size/(batch_ratio*aval_mem)))
if verbose:
msg1 = "Cube with {:} frames ({:.3f} GB)"
print(msg1.format(n_frames, hdulist[n].data.nbytes/1e9))
msg2 = "Batch size set to {:} frames ({:.3f} GB)"
print(msg2.format(batch_size, hdulist[n].data[:batch_size].nbytes/1e9),
'\n')
res = n_frames%batch_size
for i in range(0, int(n_frames/batch_size)):
intini = i*batch_size
intfin = (i+1)*batch_size
batch = hdulist[n].data[intini:intfin]
msg = 'Processing batch [{},{}] with shape {}'
if verbose:
print(msg.format(intini, intfin, batch.shape))
print('Batch size in memory = {:.3f} MB'.format(batch.nbytes/1e6))
matrix = prepare_matrix(batch, verbose=False)
ipca.partial_fit(matrix)
if res>0:
batch = hdulist[n].data[intfin:]
msg = 'Processing batch [{},{}] with shape {}'
if verbose:
print(msg.format(intfin, n_frames, batch.shape))
print('Batch size in memory = {:.3f} MB'.format(batch.nbytes/1e6))
matrix = prepare_matrix(batch, verbose=False)
ipca.partial_fit(matrix)
if verbose: timing(start)
V = ipca.components_
mean = ipca.mean_.reshape(batch.shape[1], batch.shape[2])
if verbose:
print('\nReconstructing and obtaining residuals')
medians = []
for i in range(0, int(n_frames/batch_size)):
intini = i*batch_size
intfin = (i+1)*batch_size
batch = hdulist[n].data[intini:intfin]
batch = batch - mean
matrix = prepare_matrix(batch, verbose=False)
reconst = np.dot(np.dot(matrix, V.T), V)
resid = matrix - reconst
resid_der = cube_derotate(resid.reshape(batch.shape[0],
batch.shape[1],
batch.shape[2]),
angle_list[intini:intfin])
medians.append(cube_collapse(resid_der, 'median'))
if res>0:
batch = hdulist[n].data[intfin:]
batch = batch - mean
matrix = prepare_matrix(batch, verbose=False)
reconst = np.dot(np.dot(matrix, V.T), V)
resid = matrix - reconst
resid_der = cube_derotate(resid.reshape(batch.shape[0],
batch.shape[1],
batch.shape[2]),
angle_list[intfin:])
medians.append(cube_collapse(resid_der, 'median'))
del(matrix)
del(batch)
medians = np.array(medians)
frame = np.median(medians, axis=0)
if verbose: timing(start)
if full_output:
pcs = reshape_matrix(V, y, x)
return ipca, pcs, medians, frame
else:
return frame
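# --- Hedged usage sketch (not part of the original module) ------------------
# Assuming a FITS file 'cube.fits' whose HDU 0 holds the frame cube and whose
# HDU 1 holds the parallactic angles (file name and layout are assumptions),
# `pca_incremental` could be called as:
#
#     frame = pca_incremental('cube.fits', angle_list=None, n=0,
#                             batch_ratio=0.1, ncomp=10, verbose=True)
#
# With full_output=True the call returns
# (ipca_model, pcs, batch_medians, final_frame) instead.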
| mit |
nomadcube/scikit-learn | examples/svm/plot_svm_regression.py | 249 | 1451 | """
===================================================================
Support Vector Regression (SVR) using linear and non-linear kernels
===================================================================
Toy example of 1D regression using linear, polynomial and RBF kernels.
"""
print(__doc__)
import numpy as np
from sklearn.svm import SVR
import matplotlib.pyplot as plt
###############################################################################
# Generate sample data
X = np.sort(5 * np.random.rand(40, 1), axis=0)
y = np.sin(X).ravel()
###############################################################################
# Add noise to targets
y[::5] += 3 * (0.5 - np.random.rand(8))
###############################################################################
# Fit regression model
svr_rbf = SVR(kernel='rbf', C=1e3, gamma=0.1)
svr_lin = SVR(kernel='linear', C=1e3)
svr_poly = SVR(kernel='poly', C=1e3, degree=2)
y_rbf = svr_rbf.fit(X, y).predict(X)
y_lin = svr_lin.fit(X, y).predict(X)
y_poly = svr_poly.fit(X, y).predict(X)
###############################################################################
# look at the results
plt.scatter(X, y, c='k', label='data')
plt.plot(X, y_rbf, c='g', label='RBF model')
plt.plot(X, y_lin, c='r', label='Linear model')
plt.plot(X, y_poly, c='b', label='Polynomial model')
plt.xlabel('data')
plt.ylabel('target')
plt.title('Support Vector Regression')
plt.legend()
plt.show()
| bsd-3-clause |
mjgrav2001/scikit-learn | examples/neural_networks/plot_rbm_logistic_classification.py | 258 | 4609 | """
==============================================================
Restricted Boltzmann Machine features for digit classification
==============================================================
For greyscale image data where pixel values can be interpreted as degrees of
blackness on a white background, like handwritten digit recognition, the
Bernoulli Restricted Boltzmann machine model (:class:`BernoulliRBM
<sklearn.neural_network.BernoulliRBM>`) can perform effective non-linear
feature extraction.
In order to learn good latent representations from a small dataset, we
artificially generate more labeled data by perturbing the training data with
linear shifts of 1 pixel in each direction.
This example shows how to build a classification pipeline with a BernoulliRBM
feature extractor and a :class:`LogisticRegression
<sklearn.linear_model.LogisticRegression>` classifier. The hyperparameters
of the entire model (learning rate, hidden layer size, regularization)
were optimized by grid search, but the search is not reproduced here because
of runtime constraints.
Logistic regression on raw pixel values is presented for comparison. The
example shows that the features extracted by the BernoulliRBM help improve the
classification accuracy.
"""
from __future__ import print_function
print(__doc__)
# Authors: Yann N. Dauphin, Vlad Niculae, Gabriel Synnaeve
# License: BSD
import numpy as np
import matplotlib.pyplot as plt
from scipy.ndimage import convolve
from sklearn import linear_model, datasets, metrics
from sklearn.cross_validation import train_test_split
from sklearn.neural_network import BernoulliRBM
from sklearn.pipeline import Pipeline
###############################################################################
# Setting up
def nudge_dataset(X, Y):
"""
This produces a dataset 5 times bigger than the original one,
by moving the 8x8 images in X around by 1px to left, right, down, up
"""
direction_vectors = [
[[0, 1, 0],
[0, 0, 0],
[0, 0, 0]],
[[0, 0, 0],
[1, 0, 0],
[0, 0, 0]],
[[0, 0, 0],
[0, 0, 1],
[0, 0, 0]],
[[0, 0, 0],
[0, 0, 0],
[0, 1, 0]]]
shift = lambda x, w: convolve(x.reshape((8, 8)), mode='constant',
weights=w).ravel()
X = np.concatenate([X] +
[np.apply_along_axis(shift, 1, X, vector)
for vector in direction_vectors])
Y = np.concatenate([Y for _ in range(5)], axis=0)
return X, Y
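# --- Illustrative sketch (not part of the original example) -----------------
# nudge_dataset() returns 5x as many samples: the originals plus one copy
# shifted by 1 pixel in each of the four directions. For instance:
#
#     from sklearn import datasets
#     digits = datasets.load_digits()
#     X_big, Y_big = nudge_dataset(digits.data[:10], digits.target[:10])
#     # X_big.shape == (50, 64), Y_big.shape == (50,)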
# Load Data
digits = datasets.load_digits()
X = np.asarray(digits.data, 'float32')
X, Y = nudge_dataset(X, digits.target)
X = (X - np.min(X, 0)) / (np.max(X, 0) + 0.0001) # 0-1 scaling
X_train, X_test, Y_train, Y_test = train_test_split(X, Y,
test_size=0.2,
random_state=0)
# Models we will use
logistic = linear_model.LogisticRegression()
rbm = BernoulliRBM(random_state=0, verbose=True)
classifier = Pipeline(steps=[('rbm', rbm), ('logistic', logistic)])
###############################################################################
# Training
# Hyper-parameters. These were set by cross-validation,
# using a GridSearchCV. Here we are not performing cross-validation to
# save time.
rbm.learning_rate = 0.06
rbm.n_iter = 20
# More components tend to give better prediction performance, but larger
# fitting time
rbm.n_components = 100
logistic.C = 6000.0
# Training RBM-Logistic Pipeline
classifier.fit(X_train, Y_train)
# Training Logistic regression
logistic_classifier = linear_model.LogisticRegression(C=100.0)
logistic_classifier.fit(X_train, Y_train)
###############################################################################
# Evaluation
print()
print("Logistic regression using RBM features:\n%s\n" % (
metrics.classification_report(
Y_test,
classifier.predict(X_test))))
print("Logistic regression using raw pixel features:\n%s\n" % (
metrics.classification_report(
Y_test,
logistic_classifier.predict(X_test))))
###############################################################################
# Plotting
plt.figure(figsize=(4.2, 4))
for i, comp in enumerate(rbm.components_):
plt.subplot(10, 10, i + 1)
plt.imshow(comp.reshape((8, 8)), cmap=plt.cm.gray_r,
interpolation='nearest')
plt.xticks(())
plt.yticks(())
plt.suptitle('100 components extracted by RBM', fontsize=16)
plt.subplots_adjust(0.08, 0.02, 0.92, 0.85, 0.08, 0.23)
plt.show()
| bsd-3-clause |
tridesclous/tridesclous | tridesclous/peeler_engine_geometry.py | 1 | 28001 | """
Here, an implementation that takes into account the geometry
of the probe to speed up template matching.
"""
import time
import numpy as np
import joblib
from concurrent.futures import ThreadPoolExecutor
import itertools
from .peeler_engine_base import PeelerEngineGeneric
from .peeler_tools import *
from .peeler_tools import _dtype_spike
import sklearn.metrics.pairwise
from .cltools import HAVE_PYOPENCL, OpenCL_Helper
if HAVE_PYOPENCL:
import pyopencl
mf = pyopencl.mem_flags
from .peakdetector import get_peak_detector_class
try:
import numba
HAVE_NUMBA = True
from .numba_tools import numba_explore_best_shift, numba_sparse_scalar_product
except ImportError:
HAVE_NUMBA = False
class PeelerEngineGeometrical(PeelerEngineGeneric):
def change_params(self, **kargs):
PeelerEngineGeneric.change_params(self, **kargs)
def initialize(self, **kargs):
PeelerEngineGeneric.initialize(self, **kargs)
# create peak detector
p = dict(self.catalogue['peak_detector_params'])
self.peakdetector_engine = p.pop('engine')
self.peakdetector_method = p.pop('method')
PeakDetector_class = get_peak_detector_class(self.peakdetector_method, self.peakdetector_engine)
chunksize = self.fifo_size-2*self.n_span # not the real chunksize here
self.peakdetector = PeakDetector_class(self.sample_rate, self.nb_channel,
chunksize, self.internal_dtype, self.geometry)
self.peakdetector.change_params(**p)
# some attrs
self.shifts = np.arange(-self.maximum_jitter_shift, self.maximum_jitter_shift+1)
self.nb_shift = self.shifts.size
#~ self.channel_distances = sklearn.metrics.pairwise.euclidean_distances(self.geometry).astype('float32')
#~ self.channels_adjacency = {}
#~ for c in range(self.nb_channel):
#~ if self.use_sparse_template:
#~ nearest, = np.nonzero(self.channel_distances[c, :]<self.adjacency_radius_um)
#~ self.channels_adjacency[c] = nearest
#~ else:
#~ self.channels_adjacency[c] = np.arange(self.nb_channel, dtype='int64')
self.mask_already_tested = np.zeros((self.fifo_size, self.nb_channel), dtype='bool')
def initialize_before_each_segment(self, **kargs):
PeelerEngineGeneric.initialize_before_each_segment(self, **kargs)
self.peakdetector.initialize_stream()
def detect_local_peaks_before_peeling_loop(self):
# reset tested mask
self.mask_already_tested[:] = False
# and detect peak
self.re_detect_local_peak()
#~ print('detect_local_peaks_before_peeling_loop', self.pending_peaks.size)
def re_detect_local_peak(self):
mask = self.peakdetector.get_mask_peaks_in_chunk(self.fifo_residuals)
if mask.ndim ==1:
#~ mask &= ~self.mask_already_tested[self.n_span:-self.n_span, 0]
sample_indexes, = np.nonzero(mask)
sample_indexes += self.n_span
tested = self.mask_already_tested[sample_indexes, 0]
sample_indexes = sample_indexes[~tested]
chan_indexes = np.zeros(sample_indexes.size, dtype='int64')
else:
#~ mask &= ~self.mask_already_tested[self.n_span:-self.n_span, :]
sample_indexes, chan_indexes = np.nonzero(mask)
sample_indexes += self.n_span
tested = self.mask_already_tested[sample_indexes, chan_indexes]
sample_indexes = sample_indexes[~tested]
chan_indexes = chan_indexes[~tested]
amplitudes = np.abs(self.fifo_residuals[sample_indexes, chan_indexes])
order = np.argsort(amplitudes)[::-1]
dtype_peak = [('sample_index', 'int32'), ('chan_index', 'int32'), ('peak_value', 'float32')]
self.pending_peaks = np.zeros(sample_indexes.size, dtype=dtype_peak)
self.pending_peaks['sample_index'] = sample_indexes
self.pending_peaks['chan_index'] = chan_indexes
self.pending_peaks['peak_value'] = amplitudes
self.pending_peaks = self.pending_peaks[order]
#~ print('re_detect_local_peak', self.pending_peaks.size)
def select_next_peak(self):
#~ print(len(self.pending_peaks))
if len(self.pending_peaks)>0:
sample_ind, chan_ind, ampl = self.pending_peaks[0]
self.pending_peaks = self.pending_peaks[1:]
return sample_ind, chan_ind
else:
return LABEL_NO_MORE_PEAK, None
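# --- Illustrative sketch (not part of the original engine) ------------------
# re_detect_local_peak() keeps pending peaks as a structured array sorted by
# decreasing |amplitude|, and select_next_peak() simply pops the first entry.
# A minimal standalone version of that bookkeeping (values made up for
# illustration only):
#
#     import numpy as np
#     dtype_peak = [('sample_index', 'int32'), ('chan_index', 'int32'),
#                   ('peak_value', 'float32')]
#     peaks = np.zeros(3, dtype=dtype_peak)
#     peaks['sample_index'] = [10, 42, 7]
#     peaks['chan_index'] = [0, 2, 1]
#     peaks['peak_value'] = [1.5, 3.0, 2.1]
#     peaks = peaks[np.argsort(peaks['peak_value'])[::-1]]  # largest first
#     next_peak, pending = peaks[0], peaks[1:]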
def on_accepted_spike(self, sample_ind, cluster_idx, jitter):
# remove spike prediction from fifo residuals
#~ t1 = time.perf_counter()
pos, pred = make_prediction_one_spike(sample_ind, cluster_idx, jitter, self.fifo_residuals.dtype, self.catalogue)
#~ t2 = time.perf_counter()
#~ print(' make_prediction_one_spike', (t2-t1)*1000)
#~ t1 = time.perf_counter()
self.fifo_residuals[pos:pos+self.peak_width_long, :] -= pred
#~ t2 = time.perf_counter()
#~ print(' self.fifo_residuals -', (t2-t1)*1000)
# this prevents searching for peaks in this zone until the next "reset_to_not_tested"
#~ t1 = time.perf_counter()
self.clean_pending_peaks_zone(sample_ind, cluster_idx)
#~ t2 = time.perf_counter()
#~ print(' self.clean_pending_peaks_zone -', (t2-t1)*1000)
def clean_pending_peaks_zone(self, sample_ind, cluster_idx):
# TODO test with sparse_mask_level3s!!!!!
mask = self.sparse_mask_level1[cluster_idx, :]
#~ t1 = time.perf_counter()
#~ keep = np.zeros(self.pending_peaks.size, dtype='bool')
#~ for i, peak in enumerate(self.pending_peaks):
#~ in_zone = mask[peak['chan_index']] and \
#~ (peak['sample_index']+self.n_left)<sample_ind and \
#~ sample_ind<(peak['sample_index']+self.n_right)
#~ keep[i] = not(in_zone)
peaks = self.pending_peaks
in_zone = mask[peaks['chan_index']] &\
((peaks['sample_index']+self.n_left)<sample_ind) & \
((peaks['sample_index']+self.n_right)>sample_ind)
keep = ~ in_zone
#~ t2 = time.perf_counter()
#~ print(' clean_pending_peaks_zone loop', (t2-t1)*1000)
self.pending_peaks = self.pending_peaks[keep]
#~ print('clean_pending_peaks_zone', self.pending_peaks.size)
def set_already_tested(self, sample_ind, peak_chan):
self.mask_already_tested[sample_ind, peak_chan] = True
def reset_to_not_tested(self, good_spikes):
for spike in good_spikes:
# each good spike clears the already-tested mask around its position, on its channels
cluster_idx = self.catalogue['label_to_index'][spike.cluster_label]
chan_mask = self.sparse_mask_level1[cluster_idx, :]
self.mask_already_tested[spike.index + self.n_left_long:spike.index + self.n_right_long][:, chan_mask] = False
self.re_detect_local_peak()
def get_no_label_peaks(self):
mask = self.peakdetector.get_mask_peaks_in_chunk(self.fifo_residuals)
nolabel_indexes, chan_indexes = np.nonzero(mask)
#~ nolabel_indexes, chan_indexes = np.nonzero(~self.mask_not_already_tested)
nolabel_indexes += self.n_span
nolabel_indexes = nolabel_indexes[nolabel_indexes<(self.chunksize+self.n_span)]
bad_spikes = np.zeros(nolabel_indexes.shape[0], dtype=_dtype_spike)
bad_spikes['index'] = nolabel_indexes
bad_spikes['cluster_label'] = LABEL_UNCLASSIFIED
return bad_spikes
def get_best_template(self, left_ind, chan_ind):
full_waveform = self.fifo_residuals[left_ind:left_ind+self.peak_width,:]
centers0 = self.catalogue['centers0']
projections = self.catalogue['projections']
strict_low = self.catalogue['boundaries'][:, 0]
strict_high = self.catalogue['boundaries'][:, 1]
flexible_low = self.catalogue['boundaries'][:, 2]
flexible_high = self.catalogue['boundaries'][:, 3]
n = centers0.shape[0]
flat_waveform = full_waveform.flatten()
flat_centers0 = centers0.reshape(n, -1)
#~ scalar_products = np.zeros(n, dtype='float32')
#~ for i in range(n):
#~ sp = np.sum((flat_waveform - flat_centers0[i, :]) * projections[i, :])
#~ scalar_products[i] = sp
#~ scalar_products = np.sum((flat_waveform[np.newaxis, :] - flat_centers0[:, :]) * projections[:, :], axis=1)
#~ print(scalar_products)
#~ t1 = time.perf_counter()
scalar_products = numba_sparse_scalar_product(self.fifo_residuals, left_ind, centers0, projections, chan_ind,
self.sparse_mask_level1, )
#~ t2 = time.perf_counter()
#~ print('numba_sparse_scalar_product', (t2-t1)*1000)
#~ print(scalar_products)
possible_idx, = np.nonzero((scalar_products < strict_high) & (scalar_products > strict_low))
#~ possible_idx, = np.nonzero((scalar_products < flexible_high) & (scalar_products > flexible_low))
#~ print('possible_idx', possible_idx)
#~ print('scalar_products[possible_idx]', scalar_products[possible_idx])
#~ do_plot = False
if len(possible_idx) == 1:
extra_idx = None
candidates_idx =possible_idx
elif len(possible_idx) == 0:
#~ extra_idx, = np.nonzero((np.abs(scalar_products) < 0.5))
extra_idx, = np.nonzero((scalar_products < flexible_high) & (scalar_products > flexible_low))
#~ if len(extra_idx) ==0:
# give a try to very far ones.
#~ extra_idx, = np.nonzero((np.abs(scalar_products) < 1.))
#~ print('extra_idx', extra_idx)
#~ if len(extra_idx) ==0:
#~ candidates_idx = []
#~ else:
#~ candidates_idx = extra_idx
candidates_idx = extra_idx
#~ candidates_idx =possible_idx
#~ pass
elif len(possible_idx) > 1 :
extra_idx = None
candidates_idx = possible_idx
debug_plot_change = False
if len(candidates_idx) > 0:
#~ t1 = time.perf_counter()
candidates_idx = np.array(candidates_idx, dtype='int64')
common_mask = np.sum(self.sparse_mask_level3[candidates_idx, :], axis=0) > 0
shift_scalar_product, shift_distance = numba_explore_best_shift(self.fifo_residuals, left_ind, self.catalogue['centers0'],
self.catalogue['projections'], candidates_idx, self.maximum_jitter_shift, common_mask, self.sparse_mask_level1)
#~ i0, i1 = np.unravel_index(np.argmin(np.abs(shift_scalar_product), axis=None), shift_scalar_product.shape)
i0, i1 = np.unravel_index(np.argmin(shift_distance, axis=None), shift_distance.shape)
#~ best_idx = candidates_idx[i0]
shift = self.shifts[i1]
cluster_idx = candidates_idx[i0]
final_scalar_product = shift_scalar_product[i0, i1]
#~ t2 = time.perf_counter()
#~ print('numba_explore_best_shift', (t2-t1)*1000)
#~ print('shift', shift)
#~ print('cluster_idx', cluster_idx)
#~ print('final_scalar_product', final_scalar_product)
if np.abs(shift) == self.maximum_jitter_shift:
cluster_idx = None
shift = None
final_scalar_product = None
#~ print('maximum_jitter_shift >> cluster_idx = None ')
#~ do_plot = True
#~ i0_bis, i1_bis = np.unravel_index(np.argmin(np.abs(shift_scalar_product), axis=None), shift_scalar_product.shape)
#~ if i0 != i0_bis:
#~ debug_plot_change = True
#~ print('Warning')
#~ print(possible_idx)
#~ print(shift_scalar_product)
#~ print(shift_distance)
#~ if best_idx != cluster_idx:
#~ print('*'*50)
#~ print('best_idx != cluster_idx', best_idx, cluster_idx)
#~ print('*'*50)
#~ cluster_idx = best_idx
#~ debug_plot_change = True
else:
cluster_idx = None
shift = None
final_scalar_product = None
#~ import matplotlib.pyplot as plt
#~ fig, ax = plt.subplots()
#~ ax.plot(self.shifts, shift_scalar_product.T)
#~ plt.show()
#~ print('ici',)
# DEBUG OMP
#~ from sklearn.linear_model import orthogonal_mp_gram
#~ from sklearn.linear_model import OrthogonalMatchingPursuit
#~ n_nonzero_coefs = 2
#~ omp = OrthogonalMatchingPursuit(n_nonzero_coefs=n_nonzero_coefs)
#~ X = self.catalogue['centers0'].reshape(self.catalogue['centers0'].shape[0], -1).T
#~ waveform = self.fifo_residuals[left_ind:left_ind+self.peak_width,:].flatten()
#~ y = waveform
#~ omp.fit(X, y)
#~ coef = omp.coef_
#~ idx_r, = coef.nonzero()
#~ cluster_idx_omp = np.argmin(np.abs(coef - 1))
#~ if cluster_idx_omp != cluster_idx and coef[cluster_idx_omp] > 0.5:
#~ if True:
if False:
#~ if cluster_idx in (3,6):
#~ if do_plot:
#~ if False:
#~ if final_scalar_product is not None and np.abs(final_scalar_product) > 0.5:
#~ if True:
#~ if len(possible_idx) != 1:
#~ if len(possible_idx) > 1:
#~ if len(candidates_idx) > 1:
#~ if 7 in possible_idx or cluster_idx == 7:
#~ if cluster_idx not in possible_idx and len(possible_idx) > 0:
#~ if debug_plot_change:
import matplotlib.pyplot as plt
print()
print('best cluster_idx', cluster_idx)
print('possible_idx', possible_idx)
print('extra_idx', extra_idx)
print(scalar_products[possible_idx])
print(strict_high[possible_idx])
print('cluster_idx_omp', cluster_idx_omp)
fig, ax = plt.subplots()
ax.plot(coef)
if cluster_idx is not None:
ax.axvline(cluster_idx)
ax.set_title(f'{cluster_idx} omp {cluster_idx_omp}')
#~ plt.show()
fig, ax = plt.subplots()
shift2 = 0 if shift is None else shift
full_waveform2 = self.fifo_residuals[left_ind+shift2:left_ind+shift2+self.peak_width,:]
ax.plot(full_waveform2.T.flatten(), color='k')
if shift !=0 and shift is not None:
ax.plot(full_waveform.T.flatten(), color='grey', ls='--')
for idx in candidates_idx:
ax.plot(self.catalogue['centers0'][idx, :].T.flatten(), color='m')
ax.plot(self.catalogue['centers0'][cluster_idx_omp, :].T.flatten(), color='y')
if cluster_idx is not None:
ax.plot(self.catalogue['centers0'][cluster_idx, :].T.flatten(), color='c', ls='--')
ax.set_title(f'best {cluster_idx} shift {shift} possible_idx {possible_idx}')
if shift is not None:
fig, ax = plt.subplots()
#~ ax.plot(self.shifts, np.abs(shift_scalar_product).T)
ax.plot(self.shifts, shift_scalar_product.T)
ax.axhline(0)
fig, ax = plt.subplots()
ax.plot(self.shifts, np.abs(shift_distance).T)
plt.show()
best_template_info = {'nb_candidate' : len(candidates_idx), 'final_scalar_product':final_scalar_product}
return cluster_idx, shift, best_template_info
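# --- Illustrative sketch (not part of the original engine) ------------------
# get_best_template() keeps a template as a candidate when its scalar product
# with the residual falls inside that cluster's boundaries. The core of the
# selection rule, with made-up numbers:
#
#     import numpy as np
#     scalar_products = np.array([0.1, 0.9, -0.4])
#     strict_low = np.array([-0.2, 0.5, 0.2])
#     strict_high = np.array([0.3, 1.5, 0.8])
#     candidates, = np.nonzero((scalar_products > strict_low) &
#                              (scalar_products < strict_high))
#     # candidates == array([0, 1]): clusters whose bounds contain the product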
def accept_tempate(self, left_ind, cluster_idx, jitter, best_template_info):
if jitter is None:
# this must have a jitter
jitter = 0
#~ if np.abs(jitter) > (self.maximum_jitter_shift - 0.5):
#~ return False
strict_low = self.catalogue['boundaries'][:, 0]
strict_high = self.catalogue['boundaries'][:, 1]
flexible_low = self.catalogue['boundaries'][:, 2]
flexible_high = self.catalogue['boundaries'][:, 3]
#~ flat_waveform = full_waveform.flatten()
#~ sp2 = np.sum((flat_waveform - centers0[cluster_idx, :].flatten()) * projections[cluster_idx, :])
sp = best_template_info['final_scalar_product']
nb_candidate = best_template_info['nb_candidate']
if nb_candidate == 1:
#~ accept_template = strict_low[cluster_idx] < sp < strict_high[cluster_idx]
accept_template = flexible_low[cluster_idx] < sp < flexible_high[cluster_idx]
else:
accept_template = flexible_low[cluster_idx] < sp < flexible_high[cluster_idx]
# waveform L2 on mask
#~ full_waveform = self.fifo_residuals[left_ind:left_ind+self.peak_width,:]
#~ wf = full_waveform[:, mask]
# prediction with interpolation
#~ _, pred_wf = make_prediction_one_spike(left_ind - self.n_left, cluster_idx, jitter, self.fifo_residuals.dtype, self.catalogue, long=False)
#~ pred_wf = pred_wf[:, mask]
#~ dist = (pred_wf - wf) ** 2
# criteria per channel
#~ residual_nrj_by_chan = np.sum(dist, axis=0)
#~ wf_nrj = np.sum(wf**2, axis=0)
#~ weight = self.weight_per_template_dict[cluster_idx]
#~ crietria_weighted = (wf_nrj>residual_nrj_by_chan).astype('float') * weight
#~ accept_template = np.sum(crietria_weighted) >= 0.7 * np.sum(weight)
# criteria per sample
#~ dist * np.abs(pred_wf) <
#~ dist_w = dist / np.abs(pred_wf)
#~ gain = (dist < wf**2).astype('float') * np.abs(pred_wf) / np.sum(np.abs(pred_wf))
#~ gain = (wf / pred_wf - 1) * np.abs(pred_wf) / np.sum(np.abs(pred_wf))
#~ gain = (pred_wf**2 / wf**1 - 1) * np.abs(pred_wf) / np.sum(np.abs(pred_wf))
#~ accept_template = np.sum(gain) > 0.8
#~ accept_template = np.sum(gain) > 0.7
#~ accept_template0 = np.sum(gain) > 0.6
#~ accept_template = np.sum(gain) > 0.5
# criteria max residual
#~ max_res = np.max(np.abs(pred_wf - wf))
#~ max_pred = np.max(np.abs(pred_wf))
#~ accept_template1 = max_pred > max_res
#~ accept_template = False
# debug
#~ limit_sp =self.catalogue['sp_normed_limit'][cluster_idx, :]
#~ sp = np.sum(self.catalogue['centers0_normed'] * full_waveform * self.catalogue['template_weight'])
#~ print('limit_sp', limit_sp, 'sp', sp)
#~ accept_template = False
#~ immediate_accept = False
# DEBUG always refuse!!!!!
#~ accept_template = False
#~ label = self.catalogue['cluster_labels'][cluster_idx]
# debug
#~ if label == 13:
#~ if accept_template and not immediate_accept:
#~ accept_template = False
# debug
#~ if label == 13:
#~ if not hasattr(self, 'count_accept'):
#~ self.count_accept = {}
#~ self.count_accept[label] = {'accept_template':0, 'immediate_accept':0, 'not_accepted':0}
#~ if accept_template:
#~ self.count_accept[label]['accept_template'] += 1
#~ if immediate_accept:
#~ self.count_accept[label]['immediate_accept'] += 1
#~ else:
#~ self.count_accept[label]['not_accepted'] += 1
#~ print(self.count_accept)
#~ if self._plot_debug:
#~ if not accept_template and label in []:
#~ if not accept_template:
#~ if accept_template:
#~ if True:
if False:
#~ if not immediate_accept:
#~ if immediate_accept:
#~ if immediate_accept:
#~ if label == 7 and not accept_template:
#~ if label == 7:
#~ if label == 121:
#~ if label == 5:
#~ if nb_candidate > 1:
#~ if label == 13 and accept_template and not immediate_accept:
#~ if label == 13 and not accept_template:
#~ if label in (7,9):
#~ nears = np.array([ 5813767, 5813767, 11200038, 11322540, 14989650, 14989673, 14989692, 14989710, 15119220, 15830377, 16138346, 16216666, 17078883])
#~ print(np.abs((left_ind - self.n_left) - nears))
#~ print(np.abs((left_ind - self.n_left) - nears) < 2)
#~ if label == 5 and np.any(np.abs((left_ind - self.n_left) - nears) < 50):
#~ if immediate_accept:
import matplotlib.pyplot as plt
mask = self.sparse_mask_level2[cluster_idx]
full_waveform = self.fifo_residuals[left_ind:left_ind+self.peak_width,:]
wf = full_waveform[:, mask]
_, pred_waveform = make_prediction_one_spike(left_ind - self.n_left, cluster_idx, jitter, self.fifo_residuals.dtype, self.catalogue, long=False)
pred_wf = pred_waveform[:, mask]
if accept_template:
color = 'g'
else:
color = 'r'
#~ if accept_template:
#~ if immediate_accept:
#~ color = 'g'
#~ else:
#~ color = 'c'
#~ else:
#~ color = 'r'
#~ if not immediate_accept:
#~ fig, ax = plt.subplots()
#~ ax.plot(gain.T.flatten(), color=color)
#~ ax.set_title('{}'.format(np.sum(gain)))
#~ fig, ax = plt.subplots()
#~ ax.plot(feat_centroids.T, alpha=0.5)
#~ ax.plot(feat_waveform, color='k')
fig, ax = plt.subplots()
ax.plot(full_waveform.T.flatten(), color='k')
ax.plot(pred_waveform.T.flatten(), color=color)
l0, l1 = strict_low[cluster_idx], strict_high[cluster_idx]
l2, l3 = flexible_low[cluster_idx], flexible_high[cluster_idx]
title = f'{cluster_idx} {sp:0.3f} lim [{l0:0.3f} {l1:0.3f}] [{l2:0.3f} {l3:0.3f}] {nb_candidate}'
ax.set_title(title)
#~ fig, ax = plt.subplots()
#~ ax.plot(wf.T.flatten(), color='k')
#~ ax.plot(pred_wf.T.flatten(), color=color)
#~ ax.plot( wf.T.flatten() - pred_wf.T.flatten(), color=color, ls='--')
print()
print('cluster_idx',cluster_idx, 'accept_template', accept_template)
#~ print(distance, self.distance_limit[cluster_idx])
#~ print('distance', distance, distance2, 'limit_distance', self.distance_limit[cluster_idx])
#~ limit_sp =self.catalogue['sp_normed_limit'][cluster_idx, :]
#~ sp = np.sum(self.catalogue['centers0_normed'] * full_waveform * self.catalogue['template_weight'])
#~ sp = np.sum(self.catalogue['centers0_normed'] * full_waveform)
#~ print('limit_sp', limit_sp, 'sp', sp)
#~ if not immediate_accept:
#~ print('np.sum(gain)', np.sum(gain))
#~ fig, ax = plt.subplots()
#~ res = wf - pred_wf
#~ count, bins = np.histogram(res, bins=150, weights=np.abs(pred_wf))
#~ ax.plot(bins[:-1], count)
#~ plt.show()
#~ if distance2 >= self.distance_limit[cluster_idx]:
#~ print(crietria_weighted, weight)
#~ print(np.sum(crietria_weighted), np.sum(weight))
#~ ax.plot(full_wf0.T.flatten(), color='y')
#~ ax.plot( full_wf.T.flatten() - full_wf0.T.flatten(), color='y')
#~ ax.set_title('not accepted')
plt.show()
return accept_template
def _plot_after_inner_peeling_loop(self):
pass
def _plot_before_peeling_loop(self):
import matplotlib.pyplot as plt
fig, ax = plt.subplots()
plot_sigs = self.fifo_residuals.copy()
self._plot_sigs_before = plot_sigs
#~ chan_order = np.argsort(self.channel_distances[0, :])
for c in range(self.nb_channel):
#~ for c in chan_order:
plot_sigs[:, c] += c*30
ax.plot(plot_sigs, color='k')
ax.axvline(self.fifo_size - self.n_right_long, color='r')
ax.axvline(-self.n_left_long, color='r')
mask = self.peakdetector.get_mask_peaks_in_chunk(self.fifo_residuals)
sample_inds, chan_inds= np.nonzero(mask)
sample_inds += self.n_span
ax.scatter(sample_inds, plot_sigs[sample_inds, chan_inds], color='r')
ax.set_title(f'nb peak {sample_inds.size}')
#~ plt.show()
def _plot_label_unclassified(self, left_ind, peak_chan, cluster_idx, jitter):
return
import matplotlib.pyplot as plt
#~ print('LABEL UNCLASSIFIED', left_ind, cluster_idx)
fig, ax = plt.subplots()
wf = self.fifo_residuals[left_ind:left_ind+self.peak_width, :]
wf0 = self.catalogue['centers0'][cluster_idx, :, :]
ax.plot(wf.T.flatten(), color='b')
#~ ax.plot(wf0.T.flatten(), color='g')
ax.set_title(f'label_unclassified {left_ind-self.n_left} {cluster_idx} chan{peak_chan}')
ax.axvline(peak_chan*self.peak_width-self.n_left)
plt.show()
def _plot_after_peeling_loop(self, good_spikes):
import matplotlib.pyplot as plt
fig, ax = plt.subplots()
plot_sigs = self.fifo_residuals.copy()
for c in range(self.nb_channel):
plot_sigs[:, c] += c*30
ax.plot(plot_sigs, color='k')
ax.plot(self._plot_sigs_before, color='b')
ax.axvline(self.fifo_size - self.n_right_long, color='r')
ax.axvline(-self.n_left_long, color='r')
mask = self.peakdetector.get_mask_peaks_in_chunk(self.fifo_residuals)
sample_inds, chan_inds= np.nonzero(mask)
sample_inds += self.n_span
ax.scatter(sample_inds, plot_sigs[sample_inds, chan_inds], color='r')
good_spikes = np.array(good_spikes, dtype=_dtype_spike)
pred = make_prediction_signals(good_spikes, self.internal_dtype, plot_sigs.shape, self.catalogue, safe=True)
plot_pred = pred.copy()
for c in range(self.nb_channel):
plot_pred[:, c] += c*30
ax.plot(plot_pred, color='m')
plt.show()
| mit |
chandlercr/aima-python | grading/bayesian-submissions.py | 15 | 2415 | import importlib
import traceback
from grading.util import roster, print_table
# from logic import FolKB
# from utils import expr
import os
from sklearn.naive_bayes import GaussianNB
gnb = GaussianNB()
def indent(howMuch = 1):
space = ' '
for i in range(1, howMuch):
space += ' '
return space
def printKB(label, kb):
print(indent(), label + ' example:')
print(indent(2), 'knowledge base:')
for clause in kb.clauses:
print(indent(3), str(clause))
def printResults(query, gen, limit=3):
for count in range(limit):
try:
long = next(gen)
except StopIteration:
print()
return
short = {}
for v in long:
if v in query.args:
short[v] = long[v]
print(short, end=' ')
print('...')
def tryOne(label, frame):
fit = gnb.fit(frame.data, frame.target)
print('')
print_table(fit.theta_,
header=[frame.feature_names],
topLeft=['Means:'],
leftColumn=frame.target_names,
numfmt='%6.3f',
njust='center',
tjust='rjust',
)
y_pred = fit.predict(frame.data)
print("Number of mislabeled points out of a total %d points : %d"
% (len(frame.data), (frame.target != y_pred).sum()))
def tryExamples(examples):
for label in examples:
tryOne(label, examples[label])
submissions = {}
scores = {}
message1 = 'Submissions that compile:'
root = os.getcwd()
for student in roster:
try:
os.chdir(root + '/submissions/' + student)
# http://stackoverflow.com/a/17136796/2619926
mod = importlib.import_module('submissions.' + student + '.myBayes')
submissions[student] = mod.Examples
message1 += ' ' + student
except ImportError:
pass
except:
traceback.print_exc()
os.chdir(root)
print(message1)
print('----------------------------------------')
for student in roster:
if not student in submissions.keys():
continue
scores[student] = []
try:
examples = submissions[student]
print('Bayesian Networks from:', student)
tryExamples(examples)
except:
traceback.print_exc()
print(student + ' scores ' + str(scores[student]) + ' = ' + str(sum(scores[student])))
print('----------------------------------------')
| mit |
Moriadry/tensorflow | tensorflow/contrib/learn/python/learn/estimators/_sklearn.py | 153 | 6723 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""sklearn cross-support."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import os
import numpy as np
import six
def _pprint(d):
return ', '.join(['%s=%s' % (key, str(value)) for key, value in d.items()])
class _BaseEstimator(object):
"""This is a cross-import when sklearn is not available.
Adopted from sklearn.BaseEstimator implementation.
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/base.py
"""
def get_params(self, deep=True):
"""Get parameters for this estimator.
Args:
deep: boolean, optional
If `True`, will return the parameters for this estimator and
contained subobjects that are estimators.
Returns:
params : mapping of string to any
Parameter names mapped to their values.
"""
out = dict()
param_names = [name for name in self.__dict__ if not name.startswith('_')]
for key in param_names:
value = getattr(self, key, None)
if isinstance(value, collections.Callable):
continue
# XXX: should we rather test if instance of estimator?
if deep and hasattr(value, 'get_params'):
deep_items = value.get_params().items()
out.update((key + '__' + k, val) for k, val in deep_items)
out[key] = value
return out
def set_params(self, **params):
"""Set the parameters of this estimator.
The method works on simple estimators as well as on nested objects
(such as pipelines). The former have parameters of the form
``<component>__<parameter>`` so that it's possible to update each
component of a nested object.
Args:
**params: Parameters.
Returns:
self
Raises:
ValueError: If params contain invalid names.
"""
if not params:
# Simple optimisation to gain speed (inspect is slow)
return self
valid_params = self.get_params(deep=True)
for key, value in six.iteritems(params):
split = key.split('__', 1)
if len(split) > 1:
# nested objects case
name, sub_name = split
if name not in valid_params:
raise ValueError('Invalid parameter %s for estimator %s. '
'Check the list of available parameters '
'with `estimator.get_params().keys()`.' %
(name, self))
sub_object = valid_params[name]
sub_object.set_params(**{sub_name: value})
else:
# simple objects case
if key not in valid_params:
raise ValueError('Invalid parameter %s for estimator %s. '
'Check the list of available parameters '
'with `estimator.get_params().keys()`.' %
(key, self.__class__.__name__))
setattr(self, key, value)
return self
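# --- Illustrative sketch (not part of the original module) ------------------
# set_params() accepts nested parameters via the <component>__<parameter>
# convention. Assuming an estimator `est` whose attribute `base` is itself an
# estimator exposing `alpha` (both names are assumptions):
#
#     est.set_params(base__alpha=0.1)   # forwarded to est.base.set_params
#     est.set_params(max_iter=100)      # sets a plain attribute on est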
def __repr__(self):
class_name = self.__class__.__name__
return '%s(%s)' % (class_name,
_pprint(self.get_params(deep=False)),)
# pylint: disable=old-style-class
class _ClassifierMixin():
"""Mixin class for all classifiers."""
pass
class _RegressorMixin():
"""Mixin class for all regression estimators."""
pass
class _TransformerMixin():
"""Mixin class for all transformer estimators."""
class NotFittedError(ValueError, AttributeError):
"""Exception class to raise if estimator is used before fitting.
This class inherits from both ValueError and AttributeError to help with
exception handling and backward compatibility.
Examples:
>>> from sklearn.svm import LinearSVC
>>> from sklearn.exceptions import NotFittedError
>>> try:
... LinearSVC().predict([[1, 2], [2, 3], [3, 4]])
... except NotFittedError as e:
... print(repr(e))
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
NotFittedError('This LinearSVC instance is not fitted yet',)
Copied from
https://github.com/scikit-learn/scikit-learn/master/sklearn/exceptions.py
"""
# pylint: enable=old-style-class
def _accuracy_score(y_true, y_pred):
score = y_true == y_pred
return np.average(score)
def _mean_squared_error(y_true, y_pred):
if len(y_true.shape) > 1:
y_true = np.squeeze(y_true)
if len(y_pred.shape) > 1:
y_pred = np.squeeze(y_pred)
return np.average((y_true - y_pred)**2)
def _train_test_split(*args, **options):
# pylint: disable=missing-docstring
test_size = options.pop('test_size', None)
train_size = options.pop('train_size', None)
random_state = options.pop('random_state', None)
if test_size is None and train_size is None:
train_size = 0.75
elif train_size is None:
train_size = 1 - test_size
train_size = int(train_size * args[0].shape[0])
np.random.seed(random_state)
indices = np.random.permutation(args[0].shape[0])
train_idx, test_idx = indices[:train_size], indices[train_size:]
result = []
for x in args:
result += [x.take(train_idx, axis=0), x.take(test_idx, axis=0)]
return tuple(result)
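# --- Hedged usage sketch (not part of the original module) ------------------
# The naive fallback splits every positional array along axis 0 with the same
# shuffled indices, mirroring sklearn's train_test_split. For example:
#
#     import numpy as np
#     X, y = np.arange(20).reshape(10, 2), np.arange(10)
#     X_train, X_test, y_train, y_test = _train_test_split(
#         X, y, test_size=0.25, random_state=0)
#     # X_train.shape == (7, 2), X_test.shape == (3, 2)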
# If "TENSORFLOW_SKLEARN" flag is defined then try to import from sklearn.
TRY_IMPORT_SKLEARN = os.environ.get('TENSORFLOW_SKLEARN', False)
if TRY_IMPORT_SKLEARN:
# pylint: disable=g-import-not-at-top,g-multiple-import,unused-import
from sklearn.base import BaseEstimator, ClassifierMixin, RegressorMixin, TransformerMixin
from sklearn.metrics import accuracy_score, log_loss, mean_squared_error
from sklearn.cross_validation import train_test_split
try:
from sklearn.exceptions import NotFittedError
except ImportError:
try:
from sklearn.utils.validation import NotFittedError
except ImportError:
pass
else:
# Naive implementations of sklearn classes and functions.
BaseEstimator = _BaseEstimator
ClassifierMixin = _ClassifierMixin
RegressorMixin = _RegressorMixin
TransformerMixin = _TransformerMixin
accuracy_score = _accuracy_score
log_loss = None
mean_squared_error = _mean_squared_error
train_test_split = _train_test_split
| apache-2.0 |
dkoes/md-scripts | rmsf.py | 1 | 3438 | '''
Calculates the average RMSF for each residue in the trajectory using
MDAnalysis's RMSF method.
Output:
If called using the shortcut for multiple trajectories, a single csv called rmsfs.csv is returned
containing the resname, resid, and trajectory number
e.g.
resname | resid | traj_1 | resid | traj_2 ...
LEU | 1 | 1.272 | 1 | 1.410
...
If called using a single trajectory, a csv called rmsfs.csv is returned containing resid, resname,
and RMSF
resid | resname | RMSF
1 | LEU | 1.27
...
Running the script:
Since calculating RMSF is not a very intensive calculation, it may be useful to just
produce a single csv for all trajectories. If your trajectories are .dcd and have
a naming scheme of prefix_number.dcd, e.g. traj_1.dcd, you can use the shortcut call.
python rmsf.py MDAnalysis_supported_topology trajectory_prefix total_number_of_trajs_starting_at_1
e.g.
python rmsf.py FXN_R165N_complex.prmtop FXN_R165N_compex_ 10
You can also call the script using a single trajectory:
python rmsfs.py MDAnalysis_supported_topology MDAnalysis_supported_trajectory
e.g.
python rmsf.py FXN_R165N_complex.prmtop FXN_R165N_compex_1.dcd
'''
import sys
import MDAnalysis
import pandas as pd
from pandas import DataFrame
import numpy as np # numpy 1.16.6
from MDAnalysis.analysis import align
from MDAnalysis.analysis.rms import RMSF
'''
Loads trajectory(s) into trajs
'''
def getTrajs():
top = str(sys.argv[1])
dcd_prefix = str(sys.argv[2])
trajs = [];
if (len(sys.argv) == 4):
dcd_total = int(sys.argv[3])
for i in range(dcd_total):
trajs.append(MDAnalysis.Universe(top, dcd_prefix + '{}.dcd'.format(i + 1)))
traj = MDAnalysis.Universe(top, [dcd_prefix + '{}.dcd'.format(i + 1) for i in range(dcd_total)])
else:
trajs.append(MDAnalysis.Universe(top, dcd_prefix))
traj = trajs[0]
return trajs, traj
'''
Calculates the average RMSF for each residue in the trajectory
'''
def get_rmsf(t,label=None):
sel = 'protein and not name H*'
prot = t.select_atoms(sel)
average_coordinates = t.trajectory.timeseries(asel=prot).mean(axis=1)
# make a reference structure (need to reshape into a 1-frame "trajectory")
reference = MDAnalysis.Merge(prot).load_new(average_coordinates[:, None, :], order="afc")
aligner = align.AlignTraj(t, reference, select=sel, in_memory=True).run()
print('\tDone Aligning... calculating RMSF')
rmsfer = RMSF(prot).run()
ret = pd.DataFrame(zip(prot.resids,prot.resnames,rmsfer.rmsf),columns=('resid','resname','rmsf'))
if label != None:
ret['label'] = label
return ret
'''
Creates and returns the final dataframe to become rmsfs.csv
'''
def rmsf_df(trajs, traj):
rmsfs = DataFrame()
protein = traj.select_atoms('protein')
rmsfs['resname'] = protein.residues.resnames
for i in range(len(trajs)):
print('\tCalculating rmsf for traj {}'.format(i + 1))
rmsf = get_rmsf(trajs[i])
if (len(trajs) == 1):
col_name = 'RMSF'
else:
col_name = 'traj_%s' % str(i + 1)
rmsf = rmsf.groupby(['resid']).mean().reset_index().rename(columns={'rmsf': col_name})
rmsfs = pd.concat([rmsfs, rmsf], axis=1)
if (len(trajs) == 1):
return rmsfs.set_index('resid')
else:
return rmsfs
if __name__ == '__main__' :
print('Loading trajectories...')
trajs, traj = getTrajs()
print('Calculating rmsfs')
rmsfs = rmsf_df(trajs, traj)
print('Outputting csv...')
rmsfs.to_csv('rmsfs.csv')
print('Done')
| bsd-3-clause |
kapilsaxena33/pyentropy | docs/sphinxext/inheritance_diagram.py | 98 | 13648 | """
Defines a docutils directive for inserting inheritance diagrams.
Provide the directive with one or more classes or modules (separated
by whitespace). For modules, all of the classes in that module will
be used.
Example::
Given the following classes:
class A: pass
class B(A): pass
class C(A): pass
class D(B, C): pass
class E(B): pass
.. inheritance-diagram:: D E
Produces a graph like the following:
      A
     / \
    B   C
   / \ /
  E   D
The graph is inserted as a PNG+image map into HTML and a PDF in
LaTeX.
"""
import inspect
import os
import re
import subprocess
try:
from hashlib import md5
except ImportError:
from md5 import md5
from docutils.nodes import Body, Element
from docutils.parsers.rst import directives
from sphinx.roles import xfileref_role
def my_import(name):
"""Module importer - taken from the python documentation.
This function allows importing names with dots in them."""
mod = __import__(name)
components = name.split('.')
for comp in components[1:]:
mod = getattr(mod, comp)
return mod
class DotException(Exception):
pass
class InheritanceGraph(object):
"""
Given a list of classes, determines the set of classes that
they inherit from all the way to the root "object", and then
is able to generate a graphviz dot graph from them.
"""
def __init__(self, class_names, show_builtins=False):
"""
*class_names* is a list of child classes to show bases from.
If *show_builtins* is True, then Python builtins will be shown
in the graph.
"""
self.class_names = class_names
self.classes = self._import_classes(class_names)
self.all_classes = self._all_classes(self.classes)
if len(self.all_classes) == 0:
raise ValueError("No classes found for inheritance diagram")
self.show_builtins = show_builtins
py_sig_re = re.compile(r'''^([\w.]*\.)? # class names
(\w+) \s* $ # optionally arguments
''', re.VERBOSE)
def _import_class_or_module(self, name):
"""
Import a class using its fully-qualified *name*.
"""
try:
path, base = self.py_sig_re.match(name).groups()
except:
raise ValueError(
"Invalid class or module '%s' specified for inheritance diagram" % name)
fullname = (path or '') + base
path = (path and path.rstrip('.'))
if not path:
path = base
try:
module = __import__(path, None, None, [])
# We must do an import of the fully qualified name. Otherwise if a
# subpackage 'a.b' is requested where 'import a' does NOT provide
# 'a.b' automatically, then 'a.b' will not be found below. This
# second call will force the equivalent of 'import a.b' to happen
# after the top-level import above.
my_import(fullname)
except ImportError:
raise ValueError(
"Could not import class or module '%s' specified for inheritance diagram" % name)
try:
todoc = module
for comp in fullname.split('.')[1:]:
todoc = getattr(todoc, comp)
except AttributeError:
raise ValueError(
"Could not find class or module '%s' specified for inheritance diagram" % name)
# If a class, just return it
if inspect.isclass(todoc):
return [todoc]
elif inspect.ismodule(todoc):
classes = []
for cls in todoc.__dict__.values():
if inspect.isclass(cls) and cls.__module__ == todoc.__name__:
classes.append(cls)
return classes
raise ValueError(
"'%s' does not resolve to a class or module" % name)
def _import_classes(self, class_names):
"""
Import a list of classes.
"""
classes = []
for name in class_names:
classes.extend(self._import_class_or_module(name))
return classes
def _all_classes(self, classes):
"""
Return a list of all classes that are ancestors of *classes*.
"""
all_classes = {}
def recurse(cls):
all_classes[cls] = None
for c in cls.__bases__:
if c not in all_classes:
recurse(c)
for cls in classes:
recurse(cls)
return all_classes.keys()
def class_name(self, cls, parts=0):
"""
Given a class object, return a fully-qualified name. This
works for things I've tested in matplotlib so far, but may not
be completely general.
"""
module = cls.__module__
if module == '__builtin__':
fullname = cls.__name__
else:
fullname = "%s.%s" % (module, cls.__name__)
if parts == 0:
return fullname
name_parts = fullname.split('.')
return '.'.join(name_parts[-parts:])
def get_all_class_names(self):
"""
Get all of the class names involved in the graph.
"""
return [self.class_name(x) for x in self.all_classes]
# These are the default options for graphviz
default_graph_options = {
"rankdir": "LR",
"size": '"8.0, 12.0"'
}
default_node_options = {
"shape": "box",
"fontsize": 10,
"height": 0.25,
"fontname": "Vera Sans, DejaVu Sans, Liberation Sans, Arial, Helvetica, sans",
"style": '"setlinewidth(0.5)"'
}
default_edge_options = {
"arrowsize": 0.5,
"style": '"setlinewidth(0.5)"'
}
def _format_node_options(self, options):
return ','.join(["%s=%s" % x for x in options.items()])
def _format_graph_options(self, options):
return ''.join(["%s=%s;\n" % x for x in options.items()])
def generate_dot(self, fd, name, parts=0, urls={},
graph_options={}, node_options={},
edge_options={}):
"""
Generate a graphviz dot graph from the classes that
were passed in to __init__.
*fd* is a Python file-like object to write to.
*name* is the name of the graph
*urls* is a dictionary mapping class names to http urls
*graph_options*, *node_options*, *edge_options* are
dictionaries containing key/value pairs to pass on as graphviz
properties.
"""
g_options = self.default_graph_options.copy()
g_options.update(graph_options)
n_options = self.default_node_options.copy()
n_options.update(node_options)
e_options = self.default_edge_options.copy()
e_options.update(edge_options)
fd.write('digraph %s {\n' % name)
fd.write(self._format_graph_options(g_options))
for cls in self.all_classes:
if not self.show_builtins and cls in __builtins__.values():
continue
name = self.class_name(cls, parts)
# Write the node
this_node_options = n_options.copy()
url = urls.get(self.class_name(cls))
if url is not None:
this_node_options['URL'] = '"%s"' % url
fd.write(' "%s" [%s];\n' %
(name, self._format_node_options(this_node_options)))
# Write the edges
for base in cls.__bases__:
if not self.show_builtins and base in __builtins__.values():
continue
base_name = self.class_name(base, parts)
fd.write(' "%s" -> "%s" [%s];\n' %
(base_name, name,
self._format_node_options(e_options)))
fd.write('}\n')
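# --- Illustrative sketch (not part of the original extension) ---------------
# For the A/B/C/D/E classes in the module docstring, generate_dot() emits a
# graphviz document roughly like the following (node/edge options trimmed;
# the exact text is an assumption based on the write calls above):
#
#     digraph diagram {
#     rankdir=LR;
#     size="8.0, 12.0";
#       "A" [shape=box,fontsize=10,...];
#       "B" [shape=box,fontsize=10,...];
#       "A" -> "B" [arrowsize=0.5,...];
#       ...
#     }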
def run_dot(self, args, name, parts=0, urls={},
graph_options={}, node_options={}, edge_options={}):
"""
Run graphviz 'dot' over this graph, returning whatever 'dot'
writes to stdout.
*args* will be passed along as commandline arguments.
*name* is the name of the graph
*urls* is a dictionary mapping class names to http urls
Raises DotException for any of the many os and
installation-related errors that may occur.
"""
try:
dot = subprocess.Popen(['dot'] + list(args),
stdin=subprocess.PIPE, stdout=subprocess.PIPE,
close_fds=True)
except OSError:
raise DotException("Could not execute 'dot'. Are you sure you have 'graphviz' installed?")
except ValueError:
raise DotException("'dot' called with invalid arguments")
except:
raise DotException("Unexpected error calling 'dot'")
self.generate_dot(dot.stdin, name, parts, urls, graph_options,
node_options, edge_options)
dot.stdin.close()
result = dot.stdout.read()
returncode = dot.wait()
if returncode != 0:
raise DotException("'dot' returned the errorcode %d" % returncode)
return result
class inheritance_diagram(Body, Element):
"""
A docutils node to use as a placeholder for the inheritance
diagram.
"""
pass
def inheritance_diagram_directive(name, arguments, options, content, lineno,
content_offset, block_text, state,
state_machine):
"""
Run when the inheritance_diagram directive is first encountered.
"""
node = inheritance_diagram()
class_names = arguments
# Create a graph starting with the list of classes
graph = InheritanceGraph(class_names)
# Create xref nodes for each target of the graph's image map and
# add them to the doc tree so that Sphinx can resolve the
# references to real URLs later. These nodes will eventually be
# removed from the doctree after we're done with them.
for name in graph.get_all_class_names():
refnodes, x = xfileref_role(
'class', ':class:`%s`' % name, name, 0, state)
node.extend(refnodes)
# Store the graph object so we can use it to generate the
# dot file later
node['graph'] = graph
# Store the original content for use as a hash
node['parts'] = options.get('parts', 0)
node['content'] = " ".join(class_names)
return [node]
def get_graph_hash(node):
return md5(node['content'] + str(node['parts'])).hexdigest()[-10:]
def html_output_graph(self, node):
"""
Output the graph for HTML. This will insert a PNG with clickable
image map.
"""
graph = node['graph']
parts = node['parts']
graph_hash = get_graph_hash(node)
name = "inheritance%s" % graph_hash
path = '_images'
dest_path = os.path.join(setup.app.builder.outdir, path)
if not os.path.exists(dest_path):
os.makedirs(dest_path)
png_path = os.path.join(dest_path, name + ".png")
path = setup.app.builder.imgpath
# Create a mapping from fully-qualified class names to URLs.
urls = {}
for child in node:
if child.get('refuri') is not None:
urls[child['reftitle']] = child.get('refuri')
elif child.get('refid') is not None:
urls[child['reftitle']] = '#' + child.get('refid')
# These arguments to dot will save a PNG file to disk and write
# an HTML image map to stdout.
image_map = graph.run_dot(['-Tpng', '-o%s' % png_path, '-Tcmapx'],
name, parts, urls)
return ('<img src="%s/%s.png" usemap="#%s" class="inheritance"/>%s' %
(path, name, name, image_map))
def latex_output_graph(self, node):
"""
Output the graph for LaTeX. This will insert a PDF.
"""
graph = node['graph']
parts = node['parts']
graph_hash = get_graph_hash(node)
name = "inheritance%s" % graph_hash
dest_path = os.path.abspath(os.path.join(setup.app.builder.outdir, '_images'))
if not os.path.exists(dest_path):
os.makedirs(dest_path)
pdf_path = os.path.abspath(os.path.join(dest_path, name + ".pdf"))
graph.run_dot(['-Tpdf', '-o%s' % pdf_path],
name, parts, graph_options={'size': '"6.0,6.0"'})
return '\n\\includegraphics{%s}\n\n' % pdf_path
def visit_inheritance_diagram(inner_func):
"""
This is just a wrapper around html/latex_output_graph to make it
easier to handle errors and insert warnings.
"""
def visitor(self, node):
try:
content = inner_func(self, node)
except DotException as e:
# Insert the exception as a warning in the document
warning = self.document.reporter.warning(str(e), line=node.line)
warning.parent = node
node.children = [warning]
else:
source = self.document.attributes['source']
self.body.append(content)
node.children = []
return visitor
def do_nothing(self, node):
pass
def setup(app):
setup.app = app
setup.confdir = app.confdir
app.add_node(
inheritance_diagram,
latex=(visit_inheritance_diagram(latex_output_graph), do_nothing),
html=(visit_inheritance_diagram(html_output_graph), do_nothing))
app.add_directive(
'inheritance-diagram', inheritance_diagram_directive,
False, (1, 100, 0), parts = directives.nonnegative_int)
| gpl-2.0 |
elijah513/scikit-learn | sklearn/metrics/cluster/unsupervised.py | 230 | 8281 | """ Unsupervised evaluation metrics. """
# Authors: Robert Layton <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from ...utils import check_random_state
from ..pairwise import pairwise_distances
def silhouette_score(X, labels, metric='euclidean', sample_size=None,
random_state=None, **kwds):
"""Compute the mean Silhouette Coefficient of all samples.
The Silhouette Coefficient is calculated using the mean intra-cluster
distance (``a``) and the mean nearest-cluster distance (``b``) for each
sample. The Silhouette Coefficient for a sample is ``(b - a) / max(a,
b)``. To clarify, ``b`` is the distance between a sample and the nearest
cluster that the sample is not a part of.
    Note that the Silhouette Coefficient is only defined if the number of
    labels satisfies 2 <= n_labels <= n_samples - 1.
This function returns the mean Silhouette Coefficient over all samples.
To obtain the values for each sample, use :func:`silhouette_samples`.
The best value is 1 and the worst value is -1. Values near 0 indicate
overlapping clusters. Negative values generally indicate that a sample has
been assigned to the wrong cluster, as a different cluster is more similar.
Read more in the :ref:`User Guide <silhouette_coefficient>`.
Parameters
----------
X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
[n_samples_a, n_features] otherwise
Array of pairwise distances between samples, or a feature array.
labels : array, shape = [n_samples]
Predicted labels for each sample.
metric : string, or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
allowed by :func:`metrics.pairwise.pairwise_distances
<sklearn.metrics.pairwise.pairwise_distances>`. If X is the distance
array itself, use ``metric="precomputed"``.
sample_size : int or None
The size of the sample to use when computing the Silhouette Coefficient
on a random subset of the data.
If ``sample_size is None``, no sampling is used.
random_state : integer or numpy.RandomState, optional
The generator used to randomly select a subset of samples if
``sample_size is not None``. If an integer is given, it fixes the seed.
Defaults to the global numpy random number generator.
`**kwds` : optional keyword parameters
Any further parameters are passed directly to the distance function.
If using a scipy.spatial.distance metric, the parameters are still
metric dependent. See the scipy docs for usage examples.
Returns
-------
silhouette : float
Mean Silhouette Coefficient for all samples.
References
----------
.. [1] `Peter J. Rousseeuw (1987). "Silhouettes: a Graphical Aid to the
       Interpretation and Validation of Cluster Analysis". Journal of
       Computational and Applied Mathematics 20: 53-65.
<http://www.sciencedirect.com/science/article/pii/0377042787901257>`_
.. [2] `Wikipedia entry on the Silhouette Coefficient
<http://en.wikipedia.org/wiki/Silhouette_(clustering)>`_
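    Examples
    --------
    An illustrative toy example (hypothetical data, shown only to demonstrate
    the call; the exact value depends on the input):
    >>> import numpy as np
    >>> from sklearn.metrics import silhouette_score
    >>> X = np.array([[0., 0.], [0., 1.], [10., 10.], [10., 11.]])
    >>> labels = np.array([0, 0, 1, 1])
    >>> silhouette_score(X, labels)  # close to 1 for well-separated clusters
    ...                              # doctest: +SKIP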
"""
n_labels = len(np.unique(labels))
n_samples = X.shape[0]
if not 1 < n_labels < n_samples:
raise ValueError("Number of labels is %d. Valid values are 2 "
"to n_samples - 1 (inclusive)" % n_labels)
if sample_size is not None:
random_state = check_random_state(random_state)
indices = random_state.permutation(X.shape[0])[:sample_size]
if metric == "precomputed":
X, labels = X[indices].T[indices].T, labels[indices]
else:
X, labels = X[indices], labels[indices]
return np.mean(silhouette_samples(X, labels, metric=metric, **kwds))
def silhouette_samples(X, labels, metric='euclidean', **kwds):
"""Compute the Silhouette Coefficient for each sample.
The Silhouette Coefficient is a measure of how well samples are clustered
with samples that are similar to themselves. Clustering models with a high
Silhouette Coefficient are said to be dense, where samples in the same
cluster are similar to each other, and well separated, where samples in
different clusters are not very similar to each other.
The Silhouette Coefficient is calculated using the mean intra-cluster
distance (``a``) and the mean nearest-cluster distance (``b``) for each
sample. The Silhouette Coefficient for a sample is ``(b - a) / max(a,
b)``.
    Note that the Silhouette Coefficient is only defined if the number of
    labels satisfies 2 <= n_labels <= n_samples - 1.
This function returns the Silhouette Coefficient for each sample.
The best value is 1 and the worst value is -1. Values near 0 indicate
overlapping clusters.
Read more in the :ref:`User Guide <silhouette_coefficient>`.
Parameters
----------
X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
[n_samples_a, n_features] otherwise
Array of pairwise distances between samples, or a feature array.
labels : array, shape = [n_samples]
label values for each sample
metric : string, or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
allowed by :func:`sklearn.metrics.pairwise.pairwise_distances`. If X is
the distance array itself, use "precomputed" as the metric.
`**kwds` : optional keyword parameters
Any further parameters are passed directly to the distance function.
If using a ``scipy.spatial.distance`` metric, the parameters are still
metric dependent. See the scipy docs for usage examples.
Returns
-------
silhouette : array, shape = [n_samples]
Silhouette Coefficient for each samples.
References
----------
.. [1] `Peter J. Rousseeuw (1987). "Silhouettes: a Graphical Aid to the
       Interpretation and Validation of Cluster Analysis". Journal of
       Computational and Applied Mathematics 20: 53-65.
<http://www.sciencedirect.com/science/article/pii/0377042787901257>`_
.. [2] `Wikipedia entry on the Silhouette Coefficient
<http://en.wikipedia.org/wiki/Silhouette_(clustering)>`_
"""
distances = pairwise_distances(X, metric=metric, **kwds)
n = labels.shape[0]
A = np.array([_intra_cluster_distance(distances[i], labels, i)
for i in range(n)])
B = np.array([_nearest_cluster_distance(distances[i], labels, i)
for i in range(n)])
sil_samples = (B - A) / np.maximum(A, B)
return sil_samples
def _intra_cluster_distance(distances_row, labels, i):
"""Calculate the mean intra-cluster distance for sample i.
Parameters
----------
distances_row : array, shape = [n_samples]
Pairwise distance matrix between sample i and each sample.
labels : array, shape = [n_samples]
label values for each sample
i : int
Sample index being calculated. It is excluded from calculation and
used to determine the current label
Returns
-------
a : float
Mean intra-cluster distance for sample i
"""
mask = labels == labels[i]
mask[i] = False
if not np.any(mask):
# cluster of size 1
return 0
a = np.mean(distances_row[mask])
return a
def _nearest_cluster_distance(distances_row, labels, i):
"""Calculate the mean nearest-cluster distance for sample i.
Parameters
----------
distances_row : array, shape = [n_samples]
Pairwise distance matrix between sample i and each sample.
labels : array, shape = [n_samples]
label values for each sample
i : int
Sample index being calculated. It is used to determine the current
label.
Returns
-------
b : float
Mean nearest-cluster distance for sample i
"""
label = labels[i]
b = np.min([np.mean(distances_row[labels == cur_label])
for cur_label in set(labels) if not cur_label == label])
return b
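# Worked micro-example for the two helpers above (illustrative values only):
# with labels = [0, 0, 1] and distances_row = [0, 2, 6] for sample i = 0,
# _intra_cluster_distance returns 2 (mean distance to the other label-0
# sample) and _nearest_cluster_distance returns 6 (mean distance to the
# nearest other cluster, here the single label-1 sample).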
| bsd-3-clause |
google-research/robustness_metrics | robustness_metrics/projects/revisiting_calibration/figures/clean_imagenet_temp_scaling.py | 1 | 5194 | # coding=utf-8
# Copyright 2021 The Robustness Metrics Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Figures for "Revisiting Calibration of Modern Neural Networks".
This module contains figures comparing ECE on in-distribution data between
unscaled and temperature-scaled predictions.
"""
from typing import List, Optional, Tuple
import matplotlib as mpl
from matplotlib import pyplot as plt
import pandas as pd
from robustness_metrics.projects.revisiting_calibration import display
from robustness_metrics.projects.revisiting_calibration import plotting
from robustness_metrics.projects.revisiting_calibration import utils
def plot(df_main: pd.DataFrame,
gce_prefix: str = plotting.STD_GCE_PREFIX,
rescaling_method: str = "temperature_scaling",
add_guo: bool = False) -> mpl.figure.Figure:
"""Plots acc/calib and reliability diagrams on clean ImageNet (Figure 1)."""
rescaling_methods = ["none", rescaling_method]
family_order = display.get_model_families_sorted()
if add_guo:
family_order.append("guo")
# Set up figure:
fig = plt.figure(figsize=(display.FULL_WIDTH/2, 1.4))
spec = fig.add_gridspec(ncols=2, nrows=1)
for ax_i, rescaling_method in enumerate(rescaling_methods):
df_plot, cmap = _get_data(df_main, gce_prefix, family_order,
rescaling_methods=[rescaling_method])
ax = fig.add_subplot(spec[:, ax_i])
big_ax = ax
for i, family in enumerate(family_order):
if family == "guo":
continue
data_sub = df_plot[df_plot.ModelFamily == family]
if data_sub.empty:
continue
ax.scatter(
data_sub["downstream_error"],
data_sub["MetricValue"],
s=plotting.model_to_scatter_size(data_sub.model_size),
c=data_sub.family_index,
cmap=cmap,
vmin=0,
vmax=len(family_order),
marker=utils.assert_and_get_constant(data_sub.family_marker),
alpha=0.7,
linewidth=0.0,
zorder=100 - i, # Z-order is same as model family order.
label=family)
# Manually add Guo et al data:
# From Table 1 and Table S2 in https://arxiv.org/pdf/1706.04599.pdf.
# First model is DenseNet161, second is ResNet152.
if add_guo:
size = plotting.model_to_scatter_size(1)
color = [len(family_order) - 1] * 2
marker = "x"
if rescaling_method == "none":
ax.scatter([0.2257, 0.2231], [0.0628, 0.0548],
s=size, c=color, marker=marker, alpha=0.7, label="guo")
if rescaling_method == "temperature_scaling":
ax.scatter([0.2257, 0.2231], [0.0199, 0.0186],
s=size, c=color, marker=marker, alpha=0.7, label="guo")
plotting.show_spines(ax)
# Aspect ratios are tuned manually for display in the paper:
ax.set_anchor("N")
ax.grid(False, which="minor")
ax.grid(True, axis="both")
ax.xaxis.set_major_locator(mpl.ticker.MultipleLocator(0.1))
ax.yaxis.set_major_locator(mpl.ticker.MultipleLocator(0.01))
ax.set_ylim(bottom=0.01, top=0.09)
ax.set_xlim(0.05, 0.5)
ax.set_xlabel(display.XLABEL_INET_ERROR)
if rescaling_method == "none":
ax.set_title("Unscaled")
elif rescaling_method == "temperature_scaling":
ax.set_title("Temperature-scaled")
ax.set_yticklabels("")
if ax.is_first_col():
ax.set_ylabel("ECE")
# Model family legend:
handles, labels = plotting.get_model_family_legend(big_ax, family_order)
legend = big_ax.legend(
handles=handles, labels=labels, loc="upper right", frameon=True,
labelspacing=0.3, handletextpad=0.1, borderpad=0.3, fontsize=4)
legend.get_frame().set_linewidth(mpl.rcParams["axes.linewidth"])
legend.get_frame().set_edgecolor("lightgray")
plotting.apply_to_fig_text(fig, display.prettify)
plotting.apply_to_fig_text(fig, lambda x: x.replace("EfficientNet", "EffNet"))
return fig
def _get_data(
df_main: pd.DataFrame,
gce_prefix: str,
family_order: List[str],
rescaling_methods: Optional[List[str]] = None,
dataset_name: str = "imagenet(split='validation[20%:]')"
) -> Tuple[pd.DataFrame, mpl.colors.ListedColormap]:
"""Selects data for plotting."""
# Select data:
mask = df_main.Metric.str.startswith(gce_prefix)
mask &= df_main.ModelName.isin(display.get_standard_model_list())
mask &= df_main.DatasetName.isin([dataset_name])
mask &= df_main.rescaling_method.isin(
rescaling_methods or ["temperature_scaling"])
mask &= df_main.ModelFamily.isin(family_order)
df_plot = df_main[mask].copy()
df_plot, cmap = display.add_display_data(df_plot, family_order)
return df_plot, cmap
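# A hedged usage sketch (not part of the module): assuming a DataFrame
# `df_main` with the columns referenced above (Metric, ModelName, DatasetName,
# rescaling_method, ModelFamily, MetricValue, downstream_error, model_size),
# the figure could be produced and saved along these lines:
#
#   fig = plot(df_main, rescaling_method="temperature_scaling", add_guo=True)
#   fig.savefig("clean_imagenet_temp_scaling.pdf", bbox_inches="tight")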
| apache-2.0 |
xyguo/scikit-learn | sklearn/neural_network/tests/test_stochastic_optimizers.py | 146 | 4310 | import numpy as np
from sklearn.neural_network._stochastic_optimizers import (BaseOptimizer,
SGDOptimizer,
AdamOptimizer)
from sklearn.utils.testing import (assert_array_equal, assert_true,
assert_false, assert_equal)
shapes = [(4, 6), (6, 8), (7, 8, 9)]
def test_base_optimizer():
params = [np.zeros(shape) for shape in shapes]
for lr in [10 ** i for i in range(-3, 4)]:
optimizer = BaseOptimizer(params, lr)
assert_true(optimizer.trigger_stopping('', False))
def test_sgd_optimizer_no_momentum():
params = [np.zeros(shape) for shape in shapes]
for lr in [10 ** i for i in range(-3, 4)]:
optimizer = SGDOptimizer(params, lr, momentum=0, nesterov=False)
grads = [np.random.random(shape) for shape in shapes]
expected = [param - lr * grad for param, grad in zip(params, grads)]
optimizer.update_params(grads)
for exp, param in zip(expected, optimizer.params):
assert_array_equal(exp, param)
def test_sgd_optimizer_momentum():
params = [np.zeros(shape) for shape in shapes]
lr = 0.1
for momentum in np.arange(0.5, 0.9, 0.1):
optimizer = SGDOptimizer(params, lr, momentum=momentum, nesterov=False)
velocities = [np.random.random(shape) for shape in shapes]
optimizer.velocities = velocities
grads = [np.random.random(shape) for shape in shapes]
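        # Classical momentum reference used as the expected result:
        # v <- momentum * v - lr * grad, then param <- param + v,
        # computed here with plain list comprehensions.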
updates = [momentum * velocity - lr * grad
for velocity, grad in zip(velocities, grads)]
expected = [param + update for param, update in zip(params, updates)]
optimizer.update_params(grads)
for exp, param in zip(expected, optimizer.params):
assert_array_equal(exp, param)
def test_sgd_optimizer_trigger_stopping():
params = [np.zeros(shape) for shape in shapes]
lr = 2e-6
optimizer = SGDOptimizer(params, lr, lr_schedule='adaptive')
assert_false(optimizer.trigger_stopping('', False))
assert_equal(lr / 5, optimizer.learning_rate)
assert_true(optimizer.trigger_stopping('', False))
def test_sgd_optimizer_nesterovs_momentum():
params = [np.zeros(shape) for shape in shapes]
lr = 0.1
for momentum in np.arange(0.5, 0.9, 0.1):
optimizer = SGDOptimizer(params, lr, momentum=momentum, nesterov=True)
velocities = [np.random.random(shape) for shape in shapes]
optimizer.velocities = velocities
grads = [np.random.random(shape) for shape in shapes]
updates = [momentum * velocity - lr * grad
for velocity, grad in zip(velocities, grads)]
updates = [momentum * update - lr * grad
for update, grad in zip(updates, grads)]
expected = [param + update for param, update in zip(params, updates)]
optimizer.update_params(grads)
for exp, param in zip(expected, optimizer.params):
assert_array_equal(exp, param)
def test_adam_optimizer():
params = [np.zeros(shape) for shape in shapes]
lr = 0.001
epsilon = 1e-8
for beta_1 in np.arange(0.9, 1.0, 0.05):
for beta_2 in np.arange(0.995, 1.0, 0.001):
optimizer = AdamOptimizer(params, lr, beta_1, beta_2, epsilon)
ms = [np.random.random(shape) for shape in shapes]
vs = [np.random.random(shape) for shape in shapes]
t = 10
optimizer.ms = ms
optimizer.vs = vs
optimizer.t = t - 1
grads = [np.random.random(shape) for shape in shapes]
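            # Reference Adam step used as the expected result: update the
            # biased first/second moment estimates, then scale by the
            # bias-corrected step size lr * sqrt(1 - beta_2**t) / (1 - beta_1**t)
            # before dividing by (sqrt(v) + epsilon).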
ms = [beta_1 * m + (1 - beta_1) * grad
for m, grad in zip(ms, grads)]
vs = [beta_2 * v + (1 - beta_2) * (grad ** 2)
for v, grad in zip(vs, grads)]
learning_rate = lr * np.sqrt(1 - beta_2 ** t) / (1 - beta_1**t)
updates = [-learning_rate * m / (np.sqrt(v) + epsilon)
for m, v in zip(ms, vs)]
expected = [param + update
for param, update in zip(params, updates)]
optimizer.update_params(grads)
for exp, param in zip(expected, optimizer.params):
assert_array_equal(exp, param)
| bsd-3-clause |
IssamLaradji/scikit-learn | sklearn/linear_model/tests/test_coordinate_descent.py | 11 | 21992 | # Authors: Olivier Grisel <[email protected]>
# Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
from sys import version_info
import numpy as np
from scipy import interpolate, sparse
from copy import deepcopy
from sklearn.datasets import load_boston
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn.linear_model.coordinate_descent import Lasso, \
LassoCV, ElasticNet, ElasticNetCV, MultiTaskLasso, MultiTaskElasticNet, \
MultiTaskElasticNetCV, MultiTaskLassoCV, lasso_path, enet_path
from sklearn.linear_model import LassoLarsCV, lars_path
def check_warnings():
if version_info < (2, 6):
raise SkipTest("Testing for warnings is not supported in versions \
older than Python 2.6")
def test_lasso_zero():
"""Check that the lasso can handle zero data without crashing"""
X = [[0], [0], [0]]
y = [0, 0, 0]
clf = Lasso(alpha=0.1).fit(X, y)
pred = clf.predict([[1], [2], [3]])
assert_array_almost_equal(clf.coef_, [0])
assert_array_almost_equal(pred, [0, 0, 0])
assert_almost_equal(clf.dual_gap_, 0)
def test_lasso_toy():
"""
Test Lasso on a toy example for various values of alpha.
    When validating this against glmnet, notice that glmnet divides it
    by nobs.
"""
X = [[-1], [0], [1]]
Y = [-1, 0, 1] # just a straight line
T = [[2], [3], [4]] # test sample
clf = Lasso(alpha=1e-8)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [1])
assert_array_almost_equal(pred, [2, 3, 4])
assert_almost_equal(clf.dual_gap_, 0)
clf = Lasso(alpha=0.1)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [.85])
assert_array_almost_equal(pred, [1.7, 2.55, 3.4])
assert_almost_equal(clf.dual_gap_, 0)
clf = Lasso(alpha=0.5)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [.25])
assert_array_almost_equal(pred, [0.5, 0.75, 1.])
assert_almost_equal(clf.dual_gap_, 0)
clf = Lasso(alpha=1)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [.0])
assert_array_almost_equal(pred, [0, 0, 0])
assert_almost_equal(clf.dual_gap_, 0)
def test_enet_toy():
"""
Test ElasticNet for various parameters of alpha and l1_ratio.
    Actually, the parameter alpha = 0 should not be allowed. However,
    we test it as a border case.
    ElasticNet is tested with and without a precomputed Gram matrix.
"""
X = np.array([[-1.], [0.], [1.]])
Y = [-1, 0, 1] # just a straight line
T = [[2.], [3.], [4.]] # test sample
# this should be the same as lasso
clf = ElasticNet(alpha=1e-8, l1_ratio=1.0)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [1])
assert_array_almost_equal(pred, [2, 3, 4])
assert_almost_equal(clf.dual_gap_, 0)
clf = ElasticNet(alpha=0.5, l1_ratio=0.3, max_iter=100,
precompute=False)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.50819], decimal=3)
assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3)
assert_almost_equal(clf.dual_gap_, 0)
clf.set_params(max_iter=100, precompute=True)
clf.fit(X, Y) # with Gram
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.50819], decimal=3)
assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3)
assert_almost_equal(clf.dual_gap_, 0)
clf.set_params(max_iter=100, precompute=np.dot(X.T, X))
clf.fit(X, Y) # with Gram
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.50819], decimal=3)
assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3)
assert_almost_equal(clf.dual_gap_, 0)
clf = ElasticNet(alpha=0.5, l1_ratio=0.5)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.45454], 3)
assert_array_almost_equal(pred, [0.9090, 1.3636, 1.8181], 3)
assert_almost_equal(clf.dual_gap_, 0)
def build_dataset(n_samples=50, n_features=200, n_informative_features=10,
n_targets=1):
"""
build an ill-posed linear regression problem with many noisy features and
comparatively few samples
"""
random_state = np.random.RandomState(0)
if n_targets > 1:
w = random_state.randn(n_features, n_targets)
else:
w = random_state.randn(n_features)
w[n_informative_features:] = 0.0
X = random_state.randn(n_samples, n_features)
y = np.dot(X, w)
X_test = random_state.randn(n_samples, n_features)
y_test = np.dot(X_test, w)
return X, y, X_test, y_test
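# Illustrative use of the helper above (shapes only; values are random):
#
#   X, y, X_test, y_test = build_dataset()
#   # X.shape == (50, 200), y.shape == (50,); only the first 10 features
#   # carry signal, so sparse estimators should recover a sparse coef_.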
def test_lasso_cv():
X, y, X_test, y_test = build_dataset()
max_iter = 150
clf = LassoCV(n_alphas=10, eps=1e-3, max_iter=max_iter).fit(X, y)
assert_almost_equal(clf.alpha_, 0.056, 2)
clf = LassoCV(n_alphas=10, eps=1e-3, max_iter=max_iter, precompute=True)
clf.fit(X, y)
assert_almost_equal(clf.alpha_, 0.056, 2)
# Check that the lars and the coordinate descent implementation
# select a similar alpha
lars = LassoLarsCV(normalize=False, max_iter=30).fit(X, y)
    # for this we check that their positions in the grid of
    # clf.alphas_ differ by at most 1
assert_true(np.abs(
np.searchsorted(clf.alphas_[::-1], lars.alpha_)
- np.searchsorted(clf.alphas_[::-1], clf.alpha_)) <= 1)
# check that they also give a similar MSE
mse_lars = interpolate.interp1d(lars.cv_alphas_, lars.cv_mse_path_.T)
np.testing.assert_approx_equal(mse_lars(clf.alphas_[5]).mean(),
clf.mse_path_[5].mean(), significant=2)
# test set
assert_greater(clf.score(X_test, y_test), 0.99)
def test_lasso_cv_positive_constraint():
X, y, X_test, y_test = build_dataset()
max_iter = 500
# Ensure the unconstrained fit has a negative coefficient
clf_unconstrained = LassoCV(n_alphas=3, eps=1e-1, max_iter=max_iter, cv=2,
n_jobs=1)
clf_unconstrained.fit(X, y)
assert_true(min(clf_unconstrained.coef_) < 0)
# On same data, constrained fit has non-negative coefficients
clf_constrained = LassoCV(n_alphas=3, eps=1e-1, max_iter=max_iter,
positive=True, cv=2, n_jobs=1)
clf_constrained.fit(X, y)
assert_true(min(clf_constrained.coef_) >= 0)
def test_lasso_path_return_models_vs_new_return_gives_same_coefficients():
# Test that lasso_path with lars_path style output gives the
# same result
# Some toy data
X = np.array([[1, 2, 3.1], [2.3, 5.4, 4.3]]).T
y = np.array([1, 2, 3.1])
alphas = [5., 1., .5]
# Compute the lasso_path
f = ignore_warnings
coef_path = [e.coef_ for e in f(lasso_path)(X, y, alphas=alphas,
return_models=True,
fit_intercept=False)]
# Use lars_path and lasso_path(new output) with 1D linear interpolation
    # to compute the same path
alphas_lars, _, coef_path_lars = lars_path(X, y, method='lasso')
coef_path_cont_lars = interpolate.interp1d(alphas_lars[::-1],
coef_path_lars[:, ::-1])
alphas_lasso2, coef_path_lasso2, _ = lasso_path(X, y, alphas=alphas,
fit_intercept=False,
return_models=False)
coef_path_cont_lasso = interpolate.interp1d(alphas_lasso2[::-1],
coef_path_lasso2[:, ::-1])
np.testing.assert_array_almost_equal(coef_path_cont_lasso(alphas),
np.asarray(coef_path).T, decimal=1)
np.testing.assert_array_almost_equal(coef_path_cont_lasso(alphas),
coef_path_cont_lars(alphas),
decimal=1)
def test_enet_path():
# We use a large number of samples and of informative features so that
# the l1_ratio selected is more toward ridge than lasso
X, y, X_test, y_test = build_dataset(n_samples=200, n_features=100,
n_informative_features=100)
max_iter = 150
# Here we have a small number of iterations, and thus the
# ElasticNet might not converge. This is to speed up tests
clf = ElasticNetCV(alphas=[0.01, 0.05, 0.1], eps=2e-3,
l1_ratio=[0.5, 0.7], cv=3,
max_iter=max_iter)
ignore_warnings(clf.fit)(X, y)
    # In this well-conditioned setting, we should have selected our
    # smallest penalty
assert_almost_equal(clf.alpha_, min(clf.alphas_))
    # Non-sparse ground truth: we should have selected an elastic-net
# that is closer to ridge than to lasso
assert_equal(clf.l1_ratio_, min(clf.l1_ratio))
clf = ElasticNetCV(alphas=[0.01, 0.05, 0.1], eps=2e-3,
l1_ratio=[0.5, 0.7], cv=3,
max_iter=max_iter, precompute=True)
ignore_warnings(clf.fit)(X, y)
    # In this well-conditioned setting, we should have selected our
    # smallest penalty
assert_almost_equal(clf.alpha_, min(clf.alphas_))
    # Non-sparse ground truth: we should have selected an elastic-net
# that is closer to ridge than to lasso
assert_equal(clf.l1_ratio_, min(clf.l1_ratio))
# We are in well-conditioned settings with low noise: we should
# have a good test-set performance
assert_greater(clf.score(X_test, y_test), 0.99)
# Multi-output/target case
X, y, X_test, y_test = build_dataset(n_features=10, n_targets=3)
clf = MultiTaskElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7],
cv=3, max_iter=max_iter)
ignore_warnings(clf.fit)(X, y)
# We are in well-conditioned settings with low noise: we should
# have a good test-set performance
assert_greater(clf.score(X_test, y_test), 0.99)
assert_equal(clf.coef_.shape, (3, 10))
# Mono-output should have same cross-validated alpha_ and l1_ratio_
# in both cases.
X, y, _, _ = build_dataset(n_features=10)
clf1 = ElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7])
clf1.fit(X, y)
clf2 = MultiTaskElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7])
clf2.fit(X, y[:, np.newaxis])
assert_almost_equal(clf1.l1_ratio_, clf2.l1_ratio_)
assert_almost_equal(clf1.alpha_, clf2.alpha_)
def test_path_parameters():
X, y, _, _ = build_dataset()
max_iter = 100
clf = ElasticNetCV(n_alphas=50, eps=1e-3, max_iter=max_iter,
l1_ratio=0.5, tol=1e-3)
clf.fit(X, y) # new params
assert_almost_equal(0.5, clf.l1_ratio)
assert_equal(50, clf.n_alphas)
assert_equal(50, len(clf.alphas_))
def test_warm_start():
X, y, _, _ = build_dataset()
clf = ElasticNet(alpha=0.1, max_iter=5, warm_start=True)
ignore_warnings(clf.fit)(X, y)
ignore_warnings(clf.fit)(X, y) # do a second round with 5 iterations
clf2 = ElasticNet(alpha=0.1, max_iter=10)
ignore_warnings(clf2.fit)(X, y)
assert_array_almost_equal(clf2.coef_, clf.coef_)
def test_lasso_alpha_warning():
X = [[-1], [0], [1]]
Y = [-1, 0, 1] # just a straight line
clf = Lasso(alpha=0)
assert_warns(UserWarning, clf.fit, X, Y)
def test_lasso_positive_constraint():
X = [[-1], [0], [1]]
y = [1, 0, -1] # just a straight line with negative slope
lasso = Lasso(alpha=0.1, max_iter=1000, positive=True)
lasso.fit(X, y)
assert_true(min(lasso.coef_) >= 0)
lasso = Lasso(alpha=0.1, max_iter=1000, precompute=True, positive=True)
lasso.fit(X, y)
assert_true(min(lasso.coef_) >= 0)
def test_enet_positive_constraint():
X = [[-1], [0], [1]]
y = [1, 0, -1] # just a straight line with negative slope
enet = ElasticNet(alpha=0.1, max_iter=1000, positive=True)
enet.fit(X, y)
assert_true(min(enet.coef_) >= 0)
def test_enet_cv_positive_constraint():
X, y, X_test, y_test = build_dataset()
max_iter = 500
# Ensure the unconstrained fit has a negative coefficient
enetcv_unconstrained = ElasticNetCV(n_alphas=3, eps=1e-1,
max_iter=max_iter,
cv=2, n_jobs=1)
enetcv_unconstrained.fit(X, y)
assert_true(min(enetcv_unconstrained.coef_) < 0)
# On same data, constrained fit has non-negative coefficients
enetcv_constrained = ElasticNetCV(n_alphas=3, eps=1e-1, max_iter=max_iter,
cv=2, positive=True, n_jobs=1)
enetcv_constrained.fit(X, y)
assert_true(min(enetcv_constrained.coef_) >= 0)
def test_multi_task_lasso_and_enet():
X, y, X_test, y_test = build_dataset()
Y = np.c_[y, y]
#Y_test = np.c_[y_test, y_test]
clf = MultiTaskLasso(alpha=1, tol=1e-8).fit(X, Y)
assert_true(0 < clf.dual_gap_ < 1e-5)
assert_array_almost_equal(clf.coef_[0], clf.coef_[1])
clf = MultiTaskElasticNet(alpha=1, tol=1e-8).fit(X, Y)
assert_true(0 < clf.dual_gap_ < 1e-5)
assert_array_almost_equal(clf.coef_[0], clf.coef_[1])
def test_enet_multitarget():
n_targets = 3
X, y, _, _ = build_dataset(n_samples=10, n_features=8,
n_informative_features=10, n_targets=n_targets)
estimator = ElasticNet(alpha=0.01, fit_intercept=True)
estimator.fit(X, y)
coef, intercept, dual_gap = (estimator.coef_, estimator.intercept_,
estimator.dual_gap_)
for k in range(n_targets):
estimator.fit(X, y[:, k])
assert_array_almost_equal(coef[k, :], estimator.coef_)
assert_array_almost_equal(intercept[k], estimator.intercept_)
assert_array_almost_equal(dual_gap[k], estimator.dual_gap_)
def test_multioutput_enetcv_error():
X = np.random.randn(10, 2)
y = np.random.randn(10, 2)
clf = ElasticNetCV()
assert_raises(ValueError, clf.fit, X, y)
def test_multitask_enet_and_lasso_cv():
X, y, _, _ = build_dataset(n_features=100, n_targets=3)
clf = MultiTaskElasticNetCV().fit(X, y)
assert_almost_equal(clf.alpha_, 0.00556, 3)
clf = MultiTaskLassoCV().fit(X, y)
assert_almost_equal(clf.alpha_, 0.00278, 3)
X, y, _, _ = build_dataset(n_targets=3)
clf = MultiTaskElasticNetCV(n_alphas=50, eps=1e-3, max_iter=100,
l1_ratio=[0.3, 0.5], tol=1e-3)
clf.fit(X, y)
assert_equal(0.5, clf.l1_ratio_)
assert_equal((3, X.shape[1]), clf.coef_.shape)
assert_equal((3, ), clf.intercept_.shape)
assert_equal((2, 50, 3), clf.mse_path_.shape)
assert_equal((2, 50), clf.alphas_.shape)
X, y, _, _ = build_dataset(n_targets=3)
clf = MultiTaskLassoCV(n_alphas=50, eps=1e-3, max_iter=100, tol=1e-3)
clf.fit(X, y)
assert_equal((3, X.shape[1]), clf.coef_.shape)
assert_equal((3, ), clf.intercept_.shape)
assert_equal((50, 3), clf.mse_path_.shape)
assert_equal(50, len(clf.alphas_))
def test_1d_multioutput_enet_and_multitask_enet_cv():
X, y, _, _ = build_dataset(n_features=10)
y = y[:, np.newaxis]
clf = ElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7])
clf.fit(X, y[:, 0])
clf1 = MultiTaskElasticNetCV(n_alphas=5, eps=2e-3, l1_ratio=[0.5, 0.7])
clf1.fit(X, y)
assert_almost_equal(clf.l1_ratio_, clf1.l1_ratio_)
assert_almost_equal(clf.alpha_, clf1.alpha_)
assert_almost_equal(clf.coef_, clf1.coef_[0])
assert_almost_equal(clf.intercept_, clf1.intercept_[0])
def test_1d_multioutput_lasso_and_multitask_lasso_cv():
X, y, _, _ = build_dataset(n_features=10)
y = y[:, np.newaxis]
clf = LassoCV(n_alphas=5, eps=2e-3)
clf.fit(X, y[:, 0])
clf1 = MultiTaskLassoCV(n_alphas=5, eps=2e-3)
clf1.fit(X, y)
assert_almost_equal(clf.alpha_, clf1.alpha_)
assert_almost_equal(clf.coef_, clf1.coef_[0])
assert_almost_equal(clf.intercept_, clf1.intercept_[0])
def test_sparse_input_dtype_enet_and_lassocv():
X, y, _, _ = build_dataset(n_features=10)
clf = ElasticNetCV(n_alphas=5)
clf.fit(sparse.csr_matrix(X), y)
clf1 = ElasticNetCV(n_alphas=5)
clf1.fit(sparse.csr_matrix(X, dtype=np.float32), y)
assert_almost_equal(clf.alpha_, clf1.alpha_, decimal=6)
assert_almost_equal(clf.coef_, clf1.coef_, decimal=6)
clf = LassoCV(n_alphas=5)
clf.fit(sparse.csr_matrix(X), y)
clf1 = LassoCV(n_alphas=5)
clf1.fit(sparse.csr_matrix(X, dtype=np.float32), y)
assert_almost_equal(clf.alpha_, clf1.alpha_, decimal=6)
assert_almost_equal(clf.coef_, clf1.coef_, decimal=6)
def test_precompute_invalid_argument():
X, y, _, _ = build_dataset()
for clf in [ElasticNetCV(precompute="invalid"),
LassoCV(precompute="invalid")]:
assert_raises(ValueError, clf.fit, X, y)
def test_warm_start_convergence():
X, y, _, _ = build_dataset()
model = ElasticNet(alpha=1e-3, tol=1e-3).fit(X, y)
n_iter_reference = model.n_iter_
# This dataset is not trivial enough for the model to converge in one pass.
assert_greater(n_iter_reference, 2)
# Check that n_iter_ is invariant to multiple calls to fit
# when warm_start=False, all else being equal.
model.fit(X, y)
n_iter_cold_start = model.n_iter_
assert_equal(n_iter_cold_start, n_iter_reference)
# Fit the same model again, using a warm start: the optimizer just performs
# a single pass before checking that it has already converged
model.set_params(warm_start=True)
model.fit(X, y)
n_iter_warm_start = model.n_iter_
assert_equal(n_iter_warm_start, 1)
def test_warm_start_convergence_with_regularizer_decrement():
boston = load_boston()
X, y = boston.data, boston.target
# Train a model to converge on a lightly regularized problem
final_alpha = 1e-5
low_reg_model = ElasticNet(alpha=final_alpha).fit(X, y)
# Fitting a new model on a more regularized version of the same problem.
    # Fitting with high regularization is easier, so it should converge faster
# in general.
high_reg_model = ElasticNet(alpha=final_alpha * 10).fit(X, y)
assert_greater(low_reg_model.n_iter_, high_reg_model.n_iter_)
# Fit the solution to the original, less regularized version of the
# problem but from the solution of the highly regularized variant of
# the problem as a better starting point. This should also converge
# faster than the original model that starts from zero.
warm_low_reg_model = deepcopy(high_reg_model)
warm_low_reg_model.set_params(warm_start=True, alpha=final_alpha)
warm_low_reg_model.fit(X, y)
assert_greater(low_reg_model.n_iter_, warm_low_reg_model.n_iter_)
def test_random_descent():
"""Test that both random and cyclic selection give the same results.
Ensure that the test models fully converge and check a wide
range of conditions.
"""
# This uses the coordinate descent algo using the gram trick.
X, y, _, _ = build_dataset(n_samples=50, n_features=20)
clf_cyclic = ElasticNet(selection='cyclic', tol=1e-8)
clf_cyclic.fit(X, y)
clf_random = ElasticNet(selection='random', tol=1e-8, random_state=42)
clf_random.fit(X, y)
assert_array_almost_equal(clf_cyclic.coef_, clf_random.coef_)
assert_almost_equal(clf_cyclic.intercept_, clf_random.intercept_)
# This uses the descent algo without the gram trick
clf_cyclic = ElasticNet(selection='cyclic', tol=1e-8)
clf_cyclic.fit(X.T, y[:20])
clf_random = ElasticNet(selection='random', tol=1e-8, random_state=42)
clf_random.fit(X.T, y[:20])
assert_array_almost_equal(clf_cyclic.coef_, clf_random.coef_)
assert_almost_equal(clf_cyclic.intercept_, clf_random.intercept_)
# Sparse Case
clf_cyclic = ElasticNet(selection='cyclic', tol=1e-8)
clf_cyclic.fit(sparse.csr_matrix(X), y)
clf_random = ElasticNet(selection='random', tol=1e-8, random_state=42)
clf_random.fit(sparse.csr_matrix(X), y)
assert_array_almost_equal(clf_cyclic.coef_, clf_random.coef_)
assert_almost_equal(clf_cyclic.intercept_, clf_random.intercept_)
# Multioutput case.
new_y = np.hstack((y[:, np.newaxis], y[:, np.newaxis]))
clf_cyclic = MultiTaskElasticNet(selection='cyclic', tol=1e-8)
clf_cyclic.fit(X, new_y)
clf_random = MultiTaskElasticNet(selection='random', tol=1e-8,
random_state=42)
clf_random.fit(X, new_y)
assert_array_almost_equal(clf_cyclic.coef_, clf_random.coef_)
assert_almost_equal(clf_cyclic.intercept_, clf_random.intercept_)
# Raise error when selection is not in cyclic or random.
clf_random = ElasticNet(selection='invalid')
assert_raises(ValueError, clf_random.fit, X, y)
def test_deprecation_precompute_enet():
"""
Test that setting precompute="auto" gives a Deprecation Warning.
"""
X, y, _, _ = build_dataset(n_samples=20, n_features=10)
clf = ElasticNet(precompute="auto")
assert_warns(DeprecationWarning, clf.fit, X, y)
clf = Lasso(precompute="auto")
assert_warns(DeprecationWarning, clf.fit, X, y)
def test_enet_path_positive():
"""
Test that the coefs returned by positive=True in enet_path are positive
"""
X, y, _, _ = build_dataset(n_samples=50, n_features=50)
for path in [enet_path, lasso_path]:
pos_path_coef = path(X, y, positive=True)[1]
assert_true(np.all(pos_path_coef >= 0))
if __name__ == '__main__':
import nose
nose.runmodule()
| bsd-3-clause |
liang42hao/bokeh | bokeh/charts/builder/tests/test_scatter_builder.py | 4 | 2880 | """ This is the Bokeh charts testing interface.
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2014, Continuum Analytics, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import absolute_import
from collections import OrderedDict
import unittest
import numpy as np
from numpy.testing import assert_array_equal
import pandas as pd
from bokeh.charts import Scatter
from bokeh.util.testing import create_chart
#-----------------------------------------------------------------------------
# Classes and functions
#-----------------------------------------------------------------------------
class TestScatter(unittest.TestCase):
def test_supported_input(self):
xyvalues = OrderedDict()
xyvalues['python'] = [(1, 2), (3, 3), (4, 7), (5, 5), (8, 26)]
xyvalues['pypy'] = [(1, 12), (2, 23), (4, 47), (5, 15), (8, 46)]
xyvalues['jython'] = [(1, 22), (2, 43), (4, 10), (6, 25), (8, 26)]
xyvaluesdf = pd.DataFrame(xyvalues)
y_python = [2, 3, 7, 5, 26]
y_pypy = [12, 23, 47, 15, 46]
y_jython = [22, 43, 10, 25, 26]
x_python = [1, 3, 4, 5, 8]
x_pypy = [1, 2, 4, 5, 8]
x_jython = [1, 2, 4, 6, 8]
for i, _xy in enumerate([xyvalues, xyvaluesdf]):
hm = create_chart(Scatter, _xy)
builder = hm._builders[0]
self.assertEqual(sorted(builder._groups), sorted(list(xyvalues.keys())))
assert_array_equal(builder._data['y_python'], y_python)
assert_array_equal(builder._data['y_jython'], y_jython)
assert_array_equal(builder._data['y_pypy'], y_pypy)
assert_array_equal(builder._data['x_python'], x_python)
assert_array_equal(builder._data['x_jython'], x_jython)
assert_array_equal(builder._data['x_pypy'], x_pypy)
lvalues = [xyvalues['python'], xyvalues['pypy'], xyvalues['jython']]
for _xy in [lvalues, np.array(lvalues)]:
hm = create_chart(Scatter, _xy)
builder = hm._builders[0]
self.assertEqual(builder._groups, ['0', '1', '2'])
assert_array_equal(builder._data['y_0'], y_python)
assert_array_equal(builder._data['y_1'], y_pypy)
assert_array_equal(builder._data['y_2'], y_jython)
assert_array_equal(builder._data['x_0'], x_python)
assert_array_equal(builder._data['x_1'], x_pypy)
assert_array_equal(builder._data['x_2'], x_jython)
| bsd-3-clause |
rhuelga/sms-tools | lectures/08-Sound-transformations/plots-code/FFT-filtering.py | 2 | 1728 | import math
import matplotlib.pyplot as plt
import numpy as np
import time, os, sys
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../software/models/'))
import dftModel as DFT
import utilFunctions as UF
(fs, x) = UF.wavread('../../../sounds/orchestra.wav')
N = 2048
start = int(1.0*fs)
x1 = x[start:start+N]
plt.figure(1, figsize=(12, 9))
plt.subplot(321)
plt.plot(np.arange(N)/float(fs), x1*np.hamming(N), 'b', lw=1.5)
plt.axis([0, N/float(fs), min(x1*np.hamming(N)), max(x1*np.hamming(N))])
plt.title('x (orchestra.wav)')
mX, pX = DFT.dftAnal(x1, np.hamming(N), N)
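# Build a band-pass filter directly in dB: a Hanning-shaped bump spanning
# roughly 500 Hz to 4500 Hz that peaks at 0 dB, with -60 dB everywhere else.
# Adding it to the dB magnitude spectrum below (mY = mX + filt) is equivalent
# to multiplying the linear magnitude spectrum by the filter response.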
startBin = int(N*500.0/fs)
nBins = int(N*4000.0/fs)
bandpass = (np.hanning(nBins) * 60.0) - 60
filt = np.zeros(mX.size)-60
filt[startBin:startBin+nBins] = bandpass
mY = mX + filt
plt.subplot(323)
plt.plot(fs*np.arange(mX.size)/float(mX.size), mX, 'r', lw=1.5, label = 'mX')
plt.plot(fs*np.arange(mX.size)/float(mX.size), filt+max(mX), 'k', lw=1.5, label='filter')
plt.legend(prop={'size':10})
plt.axis([0,fs/4.0,-90,max(mX)+2])
plt.title('mX + filter')
plt.subplot(325)
plt.plot(fs*np.arange(pX.size)/float(pX.size), pX, 'c', lw=1.5)
plt.axis([0,fs/4.0,min(pX),8])
plt.title('pX')
y = DFT.dftSynth(mY, pX, N)*sum(np.hamming(N))
mY1, pY = DFT.dftAnal(y, np.hamming(N), N)
plt.subplot(322)
plt.plot(np.arange(N)/float(fs), y, 'b')
plt.axis([0, float(N)/fs, min(y), max(y)])
plt.title('y')
plt.subplot(324)
plt.plot(fs*np.arange(mY1.size)/float(mY1.size), mY1, 'r', lw=1.5)
plt.axis([0,fs/4.0,-90,max(mY1)+2])
plt.title('mY')
plt.subplot(326)
plt.plot(fs*np.arange(pY.size)/float(pY.size), pY, 'c', lw=1.5)
plt.axis([0,fs/4.0,min(pY),8])
plt.title('pY')
plt.tight_layout()
plt.savefig('FFT-filtering.png')
plt.show()
| agpl-3.0 |
kenshay/ImageScripter | ProgramData/SystemFiles/Python/Lib/site-packages/pandas/tests/indexes/test_category.py | 7 | 38793 | # -*- coding: utf-8 -*-
# TODO(wesm): fix long line flake8 issues
# flake8: noqa
import pandas.util.testing as tm
from pandas.indexes.api import Index, CategoricalIndex
from .common import Base
from pandas.compat import range, PY3
import numpy as np
from pandas import Categorical, compat, notnull
from pandas.util.testing import assert_almost_equal
import pandas.core.config as cf
import pandas as pd
if PY3:
unicode = lambda x: x
class TestCategoricalIndex(Base, tm.TestCase):
_holder = CategoricalIndex
def setUp(self):
self.indices = dict(catIndex=tm.makeCategoricalIndex(100))
self.setup_indices()
def create_index(self, categories=None, ordered=False):
if categories is None:
categories = list('cab')
return CategoricalIndex(
list('aabbca'), categories=categories, ordered=ordered)
def test_construction(self):
ci = self.create_index(categories=list('abcd'))
categories = ci.categories
result = Index(ci)
tm.assert_index_equal(result, ci, exact=True)
self.assertFalse(result.ordered)
result = Index(ci.values)
tm.assert_index_equal(result, ci, exact=True)
self.assertFalse(result.ordered)
# empty
result = CategoricalIndex(categories=categories)
self.assert_index_equal(result.categories, Index(categories))
tm.assert_numpy_array_equal(result.codes, np.array([], dtype='int8'))
self.assertFalse(result.ordered)
# passing categories
result = CategoricalIndex(list('aabbca'), categories=categories)
self.assert_index_equal(result.categories, Index(categories))
tm.assert_numpy_array_equal(result.codes,
np.array([0, 0, 1, 1, 2, 0], dtype='int8'))
c = pd.Categorical(list('aabbca'))
result = CategoricalIndex(c)
self.assert_index_equal(result.categories, Index(list('abc')))
tm.assert_numpy_array_equal(result.codes,
np.array([0, 0, 1, 1, 2, 0], dtype='int8'))
self.assertFalse(result.ordered)
result = CategoricalIndex(c, categories=categories)
self.assert_index_equal(result.categories, Index(categories))
tm.assert_numpy_array_equal(result.codes,
np.array([0, 0, 1, 1, 2, 0], dtype='int8'))
self.assertFalse(result.ordered)
ci = CategoricalIndex(c, categories=list('abcd'))
result = CategoricalIndex(ci)
self.assert_index_equal(result.categories, Index(categories))
tm.assert_numpy_array_equal(result.codes,
np.array([0, 0, 1, 1, 2, 0], dtype='int8'))
self.assertFalse(result.ordered)
result = CategoricalIndex(ci, categories=list('ab'))
self.assert_index_equal(result.categories, Index(list('ab')))
tm.assert_numpy_array_equal(result.codes,
np.array([0, 0, 1, 1, -1, 0],
dtype='int8'))
self.assertFalse(result.ordered)
result = CategoricalIndex(ci, categories=list('ab'), ordered=True)
self.assert_index_equal(result.categories, Index(list('ab')))
tm.assert_numpy_array_equal(result.codes,
np.array([0, 0, 1, 1, -1, 0],
dtype='int8'))
self.assertTrue(result.ordered)
# turn me to an Index
result = Index(np.array(ci))
self.assertIsInstance(result, Index)
self.assertNotIsInstance(result, CategoricalIndex)
def test_construction_with_dtype(self):
# specify dtype
ci = self.create_index(categories=list('abc'))
result = Index(np.array(ci), dtype='category')
tm.assert_index_equal(result, ci, exact=True)
result = Index(np.array(ci).tolist(), dtype='category')
tm.assert_index_equal(result, ci, exact=True)
# these are generally only equal when the categories are reordered
ci = self.create_index()
result = Index(
np.array(ci), dtype='category').reorder_categories(ci.categories)
tm.assert_index_equal(result, ci, exact=True)
# make sure indexes are handled
expected = CategoricalIndex([0, 1, 2], categories=[0, 1, 2],
ordered=True)
idx = Index(range(3))
result = CategoricalIndex(idx, categories=idx, ordered=True)
tm.assert_index_equal(result, expected, exact=True)
def test_disallow_set_ops(self):
# GH 10039
# set ops (+/-) raise TypeError
idx = pd.Index(pd.Categorical(['a', 'b']))
self.assertRaises(TypeError, lambda: idx - idx)
self.assertRaises(TypeError, lambda: idx + idx)
self.assertRaises(TypeError, lambda: idx - ['a', 'b'])
self.assertRaises(TypeError, lambda: idx + ['a', 'b'])
self.assertRaises(TypeError, lambda: ['a', 'b'] - idx)
self.assertRaises(TypeError, lambda: ['a', 'b'] + idx)
def test_method_delegation(self):
ci = CategoricalIndex(list('aabbca'), categories=list('cabdef'))
result = ci.set_categories(list('cab'))
tm.assert_index_equal(result, CategoricalIndex(
list('aabbca'), categories=list('cab')))
ci = CategoricalIndex(list('aabbca'), categories=list('cab'))
result = ci.rename_categories(list('efg'))
tm.assert_index_equal(result, CategoricalIndex(
list('ffggef'), categories=list('efg')))
ci = CategoricalIndex(list('aabbca'), categories=list('cab'))
result = ci.add_categories(['d'])
tm.assert_index_equal(result, CategoricalIndex(
list('aabbca'), categories=list('cabd')))
ci = CategoricalIndex(list('aabbca'), categories=list('cab'))
result = ci.remove_categories(['c'])
tm.assert_index_equal(result, CategoricalIndex(
list('aabb') + [np.nan] + ['a'], categories=list('ab')))
ci = CategoricalIndex(list('aabbca'), categories=list('cabdef'))
result = ci.as_unordered()
tm.assert_index_equal(result, ci)
ci = CategoricalIndex(list('aabbca'), categories=list('cabdef'))
result = ci.as_ordered()
tm.assert_index_equal(result, CategoricalIndex(
list('aabbca'), categories=list('cabdef'), ordered=True))
# invalid
self.assertRaises(ValueError, lambda: ci.set_categories(
list('cab'), inplace=True))
def test_contains(self):
ci = self.create_index(categories=list('cabdef'))
self.assertTrue('a' in ci)
self.assertTrue('z' not in ci)
self.assertTrue('e' not in ci)
self.assertTrue(np.nan not in ci)
# assert codes NOT in index
self.assertFalse(0 in ci)
self.assertFalse(1 in ci)
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
ci = CategoricalIndex(
list('aabbca'), categories=list('cabdef') + [np.nan])
self.assertFalse(np.nan in ci)
ci = CategoricalIndex(
list('aabbca') + [np.nan], categories=list('cabdef'))
self.assertTrue(np.nan in ci)
def test_min_max(self):
ci = self.create_index(ordered=False)
self.assertRaises(TypeError, lambda: ci.min())
self.assertRaises(TypeError, lambda: ci.max())
ci = self.create_index(ordered=True)
self.assertEqual(ci.min(), 'c')
self.assertEqual(ci.max(), 'b')
def test_map(self):
ci = pd.CategoricalIndex(list('ABABC'), categories=list('CBA'),
ordered=True)
result = ci.map(lambda x: x.lower())
exp = pd.Categorical(list('ababc'), categories=list('cba'),
ordered=True)
tm.assert_categorical_equal(result, exp)
ci = pd.CategoricalIndex(list('ABABC'), categories=list('BAC'),
ordered=False, name='XXX')
result = ci.map(lambda x: x.lower())
exp = pd.Categorical(list('ababc'), categories=list('bac'),
ordered=False)
tm.assert_categorical_equal(result, exp)
tm.assert_numpy_array_equal(ci.map(lambda x: 1),
np.array([1] * 5, dtype=np.int64))
# change categories dtype
ci = pd.CategoricalIndex(list('ABABC'), categories=list('BAC'),
ordered=False)
def f(x):
return {'A': 10, 'B': 20, 'C': 30}.get(x)
result = ci.map(f)
exp = pd.Categorical([10, 20, 10, 20, 30], categories=[20, 10, 30],
ordered=False)
tm.assert_categorical_equal(result, exp)
def test_where(self):
i = self.create_index()
result = i.where(notnull(i))
expected = i
tm.assert_index_equal(result, expected)
i2 = i.copy()
i2 = pd.CategoricalIndex([np.nan, np.nan] + i[2:].tolist(),
categories=i.categories)
result = i.where(notnull(i2))
expected = i2
tm.assert_index_equal(result, expected)
def test_append(self):
ci = self.create_index()
categories = ci.categories
# append cats with the same categories
result = ci[:3].append(ci[3:])
tm.assert_index_equal(result, ci, exact=True)
foos = [ci[:1], ci[1:3], ci[3:]]
result = foos[0].append(foos[1:])
tm.assert_index_equal(result, ci, exact=True)
# empty
result = ci.append([])
tm.assert_index_equal(result, ci, exact=True)
        # appending with different categories or reordered categories is not ok
self.assertRaises(
TypeError,
lambda: ci.append(ci.values.set_categories(list('abcd'))))
self.assertRaises(
TypeError,
lambda: ci.append(ci.values.reorder_categories(list('abc'))))
# with objects
result = ci.append(Index(['c', 'a']))
expected = CategoricalIndex(list('aabbcaca'), categories=categories)
tm.assert_index_equal(result, expected, exact=True)
# invalid objects
self.assertRaises(TypeError, lambda: ci.append(Index(['a', 'd'])))
# GH14298 - if base object is not categorical -> coerce to object
result = Index(['c', 'a']).append(ci)
expected = Index(list('caaabbca'))
tm.assert_index_equal(result, expected, exact=True)
def test_insert(self):
ci = self.create_index()
categories = ci.categories
# test 0th element
result = ci.insert(0, 'a')
expected = CategoricalIndex(list('aaabbca'), categories=categories)
tm.assert_index_equal(result, expected, exact=True)
# test Nth element that follows Python list behavior
result = ci.insert(-1, 'a')
expected = CategoricalIndex(list('aabbcaa'), categories=categories)
tm.assert_index_equal(result, expected, exact=True)
# test empty
result = CategoricalIndex(categories=categories).insert(0, 'a')
expected = CategoricalIndex(['a'], categories=categories)
tm.assert_index_equal(result, expected, exact=True)
# invalid
self.assertRaises(TypeError, lambda: ci.insert(0, 'd'))
def test_delete(self):
ci = self.create_index()
categories = ci.categories
result = ci.delete(0)
expected = CategoricalIndex(list('abbca'), categories=categories)
tm.assert_index_equal(result, expected, exact=True)
result = ci.delete(-1)
expected = CategoricalIndex(list('aabbc'), categories=categories)
tm.assert_index_equal(result, expected, exact=True)
with tm.assertRaises((IndexError, ValueError)):
            # either, depending on the numpy version
result = ci.delete(10)
def test_astype(self):
ci = self.create_index()
result = ci.astype('category')
tm.assert_index_equal(result, ci, exact=True)
result = ci.astype(object)
self.assert_index_equal(result, Index(np.array(ci)))
# this IS equal, but not the same class
self.assertTrue(result.equals(ci))
self.assertIsInstance(result, Index)
self.assertNotIsInstance(result, CategoricalIndex)
def test_reindex_base(self):
# determined by cat ordering
idx = self.create_index()
expected = np.array([4, 0, 1, 5, 2, 3], dtype=np.intp)
actual = idx.get_indexer(idx)
tm.assert_numpy_array_equal(expected, actual)
with tm.assertRaisesRegexp(ValueError, 'Invalid fill method'):
idx.get_indexer(idx, method='invalid')
def test_reindexing(self):
ci = self.create_index()
oidx = Index(np.array(ci))
for n in [1, 2, 5, len(ci)]:
finder = oidx[np.random.randint(0, len(ci), size=n)]
expected = oidx.get_indexer_non_unique(finder)[0]
actual = ci.get_indexer(finder)
tm.assert_numpy_array_equal(expected.values, actual, check_dtype=False)
def test_reindex_dtype(self):
c = CategoricalIndex(['a', 'b', 'c', 'a'])
res, indexer = c.reindex(['a', 'c'])
tm.assert_index_equal(res, Index(['a', 'a', 'c']), exact=True)
tm.assert_numpy_array_equal(indexer,
np.array([0, 3, 2], dtype=np.int64))
c = CategoricalIndex(['a', 'b', 'c', 'a'])
res, indexer = c.reindex(Categorical(['a', 'c']))
exp = CategoricalIndex(['a', 'a', 'c'], categories=['a', 'c'])
tm.assert_index_equal(res, exp, exact=True)
tm.assert_numpy_array_equal(indexer,
np.array([0, 3, 2], dtype=np.int64))
c = CategoricalIndex(['a', 'b', 'c', 'a'],
categories=['a', 'b', 'c', 'd'])
res, indexer = c.reindex(['a', 'c'])
exp = Index(['a', 'a', 'c'], dtype='object')
tm.assert_index_equal(res, exp, exact=True)
tm.assert_numpy_array_equal(indexer,
np.array([0, 3, 2], dtype=np.int64))
c = CategoricalIndex(['a', 'b', 'c', 'a'],
categories=['a', 'b', 'c', 'd'])
res, indexer = c.reindex(Categorical(['a', 'c']))
exp = CategoricalIndex(['a', 'a', 'c'], categories=['a', 'c'])
tm.assert_index_equal(res, exp, exact=True)
tm.assert_numpy_array_equal(indexer,
np.array([0, 3, 2], dtype=np.int64))
def test_duplicates(self):
idx = CategoricalIndex([0, 0, 0], name='foo')
self.assertFalse(idx.is_unique)
self.assertTrue(idx.has_duplicates)
expected = CategoricalIndex([0], name='foo')
self.assert_index_equal(idx.drop_duplicates(), expected)
self.assert_index_equal(idx.unique(), expected)
def test_get_indexer(self):
idx1 = CategoricalIndex(list('aabcde'), categories=list('edabc'))
idx2 = CategoricalIndex(list('abf'))
for indexer in [idx2, list('abf'), Index(list('abf'))]:
r1 = idx1.get_indexer(idx2)
assert_almost_equal(r1, np.array([0, 1, 2, -1], dtype=np.intp))
self.assertRaises(NotImplementedError,
lambda: idx2.get_indexer(idx1, method='pad'))
self.assertRaises(NotImplementedError,
lambda: idx2.get_indexer(idx1, method='backfill'))
self.assertRaises(NotImplementedError,
lambda: idx2.get_indexer(idx1, method='nearest'))
def test_get_loc(self):
# GH 12531
cidx1 = CategoricalIndex(list('abcde'), categories=list('edabc'))
idx1 = Index(list('abcde'))
self.assertEqual(cidx1.get_loc('a'), idx1.get_loc('a'))
self.assertEqual(cidx1.get_loc('e'), idx1.get_loc('e'))
for i in [cidx1, idx1]:
with tm.assertRaises(KeyError):
i.get_loc('NOT-EXIST')
# non-unique
cidx2 = CategoricalIndex(list('aacded'), categories=list('edabc'))
idx2 = Index(list('aacded'))
# results in bool array
res = cidx2.get_loc('d')
self.assert_numpy_array_equal(res, idx2.get_loc('d'))
self.assert_numpy_array_equal(res, np.array([False, False, False,
True, False, True]))
# unique element results in scalar
res = cidx2.get_loc('e')
self.assertEqual(res, idx2.get_loc('e'))
self.assertEqual(res, 4)
for i in [cidx2, idx2]:
with tm.assertRaises(KeyError):
i.get_loc('NOT-EXIST')
# non-unique, slicable
cidx3 = CategoricalIndex(list('aabbb'), categories=list('abc'))
idx3 = Index(list('aabbb'))
# results in slice
res = cidx3.get_loc('a')
self.assertEqual(res, idx3.get_loc('a'))
self.assertEqual(res, slice(0, 2, None))
res = cidx3.get_loc('b')
self.assertEqual(res, idx3.get_loc('b'))
self.assertEqual(res, slice(2, 5, None))
for i in [cidx3, idx3]:
with tm.assertRaises(KeyError):
i.get_loc('c')
def test_repr_roundtrip(self):
ci = CategoricalIndex(['a', 'b'], categories=['a', 'b'], ordered=True)
str(ci)
tm.assert_index_equal(eval(repr(ci)), ci, exact=True)
# formatting
if PY3:
str(ci)
else:
compat.text_type(ci)
# long format
# this is not reprable
ci = CategoricalIndex(np.random.randint(0, 5, size=100))
if PY3:
str(ci)
else:
compat.text_type(ci)
def test_isin(self):
ci = CategoricalIndex(
list('aabca') + [np.nan], categories=['c', 'a', 'b'])
tm.assert_numpy_array_equal(
ci.isin(['c']),
np.array([False, False, False, True, False, False]))
tm.assert_numpy_array_equal(
ci.isin(['c', 'a', 'b']), np.array([True] * 5 + [False]))
tm.assert_numpy_array_equal(
ci.isin(['c', 'a', 'b', np.nan]), np.array([True] * 6))
# mismatched categorical -> coerced to ndarray so doesn't matter
tm.assert_numpy_array_equal(
ci.isin(ci.set_categories(list('abcdefghi'))), np.array([True] *
6))
tm.assert_numpy_array_equal(
ci.isin(ci.set_categories(list('defghi'))),
np.array([False] * 5 + [True]))
def test_identical(self):
ci1 = CategoricalIndex(['a', 'b'], categories=['a', 'b'], ordered=True)
ci2 = CategoricalIndex(['a', 'b'], categories=['a', 'b', 'c'],
ordered=True)
self.assertTrue(ci1.identical(ci1))
self.assertTrue(ci1.identical(ci1.copy()))
self.assertFalse(ci1.identical(ci2))
def test_ensure_copied_data(self):
# Check the "copy" argument of each Index.__new__ is honoured
# GH12309
# Must be tested separately from other indexes because
# self.value is not an ndarray
_base = lambda ar : ar if ar.base is None else ar.base
for index in self.indices.values():
result = CategoricalIndex(index.values, copy=True)
tm.assert_index_equal(index, result)
self.assertIsNot(_base(index.values), _base(result.values))
result = CategoricalIndex(index.values, copy=False)
self.assertIs(_base(index.values), _base(result.values))
def test_equals_categorical(self):
ci1 = CategoricalIndex(['a', 'b'], categories=['a', 'b'], ordered=True)
ci2 = CategoricalIndex(['a', 'b'], categories=['a', 'b', 'c'],
ordered=True)
self.assertTrue(ci1.equals(ci1))
self.assertFalse(ci1.equals(ci2))
self.assertTrue(ci1.equals(ci1.astype(object)))
self.assertTrue(ci1.astype(object).equals(ci1))
self.assertTrue((ci1 == ci1).all())
self.assertFalse((ci1 != ci1).all())
self.assertFalse((ci1 > ci1).all())
self.assertFalse((ci1 < ci1).all())
self.assertTrue((ci1 <= ci1).all())
self.assertTrue((ci1 >= ci1).all())
self.assertFalse((ci1 == 1).all())
self.assertTrue((ci1 == Index(['a', 'b'])).all())
self.assertTrue((ci1 == ci1.values).all())
# invalid comparisons
with tm.assertRaisesRegexp(ValueError, "Lengths must match"):
ci1 == Index(['a', 'b', 'c'])
self.assertRaises(TypeError, lambda: ci1 == ci2)
self.assertRaises(
TypeError, lambda: ci1 == Categorical(ci1.values, ordered=False))
self.assertRaises(
TypeError,
lambda: ci1 == Categorical(ci1.values, categories=list('abc')))
# tests
# make sure that we are testing for category inclusion properly
ci = CategoricalIndex(list('aabca'), categories=['c', 'a', 'b'])
self.assertFalse(ci.equals(list('aabca')))
self.assertFalse(ci.equals(CategoricalIndex(list('aabca'))))
self.assertTrue(ci.equals(ci.copy()))
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
ci = CategoricalIndex(list('aabca'),
categories=['c', 'a', 'b', np.nan])
self.assertFalse(ci.equals(list('aabca')))
self.assertFalse(ci.equals(CategoricalIndex(list('aabca'))))
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
self.assertTrue(ci.equals(ci.copy()))
ci = CategoricalIndex(list('aabca') + [np.nan],
categories=['c', 'a', 'b'])
self.assertFalse(ci.equals(list('aabca')))
self.assertFalse(ci.equals(CategoricalIndex(list('aabca'))))
self.assertTrue(ci.equals(ci.copy()))
ci = CategoricalIndex(list('aabca') + [np.nan],
categories=['c', 'a', 'b'])
self.assertFalse(ci.equals(list('aabca') + [np.nan]))
self.assertFalse(ci.equals(CategoricalIndex(list('aabca') + [np.nan])))
self.assertTrue(ci.equals(ci.copy()))
def test_string_categorical_index_repr(self):
# short
idx = pd.CategoricalIndex(['a', 'bb', 'ccc'])
if PY3:
expected = u"""CategoricalIndex(['a', 'bb', 'ccc'], categories=['a', 'bb', 'ccc'], ordered=False, dtype='category')"""
self.assertEqual(repr(idx), expected)
else:
expected = u"""CategoricalIndex([u'a', u'bb', u'ccc'], categories=[u'a', u'bb', u'ccc'], ordered=False, dtype='category')"""
self.assertEqual(unicode(idx), expected)
# multiple lines
idx = pd.CategoricalIndex(['a', 'bb', 'ccc'] * 10)
if PY3:
expected = u"""CategoricalIndex(['a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a',
'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb',
'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc'],
categories=['a', 'bb', 'ccc'], ordered=False, dtype='category')"""
self.assertEqual(repr(idx), expected)
else:
expected = u"""CategoricalIndex([u'a', u'bb', u'ccc', u'a', u'bb', u'ccc', u'a', u'bb',
u'ccc', u'a', u'bb', u'ccc', u'a', u'bb', u'ccc', u'a',
u'bb', u'ccc', u'a', u'bb', u'ccc', u'a', u'bb', u'ccc',
u'a', u'bb', u'ccc', u'a', u'bb', u'ccc'],
categories=[u'a', u'bb', u'ccc'], ordered=False, dtype='category')"""
self.assertEqual(unicode(idx), expected)
# truncated
idx = pd.CategoricalIndex(['a', 'bb', 'ccc'] * 100)
if PY3:
expected = u"""CategoricalIndex(['a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a',
...
'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc'],
categories=['a', 'bb', 'ccc'], ordered=False, dtype='category', length=300)"""
self.assertEqual(repr(idx), expected)
else:
expected = u"""CategoricalIndex([u'a', u'bb', u'ccc', u'a', u'bb', u'ccc', u'a', u'bb',
u'ccc', u'a',
...
u'ccc', u'a', u'bb', u'ccc', u'a', u'bb', u'ccc', u'a',
u'bb', u'ccc'],
categories=[u'a', u'bb', u'ccc'], ordered=False, dtype='category', length=300)"""
self.assertEqual(unicode(idx), expected)
# larger categories
idx = pd.CategoricalIndex(list('abcdefghijklmmo'))
if PY3:
expected = u"""CategoricalIndex(['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l',
'm', 'm', 'o'],
categories=['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', ...], ordered=False, dtype='category')"""
self.assertEqual(repr(idx), expected)
else:
expected = u"""CategoricalIndex([u'a', u'b', u'c', u'd', u'e', u'f', u'g', u'h', u'i', u'j',
u'k', u'l', u'm', u'm', u'o'],
categories=[u'a', u'b', u'c', u'd', u'e', u'f', u'g', u'h', ...], ordered=False, dtype='category')"""
self.assertEqual(unicode(idx), expected)
# short
idx = pd.CategoricalIndex([u'あ', u'いい', u'ううう'])
if PY3:
expected = u"""CategoricalIndex(['あ', 'いい', 'ううう'], categories=['あ', 'いい', 'ううう'], ordered=False, dtype='category')"""
self.assertEqual(repr(idx), expected)
else:
expected = u"""CategoricalIndex([u'あ', u'いい', u'ううう'], categories=[u'あ', u'いい', u'ううう'], ordered=False, dtype='category')"""
self.assertEqual(unicode(idx), expected)
# multiple lines
idx = pd.CategoricalIndex([u'あ', u'いい', u'ううう'] * 10)
if PY3:
expected = u"""CategoricalIndex(['あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ',
'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい',
'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう'],
categories=['あ', 'いい', 'ううう'], ordered=False, dtype='category')"""
self.assertEqual(repr(idx), expected)
else:
expected = u"""CategoricalIndex([u'あ', u'いい', u'ううう', u'あ', u'いい', u'ううう', u'あ', u'いい',
u'ううう', u'あ', u'いい', u'ううう', u'あ', u'いい', u'ううう', u'あ',
u'いい', u'ううう', u'あ', u'いい', u'ううう', u'あ', u'いい', u'ううう',
u'あ', u'いい', u'ううう', u'あ', u'いい', u'ううう'],
categories=[u'あ', u'いい', u'ううう'], ordered=False, dtype='category')"""
self.assertEqual(unicode(idx), expected)
# truncated
idx = pd.CategoricalIndex([u'あ', u'いい', u'ううう'] * 100)
if PY3:
expected = u"""CategoricalIndex(['あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ',
...
'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう'],
categories=['あ', 'いい', 'ううう'], ordered=False, dtype='category', length=300)"""
self.assertEqual(repr(idx), expected)
else:
expected = u"""CategoricalIndex([u'あ', u'いい', u'ううう', u'あ', u'いい', u'ううう', u'あ', u'いい',
u'ううう', u'あ',
...
u'ううう', u'あ', u'いい', u'ううう', u'あ', u'いい', u'ううう', u'あ',
u'いい', u'ううう'],
categories=[u'あ', u'いい', u'ううう'], ordered=False, dtype='category', length=300)"""
self.assertEqual(unicode(idx), expected)
# larger categories
idx = pd.CategoricalIndex(list(u'あいうえおかきくけこさしすせそ'))
if PY3:
expected = u"""CategoricalIndex(['あ', 'い', 'う', 'え', 'お', 'か', 'き', 'く', 'け', 'こ', 'さ', 'し',
'す', 'せ', 'そ'],
categories=['あ', 'い', 'う', 'え', 'お', 'か', 'き', 'く', ...], ordered=False, dtype='category')"""
self.assertEqual(repr(idx), expected)
else:
expected = u"""CategoricalIndex([u'あ', u'い', u'う', u'え', u'お', u'か', u'き', u'く', u'け', u'こ',
u'さ', u'し', u'す', u'せ', u'そ'],
categories=[u'あ', u'い', u'う', u'え', u'お', u'か', u'き', u'く', ...], ordered=False, dtype='category')"""
self.assertEqual(unicode(idx), expected)
        # Enable Unicode option -----------------------------------------
with cf.option_context('display.unicode.east_asian_width', True):
# short
idx = pd.CategoricalIndex([u'あ', u'いい', u'ううう'])
if PY3:
expected = u"""CategoricalIndex(['あ', 'いい', 'ううう'], categories=['あ', 'いい', 'ううう'], ordered=False, dtype='category')"""
self.assertEqual(repr(idx), expected)
else:
expected = u"""CategoricalIndex([u'あ', u'いい', u'ううう'], categories=[u'あ', u'いい', u'ううう'], ordered=False, dtype='category')"""
self.assertEqual(unicode(idx), expected)
# multiple lines
idx = pd.CategoricalIndex([u'あ', u'いい', u'ううう'] * 10)
if PY3:
expected = u"""CategoricalIndex(['あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい',
'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう',
'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい',
'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう'],
categories=['あ', 'いい', 'ううう'], ordered=False, dtype='category')"""
self.assertEqual(repr(idx), expected)
else:
expected = u"""CategoricalIndex([u'あ', u'いい', u'ううう', u'あ', u'いい', u'ううう', u'あ',
u'いい', u'ううう', u'あ', u'いい', u'ううう', u'あ',
u'いい', u'ううう', u'あ', u'いい', u'ううう', u'あ',
u'いい', u'ううう', u'あ', u'いい', u'ううう', u'あ',
u'いい', u'ううう', u'あ', u'いい', u'ううう'],
categories=[u'あ', u'いい', u'ううう'], ordered=False, dtype='category')"""
self.assertEqual(unicode(idx), expected)
# truncated
idx = pd.CategoricalIndex([u'あ', u'いい', u'ううう'] * 100)
if PY3:
expected = u"""CategoricalIndex(['あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい',
'ううう', 'あ',
...
'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう',
'あ', 'いい', 'ううう'],
categories=['あ', 'いい', 'ううう'], ordered=False, dtype='category', length=300)"""
self.assertEqual(repr(idx), expected)
else:
expected = u"""CategoricalIndex([u'あ', u'いい', u'ううう', u'あ', u'いい', u'ううう', u'あ',
u'いい', u'ううう', u'あ',
...
u'ううう', u'あ', u'いい', u'ううう', u'あ', u'いい',
u'ううう', u'あ', u'いい', u'ううう'],
categories=[u'あ', u'いい', u'ううう'], ordered=False, dtype='category', length=300)"""
self.assertEqual(unicode(idx), expected)
# larger categories
idx = pd.CategoricalIndex(list(u'あいうえおかきくけこさしすせそ'))
if PY3:
expected = u"""CategoricalIndex(['あ', 'い', 'う', 'え', 'お', 'か', 'き', 'く', 'け', 'こ',
'さ', 'し', 'す', 'せ', 'そ'],
categories=['あ', 'い', 'う', 'え', 'お', 'か', 'き', 'く', ...], ordered=False, dtype='category')"""
self.assertEqual(repr(idx), expected)
else:
expected = u"""CategoricalIndex([u'あ', u'い', u'う', u'え', u'お', u'か', u'き', u'く',
u'け', u'こ', u'さ', u'し', u'す', u'せ', u'そ'],
categories=[u'あ', u'い', u'う', u'え', u'お', u'か', u'き', u'く', ...], ordered=False, dtype='category')"""
self.assertEqual(unicode(idx), expected)
def test_fillna_categorical(self):
# GH 11343
idx = CategoricalIndex([1.0, np.nan, 3.0, 1.0], name='x')
# fill by value in categories
exp = CategoricalIndex([1.0, 1.0, 3.0, 1.0], name='x')
self.assert_index_equal(idx.fillna(1.0), exp)
# fill by value not in categories raises ValueError
with tm.assertRaisesRegexp(ValueError,
'fill value must be in categories'):
idx.fillna(2.0)
def test_take_fill_value(self):
# GH 12631
# numeric category
idx = pd.CategoricalIndex([1, 2, 3], name='xxx')
result = idx.take(np.array([1, 0, -1]))
expected = pd.CategoricalIndex([2, 1, 3], name='xxx')
tm.assert_index_equal(result, expected)
tm.assert_categorical_equal(result.values, expected.values)
# fill_value
result = idx.take(np.array([1, 0, -1]), fill_value=True)
expected = pd.CategoricalIndex([2, 1, np.nan], categories=[1, 2, 3],
name='xxx')
tm.assert_index_equal(result, expected)
tm.assert_categorical_equal(result.values, expected.values)
# allow_fill=False
result = idx.take(np.array([1, 0, -1]), allow_fill=False,
fill_value=True)
expected = pd.CategoricalIndex([2, 1, 3], name='xxx')
tm.assert_index_equal(result, expected)
tm.assert_categorical_equal(result.values, expected.values)
# object category
idx = pd.CategoricalIndex(list('CBA'), categories=list('ABC'),
ordered=True, name='xxx')
result = idx.take(np.array([1, 0, -1]))
expected = pd.CategoricalIndex(list('BCA'), categories=list('ABC'),
ordered=True, name='xxx')
tm.assert_index_equal(result, expected)
tm.assert_categorical_equal(result.values, expected.values)
# fill_value
result = idx.take(np.array([1, 0, -1]), fill_value=True)
expected = pd.CategoricalIndex(['B', 'C', np.nan],
categories=list('ABC'), ordered=True,
name='xxx')
tm.assert_index_equal(result, expected)
tm.assert_categorical_equal(result.values, expected.values)
# allow_fill=False
result = idx.take(np.array([1, 0, -1]), allow_fill=False,
fill_value=True)
expected = pd.CategoricalIndex(list('BCA'), categories=list('ABC'),
ordered=True, name='xxx')
tm.assert_index_equal(result, expected)
tm.assert_categorical_equal(result.values, expected.values)
msg = ('When allow_fill=True and fill_value is not None, '
'all indices must be >= -1')
with tm.assertRaisesRegexp(ValueError, msg):
idx.take(np.array([1, 0, -2]), fill_value=True)
with tm.assertRaisesRegexp(ValueError, msg):
idx.take(np.array([1, 0, -5]), fill_value=True)
with tm.assertRaises(IndexError):
idx.take(np.array([1, -5]))
def test_take_fill_value_datetime(self):
# datetime category
idx = pd.DatetimeIndex(['2011-01-01', '2011-02-01', '2011-03-01'],
name='xxx')
idx = pd.CategoricalIndex(idx)
result = idx.take(np.array([1, 0, -1]))
expected = pd.DatetimeIndex(['2011-02-01', '2011-01-01', '2011-03-01'],
name='xxx')
expected = pd.CategoricalIndex(expected)
tm.assert_index_equal(result, expected)
# fill_value
result = idx.take(np.array([1, 0, -1]), fill_value=True)
expected = pd.DatetimeIndex(['2011-02-01', '2011-01-01', 'NaT'],
name='xxx')
exp_cats = pd.DatetimeIndex(['2011-01-01', '2011-02-01', '2011-03-01'])
expected = pd.CategoricalIndex(expected, categories=exp_cats)
tm.assert_index_equal(result, expected)
# allow_fill=False
result = idx.take(np.array([1, 0, -1]), allow_fill=False,
fill_value=True)
expected = pd.DatetimeIndex(['2011-02-01', '2011-01-01', '2011-03-01'],
name='xxx')
expected = pd.CategoricalIndex(expected)
tm.assert_index_equal(result, expected)
msg = ('When allow_fill=True and fill_value is not None, '
'all indices must be >= -1')
with tm.assertRaisesRegexp(ValueError, msg):
idx.take(np.array([1, 0, -2]), fill_value=True)
with tm.assertRaisesRegexp(ValueError, msg):
idx.take(np.array([1, 0, -5]), fill_value=True)
with tm.assertRaises(IndexError):
idx.take(np.array([1, -5]))
def test_take_invalid_kwargs(self):
idx = pd.CategoricalIndex([1, 2, 3], name='foo')
indices = [1, 0, -1]
msg = r"take\(\) got an unexpected keyword argument 'foo'"
tm.assertRaisesRegexp(TypeError, msg, idx.take,
indices, foo=2)
msg = "the 'out' parameter is not supported"
tm.assertRaisesRegexp(ValueError, msg, idx.take,
indices, out=indices)
msg = "the 'mode' parameter is not supported"
tm.assertRaisesRegexp(ValueError, msg, idx.take,
indices, mode='clip')
| gpl-3.0 |
bthirion/scikit-learn | benchmarks/bench_sgd_regression.py | 61 | 5612 | """
Benchmark for SGD regression
Compares SGD regression against coordinate descent and Ridge
on synthetic data.
"""
print(__doc__)
# Author: Peter Prettenhofer <[email protected]>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
import gc
from time import time
from sklearn.linear_model import Ridge, SGDRegressor, ElasticNet
from sklearn.metrics import mean_squared_error
from sklearn.datasets.samples_generator import make_regression
if __name__ == "__main__":
list_n_samples = np.linspace(100, 10000, 5).astype(np.int)
list_n_features = [10, 100, 1000]
n_test = 1000
noise = 0.1
alpha = 0.01
sgd_results = np.zeros((len(list_n_samples), len(list_n_features), 2))
elnet_results = np.zeros((len(list_n_samples), len(list_n_features), 2))
ridge_results = np.zeros((len(list_n_samples), len(list_n_features), 2))
asgd_results = np.zeros((len(list_n_samples), len(list_n_features), 2))
for i, n_train in enumerate(list_n_samples):
for j, n_features in enumerate(list_n_features):
X, y, coef = make_regression(
n_samples=n_train + n_test, n_features=n_features,
noise=noise, coef=True)
X_train = X[:n_train]
y_train = y[:n_train]
X_test = X[n_train:]
y_test = y[n_train:]
print("=======================")
print("Round %d %d" % (i, j))
print("n_features:", n_features)
print("n_samples:", n_train)
# Shuffle data
idx = np.arange(n_train)
np.random.seed(13)
np.random.shuffle(idx)
X_train = X_train[idx]
y_train = y_train[idx]
std = X_train.std(axis=0)
mean = X_train.mean(axis=0)
X_train = (X_train - mean) / std
X_test = (X_test - mean) / std
std = y_train.std(axis=0)
mean = y_train.mean(axis=0)
y_train = (y_train - mean) / std
y_test = (y_test - mean) / std
gc.collect()
print("- benchmarking ElasticNet")
clf = ElasticNet(alpha=alpha, l1_ratio=0.5, fit_intercept=False)
tstart = time()
clf.fit(X_train, y_train)
elnet_results[i, j, 0] = mean_squared_error(clf.predict(X_test),
y_test)
elnet_results[i, j, 1] = time() - tstart
gc.collect()
print("- benchmarking SGD")
n_iter = np.ceil(10 ** 4.0 / n_train)
clf = SGDRegressor(alpha=alpha / n_train, fit_intercept=False,
n_iter=n_iter, learning_rate="invscaling",
eta0=.01, power_t=0.25)
tstart = time()
clf.fit(X_train, y_train)
sgd_results[i, j, 0] = mean_squared_error(clf.predict(X_test),
y_test)
sgd_results[i, j, 1] = time() - tstart
gc.collect()
print("n_iter", n_iter)
print("- benchmarking A-SGD")
n_iter = np.ceil(10 ** 4.0 / n_train)
clf = SGDRegressor(alpha=alpha / n_train, fit_intercept=False,
n_iter=n_iter, learning_rate="invscaling",
eta0=.002, power_t=0.05,
average=(n_iter * n_train // 2))
tstart = time()
clf.fit(X_train, y_train)
asgd_results[i, j, 0] = mean_squared_error(clf.predict(X_test),
y_test)
asgd_results[i, j, 1] = time() - tstart
gc.collect()
print("- benchmarking RidgeRegression")
clf = Ridge(alpha=alpha, fit_intercept=False)
tstart = time()
clf.fit(X_train, y_train)
ridge_results[i, j, 0] = mean_squared_error(clf.predict(X_test),
y_test)
ridge_results[i, j, 1] = time() - tstart
# Plot results
i = 0
m = len(list_n_features)
plt.figure('scikit-learn SGD regression benchmark results',
figsize=(5 * 2, 4 * m))
for j in range(m):
plt.subplot(m, 2, i + 1)
plt.plot(list_n_samples, np.sqrt(elnet_results[:, j, 0]),
label="ElasticNet")
plt.plot(list_n_samples, np.sqrt(sgd_results[:, j, 0]),
label="SGDRegressor")
plt.plot(list_n_samples, np.sqrt(asgd_results[:, j, 0]),
label="A-SGDRegressor")
plt.plot(list_n_samples, np.sqrt(ridge_results[:, j, 0]),
label="Ridge")
plt.legend(prop={"size": 10})
plt.xlabel("n_train")
plt.ylabel("RMSE")
plt.title("Test error - %d features" % list_n_features[j])
i += 1
plt.subplot(m, 2, i + 1)
plt.plot(list_n_samples, np.sqrt(elnet_results[:, j, 1]),
label="ElasticNet")
plt.plot(list_n_samples, np.sqrt(sgd_results[:, j, 1]),
label="SGDRegressor")
plt.plot(list_n_samples, np.sqrt(asgd_results[:, j, 1]),
label="A-SGDRegressor")
plt.plot(list_n_samples, np.sqrt(ridge_results[:, j, 1]),
label="Ridge")
plt.legend(prop={"size": 10})
plt.xlabel("n_train")
plt.ylabel("Time [sec]")
plt.title("Training time - %d features" % list_n_features[j])
i += 1
plt.subplots_adjust(hspace=.30)
plt.show()
| bsd-3-clause |
jmschrei/scikit-learn | sklearn/utils/graph.py | 289 | 6239 | """
Graph utilities and algorithms
Graphs are represented with their adjacency matrices, preferably using
sparse matrices.
"""
# Authors: Aric Hagberg <[email protected]>
# Gael Varoquaux <[email protected]>
# Jake Vanderplas <[email protected]>
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from .validation import check_array
from .graph_shortest_path import graph_shortest_path
###############################################################################
# Path and connected component analysis.
# Code adapted from networkx
def single_source_shortest_path_length(graph, source, cutoff=None):
"""Return the shortest path length from source to all reachable nodes.
Returns a dictionary of shortest path lengths keyed by target.
Parameters
----------
graph: sparse matrix or 2D array (preferably LIL matrix)
Adjacency matrix of the graph
source : node label
Starting node for path
cutoff : integer, optional
Depth to stop the search - only
paths of length <= cutoff are returned.
Examples
--------
>>> from sklearn.utils.graph import single_source_shortest_path_length
>>> import numpy as np
>>> graph = np.array([[ 0, 1, 0, 0],
... [ 1, 0, 1, 0],
... [ 0, 1, 0, 1],
... [ 0, 0, 1, 0]])
>>> single_source_shortest_path_length(graph, 0)
{0: 0, 1: 1, 2: 2, 3: 3}
>>> single_source_shortest_path_length(np.ones((6, 6)), 2)
{0: 1, 1: 1, 2: 0, 3: 1, 4: 1, 5: 1}
"""
if sparse.isspmatrix(graph):
graph = graph.tolil()
else:
graph = sparse.lil_matrix(graph)
seen = {} # level (number of hops) when seen in BFS
level = 0 # the current level
next_level = [source] # dict of nodes to check at next level
while next_level:
this_level = next_level # advance to next level
next_level = set() # and start a new list (fringe)
for v in this_level:
if v not in seen:
seen[v] = level # set the level of vertex v
next_level.update(graph.rows[v])
if cutoff is not None and cutoff <= level:
break
level += 1
return seen # return all path lengths as dictionary
if hasattr(sparse, 'connected_components'):
connected_components = sparse.connected_components
else:
from .sparsetools import connected_components
###############################################################################
# Graph laplacian
def graph_laplacian(csgraph, normed=False, return_diag=False):
""" Return the Laplacian matrix of a directed graph.
For non-symmetric graphs the out-degree is used in the computation.
Parameters
----------
csgraph : array_like or sparse matrix, 2 dimensions
compressed-sparse graph, with shape (N, N).
normed : bool, optional
If True, then compute normalized Laplacian.
return_diag : bool, optional
If True, then return diagonal as well as laplacian.
Returns
-------
lap : ndarray
The N x N laplacian matrix of graph.
diag : ndarray
The length-N diagonal of the laplacian matrix.
diag is returned only if return_diag is True.
Notes
-----
The Laplacian matrix of a graph is sometimes referred to as the
"Kirchoff matrix" or the "admittance matrix", and is useful in many
parts of spectral graph theory. In particular, the eigen-decomposition
of the laplacian matrix can give insight into many properties of the graph.
For non-symmetric directed graphs, the laplacian is computed using the
out-degree of each node.
"""
if csgraph.ndim != 2 or csgraph.shape[0] != csgraph.shape[1]:
raise ValueError('csgraph must be a square matrix or array')
if normed and (np.issubdtype(csgraph.dtype, np.int)
or np.issubdtype(csgraph.dtype, np.uint)):
csgraph = check_array(csgraph, dtype=np.float64, accept_sparse=True)
if sparse.isspmatrix(csgraph):
return _laplacian_sparse(csgraph, normed=normed,
return_diag=return_diag)
else:
return _laplacian_dense(csgraph, normed=normed,
return_diag=return_diag)
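# A minimal usage sketch (editor's addition, not part of the original module);
# it assumes a small symmetric adjacency matrix:
#
#     >>> import numpy as np
#     >>> adjacency = np.array([[0, 1, 0],
#     ...                       [1, 0, 1],
#     ...                       [0, 1, 0]])
#     >>> lap, diag = graph_laplacian(adjacency, return_diag=True)
#     >>> # diag holds the node degrees; lap is the degree matrix minus the adjacency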
def _laplacian_sparse(graph, normed=False, return_diag=False):
n_nodes = graph.shape[0]
if not graph.format == 'coo':
lap = (-graph).tocoo()
else:
lap = -graph.copy()
diag_mask = (lap.row == lap.col)
if not diag_mask.sum() == n_nodes:
# The sparsity pattern of the matrix has holes on the diagonal,
# we need to fix that
diag_idx = lap.row[diag_mask]
diagonal_holes = list(set(range(n_nodes)).difference(diag_idx))
new_data = np.concatenate([lap.data, np.ones(len(diagonal_holes))])
new_row = np.concatenate([lap.row, diagonal_holes])
new_col = np.concatenate([lap.col, diagonal_holes])
lap = sparse.coo_matrix((new_data, (new_row, new_col)),
shape=lap.shape)
diag_mask = (lap.row == lap.col)
lap.data[diag_mask] = 0
w = -np.asarray(lap.sum(axis=1)).squeeze()
if normed:
w = np.sqrt(w)
w_zeros = (w == 0)
w[w_zeros] = 1
lap.data /= w[lap.row]
lap.data /= w[lap.col]
lap.data[diag_mask] = (1 - w_zeros[lap.row[diag_mask]]).astype(
lap.data.dtype)
else:
lap.data[diag_mask] = w[lap.row[diag_mask]]
if return_diag:
return lap, w
return lap
def _laplacian_dense(graph, normed=False, return_diag=False):
n_nodes = graph.shape[0]
lap = -np.asarray(graph) # minus sign leads to a copy
# set diagonal to zero
lap.flat[::n_nodes + 1] = 0
w = -lap.sum(axis=0)
if normed:
w = np.sqrt(w)
w_zeros = (w == 0)
w[w_zeros] = 1
lap /= w
lap /= w[:, np.newaxis]
lap.flat[::n_nodes + 1] = (1 - w_zeros).astype(lap.dtype)
else:
lap.flat[::n_nodes + 1] = w.astype(lap.dtype)
if return_diag:
return lap, w
return lap
| bsd-3-clause |
JMill/edX-Learning-From-Data-Solutions-jm | Homework_7/Python/hw7_by_kirbs.py | 3 | 11985 | #-------------------------------------------------------------------------------
# Name: homework 7
# Author:      kirbs
# Created: 11/16/2013
#-------------------------------------------------------------------------------
#!/usr/bin/env python
import numpy
import urllib
import random
import sys
import pylab  # needed by the plotting helpers (plot, showPlot) defined further below
from sklearn import svm
from sklearn.grid_search import GridSearchCV
# ###################################################
# ################## Helpers ######################
# ###################################################
def in_dta():
fpin = urllib.urlopen("http://work.caltech.edu/data/in.dta")
return ([map(float,(line.strip('\n').split('\r')[0].split())) for line in fpin])
def out_dta():
fpin = urllib.urlopen("http://work.caltech.edu/data/out.dta")
return ([map(float,(line.strip('\n').split('\r')[0].split())) for line in fpin])
def transformPoint(p):
return [1, p[0], p[1], p[0]**2, p[1]**2, p[0]*p[1], abs(p[0]-p[1]), abs(p[0]+p[1]), p[2]]
def transformPoints(points, slicePosition):
transformedPoints = []
for point in points:
out = transformPoint(point)[:slicePosition + 1]
out.append(point[-1])
transformedPoints.append(out)
return transformedPoints
"""
Calculate weights using linear regression.
Return list of weights.
"""
def linearRegression(samplePoints):
X = []
y = []
y_location = len(samplePoints[0]) -1 # y's location is assumed to be the last element in the list
# Construct X space and split y values out
for point in samplePoints:
X.append(numpy.array(point[:y_location]))
y.append(point[y_location])
X = numpy.array(X)
y = numpy.array(y)
X_inverse = numpy.linalg.pinv(X)
return numpy.dot(X_inverse, y)
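# Note (editor): linearRegression computes the one-shot least-squares solution
# w = pinv(X) . y, which coincides with (X^T X)^-1 X^T y whenever X^T X is invertible.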
def linRegWithRegularization(samplePoints, l):
X = []
y = []
y_location = len(samplePoints[0]) -1 # y's location is assumed to be the last element in the list
# Construct X space and split y values out
for point in samplePoints:
X.append(numpy.array(point[:y_location]))
y.append(point[y_location])
weights = linearRegression(samplePoints)
X = numpy.array(X)
X_inverse = numpy.linalg.pinv(X + numpy.array(l/len(samplePoints)*numpy.dot(weights, weights)))
return numpy.dot(X_inverse, y)
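# Editor's reference sketch (hypothetical helper, not called anywhere in this script):
# the textbook weight-decay / ridge closed form is w_reg = (Z^T Z + lambda*I)^-1 Z^T y.
# A minimal implementation under the same point layout used above:
def linRegWeightDecayReference(samplePoints, l):
    y_location = len(samplePoints[0]) - 1
    Z = numpy.array([p[:y_location] for p in samplePoints])
    y = numpy.array([p[y_location] for p in samplePoints])
    regularizer = l * numpy.identity(Z.shape[1])
    return numpy.dot(numpy.linalg.inv(numpy.dot(Z.T, Z) + regularizer),
                     numpy.dot(Z.T, y))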
def eVal(validationPoints, weights):
y_location = len(validationPoints[0]) - 1
e_val = 0
for p in validationPoints:
if numpy.sign(numpy.dot(weights, p[:y_location])) != numpy.sign(p[y_location]):
e_val += 1
return e_val/float(len(validationPoints))
# ##############################################################################
def q1():
data = in_dta()
for k in range(3,8):
weights = linearRegression(transformPoints(data[:25], k))
e_val = eVal(transformPoints(data[25:],k), weights)
print "k={}, e={}".format(k, e_val)
#q1()
def q2():
data = in_dta()
testData = out_dta()
for k in range(3,8):
weights = linearRegression(transformPoints(data[:25], k))
e_val = eVal(transformPoints(testData[25:],k), weights)
print "k={}, e={}".format(k, e_val)
#q2()
def q3():
data = in_dta()
for k in range(3,8):
weights = linearRegression(transformPoints(data[25:], k))
e_val = eVal(transformPoints(data[:25],k), weights)
print "k={}, e={}".format(k, e_val)
#q3()
def q4():
data = in_dta()
testData = out_dta()
for k in range(3,8):
weights = linearRegression(transformPoints(data[25:], k))
e_val = eVal(transformPoints(testData[:25],k), weights)
print "k={}, e={}".format(k, e_val)
#q4()
def q6():
e = []
e1 = []
e2 = []
for i in range(1000000):
_e1 = random.uniform(0,1)
_e2 = random.uniform(0,1)
e.append(min(_e1, _e2))
e1.append(_e1)
e2.append(_e2)
print "e_1={}, e_2={}, e={}".format(numpy.mean(e1), numpy.mean(e2), numpy.mean(e))
#q6()
def q7():
def getPoints(val):
return [[1, -1,0],[1, val,1],[1, 1,0]]
    def linear(points):
        # unused below; numpy.linalg.lstsq needs the design matrix and the targets separately
        pts = numpy.array(points)
        return numpy.linalg.lstsq(pts[:, :2], pts[:, 2])[0]
answers = {"a": (3**.5+4)**.5, "b":(3**.5-1)**.5, "c": (3+4*6**.5)**.5, "d":(9-6**.5)**.5}
e_cv = {}
for key, ans in answers.iteritems():
e_constant = []
e_linear = []
for i in range(3):
points = getPoints(ans)
del points[i]
weights = linearRegression(points)
# squared error
for p in points:
e_linear.append((numpy.dot(weights, p[:2]) - p[2])**2)
e_constant.append((weights[0] - p[2])**2)
print "ans={}, e_constant={}, e_linear={}".format(key, numpy.mean(e_constant), numpy.mean(e_linear))
#q7()
# #####################################################
# ########################################
# Perceptron helper functions from HW 1 ##
# ########################################
def generatePoints(numberOfPoints):
## random.seed(1) #used for testing
x1 = random.uniform(-1, 1)
y1 = random.uniform(-1, 1)
x2 = random.uniform(-1, 1)
y2 = random.uniform(-1, 1)
points = []
for i in range (numberOfPoints):
## random.seed(1)
x = random.uniform (-1, 1)
y = random.uniform (-1, 1)
points.append([1, x, y, hw1TargetFunction(x1, y1, x2, y2, x, y)]) # add 1/-1 indicator to the end of each point list
return x1, y1, x2, y2, points
def hw1TargetFunction(x1,y1,x2,y2,x3,y3):
u = (x2-x1)*(y3-y1) - (y2-y1)*(x3-x1)
if u >= 0:
return 1
elif u < 0:
return -1
# ##########################################
"""
Helper function to visualize 2D data in [-1,1]x[-1,1] plane.
samplePoints is required; all other parameters are optional.
weights takes a list of weights and plots a line.
x1, y1, x2, y2 represents two points in a 2D target function;
this also plots the line.
"""
def plot(samplePoints, weights = None, x1 = None, y1 = None, x2 = None, y2 = None):
red_x = []
red_y = []
blue_x = []
blue_y = []
for point in samplePoints:
if point[3] == -1.0:
red_x.append(point[1])
red_y.append(point[2])
else:
blue_x.append(point[1])
blue_y.append(point[2])
pylab.plot(red_x, red_y, 'ro', label = '-1\'s')
pylab.plot(blue_x, blue_y , 'bo', label = '1\'s')
x = numpy.array( [-1,1] )
if x1 is not None:
# plot target function(black) and hypothesis function(red) lines
slope = (y2-y1)/(x2-x1)
intercept = y2 - slope * x2
pylab.plot(x, slope*x + intercept, 'r')
if weights is not None:
pylab.plot( x, -weights[1]/weights[2] * x - weights[0] / weights[2] , linewidth = 2, c ='g', label = 'g') # this will throw an error if w[2] == 0
pylab.ylim([-1,1])
pylab.xlim([-1,1])
pylab.legend()
pylab.show()
# ###############################
# ######### Perceptron #########
# ###############################
"""
Plots sample points, and two lines based on weight lists.
Useful in showing how the perceptron is updating its weights during each
iteration.
"""
def showPlot(samplePoints, w1, w2):
green_x = []
green_y = []
blue_x = []
blue_y = []
for x in samplePoints:
if x[3] == 1:
green_x.append(x[1])
green_y.append(x[2])
else:
blue_x.append(x[1])
blue_y.append(x[2])
pylab.plot(green_x, green_y, 'go')
pylab.plot(blue_x, blue_y, 'bo')
# plot target function(black) and hypothesis function(red) lines
x = numpy.array( [-1,1] )
pylab.plot( x, -w1[1]/w1[2] * x - w1[0] / w1[2] ,lw=3.0, c='r', ls='-.', label = 'Before update') # this will throw an error if w[2] == 0
pylab.plot( x, -w2[1]/w2[2] * x - w2[0] / w2[2] , 'm--', label = 'After update weights') # this will throw an error if w[2] == 0
pylab.legend()
pylab.show()
"""
Primary perceptron method.
Returns iteration count and final weight list.
"""
def train(training_points, iterationLimit, weights):
w = weights[:] # initialize weights for w[0], w[1], w[2]
learned = False
iterations = 0
def updateWeights():
random.shuffle(training_points) #randomize training points
for point in training_points:
res = numpy.sign(numpy.dot(w, point[:3])) #caclulate point
if point[3] != res: # does point's y match our calculated y?
#if not update weights
w[0] += point[0]*point[3]
w[1] += point[1]*point[3]
w[2] += point[2]*point[3]
## showPlot(training_points, weights, w)
return False # break out of loop and return
return True # if the loop reaches this point all calculated points in the training points match their expected y's
while not learned:
noErrors = updateWeights()
if iterations == iterationLimit or noErrors:
learned = True
break
if iterations >1:
i = 0
iterations += 1
return iterations, w
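# Note (editor): updateWeights() above applies the standard PLA update
# w <- w + y * x to one misclassified point at a time, visited in random order.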
"""
Estimates the out-of-sample error of a PLA hypothesis: each point in testPoints
is classified with sign(w . x) and compared against the target function defined
by the line through (x1, y1) and (x2, y2).
Returns the fraction of misclassified test points.
"""
def eOutPLA(testPoints, weights, x1, y1, x2, y2):
errorCount = 0
for point in testPoints:
if numpy.sign(numpy.dot(point[:3], weights)) != numpy.sign(hw1TargetFunction(x1, y1, x2, y2, point[1], point[2])):
errorCount += 1
return errorCount/float(len(testPoints))
def eOutSVM(testPoints, svm, x1, y1, x2, y2):
errorCount = 0
for point in testPoints:
if svm.predict([point[:3]])[0] != numpy.sign(hw1TargetFunction(x1, y1, x2, y2, point[1], point[2])):
errorCount += 1
return errorCount/float(len(testPoints))
def machinery(points, c):
X = []
y = []
y_location = len(points[0]) -1 # y's location is assumed to be the last element in the list
for point in points:
X.append(numpy.array(point[:y_location]))
y.append(point[y_location])
machine = svm.SVC(kernel = 'linear', C=c)
return machine.fit(X, y)
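# Note (editor): plaVSsvm() below calls machinery() with c=1.0e6; with such a
# large C the soft-margin SVM effectively behaves as a hard-margin classifier
# on this linearly separable data.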
def estimator(points):
X = []
y = []
y_location = len(points[0]) -1 # y's location is assumed to be the last element in the list
for point in points:
X.append(numpy.array(point[:y_location]))
y.append(numpy.array(point[y_location]))
params = {'kernel': ['linear'], 'C': [1, 10, 100, 1000]}
machine = GridSearchCV(svm.SVC(), params)
machine.fit(X, numpy.array(y))
    return machine.best_estimator_
def validPoints(points):
has_0 = False
has_1 = False
for p in points:
if p[-1] == -1:
has_0 = True
else:
has_1 = True
if has_0 and has_1:
return True
return False
# #############################################
def plaVSsvm(numOfTrials, numOfPoints):
svm_better_cnt = 0
sv_count = []
for i in range(numOfTrials):
x1, y1, x2, y2, points = generatePoints(numOfPoints)
iterations, perc_weights = train(points, 100, [0,0,0])
if(validPoints(points)):
a,b,c,d, testPoints = generatePoints(10000)
pla_e_out = eOutPLA(testPoints, perc_weights, x1, y1, x2, y2)
machine = machinery(points, 1.0e6)
svm_e_out = eOutSVM(testPoints, machine, x1, y1, x2, y2)
## print "PLA {}, SVM {}, SVM better? {}".format(pla_e_out, svm_e_out, svm_e_out < pla_e_out)
if svm_e_out < pla_e_out:
svm_better_cnt += 1
sv_count.append(numpy.sum(machine.n_support_))
## print machine.n_support_
return svm_better_cnt/float(numOfTrials), numpy.mean(sv_count)
# Question 8
#print plaVSsvm(1000,10)
# Question 9/10
#print plaVSsvm(1000, 100)
| apache-2.0 |
appapantula/scikit-learn | examples/ensemble/plot_adaboost_twoclass.py | 347 | 3268 | """
==================
Two-class AdaBoost
==================
This example fits an AdaBoosted decision stump on a non-linearly separable
classification dataset composed of two "Gaussian quantiles" clusters
(see :func:`sklearn.datasets.make_gaussian_quantiles`) and plots the decision
boundary and decision scores. The distributions of decision scores are shown
separately for samples of class A and B. The predicted class label for each
sample is determined by the sign of the decision score. Samples with decision
scores greater than zero are classified as B, and are otherwise classified
as A. The magnitude of a decision score determines the degree of likeness with
the predicted class label. Additionally, a new dataset could be constructed
containing a desired purity of class B, for example, by only selecting samples
with a decision score above some value.
"""
print(__doc__)
# Author: Noel Dawe <[email protected]>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.ensemble import AdaBoostClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.datasets import make_gaussian_quantiles
# Construct dataset
X1, y1 = make_gaussian_quantiles(cov=2.,
n_samples=200, n_features=2,
n_classes=2, random_state=1)
X2, y2 = make_gaussian_quantiles(mean=(3, 3), cov=1.5,
n_samples=300, n_features=2,
n_classes=2, random_state=1)
X = np.concatenate((X1, X2))
y = np.concatenate((y1, - y2 + 1))
# Create and fit an AdaBoosted decision tree
bdt = AdaBoostClassifier(DecisionTreeClassifier(max_depth=1),
algorithm="SAMME",
n_estimators=200)
bdt.fit(X, y)
plot_colors = "br"
plot_step = 0.02
class_names = "AB"
plt.figure(figsize=(10, 5))
# Plot the decision boundaries
plt.subplot(121)
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, plot_step),
np.arange(y_min, y_max, plot_step))
Z = bdt.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
cs = plt.contourf(xx, yy, Z, cmap=plt.cm.Paired)
plt.axis("tight")
# Plot the training points
for i, n, c in zip(range(2), class_names, plot_colors):
idx = np.where(y == i)
plt.scatter(X[idx, 0], X[idx, 1],
c=c, cmap=plt.cm.Paired,
label="Class %s" % n)
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.legend(loc='upper right')
plt.xlabel('x')
plt.ylabel('y')
plt.title('Decision Boundary')
# Plot the two-class decision scores
twoclass_output = bdt.decision_function(X)
plot_range = (twoclass_output.min(), twoclass_output.max())
plt.subplot(122)
for i, n, c in zip(range(2), class_names, plot_colors):
plt.hist(twoclass_output[y == i],
bins=10,
range=plot_range,
facecolor=c,
label='Class %s' % n,
alpha=.5)
x1, x2, y1, y2 = plt.axis()
plt.axis((x1, x2, y1, y2 * 1.2))
plt.legend(loc='upper right')
plt.ylabel('Samples')
plt.xlabel('Score')
plt.title('Decision Scores')
plt.tight_layout()
plt.subplots_adjust(wspace=0.35)
plt.show()
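# Editor's sketch (not part of the original example): the higher-purity subset of
# class B mentioned in the docstring could be selected along these lines, where
# the 0.5 threshold is an arbitrary illustrative choice:
#     mask = bdt.decision_function(X) > 0.5
#     X_mostly_B, y_mostly_B = X[mask], y[mask]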
| bsd-3-clause |
xiaoxiamii/scikit-learn | examples/linear_model/plot_ard.py | 248 | 2622 | """
==================================================
Automatic Relevance Determination Regression (ARD)
==================================================
Fit regression model with Automatic Relevance Determination (ARD) regression.
See :ref:`bayesian_ridge_regression` for more information on the regressor.
Compared to the OLS (ordinary least squares) estimator, the coefficient
weights are slightly shifted toward zeros, which stabilises them.
The histogram of the estimated weights is very peaked, as a sparsity-inducing
prior is implied on the weights.
The estimation of the model is done by iteratively maximizing the
marginal log-likelihood of the observations.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
from sklearn.linear_model import ARDRegression, LinearRegression
###############################################################################
# Generating simulated data with Gaussian weights
# Parameters of the example
np.random.seed(0)
n_samples, n_features = 100, 100
# Create Gaussian data
X = np.random.randn(n_samples, n_features)
# Create weights with a precision lambda_ of 4.
lambda_ = 4.
w = np.zeros(n_features)
# Only keep 10 weights of interest
relevant_features = np.random.randint(0, n_features, 10)
for i in relevant_features:
w[i] = stats.norm.rvs(loc=0, scale=1. / np.sqrt(lambda_))
# Create noise with a precision alpha of 50.
alpha_ = 50.
noise = stats.norm.rvs(loc=0, scale=1. / np.sqrt(alpha_), size=n_samples)
# Create the target
y = np.dot(X, w) + noise
###############################################################################
# Fit the ARD Regression
clf = ARDRegression(compute_score=True)
clf.fit(X, y)
ols = LinearRegression()
ols.fit(X, y)
###############################################################################
# Plot the true weights, the estimated weights and the histogram of the
# weights
plt.figure(figsize=(6, 5))
plt.title("Weights of the model")
plt.plot(clf.coef_, 'b-', label="ARD estimate")
plt.plot(ols.coef_, 'r--', label="OLS estimate")
plt.plot(w, 'g-', label="Ground truth")
plt.xlabel("Features")
plt.ylabel("Values of the weights")
plt.legend(loc=1)
plt.figure(figsize=(6, 5))
plt.title("Histogram of the weights")
plt.hist(clf.coef_, bins=n_features, log=True)
plt.plot(clf.coef_[relevant_features], 5 * np.ones(len(relevant_features)),
'ro', label="Relevant features")
plt.ylabel("Features")
plt.xlabel("Values of the weights")
plt.legend(loc=1)
plt.figure(figsize=(6, 5))
plt.title("Marginal log-likelihood")
plt.plot(clf.scores_)
plt.ylabel("Score")
plt.xlabel("Iterations")
plt.show()
| bsd-3-clause |
dominicelse/scipy | scipy/ndimage/fourier.py | 25 | 11866 | # Copyright (C) 2003-2005 Peter J. Verveer
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# 3. The name of the author may not be used to endorse or promote
# products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
# GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import division, print_function, absolute_import
import numpy
from . import _ni_support
from . import _nd_image
__all__ = ['fourier_gaussian', 'fourier_uniform', 'fourier_ellipsoid',
'fourier_shift']
def _get_output_fourier(output, input):
if output is None:
if input.dtype.type in [numpy.complex64, numpy.complex128,
numpy.float32]:
output = numpy.zeros(input.shape, dtype=input.dtype)
else:
output = numpy.zeros(input.shape, dtype=numpy.float64)
return_value = output
elif type(output) is type:
if output not in [numpy.complex64, numpy.complex128,
numpy.float32, numpy.float64]:
raise RuntimeError("output type not supported")
output = numpy.zeros(input.shape, dtype=output)
return_value = output
else:
if output.shape != input.shape:
raise RuntimeError("output shape not correct")
return_value = None
return output, return_value
def _get_output_fourier_complex(output, input):
if output is None:
if input.dtype.type in [numpy.complex64, numpy.complex128]:
output = numpy.zeros(input.shape, dtype=input.dtype)
else:
output = numpy.zeros(input.shape, dtype=numpy.complex128)
return_value = output
elif type(output) is type:
if output not in [numpy.complex64, numpy.complex128]:
raise RuntimeError("output type not supported")
output = numpy.zeros(input.shape, dtype=output)
return_value = output
else:
if output.shape != input.shape:
raise RuntimeError("output shape not correct")
return_value = None
return output, return_value
def fourier_gaussian(input, sigma, n=-1, axis=-1, output=None):
"""
Multi-dimensional Gaussian fourier filter.
The array is multiplied with the fourier transform of a Gaussian
kernel.
Parameters
----------
input : array_like
The input array.
sigma : float or sequence
The sigma of the Gaussian kernel. If a float, `sigma` is the same for
all axes. If a sequence, `sigma` has to contain one value for each
axis.
n : int, optional
If `n` is negative (default), then the input is assumed to be the
result of a complex fft.
If `n` is larger than or equal to zero, the input is assumed to be the
result of a real fft, and `n` gives the length of the array before
transformation along the real transform direction.
axis : int, optional
The axis of the real transform.
output : ndarray, optional
If given, the result of filtering the input is placed in this array.
None is returned in this case.
Returns
-------
fourier_gaussian : ndarray or None
The filtered input. If `output` is given as a parameter, None is
returned.
Examples
--------
>>> from scipy import ndimage, misc
>>> import numpy.fft
>>> import matplotlib.pyplot as plt
>>> fig, (ax1, ax2) = plt.subplots(1, 2)
>>> plt.gray() # show the filtered result in grayscale
>>> ascent = misc.ascent()
>>> input_ = numpy.fft.fft2(ascent)
>>> result = ndimage.fourier_gaussian(input_, sigma=4)
>>> result = numpy.fft.ifft2(result)
>>> ax1.imshow(ascent)
>>> ax2.imshow(result.real) # the imaginary part is an artifact
>>> plt.show()
"""
input = numpy.asarray(input)
output, return_value = _get_output_fourier(output, input)
axis = _ni_support._check_axis(axis, input.ndim)
sigmas = _ni_support._normalize_sequence(sigma, input.ndim)
sigmas = numpy.asarray(sigmas, dtype=numpy.float64)
if not sigmas.flags.contiguous:
sigmas = sigmas.copy()
_nd_image.fourier_filter(input, sigmas, n, axis, output, 0)
return return_value
def fourier_uniform(input, size, n=-1, axis=-1, output=None):
"""
Multi-dimensional uniform fourier filter.
The array is multiplied with the fourier transform of a box of given
size.
Parameters
----------
input : array_like
The input array.
size : float or sequence
The size of the box used for filtering.
If a float, `size` is the same for all axes. If a sequence, `size` has
to contain one value for each axis.
n : int, optional
If `n` is negative (default), then the input is assumed to be the
result of a complex fft.
If `n` is larger than or equal to zero, the input is assumed to be the
result of a real fft, and `n` gives the length of the array before
transformation along the real transform direction.
axis : int, optional
The axis of the real transform.
output : ndarray, optional
If given, the result of filtering the input is placed in this array.
None is returned in this case.
Returns
-------
fourier_uniform : ndarray or None
The filtered input. If `output` is given as a parameter, None is
returned.
Examples
--------
>>> from scipy import ndimage, misc
>>> import numpy.fft
>>> import matplotlib.pyplot as plt
>>> fig, (ax1, ax2) = plt.subplots(1, 2)
>>> plt.gray() # show the filtered result in grayscale
>>> ascent = misc.ascent()
>>> input_ = numpy.fft.fft2(ascent)
>>> result = ndimage.fourier_uniform(input_, size=20)
>>> result = numpy.fft.ifft2(result)
>>> ax1.imshow(ascent)
>>> ax2.imshow(result.real) # the imaginary part is an artifact
>>> plt.show()
"""
input = numpy.asarray(input)
output, return_value = _get_output_fourier(output, input)
axis = _ni_support._check_axis(axis, input.ndim)
sizes = _ni_support._normalize_sequence(size, input.ndim)
sizes = numpy.asarray(sizes, dtype=numpy.float64)
if not sizes.flags.contiguous:
sizes = sizes.copy()
_nd_image.fourier_filter(input, sizes, n, axis, output, 1)
return return_value
def fourier_ellipsoid(input, size, n=-1, axis=-1, output=None):
"""
Multi-dimensional ellipsoid fourier filter.
The array is multiplied with the fourier transform of a ellipsoid of
given sizes.
Parameters
----------
input : array_like
The input array.
size : float or sequence
The size of the box used for filtering.
If a float, `size` is the same for all axes. If a sequence, `size` has
to contain one value for each axis.
n : int, optional
If `n` is negative (default), then the input is assumed to be the
result of a complex fft.
If `n` is larger than or equal to zero, the input is assumed to be the
result of a real fft, and `n` gives the length of the array before
transformation along the real transform direction.
axis : int, optional
The axis of the real transform.
output : ndarray, optional
If given, the result of filtering the input is placed in this array.
None is returned in this case.
Returns
-------
fourier_ellipsoid : ndarray or None
The filtered input. If `output` is given as a parameter, None is
returned.
Notes
-----
This function is implemented for arrays of rank 1, 2, or 3.
Examples
--------
>>> from scipy import ndimage, misc
>>> import numpy.fft
>>> import matplotlib.pyplot as plt
>>> fig, (ax1, ax2) = plt.subplots(1, 2)
>>> plt.gray() # show the filtered result in grayscale
>>> ascent = misc.ascent()
>>> input_ = numpy.fft.fft2(ascent)
>>> result = ndimage.fourier_ellipsoid(input_, size=20)
>>> result = numpy.fft.ifft2(result)
>>> ax1.imshow(ascent)
>>> ax2.imshow(result.real) # the imaginary part is an artifact
>>> plt.show()
"""
input = numpy.asarray(input)
output, return_value = _get_output_fourier(output, input)
axis = _ni_support._check_axis(axis, input.ndim)
sizes = _ni_support._normalize_sequence(size, input.ndim)
sizes = numpy.asarray(sizes, dtype=numpy.float64)
if not sizes.flags.contiguous:
sizes = sizes.copy()
_nd_image.fourier_filter(input, sizes, n, axis, output, 2)
return return_value
def fourier_shift(input, shift, n=-1, axis=-1, output=None):
"""
Multi-dimensional fourier shift filter.
The array is multiplied with the fourier transform of a shift operation.
Parameters
----------
input : array_like
The input array.
shift : float or sequence
        The shift to apply along each axis.
If a float, `shift` is the same for all axes. If a sequence, `shift`
has to contain one value for each axis.
n : int, optional
If `n` is negative (default), then the input is assumed to be the
result of a complex fft.
If `n` is larger than or equal to zero, the input is assumed to be the
result of a real fft, and `n` gives the length of the array before
transformation along the real transform direction.
axis : int, optional
The axis of the real transform.
output : ndarray, optional
If given, the result of shifting the input is placed in this array.
None is returned in this case.
Returns
-------
fourier_shift : ndarray or None
The shifted input. If `output` is given as a parameter, None is
returned.
Examples
--------
>>> from scipy import ndimage, misc
>>> import matplotlib.pyplot as plt
>>> import numpy.fft
>>> fig, (ax1, ax2) = plt.subplots(1, 2)
>>> plt.gray() # show the filtered result in grayscale
>>> ascent = misc.ascent()
>>> input_ = numpy.fft.fft2(ascent)
>>> result = ndimage.fourier_shift(input_, shift=200)
>>> result = numpy.fft.ifft2(result)
>>> ax1.imshow(ascent)
>>> ax2.imshow(result.real) # the imaginary part is an artifact
>>> plt.show()
"""
input = numpy.asarray(input)
output, return_value = _get_output_fourier_complex(output, input)
axis = _ni_support._check_axis(axis, input.ndim)
shifts = _ni_support._normalize_sequence(shift, input.ndim)
shifts = numpy.asarray(shifts, dtype=numpy.float64)
if not shifts.flags.contiguous:
shifts = shifts.copy()
_nd_image.fourier_shift(input, shifts, n, axis, output)
return return_value
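# Editor's note: fourier_shift applies the Fourier shift theorem -- shifting by s
# multiplies the transform by exp(-2j * pi * s * k / n) along each axis -- so
# numpy.fft.ifft2(fourier_shift(numpy.fft.fft2(img), s)).real approximates a
# circular shift of img by s pixels, as in the docstring example above.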
| bsd-3-clause |
dsullivan7/scikit-learn | examples/semi_supervised/plot_label_propagation_digits_active_learning.py | 294 | 3417 | """
========================================
Label Propagation digits active learning
========================================
Demonstrates an active learning technique to learn handwritten digits
using label propagation.
We start by training a label propagation model with only 10 labeled points,
then we select the top five most uncertain points to label. Next, we train
with 15 labeled points (original 10 + 5 new ones). We repeat this process
four times to have a model trained with 30 labeled examples.
A plot will appear showing the top 5 most uncertain digits for each iteration
of training. These may or may not contain mistakes, but we will train the next
model with their true labels.
"""
print(__doc__)
# Authors: Clay Woolam <[email protected]>
# Licence: BSD
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
from sklearn import datasets
from sklearn.semi_supervised import label_propagation
from sklearn.metrics import classification_report, confusion_matrix
digits = datasets.load_digits()
rng = np.random.RandomState(0)
indices = np.arange(len(digits.data))
rng.shuffle(indices)
X = digits.data[indices[:330]]
y = digits.target[indices[:330]]
images = digits.images[indices[:330]]
n_total_samples = len(y)
n_labeled_points = 10
unlabeled_indices = np.arange(n_total_samples)[n_labeled_points:]
f = plt.figure()
for i in range(5):
y_train = np.copy(y)
y_train[unlabeled_indices] = -1
lp_model = label_propagation.LabelSpreading(gamma=0.25, max_iter=5)
lp_model.fit(X, y_train)
predicted_labels = lp_model.transduction_[unlabeled_indices]
true_labels = y[unlabeled_indices]
cm = confusion_matrix(true_labels, predicted_labels,
labels=lp_model.classes_)
print('Iteration %i %s' % (i, 70 * '_'))
print("Label Spreading model: %d labeled & %d unlabeled (%d total)"
% (n_labeled_points, n_total_samples - n_labeled_points, n_total_samples))
print(classification_report(true_labels, predicted_labels))
print("Confusion matrix")
print(cm)
# compute the entropies of transduced label distributions
pred_entropies = stats.distributions.entropy(
lp_model.label_distributions_.T)
# select five digit examples that the classifier is most uncertain about
    uncertainty_index = np.argsort(pred_entropies)[-5:]
# keep track of indices that we get labels for
delete_indices = np.array([])
f.text(.05, (1 - (i + 1) * .183),
"model %d\n\nfit with\n%d labels" % ((i + 1), i * 5 + 10), size=10)
for index, image_index in enumerate(uncertainty_index):
image = images[image_index]
sub = f.add_subplot(5, 5, index + 1 + (5 * i))
sub.imshow(image, cmap=plt.cm.gray_r)
sub.set_title('predict: %i\ntrue: %i' % (
lp_model.transduction_[image_index], y[image_index]), size=10)
sub.axis('off')
        # labeling 5 points: remove them from the unlabeled pool
delete_index, = np.where(unlabeled_indices == image_index)
delete_indices = np.concatenate((delete_indices, delete_index))
unlabeled_indices = np.delete(unlabeled_indices, delete_indices)
n_labeled_points += 5
f.suptitle("Active learning with Label Propagation.\nRows show 5 most "
"uncertain labels to learn with the next model.")
plt.subplots_adjust(0.12, 0.03, 0.9, 0.8, 0.2, 0.45)
plt.show()
| bsd-3-clause |
harpribot/deep-summarization | evaluation_plot_script.py | 1 | 9114 | from helpers.plotter import Plotter
from helpers.metric import Calculator
import matplotlib.pyplot as plt
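# Editor's note: every block below repeats the same load/evaluate/append pattern.
# The helper sketched here (unused by the script as written) shows how one block
# could be collapsed into a single call; it relies only on the Calculator API
# already exercised below.
def collect_metrics(result_file, hypothesis_dir, reference_dir, num_references=3):
    calculator = Calculator(num_references, hypothesis_dir, reference_dir)
    calculator.load_result(result_file)
    calculator.evaluate_all_ref_hyp_pairs()
    # returns (bleu_1, bleu_2, bleu_3, bleu_4, rouge) for the loaded result file
    return calculator.get_all_metrics()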
############## ALL GRU PLOTS ############################
result_file_1 = 'result/simple/gru/no_attention.csv'
result_file_2 = 'result/bidirectional/gru/no_attention.csv'
result_file_3 = 'result/stacked_simple/gru/no_attention.csv'
result_file_4 = 'result/stacked_bidirectional/gru/no_attention.csv'
result_file_description = ['gru_smpl', 'gru_bidr', 'gru_stack_smpl', 'gru_stack_bidr']
hypothesis_dir = 'metrics/hypothesis'
reference_dir = 'metrics/reference'
bleu_1 = []
bleu_2 = []
bleu_3 = []
bleu_4 = []
rouge = []
calculator = Calculator(3,hypothesis_dir,reference_dir)
calculator.load_result(result_file_1)
calculator.evaluate_all_ref_hyp_pairs()
bleu_1_val,bleu_2_val,bleu_3_val,bleu_4_val,rouge_val = calculator.get_all_metrics()
bleu_1.append(bleu_1_val)
bleu_2.append(bleu_2_val)
bleu_3.append(bleu_3_val)
bleu_4.append(bleu_4_val)
rouge.append(rouge_val)
calculator = Calculator(3,hypothesis_dir,reference_dir)
calculator.load_result(result_file_2)
calculator.evaluate_all_ref_hyp_pairs()
bleu_1_val,bleu_2_val,bleu_3_val,bleu_4_val,rouge_val = calculator.get_all_metrics()
bleu_1.append(bleu_1_val)
bleu_2.append(bleu_2_val)
bleu_3.append(bleu_3_val)
bleu_4.append(bleu_4_val)
rouge.append(rouge_val)
calculator = Calculator(3,hypothesis_dir,reference_dir)
calculator.load_result(result_file_3)
calculator.evaluate_all_ref_hyp_pairs()
bleu_1_val,bleu_2_val,bleu_3_val,bleu_4_val,rouge_val = calculator.get_all_metrics()
bleu_1.append(bleu_1_val)
bleu_2.append(bleu_2_val)
bleu_3.append(bleu_3_val)
bleu_4.append(bleu_4_val)
rouge.append(rouge_val)
calculator = Calculator(3,hypothesis_dir,reference_dir)
calculator.load_result(result_file_4)
calculator.evaluate_all_ref_hyp_pairs()
bleu_1_val,bleu_2_val,bleu_3_val,bleu_4_val,rouge_val = calculator.get_all_metrics()
bleu_1.append(bleu_1_val)
bleu_2.append(bleu_2_val)
bleu_3.append(bleu_3_val)
bleu_4.append(bleu_4_val)
rouge.append(rouge_val)
steps = calculator.get_steps()
plotter = Plotter()
plotter.set_metrics(bleu_1,bleu_2,bleu_3,bleu_4,rouge)
plotter.set_file_description(result_file_description)
plotter.set_steps(steps)
plotter.plot_all_metrics()
########## ALL LSTM PLOTS ####################
result_file_1 = 'result/simple/lstm/no_attention.csv'
result_file_2 = 'result/bidirectional/lstm/no_attention.csv'
result_file_3 = 'result/stacked_simple/lstm/no_attention.csv'
result_file_4 = 'result/stacked_bidirectional/lstm/no_attention.csv'
result_file_description = ['lstm_smpl','lstm_bidr','lstm_stack_smpl','lstm_stack_bidr']
hypothesis_dir = 'metrics/hypothesis'
reference_dir = 'metrics/reference'
bleu_1 = []
bleu_2 = []
bleu_3 = []
bleu_4 = []
rouge = []
calculator = Calculator(3,hypothesis_dir,reference_dir)
calculator.load_result(result_file_1)
calculator.evaluate_all_ref_hyp_pairs()
bleu_1_val,bleu_2_val,bleu_3_val,bleu_4_val,rouge_val = calculator.get_all_metrics()
bleu_1.append(bleu_1_val)
bleu_2.append(bleu_2_val)
bleu_3.append(bleu_3_val)
bleu_4.append(bleu_4_val)
rouge.append(rouge_val)
calculator = Calculator(3,hypothesis_dir,reference_dir)
calculator.load_result(result_file_2)
calculator.evaluate_all_ref_hyp_pairs()
bleu_1_val,bleu_2_val,bleu_3_val,bleu_4_val,rouge_val = calculator.get_all_metrics()
bleu_1.append(bleu_1_val)
bleu_2.append(bleu_2_val)
bleu_3.append(bleu_3_val)
bleu_4.append(bleu_4_val)
rouge.append(rouge_val)
calculator = Calculator(3,hypothesis_dir,reference_dir)
calculator.load_result(result_file_3)
calculator.evaluate_all_ref_hyp_pairs()
bleu_1_val,bleu_2_val,bleu_3_val,bleu_4_val,rouge_val = calculator.get_all_metrics()
bleu_1.append(bleu_1_val)
bleu_2.append(bleu_2_val)
bleu_3.append(bleu_3_val)
bleu_4.append(bleu_4_val)
rouge.append(rouge_val)
calculator = Calculator(3,hypothesis_dir,reference_dir)
calculator.load_result(result_file_4)
calculator.evaluate_all_ref_hyp_pairs()
bleu_1_val,bleu_2_val,bleu_3_val,bleu_4_val,rouge_val = calculator.get_all_metrics()
bleu_1.append(bleu_1_val)
bleu_2.append(bleu_2_val)
bleu_3.append(bleu_3_val)
bleu_4.append(bleu_4_val)
rouge.append(rouge_val)
steps = calculator.get_steps()
plotter = Plotter()
plotter.set_metrics(bleu_1,bleu_2,bleu_3,bleu_4,rouge)
plotter.set_file_description(result_file_description)
plotter.set_steps(steps)
plotter.plot_all_metrics()
#### GRU and LSTM Comparison plots #####
## SIMPLE
result_file_1 = 'result/simple/gru/no_attention.csv'
result_file_2 = 'result/simple/lstm/no_attention.csv'
result_file_description = ['gru_simple','lstm_simple']
bleu_1 = []
bleu_2 = []
bleu_3 = []
bleu_4 = []
rouge = []
calculator = Calculator(3,hypothesis_dir,reference_dir)
calculator.load_result(result_file_1)
calculator.evaluate_all_ref_hyp_pairs()
bleu_1_val,bleu_2_val,bleu_3_val,bleu_4_val,rouge_val = calculator.get_all_metrics()
bleu_1.append(bleu_1_val)
bleu_2.append(bleu_2_val)
bleu_3.append(bleu_3_val)
bleu_4.append(bleu_4_val)
rouge.append(rouge_val)
calculator = Calculator(3,hypothesis_dir,reference_dir)
calculator.load_result(result_file_2)
calculator.evaluate_all_ref_hyp_pairs()
bleu_1_val,bleu_2_val,bleu_3_val,bleu_4_val,rouge_val = calculator.get_all_metrics()
bleu_1.append(bleu_1_val)
bleu_2.append(bleu_2_val)
bleu_3.append(bleu_3_val)
bleu_4.append(bleu_4_val)
rouge.append(rouge_val)
steps = calculator.get_steps()
plotter = Plotter()
plotter.set_metrics(bleu_1,bleu_2,bleu_3,bleu_4,rouge)
plotter.set_file_description(result_file_description)
plotter.set_steps(steps)
plotter.plot_all_metrics()
## BIDIRECTIONAL
result_file_1 = 'result/bidirectional/gru/no_attention.csv'
result_file_2 = 'result/bidirectional/lstm/no_attention.csv'
result_file_description = ['gru_bidir','lstm_bidir']
bleu_1 = []
bleu_2 = []
bleu_3 = []
bleu_4 = []
rouge = []
for result_file in [result_file_1,result_file_2]:
    calculator = Calculator(3,hypothesis_dir,reference_dir)
    calculator.load_result(result_file)
    calculator.evaluate_all_ref_hyp_pairs()
    bleu_1_val,bleu_2_val,bleu_3_val,bleu_4_val,rouge_val = calculator.get_all_metrics()
    bleu_1.append(bleu_1_val)
    bleu_2.append(bleu_2_val)
    bleu_3.append(bleu_3_val)
    bleu_4.append(bleu_4_val)
    rouge.append(rouge_val)
steps = calculator.get_steps()
plotter = Plotter()
plotter.set_metrics(bleu_1,bleu_2,bleu_3,bleu_4,rouge)
plotter.set_file_description(result_file_description)
plotter.set_steps(steps)
plotter.plot_all_metrics()
## STACKED_SIMPLE
result_file_1 = 'result/stacked_simple/gru/no_attention.csv'
result_file_2 = 'result/stacked_simple/lstm/no_attention.csv'
result_file_description = ['gru_stacked','lstm_stacked']
bleu_1 = []
bleu_2 = []
bleu_3 = []
bleu_4 = []
rouge = []
for result_file in [result_file_1,result_file_2]:
    calculator = Calculator(3,hypothesis_dir,reference_dir)
    calculator.load_result(result_file)
    calculator.evaluate_all_ref_hyp_pairs()
    bleu_1_val,bleu_2_val,bleu_3_val,bleu_4_val,rouge_val = calculator.get_all_metrics()
    bleu_1.append(bleu_1_val)
    bleu_2.append(bleu_2_val)
    bleu_3.append(bleu_3_val)
    bleu_4.append(bleu_4_val)
    rouge.append(rouge_val)
steps = calculator.get_steps()
plotter = Plotter()
plotter.set_metrics(bleu_1,bleu_2,bleu_3,bleu_4,rouge)
plotter.set_file_description(result_file_description)
plotter.set_steps(steps)
plotter.plot_all_metrics()
## STACKED BIDIRECTIONAL
result_file_1 = 'result/stacked_bidirectional/gru/no_attention.csv'
result_file_2 = 'result/stacked_bidirectional/lstm/no_attention.csv'
result_file_description = ['gru_stack_bidir','lstm_stack_bidir']
bleu_1 = []
bleu_2 = []
bleu_3 = []
bleu_4 = []
rouge = []
for result_file in [result_file_1,result_file_2]:
    calculator = Calculator(3,hypothesis_dir,reference_dir)
    calculator.load_result(result_file)
    calculator.evaluate_all_ref_hyp_pairs()
    bleu_1_val,bleu_2_val,bleu_3_val,bleu_4_val,rouge_val = calculator.get_all_metrics()
    bleu_1.append(bleu_1_val)
    bleu_2.append(bleu_2_val)
    bleu_3.append(bleu_3_val)
    bleu_4.append(bleu_4_val)
    rouge.append(rouge_val)
steps = calculator.get_steps()
plotter = Plotter()
plotter.set_metrics(bleu_1,bleu_2,bleu_3,bleu_4,rouge)
plotter.set_file_description(result_file_description)
plotter.set_steps(steps)
plotter.plot_all_metrics()
# SHOW ALL PLOTS
plt.show()
| mit |
sourcepole/kadas-albireo | python/plugins/processing/algs/qgis/PolarPlot.py | 5 | 3040 | # -*- coding: utf-8 -*-
"""
***************************************************************************
    PolarPlot.py
---------------------
Date : January 2013
Copyright : (C) 2013 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'January 2013'
__copyright__ = '(C) 2013, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import matplotlib.pyplot as plt
import matplotlib.pylab as lab
from matplotlib.pyplot import figure
import numpy as np
from processing.core.GeoAlgorithm import GeoAlgorithm
from processing.core.parameters import ParameterTable
from processing.core.parameters import ParameterTableField
from processing.core.outputs import OutputHTML
from processing.tools import vector
from processing.tools import dataobjects
class PolarPlot(GeoAlgorithm):
INPUT = 'INPUT'
OUTPUT = 'OUTPUT'
NAME_FIELD = 'NAME_FIELD'
VALUE_FIELD = 'VALUE_FIELD'
def defineCharacteristics(self):
self.name = 'Polar plot'
self.group = 'Graphics'
self.addParameter(ParameterTable(self.INPUT,
self.tr('Input table')))
self.addParameter(ParameterTableField(self.NAME_FIELD,
self.tr('Category name field'), self.INPUT))
self.addParameter(ParameterTableField(self.VALUE_FIELD,
self.tr('Value field'), self.INPUT))
self.addOutput(OutputHTML(self.OUTPUT, self.tr('Output')))
def processAlgorithm(self, progress):
layer = dataobjects.getObjectFromUri(
self.getParameterValue(self.INPUT))
namefieldname = self.getParameterValue(self.NAME_FIELD)
valuefieldname = self.getParameterValue(self.VALUE_FIELD)
output = self.getOutputValue(self.OUTPUT)
values = vector.values(layer, namefieldname, valuefieldname)
plt.close()
fig = figure(figsize=(8, 8))
ax = fig.add_axes([0.1, 0.1, 0.8, 0.8], polar=True)
N = len(values[valuefieldname])
theta = np.arange(0.0, 2 * np.pi, 2 * np.pi / N)
radii = values[valuefieldname]
width = 2 * np.pi / N
ax.bar(theta, radii, width=width, bottom=0.0)
plotFilename = output + '.png'
lab.savefig(plotFilename)
f = open(output, 'w')
f.write('<img src="' + plotFilename + '"/>')
f.close()
| gpl-2.0 |
rubikloud/scikit-learn | examples/neighbors/plot_kde_1d.py | 347 | 5100 | """
===================================
Simple 1D Kernel Density Estimation
===================================
This example uses the :class:`sklearn.neighbors.KernelDensity` class to
demonstrate the principles of Kernel Density Estimation in one dimension.
The first plot shows one of the problems with using histograms to visualize
the density of points in 1D. Intuitively, a histogram can be thought of as a
scheme in which a unit "block" is stacked above each point on a regular grid.
As the top two panels show, however, the choice of gridding for these blocks
can lead to wildly divergent ideas about the underlying shape of the density
distribution. If we instead center each block on the point it represents, we
get the estimate shown in the bottom left panel. This is a kernel density
estimation with a "top hat" kernel. This idea can be generalized to other
kernel shapes: the bottom-right panel of the first figure shows a Gaussian
kernel density estimate over the same distribution.
Scikit-learn implements efficient kernel density estimation using either
a Ball Tree or KD Tree structure, through the
:class:`sklearn.neighbors.KernelDensity` estimator. The available kernels
are shown in the second figure of this example.
The third figure compares kernel density estimates for a distribution of 100
samples in 1 dimension. Though this example uses 1D distributions, kernel
density estimation is easily and efficiently extensible to higher dimensions
as well.
"""
# Author: Jake Vanderplas <[email protected]>
#
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import norm
from sklearn.neighbors import KernelDensity
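# The estimation pattern used throughout this example: fit a KernelDensity
# model on the samples, then take np.exp(kde.score_samples(grid)) to obtain
# the estimated density on a grid of query points (score_samples returns
# log-densities).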
#----------------------------------------------------------------------
# Plot the progression of histograms to kernels
np.random.seed(1)
N = 20
X = np.concatenate((np.random.normal(0, 1, 0.3 * N),
np.random.normal(5, 1, 0.7 * N)))[:, np.newaxis]
X_plot = np.linspace(-5, 10, 1000)[:, np.newaxis]
bins = np.linspace(-5, 10, 10)
fig, ax = plt.subplots(2, 2, sharex=True, sharey=True)
fig.subplots_adjust(hspace=0.05, wspace=0.05)
# histogram 1
ax[0, 0].hist(X[:, 0], bins=bins, fc='#AAAAFF', normed=True)
ax[0, 0].text(-3.5, 0.31, "Histogram")
# histogram 2
ax[0, 1].hist(X[:, 0], bins=bins + 0.75, fc='#AAAAFF', normed=True)
ax[0, 1].text(-3.5, 0.31, "Histogram, bins shifted")
# tophat KDE
kde = KernelDensity(kernel='tophat', bandwidth=0.75).fit(X)
log_dens = kde.score_samples(X_plot)
ax[1, 0].fill(X_plot[:, 0], np.exp(log_dens), fc='#AAAAFF')
ax[1, 0].text(-3.5, 0.31, "Tophat Kernel Density")
# Gaussian KDE
kde = KernelDensity(kernel='gaussian', bandwidth=0.75).fit(X)
log_dens = kde.score_samples(X_plot)
ax[1, 1].fill(X_plot[:, 0], np.exp(log_dens), fc='#AAAAFF')
ax[1, 1].text(-3.5, 0.31, "Gaussian Kernel Density")
for axi in ax.ravel():
axi.plot(X[:, 0], np.zeros(X.shape[0]) - 0.01, '+k')
axi.set_xlim(-4, 9)
axi.set_ylim(-0.02, 0.34)
for axi in ax[:, 0]:
axi.set_ylabel('Normalized Density')
for axi in ax[1, :]:
axi.set_xlabel('x')
#----------------------------------------------------------------------
# Plot all available kernels
X_plot = np.linspace(-6, 6, 1000)[:, None]
X_src = np.zeros((1, 1))
fig, ax = plt.subplots(2, 3, sharex=True, sharey=True)
fig.subplots_adjust(left=0.05, right=0.95, hspace=0.05, wspace=0.05)
def format_func(x, loc):
if x == 0:
return '0'
elif x == 1:
return 'h'
elif x == -1:
return '-h'
else:
return '%ih' % x
for i, kernel in enumerate(['gaussian', 'tophat', 'epanechnikov',
'exponential', 'linear', 'cosine']):
axi = ax.ravel()[i]
log_dens = KernelDensity(kernel=kernel).fit(X_src).score_samples(X_plot)
axi.fill(X_plot[:, 0], np.exp(log_dens), '-k', fc='#AAAAFF')
axi.text(-2.6, 0.95, kernel)
axi.xaxis.set_major_formatter(plt.FuncFormatter(format_func))
axi.xaxis.set_major_locator(plt.MultipleLocator(1))
axi.yaxis.set_major_locator(plt.NullLocator())
axi.set_ylim(0, 1.05)
axi.set_xlim(-2.9, 2.9)
ax[0, 1].set_title('Available Kernels')
#----------------------------------------------------------------------
# Plot a 1D density example
N = 100
np.random.seed(1)
X = np.concatenate((np.random.normal(0, 1, 0.3 * N),
np.random.normal(5, 1, 0.7 * N)))[:, np.newaxis]
X_plot = np.linspace(-5, 10, 1000)[:, np.newaxis]
true_dens = (0.3 * norm(0, 1).pdf(X_plot[:, 0])
+ 0.7 * norm(5, 1).pdf(X_plot[:, 0]))
fig, ax = plt.subplots()
ax.fill(X_plot[:, 0], true_dens, fc='black', alpha=0.2,
label='input distribution')
for kernel in ['gaussian', 'tophat', 'epanechnikov']:
kde = KernelDensity(kernel=kernel, bandwidth=0.5).fit(X)
log_dens = kde.score_samples(X_plot)
ax.plot(X_plot[:, 0], np.exp(log_dens), '-',
label="kernel = '{0}'".format(kernel))
ax.text(6, 0.38, "N={0} points".format(N))
ax.legend(loc='upper left')
ax.plot(X[:, 0], -0.005 - 0.01 * np.random.random(X.shape[0]), '+k')
ax.set_xlim(-4, 9)
ax.set_ylim(-0.02, 0.4)
plt.show()
| bsd-3-clause |
traxys/EyeTrackingInfo | room/room.py | 1 | 1031 | from PIL import Image, ImageTk
import numpy as np
import tkinter as tk
from random import randrange
import progressbar
#import matplotlib.pyplot as plt
def loadRoom():
    '''Returns a list containing 360 frames (deg_1.png through deg_360.png) as PIL Image objects'''
bar = progressbar.ProgressBar(
widgets=[
"Loading Room",
progressbar.Bar(),
"(",progressbar.ETA(),")"
],
max_value=360)
room = []
name = "Loading Room"
for i in bar(range(1,361)):
room.append( Image.open("deg_"+str(i)+".png") )
return room
def getRoomAsNP():
room = loadRoom()
npRoom = []
bar = progressbar.ProgressBar(
widgets=[
"Converting Room",
progressbar.Bar(),
"(",progressbar.ETA(),")"
],
max_value=360)
for i in bar(room):
npRoom.append(np.array(i))
return np.array(npRoom)
def showImage():
t = randrange(359)
img_tk = ImageTk.PhotoImage(room[t])
label.configure(image=img_tk)
label.image = img_tk
root = tk.Tk()
label = tk.Label(root)
label.pack()
btn = tk.Button(root,text="Start",command=showImage)
btn.pack()
room = loadRoom()
root.mainloop() | mit |
zhanglab/psamm | setup.py | 1 | 4946 | #!/usr/bin/env python
# This file is part of PSAMM.
#
# PSAMM is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PSAMM is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with PSAMM. If not, see <http://www.gnu.org/licenses/>.
#
# Copyright 2014-2017 Jon Lund Steffensen <[email protected]>
# Copyright 2015-2020 Keith Dufault-Thompson <[email protected]>
from __future__ import print_function
import sys
from setuptools import setup, find_packages
import pkg_resources
# Read long description
with open('README.rst') as f:
long_description = f.read()
# Test whether psamm-import is currently installed. Since the psamm-import
# functionality was moved to this package (except Excel importers), only newer
# versions of psamm-import are compatible with recent versions of PSAMM.
try:
pkg_resources.get_distribution('psamm-import <= 0.15.2')
except (pkg_resources.DistributionNotFound,
pkg_resources.VersionConflict):
pass
else:
msg = (
'Please upgrade or uninstall psamm-import before upgrading psamm:\n'
'$ pip install --upgrade psamm-import\n'
' OR\n'
'$ pip uninstall psamm-import'
'\n\n'
' The functionality of the psamm-import package has been moved into'
' the psamm package, and the psamm-import package now only contains'
' the model-specific Excel importers.')
print(msg, file=sys.stderr)
sys.exit(1)
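# The [psamm.commands] and [psamm.importer] groups below expose plugins as
# setuptools entry points ("name = module:Class"); the psamm command-line
# tools can discover these groups at runtime, so external packages are able
# to register additional commands and importers in the same way.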
setup(
name='psamm',
version='1.1.2',
description='PSAMM metabolic modeling tools',
maintainer='Jon Lund Steffensen',
maintainer_email='[email protected]',
url='https://github.com/zhanglab/psamm',
license='GNU GPLv3+',
long_description=long_description,
classifiers=[
'Development Status :: 4 - Beta',
(
'License :: OSI Approved :: '
'GNU General Public License v3 or later (GPLv3+)'),
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
],
packages=find_packages(),
entry_points='''
[console_scripts]
psamm-model = psamm.command:main
psamm-sbml-model = psamm.command:main_sbml
psamm-list-lpsolvers = psamm.lpsolver.generic:list_solvers
psamm-import = psamm.importer:main
psamm-import-bigg = psamm.importer:main_bigg
[psamm.commands]
chargecheck = psamm.commands.chargecheck:ChargeBalanceCommand
console = psamm.commands.console:ConsoleCommand
dupcheck = psamm.commands.duplicatescheck:DuplicatesCheck
excelexport = psamm.commands.excelexport:ExcelExportCommand
fastgapfill = psamm.commands.fastgapfill:FastGapFillCommand
fba = psamm.commands.fba:FluxBalanceCommand
fluxcheck = psamm.commands.fluxcheck:FluxConsistencyCommand
fluxcoupling = psamm.commands.fluxcoupling:FluxCouplingCommand
formulacheck = psamm.commands.formulacheck:FormulaBalanceCommand
fva = psamm.commands.fva:FluxVariabilityCommand
gapcheck = psamm.commands.gapcheck:GapCheckCommand
gapfill = psamm.commands.gapfill:GapFillCommand
genedelete = psamm.commands.genedelete:GeneDeletionCommand
gimme = psamm.commands.gimme:GimmeCommand
masscheck = psamm.commands.masscheck:MassConsistencyCommand
primarypairs = psamm.commands.primarypairs:PrimaryPairsCommand
randomsparse = psamm.commands.randomsparse:RandomSparseNetworkCommand
robustness = psamm.commands.robustness:RobustnessCommand
sbmlexport = psamm.commands.sbmlexport:SBMLExport
search = psamm.commands.search:SearchCommand
tableexport = psamm.commands.tableexport:ExportTableCommand
psammotate = psamm.commands.psammotate:PsammotateCommand
modelmapping = psamm.commands.model_mapping:ModelMappingCommand
vis = psamm.commands.vis:VisualizationCommand
tmfa = psamm.commands.tmfa:TMFACommand
[psamm.importer]
JSON = psamm.importers.cobrajson:Importer
SBML = psamm.importers.sbml:NonstrictImporter
SBML-strict = psamm.importers.sbml:StrictImporter
MATLAB = psamm.importers.matlab:Importer
''',
test_suite='psamm.tests',
install_requires=[
'pyyaml>=4.2b1',
'six',
'xlsxwriter',
'numpy',
'scipy',
'future',
'pandas'
],
extras_require={
'docs': ['sphinx', 'sphinx_rtd_theme', 'mock']
})
| gpl-3.0 |
beni55/networkx | doc/make_examples_rst.py | 35 | 5461 | """
generate the rst files for the examples by iterating over the networkx examples
"""
# This code was developed from the Matplotlib gen_rst.py module
# and is distributed with the same license as Matplotlib
from __future__ import print_function
import os, glob
import os
import re
import sys
#fileList = []
#rootdir = '../../examples'
def out_of_date(original, derived):
"""
Returns True if derivative is out-of-date wrt original,
both of which are full file paths.
    TODO: this check isn't adequate in some cases. E.g., if we discover
    a bug when building the examples, the original and derived
    will be unchanged but we still want to force a rebuild. We can
manually remove from _static, but we may need another solution
"""
return (not os.path.exists(derived) or
os.stat(derived).st_mtime < os.stat(original).st_mtime)
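# For example, out_of_date('examples/foo.py', 'source/static/examples/foo.py')
# returns True when the copied file is missing or older than the example script.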
def main(exampledir,sourcedir):
noplot_regex = re.compile(r"#\s*-\*-\s*noplot\s*-\*-")
datad = {}
for root, subFolders, files in os.walk(exampledir):
for fname in files:
if ( fname.startswith('.') or fname.startswith('#') or fname.startswith('_') or
fname.find('.svn')>=0 or not fname.endswith('.py') ):
continue
fullpath = os.path.join(root,fname)
contents = file(fullpath).read()
# indent
relpath = os.path.split(root)[-1]
datad.setdefault(relpath, []).append((fullpath, fname, contents))
subdirs = datad.keys()
subdirs.sort()
output_dir=os.path.join(sourcedir,'examples')
if not os.path.exists(output_dir):
os.makedirs(output_dir)
fhindex = file(os.path.join(sourcedir,'examples','index.rst'), 'w')
fhindex.write("""\
.. _examples-index:
*****************
NetworkX Examples
*****************
.. only:: html
:Release: |version|
:Date: |today|
.. toctree::
:maxdepth: 2
""")
for subdir in subdirs:
output_dir= os.path.join(sourcedir,'examples',subdir)
if not os.path.exists(output_dir):
os.makedirs(output_dir)
static_dir = os.path.join(sourcedir, 'static', 'examples')
if not os.path.exists(static_dir):
os.makedirs(static_dir)
subdirIndexFile = os.path.join(subdir, 'index.rst')
fhsubdirIndex = file(os.path.join(output_dir,'index.rst'), 'w')
fhindex.write(' %s\n\n'%subdirIndexFile)
#thumbdir = '../_static/plot_directive/mpl_examples/%s/thumbnails/'%subdir
#for thumbname in glob.glob(os.path.join(thumbdir,'*.png')):
# fhindex.write(' %s\n'%thumbname)
fhsubdirIndex.write("""\
.. _%s-examples-index:
##############################################
%s
##############################################
.. only:: html
:Release: |version|
:Date: |today|
.. toctree::
:maxdepth: 1
"""%(subdir, subdir.title()))
data = datad[subdir]
data.sort()
#parts = os.path.split(static_dir)
#thumb_dir = ('../'*(len(parts)-1)) + os.path.join(static_dir, 'thumbnails')
for fullpath, fname, contents in data:
basename, ext = os.path.splitext(fname)
static_file = os.path.join(static_dir, fname)
#thumbfile = os.path.join(thumb_dir, '%s.png'%basename)
#print ' static_dir=%s, basename=%s, fullpath=%s, fname=%s, thumb_dir=%s, thumbfile=%s'%(static_dir, basename, fullpath, fname, thumb_dir, thumbfile)
rstfile = '%s.rst'%basename
outfile = os.path.join(output_dir, rstfile)
fhsubdirIndex.write(' %s\n'%rstfile)
if (not out_of_date(fullpath, static_file) and
not out_of_date(fullpath, outfile)):
continue
print('%s/%s' % (subdir,fname))
fhstatic = file(static_file, 'w')
fhstatic.write(contents)
fhstatic.close()
fh = file(outfile, 'w')
fh.write('.. _%s-%s:\n\n'%(subdir, basename))
base=fname.partition('.')[0]
title = '%s'%(base.replace('_',' ').title())
#title = '<img src=%s> %s example code: %s'%(thumbfile, subdir, fname)
fh.write(title + '\n')
fh.write('='*len(title) + '\n\n')
pngname=base+".png"
png=os.path.join(static_dir,pngname)
linkname = os.path.join('..', '..', 'static', 'examples')
if os.path.exists(png):
fh.write('.. image:: %s \n\n'%os.path.join(linkname,pngname))
linkname = os.path.join('..', '..', '_static', 'examples')
fh.write("[`source code <%s>`_]\n\n::\n\n" % os.path.join(linkname,fname))
# indent the contents
contents = '\n'.join([' %s'%row.rstrip() for row in contents.split('\n')])
fh.write(contents)
# fh.write('\n\nKeywords: python, matplotlib, pylab, example, codex (see :ref:`how-to-search-examples`)')
fh.close()
fhsubdirIndex.close()
fhindex.close()
if __name__ == '__main__':
import sys
try:
arg0,arg1,arg2=sys.argv[:3]
except:
arg0=sys.argv[0]
print("""
Usage: %s exampledir sourcedir
exampledir: a directory containing the python code for the examples.
sourcedir: a directory to put the generated documentation source for these examples.
""" % (arg0))
else:
main(arg1,arg2)
| bsd-3-clause |
mattilyra/scikit-learn | examples/cluster/plot_affinity_propagation.py | 349 | 2304 | """
=================================================
Demo of affinity propagation clustering algorithm
=================================================
Reference:
Brendan J. Frey and Delbert Dueck, "Clustering by Passing Messages
Between Data Points", Science Feb. 2007
"""
print(__doc__)
from sklearn.cluster import AffinityPropagation
from sklearn import metrics
from sklearn.datasets.samples_generator import make_blobs
##############################################################################
# Generate sample data
centers = [[1, 1], [-1, -1], [1, -1]]
X, labels_true = make_blobs(n_samples=300, centers=centers, cluster_std=0.5,
random_state=0)
##############################################################################
# Compute Affinity Propagation
af = AffinityPropagation(preference=-50).fit(X)
cluster_centers_indices = af.cluster_centers_indices_
labels = af.labels_
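# cluster_centers_indices_ holds the indices of the exemplar samples chosen by
# affinity propagation; labels_ assigns every sample to one of those exemplars.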
n_clusters_ = len(cluster_centers_indices)
print('Estimated number of clusters: %d' % n_clusters_)
print("Homogeneity: %0.3f" % metrics.homogeneity_score(labels_true, labels))
print("Completeness: %0.3f" % metrics.completeness_score(labels_true, labels))
print("V-measure: %0.3f" % metrics.v_measure_score(labels_true, labels))
print("Adjusted Rand Index: %0.3f"
% metrics.adjusted_rand_score(labels_true, labels))
print("Adjusted Mutual Information: %0.3f"
% metrics.adjusted_mutual_info_score(labels_true, labels))
print("Silhouette Coefficient: %0.3f"
% metrics.silhouette_score(X, labels, metric='sqeuclidean'))
##############################################################################
# Plot result
import matplotlib.pyplot as plt
from itertools import cycle
plt.close('all')
plt.figure(1)
plt.clf()
colors = cycle('bgrcmykbgrcmykbgrcmykbgrcmyk')
for k, col in zip(range(n_clusters_), colors):
class_members = labels == k
cluster_center = X[cluster_centers_indices[k]]
plt.plot(X[class_members, 0], X[class_members, 1], col + '.')
plt.plot(cluster_center[0], cluster_center[1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=14)
for x in X[class_members]:
plt.plot([cluster_center[0], x[0]], [cluster_center[1], x[1]], col)
plt.title('Estimated number of clusters: %d' % n_clusters_)
plt.show()
| bsd-3-clause |
neet-ai/AndroidApp | drag-sort-listview-master/tools/dslv.py | 6 | 7243 | from matplotlib.lines import Line2D
from matplotlib.text import Text
from matplotlib.patches import Rectangle
import pylab
from xml.etree.ElementTree import ElementTree
width = 300
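# Visualizer for DragSortListView debug dumps: each DSLVState parsed from
# dslv_state.txt records item tops/bottoms, shuffle edges and the floating
# view position, and StateAnimator steps through the saved frames with the
# arrow keys (left/right by one frame, up/down by a page of frames).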
class ItemArtist:
def __init__(self, position, state):
self.position = position
indx = state.positions.index(position)
self.top = -state.tops[indx]
self.top_line, = pylab.plot([0,width], 2*[self.top], c='b')
self.bottom = -state.bottoms[indx]
self.bottom_line, = pylab.plot([0,width], 2*[self.bottom], c='b')
self.edge = -state.edges[indx]
self.edge_line, = pylab.plot([0,width], 2*[self.edge], c='g')
self.label = Text(width/2, (self.top+self.bottom)/2,
str(position), va='center', ha='center')
self.axes = pylab.gca()
self.axes.add_artist(self.label)
self.src_box = None
self.exp_box = None
self._check_boxes(state)
def _check_boxes(self, state):
if self.position == state.src:
if self.src_box == None:
self.src_box = Rectangle((0, self.bottom), width,
self.top - self.bottom, fill=True, ec=None, fc='0.7')
self.axes.add_patch(self.src_box)
else:
self.src_box.set_y(self.bottom)
self.src_box.set_height(self.top - self.bottom)
elif self.position == state.exp1:
if state.exp1 < state.src:
gap_bottom = self.top - state.exp1_gap
else:
gap_bottom = self.bottom
if self.exp_box == None:
self.exp_box = Rectangle((0,gap_bottom), width,
state.exp1_gap, fill=True, ec=None, fc='0.7')
self.axes.add_patch(self.exp_box)
else:
self.exp_box.set_y(gap_bottom)
self.exp_box.set_height(state.exp1_gap)
elif self.position == state.exp2:
if state.exp2 < state.src:
gap_bottom = self.top - state.exp2_gap
else:
gap_bottom = self.bottom
if self.exp_box == None:
self.exp_box = Rectangle((0,gap_bottom), width, state.exp2_gap,
fill=True, ec=None, fc='0.7')
self.axes.add_patch(self.exp_box)
else:
self.exp_box.set_y(gap_bottom)
self.exp_box.set_height(state.exp2_gap)
else:
if self.src_box != None:
self.src_box.remove()
self.src_box = None
if self.exp_box != None:
self.exp_box.remove()
self.exp_box = None
def inState(self, state):
return self.position in state.positions
def update(self, position, state):
moved = False
if position != self.position:
self.position = position
self.label.set_text(str(position))
indx = state.positions.index(self.position)
old_top = self.top
self.top = -state.tops[indx]
if old_top != self.top:
self.top_line.set_ydata(2*[self.top])
moved = True
old_bottom = self.bottom
self.bottom = -state.bottoms[indx]
if old_bottom != self.bottom:
self.bottom_line.set_ydata(2*[self.bottom])
moved = True
old_edge = self.edge
self.edge = -state.edges[indx]
if old_edge != self.edge:
self.edge_line.set_ydata(2*[self.edge])
if moved:
# adjust label, blank spot, etc.
self.label.set_y((self.top + self.bottom)/2)
self._check_boxes(state)
def remove(self):
self.edge_line.remove()
self.top_line.remove()
self.bottom_line.remove()
self.label.remove()
if self.src_box != None:
self.src_box.remove()
if self.exp_box != None:
self.exp_box.remove()
class StateArtist:
xbuff = 40
ybuff = 100
def __init__(self, state):
self.fig = pylab.figure(figsize=(5,9))
self.axes = self.fig.add_subplot(111)
self.axes.set_aspect('equal')
self.axes.set_ylim((-self.ybuff - state.height, self.ybuff))
self.axes.set_xlim((-self.xbuff, width + self.xbuff))
self.float_y = -state.float_y
self.float_y_line, = pylab.plot([0,width], 2*[self.float_y],
c='r', lw=2)
self.items = []
self.update(state)
self.axes.add_patch(Rectangle((0, -state.height), width, state.height,
fill=False, ls='dashed', ec='0.7'))
def update(self, state):
# update floatView location
old_float_y = self.float_y
self.float_y = -state.float_y
if old_float_y != self.float_y:
self.float_y_line.set_ydata(2*[self.float_y])
updatedPos = []
toRecycle = []
for item in self.items:
if item.inState(state):
item.update(item.position, state)
updatedPos.append(item.position)
else:
toRecycle.append(item)
posSet = set(state.positions)
updatedPosSet = set(updatedPos)
unupdatedPosSet = posSet.symmetric_difference(updatedPosSet)
for position in unupdatedPosSet:
if len(toRecycle) != 0:
item = toRecycle.pop(-1)
item.update(position, state)
else:
item = ItemArtist(position, state)
self.items.append(item)
if len(toRecycle) != 0:
for item in toRecycle:
item.remove() #remove artists from current plot
self.items.remove(item)
self.fig.canvas.draw()
class State:
def __init__(self, element):
self.positions = map(int, element.find("Positions").text.split(",")[:-1])
self.tops = map(int, element.find("Tops").text.split(",")[:-1])
self.bottoms = map(int, element.find("Bottoms").text.split(",")[:-1])
self.count = len(self.positions)
self.edges = map(int, element.find("ShuffleEdges").text.split(",")[:-1])
self.src = int(element.find("SrcPos").text)
self.src_h = int(element.find("SrcHeight").text)
self.exp1 = int(element.find("FirstExpPos").text)
self.exp1_gap = int(element.find("FirstExpBlankHeight").text)
self.exp2 = int(element.find("SecondExpPos").text)
self.exp2_gap = int(element.find("SecondExpBlankHeight").text)
self.height = int(element.find("ViewHeight").text)
self.lasty = int(element.find("LastY").text)
self.float_y = int(element.find("FloatY").text)
class StateAnimator:
page_frames = 30
def __init__(self, states, startFrame=0):
self.states = states
self.count = len(states)
if startFrame < 0 or startFrame >= self.count:
self.curr = self.count - 1
else:
self.curr = startFrame
self.state_artist = StateArtist(self.states[self.curr])
self.state_artist.fig.canvas.mpl_connect('key_press_event', self.flip)
pylab.show()
def flip(self, event):
#print event.key
if event.key == 'right':
self.curr += 1
elif event.key == 'left':
self.curr -= 1
elif event.key == 'up':
self.curr -= self.page_frames
elif event.key == 'down':
self.curr += self.page_frames
else:
return
if self.curr >= self.count:
print "reached end of saved motions"
self.curr = self.count - 1
elif self.curr < 0:
print "reached beginning of saved motions"
self.curr = 0
else:
print "flipped to frame " + str(self.curr)
self.state_artist.update(self.states[self.curr])
#self.ax.clear()
def getStates(file):
tree = ElementTree();
tree.parse(file);
root = tree.getroot()
return map(State, list(root.iter("DSLVState")))
if __name__ == "__main__":
states = getStates("dslv_state.txt")
StateAnimator(states, startFrame=-1)
| mit |
kleskjr/scipy | scipy/stats/_binned_statistic.py | 28 | 25272 | from __future__ import division, print_function, absolute_import
import warnings
import numpy as np
from scipy._lib.six import callable, xrange
from collections import namedtuple
__all__ = ['binned_statistic',
'binned_statistic_2d',
'binned_statistic_dd']
BinnedStatisticResult = namedtuple('BinnedStatisticResult',
('statistic', 'bin_edges', 'binnumber'))
def binned_statistic(x, values, statistic='mean',
bins=10, range=None):
"""
Compute a binned statistic for one or more sets of data.
This is a generalization of a histogram function. A histogram divides
the space into bins, and returns the count of the number of points in
each bin. This function allows the computation of the sum, mean, median,
or other statistic of the values (or set of values) within each bin.
Parameters
----------
x : (N,) array_like
A sequence of values to be binned.
values : (N,) array_like or list of (N,) array_like
The data on which the statistic will be computed. This must be
the same shape as `x`, or a set of sequences - each the same shape as
`x`. If `values` is a set of sequences, the statistic will be computed
on each independently.
statistic : string or callable, optional
The statistic to compute (default is 'mean').
The following statistics are available:
* 'mean' : compute the mean of values for points within each bin.
Empty bins will be represented by NaN.
* 'median' : compute the median of values for points within each
bin. Empty bins will be represented by NaN.
* 'count' : compute the count of points within each bin. This is
identical to an unweighted histogram. `values` array is not
referenced.
* 'sum' : compute the sum of values for points within each bin.
This is identical to a weighted histogram.
* function : a user-defined function which takes a 1D array of
values, and outputs a single numerical statistic. This function
will be called on the values in each bin. Empty bins will be
represented by function([]), or NaN if this returns an error.
bins : int or sequence of scalars, optional
If `bins` is an int, it defines the number of equal-width bins in the
given range (10 by default). If `bins` is a sequence, it defines the
bin edges, including the rightmost edge, allowing for non-uniform bin
widths. Values in `x` that are smaller than lowest bin edge are
assigned to bin number 0, values beyond the highest bin are assigned to
``bins[-1]``. If the bin edges are specified, the number of bins will
be, (nx = len(bins)-1).
range : (float, float) or [(float, float)], optional
The lower and upper range of the bins. If not provided, range
is simply ``(x.min(), x.max())``. Values outside the range are
ignored.
Returns
-------
statistic : array
The values of the selected statistic in each bin.
bin_edges : array of dtype float
Return the bin edges ``(length(statistic)+1)``.
binnumber: 1-D ndarray of ints
Indices of the bins (corresponding to `bin_edges`) in which each value
of `x` belongs. Same length as `values`. A binnumber of `i` means the
corresponding value is between (bin_edges[i-1], bin_edges[i]).
See Also
--------
numpy.digitize, numpy.histogram, binned_statistic_2d, binned_statistic_dd
Notes
-----
All but the last (righthand-most) bin is half-open. In other words, if
`bins` is ``[1, 2, 3, 4]``, then the first bin is ``[1, 2)`` (including 1,
but excluding 2) and the second ``[2, 3)``. The last bin, however, is
``[3, 4]``, which *includes* 4.
.. versionadded:: 0.11.0
Examples
--------
>>> from scipy import stats
>>> import matplotlib.pyplot as plt
First some basic examples:
Create two evenly spaced bins in the range of the given sample, and sum the
corresponding values in each of those bins:
>>> values = [1.0, 1.0, 2.0, 1.5, 3.0]
>>> stats.binned_statistic([1, 1, 2, 5, 7], values, 'sum', bins=2)
(array([ 4. , 4.5]), array([ 1., 4., 7.]), array([1, 1, 1, 2, 2]))
Multiple arrays of values can also be passed. The statistic is calculated
on each set independently:
>>> values = [[1.0, 1.0, 2.0, 1.5, 3.0], [2.0, 2.0, 4.0, 3.0, 6.0]]
>>> stats.binned_statistic([1, 1, 2, 5, 7], values, 'sum', bins=2)
(array([[ 4. , 4.5], [ 8. , 9. ]]), array([ 1., 4., 7.]),
array([1, 1, 1, 2, 2]))
>>> stats.binned_statistic([1, 2, 1, 2, 4], np.arange(5), statistic='mean',
... bins=3)
(array([ 1., 2., 4.]), array([ 1., 2., 3., 4.]),
array([1, 2, 1, 2, 3]))
As a second example, we now generate some random data of sailing boat speed
as a function of wind speed, and then determine how fast our boat is for
certain wind speeds:
>>> windspeed = 8 * np.random.rand(500)
>>> boatspeed = .3 * windspeed**.5 + .2 * np.random.rand(500)
>>> bin_means, bin_edges, binnumber = stats.binned_statistic(windspeed,
... boatspeed, statistic='median', bins=[1,2,3,4,5,6,7])
>>> plt.figure()
>>> plt.plot(windspeed, boatspeed, 'b.', label='raw data')
>>> plt.hlines(bin_means, bin_edges[:-1], bin_edges[1:], colors='g', lw=5,
... label='binned statistic of data')
>>> plt.legend()
Now we can use ``binnumber`` to select all datapoints with a windspeed
below 1:
>>> low_boatspeed = boatspeed[binnumber == 0]
As a final example, we will use ``bin_edges`` and ``binnumber`` to make a
plot of a distribution that shows the mean and distribution around that
mean per bin, on top of a regular histogram and the probability
distribution function:
>>> x = np.linspace(0, 5, num=500)
>>> x_pdf = stats.maxwell.pdf(x)
>>> samples = stats.maxwell.rvs(size=10000)
>>> bin_means, bin_edges, binnumber = stats.binned_statistic(x, x_pdf,
... statistic='mean', bins=25)
>>> bin_width = (bin_edges[1] - bin_edges[0])
>>> bin_centers = bin_edges[1:] - bin_width/2
>>> plt.figure()
>>> plt.hist(samples, bins=50, normed=True, histtype='stepfilled',
... alpha=0.2, label='histogram of data')
>>> plt.plot(x, x_pdf, 'r-', label='analytical pdf')
>>> plt.hlines(bin_means, bin_edges[:-1], bin_edges[1:], colors='g', lw=2,
... label='binned statistic of data')
>>> plt.plot((binnumber - 0.5) * bin_width, x_pdf, 'g.', alpha=0.5)
>>> plt.legend(fontsize=10)
>>> plt.show()
"""
try:
N = len(bins)
except TypeError:
N = 1
if N != 1:
bins = [np.asarray(bins, float)]
if range is not None:
if len(range) == 2:
range = [range]
medians, edges, binnumbers = binned_statistic_dd(
[x], values, statistic, bins, range)
return BinnedStatisticResult(medians, edges[0], binnumbers)
BinnedStatistic2dResult = namedtuple('BinnedStatistic2dResult',
('statistic', 'x_edge', 'y_edge',
'binnumber'))
def binned_statistic_2d(x, y, values, statistic='mean',
bins=10, range=None, expand_binnumbers=False):
"""
Compute a bidimensional binned statistic for one or more sets of data.
This is a generalization of a histogram2d function. A histogram divides
the space into bins, and returns the count of the number of points in
each bin. This function allows the computation of the sum, mean, median,
or other statistic of the values (or set of values) within each bin.
Parameters
----------
x : (N,) array_like
A sequence of values to be binned along the first dimension.
y : (N,) array_like
A sequence of values to be binned along the second dimension.
values : (N,) array_like or list of (N,) array_like
The data on which the statistic will be computed. This must be
the same shape as `x`, or a list of sequences - each with the same
shape as `x`. If `values` is such a list, the statistic will be
computed on each independently.
statistic : string or callable, optional
The statistic to compute (default is 'mean').
The following statistics are available:
* 'mean' : compute the mean of values for points within each bin.
Empty bins will be represented by NaN.
* 'median' : compute the median of values for points within each
bin. Empty bins will be represented by NaN.
* 'count' : compute the count of points within each bin. This is
identical to an unweighted histogram. `values` array is not
referenced.
* 'sum' : compute the sum of values for points within each bin.
This is identical to a weighted histogram.
* function : a user-defined function which takes a 1D array of
values, and outputs a single numerical statistic. This function
will be called on the values in each bin. Empty bins will be
represented by function([]), or NaN if this returns an error.
bins : int or [int, int] or array_like or [array, array], optional
The bin specification:
* the number of bins for the two dimensions (nx = ny = bins),
* the number of bins in each dimension (nx, ny = bins),
* the bin edges for the two dimensions (x_edge = y_edge = bins),
* the bin edges in each dimension (x_edge, y_edge = bins).
If the bin edges are specified, the number of bins will be,
(nx = len(x_edge)-1, ny = len(y_edge)-1).
range : (2,2) array_like, optional
The leftmost and rightmost edges of the bins along each dimension
(if not specified explicitly in the `bins` parameters):
[[xmin, xmax], [ymin, ymax]]. All values outside of this range will be
considered outliers and not tallied in the histogram.
expand_binnumbers : bool, optional
'False' (default): the returned `binnumber` is a shape (N,) array of
linearized bin indices.
'True': the returned `binnumber` is 'unraveled' into a shape (2,N)
ndarray, where each row gives the bin numbers in the corresponding
dimension.
See the `binnumber` returned value, and the `Examples` section.
.. versionadded:: 0.17.0
Returns
-------
statistic : (nx, ny) ndarray
The values of the selected statistic in each two-dimensional bin.
x_edge : (nx + 1) ndarray
The bin edges along the first dimension.
y_edge : (ny + 1) ndarray
The bin edges along the second dimension.
binnumber : (N,) array of ints or (2,N) ndarray of ints
This assigns to each element of `sample` an integer that represents the
bin in which this observation falls. The representation depends on the
`expand_binnumbers` argument. See `Notes` for details.
See Also
--------
numpy.digitize, numpy.histogram2d, binned_statistic, binned_statistic_dd
Notes
-----
Binedges:
All but the last (righthand-most) bin is half-open. In other words, if
`bins` is ``[1, 2, 3, 4]``, then the first bin is ``[1, 2)`` (including 1,
but excluding 2) and the second ``[2, 3)``. The last bin, however, is
``[3, 4]``, which *includes* 4.
`binnumber`:
This returned argument assigns to each element of `sample` an integer that
represents the bin in which it belongs. The representation depends on the
`expand_binnumbers` argument. If 'False' (default): The returned
`binnumber` is a shape (N,) array of linearized indices mapping each
element of `sample` to its corresponding bin (using row-major ordering).
If 'True': The returned `binnumber` is a shape (2,N) ndarray where
each row indicates bin placements for each dimension respectively. In each
dimension, a binnumber of `i` means the corresponding value is between
(D_edge[i-1], D_edge[i]), where 'D' is either 'x' or 'y'.
.. versionadded:: 0.11.0
Examples
--------
>>> from scipy import stats
Calculate the counts with explicit bin-edges:
>>> x = [0.1, 0.1, 0.1, 0.6]
>>> y = [2.1, 2.6, 2.1, 2.1]
>>> binx = [0.0, 0.5, 1.0]
>>> biny = [2.0, 2.5, 3.0]
>>> ret = stats.binned_statistic_2d(x, y, None, 'count', bins=[binx,biny])
>>> ret.statistic
array([[ 2., 1.],
[ 1., 0.]])
The bin in which each sample is placed is given by the `binnumber`
returned parameter. By default, these are the linearized bin indices:
>>> ret.binnumber
array([5, 6, 5, 9])
The bin indices can also be expanded into separate entries for each
dimension using the `expand_binnumbers` parameter:
>>> ret = stats.binned_statistic_2d(x, y, None, 'count', bins=[binx,biny],
... expand_binnumbers=True)
>>> ret.binnumber
array([[1, 1, 1, 2],
[1, 2, 1, 1]])
Which shows that the first three elements belong in the xbin 1, and the
fourth into xbin 2; and so on for y.
"""
# This code is based on np.histogram2d
try:
N = len(bins)
except TypeError:
N = 1
if N != 1 and N != 2:
xedges = yedges = np.asarray(bins, float)
bins = [xedges, yedges]
medians, edges, binnumbers = binned_statistic_dd(
[x, y], values, statistic, bins, range,
expand_binnumbers=expand_binnumbers)
return BinnedStatistic2dResult(medians, edges[0], edges[1], binnumbers)
BinnedStatisticddResult = namedtuple('BinnedStatisticddResult',
('statistic', 'bin_edges',
'binnumber'))
def binned_statistic_dd(sample, values, statistic='mean',
bins=10, range=None, expand_binnumbers=False):
"""
Compute a multidimensional binned statistic for a set of data.
This is a generalization of a histogramdd function. A histogram divides
the space into bins, and returns the count of the number of points in
each bin. This function allows the computation of the sum, mean, median,
or other statistic of the values within each bin.
Parameters
----------
sample : array_like
Data to histogram passed as a sequence of D arrays of length N, or
as an (N,D) array.
values : (N,) array_like or list of (N,) array_like
The data on which the statistic will be computed. This must be
the same shape as `x`, or a list of sequences - each with the same
shape as `x`. If `values` is such a list, the statistic will be
computed on each independently.
statistic : string or callable, optional
The statistic to compute (default is 'mean').
The following statistics are available:
* 'mean' : compute the mean of values for points within each bin.
Empty bins will be represented by NaN.
* 'median' : compute the median of values for points within each
bin. Empty bins will be represented by NaN.
* 'count' : compute the count of points within each bin. This is
identical to an unweighted histogram. `values` array is not
referenced.
* 'sum' : compute the sum of values for points within each bin.
This is identical to a weighted histogram.
* function : a user-defined function which takes a 1D array of
values, and outputs a single numerical statistic. This function
will be called on the values in each bin. Empty bins will be
represented by function([]), or NaN if this returns an error.
bins : sequence or int, optional
The bin specification must be in one of the following forms:
* A sequence of arrays describing the bin edges along each dimension.
* The number of bins for each dimension (nx, ny, ... = bins).
* The number of bins for all dimensions (nx = ny = ... = bins).
range : sequence, optional
A sequence of lower and upper bin edges to be used if the edges are
        not given explicitly in `bins`. Defaults to the minimum and maximum
values along each dimension.
expand_binnumbers : bool, optional
'False' (default): the returned `binnumber` is a shape (N,) array of
linearized bin indices.
'True': the returned `binnumber` is 'unraveled' into a shape (D,N)
ndarray, where each row gives the bin numbers in the corresponding
dimension.
See the `binnumber` returned value, and the `Examples` section of
`binned_statistic_2d`.
.. versionadded:: 0.17.0
Returns
-------
statistic : ndarray, shape(nx1, nx2, nx3,...)
The values of the selected statistic in each two-dimensional bin.
bin_edges : list of ndarrays
A list of D arrays describing the (nxi + 1) bin edges for each
dimension.
binnumber : (N,) array of ints or (D,N) ndarray of ints
This assigns to each element of `sample` an integer that represents the
bin in which this observation falls. The representation depends on the
`expand_binnumbers` argument. See `Notes` for details.
See Also
--------
numpy.digitize, numpy.histogramdd, binned_statistic, binned_statistic_2d
Notes
-----
Binedges:
All but the last (righthand-most) bin is half-open in each dimension. In
other words, if `bins` is ``[1, 2, 3, 4]``, then the first bin is
``[1, 2)`` (including 1, but excluding 2) and the second ``[2, 3)``. The
last bin, however, is ``[3, 4]``, which *includes* 4.
`binnumber`:
This returned argument assigns to each element of `sample` an integer that
represents the bin in which it belongs. The representation depends on the
`expand_binnumbers` argument. If 'False' (default): The returned
`binnumber` is a shape (N,) array of linearized indices mapping each
element of `sample` to its corresponding bin (using row-major ordering).
If 'True': The returned `binnumber` is a shape (D,N) ndarray where
each row indicates bin placements for each dimension respectively. In each
dimension, a binnumber of `i` means the corresponding value is between
(bin_edges[D][i-1], bin_edges[D][i]), for each dimension 'D'.
.. versionadded:: 0.11.0
"""
known_stats = ['mean', 'median', 'count', 'sum', 'std']
if not callable(statistic) and statistic not in known_stats:
raise ValueError('invalid statistic %r' % (statistic,))
# `Ndim` is the number of dimensions (e.g. `2` for `binned_statistic_2d`)
# `Dlen` is the length of elements along each dimension.
# This code is based on np.histogramdd
try:
# `sample` is an ND-array.
Dlen, Ndim = sample.shape
except (AttributeError, ValueError):
# `sample` is a sequence of 1D arrays.
sample = np.atleast_2d(sample).T
Dlen, Ndim = sample.shape
# Store initial shape of `values` to preserve it in the output
values = np.asarray(values)
input_shape = list(values.shape)
# Make sure that `values` is 2D to iterate over rows
values = np.atleast_2d(values)
Vdim, Vlen = values.shape
# Make sure `values` match `sample`
if(statistic != 'count' and Vlen != Dlen):
raise AttributeError('The number of `values` elements must match the '
'length of each `sample` dimension.')
nbin = np.empty(Ndim, int) # Number of bins in each dimension
edges = Ndim * [None] # Bin edges for each dim (will be 2D array)
dedges = Ndim * [None] # Spacing between edges (will be 2D array)
try:
M = len(bins)
if M != Ndim:
raise AttributeError('The dimension of bins must be equal '
'to the dimension of the sample x.')
except TypeError:
bins = Ndim * [bins]
# Select range for each dimension
# Used only if number of bins is given.
if range is None:
smin = np.atleast_1d(np.array(sample.min(axis=0), float))
smax = np.atleast_1d(np.array(sample.max(axis=0), float))
else:
smin = np.zeros(Ndim)
smax = np.zeros(Ndim)
for i in xrange(Ndim):
smin[i], smax[i] = range[i]
# Make sure the bins have a finite width.
for i in xrange(len(smin)):
if smin[i] == smax[i]:
smin[i] = smin[i] - .5
smax[i] = smax[i] + .5
# Create edge arrays
for i in xrange(Ndim):
if np.isscalar(bins[i]):
nbin[i] = bins[i] + 2 # +2 for outlier bins
edges[i] = np.linspace(smin[i], smax[i], nbin[i] - 1)
else:
edges[i] = np.asarray(bins[i], float)
nbin[i] = len(edges[i]) + 1 # +1 for outlier bins
dedges[i] = np.diff(edges[i])
nbin = np.asarray(nbin)
# Compute the bin number each sample falls into, in each dimension
sampBin = {}
for i in xrange(Ndim):
sampBin[i] = np.digitize(sample[:, i], edges[i])
# Using `digitize`, values that fall on an edge are put in the right bin.
# For the rightmost bin, we want values equal to the right
# edge to be counted in the last bin, and not as an outlier.
for i in xrange(Ndim):
# Find the rounding precision
decimal = int(-np.log10(dedges[i].min())) + 6
# Find which points are on the rightmost edge.
on_edge = np.where(np.around(sample[:, i], decimal) ==
np.around(edges[i][-1], decimal))[0]
# Shift these points one bin to the left.
sampBin[i][on_edge] -= 1
# Compute the sample indices in the flattened statistic matrix.
ni = nbin.argsort()
# `binnumbers` is which bin (in linearized `Ndim` space) each sample goes
binnumbers = np.zeros(Dlen, int)
for i in xrange(0, Ndim - 1):
binnumbers += sampBin[ni[i]] * nbin[ni[i + 1:]].prod()
binnumbers += sampBin[ni[-1]]
result = np.empty([Vdim, nbin.prod()], float)
if statistic == 'mean':
result.fill(np.nan)
flatcount = np.bincount(binnumbers, None)
a = flatcount.nonzero()
for vv in xrange(Vdim):
flatsum = np.bincount(binnumbers, values[vv])
result[vv, a] = flatsum[a] / flatcount[a]
elif statistic == 'std':
result.fill(0)
flatcount = np.bincount(binnumbers, None)
a = flatcount.nonzero()
for vv in xrange(Vdim):
flatsum = np.bincount(binnumbers, values[vv])
flatsum2 = np.bincount(binnumbers, values[vv] ** 2)
result[vv, a] = np.sqrt(flatsum2[a] / flatcount[a] -
(flatsum[a] / flatcount[a]) ** 2)
elif statistic == 'count':
result.fill(0)
flatcount = np.bincount(binnumbers, None)
a = np.arange(len(flatcount))
result[:, a] = flatcount[np.newaxis, :]
elif statistic == 'sum':
result.fill(0)
for vv in xrange(Vdim):
flatsum = np.bincount(binnumbers, values[vv])
a = np.arange(len(flatsum))
result[vv, a] = flatsum
elif statistic == 'median':
result.fill(np.nan)
for i in np.unique(binnumbers):
for vv in xrange(Vdim):
result[vv, i] = np.median(values[vv, binnumbers == i])
elif callable(statistic):
with warnings.catch_warnings():
# Numpy generates a warnings for mean/std/... with empty list
warnings.filterwarnings('ignore', category=RuntimeWarning)
old = np.seterr(invalid='ignore')
try:
null = statistic([])
except:
null = np.nan
np.seterr(**old)
result.fill(null)
for i in np.unique(binnumbers):
for vv in xrange(Vdim):
result[vv, i] = statistic(values[vv, binnumbers == i])
# Shape into a proper matrix
result = result.reshape(np.append(Vdim, np.sort(nbin)))
for i in xrange(nbin.size):
j = ni.argsort()[i]
        # Accommodate the extra `Vdim` dimension-zero with `+1`
result = result.swapaxes(i+1, j+1)
ni[i], ni[j] = ni[j], ni[i]
# Remove outliers (indices 0 and -1 for each bin-dimension).
core = [slice(None)] + Ndim * [slice(1, -1)]
result = result[core]
# Unravel binnumbers into an ndarray, each row the bins for each dimension
if(expand_binnumbers and Ndim > 1):
binnumbers = np.asarray(np.unravel_index(binnumbers, nbin))
if np.any(result.shape[1:] != nbin - 2):
raise RuntimeError('Internal Shape Error')
    # Reshape to have output (`result`) match input (`values`) shape
result = result.reshape(input_shape[:-1] + list(nbin-2))
return BinnedStatisticddResult(result, edges, binnumbers)
| bsd-3-clause |
ahaque/dsbowl | runTestSet.py | 1 | 1264 | import numpy as np
import matplotlib.pyplot as plt
# Make sure that caffe is on the python path:
caffe_root = '/home/albert/caffe/' # this file is expected to be in {caffe_root}/examples
import sys
sys.path.insert(0, caffe_root + 'python')
import caffe
# Set the right path to your model definition file, pretrained model weights,
# and the image you would like to classify.
MODEL_FILE = '/home/albert/caffe/models/bvlc_reference_caffenet/deploy.prototxt'
PRETRAINED = '/home/albert/caffe/models/bvlc_reference_caffenet/bvlc_reference_caffenet.caffemodel'
IMAGE_FILE = '/home/albert/caffe/examples/images/cat.jpg'
caffe.set_mode_gpu()
net = caffe.Classifier(MODEL_FILE, PRETRAINED,
mean=np.load(caffe_root + 'python/caffe/imagenet/ilsvrc_2012_mean.npy').mean(1).mean(1),
channel_swap=(2,1,0),
raw_scale=255,
image_dims=(256, 256))
input_image = caffe.io.load_image(IMAGE_FILE)
plt.imshow(input_image)
prediction = net.predict([input_image]) # predict takes any number of images, and formats them for the Caffe net automatically
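# prediction holds one row of class scores per input image; prediction[0] is
# the score vector for our single image, so prediction[0].argmax() below gives
# the index of the predicted class.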
print 'prediction shape:', prediction[0].shape
print prediction
plt.plot(prediction[0])
print 'predicted class:', prediction[0].argmax()
| mit |
vshtanko/scikit-learn | examples/svm/plot_svm_kernels.py | 329 | 1971 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
SVM-Kernels
=========================================================
Three different types of SVM-Kernels are displayed below.
The polynomial and RBF are especially useful when the
data-points are not linearly separable.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
# Our dataset and targets
X = np.c_[(.4, -.7),
(-1.5, -1),
(-1.4, -.9),
(-1.3, -1.2),
(-1.1, -.2),
(-1.2, -.4),
(-.5, 1.2),
(-1.5, 2.1),
(1, 1),
# --
(1.3, .8),
(1.2, .5),
(.2, -2),
(.5, -2.4),
(.2, -2.3),
(0, -2.7),
(1.3, 2.1)].T
Y = [0] * 8 + [1] * 8
# figure number
fignum = 1
# fit the model
for kernel in ('linear', 'poly', 'rbf'):
clf = svm.SVC(kernel=kernel, gamma=2)
clf.fit(X, Y)
# plot the line, the points, and the nearest vectors to the plane
plt.figure(fignum, figsize=(4, 3))
plt.clf()
plt.scatter(clf.support_vectors_[:, 0], clf.support_vectors_[:, 1], s=80,
facecolors='none', zorder=10)
plt.scatter(X[:, 0], X[:, 1], c=Y, zorder=10, cmap=plt.cm.Paired)
plt.axis('tight')
x_min = -3
x_max = 3
y_min = -3
y_max = 3
XX, YY = np.mgrid[x_min:x_max:200j, y_min:y_max:200j]
Z = clf.decision_function(np.c_[XX.ravel(), YY.ravel()])
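    # Z holds the signed decision-function value at every grid point; the
    # zero-level contour drawn below is the decision boundary.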
# Put the result into a color plot
Z = Z.reshape(XX.shape)
plt.figure(fignum, figsize=(4, 3))
plt.pcolormesh(XX, YY, Z > 0, cmap=plt.cm.Paired)
plt.contour(XX, YY, Z, colors=['k', 'k', 'k'], linestyles=['--', '-', '--'],
levels=[-.5, 0, .5])
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.xticks(())
plt.yticks(())
fignum = fignum + 1
plt.show()
| bsd-3-clause |
vivekmishra1991/scikit-learn | sklearn/manifold/isomap.py | 229 | 7169 | """Isomap for manifold learning"""
# Author: Jake Vanderplas -- <[email protected]>
# License: BSD 3 clause (C) 2011
import numpy as np
from ..base import BaseEstimator, TransformerMixin
from ..neighbors import NearestNeighbors, kneighbors_graph
from ..utils import check_array
from ..utils.graph import graph_shortest_path
from ..decomposition import KernelPCA
from ..preprocessing import KernelCenterer
class Isomap(BaseEstimator, TransformerMixin):
"""Isomap Embedding
Non-linear dimensionality reduction through Isometric Mapping
Read more in the :ref:`User Guide <isomap>`.
Parameters
----------
n_neighbors : integer
number of neighbors to consider for each point.
n_components : integer
number of coordinates for the manifold
eigen_solver : ['auto'|'arpack'|'dense']
'auto' : Attempt to choose the most efficient solver
for the given problem.
'arpack' : Use Arnoldi decomposition to find the eigenvalues
and eigenvectors.
'dense' : Use a direct solver (i.e. LAPACK)
for the eigenvalue decomposition.
tol : float
Convergence tolerance passed to arpack or lobpcg.
not used if eigen_solver == 'dense'.
max_iter : integer
Maximum number of iterations for the arpack solver.
not used if eigen_solver == 'dense'.
path_method : string ['auto'|'FW'|'D']
Method to use in finding shortest path.
'auto' : attempt to choose the best algorithm automatically.
'FW' : Floyd-Warshall algorithm.
'D' : Dijkstra's algorithm.
neighbors_algorithm : string ['auto'|'brute'|'kd_tree'|'ball_tree']
Algorithm to use for nearest neighbors search,
passed to neighbors.NearestNeighbors instance.
Attributes
----------
embedding_ : array-like, shape (n_samples, n_components)
Stores the embedding vectors.
kernel_pca_ : object
`KernelPCA` object used to implement the embedding.
training_data_ : array-like, shape (n_samples, n_features)
Stores the training data.
nbrs_ : sklearn.neighbors.NearestNeighbors instance
Stores nearest neighbors instance, including BallTree or KDtree
if applicable.
dist_matrix_ : array-like, shape (n_samples, n_samples)
Stores the geodesic distance matrix of training data.
References
----------
.. [1] Tenenbaum, J.B.; De Silva, V.; & Langford, J.C. A global geometric
framework for nonlinear dimensionality reduction. Science 290 (5500)
"""
def __init__(self, n_neighbors=5, n_components=2, eigen_solver='auto',
tol=0, max_iter=None, path_method='auto',
neighbors_algorithm='auto'):
self.n_neighbors = n_neighbors
self.n_components = n_components
self.eigen_solver = eigen_solver
self.tol = tol
self.max_iter = max_iter
self.path_method = path_method
self.neighbors_algorithm = neighbors_algorithm
self.nbrs_ = NearestNeighbors(n_neighbors=n_neighbors,
algorithm=neighbors_algorithm)
def _fit_transform(self, X):
X = check_array(X)
self.nbrs_.fit(X)
self.training_data_ = self.nbrs_._fit_X
self.kernel_pca_ = KernelPCA(n_components=self.n_components,
kernel="precomputed",
eigen_solver=self.eigen_solver,
tol=self.tol, max_iter=self.max_iter)
kng = kneighbors_graph(self.nbrs_, self.n_neighbors,
mode='distance')
self.dist_matrix_ = graph_shortest_path(kng,
method=self.path_method,
directed=False)
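        # Turn geodesic distances into the isomap kernel -0.5 * D**2;
        # KernelPCA centers this precomputed kernel before the
        # eigendecomposition.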
G = self.dist_matrix_ ** 2
G *= -0.5
self.embedding_ = self.kernel_pca_.fit_transform(G)
def reconstruction_error(self):
"""Compute the reconstruction error for the embedding.
Returns
-------
reconstruction_error : float
Notes
-------
The cost function of an isomap embedding is
``E = frobenius_norm[K(D) - K(D_fit)] / n_samples``
Where D is the matrix of distances for the input data X,
D_fit is the matrix of distances for the output embedding X_fit,
and K is the isomap kernel:
``K(D) = -0.5 * (I - 1/n_samples) * D^2 * (I - 1/n_samples)``
"""
G = -0.5 * self.dist_matrix_ ** 2
G_center = KernelCenterer().fit_transform(G)
evals = self.kernel_pca_.lambdas_
return np.sqrt(np.sum(G_center ** 2) - np.sum(evals ** 2)) / G.shape[0]
def fit(self, X, y=None):
"""Compute the embedding vectors for data X
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree, NearestNeighbors}
Sample data, shape = (n_samples, n_features), in the form of a
numpy array, precomputed tree, or NearestNeighbors
object.
Returns
-------
self : returns an instance of self.
"""
self._fit_transform(X)
return self
def fit_transform(self, X, y=None):
"""Fit the model from data in X and transform X.
Parameters
----------
X: {array-like, sparse matrix, BallTree, KDTree}
            Training vector, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new: array-like, shape (n_samples, n_components)
"""
self._fit_transform(X)
return self.embedding_
def transform(self, X):
"""Transform X.
This is implemented by linking the points X into the graph of geodesic
distances of the training data. First the `n_neighbors` nearest
neighbors of X are found in the training data, and from these the
shortest geodesic distances from each point in X to each point in
the training data are computed in order to construct the kernel.
The embedding of X is the projection of this kernel onto the
embedding vectors of the training set.
Parameters
----------
X: array-like, shape (n_samples, n_features)
Returns
-------
X_new: array-like, shape (n_samples, n_components)
"""
X = check_array(X)
distances, indices = self.nbrs_.kneighbors(X, return_distance=True)
#Create the graph of shortest distances from X to self.training_data_
# via the nearest neighbors of X.
#This can be done as a single array operation, but it potentially
# takes a lot of memory. To avoid that, use a loop:
G_X = np.zeros((X.shape[0], self.training_data_.shape[0]))
for i in range(X.shape[0]):
G_X[i] = np.min((self.dist_matrix_[indices[i]]
+ distances[i][:, None]), 0)
G_X **= 2
G_X *= -0.5
return self.kernel_pca_.transform(G_X)
| bsd-3-clause |
Eric89GXL/scikit-learn | examples/linear_model/plot_lasso_lars.py | 8 | 1059 | #!/usr/bin/env python
"""
=====================
Lasso path using LARS
=====================
Computes Lasso Path along the regularization parameter using the LARS
algorithm on the diabetes dataset. Each color represents a different
feature of the coefficient vector, and this is displayed as a function
of the regularization parameter.
"""
print(__doc__)
# Author: Fabian Pedregosa <[email protected]>
# Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
import numpy as np
import pylab as pl
from sklearn import linear_model
from sklearn import datasets
diabetes = datasets.load_diabetes()
X = diabetes.data
y = diabetes.target
print("Computing regularization path using the LARS ...")
alphas, _, coefs = linear_model.lars_path(X, y, method='lasso', verbose=True)
xx = np.sum(np.abs(coefs.T), axis=1)
xx /= xx[-1]
pl.plot(xx, coefs.T)
ymin, ymax = pl.ylim()
pl.vlines(xx, ymin, ymax, linestyle='dashed')
pl.xlabel('|coef| / max|coef|')
pl.ylabel('Coefficients')
pl.title('LASSO Path')
pl.axis('tight')
pl.show()
| bsd-3-clause |
pratapvardhan/pandas | pandas/tests/series/test_asof.py | 11 | 5289 | # coding=utf-8
import pytest
import numpy as np
from pandas import (offsets, Series, notna,
isna, date_range, Timestamp)
import pandas.util.testing as tm
from .common import TestData
class TestSeriesAsof(TestData):
def test_basic(self):
# array or list or dates
N = 50
rng = date_range('1/1/1990', periods=N, freq='53s')
ts = Series(np.random.randn(N), index=rng)
ts[15:30] = np.nan
dates = date_range('1/1/1990', periods=N * 3, freq='25s')
result = ts.asof(dates)
assert notna(result).all()
lb = ts.index[14]
ub = ts.index[30]
result = ts.asof(list(dates))
assert notna(result).all()
lb = ts.index[14]
ub = ts.index[30]
mask = (result.index >= lb) & (result.index < ub)
rs = result[mask]
assert (rs == ts[lb]).all()
val = result[result.index[result.index >= ub][0]]
assert ts[ub] == val
def test_scalar(self):
N = 30
rng = date_range('1/1/1990', periods=N, freq='53s')
ts = Series(np.arange(N), index=rng)
ts[5:10] = np.NaN
ts[15:20] = np.NaN
val1 = ts.asof(ts.index[7])
val2 = ts.asof(ts.index[19])
assert val1 == ts[4]
assert val2 == ts[14]
# accepts strings
val1 = ts.asof(str(ts.index[7]))
assert val1 == ts[4]
# in there
result = ts.asof(ts.index[3])
assert result == ts[3]
# no as of value
d = ts.index[0] - offsets.BDay()
assert np.isnan(ts.asof(d))
def test_with_nan(self):
# basic asof test
rng = date_range('1/1/2000', '1/2/2000', freq='4h')
s = Series(np.arange(len(rng)), index=rng)
r = s.resample('2h').mean()
result = r.asof(r.index)
expected = Series([0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6.],
index=date_range('1/1/2000', '1/2/2000', freq='2h'))
tm.assert_series_equal(result, expected)
r.iloc[3:5] = np.nan
result = r.asof(r.index)
expected = Series([0, 0, 1, 1, 1, 1, 3, 3, 4, 4, 5, 5, 6.],
index=date_range('1/1/2000', '1/2/2000', freq='2h'))
tm.assert_series_equal(result, expected)
r.iloc[-3:] = np.nan
result = r.asof(r.index)
expected = Series([0, 0, 1, 1, 1, 1, 3, 3, 4, 4, 4, 4, 4.],
index=date_range('1/1/2000', '1/2/2000', freq='2h'))
tm.assert_series_equal(result, expected)
def test_periodindex(self):
from pandas import period_range, PeriodIndex
# array or list or dates
N = 50
rng = period_range('1/1/1990', periods=N, freq='H')
ts = Series(np.random.randn(N), index=rng)
ts[15:30] = np.nan
dates = date_range('1/1/1990', periods=N * 3, freq='37min')
result = ts.asof(dates)
assert notna(result).all()
lb = ts.index[14]
ub = ts.index[30]
result = ts.asof(list(dates))
assert notna(result).all()
lb = ts.index[14]
ub = ts.index[30]
pix = PeriodIndex(result.index.values, freq='H')
mask = (pix >= lb) & (pix < ub)
rs = result[mask]
assert (rs == ts[lb]).all()
ts[5:10] = np.nan
ts[15:20] = np.nan
val1 = ts.asof(ts.index[7])
val2 = ts.asof(ts.index[19])
assert val1 == ts[4]
assert val2 == ts[14]
# accepts strings
val1 = ts.asof(str(ts.index[7]))
assert val1 == ts[4]
# in there
assert ts.asof(ts.index[3]) == ts[3]
# no as of value
d = ts.index[0].to_timestamp() - offsets.BDay()
assert isna(ts.asof(d))
def test_errors(self):
s = Series([1, 2, 3],
index=[Timestamp('20130101'),
Timestamp('20130103'),
Timestamp('20130102')])
# non-monotonic
assert not s.index.is_monotonic
with pytest.raises(ValueError):
s.asof(s.index[0])
# subset with Series
N = 10
rng = date_range('1/1/1990', periods=N, freq='53s')
s = Series(np.random.randn(N), index=rng)
with pytest.raises(ValueError):
s.asof(s.index[0], subset='foo')
def test_all_nans(self):
# GH 15713
# series is all nans
result = Series([np.nan]).asof([0])
expected = Series([np.nan])
tm.assert_series_equal(result, expected)
# testing non-default indexes
N = 50
rng = date_range('1/1/1990', periods=N, freq='53s')
dates = date_range('1/1/1990', periods=N * 3, freq='25s')
result = Series(np.nan, index=rng).asof(dates)
expected = Series(np.nan, index=dates)
tm.assert_series_equal(result, expected)
# testing scalar input
date = date_range('1/1/1990', periods=N * 3, freq='25s')[0]
result = Series(np.nan, index=rng).asof(date)
assert isna(result)
# test name is propagated
result = Series(np.nan, index=[1, 2, 3, 4], name='test').asof([4, 5])
expected = Series(np.nan, index=[4, 5], name='test')
tm.assert_series_equal(result, expected)
| bsd-3-clause |
xzh86/scikit-learn | examples/bicluster/plot_spectral_coclustering.py | 276 | 1736 | """
==============================================
A demo of the Spectral Co-Clustering algorithm
==============================================
This example demonstrates how to generate a dataset and bicluster it
using the Spectral Co-Clustering algorithm.
The dataset is generated using the ``make_biclusters`` function, which
creates a matrix of small values and implants biclusters with large
values. The rows and columns are then shuffled and passed to the
Spectral Co-Clustering algorithm. Rearranging the shuffled matrix to
make biclusters contiguous shows how accurately the algorithm found
the biclusters.
"""
print(__doc__)
# Author: Kemal Eren <[email protected]>
# License: BSD 3 clause
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import make_biclusters
from sklearn.datasets import samples_generator as sg
from sklearn.cluster.bicluster import SpectralCoclustering
from sklearn.metrics import consensus_score
data, rows, columns = make_biclusters(
shape=(300, 300), n_clusters=5, noise=5,
shuffle=False, random_state=0)
plt.matshow(data, cmap=plt.cm.Blues)
plt.title("Original dataset")
data, row_idx, col_idx = sg._shuffle(data, random_state=0)
plt.matshow(data, cmap=plt.cm.Blues)
plt.title("Shuffled dataset")
model = SpectralCoclustering(n_clusters=5, random_state=0)
model.fit(data)
score = consensus_score(model.biclusters_,
(rows[:, row_idx], columns[:, col_idx]))
print("consensus score: {:.3f}".format(score))
fit_data = data[np.argsort(model.row_labels_)]
fit_data = fit_data[:, np.argsort(model.column_labels_)]
plt.matshow(fit_data, cmap=plt.cm.Blues)
plt.title("After biclustering; rearranged to show biclusters")
plt.show()
| bsd-3-clause |
robcarver17/pysystemtrade | sysproduction/reporting/roll_report.py | 1 | 6345 | import datetime
import pandas as pd
from sysproduction.data.volumes import diagVolumes
from sysproduction.data.contracts import dataContracts
from sysproduction.data.prices import diagPrices
from sysproduction.data.positions import diagPositions
from syscore.objects import header, table, body_text
# We want a roll report (We could merge this into another kind of report)
# We want to be able to have it emailed, or run it offline
# To have it emailed, we'll call the report function and optionally pass the output to a text file instead of stdout
# Reports consist of multiple calls to functions with data object, each of which returns a displayable object
# We also chuck in a title and a timestamp
ALL_ROLL_INSTRUMENTS = "ALL"
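# Illustrative usage (assumes a configured dataBlob instance named `data`; the
# returned report elements are header/table/body_text objects, printed here
# only for the sake of the sketch):
#
#   formatted_output = roll_info(data, instrument_code=ALL_ROLL_INSTRUMENTS)
#   for element in formatted_output:
#       print(element)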
def roll_info(data, instrument_code=ALL_ROLL_INSTRUMENTS):
"""
Get some roll info. For all markets which are:
- currently rolling
- need to have roll status changed now or in the near future
We calculate:
- Volume data
- Curve data
- Length to expiry data (contract and/or carry)
- Current roll status
- Suggested roll status
:param: data blob
:return: list of pd.DataFrame
"""
diag_prices = diagPrices(data)
if instrument_code == ALL_ROLL_INSTRUMENTS:
list_of_instruments = diag_prices.get_list_of_instruments_in_multiple_prices()
else:
list_of_instruments = [instrument_code]
results_dict = {}
for instrument_code in list_of_instruments:
roll_data = get_roll_data_for_instrument(instrument_code, data)
results_dict[instrument_code] = roll_data
formatted_output = format_roll_data_for_instrument(results_dict)
return formatted_output
def get_roll_data_for_instrument(instrument_code, data):
"""
Get roll data for an individual instrument
:param instrument_code: str
:param data: dataBlob
:return:
"""
c_data = dataContracts(data)
relevant_contract_dict = c_data.get_labelled_dict_of_current_contracts(
instrument_code
)
list_of_relevant_contract_date_str = relevant_contract_dict["contracts"]
contract_labels = relevant_contract_dict["labels"]
v_data = diagVolumes(data)
volumes = v_data.get_normalised_smoothed_volumes_of_contract_list(
instrument_code, list_of_relevant_contract_date_str
)
# length to expiries / length to suggested roll
price_expiry = c_data.get_priced_expiry(instrument_code)
carry_expiry = c_data.get_carry_expiry(instrument_code)
when_to_roll = c_data.when_to_roll_priced_contract(instrument_code)
now = datetime.datetime.now()
price_expiry_days = (price_expiry - now).days
carry_expiry_days = (carry_expiry - now).days
when_to_roll_days = (when_to_roll - now).days
# roll status
diag_positions = diagPositions(data)
roll_status = diag_positions.get_name_of_roll_state(instrument_code)
# Positions
positions = diag_positions.get_positions_for_instrument_and_contract_list(
instrument_code, list_of_relevant_contract_date_str
)
results_dict_code = dict(
code=instrument_code,
status=roll_status,
roll_expiry=when_to_roll_days,
price_expiry=price_expiry_days,
carry_expiry=carry_expiry_days,
contract_labels=contract_labels,
volumes=volumes,
positions=positions,
)
return results_dict_code
def format_roll_data_for_instrument(results_dict):
"""
Put the results into a printable format
:param results_dict: dict, keys are instruments, contains roll information
:return:
"""
instrument_codes = list(results_dict.keys())
formatted_output = []
formatted_output.append(
header(
"Roll status report produced on %s" % str(
datetime.datetime.now())))
table1_df = pd.DataFrame(
dict(
Status=[results_dict[code]["status"] for code in instrument_codes],
Roll_exp=[results_dict[code]["roll_expiry"] for code in instrument_codes],
Prc_exp=[results_dict[code]["price_expiry"] for code in instrument_codes],
Crry_exp=[results_dict[code]["carry_expiry"] for code in instrument_codes],
),
index=instrument_codes,
)
# sort by time to theoretical roll, and apply same sort order for all
# tables
table1_df = table1_df.sort_values("Roll_exp")
instrument_codes = list(table1_df.index)
table1 = table("Status and time to roll in days", table1_df)
formatted_output.append(table1)
formatted_output.append(
body_text(
"Roll_exp is days until preferred roll set by roll parameters. Prc_exp is days until price contract rolls, Crry_exp is days until carry contract rolls"
)
)
# will always be 6 wide
width_contract_columns = len(
results_dict[instrument_codes[0]]["contract_labels"])
table2_dict = {}
for col_number in range(width_contract_columns):
table2_dict["C%d" % col_number] = [
str(results_dict[code]["contract_labels"][col_number])
for code in instrument_codes
]
table2_df = pd.DataFrame(table2_dict, index=instrument_codes)
table2 = table("List of contracts", table2_df)
formatted_output.append(table2)
formatted_output.append(body_text("Suffix: p=price, f=forward, c=carry"))
table2b_dict = {}
for col_number in range(width_contract_columns):
table2b_dict["Pos%d" % col_number] = [results_dict[code][
"positions"][col_number] for code in instrument_codes]
table2b_df = pd.DataFrame(table2b_dict, index=instrument_codes)
table2b = table("Positions", table2b_df)
formatted_output.append(table2b)
table3_dict = {}
for col_number in range(width_contract_columns):
table3_dict["V%d" % col_number] = [results_dict[code][
"volumes"][col_number] for code in instrument_codes]
table3_df = pd.DataFrame(table3_dict, index=instrument_codes)
table3_df = table3_df.round(2)
table3 = table("Relative volumes", table3_df)
formatted_output.append(table3)
formatted_output.append(
body_text(
"Contract volumes over recent days, normalised so largest volume is 1.0"
)
)
formatted_output.append(header("END OF ROLL REPORT"))
return formatted_output
| gpl-3.0 |
secimTools/SECIMTools | src/scripts/remove_selected_features_samples.py | 1 | 10537 | #!/usr/bin/env python
################################################################################
# SCRIPT: remove_selected_features_samples.py
#
# LAST VERSION: Includes support to select the type of drop to perform instead
# of automatically decide.
#
# AUTHOR: Miguel Ibarra Arellano ([email protected]).
#
# DESCRIPTION: This script takes a Wide format file (wide), a flag file and a
# design file (only for drop by column) and drops either rows or columns for a
# given criterion. This criterion can be numeric, a string, or a flag (1,0).
#
# OUTPUT:
# Drop by row:
# Wide file with just the dropped rows
# Wide file without the dropped rows
# Drop by column:
# Wide file with just the dropped columns
# Wide file without the dropped columns
#
################################################################################
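# EXAMPLE USAGE (illustrative only; file names and the flag column name
# "flag_outlier" are placeholders):
#
#   remove_selected_features_samples.py -i wide.tsv -d design.tsv -id rowID \
#       -f flags.tsv -fid rowID -fft row -fd flag_outlier -val 1 -con 0 \
#       -ow wide_kept.tsv -of flags_kept.tsv
#
# With condition 0 ("equal to") and value 1, rows whose flag_outlier equals 1
# are dropped; the remaining wide data and flags are written to the outputs.
################################################################################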
# Import built-in libraries
import os
import re
import copy
import logging
import argparse
from argparse import RawDescriptionHelpFormatter
# Import add-on libraries
import pandas as pd
# Import local data libraries
from secimtools.dataManager import logger as sl
from secimtools.dataManager.interface import wideToDesign
"""Function to pull arguments"""
def getOptions():
    parser = argparse.ArgumentParser(description="Removes rows or columns from the data " \
"using user-defined cut-offs.")
# Standard Input
standard = parser.add_argument_group(title='Required Input',
description="Standard inputs for SECIM tools.")
standard.add_argument('-i',"--input",dest="input", action="store",
required=True,help="Input dataset in wide format.")
standard.add_argument('-d',"--design",dest="design", action="store",
required=True, help="Design file.")
standard.add_argument('-id',"--ID",dest="uniqID",action="store",
required=True,help="Name of the column with unique "\
"identifiers.")
# Tool Input
tool = parser.add_argument_group(title="Tool Input",
description="Tool especific input.")
tool.add_argument('-f',"--flags",dest="flags", action="store",
required=True,help="Flag file.")
tool.add_argument('-fft',"--flagfiletype",dest="flagfiletype",action='store',
required=True, default="row", choices=["row","column"],
help="Type of flag file")
tool.add_argument('-fid',"--flagUniqID",dest="flagUniqID",action="store",
required=False, default=False,help="Name of the column "\
"with unique identifiers in the flag files.")
tool.add_argument('-fd',"--flagDrop",dest="flagDrop",action='store',
required=True, help="Name of the flag/field you want to"\
" access.")
tool.add_argument('-val',"--value",dest="value",action='store',
required=False, default="1",help="Cut Value")
tool.add_argument('-con',"--condition",dest="condition",action='store',
required=False, default="0",help="Condition for the cut" \
"where 0=Equal to, 1=Greater than and 2=less than.")
# Tool Output
output = parser.add_argument_group(title='Output',
description="Output of the script.")
output.add_argument('-ow',"--outWide",dest="outWide",action="store",required=True,
help="Output file without the Drops.")
output.add_argument('-of',"--outFlags",dest="outFlags",action="store",
required=True,help="Output file for Drops.")
args = parser.parse_args()
# Standardize paths
args.input = os.path.abspath(args.input)
args.flags = os.path.abspath(args.flags)
args.design = os.path.abspath(args.design)
args.outWide = os.path.abspath(args.outWide)
args.outFlags = os.path.abspath(args.outFlags)
    return args
def dropRows(df_wide, df_flags,cut_value, condition, args):
"""
Drop rows in a wide file based on its flag file and the specified flag
values to keep.
:Arguments:
:type df_wide: pandas.DataFrame
:param df: A data frame in wide format
:type df_flags: pandas.DataFrame
:param df: A data frame of flag values corresponding to the wide file
:type cut_value: string
:param args: Cut Value for evaluation
:type condition: string
:param args: Condition to evaluate
:type args: argparse.ArgumentParser
:param args: Command line arguments.
:Returns:
:rtype: pandas.DataFrame
        :returns: The wide DataFrame with the selected rows removed.
        :rtype: pandas.DataFrame
        :returns: The flag DataFrame with the selected rows removed.
"""
    # Dropping flags from the flag file: first check whether the cut value is
    # numeric or a string, then evaluate the condition. New conditions can be added here.
if not((re.search(r'[a-zA-Z]+', cut_value))):
cut_value = float(cut_value)
if condition == '>':
df_filtered = df_flags[df_flags[args.flagDrop]<cut_value]
elif condition == '<':
df_filtered = df_flags[df_flags[args.flagDrop]>cut_value]
elif condition == '==':
df_filtered = df_flags[df_flags[args.flagDrop]!=cut_value]
else:
logger.error(u'The {0} is not supported by the program, please use <,== or >'.format(condition))
quit()
else:
cut_value = str(cut_value)
if condition == '==':
df_filtered = df_flags[df_flags[args.flagDrop]!=cut_value]
else:
logger.error(u'The {0} conditional is not supported for string flags, please use =='.format(condition))
quit()
    # Create a mask over the original data to determine which rows to keep
mask = df_wide.index.isin(df_filtered.index)
    # Create a mask over the original flags to determine which rows to keep
mask_flags = df_flags.index.isin(df_filtered.index)
    # Use the mask to drop the flagged rows from the original data
df_wide_keeped = df_wide[mask]
    # Use the mask to drop the flagged rows from the original flags
df_flags_keeped = df_flags[mask_flags]
# Returning datasets
return df_wide_keeped,df_flags_keeped
def dropColumns(df_wide, df_flags,cut_value, condition, args):
"""
Drop columns in a wide file based on its flag file and the specified flag
values to keep.
:Arguments:
:type df_wide: pandas.DataFrame
:param df: A data frame in wide format
:type df_flags: pandas.DataFrame
:param df: A data frame of flag values corresponding to the wide file
:type cut_value: string
:param args: Cut Value for evaluation
:type condition: string
:param args: Condition to evaluate
:type args: argparse.ArgumentParser
:param args: Command line arguments.
:Returns:
:rtype: pandas.DataFrame
        :returns: The wide DataFrame restricted to the kept columns.
        :rtype: pandas.DataFrame
        :returns: The flag DataFrame restricted to the kept columns.
"""
#Getting list of filtered columns from flag files
if not(re.search(r'[a-zA-Z]+', cut_value)):
cut_value = float(cut_value)
if condition == '>':
to_keep = df_flags.index[df_flags[args.flagDrop]<cut_value]
elif condition == '<':
to_keep = df_flags.index[df_flags[args.flagDrop]>cut_value]
elif condition == '==':
to_keep = df_flags.index[df_flags[args.flagDrop]!=cut_value]
else:
logger.error(u'The {0} is not supported by the program, please use <,== or >'.format(condition))
quit()
else:
cut_value = str(cut_value)
if condition == '==':
to_keep = df_flags.index[df_flags[args.flagDrop]!=cut_value]
else:
logger.error(u'The {0} conditional is not supported for string flags, please use =='.format(condition))
quit()
# Subsetting
dropped_flags = df_flags.T[to_keep].T
df_wide = df_wide[to_keep]
# Returning
return df_wide,dropped_flags
def main(args):
# Import data with the interface
dat = wideToDesign(wide=args.input, design=args.design,
uniqID=args.uniqID, logger=logger)
# Cleaning from missing data
dat.dropMissing()
# Read flag file
df_flags = pd.read_table(args.flags)
    # Set the index on the flag file; if no flagUniqID was provided, raise an error
if args.flagUniqID:
df_flags.set_index(args.flagUniqID, inplace=True)
else:
logger.error("Not flagUniqID provided")
raise
# Drop either rows or columns
logger.info("Running drop flags by {0}".format(args.flagfiletype))
if args.flagfiletype=="column":
kpd_wide,kpd_flag = dropColumns(df_wide=dat.wide, df_flags=df_flags,
cut_value=args.value, condition=args.condition, args=args)
else:
kpd_wide,kpd_flag = dropRows(df_wide=dat.wide, df_flags=df_flags,
cut_value=args.value, condition=args.condition, args=args)
# Wide and flags
kpd_wide.to_csv(args.outWide, sep='\t')
kpd_flag.to_csv(args.outFlags, sep='\t')
# Finishing script
logger.info("Script complete.")
if __name__ == '__main__':
    # Getting arguments from the parser
args = getOptions()
    # Establishing logger
logger = logging.getLogger()
sl.setLogger(logger)
#Change condition
if args.condition == "0":
args.condition="=="
elif args.condition == "1":
args.condition=">"
elif args.condition == "2":
args.condition="<"
#Starting script
logger.info(u"Importing data with following parameters: "\
"\n\tWide: {0}"\
"\n\tFlags: {1}"\
"\n\tDesign: {2}"\
"\n\tID: {3}"\
"\n\toutput: {4}"\
"\n\tVariable: {5}"\
"\n\tCondition: {6}"\
"\n\tValue: {7}"\
"\n\tType Of Flag File: {8}" .format(args.input,args.flags,args.design,
args.uniqID,args.outWide,args.outFlags,
args.condition,args.value,args.flagfiletype))
#Main script
main(args)
| mit |
Barmaley-exe/scikit-learn | examples/model_selection/plot_underfitting_overfitting.py | 230 | 2649 | """
============================
Underfitting vs. Overfitting
============================
This example demonstrates the problems of underfitting and overfitting and
how we can use linear regression with polynomial features to approximate
nonlinear functions. The plot shows the function that we want to approximate,
which is a part of the cosine function. In addition, the samples from the
real function and the approximations of different models are displayed. The
models have polynomial features of different degrees. We can see that a
linear function (polynomial with degree 1) is not sufficient to fit the
training samples. This is called **underfitting**. A polynomial of degree 4
approximates the true function almost perfectly. However, for higher degrees
the model will **overfit** the training data, i.e. it learns the noise of the
training data.
We evaluate quantitatively **overfitting** / **underfitting** by using
cross-validation. We calculate the mean squared error (MSE) on the validation
set; the higher it is, the less likely the model generalizes correctly from the
training data.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import PolynomialFeatures
from sklearn.linear_model import LinearRegression
from sklearn import cross_validation
np.random.seed(0)
n_samples = 30
degrees = [1, 4, 15]
true_fun = lambda X: np.cos(1.5 * np.pi * X)
X = np.sort(np.random.rand(n_samples))
y = true_fun(X) + np.random.randn(n_samples) * 0.1
plt.figure(figsize=(14, 5))
for i in range(len(degrees)):
ax = plt.subplot(1, len(degrees), i + 1)
plt.setp(ax, xticks=(), yticks=())
polynomial_features = PolynomialFeatures(degree=degrees[i],
include_bias=False)
linear_regression = LinearRegression()
pipeline = Pipeline([("polynomial_features", polynomial_features),
("linear_regression", linear_regression)])
pipeline.fit(X[:, np.newaxis], y)
# Evaluate the models using crossvalidation
scores = cross_validation.cross_val_score(pipeline,
X[:, np.newaxis], y, scoring="mean_squared_error", cv=10)
X_test = np.linspace(0, 1, 100)
plt.plot(X_test, pipeline.predict(X_test[:, np.newaxis]), label="Model")
plt.plot(X_test, true_fun(X_test), label="True function")
plt.scatter(X, y, label="Samples")
plt.xlabel("x")
plt.ylabel("y")
plt.xlim((0, 1))
plt.ylim((-2, 2))
plt.legend(loc="best")
plt.title("Degree {}\nMSE = {:.2e}(+/- {:.2e})".format(
degrees[i], -scores.mean(), scores.std()))
plt.show()
| bsd-3-clause |
mit-ll/LO-PHI | experiments/artifacts/memory/memory_aggregate_data.py | 1 | 10774 | #!/usr/bin/env python
"""
This is a script to aggregate data from ramspeed
(c) 2015 Massachusetts Institute of Technology
"""
import argparse
import sys
import os
import numpy
import matplotlib
matplotlib.use('GTKAgg')
import matplotlib.pyplot as plt
def parse_ramspeed(filename):
"""
Simple function to parse our output from ramspeed into a dict
"""
ramspeed_struct = {}
f = open(filename,"r")
# Read every file until we hit the rows with data
init = False
empty_count = 0
for line in f:
# Ignore header data
if line.strip() == "":
empty_count += 1
continue
if empty_count < 3:
continue
cols = line.split()
# Ensure it's a valid line
if len(cols) < 3:
continue
# Extract values
mem_operation = cols[0]
mem_type = cols[1].strip(":")
if mem_type == "BatchRun":
continue
mem_rate = float(cols[2])
# Store in our nested structure
if mem_operation not in ramspeed_struct:
ramspeed_struct[mem_operation] = {}
if mem_type not in ramspeed_struct[mem_operation]:
ramspeed_struct[mem_operation][mem_type] = [mem_rate]
else:
ramspeed_struct[mem_operation][mem_type].append(mem_rate)
f.close()
return ramspeed_struct
def parse_data_rate(filename):
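    # The sensor output file is assumed to hold two tab-separated fields:
    # elapsed time (in seconds) and the number of bytes captured, so the
    # value returned is a data rate in bytes per second.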
f = open(filename,"r")
s = f.read()
s_list = s.split("\t")
f.close()
time_elapsed = float(s_list[0])
bytes = float(s_list[1])
return bytes/time_elapsed
def parse_dir(input_dir):
# parse all of our files
ramspeed_data = []
data_rate_list = []
for (dirpath, dirnames, filenames) in os.walk(input_dir):
for file in filenames:
            # Is this a text output file (ramspeed run or sensor log)?
if file.endswith("txt"):
print "* Parsing %s..."%file
if file.find("sensor") != -1:
rate = parse_data_rate(os.path.join(dirpath,file))
data_rate_list.append(rate)
else:
data = parse_ramspeed(os.path.join(dirpath,file))
ramspeed_data.append(data)
data_rate = numpy.average(data_rate_list)/(1.0*10**6)
print "Data Rate: ", data_rate, "MB/sec"
# Aggregate data into one big struct
aggregate_data = {}
for rd in ramspeed_data:
for mem_operation in rd:
if mem_operation in aggregate_data:
for mem_type in rd[mem_operation]:
if mem_type in aggregate_data[mem_operation]:
aggregate_data[mem_operation][mem_type] += rd[mem_operation][mem_type]
else:
aggregate_data[mem_operation][mem_type] = rd[mem_operation][mem_type]
else:
aggregate_data[mem_operation] = {}
for mem_type in rd[mem_operation]:
aggregate_data[mem_operation][mem_type] = rd[mem_operation][mem_type]
return (aggregate_data, data_rate_list)
def extract_graph_data(aggregate_data):
rate_data = {}
for mem_operation in aggregate_data:
for mem_type in aggregate_data[mem_operation]:
rates = aggregate_data[mem_operation][mem_type]
if mem_type != "AVERAGE":
continue
print "Operation: %s, Type: %s"%(mem_operation,mem_type)
print " Avg: %f, StDev: %f, Count: %d"%(numpy.average(rates),
numpy.std(rates),
len(rates))
rate_data[mem_operation] = rates
return rate_data
def aggregate_files(options):
# Get aggregate data from input 1
aggregate_data_sensor, data_rate_list1 = parse_dir(options.input_dir_sensor)
aggregate_data_base, data_rate_list2 = parse_dir(options.input_dir_base)
# Extract the data to graph
rate_data_sensor = extract_graph_data(aggregate_data_sensor)
rate_data_base = extract_graph_data(aggregate_data_base)
# f = open("tmp.csv", "w+")
# for x in range(100):
# tmp_list = []
# for y in range(len(rate_data_sensor)):
# print y,x
# tmp_list.append(str(rate_data_sensor[y][x]))
# f.write(",".join(tmp_list)+"\n")
#
# f.close()
# figure()
fig, ax1 = plt.subplots(figsize=(10,6))
plot_data_sensor = []
plot_data_base = []
labels = []
for mem_operation in ['SSE','MMX','INTEGER','FL-POINT']:
# Output medians
median_without = numpy.median(rate_data_base[mem_operation])
median_with = numpy.median(rate_data_sensor[mem_operation])
stdev_without = numpy.std(rate_data_base[mem_operation])
stdev_with = numpy.std(rate_data_sensor[mem_operation])
print " * %s: With: %f, Without: %f"%(mem_operation, median_with,
median_without)
print " * %s: With (Stdev): %f, Without (Stdev): %f"%(mem_operation,
stdev_with,
stdev_without)
prct_change = (median_without-median_with)/median_without
print " * %s: Percent Change: %f"%(mem_operation,prct_change)
labels.append(mem_operation)
plot_data_base += [rate_data_base[mem_operation]]
plot_data_sensor += [rate_data_sensor[mem_operation]]
index = numpy.arange(len(rate_data_sensor))+1
bar_width=.2
widths = numpy.ones(len(rate_data_sensor))*bar_width*2
print index
bp = plt.boxplot(plot_data_base,
positions=index-bar_width,
widths=widths,
sym='')
bp2 = plt.boxplot(plot_data_sensor,
positions=index+bar_width,
widths=widths,
sym='')
# Color bps
plt.setp(bp['boxes'], color='black')
plt.setp(bp['whiskers'], color='black')
plt.setp(bp['fliers'], color='grey', marker='+')
plt.setp(bp2['boxes'], color='black')
plt.setp(bp2['whiskers'], color='black')
plt.setp(bp2['fliers'], color='grey', marker='+')
boxColors = ['white','grey']
numBoxes = len(rate_data_sensor)
medians = range(numBoxes)
for i in range(numBoxes):
# Box 1
box = bp['boxes'][i]
boxX = []
boxY = []
for j in range(5):
boxX.append(box.get_xdata()[j])
boxY.append(box.get_ydata()[j])
boxCoords = zip(boxX,boxY)
# Alternate between Dark Khaki and Royal Blue
k = i % 2
boxPolygon = plt.Polygon(boxCoords, facecolor=boxColors[0])
ax1.add_patch(boxPolygon)
# Now draw the median lines back over what we just filled in
med = bp['medians'][i]
medianX = []
medianY = []
for j in range(2):
medianX.append(med.get_xdata()[j])
medianY.append(med.get_ydata()[j])
plt.plot(medianX, medianY, 'k')
medians[i] = medianY[0]
# Box 2
box = bp2['boxes'][i]
boxX = []
boxY = []
for j in range(5):
boxX.append(box.get_xdata()[j])
boxY.append(box.get_ydata()[j])
boxCoords = zip(boxX,boxY)
# Alternate between Dark Khaki and Royal Blue
boxPolygon = plt.Polygon(boxCoords, facecolor=boxColors[1])
ax1.add_patch(boxPolygon)
# Now draw the median lines back over what we just filled in
med = bp2['medians'][i]
medianX = []
medianY = []
for j in range(2):
medianX.append(med.get_xdata()[j])
medianY.append(med.get_ydata()[j])
plt.plot(medianX, medianY, 'k')
medians[i] = medianY[0]
plt.grid('on')
plt.xlim(0,len(labels)+1)
plt.xticks(index, labels)
plt.xlabel("Memory Operation Type", fontsize=20)
plt.ylabel("Memory Throughput (MB/sec)", fontsize=20)
for tick in ax1.xaxis.get_major_ticks():
tick.label.set_fontsize(15)
for tick in ax1.yaxis.get_major_ticks():
tick.label.set_fontsize(15)
# plt.title(options.title)
# rate_data_base = [[]] + rate_data_base
# plt.boxplot(rate_data_base)
plt.figtext(0.15, 0.18, 'Uninstrumented' ,
backgroundcolor=boxColors[0], color='black', weight='roman',
size=15,
bbox=dict(facecolor=boxColors[0],
edgecolor='black',
boxstyle='round,pad=1'))
plt.figtext(0.38, 0.18, 'With Instrumentation',
backgroundcolor=boxColors[1],
color='white', weight='roman', size=15,
bbox=dict(facecolor=boxColors[1],
edgecolor='black',
boxstyle='round,pad=1'))
# plt.show()
plt.tight_layout()
plt.savefig(options.output_filename, format='eps', dpi=1000)
if __name__ == "__main__":
# Import our command line parser
args = argparse.ArgumentParser()
# args.add_argument("-t", "--target", action="store", type=str, default=None,
# help="Target for control sensor. (E.g. 172.20.1.20 or VMName)")
# Add any options we want here
args.add_argument("input_dir_sensor", action="store", type=str, default=None,
help="Directory with experiment output.")
args.add_argument("input_dir_base", action="store", type=str, default=None,
help="Directory with experiment output.")
args.add_argument("-t", "--title", action="store", type=str, default=None,
help="Title of graph")
args.add_argument("-o", "--output_filename", action="store", type=str,
default=None, help="Output filename")
# Get arguments
options = args.parse_args()
if options.input_dir_sensor is None or options.input_dir_base is None:
print "ERROR: Must provide input directory"
args.print_help()
sys.exit(0)
# if options.title is None:
# print "ERROR: Must provide a title."
# args.print_help()
# sys.exit(0)
if options.output_filename is None:
print "ERROR: Must provide an output filename."
args.print_help()
sys.exit(0)
aggregate_files(options) | bsd-3-clause |
alexsavio/scikit-learn | examples/applications/plot_tomography_l1_reconstruction.py | 81 | 5461 | """
======================================================================
Compressive sensing: tomography reconstruction with L1 prior (Lasso)
======================================================================
This example shows the reconstruction of an image from a set of parallel
projections, acquired along different angles. Such a dataset is acquired in
**computed tomography** (CT).
Without any prior information on the sample, the number of projections
required to reconstruct the image is of the order of the linear size
``l`` of the image (in pixels). For simplicity we consider here a sparse
image, where only pixels on the boundary of objects have a non-zero
value. Such data could correspond for example to a cellular material.
Note however that most images are sparse in a different basis, such as
the Haar wavelets. Only ``l/7`` projections are acquired, therefore it is
necessary to use prior information available on the sample (its
sparsity): this is an example of **compressive sensing**.
The tomography projection operation is a linear transformation. In
addition to the data-fidelity term corresponding to a linear regression,
we penalize the L1 norm of the image to account for its sparsity. The
resulting optimization problem is called the :ref:`lasso`. We use the
class :class:`sklearn.linear_model.Lasso`, that uses the coordinate descent
algorithm. Importantly, this implementation is more computationally efficient
on a sparse matrix, than the projection operator used here.
The reconstruction with L1 penalization gives a result with zero error
(all pixels are successfully labeled with 0 or 1), even if noise was
added to the projections. In comparison, an L2 penalization
(:class:`sklearn.linear_model.Ridge`) produces a large number of labeling
errors for the pixels. Important artifacts are observed on the
reconstructed image, contrary to the L1 penalization. Note in particular
the circular artifact separating the pixels in the corners, that have
contributed to fewer projections than the central disk.
"""
print(__doc__)
# Author: Emmanuelle Gouillart <[email protected]>
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from scipy import ndimage
from sklearn.linear_model import Lasso
from sklearn.linear_model import Ridge
import matplotlib.pyplot as plt
def _weights(x, dx=1, orig=0):
x = np.ravel(x)
floor_x = np.floor((x - orig) / dx)
alpha = (x - orig - floor_x * dx) / dx
return np.hstack((floor_x, floor_x + 1)), np.hstack((1 - alpha, alpha))
def _generate_center_coordinates(l_x):
X, Y = np.mgrid[:l_x, :l_x].astype(np.float64)
center = l_x / 2.
X += 0.5 - center
Y += 0.5 - center
return X, Y
def build_projection_operator(l_x, n_dir):
""" Compute the tomography design matrix.
Parameters
----------
l_x : int
linear size of image array
n_dir : int
number of angles at which projections are acquired.
Returns
-------
p : sparse matrix of shape (n_dir l_x, l_x**2)
"""
X, Y = _generate_center_coordinates(l_x)
angles = np.linspace(0, np.pi, n_dir, endpoint=False)
data_inds, weights, camera_inds = [], [], []
data_unravel_indices = np.arange(l_x ** 2)
data_unravel_indices = np.hstack((data_unravel_indices,
data_unravel_indices))
for i, angle in enumerate(angles):
Xrot = np.cos(angle) * X - np.sin(angle) * Y
inds, w = _weights(Xrot, dx=1, orig=X.min())
mask = np.logical_and(inds >= 0, inds < l_x)
weights += list(w[mask])
camera_inds += list(inds[mask] + i * l_x)
data_inds += list(data_unravel_indices[mask])
proj_operator = sparse.coo_matrix((weights, (camera_inds, data_inds)))
return proj_operator
def generate_synthetic_data():
""" Synthetic binary data """
rs = np.random.RandomState(0)
n_pts = 36.
x, y = np.ogrid[0:l, 0:l]
mask_outer = (x - l / 2) ** 2 + (y - l / 2) ** 2 < (l / 2) ** 2
mask = np.zeros((l, l))
points = l * rs.rand(2, n_pts)
mask[(points[0]).astype(np.int), (points[1]).astype(np.int)] = 1
mask = ndimage.gaussian_filter(mask, sigma=l / n_pts)
res = np.logical_and(mask > mask.mean(), mask_outer)
return res - ndimage.binary_erosion(res)
# Generate synthetic images, and projections
l = 128
proj_operator = build_projection_operator(l, l / 7.)
data = generate_synthetic_data()
proj = proj_operator * data.ravel()[:, np.newaxis]
proj += 0.15 * np.random.randn(*proj.shape)
# Reconstruction with L2 (Ridge) penalization
rgr_ridge = Ridge(alpha=0.2)
rgr_ridge.fit(proj_operator, proj.ravel())
rec_l2 = rgr_ridge.coef_.reshape(l, l)
# Reconstruction with L1 (Lasso) penalization
# the best value of alpha was determined using cross validation
# with LassoCV
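# A cross-validated search for alpha could look roughly like the commented
# sketch below (not run here, as it is considerably slower):
#   from sklearn.linear_model import LassoCV
#   rgr_lasso_cv = LassoCV(cv=3).fit(proj_operator, proj.ravel())
#   best_alpha = rgr_lasso_cv.alpha_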
rgr_lasso = Lasso(alpha=0.001)
rgr_lasso.fit(proj_operator, proj.ravel())
rec_l1 = rgr_lasso.coef_.reshape(l, l)
plt.figure(figsize=(8, 3.3))
plt.subplot(131)
plt.imshow(data, cmap=plt.cm.gray, interpolation='nearest')
plt.axis('off')
plt.title('original image')
plt.subplot(132)
plt.imshow(rec_l2, cmap=plt.cm.gray, interpolation='nearest')
plt.title('L2 penalization')
plt.axis('off')
plt.subplot(133)
plt.imshow(rec_l1, cmap=plt.cm.gray, interpolation='nearest')
plt.title('L1 penalization')
plt.axis('off')
plt.subplots_adjust(hspace=0.01, wspace=0.01, top=1, bottom=0, left=0,
right=1)
plt.show()
| bsd-3-clause |
rohanp/scikit-learn | examples/tree/plot_tree_regression_multioutput.py | 22 | 1848 | """
===================================================================
Multi-output Decision Tree Regression
===================================================================
An example to illustrate multi-output regression with a decision tree.
A :ref:`decision tree <tree>`
is used to predict simultaneously the noisy x and y observations of a circle
given a single underlying feature. As a result, it learns local linear
regressions approximating the circle.
We can see that if the maximum depth of the tree (controlled by the
`max_depth` parameter) is set too high, the decision trees learn too fine
details of the training data and learn from the noise, i.e. they overfit.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.tree import DecisionTreeRegressor
# Create a random dataset
rng = np.random.RandomState(1)
X = np.sort(200 * rng.rand(100, 1) - 100, axis=0)
y = np.array([np.pi * np.sin(X).ravel(), np.pi * np.cos(X).ravel()]).T
y[::5, :] += (0.5 - rng.rand(20, 2))
# Fit regression model
regr_1 = DecisionTreeRegressor(max_depth=2)
regr_2 = DecisionTreeRegressor(max_depth=5)
regr_3 = DecisionTreeRegressor(max_depth=8)
regr_1.fit(X, y)
regr_2.fit(X, y)
regr_3.fit(X, y)
# Predict
X_test = np.arange(-100.0, 100.0, 0.01)[:, np.newaxis]
y_1 = regr_1.predict(X_test)
y_2 = regr_2.predict(X_test)
y_3 = regr_3.predict(X_test)
# Plot the results
plt.figure()
s = 50
plt.scatter(y[:, 0], y[:, 1], c="navy", s=s, label="data")
plt.scatter(y_1[:, 0], y_1[:, 1], c="cornflowerblue", s=s, label="max_depth=2")
plt.scatter(y_2[:, 0], y_2[:, 1], c="c", s=s, label="max_depth=5")
plt.scatter(y_3[:, 0], y_3[:, 1], c="orange", s=s, label="max_depth=8")
plt.xlim([-6, 6])
plt.ylim([-6, 6])
plt.xlabel("data")
plt.ylabel("target")
plt.title("Multi-output Decision Tree Regression")
plt.legend()
plt.show()
| bsd-3-clause |
rosswhitfield/mantid | Framework/PythonInterface/test/python/plugins/algorithms/WorkflowAlgorithms/SavePlot1DTest.py | 3 | 3186 | # Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
import unittest, os
from mantid import AnalysisDataServiceImpl, config, simpleapi
try:
import plotly
havePlotly = True
except ImportError:
havePlotly = False
# check if matplotlib is available and a new enough version
matplotlibissue = None # indicates there are no issues
try:
import matplotlib
from distutils.version import LooseVersion
if LooseVersion(matplotlib.__version__) < LooseVersion("1.2.0"):
matplotlibissue = 'Wrong version of matplotlib. Required >= 1.2.0'
matplotlib.use("agg")
import matplotlib.pyplot as plt
except:
matplotlibissue = 'Problem importing matplotlib'
class SavePlot1DTest(unittest.TestCase):
def makeWs(self):
simpleapi.CreateWorkspace(OutputWorkspace='test1', DataX='1,2,3,4,5,1,2,3,4,5', DataY='1,2,3,4,2,3,4,5',
DataE='1,2,3,4,2,3,4,5', NSpec='2', UnitX='dSpacing', Distribution='1', YUnitlabel="S(q)")
simpleapi.CreateWorkspace(OutputWorkspace='test2', DataX='1,2,3,4,5,1,2,3,4,5', DataY='1,2,3,4,2,3,4,5',
DataE='1,2,3,4,2,3,4,5', NSpec='2',
UnitX='Momentum', VerticalAxisUnit='TOF', VerticalAxisValues='1,2', Distribution='1',
YUnitLabel='E', WorkspaceTitle='x')
simpleapi.GroupWorkspaces("test1,test2", OutputWorkspace="group")
self.plotfile = os.path.join(config.getString('defaultsave.directory'), 'plot.png')
def cleanup(self):
ads = AnalysisDataServiceImpl.Instance()
ads.remove("group")
ads.remove("test1")
ads.remove("test2")
if os.path.exists(self.plotfile):
os.remove(self.plotfile)
@unittest.skipIf(matplotlibissue is not None, matplotlibissue)
def testPlotSingle(self):
self.makeWs()
simpleapi.SavePlot1D('test1', self.plotfile)
self.assertGreater(os.path.getsize(self.plotfile), 1e4)
self.cleanup()
@unittest.skipIf(matplotlibissue is not None, matplotlibissue)
def testPlotGroup(self):
self.makeWs()
simpleapi.SavePlot1D('group', self.plotfile)
self.assertGreater(os.path.getsize(self.plotfile), 1e4)
self.cleanup()
@unittest.skipIf(not havePlotly, 'Do not have plotly installed')
def testPlotlySingle(self):
self.makeWs()
div = simpleapi.SavePlot1D(InputWorkspace='test1', OutputType='plotly')
self.cleanup()
self.assertGreater(len(div), 0) # confirm result is non-empty
@unittest.skipIf(not havePlotly, 'Do not have plotly installed')
def testPlotlyGroup(self):
self.makeWs()
div = simpleapi.SavePlot1D(InputWorkspace='group', OutputType='plotly')
self.cleanup()
self.assertGreater(len(div), 0) # confirm result is non-empty
if __name__ == "__main__":
unittest.main()
| gpl-3.0 |
cmcantalupo/geopm | integration/test/test_profile_policy.py | 1 | 6589 | #!/usr/bin/env python
#
# Copyright (c) 2015 - 2021, Intel Corporation
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# * Neither the name of Intel Corporation nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY LOG OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
from __future__ import absolute_import
import os
import sys
import pandas
import signal
import unittest
import subprocess
import geopmpy.launcher
import geopmpy.io
import geopmpy.policy_store
import util
@util.skip_unless_do_launch()
@util.skip_unless_batch()
class TestIntegrationProfilePolicy(unittest.TestCase):
def setUp(self):
# clean up stale keys
try:
os.unlink("/dev/shm/geopm*")
except:
pass
self._files = []
self.default_power_cap = 142
self.custom_power_cap = 152
# file name must match name in .cpp file
script_dir = os.path.dirname(os.path.realpath(__file__))
exe_path = os.path.join(script_dir, '.libs', 'test_profile_policy')
policy_db_path = os.path.join(script_dir, "policystore.db")
self._files.append(policy_db_path)
geopmpy.policy_store.connect(policy_db_path)
geopmpy.policy_store.set_default("power_balancer",
[self.default_power_cap])
geopmpy.policy_store.set_best("power_balancer", "power_custom",
[self.custom_power_cap, 0, 0, 0])
geopmpy.policy_store.disconnect()
# common run parameters
self._num_rank = 1
self._num_node = 1
agent = 'power_balancer'
self._app_conf = geopmpy.io.BenchConf('test_profile_policy_app.config')
self._app_conf.append_region('sleep', 1.0)
self._app_conf.write()
self._files.append(self._app_conf.get_path())
# must match prefix in .cpp file
endpoint_prefix = '/geopm_endpoint_profile_policy_test'
# test launcher sets profile, have to use real launcher for now
self._argv = ['dummy', 'srun',
'--geopm-endpoint', endpoint_prefix,
'--geopm-agent', agent,
'--geopm-timeout', "5"]
# this must be launched on the same node as the root controller
# for now limit test to one node
self._endpoint_mgr = subprocess.Popen([exe_path],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
def tearDown(self):
# kill policy handler in case it didn't exit on its own
self._endpoint_mgr.kill()
sys.stdout.write(self._endpoint_mgr.stdout.read().decode())
sys.stdout.write(self._endpoint_mgr.stderr.read().decode())
# clean up shared memory
try:
os.unlink("/dev/shm/geopm*")
except:
pass
@util.skip_unless_config_enable('beta')
def test_policy_default(self):
profile = 'unknown'
report_path = profile + '.report'
policy_trace = profile + '.trace-policy'
self._files.append(report_path)
self._files.append(policy_trace)
self._argv.extend(['--geopm-profile', profile])
self._argv.extend(['--geopm-trace-endpoint-policy', policy_trace])
self._argv.extend(['--geopm-report', report_path])
self._argv.append(self._app_conf.get_exec_path())
self._argv.extend(self._app_conf.get_exec_args())
launcher = geopmpy.launcher.Factory().create(self._argv, self._num_rank, self._num_node)
launcher.run()
report_data = geopmpy.io.RawReport(report_path).meta_data()
self.assertEqual(report_data['Profile'], profile)
policy = report_data['Policy']
self.assertEqual(policy, 'DYNAMIC')
# check profile trace for single line with this power cap
csv_data = pandas.read_csv(policy_trace, delimiter='|', comment='#')
self.assertEqual(csv_data['POWER_PACKAGE_LIMIT_TOTAL'][0], self.default_power_cap)
@util.skip_unless_config_enable('beta')
def test_policy_custom(self):
profile = 'power_custom'
report_path = profile + '.report'
policy_trace = profile + '.trace-policy'
self._files.append(report_path)
self._files.append(policy_trace)
self._argv.extend(['--geopm-profile', profile])
self._argv.extend(['--geopm-trace-endpoint-policy', policy_trace])
self._argv.extend(['--geopm-report', report_path])
self._argv.append(self._app_conf.get_exec_path())
self._argv.extend(self._app_conf.get_exec_args())
launcher = geopmpy.launcher.Factory().create(self._argv, self._num_rank, self._num_node)
launcher.run()
report_data = geopmpy.io.RawReport(report_path).meta_data()
self.assertEqual(report_data['Profile'], profile)
policy = report_data['Policy']
self.assertEqual(policy, 'DYNAMIC')
# check profile trace for single line with this power cap
csv_data = pandas.read_csv(policy_trace, delimiter='|', comment='#')
self.assertEqual(csv_data['POWER_PACKAGE_LIMIT_TOTAL'][0], self.custom_power_cap)
if __name__ == '__main__':
unittest.main()
| bsd-3-clause |
shangwuhencc/scikit-learn | benchmarks/bench_tree.py | 297 | 3617 | """
To run this, you'll need to have installed.
* scikit-learn
Does two benchmarks
First, we fix a training set, increase the number of
samples to classify and plot number of classified samples as a
function of time.
In the second benchmark, we increase the number of dimensions of the
training set, classify a sample and plot the time taken as a function
of the number of dimensions.
"""
import numpy as np
import pylab as pl
import gc
from datetime import datetime
# to store the results
scikit_classifier_results = []
scikit_regressor_results = []
mu_second = 0.0 + 10 ** 6 # number of microseconds in a second
def bench_scikit_tree_classifier(X, Y):
"""Benchmark with scikit-learn decision tree classifier"""
from sklearn.tree import DecisionTreeClassifier
gc.collect()
# start time
tstart = datetime.now()
clf = DecisionTreeClassifier()
clf.fit(X, Y).predict(X)
delta = (datetime.now() - tstart)
# stop time
scikit_classifier_results.append(
delta.seconds + delta.microseconds / mu_second)
def bench_scikit_tree_regressor(X, Y):
"""Benchmark with scikit-learn decision tree regressor"""
from sklearn.tree import DecisionTreeRegressor
gc.collect()
# start time
tstart = datetime.now()
clf = DecisionTreeRegressor()
clf.fit(X, Y).predict(X)
delta = (datetime.now() - tstart)
# stop time
scikit_regressor_results.append(
delta.seconds + delta.microseconds / mu_second)
if __name__ == '__main__':
print('============================================')
print('Warning: this is going to take a looong time')
print('============================================')
n = 10
step = 10000
n_samples = 10000
dim = 10
n_classes = 10
for i in range(n):
print('============================================')
print('Entering iteration %s of %s' % (i, n))
print('============================================')
n_samples += step
X = np.random.randn(n_samples, dim)
Y = np.random.randint(0, n_classes, (n_samples,))
bench_scikit_tree_classifier(X, Y)
Y = np.random.randn(n_samples)
bench_scikit_tree_regressor(X, Y)
xx = range(0, n * step, step)
pl.figure('scikit-learn tree benchmark results')
pl.subplot(211)
pl.title('Learning with varying number of samples')
pl.plot(xx, scikit_classifier_results, 'g-', label='classification')
pl.plot(xx, scikit_regressor_results, 'r-', label='regression')
pl.legend(loc='upper left')
pl.xlabel('number of samples')
pl.ylabel('Time (s)')
scikit_classifier_results = []
scikit_regressor_results = []
n = 10
step = 500
start_dim = 500
n_classes = 10
dim = start_dim
for i in range(0, n):
print('============================================')
print('Entering iteration %s of %s' % (i, n))
print('============================================')
dim += step
X = np.random.randn(100, dim)
Y = np.random.randint(0, n_classes, (100,))
bench_scikit_tree_classifier(X, Y)
Y = np.random.randn(100)
bench_scikit_tree_regressor(X, Y)
xx = np.arange(start_dim, start_dim + n * step, step)
pl.subplot(212)
pl.title('Learning in high dimensional spaces')
pl.plot(xx, scikit_classifier_results, 'g-', label='classification')
pl.plot(xx, scikit_regressor_results, 'r-', label='regression')
pl.legend(loc='upper left')
pl.xlabel('number of dimensions')
pl.ylabel('Time (s)')
pl.axis('tight')
pl.show()
| bsd-3-clause |
xyguo/scikit-learn | examples/calibration/plot_calibration_curve.py | 113 | 5904 | """
==============================
Probability Calibration curves
==============================
When performing classification one often wants to predict not only the class
label, but also the associated probability. This probability gives some
kind of confidence on the prediction. This example demonstrates how to display
how well calibrated the predicted probabilities are and how to calibrate an
uncalibrated classifier.
The experiment is performed on an artificial dataset for binary classification
with 100.000 samples (1.000 of them are used for model fitting) with 20
features. Of the 20 features, only 2 are informative and 10 are redundant. The
first figure shows the estimated probabilities obtained with logistic
regression, Gaussian naive Bayes, and Gaussian naive Bayes with both isotonic
calibration and sigmoid calibration. The calibration performance is evaluated
with Brier score, reported in the legend (the smaller the better). One can
observe here that logistic regression is well calibrated while raw Gaussian
naive Bayes performs very badly. This is because of the redundant features
which violate the assumption of feature-independence and result in an overly
confident classifier, which is indicated by the typical transposed-sigmoid
curve.
Calibration of the probabilities of Gaussian naive Bayes with isotonic
regression can fix this issue as can be seen from the nearly diagonal
calibration curve. Sigmoid calibration also improves the brier score slightly,
albeit not as strongly as the non-parametric isotonic regression. This can be
attributed to the fact that we have plenty of calibration data such that the
greater flexibility of the non-parametric model can be exploited.
The second figure shows the calibration curve of a linear support-vector
classifier (LinearSVC). LinearSVC shows the opposite behavior to Gaussian
naive Bayes: the calibration curve has a sigmoid shape, which is typical for
an under-confident classifier. In the case of LinearSVC, this is caused by the
margin property of the hinge loss, which lets the model focus on hard samples
that are close to the decision boundary (the support vectors).
Both kinds of calibration can fix this issue and yield nearly identical
results. This shows that sigmoid calibration can deal with situations where
the calibration curve of the base classifier is sigmoid (e.g., for LinearSVC)
but not where it is transposed-sigmoid (e.g., Gaussian naive Bayes).
"""
print(__doc__)
# Author: Alexandre Gramfort <[email protected]>
# Jan Hendrik Metzen <[email protected]>
# License: BSD Style.
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import LinearSVC
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import (brier_score_loss, precision_score, recall_score,
f1_score)
from sklearn.calibration import CalibratedClassifierCV, calibration_curve
from sklearn.model_selection import train_test_split
# Create dataset of classification task with many redundant and few
# informative features
X, y = datasets.make_classification(n_samples=100000, n_features=20,
n_informative=2, n_redundant=10,
random_state=42)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.99,
random_state=42)
def plot_calibration_curve(est, name, fig_index):
"""Plot calibration curve for est w/o and with calibration. """
# Calibrated with isotonic calibration
isotonic = CalibratedClassifierCV(est, cv=2, method='isotonic')
# Calibrated with sigmoid calibration
sigmoid = CalibratedClassifierCV(est, cv=2, method='sigmoid')
# Logistic regression with no calibration as baseline
lr = LogisticRegression(C=1., solver='lbfgs')
fig = plt.figure(fig_index, figsize=(10, 10))
ax1 = plt.subplot2grid((3, 1), (0, 0), rowspan=2)
ax2 = plt.subplot2grid((3, 1), (2, 0))
ax1.plot([0, 1], [0, 1], "k:", label="Perfectly calibrated")
for clf, name in [(lr, 'Logistic'),
(est, name),
(isotonic, name + ' + Isotonic'),
(sigmoid, name + ' + Sigmoid')]:
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
if hasattr(clf, "predict_proba"):
prob_pos = clf.predict_proba(X_test)[:, 1]
else: # use decision function
prob_pos = clf.decision_function(X_test)
prob_pos = \
(prob_pos - prob_pos.min()) / (prob_pos.max() - prob_pos.min())
clf_score = brier_score_loss(y_test, prob_pos, pos_label=y.max())
print("%s:" % name)
print("\tBrier: %1.3f" % (clf_score))
print("\tPrecision: %1.3f" % precision_score(y_test, y_pred))
print("\tRecall: %1.3f" % recall_score(y_test, y_pred))
print("\tF1: %1.3f\n" % f1_score(y_test, y_pred))
fraction_of_positives, mean_predicted_value = \
calibration_curve(y_test, prob_pos, n_bins=10)
ax1.plot(mean_predicted_value, fraction_of_positives, "s-",
label="%s (%1.3f)" % (name, clf_score))
ax2.hist(prob_pos, range=(0, 1), bins=10, label=name,
histtype="step", lw=2)
ax1.set_ylabel("Fraction of positives")
ax1.set_ylim([-0.05, 1.05])
ax1.legend(loc="lower right")
ax1.set_title('Calibration plots (reliability curve)')
ax2.set_xlabel("Mean predicted value")
ax2.set_ylabel("Count")
ax2.legend(loc="upper center", ncol=2)
plt.tight_layout()
# Plot calibration curve for Gaussian Naive Bayes
plot_calibration_curve(GaussianNB(), "Naive Bayes", 1)
# Plot calibration curve for Linear SVC
plot_calibration_curve(LinearSVC(), "SVC", 2)
plt.show()
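# Hedged addendum (not part of the original example): the bare calibration_curve
# call on a single pre-fitted classifier, without the plotting scaffolding used
# above. The variable names below are illustrative assumptions.
gnb_demo = GaussianNB().fit(X_train, y_train)
frac_pos_demo, mean_pred_demo = calibration_curve(
    y_test, gnb_demo.predict_proba(X_test)[:, 1], n_bins=10)
print("GaussianNB reliability curve (mean predicted value -> fraction of positives):")
for mp, fp in zip(mean_pred_demo, frac_pos_demo):
    print("  %.3f -> %.3f" % (mp, fp))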
| bsd-3-clause |
BorisJeremic/Real-ESSI-Examples | analytic_solution/test_cases/Contact/Static_Normal_Contact/Monotonic_Loading/Two_Bar_Truss_With_Gap/kn_1e2/plot.py | 8 | 2153 | #!/usr/bin/python
import h5py
import matplotlib.pylab as plt
import sys
################ Node # 2 Displacement #############################
## Analytical Solution
finput = h5py.File('Analytical_Solution.feioutput')
# Read the time and displacement
times = finput["time"][:]
disp = finput["/Model/Nodes/Generalized_Displacements"][3,:]
# Plot the figure. Add labels and titles.
plt.figure()
plt.plot(times,disp,'-r',label='Analytical Solution')
plt.xlabel("Time [s] ")
plt.ylabel("Displacement [m] ")
plt.hold(True)
## Current Solution
finput = h5py.File('Verification_Of_Static_Normal_Contact_Adding_Normal_Load.h5.feioutput')
# Read the time and displacement
times = finput["time"][:]
disp = finput["/Model/Nodes/Generalized_Displacements"][3,:]
# Plot the figure. Add labels and titles.
plt.plot(times,disp,'-k',label='Numerical Solution')
# plt.grid(b=True, which='major', color='k', linestyle='-')
# plt.grid(b=True, which='minor', color='r', linestyle='-', alpha=0.2)
plt.minorticks_on()
plt.xlabel("Time [s] ")
plt.ylabel("Displacement [m] ")
plt.legend()
plt.savefig('Node_2_Displacement', bbox_inches='tight')
#plt.show()
################ Node # 3 Displacement #############################
## Analytical Solution
finput = h5py.File('Analytical_Solution.feioutput')
# Read the time and displacement
times = finput["time"][:]
disp = finput["/Model/Nodes/Generalized_Displacements"][6,:]
# Plot the figure. Add labels and titles.
plt.figure()
plt.plot(times,disp,'-r',label='Analytical Solution')
plt.hold(True)
## Current Solution
finput = h5py.File('Verification_Of_Static_Normal_Contact_Adding_Normal_Load.h5.feioutput')
# Read the time and displacement
times = finput["time"][:]
disp = finput["/Model/Nodes/Generalized_Displacements"][6,:]
# Plot the figure. Add labels and titles.
plt.plot(times,disp,'-k',label='Numerical Solution')
# plt.grid(b=True, which='major', color='k', linestyle='-')
# plt.grid(b=True, which='minor', color='r', linestyle='-', alpha=0.2)
plt.minorticks_on()
plt.xlabel("Time [s] ")
plt.ylabel("Displacement [m] ")
plt.legend()
plt.savefig('Node_3_Displacement', bbox_inches='tight')
#plt.show()
| cc0-1.0 |
NunoEdgarGub1/scikit-learn | sklearn/datasets/mlcomp.py | 289 | 3855 | # Copyright (c) 2010 Olivier Grisel <[email protected]>
# License: BSD 3 clause
"""Glue code to load http://mlcomp.org data as a scikit.learn dataset"""
import os
import numbers
from sklearn.datasets.base import load_files
def _load_document_classification(dataset_path, metadata, set_=None, **kwargs):
if set_ is not None:
dataset_path = os.path.join(dataset_path, set_)
return load_files(dataset_path, metadata.get('description'), **kwargs)
LOADERS = {
'DocumentClassification': _load_document_classification,
# TODO: implement the remaining domain formats
}
def load_mlcomp(name_or_id, set_="raw", mlcomp_root=None, **kwargs):
"""Load a datasets as downloaded from http://mlcomp.org
Parameters
----------
    name_or_id : the integer id or the string name (as given in the metadata
                 file) of the MLComp dataset to load
set_ : select the portion to load: 'train', 'test' or 'raw'
mlcomp_root : the filesystem path to the root folder where MLComp datasets
are stored, if mlcomp_root is None, the MLCOMP_DATASETS_HOME
environment variable is looked up instead.
**kwargs : domain specific kwargs to be passed to the dataset loader.
Read more in the :ref:`User Guide <datasets>`.
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
        'filenames', the files holding the raw data to learn, 'target', the
classification labels (integer index), 'target_names',
the meaning of the labels, and 'DESCR', the full description of the
dataset.
    Note on the lookup process: depending on the type of name_or_id, this
    function chooses between integer id lookup and metadata name lookup by
    inspecting the unzipped archives and their metadata files.
TODO: implement zip dataset loading too
"""
if mlcomp_root is None:
try:
mlcomp_root = os.environ['MLCOMP_DATASETS_HOME']
except KeyError:
raise ValueError("MLCOMP_DATASETS_HOME env variable is undefined")
mlcomp_root = os.path.expanduser(mlcomp_root)
mlcomp_root = os.path.abspath(mlcomp_root)
mlcomp_root = os.path.normpath(mlcomp_root)
if not os.path.exists(mlcomp_root):
raise ValueError("Could not find folder: " + mlcomp_root)
# dataset lookup
if isinstance(name_or_id, numbers.Integral):
# id lookup
dataset_path = os.path.join(mlcomp_root, str(name_or_id))
else:
# assume name based lookup
dataset_path = None
expected_name_line = "name: " + name_or_id
for dataset in os.listdir(mlcomp_root):
metadata_file = os.path.join(mlcomp_root, dataset, 'metadata')
if not os.path.exists(metadata_file):
continue
with open(metadata_file) as f:
for line in f:
if line.strip() == expected_name_line:
dataset_path = os.path.join(mlcomp_root, dataset)
break
if dataset_path is None:
raise ValueError("Could not find dataset with metadata line: " +
expected_name_line)
# loading the dataset metadata
metadata = dict()
metadata_file = os.path.join(dataset_path, 'metadata')
if not os.path.exists(metadata_file):
raise ValueError(dataset_path + ' is not a valid MLComp dataset')
with open(metadata_file) as f:
for line in f:
if ":" in line:
key, value = line.split(":", 1)
metadata[key.strip()] = value.strip()
    format = metadata.get('format', 'unknown')
loader = LOADERS.get(format)
if loader is None:
raise ValueError("No loader implemented for format: " + format)
return loader(dataset_path, metadata, set_=set_, **kwargs)
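# Hedged usage sketch (not part of the original module). The dataset name and
# the MLCOMP_DATASETS_HOME path below are illustrative assumptions only.
if __name__ == '__main__':
    os.environ.setdefault('MLCOMP_DATASETS_HOME', '/tmp/mlcomp')  # assumed path
    try:
        news_bunch = load_mlcomp('20news-18828', set_='train')  # assumed name
        print('%d documents in %d classes' % (len(news_bunch.filenames),
                                              len(news_bunch.target_names)))
    except ValueError as exc:
        # raised when the folder or the dataset metadata cannot be found
        print('Could not load dataset: %s' % exc)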
| bsd-3-clause |
nikste/tensorflow | tensorflow/contrib/learn/python/learn/tests/dataframe/feeding_queue_runner_test.py | 12 | 5278 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests `FeedingQueueRunner` using arrays and `DataFrames`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
# TODO: #6568 Remove this hack that makes dlopen() not crash.
if hasattr(sys, "getdlopenflags") and hasattr(sys, "setdlopenflags"):
import ctypes
sys.setdlopenflags(sys.getdlopenflags() | ctypes.RTLD_GLOBAL)
import numpy as np
from tensorflow.contrib.learn.python.learn.dataframe.queues import feeding_functions as ff
from tensorflow.python.client import session
from tensorflow.python.framework import ops
from tensorflow.python.platform import test
from tensorflow.python.training import coordinator
from tensorflow.python.training import queue_runner_impl
# pylint: disable=g-import-not-at-top
try:
import pandas as pd
HAS_PANDAS = True
except ImportError:
HAS_PANDAS = False
def get_rows(array, row_indices):
rows = [array[i] for i in row_indices]
return np.vstack(rows)
class FeedingQueueRunnerTestCase(test.TestCase):
"""Tests for `FeedingQueueRunner`."""
def testArrayFeeding(self):
with ops.Graph().as_default():
array = np.arange(32).reshape([16, 2])
q = ff.enqueue_data(array, capacity=100)
batch_size = 3
dq_op = q.dequeue_many(batch_size)
with session.Session() as sess:
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(sess=sess, coord=coord)
for i in range(100):
indices = [
j % array.shape[0]
for j in range(batch_size * i, batch_size * (i + 1))
]
expected_dq = get_rows(array, indices)
dq = sess.run(dq_op)
np.testing.assert_array_equal(indices, dq[0])
np.testing.assert_array_equal(expected_dq, dq[1])
coord.request_stop()
coord.join(threads)
def testArrayFeedingMultiThread(self):
with ops.Graph().as_default():
array = np.arange(256).reshape([128, 2])
q = ff.enqueue_data(array, capacity=128, num_threads=8, shuffle=True)
batch_size = 3
dq_op = q.dequeue_many(batch_size)
with session.Session() as sess:
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(sess=sess, coord=coord)
for _ in range(100):
dq = sess.run(dq_op)
indices = dq[0]
expected_dq = get_rows(array, indices)
np.testing.assert_array_equal(expected_dq, dq[1])
coord.request_stop()
coord.join(threads)
def testPandasFeeding(self):
if not HAS_PANDAS:
return
with ops.Graph().as_default():
array1 = np.arange(32)
array2 = np.arange(32, 64)
df = pd.DataFrame({"a": array1, "b": array2}, index=np.arange(64, 96))
q = ff.enqueue_data(df, capacity=100)
batch_size = 5
dq_op = q.dequeue_many(5)
with session.Session() as sess:
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(sess=sess, coord=coord)
for i in range(100):
indices = [
j % array1.shape[0]
for j in range(batch_size * i, batch_size * (i + 1))
]
expected_df_indices = df.index[indices]
expected_rows = df.iloc[indices]
dq = sess.run(dq_op)
np.testing.assert_array_equal(expected_df_indices, dq[0])
for col_num, col in enumerate(df.columns):
np.testing.assert_array_equal(expected_rows[col].values,
dq[col_num + 1])
coord.request_stop()
coord.join(threads)
def testPandasFeedingMultiThread(self):
if not HAS_PANDAS:
return
with ops.Graph().as_default():
array1 = np.arange(128, 256)
array2 = 2 * array1
df = pd.DataFrame({"a": array1, "b": array2}, index=np.arange(128))
q = ff.enqueue_data(df, capacity=128, num_threads=8, shuffle=True)
batch_size = 5
dq_op = q.dequeue_many(batch_size)
with session.Session() as sess:
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(sess=sess, coord=coord)
for _ in range(100):
dq = sess.run(dq_op)
indices = dq[0]
expected_rows = df.iloc[indices]
for col_num, col in enumerate(df.columns):
np.testing.assert_array_equal(expected_rows[col].values,
dq[col_num + 1])
coord.request_stop()
coord.join(threads)
if __name__ == "__main__":
test.main()
| apache-2.0 |
OshynSong/scikit-learn | sklearn/preprocessing/tests/test_imputation.py | 213 | 11911 | import numpy as np
from scipy import sparse
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_true
from sklearn.preprocessing.imputation import Imputer
from sklearn.pipeline import Pipeline
from sklearn import grid_search
from sklearn import tree
from sklearn.random_projection import sparse_random_matrix
def _check_statistics(X, X_true,
strategy, statistics, missing_values):
"""Utility function for testing imputation for a given strategy.
Test:
- along the two axes
- with dense and sparse arrays
Check that:
- the statistics (mean, median, mode) are correct
- the missing values are imputed correctly"""
err_msg = "Parameters: strategy = %s, missing_values = %s, " \
"axis = {0}, sparse = {1}" % (strategy, missing_values)
# Normal matrix, axis = 0
imputer = Imputer(missing_values, strategy=strategy, axis=0)
X_trans = imputer.fit(X).transform(X.copy())
assert_array_equal(imputer.statistics_, statistics,
err_msg.format(0, False))
assert_array_equal(X_trans, X_true, err_msg.format(0, False))
# Normal matrix, axis = 1
imputer = Imputer(missing_values, strategy=strategy, axis=1)
imputer.fit(X.transpose())
if np.isnan(statistics).any():
assert_raises(ValueError, imputer.transform, X.copy().transpose())
else:
X_trans = imputer.transform(X.copy().transpose())
assert_array_equal(X_trans, X_true.transpose(),
err_msg.format(1, False))
# Sparse matrix, axis = 0
imputer = Imputer(missing_values, strategy=strategy, axis=0)
imputer.fit(sparse.csc_matrix(X))
X_trans = imputer.transform(sparse.csc_matrix(X.copy()))
if sparse.issparse(X_trans):
X_trans = X_trans.toarray()
assert_array_equal(imputer.statistics_, statistics,
err_msg.format(0, True))
assert_array_equal(X_trans, X_true, err_msg.format(0, True))
# Sparse matrix, axis = 1
imputer = Imputer(missing_values, strategy=strategy, axis=1)
imputer.fit(sparse.csc_matrix(X.transpose()))
if np.isnan(statistics).any():
assert_raises(ValueError, imputer.transform,
sparse.csc_matrix(X.copy().transpose()))
else:
X_trans = imputer.transform(sparse.csc_matrix(X.copy().transpose()))
if sparse.issparse(X_trans):
X_trans = X_trans.toarray()
assert_array_equal(X_trans, X_true.transpose(),
err_msg.format(1, True))
def test_imputation_shape():
# Verify the shapes of the imputed matrix for different strategies.
X = np.random.randn(10, 2)
X[::2] = np.nan
for strategy in ['mean', 'median', 'most_frequent']:
imputer = Imputer(strategy=strategy)
X_imputed = imputer.fit_transform(X)
assert_equal(X_imputed.shape, (10, 2))
X_imputed = imputer.fit_transform(sparse.csr_matrix(X))
assert_equal(X_imputed.shape, (10, 2))
def test_imputation_mean_median_only_zero():
# Test imputation using the mean and median strategies, when
# missing_values == 0.
X = np.array([
[np.nan, 0, 0, 0, 5],
[np.nan, 1, 0, np.nan, 3],
[np.nan, 2, 0, 0, 0],
[np.nan, 6, 0, 5, 13],
])
X_imputed_mean = np.array([
[3, 5],
[1, 3],
[2, 7],
[6, 13],
])
statistics_mean = [np.nan, 3, np.nan, np.nan, 7]
# Behaviour of median with NaN is undefined, e.g. different results in
# np.median and np.ma.median
X_for_median = X[:, [0, 1, 2, 4]]
X_imputed_median = np.array([
[2, 5],
[1, 3],
[2, 5],
[6, 13],
])
statistics_median = [np.nan, 2, np.nan, 5]
_check_statistics(X, X_imputed_mean, "mean", statistics_mean, 0)
_check_statistics(X_for_median, X_imputed_median, "median",
statistics_median, 0)
def test_imputation_mean_median():
# Test imputation using the mean and median strategies, when
# missing_values != 0.
rng = np.random.RandomState(0)
dim = 10
dec = 10
shape = (dim * dim, dim + dec)
zeros = np.zeros(shape[0])
values = np.arange(1, shape[0]+1)
values[4::2] = - values[4::2]
tests = [("mean", "NaN", lambda z, v, p: np.mean(np.hstack((z, v)))),
("mean", 0, lambda z, v, p: np.mean(v)),
("median", "NaN", lambda z, v, p: np.median(np.hstack((z, v)))),
("median", 0, lambda z, v, p: np.median(v))]
for strategy, test_missing_values, true_value_fun in tests:
X = np.empty(shape)
X_true = np.empty(shape)
true_statistics = np.empty(shape[1])
# Create a matrix X with columns
# - with only zeros,
# - with only missing values
# - with zeros, missing values and values
# And a matrix X_true containing all true values
for j in range(shape[1]):
nb_zeros = (j - dec + 1 > 0) * (j - dec + 1) * (j - dec + 1)
nb_missing_values = max(shape[0] + dec * dec
- (j + dec) * (j + dec), 0)
nb_values = shape[0] - nb_zeros - nb_missing_values
z = zeros[:nb_zeros]
p = np.repeat(test_missing_values, nb_missing_values)
v = values[rng.permutation(len(values))[:nb_values]]
true_statistics[j] = true_value_fun(z, v, p)
# Create the columns
X[:, j] = np.hstack((v, z, p))
if 0 == test_missing_values:
X_true[:, j] = np.hstack((v,
np.repeat(
true_statistics[j],
nb_missing_values + nb_zeros)))
else:
X_true[:, j] = np.hstack((v,
z,
np.repeat(true_statistics[j],
nb_missing_values)))
# Shuffle them the same way
np.random.RandomState(j).shuffle(X[:, j])
np.random.RandomState(j).shuffle(X_true[:, j])
# Mean doesn't support columns containing NaNs, median does
if strategy == "median":
cols_to_keep = ~np.isnan(X_true).any(axis=0)
else:
cols_to_keep = ~np.isnan(X_true).all(axis=0)
X_true = X_true[:, cols_to_keep]
_check_statistics(X, X_true, strategy,
true_statistics, test_missing_values)
def test_imputation_median_special_cases():
# Test median imputation with sparse boundary cases
X = np.array([
[0, np.nan, np.nan], # odd: implicit zero
[5, np.nan, np.nan], # odd: explicit nonzero
[0, 0, np.nan], # even: average two zeros
[-5, 0, np.nan], # even: avg zero and neg
[0, 5, np.nan], # even: avg zero and pos
[4, 5, np.nan], # even: avg nonzeros
[-4, -5, np.nan], # even: avg negatives
[-1, 2, np.nan], # even: crossing neg and pos
]).transpose()
X_imputed_median = np.array([
[0, 0, 0],
[5, 5, 5],
[0, 0, 0],
[-5, 0, -2.5],
[0, 5, 2.5],
[4, 5, 4.5],
[-4, -5, -4.5],
[-1, 2, .5],
]).transpose()
statistics_median = [0, 5, 0, -2.5, 2.5, 4.5, -4.5, .5]
_check_statistics(X, X_imputed_median, "median",
statistics_median, 'NaN')
def test_imputation_most_frequent():
# Test imputation using the most-frequent strategy.
X = np.array([
[-1, -1, 0, 5],
[-1, 2, -1, 3],
[-1, 1, 3, -1],
[-1, 2, 3, 7],
])
X_true = np.array([
[2, 0, 5],
[2, 3, 3],
[1, 3, 3],
[2, 3, 7],
])
    # scipy.stats.mode, used in Imputer, doesn't return the first most
    # frequent value as promised in the doc, but the lowest most frequent one.
    # If this test starts failing after a scipy update, Imputer will need to be
    # updated to be consistent with the new (correct) behaviour
_check_statistics(X, X_true, "most_frequent", [np.nan, 2, 3, 3], -1)
def test_imputation_pipeline_grid_search():
# Test imputation within a pipeline + gridsearch.
pipeline = Pipeline([('imputer', Imputer(missing_values=0)),
('tree', tree.DecisionTreeRegressor(random_state=0))])
parameters = {
'imputer__strategy': ["mean", "median", "most_frequent"],
'imputer__axis': [0, 1]
}
l = 100
X = sparse_random_matrix(l, l, density=0.10)
Y = sparse_random_matrix(l, 1, density=0.10).toarray()
gs = grid_search.GridSearchCV(pipeline, parameters)
gs.fit(X, Y)
def test_imputation_pickle():
# Test for pickling imputers.
import pickle
l = 100
X = sparse_random_matrix(l, l, density=0.10)
for strategy in ["mean", "median", "most_frequent"]:
imputer = Imputer(missing_values=0, strategy=strategy)
imputer.fit(X)
imputer_pickled = pickle.loads(pickle.dumps(imputer))
assert_array_equal(imputer.transform(X.copy()),
imputer_pickled.transform(X.copy()),
"Fail to transform the data after pickling "
"(strategy = %s)" % (strategy))
def test_imputation_copy():
# Test imputation with copy
X_orig = sparse_random_matrix(5, 5, density=0.75, random_state=0)
# copy=True, dense => copy
X = X_orig.copy().toarray()
imputer = Imputer(missing_values=0, strategy="mean", copy=True)
Xt = imputer.fit(X).transform(X)
Xt[0, 0] = -1
assert_false(np.all(X == Xt))
# copy=True, sparse csr => copy
X = X_orig.copy()
imputer = Imputer(missing_values=X.data[0], strategy="mean", copy=True)
Xt = imputer.fit(X).transform(X)
Xt.data[0] = -1
assert_false(np.all(X.data == Xt.data))
# copy=False, dense => no copy
X = X_orig.copy().toarray()
imputer = Imputer(missing_values=0, strategy="mean", copy=False)
Xt = imputer.fit(X).transform(X)
Xt[0, 0] = -1
assert_true(np.all(X == Xt))
# copy=False, sparse csr, axis=1 => no copy
X = X_orig.copy()
imputer = Imputer(missing_values=X.data[0], strategy="mean",
copy=False, axis=1)
Xt = imputer.fit(X).transform(X)
Xt.data[0] = -1
assert_true(np.all(X.data == Xt.data))
# copy=False, sparse csc, axis=0 => no copy
X = X_orig.copy().tocsc()
imputer = Imputer(missing_values=X.data[0], strategy="mean",
copy=False, axis=0)
Xt = imputer.fit(X).transform(X)
Xt.data[0] = -1
assert_true(np.all(X.data == Xt.data))
# copy=False, sparse csr, axis=0 => copy
X = X_orig.copy()
imputer = Imputer(missing_values=X.data[0], strategy="mean",
copy=False, axis=0)
Xt = imputer.fit(X).transform(X)
Xt.data[0] = -1
assert_false(np.all(X.data == Xt.data))
# copy=False, sparse csc, axis=1 => copy
X = X_orig.copy().tocsc()
imputer = Imputer(missing_values=X.data[0], strategy="mean",
copy=False, axis=1)
Xt = imputer.fit(X).transform(X)
Xt.data[0] = -1
assert_false(np.all(X.data == Xt.data))
# copy=False, sparse csr, axis=1, missing_values=0 => copy
X = X_orig.copy()
imputer = Imputer(missing_values=0, strategy="mean",
copy=False, axis=1)
Xt = imputer.fit(X).transform(X)
assert_false(sparse.issparse(Xt))
# Note: If X is sparse and if missing_values=0, then a (dense) copy of X is
# made, even if copy=False.
| bsd-3-clause |
xubenben/scikit-learn | sklearn/utils/multiclass.py | 83 | 12343 |
# Author: Arnaud Joly, Joel Nothman, Hamzeh Alsalhi
#
# License: BSD 3 clause
"""
Multi-class / multi-label utility function
==========================================
"""
from __future__ import division
from collections import Sequence
from itertools import chain
from scipy.sparse import issparse
from scipy.sparse.base import spmatrix
from scipy.sparse import dok_matrix
from scipy.sparse import lil_matrix
import numpy as np
from ..externals.six import string_types
from .validation import check_array
from ..utils.fixes import bincount
def _unique_multiclass(y):
if hasattr(y, '__array__'):
return np.unique(np.asarray(y))
else:
return set(y)
def _unique_indicator(y):
return np.arange(check_array(y, ['csr', 'csc', 'coo']).shape[1])
_FN_UNIQUE_LABELS = {
'binary': _unique_multiclass,
'multiclass': _unique_multiclass,
'multilabel-indicator': _unique_indicator,
}
def unique_labels(*ys):
"""Extract an ordered array of unique labels
We don't allow:
- mix of multilabel and multiclass (single label) targets
        - mix of label indicator matrix and anything else
          (because there are no explicit labels)
- mix of label indicator matrices of different sizes
- mix of string and integer labels
At the moment, we also don't allow "multiclass-multioutput" input type.
Parameters
----------
*ys : array-likes,
Returns
-------
out : numpy array of shape [n_unique_labels]
An ordered array of unique labels.
Examples
--------
>>> from sklearn.utils.multiclass import unique_labels
>>> unique_labels([3, 5, 5, 5, 7, 7])
array([3, 5, 7])
>>> unique_labels([1, 2, 3, 4], [2, 2, 3, 4])
array([1, 2, 3, 4])
>>> unique_labels([1, 2, 10], [5, 11])
array([ 1, 2, 5, 10, 11])
"""
if not ys:
raise ValueError('No argument has been passed.')
# Check that we don't mix label format
ys_types = set(type_of_target(x) for x in ys)
if ys_types == set(["binary", "multiclass"]):
ys_types = set(["multiclass"])
if len(ys_types) > 1:
raise ValueError("Mix type of y not allowed, got types %s" % ys_types)
label_type = ys_types.pop()
# Check consistency for the indicator format
if (label_type == "multilabel-indicator" and
len(set(check_array(y, ['csr', 'csc', 'coo']).shape[1]
for y in ys)) > 1):
raise ValueError("Multi-label binary indicator input with "
"different numbers of labels")
# Get the unique set of labels
_unique_labels = _FN_UNIQUE_LABELS.get(label_type, None)
if not _unique_labels:
raise ValueError("Unknown label type: %s" % repr(ys))
ys_labels = set(chain.from_iterable(_unique_labels(y) for y in ys))
# Check that we don't mix string type with number type
if (len(set(isinstance(label, string_types) for label in ys_labels)) > 1):
raise ValueError("Mix of label input types (string and number)")
return np.array(sorted(ys_labels))
def _is_integral_float(y):
return y.dtype.kind == 'f' and np.all(y.astype(int) == y)
def is_multilabel(y):
""" Check if ``y`` is in a multilabel format.
Parameters
----------
y : numpy array of shape [n_samples]
Target values.
Returns
-------
out : bool,
        Return ``True``, if ``y`` is in a multilabel format, else ``False``.
Examples
--------
>>> import numpy as np
>>> from sklearn.utils.multiclass import is_multilabel
>>> is_multilabel([0, 1, 0, 1])
False
>>> is_multilabel([[1], [0, 2], []])
False
>>> is_multilabel(np.array([[1, 0], [0, 0]]))
True
>>> is_multilabel(np.array([[1], [0], [0]]))
False
>>> is_multilabel(np.array([[1, 0, 0]]))
True
"""
if hasattr(y, '__array__'):
y = np.asarray(y)
if not (hasattr(y, "shape") and y.ndim == 2 and y.shape[1] > 1):
return False
if issparse(y):
if isinstance(y, (dok_matrix, lil_matrix)):
y = y.tocsr()
return (len(y.data) == 0 or np.ptp(y.data) == 0 and
(y.dtype.kind in 'biu' or # bool, int, uint
_is_integral_float(np.unique(y.data))))
else:
labels = np.unique(y)
return len(labels) < 3 and (y.dtype.kind in 'biu' or # bool, int, uint
_is_integral_float(labels))
def type_of_target(y):
"""Determine the type of data indicated by target `y`
Parameters
----------
y : array-like
Returns
-------
target_type : string
One of:
* 'continuous': `y` is an array-like of floats that are not all
integers, and is 1d or a column vector.
* 'continuous-multioutput': `y` is a 2d array of floats that are
not all integers, and both dimensions are of size > 1.
* 'binary': `y` contains <= 2 discrete values and is 1d or a column
vector.
* 'multiclass': `y` contains more than two discrete values, is not a
sequence of sequences, and is 1d or a column vector.
* 'multiclass-multioutput': `y` is a 2d array that contains more
than two discrete values, is not a sequence of sequences, and both
dimensions are of size > 1.
* 'multilabel-indicator': `y` is a label indicator matrix, an array
of two dimensions with at least two columns, and at most 2 unique
values.
* 'unknown': `y` is array-like but none of the above, such as a 3d
array, sequence of sequences, or an array of non-sequence objects.
Examples
--------
>>> import numpy as np
>>> type_of_target([0.1, 0.6])
'continuous'
>>> type_of_target([1, -1, -1, 1])
'binary'
>>> type_of_target(['a', 'b', 'a'])
'binary'
>>> type_of_target([1.0, 2.0])
'binary'
>>> type_of_target([1, 0, 2])
'multiclass'
>>> type_of_target([1.0, 0.0, 3.0])
'multiclass'
>>> type_of_target(['a', 'b', 'c'])
'multiclass'
>>> type_of_target(np.array([[1, 2], [3, 1]]))
'multiclass-multioutput'
>>> type_of_target([[1, 2]])
'multiclass-multioutput'
>>> type_of_target(np.array([[1.5, 2.0], [3.0, 1.6]]))
'continuous-multioutput'
>>> type_of_target(np.array([[0, 1], [1, 1]]))
'multilabel-indicator'
"""
valid = ((isinstance(y, (Sequence, spmatrix)) or hasattr(y, '__array__'))
and not isinstance(y, string_types))
if not valid:
raise ValueError('Expected array-like (array or non-string sequence), '
'got %r' % y)
if is_multilabel(y):
return 'multilabel-indicator'
try:
y = np.asarray(y)
except ValueError:
# Known to fail in numpy 1.3 for array of arrays
return 'unknown'
# The old sequence of sequences format
try:
if (not hasattr(y[0], '__array__') and isinstance(y[0], Sequence)
and not isinstance(y[0], string_types)):
raise ValueError('You appear to be using a legacy multi-label data'
' representation. Sequence of sequences are no'
' longer supported; use a binary array or sparse'
' matrix instead.')
except IndexError:
pass
# Invalid inputs
if y.ndim > 2 or (y.dtype == object and len(y) and
not isinstance(y.flat[0], string_types)):
return 'unknown' # [[[1, 2]]] or [obj_1] and not ["label_1"]
if y.ndim == 2 and y.shape[1] == 0:
return 'unknown' # [[]]
if y.ndim == 2 and y.shape[1] > 1:
suffix = "-multioutput" # [[1, 2], [1, 2]]
else:
suffix = "" # [1, 2, 3] or [[1], [2], [3]]
# check float and contains non-integer float values
if y.dtype.kind == 'f' and np.any(y != y.astype(int)):
# [.1, .2, 3] or [[.1, .2, 3]] or [[1., .2]] and not [1., 2., 3.]
return 'continuous' + suffix
if (len(np.unique(y)) > 2) or (y.ndim >= 2 and len(y[0]) > 1):
return 'multiclass' + suffix # [1, 2, 3] or [[1., 2., 3]] or [[1, 2]]
else:
return 'binary' # [1, 2] or [["a"], ["b"]]
def _check_partial_fit_first_call(clf, classes=None):
"""Private helper function for factorizing common classes param logic
Estimators that implement the ``partial_fit`` API need to be provided with
the list of possible classes at the first call to partial_fit.
Subsequent calls to partial_fit should check that ``classes`` is still
consistent with a previous value of ``clf.classes_`` when provided.
This function returns True if it detects that this was the first call to
``partial_fit`` on ``clf``. In that case the ``classes_`` attribute is also
set on ``clf``.
"""
if getattr(clf, 'classes_', None) is None and classes is None:
raise ValueError("classes must be passed on the first call "
"to partial_fit.")
elif classes is not None:
if getattr(clf, 'classes_', None) is not None:
if not np.all(clf.classes_ == unique_labels(classes)):
raise ValueError(
"`classes=%r` is not the same as on last call "
"to partial_fit, was: %r" % (classes, clf.classes_))
else:
# This is the first call to partial_fit
clf.classes_ = unique_labels(classes)
return True
# classes is None and clf.classes_ has already previously been set:
# nothing to do
return False
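# Hedged illustration (not part of the original module): how an estimator's
# partial_fit would typically use _check_partial_fit_first_call. The class name
# and its internals below are assumptions made for this sketch only.
class _CountingClassifierSketch(object):
    def partial_fit(self, X, y, classes=None):
        if _check_partial_fit_first_call(self, classes):
            # first call: classes_ has just been set, so allocate per-class state
            self._class_counts = {c: 0 for c in self.classes_}
        for label in y:
            self._class_counts[label] += 1
        return self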
def class_distribution(y, sample_weight=None):
"""Compute class priors from multioutput-multiclass target data
Parameters
----------
y : array like or sparse matrix of size (n_samples, n_outputs)
The labels for each example.
sample_weight : array-like of shape = (n_samples,), optional
Sample weights.
Returns
-------
classes : list of size n_outputs of arrays of size (n_classes,)
List of classes for each column.
    n_classes : list of integers of size n_outputs
Number of classes in each column
class_prior : list of size n_outputs of arrays of size (n_classes,)
Class distribution of each column.
"""
classes = []
n_classes = []
class_prior = []
n_samples, n_outputs = y.shape
if issparse(y):
y = y.tocsc()
y_nnz = np.diff(y.indptr)
for k in range(n_outputs):
col_nonzero = y.indices[y.indptr[k]:y.indptr[k + 1]]
# separate sample weights for zero and non-zero elements
if sample_weight is not None:
nz_samp_weight = np.asarray(sample_weight)[col_nonzero]
zeros_samp_weight_sum = (np.sum(sample_weight) -
np.sum(nz_samp_weight))
else:
nz_samp_weight = None
zeros_samp_weight_sum = y.shape[0] - y_nnz[k]
classes_k, y_k = np.unique(y.data[y.indptr[k]:y.indptr[k + 1]],
return_inverse=True)
class_prior_k = bincount(y_k, weights=nz_samp_weight)
            # An explicit zero was found, combine its weight with the weight
            # of the implicit zeros
if 0 in classes_k:
class_prior_k[classes_k == 0] += zeros_samp_weight_sum
            # If there is an implicit zero and it is not in classes and
            # class_prior, make an entry for it
if 0 not in classes_k and y_nnz[k] < y.shape[0]:
classes_k = np.insert(classes_k, 0, 0)
class_prior_k = np.insert(class_prior_k, 0,
zeros_samp_weight_sum)
classes.append(classes_k)
n_classes.append(classes_k.shape[0])
class_prior.append(class_prior_k / class_prior_k.sum())
else:
for k in range(n_outputs):
classes_k, y_k = np.unique(y[:, k], return_inverse=True)
classes.append(classes_k)
n_classes.append(classes_k.shape[0])
class_prior_k = bincount(y_k, weights=sample_weight)
class_prior.append(class_prior_k / class_prior_k.sum())
return (classes, n_classes, class_prior)
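# Hedged usage sketch (not part of the original module): class_distribution on a
# tiny dense two-output target. The array below is illustrative only; its first
# output column has classes [1, 2] with priors [1/3, 2/3].
if __name__ == '__main__':
    _y_demo = np.array([[1, 0],
                        [2, 0],
                        [2, 1]])
    _classes, _n_classes, _priors = class_distribution(_y_demo)
    print(_classes, _n_classes, _priors)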
| bsd-3-clause |
edhuckle/statsmodels | statsmodels/tsa/vector_ar/dynamic.py | 27 | 9932 | # pylint: disable=W0201
from statsmodels.compat.python import iteritems, string_types, range
import numpy as np
from statsmodels.tools.decorators import cache_readonly
import pandas as pd
from . import var_model as _model
from . import util
from . import plotting
FULL_SAMPLE = 0
ROLLING = 1
EXPANDING = 2
def _get_window_type(window_type):
if window_type in (FULL_SAMPLE, ROLLING, EXPANDING):
return window_type
elif isinstance(window_type, string_types):
window_type_up = window_type.upper()
if window_type_up in ('FULL SAMPLE', 'FULL_SAMPLE'):
return FULL_SAMPLE
elif window_type_up == 'ROLLING':
return ROLLING
elif window_type_up == 'EXPANDING':
return EXPANDING
raise Exception('Unrecognized window type: %s' % window_type)
class DynamicVAR(object):
"""
Estimates time-varying vector autoregression (VAR(p)) using
equation-by-equation least squares
Parameters
----------
data : pandas.DataFrame
lag_order : int, default 1
window : int
window_type : {'expanding', 'rolling'}
min_periods : int or None
Minimum number of observations to require in window, defaults to window
size if None specified
trend : {'c', 'nc', 'ct', 'ctt'}
TODO
Returns
-------
**Attributes**:
coefs : WidePanel
items : coefficient names
major_axis : dates
minor_axis : VAR equation names
"""
def __init__(self, data, lag_order=1, window=None, window_type='expanding',
trend='c', min_periods=None):
self.lag_order = lag_order
self.names = list(data.columns)
self.neqs = len(self.names)
self._y_orig = data
# TODO: deal with trend
self._x_orig = _make_lag_matrix(data, lag_order)
self._x_orig['intercept'] = 1
(self.y, self.x, self.x_filtered, self._index,
self._time_has_obs) = _filter_data(self._y_orig, self._x_orig)
self.lag_order = lag_order
self.trendorder = util.get_trendorder(trend)
self._set_window(window_type, window, min_periods)
def _set_window(self, window_type, window, min_periods):
self._window_type = _get_window_type(window_type)
if self._is_rolling:
if window is None:
raise Exception('Must pass window when doing rolling '
'regression')
if min_periods is None:
min_periods = window
else:
window = len(self.x)
if min_periods is None:
min_periods = 1
self._window = int(window)
self._min_periods = min_periods
@cache_readonly
def T(self):
"""
Number of time periods in results
"""
return len(self.result_index)
@property
def nobs(self):
# Stub, do I need this?
data = dict((eq, r.nobs) for eq, r in iteritems(self.equations))
return pd.DataFrame(data)
@cache_readonly
def equations(self):
eqs = {}
for col, ts in iteritems(self.y):
model = pd.ols(y=ts, x=self.x, window=self._window,
window_type=self._window_type,
min_periods=self._min_periods)
eqs[col] = model
return eqs
@cache_readonly
def coefs(self):
"""
Return dynamic regression coefficients as WidePanel
"""
data = {}
for eq, result in iteritems(self.equations):
data[eq] = result.beta
panel = pd.WidePanel.fromDict(data)
# Coefficient names become items
return panel.swapaxes('items', 'minor')
@property
def result_index(self):
return self.coefs.major_axis
@cache_readonly
def _coefs_raw(self):
"""
Reshape coefficients to be more amenable to dynamic calculations
Returns
-------
coefs : (time_periods x lag_order x neqs x neqs)
"""
coef_panel = self.coefs.copy()
del coef_panel['intercept']
coef_values = coef_panel.swapaxes('items', 'major').values
coef_values = coef_values.reshape((len(coef_values),
self.lag_order,
self.neqs, self.neqs))
return coef_values
@cache_readonly
def _intercepts_raw(self):
"""
Similar to _coefs_raw, return intercept values in easy-to-use matrix
form
Returns
-------
intercepts : (T x K)
"""
return self.coefs['intercept'].values
@cache_readonly
def resid(self):
data = {}
for eq, result in iteritems(self.equations):
data[eq] = result.resid
return pd.DataFrame(data)
def forecast(self, steps=1):
"""
Produce dynamic forecast
Parameters
----------
        steps : int, default 1
            The number of periods ahead to forecast; forecasts are aligned with
            the dates of their realizations.
Returns
-------
forecasts : pandas.DataFrame
"""
output = np.empty((self.T - steps, self.neqs))
y_values = self.y.values
y_index_map = dict((d, idx) for idx, d in enumerate(self.y.index))
result_index_map = dict((d, idx) for idx, d in enumerate(self.result_index))
coefs = self._coefs_raw
intercepts = self._intercepts_raw
# can only produce this many forecasts
forc_index = self.result_index[steps:]
for i, date in enumerate(forc_index):
# TODO: check that this does the right thing in weird cases...
idx = y_index_map[date] - steps
result_idx = result_index_map[date] - steps
y_slice = y_values[:idx]
forcs = _model.forecast(y_slice, coefs[result_idx],
intercepts[result_idx], steps)
output[i] = forcs[-1]
return pd.DataFrame(output, index=forc_index, columns=self.names)
def plot_forecast(self, steps=1, figsize=(10, 10)):
"""
Plot h-step ahead forecasts against actual realizations of time
series. Note that forecasts are lined up with their respective
realizations.
Parameters
----------
        steps : int, default 1
            Forecast horizon passed to ``forecast``.
"""
import matplotlib.pyplot as plt
fig, axes = plt.subplots(figsize=figsize, nrows=self.neqs,
sharex=True)
forc = self.forecast(steps=steps)
dates = forc.index
y_overlay = self.y.reindex(dates)
for i, col in enumerate(forc.columns):
ax = axes[i]
y_ts = y_overlay[col]
forc_ts = forc[col]
y_handle = ax.plot(dates, y_ts.values, 'k.', ms=2)
forc_handle = ax.plot(dates, forc_ts.values, 'k-')
fig.legend((y_handle, forc_handle), ('Y', 'Forecast'))
fig.autofmt_xdate()
fig.suptitle('Dynamic %d-step forecast' % steps)
# pretty things up a bit
plotting.adjust_subplots(bottom=0.15, left=0.10)
plt.draw_if_interactive()
@property
def _is_rolling(self):
return self._window_type == ROLLING
@cache_readonly
def r2(self):
"""Returns the r-squared values."""
data = dict((eq, r.r2) for eq, r in iteritems(self.equations))
return pd.DataFrame(data)
class DynamicPanelVAR(DynamicVAR):
"""
Dynamic (time-varying) panel vector autoregression using panel ordinary
least squares
Parameters
----------
"""
def __init__(self, data, lag_order=1, window=None, window_type='expanding',
trend='c', min_periods=None):
self.lag_order = lag_order
self.neqs = len(data.columns)
self._y_orig = data
# TODO: deal with trend
self._x_orig = _make_lag_matrix(data, lag_order)
self._x_orig['intercept'] = 1
(self.y, self.x, self.x_filtered, self._index,
self._time_has_obs) = _filter_data(self._y_orig, self._x_orig)
self.lag_order = lag_order
self.trendorder = util.get_trendorder(trend)
self._set_window(window_type, window, min_periods)
def _filter_data(lhs, rhs):
"""
    Data filtering routine for dynamic VAR: align lhs and rhs on a common
    index and screen out rows with missing values on either side.
    Parameters
    ----------
    lhs : DataFrame
        original data
    rhs : DataFrame
        lagged variables
    Returns
    -------
    filtered_lhs, filtered_rhs, pre_filtered_rhs, index, valid
"""
def _has_all_columns(df):
return np.isfinite(df.values).sum(1) == len(df.columns)
rhs_valid = _has_all_columns(rhs)
if not rhs_valid.all():
pre_filtered_rhs = rhs[rhs_valid]
else:
pre_filtered_rhs = rhs
index = lhs.index.union(rhs.index)
if not index.equals(rhs.index) or not index.equals(lhs.index):
rhs = rhs.reindex(index)
lhs = lhs.reindex(index)
rhs_valid = _has_all_columns(rhs)
lhs_valid = _has_all_columns(lhs)
valid = rhs_valid & lhs_valid
if not valid.all():
filt_index = rhs.index[valid]
filtered_rhs = rhs.reindex(filt_index)
filtered_lhs = lhs.reindex(filt_index)
else:
filtered_rhs, filtered_lhs = rhs, lhs
return filtered_lhs, filtered_rhs, pre_filtered_rhs, index, valid
def _make_lag_matrix(x, lags):
    # Stack lags 1..`lags` of every column of x into one DataFrame whose columns
    # are named 'L1.<name>', ..., 'L<lags>.<name>'.
    data = {}
columns = []
for i in range(1, 1 + lags):
lagstr = 'L%d.'% i
lag = x.shift(i).rename(columns=lambda c: lagstr + c)
data.update(lag._series)
columns.extend(lag.columns)
return pd.DataFrame(data, columns=columns)
class Equation(object):
"""
Stub, estimate one equation
"""
def __init__(self, y, x):
pass
if __name__ == '__main__':
import pandas.util.testing as ptest
ptest.N = 500
data = ptest.makeTimeDataFrame().cumsum(0)
var = DynamicVAR(data, lag_order=2, window_type='expanding')
var2 = DynamicVAR(data, lag_order=2, window=10,
window_type='rolling')
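    # Hedged addition (not in the original demo): exercise the dynamic forecast
    # described in DynamicVAR.forecast on the rolling model built above.
    fcast = var2.forecast(steps=2)
    print(fcast.tail())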
| bsd-3-clause |
zorojean/scikit-learn | sklearn/svm/classes.py | 126 | 40114 | import warnings
import numpy as np
from .base import _fit_liblinear, BaseSVC, BaseLibSVM
from ..base import BaseEstimator, RegressorMixin
from ..linear_model.base import LinearClassifierMixin, SparseCoefMixin, \
LinearModel
from ..feature_selection.from_model import _LearntSelectorMixin
from ..utils import check_X_y
from ..utils.validation import _num_samples
class LinearSVC(BaseEstimator, LinearClassifierMixin,
_LearntSelectorMixin, SparseCoefMixin):
"""Linear Support Vector Classification.
Similar to SVC with parameter kernel='linear', but implemented in terms of
liblinear rather than libsvm, so it has more flexibility in the choice of
penalties and loss functions and should scale better to large numbers of
samples.
This class supports both dense and sparse input and the multiclass support
is handled according to a one-vs-the-rest scheme.
Read more in the :ref:`User Guide <svm_classification>`.
Parameters
----------
C : float, optional (default=1.0)
Penalty parameter C of the error term.
loss : string, 'hinge' or 'squared_hinge' (default='squared_hinge')
Specifies the loss function. 'hinge' is the standard SVM loss
(used e.g. by the SVC class) while 'squared_hinge' is the
square of the hinge loss.
penalty : string, 'l1' or 'l2' (default='l2')
Specifies the norm used in the penalization. The 'l2'
penalty is the standard used in SVC. The 'l1' leads to `coef_`
vectors that are sparse.
dual : bool, (default=True)
Select the algorithm to either solve the dual or primal
optimization problem. Prefer dual=False when n_samples > n_features.
tol : float, optional (default=1e-4)
Tolerance for stopping criteria.
    multi_class : string, 'ovr' or 'crammer_singer' (default='ovr')
Determines the multi-class strategy if `y` contains more than
two classes.
`ovr` trains n_classes one-vs-rest classifiers, while `crammer_singer`
optimizes a joint objective over all classes.
While `crammer_singer` is interesting from a theoretical perspective
as it is consistent, it is seldom used in practice as it rarely leads
to better accuracy and is more expensive to compute.
If `crammer_singer` is chosen, the options loss, penalty and dual will
be ignored.
fit_intercept : boolean, optional (default=True)
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(i.e. data is expected to be already centered).
intercept_scaling : float, optional (default=1)
When self.fit_intercept is True, instance vector x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equals to
intercept_scaling is appended to the instance vector.
The intercept becomes intercept_scaling * synthetic feature weight
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
class_weight : {dict, 'balanced'}, optional
Set the parameter C of class i to class_weight[i]*C for
SVC. If not given, all classes are supposed to have
weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
verbose : int, (default=0)
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in liblinear that, if enabled, may not work
properly in a multithreaded context.
random_state : int seed, RandomState instance, or None (default=None)
The seed of the pseudo random number generator to use when
shuffling the data.
max_iter : int, (default=1000)
The maximum number of iterations to be run.
Attributes
----------
coef_ : array, shape = [n_features] if n_classes == 2
else [n_classes, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
`coef_` is a readonly property derived from `raw_coef_` that
follows the internal memory layout of liblinear.
intercept_ : array, shape = [1] if n_classes == 2 else [n_classes]
Constants in decision function.
Notes
-----
The underlying C implementation uses a random number generator to
select features when fitting the model. It is thus not uncommon
to have slightly different results for the same input data. If
that happens, try with a smaller ``tol`` parameter.
The underlying implementation (liblinear) uses a sparse internal
representation for the data that will incur a memory copy.
Predict output may not match that of standalone liblinear in certain
cases. See :ref:`differences from liblinear <liblinear_differences>`
in the narrative documentation.
**References:**
`LIBLINEAR: A Library for Large Linear Classification
<http://www.csie.ntu.edu.tw/~cjlin/liblinear/>`__
See also
--------
SVC
Implementation of Support Vector Machine classifier using libsvm:
the kernel can be non-linear but its SMO algorithm does not
scale to large number of samples as LinearSVC does.
Furthermore SVC multi-class mode is implemented using one
vs one scheme while LinearSVC uses one vs the rest. It is
possible to implement one vs the rest with SVC by using the
:class:`sklearn.multiclass.OneVsRestClassifier` wrapper.
Finally SVC can fit dense data without memory copy if the input
is C-contiguous. Sparse data will still incur memory copy though.
sklearn.linear_model.SGDClassifier
SGDClassifier can optimize the same cost function as LinearSVC
by adjusting the penalty and loss parameters. In addition it requires
less memory, allows incremental (online) learning, and implements
various loss functions and regularization regimes.
"""
def __init__(self, penalty='l2', loss='squared_hinge', dual=True, tol=1e-4,
C=1.0, multi_class='ovr', fit_intercept=True,
intercept_scaling=1, class_weight=None, verbose=0,
random_state=None, max_iter=1000):
self.dual = dual
self.tol = tol
self.C = C
self.multi_class = multi_class
self.fit_intercept = fit_intercept
self.intercept_scaling = intercept_scaling
self.class_weight = class_weight
self.verbose = verbose
self.random_state = random_state
self.max_iter = max_iter
self.penalty = penalty
self.loss = loss
def fit(self, X, y):
"""Fit the model according to the given training data.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vector, where n_samples in the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples]
Target vector relative to X
Returns
-------
self : object
Returns self.
"""
# FIXME Remove l1/l2 support in 1.0 -----------------------------------
loss_l = self.loss.lower()
msg = ("loss='%s' has been deprecated in favor of "
"loss='%s' as of 0.16. Backward compatibility"
" for the loss='%s' will be removed in %s")
# FIXME change loss_l --> self.loss after 0.18
if loss_l in ('l1', 'l2'):
old_loss = self.loss
self.loss = {'l1': 'hinge', 'l2': 'squared_hinge'}.get(loss_l)
warnings.warn(msg % (old_loss, self.loss, old_loss, '1.0'),
DeprecationWarning)
# ---------------------------------------------------------------------
if self.C < 0:
raise ValueError("Penalty term must be positive; got (C=%r)"
% self.C)
X, y = check_X_y(X, y, accept_sparse='csr',
dtype=np.float64, order="C")
self.classes_ = np.unique(y)
self.coef_, self.intercept_, self.n_iter_ = _fit_liblinear(
X, y, self.C, self.fit_intercept, self.intercept_scaling,
self.class_weight, self.penalty, self.dual, self.verbose,
self.max_iter, self.tol, self.random_state, self.multi_class,
self.loss)
if self.multi_class == "crammer_singer" and len(self.classes_) == 2:
self.coef_ = (self.coef_[1] - self.coef_[0]).reshape(1, -1)
if self.fit_intercept:
intercept = self.intercept_[1] - self.intercept_[0]
self.intercept_ = np.array([intercept])
return self
class LinearSVR(LinearModel, RegressorMixin):
"""Linear Support Vector Regression.
Similar to SVR with parameter kernel='linear', but implemented in terms of
liblinear rather than libsvm, so it has more flexibility in the choice of
penalties and loss functions and should scale better to large numbers of
samples.
This class supports both dense and sparse input.
Read more in the :ref:`User Guide <svm_regression>`.
Parameters
----------
C : float, optional (default=1.0)
Penalty parameter C of the error term. The penalty is a squared
l2 penalty. The bigger this parameter, the less regularization is used.
loss : string, 'epsilon_insensitive' or 'squared_epsilon_insensitive'
(default='epsilon_insensitive')
        Specifies the loss function. 'epsilon_insensitive' is the standard SVR
        loss, while 'squared_epsilon_insensitive' is its square ('l1' and 'l2'
        are deprecated aliases for these two losses).
epsilon : float, optional (default=0.1)
Epsilon parameter in the epsilon-insensitive loss function. Note
that the value of this parameter depends on the scale of the target
variable y. If unsure, set epsilon=0.
dual : bool, (default=True)
Select the algorithm to either solve the dual or primal
optimization problem. Prefer dual=False when n_samples > n_features.
tol : float, optional (default=1e-4)
Tolerance for stopping criteria.
fit_intercept : boolean, optional (default=True)
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(i.e. data is expected to be already centered).
intercept_scaling : float, optional (default=1)
When self.fit_intercept is True, instance vector x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equals to
intercept_scaling is appended to the instance vector.
The intercept becomes intercept_scaling * synthetic feature weight
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
verbose : int, (default=0)
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in liblinear that, if enabled, may not work
properly in a multithreaded context.
random_state : int seed, RandomState instance, or None (default=None)
The seed of the pseudo random number generator to use when
shuffling the data.
max_iter : int, (default=1000)
The maximum number of iterations to be run.
Attributes
----------
coef_ : array, shape = [n_features] if n_classes == 2
else [n_classes, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
`coef_` is a readonly property derived from `raw_coef_` that
follows the internal memory layout of liblinear.
intercept_ : array, shape = [1] if n_classes == 2 else [n_classes]
Constants in decision function.
See also
--------
LinearSVC
Implementation of Support Vector Machine classifier using the
same library as this class (liblinear).
SVR
Implementation of Support Vector Machine regression using libsvm:
the kernel can be non-linear but its SMO algorithm does not
scale to large number of samples as LinearSVC does.
sklearn.linear_model.SGDRegressor
SGDRegressor can optimize the same cost function as LinearSVR
by adjusting the penalty and loss parameters. In addition it requires
less memory, allows incremental (online) learning, and implements
various loss functions and regularization regimes.
"""
def __init__(self, epsilon=0.0, tol=1e-4, C=1.0,
loss='epsilon_insensitive', fit_intercept=True,
intercept_scaling=1., dual=True, verbose=0,
random_state=None, max_iter=1000):
self.tol = tol
self.C = C
self.epsilon = epsilon
self.fit_intercept = fit_intercept
self.intercept_scaling = intercept_scaling
self.verbose = verbose
self.random_state = random_state
self.max_iter = max_iter
self.dual = dual
self.loss = loss
def fit(self, X, y):
"""Fit the model according to the given training data.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vector, where n_samples in the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples]
Target vector relative to X
Returns
-------
self : object
Returns self.
"""
# FIXME Remove l1/l2 support in 1.0 -----------------------------------
loss_l = self.loss.lower()
msg = ("loss='%s' has been deprecated in favor of "
"loss='%s' as of 0.16. Backward compatibility"
" for the loss='%s' will be removed in %s")
# FIXME change loss_l --> self.loss after 0.18
if loss_l in ('l1', 'l2'):
old_loss = self.loss
self.loss = {'l1': 'epsilon_insensitive',
'l2': 'squared_epsilon_insensitive'
}.get(loss_l)
warnings.warn(msg % (old_loss, self.loss, old_loss, '1.0'),
DeprecationWarning)
# ---------------------------------------------------------------------
if self.C < 0:
raise ValueError("Penalty term must be positive; got (C=%r)"
% self.C)
X, y = check_X_y(X, y, accept_sparse='csr',
dtype=np.float64, order="C")
penalty = 'l2' # SVR only accepts l2 penalty
self.coef_, self.intercept_, self.n_iter_ = _fit_liblinear(
X, y, self.C, self.fit_intercept, self.intercept_scaling,
None, penalty, self.dual, self.verbose,
self.max_iter, self.tol, self.random_state, loss=self.loss,
epsilon=self.epsilon)
self.coef_ = self.coef_.ravel()
return self
class SVC(BaseSVC):
"""C-Support Vector Classification.
The implementation is based on libsvm. The fit time complexity
is more than quadratic with the number of samples which makes it hard
to scale to dataset with more than a couple of 10000 samples.
The multiclass support is handled according to a one-vs-one scheme.
For details on the precise mathematical formulation of the provided
kernel functions and how `gamma`, `coef0` and `degree` affect each
other, see the corresponding section in the narrative documentation:
:ref:`svm_kernels`.
Read more in the :ref:`User Guide <svm_classification>`.
Parameters
----------
C : float, optional (default=1.0)
Penalty parameter C of the error term.
kernel : string, optional (default='rbf')
Specifies the kernel type to be used in the algorithm.
It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
a callable.
If none is given, 'rbf' will be used. If a callable is given it is
used to pre-compute the kernel matrix from data matrices; that matrix
should be an array of shape ``(n_samples, n_samples)``.
degree : int, optional (default=3)
Degree of the polynomial kernel function ('poly').
Ignored by all other kernels.
gamma : float, optional (default='auto')
Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.
If gamma is 'auto' then 1/n_features will be used instead.
coef0 : float, optional (default=0.0)
Independent term in kernel function.
It is only significant in 'poly' and 'sigmoid'.
probability : boolean, optional (default=False)
Whether to enable probability estimates. This must be enabled prior
to calling `fit`, and will slow down that method.
shrinking : boolean, optional (default=True)
Whether to use the shrinking heuristic.
tol : float, optional (default=1e-3)
Tolerance for stopping criterion.
cache_size : float, optional
Specify the size of the kernel cache (in MB).
class_weight : {dict, 'balanced'}, optional
Set the parameter C of class i to class_weight[i]*C for
SVC. If not given, all classes are supposed to have
weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
verbose : bool, default: False
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in libsvm that, if enabled, may not work
properly in a multithreaded context.
max_iter : int, optional (default=-1)
Hard limit on iterations within solver, or -1 for no limit.
decision_function_shape : 'ovo', 'ovr' or None, default=None
        Whether to return a one-vs-rest ('ovr') decision function of shape
(n_samples, n_classes) as all other classifiers, or the original
one-vs-one ('ovo') decision function of libsvm which has shape
(n_samples, n_classes * (n_classes - 1) / 2).
The default of None will currently behave as 'ovo' for backward
        compatibility and raise a deprecation warning, but will change to 'ovr'
in 0.18.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data for probability estimation.
Attributes
----------
support_ : array-like, shape = [n_SV]
Indices of support vectors.
support_vectors_ : array-like, shape = [n_SV, n_features]
Support vectors.
n_support_ : array-like, dtype=int32, shape = [n_class]
Number of support vectors for each class.
dual_coef_ : array, shape = [n_class-1, n_SV]
Coefficients of the support vector in the decision function.
For multiclass, coefficient for all 1-vs-1 classifiers.
The layout of the coefficients in the multiclass case is somewhat
non-trivial. See the section about multi-class classification in the
SVM section of the User Guide for details.
coef_ : array, shape = [n_class-1, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
`coef_` is a readonly property derived from `dual_coef_` and
`support_vectors_`.
intercept_ : array, shape = [n_class * (n_class-1) / 2]
Constants in decision function.
Examples
--------
>>> import numpy as np
>>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
>>> y = np.array([1, 1, 2, 2])
>>> from sklearn.svm import SVC
>>> clf = SVC()
>>> clf.fit(X, y) #doctest: +NORMALIZE_WHITESPACE
SVC(C=1.0, cache_size=200, class_weight=None, coef0=0.0,
decision_function_shape=None, degree=3, gamma='auto', kernel='rbf',
max_iter=-1, probability=False, random_state=None, shrinking=True,
tol=0.001, verbose=False)
>>> print(clf.predict([[-0.8, -1]]))
[1]
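A rough sketch (not part of the original example) of how
`decision_function_shape` changes the shape of the decision values: with
four classes, 'ovo' yields n_classes * (n_classes - 1) / 2 = 6 columns
while 'ovr' yields n_classes = 4.
>>> X4 = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1],
... [3, 3], [4, 3], [-3, 3], [-4, 3]])
>>> y4 = np.array([1, 1, 2, 2, 3, 3, 4, 4])
>>> SVC(decision_function_shape='ovo').fit(X4, y4).decision_function(X4).shape
(8, 6)
>>> SVC(decision_function_shape='ovr').fit(X4, y4).decision_function(X4).shape
(8, 4)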
See also
--------
SVR
Support Vector Machine for Regression implemented using libsvm.
LinearSVC
Scalable Linear Support Vector Machine for classification
implemented using liblinear. Check the See also section of
LinearSVC for more comparison elements.
"""
def __init__(self, C=1.0, kernel='rbf', degree=3, gamma='auto',
coef0=0.0, shrinking=True, probability=False,
tol=1e-3, cache_size=200, class_weight=None,
verbose=False, max_iter=-1, decision_function_shape=None,
random_state=None):
super(SVC, self).__init__(
impl='c_svc', kernel=kernel, degree=degree, gamma=gamma,
coef0=coef0, tol=tol, C=C, nu=0., shrinking=shrinking,
probability=probability, cache_size=cache_size,
class_weight=class_weight, verbose=verbose, max_iter=max_iter,
decision_function_shape=decision_function_shape,
random_state=random_state)
class NuSVC(BaseSVC):
"""Nu-Support Vector Classification.
Similar to SVC but uses a parameter to control the number of support
vectors.
The implementation is based on libsvm.
Read more in the :ref:`User Guide <svm_classification>`.
Parameters
----------
nu : float, optional (default=0.5)
An upper bound on the fraction of training errors and a lower
bound of the fraction of support vectors. Should be in the
interval (0, 1].
kernel : string, optional (default='rbf')
Specifies the kernel type to be used in the algorithm.
It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
a callable.
If none is given, 'rbf' will be used. If a callable is given it is
used to precompute the kernel matrix.
degree : int, optional (default=3)
Degree of the polynomial kernel function ('poly').
Ignored by all other kernels.
gamma : float, optional (default='auto')
Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.
If gamma is 'auto' then 1/n_features will be used instead.
coef0 : float, optional (default=0.0)
Independent term in kernel function.
It is only significant in 'poly' and 'sigmoid'.
probability : boolean, optional (default=False)
Whether to enable probability estimates. This must be enabled prior
to calling `fit`, and will slow down that method.
shrinking : boolean, optional (default=True)
Whether to use the shrinking heuristic.
tol : float, optional (default=1e-3)
Tolerance for stopping criterion.
cache_size : float, optional
Specify the size of the kernel cache (in MB).
class_weight : {dict, 'auto'}, optional
Set the parameter C of class i to class_weight[i]*C for
SVC. If not given, all classes are supposed to have
weight one. The 'auto' mode uses the values of y to
automatically adjust weights inversely proportional to
class frequencies.
verbose : bool, default: False
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in libsvm that, if enabled, may not work
properly in a multithreaded context.
max_iter : int, optional (default=-1)
Hard limit on iterations within solver, or -1 for no limit.
decision_function_shape : 'ovo', 'ovr' or None, default=None
Whether to return a one-vs-rest ('ovr') decision function of shape
(n_samples, n_classes) as all other classifiers, or the original
one-vs-one ('ovo') decision function of libsvm which has shape
(n_samples, n_classes * (n_classes - 1) / 2).
The default of None will currently behave as 'ovo' for backward
compatibility and raise a deprecation warning, but will change to
'ovr' in 0.18.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data for probability estimation.
Attributes
----------
support_ : array-like, shape = [n_SV]
Indices of support vectors.
support_vectors_ : array-like, shape = [n_SV, n_features]
Support vectors.
n_support_ : array-like, dtype=int32, shape = [n_class]
Number of support vectors for each class.
dual_coef_ : array, shape = [n_class-1, n_SV]
Coefficients of the support vector in the decision function.
For multiclass, coefficient for all 1-vs-1 classifiers.
The layout of the coefficients in the multiclass case is somewhat
non-trivial. See the section about multi-class classification in
the SVM section of the User Guide for details.
coef_ : array, shape = [n_class-1, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
`coef_` is a readonly property derived from `dual_coef_` and
`support_vectors_`.
intercept_ : array, shape = [n_class * (n_class-1) / 2]
Constants in decision function.
Examples
--------
>>> import numpy as np
>>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
>>> y = np.array([1, 1, 2, 2])
>>> from sklearn.svm import NuSVC
>>> clf = NuSVC()
>>> clf.fit(X, y) #doctest: +NORMALIZE_WHITESPACE
NuSVC(cache_size=200, class_weight=None, coef0=0.0,
decision_function_shape=None, degree=3, gamma='auto', kernel='rbf',
max_iter=-1, nu=0.5, probability=False, random_state=None,
shrinking=True, tol=0.001, verbose=False)
>>> print(clf.predict([[-0.8, -1]]))
[1]
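A rough sketch (not part of the original example) of `nu` acting as a
lower bound on the fraction of support vectors; the exact counts depend
on the data, so the comparison is skipped by doctest:
>>> low = NuSVC(nu=0.2).fit(X, y) # doctest: +SKIP
>>> high = NuSVC(nu=0.8).fit(X, y) # doctest: +SKIP
>>> low.n_support_.sum() <= high.n_support_.sum() # doctest: +SKIP
True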
See also
--------
SVC
Support Vector Machine for classification using libsvm.
LinearSVC
Scalable linear Support Vector Machine for classification using
liblinear.
"""
def __init__(self, nu=0.5, kernel='rbf', degree=3, gamma='auto',
coef0=0.0, shrinking=True, probability=False,
tol=1e-3, cache_size=200, class_weight=None, verbose=False,
max_iter=-1, decision_function_shape=None, random_state=None):
super(NuSVC, self).__init__(
impl='nu_svc', kernel=kernel, degree=degree, gamma=gamma,
coef0=coef0, tol=tol, C=0., nu=nu, shrinking=shrinking,
probability=probability, cache_size=cache_size,
class_weight=class_weight, verbose=verbose, max_iter=max_iter,
decision_function_shape=decision_function_shape,
random_state=random_state)
class SVR(BaseLibSVM, RegressorMixin):
"""Epsilon-Support Vector Regression.
The free parameters in the model are C and epsilon.
The implementation is based on libsvm.
Read more in the :ref:`User Guide <svm_regression>`.
Parameters
----------
C : float, optional (default=1.0)
Penalty parameter C of the error term.
epsilon : float, optional (default=0.1)
Epsilon in the epsilon-SVR model. It specifies the width of the
epsilon-tube: no penalty is added to the training loss for points
whose prediction lies within a distance epsilon of the actual
value.
kernel : string, optional (default='rbf')
Specifies the kernel type to be used in the algorithm.
It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
a callable.
If none is given, 'rbf' will be used. If a callable is given it is
used to precompute the kernel matrix.
degree : int, optional (default=3)
Degree of the polynomial kernel function ('poly').
Ignored by all other kernels.
gamma : float, optional (default='auto')
Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.
If gamma is 'auto' then 1/n_features will be used instead.
coef0 : float, optional (default=0.0)
Independent term in kernel function.
It is only significant in 'poly' and 'sigmoid'.
shrinking : boolean, optional (default=True)
Whether to use the shrinking heuristic.
tol : float, optional (default=1e-3)
Tolerance for stopping criterion.
cache_size : float, optional
Specify the size of the kernel cache (in MB).
verbose : bool, default: False
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in libsvm that, if enabled, may not work
properly in a multithreaded context.
max_iter : int, optional (default=-1)
Hard limit on iterations within solver, or -1 for no limit.
Attributes
----------
support_ : array-like, shape = [n_SV]
Indices of support vectors.
support_vectors_ : array-like, shape = [nSV, n_features]
Support vectors.
dual_coef_ : array, shape = [1, n_SV]
Coefficients of the support vector in the decision function.
coef_ : array, shape = [1, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
`coef_` is a readonly property derived from `dual_coef_` and
`support_vectors_`.
intercept_ : array, shape = [1]
Constants in decision function.
Examples
--------
>>> from sklearn.svm import SVR
>>> import numpy as np
>>> n_samples, n_features = 10, 5
>>> np.random.seed(0)
>>> y = np.random.randn(n_samples)
>>> X = np.random.randn(n_samples, n_features)
>>> clf = SVR(C=1.0, epsilon=0.2)
>>> clf.fit(X, y) #doctest: +NORMALIZE_WHITESPACE
SVR(C=1.0, cache_size=200, coef0=0.0, degree=3, epsilon=0.2, gamma='auto',
kernel='rbf', max_iter=-1, shrinking=True, tol=0.001, verbose=False)
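A rough sketch (not part of the original example) of the epsilon-tube:
widening `epsilon` leaves more training points inside the tube, so the
fitted model typically needs fewer support vectors. The counts depend on
the random data above, so the comparison is skipped by doctest:
>>> wide = SVR(C=1.0, epsilon=0.8).fit(X, y)
>>> len(wide.support_) <= len(clf.support_) # doctest: +SKIP
True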
See also
--------
NuSVR
Support Vector Machine for regression implemented using libsvm
using a parameter to control the number of support vectors.
LinearSVR
Scalable Linear Support Vector Machine for regression
implemented using liblinear.
"""
def __init__(self, kernel='rbf', degree=3, gamma='auto', coef0=0.0,
tol=1e-3, C=1.0, epsilon=0.1, shrinking=True,
cache_size=200, verbose=False, max_iter=-1):
super(SVR, self).__init__(
'epsilon_svr', kernel=kernel, degree=degree, gamma=gamma,
coef0=coef0, tol=tol, C=C, nu=0., epsilon=epsilon, verbose=verbose,
shrinking=shrinking, probability=False, cache_size=cache_size,
class_weight=None, max_iter=max_iter, random_state=None)
class NuSVR(BaseLibSVM, RegressorMixin):
"""Nu Support Vector Regression.
Similar to NuSVC, for regression, uses a parameter nu to control
the number of support vectors. However, unlike NuSVC, where nu
replaces C, here nu replaces the parameter epsilon of epsilon-SVR.
The implementation is based on libsvm.
Read more in the :ref:`User Guide <svm_regression>`.
Parameters
----------
C : float, optional (default=1.0)
Penalty parameter C of the error term.
nu : float, optional
An upper bound on the fraction of training errors and a lower bound of
the fraction of support vectors. Should be in the interval (0, 1]. By
default 0.5 will be taken.
kernel : string, optional (default='rbf')
Specifies the kernel type to be used in the algorithm.
It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
a callable.
If none is given, 'rbf' will be used. If a callable is given it is
used to precompute the kernel matrix.
degree : int, optional (default=3)
Degree of the polynomial kernel function ('poly').
Ignored by all other kernels.
gamma : float, optional (default='auto')
Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.
If gamma is 'auto' then 1/n_features will be used instead.
coef0 : float, optional (default=0.0)
Independent term in kernel function.
It is only significant in 'poly' and 'sigmoid'.
shrinking : boolean, optional (default=True)
Whether to use the shrinking heuristic.
tol : float, optional (default=1e-3)
Tolerance for stopping criterion.
cache_size : float, optional
Specify the size of the kernel cache (in MB).
verbose : bool, default: False
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in libsvm that, if enabled, may not work
properly in a multithreaded context.
max_iter : int, optional (default=-1)
Hard limit on iterations within solver, or -1 for no limit.
Attributes
----------
support_ : array-like, shape = [n_SV]
Indices of support vectors.
support_vectors_ : array-like, shape = [nSV, n_features]
Support vectors.
dual_coef_ : array, shape = [1, n_SV]
Coefficients of the support vector in the decision function.
coef_ : array, shape = [1, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
`coef_` is a readonly property derived from `dual_coef_` and
`support_vectors_`.
intercept_ : array, shape = [1]
Constants in decision function.
Examples
--------
>>> from sklearn.svm import NuSVR
>>> import numpy as np
>>> n_samples, n_features = 10, 5
>>> np.random.seed(0)
>>> y = np.random.randn(n_samples)
>>> X = np.random.randn(n_samples, n_features)
>>> clf = NuSVR(C=1.0, nu=0.1)
>>> clf.fit(X, y) #doctest: +NORMALIZE_WHITESPACE
NuSVR(C=1.0, cache_size=200, coef0=0.0, degree=3, gamma='auto',
kernel='rbf', max_iter=-1, nu=0.1, shrinking=True, tol=0.001,
verbose=False)
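A rough sketch (not part of the original example) of `nu` as a lower
bound on the fraction of support vectors, i.e. roughly
``len(clf.support_) >= nu * n_samples``; skipped because the exact count
depends on the random data above:
>>> len(clf.support_) >= clf.nu * n_samples # doctest: +SKIP
True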
See also
--------
NuSVC
Support Vector Machine for classification implemented with libsvm
with a parameter to control the number of support vectors.
SVR
Epsilon Support Vector Machine for regression implemented with libsvm.
"""
def __init__(self, nu=0.5, C=1.0, kernel='rbf', degree=3,
gamma='auto', coef0=0.0, shrinking=True, tol=1e-3,
cache_size=200, verbose=False, max_iter=-1):
super(NuSVR, self).__init__(
'nu_svr', kernel=kernel, degree=degree, gamma=gamma, coef0=coef0,
tol=tol, C=C, nu=nu, epsilon=0., shrinking=shrinking,
probability=False, cache_size=cache_size, class_weight=None,
verbose=verbose, max_iter=max_iter, random_state=None)
class OneClassSVM(BaseLibSVM):
"""Unsupervised Outlier Detection.
Estimate the support of a high-dimensional distribution.
The implementation is based on libsvm.
Read more in the :ref:`User Guide <svm_outlier_detection>`.
Parameters
----------
kernel : string, optional (default='rbf')
Specifies the kernel type to be used in the algorithm.
It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
a callable.
If none is given, 'rbf' will be used. If a callable is given it is
used to precompute the kernel matrix.
nu : float, optional
An upper bound on the fraction of training
errors and a lower bound of the fraction of support
vectors. Should be in the interval (0, 1]. By default 0.5
will be taken.
degree : int, optional (default=3)
Degree of the polynomial kernel function ('poly').
Ignored by all other kernels.
gamma : float, optional (default='auto')
Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.
If gamma is 'auto' then 1/n_features will be used instead.
coef0 : float, optional (default=0.0)
Independent term in kernel function.
It is only significant in 'poly' and 'sigmoid'.
tol : float, optional
Tolerance for stopping criterion.
shrinking : boolean, optional
Whether to use the shrinking heuristic.
cache_size : float, optional
Specify the size of the kernel cache (in MB).
verbose : bool, default: False
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in libsvm that, if enabled, may not work
properly in a multithreaded context.
max_iter : int, optional (default=-1)
Hard limit on iterations within solver, or -1 for no limit.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data for probability estimation.
Attributes
----------
support_ : array-like, shape = [n_SV]
Indices of support vectors.
support_vectors_ : array-like, shape = [nSV, n_features]
Support vectors.
dual_coef_ : array, shape = [n_classes-1, n_SV]
Coefficients of the support vectors in the decision function.
coef_ : array, shape = [n_classes-1, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
`coef_` is a readonly property derived from `dual_coef_` and
`support_vectors_`.
intercept_ : array, shape = [n_classes-1]
Constants in decision function.
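Examples
--------
A minimal usage sketch (not part of the original docstring); the fitted
labels depend on the data, so the calls are skipped by doctest.
Predictions are +1 for points inside the learned region and -1 outside:
>>> import numpy as np
>>> from sklearn.svm import OneClassSVM
>>> X = np.array([[0., 0.], [0.1, 0.1], [0.2, 0.], [5., 5.]])
>>> clf = OneClassSVM(nu=0.25, kernel='rbf', gamma=0.5)
>>> clf.fit(X) # doctest: +SKIP
>>> clf.predict(X) # doctest: +SKIP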
"""
def __init__(self, kernel='rbf', degree=3, gamma='auto', coef0=0.0,
tol=1e-3, nu=0.5, shrinking=True, cache_size=200,
verbose=False, max_iter=-1, random_state=None):
super(OneClassSVM, self).__init__(
'one_class', kernel, degree, gamma, coef0, tol, 0., nu, 0.,
shrinking, False, cache_size, None, verbose, max_iter,
random_state)
def fit(self, X, y=None, sample_weight=None, **params):
"""
Detects the soft boundary of the set of samples X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Set of samples, where n_samples is the number of samples and
n_features is the number of features.
sample_weight : array-like, shape (n_samples,)
Per-sample weights. Rescale C per sample. Higher weights
force the classifier to put more emphasis on these points.
Returns
-------
self : object
Returns self.
Notes
-----
If X is not a C-ordered contiguous array it is copied.
"""
super(OneClassSVM, self).fit(X, np.ones(_num_samples(X)), sample_weight=sample_weight,
**params)
return self
def decision_function(self, X):
"""Distance of the samples X to the separating hyperplane.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Returns
-------
dec : array-like, shape (n_samples,)
Returns the decision function of the samples.
"""
dec = self._decision_function(X)
return dec
| bsd-3-clause |
mahak/spark | python/pyspark/sql/tests/test_arrow.py | 15 | 27974 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import datetime
import os
import threading
import time
import unittest
import warnings
from distutils.version import LooseVersion
from pyspark import SparkContext, SparkConf
from pyspark.sql import Row, SparkSession
from pyspark.sql.functions import rand, udf
from pyspark.sql.types import StructType, StringType, IntegerType, LongType, \
FloatType, DoubleType, DecimalType, DateType, TimestampType, BinaryType, StructField, \
ArrayType, NullType
from pyspark.testing.sqlutils import ReusedSQLTestCase, have_pandas, have_pyarrow, \
pandas_requirement_message, pyarrow_requirement_message
from pyspark.testing.utils import QuietTest
if have_pandas:
import pandas as pd
from pandas.testing import assert_frame_equal
if have_pyarrow:
import pyarrow as pa # noqa: F401
@unittest.skipIf(
not have_pandas or not have_pyarrow,
pandas_requirement_message or pyarrow_requirement_message) # type: ignore
class ArrowTests(ReusedSQLTestCase):
@classmethod
def setUpClass(cls):
from datetime import date, datetime
from decimal import Decimal
super(ArrowTests, cls).setUpClass()
cls.warnings_lock = threading.Lock()
# Synchronize default timezone between Python and Java
cls.tz_prev = os.environ.get("TZ", None) # save current tz if set
tz = "America/Los_Angeles"
os.environ["TZ"] = tz
time.tzset()
cls.spark.conf.set("spark.sql.session.timeZone", tz)
# Test fallback
cls.spark.conf.set("spark.sql.execution.arrow.enabled", "false")
assert cls.spark.conf.get("spark.sql.execution.arrow.pyspark.enabled") == "false"
cls.spark.conf.set("spark.sql.execution.arrow.enabled", "true")
assert cls.spark.conf.get("spark.sql.execution.arrow.pyspark.enabled") == "true"
cls.spark.conf.set("spark.sql.execution.arrow.fallback.enabled", "true")
assert cls.spark.conf.get("spark.sql.execution.arrow.pyspark.fallback.enabled") == "true"
cls.spark.conf.set("spark.sql.execution.arrow.fallback.enabled", "false")
assert cls.spark.conf.get("spark.sql.execution.arrow.pyspark.fallback.enabled") == "false"
# Enable Arrow optimization in this tests.
cls.spark.conf.set("spark.sql.execution.arrow.pyspark.enabled", "true")
# Disable fallback by default to easily detect the failures.
cls.spark.conf.set("spark.sql.execution.arrow.pyspark.fallback.enabled", "false")
cls.schema_wo_null = StructType([
StructField("1_str_t", StringType(), True),
StructField("2_int_t", IntegerType(), True),
StructField("3_long_t", LongType(), True),
StructField("4_float_t", FloatType(), True),
StructField("5_double_t", DoubleType(), True),
StructField("6_decimal_t", DecimalType(38, 18), True),
StructField("7_date_t", DateType(), True),
StructField("8_timestamp_t", TimestampType(), True),
StructField("9_binary_t", BinaryType(), True)])
cls.schema = cls.schema_wo_null.add("10_null_t", NullType(), True)
cls.data_wo_null = [
(u"a", 1, 10, 0.2, 2.0, Decimal("2.0"),
date(1969, 1, 1), datetime(1969, 1, 1, 1, 1, 1), bytearray(b"a")),
(u"b", 2, 20, 0.4, 4.0, Decimal("4.0"),
date(2012, 2, 2), datetime(2012, 2, 2, 2, 2, 2), bytearray(b"bb")),
(u"c", 3, 30, 0.8, 6.0, Decimal("6.0"),
date(2100, 3, 3), datetime(2100, 3, 3, 3, 3, 3), bytearray(b"ccc")),
(u"d", 4, 40, 1.0, 8.0, Decimal("8.0"),
date(2262, 4, 12), datetime(2262, 3, 3, 3, 3, 3), bytearray(b"dddd")),
]
cls.data = [tuple(list(d) + [None]) for d in cls.data_wo_null]
@classmethod
def tearDownClass(cls):
del os.environ["TZ"]
if cls.tz_prev is not None:
os.environ["TZ"] = cls.tz_prev
time.tzset()
super(ArrowTests, cls).tearDownClass()
def create_pandas_data_frame(self):
import numpy as np
data_dict = {}
for j, name in enumerate(self.schema.names):
data_dict[name] = [self.data[i][j] for i in range(len(self.data))]
# need to convert these to numpy types first
data_dict["2_int_t"] = np.int32(data_dict["2_int_t"])
data_dict["4_float_t"] = np.float32(data_dict["4_float_t"])
return pd.DataFrame(data=data_dict)
def test_toPandas_fallback_enabled(self):
ts = datetime.datetime(2015, 11, 1, 0, 30)
with self.sql_conf({"spark.sql.execution.arrow.pyspark.fallback.enabled": True}):
schema = StructType([StructField("a", ArrayType(TimestampType()), True)])
df = self.spark.createDataFrame([([ts],)], schema=schema)
with QuietTest(self.sc):
with self.warnings_lock:
with warnings.catch_warnings(record=True) as warns:
# we want the warnings to appear even if this test is run from a subclass
warnings.simplefilter("always")
pdf = df.toPandas()
# Catch and check the last UserWarning.
user_warns = [
warn.message for warn in warns if isinstance(warn.message, UserWarning)]
self.assertTrue(len(user_warns) > 0)
self.assertTrue(
"Attempting non-optimization" in str(user_warns[-1]))
assert_frame_equal(pdf, pd.DataFrame({"a": [[ts]]}))
def test_toPandas_fallback_disabled(self):
schema = StructType([StructField("a", ArrayType(TimestampType()), True)])
df = self.spark.createDataFrame([(None,)], schema=schema)
with QuietTest(self.sc):
with self.warnings_lock:
with self.assertRaisesRegex(Exception, 'Unsupported type'):
df.toPandas()
def test_null_conversion(self):
df_null = self.spark.createDataFrame(
[tuple([None for _ in range(len(self.data_wo_null[0]))])] + self.data_wo_null)
pdf = df_null.toPandas()
null_counts = pdf.isnull().sum().tolist()
self.assertTrue(all([c == 1 for c in null_counts]))
def _toPandas_arrow_toggle(self, df):
with self.sql_conf({"spark.sql.execution.arrow.pyspark.enabled": False}):
pdf = df.toPandas()
pdf_arrow = df.toPandas()
return pdf, pdf_arrow
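# A sketch of the same toggle outside this test harness (not part of the
# original tests; assumes an existing SparkSession `spark` and DataFrame `df`):
#
# spark.conf.set("spark.sql.execution.arrow.pyspark.enabled", "false")
# pdf_plain = df.toPandas()
# spark.conf.set("spark.sql.execution.arrow.pyspark.enabled", "true")
# pdf_arrow = df.toPandas()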
def test_toPandas_arrow_toggle(self):
df = self.spark.createDataFrame(self.data, schema=self.schema)
pdf, pdf_arrow = self._toPandas_arrow_toggle(df)
expected = self.create_pandas_data_frame()
assert_frame_equal(expected, pdf)
assert_frame_equal(expected, pdf_arrow)
def test_toPandas_respect_session_timezone(self):
df = self.spark.createDataFrame(self.data, schema=self.schema)
timezone = "America/Los_Angeles"
with self.sql_conf({"spark.sql.session.timeZone": timezone}):
pdf_la, pdf_arrow_la = self._toPandas_arrow_toggle(df)
assert_frame_equal(pdf_arrow_la, pdf_la)
timezone = "America/New_York"
with self.sql_conf({"spark.sql.session.timeZone": timezone}):
pdf_ny, pdf_arrow_ny = self._toPandas_arrow_toggle(df)
assert_frame_equal(pdf_arrow_ny, pdf_ny)
self.assertFalse(pdf_ny.equals(pdf_la))
from pyspark.sql.pandas.types import _check_series_convert_timestamps_local_tz
pdf_la_corrected = pdf_la.copy()
for field in self.schema:
if isinstance(field.dataType, TimestampType):
pdf_la_corrected[field.name] = _check_series_convert_timestamps_local_tz(
pdf_la_corrected[field.name], timezone)
assert_frame_equal(pdf_ny, pdf_la_corrected)
def test_pandas_round_trip(self):
pdf = self.create_pandas_data_frame()
df = self.spark.createDataFrame(self.data, schema=self.schema)
pdf_arrow = df.toPandas()
assert_frame_equal(pdf_arrow, pdf)
def test_pandas_self_destruct(self):
import pyarrow as pa
rows = 2 ** 10
cols = 4
expected_bytes = rows * cols * 8
df = self.spark.range(0, rows).select(*[rand() for _ in range(cols)])
# Test the self_destruct behavior by testing _collect_as_arrow directly
allocation_before = pa.total_allocated_bytes()
batches = df._collect_as_arrow(split_batches=True)
table = pa.Table.from_batches(batches)
del batches
pdf_split = table.to_pandas(self_destruct=True, split_blocks=True, use_threads=False)
allocation_after = pa.total_allocated_bytes()
difference = allocation_after - allocation_before
# Should be around 1x the data size (table should not hold on to any memory)
self.assertGreaterEqual(difference, 0.9 * expected_bytes)
self.assertLessEqual(difference, 1.1 * expected_bytes)
with self.sql_conf({"spark.sql.execution.arrow.pyspark.selfDestruct.enabled": False}):
no_self_destruct_pdf = df.toPandas()
# Note while memory usage is 2x data size here (both table and pdf hold on to
# memory), in this case Arrow still only tracks 1x worth of memory (since the
# batches are not allocated by Arrow in this case), so we can't make any
# assertions here
with self.sql_conf({"spark.sql.execution.arrow.pyspark.selfDestruct.enabled": True}):
self_destruct_pdf = df.toPandas()
assert_frame_equal(pdf_split, no_self_destruct_pdf)
assert_frame_equal(pdf_split, self_destruct_pdf)
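# Sketch (not part of the original tests) of opting into the self-destruct
# path from user code with the same config key exercised above; `spark` is
# an assumed SparkSession:
#
# spark.conf.set("spark.sql.execution.arrow.pyspark.selfDestruct.enabled", "true")
# pdf = spark.range(10 ** 6).toPandas()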
def test_filtered_frame(self):
df = self.spark.range(3).toDF("i")
pdf = df.filter("i < 0").toPandas()
self.assertEqual(len(pdf.columns), 1)
self.assertEqual(pdf.columns[0], "i")
self.assertTrue(pdf.empty)
def test_no_partition_frame(self):
schema = StructType([StructField("field1", StringType(), True)])
df = self.spark.createDataFrame(self.sc.emptyRDD(), schema)
pdf = df.toPandas()
self.assertEqual(len(pdf.columns), 1)
self.assertEqual(pdf.columns[0], "field1")
self.assertTrue(pdf.empty)
def test_propagates_spark_exception(self):
df = self.spark.range(3).toDF("i")
def raise_exception():
raise RuntimeError("My error")
exception_udf = udf(raise_exception, IntegerType())
df = df.withColumn("error", exception_udf())
with QuietTest(self.sc):
with self.assertRaisesRegex(Exception, 'My error'):
df.toPandas()
def _createDataFrame_toggle(self, pdf, schema=None):
with self.sql_conf({"spark.sql.execution.arrow.pyspark.enabled": False}):
df_no_arrow = self.spark.createDataFrame(pdf, schema=schema)
df_arrow = self.spark.createDataFrame(pdf, schema=schema)
return df_no_arrow, df_arrow
def test_createDataFrame_toggle(self):
pdf = self.create_pandas_data_frame()
df_no_arrow, df_arrow = self._createDataFrame_toggle(pdf, schema=self.schema)
self.assertEqual(df_no_arrow.collect(), df_arrow.collect())
def test_createDataFrame_respect_session_timezone(self):
from datetime import timedelta
pdf = self.create_pandas_data_frame()
timezone = "America/Los_Angeles"
with self.sql_conf({"spark.sql.session.timeZone": timezone}):
df_no_arrow_la, df_arrow_la = self._createDataFrame_toggle(pdf, schema=self.schema)
result_la = df_no_arrow_la.collect()
result_arrow_la = df_arrow_la.collect()
self.assertEqual(result_la, result_arrow_la)
timezone = "America/New_York"
with self.sql_conf({"spark.sql.session.timeZone": timezone}):
df_no_arrow_ny, df_arrow_ny = self._createDataFrame_toggle(pdf, schema=self.schema)
result_ny = df_no_arrow_ny.collect()
result_arrow_ny = df_arrow_ny.collect()
self.assertEqual(result_ny, result_arrow_ny)
self.assertNotEqual(result_ny, result_la)
# Correct result_la by adjusting 3 hours difference between Los Angeles and New York
result_la_corrected = [Row(**{k: v - timedelta(hours=3) if k == '8_timestamp_t' else v
for k, v in row.asDict().items()})
for row in result_la]
self.assertEqual(result_ny, result_la_corrected)
def test_createDataFrame_with_schema(self):
pdf = self.create_pandas_data_frame()
df = self.spark.createDataFrame(pdf, schema=self.schema)
self.assertEqual(self.schema, df.schema)
pdf_arrow = df.toPandas()
assert_frame_equal(pdf_arrow, pdf)
def test_createDataFrame_with_incorrect_schema(self):
pdf = self.create_pandas_data_frame()
fields = list(self.schema)
fields[5], fields[6] = fields[6], fields[5] # swap decimal with date
wrong_schema = StructType(fields)
with self.sql_conf({"spark.sql.execution.pandas.convertToArrowArraySafely": False}):
with QuietTest(self.sc):
with self.assertRaisesRegex(Exception, "[D|d]ecimal.*got.*date"):
self.spark.createDataFrame(pdf, schema=wrong_schema)
def test_createDataFrame_with_names(self):
pdf = self.create_pandas_data_frame()
new_names = list(map(str, range(len(self.schema.fieldNames()))))
# Test that schema as a list of column names gets applied
df = self.spark.createDataFrame(pdf, schema=list(new_names))
self.assertEqual(df.schema.fieldNames(), new_names)
# Test that schema as tuple of column names gets applied
df = self.spark.createDataFrame(pdf, schema=tuple(new_names))
self.assertEqual(df.schema.fieldNames(), new_names)
def test_createDataFrame_column_name_encoding(self):
pdf = pd.DataFrame({u'a': [1]})
columns = self.spark.createDataFrame(pdf).columns
self.assertTrue(isinstance(columns[0], str))
self.assertEqual(columns[0], 'a')
columns = self.spark.createDataFrame(pdf, [u'b']).columns
self.assertTrue(isinstance(columns[0], str))
self.assertEqual(columns[0], 'b')
def test_createDataFrame_with_single_data_type(self):
with QuietTest(self.sc):
with self.assertRaisesRegex(ValueError, ".*IntegerType.*not supported.*"):
self.spark.createDataFrame(pd.DataFrame({"a": [1]}), schema="int")
def test_createDataFrame_does_not_modify_input(self):
# Some series get converted for Spark to consume; this makes sure the input is unchanged
pdf = self.create_pandas_data_frame()
# Use a nanosecond value to make sure it is not truncated
pdf.iloc[0, 7] = pd.Timestamp(1)
# Integers with nulls will get NaNs filled with 0 and will be cast
pdf.iloc[1, 1] = None
pdf_copy = pdf.copy(deep=True)
self.spark.createDataFrame(pdf, schema=self.schema)
self.assertTrue(pdf.equals(pdf_copy))
def test_schema_conversion_roundtrip(self):
from pyspark.sql.pandas.types import from_arrow_schema, to_arrow_schema
arrow_schema = to_arrow_schema(self.schema)
schema_rt = from_arrow_schema(arrow_schema)
self.assertEqual(self.schema, schema_rt)
def test_createDataFrame_with_array_type(self):
pdf = pd.DataFrame({"a": [[1, 2], [3, 4]], "b": [[u"x", u"y"], [u"y", u"z"]]})
df, df_arrow = self._createDataFrame_toggle(pdf)
result = df.collect()
result_arrow = df_arrow.collect()
expected = [tuple(list(e) for e in rec) for rec in pdf.to_records(index=False)]
for r in range(len(expected)):
for e in range(len(expected[r])):
self.assertTrue(expected[r][e] == result_arrow[r][e] and
result[r][e] == result_arrow[r][e])
def test_toPandas_with_array_type(self):
expected = [([1, 2], [u"x", u"y"]), ([3, 4], [u"y", u"z"])]
array_schema = StructType([StructField("a", ArrayType(IntegerType())),
StructField("b", ArrayType(StringType()))])
df = self.spark.createDataFrame(expected, schema=array_schema)
pdf, pdf_arrow = self._toPandas_arrow_toggle(df)
result = [tuple(list(e) for e in rec) for rec in pdf.to_records(index=False)]
result_arrow = [tuple(list(e) for e in rec) for rec in pdf_arrow.to_records(index=False)]
for r in range(len(expected)):
for e in range(len(expected[r])):
self.assertTrue(expected[r][e] == result_arrow[r][e] and
result[r][e] == result_arrow[r][e])
def test_createDataFrame_with_map_type(self):
map_data = [{"a": 1}, {"b": 2, "c": 3}, {}, None, {"d": None}]
pdf = pd.DataFrame({"id": [0, 1, 2, 3, 4], "m": map_data})
schema = "id long, m map<string, long>"
with self.sql_conf({"spark.sql.execution.arrow.pyspark.enabled": False}):
df = self.spark.createDataFrame(pdf, schema=schema)
if LooseVersion(pa.__version__) < LooseVersion("2.0.0"):
with QuietTest(self.sc):
with self.assertRaisesRegex(Exception, "MapType.*only.*pyarrow 2.0.0"):
self.spark.createDataFrame(pdf, schema=schema)
else:
df_arrow = self.spark.createDataFrame(pdf, schema=schema)
result = df.collect()
result_arrow = df_arrow.collect()
self.assertEqual(len(result), len(result_arrow))
for row, row_arrow in zip(result, result_arrow):
i, m = row
_, m_arrow = row_arrow
self.assertEqual(m, map_data[i])
self.assertEqual(m_arrow, map_data[i])
def test_toPandas_with_map_type(self):
pdf = pd.DataFrame({"id": [0, 1, 2, 3],
"m": [{}, {"a": 1}, {"a": 1, "b": 2}, {"a": 1, "b": 2, "c": 3}]})
with self.sql_conf({"spark.sql.execution.arrow.pyspark.enabled": False}):
df = self.spark.createDataFrame(pdf, schema="id long, m map<string, long>")
if LooseVersion(pa.__version__) < LooseVersion("2.0.0"):
with QuietTest(self.sc):
with self.assertRaisesRegex(Exception, "MapType.*only.*pyarrow 2.0.0"):
df.toPandas()
else:
pdf_non, pdf_arrow = self._toPandas_arrow_toggle(df)
assert_frame_equal(pdf_arrow, pdf_non)
def test_toPandas_with_map_type_nulls(self):
pdf = pd.DataFrame({"id": [0, 1, 2, 3, 4],
"m": [{"a": 1}, {"b": 2, "c": 3}, {}, None, {"d": None}]})
with self.sql_conf({"spark.sql.execution.arrow.pyspark.enabled": False}):
df = self.spark.createDataFrame(pdf, schema="id long, m map<string, long>")
if LooseVersion(pa.__version__) < LooseVersion("2.0.0"):
with QuietTest(self.sc):
with self.assertRaisesRegex(Exception, "MapType.*only.*pyarrow 2.0.0"):
df.toPandas()
else:
pdf_non, pdf_arrow = self._toPandas_arrow_toggle(df)
assert_frame_equal(pdf_arrow, pdf_non)
def test_createDataFrame_with_int_col_names(self):
import numpy as np
pdf = pd.DataFrame(np.random.rand(4, 2))
df, df_arrow = self._createDataFrame_toggle(pdf)
pdf_col_names = [str(c) for c in pdf.columns]
self.assertEqual(pdf_col_names, df.columns)
self.assertEqual(pdf_col_names, df_arrow.columns)
def test_createDataFrame_fallback_enabled(self):
ts = datetime.datetime(2015, 11, 1, 0, 30)
with QuietTest(self.sc):
with self.sql_conf({"spark.sql.execution.arrow.pyspark.fallback.enabled": True}):
with warnings.catch_warnings(record=True) as warns:
# we want the warnings to appear even if this test is run from a subclass
warnings.simplefilter("always")
df = self.spark.createDataFrame(
pd.DataFrame({"a": [[ts]]}), "a: array<timestamp>")
# Catch and check the last UserWarning.
user_warns = [
warn.message for warn in warns if isinstance(warn.message, UserWarning)]
self.assertTrue(len(user_warns) > 0)
self.assertTrue(
"Attempting non-optimization" in str(user_warns[-1]))
self.assertEqual(df.collect(), [Row(a=[ts])])
def test_createDataFrame_fallback_disabled(self):
with QuietTest(self.sc):
with self.assertRaisesRegex(TypeError, 'Unsupported type'):
self.spark.createDataFrame(
pd.DataFrame({"a": [[datetime.datetime(2015, 11, 1, 0, 30)]]}),
"a: array<timestamp>")
# Regression test for SPARK-23314
def test_timestamp_dst(self):
# Daylight saving time for Los Angeles for 2015 is Sun, Nov 1 at 2:00 am
dt = [datetime.datetime(2015, 11, 1, 0, 30),
datetime.datetime(2015, 11, 1, 1, 30),
datetime.datetime(2015, 11, 1, 2, 30)]
pdf = pd.DataFrame({'time': dt})
df_from_python = self.spark.createDataFrame(dt, 'timestamp').toDF('time')
df_from_pandas = self.spark.createDataFrame(pdf)
assert_frame_equal(pdf, df_from_python.toPandas())
assert_frame_equal(pdf, df_from_pandas.toPandas())
# Regression test for SPARK-28003
def test_timestamp_nat(self):
dt = [pd.NaT, pd.Timestamp('2019-06-11'), None] * 100
pdf = pd.DataFrame({'time': dt})
df_no_arrow, df_arrow = self._createDataFrame_toggle(pdf)
assert_frame_equal(pdf, df_no_arrow.toPandas())
assert_frame_equal(pdf, df_arrow.toPandas())
def test_toPandas_batch_order(self):
def delay_first_part(partition_index, iterator):
if partition_index == 0:
time.sleep(0.1)
return iterator
# Collects Arrow RecordBatches out of order in driver JVM then re-orders in Python
def run_test(num_records, num_parts, max_records, use_delay=False):
df = self.spark.range(num_records, numPartitions=num_parts).toDF("a")
if use_delay:
df = df.rdd.mapPartitionsWithIndex(delay_first_part).toDF()
with self.sql_conf({"spark.sql.execution.arrow.maxRecordsPerBatch": max_records}):
pdf, pdf_arrow = self._toPandas_arrow_toggle(df)
assert_frame_equal(pdf, pdf_arrow)
cases = [
(1024, 512, 2), # Use large num partitions for more likely collecting out of order
(64, 8, 2, True), # Use delay in first partition to force collecting out of order
(64, 64, 1), # Test single batch per partition
(64, 1, 64), # Test single partition, single batch
(64, 1, 8), # Test single partition, multiple batches
(30, 7, 2), # Test different sized partitions
]
for case in cases:
run_test(*case)
def test_createDateFrame_with_category_type(self):
pdf = pd.DataFrame({"A": [u"a", u"b", u"c", u"a"]})
pdf["B"] = pdf["A"].astype('category')
category_first_element = dict(enumerate(pdf['B'].cat.categories))[0]
with self.sql_conf({"spark.sql.execution.arrow.pyspark.enabled": True}):
arrow_df = self.spark.createDataFrame(pdf)
arrow_type = arrow_df.dtypes[1][1]
result_arrow = arrow_df.toPandas()
arrow_first_category_element = result_arrow["B"][0]
with self.sql_conf({"spark.sql.execution.arrow.pyspark.enabled": False}):
df = self.spark.createDataFrame(pdf)
spark_type = df.dtypes[1][1]
result_spark = df.toPandas()
spark_first_category_element = result_spark["B"][0]
assert_frame_equal(result_spark, result_arrow)
# ensure original category elements are string
self.assertIsInstance(category_first_element, str)
# the Spark data frame and the Arrow-enabled data frame types must match pandas
self.assertEqual(spark_type, 'string')
self.assertEqual(arrow_type, 'string')
self.assertIsInstance(arrow_first_category_element, str)
self.assertIsInstance(spark_first_category_element, str)
def test_createDataFrame_with_float_index(self):
# SPARK-32098: float index should not produce duplicated or truncated Spark DataFrame
self.assertEqual(
self.spark.createDataFrame(
pd.DataFrame({'a': [1, 2, 3]}, index=[2., 3., 4.])).distinct().count(), 3)
def test_no_partition_toPandas(self):
# SPARK-32301: toPandas should work from a Spark DataFrame with no partitions
# Forward-ported from SPARK-32300.
pdf = self.spark.sparkContext.emptyRDD().toDF("col1 int").toPandas()
self.assertEqual(len(pdf), 0)
self.assertEqual(list(pdf.columns), ["col1"])
def test_createDataFrame_empty_partition(self):
pdf = pd.DataFrame({"c1": [1], "c2": ["string"]})
df = self.spark.createDataFrame(pdf)
self.assertEqual([Row(c1=1, c2='string')], df.collect())
self.assertGreater(self.spark.sparkContext.defaultParallelism, len(pdf))
@unittest.skipIf(
not have_pandas or not have_pyarrow,
pandas_requirement_message or pyarrow_requirement_message) # type: ignore
class MaxResultArrowTests(unittest.TestCase):
# These tests are separate as 'spark.driver.maxResultSize' configuration
# is a static configuration to Spark context.
@classmethod
def setUpClass(cls):
cls.spark = SparkSession(SparkContext(
'local[4]', cls.__name__, conf=SparkConf().set("spark.driver.maxResultSize", "10k")))
# Explicitly enable Arrow and disable fallback.
cls.spark.conf.set("spark.sql.execution.arrow.pyspark.enabled", "true")
cls.spark.conf.set("spark.sql.execution.arrow.pyspark.fallback.enabled", "false")
@classmethod
def tearDownClass(cls):
if hasattr(cls, "spark"):
cls.spark.stop()
def test_exception_by_max_results(self):
with self.assertRaisesRegex(Exception, "is bigger than"):
self.spark.range(0, 10000, 1, 100).toPandas()
class EncryptionArrowTests(ArrowTests):
@classmethod
def conf(cls):
return super(EncryptionArrowTests, cls).conf().set("spark.io.encryption.enabled", "true")
if __name__ == "__main__":
from pyspark.sql.tests.test_arrow import * # noqa: F401
try:
import xmlrunner # type: ignore
testRunner = xmlrunner.XMLTestRunner(output='target/test-reports', verbosity=2)
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
| apache-2.0 |
Clyde-fare/scikit-learn | examples/exercises/plot_cv_diabetes.py | 231 | 2527 | """
===============================================
Cross-validation on diabetes Dataset Exercise
===============================================
A tutorial exercise which uses cross-validation with linear models.
This exercise is used in the :ref:`cv_estimators_tut` part of the
:ref:`model_selection_tut` section of the :ref:`stat_learn_tut_index`.
"""
from __future__ import print_function
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import cross_validation, datasets, linear_model
diabetes = datasets.load_diabetes()
X = diabetes.data[:150]
y = diabetes.target[:150]
lasso = linear_model.Lasso()
alphas = np.logspace(-4, -.5, 30)
scores = list()
scores_std = list()
for alpha in alphas:
lasso.alpha = alpha
this_scores = cross_validation.cross_val_score(lasso, X, y, n_jobs=1)
scores.append(np.mean(this_scores))
scores_std.append(np.std(this_scores))
plt.figure(figsize=(4, 3))
plt.semilogx(alphas, scores)
# plot error lines showing +/- std. errors of the scores
plt.semilogx(alphas, np.array(scores) + np.array(scores_std) / np.sqrt(len(X)),
'b--')
plt.semilogx(alphas, np.array(scores) - np.array(scores_std) / np.sqrt(len(X)),
'b--')
plt.ylabel('CV score')
plt.xlabel('alpha')
plt.axhline(np.max(scores), linestyle='--', color='.5')
##############################################################################
# Bonus: how much can you trust the selection of alpha?
# To answer this question we use the LassoCV object that sets its alpha
# parameter automatically from the data by internal cross-validation (i.e. it
# performs cross-validation on the training data it receives).
# We use external cross-validation to see how much the automatically obtained
# alphas differ across different cross-validation folds.
lasso_cv = linear_model.LassoCV(alphas=alphas)
k_fold = cross_validation.KFold(len(X), 3)
print("Answer to the bonus question:",
"how much can you trust the selection of alpha?")
print()
print("Alpha parameters maximising the generalization score on different")
print("subsets of the data:")
for k, (train, test) in enumerate(k_fold):
lasso_cv.fit(X[train], y[train])
print("[fold {0}] alpha: {1:.5f}, score: {2:.5f}".
format(k, lasso_cv.alpha_, lasso_cv.score(X[test], y[test])))
print()
print("Answer: Not very much since we obtained different alphas for different")
print("subsets of the data and moreover, the scores for these alphas differ")
print("quite substantially.")
plt.show()
| bsd-3-clause |
rajul/mne-python | mne/viz/tests/test_decoding.py | 10 | 3823 | # Authors: Denis Engemann <[email protected]>
# Jean-Remi King <[email protected]>
#
# License: Simplified BSD
import os.path as op
import warnings
from nose.tools import assert_raises, assert_equals
import numpy as np
from mne.epochs import equalize_epoch_counts, concatenate_epochs
from mne.decoding import GeneralizationAcrossTime
from mne import io, Epochs, read_events, pick_types
from mne.utils import requires_sklearn, run_tests_if_main
import matplotlib
matplotlib.use('Agg') # for testing don't use X server
data_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data')
raw_fname = op.join(data_dir, 'test_raw.fif')
event_name = op.join(data_dir, 'test-eve.fif')
warnings.simplefilter('always') # enable b/c these tests throw warnings
def _get_data(tmin=-0.2, tmax=0.5, event_id=dict(aud_l=1, vis_l=3),
event_id_gen=dict(aud_l=2, vis_l=4), test_times=None):
"""Aux function for testing GAT viz"""
gat = GeneralizationAcrossTime()
raw = io.Raw(raw_fname, preload=False)
events = read_events(event_name)
picks = pick_types(raw.info, meg='mag', stim=False, ecg=False,
eog=False, exclude='bads')
picks = picks[1:13:3]
decim = 30
# Test on time generalization within one condition
with warnings.catch_warnings(record=True):
epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
baseline=(None, 0), preload=True, decim=decim)
epochs_list = [epochs[k] for k in event_id]
equalize_epoch_counts(epochs_list)
epochs = concatenate_epochs(epochs_list)
# Test default running
gat = GeneralizationAcrossTime(test_times=test_times)
gat.fit(epochs)
gat.score(epochs)
return gat
@requires_sklearn
def test_gat_plot_matrix():
"""Test GAT matrix plot"""
gat = _get_data()
gat.plot()
del gat.scores_
assert_raises(RuntimeError, gat.plot)
@requires_sklearn
def test_gat_plot_diagonal():
"""Test GAT diagonal plot"""
gat = _get_data()
gat.plot_diagonal()
del gat.scores_
assert_raises(RuntimeError, gat.plot)
@requires_sklearn
def test_gat_plot_times():
"""Test GAT times plot"""
gat = _get_data()
# test one line
gat.plot_times(gat.train_times_['times'][0])
# test multiple lines
gat.plot_times(gat.train_times_['times'])
# test multiple colors
n_times = len(gat.train_times_['times'])
colors = np.tile(['r', 'g', 'b'],
int(np.ceil(n_times / 3)))[:n_times]
gat.plot_times(gat.train_times_['times'], color=colors)
# test invalid time point
assert_raises(ValueError, gat.plot_times, -1.)
# test float type
assert_raises(ValueError, gat.plot_times, 1)
assert_raises(ValueError, gat.plot_times, 'diagonal')
del gat.scores_
assert_raises(RuntimeError, gat.plot)
def chance(ax):
return ax.get_children()[1].get_lines()[0].get_ydata()[0]
@requires_sklearn
def test_gat_chance_level():
"""Test GAT plot_times chance level"""
gat = _get_data()
ax = gat.plot_diagonal(chance=False)
ax = gat.plot_diagonal()
assert_equals(chance(ax), .5)
gat = _get_data(event_id=dict(aud_l=1, vis_l=3, aud_r=2, vis_r=4))
ax = gat.plot_diagonal()
assert_equals(chance(ax), .25)
ax = gat.plot_diagonal(chance=1.234)
assert_equals(chance(ax), 1.234)
assert_raises(ValueError, gat.plot_diagonal, chance='foo')
del gat.scores_
assert_raises(RuntimeError, gat.plot)
@requires_sklearn
def test_gat_plot_nonsquared():
"""Test GAT diagonal plot"""
gat = _get_data(test_times=dict(start=0.))
gat.plot()
ax = gat.plot_diagonal()
scores = ax.get_children()[1].get_lines()[2].get_ydata()
assert_equals(len(scores), len(gat.estimators_))
run_tests_if_main()
| bsd-3-clause |
plotly/python-api | packages/python/plotly/plotly/graph_objs/_image.py | 1 | 45495 | from plotly.basedatatypes import BaseTraceType as _BaseTraceType
import copy as _copy
class Image(_BaseTraceType):
# class properties
# --------------------
_parent_path_str = ""
_path_str = "image"
_valid_props = {
"colormodel",
"customdata",
"customdatasrc",
"dx",
"dy",
"hoverinfo",
"hoverinfosrc",
"hoverlabel",
"hovertemplate",
"hovertemplatesrc",
"hovertext",
"hovertextsrc",
"ids",
"idssrc",
"meta",
"metasrc",
"name",
"opacity",
"stream",
"text",
"textsrc",
"type",
"uid",
"uirevision",
"visible",
"x0",
"xaxis",
"y0",
"yaxis",
"z",
"zmax",
"zmin",
"zsrc",
}
# colormodel
# ----------
@property
def colormodel(self):
"""
Color model used to map the numerical color components
described in `z` into colors.
The 'colormodel' property is an enumeration that may be specified as:
- One of the following enumeration values:
['rgb', 'rgba', 'hsl', 'hsla']
Returns
-------
Any
"""
return self["colormodel"]
@colormodel.setter
def colormodel(self, val):
self["colormodel"] = val
# customdata
# ----------
@property
def customdata(self):
"""
Assigns extra data to each datum. This may be useful when
listening to hover, click and selection events. Note that
"scatter" traces also append customdata items in the markers'
DOM elements.
The 'customdata' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["customdata"]
@customdata.setter
def customdata(self, val):
self["customdata"] = val
# customdatasrc
# -------------
@property
def customdatasrc(self):
"""
Sets the source reference on Chart Studio Cloud for customdata
.
The 'customdatasrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["customdatasrc"]
@customdatasrc.setter
def customdatasrc(self, val):
self["customdatasrc"] = val
# dx
# --
@property
def dx(self):
"""
Set the pixel's horizontal size.
The 'dx' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["dx"]
@dx.setter
def dx(self, val):
self["dx"] = val
# dy
# --
@property
def dy(self):
"""
Set the pixel's vertical size
The 'dy' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["dy"]
@dy.setter
def dy(self, val):
self["dy"] = val
# hoverinfo
# ---------
@property
def hoverinfo(self):
"""
Determines which trace information appear on hover. If `none`
or `skip` are set, no information is displayed upon hovering.
But, if `none` is set, click and hover events are still fired.
The 'hoverinfo' property is a flaglist and may be specified
as a string containing:
- Any combination of ['x', 'y', 'z', 'color', 'name', 'text'] joined with '+' characters
(e.g. 'x+y')
OR exactly one of ['all', 'none', 'skip'] (e.g. 'skip')
- A list or array of the above
Returns
-------
Any|numpy.ndarray
"""
return self["hoverinfo"]
@hoverinfo.setter
def hoverinfo(self, val):
self["hoverinfo"] = val
# hoverinfosrc
# ------------
@property
def hoverinfosrc(self):
"""
Sets the source reference on Chart Studio Cloud for hoverinfo
.
The 'hoverinfosrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["hoverinfosrc"]
@hoverinfosrc.setter
def hoverinfosrc(self, val):
self["hoverinfosrc"] = val
# hoverlabel
# ----------
@property
def hoverlabel(self):
"""
The 'hoverlabel' property is an instance of Hoverlabel
that may be specified as:
- An instance of :class:`plotly.graph_objs.image.Hoverlabel`
- A dict of string/value properties that will be passed
to the Hoverlabel constructor
Supported dict properties:
align
Sets the horizontal alignment of the text
content within the hover label box. Has an
effect only if the hover label text spans two
or more lines.
alignsrc
Sets the source reference on Chart Studio Cloud
for align .
bgcolor
Sets the background color of the hover labels
for this trace
bgcolorsrc
Sets the source reference on Chart Studio Cloud
for bgcolor .
bordercolor
Sets the border color of the hover labels for
this trace.
bordercolorsrc
Sets the source reference on Chart Studio Cloud
for bordercolor .
font
Sets the font used in hover labels.
namelength
Sets the default length (in number of
characters) of the trace name in the hover
labels for all traces. -1 shows the whole name
regardless of length. 0-3 shows the first 0-3
characters, and an integer >3 will show the
whole name if it is less than that many
characters, but if it is longer, will truncate
to `namelength - 3` characters and add an
ellipsis.
namelengthsrc
Sets the source reference on Chart Studio Cloud
for namelength .
Returns
-------
plotly.graph_objs.image.Hoverlabel
"""
return self["hoverlabel"]
@hoverlabel.setter
def hoverlabel(self, val):
self["hoverlabel"] = val
# hovertemplate
# -------------
@property
def hovertemplate(self):
"""
Template string used for rendering the information that appears
in the hover box. Note that this will override `hoverinfo`.
Variables are inserted using %{variable}, for example "y:
%{y}". Numbers are formatted using d3-format's syntax
%{variable:d3-format}, for example "Price: %{y:$.2f}". See
https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format for details on
the formatting syntax. Dates are formatted using d3-time-
format's syntax %{variable|d3-time-format}, for example "Day:
%{2019-01-01|%A}". See https://github.com/d3/d3-3.x-api-
reference/blob/master/Time-Formatting.md#format for details on
the date formatting syntax. The variables available in
`hovertemplate` are the ones emitted as event data described at
https://plotly.com/javascript/plotlyjs-events/#event-data.
Additionally, every attribute that can be specified per-point
(the ones that are `arrayOk: true`) is available, as are the
variables `z`, `color` and `colormodel`. Anything contained in
tag `<extra>` is displayed in the secondary box, for example
"<extra>{fullData.name}</extra>". To hide the secondary box
completely, use an empty tag `<extra></extra>`.
The 'hovertemplate' property is a string and must be specified as:
- A string
- A number that will be converted to a string
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
str|numpy.ndarray
"""
return self["hovertemplate"]
@hovertemplate.setter
def hovertemplate(self, val):
self["hovertemplate"] = val
# hovertemplatesrc
# ----------------
@property
def hovertemplatesrc(self):
"""
Sets the source reference on Chart Studio Cloud for
hovertemplate .
The 'hovertemplatesrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["hovertemplatesrc"]
@hovertemplatesrc.setter
def hovertemplatesrc(self, val):
self["hovertemplatesrc"] = val
# hovertext
# ---------
@property
def hovertext(self):
"""
Same as `text`.
The 'hovertext' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["hovertext"]
@hovertext.setter
def hovertext(self, val):
self["hovertext"] = val
# hovertextsrc
# ------------
@property
def hovertextsrc(self):
"""
Sets the source reference on Chart Studio Cloud for hovertext
.
The 'hovertextsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["hovertextsrc"]
@hovertextsrc.setter
def hovertextsrc(self, val):
self["hovertextsrc"] = val
# ids
# ---
@property
def ids(self):
"""
Assigns id labels to each datum. These ids are used for object
constancy of data points during animation. Should be an array
of strings, not numbers or any other type.
The 'ids' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["ids"]
@ids.setter
def ids(self, val):
self["ids"] = val
# idssrc
# ------
@property
def idssrc(self):
"""
Sets the source reference on Chart Studio Cloud for ids .
The 'idssrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["idssrc"]
@idssrc.setter
def idssrc(self, val):
self["idssrc"] = val
# meta
# ----
@property
def meta(self):
"""
Assigns extra meta information associated with this trace that
can be used in various text attributes. Attributes such as
trace `name`, graph, axis and colorbar `title.text`, annotation
`text`, `rangeselector`, `updatemenus` and `sliders` `label`
text all support `meta`. To access the trace `meta` values in
an attribute in the same trace, simply use `%{meta[i]}` where
`i` is the index or key of the `meta` item in question. To
access trace `meta` in layout attributes, use
`%{data[n].meta[i]}` where `i` is the index or key of the
`meta` and `n` is the trace index.
The 'meta' property accepts values of any type
Returns
-------
Any|numpy.ndarray
"""
return self["meta"]
@meta.setter
def meta(self, val):
self["meta"] = val
# metasrc
# -------
@property
def metasrc(self):
"""
Sets the source reference on Chart Studio Cloud for meta .
The 'metasrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["metasrc"]
@metasrc.setter
def metasrc(self, val):
self["metasrc"] = val
# name
# ----
@property
def name(self):
"""
Sets the trace name. The trace name appears as the legend item
and on hover.
The 'name' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["name"]
@name.setter
def name(self, val):
self["name"] = val
# opacity
# -------
@property
def opacity(self):
"""
Sets the opacity of the trace.
The 'opacity' property is a number and may be specified as:
- An int or float in the interval [0, 1]
Returns
-------
int|float
"""
return self["opacity"]
@opacity.setter
def opacity(self, val):
self["opacity"] = val
# stream
# ------
@property
def stream(self):
"""
The 'stream' property is an instance of Stream
that may be specified as:
- An instance of :class:`plotly.graph_objs.image.Stream`
- A dict of string/value properties that will be passed
to the Stream constructor
Supported dict properties:
maxpoints
Sets the maximum number of points to keep on
the plots from an incoming stream. If
`maxpoints` is set to 50, only the newest 50
points will be displayed on the plot.
token
The stream id number links a data trace on a
plot with a stream. See https://chart-
studio.plotly.com/settings for more details.
Returns
-------
plotly.graph_objs.image.Stream
"""
return self["stream"]
@stream.setter
def stream(self, val):
self["stream"] = val
# text
# ----
@property
def text(self):
"""
Sets the text elements associated with each z value.
The 'text' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["text"]
@text.setter
def text(self, val):
self["text"] = val
# textsrc
# -------
@property
def textsrc(self):
"""
Sets the source reference on Chart Studio Cloud for text .
The 'textsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["textsrc"]
@textsrc.setter
def textsrc(self, val):
self["textsrc"] = val
# uid
# ---
@property
def uid(self):
"""
        Assign an id to this trace. Use this to provide object
constancy between traces during animations and transitions.
The 'uid' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["uid"]
@uid.setter
def uid(self, val):
self["uid"] = val
# uirevision
# ----------
@property
def uirevision(self):
"""
Controls persistence of some user-driven changes to the trace:
`constraintrange` in `parcoords` traces, as well as some
`editable: true` modifications such as `name` and
`colorbar.title`. Defaults to `layout.uirevision`. Note that
other user-driven trace attribute changes are controlled by
`layout` attributes: `trace.visible` is controlled by
`layout.legend.uirevision`, `selectedpoints` is controlled by
`layout.selectionrevision`, and `colorbar.(x|y)` (accessible
with `config: {editable: true}`) is controlled by
`layout.editrevision`. Trace changes are tracked by `uid`,
which only falls back on trace index if no `uid` is provided.
So if your app can add/remove traces before the end of the
`data` array, such that the same trace has a different index,
you can still preserve user-driven changes if you give each
trace a `uid` that stays with it as it moves.
The 'uirevision' property accepts values of any type
Returns
-------
Any
"""
return self["uirevision"]
@uirevision.setter
def uirevision(self, val):
self["uirevision"] = val
# visible
# -------
@property
def visible(self):
"""
Determines whether or not this trace is visible. If
"legendonly", the trace is not drawn, but can appear as a
legend item (provided that the legend itself is visible).
The 'visible' property is an enumeration that may be specified as:
- One of the following enumeration values:
[True, False, 'legendonly']
Returns
-------
Any
"""
return self["visible"]
@visible.setter
def visible(self, val):
self["visible"] = val
# x0
# --
@property
def x0(self):
"""
Set the image's x position.
The 'x0' property accepts values of any type
Returns
-------
Any
"""
return self["x0"]
@x0.setter
def x0(self, val):
self["x0"] = val
# xaxis
# -----
@property
def xaxis(self):
"""
Sets a reference between this trace's x coordinates and a 2D
cartesian x axis. If "x" (the default value), the x coordinates
refer to `layout.xaxis`. If "x2", the x coordinates refer to
`layout.xaxis2`, and so on.
The 'xaxis' property is an identifier of a particular
subplot, of type 'x', that may be specified as the string 'x'
optionally followed by an integer >= 1
(e.g. 'x', 'x1', 'x2', 'x3', etc.)
Returns
-------
str
"""
return self["xaxis"]
@xaxis.setter
def xaxis(self, val):
self["xaxis"] = val
# y0
# --
@property
def y0(self):
"""
Set the image's y position.
The 'y0' property accepts values of any type
Returns
-------
Any
"""
return self["y0"]
@y0.setter
def y0(self, val):
self["y0"] = val
# yaxis
# -----
@property
def yaxis(self):
"""
Sets a reference between this trace's y coordinates and a 2D
cartesian y axis. If "y" (the default value), the y coordinates
refer to `layout.yaxis`. If "y2", the y coordinates refer to
`layout.yaxis2`, and so on.
The 'yaxis' property is an identifier of a particular
subplot, of type 'y', that may be specified as the string 'y'
optionally followed by an integer >= 1
(e.g. 'y', 'y1', 'y2', 'y3', etc.)
Returns
-------
str
"""
return self["yaxis"]
@yaxis.setter
def yaxis(self, val):
self["yaxis"] = val
# z
# -
@property
def z(self):
"""
A 2-dimensional array in which each element is an array of 3 or
4 numbers representing a color.
The 'z' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["z"]
@z.setter
def z(self, val):
self["z"] = val
# zmax
# ----
@property
def zmax(self):
"""
Array defining the higher bound for each color component. Note
that the default value will depend on the colormodel. For the
`rgb` colormodel, it is [255, 255, 255]. For the `rgba`
colormodel, it is [255, 255, 255, 1]. For the `hsl` colormodel,
it is [360, 100, 100]. For the `hsla` colormodel, it is [360,
100, 100, 1].
The 'zmax' property is an info array that may be specified as:
* a list or tuple of 4 elements where:
(0) The 'zmax[0]' property is a number and may be specified as:
- An int or float
(1) The 'zmax[1]' property is a number and may be specified as:
- An int or float
(2) The 'zmax[2]' property is a number and may be specified as:
- An int or float
(3) The 'zmax[3]' property is a number and may be specified as:
- An int or float
Returns
-------
list
"""
return self["zmax"]
@zmax.setter
def zmax(self, val):
self["zmax"] = val
# zmin
# ----
@property
def zmin(self):
"""
Array defining the lower bound for each color component. Note
that the default value will depend on the colormodel. For the
`rgb` colormodel, it is [0, 0, 0]. For the `rgba` colormodel,
it is [0, 0, 0, 0]. For the `hsl` colormodel, it is [0, 0, 0].
For the `hsla` colormodel, it is [0, 0, 0, 0].
The 'zmin' property is an info array that may be specified as:
* a list or tuple of 4 elements where:
(0) The 'zmin[0]' property is a number and may be specified as:
- An int or float
(1) The 'zmin[1]' property is a number and may be specified as:
- An int or float
(2) The 'zmin[2]' property is a number and may be specified as:
- An int or float
(3) The 'zmin[3]' property is a number and may be specified as:
- An int or float
Returns
-------
list
"""
return self["zmin"]
@zmin.setter
def zmin(self, val):
self["zmin"] = val
# zsrc
# ----
@property
def zsrc(self):
"""
Sets the source reference on Chart Studio Cloud for z .
The 'zsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["zsrc"]
@zsrc.setter
def zsrc(self, val):
self["zsrc"] = val
# type
# ----
@property
def type(self):
return self._props["type"]
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
colormodel
Color model used to map the numerical color components
described in `z` into colors.
customdata
            Assigns extra data to each datum. This may be useful when
            listening to hover, click and selection events. Note
            that "scatter" traces also append customdata items to
            the markers DOM elements
customdatasrc
Sets the source reference on Chart Studio Cloud for
customdata .
dx
Set the pixel's horizontal size.
dy
            Set the pixel's vertical size.
hoverinfo
            Determines which trace information appears on hover. If
`none` or `skip` are set, no information is displayed
upon hovering. But, if `none` is set, click and hover
events are still fired.
hoverinfosrc
Sets the source reference on Chart Studio Cloud for
hoverinfo .
hoverlabel
:class:`plotly.graph_objects.image.Hoverlabel` instance
or dict with compatible properties
hovertemplate
Template string used for rendering the information that
            appears in the hover box. Note that this will override
`hoverinfo`. Variables are inserted using %{variable},
for example "y: %{y}". Numbers are formatted using
d3-format's syntax %{variable:d3-format}, for example
"Price: %{y:$.2f}". https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format for
details on the formatting syntax. Dates are formatted
using d3-time-format's syntax %{variable|d3-time-
format}, for example "Day: %{2019-01-01|%A}".
https://github.com/d3/d3-3.x-api-
reference/blob/master/Time-Formatting.md#format for
details on the date formatting syntax. The variables
available in `hovertemplate` are the ones emitted as
event data described at this link
https://plotly.com/javascript/plotlyjs-events/#event-
            data. Additionally, every attribute that can be
            specified per-point (the ones that are `arrayOk: true`)
            is available, as are the variables `z`, `color` and `colormodel`.
Anything contained in tag `<extra>` is displayed in the
secondary box, for example
"<extra>{fullData.name}</extra>". To hide the secondary
box completely, use an empty tag `<extra></extra>`.
hovertemplatesrc
Sets the source reference on Chart Studio Cloud for
hovertemplate .
hovertext
Same as `text`.
hovertextsrc
Sets the source reference on Chart Studio Cloud for
hovertext .
ids
            Assigns id labels to each datum. These ids provide object
            constancy for data points during animation. Should be an
array of strings, not numbers or any other type.
idssrc
Sets the source reference on Chart Studio Cloud for
ids .
meta
Assigns extra meta information associated with this
trace that can be used in various text attributes.
Attributes such as trace `name`, graph, axis and
            colorbar `title.text`, annotation `text`,
            `rangeselector`, `updatemenus` and `sliders` `label`
text all support `meta`. To access the trace `meta`
values in an attribute in the same trace, simply use
`%{meta[i]}` where `i` is the index or key of the
`meta` item in question. To access trace `meta` in
            layout attributes, use `%{data[n].meta[i]}` where `i`
is the index or key of the `meta` and `n` is the trace
index.
metasrc
Sets the source reference on Chart Studio Cloud for
meta .
name
            Sets the trace name. The trace name appears as the
legend item and on hover.
opacity
Sets the opacity of the trace.
stream
:class:`plotly.graph_objects.image.Stream` instance or
dict with compatible properties
text
Sets the text elements associated with each z value.
textsrc
Sets the source reference on Chart Studio Cloud for
text .
uid
            Assign an id to this trace. Use this to provide object
constancy between traces during animations and
transitions.
uirevision
Controls persistence of some user-driven changes to the
trace: `constraintrange` in `parcoords` traces, as well
as some `editable: true` modifications such as `name`
and `colorbar.title`. Defaults to `layout.uirevision`.
Note that other user-driven trace attribute changes are
controlled by `layout` attributes: `trace.visible` is
controlled by `layout.legend.uirevision`,
`selectedpoints` is controlled by
`layout.selectionrevision`, and `colorbar.(x|y)`
(accessible with `config: {editable: true}`) is
controlled by `layout.editrevision`. Trace changes are
tracked by `uid`, which only falls back on trace index
if no `uid` is provided. So if your app can add/remove
traces before the end of the `data` array, such that
the same trace has a different index, you can still
preserve user-driven changes if you give each trace a
`uid` that stays with it as it moves.
visible
Determines whether or not this trace is visible. If
"legendonly", the trace is not drawn, but can appear as
a legend item (provided that the legend itself is
visible).
x0
Set the image's x position.
xaxis
Sets a reference between this trace's x coordinates and
a 2D cartesian x axis. If "x" (the default value), the
x coordinates refer to `layout.xaxis`. If "x2", the x
coordinates refer to `layout.xaxis2`, and so on.
y0
Set the image's y position.
yaxis
Sets a reference between this trace's y coordinates and
a 2D cartesian y axis. If "y" (the default value), the
y coordinates refer to `layout.yaxis`. If "y2", the y
coordinates refer to `layout.yaxis2`, and so on.
z
A 2-dimensional array in which each element is an array
of 3 or 4 numbers representing a color.
zmax
Array defining the higher bound for each color
component. Note that the default value will depend on
the colormodel. For the `rgb` colormodel, it is [255,
255, 255]. For the `rgba` colormodel, it is [255, 255,
255, 1]. For the `hsl` colormodel, it is [360, 100,
100]. For the `hsla` colormodel, it is [360, 100, 100,
1].
zmin
Array defining the lower bound for each color
component. Note that the default value will depend on
the colormodel. For the `rgb` colormodel, it is [0, 0,
0]. For the `rgba` colormodel, it is [0, 0, 0, 0]. For
the `hsl` colormodel, it is [0, 0, 0]. For the `hsla`
colormodel, it is [0, 0, 0, 0].
zsrc
Sets the source reference on Chart Studio Cloud for z
.
"""
def __init__(
self,
arg=None,
colormodel=None,
customdata=None,
customdatasrc=None,
dx=None,
dy=None,
hoverinfo=None,
hoverinfosrc=None,
hoverlabel=None,
hovertemplate=None,
hovertemplatesrc=None,
hovertext=None,
hovertextsrc=None,
ids=None,
idssrc=None,
meta=None,
metasrc=None,
name=None,
opacity=None,
stream=None,
text=None,
textsrc=None,
uid=None,
uirevision=None,
visible=None,
x0=None,
xaxis=None,
y0=None,
yaxis=None,
z=None,
zmax=None,
zmin=None,
zsrc=None,
**kwargs
):
"""
Construct a new Image object
Display an image, i.e. data on a 2D regular raster. By default,
when an image is displayed in a subplot, its y axis will be
reversed (ie. `autorange: 'reversed'`), constrained to the
domain (ie. `constrain: 'domain'`) and it will have the same
        scale as its x axis (i.e. `scaleanchor: 'x'`) in order for
pixels to be rendered as squares.
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of :class:`plotly.graph_objs.Image`
colormodel
Color model used to map the numerical color components
described in `z` into colors.
customdata
            Assigns extra data to each datum. This may be useful when
            listening to hover, click and selection events. Note
            that "scatter" traces also append customdata items to
            the markers DOM elements
customdatasrc
Sets the source reference on Chart Studio Cloud for
customdata .
dx
Set the pixel's horizontal size.
dy
            Set the pixel's vertical size.
hoverinfo
            Determines which trace information appears on hover. If
`none` or `skip` are set, no information is displayed
upon hovering. But, if `none` is set, click and hover
events are still fired.
hoverinfosrc
Sets the source reference on Chart Studio Cloud for
hoverinfo .
hoverlabel
:class:`plotly.graph_objects.image.Hoverlabel` instance
or dict with compatible properties
hovertemplate
Template string used for rendering the information that
            appears in the hover box. Note that this will override
`hoverinfo`. Variables are inserted using %{variable},
for example "y: %{y}". Numbers are formatted using
d3-format's syntax %{variable:d3-format}, for example
"Price: %{y:$.2f}". https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format for
details on the formatting syntax. Dates are formatted
using d3-time-format's syntax %{variable|d3-time-
format}, for example "Day: %{2019-01-01|%A}".
https://github.com/d3/d3-3.x-api-
reference/blob/master/Time-Formatting.md#format for
details on the date formatting syntax. The variables
available in `hovertemplate` are the ones emitted as
event data described at this link
https://plotly.com/javascript/plotlyjs-events/#event-
            data. Additionally, every attribute that can be
            specified per-point (the ones that are `arrayOk: true`)
            is available, as are the variables `z`, `color` and `colormodel`.
Anything contained in tag `<extra>` is displayed in the
secondary box, for example
"<extra>{fullData.name}</extra>". To hide the secondary
box completely, use an empty tag `<extra></extra>`.
hovertemplatesrc
Sets the source reference on Chart Studio Cloud for
hovertemplate .
hovertext
Same as `text`.
hovertextsrc
Sets the source reference on Chart Studio Cloud for
hovertext .
ids
            Assigns id labels to each datum. These ids provide object
            constancy for data points during animation. Should be an
array of strings, not numbers or any other type.
idssrc
Sets the source reference on Chart Studio Cloud for
ids .
meta
Assigns extra meta information associated with this
trace that can be used in various text attributes.
Attributes such as trace `name`, graph, axis and
            colorbar `title.text`, annotation `text`,
            `rangeselector`, `updatemenus` and `sliders` `label`
text all support `meta`. To access the trace `meta`
values in an attribute in the same trace, simply use
`%{meta[i]}` where `i` is the index or key of the
`meta` item in question. To access trace `meta` in
            layout attributes, use `%{data[n].meta[i]}` where `i`
is the index or key of the `meta` and `n` is the trace
index.
metasrc
Sets the source reference on Chart Studio Cloud for
meta .
name
            Sets the trace name. The trace name appears as the
legend item and on hover.
opacity
Sets the opacity of the trace.
stream
:class:`plotly.graph_objects.image.Stream` instance or
dict with compatible properties
text
Sets the text elements associated with each z value.
textsrc
Sets the source reference on Chart Studio Cloud for
text .
uid
            Assign an id to this trace. Use this to provide object
constancy between traces during animations and
transitions.
uirevision
Controls persistence of some user-driven changes to the
trace: `constraintrange` in `parcoords` traces, as well
as some `editable: true` modifications such as `name`
and `colorbar.title`. Defaults to `layout.uirevision`.
Note that other user-driven trace attribute changes are
controlled by `layout` attributes: `trace.visible` is
controlled by `layout.legend.uirevision`,
`selectedpoints` is controlled by
`layout.selectionrevision`, and `colorbar.(x|y)`
(accessible with `config: {editable: true}`) is
controlled by `layout.editrevision`. Trace changes are
tracked by `uid`, which only falls back on trace index
if no `uid` is provided. So if your app can add/remove
traces before the end of the `data` array, such that
the same trace has a different index, you can still
preserve user-driven changes if you give each trace a
`uid` that stays with it as it moves.
visible
Determines whether or not this trace is visible. If
"legendonly", the trace is not drawn, but can appear as
a legend item (provided that the legend itself is
visible).
x0
Set the image's x position.
xaxis
Sets a reference between this trace's x coordinates and
a 2D cartesian x axis. If "x" (the default value), the
x coordinates refer to `layout.xaxis`. If "x2", the x
coordinates refer to `layout.xaxis2`, and so on.
y0
Set the image's y position.
yaxis
Sets a reference between this trace's y coordinates and
a 2D cartesian y axis. If "y" (the default value), the
y coordinates refer to `layout.yaxis`. If "y2", the y
coordinates refer to `layout.yaxis2`, and so on.
z
A 2-dimensional array in which each element is an array
of 3 or 4 numbers representing a color.
zmax
Array defining the higher bound for each color
component. Note that the default value will depend on
the colormodel. For the `rgb` colormodel, it is [255,
255, 255]. For the `rgba` colormodel, it is [255, 255,
255, 1]. For the `hsl` colormodel, it is [360, 100,
100]. For the `hsla` colormodel, it is [360, 100, 100,
1].
zmin
Array defining the lower bound for each color
component. Note that the default value will depend on
the colormodel. For the `rgb` colormodel, it is [0, 0,
0]. For the `rgba` colormodel, it is [0, 0, 0, 0]. For
the `hsl` colormodel, it is [0, 0, 0]. For the `hsla`
colormodel, it is [0, 0, 0, 0].
zsrc
Sets the source reference on Chart Studio Cloud for z
.
Returns
-------
Image
"""
super(Image, self).__init__("image")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.Image
constructor must be a dict or
an instance of :class:`plotly.graph_objs.Image`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("colormodel", None)
_v = colormodel if colormodel is not None else _v
if _v is not None:
self["colormodel"] = _v
_v = arg.pop("customdata", None)
_v = customdata if customdata is not None else _v
if _v is not None:
self["customdata"] = _v
_v = arg.pop("customdatasrc", None)
_v = customdatasrc if customdatasrc is not None else _v
if _v is not None:
self["customdatasrc"] = _v
_v = arg.pop("dx", None)
_v = dx if dx is not None else _v
if _v is not None:
self["dx"] = _v
_v = arg.pop("dy", None)
_v = dy if dy is not None else _v
if _v is not None:
self["dy"] = _v
_v = arg.pop("hoverinfo", None)
_v = hoverinfo if hoverinfo is not None else _v
if _v is not None:
self["hoverinfo"] = _v
_v = arg.pop("hoverinfosrc", None)
_v = hoverinfosrc if hoverinfosrc is not None else _v
if _v is not None:
self["hoverinfosrc"] = _v
_v = arg.pop("hoverlabel", None)
_v = hoverlabel if hoverlabel is not None else _v
if _v is not None:
self["hoverlabel"] = _v
_v = arg.pop("hovertemplate", None)
_v = hovertemplate if hovertemplate is not None else _v
if _v is not None:
self["hovertemplate"] = _v
_v = arg.pop("hovertemplatesrc", None)
_v = hovertemplatesrc if hovertemplatesrc is not None else _v
if _v is not None:
self["hovertemplatesrc"] = _v
_v = arg.pop("hovertext", None)
_v = hovertext if hovertext is not None else _v
if _v is not None:
self["hovertext"] = _v
_v = arg.pop("hovertextsrc", None)
_v = hovertextsrc if hovertextsrc is not None else _v
if _v is not None:
self["hovertextsrc"] = _v
_v = arg.pop("ids", None)
_v = ids if ids is not None else _v
if _v is not None:
self["ids"] = _v
_v = arg.pop("idssrc", None)
_v = idssrc if idssrc is not None else _v
if _v is not None:
self["idssrc"] = _v
_v = arg.pop("meta", None)
_v = meta if meta is not None else _v
if _v is not None:
self["meta"] = _v
_v = arg.pop("metasrc", None)
_v = metasrc if metasrc is not None else _v
if _v is not None:
self["metasrc"] = _v
_v = arg.pop("name", None)
_v = name if name is not None else _v
if _v is not None:
self["name"] = _v
_v = arg.pop("opacity", None)
_v = opacity if opacity is not None else _v
if _v is not None:
self["opacity"] = _v
_v = arg.pop("stream", None)
_v = stream if stream is not None else _v
if _v is not None:
self["stream"] = _v
_v = arg.pop("text", None)
_v = text if text is not None else _v
if _v is not None:
self["text"] = _v
_v = arg.pop("textsrc", None)
_v = textsrc if textsrc is not None else _v
if _v is not None:
self["textsrc"] = _v
_v = arg.pop("uid", None)
_v = uid if uid is not None else _v
if _v is not None:
self["uid"] = _v
_v = arg.pop("uirevision", None)
_v = uirevision if uirevision is not None else _v
if _v is not None:
self["uirevision"] = _v
_v = arg.pop("visible", None)
_v = visible if visible is not None else _v
if _v is not None:
self["visible"] = _v
_v = arg.pop("x0", None)
_v = x0 if x0 is not None else _v
if _v is not None:
self["x0"] = _v
_v = arg.pop("xaxis", None)
_v = xaxis if xaxis is not None else _v
if _v is not None:
self["xaxis"] = _v
_v = arg.pop("y0", None)
_v = y0 if y0 is not None else _v
if _v is not None:
self["y0"] = _v
_v = arg.pop("yaxis", None)
_v = yaxis if yaxis is not None else _v
if _v is not None:
self["yaxis"] = _v
_v = arg.pop("z", None)
_v = z if z is not None else _v
if _v is not None:
self["z"] = _v
_v = arg.pop("zmax", None)
_v = zmax if zmax is not None else _v
if _v is not None:
self["zmax"] = _v
_v = arg.pop("zmin", None)
_v = zmin if zmin is not None else _v
if _v is not None:
self["zmin"] = _v
_v = arg.pop("zsrc", None)
_v = zsrc if zsrc is not None else _v
if _v is not None:
self["zsrc"] = _v
# Read-only literals
# ------------------
self._props["type"] = "image"
arg.pop("type", None)
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
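# A minimal usage sketch (added for illustration; not part of the generated
# plotly module). It assumes plotly is installed and importable as
# plotly.graph_objects; the 2x2 RGB pixel values are made up for the example.
if __name__ == "__main__":
    import plotly.graph_objects as go

    example_fig = go.Figure(
        go.Image(
            z=[[[255, 0, 0], [0, 255, 0]],
               [[0, 0, 255], [255, 255, 255]]],
            colormodel="rgb",
            hovertemplate="x: %{x}, y: %{y}<extra></extra>",
        )
    )
    example_fig.show()  # renders the two-by-two pixel image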
| mit |
Djabbz/scikit-learn | sklearn/datasets/tests/test_mldata.py | 384 | 5221 | """Test functionality of mldata fetching utilities."""
import os
import shutil
import tempfile
import scipy as sp
from sklearn import datasets
from sklearn.datasets import mldata_filename, fetch_mldata
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import assert_not_in
from sklearn.utils.testing import mock_mldata_urlopen
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import with_setup
from sklearn.utils.testing import assert_array_equal
tmpdir = None
def setup_tmpdata():
# create temporary dir
global tmpdir
tmpdir = tempfile.mkdtemp()
os.makedirs(os.path.join(tmpdir, 'mldata'))
def teardown_tmpdata():
# remove temporary dir
if tmpdir is not None:
shutil.rmtree(tmpdir)
def test_mldata_filename():
cases = [('datasets-UCI iris', 'datasets-uci-iris'),
('news20.binary', 'news20binary'),
('book-crossing-ratings-1.0', 'book-crossing-ratings-10'),
('Nile Water Level', 'nile-water-level'),
('MNIST (original)', 'mnist-original')]
for name, desired in cases:
assert_equal(mldata_filename(name), desired)
@with_setup(setup_tmpdata, teardown_tmpdata)
def test_download():
"""Test that fetch_mldata is able to download and cache a data set."""
_urlopen_ref = datasets.mldata.urlopen
datasets.mldata.urlopen = mock_mldata_urlopen({
'mock': {
'label': sp.ones((150,)),
'data': sp.ones((150, 4)),
},
})
try:
mock = fetch_mldata('mock', data_home=tmpdir)
for n in ["COL_NAMES", "DESCR", "target", "data"]:
assert_in(n, mock)
assert_equal(mock.target.shape, (150,))
assert_equal(mock.data.shape, (150, 4))
assert_raises(datasets.mldata.HTTPError,
fetch_mldata, 'not_existing_name')
finally:
datasets.mldata.urlopen = _urlopen_ref
@with_setup(setup_tmpdata, teardown_tmpdata)
def test_fetch_one_column():
_urlopen_ref = datasets.mldata.urlopen
try:
dataname = 'onecol'
# create fake data set in cache
x = sp.arange(6).reshape(2, 3)
datasets.mldata.urlopen = mock_mldata_urlopen({dataname: {'x': x}})
dset = fetch_mldata(dataname, data_home=tmpdir)
for n in ["COL_NAMES", "DESCR", "data"]:
assert_in(n, dset)
assert_not_in("target", dset)
assert_equal(dset.data.shape, (2, 3))
assert_array_equal(dset.data, x)
# transposing the data array
dset = fetch_mldata(dataname, transpose_data=False, data_home=tmpdir)
assert_equal(dset.data.shape, (3, 2))
finally:
datasets.mldata.urlopen = _urlopen_ref
@with_setup(setup_tmpdata, teardown_tmpdata)
def test_fetch_multiple_column():
_urlopen_ref = datasets.mldata.urlopen
try:
# create fake data set in cache
x = sp.arange(6).reshape(2, 3)
y = sp.array([1, -1])
z = sp.arange(12).reshape(4, 3)
# by default
dataname = 'threecol-default'
datasets.mldata.urlopen = mock_mldata_urlopen({
dataname: (
{
'label': y,
'data': x,
'z': z,
},
['z', 'data', 'label'],
),
})
dset = fetch_mldata(dataname, data_home=tmpdir)
for n in ["COL_NAMES", "DESCR", "target", "data", "z"]:
assert_in(n, dset)
assert_not_in("x", dset)
assert_not_in("y", dset)
assert_array_equal(dset.data, x)
assert_array_equal(dset.target, y)
assert_array_equal(dset.z, z.T)
# by order
dataname = 'threecol-order'
datasets.mldata.urlopen = mock_mldata_urlopen({
dataname: ({'y': y, 'x': x, 'z': z},
['y', 'x', 'z']), })
dset = fetch_mldata(dataname, data_home=tmpdir)
for n in ["COL_NAMES", "DESCR", "target", "data", "z"]:
assert_in(n, dset)
assert_not_in("x", dset)
assert_not_in("y", dset)
assert_array_equal(dset.data, x)
assert_array_equal(dset.target, y)
assert_array_equal(dset.z, z.T)
# by number
dataname = 'threecol-number'
datasets.mldata.urlopen = mock_mldata_urlopen({
dataname: ({'y': y, 'x': x, 'z': z},
['z', 'x', 'y']),
})
dset = fetch_mldata(dataname, target_name=2, data_name=0,
data_home=tmpdir)
for n in ["COL_NAMES", "DESCR", "target", "data", "x"]:
assert_in(n, dset)
assert_not_in("y", dset)
assert_not_in("z", dset)
assert_array_equal(dset.data, z)
assert_array_equal(dset.target, y)
# by name
dset = fetch_mldata(dataname, target_name='y', data_name='z',
data_home=tmpdir)
for n in ["COL_NAMES", "DESCR", "target", "data", "x"]:
assert_in(n, dset)
assert_not_in("y", dset)
assert_not_in("z", dset)
finally:
datasets.mldata.urlopen = _urlopen_ref
| bsd-3-clause |
sanuj/opencog | opencog/python/spatiotemporal/demo.py | 33 | 1221 | __author__ = 'sebastian'
from spatiotemporal.temporal_events.trapezium import TemporalEventTrapezium
from spatiotemporal.temporal_events.relation_formulas import FormulaCreator
from spatiotemporal.temporal_events.composition.non_linear_least_squares import DecompositionFitter
import matplotlib.pyplot as plt
all_relations = "pmoFDseSdfOMP"
a = TemporalEventTrapezium(1, 12, 4, 8)
b = TemporalEventTrapezium(9, 17, 13, 15)
# compute relations between events
temporal_relations = a * b
print("Relations: {0}".format(temporal_relations.to_list()))
# print degree for every relation
for relation in all_relations:
print(relation, temporal_relations[relation])
# plot events
a.plot(show_distributions=True).ylim(ymin=-0.1, ymax=1.1)
b.plot(show_distributions=True).figure()
plt.show()
# from the 13 relations, learns parameters for all combinations of the
# before, same, and after relationships between the beginning and
# ending distributions of the two intervals
formula = FormulaCreator(DecompositionFitter(temporal_relations))
# from these relationships, computes the 13 relations again
relations_estimate = formula.calculate_relations()
print("Estimated relations: {0}".format(relations_estimate.to_list())) | agpl-3.0 |
iulian787/spack | var/spack/repos/builtin/packages/py-seaborn/package.py | 5 | 1184 | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PySeaborn(PythonPackage):
"""Seaborn: statistical data visualization.
Seaborn is a library for making attractive and informative statistical
graphics in Python. It is built on top of matplotlib and tightly
integrated with the PyData stack, including support for numpy and pandas
data structures and statistical routines from scipy and statsmodels."""
homepage = "http://seaborn.pydata.org/"
url = "https://pypi.io/packages/source/s/seaborn/seaborn-0.7.1.tar.gz"
version('0.9.0', sha256='76c83f794ca320fb6b23a7c6192d5e185a5fcf4758966a0c0a54baee46d41e2f')
version('0.7.1', sha256='fa274344b1ee72f723bab751c40a5c671801d47a29ee9b5e69fcf63a18ce5c5d')
depends_on('py-setuptools', type='build')
depends_on('py-numpy', type=('build', 'run'))
depends_on('py-scipy', type=('build', 'run'))
depends_on('py-matplotlib', type=('build', 'run'))
depends_on('py-pandas', type=('build', 'run'))
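# Example usage sketch (illustrative only, not part of the recipe): once this
# recipe is available to a Spack installation, the package would typically be
# installed and made available with commands along these lines:
#
#   spack install [email protected]
#   spack load py-seaborn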
| lgpl-2.1 |
JoshuaMichaelKing/MyLearning | learn-python2.7/scikit-learn/NaiveBayesDemo.py | 1 | 2916 | #!/usr/bin/env python2.7
# -*- coding: utf-8 -*-
from __future__ import print_function
from __future__ import division
import numpy as np
from sklearn import datasets
from sklearn.naive_bayes import GaussianNB, MultinomialNB, BernoulliNB
__version__ = '0.0.1'
__license__ = 'MIT'
__author__ = 'Joshua Guo ([email protected])'
'''
Python To Try NaiveBayes(classification) through scikit-learn!
Naive Bayes methods are a set of supervised learning algorithms based on applying
Bayes' theorem with the "naive" assumption of independence between every pair of features.
In spite of its apparently over-simplified assumptions, naive Bayes classifiers have worked
quite well in many real-world situations, famously document classification and spam filtering.
They require a small amount of training data to estimate the necessary parameters.
On the flip side, although naive Bayes is known as a decent classifier, it is known to be a
bad estimator, so the probability outputs from predict_prob are not to be taken too seriously.
'''
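# For reference, the "naive" factorisation that all of the classifiers below
# rely on can be written (for class label y and features x_1 ... x_n) as
#
#   P(y | x_1, ..., x_n)  proportional to  P(y) * product_i P(x_i | y)
#
# and prediction picks the class y that maximises this quantity (in practice
# computed in log space for numerical stability). The variants differ only in
# how P(x_i | y) is modelled: Gaussian, multinomial or Bernoulli.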
def main():
gaussian_nb_test()
multinomial_nb_test()
multinomial_nb_partial_test()
bernoulli_nb_test()
def gaussian_nb_test():
'''
GaussianNB implements the Gaussian Naive Bayes algorithm for classification.
'''
print("gaussian_nb_test")
iris = datasets.load_iris()
    print(iris.feature_names)  # names of the four features
print(iris.target_names)
# print(iris.data)
print(len(iris.data))
print(iris.target.data)
print(iris.target.size)
clf = GaussianNB()
clf.fit(iris.data, iris.target)
    result1 = clf.predict(iris.data[0].reshape(1, -1))  # predict expects a 2D array
print(result1)
    result2 = clf.predict(iris.data[149].reshape(1, -1))
print(result2)
    data = np.array([[6, 4, 6, 2]])  # a single new sample as a 2D array
result3 = clf.predict(data)
print(result3)
def multinomial_nb_test():
'''
MultinomialNB implements the naive Bayes algorithm for multinomially distributed data,
and is one of the two classic naive Bayes variants used in text classification.
'''
print("multinomial_nb_test")
X = np.random.randint(5, size=(6, 100))
y = np.array([1, 2, 3, 4, 5, 6])
clf = MultinomialNB()
clf.fit(X, y)
    print(clf.predict(X[2:3]))  # slicing keeps the 2D shape expected by predict
def multinomial_nb_partial_test():
'''
    In sklearn, the partial_fit() method of the MultinomialNB class supports
    incremental (online) training. This is especially useful when the training
    set is too large to fit into memory at once.
'''
clf = MultinomialNB()
    clf.partial_fit(np.array([[1, 1]]), np.array(['aa']), ['aa', 'bb'])
    clf.partial_fit(np.array([[6, 1]]), np.array(['bb']))
    print(clf.predict(np.array([[9, 1]])))
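def multinomial_nb_partial_chunked_sketch():
    '''
    Illustrative sketch only (not part of the original demo): when the training
    set is too large for memory, partial_fit can be called once per chunk.
    The random data below exists purely to show the call pattern.
    '''
    clf = MultinomialNB()
    classes = np.array([0, 1])
    for _ in range(3):  # pretend each iteration loads one chunk from disk
        X_chunk = np.random.randint(5, size=(100, 20))
        y_chunk = np.random.randint(2, size=100)
        clf.partial_fit(X_chunk, y_chunk, classes=classes)
    print(clf.predict(np.random.randint(5, size=(1, 20))))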
def bernoulli_nb_test():
'''
Like MultinomialNB, this classifier is suitable for discrete data.
The difference is that while MultinomialNB works with occurrence counts,
BernoulliNB is designed for binary/boolean features.
'''
print("bernoulli_nb_test")
X = np.random.randint(2, size=(6, 100))
Y = np.array([1, 2, 3, 4, 4, 5])
clf = BernoulliNB()
clf.fit(X, Y)
    print(clf.predict(X[2:3]))
if __name__ == '__main__':
main()
| mit |
Tong-Chen/scikit-learn | sklearn/utils/sparsetools/tests/test_spanning_tree.py | 11 | 2295 | """Test the minimum spanning tree function"""
from __future__ import division, print_function, absolute_import
import numpy as np
from numpy.testing import assert_
import numpy.testing as npt
from scipy.sparse import csr_matrix
from sklearn.utils import minimum_spanning_tree
def test_minimum_spanning_tree():
# Create a graph with two connected components.
graph = [[0, 1, 0, 0, 0],
[1, 0, 0, 0, 0],
[0, 0, 0, 8, 5],
[0, 0, 8, 0, 1],
[0, 0, 5, 1, 0]]
graph = np.asarray(graph)
# Create the expected spanning tree.
expected = [[0, 1, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 5],
[0, 0, 0, 0, 1],
[0, 0, 0, 0, 0]]
expected = np.asarray(expected)
# Ensure minimum spanning tree code gives this expected output.
csgraph = csr_matrix(graph)
mintree = minimum_spanning_tree(csgraph)
npt.assert_array_equal(mintree.todense(), expected,
'Incorrect spanning tree found.')
# Ensure that the original graph was not modified.
npt.assert_array_equal(csgraph.todense(), graph,
'Original graph was modified.')
# Now let the algorithm modify the csgraph in place.
mintree = minimum_spanning_tree(csgraph, overwrite=True)
npt.assert_array_equal(mintree.todense(), expected,
'Graph was not properly modified to contain MST.')
np.random.seed(1234)
for N in (5, 10, 15, 20):
# Create a random graph.
graph = 3 + np.random.random((N, N))
csgraph = csr_matrix(graph)
# The spanning tree has at most N - 1 edges.
mintree = minimum_spanning_tree(csgraph)
assert_(mintree.nnz < N)
# Set the sub diagonal to 1 to create a known spanning tree.
idx = np.arange(N - 1)
graph[idx, idx + 1] = 1
csgraph = csr_matrix(graph)
mintree = minimum_spanning_tree(csgraph)
# We expect to see this pattern in the spanning tree and otherwise
# have this zero.
expected = np.zeros((N, N))
expected[idx, idx + 1] = 1
npt.assert_array_equal(mintree.todense(), expected,
'Incorrect spanning tree found.')
| bsd-3-clause |
KirstieJane/BrainsForPublication | scripts/show_cluster_in_volume.py | 3 | 18141 | #!/usr/bin/env python
#=============================================================================
# Created by Michael Notter
# at OHBM 2016 Brainhack in Lausanne, June 2016
# Edited with more comments by Kirstie Whitaker
# at Cambridge Brainhack-Global 2017, March 2017
# Contact: [email protected]
#=============================================================================
#=============================================================================
# IMPORTS
#=============================================================================
import argparse
import future # pip install future
from glob import glob as gg
import os
from os.path import join as opj
from os.path import basename as opb
import sys
import textwrap
import numpy as np
from matplotlib import pylab
from matplotlib import cm  # used below to look up the colormap object by name
from matplotlib import pyplot as plt
import nibabel as nb
import nilearn
from nipy.labs import viz
from scipy.ndimage import label as sci_label
#=============================================================================
# FUNCTIONS
#=============================================================================
def setup_argparser():
'''
Code to read in arguments from the command line
Also allows you to change some settings
'''
# Build a basic parser.
help_text = ('Show the locations of clusters in a statistical map in MNI space.')
sign_off = 'Author: Kirstie Whitaker <[email protected]>'
parser = argparse.ArgumentParser(description=help_text,
epilog=sign_off,
formatter_class=argparse.RawTextHelpFormatter)
# Now add the arguments
parser.add_argument(dest='stats_file',
type=str,
metavar='stats_file',
help=textwrap.dedent('3D nifti file in MNI space containing the statistical values\n ' +
'you want to visualise.\n' +
'Note that this file can be pre-thresholded or not.\n' +
'If your file is already thresholded then you will need to\n ' +
'pass an argument to the -t option otherwise it will default to 2.3.\n ' +
'A suggested value is 0.01.' ))
    parser.add_argument('-ce', '--cluster_extent',
                        type=int,
metavar='cluster_extent',
help=textwrap.dedent("Minimum cluster extent for a region to be included in the visualisation\n (integer)\n Default: 20"),
default=20)
    parser.add_argument('-t', '--cluster_thr',
                        type=float,
metavar='threshold',
help=textwrap.dedent("Minimum statistical value for a region to be included in the visualisation\n (float)\n Default: 2.3"),
default=2.3)
parser.add_argument('--csv',
action='store_true',
help=textwrap.dedent('Create a csv file with cluster information.\n Default: False'),
default=False)
parser.add_argument('--cluster_title',
action='store_true',
help=textwrap.dedent('Show cluster information in the title of the plot.\n Default: False'),
default=False)
parser.add_argument('-c', '--cmap',
type=str,
metavar='cmap',
help=textwrap.dedent('Any matplotlib colormap listed at\n http://matplotlib.org/examples/color/colormaps_reference.html\n Default: RdBu_r'),
default='hot')
parser.add_argument('-cb', '--cbar',
action='store_true',
help=textwrap.dedent('Display a colorbar on the right of the plots\n Default: False'),
default=False)
parser.add_argument('--black_bg',
action='store_true',
help=textwrap.dedent('Set the background to black.\n Default: White'),
default=False)
"""
parser.add_argument('--thr_abs',
type=float,
metavar='thr_abs',
help=textwrap.dedent('Mask the input image such that all values\n which have an absolute value less than this threshold\n are not shown.\nIf None then no thresholding is undertaken.\nDefault: None'),
default=None)
parser.add_argument('--thr_pos',
type=float,
metavar='thr_pos',
help=textwrap.dedent('Mask the input image such that all values\n less than this threshold are not shown.\nIf None then no thresholding is undertaken.\nDefault: None'),
default=None)
parser.add_argument('-l', '--lower',
type=float,
metavar='lowerthr',
help='Lower limit for colormap',
default=None)
parser.add_argument('-u', '--upper',
type=float,
metavar='upperthr',
help='Upper limit for colormap',
default=None)
"""
parser.add_argument('--dpi',
type=float,
metavar='dpi',
help='DPI of output png file\n Default: 300',
default=300)
parser.add_argument('--format',
                        type=str,
metavar='format',
help=textwrap.dedent('Format of the output image file.\n Eg: png, pdf, tif, jpeg, svg. \n Default: png'),
default='png')
arguments = parser.parse_args()
return arguments, parser
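# Example invocation sketch (hypothetical file name, for illustration only):
#
#   python show_cluster_in_volume.py zstat1_MNI.nii.gz -t 2.3 -ce 20 --csv --cbar
#
# which is intended to threshold the map at 2.3, keep clusters of at least 20
# voxels, and write one figure per surviving cluster (plus a cluster CSV when
# --csv is given).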
def get_labels(data, cluster_thr=0, min_extent=0):
"""
Get number of clusters in dataset as well as a labeled volume
Minimal extent of each cluster and voxel-vise threshold can be specified
"""
# Threshold the data by zeroing all voxels with values that have an
# absolute value less than the cluster threshold
thr_data = abs(data) > cluster_thr
# Find all the clusters in the thresholded data
labels, nlabels = sci_label(thr_data)
# Now loop through all the clusters
# and if a cluster is smaller than the minimum cluster extent
# exclude it from the list (set the values to zero)
for idx in range(1, nlabels + 1):
if np.sum(labels == idx) < min_extent:
labels[labels == idx] = 0
# Re-run the clustering command to get only the clusters
# that are larger than the minimum extent
labels, nlabels = sci_label(labels)
# overwrites the input data with the thresholded data
binarized_data = labels.astype('bool')
data[np.invert(binarized_data)] = 0
return labels, nlabels, data, binarized_data
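# Minimal usage sketch for get_labels (synthetic data, illustration only):
#
#   toy = np.zeros((10, 10, 10))
#   toy[2:5, 2:5, 2:5] = 3.1  # one 27-voxel blob above threshold
#   labels, nlabels, thr_data, mask = get_labels(toy, cluster_thr=2.3, min_extent=20)
#   # nlabels == 1; any blob smaller than min_extent would have been removed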
def get_cluster_info(img, affine, data):
"""
Returns peak coordinations and cluster information of a given dataset,
if labeled file and affine is provided
"""
# Set up some variables we're going to need
coords = [] #
cs = [] # cluster sum values
maxcoords = [] # peak coordinate locations
clusterInfo = [] # list of lists containing max, min,
# mean and std of the cluster
# Find all the label ids (that aren't 0!)
labelID = np.setdiff1d(np.unique(img.ravel()), [0])
# Loop through the labels
for lab in labelID:
# Calculate the voume of the cluster
sumval = np.sum(img == lab)
cs.append(sumval)
# Calculate the max, min, mean and std of the cluster
maxval = np.max(data[img == lab])
minval = np.min(data[img == lab])
meanval = np.mean(data[img == lab])
stdval = np.std(data[img == lab])
# Save these values in a list
clusterInfo.append([sumval, maxval, minval, meanval, stdval])
# Get the location of the peak coordinate
maxidx = np.nonzero(np.multiply(data, img == lab) == maxval)
maxcoords.append([m[0] for m in maxidx])
# Transform the lists into numpy arrays
maxcoords = np.asarray(maxcoords)
clusterInfo = np.asarray(clusterInfo)
# Sort the lists by the volume of the clusters
maxcoords = maxcoords[np.argsort(cs)[::-1], :]
clusterInfo = clusterInfo[np.argsort(cs)[::-1], :]
# Loop through the clusters and put the peak coordinates
# in MNI space
for i, lab in enumerate(labelID[np.argsort(cs)[::-1]]):
coords.append(np.dot(affine,
np.hstack((maxcoords[i], 1)))[:3].tolist())
# Add the peak coordinate information to the clusterInfo array
clusterInfo = np.hstack((np.array(coords), clusterInfo))
# Returns peak coordination and additional cluster infos
return coords, clusterInfo
def show_slices(data, affine,
coords=None,
cmap=None,
show_colorbar=None,
showCross=False,
cluster_thr=0,
annotate=True, ###### KW DOCUMENT
template='../scripts/templates/MNI152_T1_1mm_brain.nii.gz', ####### KW DOCUMENT
dpiRes=300,
suffix='png',
show_title=False):
# Prepare background image
anatimg = nb.load(template)
    anatdata, anataff = anatimg.get_data(), anatimg.affine  # .affine is an attribute, not a method
anatdata = anatdata.astype(np.float)
anatdata[anatdata < 10.] = np.nan
# Create output figure for each peak coordinate
# (so a different figure for each cluster)
for idx, coord in enumerate(coords):
# Name the output file to include the cluster id,
# the cluster threshold and the minimum cluster extent
        outfile = 'Cluster_{}_thr{:04.2f}_minext{:03d}'.format(idx, cluster_thr, cluster_extent)
# If show_title argument has been set to true then print the file name
# and the peak coordinates in the title of the figure
if show_title:
            title = '{} {}'.format(outfile, coord)
else:
title = ''
# Woooo plot three orthogonal views of the cluster sliced through the
# peak coordinate
osl = viz.plot_map(
np.asarray(data), affine, anat=anatdata, anat_affine=anataff,
threshold=cluster_thr, cmap=cmap, annotate=annotate,
black_bg=False, cut_coords=coord, draw_cross=showCross,
slicer='ortho', title=title)
# If the show colorbar option is true then show the color bar on the
# right hand side of the image
if show_colorbar:
cbarLocation = [-0.1, 0.2, 0.015, 0.6]
im = plt.gca().get_images()[1]
cb = plt.colorbar(im, cax=plt.axes(cbarLocation),
orientation='horizontal', format='%.2f')
cb.set_ticks([cb._values.min(), cb._values.max()])
# Save the figure!
osl.frame_axes.figure.savefig(
opj(output_folder, '{}.{}'.format(outfile, suffix)),
dpi=dpiRes, bbox_inches='tight', transparent=True)
# DONE! Close the plot
plt.close()
#=============================================================================
# SET SOME VARIABLES
#=============================================================================
# Read in the arguments from argparse
arguments, parser = setup_argparser()
stats_file = arguments.stats_file
cluster_extent = arguments.cluster_extent
cluster_thr = arguments.cluster_thr
store_csv = arguments.csv
cluster_title = arguments.cluster_title
cmap = arguments.cmap
show_colorbar = arguments.cbar
#thr_abs = arguments.thr_abs
#thr_pos = arguments.thr_pos
#black_bg = arguments.black_bg
#lower_thresh = arguments.lower
#upper_thresh = arguments.upper
dpi = arguments.dpi
image_format = arguments.format
# Set a couple of hard coded options
#symmetric_cbar=False
#===============================================================================
# Get the colormap from nilearn
#===============================================================================
if hasattr(cm, cmap):
cmap = getattr(cm, cmap)
#===============================================================================
# Create the output folder
#===============================================================================
output_folder = '{}_CLUSTERS'.format(stats_file.rsplit('.nii', 1)[0])
if not os.path.isdir(output_folder):
    os.makedirs(output_folder)
def create_output(stats_file, cluster_extent, threshold, template, create_CSV,
show_cross, annotate_figure, cmap, show_colorbar,
show_title, dpi, imageType):
# Read in the stats file
img = nb.load(stats_file)
data = img.get_data()
    affine = img.affine
# Find the clusters
labels, nlabels, data, binarized_data = get_labels(data,
cluster_thr=cluster_thr,
min_extent=cluster_extent)
# Catch if nlabels is 0, i.e. no clusters survived thresholding
if nlabels == 0:
print('No clusters survive the thresholds in {}'.format(stats_file))
return
# If there *are* cluster though, then get the cluster information
# for each of them
print('{} clusters were found in {}'.format(nlabels, stats_file))
coords, clusterInfo = get_cluster_info(labels, affine, data)
"""
# Get file prefix
if filePath.endswith('.nii'):
filename = opb(filePath)[:-4]
elif filePath.endswith('.nii.gz'):
filename = opb(filePath)[:-7]
"""
# Create output folder
output_folder = '{}_CLUSTERS'.format(stats_file.rsplit('.nii', 1)[0])
    if not os.path.isdir(output_folder):
        os.makedirs(output_folder)
# Create figures
show_slices(data, affine,
coords=coords,
cmap=cmap,
show_colorbar=show_colorbar,
showCross=False, ####### KW THINK ABOUT
cluster_thr=cluster_thr,
annotate=True, ###### KW DOCUMENT
template='../scripts/templates/MNI152_T1_1mm_brain.nii.gz', ####### KW DOCUMENT
dpiRes=dpi,
suffix=image_format,
show_title=show_title)
# Create CSV output
if create_CSV:
header = 'X,Y,Z,Size,Max,Min,Mean,Std'
np.savetxt(
            opj(output_folder, 'cluster_info.csv'), clusterInfo,
delimiter=',', fmt='%.8f', header=header, comments='')
# Print cluster info in terminal
row_format = "{:>8}{:>8}{:>8}{:>10}{:>16}{:>16}{:>16}{:>16}"
print(row_format.format(
*['X', 'Y', 'Z', 'Size', 'Max', 'Min', 'Mean', 'Std']))
for c in clusterInfo:
print(row_format.format(*c))
print('\n')
#===============================================================================
# Save the figure
#===============================================================================
output_folder = '{}_CLUSTERS'.format(stats_file.rsplit('.nii', 1)[0])
if not os.path.isdir(output_folder):
    os.makedirs(output_folder)
if __name__ == "__main__":
cluster_extend = int(sys.argv[1])
threshold = float(sys.argv[2])
template = str(sys.argv[3])
create_CSV = bool(sys.argv[4])
show_cross = bool(sys.argv[5])
annotate_figure = bool(sys.argv[6])
show_colorbar = bool(sys.argv[7])
colorbar_orientation = str(sys.argv[8])
show_title = bool(sys.argv[9])
dpi = int(sys.argv[10])
imageType = str(sys.argv[11])
prefix = str(sys.argv[12])
#=========================================================================
# SET SOME VARIABLES
#=========================================================================
# Read in the arguments from argparse
arguments, parser = setup_argparser()
stats_file = arguments.stats_file
cluster_extent = arguments.cluster_extent
cluster_thr = arguments.cluster_thr
store_csv = arguments.csv
cluster_title = arguments.cluster_title
cmap = arguments.cmap
show_colorbar = arguments.cbar
#thr_abs = arguments.thr_abs
#thr_pos = arguments.thr_pos
#black_bg = arguments.black_bg
#lower_thresh = arguments.lower
#upper_thresh = arguments.upper
dpi = arguments.dpi
image_format = arguments.format
#===============================================================================
# Get the colormap from nilearn
#===============================================================================
if hasattr(cm, cmap):
cmap = getattr(cm, cmap)
#===============================================================================
# Create the output folder
#===============================================================================
output_folder = '{}_CLUSTERS'.format(stats_file.rsplit('.nii', 1)[0])
    if not os.path.isdir(output_folder):
        os.makedirs(output_folder)
#===============================================================================
# Create the figures and CSV output
#===============================================================================
create_output(stats_file, cluster_extent, threshold, template, create_CSV,
show_cross, annotate_figure, cmap, show_colorbar,
show_title, dpi, imageType)
| mit |
kenshay/ImageScripter | ProgramData/SystemFiles/Python/Lib/site-packages/pandas/tests/types/test_missing.py | 7 | 12132 | # -*- coding: utf-8 -*-
import nose
import numpy as np
from datetime import datetime
from pandas.util import testing as tm
import pandas as pd
from pandas.core import config as cf
from pandas.compat import u
from pandas.tslib import iNaT
from pandas import (NaT, Float64Index, Series,
DatetimeIndex, TimedeltaIndex, date_range)
from pandas.types.dtypes import DatetimeTZDtype
from pandas.types.missing import (array_equivalent, isnull, notnull,
na_value_for_dtype)
_multiprocess_can_split_ = True
def test_notnull():
assert notnull(1.)
assert not notnull(None)
assert not notnull(np.NaN)
with cf.option_context("mode.use_inf_as_null", False):
assert notnull(np.inf)
assert notnull(-np.inf)
arr = np.array([1.5, np.inf, 3.5, -np.inf])
result = notnull(arr)
assert result.all()
with cf.option_context("mode.use_inf_as_null", True):
assert not notnull(np.inf)
assert not notnull(-np.inf)
arr = np.array([1.5, np.inf, 3.5, -np.inf])
result = notnull(arr)
assert result.sum() == 2
with cf.option_context("mode.use_inf_as_null", False):
for s in [tm.makeFloatSeries(), tm.makeStringSeries(),
tm.makeObjectSeries(), tm.makeTimeSeries(),
tm.makePeriodSeries()]:
assert (isinstance(isnull(s), Series))
class TestIsNull(tm.TestCase):
def test_0d_array(self):
self.assertTrue(isnull(np.array(np.nan)))
self.assertFalse(isnull(np.array(0.0)))
self.assertFalse(isnull(np.array(0)))
# test object dtype
self.assertTrue(isnull(np.array(np.nan, dtype=object)))
self.assertFalse(isnull(np.array(0.0, dtype=object)))
self.assertFalse(isnull(np.array(0, dtype=object)))
def test_isnull(self):
self.assertFalse(isnull(1.))
self.assertTrue(isnull(None))
self.assertTrue(isnull(np.NaN))
        self.assertTrue(isnull(float('nan')))
self.assertFalse(isnull(np.inf))
self.assertFalse(isnull(-np.inf))
# series
for s in [tm.makeFloatSeries(), tm.makeStringSeries(),
tm.makeObjectSeries(), tm.makeTimeSeries(),
tm.makePeriodSeries()]:
self.assertIsInstance(isnull(s), Series)
# frame
for df in [tm.makeTimeDataFrame(), tm.makePeriodFrame(),
tm.makeMixedDataFrame()]:
result = isnull(df)
expected = df.apply(isnull)
tm.assert_frame_equal(result, expected)
# panel
for p in [tm.makePanel(), tm.makePeriodPanel(),
tm.add_nans(tm.makePanel())]:
result = isnull(p)
expected = p.apply(isnull)
tm.assert_panel_equal(result, expected)
# panel 4d
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
for p in [tm.makePanel4D(), tm.add_nans_panel4d(tm.makePanel4D())]:
result = isnull(p)
expected = p.apply(isnull)
tm.assert_panel4d_equal(result, expected)
def test_isnull_lists(self):
result = isnull([[False]])
exp = np.array([[False]])
tm.assert_numpy_array_equal(result, exp)
result = isnull([[1], [2]])
exp = np.array([[False], [False]])
tm.assert_numpy_array_equal(result, exp)
# list of strings / unicode
result = isnull(['foo', 'bar'])
exp = np.array([False, False])
tm.assert_numpy_array_equal(result, exp)
result = isnull([u('foo'), u('bar')])
exp = np.array([False, False])
tm.assert_numpy_array_equal(result, exp)
def test_isnull_nat(self):
result = isnull([NaT])
exp = np.array([True])
tm.assert_numpy_array_equal(result, exp)
result = isnull(np.array([NaT], dtype=object))
exp = np.array([True])
tm.assert_numpy_array_equal(result, exp)
def test_isnull_numpy_nat(self):
arr = np.array([NaT, np.datetime64('NaT'), np.timedelta64('NaT'),
np.datetime64('NaT', 's')])
result = isnull(arr)
expected = np.array([True] * 4)
tm.assert_numpy_array_equal(result, expected)
def test_isnull_datetime(self):
self.assertFalse(isnull(datetime.now()))
self.assertTrue(notnull(datetime.now()))
idx = date_range('1/1/1990', periods=20)
exp = np.ones(len(idx), dtype=bool)
tm.assert_numpy_array_equal(notnull(idx), exp)
idx = np.asarray(idx)
idx[0] = iNaT
idx = DatetimeIndex(idx)
mask = isnull(idx)
self.assertTrue(mask[0])
exp = np.array([True] + [False] * (len(idx) - 1), dtype=bool)
self.assert_numpy_array_equal(mask, exp)
# GH 9129
pidx = idx.to_period(freq='M')
mask = isnull(pidx)
self.assertTrue(mask[0])
exp = np.array([True] + [False] * (len(idx) - 1), dtype=bool)
self.assert_numpy_array_equal(mask, exp)
mask = isnull(pidx[1:])
exp = np.zeros(len(mask), dtype=bool)
self.assert_numpy_array_equal(mask, exp)
def test_datetime_other_units(self):
idx = pd.DatetimeIndex(['2011-01-01', 'NaT', '2011-01-02'])
exp = np.array([False, True, False])
tm.assert_numpy_array_equal(isnull(idx), exp)
tm.assert_numpy_array_equal(notnull(idx), ~exp)
tm.assert_numpy_array_equal(isnull(idx.values), exp)
tm.assert_numpy_array_equal(notnull(idx.values), ~exp)
for dtype in ['datetime64[D]', 'datetime64[h]', 'datetime64[m]',
'datetime64[s]', 'datetime64[ms]', 'datetime64[us]',
'datetime64[ns]']:
values = idx.values.astype(dtype)
exp = np.array([False, True, False])
tm.assert_numpy_array_equal(isnull(values), exp)
tm.assert_numpy_array_equal(notnull(values), ~exp)
exp = pd.Series([False, True, False])
s = pd.Series(values)
tm.assert_series_equal(isnull(s), exp)
tm.assert_series_equal(notnull(s), ~exp)
s = pd.Series(values, dtype=object)
tm.assert_series_equal(isnull(s), exp)
tm.assert_series_equal(notnull(s), ~exp)
def test_timedelta_other_units(self):
idx = pd.TimedeltaIndex(['1 days', 'NaT', '2 days'])
exp = np.array([False, True, False])
tm.assert_numpy_array_equal(isnull(idx), exp)
tm.assert_numpy_array_equal(notnull(idx), ~exp)
tm.assert_numpy_array_equal(isnull(idx.values), exp)
tm.assert_numpy_array_equal(notnull(idx.values), ~exp)
for dtype in ['timedelta64[D]', 'timedelta64[h]', 'timedelta64[m]',
'timedelta64[s]', 'timedelta64[ms]', 'timedelta64[us]',
'timedelta64[ns]']:
values = idx.values.astype(dtype)
exp = np.array([False, True, False])
tm.assert_numpy_array_equal(isnull(values), exp)
tm.assert_numpy_array_equal(notnull(values), ~exp)
exp = pd.Series([False, True, False])
s = pd.Series(values)
tm.assert_series_equal(isnull(s), exp)
tm.assert_series_equal(notnull(s), ~exp)
s = pd.Series(values, dtype=object)
tm.assert_series_equal(isnull(s), exp)
tm.assert_series_equal(notnull(s), ~exp)
def test_period(self):
idx = pd.PeriodIndex(['2011-01', 'NaT', '2012-01'], freq='M')
exp = np.array([False, True, False])
tm.assert_numpy_array_equal(isnull(idx), exp)
tm.assert_numpy_array_equal(notnull(idx), ~exp)
exp = pd.Series([False, True, False])
s = pd.Series(idx)
tm.assert_series_equal(isnull(s), exp)
tm.assert_series_equal(notnull(s), ~exp)
s = pd.Series(idx, dtype=object)
tm.assert_series_equal(isnull(s), exp)
tm.assert_series_equal(notnull(s), ~exp)
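# Illustrative note (not part of the original test code): array_equivalent,
# exercised below, is the NaN-aware comparison, so NaN/NaT in matching
# positions compare equal, e.g.
#   array_equivalent(np.array([1.0, np.nan]), np.array([1.0, np.nan]))  # True
#   np.array_equal(np.array([1.0, np.nan]), np.array([1.0, np.nan]))    # False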
def test_array_equivalent():
assert array_equivalent(np.array([np.nan, np.nan]),
np.array([np.nan, np.nan]))
assert array_equivalent(np.array([np.nan, 1, np.nan]),
np.array([np.nan, 1, np.nan]))
assert array_equivalent(np.array([np.nan, None], dtype='object'),
np.array([np.nan, None], dtype='object'))
assert array_equivalent(np.array([np.nan, 1 + 1j], dtype='complex'),
np.array([np.nan, 1 + 1j], dtype='complex'))
assert not array_equivalent(
np.array([np.nan, 1 + 1j], dtype='complex'), np.array(
[np.nan, 1 + 2j], dtype='complex'))
assert not array_equivalent(
np.array([np.nan, 1, np.nan]), np.array([np.nan, 2, np.nan]))
assert not array_equivalent(
np.array(['a', 'b', 'c', 'd']), np.array(['e', 'e']))
assert array_equivalent(Float64Index([0, np.nan]),
Float64Index([0, np.nan]))
assert not array_equivalent(
Float64Index([0, np.nan]), Float64Index([1, np.nan]))
assert array_equivalent(DatetimeIndex([0, np.nan]),
DatetimeIndex([0, np.nan]))
assert not array_equivalent(
DatetimeIndex([0, np.nan]), DatetimeIndex([1, np.nan]))
assert array_equivalent(TimedeltaIndex([0, np.nan]),
TimedeltaIndex([0, np.nan]))
assert not array_equivalent(
TimedeltaIndex([0, np.nan]), TimedeltaIndex([1, np.nan]))
assert array_equivalent(DatetimeIndex([0, np.nan], tz='US/Eastern'),
DatetimeIndex([0, np.nan], tz='US/Eastern'))
assert not array_equivalent(
DatetimeIndex([0, np.nan], tz='US/Eastern'), DatetimeIndex(
[1, np.nan], tz='US/Eastern'))
assert not array_equivalent(
DatetimeIndex([0, np.nan]), DatetimeIndex(
[0, np.nan], tz='US/Eastern'))
assert not array_equivalent(
DatetimeIndex([0, np.nan], tz='CET'), DatetimeIndex(
[0, np.nan], tz='US/Eastern'))
assert not array_equivalent(
DatetimeIndex([0, np.nan]), TimedeltaIndex([0, np.nan]))
def test_array_equivalent_compat():
# see gh-13388
m = np.array([(1, 2), (3, 4)], dtype=[('a', int), ('b', float)])
n = np.array([(1, 2), (3, 4)], dtype=[('a', int), ('b', float)])
assert (array_equivalent(m, n, strict_nan=True))
assert (array_equivalent(m, n, strict_nan=False))
m = np.array([(1, 2), (3, 4)], dtype=[('a', int), ('b', float)])
n = np.array([(1, 2), (4, 3)], dtype=[('a', int), ('b', float)])
assert (not array_equivalent(m, n, strict_nan=True))
assert (not array_equivalent(m, n, strict_nan=False))
m = np.array([(1, 2), (3, 4)], dtype=[('a', int), ('b', float)])
n = np.array([(1, 2), (3, 4)], dtype=[('b', int), ('a', float)])
assert (not array_equivalent(m, n, strict_nan=True))
assert (not array_equivalent(m, n, strict_nan=False))
def test_array_equivalent_str():
for dtype in ['O', 'S', 'U']:
assert array_equivalent(np.array(['A', 'B'], dtype=dtype),
np.array(['A', 'B'], dtype=dtype))
assert not array_equivalent(np.array(['A', 'B'], dtype=dtype),
np.array(['A', 'X'], dtype=dtype))
def test_na_value_for_dtype():
for dtype in [np.dtype('M8[ns]'), np.dtype('m8[ns]'),
DatetimeTZDtype('datetime64[ns, US/Eastern]')]:
assert na_value_for_dtype(dtype) is NaT
for dtype in ['u1', 'u2', 'u4', 'u8',
'i1', 'i2', 'i4', 'i8']:
assert na_value_for_dtype(np.dtype(dtype)) == 0
for dtype in ['bool']:
assert na_value_for_dtype(np.dtype(dtype)) is False
for dtype in ['f2', 'f4', 'f8']:
assert np.isnan(na_value_for_dtype(np.dtype(dtype)))
for dtype in ['O']:
assert np.isnan(na_value_for_dtype(np.dtype(dtype)))
if __name__ == '__main__':
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
exit=False)
| gpl-3.0 |
sserrot/champion_relationships | venv/Lib/site-packages/networkx/drawing/tests/test_pylab.py | 1 | 7836 | """Unit tests for matplotlib drawing functions."""
import os
import itertools
import pytest
mpl = pytest.importorskip('matplotlib')
mpl.use('PS', warn=False)
plt = pytest.importorskip('matplotlib.pyplot')
plt.rcParams['text.usetex'] = False
import networkx as nx
class TestPylab(object):
@classmethod
def setup_class(cls):
cls.G = nx.barbell_graph(4, 6)
def test_draw(self):
try:
functions = [nx.draw_circular,
nx.draw_kamada_kawai,
nx.draw_planar,
nx.draw_random,
nx.draw_spectral,
nx.draw_spring,
nx.draw_shell]
options = [{
'node_color': 'black',
'node_size': 100,
'width': 3,
}]
for function, option in itertools.product(functions, options):
function(self.G, **option)
plt.savefig('test.ps')
finally:
try:
os.unlink('test.ps')
except OSError:
pass
def test_draw_shell_nlist(self):
try:
nlist = [list(range(4)), list(range(4, 10)), list(range(10, 14))]
nx.draw_shell(self.G, nlist=nlist)
plt.savefig('test.ps')
finally:
try:
os.unlink('test.ps')
except OSError:
pass
def test_edge_colormap(self):
colors = range(self.G.number_of_edges())
nx.draw_spring(self.G, edge_color=colors, width=4,
edge_cmap=plt.cm.Blues, with_labels=True)
plt.show()
def test_arrows(self):
nx.draw_spring(self.G.to_directed())
plt.show()
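    # The next test walks through the ways draw_networkx_edges interprets its
    # edge_color/width arguments: single values, per-edge lists, RGB(A) tuples
    # (routed through a colormap when several edges share one tuple), and
    # numeric values scaled between edge_vmin and edge_vmax.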
def test_edge_colors_and_widths(self):
pos = nx.circular_layout(self.G)
for G in (self.G, self.G.to_directed()):
nx.draw_networkx_nodes(G, pos, node_color=[(1.0, 1.0, 0.2, 0.5)])
nx.draw_networkx_labels(G, pos)
# edge with default color and width
nx.draw_networkx_edges(G, pos, edgelist=[(0, 1)],
width=None,
edge_color=None)
# edges with global color strings and widths in lists
nx.draw_networkx_edges(G, pos, edgelist=[(0, 2), (0, 3)],
width=[3],
edge_color=['r'])
# edges with color strings and widths for each edge
nx.draw_networkx_edges(G, pos, edgelist=[(0, 2), (0, 3)],
width=[1, 3],
edge_color=['r', 'b'])
# edges with fewer color strings and widths than edges
nx.draw_networkx_edges(G, pos,
edgelist=[(1, 2), (1, 3), (2, 3), (3, 4)],
width=[1, 3],
edge_color=['g', 'm', 'c'])
# edges with more color strings and widths than edges
nx.draw_networkx_edges(G, pos, edgelist=[(3, 4)],
width=[1, 2, 3, 4],
edge_color=['r', 'b', 'g', 'k'])
# with rgb tuple and 3 edges - is interpreted with cmap
nx.draw_networkx_edges(G, pos, edgelist=[(4, 5), (5, 6), (6, 7)],
edge_color=(1.0, 0.4, 0.3))
# with rgb tuple in list
nx.draw_networkx_edges(G, pos, edgelist=[(7, 8), (8, 9)],
edge_color=[(0.4, 1.0, 0.0)])
            # with rgba tuple and 4 edges - is interpreted with cmap
nx.draw_networkx_edges(G, pos, edgelist=[(9, 10), (10, 11),
(10, 12), (10, 13)],
edge_color=(0.0, 1.0, 1.0, 0.5))
# with rgba tuple in list
nx.draw_networkx_edges(G, pos, edgelist=[(9, 10), (10, 11),
(10, 12), (10, 13)],
edge_color=[(0.0, 1.0, 1.0, 0.5)])
# with color string and global alpha
nx.draw_networkx_edges(G, pos, edgelist=[(11, 12), (11, 13)],
edge_color='purple', alpha=0.2)
# with color string in a list
nx.draw_networkx_edges(G, pos, edgelist=[(11, 12), (11, 13)],
edge_color=['purple'])
# with single edge and hex color string
nx.draw_networkx_edges(G, pos, edgelist=[(12, 13)],
edge_color='#1f78b4f0')
# edge_color as numeric using vmin, vmax
nx.draw_networkx_edges(G, pos, edgelist=[(7, 8), (8, 9)],
edge_color=[0.2, 0.5],
                                   edge_vmin=0.1, edge_vmax=0.6)
plt.show()
def test_labels_and_colors(self):
G = nx.cubical_graph()
pos = nx.spring_layout(G) # positions for all nodes
# nodes
nx.draw_networkx_nodes(G, pos,
nodelist=[0, 1, 2, 3],
node_color='r',
node_size=500,
alpha=0.75)
nx.draw_networkx_nodes(G, pos,
nodelist=[4, 5, 6, 7],
node_color='b',
node_size=500,
alpha=[0.25, 0.5, 0.75, 1.0])
# edges
nx.draw_networkx_edges(G, pos, width=1.0, alpha=0.5)
nx.draw_networkx_edges(G, pos,
edgelist=[(0, 1), (1, 2), (2, 3), (3, 0)],
width=8, alpha=0.5, edge_color='r')
nx.draw_networkx_edges(G, pos,
edgelist=[(4, 5), (5, 6), (6, 7), (7, 4)],
width=8, alpha=0.5, edge_color='b')
nx.draw_networkx_edges(G, pos,
edgelist=[(4, 5), (5, 6), (6, 7), (7, 4)],
min_source_margin=0.5, min_target_margin=0.75,
width=8, edge_color='b')
# some math labels
labels = {}
labels[0] = r'$a$'
labels[1] = r'$b$'
labels[2] = r'$c$'
labels[3] = r'$d$'
labels[4] = r'$\alpha$'
labels[5] = r'$\beta$'
labels[6] = r'$\gamma$'
labels[7] = r'$\delta$'
nx.draw_networkx_labels(G, pos, labels, font_size=16)
nx.draw_networkx_edge_labels(G, pos, edge_labels=None, rotate=False)
nx.draw_networkx_edge_labels(G, pos, edge_labels={(4, 5): '4-5'})
plt.show()
def test_axes(self):
fig, ax = plt.subplots()
nx.draw(self.G, ax=ax)
def test_empty_graph(self):
G = nx.Graph()
nx.draw(G)
def test_multigraph_edgelist_tuples(self):
# See Issue #3295
G = nx.path_graph(3, create_using=nx.MultiDiGraph)
nx.draw_networkx(G, edgelist=[(0, 1, 0)])
nx.draw_networkx(G, edgelist=[(0, 1, 0)], node_size=[10, 20])
def test_alpha_iter(self):
pos = nx.random_layout(self.G)
# with fewer alpha elements than nodes
plt.subplot(131)
nx.draw_networkx_nodes(self.G, pos, alpha=[0.1, 0.2])
# with equal alpha elements and nodes
num_nodes = len(self.G.nodes)
alpha = [x / num_nodes for x in range(num_nodes)]
colors = range(num_nodes)
plt.subplot(132)
nx.draw_networkx_nodes(self.G, pos, node_color=colors, alpha=alpha)
# with more alpha elements than nodes
alpha.append(1)
plt.subplot(133)
nx.draw_networkx_nodes(self.G, pos, alpha=alpha)
| mit |
cbweaver/stockscan | scans/plot_all_dips.py | 1 | 5723 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import logbook, configparser, os
import matplotlib.pyplot as plt
import sqlite3 as sql
import numpy as np
def is_dip(closes, i):
if closes[i] < closes[i - 1] and \
closes[i] < closes[i + 1]:
return True
else:
return False
def perc_change(a, b):
return ((b - a) / a)
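# Illustrative example (not part of the original scan): with closes = [10, 9, 11],
# is_dip(closes, 1) is True because day 1 closes below both neighbours, and
# perc_change(10, 11) == 0.1, i.e. a 10% recovery from that dip.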
config = configparser.ConfigParser()
config.read(os.getcwd()+"/config.ini")
log_handler = logbook.FileHandler(config['DEBUG']['log_fpath'])
with log_handler.applicationbound():
con = sql.connect("data/ysdb.sql")
cur = con.cursor()
tp = "MSFT"
day_range = 20
min_perc_incr = 0.07
ta_offset = 33
dip_data = []
cur.execute("SELECT AdjClose FROM %s_HIST" % (tp))
ac = list(zip(*cur.fetchall())[0])
cur.execute("SELECT Rsi14 FROM %s_TA" % (tp))
rsi14 = list(zip(*cur.fetchall())[0])
cur.execute("SELECT Rsi20 FROM %s_TA" % (tp))
rsi20 = list(zip(*cur.fetchall())[0])
cur.execute("SELECT Rsi25 FROM %s_TA" % (tp))
rsi25 = list(zip(*cur.fetchall())[0])
cur.execute("SELECT Rsi30 FROM %s_TA" % (tp))
rsi30 = list(zip(*cur.fetchall())[0])
cur.execute("SELECT MacdH12_26_9 FROM %s_TA" % (tp))
macdh = list(zip(*cur.fetchall())[0])
cur.execute("SELECT StoK_14_3_3 FROM %s_TA" % (tp))
stok_14_3_3 = list(zip(*cur.fetchall())[0])
cur.execute("SELECT StoD_14_3_3 FROM %s_TA" % (tp))
stod_14_3_3 = list(zip(*cur.fetchall())[0])
cur.execute("SELECT StoK_26_6_6 FROM %s_TA" % (tp))
stok_26_6_6 = list(zip(*cur.fetchall())[0])
cur.execute("SELECT StoD_26_6_6 FROM %s_TA" % (tp))
stod_26_6_6 = list(zip(*cur.fetchall())[0])
if len(ac) > 600:
xMostRecent = 500
for i in range(ta_offset):
ac.pop(0)
rsi14.pop(0)
rsi20.pop(0)
rsi25.pop(0)
rsi30.pop(0)
macdh.pop(0)
stok_14_3_3.pop(0)
stod_14_3_3.pop(0)
stok_26_6_6.pop(0)
stod_26_6_6.pop(0)
ac = ac[-xMostRecent:]
rsi14 = rsi14[-xMostRecent:]
rsi20 = rsi20[-xMostRecent:]
rsi25 = rsi25[-xMostRecent:]
rsi30 = rsi30[-xMostRecent:]
macdh = macdh[-xMostRecent:]
stok_14_3_3 = stok_14_3_3[-xMostRecent:]
stod_14_3_3 = stod_14_3_3[-xMostRecent:]
stok_26_6_6 = stok_26_6_6[-xMostRecent:]
stod_26_6_6 = stod_26_6_6[-xMostRecent:]
num_dips = 0
num_pos_m = 0
x = np.array(range(0, day_range))
for i in range(1, (xMostRecent - day_range)):
y = np.zeros(len(x))
if is_dip(ac, i):
num_dips += 1
for j in x:
y[j] = perc_change(ac[i], ac[i+j])
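                # Ordinary least-squares fit y ~= m*x + c over the day_range
                # window of post-dip fractional changes; m is the average daily
                # drift after the dip (only the commented-out plot uses it).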
A = np.vstack([x, np.ones(len(x))]).T
m, c = np.linalg.lstsq(A, y)[0]
if perc_change(ac[i], max(ac[i:i+day_range])) >= min_perc_incr:
num_pos_m += 1
# capture data about i
dip_data.append(
[
[i],
[
rsi14[i-1],
rsi20[i-1],
rsi25[i-1],
rsi30[i-1],
macdh[i-1],
stok_14_3_3[i-1],
stod_14_3_3[i-1],
stok_26_6_6[i-1],
stod_26_6_6[i-1]
], # i - 1
[
rsi14[i],
rsi20[i],
rsi25[i],
rsi30[i],
macdh[i],
stok_14_3_3[i],
stod_14_3_3[i],
stok_26_6_6[i],
stod_26_6_6[i]
], # i
[
rsi14[i+1],
rsi20[i+1],
rsi25[i+1],
rsi30[i+1],
macdh[i+1],
stok_14_3_3[i+1],
stod_14_3_3[i+1],
stok_26_6_6[i+1],
stod_26_6_6[i+1]
] # i + 1
]
)
#plt.cla()
#plt.plot(x, y, 'o')
#plt.plot(x, m*x + c, 'r')
#plt.xlim(0, day_range)
#plt.ylim(-0.2, 0.2)
#plt.savefig('plots/dip_%03d.png' % i)
#print num_dips
#print num_pos_m
#print (num_pos_m/float(num_dips))
stos = []
stos_hist = []
for data in dip_data:
stos.append([
[data[1][5], data[1][6]],
[data[2][5], data[2][6]],
[data[3][5], data[3][6]]
])
stos_hist.append([
[data[1][5] - data[1][6]],
[data[2][5] - data[2][6]],
[data[3][5] - data[3][6]]
])
stos = np.array(stos)
stos_hist = np.array(stos_hist)
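    # Each stos_hist row holds %K - %D for the day before the dip, the dip day,
    # and the day after; a sign flip from negative to positive across the first
    # two entries is read below as a bullish stochastic crossover ("v^").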
z = []
for sto_data in stos_hist:
if sto_data[0] < 0 and \
sto_data[1] > 0:
print "v^"
else:
print "."
#plt.cla()
#plt.hist(z, range=(-1, 1), bins=20)
#plt.savefig('plots/z1.png')
| gpl-2.0 |
RobertABT/heightmap | build/matplotlib/lib/matplotlib/sphinxext/mathmpl.py | 4 | 3828 | from __future__ import print_function
import os
import sys
try:
from hashlib import md5
except ImportError:
from md5 import md5
from docutils import nodes
from docutils.parsers.rst import directives
import warnings
from matplotlib import rcParams
from matplotlib.mathtext import MathTextParser
rcParams['mathtext.fontset'] = 'cm'
mathtext_parser = MathTextParser("Bitmap")
# Define LaTeX math node:
class latex_math(nodes.General, nodes.Element):
pass
def fontset_choice(arg):
return directives.choice(arg, ['cm', 'stix', 'stixsans'])
options_spec = {'fontset': fontset_choice}
def math_role(role, rawtext, text, lineno, inliner,
options={}, content=[]):
i = rawtext.find('`')
latex = rawtext[i+1:-1]
node = latex_math(rawtext)
node['latex'] = latex
node['fontset'] = options.get('fontset', 'cm')
return [node], []
math_role.options = options_spec
def math_directive(name, arguments, options, content, lineno,
content_offset, block_text, state, state_machine):
latex = ''.join(content)
node = latex_math(block_text)
node['latex'] = latex
node['fontset'] = options.get('fontset', 'cm')
return [node]
# This uses mathtext to render the expression
def latex2png(latex, filename, fontset='cm'):
latex = "$%s$" % latex
orig_fontset = rcParams['mathtext.fontset']
rcParams['mathtext.fontset'] = fontset
if os.path.exists(filename):
depth = mathtext_parser.get_depth(latex, dpi=100)
else:
try:
depth = mathtext_parser.to_png(filename, latex, dpi=100)
except:
warnings.warn("Could not render math expression %s" % latex,
Warning)
depth = 0
rcParams['mathtext.fontset'] = orig_fontset
sys.stdout.write("#")
sys.stdout.flush()
return depth
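# Illustrative usage sketch (not part of the extension): render one expression
# to a PNG and read back its baseline depth, reusing the dpi=100 setting above;
# the output path here is made up for the example.
#   depth = latex2png(r'\frac{a}{b}', '/tmp/mathmpl-example.png', fontset='cm')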
# LaTeX to HTML translation stuff:
def latex2html(node, source):
inline = isinstance(node.parent, nodes.TextElement)
latex = node['latex']
name = 'math-%s' % md5(latex.encode()).hexdigest()[-10:]
destdir = os.path.join(setup.app.builder.outdir, '_images', 'mathmpl')
if not os.path.exists(destdir):
os.makedirs(destdir)
dest = os.path.join(destdir, '%s.png' % name)
path = os.path.join(setup.app.builder.imgpath, 'mathmpl')
depth = latex2png(latex, dest, node['fontset'])
if inline:
cls = ''
else:
cls = 'class="center" '
if inline and depth != 0:
style = 'style="position: relative; bottom: -%dpx"' % (depth + 1)
else:
style = ''
return '<img src="%s/%s.png" %s%s/>' % (path, name, cls, style)
def setup(app):
setup.app = app
app.add_node(latex_math)
app.add_role('math', math_role)
# Add visit/depart methods to HTML-Translator:
def visit_latex_math_html(self, node):
source = self.document.attributes['source']
self.body.append(latex2html(node, source))
def depart_latex_math_html(self, node):
pass
# Add visit/depart methods to LaTeX-Translator:
def visit_latex_math_latex(self, node):
inline = isinstance(node.parent, nodes.TextElement)
if inline:
self.body.append('$%s$' % node['latex'])
else:
self.body.extend(['\\begin{equation}',
node['latex'],
'\\end{equation}'])
def depart_latex_math_latex(self, node):
pass
app.add_node(latex_math, html=(visit_latex_math_html,
depart_latex_math_html))
app.add_node(latex_math, latex=(visit_latex_math_latex,
depart_latex_math_latex))
app.add_role('math', math_role)
app.add_directive('math', math_directive,
True, (0, 0, 0), **options_spec)
| mit |
DrigerG/IIITB-ML | project/DigiRec/DigiRec.py | 1 | 2488 | #!/usr/bin/env python
"""CharRec.py: Runs a suite of ML algorithms on a character dataset"""
import logging
import numpy as np
import pandas as pd
from sklearn import tree, ensemble, neighbors, neural_network, multiclass
from sklearn.model_selection import cross_val_score
__author__ = "Pradeep Kumar A.V."
logging.basicConfig(filename='execution_log.log',
format='%(asctime)s %(message)s',
level=logging.DEBUG)
def log_and_print(msg):
logging.info(msg)
print(msg)
def train(x_train, y_train, algo="DT"):
"""
:param x_train: training data features
:param y_train: training data labels
:param algo: choice of learning algorithm [DT, RF, KNN, MLP]
:return: model object
"""
if algo == "DT":
cla = tree.DecisionTreeClassifier()
elif algo == "RF":
cla = ensemble.RandomForestClassifier(max_features=40,
n_estimators=500,
n_jobs=1,
max_depth=150)
elif algo == "KNN":
cla = neighbors.KNeighborsClassifier()
elif algo == "MLP":
cla = neural_network.MLPClassifier()
# Enable one of the optimization methods
# One Vs Rest multi class strategy
cla = multiclass.OneVsRestClassifier(cla)
# AdaBoost ensemble boosting method
# cla = ensemble.AdaBoostClassifier(cla)
cla.fit(x_train, y_train)
return cla
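# Illustrative usage sketch (not part of the original script): fit one of the
# classifiers on made-up data; the shapes and values below are placeholders.
#   model = train(np.random.rand(50, 64), np.random.randint(0, 10, 50), algo="RF")
#   predictions = model.predict(np.random.rand(5, 64))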
def main():
# Read training data
log_and_print("Reading training data")
dataset = pd.read_csv("/home/harold/train.csv")
y_train = dataset[[0]].values.ravel()
x_train = dataset.iloc[:, 1:].values
# Read testing data
log_and_print("Reading test data")
x_test = pd.read_csv("/home/harold/test.csv").values
for algo in ["DT", "RF", "KNN", "MLP"]:
log_and_print("Training the %s classifier" % algo)
model = train(x_train, y_train, algo)
log_and_print("Cross validating the classifier")
scores = cross_val_score(model, x_train, y_train, n_jobs=-1)
log_and_print("%s Cross-validation score = %s "
% (algo, scores.mean()))
pred = model.predict(x_test)
np.savetxt('submission_%s.csv' % algo,
np.c_[range(1, len(x_test)+1), pred],
delimiter=',', header='ImageId,Label',
comments='', fmt='%d')
if __name__ == '__main__':
main()
| apache-2.0 |
GillesPy/gillespy | examples/parameter_changing.py | 1 | 4014 | import scipy as sp
import numpy as np
import matplotlib.pyplot as plt
import sys
sys.path[:0] = ['..']
import gillespy
class parameter_changing_model(gillespy.Model):
"""
This toy example shows how we can simply simulate the same model for
multiple parameter sets. Our model consists of the following reactions:
0 -> S1
S1 + S1 -> S2
S2 -> 0
    That is, S1 is produced, dimerizes into S2, and the dimer is degraded.
"""
def __init__(self, parameter_values=None):
# Initialize the model.
gillespy.Model.__init__(self, name="simple1")
# Parameters
k1 = gillespy.Parameter(name='k1', expression=parameter_values[0])
k2 = gillespy.Parameter(name='k2', expression=parameter_values[1])
k3 = gillespy.Parameter(name='k3', expression=parameter_values[2])
self.add_parameter([k1, k2, k3])
# Species
S1 = gillespy.Species(name='S1', initial_value=100)
S2 = gillespy.Species(name='S2', initial_value=0)
self.add_species([S1, S2])
# Reactions
rxn1 = gillespy.Reaction(
name = 'S1 production',
reactants = {},
products = {S1:1},
rate = k1 )
rxn2 = gillespy.Reaction(
name = 'dimer formation',
reactants = {S1:2},
products = {S2:1},
rate = k2)
rxn3 = gillespy.Reaction(
name = 'dimer degradation',
reactants = {S2:1},
products = {},
rate = k3)
self.add_reaction([rxn1, rxn2, rxn3])
self.timespan(np.linspace(0,100,101))
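# Quick usage sketch (illustrative, mirrors the plotting code below): build one
# model and run a single stochastic trajectory; with show_labels=False, column 0
# of each trajectory holds time and columns 1-2 hold the S1 and S2 counts.
#   model = parameter_changing_model(parameter_values=[100, 0.1, 0.1])
#   trajectory = model.run(number_of_trajectories=1, show_labels=False)[0]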
if __name__ == '__main__':
# Here, we create the model objects. We have two different parameter
# sets:
set1 = [100, 0.1, 0.1]
set2 = [100, 0.001, 0.1]
# For set #2, dimers (S2) form much less readily.
set1_model = parameter_changing_model(parameter_values = set1)
set2_model = parameter_changing_model(parameter_values = set2)
num_trajectories = 100
# Let's simulate for both parameter sets, and compare the results
set1_trajectories = set1_model.run(number_of_trajectories = num_trajectories, show_labels=False)
set2_trajectories = set2_model.run(number_of_trajectories = num_trajectories, show_labels=False)
# Done! That was simple.
# PLOTTING RESULTS
# here, we will plot all trajectories with the mean overlaid
from matplotlib import gridspec
gs = gridspec.GridSpec(1,2)
alp = 0.1 # alpha value
# extract time values
time = np.array(set1_trajectories[0][:,0])
# Plot for parameter set #1
ax0 = plt.subplot(gs[0,0])
set1_S1 = np.array([set1_trajectories[i][:,1] for i in xrange(num_trajectories)]).T
    set1_S2 = np.array([set1_trajectories[i][:,2] for i in xrange(num_trajectories)]).T
#plot individual trajectories
ax0.plot(time, set1_S1, 'r', alpha = alp)
ax0.plot(time, set1_S2, 'b', alpha = alp)
#plot mean
ax0.plot(time, set1_S1.mean(1), 'k--', label = "Mean S1")
ax0.plot(time, set1_S2.mean(1), 'k:', label = "Mean S2")
ax0.legend()
ax0.set_xlabel('Time')
ax0.set_ylabel('Species Count')
ax0.set_title('Parameter Set 1')
# Plot for parameter set #2
ax1 = plt.subplot(gs[0,1])
set2_S1 = np.array([set2_trajectories[i][:,1] for i in xrange(num_trajectories)]).T
set2_S2 = np.array([set2_trajectories[i][:,2] for i in xrange(num_trajectories)]).T
#plot individual trajectories
ax1.plot(time, set2_S1, 'r', alpha = alp)
ax1.plot(time, set2_S2, 'b', alpha = alp)
#plot mean
ax1.plot(time, set2_S1.mean(1), 'k--', label = "Mean S1")
ax1.plot(time, set2_S2.mean(1), 'k:', label = "Mean S2")
ax1.legend()
ax1.set_xlabel('Time')
ax1.set_title('Parameter Set 2')
plt.tight_layout()
plt.show()
| gpl-3.0 |
jalexvig/tensorflow | tensorflow/contrib/learn/python/learn/estimators/dnn_linear_combined_test.py | 30 | 70017 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for DNNLinearCombinedEstimators."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import json
import tempfile
import numpy as np
from tensorflow.contrib.layers.python.layers import feature_column
from tensorflow.contrib.learn.python.learn import experiment
from tensorflow.contrib.learn.python.learn.datasets import base
from tensorflow.contrib.learn.python.learn.estimators import _sklearn
from tensorflow.contrib.learn.python.learn.estimators import dnn_linear_combined
from tensorflow.contrib.learn.python.learn.estimators import estimator_test_utils
from tensorflow.contrib.learn.python.learn.estimators import head as head_lib
from tensorflow.contrib.learn.python.learn.estimators import model_fn
from tensorflow.contrib.learn.python.learn.estimators import run_config
from tensorflow.contrib.learn.python.learn.estimators import test_data
from tensorflow.contrib.learn.python.learn.metric_spec import MetricSpec
from tensorflow.contrib.metrics.python.ops import metric_ops
from tensorflow.python.feature_column import feature_column as fc_core
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.losses import losses
from tensorflow.python.platform import test
from tensorflow.python.training import adagrad
from tensorflow.python.training import ftrl
from tensorflow.python.training import input as input_lib
from tensorflow.python.training import learning_rate_decay
from tensorflow.python.training import monitored_session
from tensorflow.python.training import server_lib
from tensorflow.python.training import session_run_hook
from tensorflow.python.training import sync_replicas_optimizer
from tensorflow.python.training import training_util
def _assert_metrics_in_range(keys, metrics):
epsilon = 0.00001 # Added for floating point edge cases.
for key in keys:
estimator_test_utils.assert_in_range(0.0 - epsilon, 1.0 + epsilon, key,
metrics)
class _CheckCallsHead(head_lib.Head):
"""Head that checks whether head_ops is called."""
def __init__(self):
self._head_ops_called_times = 0
@property
def logits_dimension(self):
return 1
def create_model_fn_ops(
self, mode, features, labels=None, train_op_fn=None, logits=None,
logits_input=None, scope=None):
"""See `_Head`."""
self._head_ops_called_times += 1
loss = losses.mean_squared_error(labels, logits)
return model_fn.ModelFnOps(
mode,
predictions={'loss': loss},
loss=loss,
train_op=train_op_fn(loss),
eval_metric_ops={'loss': loss})
@property
def head_ops_called_times(self):
return self._head_ops_called_times
class _StepCounterHook(session_run_hook.SessionRunHook):
"""Counts the number of training steps."""
def __init__(self):
self._steps = 0
def after_run(self, run_context, run_values):
del run_context, run_values
self._steps += 1
@property
def steps(self):
return self._steps
class EmbeddingMultiplierTest(test.TestCase):
"""dnn_model_fn tests."""
def testRaisesNonEmbeddingColumn(self):
one_hot_language = feature_column.one_hot_column(
feature_column.sparse_column_with_hash_bucket('language', 10))
params = {
'dnn_feature_columns': [one_hot_language],
'head': head_lib.multi_class_head(2),
'dnn_hidden_units': [1],
# Set lr mult to 0. to keep embeddings constant.
'embedding_lr_multipliers': {
one_hot_language: 0.0
},
'dnn_optimizer': 'Adagrad',
}
features = {
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [1, 0], [2, 0]],
dense_shape=[3, 1]),
}
labels = constant_op.constant([[0], [0], [0]], dtype=dtypes.int32)
with self.assertRaisesRegexp(ValueError,
'can only be defined for embedding columns'):
dnn_linear_combined._dnn_linear_combined_model_fn(features, labels,
model_fn.ModeKeys.TRAIN,
params)
def testMultipliesGradient(self):
embedding_language = feature_column.embedding_column(
feature_column.sparse_column_with_hash_bucket('language', 10),
dimension=1,
initializer=init_ops.constant_initializer(0.1))
embedding_wire = feature_column.embedding_column(
feature_column.sparse_column_with_hash_bucket('wire', 10),
dimension=1,
initializer=init_ops.constant_initializer(0.1))
params = {
'dnn_feature_columns': [embedding_language, embedding_wire],
'head': head_lib.multi_class_head(2),
'dnn_hidden_units': [1],
# Set lr mult to 0. to keep language embeddings constant, whereas wire
# embeddings will be trained.
'embedding_lr_multipliers': {
embedding_language: 0.0
},
'dnn_optimizer': 'Adagrad',
}
with ops.Graph().as_default():
features = {
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [1, 0], [2, 0]],
dense_shape=[3, 1]),
'wire':
sparse_tensor.SparseTensor(
values=['omar', 'stringer', 'marlo'],
indices=[[0, 0], [1, 0], [2, 0]],
dense_shape=[3, 1]),
}
labels = constant_op.constant([[1], [0], [0]], dtype=dtypes.int32)
training_util.create_global_step()
model_ops = dnn_linear_combined._dnn_linear_combined_model_fn(
features, labels, model_fn.ModeKeys.TRAIN, params)
with monitored_session.MonitoredSession() as sess:
language_var = dnn_linear_combined._get_embedding_variable(
embedding_language, 'dnn', 'dnn/input_from_feature_columns')
language_initial_value = sess.run(language_var)
for _ in range(2):
_, language_value = sess.run([model_ops.train_op, language_var])
self.assertAllClose(language_value, language_initial_value)
# We could also test that wire_value changed, but that test would be flaky.
class DNNLinearCombinedEstimatorTest(test.TestCase):
def testEstimatorContract(self):
estimator_test_utils.assert_estimator_contract(
self, dnn_linear_combined.DNNLinearCombinedEstimator)
def testNoFeatureColumns(self):
with self.assertRaisesRegexp(
ValueError,
'Either linear_feature_columns or dnn_feature_columns must be defined'):
dnn_linear_combined.DNNLinearCombinedEstimator(
head=_CheckCallsHead(),
linear_feature_columns=None,
dnn_feature_columns=None,
dnn_hidden_units=[3, 3])
def testCheckCallsHead(self):
"""Tests binary classification using matrix data as input."""
head = _CheckCallsHead()
iris = test_data.prepare_iris_data_for_logistic_regression()
cont_features = [
feature_column.real_valued_column('feature', dimension=4)]
bucketized_feature = [feature_column.bucketized_column(
cont_features[0], test_data.get_quantile_based_buckets(iris.data, 10))]
estimator = dnn_linear_combined.DNNLinearCombinedEstimator(
head,
linear_feature_columns=bucketized_feature,
dnn_feature_columns=cont_features,
dnn_hidden_units=[3, 3])
estimator.fit(input_fn=test_data.iris_input_multiclass_fn, steps=10)
self.assertEqual(1, head.head_ops_called_times)
estimator.evaluate(input_fn=test_data.iris_input_multiclass_fn, steps=10)
self.assertEqual(2, head.head_ops_called_times)
estimator.predict(input_fn=test_data.iris_input_multiclass_fn)
self.assertEqual(3, head.head_ops_called_times)
class DNNLinearCombinedClassifierTest(test.TestCase):
def testEstimatorContract(self):
estimator_test_utils.assert_estimator_contract(
self, dnn_linear_combined.DNNLinearCombinedClassifier)
def testExperimentIntegration(self):
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
exp = experiment.Experiment(
estimator=dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=cont_features,
dnn_feature_columns=cont_features,
dnn_hidden_units=[3, 3]),
train_input_fn=test_data.iris_input_logistic_fn,
eval_input_fn=test_data.iris_input_logistic_fn)
exp.test()
def testNoFeatureColumns(self):
with self.assertRaisesRegexp(
ValueError,
'Either linear_feature_columns or dnn_feature_columns must be defined'):
dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=None,
dnn_feature_columns=None,
dnn_hidden_units=[3, 3])
def testNoDnnHiddenUnits(self):
def _input_fn():
return {
'age':
constant_op.constant([1]),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[1]])
language = feature_column.sparse_column_with_hash_bucket('language', 100)
age = feature_column.real_valued_column('age')
with self.assertRaisesRegexp(
ValueError,
'dnn_hidden_units must be defined when dnn_feature_columns is '
'specified'):
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
dnn_feature_columns=[age, language])
classifier.fit(input_fn=_input_fn, steps=2)
def testSyncReplicasOptimizerUnsupported(self):
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
sync_optimizer = sync_replicas_optimizer.SyncReplicasOptimizer(
opt=adagrad.AdagradOptimizer(learning_rate=0.1),
replicas_to_aggregate=1,
total_num_replicas=1)
sync_hook = sync_optimizer.make_session_run_hook(is_chief=True)
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
n_classes=3,
dnn_feature_columns=cont_features,
dnn_hidden_units=[3, 3],
dnn_optimizer=sync_optimizer)
with self.assertRaisesRegexp(
ValueError,
'SyncReplicasOptimizer is not supported in DNNLinearCombined model'):
classifier.fit(
input_fn=test_data.iris_input_multiclass_fn, steps=100,
monitors=[sync_hook])
def testEmbeddingMultiplier(self):
embedding_language = feature_column.embedding_column(
feature_column.sparse_column_with_hash_bucket('language', 10),
dimension=1,
initializer=init_ops.constant_initializer(0.1))
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
dnn_feature_columns=[embedding_language],
dnn_hidden_units=[3, 3],
embedding_lr_multipliers={embedding_language: 0.8})
self.assertEqual({
embedding_language: 0.8
}, classifier.params['embedding_lr_multipliers'])
def testInputPartitionSize(self):
def _input_fn_float_label(num_epochs=None):
features = {
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
labels = constant_op.constant([[0.8], [0.], [0.2]], dtype=dtypes.float32)
return features, labels
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
feature_column.embedding_column(language_column, dimension=1),
]
# Set num_ps_replica to be 10 and the min slice size to be extremely small,
    # so as to ensure that there'll be 10 partitions produced.
config = run_config.RunConfig(tf_random_seed=1)
config._num_ps_replicas = 10
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
n_classes=2,
dnn_feature_columns=feature_columns,
dnn_hidden_units=[3, 3],
dnn_optimizer='Adagrad',
config=config,
input_layer_min_slice_size=1)
# Ensure the param is passed in.
self.assertTrue(callable(classifier.params['input_layer_partitioner']))
# Ensure the partition count is 10.
classifier.fit(input_fn=_input_fn_float_label, steps=50)
partition_count = 0
for name in classifier.get_variable_names():
if 'language_embedding' in name and 'Adagrad' in name:
partition_count += 1
self.assertEqual(10, partition_count)
def testLogisticRegression_MatrixData(self):
"""Tests binary classification using matrix data as input."""
iris = test_data.prepare_iris_data_for_logistic_regression()
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
bucketized_feature = [
feature_column.bucketized_column(
cont_features[0],
test_data.get_quantile_based_buckets(iris.data, 10))
]
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=bucketized_feature,
dnn_feature_columns=cont_features,
dnn_hidden_units=[3, 3])
classifier.fit(input_fn=test_data.iris_input_logistic_fn, steps=100)
scores = classifier.evaluate(
input_fn=test_data.iris_input_logistic_fn, steps=100)
_assert_metrics_in_range(('accuracy', 'auc'), scores)
def testLogisticRegression_TensorData(self):
"""Tests binary classification using Tensor data as input."""
def _input_fn():
iris = test_data.prepare_iris_data_for_logistic_regression()
features = {}
for i in range(4):
# The following shows how to provide the Tensor data for
# RealValuedColumns.
features.update({
str(i):
array_ops.reshape(
constant_op.constant(
iris.data[:, i], dtype=dtypes.float32), [-1, 1])
})
# The following shows how to provide the SparseTensor data for
# a SparseColumn.
features['dummy_sparse_column'] = sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [60, 0]],
dense_shape=[len(iris.target), 2])
labels = array_ops.reshape(
constant_op.constant(
iris.target, dtype=dtypes.int32), [-1, 1])
return features, labels
iris = test_data.prepare_iris_data_for_logistic_regression()
cont_features = [
feature_column.real_valued_column(str(i)) for i in range(4)
]
linear_features = [
feature_column.bucketized_column(cont_features[i],
test_data.get_quantile_based_buckets(
iris.data[:, i], 10))
for i in range(4)
]
linear_features.append(
feature_column.sparse_column_with_hash_bucket(
'dummy_sparse_column', hash_bucket_size=100))
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=linear_features,
dnn_feature_columns=cont_features,
dnn_hidden_units=[3, 3])
classifier.fit(input_fn=_input_fn, steps=100)
scores = classifier.evaluate(input_fn=_input_fn, steps=100)
_assert_metrics_in_range(('accuracy', 'auc'), scores)
def testEstimatorWithCoreFeatureColumns(self):
"""Tests binary classification using Tensor data as input."""
def _input_fn():
iris = test_data.prepare_iris_data_for_logistic_regression()
features = {}
for i in range(4):
# The following shows how to provide the Tensor data for
# RealValuedColumns.
features.update({
str(i):
array_ops.reshape(
constant_op.constant(iris.data[:, i], dtype=dtypes.float32),
[-1, 1])
})
# The following shows how to provide the SparseTensor data for
# a SparseColumn.
features['dummy_sparse_column'] = sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [60, 0]],
dense_shape=[len(iris.target), 2])
labels = array_ops.reshape(
constant_op.constant(iris.target, dtype=dtypes.int32), [-1, 1])
return features, labels
iris = test_data.prepare_iris_data_for_logistic_regression()
cont_features = [fc_core.numeric_column(str(i)) for i in range(4)]
linear_features = [
fc_core.bucketized_column(
cont_features[i],
sorted(set(test_data.get_quantile_based_buckets(
iris.data[:, i], 10)))) for i in range(4)
]
linear_features.append(
fc_core.categorical_column_with_hash_bucket(
'dummy_sparse_column', hash_bucket_size=100))
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=linear_features,
dnn_feature_columns=cont_features,
dnn_hidden_units=[3, 3])
classifier.fit(input_fn=_input_fn, steps=100)
scores = classifier.evaluate(input_fn=_input_fn, steps=100)
_assert_metrics_in_range(('accuracy', 'auc'), scores)
def testTrainWithPartitionedVariables(self):
"""Tests training with partitioned variables."""
def _input_fn():
features = {
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
labels = constant_op.constant([[1], [0], [0]])
return features, labels
sparse_features = [
# The given hash_bucket_size results in variables larger than the
# default min_slice_size attribute, so the variables are partitioned.
feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=2e7)
]
embedding_features = [
feature_column.embedding_column(
sparse_features[0], dimension=1)
]
tf_config = {
'cluster': {
run_config.TaskType.PS: ['fake_ps_0', 'fake_ps_1']
}
}
with test.mock.patch.dict('os.environ',
{'TF_CONFIG': json.dumps(tf_config)}):
config = run_config.RunConfig()
# Because we did not start a distributed cluster, we need to pass an
# empty ClusterSpec, otherwise the device_setter will look for
# distributed jobs, such as "/job:ps" which are not present.
config._cluster_spec = server_lib.ClusterSpec({})
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=sparse_features,
dnn_feature_columns=embedding_features,
dnn_hidden_units=[3, 3],
config=config)
classifier.fit(input_fn=_input_fn, steps=100)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
_assert_metrics_in_range(('accuracy', 'auc'), scores)
def testMultiClass(self):
"""Tests multi-class classification using matrix data as input.
Please see testLogisticRegression_TensorData() for how to use Tensor
data as input instead.
"""
iris = base.load_iris()
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
bucketized_features = [
feature_column.bucketized_column(
cont_features[0],
test_data.get_quantile_based_buckets(iris.data, 10))
]
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
n_classes=3,
linear_feature_columns=bucketized_features,
dnn_feature_columns=cont_features,
dnn_hidden_units=[3, 3])
classifier.fit(input_fn=test_data.iris_input_multiclass_fn, steps=100)
scores = classifier.evaluate(
input_fn=test_data.iris_input_multiclass_fn, steps=100)
_assert_metrics_in_range(('accuracy',), scores)
def testMultiClassLabelKeys(self):
"""Tests n_classes > 2 with label_keys vocabulary for labels."""
# Byte literals needed for python3 test to pass.
label_keys = [b'label0', b'label1', b'label2']
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[.8], [0.2], [.1]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
labels = constant_op.constant(
[[label_keys[1]], [label_keys[0]], [label_keys[0]]],
dtype=dtypes.string)
return features, labels
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
n_classes=3,
linear_feature_columns=[language_column],
dnn_feature_columns=[
feature_column.embedding_column(
language_column, dimension=1),
feature_column.real_valued_column('age')
],
dnn_hidden_units=[3, 3],
label_keys=label_keys)
classifier.fit(input_fn=_input_fn, steps=50)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
_assert_metrics_in_range(('accuracy',), scores)
self.assertIn('loss', scores)
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predicted_classes = list(
classifier.predict_classes(
input_fn=predict_input_fn, as_iterable=True))
self.assertEqual(3, len(predicted_classes))
for pred in predicted_classes:
self.assertIn(pred, label_keys)
predictions = list(
classifier.predict(input_fn=predict_input_fn, as_iterable=True))
self.assertAllEqual(predicted_classes, predictions)
def testLoss(self):
"""Tests loss calculation."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# The logistic prediction should be (y = 0.25).
features = {'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),}
labels = constant_op.constant([[1], [0], [0], [0]])
return features, labels
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
n_classes=2,
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn_train, steps=100)
scores = classifier.evaluate(input_fn=_input_fn_train, steps=1)
# Cross entropy = -0.25*log(0.25)-0.75*log(0.75) = 0.562
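    # (Natural log: 0.25 * 1.386 + 0.75 * 0.288 = 0.347 + 0.216 ~= 0.56.)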
self.assertAlmostEqual(0.562, scores['loss'], delta=0.1)
def testLossWithWeights(self):
"""Tests loss calculation with weights."""
def _input_fn_train():
# 4 rows with equal weight, one of them (y = x), three of them (y=Not(x))
# The logistic prediction should be (y = 0.25).
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[1.], [1.], [1.], [1.]])
}
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
return features, labels
def _input_fn_eval():
# 4 rows, with different weights.
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[7.], [1.], [1.], [1.]])
}
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
return features, labels
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
weight_column_name='w',
n_classes=2,
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn_train, steps=100)
scores = classifier.evaluate(input_fn=_input_fn_eval, steps=1)
# Weighted cross entropy = (-7*log(0.25)-3*log(0.75))/10 = 1.06
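    # (Natural log: (7 * 1.386 + 3 * 0.288) / 10 ~= (9.70 + 0.86) / 10 ~= 1.06.)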
self.assertAlmostEqual(1.06, scores['loss'], delta=0.1)
def testTrainWithWeights(self):
"""Tests training with given weight column."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# First row has more weight than others. Model should fit (y=x) better
# than (y=Not(x)) due to the relative higher weight of the first row.
labels = constant_op.constant([[1], [0], [0], [0]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[100.], [3.], [2.], [2.]])
}
return features, labels
def _input_fn_eval():
# Create 4 rows (y = x).
labels = constant_op.constant([[1], [1], [1], [1]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[1.], [1.], [1.], [1.]])
}
return features, labels
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
weight_column_name='w',
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn_train, steps=100)
scores = classifier.evaluate(input_fn=_input_fn_eval, steps=1)
_assert_metrics_in_range(('accuracy',), scores)
def testCustomOptimizerByObject(self):
"""Tests binary classification using matrix data as input."""
iris = test_data.prepare_iris_data_for_logistic_regression()
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
bucketized_features = [
feature_column.bucketized_column(
cont_features[0],
test_data.get_quantile_based_buckets(iris.data, 10))
]
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=bucketized_features,
linear_optimizer=ftrl.FtrlOptimizer(learning_rate=0.1),
dnn_feature_columns=cont_features,
dnn_hidden_units=[3, 3],
dnn_optimizer=adagrad.AdagradOptimizer(learning_rate=0.1))
classifier.fit(input_fn=test_data.iris_input_logistic_fn, steps=100)
scores = classifier.evaluate(
input_fn=test_data.iris_input_logistic_fn, steps=100)
_assert_metrics_in_range(('accuracy',), scores)
def testCustomOptimizerByString(self):
"""Tests binary classification using matrix data as input."""
iris = test_data.prepare_iris_data_for_logistic_regression()
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
bucketized_features = [
feature_column.bucketized_column(
cont_features[0],
test_data.get_quantile_based_buckets(iris.data, 10))
]
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=bucketized_features,
linear_optimizer='Ftrl',
dnn_feature_columns=cont_features,
dnn_hidden_units=[3, 3],
dnn_optimizer='Adagrad')
classifier.fit(input_fn=test_data.iris_input_logistic_fn, steps=100)
scores = classifier.evaluate(
input_fn=test_data.iris_input_logistic_fn, steps=100)
_assert_metrics_in_range(('accuracy',), scores)
def testCustomOptimizerByFunction(self):
"""Tests binary classification using matrix data as input."""
iris = test_data.prepare_iris_data_for_logistic_regression()
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
bucketized_features = [
feature_column.bucketized_column(
cont_features[0],
test_data.get_quantile_based_buckets(iris.data, 10))
]
def _optimizer_exp_decay():
global_step = training_util.get_global_step()
learning_rate = learning_rate_decay.exponential_decay(
learning_rate=0.1,
global_step=global_step,
decay_steps=100,
decay_rate=0.001)
return adagrad.AdagradOptimizer(learning_rate=learning_rate)
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=bucketized_features,
linear_optimizer=_optimizer_exp_decay,
dnn_feature_columns=cont_features,
dnn_hidden_units=[3, 3],
dnn_optimizer=_optimizer_exp_decay)
classifier.fit(input_fn=test_data.iris_input_logistic_fn, steps=100)
scores = classifier.evaluate(
input_fn=test_data.iris_input_logistic_fn, steps=100)
_assert_metrics_in_range(('accuracy',), scores)
def testPredict(self):
"""Tests weight column in evaluation."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
labels = constant_op.constant([[1], [0], [0], [0]])
features = {'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32)}
return features, labels
def _input_fn_predict():
y = input_lib.limit_epochs(
array_ops.ones(
shape=[4, 1], dtype=dtypes.float32), num_epochs=1)
features = {'x': y}
return features
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3])
classifier.fit(input_fn=_input_fn_train, steps=100)
probs = list(classifier.predict_proba(input_fn=_input_fn_predict))
self.assertAllClose([[0.75, 0.25]] * 4, probs, 0.05)
classes = list(classifier.predict_classes(input_fn=_input_fn_predict))
self.assertListEqual([0] * 4, classes)
def testCustomMetrics(self):
"""Tests custom evaluation metrics."""
def _input_fn(num_epochs=None):
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
labels = constant_op.constant([[1], [0], [0], [0]])
features = {
'x':
input_lib.limit_epochs(
array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
num_epochs=num_epochs)
}
return features, labels
def _my_metric_op(predictions, labels):
# For the case of binary classification, the 2nd column of "predictions"
# denotes the model predictions.
labels = math_ops.to_float(labels)
predictions = array_ops.strided_slice(
predictions, [0, 1], [-1, 2], end_mask=1)
return math_ops.reduce_sum(math_ops.multiply(predictions, labels))
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3])
classifier.fit(input_fn=_input_fn, steps=100)
scores = classifier.evaluate(
input_fn=_input_fn,
steps=100,
metrics={
'my_accuracy':
MetricSpec(
metric_fn=metric_ops.streaming_accuracy,
prediction_key='classes'),
'my_precision':
MetricSpec(
metric_fn=metric_ops.streaming_precision,
prediction_key='classes'),
'my_metric':
MetricSpec(
metric_fn=_my_metric_op, prediction_key='probabilities')
})
self.assertTrue(
set(['loss', 'my_accuracy', 'my_precision', 'my_metric']).issubset(
set(scores.keys())))
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions = np.array(list(classifier.predict_classes(
input_fn=predict_input_fn)))
self.assertEqual(
_sklearn.accuracy_score([1, 0, 0, 0], predictions),
scores['my_accuracy'])
# Test the case where the 2nd element of the key is neither "classes" nor
# "probabilities".
with self.assertRaisesRegexp(KeyError, 'bad_type'):
classifier.evaluate(
input_fn=_input_fn,
steps=100,
metrics={('bad_name', 'bad_type'): metric_ops.streaming_auc})
# Test the case where the tuple of the key doesn't have 2 elements.
with self.assertRaises(ValueError):
classifier.evaluate(
input_fn=_input_fn,
steps=100,
metrics={
('bad_length_name', 'classes', 'bad_length'):
metric_ops.streaming_accuracy
})
# Test the case where the prediction_key is neither "classes" nor
# "probabilities".
with self.assertRaisesRegexp(KeyError, 'bad_type'):
classifier.evaluate(
input_fn=_input_fn,
steps=100,
metrics={
'bad_name':
MetricSpec(
metric_fn=metric_ops.streaming_auc,
prediction_key='bad_type')
})
def testVariableQuery(self):
"""Tests get_variable_names and get_variable_value."""
def _input_fn_train():
# Create 4 rows, three (y = x), one (y=Not(x))
labels = constant_op.constant([[1], [1], [1], [0]])
features = {'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),}
return features, labels
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3])
classifier.fit(input_fn=_input_fn_train, steps=500)
var_names = classifier.get_variable_names()
self.assertGreater(len(var_names), 3)
for name in var_names:
classifier.get_variable_value(name)
def testExport(self):
"""Tests export model for servo."""
def input_fn():
return {
'age':
constant_op.constant([1]),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[1]])
language = feature_column.sparse_column_with_hash_bucket('language', 100)
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=[
feature_column.real_valued_column('age'),
language,
],
dnn_feature_columns=[
feature_column.embedding_column(
language, dimension=1),
],
dnn_hidden_units=[3, 3])
classifier.fit(input_fn=input_fn, steps=100)
export_dir = tempfile.mkdtemp()
input_feature_key = 'examples'
def serving_input_fn():
features, targets = input_fn()
features[input_feature_key] = array_ops.placeholder(dtypes.string)
return features, targets
classifier.export(
export_dir,
serving_input_fn,
input_feature_key,
use_deprecated_input_fn=False)
def testCenteredBias(self):
"""Tests bias is centered or not."""
def _input_fn_train():
# Create 4 rows, three (y = x), one (y=Not(x))
labels = constant_op.constant([[1], [1], [1], [0]])
features = {'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),}
return features, labels
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3],
enable_centered_bias=True)
classifier.fit(input_fn=_input_fn_train, steps=1000)
self.assertIn('binary_logistic_head/centered_bias_weight',
classifier.get_variable_names())
# logodds(0.75) = 1.09861228867
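    # (log(0.75 / 0.25) = log(3) ~= 1.0986, which the centered bias converges to.)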
self.assertAlmostEqual(
1.0986,
float(classifier.get_variable_value(
'binary_logistic_head/centered_bias_weight')[0]),
places=2)
def testDisableCenteredBias(self):
"""Tests bias is centered or not."""
def _input_fn_train():
# Create 4 rows, three (y = x), one (y=Not(x))
labels = constant_op.constant([[1], [1], [1], [0]])
features = {'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),}
return features, labels
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3],
enable_centered_bias=False)
classifier.fit(input_fn=_input_fn_train, steps=500)
self.assertNotIn('centered_bias_weight', classifier.get_variable_names())
def testGlobalStepLinearOnly(self):
"""Tests global step update for linear-only model."""
def input_fn():
return {
'age': constant_op.constant([1]),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[1]])
language = feature_column.sparse_column_with_hash_bucket('language', 10)
age = feature_column.real_valued_column('age')
step_counter = _StepCounterHook()
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=[age, language])
classifier.fit(input_fn=input_fn, steps=100, monitors=[step_counter])
self.assertEqual(100, step_counter.steps)
def testGlobalStepDNNOnly(self):
"""Tests global step update for dnn-only model."""
def input_fn():
return {
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[1]])
language = feature_column.sparse_column_with_hash_bucket('language', 10)
step_counter = _StepCounterHook()
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
dnn_feature_columns=[
feature_column.embedding_column(language, dimension=1)],
dnn_hidden_units=[3, 3])
classifier.fit(input_fn=input_fn, steps=100, monitors=[step_counter])
self.assertEqual(100, step_counter.steps)
def testGlobalStepDNNLinearCombinedBug(self):
"""Tests global step update for dnn-linear combined model."""
def input_fn():
return {
'age': constant_op.constant([1]),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[1]])
language = feature_column.sparse_column_with_hash_bucket('language', 10)
age = feature_column.real_valued_column('age')
step_counter = _StepCounterHook()
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=[age, language],
dnn_feature_columns=[
feature_column.embedding_column(language, dimension=1)],
dnn_hidden_units=[3, 3],
fix_global_step_increment_bug=False)
classifier.fit(input_fn=input_fn, steps=100, monitors=[step_counter])
global_step = classifier.get_variable_value('global_step')
if global_step == 100:
# Expected is 100, but because of the global step increment bug, is 50.
# Occasionally, step increments one more time due to a race condition,
# reaching 51 steps.
self.assertIn(step_counter.steps, [50, 51])
else:
# Occasionally, training stops when global_step == 102, due to a race
# condition. In addition, occasionally step increments one more time due
# to a race condition reaching 52 steps.
self.assertIn(step_counter.steps, [51, 52])
def testGlobalStepDNNLinearCombinedBugFixed(self):
"""Tests global step update for dnn-linear combined model."""
def input_fn():
return {
'age': constant_op.constant([1]),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[1]])
language = feature_column.sparse_column_with_hash_bucket('language', 10)
age = feature_column.real_valued_column('age')
step_counter = _StepCounterHook()
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=[age, language],
dnn_feature_columns=[
feature_column.embedding_column(language, dimension=1)],
dnn_hidden_units=[3, 3],
fix_global_step_increment_bug=True)
classifier.fit(input_fn=input_fn, steps=100, monitors=[step_counter])
self.assertEqual(100, step_counter.steps)
def testLinearOnly(self):
"""Tests that linear-only instantiation works."""
def input_fn():
return {
'age':
constant_op.constant([1]),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[1]])
language = feature_column.sparse_column_with_hash_bucket('language', 100)
age = feature_column.real_valued_column('age')
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=[age, language])
classifier.fit(input_fn=input_fn, steps=100)
loss1 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
classifier.fit(input_fn=input_fn, steps=200)
loss2 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
self.assertLess(loss2, loss1)
variable_names = classifier.get_variable_names()
self.assertNotIn('dnn/logits/biases', variable_names)
self.assertNotIn('dnn/logits/weights', variable_names)
self.assertIn('linear/bias_weight', variable_names)
self.assertIn('linear/age/weight', variable_names)
self.assertIn('linear/language/weights', variable_names)
    self.assertEqual(
        1, len(classifier.get_variable_value('linear/age/weight')))
    self.assertEqual(
        100, len(classifier.get_variable_value('linear/language/weights')))
def testLinearOnlyOneFeature(self):
"""Tests that linear-only instantiation works for one feature only."""
def input_fn():
return {
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[1]])
language = feature_column.sparse_column_with_hash_bucket('language', 99)
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=[language])
classifier.fit(input_fn=input_fn, steps=100)
loss1 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
classifier.fit(input_fn=input_fn, steps=200)
loss2 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
self.assertLess(loss2, loss1)
variable_names = classifier.get_variable_names()
self.assertNotIn('dnn/logits/biases', variable_names)
self.assertNotIn('dnn/logits/weights', variable_names)
self.assertIn('linear/bias_weight', variable_names)
self.assertIn('linear/language/weights', variable_names)
    self.assertEqual(
        1, len(classifier.get_variable_value('linear/bias_weight')))
    self.assertEqual(
        99, len(classifier.get_variable_value('linear/language/weights')))
def testDNNOnly(self):
"""Tests that DNN-only instantiation works."""
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
n_classes=3, dnn_feature_columns=cont_features, dnn_hidden_units=[3, 3])
classifier.fit(input_fn=test_data.iris_input_multiclass_fn, steps=1000)
classifier.evaluate(input_fn=test_data.iris_input_multiclass_fn, steps=100)
variable_names = classifier.get_variable_names()
self.assertIn('dnn/hiddenlayer_0/weights', variable_names)
self.assertIn('dnn/hiddenlayer_0/biases', variable_names)
self.assertIn('dnn/hiddenlayer_1/weights', variable_names)
self.assertIn('dnn/hiddenlayer_1/biases', variable_names)
self.assertIn('dnn/logits/weights', variable_names)
self.assertIn('dnn/logits/biases', variable_names)
self.assertNotIn('linear/bias_weight', variable_names)
self.assertNotIn('linear/feature_BUCKETIZED/weight', variable_names)
def testDNNWeightsBiasesNames(self):
"""Tests the names of DNN weights and biases in the checkpoints."""
def _input_fn_train():
# Create 4 rows, three (y = x), one (y=Not(x))
labels = constant_op.constant([[1], [1], [1], [0]])
features = {'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),}
return features, labels
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3])
classifier.fit(input_fn=_input_fn_train, steps=5)
variable_names = classifier.get_variable_names()
self.assertIn('dnn/hiddenlayer_0/weights', variable_names)
self.assertIn('dnn/hiddenlayer_0/biases', variable_names)
self.assertIn('dnn/hiddenlayer_1/weights', variable_names)
self.assertIn('dnn/hiddenlayer_1/biases', variable_names)
self.assertIn('dnn/logits/weights', variable_names)
self.assertIn('dnn/logits/biases', variable_names)
class DNNLinearCombinedRegressorTest(test.TestCase):
def testExperimentIntegration(self):
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
exp = experiment.Experiment(
estimator=dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=cont_features,
dnn_feature_columns=cont_features,
dnn_hidden_units=[3, 3]),
train_input_fn=test_data.iris_input_logistic_fn,
eval_input_fn=test_data.iris_input_logistic_fn)
exp.test()
def testEstimatorContract(self):
estimator_test_utils.assert_estimator_contract(
self, dnn_linear_combined.DNNLinearCombinedRegressor)
def testRegression_MatrixData(self):
"""Tests regression using matrix data as input."""
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=cont_features,
dnn_feature_columns=cont_features,
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
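    # tf_random_seed=1 fixes the TensorFlow graph-level seed so this short
    # training run is reproducible across test executions.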
regressor.fit(input_fn=test_data.iris_input_logistic_fn, steps=10)
scores = regressor.evaluate(
input_fn=test_data.iris_input_logistic_fn, steps=1)
self.assertIn('loss', scores.keys())
def testRegression_TensorData(self):
"""Tests regression using tensor data as input."""
def _input_fn():
# Create 4 rows of (y = x)
labels = constant_op.constant([[100.], [3.], [2.], [2.]])
features = {'x': constant_op.constant([[100.], [3.], [2.], [2.]])}
return features, labels
classifier = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn, steps=10)
classifier.evaluate(input_fn=_input_fn, steps=1)
def testLoss(self):
"""Tests loss calculation."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# The algorithm should learn (y = 0.25).
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),}
return features, labels
regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn_train, steps=100)
scores = regressor.evaluate(input_fn=_input_fn_train, steps=1)
# Average square loss = (0.75^2 + 3*0.25^2) / 4 = 0.1875
self.assertAlmostEqual(0.1875, scores['loss'], delta=0.1)
def testLossWithWeights(self):
"""Tests loss calculation with weights."""
def _input_fn_train():
# 4 rows with equal weight, one of them (y = x), three of them (y=Not(x))
# The algorithm should learn (y = 0.25).
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[1.], [1.], [1.], [1.]])
}
return features, labels
def _input_fn_eval():
# 4 rows, with different weights.
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[7.], [1.], [1.], [1.]])
}
return features, labels
regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
weight_column_name='w',
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn_train, steps=100)
scores = regressor.evaluate(input_fn=_input_fn_eval, steps=1)
# Weighted average square loss = (7*0.75^2 + 3*0.25^2) / 10 = 0.4125
self.assertAlmostEqual(0.4125, scores['loss'], delta=0.1)
def testTrainWithWeights(self):
"""Tests training with given weight column."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# First row has more weight than others. Model should fit (y=x) better
# than (y=Not(x)) due to the relative higher weight of the first row.
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[100.], [3.], [2.], [2.]])
}
return features, labels
def _input_fn_eval():
# Create 4 rows (y = x)
labels = constant_op.constant([[1.], [1.], [1.], [1.]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[1.], [1.], [1.], [1.]])
}
return features, labels
regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
weight_column_name='w',
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn_train, steps=100)
scores = regressor.evaluate(input_fn=_input_fn_eval, steps=1)
# The model should learn (y = x) because of the weights, so the loss should
# be close to zero.
self.assertLess(scores['loss'], 0.2)
def testPredict_AsIterableFalse(self):
"""Tests predict method with as_iterable=False."""
labels = [1., 0., 0.2]
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant(labels, dtype=dtypes.float32)
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=[
language_column, feature_column.real_valued_column('age')
],
dnn_feature_columns=[
feature_column.embedding_column(
language_column, dimension=1),
feature_column.real_valued_column('age')
],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=10)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores.keys())
regressor.predict_scores(input_fn=_input_fn, as_iterable=False)
def testPredict_AsIterable(self):
"""Tests predict method with as_iterable=True."""
labels = [1., 0., 0.2]
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant(labels, dtype=dtypes.float32)
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=[
language_column, feature_column.real_valued_column('age')
],
dnn_feature_columns=[
feature_column.embedding_column(
language_column, dimension=1),
feature_column.real_valued_column('age')
],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=10)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores.keys())
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
regressor.predict_scores(input_fn=predict_input_fn, as_iterable=True)
def testCustomMetrics(self):
"""Tests custom evaluation metrics."""
def _input_fn(num_epochs=None):
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x':
input_lib.limit_epochs(
array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
num_epochs=num_epochs)
}
return features, labels
def _my_metric_op(predictions, labels):
return math_ops.reduce_sum(math_ops.multiply(predictions, labels))
regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=10)
scores = regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
'my_error': metric_ops.streaming_mean_squared_error,
('my_metric', 'scores'): _my_metric_op
})
self.assertIn('loss', set(scores.keys()))
self.assertIn('my_error', set(scores.keys()))
self.assertIn('my_metric', set(scores.keys()))
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions = np.array(list(regressor.predict_scores(
input_fn=predict_input_fn)))
self.assertAlmostEqual(
_sklearn.mean_squared_error(np.array([1, 0, 0, 0]), predictions),
scores['my_error'])
# Tests the case that the 2nd element of the key is not "scores".
with self.assertRaises(KeyError):
regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
('my_error', 'predictions'):
metric_ops.streaming_mean_squared_error
})
# Tests the case where the tuple of the key doesn't have 2 elements.
with self.assertRaises(ValueError):
regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
('bad_length_name', 'scores', 'bad_length'):
metric_ops.streaming_mean_squared_error
})
def testCustomMetricsWithMetricSpec(self):
"""Tests custom evaluation metrics."""
def _input_fn(num_epochs=None):
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x':
input_lib.limit_epochs(
array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
num_epochs=num_epochs)
}
return features, labels
def _my_metric_op(predictions, labels):
return math_ops.reduce_sum(math_ops.multiply(predictions, labels))
regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=5)
scores = regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
'my_error':
MetricSpec(
metric_fn=metric_ops.streaming_mean_squared_error,
prediction_key='scores'),
'my_metric':
MetricSpec(
metric_fn=_my_metric_op, prediction_key='scores')
})
self.assertIn('loss', set(scores.keys()))
self.assertIn('my_error', set(scores.keys()))
self.assertIn('my_metric', set(scores.keys()))
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions = np.array(list(regressor.predict_scores(
input_fn=predict_input_fn)))
self.assertAlmostEqual(
_sklearn.mean_squared_error(np.array([1, 0, 0, 0]), predictions),
scores['my_error'])
# Tests the case where the prediction_key is not "scores".
with self.assertRaisesRegexp(KeyError, 'bad_type'):
regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
'bad_name':
MetricSpec(
metric_fn=metric_ops.streaming_auc,
prediction_key='bad_type')
})
def testExport(self):
"""Tests export model for servo."""
labels = [1., 0., 0.2]
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant(labels, dtype=dtypes.float32)
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=[
language_column, feature_column.real_valued_column('age')
],
dnn_feature_columns=[
feature_column.embedding_column(
language_column, dimension=1),
],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=10)
export_dir = tempfile.mkdtemp()
input_feature_key = 'examples'
def serving_input_fn():
features, targets = _input_fn()
features[input_feature_key] = array_ops.placeholder(dtypes.string)
return features, targets
regressor.export(
export_dir,
serving_input_fn,
input_feature_key,
use_deprecated_input_fn=False)
def testTrainSaveLoad(self):
"""Tests regression with restarting training / evaluate."""
def _input_fn(num_epochs=None):
# Create 4 rows of (y = x)
labels = constant_op.constant([[100.], [3.], [2.], [2.]])
features = {
'x':
input_lib.limit_epochs(
constant_op.constant([[100.], [3.], [2.], [2.]]),
num_epochs=num_epochs)
}
return features, labels
model_dir = tempfile.mkdtemp()
# pylint: disable=g-long-lambda
new_regressor = lambda: dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3],
model_dir=model_dir,
config=run_config.RunConfig(tf_random_seed=1))
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
regressor = new_regressor()
regressor.fit(input_fn=_input_fn, steps=10)
predictions = list(regressor.predict_scores(input_fn=predict_input_fn))
del regressor
regressor = new_regressor()
predictions2 = list(regressor.predict_scores(input_fn=predict_input_fn))
self.assertAllClose(predictions, predictions2)
def testTrainWithPartitionedVariables(self):
"""Tests training with partitioned variables."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([1., 0., 0.2], dtype=dtypes.float32)
# The given hash_bucket_size results in variables larger than the
# default min_slice_size attribute, so the variables are partitioned.
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=2e7)
tf_config = {
'cluster': {
run_config.TaskType.PS: ['fake_ps_0', 'fake_ps_1']
}
}
with test.mock.patch.dict('os.environ',
{'TF_CONFIG': json.dumps(tf_config)}):
config = run_config.RunConfig(tf_random_seed=1)
# Because we did not start a distributed cluster, we need to pass an
# empty ClusterSpec, otherwise the device_setter will look for
# distributed jobs, such as "/job:ps" which are not present.
config._cluster_spec = server_lib.ClusterSpec({})
regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=[
language_column, feature_column.real_valued_column('age')
],
dnn_feature_columns=[
feature_column.embedding_column(
language_column, dimension=1),
feature_column.real_valued_column('age')
],
dnn_hidden_units=[3, 3],
config=config)
regressor.fit(input_fn=_input_fn, steps=100)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores.keys())
def testDisableCenteredBias(self):
"""Tests that we can disable centered bias."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([1., 0., 0.2], dtype=dtypes.float32)
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=[
language_column, feature_column.real_valued_column('age')
],
dnn_feature_columns=[
feature_column.embedding_column(
language_column, dimension=1),
feature_column.real_valued_column('age')
],
dnn_hidden_units=[3, 3],
enable_centered_bias=False,
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=100)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores.keys())
def testLinearOnly(self):
"""Tests linear-only instantiation and training."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([1., 0., 0.2], dtype=dtypes.float32)
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=[
language_column, feature_column.real_valued_column('age')
],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=100)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores.keys())
def testDNNOnly(self):
"""Tests DNN-only instantiation and training."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([1., 0., 0.2], dtype=dtypes.float32)
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
dnn_feature_columns=[
feature_column.embedding_column(
language_column, dimension=1),
feature_column.real_valued_column('age')
],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=100)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores.keys())
class FeatureEngineeringFunctionTest(test.TestCase):
"""Tests feature_engineering_fn."""
def testNoneFeatureEngineeringFn(self):
def input_fn():
# Create 4 rows of (y = x)
labels = constant_op.constant([[100.], [3.], [2.], [2.]])
features = {'x': constant_op.constant([[100.], [3.], [2.], [2.]])}
return features, labels
def feature_engineering_fn(features, labels):
_, _ = features, labels
labels = constant_op.constant([[1000.], [30.], [20.], [20.]])
features = {'x': constant_op.constant([[1000.], [30.], [20.], [20.]])}
return features, labels
estimator_with_fe_fn = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1),
feature_engineering_fn=feature_engineering_fn)
estimator_with_fe_fn.fit(input_fn=input_fn, steps=110)
estimator_without_fe_fn = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
estimator_without_fe_fn.fit(input_fn=input_fn, steps=110)
# predictions = y
prediction_with_fe_fn = next(
estimator_with_fe_fn.predict_scores(
input_fn=input_fn, as_iterable=True))
self.assertAlmostEqual(1000., prediction_with_fe_fn, delta=10.0)
prediction_without_fe_fn = next(
estimator_without_fe_fn.predict_scores(
input_fn=input_fn, as_iterable=True))
self.assertAlmostEqual(100., prediction_without_fe_fn, delta=1.0)
if __name__ == '__main__':
test.main()
| apache-2.0 |
zorroblue/scikit-learn | sklearn/cluster/tests/test_affinity_propagation.py | 15 | 5780 | """
Testing for Clustering methods
"""
import numpy as np
from sklearn.exceptions import ConvergenceWarning
from sklearn.utils.testing import (
assert_equal, assert_false, assert_true, assert_array_equal, assert_raises,
assert_warns, assert_warns_message, assert_no_warnings)
from sklearn.cluster.affinity_propagation_ import AffinityPropagation
from sklearn.cluster.affinity_propagation_ import (
_equal_similarities_and_preferences
)
from sklearn.cluster.affinity_propagation_ import affinity_propagation
from sklearn.datasets.samples_generator import make_blobs
from sklearn.metrics import euclidean_distances
n_clusters = 3
centers = np.array([[1, 1], [-1, -1], [1, -1]]) + 10
X, _ = make_blobs(n_samples=60, n_features=2, centers=centers,
cluster_std=0.4, shuffle=True, random_state=0)
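# Shared fixture: 60 samples drawn around 3 well-separated centers (shifted by
# +10, small spread), so the expected number of clusters in the tests below is 3.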
def test_affinity_propagation():
# Affinity Propagation algorithm
# Compute similarities
S = -euclidean_distances(X, squared=True)
preference = np.median(S) * 10
# Compute Affinity Propagation
cluster_centers_indices, labels = affinity_propagation(
S, preference=preference)
n_clusters_ = len(cluster_centers_indices)
assert_equal(n_clusters, n_clusters_)
af = AffinityPropagation(preference=preference, affinity="precomputed")
labels_precomputed = af.fit(S).labels_
af = AffinityPropagation(preference=preference, verbose=True)
labels = af.fit(X).labels_
assert_array_equal(labels, labels_precomputed)
cluster_centers_indices = af.cluster_centers_indices_
n_clusters_ = len(cluster_centers_indices)
assert_equal(np.unique(labels).size, n_clusters_)
assert_equal(n_clusters, n_clusters_)
# Test also with no copy
_, labels_no_copy = affinity_propagation(S, preference=preference,
copy=False)
assert_array_equal(labels, labels_no_copy)
# Test input validation
assert_raises(ValueError, affinity_propagation, S[:, :-1])
assert_raises(ValueError, affinity_propagation, S, damping=0)
af = AffinityPropagation(affinity="unknown")
assert_raises(ValueError, af.fit, X)
def test_affinity_propagation_predict():
# Test AffinityPropagation.predict
af = AffinityPropagation(affinity="euclidean")
labels = af.fit_predict(X)
labels2 = af.predict(X)
assert_array_equal(labels, labels2)
def test_affinity_propagation_predict_error():
# Test exception in AffinityPropagation.predict
# Not fitted.
af = AffinityPropagation(affinity="euclidean")
assert_raises(ValueError, af.predict, X)
# Predict not supported when affinity="precomputed".
S = np.dot(X, X.T)
af = AffinityPropagation(affinity="precomputed")
af.fit(S)
assert_raises(ValueError, af.predict, X)
def test_affinity_propagation_fit_non_convergence():
# In case of non-convergence of affinity_propagation(), the cluster
# centers should be an empty array and training samples should be labelled
# as noise (-1)
X = np.array([[0, 0], [1, 1], [-2, -2]])
# Force non-convergence by allowing only a single iteration
af = AffinityPropagation(preference=-10, max_iter=1)
assert_warns(ConvergenceWarning, af.fit, X)
assert_array_equal(np.empty((0, 2)), af.cluster_centers_)
assert_array_equal(np.array([-1, -1, -1]), af.labels_)
def test_affinity_propagation_equal_mutual_similarities():
X = np.array([[-1, 1], [1, -1]])
S = -euclidean_distances(X, squared=True)
# setting preference > similarity
cluster_center_indices, labels = assert_warns_message(
UserWarning, "mutually equal", affinity_propagation, S, preference=0)
# expect every sample to become an exemplar
assert_array_equal([0, 1], cluster_center_indices)
assert_array_equal([0, 1], labels)
# setting preference < similarity
cluster_center_indices, labels = assert_warns_message(
UserWarning, "mutually equal", affinity_propagation, S, preference=-10)
# expect one cluster, with arbitrary (first) sample as exemplar
assert_array_equal([0], cluster_center_indices)
assert_array_equal([0, 0], labels)
# setting different preferences
cluster_center_indices, labels = assert_no_warnings(
affinity_propagation, S, preference=[-20, -10])
# expect one cluster, with highest-preference sample as exemplar
assert_array_equal([1], cluster_center_indices)
assert_array_equal([0, 0], labels)
def test_affinity_propagation_predict_non_convergence():
# In case of non-convergence of affinity_propagation(), the cluster
# centers should be an empty array
X = np.array([[0, 0], [1, 1], [-2, -2]])
# Force non-convergence by allowing only a single iteration
af = AffinityPropagation(preference=-10, max_iter=1).fit(X)
# At prediction time, consider new samples as noise since there are no
# clusters
assert_array_equal(np.array([-1, -1, -1]),
af.predict(np.array([[2, 2], [3, 3], [4, 4]])))
def test_equal_similarities_and_preferences():
# Unequal distances
X = np.array([[0, 0], [1, 1], [-2, -2]])
S = -euclidean_distances(X, squared=True)
assert_false(_equal_similarities_and_preferences(S, np.array(0)))
assert_false(_equal_similarities_and_preferences(S, np.array([0, 0])))
assert_false(_equal_similarities_and_preferences(S, np.array([0, 1])))
# Equal distances
X = np.array([[0, 0], [1, 1]])
S = -euclidean_distances(X, squared=True)
# Different preferences
assert_false(_equal_similarities_and_preferences(S, np.array([0, 1])))
# Same preferences
assert_true(_equal_similarities_and_preferences(S, np.array([0, 0])))
assert_true(_equal_similarities_and_preferences(S, np.array(0)))
| bsd-3-clause |
samshara/Stock-Market-Analysis-and-Prediction | smap_nepse/prediction/plotter.py | 1 | 3450 | import sys
sys.path.insert(0, '../../smap_nepse')
import pandas as pd
import numpy as np
import prepareInput as pi
import matplotlib.pyplot as plt
import matplotlib.colors as colors
import matplotlib.finance as finance
import matplotlib.dates as mdates
import matplotlib.ticker as mticker
import matplotlib.mlab as mlab
import matplotlib.font_manager as font_manager
from pandas.tools.plotting import table
from preprocessing import moreIndicators as indi
__author__ = "Semanta Bhandari"
__copyright__ = ""
__credits__ = ["Sameer Rai","Sumit Shrestha","Sankalpa Timilsina"]
__license__ = ""
__version__ = "0.1"
__email__ = "[email protected]"
def indicator_plot(df):
index = df.index
df.index = range(len(df.index))
df.columns = ['Transactions','Traded_Shares','Traded_Amount','High','Low','Close']
df = indi.EMA(df, 20)
df = indi.RSI(df, 14)
df = indi.MOM(df, 10)
df = indi.MA(df, 100)
df = indi.MA(df, 20)
df.index = index
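    # The indicator helpers above are assumed to append columns named EMA_20,
    # RSI_14, Momentum_10, MA_100 and MA_20; the plotting code below relies on
    # the RSI, momentum and moving-average columns.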
last = df[-1:]
df = df.drop(df.columns[:5], axis=1)
# print(df.describe())
print(df.corr())
# print(df.RSI_10)
plt.rc('axes', grid=True)
plt.rc('grid', color='0.75', linestyle='-', linewidth=0.5)
textsize = 9
left, width = 0.1, 0.8
rect1 = [left, 0.7, width, 0.2]
rect2 = [left, 0.3, width, 0.4]
rect3 = [left, 0.1, width, 0.2]
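    # Three stacked panels sharing the x axis: RSI on top (rect1), price with
    # moving averages in the middle (rect2), momentum at the bottom (rect3).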
fig = plt.figure(facecolor='white')
axescolor = '#f6f6f6' # the axes background color
ax1 = fig.add_axes(rect1, axisbg=axescolor) # left, bottom, width, height
ax2 = fig.add_axes(rect2, axisbg=axescolor, sharex=ax1)
ax2t = ax2.twinx()
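    # ax2t is a twin y axis for the middle panel; it is created here but not
    # used further in this routine.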
ax3 = fig.add_axes(rect3, axisbg=axescolor, sharex=ax1)
rsi = df.RSI_14*100
fillcolor = 'darkgoldenrod'
ticker = 'NABIL'
ax1.plot(df.index, rsi, color=fillcolor)
ax1.axhline(70, color=fillcolor)
ax1.axhline(30, color=fillcolor)
ax1.fill_between(df.index, rsi, 70, where=(rsi >= 70), facecolor=fillcolor, edgecolor=fillcolor)
ax1.fill_between(df.index, rsi, 30, where=(rsi <= 30), facecolor=fillcolor, edgecolor=fillcolor)
ax1.text(0.6, 0.9, '>70 = overbought', va='top', transform=ax1.transAxes, fontsize=textsize)
ax1.text(0.6, 0.1, '<30 = oversold', transform=ax1.transAxes, fontsize=textsize)
ax1.set_ylim(0, 100)
ax1.set_yticks([30, 70])
ax1.text(0.025, 0.95, 'RSI (14)', va='top', transform=ax1.transAxes, fontsize=textsize)
ax1.set_title('%s daily' % ticker)
# plt.figure()
# df.plot()
ma20 = df['MA_20']
ma100 = df['MA_100']
linema100, = ax2.plot(df.index, ma100, color='green', lw=2, label='MA (100)', linestyle = '--')
linema20, = ax2.plot(df.index, ma20, color='blue', lw=2, label='MA (20)', linestyle = '-.')
close, = ax2.plot(df.index, df.Close, color='red', lw=2, label='Close')
s = '%s H:%1.2f L:%1.2f C:%1.2f' % (
last.index[0].date(), last.High, last.Low, last.Close)
t4 = ax2.text(0.3, 0.9, s, transform=ax2.transAxes, fontsize=textsize)
props = font_manager.FontProperties(size=10)
leg = ax2.legend(loc='center left', shadow=True, fancybox=True, prop=props)
leg.get_frame().set_alpha(0.5)
ax3.plot(df.index, df.Momentum_10)
ax3.text(0.025, 0.95, 'Momentum (10)', va='top',
transform=ax3.transAxes, fontsize=textsize)
plt.show()
#if __name__ == "__main__" :
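# Note: with the __main__ guard commented out above, the two lines below run at
# import time as well as when the script is executed directly.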
dataframe = pi.load_data_frame('NABIL.csv')
indicator_plot(dataframe[1000:])
| mit |
iABC2XYZ/abc | Temp/TensorflowGPUPredic2.py | 1 | 12656 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Tue Jul 11 12:17:54 2017
@author: A
"""
import tensorflow as tf # Version 1.0 or 0.12
import numpy as np
import matplotlib.pyplot as plt
import random
import math
import os
plt.close('all')
inSeq=10
outSeq=3
batch_size = 50 # Low value used for live demo purposes - 100 and 1000 would be possible too, crank that up!
ioput_dim=2 # Output dimension (e.g.: multiple signals at once, tied in time)
hidden_dim = 50 # Count of hidden neurons in the recurrent units.
def ReadHistDataIndex(indexCode,kindCode):
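    # Reads one series from ./data/<kind>/<code>.<kind> (path layout assumed
    # from the code below) and returns it oldest-first by reversing the file
    # order; missing or degenerate files yield a sentinel value instead.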
fileName='./data/'+kindCode.lower()+'/'+indexCode+'.'+kindCode.lower()
if(kindCode.lower()=='date'):
if not (os.path.exists(fileName)):
return '3000-01-01'
with open(fileName,'r') as fid:
dataCodeTmp=fid.readlines()
nDataCodeTmp=len(dataCodeTmp)
dataCode=np.copy(dataCodeTmp)
for nLine in xrange(nDataCodeTmp):
dataCode[nLine]=dataCodeTmp[nDataCodeTmp-nLine-1]
else:
if not (os.path.exists(fileName)):
return [0]
dataCode= np.loadtxt(fileName)
if np.shape(dataCode)==():
return [0]
dataCode=np.flip(np.loadtxt(fileName),0)
return dataCode
def generate_x_y_data(indexCode,isTrain, seq_length,batch_size,ioput_dim):
openCode=ReadHistDataIndex(indexCode,'open')
if len(seq_length)==1:
        inSeq=outSeq=seq_length[0]
else:
inSeq,outSeq=seq_length[0],seq_length[1]
print openCode
'''
batch_x = []
batch_y = []
for _ in range(batch_size):
x_=np.empty((inSeq,ioput_dim))
y_=np.empty((outSeq,ioput_dim))
for nIO in xrange(ioput_dim):
rand = random.random() * 2 * math.pi
sig = np.sin(np.linspace(0.0 * math.pi + rand,
3.0 * math.pi + rand, inSeq+outSeq))
xTmp=sig[:inSeq]
yTmp=sig[inSeq:]
x_[:,nIO]=xTmp
y_[:,nIO]=yTmp
batch_x.append(x_)
batch_y.append(y_)
batch_x = np.array(batch_x)
batch_y = np.array(batch_y)
# shape: (batch_size, seq_length, output_dim)
batch_x = np.array(batch_x).transpose((1, 0, 2))
batch_y = np.array(batch_y).transpose((1, 0, 2))
# shape: (seq_length, batch_size, output_dim)
return batch_x, batch_y
'''
seq_length=[inSeq,outSeq]
generate_x_y_data('603505',isTrain=True, seq_length=seq_length,batch_size=batch_size,ioput_dim=ioput_dim)
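# In this revision only the data-loading part of generate_x_y_data is active;
# the batch-assembly code and the TensorFlowPredict seq2seq driver below are
# disabled inside triple-quoted blocks.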
'''
def generate_x_y_data(isTrain, seq_length,batch_size,ioput_dim):
"""
Data for exercise 1.
returns: tuple (X, Y)
X is a sine and a cosine from 0.0*pi to 1.5*pi
Y is a sine and a cosine from 1.5*pi to 3.0*pi
Therefore, Y follows X. There is also a random offset
commonly applied to X an Y.
The returned arrays are of shape:
(seq_length, batch_size, output_dim)
Therefore: (10, batch_size, 2)
For this exercise, let's ignore the "isTrain"
argument and test on the same data.
"""
if len(seq_length)==1:
inSeq=outSeq=seq_length
else:
inSeq,outSeq=seq_length[0],seq_length[1]
batch_x = []
batch_y = []
for _ in range(batch_size):
x_=np.empty((inSeq,ioput_dim))
y_=np.empty((outSeq,ioput_dim))
for nIO in xrange(ioput_dim):
rand = random.random() * 2 * math.pi
sig = np.sin(np.linspace(0.0 * math.pi + rand,
3.0 * math.pi + rand, inSeq+outSeq))
xTmp=sig[:inSeq]
yTmp=sig[inSeq:]
x_[:,nIO]=xTmp
y_[:,nIO]=yTmp
batch_x.append(x_)
batch_y.append(y_)
batch_x = np.array(batch_x)
batch_y = np.array(batch_y)
# shape: (batch_size, seq_length, output_dim)
batch_x = np.array(batch_x).transpose((1, 0, 2))
batch_y = np.array(batch_y).transpose((1, 0, 2))
# shape: (seq_length, batch_size, output_dim)
return batch_x, batch_y
'''
'''
def TensorFlowPredict(indexCode,inSeq,outSeq,batch_size,hidden_dim,ioput_dim):
#inSeq=10
#outSeq=3
seq_length = [inSeq,outSeq] ##############################
#batch_size = 50 # Low value used for live demo purposes - 100 and 1000 would be possible too, crank that up!
# Output dimension (e.g.: multiple signals at once, tied in time)
#ioput_dim=5
output_dim = input_dim =ioput_dim ###################################
#hidden_dim = 50 # Count of hidden neurons in the recurrent units.
# Number of stacked recurrent cells, on the neural depth axis.
layers_stacked_count = 2
# Optmizer:
learning_rate = 0.007 # Small lr helps not to diverge during training.
# How many times we perform a training step (therefore how many times we
# show a batch).
nb_iters = 100
lr_decay = 0.92 # default: 0.9 . Simulated annealing.
momentum = 0.5 # default: 0.0 . Momentum technique in weights update
lambda_l2_reg = 0.003 # L2 regularization of weights - avoids overfitting
try:
tf.nn.seq2seq = tf.contrib.legacy_seq2seq
tf.nn.rnn_cell = tf.contrib.rnn
tf.nn.rnn_cell.GRUCell = tf.contrib.rnn.GRUCell
print("TensorFlow's version : 1.0 (or more)")
except:
print("TensorFlow's version : 0.12")
tf.reset_default_graph()
# sess.close()
sess = tf.InteractiveSession(config=tf.ConfigProto(log_device_placement=True))
with tf.variable_scope('Seq2seq'):
# Encoder: inputs
enc_inp = [
tf.placeholder(tf.float32, shape=(
None, input_dim), name="inp_{}".format(t))
for t in range(inSeq)
]
# Decoder: expected outputs
expected_sparse_output = [
tf.placeholder(tf.float32, shape=(None, output_dim),
name="expected_sparse_output_".format(t))
for t in range(outSeq)
]
# Give a "GO" token to the decoder.
# You might want to revise what is the appended value "+ enc_inp[:-1]".
dec_inp = [tf.zeros_like(
enc_inp[0], dtype=np.float32, name="GO")] + enc_inp[:-1]
# Create a `layers_stacked_count` of stacked RNNs (GRU cells here).
cells = []
for i in range(layers_stacked_count):
with tf.variable_scope('RNN_{}'.format(i)):
cells.append(tf.nn.rnn_cell.GRUCell(hidden_dim))
# cells.append(tf.nn.rnn_cell.BasicLSTMCell(...))
cell = tf.nn.rnn_cell.MultiRNNCell(cells)
# For reshaping the input and output dimensions of the seq2seq RNN:
w_in = tf.Variable(tf.random_normal([input_dim, hidden_dim]))
b_in = tf.Variable(tf.random_normal([hidden_dim], mean=1.0))
w_out = tf.Variable(tf.random_normal([hidden_dim, output_dim]))
b_out = tf.Variable(tf.random_normal([output_dim]))
reshaped_inputs = [tf.nn.relu(tf.matmul(i, w_in) + b_in) for i in enc_inp]
# Here, the encoder and the decoder uses the same cell, HOWEVER,
# the weights aren't shared among the encoder and decoder, we have two
# sets of weights created under the hood according to that function's def.
dec_outputs, dec_memory = tf.nn.seq2seq.basic_rnn_seq2seq(
enc_inp,
dec_inp,
cell
)
output_scale_factor = tf.Variable(1.0, name="Output_ScaleFactor")
# Final outputs: with linear rescaling similar to batch norm,
# but without the "norm" part of batch normalization hehe.
reshaped_outputs = [output_scale_factor *
(tf.matmul(i, w_out) + b_out) for i in dec_outputs]
with tf.variable_scope('Loss'):
# L2 loss
output_loss = 0
for _y, _Y in zip(reshaped_outputs, expected_sparse_output):
output_loss += tf.reduce_mean(tf.nn.l2_loss(_y - _Y))
# L2 regularization (to avoid overfitting and to have a better
# generalization capacity)
reg_loss = 0
for tf_var in tf.trainable_variables():
if not ("Bias" in tf_var.name or "Output_" in tf_var.name):
reg_loss += tf.reduce_mean(tf.nn.l2_loss(tf_var))
loss = output_loss + lambda_l2_reg * reg_loss
with tf.variable_scope('Optimizer'):
optimizer = tf.train.RMSPropOptimizer(
learning_rate, decay=lr_decay, momentum=momentum)
train_op = optimizer.minimize(loss)
# Training
train_losses = []
test_losses = []
sess.run(tf.global_variables_initializer())
for t in range(nb_iters + 1):
#train_loss = train_batch(batch_size, seq_length, ioput_dim)
X, Y = generate_x_y_data(isTrain=True, seq_length=seq_length,batch_size=batch_size,ioput_dim=ioput_dim)
#print X.shape, Y.shape
feed_dict = {enc_inp[t]: X[t] for t in range(len(enc_inp))}
feed_dict.update({expected_sparse_output[t]: Y[
t] for t in range(len(expected_sparse_output))})
_, loss_t = sess.run([train_op, loss], feed_dict)
train_loss=loss_t
train_losses.append(train_loss)
if t % 10 == 0:
# Tester
#test_loss = test_batch(batch_size)
X, Y = generate_x_y_data(isTrain=False, seq_length=seq_length,batch_size=batch_size,ioput_dim=ioput_dim)
feed_dict = {enc_inp[t]: X[t] for t in range(len(enc_inp))}
feed_dict.update({expected_sparse_output[t]: Y[
t] for t in range(len(expected_sparse_output))})
loss_t = sess.run([loss], feed_dict)
test_loss= loss_t[0]
test_losses.append(test_loss)
print("Step {}/{}, train loss: {}, \tTEST loss: {}".format(t,
nb_iters, train_loss, test_loss))
print("Fin. train loss: {}, \tTEST loss: {}".format(train_loss, test_loss))
# Plot loss over time:
plt.figure(figsize=(12, 6))
plt.plot(
np.array(range(0, len(test_losses))) /
float(len(test_losses) - 1) * (len(train_losses) - 1),
np.log(test_losses),
label="Test loss"
)
plt.plot(
np.log(train_losses),
label="Train loss"
)
plt.title("Training errors over time (on a logarithmic scale)")
plt.xlabel('Iteration')
plt.ylabel('log(Loss)')
plt.legend(loc='best')
plt.show()
# Test ############################
nb_predictions = 5
print("Let's visualize {} predictions with our signals:".format(nb_predictions))
X, Y = generate_x_y_data(isTrain=False, seq_length=seq_length, batch_size=nb_predictions,ioput_dim=ioput_dim)
feed_dict = {enc_inp[t]: X[t] for t in range(inSeq)}
outputs = np.array(sess.run([reshaped_outputs], feed_dict)[0])
for j in range(nb_predictions):
plt.figure(figsize=(12, 3))
for k in range(output_dim):
past = X[:, j, k]
expected = Y[:, j, k]
pred = outputs[:, j, k]
label1 = "Seen (past) values" if k == 0 else "_nolegend_"
label2 = "True future values" if k == 0 else "_nolegend_"
label3 = "Predictions" if k == 0 else "_nolegend_"
plt.plot(range(len(past)), past, "o--b", label=label1)
plt.plot(range(len(past), len(expected) + len(past)),
expected, "x--b", label=label2)
plt.plot(range(len(past), len(pred) + len(past)),
pred, "o--y", label=label3)
plt.legend(loc='best')
plt.title("Predictions v.s. true values")
plt.show()
print("Reminder: the signal can contain many dimensions at once.")
print("In that case, signals have the same color.")
print("In reality, we could imagine multiple stock market symbols evolving,")
print("tied in time together and seen at once by the neural network.")
################################################################################
indexCode='0000'
TensorFlowPredict(indexCode,inSeq,outSeq,batch_size,hidden_dim,ioput_dim)
'''
| gpl-3.0 |
gpersistence/tstop | scripts/plots/plot_multi_persistence.py | 1 | 5663 | #TSTOP
#
#This program is free software: you can redistribute it and/or modify
#it under the terms of the GNU General Public License as published by
#the Free Software Foundation, either version 3 of the License, or
#(at your option) any later version.
#
#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#
#You should have received a copy of the GNU General Public License
#along with this program. If not, see <http://www.gnu.org/licenses/>.
import subprocess
import argparse
import threading
import multiprocessing
import itertools
import traceback
import os
import os.path
import errno
import sys
import time
import json
import numpy
# Used to guarantee to use at least Wx2.8
import wxversion
wxversion.ensureMinimal('2.8')
import matplotlib
matplotlib.use('WXAgg')
from matplotlib.backends.backend_wxagg import FigureCanvasWxAgg as FigureCanvas
from matplotlib.backends.backend_wx import NavigationToolbar2Wx
from matplotlib.figure import Figure
import wx
from persistence.Datatypes.JSONObject import load_data, save_data
from persistence.Datatypes.PersistenceDiagrams import PersistenceDiagrams as PD
class CanvasFrame(wx.Frame):
def __init__(self, argv):
wx.Frame.__init__(self,None,-1,
'Segment Size',size=(550,350))
parser = argparse.ArgumentParser(description="utility to plot multiple persistence diagrams")
parser.add_argument('files', nargs="*")
self.args = vars(parser.parse_args(argv[1:]))
self.files = self.args['files']
self.persistences = []
for f in self.files :
pf_json = load_data(f, 'persistence', None, None, None)
if pf_json == None :
print "Could not load persistence file : %s" % (f,)
sys.exit(1)
self.persistences.append(PD.fromJSONDict(pf_json))
self.SetBackgroundColour(wx.NamedColour("WHITE"))
self.displays = []
self.sizer = wx.BoxSizer(wx.VERTICAL)
for f in self.files :
self.displays.append(dict([('figure', Figure())]))
self.displays[-1]['axes'] = self.displays[-1]['figure'].add_subplot(111)
self.displays[-1]['canvas'] = FigureCanvas(self, -1, self.displays[-1]['figure'])
self.sizer.Add(NavigationToolbar2Wx(self.displays[-1]['canvas']), 1, wx.LEFT | wx.TOP | wx.GROW)
self.sizer.Add(self.displays[-1]['canvas'], 8, wx.LEFT | wx.TOP | wx.GROW)
self.SetSizer(self.sizer)
self.Fit()
self.background = self.displays[0]['axes'].figure.canvas.copy_from_bbox(self.displays[0]['axes'].bbox)
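        # Capture the empty axes background once so point_Refresh() can restore
        # it before redrawing the currently selected diagram (simple blitting).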
self.colors = ['red', 'yellow', 'orange', 'blue', 'green', 'violet', 'black']
self.Bind(wx.EVT_PAINT, self.OnPaint)
self.Bind(wx.EVT_KEY_UP, self.KeyEvent)
self.index = 0
self.point_Refresh()
def point_Refresh(self) :
lines = []
for i in range(len(self.files)) :
self.displays[i]['axes'].cla()
self.displays[i]['canvas'].restore_region(self.background)
p = self.persistences[i].diagrams[self.index].points
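            # Each point is assumed to be (birth, death, dimension): dimension-0
            # features are drawn as blue dots, dimension-1 features as red
            # crosses, with the diagonal added for reference.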
xs_0 = [point[0] for point in p if point[2]==0]
ys_0 = [point[1] for point in p if point[2]==0]
line = self.displays[i]['axes'].plot(xs_0, ys_0, 'bo')
lines.append(line)
xs_1 = [point[0] for point in p if point[2]==1]
ys_1 = [point[1] for point in p if point[2]==1]
line = self.displays[i]['axes'].plot(xs_1, ys_1, 'r+')
lines.append(line)
max_val = max([max(xs_0) if len(xs_0) != 0 else 0.0,\
max(ys_0) if len(ys_0) != 0 else 0.0,\
max(xs_1) if len(xs_1) != 0 else 0.0,
max(ys_1) if len(ys_1) != 0 else 0.0])
line = self.displays[i]['axes'].plot([0,max_val],[0,max_val],'-')
lines.append(line)
self.Fit()
def KeyEvent(self, event):
keycode = event.GetKeyCode()
if keycode == wx.WXK_LEFT :
self.index = (self.index - 1) % len(self.persistences[0].diagrams)
self.point_Refresh()
wx.PostEvent(self,wx.PaintEvent())
print self.index
elif keycode == wx.WXK_RIGHT :
self.index = (self.index + 1) % len(self.persistences[0].diagrams)
self.point_Refresh()
wx.PostEvent(self,wx.PaintEvent())
print self.index
else :
event.Skip()
def OnPaint(self, event):
paint_dc = wx.PaintDC(self)
for i in range(len(self.files)) :
self.displays[i]['canvas'].draw()
class App(wx.App):
def __init__(self, arg, argv):
self.argv = argv
wx.App.__init__(self,0)
def OnInit(self):
'Create the main window and insert the custom frame'
frame = CanvasFrame(self.argv)
frame.Show(True)
self.SetTopWindow(frame)
return True
def display(argv):
app = App(0, argv)
app.MainLoop()
def main(argv) :
current_dir = os.getcwd()
processes = []
try:
display_thread = \
multiprocessing.Process(target=display,
args=(argv,))
display_thread.start()
processes.append(display_thread)
display_thread.join()
except KeyboardInterrupt:
print "Caught cntl-c, shutting down..."
exit(0)
if __name__ == "__main__":
main(sys.argv)
| gpl-3.0 |