repo_name (stringlengths 6-112) | path (stringlengths 4-204) | copies (stringlengths 1-3) | size (stringlengths 4-6) | content (stringlengths 714-810k) | license (stringclasses 15 values)
---|---|---|---|---|---|
squared9/Robotics
|
Robotic_PR2_3D_Perception_Pick_And_Place/sensor_stick/scripts/object_recognition.py
|
2
|
6247
|
#!/usr/bin/env python
import numpy as np
import sklearn
from sklearn.preprocessing import LabelEncoder
import pickle
from sensor_stick.srv import GetNormals
from sensor_stick.features import compute_color_histograms
from sensor_stick.features import compute_normal_histograms
from visualization_msgs.msg import Marker
from sensor_stick.marker_tools import *
from sensor_stick.msg import DetectedObjectsArray
from sensor_stick.msg import DetectedObject
from sensor_stick.pcl_helper import *
def get_normals(cloud):
get_normals_prox = rospy.ServiceProxy('/feature_extractor/get_normals', GetNormals)
return get_normals_prox(cloud).cluster
# Callback function for your Point Cloud Subscriber
def pcl_callback(pcl_msg):
# Exercise-2 TODOs:
# Convert ROS msg to PCL data
cloud = ros_to_pcl(pcl_msg)
# Voxel Grid filter downsampling
vox = cloud.make_voxel_grid_filter()
LEAF_SIZE = 0.01
vox.set_leaf_size(LEAF_SIZE, LEAF_SIZE, LEAF_SIZE)
cloud_filtered = vox.filter()
# PassThrough filter
passthrough = cloud_filtered.make_passthrough_filter()
filter_axis = 'z'
passthrough.set_filter_field_name(filter_axis)
axis_min = 0.6
axis_max = 1.1
passthrough.set_filter_limits(axis_min, axis_max)
cloud_filtered = passthrough.filter()
# RANSAC plane segmentation
seg = cloud_filtered.make_segmenter()
seg.set_model_type(pcl.SACMODEL_PLANE)
seg.set_method_type(pcl.SAC_RANSAC)
max_distance = 0.01
seg.set_distance_threshold(max_distance)
inliers, coefficients = seg.segment()
# Extract inliers and outliers
cloud_table = cloud_filtered.extract(inliers, negative=False)
cloud_objects = cloud_filtered.extract(inliers, negative=True)
# Extract outliers
# outlier_filter = cloud_filtered.make_statistical_outlier_filter()
# Euclidean Clustering
white_cloud = XYZRGB_to_XYZ(cloud_objects)
tree = white_cloud.make_kdtree()
ec = white_cloud.make_EuclideanClusterExtraction()
ec.set_ClusterTolerance(0.05)
ec.set_MinClusterSize(10)
ec.set_MaxClusterSize(5000)
ec.set_SearchMethod(tree)
cluster_indices = ec.Extract()
# Create Cluster-Mask Point Cloud to visualize each cluster separately
cluster_color = get_color_list(len(cluster_indices))
color_cluster_point_list = []
for j, indices in enumerate(cluster_indices):
for i, indice in enumerate(indices):
color_cluster_point_list.append([white_cloud[indice][0],
white_cloud[indice][1],
white_cloud[indice][2],
rgb_to_float(cluster_color[j])
])
cluster_cloud = pcl.PointCloud_PointXYZRGB()
cluster_cloud.from_list(color_cluster_point_list)
# Convert PCL data to ROS messages
# ros_cloud_objects = pcl_to_ros(cloud_objects)
# ros_cloud_table = pcl_to_ros(cloud_table)
# ros_cluster_cloud = pcl_to_ros(cluster_cloud)
# Publish ROS messages
# pcl_objects_pub.publish(ros_cloud_objects)
# pcl_table_pub.publish(ros_cloud_table)
# pcl_cluster_pub.publish(ros_cluster_cloud)
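# Note: the publishing step above is left commented out in this file; enabling it
# would also require creating matching publishers in __main__. A sketch, with
# assumed topic names (not from the original script):
# pcl_objects_pub = rospy.Publisher("/pcl_objects", pc2.PointCloud2, queue_size=1)
# pcl_table_pub = rospy.Publisher("/pcl_table", pc2.PointCloud2, queue_size=1)
# pcl_cluster_pub = rospy.Publisher("/pcl_cluster", pc2.PointCloud2, queue_size=1)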
# Exercise-3 TODOs:
# Classify the clusters!
detected_objects_labels = []
detected_objects = []
# Classify the clusters! (loop through each detected cluster one at a time)
for index, pts_list in enumerate(cluster_indices):
# Grab the points for the cluster from the extracted outliers (cloud_objects)
pcl_cluster = cloud_objects.extract(pts_list)
# convert the cluster from pcl to ROS using helper function
ros_cluster = pcl_to_ros(pcl_cluster)
# Extract histogram features
# complete this step just as is covered in capture_features.py
chists = compute_color_histograms(ros_cluster, using_hsv=True)
normals = get_normals(ros_cluster)
nhists = compute_normal_histograms(normals)
feature = np.concatenate((chists, nhists))
# Make the prediction, retrieve the label for the result
# and add it to detected_objects_labels list
prediction = clf.predict(scaler.transform(feature.reshape(1,-1)))
label = encoder.inverse_transform(prediction)[0]
detected_objects_labels.append(label)
# Publish a label into RViz
label_pos = list(white_cloud[pts_list[0]])
label_pos[2] += .4
object_markers_pub.publish(make_label(label,label_pos, index))
# Add the detected object to the list of detected objects.
do = DetectedObject()
do.label = label
do.cloud = ros_cluster
detected_objects.append(do)
print('Detected {} objects: {}'.format(len(detected_objects_labels), detected_objects_labels))
rospy.loginfo('Detected {} objects: {}'.format(len(detected_objects_labels), detected_objects_labels))
# Publish the list of detected objects
# This is the output you'll need to complete the upcoming project!
detected_objects_pub.publish(detected_objects)
if __name__ == '__main__':
# ROS node initialization
rospy.init_node('recognition', anonymous=True)
# Create Subscribers
pcl_sub = rospy.Subscriber("/sensor_stick/point_cloud", pc2.PointCloud2, pcl_callback, queue_size=1)
# Create Publishers
# Call them object_markers_pub and detected_objects_pub
# Have them publish to "/object_markers" and "/detected_objects" with
# Message Types "Marker" and "DetectedObjectsArray" , respectively
object_markers_pub = rospy.Publisher("/object_markers", Marker, queue_size=1)
detected_objects_pub = rospy.Publisher("/detected_objects", DetectedObjectsArray, queue_size=1)
# Load Model From disk
model = pickle.load(open('model.sav', 'rb'))
clf = model['classifier']
encoder = LabelEncoder()
encoder.classes_ = model['classes']
scaler = model['scaler']
# Initialize color_list
get_color_list.color_list = []
# Spin while node is not shutdown
while not rospy.is_shutdown():
rospy.spin()
|
mit
|
datapythonista/pandas
|
pandas/tests/io/formats/test_console.py
|
9
|
2460
|
import locale
import pytest
from pandas._config import detect_console_encoding
class MockEncoding: # TODO(py27): replace with mock
"""
Used to add a side effect when accessing the 'encoding' property. If the
side effect is a str, that value is returned. Otherwise, the side effect
should be an exception, which is raised.
"""
def __init__(self, encoding):
super().__init__()
self.val = encoding
@property
def encoding(self):
return self.raise_or_return(self.val)
@staticmethod
def raise_or_return(val):
if isinstance(val, str):
return val
else:
raise val
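# Illustrative behavior of MockEncoding, per the docstring above (assumed
# examples, not part of the test suite):
#   MockEncoding("utf-8").encoding        -> "utf-8"
#   MockEncoding(AttributeError).encoding -> raises AttributeError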
@pytest.mark.parametrize("empty,filled", [["stdin", "stdout"], ["stdout", "stdin"]])
def test_detect_console_encoding_from_stdout_stdin(monkeypatch, empty, filled):
# Ensures that sys.stdout.encoding or sys.stdin.encoding is used when
# they have values filled.
# GH 21552
with monkeypatch.context() as context:
context.setattr(f"sys.{empty}", MockEncoding(""))
context.setattr(f"sys.{filled}", MockEncoding(filled))
assert detect_console_encoding() == filled
@pytest.mark.parametrize("encoding", [AttributeError, IOError, "ascii"])
def test_detect_console_encoding_fallback_to_locale(monkeypatch, encoding):
# GH 21552
with monkeypatch.context() as context:
context.setattr("locale.getpreferredencoding", lambda: "foo")
context.setattr("sys.stdout", MockEncoding(encoding))
assert detect_console_encoding() == "foo"
@pytest.mark.parametrize(
"std,locale",
[
["ascii", "ascii"],
["ascii", locale.Error],
[AttributeError, "ascii"],
[AttributeError, locale.Error],
[IOError, "ascii"],
[IOError, locale.Error],
],
)
def test_detect_console_encoding_fallback_to_default(monkeypatch, std, locale):
# When both the stdout/stdin encoding and locale preferred encoding checks
# fail (or return 'ascii'), we should default to the sys default encoding.
# GH 21552
with monkeypatch.context() as context:
context.setattr(
"locale.getpreferredencoding", lambda: MockEncoding.raise_or_return(locale)
)
context.setattr("sys.stdout", MockEncoding(std))
context.setattr("sys.getdefaultencoding", lambda: "sysDefaultEncoding")
assert detect_console_encoding() == "sysDefaultEncoding"
|
bsd-3-clause
|
joernhees/scikit-learn
|
sklearn/linear_model/tests/test_theil_sen.py
|
55
|
9939
|
"""
Testing for Theil-Sen module (sklearn.linear_model.theil_sen)
"""
# Author: Florian Wilhelm <[email protected]>
# License: BSD 3 clause
from __future__ import division, print_function, absolute_import
import os
import sys
from contextlib import contextmanager
import numpy as np
from numpy.testing import assert_array_equal, assert_array_less
from numpy.testing import assert_array_almost_equal, assert_warns
from scipy.linalg import norm
from scipy.optimize import fmin_bfgs
from sklearn.exceptions import ConvergenceWarning
from sklearn.linear_model import LinearRegression, TheilSenRegressor
from sklearn.linear_model.theil_sen import _spatial_median, _breakdown_point
from sklearn.linear_model.theil_sen import _modified_weiszfeld_step
from sklearn.utils.testing import (
assert_almost_equal, assert_greater, assert_less, raises,
)
@contextmanager
def no_stdout_stderr():
old_stdout = sys.stdout
old_stderr = sys.stderr
with open(os.devnull, 'w') as devnull:
sys.stdout = devnull
sys.stderr = devnull
yield
devnull.flush()
sys.stdout = old_stdout
sys.stderr = old_stderr
def gen_toy_problem_1d(intercept=True):
random_state = np.random.RandomState(0)
# Linear model y = 3*x + N(2, 0.1**2)
w = 3.
if intercept:
c = 2.
n_samples = 50
else:
c = 0.1
n_samples = 100
x = random_state.normal(size=n_samples)
noise = 0.1 * random_state.normal(size=n_samples)
y = w * x + c + noise
# Add some outliers
if intercept:
x[42], y[42] = (-2, 4)
x[43], y[43] = (-2.5, 8)
x[33], y[33] = (2.5, 1)
x[49], y[49] = (2.1, 2)
else:
x[42], y[42] = (-2, 4)
x[43], y[43] = (-2.5, 8)
x[53], y[53] = (2.5, 1)
x[60], y[60] = (2.1, 2)
x[72], y[72] = (1.8, -7)
return x[:, np.newaxis], y, w, c
def gen_toy_problem_2d():
random_state = np.random.RandomState(0)
n_samples = 100
# Linear model y = 5*x_1 + 10*x_2 + N(1, 0.1**2)
X = random_state.normal(size=(n_samples, 2))
w = np.array([5., 10.])
c = 1.
noise = 0.1 * random_state.normal(size=n_samples)
y = np.dot(X, w) + c + noise
# Add some outliers
n_outliers = n_samples // 10
ix = random_state.randint(0, n_samples, size=n_outliers)
y[ix] = 50 * random_state.normal(size=n_outliers)
return X, y, w, c
def gen_toy_problem_4d():
random_state = np.random.RandomState(0)
n_samples = 10000
# Linear model y = 5*x_1 + 10*x_2 + 42*x_3 + 7*x_4 + N(1, 0.1**2)
X = random_state.normal(size=(n_samples, 4))
w = np.array([5., 10., 42., 7.])
c = 1.
noise = 0.1 * random_state.normal(size=n_samples)
y = np.dot(X, w) + c + noise
# Add some outliers
n_outliers = n_samples // 10
ix = random_state.randint(0, n_samples, size=n_outliers)
y[ix] = 50 * random_state.normal(size=n_outliers)
return X, y, w, c
def test_modweiszfeld_step_1d():
X = np.array([1., 2., 3.]).reshape(3, 1)
# Check startvalue is element of X and solution
median = 2.
new_y = _modified_weiszfeld_step(X, median)
assert_array_almost_equal(new_y, median)
# Check startvalue is not the solution
y = 2.5
new_y = _modified_weiszfeld_step(X, y)
assert_array_less(median, new_y)
assert_array_less(new_y, y)
# Check startvalue is not the solution but element of X
y = 3.
new_y = _modified_weiszfeld_step(X, y)
assert_array_less(median, new_y)
assert_array_less(new_y, y)
# Check that a single vector is identity
X = np.array([1., 2., 3.]).reshape(1, 3)
y = X[0, ]
new_y = _modified_weiszfeld_step(X, y)
assert_array_equal(y, new_y)
def test_modweiszfeld_step_2d():
X = np.array([0., 0., 1., 1., 0., 1.]).reshape(3, 2)
y = np.array([0.5, 0.5])
# Check first two iterations
new_y = _modified_weiszfeld_step(X, y)
assert_array_almost_equal(new_y, np.array([1 / 3, 2 / 3]))
new_y = _modified_weiszfeld_step(X, new_y)
assert_array_almost_equal(new_y, np.array([0.2792408, 0.7207592]))
# Check fix point
y = np.array([0.21132505, 0.78867497])
new_y = _modified_weiszfeld_step(X, y)
assert_array_almost_equal(new_y, y)
def test_spatial_median_1d():
X = np.array([1., 2., 3.]).reshape(3, 1)
true_median = 2.
_, median = _spatial_median(X)
assert_array_almost_equal(median, true_median)
# Test larger problem and for exact solution in 1d case
random_state = np.random.RandomState(0)
X = random_state.randint(100, size=(1000, 1))
true_median = np.median(X.ravel())
_, median = _spatial_median(X)
assert_array_equal(median, true_median)
def test_spatial_median_2d():
X = np.array([0., 0., 1., 1., 0., 1.]).reshape(3, 2)
_, median = _spatial_median(X, max_iter=100, tol=1.e-6)
def cost_func(y):
dists = np.array([norm(x - y) for x in X])
return np.sum(dists)
# Check if median is solution of the Fermat-Weber location problem
fermat_weber = fmin_bfgs(cost_func, median, disp=False)
assert_array_almost_equal(median, fermat_weber)
# Check when maximum iteration is exceeded a warning is emitted
assert_warns(ConvergenceWarning, _spatial_median, X, max_iter=30, tol=0.)
def test_theil_sen_1d():
X, y, w, c = gen_toy_problem_1d()
# Check that Least Squares fails
lstq = LinearRegression().fit(X, y)
assert_greater(np.abs(lstq.coef_ - w), 0.9)
# Check that Theil-Sen works
theil_sen = TheilSenRegressor(random_state=0).fit(X, y)
assert_array_almost_equal(theil_sen.coef_, w, 1)
assert_array_almost_equal(theil_sen.intercept_, c, 1)
def test_theil_sen_1d_no_intercept():
X, y, w, c = gen_toy_problem_1d(intercept=False)
# Check that Least Squares fails
lstq = LinearRegression(fit_intercept=False).fit(X, y)
assert_greater(np.abs(lstq.coef_ - w - c), 0.5)
# Check that Theil-Sen works
theil_sen = TheilSenRegressor(fit_intercept=False,
random_state=0).fit(X, y)
assert_array_almost_equal(theil_sen.coef_, w + c, 1)
assert_almost_equal(theil_sen.intercept_, 0.)
def test_theil_sen_2d():
X, y, w, c = gen_toy_problem_2d()
# Check that Least Squares fails
lstq = LinearRegression().fit(X, y)
assert_greater(norm(lstq.coef_ - w), 1.0)
# Check that Theil-Sen works
theil_sen = TheilSenRegressor(max_subpopulation=1e3,
random_state=0).fit(X, y)
assert_array_almost_equal(theil_sen.coef_, w, 1)
assert_array_almost_equal(theil_sen.intercept_, c, 1)
def test_calc_breakdown_point():
bp = _breakdown_point(1e10, 2)
assert_less(np.abs(bp - 1 + 1 / (np.sqrt(2))), 1.e-6)
@raises(ValueError)
def test_checksubparams_negative_subpopulation():
X, y, w, c = gen_toy_problem_1d()
TheilSenRegressor(max_subpopulation=-1, random_state=0).fit(X, y)
@raises(ValueError)
def test_checksubparams_too_few_subsamples():
X, y, w, c = gen_toy_problem_1d()
TheilSenRegressor(n_subsamples=1, random_state=0).fit(X, y)
@raises(ValueError)
def test_checksubparams_too_many_subsamples():
X, y, w, c = gen_toy_problem_1d()
TheilSenRegressor(n_subsamples=101, random_state=0).fit(X, y)
@raises(ValueError)
def test_checksubparams_n_subsamples_if_less_samples_than_features():
random_state = np.random.RandomState(0)
n_samples, n_features = 10, 20
X = random_state.normal(size=(n_samples, n_features))
y = random_state.normal(size=n_samples)
TheilSenRegressor(n_subsamples=9, random_state=0).fit(X, y)
def test_subpopulation():
X, y, w, c = gen_toy_problem_4d()
theil_sen = TheilSenRegressor(max_subpopulation=250,
random_state=0).fit(X, y)
assert_array_almost_equal(theil_sen.coef_, w, 1)
assert_array_almost_equal(theil_sen.intercept_, c, 1)
def test_subsamples():
X, y, w, c = gen_toy_problem_4d()
theil_sen = TheilSenRegressor(n_subsamples=X.shape[0],
random_state=0).fit(X, y)
lstq = LinearRegression().fit(X, y)
# Check for exactly the same results as Least Squares
assert_array_almost_equal(theil_sen.coef_, lstq.coef_, 9)
def test_verbosity():
X, y, w, c = gen_toy_problem_1d()
# Check that Theil-Sen can be verbose
with no_stdout_stderr():
TheilSenRegressor(verbose=True, random_state=0).fit(X, y)
TheilSenRegressor(verbose=True,
max_subpopulation=10,
random_state=0).fit(X, y)
def test_theil_sen_parallel():
X, y, w, c = gen_toy_problem_2d()
# Check that Least Squares fails
lstq = LinearRegression().fit(X, y)
assert_greater(norm(lstq.coef_ - w), 1.0)
# Check that Theil-Sen works
theil_sen = TheilSenRegressor(n_jobs=-1,
random_state=0,
max_subpopulation=2e3).fit(X, y)
assert_array_almost_equal(theil_sen.coef_, w, 1)
assert_array_almost_equal(theil_sen.intercept_, c, 1)
def test_less_samples_than_features():
random_state = np.random.RandomState(0)
n_samples, n_features = 10, 20
X = random_state.normal(size=(n_samples, n_features))
y = random_state.normal(size=n_samples)
# Check that Theil-Sen falls back to Least Squares if fit_intercept=False
theil_sen = TheilSenRegressor(fit_intercept=False,
random_state=0).fit(X, y)
lstq = LinearRegression(fit_intercept=False).fit(X, y)
assert_array_almost_equal(theil_sen.coef_, lstq.coef_, 12)
# Check fit_intercept=True case. This will not be equal to the Least
# Squares solution since the intercept is calculated differently.
theil_sen = TheilSenRegressor(fit_intercept=True, random_state=0).fit(X, y)
y_pred = theil_sen.predict(X)
assert_array_almost_equal(y_pred, y, 12)
|
bsd-3-clause
|
PhenixI/machine-learning
|
3_unsupervised_clustering/2_hierarchical tree/hierarchical_clustering/hierarchical_clustering.py
|
1
|
1637
|
#generate data
import pandas as pd
import numpy as np
np.random.seed(123)
variables = ['X','Y','Z']
labels = ['ID_0','ID_1','ID_2','ID_3','ID_4']
X = np.random.random_sample([5,3])*10
df = pd.DataFrame(X,columns = variables,index = labels)
df
#calculate the distance matrix
from scipy.spatial.distance import pdist,squareform
row_dist = pd.DataFrame(squareform(pdist(df,metric='euclidean')),columns = labels,index = labels)
row_dist
#apply complete linkage agglomeration to clusters
from scipy.cluster.hierarchy import linkage
row_clusters = linkage(pdist(df,metric = 'euclidean'),
method='complete')
pd.DataFrame(row_clusters,
columns = ['row label 1','row label 2','distance','no. of items in clust.'],
index = ['cluster %d' % (i+1) for i in range(row_clusters.shape[0])])
#visualize the results in the form of a dendrogram
import matplotlib.pyplot as plt
from scipy.cluster.hierarchy import dendrogram
row_dendr = dendrogram(row_clusters,labels = labels)
plt.tight_layout()
plt.ylabel('Euclidean distance')
plt.show()
#attaching dendrograms to a heat map
#1: create a figure, add an axes for the row dendrogram, and plot it rotated to the left
fig = plt.figure(figsize=(8,8))
axd = fig.add_axes([0.09,0.1,0.2,0.6])
row_dendr = dendrogram(row_clusters, orientation='left')
#2: reorder the DataFrame rows to match the dendrogram leaves (reversed)
df_rowclust = df.iloc[row_dendr['leaves'][::-1]]
#3: add an axes for the heat map and plot the reordered DataFrame
axm = fig.add_axes([0.23,0.1,0.6,0.6])
cax = axm.matshow(df_rowclust,interpolation='nearest', cmap='hot_r')
#4: hide the dendrogram ticks and spines, add a colorbar, and label the heat map axes
axd.set_xticks([])
axd.set_yticks([])
for i in axd.spines.values():
i.set_visible(False)
fig.colorbar(cax)
axm.set_xticklabels([''] + list(df_rowclust.columns))
axm.set_yticklabels([''] + list(df_rowclust.index))
plt.show()
|
gpl-2.0
|
marwin-ko/projects
|
aerialintel-data_science_challenge/sequential_backward_selection.py
|
1
|
1880
|
import numpy as np
from sklearn.base import clone
from itertools import combinations
from sklearn.cross_validation import train_test_split, cross_val_score
class SBS():
def __init__(self, model, k_features,scoring_metric=cross_val_score, test_size=0.25, random_state=42):
self.scoring_metric = scoring_metric
self.model = clone(model)
self.k_features = k_features
self.test_size = test_size
self.random_state = random_state
def fit(self,X,y):
X_train,X_test,y_train,y_test = train_test_split(X,y,test_size=self.test_size,random_state=self.random_state)
dim = X_train.shape[1]
self.indices_ = tuple(range(dim))
self.subsets_ = [self.indices_]
score = self._calc_score(X_train,y_train,X_test,y_test,self.indices_)
self.scores_ = [score]
while dim > self.k_features:
scores = []
subsets = []
for p in combinations(self.indices_,r=dim-1):
score = self._calc_score(X_train,y_train,X_test,y_test,p)
scores.append(score)
subsets.append(p)
best = np.argmax(scores)
self.indices_ = subsets[best]
self.subsets_.append(self.indices_)
dim -= 1
self.scores_.append(scores[best])
self.k_score_ = self.scores_[-1]
return self
def transform(self,X):
return X[:,self.indices_]
def _calc_score(self,X_train,y_train,X_test,y_test,indices):
results = self.model.fit(X_train[:,indices],y_train)
score = results.score(X_test[:,indices],y_test)
# y_pred = self.model.predict(X_test[:,indices])
# score = self.scoring_metric(y_test,y_pred)
# score = np.mean(self.scoring_metric(self.model,X_train,y_train,scoring='r2'))
return score
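# Minimal usage sketch (assumed, not part of the original file): select the best
# 3-feature subset for a k-nearest-neighbors regressor.
# from sklearn.neighbors import KNeighborsRegressor
# knn = KNeighborsRegressor(n_neighbors=5)
# sbs = SBS(model=knn, k_features=3)
# sbs.fit(X, y)                  # X, y: assumed feature matrix / target vector
# X_selected = sbs.transform(X)  # columns of the best subset found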
|
mit
|
lscheinkman/nupic
|
external/linux32/lib/python2.6/site-packages/matplotlib/text.py
|
69
|
55366
|
"""
Classes for including text in a figure.
"""
from __future__ import division
import math
import numpy as np
from matplotlib import cbook
from matplotlib import rcParams
import artist
from artist import Artist
from cbook import is_string_like, maxdict
from font_manager import FontProperties
from patches import bbox_artist, YAArrow, FancyBboxPatch, \
FancyArrowPatch, Rectangle
import transforms as mtransforms
from transforms import Affine2D, Bbox
from lines import Line2D
import matplotlib.nxutils as nxutils
def _process_text_args(override, fontdict=None, **kwargs):
"Return an override dict. See :func:`~pyplot.text' docstring for info"
if fontdict is not None:
override.update(fontdict)
override.update(kwargs)
return override
# Extracted from Text's method to serve as a function
def get_rotation(rotation):
"""
Return the text angle as float.
*rotation* may be 'horizontal', 'vertical', or a numeric value in degrees.
"""
if rotation in ('horizontal', None):
angle = 0.
elif rotation == 'vertical':
angle = 90.
else:
angle = float(rotation)
return angle%360
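# Illustrative values (assumed examples) for the accepted *rotation* inputs:
#   get_rotation('horizontal') -> 0.0
#   get_rotation('vertical')   -> 90.0
#   get_rotation(450)          -> 90.0   # numeric input is reduced modulo 360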
# these are not available for the object inspector until after the
# class is built, so we define an initial set here for the init
# function and they will be overridden after object defn
artist.kwdocd['Text'] = """
========================== =========================================================================
Property Value
========================== =========================================================================
alpha float
animated [True | False]
backgroundcolor any matplotlib color
bbox rectangle prop dict plus key 'pad' which is a pad in points
clip_box a matplotlib.transform.Bbox instance
clip_on [True | False]
color any matplotlib color
family [ 'serif' | 'sans-serif' | 'cursive' | 'fantasy' | 'monospace' ]
figure a matplotlib.figure.Figure instance
fontproperties a matplotlib.font_manager.FontProperties instance
horizontalalignment or ha [ 'center' | 'right' | 'left' ]
label any string
linespacing float
lod [True | False]
multialignment ['left' | 'right' | 'center' ]
name or fontname string eg, ['Sans' | 'Courier' | 'Helvetica' ...]
position (x,y)
rotation [ angle in degrees | 'vertical' | 'horizontal' ]
size or fontsize [ size in points | relative size eg 'smaller', 'x-large' ]
style or fontstyle [ 'normal' | 'italic' | 'oblique']
text string
transform a matplotlib.transform transformation instance
variant [ 'normal' | 'small-caps' ]
verticalalignment or va [ 'center' | 'top' | 'bottom' | 'baseline' ]
visible [True | False]
weight or fontweight [ 'normal' | 'bold' | 'heavy' | 'light' | 'ultrabold' | 'ultralight']
x float
y float
zorder any number
========================== =========================================================================
"""
# TODO : This function may move into the Text class as a method. As a
# matter of fact, the information from the _get_textbox function
# should be available during the Text._get_layout() call, which is
# called within _get_textbox. So, it would be better to move this
# function into a method, with some refactoring of the _get_layout method.
def _get_textbox(text, renderer):
"""
Calculate the bounding box of the text. Unlike the
:meth:`matplotlib.text.Text.get_extents` method, the bbox size of
the text is calculated before rotation.
"""
projected_xs = []
projected_ys = []
theta = text.get_rotation()/180.*math.pi
tr = mtransforms.Affine2D().rotate(-theta)
for t, wh, x, y in text._get_layout(renderer)[1]:
w, h = wh
xt1, yt1 = tr.transform_point((x, y))
xt2, yt2 = xt1+w, yt1+h
projected_xs.extend([xt1, xt2])
projected_ys.extend([yt1, yt2])
xt_box, yt_box = min(projected_xs), min(projected_ys)
w_box, h_box = max(projected_xs) - xt_box, max(projected_ys) - yt_box
tr = mtransforms.Affine2D().rotate(theta)
x_box, y_box = tr.transform_point((xt_box, yt_box))
return x_box, y_box, w_box, h_box
class Text(Artist):
"""
Handle storing and drawing of text in window or data coordinates.
"""
zorder = 3
def __str__(self):
return "Text(%g,%g,%s)"%(self._y,self._y,repr(self._text))
def __init__(self,
x=0, y=0, text='',
color=None, # defaults to rc params
verticalalignment='bottom',
horizontalalignment='left',
multialignment=None,
fontproperties=None, # defaults to FontProperties()
rotation=None,
linespacing=None,
**kwargs
):
"""
Create a :class:`~matplotlib.text.Text` instance at *x*, *y*
with string *text*.
Valid kwargs are
%(Text)s
"""
Artist.__init__(self)
self.cached = maxdict(5)
self._x, self._y = x, y
if color is None: color = rcParams['text.color']
if fontproperties is None: fontproperties=FontProperties()
elif is_string_like(fontproperties): fontproperties=FontProperties(fontproperties)
self.set_text(text)
self.set_color(color)
self._verticalalignment = verticalalignment
self._horizontalalignment = horizontalalignment
self._multialignment = multialignment
self._rotation = rotation
self._fontproperties = fontproperties
self._bbox = None
self._bbox_patch = None # a FancyBboxPatch instance
self._renderer = None
if linespacing is None:
linespacing = 1.2 # Maybe use rcParam later.
self._linespacing = linespacing
self.update(kwargs)
#self.set_bbox(dict(pad=0))
def contains(self,mouseevent):
"""Test whether the mouse event occurred in the patch.
In the case of text, a hit is true anywhere in the
axis-aligned bounding-box containing the text.
Returns True or False.
"""
if callable(self._contains): return self._contains(self,mouseevent)
if not self.get_visible() or self._renderer is None:
return False,{}
l,b,w,h = self.get_window_extent().bounds
r = l+w
t = b+h
xyverts = (l,b), (l, t), (r, t), (r, b)
x, y = mouseevent.x, mouseevent.y
inside = nxutils.pnpoly(x, y, xyverts)
return inside,{}
def _get_xy_display(self):
'get the (possibly unit converted) transformed x, y in display coords'
x, y = self.get_position()
return self.get_transform().transform_point((x,y))
def _get_multialignment(self):
if self._multialignment is not None: return self._multialignment
else: return self._horizontalalignment
def get_rotation(self):
'return the text angle as float in degrees'
return get_rotation(self._rotation) # string_or_number -> number
def update_from(self, other):
'Copy properties from other to self'
Artist.update_from(self, other)
self._color = other._color
self._multialignment = other._multialignment
self._verticalalignment = other._verticalalignment
self._horizontalalignment = other._horizontalalignment
self._fontproperties = other._fontproperties.copy()
self._rotation = other._rotation
self._picker = other._picker
self._linespacing = other._linespacing
def _get_layout(self, renderer):
key = self.get_prop_tup()
if key in self.cached: return self.cached[key]
horizLayout = []
thisx, thisy = 0.0, 0.0
xmin, ymin = 0.0, 0.0
width, height = 0.0, 0.0
lines = self._text.split('\n')
whs = np.zeros((len(lines), 2))
horizLayout = np.zeros((len(lines), 4))
# Find full vertical extent of font,
# including ascenders and descenders:
tmp, heightt, bl = renderer.get_text_width_height_descent(
'lp', self._fontproperties, ismath=False)
offsety = heightt * self._linespacing
baseline = None
for i, line in enumerate(lines):
clean_line, ismath = self.is_math_text(line)
w, h, d = renderer.get_text_width_height_descent(
clean_line, self._fontproperties, ismath=ismath)
if baseline is None:
baseline = h - d
whs[i] = w, h
horizLayout[i] = thisx, thisy, w, h
thisy -= offsety
width = max(width, w)
ymin = horizLayout[-1][1]
ymax = horizLayout[0][1] + horizLayout[0][3]
height = ymax-ymin
xmax = xmin + width
# get the rotation matrix
M = Affine2D().rotate_deg(self.get_rotation())
offsetLayout = np.zeros((len(lines), 2))
offsetLayout[:] = horizLayout[:, 0:2]
# now offset the individual text lines within the box
if len(lines)>1: # do the multiline alignment
malign = self._get_multialignment()
if malign == 'center':
offsetLayout[:, 0] += width/2.0 - horizLayout[:, 2] / 2.0
elif malign == 'right':
offsetLayout[:, 0] += width - horizLayout[:, 2]
# the corners of the unrotated bounding box
cornersHoriz = np.array(
[(xmin, ymin), (xmin, ymax), (xmax, ymax), (xmax, ymin)],
np.float_)
# now rotate the bbox
cornersRotated = M.transform(cornersHoriz)
txs = cornersRotated[:, 0]
tys = cornersRotated[:, 1]
# compute the bounds of the rotated box
xmin, xmax = txs.min(), txs.max()
ymin, ymax = tys.min(), tys.max()
width = xmax - xmin
height = ymax - ymin
# Now move the box to the target position: offset the display bbox by alignment
halign = self._horizontalalignment
valign = self._verticalalignment
# compute the text location in display coords and the offsets
# necessary to align the bbox with that location
if halign=='center': offsetx = (xmin + width/2.0)
elif halign=='right': offsetx = (xmin + width)
else: offsetx = xmin
if valign=='center': offsety = (ymin + height/2.0)
elif valign=='top': offsety = (ymin + height)
elif valign=='baseline': offsety = (ymin + height) - baseline
else: offsety = ymin
xmin -= offsetx
ymin -= offsety
bbox = Bbox.from_bounds(xmin, ymin, width, height)
# now rotate the positions around the first x,y position
xys = M.transform(offsetLayout)
xys -= (offsetx, offsety)
xs, ys = xys[:, 0], xys[:, 1]
ret = bbox, zip(lines, whs, xs, ys)
self.cached[key] = ret
return ret
def set_bbox(self, rectprops):
"""
Draw a bounding box around self. rectprops are any settable
properties for a rectangle, eg facecolor='red', alpha=0.5.
t.set_bbox(dict(facecolor='red', alpha=0.5))
If rectprops has a "boxstyle" key, a FancyBboxPatch
is initialized with rectprops and will be drawn. The mutation
scale of the FancyBboxPatch is set to the fontsize.
ACCEPTS: rectangle prop dict
"""
# The self._bbox_patch object is created only if rectprops has
# boxstyle key. Otherwise, self._bbox will be set to the
# rectprops and the bbox will be drawn using bbox_artist
# function. This is to keep the backward compatibility.
if rectprops is not None and "boxstyle" in rectprops:
props = rectprops.copy()
boxstyle = props.pop("boxstyle")
bbox_transmuter = props.pop("bbox_transmuter", None)
self._bbox_patch = FancyBboxPatch((0., 0.),
1., 1.,
boxstyle=boxstyle,
bbox_transmuter=bbox_transmuter,
transform=mtransforms.IdentityTransform(),
**props)
self._bbox = None
else:
self._bbox_patch = None
self._bbox = rectprops
def get_bbox_patch(self):
"""
Return the bbox Patch object. Returns None if the
FancyBboxPatch is not made.
"""
return self._bbox_patch
def update_bbox_position_size(self, renderer):
"""
Update the location and the size of the bbox. This method
should be used when the position and size of the bbox needs to
be updated before actually drawing the bbox.
"""
# For arrow_patch, use textbox as patchA by default.
if not isinstance(self.arrow_patch, FancyArrowPatch):
return
if self._bbox_patch:
trans = self.get_transform()
# don't use self.get_position here, which refers to text position
# in Text, and dash position in TextWithDash:
posx = float(self.convert_xunits(self._x))
posy = float(self.convert_yunits(self._y))
posx, posy = trans.transform_point((posx, posy))
x_box, y_box, w_box, h_box = _get_textbox(self, renderer)
self._bbox_patch.set_bounds(0., 0.,
w_box, h_box)
theta = self.get_rotation()/180.*math.pi
tr = mtransforms.Affine2D().rotate(theta)
tr = tr.translate(posx+x_box, posy+y_box)
self._bbox_patch.set_transform(tr)
fontsize_in_pixel = renderer.points_to_pixels(self.get_size())
self._bbox_patch.set_mutation_scale(fontsize_in_pixel)
#self._bbox_patch.draw(renderer)
else:
props = self._bbox
if props is None: props = {}
props = props.copy() # don't want to alter the pad externally
pad = props.pop('pad', 4)
pad = renderer.points_to_pixels(pad)
bbox = self.get_window_extent(renderer)
l,b,w,h = bbox.bounds
l-=pad/2.
b-=pad/2.
w+=pad
h+=pad
r = Rectangle(xy=(l,b),
width=w,
height=h,
)
r.set_transform(mtransforms.IdentityTransform())
r.set_clip_on( False )
r.update(props)
self.arrow_patch.set_patchA(r)
def _draw_bbox(self, renderer, posx, posy):
""" Update the location and the size of the bbox
(FancyBboxPatch), and draw
"""
x_box, y_box, w_box, h_box = _get_textbox(self, renderer)
self._bbox_patch.set_bounds(0., 0.,
w_box, h_box)
theta = self.get_rotation()/180.*math.pi
tr = mtransforms.Affine2D().rotate(theta)
tr = tr.translate(posx+x_box, posy+y_box)
self._bbox_patch.set_transform(tr)
fontsize_in_pixel = renderer.points_to_pixels(self.get_size())
self._bbox_patch.set_mutation_scale(fontsize_in_pixel)
self._bbox_patch.draw(renderer)
def draw(self, renderer):
"""
Draws the :class:`Text` object to the given *renderer*.
"""
if renderer is not None:
self._renderer = renderer
if not self.get_visible(): return
if self._text=='': return
bbox, info = self._get_layout(renderer)
trans = self.get_transform()
# don't use self.get_position here, which refers to text position
# in Text, and dash position in TextWithDash:
posx = float(self.convert_xunits(self._x))
posy = float(self.convert_yunits(self._y))
posx, posy = trans.transform_point((posx, posy))
canvasw, canvash = renderer.get_canvas_width_height()
# draw the FancyBboxPatch
if self._bbox_patch:
self._draw_bbox(renderer, posx, posy)
gc = renderer.new_gc()
gc.set_foreground(self._color)
gc.set_alpha(self._alpha)
gc.set_url(self._url)
if self.get_clip_on():
gc.set_clip_rectangle(self.clipbox)
if self._bbox:
bbox_artist(self, renderer, self._bbox)
angle = self.get_rotation()
if rcParams['text.usetex']:
for line, wh, x, y in info:
x = x + posx
y = y + posy
if renderer.flipy():
y = canvash-y
clean_line, ismath = self.is_math_text(line)
renderer.draw_tex(gc, x, y, clean_line,
self._fontproperties, angle)
return
for line, wh, x, y in info:
x = x + posx
y = y + posy
if renderer.flipy():
y = canvash-y
clean_line, ismath = self.is_math_text(line)
renderer.draw_text(gc, x, y, clean_line,
self._fontproperties, angle,
ismath=ismath)
def get_color(self):
"Return the color of the text"
return self._color
def get_fontproperties(self):
"Return the :class:`~font_manager.FontProperties` object"
return self._fontproperties
def get_font_properties(self):
'alias for get_fontproperties'
return self.get_fontproperties()
def get_family(self):
"Return the list of font families used for font lookup"
return self._fontproperties.get_family()
def get_fontfamily(self):
'alias for get_family'
return self.get_family()
def get_name(self):
"Return the font name as string"
return self._fontproperties.get_name()
def get_style(self):
"Return the font style as string"
return self._fontproperties.get_style()
def get_size(self):
"Return the font size as integer"
return self._fontproperties.get_size_in_points()
def get_variant(self):
"Return the font variant as a string"
return self._fontproperties.get_variant()
def get_fontvariant(self):
'alias for get_variant'
return self.get_variant()
def get_weight(self):
"Get the font weight as string or number"
return self._fontproperties.get_weight()
def get_fontname(self):
'alias for get_name'
return self.get_name()
def get_fontstyle(self):
'alias for get_style'
return self.get_style()
def get_fontsize(self):
'alias for get_size'
return self.get_size()
def get_fontweight(self):
'alias for get_weight'
return self.get_weight()
def get_stretch(self):
'Get the font stretch as a string or number'
return self._fontproperties.get_stretch()
def get_fontstretch(self):
'alias for get_stretch'
return self.get_stretch()
def get_ha(self):
'alias for get_horizontalalignment'
return self.get_horizontalalignment()
def get_horizontalalignment(self):
"""
Return the horizontal alignment as string. Will be one of
'left', 'center' or 'right'.
"""
return self._horizontalalignment
def get_position(self):
"Return the position of the text as a tuple (*x*, *y*)"
x = float(self.convert_xunits(self._x))
y = float(self.convert_yunits(self._y))
return x, y
def get_prop_tup(self):
"""
Return a hashable tuple of properties.
Not intended to be human readable, but useful for backends who
want to cache derived information about text (eg layouts) and
need to know if the text has changed.
"""
x, y = self.get_position()
return (x, y, self._text, self._color,
self._verticalalignment, self._horizontalalignment,
hash(self._fontproperties), self._rotation,
self.figure.dpi, id(self._renderer),
)
def get_text(self):
"Get the text as string"
return self._text
def get_va(self):
'alias for :meth:`get_verticalalignment`'
return self.get_verticalalignment()
def get_verticalalignment(self):
"""
Return the vertical alignment as string. Will be one of
'top', 'center', 'bottom' or 'baseline'.
"""
return self._verticalalignment
def get_window_extent(self, renderer=None, dpi=None):
'''
Return a :class:`~matplotlib.transforms.Bbox` object bounding
the text, in display units.
In addition to being used internally, this is useful for
specifying clickable regions in a png file on a web page.
*renderer* defaults to the _renderer attribute of the text
object. This is not assigned until the first execution of
:meth:`draw`, so you must use this kwarg if you want
to call :meth:`get_window_extent` prior to the first
:meth:`draw`. For getting web page regions, it is
simpler to call the method after saving the figure.
*dpi* defaults to self.figure.dpi; the renderer dpi is
irrelevant. For the web application, if figure.dpi is not
the value used when saving the figure, then the value that
was used must be specified as the *dpi* argument.
'''
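# Illustrative (assumed) usage per the note above; 'fig' and 't' stand in for a
# Figure and a Text instance. After the first draw the renderer kwarg can be
# omitted, otherwise it must be supplied explicitly.
#   fig.canvas.draw()             # assigns the text's _renderer attribute
#   bbox = t.get_window_extent()  # Bbox of the text in display units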
#return _unit_box
if not self.get_visible(): return Bbox.unit()
if dpi is not None:
dpi_orig = self.figure.dpi
self.figure.dpi = dpi
if self._text == '':
tx, ty = self._get_xy_display()
return Bbox.from_bounds(tx,ty,0,0)
if renderer is not None:
self._renderer = renderer
if self._renderer is None:
raise RuntimeError('Cannot get window extent w/o renderer')
bbox, info = self._get_layout(self._renderer)
x, y = self.get_position()
x, y = self.get_transform().transform_point((x, y))
bbox = bbox.translated(x, y)
if dpi is not None:
self.figure.dpi = dpi_orig
return bbox
def set_backgroundcolor(self, color):
"""
Set the background color of the text by updating the bbox.
.. seealso::
:meth:`set_bbox`
ACCEPTS: any matplotlib color
"""
if self._bbox is None:
self._bbox = dict(facecolor=color, edgecolor=color)
else:
self._bbox.update(dict(facecolor=color))
def set_color(self, color):
"""
Set the foreground color of the text
ACCEPTS: any matplotlib color
"""
# Make sure it is hashable, or get_prop_tup will fail.
try:
hash(color)
except TypeError:
color = tuple(color)
self._color = color
def set_ha(self, align):
'alias for set_horizontalalignment'
self.set_horizontalalignment(align)
def set_horizontalalignment(self, align):
"""
Set the horizontal alignment to one of
ACCEPTS: [ 'center' | 'right' | 'left' ]
"""
legal = ('center', 'right', 'left')
if align not in legal:
raise ValueError('Horizontal alignment must be one of %s' % str(legal))
self._horizontalalignment = align
def set_ma(self, align):
'alias for set_multialignment'
self.set_multialignment(align)
def set_multialignment(self, align):
"""
Set the alignment for multiple lines layout. The layout of the
bounding box of all the lines is determined by the horizontalalignment
and verticalalignment properties, but the multiline text within that
box can be aligned to the left, right, or center of the box.
ACCEPTS: ['left' | 'right' | 'center' ]
"""
legal = ('center', 'right', 'left')
if align not in legal:
raise ValueError('Multialignment must be one of %s' % str(legal))
self._multialignment = align
def set_linespacing(self, spacing):
"""
Set the line spacing as a multiple of the font size.
Default is 1.2.
ACCEPTS: float (multiple of font size)
"""
self._linespacing = spacing
def set_family(self, fontname):
"""
Set the font family. May be either a single string, or a list
of strings in decreasing priority. Each string may be either
a real font name or a generic font class name. If the latter,
the specific font names will be looked up in the
:file:`matplotlibrc` file.
ACCEPTS: [ FONTNAME | 'serif' | 'sans-serif' | 'cursive' | 'fantasy' | 'monospace' ]
"""
self._fontproperties.set_family(fontname)
def set_variant(self, variant):
"""
Set the font variant, either 'normal' or 'small-caps'.
ACCEPTS: [ 'normal' | 'small-caps' ]
"""
self._fontproperties.set_variant(variant)
def set_fontvariant(self, variant):
'alias for set_variant'
return self.set_variant(variant)
def set_name(self, fontname):
"""alias for set_family"""
return self.set_family(fontname)
def set_fontname(self, fontname):
"""alias for set_family"""
self.set_family(fontname)
def set_style(self, fontstyle):
"""
Set the font style.
ACCEPTS: [ 'normal' | 'italic' | 'oblique']
"""
self._fontproperties.set_style(fontstyle)
def set_fontstyle(self, fontstyle):
'alias for set_style'
return self.set_style(fontstyle)
def set_size(self, fontsize):
"""
Set the font size. May be either a size string, relative to
the default font size, or an absolute font size in points.
ACCEPTS: [ size in points | 'xx-small' | 'x-small' | 'small' | 'medium' | 'large' | 'x-large' | 'xx-large' ]
"""
self._fontproperties.set_size(fontsize)
def set_fontsize(self, fontsize):
'alias for set_size'
return self.set_size(fontsize)
def set_weight(self, weight):
"""
Set the font weight.
ACCEPTS: [ a numeric value in range 0-1000 | 'ultralight' | 'light' | 'normal' | 'regular' | 'book' | 'medium' | 'roman' | 'semibold' | 'demibold' | 'demi' | 'bold' | 'heavy' | 'extra bold' | 'black' ]
"""
self._fontproperties.set_weight(weight)
def set_fontweight(self, weight):
'alias for set_weight'
return self.set_weight(weight)
def set_stretch(self, stretch):
"""
Set the font stretch (horizontal condensation or expansion).
ACCEPTS: [ a numeric value in range 0-1000 | 'ultra-condensed' | 'extra-condensed' | 'condensed' | 'semi-condensed' | 'normal' | 'semi-expanded' | 'expanded' | 'extra-expanded' | 'ultra-expanded' ]
"""
self._fontproperties.set_stretch(stretch)
def set_fontstretch(self, stretch):
'alias for set_stretch'
return self.set_stretch(stretch)
def set_position(self, xy):
"""
Set the (*x*, *y*) position of the text
ACCEPTS: (x,y)
"""
self.set_x(xy[0])
self.set_y(xy[1])
def set_x(self, x):
"""
Set the *x* position of the text
ACCEPTS: float
"""
self._x = x
def set_y(self, y):
"""
Set the *y* position of the text
ACCEPTS: float
"""
self._y = y
def set_rotation(self, s):
"""
Set the rotation of the text
ACCEPTS: [ angle in degrees | 'vertical' | 'horizontal' ]
"""
self._rotation = s
def set_va(self, align):
'alias for set_verticalalignment'
self.set_verticalalignment(align)
def set_verticalalignment(self, align):
"""
Set the vertical alignment
ACCEPTS: [ 'center' | 'top' | 'bottom' | 'baseline' ]
"""
legal = ('top', 'bottom', 'center', 'baseline')
if align not in legal:
raise ValueError('Vertical alignment must be one of %s' % str(legal))
self._verticalalignment = align
def set_text(self, s):
"""
Set the text string *s*
It may contain newlines (``\\n``) or math in LaTeX syntax.
ACCEPTS: string or anything printable with '%s' conversion.
"""
self._text = '%s' % (s,)
def is_math_text(self, s):
"""
Returns True if the given string *s* contains any mathtext.
"""
# Did we find an even number of non-escaped dollar signs?
# If so, treat it as math text.
dollar_count = s.count(r'$') - s.count(r'\$')
even_dollars = (dollar_count > 0 and dollar_count % 2 == 0)
if rcParams['text.usetex']:
return s, 'TeX'
if even_dollars:
return s, True
else:
return s.replace(r'\$', '$'), False
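# Illustrative (assumed) examples of the dollar-sign heuristic above, with
# rcParams['text.usetex'] disabled:
#   is_math_text(r'$x^2$')     -> ('$x^2$', True)      # two unescaped dollars
#   is_math_text(r'costs \$5') -> ('costs $5', False)  # escaped dollar unescaped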
def set_fontproperties(self, fp):
"""
Set the font properties that control the text. *fp* must be a
:class:`matplotlib.font_manager.FontProperties` object.
ACCEPTS: a :class:`matplotlib.font_manager.FontProperties` instance
"""
if is_string_like(fp):
fp = FontProperties(fp)
self._fontproperties = fp.copy()
def set_font_properties(self, fp):
'alias for set_fontproperties'
self.set_fontproperties(fp)
artist.kwdocd['Text'] = artist.kwdoc(Text)
Text.__init__.im_func.__doc__ = cbook.dedent(Text.__init__.__doc__) % artist.kwdocd
class TextWithDash(Text):
"""
This is basically a :class:`~matplotlib.text.Text` with a dash
(drawn with a :class:`~matplotlib.lines.Line2D`) before/after
it. It is intended to be a drop-in replacement for
:class:`~matplotlib.text.Text`, and should behave identically to
it when *dashlength* = 0.0.
The dash always comes between the point specified by
:meth:`~matplotlib.text.Text.set_position` and the text. When a
dash exists, the text alignment arguments (*horizontalalignment*,
*verticalalignment*) are ignored.
*dashlength* is the length of the dash in canvas units.
(default = 0.0).
*dashdirection* is one of 0 or 1, where 0 draws the dash after the
text and 1 before. (default = 0).
*dashrotation* specifies the rotation of the dash, and should
generally stay *None*. In this case
:meth:`~matplotlib.text.TextWithDash.get_dashrotation` returns
:meth:`~matplotlib.text.Text.get_rotation`. (I.e., the dash takes
its rotation from the text's rotation). Because the text center is
projected onto the dash, major deviations in the rotation cause
what may be considered visually unappealing results.
(default = *None*)
*dashpad* is a padding length to add (or subtract) space
between the text and the dash, in canvas units.
(default = 3)
*dashpush* "pushes" the dash and text away from the point
specified by :meth:`~matplotlib.text.Text.set_position` by the
amount in canvas units. (default = 0)
.. note::
The alignment of the two objects is based on the bounding box
of the :class:`~matplotlib.text.Text`, as obtained by
:meth:`~matplotlib.artist.Artist.get_window_extent`. This, in
turn, appears to depend on the font metrics as given by the
rendering backend. Hence the quality of the "centering" of the
label text with respect to the dash varies depending on the
backend used.
.. note::
I'm not sure that I got the
:meth:`~matplotlib.text.TextWithDash.get_window_extent` right,
or whether that's sufficient for providing the object bounding
box.
"""
__name__ = 'textwithdash'
def __str__(self):
return "TextWithDash(%g,%g,%s)"%(self._x,self._y,repr(self._text))
def __init__(self,
x=0, y=0, text='',
color=None, # defaults to rc params
verticalalignment='center',
horizontalalignment='center',
multialignment=None,
fontproperties=None, # defaults to FontProperties()
rotation=None,
linespacing=None,
dashlength=0.0,
dashdirection=0,
dashrotation=None,
dashpad=3,
dashpush=0,
):
Text.__init__(self, x=x, y=y, text=text, color=color,
verticalalignment=verticalalignment,
horizontalalignment=horizontalalignment,
multialignment=multialignment,
fontproperties=fontproperties,
rotation=rotation,
linespacing=linespacing)
# The position (x,y) values for text and dashline
# are bogus as given in the instantiation; they will
# be set correctly by update_coords() in draw()
self.dashline = Line2D(xdata=(x, x),
ydata=(y, y),
color='k',
linestyle='-')
self._dashx = float(x)
self._dashy = float(y)
self._dashlength = dashlength
self._dashdirection = dashdirection
self._dashrotation = dashrotation
self._dashpad = dashpad
self._dashpush = dashpush
#self.set_bbox(dict(pad=0))
def get_position(self):
"Return the position of the text as a tuple (*x*, *y*)"
x = float(self.convert_xunits(self._dashx))
y = float(self.convert_yunits(self._dashy))
return x, y
def get_prop_tup(self):
"""
Return a hashable tuple of properties.
Not intended to be human readable, but useful for backends who
want to cache derived information about text (eg layouts) and
need to know if the text has changed.
"""
props = [p for p in Text.get_prop_tup(self)]
props.extend([self._x, self._y, self._dashlength, self._dashdirection, self._dashrotation, self._dashpad, self._dashpush])
return tuple(props)
def draw(self, renderer):
"""
Draw the :class:`TextWithDash` object to the given *renderer*.
"""
self.update_coords(renderer)
Text.draw(self, renderer)
if self.get_dashlength() > 0.0:
self.dashline.draw(renderer)
def update_coords(self, renderer):
"""
Computes the actual *x*, *y* coordinates for text based on the
input *x*, *y* and the *dashlength*. Since the rotation is
with respect to the actual canvas's coordinates we need to map
back and forth.
"""
dashx, dashy = self.get_position()
dashlength = self.get_dashlength()
# Shortcircuit this process if we don't have a dash
if dashlength == 0.0:
self._x, self._y = dashx, dashy
return
dashrotation = self.get_dashrotation()
dashdirection = self.get_dashdirection()
dashpad = self.get_dashpad()
dashpush = self.get_dashpush()
angle = get_rotation(dashrotation)
theta = np.pi*(angle/180.0+dashdirection-1)
cos_theta, sin_theta = np.cos(theta), np.sin(theta)
transform = self.get_transform()
# Compute the dash end points
# The 'c' prefix is for canvas coordinates
cxy = transform.transform_point((dashx, dashy))
cd = np.array([cos_theta, sin_theta])
c1 = cxy+dashpush*cd
c2 = cxy+(dashpush+dashlength)*cd
inverse = transform.inverted()
(x1, y1) = inverse.transform_point(tuple(c1))
(x2, y2) = inverse.transform_point(tuple(c2))
self.dashline.set_data((x1, x2), (y1, y2))
# We now need to extend this vector out to
# the center of the text area.
# The basic problem here is that we're "rotating"
# two separate objects but want it to appear as
# if they're rotated together.
# This is made non-trivial because of the
# interaction between text rotation and alignment -
# text alignment is based on the bbox after rotation.
# We reset/force both alignments to 'center'
# so we can do something relatively reasonable.
# There's probably a better way to do this by
# embedding all this in the object's transformations,
# but I don't grok the transformation stuff
# well enough yet.
we = Text.get_window_extent(self, renderer=renderer)
w, h = we.width, we.height
# Watch for zeros
if sin_theta == 0.0:
dx = w
dy = 0.0
elif cos_theta == 0.0:
dx = 0.0
dy = h
else:
tan_theta = sin_theta/cos_theta
dx = w
dy = w*tan_theta
if dy > h or dy < -h:
dy = h
dx = h/tan_theta
cwd = np.array([dx, dy])/2
cwd *= 1+dashpad/np.sqrt(np.dot(cwd,cwd))
cw = c2+(dashdirection*2-1)*cwd
newx, newy = inverse.transform_point(tuple(cw))
self._x, self._y = newx, newy
# Now set the window extent
# I'm not at all sure this is the right way to do this.
we = Text.get_window_extent(self, renderer=renderer)
self._twd_window_extent = we.frozen()
self._twd_window_extent.update_from_data_xy(np.array([c1]), False)
# Finally, make text align center
Text.set_horizontalalignment(self, 'center')
Text.set_verticalalignment(self, 'center')
def get_window_extent(self, renderer=None):
'''
Return a :class:`~matplotlib.transforms.Bbox` object bounding
the text, in display units.
In addition to being used internally, this is useful for
specifying clickable regions in a png file on a web page.
*renderer* defaults to the _renderer attribute of the text
object. This is not assigned until the first execution of
:meth:`draw`, so you must use this kwarg if you want
to call :meth:`get_window_extent` prior to the first
:meth:`draw`. For getting web page regions, it is
simpler to call the method after saving the figure.
'''
self.update_coords(renderer)
if self.get_dashlength() == 0.0:
return Text.get_window_extent(self, renderer=renderer)
else:
return self._twd_window_extent
def get_dashlength(self):
"""
Get the length of the dash.
"""
return self._dashlength
def set_dashlength(self, dl):
"""
Set the length of the dash.
ACCEPTS: float (canvas units)
"""
self._dashlength = dl
def get_dashdirection(self):
"""
Get the direction of the dash. 1 is before the text and 0 is after.
"""
return self._dashdirection
def set_dashdirection(self, dd):
"""
Set the direction of the dash following the text.
1 is before the text and 0 is after. The default
is 0, which is what you'd want for the typical
case of ticks below and on the left of the figure.
ACCEPTS: int (1 is before, 0 is after)
"""
self._dashdirection = dd
def get_dashrotation(self):
"""
Get the rotation of the dash in degrees.
"""
if self._dashrotation == None:
return self.get_rotation()
else:
return self._dashrotation
def set_dashrotation(self, dr):
"""
Set the rotation of the dash, in degrees
ACCEPTS: float (degrees)
"""
self._dashrotation = dr
def get_dashpad(self):
"""
Get the extra spacing between the dash and the text, in canvas units.
"""
return self._dashpad
def set_dashpad(self, dp):
"""
Set the "pad" of the TextWithDash, which is the extra spacing
between the dash and the text, in canvas units.
ACCEPTS: float (canvas units)
"""
self._dashpad = dp
def get_dashpush(self):
"""
Get the extra spacing between the dash and the specified text
position, in canvas units.
"""
return self._dashpush
def set_dashpush(self, dp):
"""
Set the "push" of the TextWithDash, which
is the extra spacing between the beginning
of the dash and the specified position.
ACCEPTS: float (canvas units)
"""
self._dashpush = dp
def set_position(self, xy):
"""
Set the (*x*, *y*) position of the :class:`TextWithDash`.
ACCEPTS: (x, y)
"""
self.set_x(xy[0])
self.set_y(xy[1])
def set_x(self, x):
"""
Set the *x* position of the :class:`TextWithDash`.
ACCEPTS: float
"""
self._dashx = float(x)
def set_y(self, y):
"""
Set the *y* position of the :class:`TextWithDash`.
ACCEPTS: float
"""
self._dashy = float(y)
def set_transform(self, t):
"""
Set the :class:`matplotlib.transforms.Transform` instance used
by this artist.
ACCEPTS: a :class:`matplotlib.transforms.Transform` instance
"""
Text.set_transform(self, t)
self.dashline.set_transform(t)
def get_figure(self):
'return the figure instance the artist belongs to'
return self.figure
def set_figure(self, fig):
"""
Set the figure instance the artist belong to.
ACCEPTS: a :class:`matplotlib.figure.Figure` instance
"""
Text.set_figure(self, fig)
self.dashline.set_figure(fig)
artist.kwdocd['TextWithDash'] = artist.kwdoc(TextWithDash)
class Annotation(Text):
"""
A :class:`~matplotlib.text.Text` class to make annotating things
in the figure, such as :class:`~matplotlib.figure.Figure`,
:class:`~matplotlib.axes.Axes`,
:class:`~matplotlib.patches.Rectangle`, etc., easier.
"""
def __str__(self):
return "Annotation(%g,%g,%s)"%(self.xy[0],self.xy[1],repr(self._text))
def __init__(self, s, xy,
xytext=None,
xycoords='data',
textcoords=None,
arrowprops=None,
**kwargs):
"""
Annotate the *x*, *y* point *xy* with text *s* at *x*, *y*
location *xytext*. (If *xytext* = *None*, defaults to *xy*,
and if *textcoords* = *None*, defaults to *xycoords*).
*arrowprops*, if not *None*, is a dictionary of line properties
(see :class:`matplotlib.lines.Line2D`) for the arrow that connects
annotation to the point.
If the dictionary has a key *arrowstyle*, a FancyArrowPatch
instance is created with the given dictionary and is
drawn. Otherwise, a YAArrow patch instance is created and
drawn. Valid keys for YAArrow are
========= =============================================================
Key Description
========= =============================================================
width the width of the arrow in points
frac the fraction of the arrow length occupied by the head
headwidth the width of the base of the arrow head in points
shrink oftentimes it is convenient to have the arrowtip
and base a bit away from the text and point being
annotated. If *d* is the distance between the text and
annotated point, shrink will shorten the arrow so the tip
and base are shrink percent of the distance *d* away from the
endpoints. i.e., ``shrink=0.05 is 5%%``
? any key for :class:`matplotlib.patches.Polygon`
========= =============================================================
Valid keys for FancyArrowPatch are
=============== ======================================================
Key Description
=============== ======================================================
arrowstyle the arrow style
connectionstyle the connection style
relpos default is (0.5, 0.5)
patchA default is bounding box of the text
patchB default is None
shrinkA default is 2 points
shrinkB default is 2 points
mutation_scale default is text size (in points)
mutation_aspect default is 1.
? any key for :class:`matplotlib.patches.PathPatch`
=============== ======================================================
*xycoords* and *textcoords* are strings that indicate the
coordinates of *xy* and *xytext*.
================= ===================================================
Property Description
================= ===================================================
'figure points' points from the lower left corner of the figure
'figure pixels' pixels from the lower left corner of the figure
'figure fraction' 0,0 is lower left of figure and 1,1 is upper right
'axes points' points from lower left corner of axes
'axes pixels' pixels from lower left corner of axes
'axes fraction' 0,0 is lower left of axes and 1,1 is upper right
'data' use the coordinate system of the object being
annotated (default)
'offset points' Specify an offset (in points) from the *xy* value
'polar' you can specify *theta*, *r* for the annotation,
even in cartesian plots. Note that if you
are using a polar axes, you do not need
to specify polar for the coordinate
system since that is the native "data" coordinate
system.
================= ===================================================
If a 'points' or 'pixels' option is specified, values will be
added to the bottom-left and if negative, values will be
subtracted from the top-right. Eg::
# 10 points to the right of the left border of the axes and
# 5 points below the top border
xy=(10,-5), xycoords='axes points'
Additional kwargs are Text properties:
%(Text)s
"""
if xytext is None:
xytext = xy
if textcoords is None:
textcoords = xycoords
# we'll draw ourself after the artist we annotate by default
x,y = self.xytext = xytext
Text.__init__(self, x, y, s, **kwargs)
self.xy = xy
self.xycoords = xycoords
self.textcoords = textcoords
self.arrowprops = arrowprops
self.arrow = None
if arrowprops and "arrowstyle" in arrowprops:
self._arrow_relpos = arrowprops.pop("relpos", (0.5, 0.5))
self.arrow_patch = FancyArrowPatch((0, 0), (1,1),
**arrowprops)
else:
self.arrow_patch = None
__init__.__doc__ = cbook.dedent(__init__.__doc__) % artist.kwdocd
def contains(self,event):
t,tinfo = Text.contains(self,event)
if self.arrow is not None:
a,ainfo=self.arrow.contains(event)
t = t or a
# self.arrow_patch is currently not checked as this can be a line - JJ
return t,tinfo
def set_figure(self, fig):
if self.arrow is not None:
self.arrow.set_figure(fig)
if self.arrow_patch is not None:
self.arrow_patch.set_figure(fig)
Artist.set_figure(self, fig)
def _get_xy(self, x, y, s):
if s=='data':
trans = self.axes.transData
x = float(self.convert_xunits(x))
y = float(self.convert_yunits(y))
return trans.transform_point((x, y))
elif s=='offset points':
# convert the data point
dx, dy = self.xy
# prevent recursion
if self.xycoords == 'offset points':
return self._get_xy(dx, dy, 'data')
dx, dy = self._get_xy(dx, dy, self.xycoords)
# convert the offset
dpi = self.figure.get_dpi()
x *= dpi/72.
y *= dpi/72.
# add the offset to the data point
x += dx
y += dy
return x, y
elif s=='polar':
theta, r = x, y
x = r*np.cos(theta)
y = r*np.sin(theta)
trans = self.axes.transData
return trans.transform_point((x,y))
elif s=='figure points':
#points from the lower left corner of the figure
dpi = self.figure.dpi
l,b,w,h = self.figure.bbox.bounds
r = l+w
t = b+h
x *= dpi/72.
y *= dpi/72.
if x<0:
x = r + x
if y<0:
y = t + y
return x,y
elif s=='figure pixels':
#pixels from the lower left corner of the figure
l,b,w,h = self.figure.bbox.bounds
r = l+w
t = b+h
if x<0:
x = r + x
if y<0:
y = t + y
return x, y
elif s=='figure fraction':
#(0,0) is lower left, (1,1) is upper right of figure
trans = self.figure.transFigure
return trans.transform_point((x,y))
elif s=='axes points':
#points from the lower left corner of the axes
dpi = self.figure.dpi
l,b,w,h = self.axes.bbox.bounds
r = l+w
t = b+h
if x<0:
x = r + x*dpi/72.
else:
x = l + x*dpi/72.
if y<0:
y = t + y*dpi/72.
else:
y = b + y*dpi/72.
return x, y
elif s=='axes pixels':
#pixels from the lower left corner of the axes
l,b,w,h = self.axes.bbox.bounds
r = l+w
t = b+h
if x<0:
x = r + x
else:
x = l + x
if y<0:
y = t + y
else:
y = b + y
return x, y
elif s=='axes fraction':
#(0,0) is lower left, (1,1) is upper right of axes
trans = self.axes.transAxes
return trans.transform_point((x, y))
def update_positions(self, renderer):
x, y = self.xytext
self._x, self._y = self._get_xy(x, y, self.textcoords)
x, y = self.xy
x, y = self._get_xy(x, y, self.xycoords)
ox0, oy0 = self._x, self._y
ox1, oy1 = x, y
if self.arrowprops:
x0, y0 = x, y
l,b,w,h = self.get_window_extent(renderer).bounds
r = l+w
t = b+h
xc = 0.5*(l+r)
yc = 0.5*(b+t)
d = self.arrowprops.copy()
# Use FancyArrowPatch if self.arrowprops has "arrowstyle" key.
# Otherwise, fallback to YAArrow.
#if d.has_key("arrowstyle"):
if self.arrow_patch:
# adjust the starting point of the arrow relative to
# the textbox.
# TODO : Rotation needs to be accounted for.
relpos = self._arrow_relpos
bbox = self.get_window_extent(renderer)
ox0 = bbox.x0 + bbox.width * relpos[0]
oy0 = bbox.y0 + bbox.height * relpos[1]
# The arrow will be drawn from (ox0, oy0) to (ox1,
# oy1). It will be first clipped by patchA and patchB.
# Then it will be shrunk by shrinkA and shrinkB
# (in points). If patchA is not set, self.bbox_patch
# is used.
self.arrow_patch.set_positions((ox0, oy0), (ox1,oy1))
mutation_scale = d.pop("mutation_scale", self.get_size())
mutation_scale = renderer.points_to_pixels(mutation_scale)
self.arrow_patch.set_mutation_scale(mutation_scale)
if self._bbox_patch:
patchA = d.pop("patchA", self._bbox_patch)
self.arrow_patch.set_patchA(patchA)
else:
patchA = d.pop("patchA", self._bbox)
self.arrow_patch.set_patchA(patchA)
else:
# pick the x,y corner of the text bbox closest to point
# annotated
dsu = [(abs(val-x0), val) for val in (l, r, xc)]
dsu.sort()
_, x = dsu[0]
dsu = [(abs(val-y0), val) for val in (b, t, yc)]
dsu.sort()
_, y = dsu[0]
shrink = d.pop('shrink', 0.0)
theta = math.atan2(y-y0, x-x0)
r = math.sqrt((y-y0)**2. + (x-x0)**2.)
dx = shrink*r*math.cos(theta)
dy = shrink*r*math.sin(theta)
width = d.pop('width', 4)
headwidth = d.pop('headwidth', 12)
frac = d.pop('frac', 0.1)
self.arrow = YAArrow(self.figure, (x0+dx,y0+dy), (x-dx, y-dy),
width=width, headwidth=headwidth, frac=frac,
**d)
self.arrow.set_clip_box(self.get_clip_box())
def draw(self, renderer):
"""
Draw the :class:`Annotation` object to the given *renderer*.
"""
self.update_positions(renderer)
self.update_bbox_position_size(renderer)
if self.arrow is not None:
if self.arrow.figure is None and self.figure is not None:
self.arrow.figure = self.figure
self.arrow.draw(renderer)
if self.arrow_patch is not None:
if self.arrow_patch.figure is None and self.figure is not None:
self.arrow_patch.figure = self.figure
self.arrow_patch.draw(renderer)
Text.draw(self, renderer)
artist.kwdocd['Annotation'] = Annotation.__init__.__doc__
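# --- Editor's note: hedged usage sketch, not part of the original module ---
# The Annotation class above is normally reached through Axes.annotate rather
# than instantiated directly.  A minimal sketch of the documented coordinate
# systems and arrowprops keys (figure/axes names are illustrative only):
#
#     import matplotlib.pyplot as plt
#     fig, ax = plt.subplots()
#     ax.plot([0, 1, 2], [0, 1, 4])
#     # 'data' coordinates for xy, an offset in points for the text, and the
#     # YAArrow branch selected by the width/frac/headwidth/shrink keys.
#     ax.annotate('local max', xy=(2, 4), xycoords='data',
#                 xytext=(10, -5), textcoords='offset points',
#                 arrowprops=dict(width=2, headwidth=8, frac=0.1, shrink=0.05))
#     # Passing an 'arrowstyle' key instead selects the FancyArrowPatch branch.
#     ax.annotate('origin', xy=(0, 0), xytext=(0.5, 2),
#                 arrowprops=dict(arrowstyle='->'))
#     plt.show()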
|
agpl-3.0
|
lazywei/scikit-learn
|
benchmarks/bench_plot_neighbors.py
|
287
|
6433
|
"""
Plot the scaling of the nearest neighbors algorithms with k, D, and N
"""
from time import time
import numpy as np
import pylab as pl
from matplotlib import ticker
from sklearn import neighbors, datasets
def get_data(N, D, dataset='dense'):
if dataset == 'dense':
np.random.seed(0)
return np.random.random((N, D))
elif dataset == 'digits':
X = datasets.load_digits().data
i = np.argsort(X[0])[::-1]
X = X[:, i]
return X[:N, :D]
else:
raise ValueError("invalid dataset: %s" % dataset)
def barplot_neighbors(Nrange=2 ** np.arange(1, 11),
Drange=2 ** np.arange(7),
krange=2 ** np.arange(10),
N=1000,
D=64,
k=5,
leaf_size=30,
dataset='digits'):
algorithms = ('kd_tree', 'brute', 'ball_tree')
fiducial_values = {'N': N,
'D': D,
'k': k}
#------------------------------------------------------------
# varying N
N_results_build = dict([(alg, np.zeros(len(Nrange)))
for alg in algorithms])
N_results_query = dict([(alg, np.zeros(len(Nrange)))
for alg in algorithms])
for i, NN in enumerate(Nrange):
print("N = %i (%i out of %i)" % (NN, i + 1, len(Nrange)))
X = get_data(NN, D, dataset)
for algorithm in algorithms:
nbrs = neighbors.NearestNeighbors(n_neighbors=min(NN, k),
algorithm=algorithm,
leaf_size=leaf_size)
t0 = time()
nbrs.fit(X)
t1 = time()
nbrs.kneighbors(X)
t2 = time()
N_results_build[algorithm][i] = (t1 - t0)
N_results_query[algorithm][i] = (t2 - t1)
#------------------------------------------------------------
# varying D
D_results_build = dict([(alg, np.zeros(len(Drange)))
for alg in algorithms])
D_results_query = dict([(alg, np.zeros(len(Drange)))
for alg in algorithms])
for i, DD in enumerate(Drange):
print("D = %i (%i out of %i)" % (DD, i + 1, len(Drange)))
X = get_data(N, DD, dataset)
for algorithm in algorithms:
nbrs = neighbors.NearestNeighbors(n_neighbors=k,
algorithm=algorithm,
leaf_size=leaf_size)
t0 = time()
nbrs.fit(X)
t1 = time()
nbrs.kneighbors(X)
t2 = time()
D_results_build[algorithm][i] = (t1 - t0)
D_results_query[algorithm][i] = (t2 - t1)
#------------------------------------------------------------
# varying k
k_results_build = dict([(alg, np.zeros(len(krange)))
for alg in algorithms])
k_results_query = dict([(alg, np.zeros(len(krange)))
for alg in algorithms])
X = get_data(N, D, dataset)  # use the fiducial D here (DD is left over from the previous loop)
for i, kk in enumerate(krange):
print("k = %i (%i out of %i)" % (kk, i + 1, len(krange)))
for algorithm in algorithms:
nbrs = neighbors.NearestNeighbors(n_neighbors=kk,
algorithm=algorithm,
leaf_size=leaf_size)
t0 = time()
nbrs.fit(X)
t1 = time()
nbrs.kneighbors(X)
t2 = time()
k_results_build[algorithm][i] = (t1 - t0)
k_results_query[algorithm][i] = (t2 - t1)
pl.figure(figsize=(8, 11))
for (sbplt, vals, quantity,
build_time, query_time) in [(311, Nrange, 'N',
N_results_build,
N_results_query),
(312, Drange, 'D',
D_results_build,
D_results_query),
(313, krange, 'k',
k_results_build,
k_results_query)]:
ax = pl.subplot(sbplt, yscale='log')
pl.grid(True)
tick_vals = []
tick_labels = []
bottom = 10 ** np.min([min(np.floor(np.log10(build_time[alg])))
for alg in algorithms])
for i, alg in enumerate(algorithms):
xvals = 0.1 + i * (1 + len(vals)) + np.arange(len(vals))
width = 0.8
c_bar = pl.bar(xvals, build_time[alg] - bottom,
width, bottom, color='r')
q_bar = pl.bar(xvals, query_time[alg],
width, build_time[alg], color='b')
tick_vals += list(xvals + 0.5 * width)
tick_labels += ['%i' % val for val in vals]
pl.text((i + 0.02) / len(algorithms), 0.98, alg,
transform=ax.transAxes,
ha='left',
va='top',
bbox=dict(facecolor='w', edgecolor='w', alpha=0.5))
pl.ylabel('Time (s)')
ax.xaxis.set_major_locator(ticker.FixedLocator(tick_vals))
ax.xaxis.set_major_formatter(ticker.FixedFormatter(tick_labels))
for label in ax.get_xticklabels():
label.set_rotation(-90)
label.set_fontsize(10)
title_string = 'Varying %s' % quantity
descr_string = ''
for s in 'NDk':
if s == quantity:
pass
else:
descr_string += '%s = %i, ' % (s, fiducial_values[s])
descr_string = descr_string[:-2]
pl.text(1.01, 0.5, title_string,
transform=ax.transAxes, rotation=-90,
ha='left', va='center', fontsize=20)
pl.text(0.99, 0.5, descr_string,
transform=ax.transAxes, rotation=-90,
ha='right', va='center')
pl.gcf().suptitle("%s data set" % dataset.capitalize(), fontsize=16)
pl.figlegend((c_bar, q_bar), ('construction', 'N-point query'),
'upper right')
if __name__ == '__main__':
barplot_neighbors(dataset='digits')
barplot_neighbors(dataset='dense')
pl.show()
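# --- Editor's note: hedged sketch of the timing pattern used above ---
# Each loop times construction (fit) and query (kneighbors) separately.  A
# standalone version, assuming the imports and the get_data helper defined in
# this file, would look like:
#
#     X = get_data(1000, 64, 'digits')
#     nbrs = neighbors.NearestNeighbors(n_neighbors=5, algorithm='ball_tree',
#                                       leaf_size=30)
#     t0 = time(); nbrs.fit(X); t1 = time()
#     nbrs.kneighbors(X); t2 = time()
#     print("build: %.3fs, query: %.3fs" % (t1 - t0, t2 - t1))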
|
bsd-3-clause
|
justincassidy/scikit-learn
|
sklearn/covariance/tests/test_graph_lasso.py
|
272
|
5245
|
""" Test the graph_lasso module.
"""
import sys
import numpy as np
from scipy import linalg
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_less
from sklearn.covariance import (graph_lasso, GraphLasso, GraphLassoCV,
empirical_covariance)
from sklearn.datasets.samples_generator import make_sparse_spd_matrix
from sklearn.externals.six.moves import StringIO
from sklearn.utils import check_random_state
from sklearn import datasets
def test_graph_lasso(random_state=0):
# Sample data from a sparse multivariate normal
dim = 20
n_samples = 100
random_state = check_random_state(random_state)
prec = make_sparse_spd_matrix(dim, alpha=.95,
random_state=random_state)
cov = linalg.inv(prec)
X = random_state.multivariate_normal(np.zeros(dim), cov, size=n_samples)
emp_cov = empirical_covariance(X)
for alpha in (0., .1, .25):
covs = dict()
icovs = dict()
for method in ('cd', 'lars'):
cov_, icov_, costs = graph_lasso(emp_cov, alpha=alpha, mode=method,
return_costs=True)
covs[method] = cov_
icovs[method] = icov_
costs, dual_gap = np.array(costs).T
# Check that the costs always decrease (doesn't hold if alpha == 0)
if not alpha == 0:
assert_array_less(np.diff(costs), 0)
# Check that the 2 approaches give similar results
assert_array_almost_equal(covs['cd'], covs['lars'], decimal=4)
assert_array_almost_equal(icovs['cd'], icovs['lars'], decimal=4)
# Smoke test the estimator
model = GraphLasso(alpha=.25).fit(X)
model.score(X)
assert_array_almost_equal(model.covariance_, covs['cd'], decimal=4)
assert_array_almost_equal(model.covariance_, covs['lars'], decimal=4)
# For a centered matrix, assume_centered could be chosen True or False
# Check that this returns indeed the same result for centered data
Z = X - X.mean(0)
precs = list()
for assume_centered in (False, True):
prec_ = GraphLasso(assume_centered=assume_centered).fit(Z).precision_
precs.append(prec_)
assert_array_almost_equal(precs[0], precs[1])
def test_graph_lasso_iris():
# Hard-coded solution from R glasso package for alpha=1.0
# The iris datasets in R and sklearn do not match in a few places, these
# values are for the sklearn version
cov_R = np.array([
[0.68112222, 0.0, 0.2651911, 0.02467558],
[0.00, 0.1867507, 0.0, 0.00],
[0.26519111, 0.0, 3.0924249, 0.28774489],
[0.02467558, 0.0, 0.2877449, 0.57853156]
])
icov_R = np.array([
[1.5188780, 0.0, -0.1302515, 0.0],
[0.0, 5.354733, 0.0, 0.0],
[-0.1302515, 0.0, 0.3502322, -0.1686399],
[0.0, 0.0, -0.1686399, 1.8123908]
])
X = datasets.load_iris().data
emp_cov = empirical_covariance(X)
for method in ('cd', 'lars'):
cov, icov = graph_lasso(emp_cov, alpha=1.0, return_costs=False,
mode=method)
assert_array_almost_equal(cov, cov_R)
assert_array_almost_equal(icov, icov_R)
def test_graph_lasso_iris_singular():
# Small subset of rows to test the rank-deficient case
# Need to choose samples such that none of the variances are zero
indices = np.arange(10, 13)
# Hard-coded solution from R glasso package for alpha=0.01
cov_R = np.array([
[0.08, 0.056666662595, 0.00229729713223, 0.00153153142149],
[0.056666662595, 0.082222222222, 0.00333333333333, 0.00222222222222],
[0.002297297132, 0.003333333333, 0.00666666666667, 0.00009009009009],
[0.001531531421, 0.002222222222, 0.00009009009009, 0.00222222222222]
])
icov_R = np.array([
[24.42244057, -16.831679593, 0.0, 0.0],
[-16.83168201, 24.351841681, -6.206896552, -12.5],
[0.0, -6.206896171, 153.103448276, 0.0],
[0.0, -12.499999143, 0.0, 462.5]
])
X = datasets.load_iris().data[indices, :]
emp_cov = empirical_covariance(X)
for method in ('cd', 'lars'):
cov, icov = graph_lasso(emp_cov, alpha=0.01, return_costs=False,
mode=method)
assert_array_almost_equal(cov, cov_R, decimal=5)
assert_array_almost_equal(icov, icov_R, decimal=5)
def test_graph_lasso_cv(random_state=1):
# Sample data from a sparse multivariate normal
dim = 5
n_samples = 6
random_state = check_random_state(random_state)
prec = make_sparse_spd_matrix(dim, alpha=.96,
random_state=random_state)
cov = linalg.inv(prec)
X = random_state.multivariate_normal(np.zeros(dim), cov, size=n_samples)
# Capture stdout, to smoke test the verbose mode
orig_stdout = sys.stdout
try:
sys.stdout = StringIO()
# We need verbose very high so that Parallel prints on stdout
GraphLassoCV(verbose=100, alphas=5, tol=1e-1).fit(X)
finally:
sys.stdout = orig_stdout
# Smoke test with specified alphas
GraphLassoCV(alphas=[0.8, 0.5], tol=1e-1, n_jobs=1).fit(X)
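# --- Editor's note: hedged usage sketch of the APIs exercised above ---
# Both entry points recover a sparse precision matrix from an empirical
# covariance; a minimal sketch reusing this module's imports:
#
#     X = datasets.load_iris().data
#     emp_cov = empirical_covariance(X)
#     cov_, prec_ = graph_lasso(emp_cov, alpha=0.1)   # functional API
#     model = GraphLasso(alpha=0.1).fit(X)            # estimator API
#     # model.covariance_ / model.precision_ should be close to cov_ / prec_.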
|
bsd-3-clause
|
MohammedWasim/scikit-learn
|
examples/cluster/plot_ward_structured_vs_unstructured.py
|
320
|
3369
|
"""
===========================================================
Hierarchical clustering: structured vs unstructured ward
===========================================================
This example builds a swiss roll dataset and runs
hierarchical clustering on the positions of the points.
For more information, see :ref:`hierarchical_clustering`.
In a first step, the hierarchical clustering is performed without connectivity
constraints on the structure and is solely based on distance, whereas in
a second step the clustering is restricted to the k-Nearest Neighbors
graph: it is a hierarchical clustering with a structure prior.
Some of the clusters learned without connectivity constraints do not
respect the structure of the swiss roll and extend across different folds of
the manifolds. In contrast, when imposing connectivity constraints,
the clusters form a nice parcellation of the swiss roll.
"""
# Authors : Vincent Michel, 2010
# Alexandre Gramfort, 2010
# Gael Varoquaux, 2010
# License: BSD 3 clause
print(__doc__)
import time as time
import numpy as np
import matplotlib.pyplot as plt
import mpl_toolkits.mplot3d.axes3d as p3
from sklearn.cluster import AgglomerativeClustering
from sklearn.datasets.samples_generator import make_swiss_roll
###############################################################################
# Generate data (swiss roll dataset)
n_samples = 1500
noise = 0.05
X, _ = make_swiss_roll(n_samples, noise)
# Make it thinner
X[:, 1] *= .5
###############################################################################
# Compute clustering
print("Compute unstructured hierarchical clustering...")
st = time.time()
ward = AgglomerativeClustering(n_clusters=6, linkage='ward').fit(X)
elapsed_time = time.time() - st
label = ward.labels_
print("Elapsed time: %.2fs" % elapsed_time)
print("Number of points: %i" % label.size)
###############################################################################
# Plot result
fig = plt.figure()
ax = p3.Axes3D(fig)
ax.view_init(7, -80)
for l in np.unique(label):
ax.plot3D(X[label == l, 0], X[label == l, 1], X[label == l, 2],
'o', color=plt.cm.jet(float(l) / np.max(label + 1)))
plt.title('Without connectivity constraints (time %.2fs)' % elapsed_time)
###############################################################################
# Define the structure A of the data. Here a 10 nearest neighbors
from sklearn.neighbors import kneighbors_graph
connectivity = kneighbors_graph(X, n_neighbors=10, include_self=False)
###############################################################################
# Compute clustering
print("Compute structured hierarchical clustering...")
st = time.time()
ward = AgglomerativeClustering(n_clusters=6, connectivity=connectivity,
linkage='ward').fit(X)
elapsed_time = time.time() - st
label = ward.labels_
print("Elapsed time: %.2fs" % elapsed_time)
print("Number of points: %i" % label.size)
###############################################################################
# Plot result
fig = plt.figure()
ax = p3.Axes3D(fig)
ax.view_init(7, -80)
for l in np.unique(label):
ax.plot3D(X[label == l, 0], X[label == l, 1], X[label == l, 2],
'o', color=plt.cm.jet(float(l) / np.max(label + 1)))
plt.title('With connectivity constraints (time %.2fs)' % elapsed_time)
plt.show()
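# --- Editor's note: hedged inspection sketch ---
# kneighbors_graph returns a sparse (n_samples, n_samples) connectivity matrix;
# the structured run above differs from the unstructured one only in passing it
# to AgglomerativeClustering.  With the objects defined above:
#
#     print(connectivity.shape)   # (1500, 1500)
#     print(connectivity.nnz)     # roughly 10 stored neighbors per sample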
|
bsd-3-clause
|
drammock/mne-python
|
examples/inverse/compute_mne_inverse_epochs_in_label.py
|
20
|
4356
|
"""
==================================================
Compute MNE-dSPM inverse solution on single epochs
==================================================
Compute dSPM inverse solution on single trial epochs restricted
to a brain label.
"""
# Author: Alexandre Gramfort <[email protected]>
#
# License: BSD (3-clause)
import numpy as np
import matplotlib.pyplot as plt
import mne
from mne.datasets import sample
from mne.minimum_norm import apply_inverse_epochs, read_inverse_operator
from mne.minimum_norm import apply_inverse
print(__doc__)
data_path = sample.data_path()
fname_inv = data_path + '/MEG/sample/sample_audvis-meg-oct-6-meg-inv.fif'
fname_raw = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
fname_event = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'
label_name = 'Aud-lh'
fname_label = data_path + '/MEG/sample/labels/%s.label' % label_name
event_id, tmin, tmax = 1, -0.2, 0.5
# Using the same inverse operator when inspecting single trials Vs. evoked
snr = 3.0 # Standard assumption for average data but using it for single trial
lambda2 = 1.0 / snr ** 2
method = "dSPM" # use dSPM method (could also be MNE or sLORETA)
# Load data
inverse_operator = read_inverse_operator(fname_inv)
label = mne.read_label(fname_label)
raw = mne.io.read_raw_fif(fname_raw)
events = mne.read_events(fname_event)
# Set up pick list
include = []
# Add a bad channel
raw.info['bads'] += ['EEG 053'] # bads + 1 more
# pick MEG channels
picks = mne.pick_types(raw.info, meg=True, eeg=False, stim=False, eog=True,
include=include, exclude='bads')
# Read epochs
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,
baseline=(None, 0), reject=dict(mag=4e-12, grad=4000e-13,
eog=150e-6))
# Get evoked data (averaging across trials in sensor space)
evoked = epochs.average()
# Compute inverse solution and stcs for each epoch
# Use the same inverse operator as with evoked data (i.e., set nave)
# If you use a different nave, dSPM just scales by a factor sqrt(nave)
stcs = apply_inverse_epochs(epochs, inverse_operator, lambda2, method, label,
pick_ori="normal", nave=evoked.nave)
# Mean across trials but not across vertices in label
mean_stc = sum(stcs) / len(stcs)
# compute sign flip to avoid signal cancellation when averaging signed values
flip = mne.label_sign_flip(label, inverse_operator['src'])
label_mean = np.mean(mean_stc.data, axis=0)
label_mean_flip = np.mean(flip[:, np.newaxis] * mean_stc.data, axis=0)
# Get inverse solution by inverting evoked data
stc_evoked = apply_inverse(evoked, inverse_operator, lambda2, method,
pick_ori="normal")
# apply_inverse() does whole brain, so sub-select label of interest
stc_evoked_label = stc_evoked.in_label(label)
# Average over label (not caring to align polarities here)
label_mean_evoked = np.mean(stc_evoked_label.data, axis=0)
###############################################################################
# View activation time-series to illustrate the benefit of aligning/flipping
times = 1e3 * stcs[0].times # times in ms
plt.figure()
h0 = plt.plot(times, mean_stc.data.T, 'k')
h1, = plt.plot(times, label_mean, 'r', linewidth=3)
h2, = plt.plot(times, label_mean_flip, 'g', linewidth=3)
plt.legend((h0[0], h1, h2), ('all dipoles in label', 'mean',
'mean with sign flip'))
plt.xlabel('time (ms)')
plt.ylabel('dSPM value')
plt.show()
###############################################################################
# Viewing single trial dSPM and average dSPM for unflipped pooling over label
# Compare to (1) Inverse (dSPM) then average, (2) Evoked then dSPM
# Single trial
plt.figure()
for k, stc_trial in enumerate(stcs):
plt.plot(times, np.mean(stc_trial.data, axis=0).T, 'k--',
label='Single Trials' if k == 0 else '_nolegend_',
alpha=0.5)
# Single trial inverse then average.. making linewidth large to not be masked
plt.plot(times, label_mean, 'b', linewidth=6,
label='dSPM first, then average')
# Evoked and then inverse
plt.plot(times, label_mean_evoked, 'r', linewidth=2,
label='Average first, then dSPM')
plt.xlabel('time (ms)')
plt.ylabel('dSPM value')
plt.legend()
plt.show()
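# --- Editor's note: toy illustration of why the sign flip above matters ---
# Source orientations within a label can have opposite polarity, so a plain
# mean across vertices cancels signal; flipping first preserves it:
#
#     data = np.array([[1., 2., 3.],
#                      [-1., -2., -3.]])                  # two sources, opposite sign
#     print(np.mean(data, axis=0))                        # -> [0. 0. 0.] (cancellation)
#     flip = np.array([1., -1.])
#     print(np.mean(flip[:, np.newaxis] * data, axis=0))  # -> [1. 2. 3.]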
|
bsd-3-clause
|
IshankGulati/scikit-learn
|
sklearn/utils/graph.py
|
24
|
6326
|
"""
Graph utilities and algorithms
Graphs are represented with their adjacency matrices, preferably using
sparse matrices.
"""
# Authors: Aric Hagberg <[email protected]>
# Gael Varoquaux <[email protected]>
# Jake Vanderplas <[email protected]>
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from .validation import check_array
from .graph_shortest_path import graph_shortest_path
###############################################################################
# Path and connected component analysis.
# Code adapted from networkx
def single_source_shortest_path_length(graph, source, cutoff=None):
"""Return the shortest path length from source to all reachable nodes.
Returns a dictionary of shortest path lengths keyed by target.
Parameters
----------
graph : sparse matrix or 2D array (preferably LIL matrix)
Adjacency matrix of the graph
source : node label
Starting node for path
cutoff : integer, optional
Depth to stop the search - only
paths of length <= cutoff are returned.
Examples
--------
>>> from sklearn.utils.graph import single_source_shortest_path_length
>>> import numpy as np
>>> graph = np.array([[ 0, 1, 0, 0],
... [ 1, 0, 1, 0],
... [ 0, 1, 0, 1],
... [ 0, 0, 1, 0]])
>>> list(sorted(single_source_shortest_path_length(graph, 0).items()))
[(0, 0), (1, 1), (2, 2), (3, 3)]
>>> graph = np.ones((6, 6))
>>> list(sorted(single_source_shortest_path_length(graph, 2).items()))
[(0, 1), (1, 1), (2, 0), (3, 1), (4, 1), (5, 1)]
"""
if sparse.isspmatrix(graph):
graph = graph.tolil()
else:
graph = sparse.lil_matrix(graph)
seen = {} # level (number of hops) when seen in BFS
level = 0 # the current level
next_level = [source]  # list of nodes to check at next level
while next_level:
this_level = next_level  # advance to next level
next_level = set()  # and start a new set (fringe)
for v in this_level:
if v not in seen:
seen[v] = level # set the level of vertex v
next_level.update(graph.rows[v])
if cutoff is not None and cutoff <= level:
break
level += 1
return seen # return all path lengths as dictionary
if hasattr(sparse, 'connected_components'):
connected_components = sparse.connected_components
else:
from .sparsetools import connected_components
###############################################################################
# Graph laplacian
def graph_laplacian(csgraph, normed=False, return_diag=False):
""" Return the Laplacian matrix of a directed graph.
For non-symmetric graphs the out-degree is used in the computation.
Parameters
----------
csgraph : array_like or sparse matrix, 2 dimensions
compressed-sparse graph, with shape (N, N).
normed : bool, optional
If True, then compute normalized Laplacian.
return_diag : bool, optional
If True, then return diagonal as well as laplacian.
Returns
-------
lap : ndarray
The N x N laplacian matrix of graph.
diag : ndarray
The length-N diagonal of the laplacian matrix.
diag is returned only if return_diag is True.
Notes
-----
The Laplacian matrix of a graph is sometimes referred to as the
"Kirchoff matrix" or the "admittance matrix", and is useful in many
parts of spectral graph theory. In particular, the eigen-decomposition
of the laplacian matrix can give insight into many properties of the graph.
For non-symmetric directed graphs, the laplacian is computed using the
out-degree of each node.
"""
if csgraph.ndim != 2 or csgraph.shape[0] != csgraph.shape[1]:
raise ValueError('csgraph must be a square matrix or array')
if normed and (np.issubdtype(csgraph.dtype, np.int)
or np.issubdtype(csgraph.dtype, np.uint)):
csgraph = check_array(csgraph, dtype=np.float64, accept_sparse=True)
if sparse.isspmatrix(csgraph):
return _laplacian_sparse(csgraph, normed=normed,
return_diag=return_diag)
else:
return _laplacian_dense(csgraph, normed=normed,
return_diag=return_diag)
def _laplacian_sparse(graph, normed=False, return_diag=False):
n_nodes = graph.shape[0]
if not graph.format == 'coo':
lap = (-graph).tocoo()
else:
lap = -graph.copy()
diag_mask = (lap.row == lap.col)
if not diag_mask.sum() == n_nodes:
# The sparsity pattern of the matrix has holes on the diagonal,
# we need to fix that
diag_idx = lap.row[diag_mask]
diagonal_holes = list(set(range(n_nodes)).difference(diag_idx))
new_data = np.concatenate([lap.data, np.ones(len(diagonal_holes))])
new_row = np.concatenate([lap.row, diagonal_holes])
new_col = np.concatenate([lap.col, diagonal_holes])
lap = sparse.coo_matrix((new_data, (new_row, new_col)),
shape=lap.shape)
diag_mask = (lap.row == lap.col)
lap.data[diag_mask] = 0
w = -np.asarray(lap.sum(axis=1)).squeeze()
if normed:
w = np.sqrt(w)
w_zeros = (w == 0)
w[w_zeros] = 1
lap.data /= w[lap.row]
lap.data /= w[lap.col]
lap.data[diag_mask] = (1 - w_zeros[lap.row[diag_mask]]).astype(
lap.data.dtype)
else:
lap.data[diag_mask] = w[lap.row[diag_mask]]
if return_diag:
return lap, w
return lap
def _laplacian_dense(graph, normed=False, return_diag=False):
n_nodes = graph.shape[0]
lap = -np.asarray(graph) # minus sign leads to a copy
# set diagonal to zero
lap.flat[::n_nodes + 1] = 0
w = -lap.sum(axis=0)
if normed:
w = np.sqrt(w)
w_zeros = (w == 0)
w[w_zeros] = 1
lap /= w
lap /= w[:, np.newaxis]
lap.flat[::n_nodes + 1] = (1 - w_zeros).astype(lap.dtype)
else:
lap.flat[::n_nodes + 1] = w.astype(lap.dtype)
if return_diag:
return lap, w
return lap
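###############################################################################
# Editor's note: hedged usage sketch (not part of the public API)
# For an undirected graph the unnormalized Laplacian is L = D - A, so each row
# sums to zero.  A quick check using the helpers above:
#
#     A = np.array([[0., 1., 0.],
#                   [1., 0., 1.],
#                   [0., 1., 0.]])
#     lap = graph_laplacian(A)                       # dense code path
#     # np.allclose(lap.sum(axis=1), 0) -> True
#     lap_n, diag = graph_laplacian(A, normed=True, return_diag=True)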
|
bsd-3-clause
|
msschwartz21/craniumPy
|
deltascope/alignment.py
|
1
|
13684
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import os
import re
import h5py
import deltascope as cranium
def read_h5prob_to_dict(fdir):
'''
Read in hdf5 array from probability file into dictionary
:param str fdir: Filepath to directory
:return: Dictionary with filenumber as key and raw data as item
'''
D = {}
for f in os.listdir(fdir):
if 'h5' in f:
num = re.findall(r'\d+',f.split('.')[0])[1]
hf = h5py.File(os.path.join(fdir, f), 'r')  # open read-only; avoid shadowing the builtin name
D[num] = hf.get('exported_data')
return(D)
def preprocess(fpath,p,stop=None,pca=None,mm=None,vertex=None):
'''
Generate brain object and process until `stop` is triggered
:param str fpath: Filepath to the data file
:param dict p: Dictionary of processing parameters ('gthresh', 'scale', 'microns', 'mthresh', 'radius', 'comp_order', 'fit_dim', 'deg')
:param str stop: One of 'df_thresh', 'median', or 'df_align'
:return: Cranium brain object
'''
b = cranium.brain()
b.read_data(fpath)
b.preprocess_data(p['gthresh'],p['scale'],p['microns'])
if stop == 'df_thresh':
return(b)
if pca is None:
b.calculate_pca_median(b.raw_data,p['mthresh'],p['radius'],p['microns'])
pca = b.pcamed
if stop == 'median':
return(b)
b.pca_transform_3d(b.df_thresh,pca,p['comp_order'],p['fit_dim'],deg=p['deg'],mm=mm,vertex=vertex)
if (stop == 'df_align') or (stop is None):
return(b)
def plot_lines(ax,i):
'''
Plots origin lines on graph
:param plt.subplots ax: Subplot array with minimum dimension [2,3]
:param int i: Index of row that should be labeled
'''
ax[i,0].axhline(0,c='y')
ax[i,0].axvline(0,c='r')
ax[i,1].axhline(0,c='y')
ax[i,1].axvline(0,c='c')
ax[i,2].axhline(0,c='c')
ax[i,2].axvline(0,c='r')
def scatter_df(ax,i,df):
'''
Scatter plot of df with projections in all 3 axes
:param plt.subplots ax: Subplot array with minimum dimension [2,3]
:param int i: Index of row that should be labeled
:param pd.DataFrame df: Dataframe containing 'x','y','z' columns preferably downsampled for faster plotting
'''
ax[i,0].scatter(df.x,df.y,s=10)
ax[i,1].scatter(df.x,df.z,s=10)
ax[i,2].scatter(df.z,df.y,s=10)
def imshow_arr(ax,i,arr,f):
'''
Plot projections of a 3D image array along all 3 axes according to the supplied function f
:param plt.subplots ax: Subplot array with minimum dimension [2,3]
:param int i: Index of row that should be labeled
:param np.array arr: 3D image array
:param np.fxn f: Numpy function that takes an array and an axis value as parameters, usually np.min or np.max
'''
ax[i,0].imshow(f(arr,axis=0),cmap='Greys')
ax[i,1].imshow(f(arr,axis=1),cmap='Greys')
ax[i,2].imshow(f(arr,axis=2),cmap='Greys')
def rotate(df,A):
'''
Transform dataframe according to matrix A
:param pd.DataFrame df: Dataframe containing 'x','y','z'
:param np.array A: 3x3 array containing transformation matrix
:return: New dataframe with transformed data
'''
rt = np.dot(np.array(df[['x','y','z']]),A)
dfo = pd.DataFrame({'x':rt[:,0],'y':rt[:,1],'z':rt[:,2]})
return(dfo)
def save_at(k,df,outdir,expname):
'''
Save dataframe as psi file in outdir
:param str k: Sample number
:param pd.DataFrame df: Dataframe containing 'x','y','z'
:param str outdir: Filepath to destination directory for psi
:param str expname: Descriptive name for experiment to be used in filename
'''
cranium.write_data(os.path.join(outdir,'AT_'+k+'_'+expname+'.psi'),df)
def save_zrf(k,df,outdir,expname):
'''
Save dataframe as psi file in outdir
:param str k: Sample number
:param pd.DataFrame df: Dataframe containing 'x','y','z'
:param str outdir: Filepath to destination directory for psi
:param str expname: Descriptive name for experiment to be used in filename
'''
cranium.write_data(os.path.join(outdir,'ZRF_'+k+'_'+expname+'.psi'),df)
def save_both(k,dfa,dfz,outdir,expname):
'''
Save AT and Zrf dataframes as psi files in outdir
:param str k: Sample number
:param pd.DataFrame dfa: AT dataframe containing 'x','y','z'
:param pd.DataFrame dfz: Zrf dataframe containing 'x','y','z'
:param str outdir: Filepath to destination directory for psi
:param str expname: Descriptive name for experiment to be used in filename
'''
save_at(k,dfa,outdir,expname)
save_zrf(k,dfz,outdir,expname)
def save_caax(k,df,outdir,expname):
cranium.write_data(os.path.join(outdir,'caax_'+k+'_'+expname+'.psi'),df)
def save_all(k,dfa,dfz,dfc,outdir,expname):
'''
Save AT, Zrf, and caax dataframes as psi files in outdir
:param str k: Sample number
:param pd.DataFrame dfa: AT dataframe containing 'x','y','z'
:param pd.DataFrame dfz: Zrf dataframe containing 'x','y','z'
:param pd.DataFrame dfc: Caax dataframe containing 'x','y','z'
:param str outdir: Filepath to destination directory for psi
:param str expname: Descriptive name for experiment to be used in filename
'''
save_at(k,dfa,outdir,expname)
save_zrf(k,dfz,outdir,expname)
save_caax(k,dfc,outdir,expname)
######### Alignment functions ###############
def calc_rotation(pts,d):
'''
Calculate a rotation matrix A based on two anchor points
:param pd.DataFrame pts: Two points in x and the dimension d
:param str d: Second dimension of the two points: 'z' or 'y'
:return: midpoint, rotation matrix
'''
dx = (pts.iloc[0].x - pts.iloc[1].x)/2
dy = (pts.iloc[0][d] - pts.iloc[1][d])/2
mp = [pts.iloc[1].x + dx, pts.iloc[1][d] + dy]
phi = np.arctan2(dy,dx)
if phi > np.pi/2:
phi = -(np.pi - phi)
if d == 'y':
A = np.array([[np.cos(phi),-np.sin(phi),0],
[np.sin(phi),np.cos(phi),0],
[0,0,1]])
else:
A = np.array([[np.cos(phi),0,-np.sin(phi)],
[0,1,0],
[np.sin(phi),0,np.cos(phi)]])
return(mp,A)
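# --- Editor's note: hedged usage sketch for calc_rotation/rotate ---
# `calc_rotation` takes a two-row dataframe of anchor points and returns their
# midpoint plus a 3x3 matrix that levels them; `rotate` applies the matrix.
# Anchor values below are made up for illustration:
#
#     pts = pd.DataFrame({'x': [-10.0, 10.0], 'y': [2.0, -2.0]})
#     mp, A = calc_rotation(pts, 'y')
#     df_level = rotate(df, A)   # `df` is any dataframe with 'x','y','z' columns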
def xrotate(df,Ldf,d,pts=None):
'''
Define two anchor points and rotate data so that the anchor points are level
:param pd.DataFrame df: Dataframe that will be used to calculate the anchor points and rotation
:param list Ldf: List of additional dataframes that should be rotated based on the calculation from `df`
:param str d: Second dimension of the plane that needs to be rotated: 'z' or 'y'
:param pd.DataFrame pts: Two points in x and the dimension d
:return: `df` after rotation, a list of rotated dataframes from `Ldf`, `pts`
'''
if pts is None:
pt1,pt2 = cranium.find_anchors(df,d)
pts = pd.DataFrame({'x':[pt1['x'],pt2['x']],d:[pt1[d],pt2[d]]})
mp,A = calc_rotation(pts,d)
out = rotate(df,A)
Lout = []
for other in Ldf:
Lout.append(rotate(other, A))
return(out,Lout,pts)
def zyswitch(df,Ldf):
'''
Switch z and y axes by reassigning z to y and vice versa
:param pd.DataFrame df: Primary dataframe whose z and y axes will be switched
:param list Ldf: List of additional dataframes whose z and y axes should also be switched
:return: `df` and `Ldf` after exchanging z and y
'''
out = pd.DataFrame({'x':df.x,'y':df.z,'z':df.y})
Lout = []
for d in Ldf:
Lout.append(pd.DataFrame({'x':d.x,'y':d.z,'z':d.y}))
return(out,Lout)
def vertex(df,Ldf,pts=None):
'''
Calculate the vertex in the XZ plane and shift the vertex to the origin
:param pd.DataFrame df: Dataframe that will be used to calculate the vertex
:param list Ldf: List of additional dataframes that should be translated based on the vertex calculated from `df`
:param pd.DataFrame pts: Optional points used for the polynomial fit instead of `df`
:return: `df` and `Ldf` after translation of the vertex to the origin
'''
if pts is not None:
cf = np.polyfit(pts.x,pts.z,2)
else:
cf = np.polyfit(df.x,df.z,2)
x = -cf[1]/(2*cf[0])
z = np.poly1d(cf)(x)
y = np.mean(df.y)
out = pd.DataFrame({
'x':df.x-x,
'y':df.y-y,
'z':df.z-z
})
Lout = []
for d in Ldf:
Lout.append(pd.DataFrame({
'x':d.x-x,
'y':d.y-y,
'z':d.z-z
}))
if pts is not None:
cfout = np.polyfit(pts.x-x,pts.z-z,2)
else:
cfout = np.polyfit(out.x,out.z,2)
return(out,Lout,cfout)
def flip(df,Ldf):
'''
Rotate by 180 degrees around the x axis
:param pd.DataFrame df: Primary dataframe to rotate
:param list Ldf: List of additional dataframes that should also be rotated
:return: `df` and `Ldf` after rotation
'''
A = np.array([[1,0,0],
[0,np.cos(np.pi),-np.sin(np.pi)],
[0,np.sin(np.pi),np.cos(np.pi)]])
out = rotate(df,A)
Lout = []
for d in Ldf:
Lout.append(rotate(d,A))
return(out,Lout)
def yzrotate(df,Ldf,mm=None):
'''
Fit a line in YZ and rotate so the line is horizontal
:param pd.DataFrame df: Dataframe that will be used to calculate the anchor points and rotation
:param list Ldf: List of additional dataframes that should be rotated based on the calculation from `df`
:return: `df` and `Ldf` after rotation to correct the lines, 1d line function, range of z values for the line
'''
if mm is None:
mm = np.polyfit(df.z,df.y,1)
p = np.poly1d(mm)
xrange = np.arange(1.1*np.min(df.z), 1.1*np.max(df.z))
phi = -np.arctan(mm[0])
A = np.array([[1,0,0],
[0,np.cos(phi),-np.sin(phi)],
[0,np.sin(phi),np.cos(phi)]])
out = rotate(df,A)
Lout = []
for d in Ldf:
Lout.append(rotate(d,A))
return(out,Lout,p,xrange)
def check_yz(df,Ldf,mm=None):
'''
Fit line in YZ and rotate line to be horizontal using `yzrotate` then display graph
:param pd.DataFrame df: Dataframe that will be used to calculate the anchor points and rotation
:param list Ldf: List of additional dataframes that should be rotated based on the calculation from `df`
:return: `df` and `Ldf` after rotation to correct the lines, array of graph subplots, 1d line function
'''
out,Lout,p,xrange = yzrotate(df,Ldf,mm=mm)
ax = make_graph([df,out]+Lout)
ax[0,2].plot(xrange,p(xrange),c='m')
return(out,Lout,ax,p)
def make_graph(Ldf,Lim=[]):
'''
Plot the three projections of each dataframe in `Ldf`
:param list Ldf: List of dataframes to plot
:param list Lim: List of 3D image arrays to plot as minimum projections
:return: Array of subplots with minimum dimension of 2x3
'''
n = len(Ldf)+len(Lim)
fig,ax = plt.subplots(n,3,subplot_kw={'aspect':'equal','adjustable':'datalim'},figsize=(12,n*4))
for i,d in enumerate(Ldf):
scatter_df(ax,i,d.sample(frac=0.1))
plot_lines(ax,i)
for i,im in enumerate(Lim):
imshow_arr(ax,len(Ldf)+i,im,np.min)
return(ax)
def start(k,Da,LD,im=False):
'''
Extract `df_align` dataframes from `cranium.brain` objects and plots using `make_graph`
:param str k: Key corresponding to the relevant sample
:param dict Da: Dictionary containing brain objects for each sample with key `k`
:param list LD: List of any additional dictionaries from which `df_align` should be extracted
:returns: key, primary dataframe, list of additional dataframes, subplot array
'''
Lim = []
Lim.append(Da[k].raw_data)
df = Da[k].df_align
Ldf = []
for D in LD:
Ldf.append(D[k].df_align)
Lim.append(D[k].raw_data)
if im==True:
ax = make_graph([df]+Ldf,Lim)
else:
ax = make_graph([df]+Ldf,[])
return(k,df,Ldf,ax)
def check_pts(df,Ldf,d):
'''
Calculates anchor points and plots before and after rotation
:param pd.DataFrame df: Dataframe that will be used to calculate the anchor points and rotation
:param list Ldf: List of additional dataframes that should be rotated based on the calculation from `df`
:param str d: Second dimension of the plane that needs to be rotated: 'z' or 'y'
:returns: Rotated `df`, list of rotated dfs from `Ldf`, anchor points, subplot array
'''
df1,Ldf1,pts = xrotate(df,Ldf,d)
ax = make_graph([df,df1]+Ldf1)
if d=='z':
ax[0,1].scatter(pts.x,pts[d],c='r')
if d=='y':
ax[0,0].scatter(pts.x,pts[d],c='r')
return(df1,Ldf1,pts,ax)
def revise_pts(df,Ldf,d,pts):
'''
Rotate and plot data based on `pts` parameter
:param pd.DataFrame df: Dataframe that will be used to calculate the anchor points and rotation
:param list Ldf: List of additional dataframes that should be rotated based on the calculation from `df`
:param str d: Second dimension of the plane that needs to be rotated: 'z' or 'y'
:param pd.DataFrame pts: Two points in x and the dimension d
:returns: Rotated `df`, list of rotated dfs from `Ldf`, subplot array
'''
df1,Ldf1,pts2 = xrotate(df,Ldf,d,pts=pts)
ax = make_graph([df,df1]+Ldf1)
if d=='z':
ax[0,1].scatter(pts2.x,pts2[d],c='r')
if d=='y':
ax[0,0].scatter(pts2.x,pts2[d],c='r')
return(df1,Ldf1,ax)
def ch_vertex(df,Ldf,pts=None):
'''
Calculate vertex based on `df` and translate all dataframes to shift vertex to origin
:param pd.DataFrame df: Dataframe that will be used to calculate the vertex
:param list Ldf: List of additional dataframes that should be translated along with `df`
:returns: Translated `df`, list of translated dataframes from `Ldf`, subplot array
'''
df1,Ldf1,mm = vertex(df,Ldf,pts=pts)
ax = make_graph([df1]+Ldf1)
xrange = np.arange(df1.x.min(),df1.x.max())
ax[0,1].plot(xrange,np.poly1d(mm)(xrange),c='m')
return(df1,Ldf1,mm,ax)
|
gpl-3.0
|
Fenrir12/Master_BCG_EEG
|
Cours/Machine Learning - UDEMY/Machine Learning A-Z/Part 2 - Regression/Section 8 - Decision Tree Regression/decision_tree_regression.py
|
6
|
1294
|
# Decision Tree Regression
# Importing the libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# Importing the dataset
dataset = pd.read_csv('Position_Salaries.csv')
X = dataset.iloc[:, 1:2].values
y = dataset.iloc[:, 2].values
# Splitting the dataset into the Training set and Test set
"""from sklearn.cross_validation import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 0)"""
# Feature Scaling
"""from sklearn.preprocessing import StandardScaler
sc_X = StandardScaler()
X_train = sc_X.fit_transform(X_train)
X_test = sc_X.transform(X_test)
sc_y = StandardScaler()
y_train = sc_y.fit_transform(y_train)"""
# Fitting Decision Tree Regression to the dataset
from sklearn.tree import DecisionTreeRegressor
regressor = DecisionTreeRegressor(random_state = 0)
regressor.fit(X, y)
# Predicting a new result
y_pred = regressor.predict([[6.5]])  # predict expects a 2-D array of shape (n_samples, n_features)
# Visualising the Decision Tree Regression results (higher resolution)
X_grid = np.arange(min(X), max(X), 0.01)
X_grid = X_grid.reshape((len(X_grid), 1))
plt.scatter(X, y, color = 'red')
plt.plot(X_grid, regressor.predict(X_grid), color = 'blue')
plt.title('Truth or Bluff (Decision Tree Regression)')
plt.xlabel('Position level')
plt.ylabel('Salary')
plt.show()
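# --- Editor's note: hedged compatibility note for newer scikit-learn ---
# The commented template blocks above target an older scikit-learn; on recent
# versions the splitter lives in model_selection and StandardScaler expects
# 2-D input, e.g.:
#
#     # from sklearn.model_selection import train_test_split   # >= 0.18
#     # y_train = sc_y.fit_transform(y_train.reshape(-1, 1))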
|
apache-2.0
|
jzt5132/scikit-learn
|
sklearn/metrics/cluster/tests/test_supervised.py
|
206
|
7643
|
import numpy as np
from sklearn.metrics.cluster import adjusted_rand_score
from sklearn.metrics.cluster import homogeneity_score
from sklearn.metrics.cluster import completeness_score
from sklearn.metrics.cluster import v_measure_score
from sklearn.metrics.cluster import homogeneity_completeness_v_measure
from sklearn.metrics.cluster import adjusted_mutual_info_score
from sklearn.metrics.cluster import normalized_mutual_info_score
from sklearn.metrics.cluster import mutual_info_score
from sklearn.metrics.cluster import expected_mutual_information
from sklearn.metrics.cluster import contingency_matrix
from sklearn.metrics.cluster import entropy
from sklearn.utils.testing import assert_raise_message
from nose.tools import assert_almost_equal
from nose.tools import assert_equal
from numpy.testing import assert_array_almost_equal
score_funcs = [
adjusted_rand_score,
homogeneity_score,
completeness_score,
v_measure_score,
adjusted_mutual_info_score,
normalized_mutual_info_score,
]
def test_error_messages_on_wrong_input():
for score_func in score_funcs:
expected = ('labels_true and labels_pred must have same size,'
' got 2 and 3')
assert_raise_message(ValueError, expected, score_func,
[0, 1], [1, 1, 1])
expected = "labels_true must be 1D: shape is (2"
assert_raise_message(ValueError, expected, score_func,
[[0, 1], [1, 0]], [1, 1, 1])
expected = "labels_pred must be 1D: shape is (2"
assert_raise_message(ValueError, expected, score_func,
[0, 1, 0], [[1, 1], [0, 0]])
def test_perfect_matches():
for score_func in score_funcs:
assert_equal(score_func([], []), 1.0)
assert_equal(score_func([0], [1]), 1.0)
assert_equal(score_func([0, 0, 0], [0, 0, 0]), 1.0)
assert_equal(score_func([0, 1, 0], [42, 7, 42]), 1.0)
assert_equal(score_func([0., 1., 0.], [42., 7., 42.]), 1.0)
assert_equal(score_func([0., 1., 2.], [42., 7., 2.]), 1.0)
assert_equal(score_func([0, 1, 2], [42, 7, 2]), 1.0)
def test_homogeneous_but_not_complete_labeling():
# homogeneous but not complete clustering
h, c, v = homogeneity_completeness_v_measure(
[0, 0, 0, 1, 1, 1],
[0, 0, 0, 1, 2, 2])
assert_almost_equal(h, 1.00, 2)
assert_almost_equal(c, 0.69, 2)
assert_almost_equal(v, 0.81, 2)
def test_complete_but_not_homogeneous_labeling():
# complete but not homogeneous clustering
h, c, v = homogeneity_completeness_v_measure(
[0, 0, 1, 1, 2, 2],
[0, 0, 1, 1, 1, 1])
assert_almost_equal(h, 0.58, 2)
assert_almost_equal(c, 1.00, 2)
assert_almost_equal(v, 0.73, 2)
def test_not_complete_and_not_homogeneous_labeling():
# neither complete nor homogeneous but not so bad either
h, c, v = homogeneity_completeness_v_measure(
[0, 0, 0, 1, 1, 1],
[0, 1, 0, 1, 2, 2])
assert_almost_equal(h, 0.67, 2)
assert_almost_equal(c, 0.42, 2)
assert_almost_equal(v, 0.52, 2)
def test_non_consecutive_labels():
# regression tests for labels with gaps
h, c, v = homogeneity_completeness_v_measure(
[0, 0, 0, 2, 2, 2],
[0, 1, 0, 1, 2, 2])
assert_almost_equal(h, 0.67, 2)
assert_almost_equal(c, 0.42, 2)
assert_almost_equal(v, 0.52, 2)
h, c, v = homogeneity_completeness_v_measure(
[0, 0, 0, 1, 1, 1],
[0, 4, 0, 4, 2, 2])
assert_almost_equal(h, 0.67, 2)
assert_almost_equal(c, 0.42, 2)
assert_almost_equal(v, 0.52, 2)
ari_1 = adjusted_rand_score([0, 0, 0, 1, 1, 1], [0, 1, 0, 1, 2, 2])
ari_2 = adjusted_rand_score([0, 0, 0, 1, 1, 1], [0, 4, 0, 4, 2, 2])
assert_almost_equal(ari_1, 0.24, 2)
assert_almost_equal(ari_2, 0.24, 2)
def uniform_labelings_scores(score_func, n_samples, k_range, n_runs=10,
seed=42):
# Compute score for random uniform cluster labelings
random_labels = np.random.RandomState(seed).random_integers
scores = np.zeros((len(k_range), n_runs))
for i, k in enumerate(k_range):
for j in range(n_runs):
labels_a = random_labels(low=0, high=k - 1, size=n_samples)
labels_b = random_labels(low=0, high=k - 1, size=n_samples)
scores[i, j] = score_func(labels_a, labels_b)
return scores
def test_adjustment_for_chance():
# Check that adjusted scores are almost zero on random labels
n_clusters_range = [2, 10, 50, 90]
n_samples = 100
n_runs = 10
scores = uniform_labelings_scores(
adjusted_rand_score, n_samples, n_clusters_range, n_runs)
max_abs_scores = np.abs(scores).max(axis=1)
assert_array_almost_equal(max_abs_scores, [0.02, 0.03, 0.03, 0.02], 2)
def test_adjusted_mutual_info_score():
# Compute the Adjusted Mutual Information and test against known values
labels_a = np.array([1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3])
labels_b = np.array([1, 1, 1, 1, 2, 1, 2, 2, 2, 2, 3, 1, 3, 3, 3, 2, 2])
# Mutual information
mi = mutual_info_score(labels_a, labels_b)
assert_almost_equal(mi, 0.41022, 5)
# Expected mutual information
C = contingency_matrix(labels_a, labels_b)
n_samples = np.sum(C)
emi = expected_mutual_information(C, n_samples)
assert_almost_equal(emi, 0.15042, 5)
# Adjusted mutual information
ami = adjusted_mutual_info_score(labels_a, labels_b)
assert_almost_equal(ami, 0.27502, 5)
ami = adjusted_mutual_info_score([1, 1, 2, 2], [2, 2, 3, 3])
assert_equal(ami, 1.0)
# Test with a very large array
a110 = np.array([list(labels_a) * 110]).flatten()
b110 = np.array([list(labels_b) * 110]).flatten()
ami = adjusted_mutual_info_score(a110, b110)
# This is not accurate to more than 2 places
assert_almost_equal(ami, 0.37, 2)
def test_entropy():
ent = entropy([0, 0, 42.])
assert_almost_equal(ent, 0.6365141, 5)
assert_almost_equal(entropy([]), 1)
def test_contingency_matrix():
labels_a = np.array([1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3])
labels_b = np.array([1, 1, 1, 1, 2, 1, 2, 2, 2, 2, 3, 1, 3, 3, 3, 2, 2])
C = contingency_matrix(labels_a, labels_b)
C2 = np.histogram2d(labels_a, labels_b,
bins=(np.arange(1, 5),
np.arange(1, 5)))[0]
assert_array_almost_equal(C, C2)
C = contingency_matrix(labels_a, labels_b, eps=.1)
assert_array_almost_equal(C, C2 + .1)
def test_exactly_zero_info_score():
# Check numerical stability when information is exactly zero
for i in np.logspace(1, 4, 4).astype(np.int):
labels_a, labels_b = np.ones(i, dtype=np.int),\
np.arange(i, dtype=np.int)
assert_equal(normalized_mutual_info_score(labels_a, labels_b), 0.0)
assert_equal(v_measure_score(labels_a, labels_b), 0.0)
assert_equal(adjusted_mutual_info_score(labels_a, labels_b), 0.0)
assert_equal(normalized_mutual_info_score(labels_a, labels_b), 0.0)
def test_v_measure_and_mutual_information(seed=36):
# Check relation between v_measure, entropy and mutual information
for i in np.logspace(1, 4, 4).astype(np.int):
random_state = np.random.RandomState(seed)
labels_a, labels_b = random_state.random_integers(0, 10, i),\
random_state.random_integers(0, 10, i)
assert_almost_equal(v_measure_score(labels_a, labels_b),
2.0 * mutual_info_score(labels_a, labels_b) /
(entropy(labels_a) + entropy(labels_b)), 0)
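# --- Editor's note: hedged worked example for the identity tested above ---
# test_v_measure_and_mutual_information relies on
#     v_measure = 2 * MI(a, b) / (H(a) + H(b)),
# which can be checked directly with the functions imported in this module:
#
#     labels_a = [0, 0, 1, 1]
#     labels_b = [0, 1, 1, 1]
#     lhs = v_measure_score(labels_a, labels_b)
#     rhs = 2.0 * mutual_info_score(labels_a, labels_b) / (
#         entropy(labels_a) + entropy(labels_b))
#     # lhs and rhs agree up to floating point error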
|
bsd-3-clause
|
jorge2703/scikit-learn
|
sklearn/metrics/tests/test_classification.py
|
83
|
49782
|
from __future__ import division, print_function
import numpy as np
from scipy import linalg
from functools import partial
from itertools import product
import warnings
from sklearn import datasets
from sklearn import svm
from sklearn.datasets import make_multilabel_classification
from sklearn.preprocessing import label_binarize
from sklearn.utils.fixes import np_version
from sklearn.utils.validation import check_random_state
from sklearn.utils.testing import assert_raises, clean_warning_registry
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_no_warnings
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import ignore_warnings
from sklearn.metrics import accuracy_score
from sklearn.metrics import average_precision_score
from sklearn.metrics import classification_report
from sklearn.metrics import cohen_kappa_score
from sklearn.metrics import confusion_matrix
from sklearn.metrics import f1_score
from sklearn.metrics import fbeta_score
from sklearn.metrics import hamming_loss
from sklearn.metrics import hinge_loss
from sklearn.metrics import jaccard_similarity_score
from sklearn.metrics import log_loss
from sklearn.metrics import matthews_corrcoef
from sklearn.metrics import precision_recall_fscore_support
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.metrics import zero_one_loss
from sklearn.metrics import brier_score_loss
from sklearn.metrics.classification import _check_targets
from sklearn.metrics.base import UndefinedMetricWarning
###############################################################################
# Utilities for testing
def make_prediction(dataset=None, binary=False):
"""Make some classification predictions on a toy dataset using a SVC
If binary is True restrict to a binary classification problem instead of a
multiclass classification problem
"""
if dataset is None:
# import some data to play with
dataset = datasets.load_iris()
X = dataset.data
y = dataset.target
if binary:
# restrict to a binary classification task
X, y = X[y < 2], y[y < 2]
n_samples, n_features = X.shape
p = np.arange(n_samples)
rng = check_random_state(37)
rng.shuffle(p)
X, y = X[p], y[p]
half = int(n_samples / 2)
# add noisy features to make the problem harder and avoid perfect results
rng = np.random.RandomState(0)
X = np.c_[X, rng.randn(n_samples, 200 * n_features)]
# run classifier, get class probabilities and label predictions
clf = svm.SVC(kernel='linear', probability=True, random_state=0)
probas_pred = clf.fit(X[:half], y[:half]).predict_proba(X[half:])
if binary:
# only interested in probabilities of the positive case
# XXX: do we really want a special API for the binary case?
probas_pred = probas_pred[:, 1]
y_pred = clf.predict(X[half:])
y_true = y[half:]
return y_true, y_pred, probas_pred
###############################################################################
# Tests
def test_multilabel_accuracy_score_subset_accuracy():
# Dense label indicator matrix format
y1 = np.array([[0, 1, 1], [1, 0, 1]])
y2 = np.array([[0, 0, 1], [1, 0, 1]])
assert_equal(accuracy_score(y1, y2), 0.5)
assert_equal(accuracy_score(y1, y1), 1)
assert_equal(accuracy_score(y2, y2), 1)
assert_equal(accuracy_score(y2, np.logical_not(y2)), 0)
assert_equal(accuracy_score(y1, np.logical_not(y1)), 0)
assert_equal(accuracy_score(y1, np.zeros(y1.shape)), 0)
assert_equal(accuracy_score(y2, np.zeros(y1.shape)), 0)
def test_precision_recall_f1_score_binary():
# Test Precision Recall and F1 Score for binary classification task
y_true, y_pred, _ = make_prediction(binary=True)
# detailed measures for each class
p, r, f, s = precision_recall_fscore_support(y_true, y_pred, average=None)
assert_array_almost_equal(p, [0.73, 0.85], 2)
assert_array_almost_equal(r, [0.88, 0.68], 2)
assert_array_almost_equal(f, [0.80, 0.76], 2)
assert_array_equal(s, [25, 25])
# individual scoring function that can be used for grid search: in the
# binary class case the score is the value of the measure for the positive
# class (e.g. label == 1). This is deprecated for average != 'binary'.
assert_dep_warning = partial(assert_warns, DeprecationWarning)
for kwargs, my_assert in [({}, assert_no_warnings),
({'average': 'binary'}, assert_no_warnings),
({'average': 'micro'}, assert_dep_warning)]:
ps = my_assert(precision_score, y_true, y_pred, **kwargs)
assert_array_almost_equal(ps, 0.85, 2)
rs = my_assert(recall_score, y_true, y_pred, **kwargs)
assert_array_almost_equal(rs, 0.68, 2)
fs = my_assert(f1_score, y_true, y_pred, **kwargs)
assert_array_almost_equal(fs, 0.76, 2)
assert_almost_equal(my_assert(fbeta_score, y_true, y_pred, beta=2,
**kwargs),
(1 + 2 ** 2) * ps * rs / (2 ** 2 * ps + rs), 2)
def test_precision_recall_f_binary_single_class():
# Test precision, recall and F1 score behave with a single positive or
# negative class
# Such a case may occur with non-stratified cross-validation
assert_equal(1., precision_score([1, 1], [1, 1]))
assert_equal(1., recall_score([1, 1], [1, 1]))
assert_equal(1., f1_score([1, 1], [1, 1]))
assert_equal(0., precision_score([-1, -1], [-1, -1]))
assert_equal(0., recall_score([-1, -1], [-1, -1]))
assert_equal(0., f1_score([-1, -1], [-1, -1]))
@ignore_warnings
def test_precision_recall_f_extra_labels():
"""Test handling of explicit additional (not in input) labels to PRF
"""
y_true = [1, 3, 3, 2]
y_pred = [1, 1, 3, 2]
y_true_bin = label_binarize(y_true, classes=np.arange(5))
y_pred_bin = label_binarize(y_pred, classes=np.arange(5))
data = [(y_true, y_pred),
(y_true_bin, y_pred_bin)]
for i, (y_true, y_pred) in enumerate(data):
# No average: zeros in array
actual = recall_score(y_true, y_pred, labels=[0, 1, 2, 3, 4],
average=None)
assert_array_almost_equal([0., 1., 1., .5, 0.], actual)
# Macro average is changed
actual = recall_score(y_true, y_pred, labels=[0, 1, 2, 3, 4],
average='macro')
assert_array_almost_equal(np.mean([0., 1., 1., .5, 0.]), actual)
# No effect otherwise
for average in ['micro', 'weighted', 'samples']:
if average == 'samples' and i == 0:
continue
assert_almost_equal(recall_score(y_true, y_pred,
labels=[0, 1, 2, 3, 4],
average=average),
recall_score(y_true, y_pred, labels=None,
average=average))
# Error when introducing invalid label in multilabel case
# (although it would only affect performance if average='macro'/None)
for average in [None, 'macro', 'micro', 'samples']:
assert_raises(ValueError, recall_score, y_true_bin, y_pred_bin,
labels=np.arange(6), average=average)
assert_raises(ValueError, recall_score, y_true_bin, y_pred_bin,
labels=np.arange(-1, 4), average=average)
@ignore_warnings
def test_precision_recall_f_ignored_labels():
"""Test a subset of labels may be requested for PRF"""
y_true = [1, 1, 2, 3]
y_pred = [1, 3, 3, 3]
y_true_bin = label_binarize(y_true, classes=np.arange(5))
y_pred_bin = label_binarize(y_pred, classes=np.arange(5))
data = [(y_true, y_pred),
(y_true_bin, y_pred_bin)]
for i, (y_true, y_pred) in enumerate(data):
recall_13 = partial(recall_score, y_true, y_pred, labels=[1, 3])
recall_all = partial(recall_score, y_true, y_pred, labels=None)
assert_array_almost_equal([.5, 1.], recall_13(average=None))
assert_almost_equal((.5 + 1.) / 2, recall_13(average='macro'))
assert_almost_equal((.5 * 2 + 1. * 1) / 3,
recall_13(average='weighted'))
assert_almost_equal(2. / 3, recall_13(average='micro'))
# ensure the above were meaningful tests:
for average in ['macro', 'weighted', 'micro']:
assert_not_equal(recall_13(average=average),
recall_all(average=average))
def test_average_precision_score_score_non_binary_class():
# Test that average_precision_score function returns an error when trying
# to compute average_precision_score for multiclass task.
rng = check_random_state(404)
y_pred = rng.rand(10)
# y_true contains three different class values
y_true = rng.randint(0, 3, size=10)
assert_raise_message(ValueError, "multiclass format is not supported",
average_precision_score, y_true, y_pred)
def test_average_precision_score_duplicate_values():
    # Duplicate values with precision-recall require different
    # processing than when computing the AUC of a ROC curve, because the
    # precision-recall curve is a decreasing curve.
    # The following situation corresponds to a perfect
# test statistic, the average_precision_score should be 1
y_true = [0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1]
y_score = [0, .1, .1, .4, .5, .6, .6, .9, .9, 1, 1]
assert_equal(average_precision_score(y_true, y_score), 1)
def test_average_precision_score_tied_values():
    # Here, if we go from left to right in y_true, the 0 values are
    # separated from the 1 values, so it appears that we've
    # correctly sorted our classifications. But in fact the first two
# values have the same score (0.5) and so the first two values
# could be swapped around, creating an imperfect sorting. This
# imperfection should come through in the end score, making it less
# than one.
y_true = [0, 1, 1]
y_score = [.5, .5, .6]
assert_not_equal(average_precision_score(y_true, y_score), 1.)
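# --- Added illustrative sketch (not part of the original test-suite). It shows
# why the tie matters: with a strict ranking the negative sample scores below
# both positives and the average precision is exactly 1.0, while the tie at 0.5
# makes the ranking ambiguous and drops the score below 1.0 (roughly 0.83).
# The helper name `_sketch_average_precision_with_ties` is ours, not sklearn's.
def _sketch_average_precision_with_ties():
    from sklearn.metrics import average_precision_score
    perfect = average_precision_score([0, 1, 1], [.4, .5, .6])  # strict ranking -> 1.0
    tied = average_precision_score([0, 1, 1], [.5, .5, .6])     # tie with the negative -> < 1.0
    return perfect, tied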
@ignore_warnings
def test_precision_recall_fscore_support_errors():
y_true, y_pred, _ = make_prediction(binary=True)
# Bad beta
assert_raises(ValueError, precision_recall_fscore_support,
y_true, y_pred, beta=0.0)
# Bad pos_label
assert_raises(ValueError, precision_recall_fscore_support,
y_true, y_pred, pos_label=2, average='macro')
# Bad average option
assert_raises(ValueError, precision_recall_fscore_support,
[0, 1, 2], [1, 2, 0], average='mega')
def test_confusion_matrix_binary():
# Test confusion matrix - binary classification case
y_true, y_pred, _ = make_prediction(binary=True)
def test(y_true, y_pred):
cm = confusion_matrix(y_true, y_pred)
assert_array_equal(cm, [[22, 3], [8, 17]])
tp, fp, fn, tn = cm.flatten()
num = (tp * tn - fp * fn)
den = np.sqrt((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn))
true_mcc = 0 if den == 0 else num / den
mcc = matthews_corrcoef(y_true, y_pred)
assert_array_almost_equal(mcc, true_mcc, decimal=2)
assert_array_almost_equal(mcc, 0.57, decimal=2)
test(y_true, y_pred)
test([str(y) for y in y_true],
[str(y) for y in y_pred])
def test_cohen_kappa():
# These label vectors reproduce the contingency matrix from Artstein and
# Poesio (2008), Table 1: np.array([[20, 20], [10, 50]]).
y1 = np.array([0] * 40 + [1] * 60)
y2 = np.array([0] * 20 + [1] * 20 + [0] * 10 + [1] * 50)
kappa = cohen_kappa_score(y1, y2)
assert_almost_equal(kappa, .348, decimal=3)
assert_equal(kappa, cohen_kappa_score(y2, y1))
# Add spurious labels and ignore them.
y1 = np.append(y1, [2] * 4)
y2 = np.append(y2, [2] * 4)
assert_equal(cohen_kappa_score(y1, y2, labels=[0, 1]), kappa)
assert_almost_equal(cohen_kappa_score(y1, y1), 1.)
# Multiclass example: Artstein and Poesio, Table 4.
y1 = np.array([0] * 46 + [1] * 44 + [2] * 10)
y2 = np.array([0] * 52 + [1] * 32 + [2] * 16)
assert_almost_equal(cohen_kappa_score(y1, y2), .8013, decimal=4)
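# --- Added illustrative sketch (not in the original tests): Cohen's kappa for
# the Artstein & Poesio contingency matrix above, computed by hand as
# (p_observed - p_expected) / (1 - p_expected). The helper name is ours.
def _sketch_cohen_kappa_by_hand():
    import numpy as np
    cm = np.array([[20., 20.], [10., 50.]])
    n = cm.sum()
    p_observed = np.trace(cm) / n                                  # 70 / 100 = 0.70
    p_expected = np.dot(cm.sum(axis=1), cm.sum(axis=0)) / n ** 2   # 5400 / 10000 = 0.54
    return (p_observed - p_expected) / (1. - p_expected)           # ~= 0.348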
def test_matthews_corrcoef_nan():
assert_equal(matthews_corrcoef([0], [1]), 0.0)
assert_equal(matthews_corrcoef([0, 0], [0, 1]), 0.0)
def test_precision_recall_f1_score_multiclass():
# Test Precision Recall and F1 Score for multiclass classification task
y_true, y_pred, _ = make_prediction(binary=False)
# compute scores with default labels introspection
p, r, f, s = precision_recall_fscore_support(y_true, y_pred, average=None)
assert_array_almost_equal(p, [0.83, 0.33, 0.42], 2)
assert_array_almost_equal(r, [0.79, 0.09, 0.90], 2)
assert_array_almost_equal(f, [0.81, 0.15, 0.57], 2)
assert_array_equal(s, [24, 31, 20])
# averaging tests
ps = precision_score(y_true, y_pred, pos_label=1, average='micro')
assert_array_almost_equal(ps, 0.53, 2)
rs = recall_score(y_true, y_pred, average='micro')
assert_array_almost_equal(rs, 0.53, 2)
fs = f1_score(y_true, y_pred, average='micro')
assert_array_almost_equal(fs, 0.53, 2)
ps = precision_score(y_true, y_pred, average='macro')
assert_array_almost_equal(ps, 0.53, 2)
rs = recall_score(y_true, y_pred, average='macro')
assert_array_almost_equal(rs, 0.60, 2)
fs = f1_score(y_true, y_pred, average='macro')
assert_array_almost_equal(fs, 0.51, 2)
ps = precision_score(y_true, y_pred, average='weighted')
assert_array_almost_equal(ps, 0.51, 2)
rs = recall_score(y_true, y_pred, average='weighted')
assert_array_almost_equal(rs, 0.53, 2)
fs = f1_score(y_true, y_pred, average='weighted')
assert_array_almost_equal(fs, 0.47, 2)
assert_raises(ValueError, precision_score, y_true, y_pred,
average="samples")
assert_raises(ValueError, recall_score, y_true, y_pred, average="samples")
assert_raises(ValueError, f1_score, y_true, y_pred, average="samples")
assert_raises(ValueError, fbeta_score, y_true, y_pred, average="samples",
beta=0.5)
    # same prediction but with an explicit label ordering
p, r, f, s = precision_recall_fscore_support(
y_true, y_pred, labels=[0, 2, 1], average=None)
assert_array_almost_equal(p, [0.83, 0.41, 0.33], 2)
assert_array_almost_equal(r, [0.79, 0.90, 0.10], 2)
assert_array_almost_equal(f, [0.81, 0.57, 0.15], 2)
assert_array_equal(s, [24, 20, 31])
def test_precision_refcall_f1_score_multilabel_unordered_labels():
# test that labels need not be sorted in the multilabel case
y_true = np.array([[1, 1, 0, 0]])
y_pred = np.array([[0, 0, 1, 1]])
for average in ['samples', 'micro', 'macro', 'weighted', None]:
p, r, f, s = precision_recall_fscore_support(
y_true, y_pred, labels=[3, 0, 1, 2], warn_for=[], average=average)
assert_array_equal(p, 0)
assert_array_equal(r, 0)
assert_array_equal(f, 0)
if average is None:
assert_array_equal(s, [0, 1, 1, 0])
def test_precision_recall_f1_score_multiclass_pos_label_none():
# Test Precision Recall and F1 Score for multiclass classification task
# GH Issue #1296
# initialize data
y_true = np.array([0, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1])
y_pred = np.array([1, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1])
# compute scores with default labels introspection
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
pos_label=None,
average='weighted')
def test_zero_precision_recall():
    # Check that pathological cases do not produce NaNs
old_error_settings = np.seterr(all='raise')
try:
y_true = np.array([0, 1, 2, 0, 1, 2])
y_pred = np.array([2, 0, 1, 1, 2, 0])
assert_almost_equal(precision_score(y_true, y_pred,
average='weighted'), 0.0, 2)
assert_almost_equal(recall_score(y_true, y_pred, average='weighted'),
0.0, 2)
assert_almost_equal(f1_score(y_true, y_pred, average='weighted'),
0.0, 2)
finally:
np.seterr(**old_error_settings)
def test_confusion_matrix_multiclass():
# Test confusion matrix - multi-class case
y_true, y_pred, _ = make_prediction(binary=False)
def test(y_true, y_pred, string_type=False):
# compute confusion matrix with default labels introspection
cm = confusion_matrix(y_true, y_pred)
assert_array_equal(cm, [[19, 4, 1],
[4, 3, 24],
[0, 2, 18]])
# compute confusion matrix with explicit label ordering
labels = ['0', '2', '1'] if string_type else [0, 2, 1]
cm = confusion_matrix(y_true,
y_pred,
labels=labels)
assert_array_equal(cm, [[19, 1, 4],
[0, 18, 2],
[4, 24, 3]])
test(y_true, y_pred)
test(list(str(y) for y in y_true),
list(str(y) for y in y_pred),
string_type=True)
def test_confusion_matrix_multiclass_subset_labels():
# Test confusion matrix - multi-class case with subset of labels
y_true, y_pred, _ = make_prediction(binary=False)
# compute confusion matrix with only first two labels considered
cm = confusion_matrix(y_true, y_pred, labels=[0, 1])
assert_array_equal(cm, [[19, 4],
[4, 3]])
# compute confusion matrix with explicit label ordering for only subset
# of labels
cm = confusion_matrix(y_true, y_pred, labels=[2, 1])
assert_array_equal(cm, [[18, 2],
[24, 3]])
def test_classification_report_multiclass():
# Test performance report
iris = datasets.load_iris()
y_true, y_pred, _ = make_prediction(dataset=iris, binary=False)
# print classification report with class names
expected_report = """\
precision recall f1-score support
setosa 0.83 0.79 0.81 24
versicolor 0.33 0.10 0.15 31
virginica 0.42 0.90 0.57 20
avg / total 0.51 0.53 0.47 75
"""
report = classification_report(
y_true, y_pred, labels=np.arange(len(iris.target_names)),
target_names=iris.target_names)
assert_equal(report, expected_report)
# print classification report with label detection
expected_report = """\
precision recall f1-score support
0 0.83 0.79 0.81 24
1 0.33 0.10 0.15 31
2 0.42 0.90 0.57 20
avg / total 0.51 0.53 0.47 75
"""
report = classification_report(y_true, y_pred)
assert_equal(report, expected_report)
def test_classification_report_multiclass_with_digits():
# Test performance report with added digits in floating point values
iris = datasets.load_iris()
y_true, y_pred, _ = make_prediction(dataset=iris, binary=False)
# print classification report with class names
expected_report = """\
precision recall f1-score support
setosa 0.82609 0.79167 0.80851 24
versicolor 0.33333 0.09677 0.15000 31
virginica 0.41860 0.90000 0.57143 20
avg / total 0.51375 0.53333 0.47310 75
"""
report = classification_report(
y_true, y_pred, labels=np.arange(len(iris.target_names)),
target_names=iris.target_names, digits=5)
assert_equal(report, expected_report)
# print classification report with label detection
expected_report = """\
precision recall f1-score support
0 0.83 0.79 0.81 24
1 0.33 0.10 0.15 31
2 0.42 0.90 0.57 20
avg / total 0.51 0.53 0.47 75
"""
report = classification_report(y_true, y_pred)
assert_equal(report, expected_report)
def test_classification_report_multiclass_with_string_label():
y_true, y_pred, _ = make_prediction(binary=False)
y_true = np.array(["blue", "green", "red"])[y_true]
y_pred = np.array(["blue", "green", "red"])[y_pred]
expected_report = """\
precision recall f1-score support
blue 0.83 0.79 0.81 24
green 0.33 0.10 0.15 31
red 0.42 0.90 0.57 20
avg / total 0.51 0.53 0.47 75
"""
report = classification_report(y_true, y_pred)
assert_equal(report, expected_report)
expected_report = """\
precision recall f1-score support
a 0.83 0.79 0.81 24
b 0.33 0.10 0.15 31
c 0.42 0.90 0.57 20
avg / total 0.51 0.53 0.47 75
"""
report = classification_report(y_true, y_pred,
target_names=["a", "b", "c"])
assert_equal(report, expected_report)
def test_classification_report_multiclass_with_unicode_label():
y_true, y_pred, _ = make_prediction(binary=False)
labels = np.array([u"blue\xa2", u"green\xa2", u"red\xa2"])
y_true = labels[y_true]
y_pred = labels[y_pred]
expected_report = u"""\
precision recall f1-score support
blue\xa2 0.83 0.79 0.81 24
green\xa2 0.33 0.10 0.15 31
red\xa2 0.42 0.90 0.57 20
avg / total 0.51 0.53 0.47 75
"""
if np_version[:3] < (1, 7, 0):
expected_message = ("NumPy < 1.7.0 does not implement"
" searchsorted on unicode data correctly.")
assert_raise_message(RuntimeError, expected_message,
classification_report, y_true, y_pred)
else:
report = classification_report(y_true, y_pred)
assert_equal(report, expected_report)
def test_multilabel_classification_report():
n_classes = 4
n_samples = 50
_, y_true = make_multilabel_classification(n_features=1,
n_samples=n_samples,
n_classes=n_classes,
random_state=0)
_, y_pred = make_multilabel_classification(n_features=1,
n_samples=n_samples,
n_classes=n_classes,
random_state=1)
expected_report = """\
precision recall f1-score support
0 0.50 0.67 0.57 24
1 0.51 0.74 0.61 27
2 0.29 0.08 0.12 26
3 0.52 0.56 0.54 27
avg / total 0.45 0.51 0.46 104
"""
report = classification_report(y_true, y_pred)
assert_equal(report, expected_report)
def test_multilabel_zero_one_loss_subset():
# Dense label indicator matrix format
y1 = np.array([[0, 1, 1], [1, 0, 1]])
y2 = np.array([[0, 0, 1], [1, 0, 1]])
assert_equal(zero_one_loss(y1, y2), 0.5)
assert_equal(zero_one_loss(y1, y1), 0)
assert_equal(zero_one_loss(y2, y2), 0)
assert_equal(zero_one_loss(y2, np.logical_not(y2)), 1)
assert_equal(zero_one_loss(y1, np.logical_not(y1)), 1)
assert_equal(zero_one_loss(y1, np.zeros(y1.shape)), 1)
assert_equal(zero_one_loss(y2, np.zeros(y1.shape)), 1)
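# --- Added illustrative sketch (not in the original tests): for label-indicator
# input, zero_one_loss is the fraction of samples whose *entire* label row
# differs from the truth, which is why (y1, y2) above scores 0.5.
def _sketch_subset_zero_one_loss():
    import numpy as np
    y1 = np.array([[0, 1, 1], [1, 0, 1]])
    y2 = np.array([[0, 0, 1], [1, 0, 1]])
    return np.mean(np.any(y1 != y2, axis=1))   # one of two rows differs -> 0.5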
def test_multilabel_hamming_loss():
# Dense label indicator matrix format
y1 = np.array([[0, 1, 1], [1, 0, 1]])
y2 = np.array([[0, 0, 1], [1, 0, 1]])
assert_equal(hamming_loss(y1, y2), 1 / 6)
assert_equal(hamming_loss(y1, y1), 0)
assert_equal(hamming_loss(y2, y2), 0)
assert_equal(hamming_loss(y2, np.logical_not(y2)), 1)
assert_equal(hamming_loss(y1, np.logical_not(y1)), 1)
assert_equal(hamming_loss(y1, np.zeros(y1.shape)), 4 / 6)
assert_equal(hamming_loss(y2, np.zeros(y1.shape)), 0.5)
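# --- Added illustrative sketch (not in the original tests): the Hamming loss is
# simply the fraction of individual label cells that disagree, here 1 out of 6.
def _sketch_hamming_loss_by_hand():
    import numpy as np
    y1 = np.array([[0, 1, 1], [1, 0, 1]])
    y2 = np.array([[0, 0, 1], [1, 0, 1]])
    return np.mean(y1 != y2)   # 1 differing cell out of 6 -> 1 / 6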
def test_multilabel_jaccard_similarity_score():
# Dense label indicator matrix format
y1 = np.array([[0, 1, 1], [1, 0, 1]])
y2 = np.array([[0, 0, 1], [1, 0, 1]])
# size(y1 \inter y2) = [1, 2]
# size(y1 \union y2) = [2, 2]
assert_equal(jaccard_similarity_score(y1, y2), 0.75)
assert_equal(jaccard_similarity_score(y1, y1), 1)
assert_equal(jaccard_similarity_score(y2, y2), 1)
assert_equal(jaccard_similarity_score(y2, np.logical_not(y2)), 0)
assert_equal(jaccard_similarity_score(y1, np.logical_not(y1)), 0)
assert_equal(jaccard_similarity_score(y1, np.zeros(y1.shape)), 0)
assert_equal(jaccard_similarity_score(y2, np.zeros(y1.shape)), 0)
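# --- Added illustrative sketch (not in the original tests): the sample-wise
# Jaccard similarity spelled out for the pair above,
# mean(|intersection| / |union|) = mean([1/2, 2/2]) = 0.75.
def _sketch_multilabel_jaccard_by_hand():
    import numpy as np
    y1 = np.array([[0, 1, 1], [1, 0, 1]])
    y2 = np.array([[0, 0, 1], [1, 0, 1]])
    inter = np.logical_and(y1, y2).sum(axis=1).astype(float)
    union = np.logical_or(y1, y2).sum(axis=1)
    return np.mean(inter / union)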
@ignore_warnings
def test_precision_recall_f1_score_multilabel_1():
# Test precision_recall_f1_score on a crafted multilabel example
# First crafted example
y_true = np.array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 1]])
y_pred = np.array([[0, 1, 0, 0], [0, 1, 0, 0], [1, 0, 1, 0]])
p, r, f, s = precision_recall_fscore_support(y_true, y_pred, average=None)
# tp = [0, 1, 1, 0]
# fn = [1, 0, 0, 1]
# fp = [1, 1, 0, 0]
# Check per class
assert_array_almost_equal(p, [0.0, 0.5, 1.0, 0.0], 2)
assert_array_almost_equal(r, [0.0, 1.0, 1.0, 0.0], 2)
assert_array_almost_equal(f, [0.0, 1 / 1.5, 1, 0.0], 2)
assert_array_almost_equal(s, [1, 1, 1, 1], 2)
f2 = fbeta_score(y_true, y_pred, beta=2, average=None)
support = s
assert_array_almost_equal(f2, [0, 0.83, 1, 0], 2)
# Check macro
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="macro")
assert_almost_equal(p, 1.5 / 4)
assert_almost_equal(r, 0.5)
assert_almost_equal(f, 2.5 / 1.5 * 0.25)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2, average="macro"),
np.mean(f2))
# Check micro
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="micro")
assert_almost_equal(p, 0.5)
assert_almost_equal(r, 0.5)
assert_almost_equal(f, 0.5)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="micro"),
(1 + 4) * p * r / (4 * p + r))
# Check weighted
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="weighted")
assert_almost_equal(p, 1.5 / 4)
assert_almost_equal(r, 0.5)
assert_almost_equal(f, 2.5 / 1.5 * 0.25)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="weighted"),
np.average(f2, weights=support))
# Check samples
# |h(x_i) inter y_i | = [0, 1, 1]
# |y_i| = [1, 1, 2]
# |h(x_i)| = [1, 1, 2]
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="samples")
assert_almost_equal(p, 0.5)
assert_almost_equal(r, 0.5)
assert_almost_equal(f, 0.5)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2, average="samples"),
0.5)
@ignore_warnings
def test_precision_recall_f1_score_multilabel_2():
# Test precision_recall_f1_score on a crafted multilabel example 2
# Second crafted example
y_true = np.array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 1, 1, 0]])
y_pred = np.array([[0, 0, 0, 1], [0, 0, 0, 1], [1, 1, 0, 0]])
# tp = [ 0. 1. 0. 0.]
# fp = [ 1. 0. 0. 2.]
# fn = [ 1. 1. 1. 0.]
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average=None)
assert_array_almost_equal(p, [0.0, 1.0, 0.0, 0.0], 2)
assert_array_almost_equal(r, [0.0, 0.5, 0.0, 0.0], 2)
assert_array_almost_equal(f, [0.0, 0.66, 0.0, 0.0], 2)
assert_array_almost_equal(s, [1, 2, 1, 0], 2)
f2 = fbeta_score(y_true, y_pred, beta=2, average=None)
support = s
assert_array_almost_equal(f2, [0, 0.55, 0, 0], 2)
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="micro")
assert_almost_equal(p, 0.25)
assert_almost_equal(r, 0.25)
assert_almost_equal(f, 2 * 0.25 * 0.25 / 0.5)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="micro"),
(1 + 4) * p * r / (4 * p + r))
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="macro")
assert_almost_equal(p, 0.25)
assert_almost_equal(r, 0.125)
assert_almost_equal(f, 2 / 12)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="macro"),
np.mean(f2))
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="weighted")
assert_almost_equal(p, 2 / 4)
assert_almost_equal(r, 1 / 4)
assert_almost_equal(f, 2 / 3 * 2 / 4)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="weighted"),
np.average(f2, weights=support))
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="samples")
# Check samples
# |h(x_i) inter y_i | = [0, 0, 1]
# |y_i| = [1, 1, 2]
# |h(x_i)| = [1, 1, 2]
assert_almost_equal(p, 1 / 6)
assert_almost_equal(r, 1 / 6)
assert_almost_equal(f, 2 / 4 * 1 / 3)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="samples"),
0.1666, 2)
def test_precision_recall_f1_score_with_an_empty_prediction():
y_true = np.array([[0, 1, 0, 0], [1, 0, 0, 0], [0, 1, 1, 0]])
y_pred = np.array([[0, 0, 0, 0], [0, 0, 0, 1], [0, 1, 1, 0]])
# true_pos = [ 0. 1. 1. 0.]
# false_pos = [ 0. 0. 0. 1.]
# false_neg = [ 1. 1. 0. 0.]
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average=None)
assert_array_almost_equal(p, [0.0, 1.0, 1.0, 0.0], 2)
assert_array_almost_equal(r, [0.0, 0.5, 1.0, 0.0], 2)
assert_array_almost_equal(f, [0.0, 1 / 1.5, 1, 0.0], 2)
assert_array_almost_equal(s, [1, 2, 1, 0], 2)
f2 = fbeta_score(y_true, y_pred, beta=2, average=None)
support = s
assert_array_almost_equal(f2, [0, 0.55, 1, 0], 2)
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="macro")
assert_almost_equal(p, 0.5)
assert_almost_equal(r, 1.5 / 4)
assert_almost_equal(f, 2.5 / (4 * 1.5))
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="macro"),
np.mean(f2))
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="micro")
assert_almost_equal(p, 2 / 3)
assert_almost_equal(r, 0.5)
assert_almost_equal(f, 2 / 3 / (2 / 3 + 0.5))
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="micro"),
(1 + 4) * p * r / (4 * p + r))
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="weighted")
assert_almost_equal(p, 3 / 4)
assert_almost_equal(r, 0.5)
assert_almost_equal(f, (2 / 1.5 + 1) / 4)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="weighted"),
np.average(f2, weights=support))
p, r, f, s = precision_recall_fscore_support(y_true, y_pred,
average="samples")
# |h(x_i) inter y_i | = [0, 0, 2]
# |y_i| = [1, 1, 2]
# |h(x_i)| = [0, 1, 2]
assert_almost_equal(p, 1 / 3)
assert_almost_equal(r, 1 / 3)
assert_almost_equal(f, 1 / 3)
assert_equal(s, None)
assert_almost_equal(fbeta_score(y_true, y_pred, beta=2,
average="samples"),
0.333, 2)
def test_precision_recall_f1_no_labels():
y_true = np.zeros((20, 3))
y_pred = np.zeros_like(y_true)
# tp = [0, 0, 0]
# fn = [0, 0, 0]
# fp = [0, 0, 0]
# support = [0, 0, 0]
# |y_hat_i inter y_i | = [0, 0, 0]
# |y_i| = [0, 0, 0]
# |y_hat_i| = [0, 0, 0]
for beta in [1]:
p, r, f, s = assert_warns(UndefinedMetricWarning,
precision_recall_fscore_support,
y_true, y_pred, average=None, beta=beta)
assert_array_almost_equal(p, [0, 0, 0], 2)
assert_array_almost_equal(r, [0, 0, 0], 2)
assert_array_almost_equal(f, [0, 0, 0], 2)
assert_array_almost_equal(s, [0, 0, 0], 2)
fbeta = assert_warns(UndefinedMetricWarning, fbeta_score,
y_true, y_pred, beta=beta, average=None)
assert_array_almost_equal(fbeta, [0, 0, 0], 2)
for average in ["macro", "micro", "weighted", "samples"]:
p, r, f, s = assert_warns(UndefinedMetricWarning,
precision_recall_fscore_support,
y_true, y_pred, average=average,
beta=beta)
assert_almost_equal(p, 0)
assert_almost_equal(r, 0)
assert_almost_equal(f, 0)
assert_equal(s, None)
fbeta = assert_warns(UndefinedMetricWarning, fbeta_score,
y_true, y_pred,
beta=beta, average=average)
assert_almost_equal(fbeta, 0)
def test_prf_warnings():
# average of per-label scores
f, w = precision_recall_fscore_support, UndefinedMetricWarning
my_assert = assert_warns_message
for average in [None, 'weighted', 'macro']:
msg = ('Precision and F-score are ill-defined and '
'being set to 0.0 in labels with no predicted samples.')
my_assert(w, msg, f, [0, 1, 2], [1, 1, 2], average=average)
msg = ('Recall and F-score are ill-defined and '
'being set to 0.0 in labels with no true samples.')
my_assert(w, msg, f, [1, 1, 2], [0, 1, 2], average=average)
# average of per-sample scores
msg = ('Precision and F-score are ill-defined and '
'being set to 0.0 in samples with no predicted labels.')
my_assert(w, msg, f, np.array([[1, 0], [1, 0]]),
np.array([[1, 0], [0, 0]]), average='samples')
msg = ('Recall and F-score are ill-defined and '
'being set to 0.0 in samples with no true labels.')
my_assert(w, msg, f, np.array([[1, 0], [0, 0]]),
np.array([[1, 0], [1, 0]]),
average='samples')
# single score: micro-average
msg = ('Precision and F-score are ill-defined and '
'being set to 0.0 due to no predicted samples.')
my_assert(w, msg, f, np.array([[1, 1], [1, 1]]),
np.array([[0, 0], [0, 0]]), average='micro')
msg = ('Recall and F-score are ill-defined and '
'being set to 0.0 due to no true samples.')
my_assert(w, msg, f, np.array([[0, 0], [0, 0]]),
np.array([[1, 1], [1, 1]]), average='micro')
    # single positive label
msg = ('Precision and F-score are ill-defined and '
'being set to 0.0 due to no predicted samples.')
my_assert(w, msg, f, [1, 1], [-1, -1], average='macro')
msg = ('Recall and F-score are ill-defined and '
'being set to 0.0 due to no true samples.')
my_assert(w, msg, f, [-1, -1], [1, 1], average='macro')
def test_recall_warnings():
assert_no_warnings(recall_score,
np.array([[1, 1], [1, 1]]),
np.array([[0, 0], [0, 0]]),
average='micro')
clean_warning_registry()
with warnings.catch_warnings(record=True) as record:
warnings.simplefilter('always')
recall_score(np.array([[0, 0], [0, 0]]),
np.array([[1, 1], [1, 1]]),
average='micro')
assert_equal(str(record.pop().message),
'Recall is ill-defined and '
'being set to 0.0 due to no true samples.')
def test_precision_warnings():
clean_warning_registry()
with warnings.catch_warnings(record=True) as record:
warnings.simplefilter('always')
precision_score(np.array([[1, 1], [1, 1]]),
np.array([[0, 0], [0, 0]]),
average='micro')
assert_equal(str(record.pop().message),
'Precision is ill-defined and '
'being set to 0.0 due to no predicted samples.')
assert_no_warnings(precision_score,
np.array([[0, 0], [0, 0]]),
np.array([[1, 1], [1, 1]]),
average='micro')
def test_fscore_warnings():
clean_warning_registry()
with warnings.catch_warnings(record=True) as record:
warnings.simplefilter('always')
for score in [f1_score, partial(fbeta_score, beta=2)]:
score(np.array([[1, 1], [1, 1]]),
np.array([[0, 0], [0, 0]]),
average='micro')
assert_equal(str(record.pop().message),
'F-score is ill-defined and '
'being set to 0.0 due to no predicted samples.')
score(np.array([[0, 0], [0, 0]]),
np.array([[1, 1], [1, 1]]),
average='micro')
assert_equal(str(record.pop().message),
'F-score is ill-defined and '
'being set to 0.0 due to no true samples.')
def test_prf_average_compat():
# Ensure warning if f1_score et al.'s average is implicit for multiclass
y_true = [1, 2, 3, 3]
y_pred = [1, 2, 3, 1]
y_true_bin = [0, 1, 1]
y_pred_bin = [0, 1, 0]
for metric in [precision_score, recall_score, f1_score,
partial(fbeta_score, beta=2)]:
score = assert_warns(DeprecationWarning, metric, y_true, y_pred)
score_weighted = assert_no_warnings(metric, y_true, y_pred,
average='weighted')
assert_equal(score, score_weighted,
'average does not act like "weighted" by default')
# check binary passes without warning
assert_no_warnings(metric, y_true_bin, y_pred_bin)
# but binary with pos_label=None should behave like multiclass
score = assert_warns(DeprecationWarning, metric,
y_true_bin, y_pred_bin, pos_label=None)
score_weighted = assert_no_warnings(metric, y_true_bin, y_pred_bin,
pos_label=None, average='weighted')
assert_equal(score, score_weighted,
'average does not act like "weighted" by default with '
'binary data and pos_label=None')
def test__check_targets():
# Check that _check_targets correctly merges target types, squeezes
# output and fails if input lengths differ.
IND = 'multilabel-indicator'
MC = 'multiclass'
BIN = 'binary'
CNT = 'continuous'
MMC = 'multiclass-multioutput'
MCN = 'continuous-multioutput'
# all of length 3
EXAMPLES = [
(IND, np.array([[0, 1, 1], [1, 0, 0], [0, 0, 1]])),
# must not be considered binary
(IND, np.array([[0, 1], [1, 0], [1, 1]])),
(MC, [2, 3, 1]),
(BIN, [0, 1, 1]),
(CNT, [0., 1.5, 1.]),
(MC, np.array([[2], [3], [1]])),
(BIN, np.array([[0], [1], [1]])),
(CNT, np.array([[0.], [1.5], [1.]])),
(MMC, np.array([[0, 2], [1, 3], [2, 3]])),
(MCN, np.array([[0.5, 2.], [1.1, 3.], [2., 3.]])),
]
# expected type given input types, or None for error
# (types will be tried in either order)
EXPECTED = {
(IND, IND): IND,
(MC, MC): MC,
(BIN, BIN): BIN,
(MC, IND): None,
(BIN, IND): None,
(BIN, MC): MC,
# Disallowed types
(CNT, CNT): None,
(MMC, MMC): None,
(MCN, MCN): None,
(IND, CNT): None,
(MC, CNT): None,
(BIN, CNT): None,
(MMC, CNT): None,
(MCN, CNT): None,
(IND, MMC): None,
(MC, MMC): None,
(BIN, MMC): None,
(MCN, MMC): None,
(IND, MCN): None,
(MC, MCN): None,
(BIN, MCN): None,
}
for (type1, y1), (type2, y2) in product(EXAMPLES, repeat=2):
try:
expected = EXPECTED[type1, type2]
except KeyError:
expected = EXPECTED[type2, type1]
if expected is None:
assert_raises(ValueError, _check_targets, y1, y2)
if type1 != type2:
assert_raise_message(
ValueError,
"Can't handle mix of {0} and {1}".format(type1, type2),
_check_targets, y1, y2)
else:
if type1 not in (BIN, MC, IND):
assert_raise_message(ValueError,
"{0} is not supported".format(type1),
_check_targets, y1, y2)
else:
merged_type, y1out, y2out = _check_targets(y1, y2)
assert_equal(merged_type, expected)
if merged_type.startswith('multilabel'):
assert_equal(y1out.format, 'csr')
assert_equal(y2out.format, 'csr')
else:
assert_array_equal(y1out, np.squeeze(y1))
assert_array_equal(y2out, np.squeeze(y2))
assert_raises(ValueError, _check_targets, y1[:-1], y2)
# Make sure seq of seq is not supported
y1 = [(1, 2,), (0, 2, 3)]
y2 = [(2,), (0, 2,)]
msg = ('You appear to be using a legacy multi-label data representation. '
'Sequence of sequences are no longer supported; use a binary array'
' or sparse matrix instead.')
assert_raise_message(ValueError, msg, _check_targets, y1, y2)
def test_hinge_loss_binary():
y_true = np.array([-1, 1, 1, -1])
pred_decision = np.array([-8.5, 0.5, 1.5, -0.3])
assert_equal(hinge_loss(y_true, pred_decision), 1.2 / 4)
y_true = np.array([0, 2, 2, 0])
pred_decision = np.array([-8.5, 0.5, 1.5, -0.3])
assert_equal(hinge_loss(y_true, pred_decision), 1.2 / 4)
def test_hinge_loss_multiclass():
pred_decision = np.array([
[0.36, -0.17, -0.58, -0.99],
[-0.54, -0.37, -0.48, -0.58],
[-1.45, -0.58, -0.38, -0.17],
[-0.54, -0.38, -0.48, -0.58],
[-2.36, -0.79, -0.27, 0.24],
[-1.45, -0.58, -0.38, -0.17]
])
y_true = np.array([0, 1, 2, 1, 3, 2])
dummy_losses = np.array([
1 - pred_decision[0][0] + pred_decision[0][1],
1 - pred_decision[1][1] + pred_decision[1][2],
1 - pred_decision[2][2] + pred_decision[2][3],
1 - pred_decision[3][1] + pred_decision[3][2],
1 - pred_decision[4][3] + pred_decision[4][2],
1 - pred_decision[5][2] + pred_decision[5][3]
])
dummy_losses[dummy_losses <= 0] = 0
dummy_hinge_loss = np.mean(dummy_losses)
assert_equal(hinge_loss(y_true, pred_decision),
dummy_hinge_loss)
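# --- Added illustrative sketch (not in the original tests): the multiclass hinge
# loss checked above is mean(max(0, 1 - margin)), where the margin is the
# decision value of the true class minus the largest decision value among the
# other classes. It assumes labels are encoded 0..n_classes-1 and match the
# columns of pred_decision; the helper name is ours.
def _sketch_multiclass_hinge_by_hand(y_true, pred_decision):
    import numpy as np
    pred_decision = np.asarray(pred_decision, dtype=float)
    margins = np.array([scores[truth] - np.delete(scores, truth).max()
                        for truth, scores in zip(y_true, pred_decision)])
    return np.mean(np.maximum(0., 1. - margins))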
def test_hinge_loss_multiclass_missing_labels_with_labels_none():
y_true = np.array([0, 1, 2, 2])
pred_decision = np.array([
[1.27, 0.034, -0.68, -1.40],
[-1.45, -0.58, -0.38, -0.17],
[-2.36, -0.79, -0.27, 0.24],
[-2.36, -0.79, -0.27, 0.24]
])
error_message = ("Please include all labels in y_true "
"or pass labels as third argument")
assert_raise_message(ValueError,
error_message,
hinge_loss, y_true, pred_decision)
def test_hinge_loss_multiclass_with_missing_labels():
pred_decision = np.array([
[0.36, -0.17, -0.58, -0.99],
[-0.55, -0.38, -0.48, -0.58],
[-1.45, -0.58, -0.38, -0.17],
[-0.55, -0.38, -0.48, -0.58],
[-1.45, -0.58, -0.38, -0.17]
])
y_true = np.array([0, 1, 2, 1, 2])
labels = np.array([0, 1, 2, 3])
dummy_losses = np.array([
1 - pred_decision[0][0] + pred_decision[0][1],
1 - pred_decision[1][1] + pred_decision[1][2],
1 - pred_decision[2][2] + pred_decision[2][3],
1 - pred_decision[3][1] + pred_decision[3][2],
1 - pred_decision[4][2] + pred_decision[4][3]
])
dummy_losses[dummy_losses <= 0] = 0
dummy_hinge_loss = np.mean(dummy_losses)
assert_equal(hinge_loss(y_true, pred_decision, labels=labels),
dummy_hinge_loss)
def test_hinge_loss_multiclass_invariance_lists():
# Currently, invariance of string and integer labels cannot be tested
# in common invariance tests because invariance tests for multiclass
    # decision functions are not implemented yet.
y_true = ['blue', 'green', 'red',
'green', 'white', 'red']
pred_decision = [
[0.36, -0.17, -0.58, -0.99],
[-0.55, -0.38, -0.48, -0.58],
[-1.45, -0.58, -0.38, -0.17],
[-0.55, -0.38, -0.48, -0.58],
[-2.36, -0.79, -0.27, 0.24],
[-1.45, -0.58, -0.38, -0.17]]
dummy_losses = np.array([
1 - pred_decision[0][0] + pred_decision[0][1],
1 - pred_decision[1][1] + pred_decision[1][2],
1 - pred_decision[2][2] + pred_decision[2][3],
1 - pred_decision[3][1] + pred_decision[3][2],
1 - pred_decision[4][3] + pred_decision[4][2],
1 - pred_decision[5][2] + pred_decision[5][3]
])
dummy_losses[dummy_losses <= 0] = 0
dummy_hinge_loss = np.mean(dummy_losses)
assert_equal(hinge_loss(y_true, pred_decision),
dummy_hinge_loss)
def test_log_loss():
# binary case with symbolic labels ("no" < "yes")
y_true = ["no", "no", "no", "yes", "yes", "yes"]
y_pred = np.array([[0.5, 0.5], [0.1, 0.9], [0.01, 0.99],
[0.9, 0.1], [0.75, 0.25], [0.001, 0.999]])
loss = log_loss(y_true, y_pred)
assert_almost_equal(loss, 1.8817971)
# multiclass case; adapted from http://bit.ly/RJJHWA
y_true = [1, 0, 2]
y_pred = [[0.2, 0.7, 0.1], [0.6, 0.2, 0.2], [0.6, 0.1, 0.3]]
loss = log_loss(y_true, y_pred, normalize=True)
assert_almost_equal(loss, 0.6904911)
# check that we got all the shapes and axes right
# by doubling the length of y_true and y_pred
y_true *= 2
y_pred *= 2
loss = log_loss(y_true, y_pred, normalize=False)
assert_almost_equal(loss, 0.6904911 * 6, decimal=6)
# check eps and handling of absolute zero and one probabilities
y_pred = np.asarray(y_pred) > .5
loss = log_loss(y_true, y_pred, normalize=True, eps=.1)
assert_almost_equal(loss, log_loss(y_true, np.clip(y_pred, .1, .9)))
    # raise an error if the number of classes does not match.
y_true = [1, 0, 2]
y_pred = [[0.2, 0.7], [0.6, 0.5], [0.4, 0.1]]
assert_raises(ValueError, log_loss, y_true, y_pred)
# case when y_true is a string array object
y_true = ["ham", "spam", "spam", "ham"]
y_pred = [[0.2, 0.7], [0.6, 0.5], [0.4, 0.1], [0.7, 0.2]]
loss = log_loss(y_true, y_pred)
assert_almost_equal(loss, 1.0383217, decimal=6)
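# --- Added illustrative sketch (not in the original tests): log loss is the mean
# negative log of the probability assigned to the true class. Reading off the
# true-class column for the binary case above reproduces 1.8817971.
def _sketch_log_loss_by_hand():
    import numpy as np
    p_true_class = np.array([0.5, 0.1, 0.01, 0.1, 0.25, 0.999])
    return -np.mean(np.log(p_true_class))   # ~= 1.8818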
def test_brier_score_loss():
# Check brier_score_loss function
y_true = np.array([0, 1, 1, 0, 1, 1])
y_pred = np.array([0.1, 0.8, 0.9, 0.3, 1., 0.95])
true_score = linalg.norm(y_true - y_pred) ** 2 / len(y_true)
assert_almost_equal(brier_score_loss(y_true, y_true), 0.0)
assert_almost_equal(brier_score_loss(y_true, y_pred), true_score)
assert_almost_equal(brier_score_loss(1. + y_true, y_pred),
true_score)
assert_almost_equal(brier_score_loss(2 * y_true - 1, y_pred),
true_score)
assert_raises(ValueError, brier_score_loss, y_true, y_pred[1:])
assert_raises(ValueError, brier_score_loss, y_true, y_pred + 1.)
assert_raises(ValueError, brier_score_loss, y_true, y_pred - 1.)
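# --- Added illustrative sketch (not in the original tests): the Brier score is
# the mean squared difference between the binary outcomes and the predicted
# probabilities, equivalent to the norm-based true_score computed above.
def _sketch_brier_by_hand():
    import numpy as np
    y_true = np.array([0, 1, 1, 0, 1, 1])
    y_prob = np.array([0.1, 0.8, 0.9, 0.3, 1., 0.95])
    return np.mean((y_true - y_prob) ** 2)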
|
bsd-3-clause
|
krez13/scikit-learn
|
examples/cluster/plot_feature_agglomeration_vs_univariate_selection.py
|
87
|
3903
|
"""
==============================================
Feature agglomeration vs. univariate selection
==============================================
This example compares two dimensionality reduction strategies:
- univariate feature selection with Anova
- feature agglomeration with Ward hierarchical clustering
Both methods are compared in a regression problem using
a BayesianRidge as supervised estimator.
"""
# Author: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
print(__doc__)
import shutil
import tempfile
import numpy as np
import matplotlib.pyplot as plt
from scipy import linalg, ndimage
from sklearn.feature_extraction.image import grid_to_graph
from sklearn import feature_selection
from sklearn.cluster import FeatureAgglomeration
from sklearn.linear_model import BayesianRidge
from sklearn.pipeline import Pipeline
from sklearn.externals.joblib import Memory
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import KFold
###############################################################################
# Generate data
n_samples = 200
size = 40 # image size
roi_size = 15
snr = 5.
np.random.seed(0)
mask = np.ones([size, size], dtype=np.bool)
coef = np.zeros((size, size))
coef[0:roi_size, 0:roi_size] = -1.
coef[-roi_size:, -roi_size:] = 1.
X = np.random.randn(n_samples, size ** 2)
for x in X: # smooth data
x[:] = ndimage.gaussian_filter(x.reshape(size, size), sigma=1.0).ravel()
X -= X.mean(axis=0)
X /= X.std(axis=0)
y = np.dot(X, coef.ravel())
noise = np.random.randn(y.shape[0])
noise_coef = (linalg.norm(y, 2) / np.exp(snr / 20.)) / linalg.norm(noise, 2)
y += noise_coef * noise # add noise
###############################################################################
# Compute the coefs of a Bayesian Ridge with GridSearch
cv = KFold(2) # cross-validation generator for model selection
ridge = BayesianRidge()
cachedir = tempfile.mkdtemp()
mem = Memory(cachedir=cachedir, verbose=1)
# Ward agglomeration followed by BayesianRidge
connectivity = grid_to_graph(n_x=size, n_y=size)
ward = FeatureAgglomeration(n_clusters=10, connectivity=connectivity,
memory=mem)
clf = Pipeline([('ward', ward), ('ridge', ridge)])
# Select the optimal number of parcels with grid search
clf = GridSearchCV(clf, {'ward__n_clusters': [10, 20, 30]}, n_jobs=1, cv=cv)
clf.fit(X, y) # set the best parameters
coef_ = clf.best_estimator_.steps[-1][1].coef_
coef_ = clf.best_estimator_.steps[0][1].inverse_transform(coef_)
coef_agglomeration_ = coef_.reshape(size, size)
# Anova univariate feature selection followed by BayesianRidge
f_regression = mem.cache(feature_selection.f_regression) # caching function
anova = feature_selection.SelectPercentile(f_regression)
clf = Pipeline([('anova', anova), ('ridge', ridge)])
# Select the optimal percentage of features with grid search
clf = GridSearchCV(clf, {'anova__percentile': [5, 10, 20]}, cv=cv)
clf.fit(X, y) # set the best parameters
coef_ = clf.best_estimator_.steps[-1][1].coef_
coef_ = clf.best_estimator_.steps[0][1].inverse_transform(coef_.reshape(1, -1))
coef_selection_ = coef_.reshape(size, size)
###############################################################################
# Inverse the transformation to plot the results on an image
plt.close('all')
plt.figure(figsize=(7.3, 2.7))
plt.subplot(1, 3, 1)
plt.imshow(coef, interpolation="nearest", cmap=plt.cm.RdBu_r)
plt.title("True weights")
plt.subplot(1, 3, 2)
plt.imshow(coef_selection_, interpolation="nearest", cmap=plt.cm.RdBu_r)
plt.title("Feature Selection")
plt.subplot(1, 3, 3)
plt.imshow(coef_agglomeration_, interpolation="nearest", cmap=plt.cm.RdBu_r)
plt.title("Feature Agglomeration")
plt.subplots_adjust(0.04, 0.0, 0.98, 0.94, 0.16, 0.26)
plt.show()
# Attempt to remove the temporary cachedir, but don't worry if it fails
shutil.rmtree(cachedir, ignore_errors=True)
|
bsd-3-clause
|
gfyoung/pandas
|
pandas/io/feather_format.py
|
2
|
3807
|
""" feather-format compat """
from typing import AnyStr
from pandas._typing import FilePathOrBuffer, StorageOptions
from pandas.compat._optional import import_optional_dependency
from pandas.util._decorators import doc
from pandas import DataFrame, Int64Index, RangeIndex
from pandas.core import generic
from pandas.io.common import get_handle
@doc(storage_options=generic._shared_docs["storage_options"])
def to_feather(
df: DataFrame,
path: FilePathOrBuffer[AnyStr],
storage_options: StorageOptions = None,
**kwargs,
):
"""
Write a DataFrame to the binary Feather format.
Parameters
----------
df : DataFrame
path : string file path, or file-like object
{storage_options}
.. versionadded:: 1.2.0
**kwargs :
Additional keywords passed to `pyarrow.feather.write_feather`.
.. versionadded:: 1.1.0
"""
import_optional_dependency("pyarrow")
from pyarrow import feather
if not isinstance(df, DataFrame):
raise ValueError("feather only support IO with DataFrames")
valid_types = {"string", "unicode"}
# validate index
# --------------
# validate that we have only a default index
# raise on anything else as we don't serialize the index
if not isinstance(df.index, Int64Index):
typ = type(df.index)
raise ValueError(
f"feather does not support serializing {typ} "
"for the index; you can .reset_index() to make the index into column(s)"
)
if not df.index.equals(RangeIndex.from_range(range(len(df)))):
raise ValueError(
"feather does not support serializing a non-default index for the index; "
"you can .reset_index() to make the index into column(s)"
)
if df.index.name is not None:
raise ValueError(
"feather does not serialize index meta-data on a default index"
)
# validate columns
# ----------------
# must have value column names (strings only)
if df.columns.inferred_type not in valid_types:
raise ValueError("feather must have string column names")
with get_handle(
path, "wb", storage_options=storage_options, is_text=False
) as handles:
feather.write_feather(df, handles.handle, **kwargs)
@doc(storage_options=generic._shared_docs["storage_options"])
def read_feather(
path, columns=None, use_threads: bool = True, storage_options: StorageOptions = None
):
"""
Load a feather-format object from the file path.
Parameters
----------
path : str, path object or file-like object
Any valid string path is acceptable. The string could be a URL. Valid
URL schemes include http, ftp, s3, and file. For file URLs, a host is
expected. A local file could be:
``file://localhost/path/to/table.feather``.
If you want to pass in a path object, pandas accepts any
``os.PathLike``.
By file-like object, we refer to objects with a ``read()`` method,
such as a file handle (e.g. via builtin ``open`` function)
or ``StringIO``.
columns : sequence, default None
If not provided, all columns are read.
.. versionadded:: 0.24.0
use_threads : bool, default True
Whether to parallelize reading using multiple threads.
.. versionadded:: 0.24.0
{storage_options}
.. versionadded:: 1.2.0
Returns
-------
type of object stored in file
"""
import_optional_dependency("pyarrow")
from pyarrow import feather
with get_handle(
path, "rb", storage_options=storage_options, is_text=False
) as handles:
return feather.read_feather(
handles.handle, columns=columns, use_threads=bool(use_threads)
)
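# --- Added usage sketch (illustrative, not part of pandas): the validation above
# means only a DataFrame with a default, unnamed RangeIndex and string column
# names round-trips through feather. The file name below is hypothetical, and
# pyarrow must be installed.
def _sketch_feather_round_trip(path="example.feather"):
    import pandas as pd
    df = pd.DataFrame({"a": [1, 2, 3], "b": list("xyz")})   # default RangeIndex
    df.to_feather(path)
    return pd.read_feather(path)
# A DataFrame with a named or non-default index would need .reset_index() first.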
|
bsd-3-clause
|
rkmaddox/mne-python
|
mne/viz/_brain/colormap.py
|
10
|
6336
|
# Authors: Alexandre Gramfort <[email protected]>
# Eric Larson <[email protected]>
# Oleh Kozynets <[email protected]>
# Guillaume Favelier <[email protected]>
#
# License: Simplified BSD
import numpy as np
def create_lut(cmap, n_colors=256, center=None):
"""Return a colormap suitable for setting as a LUT."""
from .._3d import _get_cmap
assert not (isinstance(cmap, str) and cmap == 'auto')
cmap = _get_cmap(cmap)
lut = np.round(cmap(np.linspace(0, 1, n_colors)) * 255.0).astype(np.int64)
return lut
def scale_sequential_lut(lut_table, fmin, fmid, fmax):
"""Scale a sequential colormap."""
assert fmin <= fmid <= fmax # guaranteed by calculate_lut
lut_table_new = lut_table.copy()
n_colors = lut_table.shape[0]
n_colors2 = n_colors // 2
if fmax == fmin:
fmid_idx = 0
else:
fmid_idx = np.clip(int(np.round(
n_colors * ((fmid - fmin) / (fmax - fmin))) - 1), 0, n_colors - 2)
n_left = fmid_idx + 1
n_right = n_colors - n_left
for i in range(4):
lut_table_new[:fmid_idx + 1, i] = np.interp(
np.linspace(0, n_colors2 - 1, n_left),
np.arange(n_colors), lut_table[:, i])
lut_table_new[fmid_idx + 1:, i] = np.interp(
np.linspace(n_colors - 1, n_colors2, n_right)[::-1],
np.arange(n_colors), lut_table[:, i])
return lut_table_new
def get_fill_colors(cols, n_fill):
"""Get the fill colors for the middle of divergent colormaps."""
steps = np.linalg.norm(np.diff(cols[:, :3].astype(float), axis=0),
axis=1)
ind = np.flatnonzero(steps[1:-1] > steps[[0, -1]].mean() * 3)
if ind.size > 0:
# choose the two colors between which there is the large step
ind = ind[0] + 1
fillcols = np.r_[np.tile(cols[ind, :], (n_fill / 2, 1)),
np.tile(cols[ind + 1, :],
(n_fill - n_fill / 2, 1))]
else:
# choose a color from the middle of the colormap
fillcols = np.tile(cols[int(cols.shape[0] / 2), :], (n_fill, 1))
return fillcols
def calculate_lut(lut_table, alpha, fmin, fmid, fmax, center=None,
transparent=True):
u"""Transparent color map calculation.
A colormap may be sequential or divergent. When the colormap is
    divergent, indicate this by providing a value for 'center'. The
meanings of fmin, fmid and fmax are different for sequential and
divergent colormaps. A sequential colormap is characterised by::
[fmin, fmid, fmax]
where fmin and fmax define the edges of the colormap and fmid
will be the value mapped to the center of the originally chosen colormap.
A divergent colormap is characterised by::
[center-fmax, center-fmid, center-fmin, center,
center+fmin, center+fmid, center+fmax]
i.e., values between center-fmin and center+fmin will not be shown
while center-fmid will map to the fmid of the first half of the
    original colormap and center+fmid to the fmid of the second half.
Parameters
----------
    lut_table : Colormap
Color map obtained from _process_mapdata.
alpha : float
Alpha value to apply globally to the overlay. Has no effect with mpl
backend.
fmin : float
Min value in colormap.
fmid : float
Intermediate value in colormap.
fmax : float
Max value in colormap.
center : float or None
If not None, center of a divergent colormap, changes the meaning of
fmin, fmax and fmid.
transparent : boolean
        If True, use a linear transparency between fmin and fmid and make
values below fmin fully transparent (symmetrically for divergent
colormaps)
Returns
-------
cmap : matplotlib.ListedColormap
Color map with transparency channel.
"""
if not fmin <= fmid <= fmax:
raise ValueError('Must have fmin (%s) <= fmid (%s) <= fmax (%s)'
% (fmin, fmid, fmax))
lut_table = create_lut(lut_table)
assert lut_table.dtype.kind == 'i'
divergent = center is not None
n_colors = lut_table.shape[0]
# Add transparency if needed
n_colors2 = n_colors // 2
if transparent:
if divergent:
N4 = np.full(4, n_colors // 4)
N4[[0, 3, 1, 2][:np.mod(n_colors, 4)]] += 1
assert N4.sum() == n_colors
lut_table[:, -1] = np.round(np.hstack([
np.full(N4[0], 255.),
np.linspace(0, 255, N4[1])[::-1],
np.linspace(0, 255, N4[2]),
np.full(N4[3], 255.)]))
else:
lut_table[:n_colors2, -1] = np.round(np.linspace(
0, 255, n_colors2))
lut_table[n_colors2:, -1] = 255
alpha = float(alpha)
if alpha < 1.0:
lut_table[:, -1] = np.round(lut_table[:, -1] * alpha)
if divergent:
if fmax == fmin:
lut_table = np.r_[
lut_table[:1],
get_fill_colors(
lut_table[n_colors2 - 3:n_colors2 + 3, :], n_colors - 2),
lut_table[-1:]]
else:
n_fill = int(round(fmin * n_colors2 / (fmax - fmin))) * 2
lut_table = np.r_[
scale_sequential_lut(lut_table[:n_colors2, :],
center - fmax, center - fmid,
center - fmin),
get_fill_colors(
lut_table[n_colors2 - 3:n_colors2 + 3, :], n_fill),
scale_sequential_lut(lut_table[n_colors2:, :][::-1],
center - fmax, center - fmid,
center - fmin)[::-1]]
else:
lut_table = scale_sequential_lut(lut_table, fmin, fmid, fmax)
n_colors = lut_table.shape[0]
if n_colors != 256:
lut = np.zeros((256, 4))
x = np.linspace(1, n_colors, 256)
for chan in range(4):
lut[:, chan] = np.interp(x,
np.arange(1, n_colors + 1),
lut_table[:, chan])
lut_table = lut
lut_table = lut_table.astype(np.float64) / 255.0
return lut_table
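# --- Added usage sketch (illustrative, not part of the module): building a
# transparent LUT for a sequential map and for a divergent map centred on zero.
# It assumes _get_cmap (via create_lut) accepts standard matplotlib colormap
# names such as 'viridis' and 'RdBu_r'; both calls should return a (256, 4)
# RGBA array with values in [0, 1].
def _sketch_calculate_lut_usage():
    sequential = calculate_lut('viridis', alpha=1., fmin=0., fmid=5., fmax=10.)
    divergent = calculate_lut('RdBu_r', alpha=1., fmin=1., fmid=3., fmax=6.,
                              center=0.)
    return sequential, divergent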
|
bsd-3-clause
|
cllamb0/dosenet-raspberrypi
|
plot_data.py
|
2
|
3054
|
# -*- coding: utf-8 -*-
"""
Created on Mon Jul 17 10:46:09 2017
@author: Ludi Cao
"""
import weather_DAQ
import tkinter as tk
import matplotlib
matplotlib.use("TkAgg")
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2TkAgg
from matplotlib.figure import Figure
LARGE_FONT= ("Verdana", 12)
wdaq = weather_DAQ.weather_DAQ()
class SeaofBTCapp(tk.Tk):
def __init__(self, *args, **kwargs):
tk.Tk.__init__(self, *args, **kwargs)
container = tk.Frame(self)
container.pack(side="top", fill="both", expand = True)
container.grid_rowconfigure(0, weight=1)
container.grid_columnconfigure(0, weight=1)
self.frames = {}
for F in (StartPage, PageOne, PageTwo):
frame = F(container, self)
self.frames[F] = frame
frame.grid(row=0, column=0, sticky="nsew")
self.show_frame(StartPage)
def show_frame(self, cont):
frame = self.frames[cont]
frame.tkraise()
class StartPage(tk.Frame):
def __init__(self, parent, controller):
tk.Frame.__init__(self,parent)
label = tk.Label(self, text="Start Page", font=LARGE_FONT)
label.pack(pady=10,padx=10)
button = tk.Button(self, text="Visit Page 1",
command=lambda: controller.show_frame(PageOne))
button.pack()
button2 = tk.Button(self, text="Visit Page 2",
command=lambda: controller.show_frame(PageTwo))
button2.pack()
class PageOne(tk.Frame):
def __init__(self, parent, controller):
tk.Frame.__init__(self, parent)
label = tk.Label(self, text="Page One!!!", font=LARGE_FONT)
label.pack(pady=10,padx=10)
button1 = tk.Button(self, text="Back to Home",
command=lambda: controller.show_frame(StartPage))
button1.pack()
button2 = tk.Button(self, text="Page Two",
command=lambda: controller.show_frame(PageTwo))
button2.pack()
class PageTwo(tk.Frame):
def __init__(self, parent, controller):
tk.Frame.__init__(self, parent)
label = tk.Label(self, text="Page Two!!!", font=LARGE_FONT)
label.pack(pady=10,padx=10)
button1 = tk.Button(self, text="Back to Home",
command=lambda: controller.show_frame(StartPage))
button1.pack()
button2 = tk.Button(self, text="Page One",
command=lambda: controller.show_frame(PageOne))
button2.pack()
class PageThree(tk.Frame):
def __init__(self, parent, controller):
tk.Frame.__init__(self, parent)
label = tk.Label(self, text="Graph Page!", font=LARGE_FONT)
label.pack(pady=10,padx=10)
button1 = tk.Button(self, text="Back to Home",
command=lambda: controller.show_frame(StartPage))
button1.pack()
wdaq.plotdata()
app = SeaofBTCapp()
app.mainloop()
|
mit
|
bigdataelephants/scikit-learn
|
examples/feature_selection/plot_feature_selection.py
|
249
|
2827
|
"""
===============================
Univariate Feature Selection
===============================
An example showing univariate feature selection.
Noisy (non informative) features are added to the iris data and
univariate feature selection is applied. For each feature, we plot the
p-values for the univariate feature selection and the corresponding
weights of an SVM. We can see that univariate feature selection
selects the informative features and that these have larger SVM weights.
In the total set of features, only the first 4 are significant. We
can see that they have the highest score with univariate feature
selection. The SVM assigns a large weight to one of these features, but also
selects many of the non-informative features.
Applying univariate feature selection before the SVM
increases the SVM weight attributed to the significant features, and will
thus improve classification.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets, svm
from sklearn.feature_selection import SelectPercentile, f_classif
###############################################################################
# import some data to play with
# The iris dataset
iris = datasets.load_iris()
# Some noisy data not correlated
E = np.random.uniform(0, 0.1, size=(len(iris.data), 20))
# Add the noisy data to the informative features
X = np.hstack((iris.data, E))
y = iris.target
###############################################################################
plt.figure(1)
plt.clf()
X_indices = np.arange(X.shape[-1])
###############################################################################
# Univariate feature selection with F-test for feature scoring
# We use the default selection function: the 10% most significant features
selector = SelectPercentile(f_classif, percentile=10)
selector.fit(X, y)
scores = -np.log10(selector.pvalues_)
scores /= scores.max()
plt.bar(X_indices - .45, scores, width=.2,
label=r'Univariate score ($-Log(p_{value})$)', color='g')
###############################################################################
# Compare to the weights of an SVM
clf = svm.SVC(kernel='linear')
clf.fit(X, y)
svm_weights = (clf.coef_ ** 2).sum(axis=0)
svm_weights /= svm_weights.max()
plt.bar(X_indices - .25, svm_weights, width=.2, label='SVM weight', color='r')
clf_selected = svm.SVC(kernel='linear')
clf_selected.fit(selector.transform(X), y)
svm_weights_selected = (clf_selected.coef_ ** 2).sum(axis=0)
svm_weights_selected /= svm_weights_selected.max()
plt.bar(X_indices[selector.get_support()] - .05, svm_weights_selected,
width=.2, label='SVM weights after selection', color='b')
plt.title("Comparing feature selection")
plt.xlabel('Feature number')
plt.yticks(())
plt.axis('tight')
plt.legend(loc='upper right')
plt.show()
|
bsd-3-clause
|
NetSys/demi-applications
|
interposition/src/main/python/minimization_stats/generate_graph.py
|
2
|
6239
|
#!/usr/bin/env python2.7
#
# Copyright 2011-2013 Colin Scott
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# TODO(cs): should use matplotlib instead of the template
from gpi_template import template
import string
import argparse
import json
import os
from collections import namedtuple
# Returns:
# hashmap:
# {
# { iteration -> iteration size }
# { internal_iteration -> iteration size }
# }
def interpolate_datapoints(stats):
# Two tasks:
# - String all of the inner json objects into a single hashmap of datapoints
# - If there is a gap in iteration sizes, fill it in with the previous
# value
# TODO(cs): visually delineate the different stages of minimization
return {
"iteration_size": get_external_progress(stats),
"internal_iteration_size": get_internal_progress(stats)
}
def get_internal_progress(stats):
ret = {}
iteration = 0
# Provenance isn't applied in all cases, so use original
# provenance = stats[1]
orig = stats[1]
ret[iteration] = int(orig["minimized_deliveries"])
iteration += 1
for obj in stats[2:]:
sorted_keys = sorted(map(int, obj["internal_iteration_size"].keys()))
if (len(sorted_keys) == 0):
# This wasn't an internal minimization run. The only (monotonic) progress that would
# have been made would be at the very end, where the external event MCS
# was verified, and there were absent expected events.
# TODO(cs): probably off by one on the x-axis
for i in range(obj["total_replays"]-1):
ret[iteration] = ret[iteration-1]
iteration += 1
ret[iteration] = obj["minimized_deliveries"]
iteration += 1
continue
# Smallest index so far for this object:
bottom = 1
for k in sorted_keys:
while bottom < k:
ret[iteration] = ret[iteration-1]
iteration += 1
bottom += 1
ret[iteration] = int(obj["internal_iteration_size"][str(k)])
iteration += 1
bottom += 1
return ret
def get_external_progress(stats):
ret = {}
iteration = 0
# Get initial iteration_size from first DDMin run.
# ["minimized_externals" does not include Start events]
lowest_key = str(map(int, sorted(stats[2]["iteration_size"].keys()))[0])
ret[iteration] = stats[2]["iteration_size"][lowest_key]
iteration += 1
for obj in stats[2:]:
sorted_keys = sorted(map(int, obj["iteration_size"].keys()))
if (len(sorted_keys) == 0):
# This wasn't an external minimization run, so no external progress was
# made
for i in range(obj["total_replays"]):
ret[iteration] = ret[iteration-1]
iteration += 1
continue
# Smallest index so far for this object:
bottom = 1
for k in sorted_keys:
while bottom < k:
ret[iteration] = ret[iteration-1]
iteration += 1
bottom += 1
ret[iteration] = int(obj["iteration_size"][str(k)])
iteration += 1
bottom += 1
return ret
def write_data_file(dat_filename, stats):
''' Write out the datapoints. Return the maximum x-value '''
xmax = 0
datapoints = interpolate_datapoints(stats)
for t in ["internal_iteration_size", "iteration_size"]:
sorted_keys = datapoints[t].keys()
sorted_keys.sort(lambda a,b: -1 if int(a) < int(b) else 1 if int(a) > int(b) else 0)
if int(sorted_keys[-1]) > xmax:
xmax = int(sorted_keys[-1])
with open(dat_filename + "_" + t, "w") as dat:
for key in sorted_keys:
dat.write(str(key) + " " + str(datapoints[t][key]) + '\n')
return xmax
def load_json(json_input):
with open(json_input) as json_input_file:
return json.load(json_input_file)
DataInfo = namedtuple('DataInfo', ['filename', 'title'])
def write_gpi_template(gpi_filename, output_filename, data_info_list, xmax=None, title=""):
with open(gpi_filename, "w") as gpi:
# Finish off the rest of the template
gpi.write(template)
if title != "":
gpi.write('''set title "%s"\n''' % title)
gpi.write('''set output "%s"\n''' % output_filename)
if xmax != None:
gpi.write('''set xrange [0:%d]\n''' % xmax)
gpi.write('''plot ''')
line_type_counter = 1
for i, data_info in enumerate(data_info_list):
first_t = True
for t,title in [("internal_iteration_size", "Deliveries"),
("iteration_size", "Externals")]:
expanded_title = title if data_info.title == "" else data_info.title + "_" + title
gpi.write('''"%s" index 0:1 title "%s" with steps ls %d''' %
(data_info.filename + "_" + t, expanded_title,
line_type_counter))
line_type_counter += 1
if first_t:
gpi.write(", \\\n")
first_t = False
elif i != len(data_info_list) - 1:
gpi.write(", \\\n")
else:
gpi.write("\n")
def invoke_gnuplot(gpi_filename):
os.system("gnuplot %s" % gpi_filename)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="generate a plot")
parser.add_argument('input', metavar="INPUT",
help='''The input json file''')
parser.add_argument('-x', '--xmax', type=int,
help='''Truncate the x dimension''')
args = parser.parse_args()
stats = load_json(args.input)
gpi_filename = string.replace(args.input, ".json", ".gpi")
output_filename = string.replace(args.input, ".json", ".pdf")
dat_filename = string.replace(args.input, ".json", ".dat")
max_x_value = write_data_file(dat_filename, stats)
data_info_list = [DataInfo(title="", filename=dat_filename)]
xmax = args.xmax
if (xmax == None):
xmax = max_x_value
write_gpi_template(gpi_filename, output_filename, data_info_list, xmax=xmax)
invoke_gnuplot(gpi_filename)
|
bsd-2-clause
|
JPFrancoia/scikit-learn
|
examples/ensemble/plot_random_forest_regression_multioutput.py
|
46
|
2640
|
"""
============================================================
Comparing random forests and the multi-output meta estimator
============================================================
An example to compare multi-output regression with random forest and
the :ref:`multioutput.MultiOutputRegressor <multiclass>` meta-estimator.
This example illustrates the use of the
:ref:`multioutput.MultiOutputRegressor <multiclass>` meta-estimator
to perform multi-output regression. A random forest regressor is used,
which supports multi-output regression natively, so the results can be
compared.
The random forest regressor will only ever predict values within the
range of observations or closer to zero for each of the targets. As a
result the predictions are biased towards the centre of the circle.
Using a single underlying feature the model learns both the
x and y coordinate as output.
"""
print(__doc__)
# Author: Tim Head <[email protected]>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import train_test_split
from sklearn.multioutput import MultiOutputRegressor
# Create a random dataset
rng = np.random.RandomState(1)
X = np.sort(200 * rng.rand(600, 1) - 100, axis=0)
y = np.array([np.pi * np.sin(X).ravel(), np.pi * np.cos(X).ravel()]).T
y += (0.5 - rng.rand(*y.shape))
X_train, X_test, y_train, y_test = train_test_split(X, y,
train_size=400,
random_state=4)
max_depth = 30
regr_multirf = MultiOutputRegressor(RandomForestRegressor(max_depth=max_depth,
random_state=0))
regr_multirf.fit(X_train, y_train)
regr_rf = RandomForestRegressor(max_depth=max_depth, random_state=2)
regr_rf.fit(X_train, y_train)
# Predict on new data
y_multirf = regr_multirf.predict(X_test)
y_rf = regr_rf.predict(X_test)
# Plot the results
plt.figure()
s = 50
a = 0.4
plt.scatter(y_test[:, 0], y_test[:, 1],
c="navy", s=s, marker="s", alpha=a, label="Data")
plt.scatter(y_multirf[:, 0], y_multirf[:, 1],
c="cornflowerblue", s=s, alpha=a,
label="Multi RF score=%.2f" % regr_multirf.score(X_test, y_test))
plt.scatter(y_rf[:, 0], y_rf[:, 1],
c="c", s=s, marker="^", alpha=a,
label="RF score=%.2f" % regr_rf.score(X_test, y_test))
plt.xlim([-6, 6])
plt.ylim([-6, 6])
plt.xlabel("target 1")
plt.ylabel("target 2")
plt.title("Comparing random forests and the multi-output meta estimator")
plt.legend()
plt.show()
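# A small, hedged follow-up added for illustration (it relies only on the
# estimators_ attribute that MultiOutputRegressor exposes after fit): the
# meta-estimator wraps one single-target forest per output column, whereas
# RandomForestRegressor models both targets with a single forest.
print("MultiOutputRegressor fitted %d single-target forests"
      % len(regr_multirf.estimators_))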
|
bsd-3-clause
|
Eric89GXL/scikit-learn
|
examples/manifold/plot_manifold_sphere.py
|
8
|
4585
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=============================================
Manifold Learning methods on a severed sphere
=============================================
An application of the different :ref:`manifold` techniques
on a spherical data-set. Here one can see the use of
dimensionality reduction in order to gain some intuition
regarding the Manifold learning methods. Regarding the dataset,
the poles are cut from the sphere, as well as a thin slice down its
side. This enables the manifold learning techniques to
'spread it open' whilst projecting it onto two dimensions.
For a similar example, where the methods are applied to the
S-curve dataset, see :ref:`example_manifold_plot_compare_methods.py`
Note that the purpose of :ref:`MDS <multidimensional_scaling>` is
to find a low-dimensional representation of the data (here 2D) in
which the distances respect well the distances in the original
high-dimensional space; unlike other manifold-learning algorithms,
it does not seek an isotropic representation of the data in
the low-dimensional space. Here the manifold problem is fairly
close to that of representing a flat map of the Earth, as in a
`map projection <http://en.wikipedia.org/wiki/Map_projection>`_.
"""
# Author: Jaques Grobler <[email protected]>
# License: BSD 3 clause
print(__doc__)
from time import time
import numpy as np
import pylab as pl
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.ticker import NullFormatter
from sklearn import manifold
from sklearn.utils import check_random_state
# Next line to silence pyflakes.
Axes3D
# Variables for manifold learning.
n_neighbors = 10
n_samples = 1000
# Create our sphere.
random_state = check_random_state(0)
p = random_state.rand(n_samples) * (2 * np.pi - 0.55)
t = random_state.rand(n_samples) * np.pi
# Sever the poles from the sphere.
indices = ((t < (np.pi - (np.pi / 8))) & (t > ((np.pi / 8))))
colors = p[indices]
x, y, z = np.sin(t[indices]) * np.cos(p[indices]), \
np.sin(t[indices]) * np.sin(p[indices]), \
np.cos(t[indices])
# Plot our dataset.
fig = pl.figure(figsize=(15, 8))
pl.suptitle("Manifold Learning with %i points, %i neighbors"
            % (n_samples, n_neighbors), fontsize=14)
ax = fig.add_subplot(241, projection='3d')
ax.scatter(x, y, z, c=p[indices], cmap=pl.cm.rainbow)
try:
# compatibility matplotlib < 1.0
ax.view_init(40, -10)
except:
pass
sphere_data = np.array([x, y, z]).T
# Perform Locally Linear Embedding Manifold learning
methods = ['standard', 'ltsa', 'hessian', 'modified']
labels = ['LLE', 'LTSA', 'Hessian LLE', 'Modified LLE']
for i, method in enumerate(methods):
t0 = time()
trans_data = manifold\
.LocallyLinearEmbedding(n_neighbors, 2,
method=method).fit_transform(sphere_data).T
t1 = time()
print("%s: %.2g sec" % (methods[i], t1 - t0))
ax = fig.add_subplot(242 + i)
pl.scatter(trans_data[0], trans_data[1], c=colors, cmap=pl.cm.rainbow)
pl.title("%s (%.2g sec)" % (labels[i], t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
pl.axis('tight')
# Perform Isomap Manifold learning.
t0 = time()
trans_data = manifold.Isomap(n_neighbors, n_components=2)\
.fit_transform(sphere_data).T
t1 = time()
print("%s: %.2g sec" % ('ISO', t1 - t0))
ax = fig.add_subplot(246)
pl.scatter(trans_data[0], trans_data[1], c=colors, cmap=pl.cm.rainbow)
pl.title("%s (%.2g sec)" % ('Isomap', t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
pl.axis('tight')
# Perform Multi-dimensional scaling.
t0 = time()
mds = manifold.MDS(2, max_iter=100, n_init=1)
trans_data = mds.fit_transform(sphere_data).T
t1 = time()
print("MDS: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(247)
pl.scatter(trans_data[0], trans_data[1], c=colors, cmap=pl.cm.rainbow)
pl.title("MDS (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
pl.axis('tight')
# Perform Spectral Embedding.
t0 = time()
se = manifold.SpectralEmbedding(n_components=2,
n_neighbors=n_neighbors)
trans_data = se.fit_transform(sphere_data).T
t1 = time()
print("Spectral Embedding: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(248)
pl.scatter(trans_data[0], trans_data[1], c=colors, cmap=pl.cm.rainbow)
pl.title("Spectral Embedding (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
pl.axis('tight')
pl.show()
|
bsd-3-clause
|
ashhher3/ibis
|
ibis/sql/alchemy.py
|
6
|
22574
|
# Copyright 2015 Cloudera Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import operator
import six
import sqlalchemy as sa
import sqlalchemy.sql as sql
from ibis.client import SQLClient, AsyncQuery, Query
from ibis.sql.compiler import Select, Union, TableSetFormatter
import ibis.common as com
import ibis.expr.datatypes as dt
import ibis.expr.operations as ops
import ibis.expr.types as ir
import ibis.sql.compiler as comp
import ibis.sql.transforms as transforms
import ibis
_ibis_type_to_sqla = {
dt.Int8: sa.types.SmallInteger,
dt.Int16: sa.types.SmallInteger,
dt.Int32: sa.types.Integer,
dt.Int64: sa.types.BigInteger,
# Mantissa-based
dt.Float: sa.types.Float(precision=24),
dt.Double: sa.types.Float(precision=53),
dt.Boolean: sa.types.Boolean,
dt.String: sa.types.String,
dt.Timestamp: sa.types.DateTime,
dt.Decimal: sa.types.NUMERIC,
}
_sqla_type_mapping = {
sa.types.SmallInteger: dt.Int16,
sa.types.INTEGER: dt.Int64,
sa.types.BOOLEAN: dt.Boolean,
sa.types.BIGINT: dt.Int64,
sa.types.FLOAT: dt.Double,
sa.types.REAL: dt.Double,
sa.types.TEXT: dt.String,
sa.types.NullType: dt.String,
sa.types.Text: dt.String,
}
_sqla_type_to_ibis = dict((v, k) for k, v in
_ibis_type_to_sqla.items())
_sqla_type_to_ibis.update(_sqla_type_mapping)
def schema_from_table(table):
# Convert SQLA table to Ibis schema
names = table.columns.keys()
types = []
for c in table.columns.values():
type_class = type(c.type)
if isinstance(c.type, sa.types.NUMERIC):
t = dt.Decimal(c.type.precision,
c.type.scale,
nullable=c.nullable)
else:
if c.type in _sqla_type_to_ibis:
ibis_class = _sqla_type_to_ibis[c.type]
elif type_class in _sqla_type_to_ibis:
ibis_class = _sqla_type_to_ibis[type_class]
else:
raise NotImplementedError(c.type)
t = ibis_class(c.nullable)
types.append(t)
return dt.Schema(names, types)
def table_from_schema(name, meta, schema):
# Convert Ibis schema to SQLA table
sqla_cols = []
for cname, itype in zip(schema.names, schema.types):
ctype = _to_sqla_type(itype)
col = sa.Column(cname, ctype, nullable=itype.nullable)
sqla_cols.append(col)
return sa.Table(name, meta, *sqla_cols)
def _to_sqla_type(itype):
if isinstance(itype, dt.Decimal):
return sa.types.NUMERIC(itype.precision, itype.scale)
else:
return _ibis_type_to_sqla[type(itype)]
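# The helpers above convert between SQLAlchemy tables and Ibis schemas. The
# sketch below is a hedged, illustrative round-trip; it is never called by
# this module and its table/column names are invented for the example.
def _example_schema_roundtrip():
    meta = sa.MetaData()
    users = sa.Table('users', meta,
                     sa.Column('id', sa.types.BigInteger, nullable=False),
                     sa.Column('name', sa.types.String))
    ibis_schema = schema_from_table(users)      # SQLA table -> Ibis schema
    return table_from_schema('users_copy', meta, ibis_schema)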
def fixed_arity(sa_func, arity):
if isinstance(sa_func, six.string_types):
sa_func = getattr(sa.func, sa_func)
def formatter(t, expr):
if arity != len(expr.op().args):
raise com.IbisError('incorrect number of args')
return _varargs_call(sa_func, t, expr)
return formatter
def varargs(sa_func):
def formatter(t, expr):
op = expr.op()
trans_args = [t.translate(arg) for arg in op.args]
return sa_func(*trans_args)
return formatter
def _varargs_call(sa_func, t, expr):
op = expr.op()
trans_args = [t.translate(arg) for arg in op.args]
return sa_func(*trans_args)
def _table_column(t, expr):
op = expr.op()
ctx = t.context
table = op.table
sa_table = _get_sqla_table(ctx, table)
out_expr = getattr(sa_table.c, op.name)
# If the column does not originate from the table set in the current SELECT
# context, we should format as a subquery
if t.permit_subquery and ctx.is_foreign_expr(table):
return sa.select([out_expr])
return out_expr
def _get_sqla_table(ctx, table):
if ctx.has_ref(table):
ctx_level = ctx
sa_table = ctx_level.get_table(table)
while sa_table is None and ctx_level.parent is not ctx_level:
ctx_level = ctx_level.parent
sa_table = ctx_level.get_table(table)
else:
sa_table = table.op().sqla_table
return sa_table
def _table_array_view(t, expr):
ctx = t.context
table = ctx.get_compiled_expr(expr.op().table)
return table
def _exists_subquery(t, expr):
op = expr.op()
ctx = t.context
filtered = (op.foreign_table.filter(op.predicates)
.projection([ir.literal(1).name(ir.unnamed)]))
sub_ctx = ctx.subcontext()
clause = to_sqlalchemy(filtered, context=sub_ctx, exists=True)
if isinstance(op, transforms.NotExistsSubquery):
clause = sa.not_(clause)
return clause
def _cast(t, expr):
op = expr.op()
arg, target_type = op.args
sa_arg = t.translate(arg)
sa_type = t.get_sqla_type(target_type)
if isinstance(arg, ir.CategoryValue) and target_type == 'int32':
return sa_arg
else:
return sa.cast(sa_arg, sa_type)
def _contains(t, expr):
op = expr.op()
left, right = [t.translate(arg) for arg in op.args]
return left.in_(right)
def _reduction(sa_func):
def formatter(t, expr):
op = expr.op()
# HACK: support trailing arguments
arg, where = op.args[:2]
return _reduction_format(t, sa_func, arg, where)
return formatter
def _reduction_format(t, sa_func, arg, where):
if where is not None:
case = where.ifelse(arg, ibis.NA)
arg = t.translate(case)
else:
arg = t.translate(arg)
return sa_func(arg)
def _literal(t, expr):
return sa.literal(expr.op().value)
def _value_list(t, expr):
return [t.translate(x) for x in expr.op().values]
def _is_null(t, expr):
arg = t.translate(expr.op().args[0])
return arg.is_(sa.null())
def _not_null(t, expr):
arg = t.translate(expr.op().args[0])
return arg.isnot(sa.null())
def _round(t, expr):
op = expr.op()
arg, digits = op.args
sa_arg = t.translate(arg)
f = sa.func.round
if digits is not None:
sa_digits = t.translate(digits)
return f(sa_arg, sa_digits)
else:
return f(sa_arg)
def _count_distinct(t, expr):
arg, = expr.op().args
sa_arg = t.translate(arg)
return sa.func.count(sa_arg.distinct())
def _simple_case(t, expr):
op = expr.op()
cases = [op.base == case for case in op.cases]
return _translate_case(t, cases, op.results, op.default)
def _searched_case(t, expr):
op = expr.op()
return _translate_case(t, op.cases, op.results, op.default)
def _translate_case(t, cases, results, default):
case_args = [t.translate(arg) for arg in cases]
result_args = [t.translate(arg) for arg in results]
whens = zip(case_args, result_args)
default = t.translate(default)
return sa.case(whens, else_=default)
def unary(sa_func):
return fixed_arity(sa_func, 1)
_operation_registry = {
ops.And: fixed_arity(sql.and_, 2),
ops.Or: fixed_arity(sql.or_, 2),
ops.Abs: unary(sa.func.abs),
ops.Cast: _cast,
ops.Coalesce: varargs(sa.func.coalesce),
ops.NullIf: fixed_arity(sa.func.nullif, 2),
ops.Contains: _contains,
ops.Count: _reduction(sa.func.count),
ops.Sum: _reduction(sa.func.sum),
ops.Mean: _reduction(sa.func.avg),
ops.Min: _reduction(sa.func.min),
ops.Max: _reduction(sa.func.max),
ops.CountDistinct: _count_distinct,
ops.GroupConcat: fixed_arity(sa.func.group_concat, 2),
ops.Between: fixed_arity(sa.between, 3),
ops.IsNull: _is_null,
ops.NotNull: _not_null,
ops.Negate: unary(sa.not_),
ops.Round: _round,
ops.TypeOf: unary(sa.func.typeof),
ir.Literal: _literal,
ir.ValueList: _value_list,
ir.NullLiteral: lambda *args: sa.null(),
ops.SimpleCase: _simple_case,
ops.SearchedCase: _searched_case,
ops.TableColumn: _table_column,
ops.TableArrayView: _table_array_view,
transforms.ExistsSubquery: _exists_subquery,
transforms.NotExistsSubquery: _exists_subquery,
}
# TODO: unit tests for each of these
_binary_ops = {
# Binary arithmetic
ops.Add: operator.add,
ops.Subtract: operator.sub,
ops.Multiply: operator.mul,
ops.Divide: operator.truediv,
ops.Power: operator.pow,
ops.Modulus: operator.mod,
# Comparisons
ops.Equals: operator.eq,
ops.NotEquals: operator.ne,
ops.Less: operator.lt,
ops.LessEqual: operator.le,
ops.Greater: operator.gt,
ops.GreaterEqual: operator.ge,
# Boolean comparisons
# TODO
}
for _k, _v in _binary_ops.items():
_operation_registry[_k] = fixed_arity(_v, 2)
class AlchemySelectBuilder(comp.SelectBuilder):
@property
def _select_class(self):
return AlchemySelect
def _convert_group_by(self, exprs):
return exprs
class AlchemyContext(comp.QueryContext):
def __init__(self, *args, **kwargs):
self._table_objects = {}
self.dialect = kwargs.pop('dialect', AlchemyDialect)
comp.QueryContext.__init__(self, *args, **kwargs)
def subcontext(self):
return type(self)(dialect=self.dialect, parent=self)
def _to_sql(self, expr, ctx):
return to_sqlalchemy(expr, context=ctx)
def _compile_subquery(self, expr):
sub_ctx = self.subcontext()
return self._to_sql(expr, sub_ctx)
def has_table(self, expr, parent_contexts=False):
key = self._get_table_key(expr)
return self._key_in(key, '_table_objects',
parent_contexts=parent_contexts)
def set_table(self, expr, obj):
key = self._get_table_key(expr)
self._table_objects[key] = obj
def get_table(self, expr):
"""
Get the memoized SQLAlchemy expression object
"""
return self._get_table_item('_table_objects', expr)
class AlchemyQueryBuilder(comp.QueryBuilder):
select_builder = AlchemySelectBuilder
def __init__(self, expr, context=None, dialect=None):
if dialect is None:
dialect = AlchemyDialect
self.dialect = dialect
comp.QueryBuilder.__init__(self, expr, context=context)
def _make_context(self):
return AlchemyContext(dialect=self.dialect)
@property
def _union_class(self):
return AlchemyUnion
def to_sqlalchemy(expr, context=None, exists=False, dialect=None):
if context is not None:
dialect = dialect or context.dialect
ast = build_ast(expr, context=context, dialect=dialect)
query = ast.queries[0]
if exists:
query.exists = exists
return query.compile()
def build_ast(expr, context=None, dialect=None):
builder = AlchemyQueryBuilder(expr, context=context, dialect=dialect)
return builder.get_result()
class AlchemyTable(ops.DatabaseTable):
def __init__(self, table, source):
self.sqla_table = table
schema = schema_from_table(table)
name = table.name
ops.TableNode.__init__(self, [name, schema, source])
ops.HasSchema.__init__(self, schema, name=name)
class AlchemyExprTranslator(comp.ExprTranslator):
_registry = _operation_registry
_rewrites = comp.ExprTranslator._rewrites.copy()
_type_map = _ibis_type_to_sqla
def name(self, translated, name, force=True):
return translated.label(name)
@property
def _context_class(self):
return AlchemyContext
def get_sqla_type(self, data_type):
return self._type_map[type(data_type)]
rewrites = AlchemyExprTranslator.rewrites
class AlchemyQuery(Query):
def _fetch_from_cursor(self, cursor):
# No guarantees that the DBAPI cursor has data types
import pandas as pd
proxy = cursor.proxy
rows = proxy.fetchall()
colnames = proxy.keys()
return pd.DataFrame.from_records(rows, columns=colnames,
coerce_float=True)
class AlchemyAsyncQuery(AsyncQuery):
pass
class AlchemyDialect(object):
translator = AlchemyExprTranslator
class AlchemyClient(SQLClient):
dialect = AlchemyDialect
sync_query = AlchemyQuery
@property
def async_query(self):
raise NotImplementedError
def create_table(self, name, expr=None, schema=None, database=None):
pass
def list_tables(self, like=None, database=None):
"""
List tables in the current (or indicated) database.
Parameters
----------
like : string, default None
Checks for this string contained in name
database : string, default None
If not passed, uses the current/default database
Returns
-------
tables : list of strings
"""
if database is None:
database = self.current_database
names = self.con.table_names(schema=database)
if like is not None:
names = [x for x in names if like in x]
return names
def _execute(self, query, results=True):
return AlchemyProxy(self.con.execute(query))
def _build_ast(self, expr):
return build_ast(expr, dialect=self.dialect)
def _get_sqla_table(self, name):
return sa.Table(name, self.meta, autoload=True)
def _sqla_table_to_expr(self, table):
node = AlchemyTable(table, self)
return self._table_expr_klass(node)
class AlchemySelect(Select):
def __init__(self, *args, **kwargs):
self.exists = kwargs.pop('exists', False)
Select.__init__(self, *args, **kwargs)
def compile(self):
# Can't tell if this is a hack or not. Revisit later
self.context.set_query(self)
self._compile_subqueries()
frag = self._compile_table_set()
steps = [self._add_select,
self._add_groupby,
self._add_where,
self._add_order_by,
self._add_limit]
for step in steps:
frag = step(frag)
return frag
def _compile_subqueries(self):
if len(self.subqueries) == 0:
return
for expr in self.subqueries:
result = self.context.get_compiled_expr(expr)
alias = self.context.get_ref(expr)
result = result.cte(alias)
self.context.set_table(expr, result)
def _compile_table_set(self):
if self.table_set is not None:
helper = _AlchemyTableSet(self, self.table_set)
return helper.get_result()
else:
return None
def _add_select(self, table_set):
to_select = []
for expr in self.select_set:
if isinstance(expr, ir.ValueExpr):
arg = self._translate(expr, named=True)
elif isinstance(expr, ir.TableExpr):
if expr.equals(self.table_set):
cached_table = self.context.get_table(expr)
if cached_table is None:
# the select * case from materialized join
arg = '*'
else:
arg = table_set
else:
arg = self.context.get_table(expr)
if arg is None:
raise ValueError(expr)
to_select.append(arg)
if self.exists:
clause = sa.exists(to_select)
else:
clause = sa.select(to_select)
if self.distinct:
clause = clause.distinct()
if table_set is not None:
return clause.select_from(table_set)
else:
return clause
def _add_groupby(self, fragment):
# GROUP BY and HAVING
if not len(self.group_by):
return fragment
group_keys = [self._translate(arg) for arg in self.group_by]
fragment = fragment.group_by(*group_keys)
if len(self.having) > 0:
having_args = [self._translate(arg) for arg in self.having]
having_clause = _and_all(having_args)
fragment = fragment.having(having_clause)
return fragment
def _add_where(self, fragment):
if not len(self.where):
return fragment
args = [self._translate(pred, permit_subquery=True)
for pred in self.where]
clause = _and_all(args)
return fragment.where(clause)
def _add_order_by(self, fragment):
if not len(self.order_by):
return fragment
clauses = []
for expr in self.order_by:
key = expr.op()
sort_expr = key.expr
            # here we have to determine if key.expr is in the select set (as it
            # will be in the case of order_by fused with an aggregation)
if _can_lower_sort_column(self.table_set, sort_expr):
arg = sort_expr.get_name()
else:
arg = self._translate(sort_expr)
if not key.ascending:
arg = sa.desc(arg)
clauses.append(arg)
return fragment.order_by(*clauses)
def _among_select_set(self, expr):
for other in self.select_set:
if expr.equals(other):
return True
return False
def _add_limit(self, fragment):
if self.limit is None:
return fragment
n, offset = self.limit['n'], self.limit['offset']
fragment = fragment.limit(n)
if offset is not None and offset != 0:
fragment = fragment.offset(offset)
return fragment
@property
def translator(self):
return self.dialect.translator
@property
def dialect(self):
return self.context.dialect
class _AlchemyTableSet(TableSetFormatter):
def get_result(self):
# Got to unravel the join stack; the nesting order could be
# arbitrary, so we do a depth first search and push the join tokens
# and predicates onto a flat list, then format them
op = self.expr.op()
if isinstance(op, ops.Join):
self._walk_join_tree(op)
else:
self.join_tables.append(self._format_table(self.expr))
result = self.join_tables[0]
for jtype, table, preds in zip(self.join_types,
self.join_tables[1:],
self.join_predicates):
if len(preds):
sqla_preds = [self._translate(pred) for pred in preds]
onclause = _and_all(sqla_preds)
else:
onclause = None
if jtype in (ops.InnerJoin, ops.CrossJoin):
result = result.join(table, onclause)
elif jtype is ops.LeftJoin:
result = result.join(table, onclause, isouter=True)
elif jtype is ops.RightJoin:
result = table.join(result, onclause, isouter=True)
elif jtype is ops.OuterJoin:
result = result.outerjoin(table, onclause)
else:
raise NotImplementedError(jtype)
return result
def _get_join_type(self, op):
return type(op)
def _format_table(self, expr):
ctx = self.context
ref_expr = expr
op = ref_op = expr.op()
if isinstance(op, ops.SelfReference):
ref_expr = op.table
ref_op = ref_expr.op()
alias = ctx.get_ref(expr)
if isinstance(ref_op, AlchemyTable):
result = ref_op.sqla_table
else:
# A subquery
if ctx.is_extracted(ref_expr):
# Was put elsewhere, e.g. WITH block, we just need to grab
# its alias
alias = ctx.get_ref(expr)
# hack
if isinstance(op, ops.SelfReference):
table = ctx.get_table(ref_expr)
self_ref = table.alias(alias)
ctx.set_table(expr, self_ref)
return self_ref
else:
return ctx.get_table(expr)
result = ctx.get_compiled_expr(expr)
alias = ctx.get_ref(expr)
result = result.alias(alias)
ctx.set_table(expr, result)
return result
def _can_lower_sort_column(table_set, expr):
# we can currently sort by just-appeared aggregate metrics, but the way
    # these are referenced in the expression DSL is as a SortBy (blocking
    # table operation) on an aggregation. There's a hack in _collect_SortBy
    # in the generic SQL compiler that "fuses" the sort with the
    # aggregation so they appear in the same query. It's generally for
# cosmetics and doesn't really affect query semantics.
bases = ir.find_all_base_tables(expr)
if len(bases) > 1:
return False
base = list(bases.values())[0]
base_op = base.op()
if isinstance(base_op, ops.Aggregation):
return base_op.table.equals(table_set)
elif isinstance(base_op, ops.Projection):
return base.equals(table_set)
else:
return False
def _and_all(clauses):
result = clauses[0]
for clause in clauses[1:]:
result = sql.and_(result, clause)
return result
class AlchemyUnion(Union):
def compile(self):
context = self.context
if self.distinct:
sa_func = sa.union
else:
sa_func = sa.union_all
left_set = context.get_compiled_expr(self.left)
right_set = context.get_compiled_expr(self.right)
return sa_func(left_set, right_set)
class AlchemyProxy(object):
"""
Wraps a SQLAlchemy ResultProxy and ensures that .close() is called on
garbage collection
"""
def __init__(self, proxy):
self.proxy = proxy
def __del__(self):
self._close_cursor()
def _close_cursor(self):
self.proxy.close()
def __enter__(self):
return self
def __exit__(self, type, value, tb):
self._close_cursor()
def fetchall(self):
return self.proxy.fetchall()
@rewrites(ops.NullIfZero)
def _nullifzero(expr):
arg = expr.op().args[0]
return (arg == 0).ifelse(ibis.NA, arg)
|
apache-2.0
|
rafwiewiora/msmbuilder
|
msmbuilder/io/gather_metadata.py
|
9
|
6112
|
# Author: Matthew Harrigan <[email protected]>
# Contributors:
# Copyright (c) 2016, Stanford University
# All rights reserved.
import glob
import os
import re
import warnings
import mdtraj as md
import pandas as pd
class ParseWarning(UserWarning):
pass
class _Parser(object):
def parse_fn(self, fn):
raise NotImplementedError
@property
def index(self):
raise NotImplementedError
class GenericParser(_Parser):
"""Parse trajectories in a fully configurable manner
Parameters
----------
fn_re : str
Regular expression with capture groups to transform trajectory
filenames into keys
group_names : list of str
Capture group names (to serve as MultiIndex names)
group_transforms : list of functions
Apply these functions to capture groups
top_fn : str
Topology filename
step_ps : int
Timestep of frames in picoseconds
"""
def __init__(self,
fn_re,
group_names,
group_transforms,
top_fn,
step_ps,
):
self.fn_re = re.compile(fn_re)
self.group_names = group_names
self.group_transforms = group_transforms
self.top_fn = top_fn
self.step_ps = step_ps
try:
assert os.path.exists(top_fn)
except:
warnings.warn("Topology file doesn't actually exist! "
"You may (will) run into issues later when you "
"try to load it.", ParseWarning)
assert len(group_names) == len(group_transforms)
assert len(group_names) == self.fn_re.groups
@property
def index(self):
return list(self.group_names)
def parse_fn(self, fn):
meta = {
'traj_fn': fn,
'top_fn': self.top_fn,
'top_abs_fn': os.path.abspath(self.top_fn),
}
try:
with md.open(fn) as f:
meta['nframes'] = len(f)
except Exception as e:
warnings.warn("Could not determine the number of frames for {}: {}"
.format(fn, e), ParseWarning)
if self.step_ps is not None:
meta['step_ps'] = self.step_ps
# Get indices
ma = self.fn_re.search(fn)
if ma is None:
            raise ValueError("Filename {} did not match the "
                             "regular expression {}".format(fn, self.fn_re))
meta.update({gn: transform(ma.group(gi))
for gn, transform, gi
in zip(self.group_names, self.group_transforms,
range(1, len(self.group_names) + 1))
})
return meta
class NumberedRunsParser(GenericParser):
"""Parse trajectories that are numbered with integers.
Parameters
----------
traj_fmt : str
A format string with {run} in it that gives the filename to look
for. {run} will be captured and turned into an integer.
top_fn : str
Topology filename
step_ps : int
Trajectory frame step in picoseconds
"""
def __init__(self, traj_fmt="trajectory-{run}.xtc", top_fn="",
step_ps=None):
# Test the input
try:
traj_fmt.format(run=0)
except:
raise ValueError("Invalid format string {}".format(traj_fmt))
# Build a regex from format string
s1, s2 = re.split(r'\{run\}', traj_fmt)
capture_group = r'(\d+)'
fn_re = re.escape(s1) + capture_group + re.escape(s2)
# Call generic
super(NumberedRunsParser, self).__init__(
fn_re=fn_re,
group_names=['run'],
group_transforms=[int],
top_fn=top_fn,
step_ps=step_ps
)
class HierarchyParser(GenericParser):
"""Parse a hierarchical index from files nested in directories
A trajectory with path:
PROJ9704.new/RUN4/CLONE10.xtc
will be given an index of
('PROJ9704.new', 'RUN4', 'CLONE10.xtc')
If you set the flag ignore_fext=True, it will be given an index of
('PROJ9704', 'RUN4', 'CLONE10')
Parameters
----------
levels : list of str
Level names
n_levels : int
Number of levels. Either this or levels must be provided (but
not both). The levels will be named i0, i1, ... i(n-1)
top_fn : str
Topology filename
step_ps : int
Number of picoseconds per frame
ignore_fext : bool
Ignore file extensions. If set to true, this will fail if there is
more than one "." per file/directory name. Anything after a "." is
        considered a file extension and will be ignored, including in
        directory names.
"""
def __init__(self, levels=None, n_levels=None, top_fn="", step_ps=None,
ignore_fext=False):
if (levels is None) == (n_levels is None):
raise ValueError("Please specify levels or n_levels, but not both")
if levels is None:
levels = ["i{i}".format(i=i) for i in range(n_levels)]
if ignore_fext:
# regex notes:
# 1. (?:...) means non-capturing group
subre = r'([a-zA-Z0-9_\-]+)(?:\.[a-zA-Z0-9]+)?'
else:
subre = r'([a-zA-Z0-9_\.\-]+)'
fn_re = r'\/'.join(subre for _ in levels)
super(HierarchyParser, self).__init__(
fn_re=fn_re,
group_names=levels,
group_transforms=[str for _ in levels],
top_fn=top_fn,
step_ps=step_ps
)
def gather_metadata(fn_glob, parser):
"""Given a glob and a parser object, create a metadata dataframe.
Parameters
----------
fn_glob : str
Glob string to find trajectory files.
parser : descendant of _Parser
Object that handles conversion of filenames to metadata rows.
"""
meta = pd.DataFrame(parser.parse_fn(fn) for fn in glob.iglob(fn_glob))
return meta.set_index(parser.index).sort_index()
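# Hedged usage sketch for the parsers defined above. It is not executed on
# import, and the file names and timestep are placeholders, not data shipped
# with this module.
def _example_gather_metadata():
    parser = NumberedRunsParser(traj_fmt="trajectory-{run}.xtc",
                                top_fn="top.pdb", step_ps=50)
    # One metadata row per file matching the glob, indexed by the captured
    # run number.
    return gather_metadata("trajectories/trajectory-*.xtc", parser)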
|
lgpl-2.1
|
Ghalko/idscapstone
|
ids/reduce_avg.py
|
1
|
2926
|
import pandas
import pandasql
#look at seaborn for pandas visualization.
def sql_query(data=None, query=None, cols=None, index=None):
"""Takes dataframe, query, and columns, index, returns dataframe.
    The query needs to refer to the dataframe as the table 'data'."""
if data is None or query is None or cols is None or index is None:
print "Missing input."
return
ndata = pandasql.sqldf(query.lower(), locals())
ndata.columns = cols
ndata = ndata.set_index(index)
return ndata
def sub_data(data=None, col=None, new=None, shift=1):
if data is None or col is None:
print "missing data or column"
return
if new is None:
new = col + "_shifted"
data[new] = data[col] - data.shift(shift)[col]
data = data.fillna(1)
return data
def extract_hour(data=None):
data['HOUR'] = data.DATETIME.map(lambda x: int(x[11])*10 + int(x[12]))
return data
if __name__=="__main__":
master = "/export/data/bgorges/udacityIDS/data/cleanWall.csv"
#Read csv file
mta_data = pandas.read_csv(master)
mta_data = extract_hour(mta_data)
#shifts by one and subtracts.
mta_data = sub_data(mta_data, 'ENTRIES', 'ENTRIES_hourly')
mta_data = sub_data(mta_data, 'EXITS', 'EXITS_hourly')
print mta_data.describe() #just to see what's coming.
#--- Group by time over all the data ------------------------
    q = '''SELECT hour,avg(cast(entries_hourly as integer)),
        avg(cast(exits_hourly as integer)) FROM data
        GROUP BY hour'''
group1 = sql_query(mta_data, q, ['HOUR', 'AVG_ENTRIES', 'AVG_EXITS'],
'HOUR')
filename = "/export/data/bgorges/udacityIDS/data/avg_per_hour_all.csv"
group1.to_csv(filename)
print "All done."
#-------------------------------------------------------------
#--- by weekends --------------------------------------------
q = '''SELECT hour,avg(cast(entries_hourly as integer)),
avg(cast(exits_hourly as integer)) FROM mta_data
WHERE cast(strftime('%w', datetime) as integer)=0
OR cast(strftime('%w', datetime) as integer)=6
GROUP BY hour;'''
group2 = pandasql.sqldf(q.lower(), locals())
group2.columns = ['HOUR', 'AVG_ENTRIES', 'AVG_EXITS']
group2 = group2.set_index('HOUR')
filename = "/export/data/bgorges/udacityIDS/data/avg_per_hour_wkends.csv"
group2.to_csv(filename)
print "Weekends done."
#-----------------------------------------------------------
#--- by weekdays -------------------------------------------
q = '''SELECT hour,avg(cast(entries_hourly as integer)),
avg(cast(exits_hourly as integer)) FROM mta_data
WHERE cast(strftime('%w', datetime) as integer)<>0
        AND cast(strftime('%w', datetime) as integer)<>6
GROUP BY hour;'''
group3 = pandasql.sqldf(q.lower(), locals())
group3.columns = ['HOUR', 'AVG_ENTRIES', 'AVG_EXITS']
group3 = group3.set_index('HOUR')
filename = "/export/data/bgorges/udacityIDS/data/avg_per_hour_wk.csv"
group3.to_csv(filename)
print "done"
|
mit
|
quheng/scikit-learn
|
sklearn/linear_model/ridge.py
|
60
|
44642
|
"""
Ridge regression
"""
# Author: Mathieu Blondel <[email protected]>
# Reuben Fletcher-Costin <[email protected]>
# Fabian Pedregosa <[email protected]>
# Michael Eickenberg <[email protected]>
# License: BSD 3 clause
from abc import ABCMeta, abstractmethod
import warnings
import numpy as np
from scipy import linalg
from scipy import sparse
from scipy.sparse import linalg as sp_linalg
from .base import LinearClassifierMixin, LinearModel, _rescale_data
from .sag import sag_solver
from .sag_fast import get_max_squared_sum
from ..base import RegressorMixin
from ..utils.extmath import safe_sparse_dot
from ..utils import check_X_y
from ..utils import check_array
from ..utils import check_consistent_length
from ..utils import compute_sample_weight
from ..utils import column_or_1d
from ..preprocessing import LabelBinarizer
from ..grid_search import GridSearchCV
from ..externals import six
from ..metrics.scorer import check_scoring
def _solve_sparse_cg(X, y, alpha, max_iter=None, tol=1e-3, verbose=0):
n_samples, n_features = X.shape
X1 = sp_linalg.aslinearoperator(X)
coefs = np.empty((y.shape[1], n_features))
if n_features > n_samples:
def create_mv(curr_alpha):
def _mv(x):
return X1.matvec(X1.rmatvec(x)) + curr_alpha * x
return _mv
else:
def create_mv(curr_alpha):
def _mv(x):
return X1.rmatvec(X1.matvec(x)) + curr_alpha * x
return _mv
for i in range(y.shape[1]):
y_column = y[:, i]
mv = create_mv(alpha[i])
if n_features > n_samples:
# kernel ridge
# w = X.T * inv(X X^t + alpha*Id) y
C = sp_linalg.LinearOperator(
(n_samples, n_samples), matvec=mv, dtype=X.dtype)
coef, info = sp_linalg.cg(C, y_column, tol=tol)
coefs[i] = X1.rmatvec(coef)
else:
# linear ridge
# w = inv(X^t X + alpha*Id) * X.T y
y_column = X1.rmatvec(y_column)
C = sp_linalg.LinearOperator(
(n_features, n_features), matvec=mv, dtype=X.dtype)
coefs[i], info = sp_linalg.cg(C, y_column, maxiter=max_iter,
tol=tol)
if info < 0:
raise ValueError("Failed with error code %d" % info)
if max_iter is None and info > 0 and verbose:
warnings.warn("sparse_cg did not converge after %d iterations." %
info)
return coefs
def _solve_lsqr(X, y, alpha, max_iter=None, tol=1e-3):
n_samples, n_features = X.shape
coefs = np.empty((y.shape[1], n_features))
n_iter = np.empty(y.shape[1], dtype=np.int32)
# According to the lsqr documentation, alpha = damp^2.
sqrt_alpha = np.sqrt(alpha)
for i in range(y.shape[1]):
y_column = y[:, i]
info = sp_linalg.lsqr(X, y_column, damp=sqrt_alpha[i],
atol=tol, btol=tol, iter_lim=max_iter)
coefs[i] = info[0]
n_iter[i] = info[2]
return coefs, n_iter
def _solve_cholesky(X, y, alpha):
# w = inv(X^t X + alpha*Id) * X.T y
n_samples, n_features = X.shape
n_targets = y.shape[1]
A = safe_sparse_dot(X.T, X, dense_output=True)
Xy = safe_sparse_dot(X.T, y, dense_output=True)
one_alpha = np.array_equal(alpha, len(alpha) * [alpha[0]])
if one_alpha:
A.flat[::n_features + 1] += alpha[0]
return linalg.solve(A, Xy, sym_pos=True,
overwrite_a=True).T
else:
coefs = np.empty([n_targets, n_features])
for coef, target, current_alpha in zip(coefs, Xy.T, alpha):
A.flat[::n_features + 1] += current_alpha
coef[:] = linalg.solve(A, target, sym_pos=True,
overwrite_a=False).ravel()
A.flat[::n_features + 1] -= current_alpha
return coefs
def _solve_cholesky_kernel(K, y, alpha, sample_weight=None, copy=False):
# dual_coef = inv(X X^t + alpha*Id) y
n_samples = K.shape[0]
n_targets = y.shape[1]
if copy:
K = K.copy()
alpha = np.atleast_1d(alpha)
one_alpha = (alpha == alpha[0]).all()
has_sw = isinstance(sample_weight, np.ndarray) \
or sample_weight not in [1.0, None]
if has_sw:
# Unlike other solvers, we need to support sample_weight directly
# because K might be a pre-computed kernel.
sw = np.sqrt(np.atleast_1d(sample_weight))
y = y * sw[:, np.newaxis]
K *= np.outer(sw, sw)
if one_alpha:
# Only one penalty, we can solve multi-target problems in one time.
K.flat[::n_samples + 1] += alpha[0]
try:
# Note: we must use overwrite_a=False in order to be able to
# use the fall-back solution below in case a LinAlgError
# is raised
dual_coef = linalg.solve(K, y, sym_pos=True,
overwrite_a=False)
except np.linalg.LinAlgError:
warnings.warn("Singular matrix in solving dual problem. Using "
"least-squares solution instead.")
dual_coef = linalg.lstsq(K, y)[0]
# K is expensive to compute and store in memory so change it back in
# case it was user-given.
K.flat[::n_samples + 1] -= alpha[0]
if has_sw:
dual_coef *= sw[:, np.newaxis]
return dual_coef
else:
# One penalty per target. We need to solve each target separately.
dual_coefs = np.empty([n_targets, n_samples])
for dual_coef, target, current_alpha in zip(dual_coefs, y.T, alpha):
K.flat[::n_samples + 1] += current_alpha
dual_coef[:] = linalg.solve(K, target, sym_pos=True,
overwrite_a=False).ravel()
K.flat[::n_samples + 1] -= current_alpha
if has_sw:
dual_coefs *= sw[np.newaxis, :]
return dual_coefs.T
def _solve_svd(X, y, alpha):
U, s, Vt = linalg.svd(X, full_matrices=False)
idx = s > 1e-15 # same default value as scipy.linalg.pinv
s_nnz = s[idx][:, np.newaxis]
UTy = np.dot(U.T, y)
d = np.zeros((s.size, alpha.size))
d[idx] = s_nnz / (s_nnz ** 2 + alpha)
d_UT_y = d * UTy
return np.dot(Vt.T, d_UT_y).T
def ridge_regression(X, y, alpha, sample_weight=None, solver='auto',
max_iter=None, tol=1e-3, verbose=0, random_state=None,
return_n_iter=False):
"""Solve the ridge equation by the method of normal equations.
Read more in the :ref:`User Guide <ridge_regression>`.
Parameters
----------
X : {array-like, sparse matrix, LinearOperator},
shape = [n_samples, n_features]
Training data
y : array-like, shape = [n_samples] or [n_samples, n_targets]
Target values
alpha : {float, array-like},
shape = [n_targets] if array-like
The l_2 penalty to be used. If an array is passed, penalties are
assumed to be specific to targets
max_iter : int, optional
Maximum number of iterations for conjugate gradient solver.
For 'sparse_cg' and 'lsqr' solvers, the default value is determined
by scipy.sparse.linalg. For 'sag' solver, the default value is 1000.
sample_weight : float or numpy array of shape [n_samples]
Individual weights for each sample. If sample_weight is not None and
solver='auto', the solver will be set to 'cholesky'.
    solver : {'auto', 'svd', 'cholesky', 'lsqr', 'sparse_cg', 'sag'}
Solver to use in the computational routines:
- 'auto' chooses the solver automatically based on the type of data.
- 'svd' uses a Singular Value Decomposition of X to compute the Ridge
coefficients. More stable for singular matrices than
'cholesky'.
- 'cholesky' uses the standard scipy.linalg.solve function to
obtain a closed-form solution via a Cholesky decomposition of
dot(X.T, X)
- 'sparse_cg' uses the conjugate gradient solver as found in
scipy.sparse.linalg.cg. As an iterative algorithm, this solver is
more appropriate than 'cholesky' for large-scale data
(possibility to set `tol` and `max_iter`).
- 'lsqr' uses the dedicated regularized least-squares routine
          scipy.sparse.linalg.lsqr. It is the fastest but may not be available
in old scipy versions. It also uses an iterative procedure.
- 'sag' uses a Stochastic Average Gradient descent. It also uses an
iterative procedure, and is often faster than other solvers when
both n_samples and n_features are large. Note that 'sag' fast
convergence is only guaranteed on features with approximately the
same scale. You can preprocess the data with a scaler from
sklearn.preprocessing.
        All solvers except 'svd' support both dense and sparse data.
tol : float
Precision of the solution.
verbose : int
Verbosity level. Setting verbose > 0 will display additional
information depending on the solver used.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data. Used in 'sag' solver.
return_n_iter : boolean, default False
If True, the method also returns `n_iter`, the actual number of
        iterations performed by the solver.
Returns
-------
coef : array, shape = [n_features] or [n_targets, n_features]
Weight vector(s).
n_iter : int, optional
        The actual number of iterations performed by the solver.
Only returned if `return_n_iter` is True.
Notes
-----
This function won't compute the intercept.
"""
# SAG needs X and y columns to be C-contiguous and np.float64
if solver == 'sag':
X = check_array(X, accept_sparse=['csr'],
dtype=np.float64, order='C')
y = check_array(y, dtype=np.float64, ensure_2d=False, order='F')
else:
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'],
dtype=np.float64)
y = check_array(y, dtype='numeric', ensure_2d=False)
check_consistent_length(X, y)
n_samples, n_features = X.shape
if y.ndim > 2:
raise ValueError("Target y has the wrong shape %s" % str(y.shape))
ravel = False
if y.ndim == 1:
y = y.reshape(-1, 1)
ravel = True
n_samples_, n_targets = y.shape
if n_samples != n_samples_:
raise ValueError("Number of samples in X and y does not correspond:"
" %d != %d" % (n_samples, n_samples_))
has_sw = sample_weight is not None
if solver == 'auto':
        # cholesky if the data is dense or sample weights are given; sparse_cg otherwise
if not sparse.issparse(X) or has_sw:
solver = 'cholesky'
else:
solver = 'sparse_cg'
elif solver == 'lsqr' and not hasattr(sp_linalg, 'lsqr'):
warnings.warn("""lsqr not available on this machine, falling back
to sparse_cg.""")
solver = 'sparse_cg'
if has_sw:
if np.atleast_1d(sample_weight).ndim > 1:
raise ValueError("Sample weights must be 1D array or scalar")
if solver != 'sag':
# SAG supports sample_weight directly. For other solvers,
# we implement sample_weight via a simple rescaling.
X, y = _rescale_data(X, y, sample_weight)
# There should be either 1 or n_targets penalties
alpha = np.asarray(alpha).ravel()
if alpha.size not in [1, n_targets]:
raise ValueError("Number of targets and number of penalties "
"do not correspond: %d != %d"
% (alpha.size, n_targets))
if alpha.size == 1 and n_targets > 1:
alpha = np.repeat(alpha, n_targets)
if solver not in ('sparse_cg', 'cholesky', 'svd', 'lsqr', 'sag'):
raise ValueError('Solver %s not understood' % solver)
n_iter = None
if solver == 'sparse_cg':
coef = _solve_sparse_cg(X, y, alpha, max_iter, tol, verbose)
elif solver == 'lsqr':
coef, n_iter = _solve_lsqr(X, y, alpha, max_iter, tol)
elif solver == 'cholesky':
if n_features > n_samples:
K = safe_sparse_dot(X, X.T, dense_output=True)
try:
dual_coef = _solve_cholesky_kernel(K, y, alpha)
coef = safe_sparse_dot(X.T, dual_coef, dense_output=True).T
except linalg.LinAlgError:
# use SVD solver if matrix is singular
solver = 'svd'
else:
try:
coef = _solve_cholesky(X, y, alpha)
except linalg.LinAlgError:
# use SVD solver if matrix is singular
solver = 'svd'
elif solver == 'sag':
# precompute max_squared_sum for all targets
max_squared_sum = get_max_squared_sum(X)
coef = np.empty((y.shape[1], n_features))
n_iter = np.empty(y.shape[1], dtype=np.int32)
for i, (alpha_i, target) in enumerate(zip(alpha, y.T)):
coef_, n_iter_, _ = sag_solver(
X, target.ravel(), sample_weight, 'squared', alpha_i,
max_iter, tol, verbose, random_state, False, max_squared_sum,
dict())
coef[i] = coef_
n_iter[i] = n_iter_
coef = np.asarray(coef)
if solver == 'svd':
if sparse.issparse(X):
raise TypeError('SVD solver does not support sparse'
' inputs currently')
coef = _solve_svd(X, y, alpha)
if ravel:
# When y was passed as a 1d-array, we flatten the coefficients.
coef = coef.ravel()
if return_n_iter:
return coef, n_iter
else:
return coef
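# Hedged usage sketch of ridge_regression (illustrative only; this helper is
# not part of the public API and is never called by the library): solve a
# small dense two-target problem with one l_2 penalty per target, as
# described in the docstring above.
def _example_ridge_regression():
    rng = np.random.RandomState(0)
    X = rng.randn(20, 5)
    y = rng.randn(20, 2)
    alpha = np.array([0.5, 2.0])                 # one penalty per target
    coef = ridge_regression(X, y, alpha, solver='cholesky')
    return coef.shape                            # (n_targets, n_features) == (2, 5)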
class _BaseRidge(six.with_metaclass(ABCMeta, LinearModel)):
@abstractmethod
def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
copy_X=True, max_iter=None, tol=1e-3, solver="auto",
random_state=None):
self.alpha = alpha
self.fit_intercept = fit_intercept
self.normalize = normalize
self.copy_X = copy_X
self.max_iter = max_iter
self.tol = tol
self.solver = solver
self.random_state = random_state
def fit(self, X, y, sample_weight=None):
X, y = check_X_y(X, y, ['csr', 'csc', 'coo'], dtype=np.float64,
multi_output=True, y_numeric=True)
if ((sample_weight is not None) and
np.atleast_1d(sample_weight).ndim > 1):
raise ValueError("Sample weights must be 1D array or scalar")
X, y, X_mean, y_mean, X_std = self._center_data(
X, y, self.fit_intercept, self.normalize, self.copy_X,
sample_weight=sample_weight)
self.coef_, self.n_iter_ = ridge_regression(
X, y, alpha=self.alpha, sample_weight=sample_weight,
max_iter=self.max_iter, tol=self.tol, solver=self.solver,
random_state=self.random_state, return_n_iter=True)
self._set_intercept(X_mean, y_mean, X_std)
return self
class Ridge(_BaseRidge, RegressorMixin):
"""Linear least squares with l2 regularization.
This model solves a regression model where the loss function is
the linear least squares function and regularization is given by
the l2-norm. Also known as Ridge Regression or Tikhonov regularization.
This estimator has built-in support for multi-variate regression
(i.e., when y is a 2d-array of shape [n_samples, n_targets]).
Read more in the :ref:`User Guide <ridge_regression>`.
Parameters
----------
alpha : {float, array-like}, shape (n_targets)
Small positive values of alpha improve the conditioning of the problem
and reduce the variance of the estimates. Alpha corresponds to
``C^-1`` in other linear models such as LogisticRegression or
LinearSVC. If an array is passed, penalties are assumed to be specific
to the targets. Hence they must correspond in number.
copy_X : boolean, optional, default True
If True, X will be copied; else, it may be overwritten.
fit_intercept : boolean
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
max_iter : int, optional
Maximum number of iterations for conjugate gradient solver.
For 'sparse_cg' and 'lsqr' solvers, the default value is determined
by scipy.sparse.linalg. For 'sag' solver, the default value is 1000.
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
solver : {'auto', 'svd', 'cholesky', 'lsqr', 'sparse_cg', 'sag'}
Solver to use in the computational routines:
- 'auto' chooses the solver automatically based on the type of data.
- 'svd' uses a Singular Value Decomposition of X to compute the Ridge
coefficients. More stable for singular matrices than
'cholesky'.
- 'cholesky' uses the standard scipy.linalg.solve function to
obtain a closed-form solution.
- 'sparse_cg' uses the conjugate gradient solver as found in
scipy.sparse.linalg.cg. As an iterative algorithm, this solver is
more appropriate than 'cholesky' for large-scale data
(possibility to set `tol` and `max_iter`).
- 'lsqr' uses the dedicated regularized least-squares routine
          scipy.sparse.linalg.lsqr. It is the fastest but may not be available
in old scipy versions. It also uses an iterative procedure.
- 'sag' uses a Stochastic Average Gradient descent. It also uses an
iterative procedure, and is often faster than other solvers when
both n_samples and n_features are large. Note that 'sag' fast
convergence is only guaranteed on features with approximately the
same scale. You can preprocess the data with a scaler from
sklearn.preprocessing.
        All solvers except 'svd' support both dense and sparse data.
tol : float
Precision of the solution.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data. Used in 'sag' solver.
Attributes
----------
coef_ : array, shape (n_features,) or (n_targets, n_features)
Weight vector(s).
intercept_ : float | array, shape = (n_targets,)
Independent term in decision function. Set to 0.0 if
``fit_intercept = False``.
n_iter_ : array or None, shape (n_targets,)
Actual number of iterations for each target. Available only for
sag and lsqr solvers. Other solvers will return None.
See also
--------
RidgeClassifier, RidgeCV, KernelRidge
Examples
--------
>>> from sklearn.linear_model import Ridge
>>> import numpy as np
>>> n_samples, n_features = 10, 5
>>> np.random.seed(0)
>>> y = np.random.randn(n_samples)
>>> X = np.random.randn(n_samples, n_features)
>>> clf = Ridge(alpha=1.0)
>>> clf.fit(X, y) # doctest: +NORMALIZE_WHITESPACE
Ridge(alpha=1.0, copy_X=True, fit_intercept=True, max_iter=None,
normalize=False, random_state=None, solver='auto', tol=0.001)
"""
def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
copy_X=True, max_iter=None, tol=1e-3, solver="auto",
random_state=None):
super(Ridge, self).__init__(alpha=alpha, fit_intercept=fit_intercept,
normalize=normalize, copy_X=copy_X,
max_iter=max_iter, tol=tol, solver=solver,
random_state=random_state)
def fit(self, X, y, sample_weight=None):
"""Fit Ridge regression model
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data
y : array-like, shape = [n_samples] or [n_samples, n_targets]
Target values
sample_weight : float or numpy array of shape [n_samples]
Individual weights for each sample
Returns
-------
self : returns an instance of self.
"""
return super(Ridge, self).fit(X, y, sample_weight=sample_weight)
class RidgeClassifier(LinearClassifierMixin, _BaseRidge):
"""Classifier using Ridge regression.
Read more in the :ref:`User Guide <ridge_regression>`.
Parameters
----------
alpha : float
Small positive values of alpha improve the conditioning of the problem
and reduce the variance of the estimates. Alpha corresponds to
``C^-1`` in other linear models such as LogisticRegression or
LinearSVC.
class_weight : dict or 'balanced', optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
copy_X : boolean, optional, default True
If True, X will be copied; else, it may be overwritten.
fit_intercept : boolean
Whether to calculate the intercept for this model. If set to false, no
intercept will be used in calculations (e.g. data is expected to be
already centered).
max_iter : int, optional
Maximum number of iterations for conjugate gradient solver.
The default value is determined by scipy.sparse.linalg.
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
solver : {'auto', 'svd', 'cholesky', 'lsqr', 'sparse_cg', 'sag'}
Solver to use in the computational routines:
- 'auto' chooses the solver automatically based on the type of data.
- 'svd' uses a Singular Value Decomposition of X to compute the Ridge
coefficients. More stable for singular matrices than
'cholesky'.
- 'cholesky' uses the standard scipy.linalg.solve function to
obtain a closed-form solution.
- 'sparse_cg' uses the conjugate gradient solver as found in
scipy.sparse.linalg.cg. As an iterative algorithm, this solver is
more appropriate than 'cholesky' for large-scale data
(possibility to set `tol` and `max_iter`).
- 'lsqr' uses the dedicated regularized least-squares routine
          scipy.sparse.linalg.lsqr. It is the fastest but may not be available
in old scipy versions. It also uses an iterative procedure.
- 'sag' uses a Stochastic Average Gradient descent. It also uses an
iterative procedure, and is faster than other solvers when both
n_samples and n_features are large.
tol : float
Precision of the solution.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data. Used in 'sag' solver.
Attributes
----------
coef_ : array, shape (n_features,) or (n_classes, n_features)
Weight vector(s).
intercept_ : float | array, shape = (n_targets,)
Independent term in decision function. Set to 0.0 if
``fit_intercept = False``.
n_iter_ : array or None, shape (n_targets,)
Actual number of iterations for each target. Available only for
sag and lsqr solvers. Other solvers will return None.
See also
--------
Ridge, RidgeClassifierCV
Notes
-----
For multi-class classification, n_class classifiers are trained in
a one-versus-all approach. Concretely, this is implemented by taking
advantage of the multi-variate response support in Ridge.
"""
def __init__(self, alpha=1.0, fit_intercept=True, normalize=False,
copy_X=True, max_iter=None, tol=1e-3, class_weight=None,
solver="auto", random_state=None):
super(RidgeClassifier, self).__init__(
alpha=alpha, fit_intercept=fit_intercept, normalize=normalize,
copy_X=copy_X, max_iter=max_iter, tol=tol, solver=solver,
random_state=random_state)
self.class_weight = class_weight
def fit(self, X, y, sample_weight=None):
"""Fit Ridge regression model.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples,n_features]
Training data
y : array-like, shape = [n_samples]
Target values
sample_weight : float or numpy array of shape (n_samples,)
Sample weight.
Returns
-------
self : returns an instance of self.
"""
self._label_binarizer = LabelBinarizer(pos_label=1, neg_label=-1)
Y = self._label_binarizer.fit_transform(y)
if not self._label_binarizer.y_type_.startswith('multilabel'):
y = column_or_1d(y, warn=True)
if self.class_weight:
if sample_weight is None:
sample_weight = 1.
# modify the sample weights with the corresponding class weight
sample_weight = (sample_weight *
compute_sample_weight(self.class_weight, y))
super(RidgeClassifier, self).fit(X, Y, sample_weight=sample_weight)
return self
@property
def classes_(self):
return self._label_binarizer.classes_
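# Hedged sketch (illustrative only, never called) of the one-versus-all
# behaviour described in the RidgeClassifier notes above: for a three-class
# problem the fitted model holds one weight vector per class.
def _example_ridge_classifier():
    rng = np.random.RandomState(0)
    X = rng.randn(30, 4)
    y = rng.randint(0, 3, size=30)               # three classes: 0, 1, 2
    clf = RidgeClassifier(alpha=1.0).fit(X, y)
    return clf.coef_.shape                       # (n_classes, n_features) == (3, 4)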
class _RidgeGCV(LinearModel):
"""Ridge regression with built-in Generalized Cross-Validation
It allows efficient Leave-One-Out cross-validation.
This class is not intended to be used directly. Use RidgeCV instead.
Notes
-----
We want to solve (K + alpha*Id)c = y,
where K = X X^T is the kernel matrix.
Let G = (K + alpha*Id)^-1.
Dual solution: c = Gy
Primal solution: w = X^T c
Compute eigendecomposition K = Q V Q^T.
Then G = Q (V + alpha*Id)^-1 Q^T,
where (V + alpha*Id) is diagonal.
    It is thus inexpensive to invert for many alphas.
Let loov be the vector of prediction values for each example
when the model was fitted with all examples but this example.
loov = (KGY - diag(KG)Y) / diag(I-KG)
Let looe be the vector of prediction errors for each example
when the model was fitted with all examples but this example.
looe = y - loov = c / diag(G)
References
----------
http://cbcl.mit.edu/projects/cbcl/publications/ps/MIT-CSAIL-TR-2007-025.pdf
http://www.mit.edu/~9.520/spring07/Classes/rlsslides.pdf
"""
def __init__(self, alphas=(0.1, 1.0, 10.0),
fit_intercept=True, normalize=False,
scoring=None, copy_X=True,
gcv_mode=None, store_cv_values=False):
self.alphas = np.asarray(alphas)
self.fit_intercept = fit_intercept
self.normalize = normalize
self.scoring = scoring
self.copy_X = copy_X
self.gcv_mode = gcv_mode
self.store_cv_values = store_cv_values
def _pre_compute(self, X, y):
# even if X is very sparse, K is usually very dense
K = safe_sparse_dot(X, X.T, dense_output=True)
v, Q = linalg.eigh(K)
QT_y = np.dot(Q.T, y)
return v, Q, QT_y
def _decomp_diag(self, v_prime, Q):
# compute diagonal of the matrix: dot(Q, dot(diag(v_prime), Q^T))
return (v_prime * Q ** 2).sum(axis=-1)
def _diag_dot(self, D, B):
# compute dot(diag(D), B)
if len(B.shape) > 1:
# handle case where B is > 1-d
D = D[(slice(None), ) + (np.newaxis, ) * (len(B.shape) - 1)]
return D * B
def _errors(self, alpha, y, v, Q, QT_y):
# don't construct matrix G, instead compute action on y & diagonal
w = 1.0 / (v + alpha)
c = np.dot(Q, self._diag_dot(w, QT_y))
G_diag = self._decomp_diag(w, Q)
# handle case where y is 2-d
if len(y.shape) != 1:
G_diag = G_diag[:, np.newaxis]
return (c / G_diag) ** 2, c
def _values(self, alpha, y, v, Q, QT_y):
# don't construct matrix G, instead compute action on y & diagonal
w = 1.0 / (v + alpha)
c = np.dot(Q, self._diag_dot(w, QT_y))
G_diag = self._decomp_diag(w, Q)
# handle case where y is 2-d
if len(y.shape) != 1:
G_diag = G_diag[:, np.newaxis]
return y - (c / G_diag), c
def _pre_compute_svd(self, X, y):
if sparse.issparse(X):
raise TypeError("SVD not supported for sparse matrices")
U, s, _ = linalg.svd(X, full_matrices=0)
v = s ** 2
UT_y = np.dot(U.T, y)
return v, U, UT_y
def _errors_svd(self, alpha, y, v, U, UT_y):
w = ((v + alpha) ** -1) - (alpha ** -1)
c = np.dot(U, self._diag_dot(w, UT_y)) + (alpha ** -1) * y
G_diag = self._decomp_diag(w, U) + (alpha ** -1)
if len(y.shape) != 1:
# handle case where y is 2-d
G_diag = G_diag[:, np.newaxis]
return (c / G_diag) ** 2, c
def _values_svd(self, alpha, y, v, U, UT_y):
w = ((v + alpha) ** -1) - (alpha ** -1)
c = np.dot(U, self._diag_dot(w, UT_y)) + (alpha ** -1) * y
G_diag = self._decomp_diag(w, U) + (alpha ** -1)
if len(y.shape) != 1:
# handle case when y is 2-d
G_diag = G_diag[:, np.newaxis]
return y - (c / G_diag), c
def fit(self, X, y, sample_weight=None):
"""Fit Ridge regression model
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data
y : array-like, shape = [n_samples] or [n_samples, n_targets]
Target values
sample_weight : float or array-like of shape [n_samples]
Sample weight
Returns
-------
self : Returns self.
"""
X, y = check_X_y(X, y, ['csr', 'csc', 'coo'], dtype=np.float,
multi_output=True, y_numeric=True)
n_samples, n_features = X.shape
X, y, X_mean, y_mean, X_std = LinearModel._center_data(
X, y, self.fit_intercept, self.normalize, self.copy_X,
sample_weight=sample_weight)
gcv_mode = self.gcv_mode
with_sw = len(np.shape(sample_weight))
if gcv_mode is None or gcv_mode == 'auto':
if sparse.issparse(X) or n_features > n_samples or with_sw:
gcv_mode = 'eigen'
else:
gcv_mode = 'svd'
elif gcv_mode == "svd" and with_sw:
# FIXME non-uniform sample weights not yet supported
warnings.warn("non-uniform sample weights unsupported for svd, "
"forcing usage of eigen")
gcv_mode = 'eigen'
if gcv_mode == 'eigen':
_pre_compute = self._pre_compute
_errors = self._errors
_values = self._values
elif gcv_mode == 'svd':
# assert n_samples >= n_features
_pre_compute = self._pre_compute_svd
_errors = self._errors_svd
_values = self._values_svd
else:
raise ValueError('bad gcv_mode "%s"' % gcv_mode)
v, Q, QT_y = _pre_compute(X, y)
n_y = 1 if len(y.shape) == 1 else y.shape[1]
cv_values = np.zeros((n_samples * n_y, len(self.alphas)))
C = []
scorer = check_scoring(self, scoring=self.scoring, allow_none=True)
error = scorer is None
for i, alpha in enumerate(self.alphas):
weighted_alpha = (sample_weight * alpha
if sample_weight is not None
else alpha)
if error:
out, c = _errors(weighted_alpha, y, v, Q, QT_y)
else:
out, c = _values(weighted_alpha, y, v, Q, QT_y)
cv_values[:, i] = out.ravel()
C.append(c)
if error:
best = cv_values.mean(axis=0).argmin()
else:
# The scorer wants an object that will make the predictions, but
# they are already computed efficiently by _RidgeGCV. This
# identity_estimator will just return them
def identity_estimator():
pass
identity_estimator.decision_function = lambda y_predict: y_predict
identity_estimator.predict = lambda y_predict: y_predict
out = [scorer(identity_estimator, y.ravel(), cv_values[:, i])
for i in range(len(self.alphas))]
best = np.argmax(out)
self.alpha_ = self.alphas[best]
self.dual_coef_ = C[best]
self.coef_ = safe_sparse_dot(self.dual_coef_.T, X)
self._set_intercept(X_mean, y_mean, X_std)
if self.store_cv_values:
if len(y.shape) == 1:
cv_values_shape = n_samples, len(self.alphas)
else:
cv_values_shape = n_samples, n_y, len(self.alphas)
self.cv_values_ = cv_values.reshape(cv_values_shape)
return self
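# Hedged illustration (not part of the original module): a minimal NumPy
# sanity check of the identity used by _errors/_values above. With the
# eigendecomposition K = Q diag(v) Q^T, the matrix G = (K + alpha*I)^-1 has
# diag(G)[i] = sum_j Q[i, j]**2 / (v[j] + alpha) and c = G.dot(y), so the
# leave-one-out residuals c / diag(G) are available without ever forming G.
# The data below are synthetic and exist only for this check.
import numpy as np
_rng = np.random.RandomState(0)
_X = _rng.randn(20, 5)
_y = _rng.randn(20)
_alpha = 1.0
_K = _X.dot(_X.T)
_v, _Q = np.linalg.eigh(_K)
_w = 1.0 / (_v + _alpha)
_G = np.linalg.inv(_K + _alpha * np.eye(len(_y)))  # explicit (slow) route
_c_fast = _Q.dot(_w * _Q.T.dot(_y))                # action of G on y
_G_diag_fast = (_w * _Q ** 2).sum(axis=-1)         # diagonal of G
assert np.allclose(_G.dot(_y), _c_fast)
assert np.allclose(np.diag(_G), _G_diag_fast)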
class _BaseRidgeCV(LinearModel):
def __init__(self, alphas=(0.1, 1.0, 10.0),
fit_intercept=True, normalize=False, scoring=None,
cv=None, gcv_mode=None,
store_cv_values=False):
self.alphas = alphas
self.fit_intercept = fit_intercept
self.normalize = normalize
self.scoring = scoring
self.cv = cv
self.gcv_mode = gcv_mode
self.store_cv_values = store_cv_values
def fit(self, X, y, sample_weight=None):
"""Fit Ridge regression model
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training data
y : array-like, shape = [n_samples] or [n_samples, n_targets]
Target values
sample_weight : float or array-like of shape [n_samples]
Sample weight
Returns
-------
self : Returns self.
"""
if self.cv is None:
estimator = _RidgeGCV(self.alphas,
fit_intercept=self.fit_intercept,
normalize=self.normalize,
scoring=self.scoring,
gcv_mode=self.gcv_mode,
store_cv_values=self.store_cv_values)
estimator.fit(X, y, sample_weight=sample_weight)
self.alpha_ = estimator.alpha_
if self.store_cv_values:
self.cv_values_ = estimator.cv_values_
else:
if self.store_cv_values:
raise ValueError("cv!=None and store_cv_values=True "
" are incompatible")
parameters = {'alpha': self.alphas}
fit_params = {'sample_weight': sample_weight}
gs = GridSearchCV(Ridge(fit_intercept=self.fit_intercept),
parameters, fit_params=fit_params, cv=self.cv)
gs.fit(X, y)
estimator = gs.best_estimator_
self.alpha_ = gs.best_estimator_.alpha
self.coef_ = estimator.coef_
self.intercept_ = estimator.intercept_
return self
class RidgeCV(_BaseRidgeCV, RegressorMixin):
"""Ridge regression with built-in cross-validation.
By default, it performs Generalized Cross-Validation, which is a form of
efficient Leave-One-Out cross-validation.
Read more in the :ref:`User Guide <ridge_regression>`.
Parameters
----------
alphas : numpy array of shape [n_alphas]
Array of alpha values to try.
Small positive values of alpha improve the conditioning of the
problem and reduce the variance of the estimates.
Alpha corresponds to ``C^-1`` in other linear models such as
LogisticRegression or LinearSVC.
fit_intercept : boolean
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the efficient Leave-One-Out cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, if ``y`` is binary or multiclass,
:class:`StratifiedKFold` is used, else :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
gcv_mode : {None, 'auto', 'svd', 'eigen'}, optional
Flag indicating which strategy to use when performing
Generalized Cross-Validation. Options are::
'auto' : use svd if n_samples > n_features or when X is a sparse
matrix, otherwise use eigen
'svd' : force computation via singular value decomposition of X
(does not work for sparse matrices)
'eigen' : force computation via eigendecomposition of X^T X
The 'auto' mode is the default and is intended to pick the cheaper
option of the two depending upon the shape and format of the training
data.
store_cv_values : boolean, default=False
Flag indicating if the cross-validation values corresponding to
each alpha should be stored in the `cv_values_` attribute (see
below). This flag is only compatible with `cv=None` (i.e. using
Generalized Cross-Validation).
Attributes
----------
cv_values_ : array, shape = [n_samples, n_alphas] or \
shape = [n_samples, n_targets, n_alphas], optional
Cross-validation values for each alpha (if `store_cv_values=True` and \
`cv=None`). After `fit()` has been called, this attribute will \
contain the mean squared errors (by default) or the values of the \
`{loss,score}_func` function (if provided in the constructor).
coef_ : array, shape = [n_features] or [n_targets, n_features]
Weight vector(s).
intercept_ : float | array, shape = (n_targets,)
Independent term in decision function. Set to 0.0 if
``fit_intercept = False``.
alpha_ : float
Estimated regularization parameter.
See also
--------
Ridge: Ridge regression
RidgeClassifier: Ridge classifier
RidgeClassifierCV: Ridge classifier with built-in cross validation
"""
pass
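# Hedged usage sketch (not part of the original source): RidgeCV fitted on
# synthetic data, then reading back the selected alpha and the stored
# per-sample GCV errors. Data and numbers are made up for illustration.
import numpy as np
from sklearn.linear_model import RidgeCV
_rng = np.random.RandomState(0)
_X = _rng.randn(50, 3)
_y = _X.dot(np.array([1.0, -2.0, 0.5])) + 0.1 * _rng.randn(50)
_reg = RidgeCV(alphas=(0.1, 1.0, 10.0), store_cv_values=True).fit(_X, _y)
print(_reg.alpha_)            # alpha with the lowest mean squared error
print(_reg.cv_values_.shape)  # (50, 3): one column of errors per alpha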
class RidgeClassifierCV(LinearClassifierMixin, _BaseRidgeCV):
"""Ridge classifier with built-in cross-validation.
By default, it performs Generalized Cross-Validation, which is a form of
efficient Leave-One-Out cross-validation. Currently, only the n_features >
n_samples case is handled efficiently.
Read more in the :ref:`User Guide <ridge_regression>`.
Parameters
----------
alphas : numpy array of shape [n_alphas]
Array of alpha values to try.
Small positive values of alpha improve the conditioning of the
problem and reduce the variance of the estimates.
Alpha corresponds to ``C^-1`` in other linear models such as
LogisticRegression or LinearSVC.
fit_intercept : boolean
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the efficient Leave-One-Out cross-validation
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
class_weight : dict or 'balanced', optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
Attributes
----------
cv_values_ : array, shape = [n_samples, n_alphas] or \
shape = [n_samples, n_responses, n_alphas], optional
Cross-validation values for each alpha (if `store_cv_values=True` and
`cv=None`). After `fit()` has been called, this attribute will contain \
the mean squared errors (by default) or the values of the \
`{loss,score}_func` function (if provided in the constructor).
coef_ : array, shape = [n_features] or [n_targets, n_features]
Weight vector(s).
intercept_ : float | array, shape = (n_targets,)
Independent term in decision function. Set to 0.0 if
``fit_intercept = False``.
alpha_ : float
Estimated regularization parameter
See also
--------
Ridge: Ridge regression
RidgeClassifier: Ridge classifier
RidgeCV: Ridge regression with built-in cross validation
Notes
-----
For multi-class classification, n_class classifiers are trained in
a one-versus-all approach. Concretely, this is implemented by taking
advantage of the multi-variate response support in Ridge.
"""
def __init__(self, alphas=(0.1, 1.0, 10.0), fit_intercept=True,
normalize=False, scoring=None, cv=None, class_weight=None):
super(RidgeClassifierCV, self).__init__(
alphas=alphas, fit_intercept=fit_intercept, normalize=normalize,
scoring=scoring, cv=cv)
self.class_weight = class_weight
def fit(self, X, y, sample_weight=None):
"""Fit the ridge classifier.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training vectors, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, shape (n_samples,)
Target values.
sample_weight : float or numpy array of shape (n_samples,)
Sample weight.
Returns
-------
self : object
Returns self.
"""
self._label_binarizer = LabelBinarizer(pos_label=1, neg_label=-1)
Y = self._label_binarizer.fit_transform(y)
if not self._label_binarizer.y_type_.startswith('multilabel'):
y = column_or_1d(y, warn=True)
if self.class_weight:
if sample_weight is None:
sample_weight = 1.
# modify the sample weights with the corresponding class weight
sample_weight = (sample_weight *
compute_sample_weight(self.class_weight, y))
_BaseRidgeCV.fit(self, X, Y, sample_weight=sample_weight)
return self
@property
def classes_(self):
return self._label_binarizer.classes_
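# Hedged usage sketch (illustration only, not part of the original file):
# RidgeClassifierCV on a toy binary problem, exercising the documented
# class_weight='balanced' option. Data are synthetic.
import numpy as np
from sklearn.linear_model import RidgeClassifierCV
_rng = np.random.RandomState(0)
_X = _rng.randn(40, 4)
_y = (_X[:, 0] + 0.1 * _rng.randn(40) > 0).astype(int)
_clf = RidgeClassifierCV(alphas=(0.1, 1.0, 10.0),
                         class_weight='balanced').fit(_X, _y)
print(_clf.alpha_)    # selected regularization strength
print(_clf.classes_)  # the two class labels seen during fit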
|
bsd-3-clause
|
kenshay/ImageScripter
|
ProgramData/SystemFiles/Python/Lib/site-packages/pandas/tests/indexes/test_datetimelike.py
|
7
|
52802
|
# -*- coding: utf-8 -*-
from datetime import datetime, timedelta, time
import numpy as np
from pandas import (DatetimeIndex, Float64Index, Index, Int64Index,
NaT, Period, PeriodIndex, Series, Timedelta,
TimedeltaIndex, date_range, period_range,
timedelta_range, notnull)
import pandas.util.testing as tm
import pandas as pd
from pandas.tslib import Timestamp, OutOfBoundsDatetime
from .common import Base
class DatetimeLike(Base):
def test_shift_identity(self):
idx = self.create_index()
self.assert_index_equal(idx, idx.shift(0))
def test_str(self):
# test the string repr
idx = self.create_index()
idx.name = 'foo'
self.assertFalse("length=%s" % len(idx) in str(idx))
self.assertTrue("'foo'" in str(idx))
self.assertTrue(idx.__class__.__name__ in str(idx))
if hasattr(idx, 'tz'):
if idx.tz is not None:
self.assertTrue(idx.tz in str(idx))
if hasattr(idx, 'freq'):
self.assertTrue("freq='%s'" % idx.freqstr in str(idx))
def test_view(self):
super(DatetimeLike, self).test_view()
i = self.create_index()
i_view = i.view('i8')
result = self._holder(i)
tm.assert_index_equal(result, i)
i_view = i.view(self._holder)
result = self._holder(i)
tm.assert_index_equal(result, i_view)
class TestDatetimeIndex(DatetimeLike, tm.TestCase):
_holder = DatetimeIndex
_multiprocess_can_split_ = True
def setUp(self):
self.indices = dict(index=tm.makeDateIndex(10))
self.setup_indices()
def create_index(self):
return date_range('20130101', periods=5)
def test_shift(self):
# test shift for datetimeIndex and non datetimeIndex
# GH8083
drange = self.create_index()
result = drange.shift(1)
expected = DatetimeIndex(['2013-01-02', '2013-01-03', '2013-01-04',
'2013-01-05',
'2013-01-06'], freq='D')
self.assert_index_equal(result, expected)
result = drange.shift(-1)
expected = DatetimeIndex(['2012-12-31', '2013-01-01', '2013-01-02',
'2013-01-03', '2013-01-04'],
freq='D')
self.assert_index_equal(result, expected)
result = drange.shift(3, freq='2D')
expected = DatetimeIndex(['2013-01-07', '2013-01-08', '2013-01-09',
'2013-01-10',
'2013-01-11'], freq='D')
self.assert_index_equal(result, expected)
def test_construction_with_alt(self):
i = pd.date_range('20130101', periods=5, freq='H', tz='US/Eastern')
i2 = DatetimeIndex(i, dtype=i.dtype)
self.assert_index_equal(i, i2)
i2 = DatetimeIndex(i.tz_localize(None).asi8, tz=i.dtype.tz)
self.assert_index_equal(i, i2)
i2 = DatetimeIndex(i.tz_localize(None).asi8, dtype=i.dtype)
self.assert_index_equal(i, i2)
i2 = DatetimeIndex(
i.tz_localize(None).asi8, dtype=i.dtype, tz=i.dtype.tz)
self.assert_index_equal(i, i2)
# localize into the provided tz
i2 = DatetimeIndex(i.tz_localize(None).asi8, tz='UTC')
expected = i.tz_localize(None).tz_localize('UTC')
self.assert_index_equal(i2, expected)
# incompat tz/dtype
self.assertRaises(ValueError, lambda: DatetimeIndex(
i.tz_localize(None).asi8, dtype=i.dtype, tz='US/Pacific'))
def test_pickle_compat_construction(self):
pass
def test_construction_index_with_mixed_timezones(self):
# GH 11488
# no tz results in DatetimeIndex
result = Index([Timestamp('2011-01-01'),
Timestamp('2011-01-02')], name='idx')
exp = DatetimeIndex([Timestamp('2011-01-01'),
Timestamp('2011-01-02')], name='idx')
self.assert_index_equal(result, exp, exact=True)
self.assertTrue(isinstance(result, DatetimeIndex))
self.assertIsNone(result.tz)
# same tz results in DatetimeIndex
result = Index([Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
Timestamp('2011-01-02 10:00', tz='Asia/Tokyo')],
name='idx')
exp = DatetimeIndex(
[Timestamp('2011-01-01 10:00'), Timestamp('2011-01-02 10:00')
], tz='Asia/Tokyo', name='idx')
self.assert_index_equal(result, exp, exact=True)
self.assertTrue(isinstance(result, DatetimeIndex))
self.assertIsNotNone(result.tz)
self.assertEqual(result.tz, exp.tz)
# same tz results in DatetimeIndex (DST)
result = Index([Timestamp('2011-01-01 10:00', tz='US/Eastern'),
Timestamp('2011-08-01 10:00', tz='US/Eastern')],
name='idx')
exp = DatetimeIndex([Timestamp('2011-01-01 10:00'),
Timestamp('2011-08-01 10:00')],
tz='US/Eastern', name='idx')
self.assert_index_equal(result, exp, exact=True)
self.assertTrue(isinstance(result, DatetimeIndex))
self.assertIsNotNone(result.tz)
self.assertEqual(result.tz, exp.tz)
# different tz results in Index(dtype=object)
result = Index([Timestamp('2011-01-01 10:00'),
Timestamp('2011-01-02 10:00', tz='US/Eastern')],
name='idx')
exp = Index([Timestamp('2011-01-01 10:00'),
Timestamp('2011-01-02 10:00', tz='US/Eastern')],
dtype='object', name='idx')
self.assert_index_equal(result, exp, exact=True)
self.assertFalse(isinstance(result, DatetimeIndex))
result = Index([Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
Timestamp('2011-01-02 10:00', tz='US/Eastern')],
name='idx')
exp = Index([Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
Timestamp('2011-01-02 10:00', tz='US/Eastern')],
dtype='object', name='idx')
self.assert_index_equal(result, exp, exact=True)
self.assertFalse(isinstance(result, DatetimeIndex))
# length = 1
result = Index([Timestamp('2011-01-01')], name='idx')
exp = DatetimeIndex([Timestamp('2011-01-01')], name='idx')
self.assert_index_equal(result, exp, exact=True)
self.assertTrue(isinstance(result, DatetimeIndex))
self.assertIsNone(result.tz)
# length = 1 with tz
result = Index(
[Timestamp('2011-01-01 10:00', tz='Asia/Tokyo')], name='idx')
exp = DatetimeIndex([Timestamp('2011-01-01 10:00')], tz='Asia/Tokyo',
name='idx')
self.assert_index_equal(result, exp, exact=True)
self.assertTrue(isinstance(result, DatetimeIndex))
self.assertIsNotNone(result.tz)
self.assertEqual(result.tz, exp.tz)
def test_construction_index_with_mixed_timezones_with_NaT(self):
# GH 11488
result = Index([pd.NaT, Timestamp('2011-01-01'),
pd.NaT, Timestamp('2011-01-02')], name='idx')
exp = DatetimeIndex([pd.NaT, Timestamp('2011-01-01'),
pd.NaT, Timestamp('2011-01-02')], name='idx')
self.assert_index_equal(result, exp, exact=True)
self.assertTrue(isinstance(result, DatetimeIndex))
self.assertIsNone(result.tz)
# same tz results in DatetimeIndex
result = Index([pd.NaT, Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
pd.NaT, Timestamp('2011-01-02 10:00',
tz='Asia/Tokyo')],
name='idx')
exp = DatetimeIndex([pd.NaT, Timestamp('2011-01-01 10:00'),
pd.NaT, Timestamp('2011-01-02 10:00')],
tz='Asia/Tokyo', name='idx')
self.assert_index_equal(result, exp, exact=True)
self.assertTrue(isinstance(result, DatetimeIndex))
self.assertIsNotNone(result.tz)
self.assertEqual(result.tz, exp.tz)
# same tz results in DatetimeIndex (DST)
result = Index([Timestamp('2011-01-01 10:00', tz='US/Eastern'),
pd.NaT,
Timestamp('2011-08-01 10:00', tz='US/Eastern')],
name='idx')
exp = DatetimeIndex([Timestamp('2011-01-01 10:00'), pd.NaT,
Timestamp('2011-08-01 10:00')],
tz='US/Eastern', name='idx')
self.assert_index_equal(result, exp, exact=True)
self.assertTrue(isinstance(result, DatetimeIndex))
self.assertIsNotNone(result.tz)
self.assertEqual(result.tz, exp.tz)
# different tz results in Index(dtype=object)
result = Index([pd.NaT, Timestamp('2011-01-01 10:00'),
pd.NaT, Timestamp('2011-01-02 10:00',
tz='US/Eastern')],
name='idx')
exp = Index([pd.NaT, Timestamp('2011-01-01 10:00'),
pd.NaT, Timestamp('2011-01-02 10:00', tz='US/Eastern')],
dtype='object', name='idx')
self.assert_index_equal(result, exp, exact=True)
self.assertFalse(isinstance(result, DatetimeIndex))
result = Index([pd.NaT, Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
pd.NaT, Timestamp('2011-01-02 10:00',
tz='US/Eastern')], name='idx')
exp = Index([pd.NaT, Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
pd.NaT, Timestamp('2011-01-02 10:00', tz='US/Eastern')],
dtype='object', name='idx')
self.assert_index_equal(result, exp, exact=True)
self.assertFalse(isinstance(result, DatetimeIndex))
# all NaT
result = Index([pd.NaT, pd.NaT], name='idx')
exp = DatetimeIndex([pd.NaT, pd.NaT], name='idx')
self.assert_index_equal(result, exp, exact=True)
self.assertTrue(isinstance(result, DatetimeIndex))
self.assertIsNone(result.tz)
# all NaT with tz
result = Index([pd.NaT, pd.NaT], tz='Asia/Tokyo', name='idx')
exp = DatetimeIndex([pd.NaT, pd.NaT], tz='Asia/Tokyo', name='idx')
self.assert_index_equal(result, exp, exact=True)
self.assertTrue(isinstance(result, DatetimeIndex))
self.assertIsNotNone(result.tz)
self.assertEqual(result.tz, exp.tz)
def test_construction_dti_with_mixed_timezones(self):
# GH 11488 (not changed, added explicit tests)
# no tz results in DatetimeIndex
result = DatetimeIndex(
[Timestamp('2011-01-01'), Timestamp('2011-01-02')], name='idx')
exp = DatetimeIndex(
[Timestamp('2011-01-01'), Timestamp('2011-01-02')], name='idx')
self.assert_index_equal(result, exp, exact=True)
self.assertTrue(isinstance(result, DatetimeIndex))
# same tz results in DatetimeIndex
result = DatetimeIndex([Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
Timestamp('2011-01-02 10:00',
tz='Asia/Tokyo')],
name='idx')
exp = DatetimeIndex([Timestamp('2011-01-01 10:00'),
Timestamp('2011-01-02 10:00')],
tz='Asia/Tokyo', name='idx')
self.assert_index_equal(result, exp, exact=True)
self.assertTrue(isinstance(result, DatetimeIndex))
# same tz results in DatetimeIndex (DST)
result = DatetimeIndex([Timestamp('2011-01-01 10:00', tz='US/Eastern'),
Timestamp('2011-08-01 10:00',
tz='US/Eastern')],
name='idx')
exp = DatetimeIndex([Timestamp('2011-01-01 10:00'),
Timestamp('2011-08-01 10:00')],
tz='US/Eastern', name='idx')
self.assert_index_equal(result, exp, exact=True)
self.assertTrue(isinstance(result, DatetimeIndex))
# different tz coerces tz-naive to tz-aware
result = DatetimeIndex([Timestamp('2011-01-01 10:00'),
Timestamp('2011-01-02 10:00',
tz='US/Eastern')], name='idx')
exp = DatetimeIndex([Timestamp('2011-01-01 05:00'),
Timestamp('2011-01-02 10:00')],
tz='US/Eastern', name='idx')
self.assert_index_equal(result, exp, exact=True)
self.assertTrue(isinstance(result, DatetimeIndex))
# tz mismatch among tz-aware inputs raises TypeError/ValueError
with tm.assertRaises(ValueError):
DatetimeIndex([Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
Timestamp('2011-01-02 10:00', tz='US/Eastern')],
name='idx')
with tm.assertRaisesRegexp(TypeError, 'data is already tz-aware'):
DatetimeIndex([Timestamp('2011-01-01 10:00'),
Timestamp('2011-01-02 10:00', tz='US/Eastern')],
tz='Asia/Tokyo', name='idx')
with tm.assertRaises(ValueError):
DatetimeIndex([Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
Timestamp('2011-01-02 10:00', tz='US/Eastern')],
tz='US/Eastern', name='idx')
with tm.assertRaisesRegexp(TypeError, 'data is already tz-aware'):
# passing tz should result in a DatetimeIndex; the mismatch then
# raises TypeError
Index([pd.NaT, Timestamp('2011-01-01 10:00'),
pd.NaT, Timestamp('2011-01-02 10:00', tz='US/Eastern')],
tz='Asia/Tokyo', name='idx')
def test_construction_base_constructor(self):
arr = [pd.Timestamp('2011-01-01'), pd.NaT, pd.Timestamp('2011-01-03')]
tm.assert_index_equal(pd.Index(arr), pd.DatetimeIndex(arr))
tm.assert_index_equal(pd.Index(np.array(arr)),
pd.DatetimeIndex(np.array(arr)))
arr = [np.nan, pd.NaT, pd.Timestamp('2011-01-03')]
tm.assert_index_equal(pd.Index(arr), pd.DatetimeIndex(arr))
tm.assert_index_equal(pd.Index(np.array(arr)),
pd.DatetimeIndex(np.array(arr)))
def test_construction_outofbounds(self):
# GH 13663
dates = [datetime(3000, 1, 1), datetime(4000, 1, 1),
datetime(5000, 1, 1), datetime(6000, 1, 1)]
exp = Index(dates, dtype=object)
# coerces to object
tm.assert_index_equal(Index(dates), exp)
with tm.assertRaises(OutOfBoundsDatetime):
# can't create DatetimeIndex
DatetimeIndex(dates)
def test_astype(self):
# GH 13149, GH 13209
idx = DatetimeIndex(['2016-05-16', 'NaT', NaT, np.NaN])
result = idx.astype(object)
expected = Index([Timestamp('2016-05-16')] + [NaT] * 3, dtype=object)
tm.assert_index_equal(result, expected)
result = idx.astype(int)
expected = Int64Index([1463356800000000000] +
[-9223372036854775808] * 3, dtype=np.int64)
tm.assert_index_equal(result, expected)
rng = date_range('1/1/2000', periods=10)
result = rng.astype('i8')
self.assert_index_equal(result, Index(rng.asi8))
self.assert_numpy_array_equal(result.values, rng.asi8)
def test_astype_with_tz(self):
# with tz
rng = date_range('1/1/2000', periods=10, tz='US/Eastern')
result = rng.astype('datetime64[ns]')
expected = (date_range('1/1/2000', periods=10,
tz='US/Eastern')
.tz_convert('UTC').tz_localize(None))
tm.assert_index_equal(result, expected)
# BUG#10442 : testing astype(str) is correct for Series/DatetimeIndex
result = pd.Series(pd.date_range('2012-01-01', periods=3)).astype(str)
expected = pd.Series(
['2012-01-01', '2012-01-02', '2012-01-03'], dtype=object)
tm.assert_series_equal(result, expected)
result = Series(pd.date_range('2012-01-01', periods=3,
tz='US/Eastern')).astype(str)
expected = Series(['2012-01-01 00:00:00-05:00',
'2012-01-02 00:00:00-05:00',
'2012-01-03 00:00:00-05:00'],
dtype=object)
tm.assert_series_equal(result, expected)
def test_astype_str_compat(self):
# GH 13149, GH 13209
# verify that we are returning NaT as a string (and not unicode)
idx = DatetimeIndex(['2016-05-16', 'NaT', NaT, np.NaN])
result = idx.astype(str)
expected = Index(['2016-05-16', 'NaT', 'NaT', 'NaT'], dtype=object)
tm.assert_index_equal(result, expected)
def test_astype_str(self):
# test astype string - #10442
result = date_range('2012-01-01', periods=4,
name='test_name').astype(str)
expected = Index(['2012-01-01', '2012-01-02', '2012-01-03',
'2012-01-04'], name='test_name', dtype=object)
tm.assert_index_equal(result, expected)
# test astype string with tz and name
result = date_range('2012-01-01', periods=3, name='test_name',
tz='US/Eastern').astype(str)
expected = Index(['2012-01-01 00:00:00-05:00',
'2012-01-02 00:00:00-05:00',
'2012-01-03 00:00:00-05:00'],
name='test_name', dtype=object)
tm.assert_index_equal(result, expected)
# test astype string with freq='H' and name
result = date_range('1/1/2011', periods=3, freq='H',
name='test_name').astype(str)
expected = Index(['2011-01-01 00:00:00', '2011-01-01 01:00:00',
'2011-01-01 02:00:00'],
name='test_name', dtype=object)
tm.assert_index_equal(result, expected)
# test astype string with freq='H' and timezone
result = date_range('3/6/2012 00:00', periods=2, freq='H',
tz='Europe/London', name='test_name').astype(str)
expected = Index(['2012-03-06 00:00:00+00:00',
'2012-03-06 01:00:00+00:00'],
dtype=object, name='test_name')
tm.assert_index_equal(result, expected)
def test_astype_datetime64(self):
# GH 13149, GH 13209
idx = DatetimeIndex(['2016-05-16', 'NaT', NaT, np.NaN])
result = idx.astype('datetime64[ns]')
tm.assert_index_equal(result, idx)
self.assertFalse(result is idx)
result = idx.astype('datetime64[ns]', copy=False)
tm.assert_index_equal(result, idx)
self.assertTrue(result is idx)
idx_tz = DatetimeIndex(['2016-05-16', 'NaT', NaT, np.NaN], tz='EST')
result = idx_tz.astype('datetime64[ns]')
expected = DatetimeIndex(['2016-05-16 05:00:00', 'NaT', 'NaT', 'NaT'],
dtype='datetime64[ns]')
tm.assert_index_equal(result, expected)
def test_astype_raises(self):
# GH 13149, GH 13209
idx = DatetimeIndex(['2016-05-16', 'NaT', NaT, np.NaN])
self.assertRaises(ValueError, idx.astype, float)
self.assertRaises(ValueError, idx.astype, 'timedelta64')
self.assertRaises(ValueError, idx.astype, 'timedelta64[ns]')
self.assertRaises(ValueError, idx.astype, 'datetime64')
self.assertRaises(ValueError, idx.astype, 'datetime64[D]')
def test_where_other(self):
# other is ndarray or Index
i = pd.date_range('20130101', periods=3, tz='US/Eastern')
for arr in [np.nan, pd.NaT]:
result = i.where(notnull(i), other=arr)
expected = i
tm.assert_index_equal(result, expected)
i2 = i.copy()
i2 = Index([pd.NaT, pd.NaT] + i[2:].tolist())
result = i.where(notnull(i2), i2)
tm.assert_index_equal(result, i2)
i2 = i.copy()
i2 = Index([pd.NaT, pd.NaT] + i[2:].tolist())
result = i.where(notnull(i2), i2.values)
tm.assert_index_equal(result, i2)
def test_where_tz(self):
i = pd.date_range('20130101', periods=3, tz='US/Eastern')
result = i.where(notnull(i))
expected = i
tm.assert_index_equal(result, expected)
i2 = i.copy()
i2 = Index([pd.NaT, pd.NaT] + i[2:].tolist())
result = i.where(notnull(i2))
expected = i2
tm.assert_index_equal(result, expected)
def test_get_loc(self):
idx = pd.date_range('2000-01-01', periods=3)
for method in [None, 'pad', 'backfill', 'nearest']:
self.assertEqual(idx.get_loc(idx[1], method), 1)
self.assertEqual(idx.get_loc(idx[1].to_pydatetime(), method), 1)
self.assertEqual(idx.get_loc(str(idx[1]), method), 1)
if method is not None:
self.assertEqual(idx.get_loc(idx[1], method,
tolerance=pd.Timedelta('0 days')),
1)
self.assertEqual(idx.get_loc('2000-01-01', method='nearest'), 0)
self.assertEqual(idx.get_loc('2000-01-01T12', method='nearest'), 1)
self.assertEqual(idx.get_loc('2000-01-01T12', method='nearest',
tolerance='1 day'), 1)
self.assertEqual(idx.get_loc('2000-01-01T12', method='nearest',
tolerance=pd.Timedelta('1D')), 1)
self.assertEqual(idx.get_loc('2000-01-01T12', method='nearest',
tolerance=np.timedelta64(1, 'D')), 1)
self.assertEqual(idx.get_loc('2000-01-01T12', method='nearest',
tolerance=timedelta(1)), 1)
with tm.assertRaisesRegexp(ValueError, 'must be convertible'):
idx.get_loc('2000-01-01T12', method='nearest', tolerance='foo')
with tm.assertRaises(KeyError):
idx.get_loc('2000-01-01T03', method='nearest', tolerance='2 hours')
self.assertEqual(idx.get_loc('2000', method='nearest'), slice(0, 3))
self.assertEqual(idx.get_loc('2000-01', method='nearest'), slice(0, 3))
self.assertEqual(idx.get_loc('1999', method='nearest'), 0)
self.assertEqual(idx.get_loc('2001', method='nearest'), 2)
with tm.assertRaises(KeyError):
idx.get_loc('1999', method='pad')
with tm.assertRaises(KeyError):
idx.get_loc('2001', method='backfill')
with tm.assertRaises(KeyError):
idx.get_loc('foobar')
with tm.assertRaises(TypeError):
idx.get_loc(slice(2))
idx = pd.to_datetime(['2000-01-01', '2000-01-04'])
self.assertEqual(idx.get_loc('2000-01-02', method='nearest'), 0)
self.assertEqual(idx.get_loc('2000-01-03', method='nearest'), 1)
self.assertEqual(idx.get_loc('2000-01', method='nearest'), slice(0, 2))
# time indexing
idx = pd.date_range('2000-01-01', periods=24, freq='H')
tm.assert_numpy_array_equal(idx.get_loc(time(12)),
np.array([12]), check_dtype=False)
tm.assert_numpy_array_equal(idx.get_loc(time(12, 30)),
np.array([]), check_dtype=False)
with tm.assertRaises(NotImplementedError):
idx.get_loc(time(12, 30), method='pad')
def test_get_indexer(self):
idx = pd.date_range('2000-01-01', periods=3)
exp = np.array([0, 1, 2], dtype=np.intp)
tm.assert_numpy_array_equal(idx.get_indexer(idx), exp)
target = idx[0] + pd.to_timedelta(['-1 hour', '12 hours',
'1 day 1 hour'])
tm.assert_numpy_array_equal(idx.get_indexer(target, 'pad'),
np.array([-1, 0, 1], dtype=np.intp))
tm.assert_numpy_array_equal(idx.get_indexer(target, 'backfill'),
np.array([0, 1, 2], dtype=np.intp))
tm.assert_numpy_array_equal(idx.get_indexer(target, 'nearest'),
np.array([0, 1, 1], dtype=np.intp))
tm.assert_numpy_array_equal(
idx.get_indexer(target, 'nearest',
tolerance=pd.Timedelta('1 hour')),
np.array([0, -1, 1], dtype=np.intp))
with tm.assertRaises(ValueError):
idx.get_indexer(idx[[0]], method='nearest', tolerance='foo')
def test_roundtrip_pickle_with_tz(self):
# GH 8367
# round-trip of timezone
index = date_range('20130101', periods=3, tz='US/Eastern', name='foo')
unpickled = self.round_trip_pickle(index)
self.assert_index_equal(index, unpickled)
def test_reindex_preserves_tz_if_target_is_empty_list_or_array(self):
# GH7774
index = date_range('20130101', periods=3, tz='US/Eastern')
self.assertEqual(str(index.reindex([])[0].tz), 'US/Eastern')
self.assertEqual(str(index.reindex(np.array([]))[0].tz), 'US/Eastern')
def test_time_loc(self): # GH8667
from datetime import time
from pandas.index import _SIZE_CUTOFF
ns = _SIZE_CUTOFF + np.array([-100, 100], dtype=np.int64)
key = time(15, 11, 30)
start = key.hour * 3600 + key.minute * 60 + key.second
step = 24 * 3600
for n in ns:
idx = pd.date_range('2014-11-26', periods=n, freq='S')
ts = pd.Series(np.random.randn(n), index=idx)
i = np.arange(start, n, step)
tm.assert_numpy_array_equal(ts.index.get_loc(key), i,
check_dtype=False)
tm.assert_series_equal(ts[key], ts.iloc[i])
left, right = ts.copy(), ts.copy()
left[key] *= -10
right.iloc[i] *= -10
tm.assert_series_equal(left, right)
def test_time_overflow_for_32bit_machines(self):
# GH8943. On some machines NumPy defaults to np.int32 (for example,
# 32-bit Linux machines). In the function _generate_regular_range
# found in tseries/index.py, `periods` gets multiplied by `strides`
# (which has value 1e9) and since the max value for np.int32 is ~2e9,
# and since those machines won't promote np.int32 to np.int64, we get
# overflow.
periods = np.int_(1000)
idx1 = pd.date_range(start='2000', periods=periods, freq='S')
self.assertEqual(len(idx1), periods)
idx2 = pd.date_range(end='2000', periods=periods, freq='S')
self.assertEqual(len(idx2), periods)
def test_intersection(self):
first = self.index
second = self.index[5:]
intersect = first.intersection(second)
self.assertTrue(tm.equalContents(intersect, second))
# GH 10149
cases = [klass(second.values) for klass in [np.array, Series, list]]
for case in cases:
result = first.intersection(case)
self.assertTrue(tm.equalContents(result, second))
third = Index(['a', 'b', 'c'])
result = first.intersection(third)
expected = pd.Index([], dtype=object)
self.assert_index_equal(result, expected)
def test_union(self):
first = self.index[:5]
second = self.index[5:]
everything = self.index
union = first.union(second)
self.assertTrue(tm.equalContents(union, everything))
# GH 10149
cases = [klass(second.values) for klass in [np.array, Series, list]]
for case in cases:
result = first.union(case)
self.assertTrue(tm.equalContents(result, everything))
def test_nat(self):
self.assertIs(DatetimeIndex([np.nan])[0], pd.NaT)
def test_ufunc_coercions(self):
idx = date_range('2011-01-01', periods=3, freq='2D', name='x')
delta = np.timedelta64(1, 'D')
for result in [idx + delta, np.add(idx, delta)]:
tm.assertIsInstance(result, DatetimeIndex)
exp = date_range('2011-01-02', periods=3, freq='2D', name='x')
tm.assert_index_equal(result, exp)
self.assertEqual(result.freq, '2D')
for result in [idx - delta, np.subtract(idx, delta)]:
tm.assertIsInstance(result, DatetimeIndex)
exp = date_range('2010-12-31', periods=3, freq='2D', name='x')
tm.assert_index_equal(result, exp)
self.assertEqual(result.freq, '2D')
delta = np.array([np.timedelta64(1, 'D'), np.timedelta64(2, 'D'),
np.timedelta64(3, 'D')])
for result in [idx + delta, np.add(idx, delta)]:
tm.assertIsInstance(result, DatetimeIndex)
exp = DatetimeIndex(['2011-01-02', '2011-01-05', '2011-01-08'],
freq='3D', name='x')
tm.assert_index_equal(result, exp)
self.assertEqual(result.freq, '3D')
for result in [idx - delta, np.subtract(idx, delta)]:
tm.assertIsInstance(result, DatetimeIndex)
exp = DatetimeIndex(['2010-12-31', '2011-01-01', '2011-01-02'],
freq='D', name='x')
tm.assert_index_equal(result, exp)
self.assertEqual(result.freq, 'D')
def test_fillna_datetime64(self):
# GH 11343
for tz in ['US/Eastern', 'Asia/Tokyo']:
idx = pd.DatetimeIndex(['2011-01-01 09:00', pd.NaT,
'2011-01-01 11:00'])
exp = pd.DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00',
'2011-01-01 11:00'])
self.assert_index_equal(
idx.fillna(pd.Timestamp('2011-01-01 10:00')), exp)
# tz mismatch
exp = pd.Index([pd.Timestamp('2011-01-01 09:00'),
pd.Timestamp('2011-01-01 10:00', tz=tz),
pd.Timestamp('2011-01-01 11:00')], dtype=object)
self.assert_index_equal(
idx.fillna(pd.Timestamp('2011-01-01 10:00', tz=tz)), exp)
# object
exp = pd.Index([pd.Timestamp('2011-01-01 09:00'), 'x',
pd.Timestamp('2011-01-01 11:00')], dtype=object)
self.assert_index_equal(idx.fillna('x'), exp)
idx = pd.DatetimeIndex(['2011-01-01 09:00', pd.NaT,
'2011-01-01 11:00'], tz=tz)
exp = pd.DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00',
'2011-01-01 11:00'], tz=tz)
self.assert_index_equal(
idx.fillna(pd.Timestamp('2011-01-01 10:00', tz=tz)), exp)
exp = pd.Index([pd.Timestamp('2011-01-01 09:00', tz=tz),
pd.Timestamp('2011-01-01 10:00'),
pd.Timestamp('2011-01-01 11:00', tz=tz)],
dtype=object)
self.assert_index_equal(
idx.fillna(pd.Timestamp('2011-01-01 10:00')), exp)
# object
exp = pd.Index([pd.Timestamp('2011-01-01 09:00', tz=tz),
'x',
pd.Timestamp('2011-01-01 11:00', tz=tz)],
dtype=object)
self.assert_index_equal(idx.fillna('x'), exp)
def test_difference_of_union(self):
# GH14323: Test taking the union of differences of an Index.
# Difference of DatetimeIndex does not preserve frequency,
# so a differencing operation should not retain the freq field of the
# original index.
i = pd.date_range("20160920", "20160925", freq="D")
a = pd.date_range("20160921", "20160924", freq="D")
expected = pd.DatetimeIndex(["20160920", "20160925"], freq=None)
a_diff = i.difference(a)
tm.assert_index_equal(a_diff, expected)
tm.assert_attr_equal('freq', a_diff, expected)
b = pd.date_range("20160922", "20160925", freq="D")
b_diff = i.difference(b)
expected = pd.DatetimeIndex(["20160920", "20160921"], freq=None)
tm.assert_index_equal(b_diff, expected)
tm.assert_attr_equal('freq', b_diff, expected)
union_of_diff = a_diff.union(b_diff)
expected = pd.DatetimeIndex(["20160920", "20160921", "20160925"],
freq=None)
tm.assert_index_equal(union_of_diff, expected)
tm.assert_attr_equal('freq', union_of_diff, expected)
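# Hedged illustration (not part of the original test module): the behaviour
# asserted in test_difference_of_union above, shown directly. Taking the
# difference of a DatetimeIndex drops the original freq.
import pandas as pd
_i = pd.date_range("20160920", "20160925", freq="D")
_a = pd.date_range("20160921", "20160924", freq="D")
print(_i.difference(_a).freq)  # None: the 'D' freq is not preserved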
class TestPeriodIndex(DatetimeLike, tm.TestCase):
_holder = PeriodIndex
_multiprocess_can_split_ = True
def setUp(self):
self.indices = dict(index=tm.makePeriodIndex(10))
self.setup_indices()
def create_index(self):
return period_range('20130101', periods=5, freq='D')
def test_construction_base_constructor(self):
# GH 13664
arr = [pd.Period('2011-01', freq='M'), pd.NaT,
pd.Period('2011-03', freq='M')]
tm.assert_index_equal(pd.Index(arr), pd.PeriodIndex(arr))
tm.assert_index_equal(pd.Index(np.array(arr)),
pd.PeriodIndex(np.array(arr)))
arr = [np.nan, pd.NaT, pd.Period('2011-03', freq='M')]
tm.assert_index_equal(pd.Index(arr), pd.PeriodIndex(arr))
tm.assert_index_equal(pd.Index(np.array(arr)),
pd.PeriodIndex(np.array(arr)))
arr = [pd.Period('2011-01', freq='M'), pd.NaT,
pd.Period('2011-03', freq='D')]
tm.assert_index_equal(pd.Index(arr), pd.Index(arr, dtype=object))
tm.assert_index_equal(pd.Index(np.array(arr)),
pd.Index(np.array(arr), dtype=object))
def test_astype(self):
# GH 13149, GH 13209
idx = PeriodIndex(['2016-05-16', 'NaT', NaT, np.NaN], freq='D')
result = idx.astype(object)
expected = Index([Period('2016-05-16', freq='D')] +
[Period(NaT, freq='D')] * 3, dtype='object')
tm.assert_index_equal(result, expected)
result = idx.astype(int)
expected = Int64Index([16937] + [-9223372036854775808] * 3,
dtype=np.int64)
tm.assert_index_equal(result, expected)
idx = period_range('1990', '2009', freq='A')
result = idx.astype('i8')
self.assert_index_equal(result, Index(idx.asi8))
self.assert_numpy_array_equal(result.values, idx.asi8)
def test_astype_raises(self):
# GH 13149, GH 13209
idx = PeriodIndex(['2016-05-16', 'NaT', NaT, np.NaN], freq='D')
self.assertRaises(ValueError, idx.astype, str)
self.assertRaises(ValueError, idx.astype, float)
self.assertRaises(ValueError, idx.astype, 'timedelta64')
self.assertRaises(ValueError, idx.astype, 'timedelta64[ns]')
def test_shift(self):
# test shift for PeriodIndex
# GH8083
drange = self.create_index()
result = drange.shift(1)
expected = PeriodIndex(['2013-01-02', '2013-01-03', '2013-01-04',
'2013-01-05', '2013-01-06'], freq='D')
self.assert_index_equal(result, expected)
def test_pickle_compat_construction(self):
pass
def test_get_loc(self):
idx = pd.period_range('2000-01-01', periods=3)
for method in [None, 'pad', 'backfill', 'nearest']:
self.assertEqual(idx.get_loc(idx[1], method), 1)
self.assertEqual(
idx.get_loc(idx[1].asfreq('H', how='start'), method), 1)
self.assertEqual(idx.get_loc(idx[1].to_timestamp(), method), 1)
self.assertEqual(
idx.get_loc(idx[1].to_timestamp().to_pydatetime(), method), 1)
self.assertEqual(idx.get_loc(str(idx[1]), method), 1)
idx = pd.period_range('2000-01-01', periods=5)[::2]
self.assertEqual(idx.get_loc('2000-01-02T12', method='nearest',
tolerance='1 day'), 1)
self.assertEqual(idx.get_loc('2000-01-02T12', method='nearest',
tolerance=pd.Timedelta('1D')), 1)
self.assertEqual(idx.get_loc('2000-01-02T12', method='nearest',
tolerance=np.timedelta64(1, 'D')), 1)
self.assertEqual(idx.get_loc('2000-01-02T12', method='nearest',
tolerance=timedelta(1)), 1)
with tm.assertRaisesRegexp(ValueError, 'must be convertible'):
idx.get_loc('2000-01-10', method='nearest', tolerance='foo')
msg = 'Input has different freq from PeriodIndex\\(freq=D\\)'
with tm.assertRaisesRegexp(ValueError, msg):
idx.get_loc('2000-01-10', method='nearest', tolerance='1 hour')
with tm.assertRaises(KeyError):
idx.get_loc('2000-01-10', method='nearest', tolerance='1 day')
def test_where(self):
i = self.create_index()
result = i.where(notnull(i))
expected = i
tm.assert_index_equal(result, expected)
i2 = i.copy()
i2 = pd.PeriodIndex([pd.NaT, pd.NaT] + i[2:].tolist(),
freq='D')
result = i.where(notnull(i2))
expected = i2
tm.assert_index_equal(result, expected)
def test_where_other(self):
i = self.create_index()
for arr in [np.nan, pd.NaT]:
result = i.where(notnull(i), other=arr)
expected = i
tm.assert_index_equal(result, expected)
i2 = i.copy()
i2 = pd.PeriodIndex([pd.NaT, pd.NaT] + i[2:].tolist(),
freq='D')
result = i.where(notnull(i2), i2)
tm.assert_index_equal(result, i2)
i2 = i.copy()
i2 = pd.PeriodIndex([pd.NaT, pd.NaT] + i[2:].tolist(),
freq='D')
result = i.where(notnull(i2), i2.values)
tm.assert_index_equal(result, i2)
def test_get_indexer(self):
idx = pd.period_range('2000-01-01', periods=3).asfreq('H', how='start')
tm.assert_numpy_array_equal(idx.get_indexer(idx),
np.array([0, 1, 2], dtype=np.intp))
target = pd.PeriodIndex(['1999-12-31T23', '2000-01-01T12',
'2000-01-02T01'], freq='H')
tm.assert_numpy_array_equal(idx.get_indexer(target, 'pad'),
np.array([-1, 0, 1], dtype=np.intp))
tm.assert_numpy_array_equal(idx.get_indexer(target, 'backfill'),
np.array([0, 1, 2], dtype=np.intp))
tm.assert_numpy_array_equal(idx.get_indexer(target, 'nearest'),
np.array([0, 1, 1], dtype=np.intp))
tm.assert_numpy_array_equal(idx.get_indexer(target, 'nearest',
tolerance='1 hour'),
np.array([0, -1, 1], dtype=np.intp))
msg = 'Input has different freq from PeriodIndex\\(freq=H\\)'
with self.assertRaisesRegexp(ValueError, msg):
idx.get_indexer(target, 'nearest', tolerance='1 minute')
tm.assert_numpy_array_equal(idx.get_indexer(target, 'nearest',
tolerance='1 day'),
np.array([0, 1, 1], dtype=np.intp))
def test_repeat(self):
# GH10183
idx = pd.period_range('2000-01-01', periods=3, freq='D')
res = idx.repeat(3)
exp = PeriodIndex(idx.values.repeat(3), freq='D')
self.assert_index_equal(res, exp)
self.assertEqual(res.freqstr, 'D')
def test_period_index_indexer(self):
# GH4125
idx = pd.period_range('2002-01', '2003-12', freq='M')
df = pd.DataFrame(pd.np.random.randn(24, 10), index=idx)
self.assert_frame_equal(df, df.ix[idx])
self.assert_frame_equal(df, df.ix[list(idx)])
self.assert_frame_equal(df, df.loc[list(idx)])
self.assert_frame_equal(df.iloc[0:5], df.loc[idx[0:5]])
self.assert_frame_equal(df, df.loc[list(idx)])
def test_fillna_period(self):
# GH 11343
idx = pd.PeriodIndex(['2011-01-01 09:00', pd.NaT,
'2011-01-01 11:00'], freq='H')
exp = pd.PeriodIndex(['2011-01-01 09:00', '2011-01-01 10:00',
'2011-01-01 11:00'], freq='H')
self.assert_index_equal(
idx.fillna(pd.Period('2011-01-01 10:00', freq='H')), exp)
exp = pd.Index([pd.Period('2011-01-01 09:00', freq='H'), 'x',
pd.Period('2011-01-01 11:00', freq='H')], dtype=object)
self.assert_index_equal(idx.fillna('x'), exp)
exp = pd.Index([pd.Period('2011-01-01 09:00', freq='H'),
pd.Period('2011-01-01', freq='D'),
pd.Period('2011-01-01 11:00', freq='H')], dtype=object)
self.assert_index_equal(idx.fillna(pd.Period('2011-01-01', freq='D')),
exp)
def test_no_millisecond_field(self):
with self.assertRaises(AttributeError):
DatetimeIndex.millisecond
with self.assertRaises(AttributeError):
DatetimeIndex([]).millisecond
def test_difference_of_union(self):
# GH14323: Test taking the union of differences of an Index.
# Difference of Period MUST preserve frequency, but the ability
# to union results must be preserved
i = pd.period_range("20160920", "20160925", freq="D")
a = pd.period_range("20160921", "20160924", freq="D")
expected = pd.PeriodIndex(["20160920", "20160925"], freq='D')
a_diff = i.difference(a)
tm.assert_index_equal(a_diff, expected)
tm.assert_attr_equal('freq', a_diff, expected)
b = pd.period_range("20160922", "20160925", freq="D")
b_diff = i.difference(b)
expected = pd.PeriodIndex(["20160920", "20160921"], freq='D')
tm.assert_index_equal(b_diff, expected)
tm.assert_attr_equal('freq', b_diff, expected)
union_of_diff = a_diff.union(b_diff)
expected = pd.PeriodIndex(["20160920", "20160921", "20160925"],
freq='D')
tm.assert_index_equal(union_of_diff, expected)
tm.assert_attr_equal('freq', union_of_diff, expected)
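# Hedged illustration (not part of the original test module): in contrast to
# DatetimeIndex, the difference of a PeriodIndex keeps its frequency, as the
# test above asserts.
import pandas as pd
_i = pd.period_range("20160920", "20160925", freq="D")
_a = pd.period_range("20160921", "20160924", freq="D")
print(_i.difference(_a).freqstr)  # 'D'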
class TestTimedeltaIndex(DatetimeLike, tm.TestCase):
_holder = TimedeltaIndex
_multiprocess_can_split_ = True
def setUp(self):
self.indices = dict(index=tm.makeTimedeltaIndex(10))
self.setup_indices()
def create_index(self):
return pd.to_timedelta(range(5), unit='d') + pd.offsets.Hour(1)
def test_construction_base_constructor(self):
arr = [pd.Timedelta('1 days'), pd.NaT, pd.Timedelta('3 days')]
tm.assert_index_equal(pd.Index(arr), pd.TimedeltaIndex(arr))
tm.assert_index_equal(pd.Index(np.array(arr)),
pd.TimedeltaIndex(np.array(arr)))
arr = [np.nan, pd.NaT, pd.Timedelta('1 days')]
tm.assert_index_equal(pd.Index(arr), pd.TimedeltaIndex(arr))
tm.assert_index_equal(pd.Index(np.array(arr)),
pd.TimedeltaIndex(np.array(arr)))
def test_shift(self):
# test shift for TimedeltaIndex
# GH8083
drange = self.create_index()
result = drange.shift(1)
expected = TimedeltaIndex(['1 days 01:00:00', '2 days 01:00:00',
'3 days 01:00:00',
'4 days 01:00:00', '5 days 01:00:00'],
freq='D')
self.assert_index_equal(result, expected)
result = drange.shift(3, freq='2D 1s')
expected = TimedeltaIndex(['6 days 01:00:03', '7 days 01:00:03',
'8 days 01:00:03', '9 days 01:00:03',
'10 days 01:00:03'], freq='D')
self.assert_index_equal(result, expected)
def test_astype(self):
# GH 13149, GH 13209
idx = TimedeltaIndex([1e14, 'NaT', pd.NaT, np.NaN])
result = idx.astype(object)
expected = Index([Timedelta('1 days 03:46:40')] + [pd.NaT] * 3,
dtype=object)
tm.assert_index_equal(result, expected)
result = idx.astype(int)
expected = Int64Index([100000000000000] + [-9223372036854775808] * 3,
dtype=np.int64)
tm.assert_index_equal(result, expected)
rng = timedelta_range('1 days', periods=10)
result = rng.astype('i8')
self.assert_index_equal(result, Index(rng.asi8))
self.assert_numpy_array_equal(rng.asi8, result.values)
def test_astype_timedelta64(self):
# GH 13149, GH 13209
idx = TimedeltaIndex([1e14, 'NaT', pd.NaT, np.NaN])
result = idx.astype('timedelta64')
expected = Float64Index([1e+14] + [np.NaN] * 3, dtype='float64')
tm.assert_index_equal(result, expected)
result = idx.astype('timedelta64[ns]')
tm.assert_index_equal(result, idx)
self.assertFalse(result is idx)
result = idx.astype('timedelta64[ns]', copy=False)
tm.assert_index_equal(result, idx)
self.assertTrue(result is idx)
def test_astype_raises(self):
# GH 13149, GH 13209
idx = TimedeltaIndex([1e14, 'NaT', pd.NaT, np.NaN])
self.assertRaises(ValueError, idx.astype, float)
self.assertRaises(ValueError, idx.astype, str)
self.assertRaises(ValueError, idx.astype, 'datetime64')
self.assertRaises(ValueError, idx.astype, 'datetime64[ns]')
def test_get_loc(self):
idx = pd.to_timedelta(['0 days', '1 days', '2 days'])
for method in [None, 'pad', 'backfill', 'nearest']:
self.assertEqual(idx.get_loc(idx[1], method), 1)
self.assertEqual(idx.get_loc(idx[1].to_pytimedelta(), method), 1)
self.assertEqual(idx.get_loc(str(idx[1]), method), 1)
self.assertEqual(
idx.get_loc(idx[1], 'pad', tolerance=pd.Timedelta(0)), 1)
self.assertEqual(
idx.get_loc(idx[1], 'pad', tolerance=np.timedelta64(0, 's')), 1)
self.assertEqual(idx.get_loc(idx[1], 'pad', tolerance=timedelta(0)), 1)
with tm.assertRaisesRegexp(ValueError, 'must be convertible'):
idx.get_loc(idx[1], method='nearest', tolerance='foo')
for method, loc in [('pad', 1), ('backfill', 2), ('nearest', 1)]:
self.assertEqual(idx.get_loc('1 day 1 hour', method), loc)
def test_get_indexer(self):
idx = pd.to_timedelta(['0 days', '1 days', '2 days'])
tm.assert_numpy_array_equal(idx.get_indexer(idx),
np.array([0, 1, 2], dtype=np.intp))
target = pd.to_timedelta(['-1 hour', '12 hours', '1 day 1 hour'])
tm.assert_numpy_array_equal(idx.get_indexer(target, 'pad'),
np.array([-1, 0, 1], dtype=np.intp))
tm.assert_numpy_array_equal(idx.get_indexer(target, 'backfill'),
np.array([0, 1, 2], dtype=np.intp))
tm.assert_numpy_array_equal(idx.get_indexer(target, 'nearest'),
np.array([0, 1, 1], dtype=np.intp))
res = idx.get_indexer(target, 'nearest',
tolerance=pd.Timedelta('1 hour'))
tm.assert_numpy_array_equal(res, np.array([0, -1, 1], dtype=np.intp))
def test_numeric_compat(self):
idx = self._holder(np.arange(5, dtype='int64'))
didx = self._holder(np.arange(5, dtype='int64') ** 2)
result = idx * 1
tm.assert_index_equal(result, idx)
result = 1 * idx
tm.assert_index_equal(result, idx)
result = idx / 1
tm.assert_index_equal(result, idx)
result = idx // 1
tm.assert_index_equal(result, idx)
result = idx * np.array(5, dtype='int64')
tm.assert_index_equal(result,
self._holder(np.arange(5, dtype='int64') * 5))
result = idx * np.arange(5, dtype='int64')
tm.assert_index_equal(result, didx)
result = idx * Series(np.arange(5, dtype='int64'))
tm.assert_index_equal(result, didx)
result = idx * Series(np.arange(5, dtype='float64') + 0.1)
tm.assert_index_equal(result, self._holder(np.arange(
5, dtype='float64') * (np.arange(5, dtype='float64') + 0.1)))
# invalid
self.assertRaises(TypeError, lambda: idx * idx)
self.assertRaises(ValueError, lambda: idx * self._holder(np.arange(3)))
self.assertRaises(ValueError, lambda: idx * np.array([1, 2]))
def test_pickle_compat_construction(self):
pass
def test_ufunc_coercions(self):
# normal ops are also tested in tseries/test_timedeltas.py
idx = TimedeltaIndex(['2H', '4H', '6H', '8H', '10H'],
freq='2H', name='x')
for result in [idx * 2, np.multiply(idx, 2)]:
tm.assertIsInstance(result, TimedeltaIndex)
exp = TimedeltaIndex(['4H', '8H', '12H', '16H', '20H'],
freq='4H', name='x')
tm.assert_index_equal(result, exp)
self.assertEqual(result.freq, '4H')
for result in [idx / 2, np.divide(idx, 2)]:
tm.assertIsInstance(result, TimedeltaIndex)
exp = TimedeltaIndex(['1H', '2H', '3H', '4H', '5H'],
freq='H', name='x')
tm.assert_index_equal(result, exp)
self.assertEqual(result.freq, 'H')
idx = TimedeltaIndex(['2H', '4H', '6H', '8H', '10H'],
freq='2H', name='x')
for result in [-idx, np.negative(idx)]:
tm.assertIsInstance(result, TimedeltaIndex)
exp = TimedeltaIndex(['-2H', '-4H', '-6H', '-8H', '-10H'],
freq='-2H', name='x')
tm.assert_index_equal(result, exp)
self.assertEqual(result.freq, '-2H')
idx = TimedeltaIndex(['-2H', '-1H', '0H', '1H', '2H'],
freq='H', name='x')
for result in [abs(idx), np.absolute(idx)]:
tm.assertIsInstance(result, TimedeltaIndex)
exp = TimedeltaIndex(['2H', '1H', '0H', '1H', '2H'],
freq=None, name='x')
tm.assert_index_equal(result, exp)
self.assertEqual(result.freq, None)
def test_fillna_timedelta(self):
# GH 11343
idx = pd.TimedeltaIndex(['1 day', pd.NaT, '3 day'])
exp = pd.TimedeltaIndex(['1 day', '2 day', '3 day'])
self.assert_index_equal(idx.fillna(pd.Timedelta('2 day')), exp)
exp = pd.TimedeltaIndex(['1 day', '3 hour', '3 day'])
self.assert_index_equal(idx.fillna(pd.Timedelta('3 hour')), exp)
exp = pd.Index(
[pd.Timedelta('1 day'), 'x', pd.Timedelta('3 day')], dtype=object)
self.assert_index_equal(idx.fillna('x'), exp)
def test_difference_of_union(self):
# GH14323: Test taking the union of differences of an Index.
# Difference of TimedeltaIndex does not preserve frequency,
# so a differencing operation should not retain the freq field of the
# original index.
i = pd.timedelta_range("0 days", "5 days", freq="D")
a = pd.timedelta_range("1 days", "4 days", freq="D")
expected = pd.TimedeltaIndex(["0 days", "5 days"], freq=None)
a_diff = i.difference(a)
tm.assert_index_equal(a_diff, expected)
tm.assert_attr_equal('freq', a_diff, expected)
b = pd.timedelta_range("2 days", "5 days", freq="D")
b_diff = i.difference(b)
expected = pd.TimedeltaIndex(["0 days", "1 days"], freq=None)
tm.assert_index_equal(b_diff, expected)
tm.assert_attr_equal('freq', b_diff, expected)
union_of_difference = a_diff.union(b_diff)
expected = pd.TimedeltaIndex(["0 days", "1 days", "5 days"],
freq=None)
tm.assert_index_equal(union_of_difference, expected)
tm.assert_attr_equal('freq', union_of_difference, expected)
|
gpl-3.0
|
PatrickOReilly/scikit-learn
|
sklearn/linear_model/stochastic_gradient.py
|
20
|
51086
|
# Authors: Peter Prettenhofer <[email protected]> (main author)
# Mathieu Blondel (partial_fit support)
#
# License: BSD 3 clause
"""Classification and regression using Stochastic Gradient Descent (SGD)."""
import numpy as np
from abc import ABCMeta, abstractmethod
from ..externals.joblib import Parallel, delayed
from .base import LinearClassifierMixin, SparseCoefMixin
from .base import make_dataset
from ..base import BaseEstimator, RegressorMixin
from ..feature_selection.from_model import _LearntSelectorMixin
from ..utils import (check_array, check_random_state, check_X_y,
deprecated)
from ..utils.extmath import safe_sparse_dot
from ..utils.multiclass import _check_partial_fit_first_call
from ..utils.validation import check_is_fitted
from ..externals import six
from .sgd_fast import plain_sgd, average_sgd
from ..utils.fixes import astype
from ..utils import compute_class_weight
from .sgd_fast import Hinge
from .sgd_fast import SquaredHinge
from .sgd_fast import Log
from .sgd_fast import ModifiedHuber
from .sgd_fast import SquaredLoss
from .sgd_fast import Huber
from .sgd_fast import EpsilonInsensitive
from .sgd_fast import SquaredEpsilonInsensitive
LEARNING_RATE_TYPES = {"constant": 1, "optimal": 2, "invscaling": 3,
"pa1": 4, "pa2": 5}
PENALTY_TYPES = {"none": 0, "l2": 2, "l1": 1, "elasticnet": 3}
DEFAULT_EPSILON = 0.1
# Default value of ``epsilon`` parameter.
class BaseSGD(six.with_metaclass(ABCMeta, BaseEstimator, SparseCoefMixin)):
"""Base class for SGD classification and regression."""
def __init__(self, loss, penalty='l2', alpha=0.0001, C=1.0,
l1_ratio=0.15, fit_intercept=True, n_iter=5, shuffle=True,
verbose=0, epsilon=0.1, random_state=None,
learning_rate="optimal", eta0=0.0, power_t=0.5,
warm_start=False, average=False):
self.loss = loss
self.penalty = penalty
self.learning_rate = learning_rate
self.epsilon = epsilon
self.alpha = alpha
self.C = C
self.l1_ratio = l1_ratio
self.fit_intercept = fit_intercept
self.n_iter = n_iter
self.shuffle = shuffle
self.random_state = random_state
self.verbose = verbose
self.eta0 = eta0
self.power_t = power_t
self.warm_start = warm_start
self.average = average
self._validate_params()
self.coef_ = None
if self.average > 0:
self.standard_coef_ = None
self.average_coef_ = None
# iteration count for learning rate schedule
# must not be int (e.g. if ``learning_rate=='optimal'``)
self.t_ = None
def set_params(self, *args, **kwargs):
super(BaseSGD, self).set_params(*args, **kwargs)
self._validate_params()
return self
@abstractmethod
def fit(self, X, y):
"""Fit model."""
def _validate_params(self):
"""Validate input params. """
if not isinstance(self.shuffle, bool):
raise ValueError("shuffle must be either True or False")
if self.n_iter <= 0:
raise ValueError("n_iter must be > zero")
if not (0.0 <= self.l1_ratio <= 1.0):
raise ValueError("l1_ratio must be in [0, 1]")
if self.alpha < 0.0:
raise ValueError("alpha must be >= 0")
if self.learning_rate in ("constant", "invscaling"):
if self.eta0 <= 0.0:
raise ValueError("eta0 must be > 0")
if self.learning_rate == "optimal" and self.alpha == 0:
raise ValueError("alpha must be > 0 since "
"learning_rate is 'optimal'. alpha is used "
"to compute the optimal learning rate.")
# raises ValueError if not registered
self._get_penalty_type(self.penalty)
self._get_learning_rate_type(self.learning_rate)
if self.loss not in self.loss_functions:
raise ValueError("The loss %s is not supported. " % self.loss)
def _get_loss_function(self, loss):
"""Get concrete ``LossFunction`` object for str ``loss``. """
try:
loss_ = self.loss_functions[loss]
loss_class, args = loss_[0], loss_[1:]
if loss in ('huber', 'epsilon_insensitive',
'squared_epsilon_insensitive'):
args = (self.epsilon, )
return loss_class(*args)
except KeyError:
raise ValueError("The loss %s is not supported. " % loss)
def _get_learning_rate_type(self, learning_rate):
try:
return LEARNING_RATE_TYPES[learning_rate]
except KeyError:
raise ValueError("learning rate %s "
"is not supported. " % learning_rate)
def _get_penalty_type(self, penalty):
penalty = str(penalty).lower()
try:
return PENALTY_TYPES[penalty]
except KeyError:
raise ValueError("Penalty %s is not supported. " % penalty)
def _validate_sample_weight(self, sample_weight, n_samples):
"""Set the sample weight array."""
if sample_weight is None:
# uniform sample weights
sample_weight = np.ones(n_samples, dtype=np.float64, order='C')
else:
# user-provided array
sample_weight = np.asarray(sample_weight, dtype=np.float64,
order="C")
if sample_weight.shape[0] != n_samples:
raise ValueError("Shapes of X and sample_weight do not match.")
return sample_weight
def _allocate_parameter_mem(self, n_classes, n_features, coef_init=None,
intercept_init=None):
"""Allocate mem for parameters; initialize if provided."""
if n_classes > 2:
# allocate coef_ for multi-class
if coef_init is not None:
coef_init = np.asarray(coef_init, order="C")
if coef_init.shape != (n_classes, n_features):
raise ValueError("Provided ``coef_`` does not match "
"dataset. ")
self.coef_ = coef_init
else:
self.coef_ = np.zeros((n_classes, n_features),
dtype=np.float64, order="C")
# allocate intercept_ for multi-class
if intercept_init is not None:
intercept_init = np.asarray(intercept_init, order="C")
if intercept_init.shape != (n_classes, ):
raise ValueError("Provided intercept_init "
"does not match dataset.")
self.intercept_ = intercept_init
else:
self.intercept_ = np.zeros(n_classes, dtype=np.float64,
order="C")
else:
# allocate coef_ for binary problem
if coef_init is not None:
coef_init = np.asarray(coef_init, dtype=np.float64,
order="C")
coef_init = coef_init.ravel()
if coef_init.shape != (n_features,):
raise ValueError("Provided coef_init does not "
"match dataset.")
self.coef_ = coef_init
else:
self.coef_ = np.zeros(n_features,
dtype=np.float64,
order="C")
# allocate intercept_ for binary problem
if intercept_init is not None:
intercept_init = np.asarray(intercept_init, dtype=np.float64)
if intercept_init.shape != (1,) and intercept_init.shape != ():
raise ValueError("Provided intercept_init "
"does not match dataset.")
self.intercept_ = intercept_init.reshape(1,)
else:
self.intercept_ = np.zeros(1, dtype=np.float64, order="C")
# initialize average parameters
if self.average > 0:
self.standard_coef_ = self.coef_
self.standard_intercept_ = self.intercept_
self.average_coef_ = np.zeros(self.coef_.shape,
dtype=np.float64,
order="C")
self.average_intercept_ = np.zeros(self.standard_intercept_.shape,
dtype=np.float64,
order="C")
def _prepare_fit_binary(est, y, i):
"""Initialization for fit_binary.
Returns y, coef, intercept.
"""
y_i = np.ones(y.shape, dtype=np.float64, order="C")
y_i[y != est.classes_[i]] = -1.0
average_intercept = 0
average_coef = None
if len(est.classes_) == 2:
if not est.average:
coef = est.coef_.ravel()
intercept = est.intercept_[0]
else:
coef = est.standard_coef_.ravel()
intercept = est.standard_intercept_[0]
average_coef = est.average_coef_.ravel()
average_intercept = est.average_intercept_[0]
else:
if not est.average:
coef = est.coef_[i]
intercept = est.intercept_[i]
else:
coef = est.standard_coef_[i]
intercept = est.standard_intercept_[i]
average_coef = est.average_coef_[i]
average_intercept = est.average_intercept_[i]
return y_i, coef, intercept, average_coef, average_intercept
def fit_binary(est, i, X, y, alpha, C, learning_rate, n_iter,
pos_weight, neg_weight, sample_weight):
"""Fit a single binary classifier.
The i'th class is considered the "positive" class.
"""
# if average is not true, average_coef, and average_intercept will be
# unused
y_i, coef, intercept, average_coef, average_intercept = \
_prepare_fit_binary(est, y, i)
assert y_i.shape[0] == y.shape[0] == sample_weight.shape[0]
dataset, intercept_decay = make_dataset(X, y_i, sample_weight)
penalty_type = est._get_penalty_type(est.penalty)
learning_rate_type = est._get_learning_rate_type(learning_rate)
# XXX should have random_state_!
random_state = check_random_state(est.random_state)
# numpy mtrand expects a C long which is a signed 32 bit integer under
# Windows
seed = random_state.randint(0, np.iinfo(np.int32).max)
if not est.average:
return plain_sgd(coef, intercept, est.loss_function,
penalty_type, alpha, C, est.l1_ratio,
dataset, n_iter, int(est.fit_intercept),
int(est.verbose), int(est.shuffle), seed,
pos_weight, neg_weight,
learning_rate_type, est.eta0,
est.power_t, est.t_, intercept_decay)
else:
standard_coef, standard_intercept, average_coef, \
average_intercept = average_sgd(coef, intercept, average_coef,
average_intercept,
est.loss_function, penalty_type,
alpha, C, est.l1_ratio, dataset,
n_iter, int(est.fit_intercept),
int(est.verbose), int(est.shuffle),
seed, pos_weight, neg_weight,
learning_rate_type, est.eta0,
est.power_t, est.t_,
intercept_decay,
est.average)
if len(est.classes_) == 2:
est.average_intercept_[0] = average_intercept
else:
est.average_intercept_[i] = average_intercept
return standard_coef, standard_intercept
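# --- Illustration (not part of the original module) --------------------------
# A minimal, hypothetical sketch of the one-vs-all relabeling performed by
# ``_prepare_fit_binary``/``fit_binary`` above: the i'th class becomes +1 and
# every other class becomes -1 before the binary solver is invoked. The labels
# below are toy data.
def _demo_ova_relabeling():
    import numpy as np
    y = np.array([0, 1, 2, 1, 0])                     # original multi-class labels
    classes = np.unique(y)                            # array([0, 1, 2])
    i = 1                                             # treat class ``1`` as the positive class
    y_i = np.ones(y.shape, dtype=np.float64, order="C")
    y_i[y != classes[i]] = -1.0                       # -> array([-1.,  1., -1.,  1., -1.])
    return y_i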
class BaseSGDClassifier(six.with_metaclass(ABCMeta, BaseSGD,
LinearClassifierMixin)):
loss_functions = {
"hinge": (Hinge, 1.0),
"squared_hinge": (SquaredHinge, 1.0),
"perceptron": (Hinge, 0.0),
"log": (Log, ),
"modified_huber": (ModifiedHuber, ),
"squared_loss": (SquaredLoss, ),
"huber": (Huber, DEFAULT_EPSILON),
"epsilon_insensitive": (EpsilonInsensitive, DEFAULT_EPSILON),
"squared_epsilon_insensitive": (SquaredEpsilonInsensitive,
DEFAULT_EPSILON),
}
@abstractmethod
def __init__(self, loss="hinge", penalty='l2', alpha=0.0001, l1_ratio=0.15,
fit_intercept=True, n_iter=5, shuffle=True, verbose=0,
epsilon=DEFAULT_EPSILON, n_jobs=1, random_state=None,
learning_rate="optimal", eta0=0.0, power_t=0.5,
class_weight=None, warm_start=False, average=False):
super(BaseSGDClassifier, self).__init__(loss=loss, penalty=penalty,
alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=fit_intercept,
n_iter=n_iter, shuffle=shuffle,
verbose=verbose,
epsilon=epsilon,
random_state=random_state,
learning_rate=learning_rate,
eta0=eta0, power_t=power_t,
warm_start=warm_start,
average=average)
self.class_weight = class_weight
self.classes_ = None
self.n_jobs = int(n_jobs)
def _partial_fit(self, X, y, alpha, C,
loss, learning_rate, n_iter,
classes, sample_weight,
coef_init, intercept_init):
X, y = check_X_y(X, y, 'csr', dtype=np.float64, order="C")
n_samples, n_features = X.shape
self._validate_params()
_check_partial_fit_first_call(self, classes)
n_classes = self.classes_.shape[0]
# Allocate datastructures from input arguments
self._expanded_class_weight = compute_class_weight(self.class_weight,
self.classes_, y)
sample_weight = self._validate_sample_weight(sample_weight, n_samples)
if self.coef_ is None or coef_init is not None:
self._allocate_parameter_mem(n_classes, n_features,
coef_init, intercept_init)
elif n_features != self.coef_.shape[-1]:
raise ValueError("Number of features %d does not match previous "
"data %d." % (n_features, self.coef_.shape[-1]))
self.loss_function = self._get_loss_function(loss)
if self.t_ is None:
self.t_ = 1.0
# delegate to concrete training procedure
if n_classes > 2:
self._fit_multiclass(X, y, alpha=alpha, C=C,
learning_rate=learning_rate,
sample_weight=sample_weight, n_iter=n_iter)
elif n_classes == 2:
self._fit_binary(X, y, alpha=alpha, C=C,
learning_rate=learning_rate,
sample_weight=sample_weight, n_iter=n_iter)
else:
raise ValueError("The number of class labels must be "
"greater than one.")
return self
def _fit(self, X, y, alpha, C, loss, learning_rate, coef_init=None,
intercept_init=None, sample_weight=None):
if hasattr(self, "classes_"):
self.classes_ = None
X, y = check_X_y(X, y, 'csr', dtype=np.float64, order="C")
n_samples, n_features = X.shape
# labels can be encoded as float, int, or string literals
# np.unique sorts in asc order; largest class id is positive class
classes = np.unique(y)
if self.warm_start and self.coef_ is not None:
if coef_init is None:
coef_init = self.coef_
if intercept_init is None:
intercept_init = self.intercept_
else:
self.coef_ = None
self.intercept_ = None
if self.average > 0:
self.standard_coef_ = self.coef_
self.standard_intercept_ = self.intercept_
self.average_coef_ = None
self.average_intercept_ = None
# Clear iteration count for multiple call to fit.
self.t_ = None
self._partial_fit(X, y, alpha, C, loss, learning_rate, self.n_iter,
classes, sample_weight, coef_init, intercept_init)
return self
def _fit_binary(self, X, y, alpha, C, sample_weight,
learning_rate, n_iter):
"""Fit a binary classifier on X and y. """
coef, intercept = fit_binary(self, 1, X, y, alpha, C,
learning_rate, n_iter,
self._expanded_class_weight[1],
self._expanded_class_weight[0],
sample_weight)
self.t_ += n_iter * X.shape[0]
# need to be 2d
if self.average > 0:
if self.average <= self.t_ - 1:
self.coef_ = self.average_coef_.reshape(1, -1)
self.intercept_ = self.average_intercept_
else:
self.coef_ = self.standard_coef_.reshape(1, -1)
self.standard_intercept_ = np.atleast_1d(intercept)
self.intercept_ = self.standard_intercept_
else:
self.coef_ = coef.reshape(1, -1)
# intercept is a float, need to convert it to an array of length 1
self.intercept_ = np.atleast_1d(intercept)
def _fit_multiclass(self, X, y, alpha, C, learning_rate,
sample_weight, n_iter):
"""Fit a multi-class classifier by combining binary classifiers
Each binary classifier predicts one class versus all others. This
strategy is called OVA: One Versus All.
"""
# Use joblib to fit OvA in parallel.
result = Parallel(n_jobs=self.n_jobs, backend="threading",
verbose=self.verbose)(
delayed(fit_binary)(self, i, X, y, alpha, C, learning_rate,
n_iter, self._expanded_class_weight[i], 1.,
sample_weight)
for i in range(len(self.classes_)))
for i, (_, intercept) in enumerate(result):
self.intercept_[i] = intercept
self.t_ += n_iter * X.shape[0]
if self.average > 0:
if self.average <= self.t_ - 1.0:
self.coef_ = self.average_coef_
self.intercept_ = self.average_intercept_
else:
self.coef_ = self.standard_coef_
self.standard_intercept_ = np.atleast_1d(self.intercept_)
self.intercept_ = self.standard_intercept_
def partial_fit(self, X, y, classes=None, sample_weight=None):
"""Fit linear model with Stochastic Gradient Descent.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Subset of the training data
y : numpy array, shape (n_samples,)
Subset of the target values
classes : array, shape (n_classes,)
Classes across all calls to partial_fit.
Can be obtained via `np.unique(y_all)`, where y_all is the
target vector of the entire dataset.
This argument is required for the first call to partial_fit
and can be omitted in the subsequent calls.
Note that y doesn't need to contain all labels in `classes`.
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples.
If not provided, uniform weights are assumed.
Returns
-------
self : returns an instance of self.
"""
if self.class_weight in ['balanced', 'auto']:
raise ValueError("class_weight '{0}' is not supported for "
"partial_fit. In order to use 'balanced' weights,"
" use compute_class_weight('{0}', classes, y). "
"In place of y you can us a large enough sample "
"of the full training set target to properly "
"estimate the class frequency distributions. "
"Pass the resulting weights as the class_weight "
"parameter.".format(self.class_weight))
return self._partial_fit(X, y, alpha=self.alpha, C=1.0, loss=self.loss,
learning_rate=self.learning_rate, n_iter=1,
classes=classes, sample_weight=sample_weight,
coef_init=None, intercept_init=None)
def fit(self, X, y, coef_init=None, intercept_init=None,
sample_weight=None):
"""Fit linear model with Stochastic Gradient Descent.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data
y : numpy array, shape (n_samples,)
Target values
coef_init : array, shape (n_classes, n_features)
The initial coefficients to warm-start the optimization.
intercept_init : array, shape (n_classes,)
The initial intercept to warm-start the optimization.
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples.
If not provided, uniform weights are assumed. These weights will
be multiplied with class_weight (passed through the
constructor) if class_weight is specified
Returns
-------
self : returns an instance of self.
"""
return self._fit(X, y, alpha=self.alpha, C=1.0,
loss=self.loss, learning_rate=self.learning_rate,
coef_init=coef_init, intercept_init=intercept_init,
sample_weight=sample_weight)
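# --- Illustration (not part of the original module) --------------------------
# A hypothetical out-of-core usage sketch for ``partial_fit`` as documented
# above: ``classes`` must list every label on the first call, while later
# mini-batches may contain only a subset. Data and batch sizes are made up.
def _demo_partial_fit():
    import numpy as np
    from sklearn.linear_model import SGDClassifier
    rng = np.random.RandomState(0)
    classes = np.array([0, 1, 2])
    clf = SGDClassifier(loss="log")                   # 'log' loss enables predict_proba
    for _ in range(10):                               # stream of mini-batches
        X_batch = rng.randn(32, 4)
        y_batch = rng.randint(0, 3, size=32)
        clf.partial_fit(X_batch, y_batch, classes=classes)
    return clf.predict_proba(rng.randn(5, 4))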
class SGDClassifier(BaseSGDClassifier, _LearntSelectorMixin):
"""Linear classifiers (SVM, logistic regression, a.o.) with SGD training.
This estimator implements regularized linear models with stochastic
gradient descent (SGD) learning: the gradient of the loss is estimated
one sample at a time and the model is updated along the way with a
decreasing strength schedule (aka learning rate). SGD allows minibatch
(online/out-of-core) learning, see the partial_fit method.
For best results using the default learning rate schedule, the data should
have zero mean and unit variance.
This implementation works with data represented as dense or sparse arrays
of floating point values for the features. The model it fits can be
controlled with the loss parameter; by default, it fits a linear support
vector machine (SVM).
The regularizer is a penalty added to the loss function that shrinks model
parameters towards the zero vector using either the squared euclidean norm
L2 or the absolute norm L1 or a combination of both (Elastic Net). If the
parameter update crosses the 0.0 value because of the regularizer, the
update is truncated to 0.0 to allow for learning sparse models and achieve
online feature selection.
Read more in the :ref:`User Guide <sgd>`.
Parameters
----------
loss : str, 'hinge', 'log', 'modified_huber', 'squared_hinge',\
'perceptron', or a regression loss: 'squared_loss', 'huber',\
'epsilon_insensitive', or 'squared_epsilon_insensitive'
The loss function to be used. Defaults to 'hinge', which gives a
linear SVM.
The 'log' loss gives logistic regression, a probabilistic classifier.
'modified_huber' is another smooth loss that brings tolerance to
outliers as well as probability estimates.
'squared_hinge' is like hinge but is quadratically penalized.
'perceptron' is the linear loss used by the perceptron algorithm.
The other losses are designed for regression but can be useful in
classification as well; see SGDRegressor for a description.
penalty : str, 'none', 'l2', 'l1', or 'elasticnet'
The penalty (aka regularization term) to be used. Defaults to 'l2'
which is the standard regularizer for linear SVM models. 'l1' and
'elasticnet' might bring sparsity to the model (feature selection)
not achievable with 'l2'.
alpha : float
Constant that multiplies the regularization term. Defaults to 0.0001
Also used to compute learning_rate when set to 'optimal'.
l1_ratio : float
The Elastic Net mixing parameter, with 0 <= l1_ratio <= 1.
l1_ratio=0 corresponds to L2 penalty, l1_ratio=1 to L1.
Defaults to 0.15.
fit_intercept : bool
Whether the intercept should be estimated or not. If False, the
data is assumed to be already centered. Defaults to True.
n_iter : int, optional
The number of passes over the training data (aka epochs). The number
of iterations is set to 1 if using partial_fit.
Defaults to 5.
shuffle : bool, optional
Whether or not the training data should be shuffled after each epoch.
Defaults to True.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data.
verbose : integer, optional
The verbosity level
epsilon : float
Epsilon in the epsilon-insensitive loss functions; only if `loss` is
'huber', 'epsilon_insensitive', or 'squared_epsilon_insensitive'.
For 'huber', determines the threshold at which it becomes less
important to get the prediction exactly right.
For epsilon-insensitive, any differences between the current prediction
and the correct label are ignored if they are less than this threshold.
n_jobs : integer, optional
The number of CPUs to use to do the OVA (One Versus All, for
multi-class problems) computation. -1 means 'all CPUs'. Defaults
to 1.
learning_rate : string, optional
The learning rate schedule:
- 'constant': eta = eta0
- 'optimal': eta = 1.0 / (alpha * (t + t0)) [default]
- 'invscaling': eta = eta0 / pow(t, power_t)
where t0 is chosen by a heuristic proposed by Leon Bottou.
eta0 : double
The initial learning rate for the 'constant' or 'invscaling'
schedules. The default value is 0.0 as eta0 is not used by the
default schedule 'optimal'.
power_t : double
The exponent for inverse scaling learning rate [default 0.5].
class_weight : dict, {class_label: weight} or "balanced" or None, optional
Preset for the class_weight fit parameter.
Weights associated with classes. If not given, all classes
are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
warm_start : bool, optional
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
average : bool or int, optional
When set to True, computes the averaged SGD weights and stores the
result in the ``coef_`` attribute. If set to an int greater than 1,
averaging will begin once the total number of samples seen reaches
average. So ``average=10`` will begin averaging after seeing 10
samples.
Attributes
----------
coef_ : array, shape (1, n_features) if n_classes == 2 else (n_classes,\
n_features)
Weights assigned to the features.
intercept_ : array, shape (1,) if n_classes == 2 else (n_classes,)
Constants in decision function.
Examples
--------
>>> import numpy as np
>>> from sklearn import linear_model
>>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
>>> Y = np.array([1, 1, 2, 2])
>>> clf = linear_model.SGDClassifier()
>>> clf.fit(X, Y)
... #doctest: +NORMALIZE_WHITESPACE
SGDClassifier(alpha=0.0001, average=False, class_weight=None, epsilon=0.1,
eta0=0.0, fit_intercept=True, l1_ratio=0.15,
learning_rate='optimal', loss='hinge', n_iter=5, n_jobs=1,
penalty='l2', power_t=0.5, random_state=None, shuffle=True,
verbose=0, warm_start=False)
>>> print(clf.predict([[-0.8, -1]]))
[1]
See also
--------
LinearSVC, LogisticRegression, Perceptron
"""
def __init__(self, loss="hinge", penalty='l2', alpha=0.0001, l1_ratio=0.15,
fit_intercept=True, n_iter=5, shuffle=True, verbose=0,
epsilon=DEFAULT_EPSILON, n_jobs=1, random_state=None,
learning_rate="optimal", eta0=0.0, power_t=0.5,
class_weight=None, warm_start=False, average=False):
super(SGDClassifier, self).__init__(
loss=loss, penalty=penalty, alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=fit_intercept, n_iter=n_iter, shuffle=shuffle,
verbose=verbose, epsilon=epsilon, n_jobs=n_jobs,
random_state=random_state, learning_rate=learning_rate, eta0=eta0,
power_t=power_t, class_weight=class_weight, warm_start=warm_start,
average=average)
def _check_proba(self):
check_is_fitted(self, "t_")
if self.loss not in ("log", "modified_huber"):
raise AttributeError("probability estimates are not available for"
" loss=%r" % self.loss)
@property
def predict_proba(self):
"""Probability estimates.
This method is only available for log loss and modified Huber loss.
Multiclass probability estimates are derived from binary (one-vs.-rest)
estimates by simple normalization, as recommended by Zadrozny and
Elkan.
Binary probability estimates for loss="modified_huber" are given by
(clip(decision_function(X), -1, 1) + 1) / 2. For other loss functions
it is necessary to perform proper probability calibration by wrapping
the classifier with
:class:`sklearn.calibration.CalibratedClassifierCV` instead.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Returns
-------
array, shape (n_samples, n_classes)
Returns the probability of the sample for each class in the model,
where classes are ordered as they are in `self.classes_`.
References
----------
Zadrozny and Elkan, "Transforming classifier scores into multiclass
probability estimates", SIGKDD'02,
http://www.research.ibm.com/people/z/zadrozny/kdd2002-Transf.pdf
The justification for the formula in the loss="modified_huber"
case is in the appendix B in:
http://jmlr.csail.mit.edu/papers/volume2/zhang02c/zhang02c.pdf
"""
self._check_proba()
return self._predict_proba
def _predict_proba(self, X):
if self.loss == "log":
return self._predict_proba_lr(X)
elif self.loss == "modified_huber":
binary = (len(self.classes_) == 2)
scores = self.decision_function(X)
if binary:
prob2 = np.ones((scores.shape[0], 2))
prob = prob2[:, 1]
else:
prob = scores
np.clip(scores, -1, 1, prob)
prob += 1.
prob /= 2.
if binary:
prob2[:, 0] -= prob
prob = prob2
else:
# the above might assign zero to all classes, which doesn't
# normalize neatly; work around this to produce uniform
# probabilities
prob_sum = prob.sum(axis=1)
all_zero = (prob_sum == 0)
if np.any(all_zero):
prob[all_zero, :] = 1
prob_sum[all_zero] = len(self.classes_)
# normalize
prob /= prob_sum.reshape((prob.shape[0], -1))
return prob
else:
raise NotImplementedError("predict_(log_)proba only supported when"
" loss='log' or loss='modified_huber' "
"(%r given)" % self.loss)
@property
def predict_log_proba(self):
"""Log of probability estimates.
This method is only available for log loss and modified Huber loss.
When loss="modified_huber", probability estimates may be hard zeros
and ones, so taking the logarithm is not possible.
See ``predict_proba`` for details.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Returns
-------
T : array-like, shape (n_samples, n_classes)
Returns the log-probability of the sample for each class in the
model, where classes are ordered as they are in
`self.classes_`.
"""
self._check_proba()
return self._predict_log_proba
def _predict_log_proba(self, X):
return np.log(self.predict_proba(X))
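# --- Illustration (not part of the original module) --------------------------
# A small numeric sketch of the binary probability mapping described in the
# ``predict_proba`` docstring above for loss="modified_huber":
# P(y=1 | x) = (clip(decision_function(x), -1, 1) + 1) / 2. The raw scores
# below are hypothetical.
def _demo_modified_huber_proba():
    import numpy as np
    scores = np.array([-3.0, -0.5, 0.0, 0.5, 3.0])    # stand-in decision_function output
    prob_pos = (np.clip(scores, -1, 1) + 1.0) / 2.0   # -> [0., 0.25, 0.5, 0.75, 1.]
    prob = np.column_stack([1.0 - prob_pos, prob_pos])  # columns follow ``classes_`` order
    return prob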
class BaseSGDRegressor(BaseSGD, RegressorMixin):
loss_functions = {
"squared_loss": (SquaredLoss, ),
"huber": (Huber, DEFAULT_EPSILON),
"epsilon_insensitive": (EpsilonInsensitive, DEFAULT_EPSILON),
"squared_epsilon_insensitive": (SquaredEpsilonInsensitive,
DEFAULT_EPSILON),
}
@abstractmethod
def __init__(self, loss="squared_loss", penalty="l2", alpha=0.0001,
l1_ratio=0.15, fit_intercept=True, n_iter=5, shuffle=True,
verbose=0, epsilon=DEFAULT_EPSILON, random_state=None,
learning_rate="invscaling", eta0=0.01, power_t=0.25,
warm_start=False, average=False):
super(BaseSGDRegressor, self).__init__(loss=loss, penalty=penalty,
alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=fit_intercept,
n_iter=n_iter, shuffle=shuffle,
verbose=verbose,
epsilon=epsilon,
random_state=random_state,
learning_rate=learning_rate,
eta0=eta0, power_t=power_t,
warm_start=warm_start,
average=average)
def _partial_fit(self, X, y, alpha, C, loss, learning_rate,
n_iter, sample_weight,
coef_init, intercept_init):
X, y = check_X_y(X, y, "csr", copy=False, order='C', dtype=np.float64)
y = astype(y, np.float64, copy=False)
n_samples, n_features = X.shape
self._validate_params()
# Allocate datastructures from input arguments
sample_weight = self._validate_sample_weight(sample_weight, n_samples)
if self.coef_ is None:
self._allocate_parameter_mem(1, n_features,
coef_init, intercept_init)
elif n_features != self.coef_.shape[-1]:
raise ValueError("Number of features %d does not match previous "
"data %d." % (n_features, self.coef_.shape[-1]))
if self.average > 0 and self.average_coef_ is None:
self.average_coef_ = np.zeros(n_features,
dtype=np.float64,
order="C")
self.average_intercept_ = np.zeros(1,
dtype=np.float64,
order="C")
self._fit_regressor(X, y, alpha, C, loss, learning_rate,
sample_weight, n_iter)
return self
def partial_fit(self, X, y, sample_weight=None):
"""Fit linear model with Stochastic Gradient Descent.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Subset of training data
y : numpy array of shape (n_samples,)
Subset of target values
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples.
If not provided, uniform weights are assumed.
Returns
-------
self : returns an instance of self.
"""
return self._partial_fit(X, y, self.alpha, C=1.0,
loss=self.loss,
learning_rate=self.learning_rate, n_iter=1,
sample_weight=sample_weight,
coef_init=None, intercept_init=None)
def _fit(self, X, y, alpha, C, loss, learning_rate, coef_init=None,
intercept_init=None, sample_weight=None):
if self.warm_start and self.coef_ is not None:
if coef_init is None:
coef_init = self.coef_
if intercept_init is None:
intercept_init = self.intercept_
else:
self.coef_ = None
self.intercept_ = None
if self.average > 0:
self.standard_intercept_ = self.intercept_
self.standard_coef_ = self.coef_
self.average_coef_ = None
self.average_intercept_ = None
# Clear iteration count for multiple call to fit.
self.t_ = None
return self._partial_fit(X, y, alpha, C, loss, learning_rate,
self.n_iter, sample_weight,
coef_init, intercept_init)
def fit(self, X, y, coef_init=None, intercept_init=None,
sample_weight=None):
"""Fit linear model with Stochastic Gradient Descent.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data
y : numpy array, shape (n_samples,)
Target values
coef_init : array, shape (n_features,)
The initial coefficients to warm-start the optimization.
intercept_init : array, shape (1,)
The initial intercept to warm-start the optimization.
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples (1. for unweighted).
Returns
-------
self : returns an instance of self.
"""
return self._fit(X, y, alpha=self.alpha, C=1.0,
loss=self.loss, learning_rate=self.learning_rate,
coef_init=coef_init,
intercept_init=intercept_init,
sample_weight=sample_weight)
@deprecated(" and will be removed in 0.19.")
def decision_function(self, X):
"""Predict using the linear model
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Returns
-------
array, shape (n_samples,)
Predicted target values per element in X.
"""
return self._decision_function(X)
def _decision_function(self, X):
"""Predict using the linear model
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Returns
-------
array, shape (n_samples,)
Predicted target values per element in X.
"""
check_is_fitted(self, ["t_", "coef_", "intercept_"], all_or_any=all)
X = check_array(X, accept_sparse='csr')
scores = safe_sparse_dot(X, self.coef_.T,
dense_output=True) + self.intercept_
return scores.ravel()
def predict(self, X):
"""Predict using the linear model
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Returns
-------
array, shape (n_samples,)
Predicted target values per element in X.
"""
return self._decision_function(X)
def _fit_regressor(self, X, y, alpha, C, loss, learning_rate,
sample_weight, n_iter):
dataset, intercept_decay = make_dataset(X, y, sample_weight)
loss_function = self._get_loss_function(loss)
penalty_type = self._get_penalty_type(self.penalty)
learning_rate_type = self._get_learning_rate_type(learning_rate)
if self.t_ is None:
self.t_ = 1.0
random_state = check_random_state(self.random_state)
# numpy mtrand expects a C long which is a signed 32 bit integer under
# Windows
seed = random_state.randint(0, np.iinfo(np.int32).max)
if self.average > 0:
self.standard_coef_, self.standard_intercept_, \
self.average_coef_, self.average_intercept_ =\
average_sgd(self.standard_coef_,
self.standard_intercept_[0],
self.average_coef_,
self.average_intercept_[0],
loss_function,
penalty_type,
alpha, C,
self.l1_ratio,
dataset,
n_iter,
int(self.fit_intercept),
int(self.verbose),
int(self.shuffle),
seed,
1.0, 1.0,
learning_rate_type,
self.eta0, self.power_t, self.t_,
intercept_decay, self.average)
self.average_intercept_ = np.atleast_1d(self.average_intercept_)
self.standard_intercept_ = np.atleast_1d(self.standard_intercept_)
self.t_ += n_iter * X.shape[0]
if self.average <= self.t_ - 1.0:
self.coef_ = self.average_coef_
self.intercept_ = self.average_intercept_
else:
self.coef_ = self.standard_coef_
self.intercept_ = self.standard_intercept_
else:
self.coef_, self.intercept_ = \
plain_sgd(self.coef_,
self.intercept_[0],
loss_function,
penalty_type,
alpha, C,
self.l1_ratio,
dataset,
n_iter,
int(self.fit_intercept),
int(self.verbose),
int(self.shuffle),
seed,
1.0, 1.0,
learning_rate_type,
self.eta0, self.power_t, self.t_,
intercept_decay)
self.t_ += n_iter * X.shape[0]
self.intercept_ = np.atleast_1d(self.intercept_)
class SGDRegressor(BaseSGDRegressor, _LearntSelectorMixin):
"""Linear model fitted by minimizing a regularized empirical loss with SGD
SGD stands for Stochastic Gradient Descent: the gradient of the loss is
estimated one sample at a time and the model is updated along the way with
a decreasing strength schedule (aka learning rate).
The regularizer is a penalty added to the loss function that shrinks model
parameters towards the zero vector using either the squared euclidean norm
L2 or the absolute norm L1 or a combination of both (Elastic Net). If the
parameter update crosses the 0.0 value because of the regularizer, the
update is truncated to 0.0 to allow for learning sparse models and achieve
online feature selection.
This implementation works with data represented as dense numpy arrays of
floating point values for the features.
Read more in the :ref:`User Guide <sgd>`.
Parameters
----------
loss : str, 'squared_loss', 'huber', 'epsilon_insensitive', \
or 'squared_epsilon_insensitive'
The loss function to be used. Defaults to 'squared_loss' which refers
to the ordinary least squares fit. 'huber' modifies 'squared_loss' to
focus less on getting outliers correct by switching from squared to
linear loss past a distance of epsilon. 'epsilon_insensitive' ignores
errors less than epsilon and is linear past that; this is the loss
function used in SVR. 'squared_epsilon_insensitive' is the same but
becomes squared loss past a tolerance of epsilon.
penalty : str, 'none', 'l2', 'l1', or 'elasticnet'
The penalty (aka regularization term) to be used. Defaults to 'l2'
which is the standard regularizer for linear SVM models. 'l1' and
'elasticnet' might bring sparsity to the model (feature selection)
not achievable with 'l2'.
alpha : float
Constant that multiplies the regularization term. Defaults to 0.0001
Also used to compute learning_rate when set to 'optimal'.
l1_ratio : float
The Elastic Net mixing parameter, with 0 <= l1_ratio <= 1.
l1_ratio=0 corresponds to L2 penalty, l1_ratio=1 to L1.
Defaults to 0.15.
fit_intercept : bool
Whether the intercept should be estimated or not. If False, the
data is assumed to be already centered. Defaults to True.
n_iter : int, optional
The number of passes over the training data (aka epochs). The number
of iterations is set to 1 if using partial_fit.
Defaults to 5.
shuffle : bool, optional
Whether or not the training data should be shuffled after each epoch.
Defaults to True.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data.
verbose : integer, optional
The verbosity level.
epsilon : float
Epsilon in the epsilon-insensitive loss functions; only if `loss` is
'huber', 'epsilon_insensitive', or 'squared_epsilon_insensitive'.
For 'huber', determines the threshold at which it becomes less
important to get the prediction exactly right.
For epsilon-insensitive, any differences between the current prediction
and the correct label are ignored if they are less than this threshold.
learning_rate : string, optional
The learning rate schedule:
- 'constant': eta = eta0
- 'optimal': eta = 1.0 / (alpha * (t + t0)) [default]
- 'invscaling': eta = eta0 / pow(t, power_t)
where t0 is chosen by a heuristic proposed by Leon Bottou.
eta0 : double, optional
The initial learning rate [default 0.01].
power_t : double, optional
The exponent for inverse scaling learning rate [default 0.25].
warm_start : bool, optional
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
average : bool or int, optional
When set to True, computes the averaged SGD weights and stores the
result in the ``coef_`` attribute. If set to an int greater than 1,
averaging will begin once the total number of samples seen reaches
average. So ``average=10`` will begin averaging after seeing 10
samples.
Attributes
----------
coef_ : array, shape (n_features,)
Weights assigned to the features.
intercept_ : array, shape (1,)
The intercept term.
average_coef_ : array, shape (n_features,)
Averaged weights assigned to the features.
average_intercept_ : array, shape (1,)
The averaged intercept term.
Examples
--------
>>> import numpy as np
>>> from sklearn import linear_model
>>> n_samples, n_features = 10, 5
>>> np.random.seed(0)
>>> y = np.random.randn(n_samples)
>>> X = np.random.randn(n_samples, n_features)
>>> clf = linear_model.SGDRegressor()
>>> clf.fit(X, y)
... #doctest: +NORMALIZE_WHITESPACE
SGDRegressor(alpha=0.0001, average=False, epsilon=0.1, eta0=0.01,
fit_intercept=True, l1_ratio=0.15, learning_rate='invscaling',
loss='squared_loss', n_iter=5, penalty='l2', power_t=0.25,
random_state=None, shuffle=True, verbose=0, warm_start=False)
See also
--------
Ridge, ElasticNet, Lasso, SVR
"""
def __init__(self, loss="squared_loss", penalty="l2", alpha=0.0001,
l1_ratio=0.15, fit_intercept=True, n_iter=5, shuffle=True,
verbose=0, epsilon=DEFAULT_EPSILON, random_state=None,
learning_rate="invscaling", eta0=0.01, power_t=0.25,
warm_start=False, average=False):
super(SGDRegressor, self).__init__(loss=loss, penalty=penalty,
alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=fit_intercept,
n_iter=n_iter, shuffle=shuffle,
verbose=verbose,
epsilon=epsilon,
random_state=random_state,
learning_rate=learning_rate,
eta0=eta0, power_t=power_t,
warm_start=warm_start,
average=average)
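# --- Illustration (not part of the original module) --------------------------
# The 'invscaling' learning-rate schedule listed in the docstrings above,
# eta = eta0 / pow(t, power_t), evaluated numerically with the SGDRegressor
# defaults. The step counts are arbitrary.
def _demo_invscaling_schedule():
    import numpy as np
    eta0, power_t = 0.01, 0.25                        # SGDRegressor defaults
    t = np.arange(1, 6, dtype=np.float64)             # hypothetical step counts
    eta = eta0 / np.power(t, power_t)                 # monotonically decaying step sizes
    return eta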
|
bsd-3-clause
|
nhejazi/project-gamma
|
code/utils/roi_extraction.py
|
2
|
3699
|
"""
Helpers for extracting voxel time courses based on the
spherical shape assumption of a ROI.
"""
import sys, os
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
import project_config
import numpy as np
import numpy.linalg as npl
import nibabel as nib
import itertools as itt
import matplotlib.pyplot as plt
import os
from scipy.spatial.distance import pdist
from general_utils import vol_index_iter
from scipy.spatial import cKDTree
def co2vox(coordinate,mm_to_vox):
"""
Return the voxel indices in the MNI brain for a given ROI center coordinate.
Parameters:
---------
Input
coordinate: center coordinate of ROI
mm_to_vox: Map from coordinate to MNI brain indices
Return
MNI brain indices
"""
ind = nib.affines.apply_affine(mm_to_vox, coordinate)
return [int(x) for x in ind]
def ROI_region(fmri):
"""
Get all the voxel indices within one ROI region, given its center index.
Parameters:
---------
Input
fmri: voxel indices of the ROI center in the standard brain
Return
List of tuples, which represent indices of voxel within given ROI.
"""
x = range(max(fmri[0]-4,0),min(fmri[0]+4,91))
y = range(max(fmri[1]-4,0),min(fmri[1]+4,109))
z = range(max(fmri[2]-4,0),min(fmri[2]+4,91))
a = [x,y,z]
roi = list(itt.product(*a))
return roi
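# Illustration (not part of the original module): for a center voxel well
# inside the 91x109x91 MNI grid, ROI_region returns the 8 x 8 x 8 = 512 voxel
# indices of the surrounding cube. The center used below is hypothetical.
def _demo_roi_region():
    roi = ROI_region([45, 54, 45])
    assert len(roi) == 8 * 8 * 8
    return roi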
def filter_ROI(vox_indc, in_brain_mask):
"""
Filter the voxels in the ROI so that only voxels inside the brain remain.
Parameters:
---------
Input
vox_indc: list of tuples representing the voxels within the ROI.
in_brain_mask: a list of True and False. True represents a valid voxel within the brain.
Return
List of tuples, which represent indices of filtered voxel within given ROI.
"""
return [v for v in vox_indc if in_brain_mask[v]]
def min_roi_roi_distance(network_map):
"""
Compute shortest pairwise euclidean distance of all ROI centers in all networks.
The purpose of this is to check whether the choice of diameter for a ROI sphere
will cause overlaps of voxels.
"""
centers = [center for v in network_map.values() for center in v.values()]
return min(pdist(centers, 'euclidean'))
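# Illustration (not part of the original module): ``network_map`` maps network
# names to dicts of ROI name -> center coordinate, so the minimum pairwise
# distance over all centers can be compared against twice the sphere radius.
# The coordinates below are made up.
def _demo_min_roi_roi_distance():
    network_map = {
        'net_a': {'roi_1': [0, 0, 0], 'roi_2': [10, 0, 0]},
        'net_b': {'roi_3': [0, 12, 0]},
    }
    return min_roi_roi_distance(network_map)          # -> 10.0, so radius-4 spheres do not overlap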
def get_voxels(mm_to_vox, coor, in_brain_mask):
"""
Get voxels of a given center coordinate of ROI.
Parameters:
---------
Input
mm_to_vox: Map from coordinate to MNI brain indices
coor: center coordinate of ROI
in_brain_mask: a list of True and False. True represents a valid voxel within the brain.
Return
List of tuples, which represent indices of filtered voxel within given ROI.
"""
b = ROI_region(co2vox(coor,mm_to_vox))
return filter_ROI(b, in_brain_mask)
class SphereExtractor(object):
def __init__(self, in_brain_mask, dist_from_center):
points = [i for i in vol_index_iter(in_brain_mask.shape) if in_brain_mask[i]]
self.tree = cKDTree(np.array(points))
self.points = points
self.dist_from_center = dist_from_center
def get_voxels(self, mm_to_vox, coor):
"""
Get voxels that are within a distance from a given center coordinate of ROI.
Parameters:
---------
Input
mm_to_vox: Map from coordinate to MNI brain indices
coor: center coordinate of ROI in mm
Note: the in-brain mask supplied to the SphereExtractor constructor is used to restrict the result.
Return
List of tuples, which represent indices of filtered voxel within given ROI.
"""
center = co2vox(coor, mm_to_vox)
return [self.points[i] for i in self.tree.query_ball_point(center, self.dist_from_center)]
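# Illustration (not part of the original module): a SphereExtractor usage
# sketch on a synthetic all-True brain mask with an identity affine, so mm
# coordinates and voxel indices coincide. Shape, radius and center are
# hypothetical; the module's own helpers (vol_index_iter) must be importable.
def _demo_sphere_extractor():
    in_brain_mask = np.ones((20, 20, 20), dtype=bool)
    mm_to_vox = npl.inv(np.eye(4))                    # identity mm -> voxel mapping
    extractor = SphereExtractor(in_brain_mask, dist_from_center=4)
    return extractor.get_voxels(mm_to_vox, [10, 10, 10])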
dic = {}
net_roi_filename = os.path.join(os.path.dirname(__file__), '../../data/net_roi.txt')
with open (net_roi_filename) as f:
for lines in f:
(key_region, k_roi, val1, val2, val3)= lines.split()
loc = [int(val1),int(val2),int(val3)]
if key_region not in dic:
dic[key_region] = {}
dic[key_region][k_roi] = loc
|
bsd-3-clause
|
fivejjs/GPy
|
GPy/testing/rv_transformation_tests.py
|
2
|
3523
|
# Written by Ilias Bilionis
"""
Test if hyperparameters in models are properly transformed.
"""
import unittest
import numpy as np
import scipy.stats as st
import GPy
class TestModel(GPy.core.Model):
"""
A simple GPy model with one parameter.
"""
def __init__(self):
GPy.core.Model.__init__(self, 'test_model')
theta = GPy.core.Param('theta', 1.)
self.link_parameter(theta)
def log_likelihood(self):
return 0.
class RVTransformationTestCase(unittest.TestCase):
def _test_trans(self, trans):
m = TestModel()
prior = GPy.priors.LogGaussian(.5, 0.1)
m.theta.set_prior(prior)
m.theta.unconstrain()
m.theta.constrain(trans)
# The PDF of the transformed variables
p_phi = lambda(phi): np.exp(-m._objective_grads(phi)[0])
# This is compared to the empirical PDF of transformed prior samples:
theta_s = prior.rvs(100000)
phi_s = trans.finv(theta_s)
# which is essentially a kernel density estimation
kde = st.gaussian_kde(phi_s)
# We will compare the PDF here:
phi = np.linspace(phi_s.min(), phi_s.max(), 100)
# The transformed PDF of phi should be this:
pdf_phi = np.array([p_phi(p) for p in phi])
# UNCOMMENT TO SEE GRAPHICAL COMPARISON
#import matplotlib.pyplot as plt
#fig, ax = plt.subplots()
#ax.hist(phi_s, normed=True, bins=100, alpha=0.25, label='Histogram')
#ax.plot(phi, kde(phi), '--', linewidth=2, label='Kernel Density Estimation')
#ax.plot(phi, pdf_phi, ':', linewidth=2, label='Transformed PDF')
#ax.set_xlabel(r'transformed $\theta$', fontsize=16)
#ax.set_ylabel('PDF', fontsize=16)
#plt.legend(loc='best')
#plt.show(block=True)
# END OF PLOT
# The following test cannot be very accurate
self.assertTrue(np.linalg.norm(pdf_phi - kde(phi)) / np.linalg.norm(kde(phi)) <= 1e-1)
# Check the gradients at a few random points
for i in xrange(10):
m.theta = theta_s[i]
self.assertTrue(m.checkgrad(verbose=True))
def test_Logexp(self):
self._test_trans(GPy.constraints.Logexp())
self._test_trans(GPy.constraints.Exponent())
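# Illustration (not part of the original tests): the identity that
# ``_test_trans`` checks numerically is the change-of-variables formula
# p_phi(phi) = p_theta(f(phi)) * |f'(phi)|, where theta = f(phi) is the
# constraint mapping. The sketch below verifies it for a softplus-style map
# (assumed here to mirror GPy's Logexp) using numpy/scipy only, with a generic
# lognormal prior standing in for the model's prior.
def _demo_change_of_variables():
    rng = np.random.RandomState(0)
    prior = st.lognorm(s=0.5)                           # hypothetical prior over theta > 0
    theta_s = prior.rvs(size=100000, random_state=rng)
    phi_s = np.log(np.expm1(theta_s))                   # inverse of theta = log(1 + exp(phi))
    kde = st.gaussian_kde(phi_s)
    phi = np.linspace(np.percentile(phi_s, 1), np.percentile(phi_s, 99), 100)
    analytic = prior.pdf(np.log1p(np.exp(phi))) * (np.exp(phi) / (1.0 + np.exp(phi)))
    rel_err = np.linalg.norm(analytic - kde(phi)) / np.linalg.norm(kde(phi))
    return rel_err                                      # should typically fall well below 1e-1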
if __name__ == '__main__':
unittest.main()
quit()
m = TestModel()
prior = GPy.priors.LogGaussian(0., .9)
m.theta.set_prior(prior)
# The following should return the PDF in terms of the transformed quantities
p_phi = lambda(phi): np.exp(-m._objective_grads(phi)[0])
# Let's look at the transformation phi = log(exp(theta - 1))
trans = GPy.constraints.Exponent()
m.theta.constrain(trans)
# Plot the transformed probability density
phi = np.linspace(-8, 8, 100)
fig, ax = plt.subplots()
# Let's draw some samples of theta and transform them so that we see
# which one is right
theta_s = prior.rvs(10000)
# Transform it to the new variables
phi_s = trans.finv(theta_s)
# And draw their histogram
ax.hist(phi_s, normed=True, bins=100, alpha=0.25, label='Empirical')
# This is to be compared to the PDF of the model expressed in terms of these new
# variables
ax.plot(phi, [p_phi(p) for p in phi], label='Transformed PDF', linewidth=2)
ax.set_xlim(-3, 10)
ax.set_xlabel(r'transformed $\theta$', fontsize=16)
ax.set_ylabel('PDF', fontsize=16)
plt.legend(loc='best')
# Now let's test the gradients
m.checkgrad(verbose=True)
# And show the plot
plt.show(block=True)
|
bsd-3-clause
|
apache/arrow
|
dev/archery/archery/lang/python.py
|
3
|
7778
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import inspect
import tokenize
from contextlib import contextmanager
try:
from numpydoc.validate import Docstring, validate
except ImportError:
have_numpydoc = False
else:
have_numpydoc = True
from ..utils.logger import logger
from ..utils.command import Command, capture_stdout, default_bin
class Flake8(Command):
def __init__(self, flake8_bin=None):
self.bin = default_bin(flake8_bin, "flake8")
class Autopep8(Command):
def __init__(self, autopep8_bin=None):
self.bin = default_bin(autopep8_bin, "autopep8")
@capture_stdout()
def run_captured(self, *args, **kwargs):
return self.run(*args, **kwargs)
def _tokenize_signature(s):
lines = s.encode('ascii').splitlines()
generator = iter(lines).__next__
return tokenize.tokenize(generator)
def _convert_typehint(tokens):
names = []
opening_bracket_reached = False
for token in tokens:
# omit the tokens before the opening bracket
if not opening_bracket_reached:
if token.string == '(':
opening_bracket_reached = True
else:
continue
if token.type == 1: # type 1 means NAME token
names.append(token)
else:
if len(names) == 1:
yield (names[0].type, names[0].string)
elif len(names) == 2:
# two "NAME" tokens follow each other which means a cython
# typehint like `bool argument`, so remove the typehint
# note that we could convert it to python typehints, but hints
# are not supported by _signature_fromstr
yield (names[1].type, names[1].string)
elif len(names) > 2:
raise ValueError('More than two NAME tokens follow each other')
names = []
yield (token.type, token.string)
def inspect_signature(obj):
"""
Custom signature inspection primarily for cython generated callables.
Cython puts the signatures to the first line of the docstrings, which we
can reuse to parse the python signature from, but some gymnastics are
required, like removing the cython typehints.
It converts the cython signature:
array(obj, type=None, mask=None, size=None, from_pandas=None,
bool safe=True, MemoryPool memory_pool=None)
To:
<Signature (obj, type=None, mask=None, size=None, from_pandas=None,
safe=True, memory_pool=None)>
"""
cython_signature = obj.__doc__.splitlines()[0]
cython_tokens = _tokenize_signature(cython_signature)
python_tokens = _convert_typehint(cython_tokens)
python_signature = tokenize.untokenize(python_tokens)
return inspect._signature_fromstr(inspect.Signature, obj, python_signature)
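# Illustration (not part of the original module): a stand-in object whose
# first docstring line mimics a cython-embedded signature, run through
# ``inspect_signature``. Both the class and its signature text are
# hypothetical.
class _FakeCythonCallable:
    """array(obj, type=None, mask=None, bool safe=True, MemoryPool memory_pool=None)"""
def _demo_inspect_signature():
    sig = inspect_signature(_FakeCythonCallable)
    # the cython typehints ``bool`` and ``MemoryPool`` are dropped, giving
    # <Signature (obj, type=None, mask=None, safe=True, memory_pool=None)>
    return sig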
class NumpyDoc:
def __init__(self, symbols=None):
if not have_numpydoc:
raise RuntimeError(
'Numpydoc is not available, install the development version '
'with command: pip install numpydoc==1.1.0'
)
self.symbols = set(symbols or {'pyarrow'})
def traverse(self, fn, obj, from_package):
"""Apply a function on publicly exposed API components.
Recursively iterates over the members of the passed object. It omits
any '_'-prefixed and third-party (non-pyarrow) symbols.
Parameters
----------
fn : callable
Function applied to each publicly exposed object that is visited.
obj : Any
Root object from which the traversal starts.
from_package : string
Predicate to only consider objects from this package.
"""
todo = [obj]
seen = set()
while todo:
obj = todo.pop()
if obj in seen:
continue
else:
seen.add(obj)
fn(obj)
for name in dir(obj):
if name.startswith('_'):
continue
member = getattr(obj, name)
module = getattr(member, '__module__', None)
if not (module and module.startswith(from_package)):
continue
todo.append(member)
@contextmanager
def _apply_patches(self):
"""
Patch Docstring class to bypass loading already loaded python objects.
"""
orig_load_obj = Docstring._load_obj
orig_signature = inspect.signature
@staticmethod
def _load_obj(obj):
# By default it expects a qualname and imports the object, but we
# have already loaded the object during the API traversal.
if isinstance(obj, str):
return orig_load_obj(obj)
else:
return obj
def signature(obj):
# inspect.signature tries to parse __text_signature__ if other
# properties like __signature__ don't exist, but cython
# doesn't set that property even when the embedsignature cython
# directive is set. The only way to inspect a cython compiled
# callable's signature is to parse it from __doc__, provided the
# embedsignature directive was set during the build phase.
# So patch the inspect.signature function to attempt to parse the
# first line of callable.__doc__ as a signature.
try:
return orig_signature(obj)
except Exception as orig_error:
try:
return inspect_signature(obj)
except Exception:
raise orig_error
try:
Docstring._load_obj = _load_obj
inspect.signature = signature
yield
finally:
Docstring._load_obj = orig_load_obj
inspect.signature = orig_signature
def validate(self, from_package='', allow_rules=None,
disallow_rules=None):
results = []
def callback(obj):
try:
result = validate(obj)
except OSError as e:
symbol = f"{obj.__module__}.{obj.__name__}"
logger.warning(f"Unable to validate `{symbol}` due to `{e}`")
return
errors = []
for errcode, errmsg in result.get('errors', []):
if allow_rules and errcode not in allow_rules:
continue
if disallow_rules and errcode in disallow_rules:
continue
errors.append((errcode, errmsg))
if len(errors):
result['errors'] = errors
results.append((obj, result))
with self._apply_patches():
for symbol in self.symbols:
try:
obj = Docstring._load_obj(symbol)
except (ImportError, AttributeError):
print('{} is not available for import'.format(symbol))
else:
self.traverse(callback, obj, from_package=from_package)
return results
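# Illustration (not part of the original module): a minimal driver for the
# NumpyDoc validator defined above. The symbol set and rule codes are only
# examples; numpydoc and the target package must be importable.
def _demo_numpydoc_validation():
    nd = NumpyDoc(symbols={'pyarrow'})
    results = nd.validate(from_package='pyarrow',
                          disallow_rules={'GL01', 'GL02'})
    return results                                    # list of (object, numpydoc result) pairs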
|
apache-2.0
|
taknevski/tensorflow-xsmm
|
tensorflow/contrib/learn/python/learn/estimators/dnn_linear_combined_test.py
|
11
|
65637
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for DNNLinearCombinedEstimators."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import json
import tempfile
import numpy as np
from tensorflow.contrib.layers.python.layers import feature_column
from tensorflow.contrib.learn.python.learn import experiment
from tensorflow.contrib.learn.python.learn.datasets import base
from tensorflow.contrib.learn.python.learn.estimators import _sklearn
from tensorflow.contrib.learn.python.learn.estimators import dnn_linear_combined
from tensorflow.contrib.learn.python.learn.estimators import estimator_test_utils
from tensorflow.contrib.learn.python.learn.estimators import head as head_lib
from tensorflow.contrib.learn.python.learn.estimators import model_fn
from tensorflow.contrib.learn.python.learn.estimators import run_config
from tensorflow.contrib.learn.python.learn.estimators import test_data
from tensorflow.contrib.learn.python.learn.metric_spec import MetricSpec
from tensorflow.contrib.metrics.python.ops import metric_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.losses import losses
from tensorflow.python.platform import test
from tensorflow.python.training import adagrad
from tensorflow.python.training import ftrl
from tensorflow.python.training import input as input_lib
from tensorflow.python.training import learning_rate_decay
from tensorflow.python.training import monitored_session
from tensorflow.python.training import server_lib
from tensorflow.python.training import session_run_hook
from tensorflow.python.training import sync_replicas_optimizer
from tensorflow.python.training import training_util
def _assert_metrics_in_range(keys, metrics):
epsilon = 0.00001 # Added for floating point edge cases.
for key in keys:
estimator_test_utils.assert_in_range(0.0 - epsilon, 1.0 + epsilon, key,
metrics)
class _CheckCallsHead(head_lib.Head):
"""Head that checks whether head_ops is called."""
def __init__(self):
self._head_ops_called_times = 0
@property
def logits_dimension(self):
return 1
def create_model_fn_ops(
self, mode, features, labels=None, train_op_fn=None, logits=None,
logits_input=None, scope=None):
"""See `_Head`."""
self._head_ops_called_times += 1
loss = losses.mean_squared_error(labels, logits)
return model_fn.ModelFnOps(
mode,
predictions={'loss': loss},
loss=loss,
train_op=train_op_fn(loss),
eval_metric_ops={'loss': loss})
@property
def head_ops_called_times(self):
return self._head_ops_called_times
class _StepCounterHook(session_run_hook.SessionRunHook):
"""Counts the number of training steps."""
def __init__(self):
self._steps = 0
def after_run(self, run_context, run_values):
del run_context, run_values
self._steps += 1
@property
def steps(self):
return self._steps
class EmbeddingMultiplierTest(test.TestCase):
"""dnn_model_fn tests."""
def testRaisesNonEmbeddingColumn(self):
one_hot_language = feature_column.one_hot_column(
feature_column.sparse_column_with_hash_bucket('language', 10))
params = {
'dnn_feature_columns': [one_hot_language],
'head': head_lib.multi_class_head(2),
'dnn_hidden_units': [1],
# Set lr mult to 0. to keep embeddings constant.
'embedding_lr_multipliers': {
one_hot_language: 0.0
},
'dnn_optimizer': 'Adagrad',
}
features = {
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [1, 0], [2, 0]],
dense_shape=[3, 1]),
}
labels = constant_op.constant([[0], [0], [0]], dtype=dtypes.int32)
with self.assertRaisesRegexp(ValueError,
'can only be defined for embedding columns'):
dnn_linear_combined._dnn_linear_combined_model_fn(features, labels,
model_fn.ModeKeys.TRAIN,
params)
def testMultipliesGradient(self):
embedding_language = feature_column.embedding_column(
feature_column.sparse_column_with_hash_bucket('language', 10),
dimension=1,
initializer=init_ops.constant_initializer(0.1))
embedding_wire = feature_column.embedding_column(
feature_column.sparse_column_with_hash_bucket('wire', 10),
dimension=1,
initializer=init_ops.constant_initializer(0.1))
params = {
'dnn_feature_columns': [embedding_language, embedding_wire],
'head': head_lib.multi_class_head(2),
'dnn_hidden_units': [1],
# Set lr mult to 0. to keep language embeddings constant, whereas wire
# embeddings will be trained.
'embedding_lr_multipliers': {
embedding_language: 0.0
},
'dnn_optimizer': 'Adagrad',
}
with ops.Graph().as_default():
features = {
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [1, 0], [2, 0]],
dense_shape=[3, 1]),
'wire':
sparse_tensor.SparseTensor(
values=['omar', 'stringer', 'marlo'],
indices=[[0, 0], [1, 0], [2, 0]],
dense_shape=[3, 1]),
}
labels = constant_op.constant([[1], [0], [0]], dtype=dtypes.int32)
training_util.create_global_step()
model_ops = dnn_linear_combined._dnn_linear_combined_model_fn(
features, labels, model_fn.ModeKeys.TRAIN, params)
with monitored_session.MonitoredSession() as sess:
language_var = dnn_linear_combined._get_embedding_variable(
embedding_language, 'dnn', 'dnn/input_from_feature_columns')
language_initial_value = sess.run(language_var)
for _ in range(2):
_, language_value = sess.run([model_ops.train_op, language_var])
self.assertAllClose(language_value, language_initial_value)
# We could also test that wire_value changed, but that test would be flaky.
class DNNLinearCombinedEstimatorTest(test.TestCase):
def testEstimatorContract(self):
estimator_test_utils.assert_estimator_contract(
self, dnn_linear_combined.DNNLinearCombinedEstimator)
def testNoFeatureColumns(self):
with self.assertRaisesRegexp(
ValueError,
'Either linear_feature_columns or dnn_feature_columns must be defined'):
dnn_linear_combined.DNNLinearCombinedEstimator(
head=_CheckCallsHead(),
linear_feature_columns=None,
dnn_feature_columns=None,
dnn_hidden_units=[3, 3])
def testCheckCallsHead(self):
"""Tests binary classification using matrix data as input."""
head = _CheckCallsHead()
iris = test_data.prepare_iris_data_for_logistic_regression()
cont_features = [
feature_column.real_valued_column('feature', dimension=4)]
bucketized_feature = [feature_column.bucketized_column(
cont_features[0], test_data.get_quantile_based_buckets(iris.data, 10))]
estimator = dnn_linear_combined.DNNLinearCombinedEstimator(
head,
linear_feature_columns=bucketized_feature,
dnn_feature_columns=cont_features,
dnn_hidden_units=[3, 3])
estimator.fit(input_fn=test_data.iris_input_multiclass_fn, steps=10)
self.assertEqual(1, head.head_ops_called_times)
estimator.evaluate(input_fn=test_data.iris_input_multiclass_fn, steps=10)
self.assertEqual(2, head.head_ops_called_times)
estimator.predict(input_fn=test_data.iris_input_multiclass_fn)
self.assertEqual(3, head.head_ops_called_times)
class DNNLinearCombinedClassifierTest(test.TestCase):
def testEstimatorContract(self):
estimator_test_utils.assert_estimator_contract(
self, dnn_linear_combined.DNNLinearCombinedClassifier)
def testExperimentIntegration(self):
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
exp = experiment.Experiment(
estimator=dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=cont_features,
dnn_feature_columns=cont_features,
dnn_hidden_units=[3, 3]),
train_input_fn=test_data.iris_input_logistic_fn,
eval_input_fn=test_data.iris_input_logistic_fn)
exp.test()
def testNoFeatureColumns(self):
with self.assertRaisesRegexp(
ValueError,
'Either linear_feature_columns or dnn_feature_columns must be defined'):
dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=None,
dnn_feature_columns=None,
dnn_hidden_units=[3, 3])
def testNoDnnHiddenUnits(self):
def _input_fn():
return {
'age':
constant_op.constant([1]),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[1]])
language = feature_column.sparse_column_with_hash_bucket('language', 100)
age = feature_column.real_valued_column('age')
with self.assertRaisesRegexp(
ValueError,
'dnn_hidden_units must be defined when dnn_feature_columns is '
'specified'):
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
dnn_feature_columns=[age, language])
classifier.fit(input_fn=_input_fn, steps=2)
def testSyncReplicasOptimizerUnsupported(self):
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
sync_optimizer = sync_replicas_optimizer.SyncReplicasOptimizer(
opt=adagrad.AdagradOptimizer(learning_rate=0.1),
replicas_to_aggregate=1,
total_num_replicas=1)
sync_hook = sync_optimizer.make_session_run_hook(is_chief=True)
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
n_classes=3,
dnn_feature_columns=cont_features,
dnn_hidden_units=[3, 3],
dnn_optimizer=sync_optimizer)
with self.assertRaisesRegexp(
ValueError,
'SyncReplicasOptimizer is not supported in DNNLinearCombined model'):
classifier.fit(
input_fn=test_data.iris_input_multiclass_fn, steps=100,
monitors=[sync_hook])
def testEmbeddingMultiplier(self):
embedding_language = feature_column.embedding_column(
feature_column.sparse_column_with_hash_bucket('language', 10),
dimension=1,
initializer=init_ops.constant_initializer(0.1))
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
dnn_feature_columns=[embedding_language],
dnn_hidden_units=[3, 3],
embedding_lr_multipliers={embedding_language: 0.8})
self.assertEqual({
embedding_language: 0.8
}, classifier.params['embedding_lr_multipliers'])
def testInputPartitionSize(self):
def _input_fn_float_label(num_epochs=None):
features = {
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
labels = constant_op.constant([[0.8], [0.], [0.2]], dtype=dtypes.float32)
return features, labels
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [
feature_column.embedding_column(language_column, dimension=1),
]
# Set num_ps_replica to be 10 and the min slice size to be extremely small,
# so as to ensure that there'll be 10 partitions produced.
config = run_config.RunConfig(tf_random_seed=1)
config._num_ps_replicas = 10
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
n_classes=2,
dnn_feature_columns=feature_columns,
dnn_hidden_units=[3, 3],
dnn_optimizer='Adagrad',
config=config,
input_layer_min_slice_size=1)
# Ensure the param is passed in.
self.assertTrue(callable(classifier.params['input_layer_partitioner']))
# Ensure the partition count is 10.
classifier.fit(input_fn=_input_fn_float_label, steps=50)
partition_count = 0
for name in classifier.get_variable_names():
if 'language_embedding' in name and 'Adagrad' in name:
partition_count += 1
self.assertEqual(10, partition_count)
def testLogisticRegression_MatrixData(self):
"""Tests binary classification using matrix data as input."""
iris = test_data.prepare_iris_data_for_logistic_regression()
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
bucketized_feature = [
feature_column.bucketized_column(
cont_features[0],
test_data.get_quantile_based_buckets(iris.data, 10))
]
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=bucketized_feature,
dnn_feature_columns=cont_features,
dnn_hidden_units=[3, 3])
classifier.fit(input_fn=test_data.iris_input_logistic_fn, steps=100)
scores = classifier.evaluate(
input_fn=test_data.iris_input_logistic_fn, steps=100)
_assert_metrics_in_range(('accuracy', 'auc'), scores)
def testLogisticRegression_TensorData(self):
"""Tests binary classification using Tensor data as input."""
def _input_fn():
iris = test_data.prepare_iris_data_for_logistic_regression()
features = {}
for i in range(4):
# The following shows how to provide the Tensor data for
# RealValuedColumns.
features.update({
str(i):
array_ops.reshape(
constant_op.constant(
iris.data[:, i], dtype=dtypes.float32), [-1, 1])
})
# The following shows how to provide the SparseTensor data for
# a SparseColumn.
features['dummy_sparse_column'] = sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [60, 0]],
dense_shape=[len(iris.target), 2])
labels = array_ops.reshape(
constant_op.constant(
iris.target, dtype=dtypes.int32), [-1, 1])
return features, labels
iris = test_data.prepare_iris_data_for_logistic_regression()
cont_features = [
feature_column.real_valued_column(str(i)) for i in range(4)
]
linear_features = [
feature_column.bucketized_column(cont_features[i],
test_data.get_quantile_based_buckets(
iris.data[:, i], 10))
for i in range(4)
]
linear_features.append(
feature_column.sparse_column_with_hash_bucket(
'dummy_sparse_column', hash_bucket_size=100))
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=linear_features,
dnn_feature_columns=cont_features,
dnn_hidden_units=[3, 3])
classifier.fit(input_fn=_input_fn, steps=100)
scores = classifier.evaluate(input_fn=_input_fn, steps=100)
_assert_metrics_in_range(('accuracy', 'auc'), scores)
def testTrainWithPartitionedVariables(self):
"""Tests training with partitioned variables."""
def _input_fn():
features = {
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
labels = constant_op.constant([[1], [0], [0]])
return features, labels
sparse_features = [
# The given hash_bucket_size results in variables larger than the
# default min_slice_size attribute, so the variables are partitioned.
feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=2e7)
]
embedding_features = [
feature_column.embedding_column(
sparse_features[0], dimension=1)
]
tf_config = {
'cluster': {
run_config.TaskType.PS: ['fake_ps_0', 'fake_ps_1']
}
}
with test.mock.patch.dict('os.environ',
{'TF_CONFIG': json.dumps(tf_config)}):
config = run_config.RunConfig()
# Because we did not start a distributed cluster, we need to pass an
# empty ClusterSpec, otherwise the device_setter will look for
# distributed jobs, such as "/job:ps" which are not present.
config._cluster_spec = server_lib.ClusterSpec({})
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=sparse_features,
dnn_feature_columns=embedding_features,
dnn_hidden_units=[3, 3],
config=config)
classifier.fit(input_fn=_input_fn, steps=100)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
_assert_metrics_in_range(('accuracy', 'auc'), scores)
def testMultiClass(self):
"""Tests multi-class classification using matrix data as input.
Please see testLogisticRegression_TensorData() for how to use Tensor
data as input instead.
"""
iris = base.load_iris()
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
bucketized_features = [
feature_column.bucketized_column(
cont_features[0],
test_data.get_quantile_based_buckets(iris.data, 10))
]
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
n_classes=3,
linear_feature_columns=bucketized_features,
dnn_feature_columns=cont_features,
dnn_hidden_units=[3, 3])
classifier.fit(input_fn=test_data.iris_input_multiclass_fn, steps=100)
scores = classifier.evaluate(
input_fn=test_data.iris_input_multiclass_fn, steps=100)
_assert_metrics_in_range(('accuracy',), scores)
def testLoss(self):
"""Tests loss calculation."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# The logistic prediction should be (y = 0.25).
features = {'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),}
labels = constant_op.constant([[1], [0], [0], [0]])
return features, labels
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
n_classes=2,
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn_train, steps=100)
scores = classifier.evaluate(input_fn=_input_fn_train, steps=1)
# Cross entropy = -0.25*log(0.25)-0.75*log(0.75) = 0.562
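    # (Check: 0.25*ln(4) + 0.75*ln(4/3) = 0.3466 + 0.2158 ~= 0.562.)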
self.assertAlmostEqual(0.562, scores['loss'], delta=0.1)
def testLossWithWeights(self):
"""Tests loss calculation with weights."""
def _input_fn_train():
# 4 rows with equal weight, one of them (y = x), three of them (y=Not(x))
# The logistic prediction should be (y = 0.25).
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[1.], [1.], [1.], [1.]])
}
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
return features, labels
def _input_fn_eval():
# 4 rows, with different weights.
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[7.], [1.], [1.], [1.]])
}
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
return features, labels
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
weight_column_name='w',
n_classes=2,
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn_train, steps=100)
scores = classifier.evaluate(input_fn=_input_fn_eval, steps=1)
# Weighted cross entropy = (-7*log(0.25)-3*log(0.75))/10 = 1.06
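    # (Check: (7*1.3863 + 3*0.2877) / 10 = 10.567 / 10 ~= 1.06.)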
self.assertAlmostEqual(1.06, scores['loss'], delta=0.1)
def testTrainWithWeights(self):
"""Tests training with given weight column."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# First row has more weight than others. Model should fit (y=x) better
      # than (y=Not(x)) due to the relatively higher weight of the first row.
labels = constant_op.constant([[1], [0], [0], [0]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[100.], [3.], [2.], [2.]])
}
return features, labels
def _input_fn_eval():
# Create 4 rows (y = x).
labels = constant_op.constant([[1], [1], [1], [1]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[1.], [1.], [1.], [1.]])
}
return features, labels
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
weight_column_name='w',
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn_train, steps=100)
scores = classifier.evaluate(input_fn=_input_fn_eval, steps=1)
_assert_metrics_in_range(('accuracy',), scores)
def testCustomOptimizerByObject(self):
"""Tests binary classification using matrix data as input."""
iris = test_data.prepare_iris_data_for_logistic_regression()
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
bucketized_features = [
feature_column.bucketized_column(
cont_features[0],
test_data.get_quantile_based_buckets(iris.data, 10))
]
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=bucketized_features,
linear_optimizer=ftrl.FtrlOptimizer(learning_rate=0.1),
dnn_feature_columns=cont_features,
dnn_hidden_units=[3, 3],
dnn_optimizer=adagrad.AdagradOptimizer(learning_rate=0.1))
classifier.fit(input_fn=test_data.iris_input_logistic_fn, steps=100)
scores = classifier.evaluate(
input_fn=test_data.iris_input_logistic_fn, steps=100)
_assert_metrics_in_range(('accuracy',), scores)
def testCustomOptimizerByString(self):
"""Tests binary classification using matrix data as input."""
iris = test_data.prepare_iris_data_for_logistic_regression()
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
bucketized_features = [
feature_column.bucketized_column(
cont_features[0],
test_data.get_quantile_based_buckets(iris.data, 10))
]
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=bucketized_features,
linear_optimizer='Ftrl',
dnn_feature_columns=cont_features,
dnn_hidden_units=[3, 3],
dnn_optimizer='Adagrad')
classifier.fit(input_fn=test_data.iris_input_logistic_fn, steps=100)
scores = classifier.evaluate(
input_fn=test_data.iris_input_logistic_fn, steps=100)
_assert_metrics_in_range(('accuracy',), scores)
def testCustomOptimizerByFunction(self):
"""Tests binary classification using matrix data as input."""
iris = test_data.prepare_iris_data_for_logistic_regression()
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
bucketized_features = [
feature_column.bucketized_column(
cont_features[0],
test_data.get_quantile_based_buckets(iris.data, 10))
]
def _optimizer_exp_decay():
global_step = training_util.get_global_step()
learning_rate = learning_rate_decay.exponential_decay(
learning_rate=0.1,
global_step=global_step,
decay_steps=100,
decay_rate=0.001)
return adagrad.AdagradOptimizer(learning_rate=learning_rate)
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=bucketized_features,
linear_optimizer=_optimizer_exp_decay,
dnn_feature_columns=cont_features,
dnn_hidden_units=[3, 3],
dnn_optimizer=_optimizer_exp_decay)
classifier.fit(input_fn=test_data.iris_input_logistic_fn, steps=100)
scores = classifier.evaluate(
input_fn=test_data.iris_input_logistic_fn, steps=100)
_assert_metrics_in_range(('accuracy',), scores)
def testPredict(self):
"""Tests weight column in evaluation."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
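      # The logistic prediction should be (y = 0.25).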
labels = constant_op.constant([[1], [0], [0], [0]])
features = {'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32)}
return features, labels
def _input_fn_predict():
y = input_lib.limit_epochs(
array_ops.ones(
shape=[4, 1], dtype=dtypes.float32), num_epochs=1)
features = {'x': y}
return features
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3])
classifier.fit(input_fn=_input_fn_train, steps=100)
probs = list(classifier.predict_proba(input_fn=_input_fn_predict))
self.assertAllClose([[0.75, 0.25]] * 4, probs, 0.05)
classes = list(classifier.predict_classes(input_fn=_input_fn_predict))
self.assertListEqual([0] * 4, classes)
def testCustomMetrics(self):
"""Tests custom evaluation metrics."""
def _input_fn(num_epochs=None):
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
labels = constant_op.constant([[1], [0], [0], [0]])
features = {
'x':
input_lib.limit_epochs(
array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
num_epochs=num_epochs)
}
return features, labels
def _my_metric_op(predictions, labels):
# For the case of binary classification, the 2nd column of "predictions"
# denotes the model predictions.
labels = math_ops.to_float(labels)
predictions = array_ops.strided_slice(
predictions, [0, 1], [-1, 2], end_mask=1)
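      # (With end_mask=1 the slice keeps every row and only column 1, i.e. the
      # positive-class probabilities.)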
return math_ops.reduce_sum(math_ops.multiply(predictions, labels))
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3])
classifier.fit(input_fn=_input_fn, steps=100)
scores = classifier.evaluate(
input_fn=_input_fn,
steps=100,
metrics={
'my_accuracy':
MetricSpec(
metric_fn=metric_ops.streaming_accuracy,
prediction_key='classes'),
'my_precision':
MetricSpec(
metric_fn=metric_ops.streaming_precision,
prediction_key='classes'),
'my_metric':
MetricSpec(
metric_fn=_my_metric_op, prediction_key='probabilities')
})
self.assertTrue(
set(['loss', 'my_accuracy', 'my_precision', 'my_metric']).issubset(
set(scores.keys())))
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions = np.array(list(classifier.predict_classes(
input_fn=predict_input_fn)))
self.assertEqual(
_sklearn.accuracy_score([1, 0, 0, 0], predictions),
scores['my_accuracy'])
# Test the case where the 2nd element of the key is neither "classes" nor
# "probabilities".
with self.assertRaisesRegexp(KeyError, 'bad_type'):
classifier.evaluate(
input_fn=_input_fn,
steps=100,
metrics={('bad_name', 'bad_type'): metric_ops.streaming_auc})
# Test the case where the tuple of the key doesn't have 2 elements.
with self.assertRaises(ValueError):
classifier.evaluate(
input_fn=_input_fn,
steps=100,
metrics={
('bad_length_name', 'classes', 'bad_length'):
metric_ops.streaming_accuracy
})
# Test the case where the prediction_key is neither "classes" nor
# "probabilities".
with self.assertRaisesRegexp(KeyError, 'bad_type'):
classifier.evaluate(
input_fn=_input_fn,
steps=100,
metrics={
'bad_name':
MetricSpec(
metric_fn=metric_ops.streaming_auc,
prediction_key='bad_type')
})
def testVariableQuery(self):
"""Tests get_variable_names and get_variable_value."""
def _input_fn_train():
# Create 4 rows, three (y = x), one (y=Not(x))
labels = constant_op.constant([[1], [1], [1], [0]])
features = {'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),}
return features, labels
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3])
classifier.fit(input_fn=_input_fn_train, steps=500)
var_names = classifier.get_variable_names()
self.assertGreater(len(var_names), 3)
for name in var_names:
classifier.get_variable_value(name)
def testExport(self):
"""Tests export model for servo."""
def input_fn():
return {
'age':
constant_op.constant([1]),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[1]])
language = feature_column.sparse_column_with_hash_bucket('language', 100)
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=[
feature_column.real_valued_column('age'),
language,
],
dnn_feature_columns=[
feature_column.embedding_column(
language, dimension=1),
],
dnn_hidden_units=[3, 3])
classifier.fit(input_fn=input_fn, steps=100)
export_dir = tempfile.mkdtemp()
input_feature_key = 'examples'
def serving_input_fn():
features, targets = input_fn()
features[input_feature_key] = array_ops.placeholder(dtypes.string)
return features, targets
classifier.export(
export_dir,
serving_input_fn,
input_feature_key,
use_deprecated_input_fn=False)
def testCenteredBias(self):
"""Tests bias is centered or not."""
def _input_fn_train():
# Create 4 rows, three (y = x), one (y=Not(x))
labels = constant_op.constant([[1], [1], [1], [0]])
features = {'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),}
return features, labels
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3],
enable_centered_bias=True)
classifier.fit(input_fn=_input_fn_train, steps=1000)
self.assertIn('binary_logistic_head/centered_bias_weight',
classifier.get_variable_names())
# logodds(0.75) = 1.09861228867
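    # (Check: ln(0.75 / 0.25) = ln(3) ~= 1.0986.)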
self.assertAlmostEqual(
1.0986,
float(classifier.get_variable_value(
'binary_logistic_head/centered_bias_weight')[0]),
places=2)
def testDisableCenteredBias(self):
"""Tests bias is centered or not."""
def _input_fn_train():
# Create 4 rows, three (y = x), one (y=Not(x))
labels = constant_op.constant([[1], [1], [1], [0]])
features = {'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),}
return features, labels
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3],
enable_centered_bias=False)
classifier.fit(input_fn=_input_fn_train, steps=500)
self.assertNotIn('centered_bias_weight', classifier.get_variable_names())
def testGlobalStepLinearOnly(self):
"""Tests global step update for linear-only model."""
def input_fn():
return {
'age': constant_op.constant([1]),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[1]])
language = feature_column.sparse_column_with_hash_bucket('language', 10)
age = feature_column.real_valued_column('age')
step_counter = _StepCounterHook()
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=[age, language])
classifier.fit(input_fn=input_fn, steps=100, monitors=[step_counter])
self.assertEqual(100, step_counter.steps)
def testGlobalStepDNNOnly(self):
"""Tests global step update for dnn-only model."""
def input_fn():
return {
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[1]])
language = feature_column.sparse_column_with_hash_bucket('language', 10)
step_counter = _StepCounterHook()
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
dnn_feature_columns=[
feature_column.embedding_column(language, dimension=1)],
dnn_hidden_units=[3, 3])
classifier.fit(input_fn=input_fn, steps=100, monitors=[step_counter])
self.assertEqual(100, step_counter.steps)
def testGlobalStepDNNLinearCombinedBug(self):
"""Tests global step update for dnn-linear combined model."""
def input_fn():
return {
'age': constant_op.constant([1]),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[1]])
language = feature_column.sparse_column_with_hash_bucket('language', 10)
age = feature_column.real_valued_column('age')
step_counter = _StepCounterHook()
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=[age, language],
dnn_feature_columns=[
feature_column.embedding_column(language, dimension=1)],
dnn_hidden_units=[3, 3],
fix_global_step_increment_bug=False)
classifier.fit(input_fn=input_fn, steps=100, monitors=[step_counter])
# Expected is 100, but because of the global step increment bug, this is 51.
self.assertEqual(51, step_counter.steps)
def testGlobalStepDNNLinearCombinedBugFixed(self):
"""Tests global step update for dnn-linear combined model."""
def input_fn():
return {
'age': constant_op.constant([1]),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[1]])
language = feature_column.sparse_column_with_hash_bucket('language', 10)
age = feature_column.real_valued_column('age')
step_counter = _StepCounterHook()
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=[age, language],
dnn_feature_columns=[
feature_column.embedding_column(language, dimension=1)],
dnn_hidden_units=[3, 3],
fix_global_step_increment_bug=True)
classifier.fit(input_fn=input_fn, steps=100, monitors=[step_counter])
self.assertEqual(100, step_counter.steps)
def testLinearOnly(self):
"""Tests that linear-only instantiation works."""
def input_fn():
return {
'age':
constant_op.constant([1]),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[1]])
language = feature_column.sparse_column_with_hash_bucket('language', 100)
age = feature_column.real_valued_column('age')
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=[age, language])
classifier.fit(input_fn=input_fn, steps=100)
loss1 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
classifier.fit(input_fn=input_fn, steps=200)
loss2 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
self.assertLess(loss2, loss1)
variable_names = classifier.get_variable_names()
self.assertNotIn('dnn/logits/biases', variable_names)
self.assertNotIn('dnn/logits/weights', variable_names)
self.assertIn('linear/bias_weight', variable_names)
self.assertIn('linear/age/weight', variable_names)
self.assertIn('linear/language/weights', variable_names)
self.assertEquals(
1, len(classifier.get_variable_value('linear/age/weight')))
self.assertEquals(
100, len(classifier.get_variable_value('linear/language/weights')))
def testLinearOnlyOneFeature(self):
"""Tests that linear-only instantiation works for one feature only."""
def input_fn():
return {
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[1]])
language = feature_column.sparse_column_with_hash_bucket('language', 99)
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=[language])
classifier.fit(input_fn=input_fn, steps=100)
loss1 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
classifier.fit(input_fn=input_fn, steps=200)
loss2 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
self.assertLess(loss2, loss1)
variable_names = classifier.get_variable_names()
self.assertNotIn('dnn/logits/biases', variable_names)
self.assertNotIn('dnn/logits/weights', variable_names)
self.assertIn('linear/bias_weight', variable_names)
self.assertIn('linear/language/weights', variable_names)
self.assertEquals(
1, len(classifier.get_variable_value('linear/bias_weight')))
self.assertEquals(
99, len(classifier.get_variable_value('linear/language/weights')))
def testDNNOnly(self):
"""Tests that DNN-only instantiation works."""
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
n_classes=3, dnn_feature_columns=cont_features, dnn_hidden_units=[3, 3])
classifier.fit(input_fn=test_data.iris_input_multiclass_fn, steps=1000)
classifier.evaluate(input_fn=test_data.iris_input_multiclass_fn, steps=100)
variable_names = classifier.get_variable_names()
self.assertIn('dnn/hiddenlayer_0/weights', variable_names)
self.assertIn('dnn/hiddenlayer_0/biases', variable_names)
self.assertIn('dnn/hiddenlayer_1/weights', variable_names)
self.assertIn('dnn/hiddenlayer_1/biases', variable_names)
self.assertIn('dnn/logits/weights', variable_names)
self.assertIn('dnn/logits/biases', variable_names)
self.assertNotIn('linear/bias_weight', variable_names)
self.assertNotIn('linear/feature_BUCKETIZED/weight', variable_names)
def testDNNWeightsBiasesNames(self):
"""Tests the names of DNN weights and biases in the checkpoints."""
def _input_fn_train():
# Create 4 rows, three (y = x), one (y=Not(x))
labels = constant_op.constant([[1], [1], [1], [0]])
features = {'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),}
return features, labels
classifier = dnn_linear_combined.DNNLinearCombinedClassifier(
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3])
classifier.fit(input_fn=_input_fn_train, steps=5)
variable_names = classifier.get_variable_names()
self.assertIn('dnn/hiddenlayer_0/weights', variable_names)
self.assertIn('dnn/hiddenlayer_0/biases', variable_names)
self.assertIn('dnn/hiddenlayer_1/weights', variable_names)
self.assertIn('dnn/hiddenlayer_1/biases', variable_names)
self.assertIn('dnn/logits/weights', variable_names)
self.assertIn('dnn/logits/biases', variable_names)
class DNNLinearCombinedRegressorTest(test.TestCase):
def testExperimentIntegration(self):
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
exp = experiment.Experiment(
estimator=dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=cont_features,
dnn_feature_columns=cont_features,
dnn_hidden_units=[3, 3]),
train_input_fn=test_data.iris_input_logistic_fn,
eval_input_fn=test_data.iris_input_logistic_fn)
exp.test()
def testEstimatorContract(self):
estimator_test_utils.assert_estimator_contract(
self, dnn_linear_combined.DNNLinearCombinedRegressor)
def testRegression_MatrixData(self):
"""Tests regression using matrix data as input."""
cont_features = [feature_column.real_valued_column('feature', dimension=4)]
regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=cont_features,
dnn_feature_columns=cont_features,
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=test_data.iris_input_logistic_fn, steps=10)
scores = regressor.evaluate(
input_fn=test_data.iris_input_logistic_fn, steps=1)
self.assertIn('loss', scores.keys())
def testRegression_TensorData(self):
"""Tests regression using tensor data as input."""
def _input_fn():
# Create 4 rows of (y = x)
labels = constant_op.constant([[100.], [3.], [2.], [2.]])
features = {'x': constant_op.constant([[100.], [3.], [2.], [2.]])}
return features, labels
classifier = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn, steps=10)
classifier.evaluate(input_fn=_input_fn, steps=1)
def testLoss(self):
"""Tests loss calculation."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# The algorithm should learn (y = 0.25).
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),}
return features, labels
regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn_train, steps=100)
scores = regressor.evaluate(input_fn=_input_fn_train, steps=1)
# Average square loss = (0.75^2 + 3*0.25^2) / 4 = 0.1875
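    # (Check: (0.5625 + 3 * 0.0625) / 4 = 0.75 / 4 = 0.1875.)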
self.assertAlmostEqual(0.1875, scores['loss'], delta=0.1)
def testLossWithWeights(self):
"""Tests loss calculation with weights."""
def _input_fn_train():
# 4 rows with equal weight, one of them (y = x), three of them (y=Not(x))
# The algorithm should learn (y = 0.25).
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[1.], [1.], [1.], [1.]])
}
return features, labels
def _input_fn_eval():
# 4 rows, with different weights.
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[7.], [1.], [1.], [1.]])
}
return features, labels
regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
weight_column_name='w',
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn_train, steps=100)
scores = regressor.evaluate(input_fn=_input_fn_eval, steps=1)
# Weighted average square loss = (7*0.75^2 + 3*0.25^2) / 10 = 0.4125
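    # (Check: (7 * 0.5625 + 3 * 0.0625) / 10 = 4.125 / 10 = 0.4125.)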
self.assertAlmostEqual(0.4125, scores['loss'], delta=0.1)
def testTrainWithWeights(self):
"""Tests training with given weight column."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# First row has more weight than others. Model should fit (y=x) better
      # than (y=Not(x)) due to the relatively higher weight of the first row.
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[100.], [3.], [2.], [2.]])
}
return features, labels
def _input_fn_eval():
# Create 4 rows (y = x)
labels = constant_op.constant([[1.], [1.], [1.], [1.]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[1.], [1.], [1.], [1.]])
}
return features, labels
regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
weight_column_name='w',
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn_train, steps=100)
scores = regressor.evaluate(input_fn=_input_fn_eval, steps=1)
# The model should learn (y = x) because of the weights, so the loss should
# be close to zero.
self.assertLess(scores['loss'], 0.2)
def testPredict_AsIterableFalse(self):
"""Tests predict method with as_iterable=False."""
labels = [1., 0., 0.2]
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant(labels, dtype=dtypes.float32)
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=[
language_column, feature_column.real_valued_column('age')
],
dnn_feature_columns=[
feature_column.embedding_column(
language_column, dimension=1),
feature_column.real_valued_column('age')
],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=10)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores.keys())
regressor.predict_scores(input_fn=_input_fn, as_iterable=False)
def testPredict_AsIterable(self):
"""Tests predict method with as_iterable=True."""
labels = [1., 0., 0.2]
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant(labels, dtype=dtypes.float32)
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=[
language_column, feature_column.real_valued_column('age')
],
dnn_feature_columns=[
feature_column.embedding_column(
language_column, dimension=1),
feature_column.real_valued_column('age')
],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=10)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores.keys())
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
regressor.predict_scores(input_fn=predict_input_fn, as_iterable=True)
def testCustomMetrics(self):
"""Tests custom evaluation metrics."""
def _input_fn(num_epochs=None):
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x':
input_lib.limit_epochs(
array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
num_epochs=num_epochs)
}
return features, labels
def _my_metric_op(predictions, labels):
return math_ops.reduce_sum(math_ops.multiply(predictions, labels))
regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=10)
scores = regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
'my_error': metric_ops.streaming_mean_squared_error,
('my_metric', 'scores'): _my_metric_op
})
self.assertIn('loss', set(scores.keys()))
self.assertIn('my_error', set(scores.keys()))
self.assertIn('my_metric', set(scores.keys()))
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions = np.array(list(regressor.predict_scores(
input_fn=predict_input_fn)))
self.assertAlmostEqual(
_sklearn.mean_squared_error(np.array([1, 0, 0, 0]), predictions),
scores['my_error'])
# Tests the case that the 2nd element of the key is not "scores".
with self.assertRaises(KeyError):
regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
('my_error', 'predictions'):
metric_ops.streaming_mean_squared_error
})
# Tests the case where the tuple of the key doesn't have 2 elements.
with self.assertRaises(ValueError):
regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
('bad_length_name', 'scores', 'bad_length'):
metric_ops.streaming_mean_squared_error
})
def testCustomMetricsWithMetricSpec(self):
"""Tests custom evaluation metrics."""
def _input_fn(num_epochs=None):
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x':
input_lib.limit_epochs(
array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
num_epochs=num_epochs)
}
return features, labels
def _my_metric_op(predictions, labels):
return math_ops.reduce_sum(math_ops.multiply(predictions, labels))
regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=5)
scores = regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
'my_error':
MetricSpec(
metric_fn=metric_ops.streaming_mean_squared_error,
prediction_key='scores'),
'my_metric':
MetricSpec(
metric_fn=_my_metric_op, prediction_key='scores')
})
self.assertIn('loss', set(scores.keys()))
self.assertIn('my_error', set(scores.keys()))
self.assertIn('my_metric', set(scores.keys()))
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions = np.array(list(regressor.predict_scores(
input_fn=predict_input_fn)))
self.assertAlmostEqual(
_sklearn.mean_squared_error(np.array([1, 0, 0, 0]), predictions),
scores['my_error'])
# Tests the case where the prediction_key is not "scores".
with self.assertRaisesRegexp(KeyError, 'bad_type'):
regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
'bad_name':
MetricSpec(
metric_fn=metric_ops.streaming_auc,
prediction_key='bad_type')
})
def testExport(self):
"""Tests export model for servo."""
labels = [1., 0., 0.2]
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant(labels, dtype=dtypes.float32)
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=[
language_column, feature_column.real_valued_column('age')
],
dnn_feature_columns=[
feature_column.embedding_column(
language_column, dimension=1),
],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=10)
export_dir = tempfile.mkdtemp()
input_feature_key = 'examples'
def serving_input_fn():
features, targets = _input_fn()
features[input_feature_key] = array_ops.placeholder(dtypes.string)
return features, targets
regressor.export(
export_dir,
serving_input_fn,
input_feature_key,
use_deprecated_input_fn=False)
def testTrainSaveLoad(self):
"""Tests regression with restarting training / evaluate."""
def _input_fn(num_epochs=None):
# Create 4 rows of (y = x)
labels = constant_op.constant([[100.], [3.], [2.], [2.]])
features = {
'x':
input_lib.limit_epochs(
constant_op.constant([[100.], [3.], [2.], [2.]]),
num_epochs=num_epochs)
}
return features, labels
model_dir = tempfile.mkdtemp()
# pylint: disable=g-long-lambda
new_regressor = lambda: dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3],
model_dir=model_dir,
config=run_config.RunConfig(tf_random_seed=1))
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
regressor = new_regressor()
regressor.fit(input_fn=_input_fn, steps=10)
predictions = list(regressor.predict_scores(input_fn=predict_input_fn))
del regressor
regressor = new_regressor()
predictions2 = list(regressor.predict_scores(input_fn=predict_input_fn))
self.assertAllClose(predictions, predictions2)
def testTrainWithPartitionedVariables(self):
"""Tests training with partitioned variables."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([1., 0., 0.2], dtype=dtypes.float32)
# The given hash_bucket_size results in variables larger than the
# default min_slice_size attribute, so the variables are partitioned.
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=2e7)
tf_config = {
'cluster': {
run_config.TaskType.PS: ['fake_ps_0', 'fake_ps_1']
}
}
with test.mock.patch.dict('os.environ',
{'TF_CONFIG': json.dumps(tf_config)}):
config = run_config.RunConfig(tf_random_seed=1)
# Because we did not start a distributed cluster, we need to pass an
# empty ClusterSpec, otherwise the device_setter will look for
# distributed jobs, such as "/job:ps" which are not present.
config._cluster_spec = server_lib.ClusterSpec({})
regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=[
language_column, feature_column.real_valued_column('age')
],
dnn_feature_columns=[
feature_column.embedding_column(
language_column, dimension=1),
feature_column.real_valued_column('age')
],
dnn_hidden_units=[3, 3],
config=config)
regressor.fit(input_fn=_input_fn, steps=100)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores.keys())
def testDisableCenteredBias(self):
"""Tests that we can disable centered bias."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([1., 0., 0.2], dtype=dtypes.float32)
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=[
language_column, feature_column.real_valued_column('age')
],
dnn_feature_columns=[
feature_column.embedding_column(
language_column, dimension=1),
feature_column.real_valued_column('age')
],
dnn_hidden_units=[3, 3],
enable_centered_bias=False,
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=100)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores.keys())
def testLinearOnly(self):
"""Tests linear-only instantiation and training."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([1., 0., 0.2], dtype=dtypes.float32)
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=[
language_column, feature_column.real_valued_column('age')
],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=100)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores.keys())
def testDNNOnly(self):
"""Tests DNN-only instantiation and training."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([1., 0., 0.2], dtype=dtypes.float32)
language_column = feature_column.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
regressor = dnn_linear_combined.DNNLinearCombinedRegressor(
dnn_feature_columns=[
feature_column.embedding_column(
language_column, dimension=1),
feature_column.real_valued_column('age')
],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=100)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertIn('loss', scores.keys())
class FeatureEngineeringFunctionTest(test.TestCase):
"""Tests feature_engineering_fn."""
def testNoneFeatureEngineeringFn(self):
def input_fn():
# Create 4 rows of (y = x)
labels = constant_op.constant([[100.], [3.], [2.], [2.]])
features = {'x': constant_op.constant([[100.], [3.], [2.], [2.]])}
return features, labels
def feature_engineering_fn(features, labels):
_, _ = features, labels
labels = constant_op.constant([[1000.], [30.], [20.], [20.]])
features = {'x': constant_op.constant([[1000.], [30.], [20.], [20.]])}
return features, labels
estimator_with_fe_fn = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1),
feature_engineering_fn=feature_engineering_fn)
estimator_with_fe_fn.fit(input_fn=input_fn, steps=100)
estimator_without_fe_fn = dnn_linear_combined.DNNLinearCombinedRegressor(
linear_feature_columns=[feature_column.real_valued_column('x')],
dnn_feature_columns=[feature_column.real_valued_column('x')],
dnn_hidden_units=[3, 3],
config=run_config.RunConfig(tf_random_seed=1))
estimator_without_fe_fn.fit(input_fn=input_fn, steps=100)
# predictions = y
prediction_with_fe_fn = next(
estimator_with_fe_fn.predict_scores(
input_fn=input_fn, as_iterable=True))
self.assertAlmostEqual(1000., prediction_with_fe_fn, delta=10.0)
prediction_without_fe_fn = next(
estimator_without_fe_fn.predict_scores(
input_fn=input_fn, as_iterable=True))
self.assertAlmostEqual(100., prediction_without_fe_fn, delta=1.0)
if __name__ == '__main__':
test.main()
|
apache-2.0
|
joshloyal/scikit-learn
|
examples/tree/plot_tree_regression_multioutput.py
|
73
|
1854
|
"""
===================================================================
Multi-output Decision Tree Regression
===================================================================
An example illustrating multi-output regression with decision trees.
The :ref:`decision trees <tree>`
are used to simultaneously predict the noisy x and y observations of a circle
given a single underlying feature. As a result, they learn local linear
regressions approximating the circle.
We can see that if the maximum depth of the tree (controlled by the
`max_depth` parameter) is set too high, the decision trees learn overly fine
details of the training data and fit the noise, i.e. they overfit.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.tree import DecisionTreeRegressor
# Create a random dataset
rng = np.random.RandomState(1)
X = np.sort(200 * rng.rand(100, 1) - 100, axis=0)
y = np.array([np.pi * np.sin(X).ravel(), np.pi * np.cos(X).ravel()]).T
y[::5, :] += (0.5 - rng.rand(20, 2))
# Fit regression model
regr_1 = DecisionTreeRegressor(max_depth=2)
regr_2 = DecisionTreeRegressor(max_depth=5)
regr_3 = DecisionTreeRegressor(max_depth=8)
regr_1.fit(X, y)
regr_2.fit(X, y)
regr_3.fit(X, y)
# Predict
X_test = np.arange(-100.0, 100.0, 0.01)[:, np.newaxis]
y_1 = regr_1.predict(X_test)
y_2 = regr_2.predict(X_test)
y_3 = regr_3.predict(X_test)
# Plot the results
plt.figure()
s = 50
plt.scatter(y[:, 0], y[:, 1], c="navy", s=s, label="data")
plt.scatter(y_1[:, 0], y_1[:, 1], c="cornflowerblue", s=s, label="max_depth=2")
plt.scatter(y_2[:, 0], y_2[:, 1], c="c", s=s, label="max_depth=5")
plt.scatter(y_3[:, 0], y_3[:, 1], c="orange", s=s, label="max_depth=8")
plt.xlim([-6, 6])
plt.ylim([-6, 6])
plt.xlabel("target 1")
plt.ylabel("target 2")
plt.title("Multi-output Decision Tree Regression")
plt.legend()
plt.show()
|
bsd-3-clause
|
santosjorge/cufflinks
|
cufflinks/datagen.py
|
1
|
10514
|
import os
import string
import numpy as np
import pandas as pd
from .auth import get_config_file
from .exceptions import CufflinksError
def scattergeo():
"""
    Returns a DataFrame with the required format for
    a scattergeo plot
"""
path=os.path.join(os.path.dirname(__file__), '../data/scattergeo.csv')
df=pd.read_csv(path)
del df['Unnamed: 0']
df['text'] = df['airport'] + ' ' + df['city'] + ', ' + df['state'] + ' ' + 'Arrivals: ' + df['cnt'].astype(str)
df=df.rename(columns={'cnt':'z','long':'lon'})
return df
def choropleth():
"""
    Returns a DataFrame with the required format for
    a choropleth plot
"""
path=os.path.join(os.path.dirname(__file__), '../data/choropleth.csv')
df=pd.read_csv(path)
del df['Unnamed: 0']
df['z']=[np.random.randint(0,100) for _ in range(len(df))]
return df
def scatter3d(n_categories=5,n=10,prefix='category',mode=None):
"""
Returns a DataFrame with the required format for
a scatter3d plot
Parameters:
-----------
n_categories : int
Number of categories
n : int
Number of points for each trace
prefix : string
Name for each trace
mode : string
Format for each item
'abc' for alphabet columns
'stocks' for random stock names
"""
categories=[]
for i in range(n_categories):
categories.extend([prefix+str(i+1)]*n)
return pd.DataFrame({'x':np.random.randn(n*n_categories),
'y':np.random.randn(n*n_categories),
'z':np.random.randn(n*n_categories),
'text':getName(n*n_categories,mode=mode),
'categories':categories})
def bubble3d(n_categories=5,n=10,prefix='category',mode=None):
"""
Returns a DataFrame with the required format for
a bubble3d plot
Parameters:
-----------
n_categories : int
Number of categories
n : int
Number of points for each trace
prefix : string
Name for each trace
mode : string
Format for each item
'abc' for alphabet columns
'stocks' for random stock names
"""
categories=[]
for i in range(n_categories):
categories.extend([prefix+str(i+1)]*n)
return pd.DataFrame({'x':np.random.randn(n*n_categories),
'y':np.random.randn(n*n_categories),
'z':np.random.randn(n*n_categories),
'size':np.random.randint(1,100,n*n_categories),
'text':getName(n*n_categories,mode=mode),
'categories':categories})
def bubble(n_categories=5,n=10,prefix='category',mode=None):
"""
Returns a DataFrame with the required format for
a bubble plot
Parameters:
-----------
n_categories : int
Number of categories
n : int
Number of points for each category
prefix : string
Name for each category
mode : string
Format for each item
'abc' for alphabet columns
'stocks' for random stock names
"""
categories=[]
for i in range(n_categories):
categories.extend([prefix+str(i+1)]*n)
return pd.DataFrame({'x':np.random.randn(n*n_categories),
'y':np.random.randn(n*n_categories),
'size':np.random.randint(1,100,n*n_categories),
'text':getName(n*n_categories,mode=mode),
'categories':categories})
def pie(n_labels=5,mode=None):
"""
Returns a DataFrame with the required format for
a pie plot
Parameters:
-----------
n_labels : int
Number of labels
mode : string
Format for each item
'abc' for alphabet columns
'stocks' for random stock names
"""
return pd.DataFrame({'values':np.random.randint(1,100,n_labels),
'labels':getName(n_labels,mode=mode)})
def scatter(n_categories=5,n=10,prefix='category',mode=None):
"""
Returns a DataFrame with the required format for
a scatter plot
Parameters:
-----------
n_categories : int
Number of categories
n : int
Number of points for each category
prefix : string
Name for each category
mode : string
Format for each item
'abc' for alphabet columns
'stocks' for random stock names
"""
categories=[]
for i in range(n_categories):
categories.extend([prefix+str(i+1)]*n)
return pd.DataFrame({'x':np.random.randn(n*n_categories),
'y':np.random.randn(n*n_categories),
'text':getName(n*n_categories,mode=mode),
'categories':categories})
def heatmap(n_x=5,n_y=10):
"""
Returns a DataFrame with the required format for
a heatmap plot
Parameters:
-----------
n_x : int
Number of x categories
n_y : int
Number of y categories
"""
x=['x_'+str(_) for _ in range(n_x)]
y=['y_'+str(_) for _ in range(n_y)]
return pd.DataFrame(surface(n_x-1,n_y-1).values,index=x,columns=y)
def lines(n_traces=5,n=100,columns=None,dateIndex=True,mode=None):
"""
Returns a DataFrame with the required format for
a scatter (lines) plot
Parameters:
-----------
n_traces : int
Number of traces
n : int
Number of points for each trace
columns : [str]
List of column names
dateIndex : bool
If True it will return a datetime index
        If False it will return an enumerated index
mode : string
Format for each item
'abc' for alphabet columns
'stocks' for random stock names
"""
index=pd.date_range('1/1/15',periods=n) if dateIndex else list(range(n))
df=pd.DataFrame(np.random.randn(n,n_traces),index=index,
columns=getName(n_traces,columns=columns,mode=mode))
return df.cumsum()
def bars(n=3,n_categories=3,prefix='category',columns=None,mode='abc'):
"""
Returns a DataFrame with the required format for
a bar plot
Parameters:
-----------
    n : int
        Number of traces (columns)
    n_categories : int
        Number of categories (bars) for each trace
prefix : string
Name for each category
columns : [str]
List of column names
mode : string
Format for each item
'abc' for alphabet columns
'stocks' for random stock names
"""
categories=[]
if not columns:
columns=getName(n,mode=mode)
for i in range(n_categories):
categories.extend([prefix+str(i+1)])
data=dict([(x,np.random.randint(1,100,n_categories)) for x in columns])
return pd.DataFrame(data,index=categories)
def ohlc(n=100):
"""
Returns a DataFrame with the required format for
a candlestick or ohlc plot
df[['open','high','low','close']]
Parameters:
-----------
n : int
Number of ohlc points
"""
index=pd.date_range('1/1/15',periods=n*288,freq='5min',tz='utc')
data=np.random.randn(n*288)
data[0]=np.array([100])
df=pd.DataFrame(data,index=index,
columns=['a'])
df=df.cumsum()
df=df.resample('1d').ohlc()
df.index=df.index.date
df.index=pd.to_datetime(df.index)
return df['a']
def ohlcv(n=100):
"""
Returns a DataFrame with the required format for
a candlestick or ohlc plot
    df[['open','high','low','close','volume']]
Parameters:
-----------
n : int
Number of ohlc points
"""
df=ohlc(n=n)
df['volume']=[np.random.randint(1000,10000) for _ in range(len(df))]
return df
def box(n_traces=5,n=100,mode=None):
"""
Returns a DataFrame with the required format for
a box plot
Parameters:
-----------
n_traces : int
Number of traces
n : int
Number of points for each trace
mode : string
Format for each item
'abc' for alphabet columns
'stocks' for random stock names
"""
df=pd.DataFrame([np.random.chisquare(np.random.randint(2,10),n_traces) for _ in range(n)],
columns=getName(n_traces,mode=mode))
return df
def histogram(n_traces=1,n=500,dispersion=2,mode=None):
"""
Returns a DataFrame with the required format for
a histogram plot
Parameters:
-----------
n_traces : int
Number of traces
n : int
Number of points for each trace
mode : string
Format for each item
'abc' for alphabet columns
'stocks' for random stock names
"""
df=pd.DataFrame(np.transpose([np.random.randn(n)+np.random.randint(-1*dispersion,dispersion) for _ in range(n_traces)]),
columns=getName(n_traces,mode=mode))
return df
def distplot(n_traces=1,n=500,dispersion=3,mode=None):
"""
Returns a DataFrame with the required format for
a distribution plot (distplot)
Parameters:
-----------
n_traces : int
Number of traces
n : int
Number of points for each trace
mode : string
Format for each item
'abc' for alphabet columns
'stocks' for random stock names
"""
return histogram(n_traces,n,dispersion,mode)
def violin(n=500,dispersion=3,categories=True,n_categories=5):
"""
Returns a DataFrame with the required format for
    a violin plot
Parameters:
-----------
n : int
Number of points
categories : bool or int
If True, then a column with categories is added
n_categories : int
Number of categories
"""
df = histogram(1,n,dispersion,'abc')
df=df.rename(columns={'a':'data'})
if categories:
df['categories']=['category_{0}'.format(np.random.randint(n_categories)) for _ in range(n)]
return df
def surface(n_x=20,n_y=20):
"""
Returns a DataFrame with the required format for
a surface plot
Parameters:
-----------
n_x : int
Number of points along the X axis
n_y : int
Number of points along the Y axis
"""
x=[float(np.random.randint(0,100))]
for i in range(n_x):
x.append(x[:1][0]+np.random.randn()*np.random.randint(1,10))
df=pd.DataFrame(x)
for i in range(n_y):
df[i+1]=df[i].map(lambda x:x+np.random.randn()*np.random.randint(1,10))
return df
def sinwave(n=4,inc=.25):
"""
Returns a DataFrame with the required format for
a surface (sine wave) plot
Parameters:
-----------
n : int
Ranges for X and Y axis (-n,n)
    inc : float
Size of increment along the axis
"""
x=np.arange(-n,n,inc)
y=np.arange(-n,n,inc)
X,Y=np.meshgrid(x,y)
R = np.sqrt(X**2 + Y**2)
Z = np.sin(R)/(.5*R)
return pd.DataFrame(Z,index=x,columns=y)
def getName(n=1,name=3,exchange=2,columns=None,mode='abc'):
if columns:
if isinstance(columns,str):
columns=[columns]
if n != len(columns):
raise CufflinksError("Length of column names needs to be the \n"
"same length of traces")
else:
if mode is None:
mode=get_config_file()['datagen_mode']
if mode=='abc':
def get_abc(n):
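            # Maps 0 -> 'a', 25 -> 'z', 26 -> 'aa', ... (spreadsheet-style names).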
def _w(n,base=2):
_n=1
st=[]
while base**_n<=n:
_n+=1
for _ in range(_n-1,0,-1):
n_st=n//(base**_)
st.append(n_st)
n=n-n_st*(base**_)
st.append(n+1)
return st
st=_w(n,len(string.ascii_lowercase))
_st=''
for _ in st:
_st+=string.ascii_lowercase[_-1]
return _st
columns=[get_abc(_) for _ in range(n)]
elif mode=='stocks':
columns=[''.join(np.random.choice(list(string.ascii_uppercase),name)) + '.' + ''.join(np.random.choice(list(string.ascii_uppercase),exchange)) for _ in range(n)]
else:
raise CufflinksError("Unknown mode: {0}".format(mode))
return columns
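# A minimal usage sketch (runs only when this file is executed directly; the
# parameter values below are arbitrary illustrations, not library defaults):
if __name__ == '__main__':
    print(lines(n_traces=3, n=50, mode='stocks').head())  # cumulative random walks
    print(ohlcv(n=30).head())                             # daily OHLCV candles
    print(heatmap(n_x=5, n_y=10))                         # surface-based heatmap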
|
mit
|
edublancas/sklearn-evaluation
|
tests/nb/test_NotebookIntrospector.py
|
1
|
1563
|
from sklearn_evaluation import NotebookIntrospector
from IPython.display import HTML, Image
def test_simple_notebook(tmp_directory, nb_literals):
intr = NotebookIntrospector('nb_literals.ipynb')
assert set(intr) == {'int', 'list', 'dict'}
assert intr['int'] == 1
assert intr['list'] == [1, 2, 3]
assert intr['dict'] == {'x': 1, 'y': 2}
assert dict(intr) == {
'int': 1,
'list': [1, 2, 3],
'dict': {
'x': 1,
'y': 2
}
}
def test_notebook_with_plot(tmp_directory, nb_plot):
intr = NotebookIntrospector('nb_plot.ipynb')
img = intr['a']
assert isinstance(img, Image)
assert dict(intr) == {'b': 42, 'a': img}
def test_notebook_with_table(tmp_directory, nb_table):
intr = NotebookIntrospector('nb_table.ipynb')
html = intr['a']
assert isinstance(html, HTML)
assert dict(intr) == {'b': 42, 'a': html}
def test_notebook_with_no_output(tmp_directory, nb_no_output):
intr = NotebookIntrospector('nb_no_output.ipynb')
assert dict(intr) == dict()
def test_notebook_with_invalid_output(tmp_directory, nb_invalid_output):
intr = NotebookIntrospector(nb_invalid_output, literal_eval=True)
assert intr['numpy_array'] == 'array([1, 2, 3])'
def test_json_serializable(tmp_directory, nb_plot):
d = NotebookIntrospector('nb_plot.ipynb').to_json_serializable()
# must return the plain text representation of the plot (i.e. a str with
# "<Figure size YxZ with 1 Axes>")
assert 'Figure size' in d['a']
assert d['b'] == 42
|
mit
|
fredhusser/scikit-learn
|
examples/linear_model/plot_ransac.py
|
250
|
1673
|
"""
===========================================
Robust linear model estimation using RANSAC
===========================================
In this example we see how to robustly fit a linear model to faulty data using
the RANSAC algorithm.
"""
import numpy as np
from matplotlib import pyplot as plt
from sklearn import linear_model, datasets
n_samples = 1000
n_outliers = 50
X, y, coef = datasets.make_regression(n_samples=n_samples, n_features=1,
n_informative=1, noise=10,
coef=True, random_state=0)
# Add outlier data
np.random.seed(0)
X[:n_outliers] = 3 + 0.5 * np.random.normal(size=(n_outliers, 1))
y[:n_outliers] = -3 + 10 * np.random.normal(size=n_outliers)
# Fit line using all data
model = linear_model.LinearRegression()
model.fit(X, y)
# Robustly fit linear model with RANSAC algorithm
model_ransac = linear_model.RANSACRegressor(linear_model.LinearRegression())
model_ransac.fit(X, y)
inlier_mask = model_ransac.inlier_mask_
outlier_mask = np.logical_not(inlier_mask)
# Predict data of estimated models
line_X = np.arange(-5, 5)
line_y = model.predict(line_X[:, np.newaxis])
line_y_ransac = model_ransac.predict(line_X[:, np.newaxis])
# Compare estimated coefficients
print("Estimated coefficients (true, normal, RANSAC):")
print(coef, model.coef_, model_ransac.estimator_.coef_)
plt.plot(X[inlier_mask], y[inlier_mask], '.g', label='Inliers')
plt.plot(X[outlier_mask], y[outlier_mask], '.r', label='Outliers')
plt.plot(line_X, line_y, '-k', label='Linear regressor')
plt.plot(line_X, line_y_ransac, '-b', label='RANSAC regressor')
plt.legend(loc='lower right')
plt.show()
|
bsd-3-clause
|
lbeltrame/bcbio-nextgen
|
bcbio/pipeline/qcsummary.py
|
1
|
17471
|
"""Quality control and summary metrics for next-gen alignments and analysis.
"""
import collections
import copy
import csv
import os
import yaml
from datetime import datetime
import pandas as pd
import glob
import toolz as tz
from bcbio import bam
from bcbio import utils
from bcbio.cwl import cwlutils
from bcbio.log import logger
from bcbio.pipeline import config_utils, run_info
import bcbio.pipeline.datadict as dd
from bcbio.provenance import do
from bcbio.rnaseq import gtf
from bcbio.variation import damage, peddy, vcfutils, vcfanno
import six
# ## High level functions to generate summary
def qc_to_rec(samples):
"""CWL: Convert a set of input samples into records for parallelization.
"""
samples = [utils.to_single_data(x) for x in samples]
samples = cwlutils.assign_complex_to_samples(samples)
to_analyze, extras = _split_samples_by_qc(samples)
recs = cwlutils.samples_to_records([utils.to_single_data(x) for x in to_analyze + extras])
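    # Note (comment added for clarity): each record is wrapped in its own
    # single-element list below, matching the nesting the parallel runners
    # expect for one sample per work item.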
return [[x] for x in recs]
def generate_parallel(samples, run_parallel):
"""Provide parallel preparation of summary information for alignment and variant calling.
"""
to_analyze, extras = _split_samples_by_qc(samples)
qced = run_parallel("pipeline_summary", to_analyze)
samples = _combine_qc_samples(qced) + extras
qsign_info = run_parallel("qsignature_summary", [samples])
metadata_file = _merge_metadata([samples])
summary_file = write_project_summary(samples, qsign_info)
out = []
for data in samples:
if "summary" not in data[0]:
data[0]["summary"] = {}
data[0]["summary"]["project"] = summary_file
data[0]["summary"]["metadata"] = metadata_file
if qsign_info:
data[0]["summary"]["mixup_check"] = qsign_info[0]["out_dir"]
out.append(data)
out = _add_researcher_summary(out, summary_file)
# MultiQC must be run after all file outputs are set:
return [[utils.to_single_data(d)] for d in run_parallel("multiqc_summary", [out])]
def pipeline_summary(data):
"""Provide summary information on processing sample.
Handles standard and CWL (single QC output) cases.
"""
data = utils.to_single_data(data)
if data["analysis"].startswith("wgbs-seq"):
bismark_bam = dd.get_align_bam(data)
sorted_bam = bam.sort(bismark_bam, data["config"])
data = dd.set_align_bam(data, sorted_bam)
data = dd.set_work_bam(data, bismark_bam)
work_bam = dd.get_align_bam(data) or dd.get_work_bam(data)
if not work_bam or not work_bam.endswith(".bam"):
work_bam = None
if dd.get_ref_file(data):
if work_bam or (tz.get_in(["config", "algorithm", "kraken"], data)): # kraken doesn't need bam
logger.info("QC: %s %s" % (dd.get_sample_name(data), ", ".join(dd.get_algorithm_qc(data))))
work_data = cwlutils.unpack_tarballs(utils.deepish_copy(data), data)
data["summary"] = _run_qc_tools(work_bam, work_data)
if (len(dd.get_algorithm_qc(data)) == 1 and "output_cwl_keys" in data):
data["summary"]["qc"] = data["summary"]["qc"].get(dd.get_algorithm_qc(data)[0])
return [[data]]
def get_qc_tools(data):
"""Retrieve a list of QC tools to use based on configuration and analysis type.
Uses defaults if previously set.
"""
if dd.get_algorithm_qc(data):
return dd.get_algorithm_qc(data)
analysis = data["analysis"].lower()
to_run = []
if tz.get_in(["config", "algorithm", "kraken"], data):
to_run.append("kraken")
if "fastqc" not in dd.get_tools_off(data):
to_run.append("fastqc")
if any([tool in dd.get_tools_on(data)
for tool in ["qualimap", "qualimap_full"]]):
to_run.append("qualimap")
if analysis.startswith("rna-seq") or analysis == "smallrna-seq":
if "qualimap" not in dd.get_tools_off(data):
if gtf.is_qualimap_compatible(dd.get_gtf_file(data)):
to_run.append("qualimap_rnaseq")
else:
logger.debug("GTF not compatible with Qualimap, skipping.")
if analysis.startswith("chip-seq"):
to_run.append("chipqc")
if dd.get_chip_method(data) == "atac":
to_run.append("ataqv")
if analysis.startswith("smallrna-seq"):
to_run.append("small-rna")
to_run.append("atropos")
if "coverage_qc" not in dd.get_tools_off(data):
to_run.append("samtools")
if dd.has_variantcalls(data):
if "coverage_qc" not in dd.get_tools_off(data):
to_run += ["coverage", "picard"]
to_run += ["qsignature", "variants"]
if vcfanno.is_human(data):
to_run += ["peddy"]
if "contamination" not in dd.get_tools_off(data):
to_run += ["contamination"]
if vcfutils.get_paired_phenotype(data):
if "viral" not in dd.get_tools_off(data):
to_run += ["viral"]
if damage.should_filter([data]):
to_run += ["damage"]
if dd.get_umi_consensus(data):
to_run += ["umi"]
if tz.get_in(["config", "algorithm", "preseq"], data):
to_run.append("preseq")
to_run = [tool for tool in to_run if tool not in dd.get_tools_off(data)]
to_run.sort()
return to_run
def _run_qc_tools(bam_file, data):
"""Run a set of third party quality control tools, returning QC directory and metrics.
:param bam_file: alignments in bam format
:param data: dict with all configuration information
:returns: dict with output of different tools
"""
from bcbio.qc import (atropos, contamination, coverage, damage, fastqc, kraken,
qsignature, qualimap, samtools, picard, srna, umi, variant,
viral, preseq, chipseq, atac)
tools = {"fastqc": fastqc.run,
"atropos": atropos.run,
"small-rna": srna.run,
"samtools": samtools.run,
"qualimap": qualimap.run,
"qualimap_rnaseq": qualimap.run_rnaseq,
"qsignature": qsignature.run,
"contamination": contamination.run,
"coverage": coverage.run,
"damage": damage.run,
"variants": variant.run,
"peddy": peddy.run_qc,
"kraken": kraken.run,
"picard": picard.run,
"umi": umi.run,
"viral": viral.run,
"preseq": preseq.run,
"chipqc": chipseq.run,
"ataqv": atac.run
}
qc_dir = utils.safe_makedir(os.path.join(data["dirs"]["work"], "qc", data["description"]))
metrics = {}
qc_out = utils.deepish_copy(dd.get_summary_qc(data))
for program_name in dd.get_algorithm_qc(data):
if not bam_file and program_name != "kraken": # kraken doesn't need bam
continue
if dd.get_phenotype(data) == "germline" and program_name != "variants":
continue
qc_fn = tools[program_name]
cur_qc_dir = os.path.join(qc_dir, program_name)
out = qc_fn(bam_file, data, cur_qc_dir)
qc_files = None
if out and isinstance(out, dict):
# Check for metrics output, two cases:
# 1. output with {"metrics"} and files ("base")
if "metrics" in out:
metrics.update(out.pop("metrics"))
# 2. a dictionary of metrics
elif "base" not in out:
metrics.update(out)
# Check for files only output
if "base" in out:
qc_files = out
elif out and isinstance(out, six.string_types) and os.path.exists(out):
qc_files = {"base": out, "secondary": []}
if not qc_files:
qc_files = _organize_qc_files(program_name, cur_qc_dir)
if qc_files:
qc_out[program_name] = qc_files
metrics["Name"] = dd.get_sample_name(data)
metrics["Quality format"] = dd.get_quality_format(data).lower()
return {"qc": qc_out, "metrics": metrics}
def _organize_qc_files(program, qc_dir):
"""Organize outputs from quality control runs into a base file and secondary outputs.
    Provides compatibility with CWL output. Returns None if no files were created during processing.
"""
base_files = {"fastqc": "fastqc_report.html",
"qualimap_rnaseq": "qualimapReport.html",
"qualimap": "qualimapReport.html"}
if os.path.exists(qc_dir):
out_files = []
for fname in [os.path.join(qc_dir, x) for x in os.listdir(qc_dir)]:
if os.path.isfile(fname) and not fname.endswith(".bcbiotmp"):
out_files.append(fname)
elif os.path.isdir(fname) and not fname.endswith("tx"):
for root, dirs, files in os.walk(fname):
for f in files:
if not f.endswith(".bcbiotmp"):
out_files.append(os.path.join(root, f))
if len(out_files) > 0 and all([not f.endswith("-failed.log") for f in out_files]):
if len(out_files) == 1:
base = out_files[0]
secondary = []
else:
base = None
if program in base_files:
base_choices = [x for x in out_files if x.endswith("/%s" % base_files[program])]
if len(base_choices) == 1:
base = base_choices[0]
if not base:
base = out_files[0]
secondary = [x for x in out_files if x != base]
return {"base": base, "secondary": secondary}
# ## Allow parallelization for separate QC runs
def _split_samples_by_qc(samples):
"""Split data into individual quality control steps for a run.
"""
to_process = []
extras = []
for data in [utils.to_single_data(x) for x in samples]:
qcs = dd.get_algorithm_qc(data)
# kraken doesn't need bam
if qcs and (dd.get_align_bam(data) or dd.get_work_bam(data) or
tz.get_in(["config", "algorithm", "kraken"], data)):
for qc in qcs:
add = copy.deepcopy(data)
add["config"]["algorithm"]["qc"] = [qc]
to_process.append([add])
else:
extras.append([data])
return to_process, extras
def _combine_qc_samples(samples):
"""Combine split QC analyses into single samples based on BAM files.
"""
by_bam = collections.defaultdict(list)
for data in [utils.to_single_data(x) for x in samples]:
batch = dd.get_batch(data) or dd.get_sample_name(data)
if not isinstance(batch, (list, tuple)):
batch = [batch]
batch = tuple(batch)
by_bam[(dd.get_align_bam(data) or dd.get_work_bam(data), batch)].append(data)
out = []
for data_group in by_bam.values():
data = data_group[0]
alg_qc = []
qc = {}
metrics = {}
for d in data_group:
qc.update(dd.get_summary_qc(d))
metrics.update(dd.get_summary_metrics(d))
alg_qc.extend(dd.get_algorithm_qc(d))
data["config"]["algorithm"]["qc"] = alg_qc
data["summary"]["qc"] = qc
data["summary"]["metrics"] = metrics
out.append([data])
return out
# ## Generate project level QC summary for quickly assessing large projects
def write_project_summary(samples, qsign_info=None):
"""Write project summary information on the provided samples.
    Writes out dirs, genome resources and summary metrics for each sample.
    """
work_dir = samples[0][0]["dirs"]["work"]
out_file = os.path.join(work_dir, "project-summary.yaml")
upload_dir = (os.path.join(work_dir, samples[0][0]["upload"]["dir"])
if "dir" in samples[0][0]["upload"] else "")
date = str(datetime.now())
prev_samples = _other_pipeline_samples(out_file, samples)
with open(out_file, "w") as out_handle:
yaml.safe_dump({"date": date}, out_handle,
default_flow_style=False, allow_unicode=False)
if qsign_info:
qsign_out = utils.deepish_copy(qsign_info[0])
qsign_out.pop("out_dir", None)
yaml.safe_dump({"qsignature": qsign_out}, out_handle, default_flow_style=False,
allow_unicode=False)
yaml.safe_dump({"upload": upload_dir}, out_handle,
default_flow_style=False, allow_unicode=False)
yaml.safe_dump({"bcbio_system": samples[0][0]["config"].get("bcbio_system", "")}, out_handle,
default_flow_style=False, allow_unicode=False)
yaml.safe_dump({"samples": prev_samples + [_save_fields(sample[0]) for sample in samples]}, out_handle,
default_flow_style=False, allow_unicode=False)
return out_file
def _merge_metadata(samples):
"""Merge all metadata into CSV file"""
samples = list(utils.flatten(samples))
out_dir = dd.get_work_dir(samples[0])
logger.info("summarize metadata")
out_file = os.path.join(out_dir, "metadata.csv")
sample_metrics = collections.defaultdict(dict)
for s in samples:
m = tz.get_in(['metadata'], s)
if isinstance(m, six.string_types):
m = json.loads(m)
if m:
for me in list(m.keys()):
if isinstance(m[me], list) or isinstance(m[me], dict) or isinstance(m[me], tuple):
m.pop(me, None)
sample_metrics[dd.get_sample_name(s)].update(m)
pd.DataFrame(sample_metrics).transpose().to_csv(out_file)
return out_file
def _other_pipeline_samples(summary_file, cur_samples):
"""Retrieve samples produced previously by another pipeline in the summary output.
"""
cur_descriptions = set([s[0]["description"] for s in cur_samples])
out = []
if utils.file_exists(summary_file):
with open(summary_file) as in_handle:
for s in yaml.safe_load(in_handle).get("samples", []):
if s["description"] not in cur_descriptions:
out.append(s)
return out
def _save_fields(sample):
to_save = ["dirs", "genome_resources", "genome_build", "sam_ref", "metadata",
"description"]
saved = {k: sample[k] for k in to_save if k in sample}
if "summary" in sample and "metrics" in sample["summary"]:
saved["summary"] = {"metrics": sample["summary"]["metrics"]}
return saved
# ## Generate researcher specific summaries
def _add_researcher_summary(samples, summary_yaml):
"""Generate summary files per researcher if organized via a LIMS.
"""
by_researcher = collections.defaultdict(list)
for data in (x[0] for x in samples):
researcher = utils.get_in(data, ("upload", "researcher"))
if researcher:
by_researcher[researcher].append(data["description"])
out_by_researcher = {}
for researcher, descrs in by_researcher.items():
out_by_researcher[researcher] = _summary_csv_by_researcher(summary_yaml, researcher,
set(descrs), samples[0][0])
out = []
for data in (x[0] for x in samples):
researcher = utils.get_in(data, ("upload", "researcher"))
if researcher:
data["summary"]["researcher"] = out_by_researcher[researcher]
out.append([data])
return out
def _summary_csv_by_researcher(summary_yaml, researcher, descrs, data):
"""Generate a CSV file with summary information for a researcher on this project.
"""
out_file = os.path.join(utils.safe_makedir(os.path.join(data["dirs"]["work"], "researcher")),
"%s-summary.tsv" % run_info.clean_name(researcher))
metrics = ["Total_reads", "Mapped_reads", "Mapped_reads_pct", "Duplicates", "Duplicates_pct"]
with open(summary_yaml) as in_handle:
with open(out_file, "w") as out_handle:
writer = csv.writer(out_handle, dialect="excel-tab")
writer.writerow(["Name"] + metrics)
for sample in yaml.safe_load(in_handle)["samples"]:
if sample["description"] in descrs:
row = [sample["description"]] + [utils.get_in(sample, ("summary", "metrics", x), "")
for x in metrics]
writer.writerow(row)
return out_file
# ## Galaxy functionality
def prep_pdf(qc_dir, config):
"""Create PDF from HTML summary outputs in QC directory.
Requires wkhtmltopdf installed: http://www.msweet.org/projects.php?Z1
Thanks to: https://www.biostars.org/p/16991/
Works around issues with CSS conversion on CentOS by adjusting CSS.
"""
html_file = os.path.join(qc_dir, "fastqc", "fastqc_report.html")
html_fixed = "%s-fixed%s" % os.path.splitext(html_file)
try:
topdf = config_utils.get_program("wkhtmltopdf", config)
except config_utils.CmdNotFound:
topdf = None
if topdf and utils.file_exists(html_file):
out_file = "%s.pdf" % os.path.splitext(html_file)[0]
if not utils.file_exists(out_file):
cmd = ("sed 's/div.summary/div.summary-no/' %s | sed 's/div.main/div.main-no/' > %s"
% (html_file, html_fixed))
do.run(cmd, "Fix fastqc CSS to be compatible with wkhtmltopdf")
cmd = [topdf, html_fixed, out_file]
do.run(cmd, "Convert QC HTML to PDF")
return out_file
|
mit
|
cayetanobv/daynight2geojson
|
daynight2geojson/daynight2geojson.py
|
2
|
3599
|
# -*- coding: utf-8 -*-
#
# Author: Cayetano Benavent, 2015.
# https://github.com/GeographicaGS/daynight2geojson
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
#
import json
import geojson
import shapely.geometry
import shapely.wkt
# To avoid errors if you run module without X11
import matplotlib
matplotlib.use('Agg')
from mpl_toolkits.basemap import Basemap
from datetime import datetime
class DayNight2Geojson(object):
"""
    Get day and night geometry and dump it
    to a GeoJSON file
"""
def __init__(self, filepath, input_date=None):
"""
        filepath: destination file to store the output
            GeoJSON with the day-night geometry
        input_date = None uses the current UTC date and time.
            For any other date, a datetime object must be passed:
            datetime(year, month, day, hour, minute)
"""
self.filepath = filepath
self.input_date = input_date
def getDayNight(self):
"""
        Get day and night geometry and dump it
to a GeoJSON file
Default projection: Equirectangular (Cylindrical Equidistant)
Default date to compute: now (UTC)
"""
if self.input_date == None:
date = datetime.utcnow()
map_date = date.strftime("%d %b %Y %H:%M:%S")
else:
date = self.input_date
map_date = date.strftime("%d %b %Y %H:%M:%S")
map = Basemap(projection='cyl',lon_0=0, ellps='WGS84', resolution=None)
contour_set = map.nightshade(date)
self.__buildGeojson(contour_set, map_date)
print 'Day/Night Map for %s (UTC)' % (map_date)
def __buildGeojson(self, contour_set, map_date):
"""
        Build GeoJSON from the input geometry
"""
n_coll = len(contour_set.collections)
for cs_coll in range(n_coll):
if len(contour_set.collections[cs_coll].get_paths()) > 0:
cs_paths = contour_set.collections[cs_coll].get_paths()[0]
vert = cs_paths.vertices
lon = vert[:,0]
lat = vert[:,1]
if len(lon) > 2:
coord_list = [(coord[0], coord[1]) for coord in zip(lon,lat)]
geom = shapely.geometry.polygon.Polygon(coord_list)
geom_wkt = shapely.wkt.loads(str(geom))
geom_geojson = geojson.Feature(geometry=geom_wkt, properties={'Date': map_date})
self.__writeGeojsonFile(geom_geojson)
def __writeGeojsonFile(self, geojson_str):
"""
Write GeoJSON string to a file.
"""
geojson_file = open(self.filepath, 'w')
geojson_file.write(json.dumps(geojson_str))
geojson_file.close()
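# Usage sketch (added for illustration; the output path is hypothetical and not
# part of the original module):
if __name__ == '__main__':
    # Dump the current UTC day/night boundary to a GeoJSON file.
    daynight = DayNight2Geojson('daynight.geojson')
    daynight.getDayNight()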
|
gpl-2.0
|
shahankhatch/scikit-learn
|
examples/cluster/plot_feature_agglomeration_vs_univariate_selection.py
|
218
|
3893
|
"""
==============================================
Feature agglomeration vs. univariate selection
==============================================
This example compares 2 dimensionality reduction strategies:
- univariate feature selection with Anova
- feature agglomeration with Ward hierarchical clustering
Both methods are compared in a regression problem using
a BayesianRidge as supervised estimator.
"""
# Author: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
print(__doc__)
import shutil
import tempfile
import numpy as np
import matplotlib.pyplot as plt
from scipy import linalg, ndimage
from sklearn.feature_extraction.image import grid_to_graph
from sklearn import feature_selection
from sklearn.cluster import FeatureAgglomeration
from sklearn.linear_model import BayesianRidge
from sklearn.pipeline import Pipeline
from sklearn.grid_search import GridSearchCV
from sklearn.externals.joblib import Memory
from sklearn.cross_validation import KFold
###############################################################################
# Generate data
n_samples = 200
size = 40 # image size
roi_size = 15
snr = 5.
np.random.seed(0)
mask = np.ones([size, size], dtype=np.bool)
coef = np.zeros((size, size))
coef[0:roi_size, 0:roi_size] = -1.
coef[-roi_size:, -roi_size:] = 1.
X = np.random.randn(n_samples, size ** 2)
for x in X: # smooth data
x[:] = ndimage.gaussian_filter(x.reshape(size, size), sigma=1.0).ravel()
X -= X.mean(axis=0)
X /= X.std(axis=0)
y = np.dot(X, coef.ravel())
noise = np.random.randn(y.shape[0])
noise_coef = (linalg.norm(y, 2) / np.exp(snr / 20.)) / linalg.norm(noise, 2)
y += noise_coef * noise # add noise
###############################################################################
# Compute the coefs of a Bayesian Ridge with GridSearch
cv = KFold(len(y), 2) # cross-validation generator for model selection
ridge = BayesianRidge()
cachedir = tempfile.mkdtemp()
mem = Memory(cachedir=cachedir, verbose=1)
# Ward agglomeration followed by BayesianRidge
connectivity = grid_to_graph(n_x=size, n_y=size)
ward = FeatureAgglomeration(n_clusters=10, connectivity=connectivity,
memory=mem)
clf = Pipeline([('ward', ward), ('ridge', ridge)])
# Select the optimal number of parcels with grid search
clf = GridSearchCV(clf, {'ward__n_clusters': [10, 20, 30]}, n_jobs=1, cv=cv)
clf.fit(X, y) # set the best parameters
coef_ = clf.best_estimator_.steps[-1][1].coef_
coef_ = clf.best_estimator_.steps[0][1].inverse_transform(coef_)
coef_agglomeration_ = coef_.reshape(size, size)
# Anova univariate feature selection followed by BayesianRidge
f_regression = mem.cache(feature_selection.f_regression) # caching function
anova = feature_selection.SelectPercentile(f_regression)
clf = Pipeline([('anova', anova), ('ridge', ridge)])
# Select the optimal percentage of features with grid search
clf = GridSearchCV(clf, {'anova__percentile': [5, 10, 20]}, cv=cv)
clf.fit(X, y) # set the best parameters
coef_ = clf.best_estimator_.steps[-1][1].coef_
coef_ = clf.best_estimator_.steps[0][1].inverse_transform(coef_)
coef_selection_ = coef_.reshape(size, size)
###############################################################################
# Inverse the transformation to plot the results on an image
plt.close('all')
plt.figure(figsize=(7.3, 2.7))
plt.subplot(1, 3, 1)
plt.imshow(coef, interpolation="nearest", cmap=plt.cm.RdBu_r)
plt.title("True weights")
plt.subplot(1, 3, 2)
plt.imshow(coef_selection_, interpolation="nearest", cmap=plt.cm.RdBu_r)
plt.title("Feature Selection")
plt.subplot(1, 3, 3)
plt.imshow(coef_agglomeration_, interpolation="nearest", cmap=plt.cm.RdBu_r)
plt.title("Feature Agglomeration")
plt.subplots_adjust(0.04, 0.0, 0.98, 0.94, 0.16, 0.26)
plt.show()
# Attempt to remove the temporary cachedir, but don't worry if it fails
shutil.rmtree(cachedir, ignore_errors=True)
|
bsd-3-clause
|
khrapovs/famamcbeth
|
examples/usage_example.py
|
1
|
2904
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Usage examples
"""
from __future__ import print_function, division
import pandas as pd
import numpy as np
import datetime as dt
import matplotlib.pylab as plt
import seaborn as sns
from sas7bdat import SAS7BDAT
from famamcbeth import FamaMcBeth
def import_data():
parse = lambda x: dt.datetime.strptime(x, '%Y%m')
date_name = 'date'
factor_names = ['VWMe', 'SMB', 'HML']
rf_name = 'RF'
data = pd.read_csv('../data/FamaFrench.csv', index_col=date_name,
parse_dates=date_name, date_parser=parse)
riskfree = data[[rf_name]].values
factors = data[factor_names].values
# Augment factors with the constant
factors = np.hstack((np.ones_like(riskfree), factors))
portfolios = data[data.columns - factor_names - [rf_name]].values
excess_ret = portfolios - riskfree
return factors, excess_ret
def import_sas_data():
df = SAS7BDAT('../data/readytogmm.sas7bdat').to_data_frame().dropna()
excess_ret = df[df.columns[-25:]].values
factors = df[['MKTRF', 'SMB', 'HML', 'UMD']].values
riskfree = df[['RF']]
factors = np.hstack((np.ones_like(riskfree), factors))
return factors, excess_ret
def test_default():
# factors, excess_ret = import_data()
factors, excess_ret = import_sas_data()
model = FamaMcBeth(factors, excess_ret)
(param, gamma_rsq, gamma_rmse, theta_rsq, theta_rmse) \
= model.two_step_ols()
alpha, beta, gamma = model.convert_theta_to2d(param)
kernel = 'Bartlett'
band = 3
jstat, jpval = model.jtest(param, kernel=kernel, band=band)
tstat = model.alpha_beta_gamma_tstat(param, kernel=kernel, band=band)
alpha_tstat, beta_tstat, gamma_tstat = tstat
print('OLS results:')
print(gamma)
print(gamma_tstat)
print('J-stat = %.2f, p-value = %.2f\n' % (jstat, jpval))
method = 'L-BFGS-B'
# method = 'Nelder-Mead'
# method = 'basin'
res = model.gmmest(param, kernel=kernel, band=band, method=method)
param_final = model.convert_theta_to2d(res.theta)
alpha_final, beta_final, gamma_final = param_final
tstat_final = model.convert_theta_to2d(res.tstat)
alpha_tstat, beta_tstat, gamma_tstat = tstat_final
print('GMM results:')
print(gamma_final)
print(gamma_tstat)
jstat, jpval = model.jtest(res.theta, kernel=kernel, band=band)
print('J-stat = %.2f, p-value = %.2f' % (jstat, jpval))
ret_realized = model.get_realized_ret()
ret_predicted = model.get_predicted_ret(res.theta)
plt.scatter(ret_realized, ret_predicted)
x = np.linspace(*plt.gca().get_xlim())
plt.gca().plot(x, x)
plt.xlabel('Realized')
plt.ylabel('Predicted')
plt.show()
def try_size_bm_portfolios():
pass
if __name__ == '__main__':
np.set_printoptions(precision=3, suppress=True)
sns.set_context('paper')
test_default()
|
mit
|
lpryszcz/REDiscover
|
plot_violin.py
|
1
|
4061
|
#!/usr/bin/env python
desc="""Generate violin plot from REDiscover.diff output.
TBA:
- half-violin plot for sense/antisense? https://stackoverflow.com/questions/29776114/half-violin-plot
"""
epilog="""Author:
[email protected]
Fribourg, 14/08/2017
"""
import os, sys, gzip
import numpy as np
from itertools import izip
from datetime import datetime
import matplotlib
matplotlib.use('Agg') # Force matplotlib to not use any Xwindows backend
import matplotlib.pyplot as plt
from plot_hist import *
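# Note (comment added for clarity): `bases`, `base2rc` and `load_snps` used
# below are expected to be provided by the star import from plot_hist above.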
def violin_plot(outfn, snps, fnames, id2snp):
"""Generate violin plot"""
fig, axes = plt.subplots(nrows=1, ncols=12, figsize=(16, .5*len(fnames)+2))
# process all files
pos = range(1, len(fnames)+1)
for ax, _snps, name in izip(axes, snps, id2snp):
# get ax and set title
name = "%s\n%s events"%(name[:-1], max([len(x) for x in _snps]))
ax.set_title(name, fontsize=10)
# violin plot after http://matplotlib.org/examples/statistics/violinplot_demo.html
ax.violinplot(_snps, pos, points=1000, widths=.5, vert=False, showmeans=True, showextrema=True, showmedians=True, bw_method='silverman')
# show labels only of first plot
for i, ax in enumerate(axes.flatten()):
if i:
ax.set_yticklabels([])
else:
ax.set_yticks(pos)
ax.set_yticklabels(fnames)
#fig.suptitle("RNA editing violin plot", fontsize=16)
fig.subplots_adjust(hspace=.2, left=.15, right=.99, bottom=.05)
plt.savefig(outfn, dpi=300, transparent=False)
def main():
import argparse
usage = "%(prog)s [options]"
parser = argparse.ArgumentParser(usage=usage, description=desc, epilog=epilog, \
formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument("-v", "--verbose", default=False, action="store_true", help="verbose")
parser.add_argument('--version', action='version', version='1.15b')
parser.add_argument("-i", "--fnames", nargs="+", help="file(s) to process")
parser.add_argument("-b", "--bins", default=50, type=int,
help="number of bins in histogram [%(default)s]")
parser.add_argument("-e", "--eid", default=0, type=int,
help="element of bam name (after . splitting) [%(default)s]")
parser.add_argument("-s", "--selected", nargs="+", default=[], help="select samples [all]")
parser.add_argument("-s2", "--startswith", default='', help="select samples that start with string [all]")
parser.add_argument("--ext", default="png", choices=['png', 'svg', 'pdf', 'jpg'],
help="figure extension [%(default)s]")
# print help if no parameters
if len(sys.argv)==1:
parser.print_help()
sys.exit(1)
o = parser.parse_args()
if o.verbose:
sys.stderr.write("Options: %s\n"%str(o))
# get snp2id
snp2id, id2snp = {}, []
s = "+"
for a in bases:
for b in bases:
if a==b: continue
snp = "%s>%s%s"%(a, b, s)
snprc = "".join(base2rc[b] for b in snp)
snp2id[snp] = len(id2snp)
snp2id[snprc] = len(id2snp)
id2snp.append(snp)
# process input files
for fn in o.fnames:
snps, names = load_snps(fn, snp2id, o.eid)
# report stats
print fn
print "snp\t" + "\t".join(names)
oline = "%s\t"*6
for i, snp in enumerate(id2snp):
lens = map(len, [filter(lambda x: 0.02<x<0.98, _snps) for _snps in snps[i]])
print "%s\t"%snp[:-1] + "\t".join(map(str, lens))
outfn = "%s.violin_plot.%s"%(fn, o.ext)
violin_plot(outfn, snps, names, id2snp)
sys.stderr.write("[INFO] Violin plot saved as: %s\n"%outfn)
if __name__=="__main__":
t0 = datetime.now()
try:
main()
except KeyboardInterrupt:
sys.stderr.write("\nCtrl-C pressed! \n")
dt = datetime.now()-t0
sys.stderr.write("#Time elapsed: %s\n" % dt)
|
gpl-2.0
|
AlexandreMoulti/bachelier
|
ESG_template.py
|
1
|
1382
|
# -*- coding: utf-8 -*-
import xlwings as xw
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import statsmodels.api as sm
import scipy as sc
from bachelier import *
def launch_simulations():
wb = xw.Book.caller()
horizon = wb.sheets['Main'].range('B3').value
nb_steps = wb.sheets['Main'].range('B4').value
nb_sim = wb.sheets['Main'].range('B5').value
out_repo = wb.sheets['Main'].range('B6').value
grid = DiscretisationGrid(nb_sim,horizon,nb_steps)
zc_prices = wb.sheets['market_data'].range('F2:G33').options(pd.Series, index_col=0, numbers= np.float64).value
speed = wb.sheets['model_parameters'].range('B2').value
vol = wb.sheets['model_parameters'].range('B3').value
hw = HW1F(speed, vol, zc_prices)
hw.simulate(grid)
short_rates = hw.get_short_rates_simulations()
short_rates.to_csv(out_repo+'simulations_short_rates.csv')
r1 = hw.get_actuarial_rates_simulations(1)
r1.to_csv(out_repo+'simulation_actuarial_rates_1Y.csv')
r10 = hw.get_actuarial_rates_simulations(10)
r10.to_csv(out_repo+'simulation_actuarial_rates_10Y.csv')
if __name__ == '__main__':
# Expects the Excel file next to this source file, adjust accordingly.
xw.Book('ESG_template.xlsm').set_mock_caller()
launch_simulations()
|
mit
|
phoebe-project/phoebe2-docs
|
2.3/tutorials/passbands.py
|
2
|
13881
|
#!/usr/bin/env python
# coding: utf-8
# # Adding new passbands to PHOEBE
#
# In this tutorial we will show you how to add your own passband to PHOEBE. Adding a passband involves:
#
# * providing a passband transmission function;
# * defining and registering parameters of the passband;
# * computing blackbody response for the passband;
# * \[optional\] computing Castelli & Kurucz (2004) response for the passband;
# * \[optional\] computing Castelli & Kurucz (2004) limb darkening coefficients;
# * \[optional\] computing Castelli & Kurucz (2004) limb darkening integrals;
# * \[optional\] if the passband is one of the passbands included in the Wilson-Devinney code, importing the WD response; and
# * saving the generated passband file.
#
#
# Let's first make sure we have the latest version of PHOEBE 2.3 installed (uncomment this line if running in an online notebook session such as colab).
# In[ ]:
#!pip install -I "phoebe>=2.3,<2.4"
# I don't care about the details, just show/remind me how it's done
# -------------------------------------------------------------------
#
# Makes sense, and we don't judge: you want to get to science. Provided that you have the passband transmission file available and the ck2004 database already downloaded, the sequence that will generate/register a new passband is:
# In[ ]:
import phoebe
from phoebe import u
# Register a passband:
pb = phoebe.atmospheres.passbands.Passband(
ptf='my_passband.ptf', pbset='Custom', pbname='mypb',
effwl=330., wlunits=u.nm, calibrated=True,
reference='A completely made-up passband published in Nowhere (2017)', version=1.0,
comments='This is my first custom passband')
# Blackbody response:
pb.compute_blackbody_response()
# Castelli & Kurucz (2004) response:
pb.compute_ck2004_response(path='ck2004i')
pb.compute_ck2004_intensities(path='ck2004i')
pb.compute_ck2004_ldcoeffs()
pb.compute_ck2004_ldints()
# Wilson-Devinney response:
pb.import_wd_atmcof('atmcofplanck.dat', 'atmcof.dat', 22)
# Save the passband:
pb.save('my_passband.pb')
# Getting started
# -----------------
#
# Let us start by importing phoebe, numpy and matplotlib:
# In[1]:
get_ipython().run_line_magic('matplotlib', 'inline')
import phoebe
from phoebe import u # units
import numpy as np
import matplotlib.pyplot as plt
logger = phoebe.logger(clevel='WARNING')
# Passband transmission function
# -----------------------------------
#
# The passband transmission function is typically a user-provided two-column file. The first column is wavelength, and the second column is passband transmission. For the purposes of this tutorial, we will simulate the passband as a uniform box.
# In[2]:
wl = np.linspace(300, 360, 61)
ptf = np.zeros(len(wl))
ptf[(wl>=320) & (wl<=340)] = 1.0
# Let us plot this simulated passband transmission function to see what it looks like:
# In[3]:
plt.xlabel('Wavelength [nm]')
plt.ylabel('Passband transmission')
plt.plot(wl, ptf, 'b-')
plt.show()
# Let us now save these data in a file that we will use to register a new passband.
# In[4]:
np.savetxt('my_passband.ptf', np.vstack((wl, ptf)).T)
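# A quick sanity check (cell added for clarity, not part of the original tutorial):
# the saved file should hold 61 rows of (wavelength, transmission) pairs.
# In[ ]:
assert np.loadtxt('my_passband.ptf').shape == (61, 2)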
# Registering a passband
# -------------------------
#
# The first step in introducing a new passband into PHOEBE is registering it with the system. We use the [Passband](../api/phoebe.atmospheres.passbands.Passband.md) class for that.
# In[5]:
pb = phoebe.atmospheres.passbands.Passband(
ptf='my_passband.ptf',
pbset='Custom',
pbname='mypb',
effwl=330.,
wlunits=u.nm,
calibrated=True,
reference='A completely made-up passband published in Nowhere (2017)',
version=1.0,
comments='This is my first custom passband')
# The first argument, `ptf`, is the passband transmission file we just created. Of course, you would provide an actual passband transmission function that comes from a respectable source rather than this silly tutorial.
#
# The next two arguments, `pbset` and `pbname`, should be taken in unison. The way PHOEBE refers to passbands is a `pbset`:`pbname` string, for example `Johnson:V`, `Cousins:Rc`, etc. Thus, our fake passband will be `Custom:mypb`.
#
# The following two arguments, `effwl` and `wlunits`, also come as a pair. PHOEBE uses effective wavelength to apply zero-level passband corrections when better options (such as model atmospheres) are unavailable. Effective wavelength is a transmission-weighted average wavelength in the units given by `wlunits`.
#
# The `calibrated` parameter instructs PHOEBE whether to take the transmission function as calibrated, i.e. the flux through the passband is absolutely calibrated. If set to `True`, PHOEBE will assume that absolute intensities computed using the passband transmission function do not need further calibration. If `False`, the intensities are considered as scaled rather than absolute, i.e. correct up to a scaling constant. Most modern passbands provided in the recent literature are calibrated.
#
# The `reference` parameter holds a reference string to the literature from which the transmission function was taken. It is common that updated transmission functions become available, which is the point of the `version` parameter. If there are multiple versions of the transmission function, PHOEBE will by default take the highest version, or the version that is explicitly requested in the filter string, i.e. `Johnson:V:1.0` or `Johnson:V:2.0`.
#
# Finally, the `comments` parameter is a convenience parameter to store any additional pertinent information.
#
# Computing blackbody response
# --------------------------------
#
# To significantly speed up calculations, passband coefficients are stored in lookup tables instead of computing the intensities over and over again on the fly. Computed passband tables are tagged in the `content` property of the class:
# In[6]:
print(pb.content)
# Since we have not computed any tables yet, the list is empty for now. Blackbody functions for computing the lookup tables are built into PHOEBE and you do not need any auxiliary files to generate them. The lookup tables are defined for effective temperatures between 300K and 500,000K. To compute the blackbody response, issue:
# In[7]:
pb.compute_blackbody_response()
# Checking the `content` property again shows that the table has been successfully computed:
# In[8]:
print(pb.content)
# We can now test-drive the blackbody lookup table we just created. For this we will use a low-level `Passband` class method that computes normal emergent passband intensity, `Inorm()`. For the sake of simplicity, we will turn off limb darkening by setting `ld_func` to `'linear'` and `ld_coeffs` to `'[0.0]'`:
# In[9]:
print pb.Inorm(Teff=5772, atm='blackbody', ld_func='linear', ld_coeffs=[0.0])
# Let us now plot a range of temperatures, to make sure that normal emergent passband intensities do what they are supposed to do. While at it, let us compare what we get for the Johnson:V passband.
# In[10]:
jV = phoebe.get_passband('Johnson:V')
teffs = np.linspace(5000, 8000, 100)
plt.xlabel('Temperature [K]')
plt.ylabel('Inorm [W/m^2/A]')
plt.plot(teffs, pb.Inorm(teffs, atm='blackbody', ld_func='linear', ld_coeffs=[0.0]), label='mypb')
plt.plot(teffs, jV.Inorm(teffs, atm='blackbody', ld_func='linear', ld_coeffs=[0.0]), label='jV')
plt.legend(loc='lower right')
plt.show()
# This makes perfect sense: the Johnson V transmission function is wider than our boxed transmission function, so intensity in the V band is larger at lower temperatures. However, at hotter temperatures the contribution to the UV flux increases and our box passband, with a perfect transmission of 1, takes over.
# Computing Castelli & Kurucz (2004) response
# -------------------------------------------------
#
# For any real science you will want to generate model atmosphere tables. The default choice in PHOEBE are the models computed by Fiorella Castelli and Bob Kurucz ([website](http://wwwuser.oats.inaf.it/castelli/), [paper](https://arxiv.org/abs/astro-ph/0405087)) that feature new opacity distribution functions. In principle, you can generate PHOEBE-compatible tables for *any* model atmospheres, but that would require a bit of book-keeping legwork in the PHOEBE backend. Contact [Andrej Prša](mailto:[email protected]) to discuss an extension to other model atmospheres.
#
# To compute Castelli & Kurucz (2004) tables for the passband of your choice, you will need to download a precomputed database of absolute intensities. This database is *huge*, so beware. You will need approximately 140GB of free space. Once you are sure you have this kind of space available, proceed to download the database tarball (28GB):
#
# ```
# [cd into a parent directory that will hold the database]
# wget http://phoebe-project.org/static/ck2004i.tgz
# tar xzf ck2004i.tgz
# ```
#
# Keep in mind that this will take a long time. Plan to go for lunch or leave it overnight. The good news is that this needs to be done only once.
#
# Once the database is unpacked, you are ready to compute the tables. We start with the ck2004 response table:
# In[11]:
pb.compute_ck2004_response(path='ck2004i', verbose=False)
# Note, of course, that you will need to change the `path` to point to the directory where you unpacked the ck2004 database. The verbosity parameter `verbose` will report on the progress as computation is being done. Depending on your computer speed, this step will take ~10 minutes to complete. We can now check the passband's `content` attribute again:
# In[12]:
print(pb.content)
# Let us now use the same low-level function as before to compare the normal emergent passband intensity of our custom passband for the blackbody and ck2004 model atmospheres. One other complication is that, unlike the blackbody model, which depends only on temperature, the ck2004 model also depends on surface gravity (log g) and heavy metal abundance, so we need to pass those arrays as well.
# In[13]:
loggs = np.ones(len(teffs))*4.43
abuns = np.zeros(len(teffs))
plt.xlabel('Temperature [K]')
plt.ylabel('Inorm [W/m^2/A]')
plt.plot(teffs, pb.Inorm(teffs, atm='blackbody', ld_func='linear', ld_coeffs=[0.0]), label='blackbody')
plt.plot(teffs, pb.Inorm(teffs, loggs, abuns, atm='ck2004', ld_func='linear', ld_coeffs=[0.0]), label='ck2004')
plt.legend(loc='lower right')
plt.show()
# Quite a difference. That is why using model atmospheres is superior when accuracy is of importance. Next, we need to compute direction-dependent intensities for all our limb darkening and boosting needs. This is a step that takes a long time; depending on your computer speed, it can take a few hours to complete.
# In[14]:
pb.compute_ck2004_intensities(path='ck2004i', verbose=False)
# This step will allow PHOEBE to compute all direction-dependent intensities on the fly, including the interpolation of the limb darkening coefficients that is model-independent. When limb darkening models are preferred (for example, when you don't quite trust direction-dependent intensities from the model atmosphere), we need to calculate two more tables: one for limb darkening coefficients and the other for the integrated limb darkening. That is done by two methods that do not take appreciable time to complete:
# In[15]:
pb.compute_ck2004_ldcoeffs()
pb.compute_ck2004_ldints()
# This completes the computation of Castelli & Kurucz auxiliary tables.
#
# Importing Wilson-Devinney response
# ---------------------------------------
#
# PHOEBE no longer shares any codebase with the WD code, but for comparison purposes it is sometimes useful to use the same atmosphere tables. If the passband you are registering with PHOEBE has been defined in WD's atmcof.dat and atmcofplanck.dat files, PHOEBE can import those coefficients and use them to compute intensities.
#
# To import a set of WD atmospheric coefficients, you need to know the corresponding index of the passband (you can look it up in the WD user manual available at ftp://ftp.astro.ufl.edu/pub/wilson/lcdc2003/ebdoc2003.2feb2004.pdf.gz) and you need to grab the files ftp://ftp.astro.ufl.edu/pub/wilson/lcdc2003/atmcofplanck.dat.gz and ftp://ftp.astro.ufl.edu/pub/wilson/lcdc2003/atmcof.dat.gz from Bob Wilson's webpage. For this particular passband the index is 22. To import, issue:
# In[16]:
pb.import_wd_atmcof('atmcofplanck.dat', 'atmcof.dat', 22)
# We can consult the `content` attribute to see the entire set of supported tables, and plot different atmosphere models for comparison purposes:
# In[17]:
print(pb.content)
# In[18]:
plt.xlabel('Temperature [K]')
plt.ylabel('Inorm [W/m^2/A]')
plt.plot(teffs, pb.Inorm(teffs, atm='blackbody', ld_func='linear', ld_coeffs=[0.0]), label='blackbody')
plt.plot(teffs, pb.Inorm(teffs, loggs, abuns, atm='ck2004', ld_func='linear', ld_coeffs=[0.0]), label='ck2004')
plt.plot(teffs, pb.Inorm(teffs, loggs, abuns, atm='extern_atmx', ld_func='linear', ld_coeffs=[0.0]), label='wd_atmx')
plt.legend(loc='lower right')
plt.show()
# Still an appreciable difference. This hopefully illustrates why excruciating caution should be exercised at all times when modeling radiation.
#
# Saving the passband table
# -----------------------------
#
# The final step of all this (computer's) hard work is to save the passband file so that these steps do not need to be ever repeated. From now on you will be able to load the passband file explicitly and PHOEBE will have full access to all of its tables. Your new passband will be identified as `'Custom:mypb'`.
# In[19]:
pb.save('my_passband.pb')
# To make PHOEBE automatically load the passband, add it to the `phoebe/atmospheres/tables/passbands` directory. If there are no proprietary aspects that hinder the dissemination of the tables, please consider contributing them to PHOEBE so that other users can use them.
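# As a final illustration (added here as a sketch; it assumes the `Passband.load`
# classmethod available in PHOEBE 2.x and is not part of the original tutorial),
# the saved file can be loaded back and inspected:
# In[ ]:
loaded_pb = phoebe.atmospheres.passbands.Passband.load('my_passband.pb')
print(loaded_pb.content)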
|
gpl-3.0
|
mhue/scikit-learn
|
sklearn/covariance/__init__.py
|
389
|
1157
|
"""
The :mod:`sklearn.covariance` module includes methods and algorithms to
robustly estimate the covariance of features given a set of points. The
precision matrix defined as the inverse of the covariance is also estimated.
Covariance estimation is closely related to the theory of Gaussian Graphical
Models.
"""
from .empirical_covariance_ import empirical_covariance, EmpiricalCovariance, \
log_likelihood
from .shrunk_covariance_ import shrunk_covariance, ShrunkCovariance, \
ledoit_wolf, ledoit_wolf_shrinkage, \
LedoitWolf, oas, OAS
from .robust_covariance import fast_mcd, MinCovDet
from .graph_lasso_ import graph_lasso, GraphLasso, GraphLassoCV
from .outlier_detection import EllipticEnvelope
__all__ = ['EllipticEnvelope',
'EmpiricalCovariance',
'GraphLasso',
'GraphLassoCV',
'LedoitWolf',
'MinCovDet',
'OAS',
'ShrunkCovariance',
'empirical_covariance',
'fast_mcd',
'graph_lasso',
'ledoit_wolf',
'ledoit_wolf_shrinkage',
'log_likelihood',
'oas',
'shrunk_covariance']
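# Minimal usage sketch (comment added for illustration, not part of the
# original module):
#
#     import numpy as np
#     from sklearn.covariance import LedoitWolf
#     X = np.random.RandomState(0).randn(50, 5)
#     cov = LedoitWolf().fit(X).covariance_   # (5, 5) shrunk covariance estimate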
|
bsd-3-clause
|
PrashntS/scikit-learn
|
examples/linear_model/plot_iris_logistic.py
|
283
|
1678
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Logistic Regression 3-class Classifier
=========================================================
Shown below are the decision boundaries of a logistic-regression classifier on
the `iris <http://en.wikipedia.org/wiki/Iris_flower_data_set>`_ dataset. The
datapoints are colored according to their labels.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model, datasets
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features.
Y = iris.target
h = .02 # step size in the mesh
logreg = linear_model.LogisticRegression(C=1e5)
# Create an instance of the logistic-regression classifier and fit the data.
logreg.fit(X, Y)
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
Z = logreg.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.figure(1, figsize=(4, 3))
plt.pcolormesh(xx, yy, Z, cmap=plt.cm.Paired)
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=Y, edgecolors='k', cmap=plt.cm.Paired)
plt.xlabel('Sepal length')
plt.ylabel('Sepal width')
plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
plt.xticks(())
plt.yticks(())
plt.show()
|
bsd-3-clause
|
iamshang1/Projects
|
Basic_ML/Reinforcement_Learning/cat_mouse.py
|
1
|
14119
|
import numpy as np
from copy import copy
from matplotlib import pyplot as plt
import matplotlib.patches as mpatches
# Parameters
world_size = 10
gamma = 0.5
q_learning_iterations = 250
cats = 1
mice = 2
cheese_rate = 0.01
obstacle_rate = 0.3
sight_range = 8
cheese_value = 50
cat_value = -100
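# World cell encoding (comment added for clarity, inferred from the code below):
# 0 = empty, 1 = cat, 2 = mouse, 3 = cheese, 9 = obstacle / out-of-bounds border
# padding of width `sight_range`.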
# Classes
class Cat(object):
def __init__(self, position, sight_range=4, gamma=0.8, q_learning_iterations=100):
self.position = position
self.sight_range = sight_range
self.gamma = gamma
self.q_learning_iterations = q_learning_iterations
def update(self,world):
self.world = world
self.vision = self.world[self.position[0]-self.sight_range:self.position[0]+self.sight_range+1,self.position[1]-self.sight_range:self.position[1]+self.sight_range+1]
def q_learn(self):
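        # Value-propagation sweep (comment added for clarity): seed mouse cells
        # with utility 100, then repeatedly follow greedy paths from random
        # starts, backing up reward + gamma * neighbor utility along the way.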
shape = self.vision.shape
self.utility_grid = np.zeros((shape[0],shape[0]))
for i in range(0,shape[0]):
for j in range(0,shape[0]):
if self.vision[i,j] == 2:
self.utility_grid[i,j] = 100
for i in range(self.q_learning_iterations):
start = (np.random.randint(shape[0]),np.random.randint(shape[0]))
while self.vision[start[0],start[1]] == 9 or self.vision[start[0],start[1]] == 1 or self.vision[start[0],start[1]] == 3:
start = (np.random.randint(shape[0]),np.random.randint(shape[0]))
pos = start
for i in range(100):
if self.vision[pos[0],pos[1]] == 2:
break
neighbors = [(pos[0]+1,pos[1]),(pos[0]-1,pos[1]),(pos[0],pos[1]+1),(pos[0],pos[1]-1)]
max_utility = -999
next = pos
for neighbor in neighbors:
if neighbor[0] > shape[0]-1 or neighbor[1] > shape[0]-1 or neighbor[0] < 0 or neighbor[1] < 0:
continue
elif self.vision[neighbor[0],neighbor[1]] == 9 or self.vision[neighbor[0],neighbor[1]] == 1 or self.vision[neighbor[0],neighbor[1]] == 3:
continue
utility = round(self.reward_grid[pos[0],pos[1]] + self.gamma*self.utility_grid[neighbor[0],neighbor[1]], 2)
if utility > max_utility:
max_utility = utility
next = neighbor
self.utility_grid[pos[0],pos[1]] = max_utility
pos = next
def move(self):
self.reward_grid = np.ones((self.sight_range*2+1,self.sight_range*2+1)) * -0.1
self.q_learn()
options = {"up":(self.sight_range-1,self.sight_range),"down":(self.sight_range+1,self.sight_range),"left":(self.sight_range,self.sight_range-1),"right":(self.sight_range,self.sight_range+1)}
self.nextmove = "stay"
max_utility = -999
for key,val in options.iteritems():
if self.vision[val[0],val[1]] == 9 or self.vision[val[0],val[1]] == 1 or self.vision[val[0],val[1]] == 3:
continue
utility = self.utility_grid[val[0],val[1]]
if utility > max_utility:
max_utility = utility
self.nextmove = key
current = copy(self.position)
if self.nextmove == "up":
self.position[0] -= 1
self.world[self.position[0],self.position[1]] = 1
self.world[current[0],current[1]] = 0
elif self.nextmove == "down":
self.position[0] += 1
self.world[self.position[0],self.position[1]] = 1
self.world[current[0],current[1]] = 0
elif self.nextmove == "left":
self.position[1] -= 1
self.world[self.position[0],self.position[1]] = 1
self.world[current[0],current[1]] = 0
elif self.nextmove == "right":
self.position[1] += 1
self.world[self.position[0],self.position[1]] = 1
self.world[current[0],current[1]] = 0
return self.world
class Mouse(object):
def __init__(self, position, sight_range=4, gamma=0.8, q_learning_iterations=100, cheese_value = 10, cat_value = -100):
self.position = position
self.sight_range = sight_range
self.gamma = gamma
self.q_learning_iterations = q_learning_iterations
self.cheese_value = cheese_value
self.cat_value = cat_value
def update(self,world):
self.world = world
self.vision = self.world[self.position[0]-self.sight_range:self.position[0]+self.sight_range+1,self.position[1]-self.sight_range:self.position[1]+self.sight_range+1]
def q_learn(self):
shape = self.vision.shape
self.utility_grid_cat = np.zeros((shape[0],shape[0]))
self.utility_grid_cheese = np.zeros((shape[0],shape[0]))
for i in range(0,shape[0]):
for j in range(0,shape[0]):
if self.vision[i,j] == 1:
self.utility_grid_cat[i,j] = -self.cat_value
if self.vision[i,j] == 3:
self.utility_grid_cheese[i,j] = self.cheese_value
for i in range(self.q_learning_iterations):
start = (np.random.randint(shape[0]),np.random.randint(shape[0]))
while self.vision[start[0],start[1]] == 9 or self.vision[start[0],start[1]] == 2:
start = (np.random.randint(shape[0]),np.random.randint(shape[0]))
pos = start
for i in range(100):
if self.vision[pos[0],pos[1]] == 1:
break
neighbors = [(pos[0]+1,pos[1]),(pos[0]-1,pos[1]),(pos[0],pos[1]+1),(pos[0],pos[1]-1)]
max_utility = -999
next = pos
for neighbor in neighbors:
if neighbor[0] > shape[0]-1 or neighbor[1] > shape[0]-1 or neighbor[0] < 0 or neighbor[1] < 0:
continue
elif self.vision[neighbor[0],neighbor[1]] == 9 or self.vision[neighbor[0],neighbor[1]] == 2:
continue
utility = round(self.reward_grid[pos[0],pos[1]] + self.gamma*self.utility_grid_cat[neighbor[0],neighbor[1]],2)
if utility > max_utility:
max_utility = utility
next = neighbor
self.utility_grid_cat[pos[0],pos[1]] = max_utility
pos = next
for i in range(self.q_learning_iterations):
start = (np.random.randint(shape[0]),np.random.randint(shape[0]))
while self.vision[start[0],start[1]] == 9 or self.vision[start[0],start[1]] == 2:
start = (np.random.randint(shape[0]),np.random.randint(shape[0]))
pos = start
for i in range(100):
if self.vision[pos[0],pos[1]] == 3:
break
neighbors = [(pos[0]+1,pos[1]),(pos[0]-1,pos[1]),(pos[0],pos[1]+1),(pos[0],pos[1]-1)]
max_utility = -999
next = pos
for neighbor in neighbors:
if neighbor[0] > shape[0]-1 or neighbor[1] > shape[0]-1 or neighbor[0] < 0 or neighbor[1] < 0:
continue
elif self.vision[neighbor[0],neighbor[1]] == 9 or self.vision[neighbor[0],neighbor[1]] == 2:
continue
utility = round(self.reward_grid[pos[0],pos[1]] + self.gamma*self.utility_grid_cheese[neighbor[0],neighbor[1]],2)
if utility > max_utility:
max_utility = utility
next = neighbor
self.utility_grid_cheese[pos[0],pos[1]] = max_utility
pos = next
self.utility_grid = self.utility_grid_cheese - self.utility_grid_cat
def move(self):
self.reward_grid = np.ones((self.sight_range*2+1,self.sight_range*2+1)) * 0.1
self.q_learn()
options = {"up":(self.sight_range-1,self.sight_range),"down":(self.sight_range+1,self.sight_range),"left":(self.sight_range,self.sight_range-1),"right":(self.sight_range,self.sight_range+1)}
self.nextmove = "stay"
max_utility = -999
for key,val in options.iteritems():
if self.vision[val[0],val[1]] == 9 or self.vision[val[0],val[1]] == 2:
continue
utility = self.utility_grid[val[0],val[1]]
if utility > max_utility:
max_utility = utility
self.nextmove = key
current = copy(self.position)
if self.nextmove == "up":
self.position[0] -= 1
self.world[self.position[0],self.position[1]] = 2
self.world[current[0],current[1]] = 0
elif self.nextmove == "down":
self.position[0] += 1
self.world[self.position[0],self.position[1]] = 2
self.world[current[0],current[1]] = 0
elif self.nextmove == "left":
self.position[1] -= 1
self.world[self.position[0],self.position[1]] = 2
self.world[current[0],current[1]] = 0
elif self.nextmove == "right":
self.position[1] += 1
self.world[self.position[0],self.position[1]] = 2
self.world[current[0],current[1]] = 0
return self.world
class World(object):
def __init__(self,world_size=100, gamma=0.8, q_learning_iterations=100, cats=2, mice=5, cheese_rate=0.001, obstacle_rate=0.01, sight_range=4, cheese_value = 10, cat_value = -100):
self.world = np.zeros((world_size+sight_range*2,world_size+sight_range*2))
self.world[:,:sight_range] = 9
self.world[:,-sight_range:] = 9
self.world[:sight_range,:] = 9
self.world[-sight_range:,:] = 9
self.sight_range = sight_range
self.gamma = gamma
self.q_learning_iterations = q_learning_iterations
self.num_cats = cats
self.num_mice = mice
self.cheese_rate = cheese_rate
self.obstacle_rate = obstacle_rate
self.cheese_value = cheese_value
self.cat_value = cat_value
self.cats = []
self.mice = []
#self.gif = 0
self.populate()
self.state()
def populate(self):
taken = []
for cat in range(self.num_cats):
position = [np.random.randint(self.sight_range,self.world.shape[0]-self.sight_range),np.random.randint(self.sight_range,self.world.shape[0]-self.sight_range)]
while position in taken:
position = [np.random.randint(self.sight_range,self.world.shape[0]-self.sight_range),np.random.randint(self.sight_range,self.world.shape[0]-self.sight_range)]
taken.append(position)
self.cats.append(Cat(position,self.sight_range,self.gamma,self.q_learning_iterations))
self.world[position[0],position[1]] = 1
for mouse in range(self.num_mice):
position = [np.random.randint(self.sight_range,self.world.shape[0]-self.sight_range),np.random.randint(self.sight_range,self.world.shape[0]-self.sight_range)]
while position in taken:
position = [np.random.randint(self.sight_range,self.world.shape[0]-self.sight_range),np.random.randint(self.sight_range,self.world.shape[0]-self.sight_range)]
taken.append(position)
self.mice.append(Mouse(position,self.sight_range,self.gamma,self.q_learning_iterations,self.cheese_value,self.cat_value))
self.world[position[0],position[1]] = 2
for i in range(0,self.world.shape[0]):
for j in range(0,self.world.shape[0]):
if self.world[i,j] == 0:
if np.random.random() < self.cheese_rate:
self.world[i,j] = 3
for i in range(0,self.world.shape[0]):
for j in range(0,self.world.shape[0]):
if self.world[i,j] == 0:
if np.random.random() < self.obstacle_rate:
self.world[i,j] = 9
for cat in self.cats:
cat.update(self.world)
for mouse in self.mice:
mouse.update(self.world)
def iterate(self):
for mouse in self.mice:
if self.world[mouse.position[0],mouse.position[1]] == 1:
self.mice.remove(mouse)
continue
self.world = mouse.move()
for cat in self.cats:
cat.update(self.world)
for mouse in self.mice:
mouse.update(self.world)
self.state()
for cat in self.cats:
self.world = cat.move()
for cat in self.cats:
cat.update(self.world)
for mouse in self.mice:
mouse.update(self.world)
self.state()
for i in range(0,self.world.shape[0]):
for j in range(0,self.world.shape[0]):
if self.world[i,j] == 0:
if np.random.random() < self.cheese_rate:
self.world[i,j] = 3
for cat in self.cats:
cat.update(self.world)
for mouse in self.mice:
mouse.update(self.world)
self.state()
def state(self):
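        """Render the visible part of the world as a colour-coded image with a legend."""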
img = np.copy(self.world[self.sight_range:self.world.shape[0]-self.sight_range,self.sight_range:self.world.shape[0]-self.sight_range])
img[img==0] = 256
img[img==1] = 215
img[img==2] = 123
img[img==3] = 175
img[img==9] = 1
p = plt.imshow(img, interpolation='nearest', cmap='nipy_spectral')
fig = plt.gcf()
c1 = mpatches.Patch(color='red', label='cats')
c2 = mpatches.Patch(color='green', label='mice')
c3 = mpatches.Patch(color='yellow', label='cheese')
plt.legend(handles=[c1,c2,c3],loc='center left',bbox_to_anchor=(1, 0.5))
#plt.savefig("cat_mouse%i.png" % self.gif, bbox_inches='tight')
#self.gif += 1
plt.pause(0.1)
# Run the algorithm: build a world and advance it for 40 steps
world = World(world_size, gamma, q_learning_iterations, cats, mice, cheese_rate,
              obstacle_rate, sight_range, cheese_value, cat_value)
for i in range(40):
    print "iteration %i" % (i + 1)
    world.iterate()
|
mit
|
greenoaktree/MissionPlanner
|
Lib/site-packages/scipy/signal/fir_filter_design.py
|
53
|
18572
|
"""Functions for FIR filter design."""
from math import ceil, log
import numpy as np
from numpy.fft import irfft
from scipy.special import sinc
import sigtools
# Some notes on function parameters:
#
# `cutoff` and `width` are given as numbers between 0 and 1.  These
# are relative frequencies, expressed as a fraction of the Nyquist rate.
# For example, if the Nyquist rate is 2 kHz, then width=0.15 is a width
# of 300 Hz.
#
# The `order` of a FIR filter is one less than the number of taps.
# This is a potential source of confusion, so in the following code,
# we will always use the number of taps as the parameterization of
# the 'size' of the filter. The "number of taps" means the number
# of coefficients, which is the same as the length of the impulse
# response of the filter.
def kaiser_beta(a):
"""Compute the Kaiser parameter `beta`, given the attenuation `a`.
Parameters
----------
a : float
The desired attenuation in the stopband and maximum ripple in
the passband, in dB. This should be a *positive* number.
Returns
-------
beta : float
The `beta` parameter to be used in the formula for a Kaiser window.
References
----------
Oppenheim, Schafer, "Discrete-Time Signal Processing", p.475-476.
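
    Examples
    --------
    For illustration (an example added for clarity; the 65 dB value is
    arbitrary):

    >>> beta = kaiser_beta(65.0)  # roughly 6.2 for 65 dB of attenuation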
"""
if a > 50:
beta = 0.1102 * (a - 8.7)
elif a > 21:
beta = 0.5842 * (a - 21)**0.4 + 0.07886 * (a - 21)
else:
beta = 0.0
return beta
def kaiser_atten(numtaps, width):
"""Compute the attenuation of a Kaiser FIR filter.
Given the number of taps `N` and the transition width `width`, compute the
attenuation `a` in dB, given by Kaiser's formula:
a = 2.285 * (N - 1) * pi * width + 7.95
Parameters
----------
    numtaps : int
The number of taps in the FIR filter.
width : float
The desired width of the transition region between passband and stopband
(or, in general, at any discontinuity) for the filter.
Returns
-------
a : float
The attenuation of the ripple, in dB.
See Also
--------
kaiserord, kaiser_beta
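
    Examples
    --------
    For illustration (an example added for clarity; the values are arbitrary):

    >>> a = kaiser_atten(211, 0.0375)  # roughly 64.5 dB for these values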
"""
a = 2.285 * (numtaps - 1) * np.pi * width + 7.95
return a
def kaiserord(ripple, width):
"""Design a Kaiser window to limit ripple and width of transition region.
Parameters
----------
ripple : float
Positive number specifying maximum ripple in passband (dB) and minimum
        attenuation in the stopband (dB).
width : float
Width of transition region (normalized so that 1 corresponds to pi
radians / sample).
Returns
-------
numtaps : int
The length of the kaiser window.
beta :
The beta parameter for the kaiser window.
Notes
-----
There are several ways to obtain the Kaiser window:
signal.kaiser(numtaps, beta, sym=0)
signal.get_window(beta, numtaps)
signal.get_window(('kaiser', beta), numtaps)
The empirical equations discovered by Kaiser are used.
See Also
--------
kaiser_beta, kaiser_atten
References
----------
Oppenheim, Schafer, "Discrete-Time Signal Processing", p.475-476.
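
    Examples
    --------
    A sketch of typical use together with `firwin` (the 65 dB ripple and the
    0.3 cutoff are arbitrary values chosen for illustration):

    >>> numtaps, beta = kaiserord(ripple=65, width=0.05)
    >>> taps = firwin(numtaps, cutoff=0.3, window=('kaiser', beta))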
"""
A = abs(ripple) # in case somebody is confused as to what's meant
if A < 8:
# Formula for N is not valid in this range.
raise ValueError("Requested maximum ripple attentuation %f is too "
"small for the Kaiser formula." % A)
beta = kaiser_beta(A)
# Kaiser's formula (as given in Oppenheim and Schafer) is for the filter
# order, so we have to add 1 to get the number of taps.
numtaps = (A - 7.95) / 2.285 / (np.pi * width) + 1
return int(ceil(numtaps)), beta
def firwin(numtaps, cutoff, width=None, window='hamming', pass_zero=True,
scale=True, nyq=1.0):
"""
FIR filter design using the window method.
This function computes the coefficients of a finite impulse response filter.
The filter will have linear phase; it will be Type I if `numtaps` is odd and
Type II if `numtaps` is even.
Type II filters always have zero response at the Nyquist rate, so a
ValueError exception is raised if firwin is called with `numtaps` even and
having a passband whose right end is at the Nyquist rate.
Parameters
----------
numtaps : int
Length of the filter (number of coefficients, i.e. the filter
order + 1). `numtaps` must be even if a passband includes the
Nyquist frequency.
cutoff : float or 1D array_like
Cutoff frequency of filter (expressed in the same units as `nyq`)
OR an array of cutoff frequencies (that is, band edges). In the
latter case, the frequencies in `cutoff` should be positive and
monotonically increasing between 0 and `nyq`. The values 0 and
`nyq` must not be included in `cutoff`.
width : float or None
If `width` is not None, then assume it is the approximate width
of the transition region (expressed in the same units as `nyq`)
for use in Kaiser FIR filter design. In this case, the `window`
argument is ignored.
window : string or tuple of string and parameter values
Desired window to use. See `scipy.signal.get_window` for a list
of windows and required parameters.
pass_zero : bool
If True, the gain at the frequency 0 (i.e. the "DC gain") is 1.
Otherwise the DC gain is 0.
scale : bool
Set to True to scale the coefficients so that the frequency
response is exactly unity at a certain frequency.
That frequency is either:
0 (DC) if the first passband starts at 0 (i.e. pass_zero
is True);
`nyq` (the Nyquist rate) if the first passband ends at
            `nyq` (i.e. the filter is a single band highpass filter);
center of first passband otherwise.
nyq : float
Nyquist frequency. Each frequency in `cutoff` must be between 0
and `nyq`.
Returns
-------
h : 1D ndarray
Coefficients of length `numtaps` FIR filter.
Raises
------
ValueError
If any value in `cutoff` is less than or equal to 0 or greater
than or equal to `nyq`, if the values in `cutoff` are not strictly
monotonically increasing, or if `numtaps` is even but a passband
includes the Nyquist frequency.
Examples
--------
Low-pass from 0 to f::
>>> firwin(numtaps, f)
Use a specific window function::
>>> firwin(numtaps, f, window='nuttall')
High-pass ('stop' from 0 to f)::
>>> firwin(numtaps, f, pass_zero=False)
Band-pass::
>>> firwin(numtaps, [f1, f2], pass_zero=False)
Band-stop::
>>> firwin(numtaps, [f1, f2])
Multi-band (passbands are [0, f1], [f2, f3] and [f4, 1])::
    >>> firwin(numtaps, [f1, f2, f3, f4])
Multi-band (passbands are [f1, f2] and [f3,f4])::
>>> firwin(numtaps, [f1, f2, f3, f4], pass_zero=False)
See also
--------
scipy.signal.firwin2
"""
# The major enhancements to this function added in November 2010 were
# developed by Tom Krauss (see ticket #902).
cutoff = np.atleast_1d(cutoff) / float(nyq)
# Check for invalid input.
if cutoff.ndim > 1:
raise ValueError("The cutoff argument must be at most one-dimensional.")
if cutoff.size == 0:
raise ValueError("At least one cutoff frequency must be given.")
if cutoff.min() <= 0 or cutoff.max() >= 1:
raise ValueError("Invalid cutoff frequency: frequencies must be greater than 0 and less than nyq.")
if np.any(np.diff(cutoff) <= 0):
raise ValueError("Invalid cutoff frequencies: the frequencies must be strictly increasing.")
if width is not None:
# A width was given. Find the beta parameter of the Kaiser window
# and set `window`. This overrides the value of `window` passed in.
atten = kaiser_atten(numtaps, float(width)/nyq)
beta = kaiser_beta(atten)
window = ('kaiser', beta)
pass_nyquist = bool(cutoff.size & 1) ^ pass_zero
if pass_nyquist and numtaps % 2 == 0:
raise ValueError("A filter with an even number of coefficients must "
"have zero response at the Nyquist rate.")
# Insert 0 and/or 1 at the ends of cutoff so that the length of cutoff is even,
# and each pair in cutoff corresponds to passband.
cutoff = np.hstack(([0.0]*pass_zero, cutoff, [1.0]*pass_nyquist))
# `bands` is a 2D array; each row gives the left and right edges of a passband.
bands = cutoff.reshape(-1,2)
# Build up the coefficients.
alpha = 0.5 * (numtaps-1)
m = np.arange(0, numtaps) - alpha
h = 0
for left, right in bands:
h += right * sinc(right * m)
h -= left * sinc(left * m)
# Get and apply the window function.
from signaltools import get_window
win = get_window(window, numtaps, fftbins=False)
h *= win
# Now handle scaling if desired.
if scale:
# Get the first passband.
left, right = bands[0]
if left == 0:
scale_frequency = 0.0
elif right == 1:
scale_frequency = 1.0
else:
scale_frequency = 0.5 * (left + right)
c = np.cos(np.pi * m * scale_frequency)
s = np.sum(h * c)
h /= s
return h
# Original version of firwin2 from scipy ticket #457, submitted by "tash".
#
# Rewritten by Warren Weckesser, 2010.
def firwin2(numtaps, freq, gain, nfreqs=None, window='hamming', nyq=1.0):
"""FIR filter design using the window method.
From the given frequencies `freq` and corresponding gains `gain`,
this function constructs an FIR filter with linear phase and
(approximately) the given frequency response.
Parameters
----------
numtaps : int
The number of taps in the FIR filter. `numtaps` must be less than
`nfreqs`. If the gain at the Nyquist rate, `gain[-1]`, is not 0,
then `numtaps` must be odd.
freq : array-like, 1D
The frequency sampling points. Typically 0.0 to 1.0 with 1.0 being
Nyquist. The Nyquist frequency can be redefined with the argument
`nyq`.
The values in `freq` must be nondecreasing. A value can be repeated
once to implement a discontinuity. The first value in `freq` must
be 0, and the last value must be `nyq`.
gain : array-like
The filter gains at the frequency sampling points.
nfreqs : int, optional
The size of the interpolation mesh used to construct the filter.
For most efficient behavior, this should be a power of 2 plus 1
        (e.g., 129, 257, etc.). The default is one more than the smallest
power of 2 that is not less than `numtaps`. `nfreqs` must be greater
than `numtaps`.
window : string or (string, float) or float, or None, optional
Window function to use. Default is "hamming". See
`scipy.signal.get_window` for the complete list of possible values.
If None, no window function is applied.
nyq : float
Nyquist frequency. Each frequency in `freq` must be between 0 and
`nyq` (inclusive).
Returns
-------
taps : numpy 1D array of length `numtaps`
The filter coefficients of the FIR filter.
Example
-------
A lowpass FIR filter with a response that is 1 on [0.0, 0.5], and
that decreases linearly on [0.5, 1.0] from 1 to 0:
>>> taps = firwin2(150, [0.0, 0.5, 1.0], [1.0, 1.0, 0.0])
>>> print(taps[72:78])
[-0.02286961 -0.06362756 0.57310236 0.57310236 -0.06362756 -0.02286961]
See also
--------
scipy.signal.firwin
Notes
-----
From the given set of frequencies and gains, the desired response is
constructed in the frequency domain. The inverse FFT is applied to the
desired response to create the associated convolution kernel, and the
first `numtaps` coefficients of this kernel, scaled by `window`, are
returned.
The FIR filter will have linear phase. The filter is Type I if `numtaps`
is odd and Type II if `numtaps` is even. Because Type II filters always
have a zero at the Nyquist frequency, `numtaps` must be odd if `gain[-1]`
is not zero.
.. versionadded:: 0.9.0
References
----------
.. [1] Oppenheim, A. V. and Schafer, R. W., "Discrete-Time Signal
Processing", Prentice-Hall, Englewood Cliffs, New Jersey (1989).
(See, for example, Section 7.4.)
.. [2] Smith, Steven W., "The Scientist and Engineer's Guide to Digital
Signal Processing", Ch. 17. http://www.dspguide.com/ch17/1.htm
"""
if len(freq) != len(gain):
raise ValueError('freq and gain must be of same length.')
if nfreqs is not None and numtaps >= nfreqs:
raise ValueError('ntaps must be less than nfreqs, but firwin2 was '
'called with ntaps=%d and nfreqs=%s' % (numtaps, nfreqs))
if freq[0] != 0 or freq[-1] != nyq:
raise ValueError('freq must start with 0 and end with `nyq`.')
d = np.diff(freq)
if (d < 0).any():
raise ValueError('The values in freq must be nondecreasing.')
d2 = d[:-1] + d[1:]
if (d2 == 0).any():
raise ValueError('A value in freq must not occur more than twice.')
if numtaps % 2 == 0 and gain[-1] != 0.0:
raise ValueError("A filter with an even number of coefficients must "
"have zero gain at the Nyquist rate.")
if nfreqs is None:
nfreqs = 1 + 2 ** int(ceil(log(numtaps,2)))
# Tweak any repeated values in freq so that interp works.
eps = np.finfo(float).eps
for k in range(len(freq)):
if k < len(freq)-1 and freq[k] == freq[k+1]:
freq[k] = freq[k] - eps
freq[k+1] = freq[k+1] + eps
# Linearly interpolate the desired response on a uniform mesh `x`.
x = np.linspace(0.0, nyq, nfreqs)
fx = np.interp(x, freq, gain)
# Adjust the phases of the coefficients so that the first `ntaps` of the
# inverse FFT are the desired filter coefficients.
shift = np.exp(-(numtaps-1)/2. * 1.j * np.pi * x / nyq)
fx2 = fx * shift
# Use irfft to compute the inverse FFT.
out_full = irfft(fx2)
if window is not None:
# Create the window to apply to the filter coefficients.
from signaltools import get_window
wind = get_window(window, numtaps, fftbins=False)
else:
wind = 1
# Keep only the first `numtaps` coefficients in `out`, and multiply by
# the window.
out = out_full[:numtaps] * wind
return out
def remez(numtaps, bands, desired, weight=None, Hz=1, type='bandpass',
maxiter=25, grid_density=16):
"""
Calculate the minimax optimal filter using the Remez exchange algorithm.
Calculate the filter-coefficients for the finite impulse response
(FIR) filter whose transfer function minimizes the maximum error
between the desired gain and the realized gain in the specified
frequency bands using the Remez exchange algorithm.
Parameters
----------
numtaps : int
The desired number of taps in the filter. The number of taps is
the number of terms in the filter, or the filter order plus one.
bands : array_like
A monotonic sequence containing the band edges in Hz.
All elements must be non-negative and less than half the sampling
frequency as given by `Hz`.
desired : array_like
A sequence half the size of bands containing the desired gain
in each of the specified bands.
weight : array_like, optional
A relative weighting to give to each band region. The length of
`weight` has to be half the length of `bands`.
Hz : scalar, optional
The sampling frequency in Hz. Default is 1.
type : {'bandpass', 'differentiator', 'hilbert'}, optional
The type of filter:
'bandpass' : flat response in bands. This is the default.
'differentiator' : frequency proportional response in bands.
'hilbert' : filter with odd symmetry, that is, type III
(for even order) or type IV (for odd order)
linear phase filters.
maxiter : int, optional
Maximum number of iterations of the algorithm. Default is 25.
grid_density : int, optional
Grid density. The dense grid used in `remez` is of size
``(numtaps + 1) * grid_density``. Default is 16.
Returns
-------
out : ndarray
A rank-1 array containing the coefficients of the optimal
(in a minimax sense) filter.
See Also
--------
freqz : Compute the frequency response of a digital filter.
References
----------
.. [1] J. H. McClellan and T. W. Parks, "A unified approach to the
design of optimum FIR linear phase digital filters",
IEEE Trans. Circuit Theory, vol. CT-20, pp. 697-701, 1973.
.. [2] J. H. McClellan, T. W. Parks and L. R. Rabiner, "A Computer
Program for Designing Optimum FIR Linear Phase Digital
Filters", IEEE Trans. Audio Electroacoust., vol. AU-21,
pp. 506-525, 1973.
Examples
--------
We want to construct a filter with a passband at 0.2-0.4 Hz, and
stop bands at 0-0.1 Hz and 0.45-0.5 Hz. Note that this means that the
behavior in the frequency ranges between those bands is unspecified and
may overshoot.
>>> bpass = sp.signal.remez(72, [0, 0.1, 0.2, 0.4, 0.45, 0.5], [0, 1, 0])
>>> freq, response = sp.signal.freqz(bpass)
>>> ampl = np.abs(response)
>>> import matplotlib.pyplot as plt
>>> fig = plt.figure()
>>> ax1 = fig.add_subplot(111)
>>> ax1.semilogy(freq/(2*np.pi), ampl, 'b-') # freq in Hz
[<matplotlib.lines.Line2D object at 0xf486790>]
>>> plt.show()
"""
# Convert type
try:
tnum = {'bandpass':1, 'differentiator':2, 'hilbert':3}[type]
except KeyError:
raise ValueError("Type must be 'bandpass', 'differentiator', or 'hilbert'")
# Convert weight
if weight is None:
weight = [1] * len(desired)
bands = np.asarray(bands).copy()
return sigtools._remez(numtaps, bands, desired, weight, tnum, Hz,
maxiter, grid_density)
|
gpl-3.0
|
mne-tools/mne-tools.github.io
|
0.17/_downloads/20a1da06c35b0bcff7922862fdce817b/plot_background_filtering.py
|
2
|
49239
|
# -*- coding: utf-8 -*-
r"""
===================================
Background information on filtering
===================================
Here we give some background information on filtering in general,
and how it is done in MNE-Python in particular.
Recommended reading for practical applications of digital
filter design can be found in Parks & Burrus [1]_ and
Ifeachor and Jervis [2]_, and for filtering in an
M/EEG context we recommend reading Widmann *et al.* 2015 [7]_.
To see how to use the default filters in MNE-Python on actual data, see
the :ref:`sphx_glr_auto_tutorials_plot_artifacts_correction_filtering.py`
tutorial.
.. contents::
:local:
Problem statement
=================
The practical issues with filtering electrophysiological data are covered
well by Widmann *et al.* in [7]_, in a follow-up to an article where they
conclude with this statement:
Filtering can result in considerable distortions of the time course
(and amplitude) of a signal as demonstrated by VanRullen (2011) [[3]_].
Thus, filtering should not be used lightly. However, if effects of
filtering are cautiously considered and filter artifacts are minimized,
a valid interpretation of the temporal dynamics of filtered
electrophysiological data is possible and signals missed otherwise
can be detected with filtering.
In other words, filtering can increase SNR, but if it is not used carefully,
it can distort data. Here we hope to cover some filtering basics so
users can better understand filtering tradeoffs, and why MNE-Python has
chosen particular defaults.
.. _tut_filtering_basics:
Filtering basics
================
Let's get some of the basic math down. In the frequency domain, digital
filters have a transfer function that is given by:
.. math::
H(z) &= \frac{b_0 + b_1 z^{-1} + b_2 z^{-2} + ... + b_M z^{-M}}
{1 + a_1 z^{-1} + a_2 z^{-2} + ... + a_N z^{-M}} \\
&= \frac{\sum_0^Mb_kz^{-k}}{\sum_1^Na_kz^{-k}}
In the time domain, the numerator coefficients :math:`b_k` and denominator
coefficients :math:`a_k` can be used to obtain our output data
:math:`y(n)` in terms of our input data :math:`x(n)` as:
.. math::
:label: summations
y(n) &= b_0 x(n) + b_1 x(n-1) + ... + b_M x(n-M)
- a_1 y(n-1) - a_2 y(n - 2) - ... - a_N y(n - N)\\
&= \sum_0^M b_k x(n-k) - \sum_1^N a_k y(n-k)
In other words, the output at time :math:`n` is determined by a sum over:
1. The numerator coefficients :math:`b_k`, which get multiplied by
the previous input :math:`x(n-k)` values, and
2. The denominator coefficients :math:`a_k`, which get multiplied by
the previous output :math:`y(n-k)` values.
Note that these summations in :eq:`summations` correspond nicely to
(1) a weighted `moving average`_ and (2) an autoregression_.
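For instance, the recursion in :eq:`summations` can be evaluated directly with
:func:`scipy.signal.lfilter` (a minimal sketch with arbitrary coefficients,
added here purely for illustration)::

    import numpy as np
    from scipy.signal import lfilter

    b, a = [0.5, 0.5], [1.0, -0.2]  # two feedforward terms, one feedback term
    x = np.array([1., 0., 0., 0.])  # unit impulse input
    y = lfilter(b, a, x)            # y(n) follows the difference equation above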
Filters are broken into two classes: FIR_ (finite impulse response) and
IIR_ (infinite impulse response) based on these coefficients.
FIR filters use a finite number of numerator
coefficients :math:`b_k` (:math:`\forall k, a_k=0`), and thus each output
value of :math:`y(n)` depends only on the :math:`M` previous input values.
IIR filters depend on the previous input and output values, and thus can have
effectively infinite impulse responses.
As outlined in [1]_, FIR and IIR have different tradeoffs:
* A causal FIR filter can be linear-phase -- i.e., the same time delay
across all frequencies -- whereas a causal IIR filter cannot. The phase
and group delay characteristics are also usually better for FIR filters.
* IIR filters can generally have a steeper cutoff than an FIR filter of
equivalent order.
* IIR filters are generally less numerically stable, in part due to
  accumulating error (due to their recursive calculations).
In MNE-Python we default to using FIR filtering. As noted in Widmann *et al.*
2015 [7]_:
Despite IIR filters often being considered as computationally more
efficient, they are recommended only when high throughput and sharp
cutoffs are required (Ifeachor and Jervis, 2002 [2]_, p. 321),
...FIR filters are easier to control, are always stable, have a
well-defined passband, can be corrected to zero-phase without
additional computations, and can be converted to minimum-phase.
We therefore recommend FIR filters for most purposes in
electrophysiological data analysis.
When designing a filter (FIR or IIR), there are always tradeoffs that
need to be considered, including but not limited to:
1. Ripple in the pass-band
2. Attenuation of the stop-band
3. Steepness of roll-off
4. Filter order (i.e., length for FIR filters)
5. Time-domain ringing
In general, the sharper something is in frequency, the broader it is in time,
and vice-versa. This is a fundamental time-frequency tradeoff, and it will
show up below.
FIR Filters
===========
First we will focus on FIR filters, which are the default filters used by
MNE-Python.
"""
###############################################################################
# Designing FIR filters
# ---------------------
# Here we'll try designing a low-pass filter, and look at trade-offs in terms
# of time- and frequency-domain filter characteristics. Later, in
# :ref:`tut_effect_on_signals`, we'll look at how such filters can affect
# signals when they are used.
#
# First let's import some useful tools for filtering, and set some default
# values for our data that are reasonable for M/EEG data.
import numpy as np
from scipy import signal, fftpack
import matplotlib.pyplot as plt
from mne.time_frequency.tfr import morlet
from mne.viz import plot_filter, plot_ideal_filter
import mne
sfreq = 1000.
f_p = 40.
flim = (1., sfreq / 2.) # limits for plotting
###############################################################################
# Take for example an ideal low-pass filter, which would give a value of 1 in
# the pass-band (up to frequency :math:`f_p`) and a value of 0 in the stop-band
# (down to frequency :math:`f_s`) such that :math:`f_p=f_s=40` Hz here
# (shown to a lower limit of -60 dB for simplicity):
nyq = sfreq / 2. # the Nyquist frequency is half our sample rate
freq = [0, f_p, f_p, nyq]
gain = [1, 1, 0, 0]
third_height = np.array(plt.rcParams['figure.figsize']) * [1, 1. / 3.]
ax = plt.subplots(1, figsize=third_height)[1]
plot_ideal_filter(freq, gain, ax, title='Ideal %s Hz lowpass' % f_p, flim=flim)
###############################################################################
# This filter hypothetically achieves zero ripple in the frequency domain,
# perfect attenuation, and perfect steepness. However, due to the discontinuity
# in the frequency response, the filter would require infinite ringing in the
# time domain (i.e., infinite order) to be realized. Another way to think of
# this is that a rectangular window in frequency is actually a sinc_ function
# in time, which requires an infinite number of samples, and thus infinite
# time, to represent. So although this filter has ideal frequency suppression,
# it has poor time-domain characteristics.
#
# Let's try to naïvely make a brick-wall filter of length 0.1 sec, and look
# at the filter itself in the time domain and the frequency domain:
n = int(round(0.1 * sfreq)) + 1
t = np.arange(-n // 2, n // 2) / sfreq # center our sinc
h = np.sinc(2 * f_p * t) / (4 * np.pi)
plot_filter(h, sfreq, freq, gain, 'Sinc (0.1 sec)', flim=flim)
###############################################################################
# This is not so good! Making the filter 10 times longer (1 sec) gets us a
# bit better stop-band suppression, but still has a lot of ringing in
# the time domain. Note the x-axis is an order of magnitude longer here,
# and the filter has a correspondingly much longer group delay (again equal
# to half the filter length, or 0.5 seconds):
n = int(round(1. * sfreq)) + 1
t = np.arange(-n // 2, n // 2) / sfreq
h = np.sinc(2 * f_p * t) / (4 * np.pi)
plot_filter(h, sfreq, freq, gain, 'Sinc (1.0 sec)', flim=flim)
###############################################################################
# Let's make the stop-band tighter still with a longer filter (10 sec),
# with a resulting larger x-axis:
n = int(round(10. * sfreq)) + 1
t = np.arange(-n // 2, n // 2) / sfreq
h = np.sinc(2 * f_p * t) / (4 * np.pi)
plot_filter(h, sfreq, freq, gain, 'Sinc (10.0 sec)', flim=flim)
###############################################################################
# Now we have very sharp frequency suppression, but our filter rings for the
# entire 10 seconds. So this naïve method is probably not a good way to build
# our low-pass filter.
#
# Fortunately, there are multiple established methods to design FIR filters
# based on desired response characteristics. These include:
#
# 1. The Remez_ algorithm (:func:`scipy.signal.remez`, `MATLAB firpm`_)
# 2. Windowed FIR design (:func:`scipy.signal.firwin2`, `MATLAB fir2`_
# and :func:`scipy.signal.firwin`)
# 3. Least squares designs (:func:`scipy.signal.firls`, `MATLAB firls`_)
# 4. Frequency-domain design (construct filter in Fourier
# domain and use an :func:`IFFT <scipy.fftpack.ifft>` to invert it)
#
# .. note:: Remez and least squares designs have advantages when there are
# "do not care" regions in our frequency response. However, we want
# well controlled responses in all frequency regions.
# Frequency-domain construction is good when an arbitrary response
# is desired, but generally less clean (due to sampling issues) than
#           a windowed approach for more straightforward filter applications.
# Since our filters (low-pass, high-pass, band-pass, band-stop)
# are fairly simple and we require precise control of all frequency
# regions, here we will use and explore primarily windowed FIR
#           design (a brief sketch comparing these approaches appears after
#           the first windowed example below).
#
# If we relax our frequency-domain filter requirements a little bit, we can
# use these functions to construct a lowpass filter that instead has a
# *transition band*, or a region between the pass frequency :math:`f_p`
# and stop frequency :math:`f_s`, e.g.:
trans_bandwidth = 10 # 10 Hz transition band
f_s = f_p + trans_bandwidth # = 50 Hz
freq = [0., f_p, f_s, nyq]
gain = [1., 1., 0., 0.]
ax = plt.subplots(1, figsize=third_height)[1]
title = '%s Hz lowpass with a %s Hz transition' % (f_p, trans_bandwidth)
plot_ideal_filter(freq, gain, ax, title=title, flim=flim)
###############################################################################
# Accepting a shallower roll-off of the filter in the frequency domain makes
# our time-domain response potentially much better. We end up with a
# smoother slope through the transition region, but a *much* cleaner time
# domain signal. Here again for the 1 sec filter:
n = int(round(1. * sfreq)) + 1
h = signal.firwin2(n, freq, gain, nyq=nyq)
plot_filter(h, sfreq, freq, gain, 'Windowed 10-Hz transition (1.0 sec)',
flim=flim)
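# For comparison, the same pass-band/stop-band specification could also be met
# with the least-squares and Remez (equiripple) designs mentioned earlier in
# this section. This is only a hedged sketch added for illustration (shorter
# filters are used here so that the Remez exchange converges quickly):
n_alt = int(round(0.2 * sfreq)) + 1  # 0.2 sec of taps (odd length)
h_ls = signal.firls(n_alt, freq, gain, nyq=nyq)
h_remez = signal.remez(n_alt, freq, gain[::2], Hz=sfreq)
plot_filter(h_ls, sfreq, freq, gain, 'Least-squares (0.2 sec)', flim=flim)
plot_filter(h_remez, sfreq, freq, gain, 'Remez (0.2 sec)', flim=flim)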
###############################################################################
# Since our lowpass is around 40 Hz with a 10 Hz transition, we can actually
# use a shorter filter (5 cycles at 10 Hz = 0.5 sec) and still get okay
# stop-band attenuation:
n = int(round(sfreq * 0.5)) + 1
h = signal.firwin2(n, freq, gain, nyq=nyq)
plot_filter(h, sfreq, freq, gain, 'Windowed 10-Hz transition (0.5 sec)',
flim=flim)
###############################################################################
# But then if we shorten the filter too much (2 cycles of 10 Hz = 0.2 sec),
# our effective stop frequency gets pushed out past 60 Hz:
n = int(round(sfreq * 0.2)) + 1
h = signal.firwin2(n, freq, gain, nyq=nyq)
plot_filter(h, sfreq, freq, gain, 'Windowed 10-Hz transition (0.2 sec)',
flim=flim)
###############################################################################
# If we want to keep the filter reasonably short (0.2 sec), we should probably use
# something more like a 25 Hz transition band (0.2 sec = 5 cycles @ 25 Hz):
trans_bandwidth = 25
f_s = f_p + trans_bandwidth
freq = [0, f_p, f_s, nyq]
h = signal.firwin2(n, freq, gain, nyq=nyq)
plot_filter(h, sfreq, freq, gain, 'Windowed 25-Hz transition (0.2 sec)',
flim=flim)
###############################################################################
# So far we have only discussed *acausal* filtering, which means that each
# sample at each time point :math:`t` is filtered using samples that come
# after (:math:`t + \Delta t`) *and* before (:math:`t - \Delta t`) :math:`t`.
# In this sense, each sample is influenced by samples that come both before
# and after it. This is useful in many cases, especially because it does not
# delay the timing of events.
#
# However, sometimes it can be beneficial to use *causal* filtering,
# whereby each sample :math:`t` is filtered only using time points that came
# before it. A common choice here is a *minimum-phase* filter, which
# concentrates the filter's energy near its start and thereby keeps the
# pass-band delay small.
#
# Note that the delay is variable (whereas for linear/zero-phase filters it
# is constant) but small in the pass-band. Unlike zero-phase filters, which
# require time-shifting backward the output of a linear-phase filtering stage
# (and thus becoming acausal), minimum-phase filters do not require any
# compensation to achieve small delays in the passband. Note that as an
# artifact of the minimum phase filter construction step, the filter does
# not end up being as steep as the linear/zero-phase version.
#
# We can construct a minimum-phase filter from our existing linear-phase
# filter with the ``minimum_phase`` function (that will be in SciPy 0.19's
# :mod:`scipy.signal`), and note that the falloff is not as steep:
h_min = mne.fixes.minimum_phase(h)
plot_filter(h_min, sfreq, freq, gain, 'Minimum-phase', flim=flim)
###############################################################################
# .. _tut_effect_on_signals:
#
# Applying FIR filters
# --------------------
#
# Now lets look at some practical effects of these filters by applying
# them to some data.
#
# Let's construct a Gaussian-windowed sinusoid (i.e., Morlet imaginary part)
# plus noise (random + line). Note that the original, clean signal contains
# frequency content in both the pass band and transition bands of our
# low-pass filter.
dur = 10.
center = 2.
morlet_freq = f_p
tlim = [center - 0.2, center + 0.2]
tticks = [tlim[0], center, tlim[1]]
flim = [20, 70]
x = np.zeros(int(sfreq * dur) + 1)
blip = morlet(sfreq, [morlet_freq], n_cycles=7)[0].imag / 20.
n_onset = int(center * sfreq) - len(blip) // 2
x[n_onset:n_onset + len(blip)] += blip
x_orig = x.copy()
rng = np.random.RandomState(0)
x += rng.randn(len(x)) / 1000.
x += np.sin(2. * np.pi * 60. * np.arange(len(x)) / sfreq) / 2000.
###############################################################################
# Filter it with a shallow cutoff, linear-phase FIR (which allows us to
# compensate for the constant filter delay):
transition_band = 0.25 * f_p
f_s = f_p + transition_band
filter_dur = 6.6 / transition_band / 2. # sec
n = int(sfreq * filter_dur)
freq = [0., f_p, f_s, sfreq / 2.]
gain = [1., 1., 0., 0.]
# The call below is equivalent to designing the filter manually with
# ``signal.firwin`` using the parameters above:
h = mne.filter.create_filter(x, sfreq, l_freq=None, h_freq=f_p,
fir_design='firwin')
x_v16 = np.convolve(h, x)[len(h) // 2:]
plot_filter(h, sfreq, freq, gain, 'MNE-Python 0.16 default', flim=flim)
###############################################################################
# Filter it with a different design mode ``fir_design="firwin2"``, and also
# compensate for the constant filter delay. This method does not produce
# quite as sharp a transition compared to ``fir_design="firwin"``, despite
# being twice as long:
transition_band = 0.25 * f_p
f_s = f_p + transition_band
filter_dur = 6.6 / transition_band # sec
n = int(sfreq * filter_dur)
freq = [0., f_p, f_s, sfreq / 2.]
gain = [1., 1., 0., 0.]
# This would be equivalent:
# h = signal.firwin2(n, freq, gain, nyq=sfreq / 2.)
h = mne.filter.create_filter(x, sfreq, l_freq=None, h_freq=f_p,
fir_design='firwin2')
x_v14 = np.convolve(h, x)[len(h) // 2:]
plot_filter(h, sfreq, freq, gain, 'MNE-Python 0.14 default', flim=flim)
###############################################################################
# This (``fir_design='firwin2'``) was the default type of filter used in
# MNE-Python 0.14 (see :ref:`tut_filtering_in_python`).
#
# Let's also filter with the MNE-Python 0.13 default, which is a
# long-duration, steep cutoff FIR that gets applied twice:
transition_band = 0.5 # Hz
f_s = f_p + transition_band
filter_dur = 10. # sec
n = int(sfreq * filter_dur)
freq = [0., f_p, f_s, sfreq / 2.]
gain = [1., 1., 0., 0.]
# This would be equivalent
# h = signal.firwin2(n, freq, gain, nyq=sfreq / 2.)
h = mne.filter.create_filter(x, sfreq, l_freq=None, h_freq=f_p,
h_trans_bandwidth=transition_band,
filter_length='%ss' % filter_dur,
fir_design='firwin2')
x_v13 = np.convolve(np.convolve(h, x)[::-1], h)[::-1][len(h) - 1:-len(h) - 1]
plot_filter(h, sfreq, freq, gain, 'MNE-Python 0.13 default', flim=flim)
###############################################################################
# Let's also filter it with the MNE-C default, which is a long-duration
# steep-slope FIR filter designed using frequency-domain techniques:
h = mne.filter.design_mne_c_filter(sfreq, l_freq=None, h_freq=f_p + 2.5)
x_mne_c = np.convolve(h, x)[len(h) // 2:]
transition_band = 5 # Hz (default in MNE-C)
f_s = f_p + transition_band
freq = [0., f_p, f_s, sfreq / 2.]
gain = [1., 1., 0., 0.]
plot_filter(h, sfreq, freq, gain, 'MNE-C default', flim=flim)
###############################################################################
# And now an example of a minimum-phase filter:
h = mne.filter.create_filter(x, sfreq, l_freq=None, h_freq=f_p,
phase='minimum', fir_design='firwin')
x_min = np.convolve(h, x)
transition_band = 0.25 * f_p
f_s = f_p + transition_band
filter_dur = 6.6 / transition_band # sec
n = int(sfreq * filter_dur)
freq = [0., f_p, f_s, sfreq / 2.]
gain = [1., 1., 0., 0.]
plot_filter(h, sfreq, freq, gain, 'Minimum-phase filter', flim=flim)
###############################################################################
# Both the MNE-Python 0.13 and MNE-C filters have excellent frequency
# attenuation, but it comes at a cost of potential
# ringing (long-lasting ripples) in the time domain. Ringing can occur with
# steep filters, especially on signals with frequency content around the
# transition band. Our Morlet wavelet signal has power in our transition band,
# and the time-domain ringing is thus more pronounced for the steep-slope,
# long-duration filter than the shorter, shallower-slope filter:
axes = plt.subplots(1, 2)[1]
def plot_signal(x, offset):
t = np.arange(len(x)) / sfreq
axes[0].plot(t, x + offset)
axes[0].set(xlabel='Time (sec)', xlim=t[[0, -1]])
X = fftpack.fft(x)
freqs = fftpack.fftfreq(len(x), 1. / sfreq)
mask = freqs >= 0
X = X[mask]
freqs = freqs[mask]
axes[1].plot(freqs, 20 * np.log10(np.maximum(np.abs(X), 1e-16)))
axes[1].set(xlim=flim)
yticks = np.arange(7) / -30.
yticklabels = ['Original', 'Noisy', 'FIR-firwin (0.16)', 'FIR-firwin2 (0.14)',
'FIR-steep (0.13)', 'FIR-steep (MNE-C)', 'Minimum-phase']
plot_signal(x_orig, offset=yticks[0])
plot_signal(x, offset=yticks[1])
plot_signal(x_v16, offset=yticks[2])
plot_signal(x_v14, offset=yticks[3])
plot_signal(x_v13, offset=yticks[4])
plot_signal(x_mne_c, offset=yticks[5])
plot_signal(x_min, offset=yticks[6])
axes[0].set(xlim=tlim, title='FIR, Lowpass=%d Hz' % f_p, xticks=tticks,
ylim=[-0.200, 0.025], yticks=yticks, yticklabels=yticklabels,)
for text in axes[0].get_yticklabels():
text.set(rotation=45, size=8)
axes[1].set(xlim=flim, ylim=(-60, 10), xlabel='Frequency (Hz)',
ylabel='Magnitude (dB)')
mne.viz.tight_layout()
plt.show()
###############################################################################
# IIR filters
# ===========
#
# MNE-Python also offers IIR filtering functionality that is based on the
# methods from :mod:`scipy.signal`. Specifically, we use the general-purpose
# functions :func:`scipy.signal.iirfilter` and :func:`scipy.signal.iirdesign`,
# which provide unified interfaces to IIR filter design.
#
# Designing IIR filters
# ---------------------
#
# Let's continue with our design of a 40 Hz low-pass filter, and look at
# some trade-offs of different IIR filters.
#
# Often the default IIR filter is a `Butterworth filter`_, which is designed
# to have a *maximally flat pass-band*. Let's look at a few orders of filter,
# i.e., a few different number of coefficients used and therefore steepness
# of the filter:
#
# .. note:: Notice that the group delay (which is related to the phase) of
# the IIR filters below are not constant. In the FIR case, we can
# design so-called linear-phase filters that have a constant group
# delay, and thus compensate for the delay (making the filter
# acausal) if necessary. This cannot be done with IIR filters, as
# they have a non-linear phase (non-constant group delay). As the
# filter order increases, the phase distortion near and in the
# transition band worsens. However, if acausal (forward-backward)
# filtering can be used, e.g. with :func:`scipy.signal.filtfilt`,
# these phase issues can theoretically be mitigated.
sos = signal.iirfilter(2, f_p / nyq, btype='low', ftype='butter', output='sos')
plot_filter(dict(sos=sos), sfreq, freq, gain, 'Butterworth order=2', flim=flim)
# Eventually this will just be from scipy signal.sosfiltfilt, but 0.18 is
# not widely adopted yet (as of June 2016), so we use our wrapper...
sosfiltfilt = mne.fixes.get_sosfiltfilt()
x_shallow = sosfiltfilt(sos, x)
###############################################################################
# The falloff of this filter is not very steep.
#
# .. note:: Here we have made use of second-order sections (SOS)
# by using :func:`scipy.signal.sosfilt` and, under the
# hood, :func:`scipy.signal.zpk2sos` when passing the
# ``output='sos'`` keyword argument to
# :func:`scipy.signal.iirfilter`. The filter definitions
# given in tut_filtering_basics_ use the polynomial
# numerator/denominator (sometimes called "tf") form ``(b, a)``,
# which are theoretically equivalent to the SOS form used here.
# In practice, however, the SOS form can give much better results
# due to issues with numerical precision (see
# :func:`scipy.signal.sosfilt` for an example), so SOS should be
# used when possible to do IIR filtering.
#
# Let's increase the order, and note that now we have better attenuation,
# with a longer impulse response:
sos = signal.iirfilter(8, f_p / nyq, btype='low', ftype='butter', output='sos')
plot_filter(dict(sos=sos), sfreq, freq, gain, 'Butterworth order=8', flim=flim)
x_steep = sosfiltfilt(sos, x)
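# As noted above, the same design can equivalently be written in polynomial
# (b, a) form. A small sketch of the round trip between the two
# representations (added for illustration; for high orders the (b, a) form can
# lose precision, which is why the SOS form is preferred for filtering here):
b, a = signal.sos2tf(sos)
sos_roundtrip = signal.tf2sos(b, a)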
###############################################################################
# There are other types of IIR filters that we can use. For a complete list,
# check out the documentation for :func:`scipy.signal.iirdesign`. Let's
# try a Chebychev (type I) filter, which trades off ripple in the pass-band
# to get better attenuation in the stop-band:
sos = signal.iirfilter(8, f_p / nyq, btype='low', ftype='cheby1', output='sos',
rp=1) # dB of acceptable pass-band ripple
plot_filter(dict(sos=sos), sfreq, freq, gain,
'Chebychev-1 order=8, ripple=1 dB', flim=flim)
###############################################################################
# And if we can live with even more ripple, we can get it slightly steeper,
# but the impulse response begins to ring substantially longer (note the
# different x-axis scale):
sos = signal.iirfilter(8, f_p / nyq, btype='low', ftype='cheby1', output='sos',
rp=6)
plot_filter(dict(sos=sos), sfreq, freq, gain,
'Chebychev-1 order=8, ripple=6 dB', flim=flim)
###############################################################################
# Applying IIR filters
# --------------------
#
# Now let's look at how our shallow and steep Butterworth IIR filters
# perform on our Morlet signal from before:
axes = plt.subplots(1, 2)[1]
yticks = np.arange(4) / -30.
yticklabels = ['Original', 'Noisy', 'Butterworth-2', 'Butterworth-8']
plot_signal(x_orig, offset=yticks[0])
plot_signal(x, offset=yticks[1])
plot_signal(x_shallow, offset=yticks[2])
plot_signal(x_steep, offset=yticks[3])
axes[0].set(xlim=tlim, title='IIR, Lowpass=%d Hz' % f_p, xticks=tticks,
ylim=[-0.125, 0.025], yticks=yticks, yticklabels=yticklabels,)
for text in axes[0].get_yticklabels():
text.set(rotation=45, size=8)
axes[1].set(xlim=flim, ylim=(-60, 10), xlabel='Frequency (Hz)',
ylabel='Magnitude (dB)')
mne.viz.adjust_axes(axes)
mne.viz.tight_layout()
plt.show()
###############################################################################
# Some pitfalls of filtering
# ==========================
#
# Multiple recent papers have noted potential risks of drawing
# errant inferences due to misapplication of filters.
#
# Low-pass problems
# -----------------
#
# Filters in general, especially those that are acausal (zero-phase), can make
# activity appear to occur earlier or later than it truly did. As
# mentioned in VanRullen 2011 [3]_, the low-pass filters commonly used at the
# time were shown to create artifacts when they were applied to simulated
# data. However, such deleterious effects were minimal in many real-world
# examples in Rousselet 2012 [5]_.
#
# Perhaps more revealing, it was noted in Widmann & Schröger 2012 [6]_ that
# the problematic low-pass filters from VanRullen 2011 [3]_:
#
# 1. Used a least-squares design (like :func:`scipy.signal.firls`) that
# included "do-not-care" transition regions, which can lead to
# uncontrolled behavior.
# 2. Had a filter length that was independent of the transition bandwidth,
# which can cause excessive ringing and signal distortion.
#
# .. _tut_filtering_hp_problems:
#
# High-pass problems
# ------------------
#
# When it comes to high-pass filtering, the use of corner frequencies above
# 0.1 Hz was found in Acunzo *et al.* 2012 [4]_ to:
#
# "...generate a systematic bias easily leading to misinterpretations of
#    neural activity."
#
# In a related paper, Widmann *et al.* 2015 [7]_ also came to suggest a 0.1 Hz
# highpass. More evidence of such distortions followed in Tanner *et al.*
# 2015 [8]_: using data from language ERP studies of semantic and syntactic
# processing (i.e., N400 and P600), they showed that a high-pass above 0.3 Hz
# caused significant effects to be introduced implausibly early compared to the
# unfiltered data. From this, the authors suggested the optimal high-pass
# value for language processing to be 0.1 Hz.
#
# We can recreate a problematic simulation from Tanner *et al.* 2015 [8]_:
#
# "The simulated component is a single-cycle cosine wave with an amplitude
# of 5µV, onset of 500 ms poststimulus, and duration of 800 ms. The
# simulated component was embedded in 20 s of zero values to avoid
# filtering edge effects... Distortions [were] caused by 2 Hz low-pass and
# high-pass filters... No visible distortion to the original waveform
# [occurred] with 30 Hz low-pass and 0.01 Hz high-pass filters...
# Filter frequencies correspond to the half-amplitude (-6 dB) cutoff
# (12 dB/octave roll-off)."
#
# .. note:: This simulated signal contains energy not just within the
# pass-band, but also within the transition and stop-bands -- perhaps
# most easily understood because the signal has a non-zero DC value,
# but also because it is a shifted cosine that has been
# *windowed* (here multiplied by a rectangular window), which
# makes the cosine and DC frequencies spread to other frequencies
# (multiplication in time is convolution in frequency, so multiplying
# by a rectangular window in the time domain means convolving a sinc
# function with the impulses at DC and the cosine frequency in the
# frequency domain).
#
x = np.zeros(int(2 * sfreq))
t = np.arange(0, len(x)) / sfreq - 0.2
onset = np.where(t >= 0.5)[0][0]
cos_t = np.arange(0, int(sfreq * 0.8)) / sfreq
sig = 2.5 - 2.5 * np.cos(2 * np.pi * (1. / 0.8) * cos_t)
x[onset:onset + len(sig)] = sig
iir_lp_30 = signal.iirfilter(2, 30. / sfreq, btype='lowpass')
iir_hp_p1 = signal.iirfilter(2, 0.1 / sfreq, btype='highpass')
iir_lp_2 = signal.iirfilter(2, 2. / sfreq, btype='lowpass')
iir_hp_2 = signal.iirfilter(2, 2. / sfreq, btype='highpass')
x_lp_30 = signal.filtfilt(iir_lp_30[0], iir_lp_30[1], x, padlen=0)
x_hp_p1 = signal.filtfilt(iir_hp_p1[0], iir_hp_p1[1], x, padlen=0)
x_lp_2 = signal.filtfilt(iir_lp_2[0], iir_lp_2[1], x, padlen=0)
x_hp_2 = signal.filtfilt(iir_hp_2[0], iir_hp_2[1], x, padlen=0)
xlim = t[[0, -1]]
ylim = [-2, 6]
xlabel = 'Time (sec)'
ylabel = r'Amplitude ($\mu$V)'
tticks = [0, 0.5, 1.3, t[-1]]
axes = plt.subplots(2, 2)[1].ravel()
for ax, x_f, title in zip(axes, [x_lp_2, x_lp_30, x_hp_2, x_hp_p1],
                          ['LP$_2$', 'LP$_{30}$', 'HP$_2$', 'HP$_{0.1}$']):
ax.plot(t, x, color='0.5')
ax.plot(t, x_f, color='k', linestyle='--')
ax.set(ylim=ylim, xlim=xlim, xticks=tticks,
title=title, xlabel=xlabel, ylabel=ylabel)
mne.viz.adjust_axes(axes)
mne.viz.tight_layout()
plt.show()
###############################################################################
# Similarly, in a P300 paradigm reported by Kappenman & Luck 2010 [12]_,
# they found that applying a 1 Hz high-pass decreased the probability of
# finding a significant difference in the N100 response, likely because
# the P300 response was smeared (and inverted) in time by the high-pass
# filter such that it tended to cancel out the increased N100. However,
# they nonetheless note that some high-passing can still be useful to deal
# with drifts in the data.
#
# Even though these papers generally advise a 0.1 Hz or lower frequency for
# a high-pass, it is important to keep in mind (as most authors note) that
# filtering choices should depend on the frequency content of both the
# signal(s) of interest and the noise to be suppressed. For example, in
# some of the MNE-Python examples involving :ref:`ch_sample_data`,
# high-pass values of around 1 Hz are used when looking at auditory
# or visual N100 responses, because we analyze standard (not deviant) trials
# and thus expect that contamination by later or slower components will
# be limited.
#
# Baseline problems (or solutions?)
# ---------------------------------
#
# In an evolving discussion, Tanner *et al.* 2015 [8]_ suggest using baseline
# correction to remove slow drifts in data. However, Maess *et al.* 2016 [9]_
# suggest that baseline correction, which is a form of high-passing, does
# not offer substantial advantages over standard high-pass filtering.
# Tanner *et al.* [10]_ rebutted that baseline correction can correct for
# problems with filtering.
#
# To see what they mean, consider again our old simulated signal ``x`` from
# before:
def baseline_plot(x):
all_axes = plt.subplots(3, 2)[1]
for ri, (axes, freq) in enumerate(zip(all_axes, [0.1, 0.3, 0.5])):
for ci, ax in enumerate(axes):
if ci == 0:
iir_hp = signal.iirfilter(4, freq / sfreq, btype='highpass',
output='sos')
x_hp = sosfiltfilt(iir_hp, x, padlen=0)
else:
x_hp -= x_hp[t < 0].mean()
ax.plot(t, x, color='0.5')
ax.plot(t, x_hp, color='k', linestyle='--')
if ri == 0:
ax.set(title=('No ' if ci == 0 else '') +
'Baseline Correction')
ax.set(xticks=tticks, ylim=ylim, xlim=xlim, xlabel=xlabel)
ax.set_ylabel('%0.1f Hz' % freq, rotation=0,
horizontalalignment='right')
mne.viz.adjust_axes(axes)
mne.viz.tight_layout()
plt.suptitle(title)
plt.show()
baseline_plot(x)
###############################################################################
# In response, Maess *et al.* 2016 [11]_ note that these simulations do not
# address cases of pre-stimulus activity that is shared across conditions, as
# applying baseline correction will effectively copy the topology outside the
# baseline period. We can see this if we give our signal ``x`` some
# consistent pre-stimulus activity, which makes everything look bad.
#
# .. note:: An important thing to keep in mind with these plots is that they
# are for a single simulated sensor. In multielectrode recordings
#           the topology (i.e., spatial pattern) of the pre-stimulus activity
# will leak into the post-stimulus period. This will likely create a
# spatially varying distortion of the time-domain signals, as the
# averaged pre-stimulus spatial pattern gets subtracted from the
# sensor time courses.
#
# Putting some activity in the baseline period:
n_pre = (t < 0).sum()
sig_pre = 1 - np.cos(2 * np.pi * np.arange(n_pre) / (0.5 * n_pre))
x[:n_pre] += sig_pre
baseline_plot(x)
###############################################################################
# Both groups seem to acknowledge that the choices of filtering cutoffs, and
# perhaps even the application of baseline correction, depend on the
# characteristics of the data being investigated, especially when it comes to:
#
# 1. The frequency content of the underlying evoked activity relative
# to the filtering parameters.
# 2. The validity of the assumption of no consistent evoked activity
# in the baseline period.
#
# We thus recommend carefully applying baseline correction and/or high-pass
# values based on the characteristics of the data to be analyzed.
#
#
# Filtering defaults
# ==================
#
# .. _tut_filtering_in_python:
#
# Defaults in MNE-Python
# ----------------------
#
# Most often, filtering in MNE-Python is done at the :class:`mne.io.Raw` level,
# and thus :func:`mne.io.Raw.filter` is used. This function under the hood
# (among other things) calls :func:`mne.filter.filter_data` to actually
# filter the data, which by default applies a zero-phase FIR filter designed
# using :func:`scipy.signal.firwin`. In Widmann *et al.* 2015 [7]_, they
# suggest a specific set of parameters to use for high-pass filtering,
# including:
#
# "... providing a transition bandwidth of 25% of the lower passband
# edge but, where possible, not lower than 2 Hz and otherwise the
#     distance from the passband edge to the critical frequency."
#
# In practice, this means that for each high-pass value ``l_freq`` or
# low-pass value ``h_freq`` below, you would get this corresponding
# ``l_trans_bandwidth`` or ``h_trans_bandwidth``, respectively,
# if the sample rate were 100 Hz (i.e., Nyquist frequency of 50 Hz):
#
# +------------------+-------------------+-------------------+
# | l_freq or h_freq | l_trans_bandwidth | h_trans_bandwidth |
# +==================+===================+===================+
# | 0.01 | 0.01 | 2.0 |
# +------------------+-------------------+-------------------+
# | 0.1 | 0.1 | 2.0 |
# +------------------+-------------------+-------------------+
# | 1.0 | 1.0 | 2.0 |
# +------------------+-------------------+-------------------+
# | 2.0 | 2.0 | 2.0 |
# +------------------+-------------------+-------------------+
# | 4.0 | 2.0 | 2.0 |
# +------------------+-------------------+-------------------+
# | 8.0 | 2.0 | 2.0 |
# +------------------+-------------------+-------------------+
# | 10.0 | 2.5 | 2.5 |
# +------------------+-------------------+-------------------+
# | 20.0 | 5.0 | 5.0 |
# +------------------+-------------------+-------------------+
# | 40.0 | 10.0 | 10.0 |
# +------------------+-------------------+-------------------+
# | 45.0 | 11.25 | 5.0 |
# +------------------+-------------------+-------------------+
# | 48.0 | 12.0 | 2.0 |
# +------------------+-------------------+-------------------+
#
# MNE-Python has adopted this definition for its high-pass (and low-pass)
# transition bandwidth choices when using ``l_trans_bandwidth='auto'`` and
# ``h_trans_bandwidth='auto'``.
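# A sketch of the rule behind the table above (added for illustration, not
# MNE-Python's actual implementation; ``nyq`` is the Nyquist frequency, 50 Hz
# in the table):
def _auto_trans_bandwidth(edge, nyq=50.):
    """Return (l_trans_bandwidth, h_trans_bandwidth) for a pass-band edge."""
    l_trans = min(max(0.25 * edge, 2.), edge)        # cannot extend below 0 Hz
    h_trans = min(max(0.25 * edge, 2.), nyq - edge)  # cannot extend past nyq
    return l_trans, h_trans
# e.g. _auto_trans_bandwidth(45.) returns (11.25, 5.0), as in the table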
#
# To choose the filter length automatically with ``filter_length='auto'``,
# the reciprocal of the shortest transition bandwidth is used to ensure
# decent attenuation at the stop frequency. Specifically, the reciprocal
# (in samples) is multiplied by 3.1, 3.3, or 5.0 for the Hann, Hamming,
# or Blackman windows, respectively as selected by the ``fir_window``
# argument for ``fir_design='firwin'``, and double these for
# ``fir_design='firwin2'`` mode.
#
# .. note:: For ``fir_design='firwin2'``, the multiplicative factors are
# doubled compared to what is given in Ifeachor and Jervis [2]_
# (p. 357), as :func:`scipy.signal.firwin2` has a smearing effect
# on the frequency response, which we compensate for by
# increasing the filter length. This is why
#           ``fir_design='firwin'`` is preferred to ``fir_design='firwin2'``.
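# A sketch of the length rule above (added for illustration, not MNE-Python's
# actual implementation; here the length is rounded up to an odd number of
# samples):
def _auto_filter_length(trans_bandwidth, sfreq, fir_window='hamming',
                        fir_design='firwin'):
    mult = dict(hann=3.1, hamming=3.3, blackman=5.0)[fir_window]
    if fir_design == 'firwin2':
        mult *= 2.
    n_taps = int(np.ceil(mult / float(trans_bandwidth) * sfreq))
    return n_taps + 1 if n_taps % 2 == 0 else n_taps
# e.g. a 10 Hz transition at sfreq=1000 Hz with a Hamming window gives
# _auto_filter_length(10., 1000.) == 331 samples (about 0.33 sec)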
#
# In 0.14, we default to using a Hamming window in filter design, as it
# provides up to 53 dB of stop-band attenuation with small pass-band ripple.
#
# .. note:: In band-pass applications, often a low-pass filter can operate
# effectively with fewer samples than the high-pass filter, so
# it is advisable to apply the high-pass and low-pass separately
# when using ``fir_design='firwin2'``. For design mode
# ``fir_design='firwin'``, there is no need to separate the
# operations, as the lowpass and highpass elements are constructed
# separately to meet the transition band requirements.
#
# For more information on how to use the
# MNE-Python filtering functions with real data, consult the preprocessing
# tutorial on
# :ref:`sphx_glr_auto_tutorials_plot_artifacts_correction_filtering.py`.
#
# Defaults in MNE-C
# -----------------
# MNE-C by default uses:
#
# 1. 5 Hz transition band for low-pass filters.
# 2. 3-sample transition band for high-pass filters.
# 3. Filter length of 8197 samples.
#
# The filter is designed in the frequency domain, creating a linear-phase
# filter such that the delay is compensated for as is done with the MNE-Python
# ``phase='zero'`` filtering option.
#
# Squared-cosine ramps are used in the transition regions. Because these
# are used in place of more gradual (e.g., linear) transitions,
# a given transition width will result in more temporal ringing but also more
# rapid attenuation than the same transition width in windowed FIR designs.
#
# The default filter length will generally have excellent attenuation
# but long ringing for the sample rates typically encountered in M/EEG data
# (e.g. 500-2000 Hz).
#
# Defaults in other software
# --------------------------
# A good but possibly outdated comparison of filtering in various software
# packages is available in [7]_. Briefly:
#
# * EEGLAB
# MNE-Python in 0.14 defaults to behavior very similar to that of EEGLAB,
# see the `EEGLAB filtering FAQ`_ for more information.
# * FieldTrip
# By default FieldTrip applies a forward-backward Butterworth IIR filter
# of order 4 (band-pass and band-stop filters) or 2 (for low-pass and
# high-pass filters). Similar filters can be achieved in MNE-Python when
# filtering with :meth:`raw.filter(..., method='iir') <mne.io.Raw.filter>`
# (see also :func:`mne.filter.construct_iir_filter` for options).
# For more information, see e.g. `FieldTrip band-pass documentation`_.
#
# Reporting Filters
# =================
# On page 45 in Widmann *et al.* [7]_, there is a convenient list of important
# filter parameters that should be reported with each publication:
#
# 1. filter type (high-pass, low-pass, band-pass, band-stop, FIR, IIR)
# 2. cutoff frequency (including definition)
# 3. filter order (or length)
# 4. roll-off or transition bandwidth
# 5. passband ripple and stopband attenuation
# 6. filter delay (zero-phase, linear-phase, non-linear phase) and causality
# 7. direction of computation (one-pass forward/reverse, or two-pass forward and
# reverse)
#
# In the following, we will address how to deal with these parameters in MNE:
#
#
# Filter type
# -----------
# Depending on the function or method used, the filter type can be specified.
# To name an example, in :func:`mne.filter.create_filter`, the relevant
# arguments would be `l_freq`, `h_freq`, `method`, and if the method is FIR:
# `fir_window`, and `fir_design`.
#
#
# Cutoff frequency
# ----------------
# The cutoff of FIR filters in MNE is defined as half-amplitude cutoff in the
# middle of the transition band. That is, if you construct a lowpass FIR filter
# with ``h_freq = 40.``, the filter function will provide a transition
# bandwidth that depends on the `h_trans_bandwidth` argument. The desired
# half-amplitude cutoff of the lowpass FIR filter is then at:
# ``h_freq + transition_bandwidth/2.``.
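# A worked example of the definition above (added for illustration, using the
# 'auto' transition bandwidth of 10 Hz for a 40 Hz lowpass at sfreq=1000 Hz):
lowpass_edge = 40.
trans_bw = 10.
half_amplitude_cutoff = lowpass_edge + trans_bw / 2.  # -> 45.0 Hz (-6 dB point)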
#
# Filter length (order) and transition bandwidth (roll-off)
# ---------------------------------------------------------
# In the :ref:`tut_filtering_in_python` section, we have already talked about
# the default filter lengths and transition bandwidths that are used when no
# custom values are specified using the respective filter function's arguments.
#
# If you want to find out about the filter length and transition bandwidth that
# were used through the 'auto' setting, you can use
# :func:`mne.filter.create_filter` to print out the settings once more:
# Use the same settings as when calling e.g., `raw.filter()`
fir_coefs = mne.filter.create_filter(data=None, # Data is only used for sanity checking, not strictly needed # noqa
sfreq=1000., # sfreq of your data in Hz
l_freq=None,
h_freq=40., # assuming a lowpass of 40 Hz
method='fir',
fir_window='hamming',
fir_design='firwin',
verbose=True)
# See the printed log for the transition bandwidth and filter length
# Alternatively, get the filter length through:
filter_length = fir_coefs.shape[0]
###############################################################################
# .. note:: If you are using an IIR filter, :func:`mne.filter.create_filter`
# will not print a filter length and transition bandwidth to the log.
#           Instead, you can specify the roll-off with the `iir_params`
#           argument or stay with the default, which is a 4th-order
#           (Butterworth) filter; a brief sketch of this follows below.
#
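# A brief, hedged sketch of such a call (the settings mirror the 1000 Hz /
# 40 Hz lowpass example above; whether ``data=None`` is accepted for IIR
# designs may depend on your MNE version):
iir_params = dict(order=4, ftype='butter', output='sos')  # assumed settings
iir_filt = mne.filter.create_filter(data=None, sfreq=1000., l_freq=None,
                                    h_freq=40., method='iir',
                                    iir_params=iir_params, verbose=True)

###############################################################################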
# Passband ripple and stopband attenuation
# ----------------------------------------
#
# When using the standard :func:`scipy.signal.firwin` design (as for FIR
# filters in MNE), the passband ripple and stopband attenuation depend on the
# window used in the design. For standard windows the values are listed in the
# table below (see Ifeachor & Jervis, p. 357 [2]_); a short numerical check of
# the Hamming row follows after the table:
#
# +-------------------------+-----------------+----------------------+
# | Name of window function | Passband ripple | Stopband attenuation |
# +=========================+=================+======================+
# | Hann | 0.0545 dB | 44 dB |
# +-------------------------+-----------------+----------------------+
# | Hamming | 0.0194 dB | 53 dB |
# +-------------------------+-----------------+----------------------+
# | Blackman | 0.0017 dB | 74 dB |
# +-------------------------+-----------------+----------------------+
#
#
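# The short check below (an added illustration; the sampling rate, cutoff, and
# transition width are assumed values) probes the Hamming row of the table
# empirically: design a lowpass with :func:`scipy.signal.firwin` and look at
# the worst-case stopband level of its frequency response.
import numpy as np
from scipy import signal

sfreq, f_c, trans_bw = 1000., 40., 10.         # assumed example values
n_taps = 331                                   # ~3.3 * sfreq / trans_bw, odd
cutoff = (f_c + trans_bw / 2.) / (sfreq / 2.)  # half-amplitude point, normalized
taps = signal.firwin(n_taps, cutoff, window='hamming')
w, h = signal.freqz(taps, worN=8000)
freqs = w * sfreq / (2. * np.pi)               # rad/sample -> Hz
stopband = np.abs(h[freqs > f_c + trans_bw])   # beyond the transition region
print('Worst stopband level: %0.1f dB'
      % (20 * np.log10(stopband.max())))       # close to the 53 dB in the table

###############################################################################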
# Filter delay and direction of computation
# -----------------------------------------
# For reporting this information, it might be sufficient to read the docstring
# of the filter function or method that you apply. For example, the
# docstring of `mne.filter.create_filter` says the following about the
# `phase` parameter:
#
# Phase of the filter, only used if ``method='fir'``.
# By default, a symmetric linear-phase FIR filter is constructed.
# If ``phase='zero'`` (default), the delay of this filter
#     is compensated for. If ``phase='zero-double'``, then this filter
# is applied twice, once forward, and once backward. If 'minimum',
# then a minimum-phase, causal filter will be used.
#
#
# Summary
# =======
#
# When filtering, there are always tradeoffs that should be considered.
# One important tradeoff is between time-domain characteristics (like ringing)
# and frequency-domain attenuation characteristics (like effective transition
# bandwidth). Filters with sharp frequency cutoffs can produce outputs that
# ring for a long time when they operate on signals with frequency content
# in the transition band. In general, therefore, the wider a transition band
# that can be tolerated, the better behaved the filter will be in the time
# domain.
#
# References
# ==========
#
# .. [1] Parks TW, Burrus CS (1987). Digital Filter Design.
# New York: Wiley-Interscience.
# .. [2] Ifeachor, E. C., & Jervis, B. W. (2002). Digital Signal Processing:
# A Practical Approach. Prentice Hall.
# .. [3] Vanrullen, R. (2011). Four common conceptual fallacies in mapping
# the time course of recognition. Perception Science, 2, 365.
# .. [4] Acunzo, D. J., MacKenzie, G., & van Rossum, M. C. W. (2012).
# Systematic biases in early ERP and ERF components as a result
# of high-pass filtering. Journal of Neuroscience Methods,
# 209(1), 212–218. https://doi.org/10.1016/j.jneumeth.2012.06.011
# .. [5] Rousselet, G. A. (2012). Does filtering preclude us from studying
# ERP time-courses? Frontiers in Psychology, 3(131)
# .. [6] Widmann, A., & Schröger, E. (2012). Filter effects and filter
# artifacts in the analysis of electrophysiological data.
# Perception Science, 233.
# .. [7] Widmann, A., Schröger, E., & Maess, B. (2015). Digital filter
# design for electrophysiological data – a practical approach.
# Journal of Neuroscience Methods, 250, 34–46.
# https://doi.org/10.1016/j.jneumeth.2014.08.002
# .. [8] Tanner, D., Morgan-Short, K., & Luck, S. J. (2015).
# How inappropriate high-pass filters can produce artifactual effects
# and incorrect conclusions in ERP studies of language and cognition.
# Psychophysiology, 52(8), 997–1009. https://doi.org/10.1111/psyp.12437
# .. [9] Maess, B., Schröger, E., & Widmann, A. (2016).
# High-pass filters and baseline correction in M/EEG analysis.
# Commentary on: “How inappropriate high-pass filters can produce
# artifacts and incorrect conclusions in ERP studies of language
# and cognition.” Journal of Neuroscience Methods, 266, 164–165.
# .. [10] Tanner, D., Norton, J. J. S., Morgan-Short, K., & Luck, S. J. (2016).
#     On high-pass filter artifacts (they’re real) and baseline correction
#     (it’s a good idea) in ERP/ERMF analysis.
#     Journal of Neuroscience Methods, 266, 166–170.
# .. [11] Maess, B., Schröger, E., & Widmann, A. (2016).
#     High-pass filters and baseline correction in M/EEG analysis-continued
#     discussion. Journal of Neuroscience Methods, 266, 171–172.
# .. [12] Kappenman E. & Luck, S. (2010). The effects of impedance on data
# quality and statistical significance in ERP recordings.
# Psychophysiology, 47, 888-904.
#
# .. _FIR: https://en.wikipedia.org/wiki/Finite_impulse_response
# .. _IIR: https://en.wikipedia.org/wiki/Infinite_impulse_response
# .. _sinc: https://en.wikipedia.org/wiki/Sinc_function
# .. _moving average: https://en.wikipedia.org/wiki/Moving_average
# .. _autoregression: https://en.wikipedia.org/wiki/Autoregressive_model
# .. _Remez: https://en.wikipedia.org/wiki/Remez_algorithm
# .. _matlab firpm: http://www.mathworks.com/help/signal/ref/firpm.html
# .. _matlab fir2: http://www.mathworks.com/help/signal/ref/fir2.html
# .. _matlab firls: http://www.mathworks.com/help/signal/ref/firls.html
# .. _Butterworth filter: https://en.wikipedia.org/wiki/Butterworth_filter
# .. _eeglab filtering faq: https://sccn.ucsd.edu/wiki/Firfilt_FAQ
# .. _fieldtrip band-pass documentation: http://www.fieldtriptoolbox.org/reference/ft_preproc_bandpassfilter # noqa
|
bsd-3-clause
|
asimshankar/tensorflow
|
tensorflow/contrib/learn/python/learn/estimators/linear_test.py
|
15
|
77825
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for estimators.linear."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import json
import tempfile
import numpy as np
from tensorflow.contrib.layers.python.layers import feature_column as feature_column_lib
from tensorflow.contrib.learn.python.learn import experiment
from tensorflow.contrib.learn.python.learn.datasets import base
from tensorflow.contrib.learn.python.learn.estimators import _sklearn
from tensorflow.contrib.learn.python.learn.estimators import estimator
from tensorflow.contrib.learn.python.learn.estimators import estimator_test_utils
from tensorflow.contrib.learn.python.learn.estimators import head as head_lib
from tensorflow.contrib.learn.python.learn.estimators import linear
from tensorflow.contrib.learn.python.learn.estimators import run_config
from tensorflow.contrib.learn.python.learn.estimators import test_data
from tensorflow.contrib.learn.python.learn.metric_spec import MetricSpec
from tensorflow.contrib.linear_optimizer.python import sdca_optimizer as sdca_optimizer_lib
from tensorflow.contrib.metrics.python.ops import metric_ops
from tensorflow.python.feature_column import feature_column_lib as fc_core
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import partitioned_variables
from tensorflow.python.platform import test
from tensorflow.python.training import ftrl
from tensorflow.python.training import input as input_lib
from tensorflow.python.training import server_lib
def _prepare_iris_data_for_logistic_regression():
# Converts iris data to a logistic regression problem.
iris = base.load_iris()
ids = np.where((iris.target == 0) | (iris.target == 1))
iris = base.Dataset(data=iris.data[ids], target=iris.target[ids])
return iris
class LinearClassifierTest(test.TestCase):
def testExperimentIntegration(self):
cont_features = [
feature_column_lib.real_valued_column(
'feature', dimension=4)
]
exp = experiment.Experiment(
estimator=linear.LinearClassifier(
n_classes=3, feature_columns=cont_features),
train_input_fn=test_data.iris_input_multiclass_fn,
eval_input_fn=test_data.iris_input_multiclass_fn)
exp.test()
def testEstimatorContract(self):
estimator_test_utils.assert_estimator_contract(self,
linear.LinearClassifier)
def testTrain(self):
"""Tests that loss goes down with training."""
def input_fn():
return {
'age':
constant_op.constant([1]),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[1]])
language = feature_column_lib.sparse_column_with_hash_bucket('language',
100)
age = feature_column_lib.real_valued_column('age')
classifier = linear.LinearClassifier(feature_columns=[age, language])
classifier.fit(input_fn=input_fn, steps=100)
loss1 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
classifier.fit(input_fn=input_fn, steps=200)
loss2 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
self.assertLess(loss2, loss1)
self.assertLess(loss2, 0.01)
def testJointTrain(self):
"""Tests that loss goes down with training with joint weights."""
def input_fn():
return {
'age':
sparse_tensor.SparseTensor(
values=['1'], indices=[[0, 0]], dense_shape=[1, 1]),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[1]])
language = feature_column_lib.sparse_column_with_hash_bucket('language',
100)
age = feature_column_lib.sparse_column_with_hash_bucket('age', 2)
classifier = linear.LinearClassifier(
_joint_weight=True, feature_columns=[age, language])
classifier.fit(input_fn=input_fn, steps=100)
loss1 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
classifier.fit(input_fn=input_fn, steps=200)
loss2 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
self.assertLess(loss2, loss1)
self.assertLess(loss2, 0.01)
def testMultiClass_MatrixData(self):
"""Tests multi-class classification using matrix data as input."""
feature_column = feature_column_lib.real_valued_column(
'feature', dimension=4)
classifier = linear.LinearClassifier(
n_classes=3, feature_columns=[feature_column])
classifier.fit(input_fn=test_data.iris_input_multiclass_fn, steps=100)
scores = classifier.evaluate(
input_fn=test_data.iris_input_multiclass_fn, steps=100)
self.assertGreater(scores['accuracy'], 0.9)
def testMultiClass_MatrixData_Labels1D(self):
"""Same as the last test, but labels shape is [150] instead of [150, 1]."""
def _input_fn():
iris = base.load_iris()
return {
'feature': constant_op.constant(
iris.data, dtype=dtypes.float32)
}, constant_op.constant(
iris.target, shape=[150], dtype=dtypes.int32)
feature_column = feature_column_lib.real_valued_column(
'feature', dimension=4)
classifier = linear.LinearClassifier(
n_classes=3, feature_columns=[feature_column])
classifier.fit(input_fn=_input_fn, steps=100)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self.assertGreater(scores['accuracy'], 0.9)
def testMultiClass_NpMatrixData(self):
"""Tests multi-class classification using numpy matrix data as input."""
iris = base.load_iris()
train_x = iris.data
train_y = iris.target
feature_column = feature_column_lib.real_valued_column('', dimension=4)
classifier = linear.LinearClassifier(
n_classes=3, feature_columns=[feature_column])
classifier.fit(x=train_x, y=train_y, steps=100)
scores = classifier.evaluate(x=train_x, y=train_y, steps=1)
self.assertGreater(scores['accuracy'], 0.9)
def testMultiClassLabelKeys(self):
"""Tests n_classes > 2 with label_keys vocabulary for labels."""
# Byte literals needed for python3 test to pass.
label_keys = [b'label0', b'label1', b'label2']
def _input_fn(num_epochs=None):
features = {
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
labels = constant_op.constant(
[[label_keys[1]], [label_keys[0]], [label_keys[0]]],
dtype=dtypes.string)
return features, labels
language_column = feature_column_lib.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
classifier = linear.LinearClassifier(
n_classes=3,
feature_columns=[language_column],
label_keys=label_keys)
classifier.fit(input_fn=_input_fn, steps=50)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self.assertGreater(scores['accuracy'], 0.9)
self.assertIn('loss', scores)
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predicted_classes = list(
classifier.predict_classes(
input_fn=predict_input_fn, as_iterable=True))
self.assertEqual(3, len(predicted_classes))
for pred in predicted_classes:
self.assertIn(pred, label_keys)
predictions = list(
classifier.predict(input_fn=predict_input_fn, as_iterable=True))
self.assertAllEqual(predicted_classes, predictions)
def testLogisticRegression_MatrixData(self):
"""Tests binary classification using matrix data as input."""
def _input_fn():
iris = _prepare_iris_data_for_logistic_regression()
return {
'feature': constant_op.constant(
iris.data, dtype=dtypes.float32)
}, constant_op.constant(
iris.target, shape=[100, 1], dtype=dtypes.int32)
feature_column = feature_column_lib.real_valued_column(
'feature', dimension=4)
classifier = linear.LinearClassifier(feature_columns=[feature_column])
classifier.fit(input_fn=_input_fn, steps=100)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self.assertGreater(scores['accuracy'], 0.9)
def testEstimatorWithCoreFeatureColumns(self):
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[.8], [0.2], [.1]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([[1], [0], [0]], dtype=dtypes.int32)
language_column = fc_core.categorical_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [language_column, fc_core.numeric_column('age')]
classifier = linear.LinearClassifier(feature_columns=feature_columns)
classifier.fit(input_fn=_input_fn, steps=100)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self.assertGreater(scores['accuracy'], 0.9)
def testLogisticRegression_MatrixData_Labels1D(self):
"""Same as the last test, but labels shape is [100] instead of [100, 1]."""
def _input_fn():
iris = _prepare_iris_data_for_logistic_regression()
return {
'feature': constant_op.constant(
iris.data, dtype=dtypes.float32)
}, constant_op.constant(
iris.target, shape=[100], dtype=dtypes.int32)
feature_column = feature_column_lib.real_valued_column(
'feature', dimension=4)
classifier = linear.LinearClassifier(feature_columns=[feature_column])
classifier.fit(input_fn=_input_fn, steps=100)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self.assertGreater(scores['accuracy'], 0.9)
def testLogisticRegression_NpMatrixData(self):
"""Tests binary classification using numpy matrix data as input."""
iris = _prepare_iris_data_for_logistic_regression()
train_x = iris.data
train_y = iris.target
feature_columns = [feature_column_lib.real_valued_column('', dimension=4)]
classifier = linear.LinearClassifier(feature_columns=feature_columns)
classifier.fit(x=train_x, y=train_y, steps=100)
scores = classifier.evaluate(x=train_x, y=train_y, steps=1)
self.assertGreater(scores['accuracy'], 0.9)
def testWeightAndBiasNames(self):
"""Tests that weight and bias names haven't changed."""
feature_column = feature_column_lib.real_valued_column(
'feature', dimension=4)
classifier = linear.LinearClassifier(
n_classes=3, feature_columns=[feature_column])
classifier.fit(input_fn=test_data.iris_input_multiclass_fn, steps=100)
variable_names = classifier.get_variable_names()
self.assertIn('linear/feature/weight', variable_names)
self.assertIn('linear/bias_weight', variable_names)
self.assertEqual(
4, len(classifier.get_variable_value('linear/feature/weight')))
self.assertEqual(
3, len(classifier.get_variable_value('linear/bias_weight')))
def testCustomOptimizerByObject(self):
"""Tests multi-class classification using matrix data as input."""
feature_column = feature_column_lib.real_valued_column(
'feature', dimension=4)
classifier = linear.LinearClassifier(
n_classes=3,
optimizer=ftrl.FtrlOptimizer(learning_rate=0.1),
feature_columns=[feature_column])
classifier.fit(input_fn=test_data.iris_input_multiclass_fn, steps=100)
scores = classifier.evaluate(
input_fn=test_data.iris_input_multiclass_fn, steps=100)
self.assertGreater(scores['accuracy'], 0.9)
def testCustomOptimizerByString(self):
"""Tests multi-class classification using matrix data as input."""
feature_column = feature_column_lib.real_valued_column(
'feature', dimension=4)
def _optimizer():
return ftrl.FtrlOptimizer(learning_rate=0.1)
classifier = linear.LinearClassifier(
n_classes=3, optimizer=_optimizer, feature_columns=[feature_column])
classifier.fit(input_fn=test_data.iris_input_multiclass_fn, steps=100)
scores = classifier.evaluate(
input_fn=test_data.iris_input_multiclass_fn, steps=100)
self.assertGreater(scores['accuracy'], 0.9)
def testCustomOptimizerByFunction(self):
"""Tests multi-class classification using matrix data as input."""
feature_column = feature_column_lib.real_valued_column(
'feature', dimension=4)
classifier = linear.LinearClassifier(
n_classes=3, optimizer='Ftrl', feature_columns=[feature_column])
classifier.fit(input_fn=test_data.iris_input_multiclass_fn, steps=100)
scores = classifier.evaluate(
input_fn=test_data.iris_input_multiclass_fn, steps=100)
self.assertGreater(scores['accuracy'], 0.9)
def testCustomMetrics(self):
"""Tests custom evaluation metrics."""
def _input_fn(num_epochs=None):
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
labels = constant_op.constant([[1], [0], [0], [0]], dtype=dtypes.float32)
features = {
'x':
input_lib.limit_epochs(
array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
num_epochs=num_epochs)
}
return features, labels
def _my_metric_op(predictions, labels):
# For the case of binary classification, the 2nd column of "predictions"
# denotes the model predictions.
predictions = array_ops.strided_slice(
predictions, [0, 1], [-1, 2], end_mask=1)
return math_ops.reduce_sum(math_ops.multiply(predictions, labels))
classifier = linear.LinearClassifier(
feature_columns=[feature_column_lib.real_valued_column('x')])
classifier.fit(input_fn=_input_fn, steps=100)
scores = classifier.evaluate(
input_fn=_input_fn,
steps=100,
metrics={
'my_accuracy':
MetricSpec(
metric_fn=metric_ops.streaming_accuracy,
prediction_key='classes'),
'my_precision':
MetricSpec(
metric_fn=metric_ops.streaming_precision,
prediction_key='classes'),
'my_metric':
MetricSpec(
metric_fn=_my_metric_op, prediction_key='probabilities')
})
self.assertTrue(
set(['loss', 'my_accuracy', 'my_precision', 'my_metric']).issubset(
set(scores.keys())))
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions = np.array(list(classifier.predict_classes(
input_fn=predict_input_fn)))
self.assertEqual(
_sklearn.accuracy_score([1, 0, 0, 0], predictions),
scores['my_accuracy'])
# Tests the case where the prediction_key is neither "classes" nor
# "probabilities".
with self.assertRaisesRegexp(KeyError, 'bad_type'):
classifier.evaluate(
input_fn=_input_fn,
steps=100,
metrics={
'bad_name':
MetricSpec(
metric_fn=metric_ops.streaming_auc,
prediction_key='bad_type')
})
# Tests the case where the 2nd element of the key is neither "classes" nor
# "probabilities".
with self.assertRaises(KeyError):
classifier.evaluate(
input_fn=_input_fn,
steps=100,
metrics={('bad_name', 'bad_type'): metric_ops.streaming_auc})
# Tests the case where the tuple of the key doesn't have 2 elements.
with self.assertRaises(ValueError):
classifier.evaluate(
input_fn=_input_fn,
steps=100,
metrics={
('bad_length_name', 'classes', 'bad_length'):
metric_ops.streaming_accuracy
})
def testLogisticFractionalLabels(self):
"""Tests logistic training with fractional labels."""
def input_fn(num_epochs=None):
return {
'age':
input_lib.limit_epochs(
constant_op.constant([[1], [2]]), num_epochs=num_epochs),
}, constant_op.constant(
[[.7], [0]], dtype=dtypes.float32)
age = feature_column_lib.real_valued_column('age')
classifier = linear.LinearClassifier(
feature_columns=[age], config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=input_fn, steps=500)
predict_input_fn = functools.partial(input_fn, num_epochs=1)
predictions_proba = list(
classifier.predict_proba(input_fn=predict_input_fn))
# Prediction probabilities mirror the labels column, which proves that the
# classifier learns from float input.
self.assertAllClose([[.3, .7], [1., 0.]], predictions_proba, atol=.1)
def testTrainWithPartitionedVariables(self):
"""Tests training with partitioned variables."""
def _input_fn():
features = {
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
labels = constant_op.constant([[1], [0], [0]])
return features, labels
sparse_features = [
# The given hash_bucket_size results in variables larger than the
# default min_slice_size attribute, so the variables are partitioned.
feature_column_lib.sparse_column_with_hash_bucket(
'language', hash_bucket_size=2e7)
]
tf_config = {
'cluster': {
run_config.TaskType.PS: ['fake_ps_0', 'fake_ps_1']
}
}
with test.mock.patch.dict('os.environ',
{'TF_CONFIG': json.dumps(tf_config)}):
config = run_config.RunConfig()
# Because we did not start a distributed cluster, we need to pass an
# empty ClusterSpec, otherwise the device_setter will look for
# distributed jobs, such as "/job:ps" which are not present.
config._cluster_spec = server_lib.ClusterSpec({})
classifier = linear.LinearClassifier(
feature_columns=sparse_features, config=config)
classifier.fit(input_fn=_input_fn, steps=200)
loss = classifier.evaluate(input_fn=_input_fn, steps=1)['loss']
self.assertLess(loss, 0.07)
def testTrainSaveLoad(self):
"""Tests that insures you can save and reload a trained model."""
def input_fn(num_epochs=None):
return {
'age':
input_lib.limit_epochs(
constant_op.constant([1]), num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1]),
}, constant_op.constant([[1]])
language = feature_column_lib.sparse_column_with_hash_bucket('language',
100)
age = feature_column_lib.real_valued_column('age')
model_dir = tempfile.mkdtemp()
classifier = linear.LinearClassifier(
model_dir=model_dir, feature_columns=[age, language])
classifier.fit(input_fn=input_fn, steps=30)
predict_input_fn = functools.partial(input_fn, num_epochs=1)
out1_class = list(
classifier.predict_classes(
input_fn=predict_input_fn, as_iterable=True))
out1_proba = list(
classifier.predict_proba(
input_fn=predict_input_fn, as_iterable=True))
del classifier
classifier2 = linear.LinearClassifier(
model_dir=model_dir, feature_columns=[age, language])
out2_class = list(
classifier2.predict_classes(
input_fn=predict_input_fn, as_iterable=True))
out2_proba = list(
classifier2.predict_proba(
input_fn=predict_input_fn, as_iterable=True))
self.assertTrue(np.array_equal(out1_class, out2_class))
self.assertTrue(np.array_equal(out1_proba, out2_proba))
def testWeightColumn(self):
"""Tests training with given weight column."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# First row has more weight than others. Model should fit (y=x) better
# than (y=Not(x)) due to the relative higher weight of the first row.
labels = constant_op.constant([[1], [0], [0], [0]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[100.], [3.], [2.], [2.]])
}
return features, labels
def _input_fn_eval():
# Create 4 rows (y = x)
labels = constant_op.constant([[1], [1], [1], [1]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[1.], [1.], [1.], [1.]])
}
return features, labels
classifier = linear.LinearClassifier(
weight_column_name='w',
feature_columns=[feature_column_lib.real_valued_column('x')],
config=run_config.RunConfig(tf_random_seed=3))
classifier.fit(input_fn=_input_fn_train, steps=100)
scores = classifier.evaluate(input_fn=_input_fn_eval, steps=1)
# All examples in eval data set are y=x.
self.assertGreater(scores['labels/actual_label_mean'], 0.9)
# If there were no weight column, model would learn y=Not(x). Because of
# weights, it learns y=x.
self.assertGreater(scores['labels/prediction_mean'], 0.9)
# All examples in eval data set are y=x. So if weight column were ignored,
# then accuracy would be zero. Because of weights, accuracy should be close
# to 1.0.
self.assertGreater(scores['accuracy'], 0.9)
scores_train_set = classifier.evaluate(input_fn=_input_fn_train, steps=1)
# Considering weights, the mean label should be close to 1.0.
# If weights were ignored, it would be 0.25.
self.assertGreater(scores_train_set['labels/actual_label_mean'], 0.9)
# The classifier has learned y=x. If weight column were ignored in
# evaluation, then accuracy for the train set would be 0.25.
# Because weight is not ignored, accuracy is greater than 0.6.
self.assertGreater(scores_train_set['accuracy'], 0.6)
def testWeightColumnLoss(self):
"""Test ensures that you can specify per-example weights for loss."""
def _input_fn():
features = {
'age': constant_op.constant([[20], [20], [20]]),
'weights': constant_op.constant([[100], [1], [1]]),
}
labels = constant_op.constant([[1], [0], [0]])
return features, labels
age = feature_column_lib.real_valued_column('age')
classifier = linear.LinearClassifier(feature_columns=[age])
classifier.fit(input_fn=_input_fn, steps=100)
loss_unweighted = classifier.evaluate(input_fn=_input_fn, steps=1)['loss']
classifier = linear.LinearClassifier(
feature_columns=[age], weight_column_name='weights')
classifier.fit(input_fn=_input_fn, steps=100)
loss_weighted = classifier.evaluate(input_fn=_input_fn, steps=1)['loss']
self.assertLess(loss_weighted, loss_unweighted)
def testExport(self):
"""Tests that export model for servo works."""
def input_fn():
return {
'age':
constant_op.constant([1]),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[1]])
language = feature_column_lib.sparse_column_with_hash_bucket('language',
100)
age = feature_column_lib.real_valued_column('age')
classifier = linear.LinearClassifier(feature_columns=[age, language])
classifier.fit(input_fn=input_fn, steps=100)
export_dir = tempfile.mkdtemp()
classifier.export(export_dir)
def testDisableCenteredBias(self):
"""Tests that we can disable centered bias."""
def input_fn():
return {
'age':
constant_op.constant([1]),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[1]])
language = feature_column_lib.sparse_column_with_hash_bucket('language',
100)
age = feature_column_lib.real_valued_column('age')
classifier = linear.LinearClassifier(
feature_columns=[age, language], enable_centered_bias=False)
classifier.fit(input_fn=input_fn, steps=100)
self.assertNotIn('centered_bias_weight', classifier.get_variable_names())
def testEnableCenteredBias(self):
"""Tests that we can enable centered bias."""
def input_fn():
return {
'age':
constant_op.constant([1]),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[1]])
language = feature_column_lib.sparse_column_with_hash_bucket('language',
100)
age = feature_column_lib.real_valued_column('age')
classifier = linear.LinearClassifier(
feature_columns=[age, language], enable_centered_bias=True)
classifier.fit(input_fn=input_fn, steps=100)
self.assertIn('linear/binary_logistic_head/centered_bias_weight',
classifier.get_variable_names())
def testTrainOptimizerWithL1Reg(self):
"""Tests l1 regularized model has higher loss."""
def input_fn():
return {
'language':
sparse_tensor.SparseTensor(
values=['hindi'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[1]])
language = feature_column_lib.sparse_column_with_hash_bucket('language',
100)
classifier_no_reg = linear.LinearClassifier(feature_columns=[language])
classifier_with_reg = linear.LinearClassifier(
feature_columns=[language],
optimizer=ftrl.FtrlOptimizer(
learning_rate=1.0, l1_regularization_strength=100.))
loss_no_reg = classifier_no_reg.fit(input_fn=input_fn, steps=100).evaluate(
input_fn=input_fn, steps=1)['loss']
loss_with_reg = classifier_with_reg.fit(input_fn=input_fn,
steps=100).evaluate(
input_fn=input_fn,
steps=1)['loss']
self.assertLess(loss_no_reg, loss_with_reg)
def testTrainWithMissingFeature(self):
"""Tests that training works with missing features."""
def input_fn():
return {
'language':
sparse_tensor.SparseTensor(
values=['Swahili', 'turkish'],
indices=[[0, 0], [2, 0]],
dense_shape=[3, 1])
}, constant_op.constant([[1], [1], [1]])
language = feature_column_lib.sparse_column_with_hash_bucket('language',
100)
classifier = linear.LinearClassifier(feature_columns=[language])
classifier.fit(input_fn=input_fn, steps=100)
loss = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
self.assertLess(loss, 0.07)
def testSdcaOptimizerRealValuedFeatures(self):
"""Tests LinearClassifier with SDCAOptimizer and real valued features."""
def input_fn():
return {
'example_id': constant_op.constant(['1', '2']),
'maintenance_cost': constant_op.constant([[500.0], [200.0]]),
'sq_footage': constant_op.constant([[800.0], [600.0]]),
'weights': constant_op.constant([[1.0], [1.0]])
}, constant_op.constant([[0], [1]])
maintenance_cost = feature_column_lib.real_valued_column('maintenance_cost')
sq_footage = feature_column_lib.real_valued_column('sq_footage')
sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(
example_id_column='example_id')
classifier = linear.LinearClassifier(
feature_columns=[maintenance_cost, sq_footage],
weight_column_name='weights',
optimizer=sdca_optimizer)
classifier.fit(input_fn=input_fn, steps=100)
loss = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
self.assertLess(loss, 0.05)
def testSdcaOptimizerRealValuedFeatureWithHigherDimension(self):
"""Tests SDCAOptimizer with real valued features of higher dimension."""
# input_fn is identical to the one in testSdcaOptimizerRealValuedFeatures
# where 2 1-dimensional dense features have been replaced by 1 2-dimensional
# feature.
def input_fn():
return {
'example_id':
constant_op.constant(['1', '2']),
'dense_feature':
constant_op.constant([[500.0, 800.0], [200.0, 600.0]])
}, constant_op.constant([[0], [1]])
dense_feature = feature_column_lib.real_valued_column(
'dense_feature', dimension=2)
sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(
example_id_column='example_id')
classifier = linear.LinearClassifier(
feature_columns=[dense_feature], optimizer=sdca_optimizer)
classifier.fit(input_fn=input_fn, steps=100)
loss = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
self.assertLess(loss, 0.05)
def testSdcaOptimizerBucketizedFeatures(self):
"""Tests LinearClassifier with SDCAOptimizer and bucketized features."""
def input_fn():
return {
'example_id': constant_op.constant(['1', '2', '3']),
'price': constant_op.constant([[600.0], [1000.0], [400.0]]),
'sq_footage': constant_op.constant([[1000.0], [600.0], [700.0]]),
'weights': constant_op.constant([[1.0], [1.0], [1.0]])
}, constant_op.constant([[1], [0], [1]])
price_bucket = feature_column_lib.bucketized_column(
feature_column_lib.real_valued_column('price'),
boundaries=[500.0, 700.0])
sq_footage_bucket = feature_column_lib.bucketized_column(
feature_column_lib.real_valued_column('sq_footage'), boundaries=[650.0])
sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(
example_id_column='example_id', symmetric_l2_regularization=1.0)
classifier = linear.LinearClassifier(
feature_columns=[price_bucket, sq_footage_bucket],
weight_column_name='weights',
optimizer=sdca_optimizer)
classifier.fit(input_fn=input_fn, steps=50)
scores = classifier.evaluate(input_fn=input_fn, steps=1)
self.assertGreater(scores['accuracy'], 0.9)
def testSdcaOptimizerSparseFeatures(self):
"""Tests LinearClassifier with SDCAOptimizer and sparse features."""
def input_fn():
return {
'example_id':
constant_op.constant(['1', '2', '3']),
'price':
constant_op.constant([0.4, 0.6, 0.3]),
'country':
sparse_tensor.SparseTensor(
values=['IT', 'US', 'GB'],
indices=[[0, 0], [1, 3], [2, 1]],
dense_shape=[3, 5]),
'weights':
constant_op.constant([[1.0], [1.0], [1.0]])
}, constant_op.constant([[1], [0], [1]])
price = feature_column_lib.real_valued_column('price')
country = feature_column_lib.sparse_column_with_hash_bucket(
'country', hash_bucket_size=5)
sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(
example_id_column='example_id')
classifier = linear.LinearClassifier(
feature_columns=[price, country],
weight_column_name='weights',
optimizer=sdca_optimizer)
classifier.fit(input_fn=input_fn, steps=50)
scores = classifier.evaluate(input_fn=input_fn, steps=1)
self.assertGreater(scores['accuracy'], 0.9)
def testSdcaOptimizerWeightedSparseFeatures(self):
"""LinearClassifier with SDCAOptimizer and weighted sparse features."""
def input_fn():
return {
'example_id':
constant_op.constant(['1', '2', '3']),
'price':
sparse_tensor.SparseTensor(
values=[2., 3., 1.],
indices=[[0, 0], [1, 0], [2, 0]],
dense_shape=[3, 5]),
'country':
sparse_tensor.SparseTensor(
values=['IT', 'US', 'GB'],
indices=[[0, 0], [1, 0], [2, 0]],
dense_shape=[3, 5])
}, constant_op.constant([[1], [0], [1]])
country = feature_column_lib.sparse_column_with_hash_bucket(
'country', hash_bucket_size=5)
country_weighted_by_price = feature_column_lib.weighted_sparse_column(
country, 'price')
sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(
example_id_column='example_id')
classifier = linear.LinearClassifier(
feature_columns=[country_weighted_by_price], optimizer=sdca_optimizer)
classifier.fit(input_fn=input_fn, steps=50)
scores = classifier.evaluate(input_fn=input_fn, steps=1)
self.assertGreater(scores['accuracy'], 0.9)
def testSdcaOptimizerWeightedSparseFeaturesOOVWithNoOOVBuckets(self):
"""LinearClassifier with SDCAOptimizer with OOV features (-1 IDs)."""
def input_fn():
return {
'example_id':
constant_op.constant(['1', '2', '3']),
'price':
sparse_tensor.SparseTensor(
values=[2., 3., 1.],
indices=[[0, 0], [1, 0], [2, 0]],
dense_shape=[3, 5]),
'country':
sparse_tensor.SparseTensor(
# 'GB' is out of the vocabulary.
values=['IT', 'US', 'GB'],
indices=[[0, 0], [1, 0], [2, 0]],
dense_shape=[3, 5])
}, constant_op.constant([[1], [0], [1]])
country = feature_column_lib.sparse_column_with_keys(
'country', keys=['US', 'CA', 'MK', 'IT', 'CN'])
country_weighted_by_price = feature_column_lib.weighted_sparse_column(
country, 'price')
sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(
example_id_column='example_id')
classifier = linear.LinearClassifier(
feature_columns=[country_weighted_by_price], optimizer=sdca_optimizer)
classifier.fit(input_fn=input_fn, steps=50)
scores = classifier.evaluate(input_fn=input_fn, steps=1)
self.assertGreater(scores['accuracy'], 0.9)
def testSdcaOptimizerCrossedFeatures(self):
"""Tests LinearClassifier with SDCAOptimizer and crossed features."""
def input_fn():
return {
'example_id':
constant_op.constant(['1', '2', '3']),
'language':
sparse_tensor.SparseTensor(
values=['english', 'italian', 'spanish'],
indices=[[0, 0], [1, 0], [2, 0]],
dense_shape=[3, 1]),
'country':
sparse_tensor.SparseTensor(
values=['US', 'IT', 'MX'],
indices=[[0, 0], [1, 0], [2, 0]],
dense_shape=[3, 1])
}, constant_op.constant([[0], [0], [1]])
language = feature_column_lib.sparse_column_with_hash_bucket(
'language', hash_bucket_size=5)
country = feature_column_lib.sparse_column_with_hash_bucket(
'country', hash_bucket_size=5)
country_language = feature_column_lib.crossed_column(
[language, country], hash_bucket_size=10)
sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(
example_id_column='example_id')
classifier = linear.LinearClassifier(
feature_columns=[country_language], optimizer=sdca_optimizer)
classifier.fit(input_fn=input_fn, steps=10)
scores = classifier.evaluate(input_fn=input_fn, steps=1)
self.assertGreater(scores['accuracy'], 0.9)
def testSdcaOptimizerMixedFeatures(self):
"""Tests LinearClassifier with SDCAOptimizer and a mix of features."""
def input_fn():
return {
'example_id':
constant_op.constant(['1', '2', '3']),
'price':
constant_op.constant([[0.6], [0.8], [0.3]]),
'sq_footage':
constant_op.constant([[900.0], [700.0], [600.0]]),
'country':
sparse_tensor.SparseTensor(
values=['IT', 'US', 'GB'],
indices=[[0, 0], [1, 3], [2, 1]],
dense_shape=[3, 5]),
'weights':
constant_op.constant([[3.0], [1.0], [1.0]])
}, constant_op.constant([[1], [0], [1]])
price = feature_column_lib.real_valued_column('price')
sq_footage_bucket = feature_column_lib.bucketized_column(
feature_column_lib.real_valued_column('sq_footage'),
boundaries=[650.0, 800.0])
country = feature_column_lib.sparse_column_with_hash_bucket(
'country', hash_bucket_size=5)
sq_footage_country = feature_column_lib.crossed_column(
[sq_footage_bucket, country], hash_bucket_size=10)
sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(
example_id_column='example_id')
classifier = linear.LinearClassifier(
feature_columns=[price, sq_footage_bucket, country, sq_footage_country],
weight_column_name='weights',
optimizer=sdca_optimizer)
classifier.fit(input_fn=input_fn, steps=50)
scores = classifier.evaluate(input_fn=input_fn, steps=1)
self.assertGreater(scores['accuracy'], 0.9)
def testSdcaOptimizerPartitionedVariables(self):
"""Tests LinearClassifier with SDCAOptimizer with partitioned variables."""
def input_fn():
return {
'example_id':
constant_op.constant(['1', '2', '3']),
'price':
constant_op.constant([[0.6], [0.8], [0.3]]),
'sq_footage':
constant_op.constant([[900.0], [700.0], [600.0]]),
'country':
sparse_tensor.SparseTensor(
values=['IT', 'US', 'GB'],
indices=[[0, 0], [1, 3], [2, 1]],
dense_shape=[3, 5]),
'weights':
constant_op.constant([[3.0], [1.0], [1.0]])
}, constant_op.constant([[1], [0], [1]])
price = feature_column_lib.real_valued_column('price')
sq_footage_bucket = feature_column_lib.bucketized_column(
feature_column_lib.real_valued_column('sq_footage'),
boundaries=[650.0, 800.0])
country = feature_column_lib.sparse_column_with_hash_bucket(
'country', hash_bucket_size=5)
sq_footage_country = feature_column_lib.crossed_column(
[sq_footage_bucket, country], hash_bucket_size=10)
sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(
example_id_column='example_id',
partitioner=partitioned_variables.fixed_size_partitioner(
num_shards=2, axis=0))
tf_config = {
'cluster': {
run_config.TaskType.PS: ['fake_ps_0', 'fake_ps_1']
}
}
with test.mock.patch.dict('os.environ',
{'TF_CONFIG': json.dumps(tf_config)}):
config = run_config.RunConfig()
# Because we did not start a distributed cluster, we need to pass an
# empty ClusterSpec, otherwise the device_setter will look for
# distributed jobs, such as "/job:ps" which are not present.
config._cluster_spec = server_lib.ClusterSpec({})
classifier = linear.LinearClassifier(
feature_columns=[price, sq_footage_bucket, country, sq_footage_country],
weight_column_name='weights',
optimizer=sdca_optimizer,
config=config)
classifier.fit(input_fn=input_fn, steps=50)
scores = classifier.evaluate(input_fn=input_fn, steps=1)
print('all scores = {}'.format(scores))
self.assertGreater(scores['accuracy'], 0.9)
def testEval(self):
"""Tests that eval produces correct metrics.
"""
def input_fn():
return {
'age':
constant_op.constant([[1], [2]]),
'language':
sparse_tensor.SparseTensor(
values=['greek', 'chinese'],
indices=[[0, 0], [1, 0]],
dense_shape=[2, 1]),
}, constant_op.constant([[1], [0]])
language = feature_column_lib.sparse_column_with_hash_bucket('language',
100)
age = feature_column_lib.real_valued_column('age')
classifier = linear.LinearClassifier(feature_columns=[age, language])
# Evaluate on trained model
classifier.fit(input_fn=input_fn, steps=100)
classifier.evaluate(input_fn=input_fn, steps=1)
# TODO(ispir): Enable accuracy check after resolving the randomness issue.
# self.assertLess(evaluated_values['loss/mean'], 0.3)
# self.assertGreater(evaluated_values['accuracy/mean'], .95)
class LinearRegressorTest(test.TestCase):
def testExperimentIntegration(self):
cont_features = [
feature_column_lib.real_valued_column(
'feature', dimension=4)
]
exp = experiment.Experiment(
estimator=linear.LinearRegressor(feature_columns=cont_features),
train_input_fn=test_data.iris_input_logistic_fn,
eval_input_fn=test_data.iris_input_logistic_fn)
exp.test()
def testEstimatorContract(self):
estimator_test_utils.assert_estimator_contract(self, linear.LinearRegressor)
def testRegression(self):
"""Tests that loss goes down with training."""
def input_fn():
return {
'age':
constant_op.constant([1]),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[10.]])
language = feature_column_lib.sparse_column_with_hash_bucket('language',
100)
age = feature_column_lib.real_valued_column('age')
classifier = linear.LinearRegressor(feature_columns=[age, language])
classifier.fit(input_fn=input_fn, steps=100)
loss1 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
classifier.fit(input_fn=input_fn, steps=200)
loss2 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
self.assertLess(loss2, loss1)
self.assertLess(loss2, 0.5)
def testRegression_MatrixData(self):
"""Tests regression using matrix data as input."""
cont_features = [
feature_column_lib.real_valued_column(
'feature', dimension=4)
]
regressor = linear.LinearRegressor(
feature_columns=cont_features,
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=test_data.iris_input_multiclass_fn, steps=100)
scores = regressor.evaluate(
input_fn=test_data.iris_input_multiclass_fn, steps=1)
self.assertLess(scores['loss'], 0.2)
def testRegression_TensorData(self):
"""Tests regression using tensor data as input."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant(
[1.0, 0., 0.2], dtype=dtypes.float32)
feature_columns = [
feature_column_lib.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20),
feature_column_lib.real_valued_column('age')
]
regressor = linear.LinearRegressor(
feature_columns=feature_columns,
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=100)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertLess(scores['loss'], 0.2)
def testLoss(self):
"""Tests loss calculation."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# The algorithm should learn (y = 0.25).
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),}
return features, labels
regressor = linear.LinearRegressor(
feature_columns=[feature_column_lib.real_valued_column('x')],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn_train, steps=100)
scores = regressor.evaluate(input_fn=_input_fn_train, steps=1)
# Average square loss = (0.75^2 + 3*0.25^2) / 4 = 0.1875
self.assertAlmostEqual(0.1875, scores['loss'], delta=0.1)
def testLossWithWeights(self):
"""Tests loss calculation with weights."""
def _input_fn_train():
# 4 rows with equal weight, one of them (y = x), three of them (y=Not(x))
# The algorithm should learn (y = 0.25).
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[1.], [1.], [1.], [1.]])
}
return features, labels
def _input_fn_eval():
# 4 rows, with different weights.
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[7.], [1.], [1.], [1.]])
}
return features, labels
regressor = linear.LinearRegressor(
weight_column_name='w',
feature_columns=[feature_column_lib.real_valued_column('x')],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn_train, steps=100)
scores = regressor.evaluate(input_fn=_input_fn_eval, steps=1)
# Weighted average square loss = (7*0.75^2 + 3*0.25^2) / 10 = 0.4125
self.assertAlmostEqual(0.4125, scores['loss'], delta=0.1)
def testTrainWithWeights(self):
"""Tests training with given weight column."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# First row has more weight than others. Model should fit (y=x) better
# than (y=Not(x)) due to the relative higher weight of the first row.
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[100.], [3.], [2.], [2.]])
}
return features, labels
def _input_fn_eval():
# Create 4 rows (y = x)
labels = constant_op.constant([[1.], [1.], [1.], [1.]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[1.], [1.], [1.], [1.]])
}
return features, labels
regressor = linear.LinearRegressor(
weight_column_name='w',
feature_columns=[feature_column_lib.real_valued_column('x')],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn_train, steps=100)
scores = regressor.evaluate(input_fn=_input_fn_eval, steps=1)
# The model should learn (y = x) because of the weights, so the loss should
# be close to zero.
self.assertLess(scores['loss'], 0.1)
def testPredict_AsIterableFalse(self):
"""Tests predict method with as_iterable=False."""
labels = [1.0, 0., 0.2]
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant(labels, dtype=dtypes.float32)
feature_columns = [
feature_column_lib.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20),
feature_column_lib.real_valued_column('age')
]
regressor = linear.LinearRegressor(
feature_columns=feature_columns,
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=100)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertLess(scores['loss'], 0.1)
predicted_scores = regressor.predict_scores(
input_fn=_input_fn, as_iterable=False)
self.assertAllClose(labels, predicted_scores, atol=0.1)
predictions = regressor.predict(input_fn=_input_fn, as_iterable=False)
self.assertAllClose(predicted_scores, predictions)
def testPredict_AsIterable(self):
"""Tests predict method with as_iterable=True."""
labels = [1.0, 0., 0.2]
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant(labels, dtype=dtypes.float32)
feature_columns = [
feature_column_lib.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20),
feature_column_lib.real_valued_column('age')
]
regressor = linear.LinearRegressor(
feature_columns=feature_columns,
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=100)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertLess(scores['loss'], 0.1)
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predicted_scores = list(
regressor.predict_scores(
input_fn=predict_input_fn, as_iterable=True))
self.assertAllClose(labels, predicted_scores, atol=0.1)
predictions = list(
regressor.predict(
input_fn=predict_input_fn, as_iterable=True))
self.assertAllClose(predicted_scores, predictions)
def testCustomMetrics(self):
"""Tests custom evaluation metrics."""
def _input_fn(num_epochs=None):
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x':
input_lib.limit_epochs(
array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
num_epochs=num_epochs)
}
return features, labels
def _my_metric_op(predictions, labels):
return math_ops.reduce_sum(math_ops.multiply(predictions, labels))
regressor = linear.LinearRegressor(
feature_columns=[feature_column_lib.real_valued_column('x')],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=100)
scores = regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
'my_error':
MetricSpec(
metric_fn=metric_ops.streaming_mean_squared_error,
prediction_key='scores'),
'my_metric':
MetricSpec(
metric_fn=_my_metric_op, prediction_key='scores')
})
self.assertIn('loss', set(scores.keys()))
self.assertIn('my_error', set(scores.keys()))
self.assertIn('my_metric', set(scores.keys()))
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions = np.array(list(
regressor.predict_scores(input_fn=predict_input_fn)))
self.assertAlmostEqual(
_sklearn.mean_squared_error(np.array([1, 0, 0, 0]), predictions),
scores['my_error'])
# Tests the case where the prediction_key is not "scores".
with self.assertRaisesRegexp(KeyError, 'bad_type'):
regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
'bad_name':
MetricSpec(
metric_fn=metric_ops.streaming_auc,
prediction_key='bad_type')
})
# Tests the case where the 2nd element of the key is not "scores".
with self.assertRaises(KeyError):
regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
('my_error', 'predictions'):
metric_ops.streaming_mean_squared_error
})
# Tests the case where the tuple of the key doesn't have 2 elements.
with self.assertRaises(ValueError):
regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
('bad_length_name', 'scores', 'bad_length'):
metric_ops.streaming_mean_squared_error
})
def testTrainSaveLoad(self):
"""Tests that insures you can save and reload a trained model."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant(
[1.0, 0., 0.2], dtype=dtypes.float32)
feature_columns = [
feature_column_lib.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20),
feature_column_lib.real_valued_column('age')
]
model_dir = tempfile.mkdtemp()
regressor = linear.LinearRegressor(
model_dir=model_dir,
feature_columns=feature_columns,
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=100)
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions = list(regressor.predict_scores(input_fn=predict_input_fn))
del regressor
regressor2 = linear.LinearRegressor(
model_dir=model_dir, feature_columns=feature_columns)
predictions2 = list(regressor2.predict_scores(input_fn=predict_input_fn))
self.assertAllClose(predictions, predictions2)
def testTrainWithPartitionedVariables(self):
"""Tests training with partitioned variables."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant(
[1.0, 0., 0.2], dtype=dtypes.float32)
feature_columns = [
# The given hash_bucket_size results in variables larger than the
# default min_slice_size attribute, so the variables are partitioned.
feature_column_lib.sparse_column_with_hash_bucket(
'language', hash_bucket_size=2e7),
feature_column_lib.real_valued_column('age')
]
tf_config = {
'cluster': {
run_config.TaskType.PS: ['fake_ps_0', 'fake_ps_1']
}
}
with test.mock.patch.dict('os.environ',
{'TF_CONFIG': json.dumps(tf_config)}):
config = run_config.RunConfig(tf_random_seed=1)
# Because we did not start a distributed cluster, we need to pass an
# empty ClusterSpec, otherwise the device_setter will look for
# distributed jobs, such as "/job:ps" which are not present.
config._cluster_spec = server_lib.ClusterSpec({})
regressor = linear.LinearRegressor(
feature_columns=feature_columns, config=config)
regressor.fit(input_fn=_input_fn, steps=100)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertLess(scores['loss'], 0.1)
def testDisableCenteredBias(self):
"""Tests that we can disable centered bias."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant(
[1.0, 0., 0.2], dtype=dtypes.float32)
feature_columns = [
feature_column_lib.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20),
feature_column_lib.real_valued_column('age')
]
regressor = linear.LinearRegressor(
feature_columns=feature_columns,
enable_centered_bias=False,
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=100)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertLess(scores['loss'], 0.1)
def testRecoverWeights(self):
rng = np.random.RandomState(67)
n = 1000
n_weights = 10
bias = 2
x = rng.uniform(-1, 1, (n, n_weights))
weights = 10 * rng.randn(n_weights)
y = np.dot(x, weights)
y += rng.randn(len(x)) * 0.05 + rng.normal(bias, 0.01)
feature_columns = estimator.infer_real_valued_columns_from_input(x)
regressor = linear.LinearRegressor(
feature_columns=feature_columns,
optimizer=ftrl.FtrlOptimizer(learning_rate=0.8))
regressor.fit(x, y, batch_size=64, steps=2000)
self.assertIn('linear//weight', regressor.get_variable_names())
regressor_weights = regressor.get_variable_value('linear//weight')
# Have to flatten weights since they come in (x, 1) shape.
self.assertAllClose(weights, regressor_weights.flatten(), rtol=1)
# TODO(ispir): Disable centered_bias.
# assert abs(bias - regressor.bias_) < 0.1
def testSdcaOptimizerRealValuedLinearFeatures(self):
"""Tests LinearRegressor with SDCAOptimizer and real valued features."""
x = [[1.2, 2.0, -1.5], [-2.0, 3.0, -0.5], [1.0, -0.5, 4.0]]
weights = [[3.0], [-1.2], [0.5]]
y = np.dot(x, weights)
def input_fn():
return {
'example_id': constant_op.constant(['1', '2', '3']),
'x': constant_op.constant(x),
'weights': constant_op.constant([[10.0], [10.0], [10.0]])
}, constant_op.constant(y)
x_column = feature_column_lib.real_valued_column('x', dimension=3)
sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(
example_id_column='example_id')
regressor = linear.LinearRegressor(
feature_columns=[x_column],
weight_column_name='weights',
optimizer=sdca_optimizer)
regressor.fit(input_fn=input_fn, steps=20)
loss = regressor.evaluate(input_fn=input_fn, steps=1)['loss']
self.assertLess(loss, 0.01)
self.assertIn('linear/x/weight', regressor.get_variable_names())
regressor_weights = regressor.get_variable_value('linear/x/weight')
self.assertAllClose(
[w[0] for w in weights], regressor_weights.flatten(), rtol=0.1)
def testSdcaOptimizerMixedFeaturesArbitraryWeights(self):
"""Tests LinearRegressor with SDCAOptimizer and a mix of features."""
def input_fn():
return {
'example_id':
constant_op.constant(['1', '2', '3']),
'price':
constant_op.constant([0.6, 0.8, 0.3]),
'sq_footage':
constant_op.constant([[900.0], [700.0], [600.0]]),
'country':
sparse_tensor.SparseTensor(
values=['IT', 'US', 'GB'],
indices=[[0, 0], [1, 3], [2, 1]],
dense_shape=[3, 5]),
'weights':
constant_op.constant([[3.0], [5.0], [7.0]])
}, constant_op.constant([[1.55], [-1.25], [-3.0]])
price = feature_column_lib.real_valued_column('price')
sq_footage_bucket = feature_column_lib.bucketized_column(
feature_column_lib.real_valued_column('sq_footage'),
boundaries=[650.0, 800.0])
country = feature_column_lib.sparse_column_with_hash_bucket(
'country', hash_bucket_size=5)
sq_footage_country = feature_column_lib.crossed_column(
[sq_footage_bucket, country], hash_bucket_size=10)
sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(
example_id_column='example_id', symmetric_l2_regularization=1.0)
regressor = linear.LinearRegressor(
feature_columns=[price, sq_footage_bucket, country, sq_footage_country],
weight_column_name='weights',
optimizer=sdca_optimizer)
regressor.fit(input_fn=input_fn, steps=20)
loss = regressor.evaluate(input_fn=input_fn, steps=1)['loss']
self.assertLess(loss, 0.05)
def testSdcaOptimizerPartitionedVariables(self):
"""Tests LinearRegressor with SDCAOptimizer with partitioned variables."""
def input_fn():
return {
'example_id':
constant_op.constant(['1', '2', '3']),
'price':
constant_op.constant([0.6, 0.8, 0.3]),
'sq_footage':
constant_op.constant([[900.0], [700.0], [600.0]]),
'country':
sparse_tensor.SparseTensor(
values=['IT', 'US', 'GB'],
indices=[[0, 0], [1, 3], [2, 1]],
dense_shape=[3, 5]),
'weights':
constant_op.constant([[3.0], [5.0], [7.0]])
}, constant_op.constant([[1.55], [-1.25], [-3.0]])
price = feature_column_lib.real_valued_column('price')
sq_footage_bucket = feature_column_lib.bucketized_column(
feature_column_lib.real_valued_column('sq_footage'),
boundaries=[650.0, 800.0])
country = feature_column_lib.sparse_column_with_hash_bucket(
'country', hash_bucket_size=5)
sq_footage_country = feature_column_lib.crossed_column(
[sq_footage_bucket, country], hash_bucket_size=10)
sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(
example_id_column='example_id', symmetric_l2_regularization=1.0,
partitioner=partitioned_variables.fixed_size_partitioner(
num_shards=2, axis=0))
tf_config = {
'cluster': {
run_config.TaskType.PS: ['fake_ps_0', 'fake_ps_1']
}
}
with test.mock.patch.dict('os.environ',
{'TF_CONFIG': json.dumps(tf_config)}):
config = run_config.RunConfig()
# Because we did not start a distributed cluster, we need to pass an
# empty ClusterSpec, otherwise the device_setter will look for
# distributed jobs, such as "/job:ps" which are not present.
config._cluster_spec = server_lib.ClusterSpec({})
regressor = linear.LinearRegressor(
feature_columns=[price, sq_footage_bucket, country, sq_footage_country],
weight_column_name='weights',
optimizer=sdca_optimizer,
config=config)
regressor.fit(input_fn=input_fn, steps=20)
loss = regressor.evaluate(input_fn=input_fn, steps=1)['loss']
self.assertLess(loss, 0.05)
def testSdcaOptimizerSparseFeaturesWithL1Reg(self):
"""Tests LinearClassifier with SDCAOptimizer and sparse features."""
def input_fn():
return {
'example_id':
constant_op.constant(['1', '2', '3']),
'price':
constant_op.constant([[0.4], [0.6], [0.3]]),
'country':
sparse_tensor.SparseTensor(
values=['IT', 'US', 'GB'],
indices=[[0, 0], [1, 3], [2, 1]],
dense_shape=[3, 5]),
'weights':
constant_op.constant([[10.0], [10.0], [10.0]])
}, constant_op.constant([[1.4], [-0.8], [2.6]])
price = feature_column_lib.real_valued_column('price')
country = feature_column_lib.sparse_column_with_hash_bucket(
'country', hash_bucket_size=5)
# Regressor with no L1 regularization.
sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(
example_id_column='example_id')
regressor = linear.LinearRegressor(
feature_columns=[price, country],
weight_column_name='weights',
optimizer=sdca_optimizer)
regressor.fit(input_fn=input_fn, steps=20)
no_l1_reg_loss = regressor.evaluate(input_fn=input_fn, steps=1)['loss']
variable_names = regressor.get_variable_names()
self.assertIn('linear/price/weight', variable_names)
self.assertIn('linear/country/weights', variable_names)
no_l1_reg_weights = {
'linear/price/weight': regressor.get_variable_value(
'linear/price/weight'),
'linear/country/weights': regressor.get_variable_value(
'linear/country/weights'),
}
# Regressor with L1 regularization.
sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(
example_id_column='example_id', symmetric_l1_regularization=1.0)
regressor = linear.LinearRegressor(
feature_columns=[price, country],
weight_column_name='weights',
optimizer=sdca_optimizer)
regressor.fit(input_fn=input_fn, steps=20)
l1_reg_loss = regressor.evaluate(input_fn=input_fn, steps=1)['loss']
l1_reg_weights = {
'linear/price/weight': regressor.get_variable_value(
'linear/price/weight'),
'linear/country/weights': regressor.get_variable_value(
'linear/country/weights'),
}
# Unregularized loss is lower when there is no L1 regularization.
self.assertLess(no_l1_reg_loss, l1_reg_loss)
self.assertLess(no_l1_reg_loss, 0.05)
# But weights returned by the regressor with L1 regularization have smaller
# L1 norm.
l1_reg_weights_norm, no_l1_reg_weights_norm = 0.0, 0.0
for var_name in sorted(l1_reg_weights):
l1_reg_weights_norm += sum(
np.absolute(l1_reg_weights[var_name].flatten()))
no_l1_reg_weights_norm += sum(
np.absolute(no_l1_reg_weights[var_name].flatten()))
print('Var name: %s, value: %s' %
(var_name, no_l1_reg_weights[var_name].flatten()))
self.assertLess(l1_reg_weights_norm, no_l1_reg_weights_norm)
def testSdcaOptimizerBiasOnly(self):
"""Tests LinearClassifier with SDCAOptimizer and validates bias weight."""
def input_fn():
"""Testing the bias weight when it's the only feature present.
      All of the instances in this input only have the bias feature, and
      1/4 of the labels are positive. This means that the expected weight for
      the bias should be close to the average prediction, i.e., 0.25.
Returns:
Training data for the test.
"""
num_examples = 40
return {
'example_id':
constant_op.constant([str(x + 1) for x in range(num_examples)]),
          # place_holder is an empty column which is always 0 (absent), because
          # LinearRegressor requires at least one feature column.
'place_holder':
constant_op.constant([[0.0]] * num_examples),
}, constant_op.constant(
[[1 if i % 4 == 0 else 0] for i in range(num_examples)])
place_holder = feature_column_lib.real_valued_column('place_holder')
sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(
example_id_column='example_id')
regressor = linear.LinearRegressor(
feature_columns=[place_holder], optimizer=sdca_optimizer)
regressor.fit(input_fn=input_fn, steps=100)
self.assertNear(
regressor.get_variable_value('linear/bias_weight')[0], 0.25, err=0.1)
def testSdcaOptimizerBiasAndOtherColumns(self):
"""Tests LinearClassifier with SDCAOptimizer and validates bias weight."""
def input_fn():
"""Testing the bias weight when there are other features present.
1/2 of the instances in this input have feature 'a', the rest have
feature 'b', and we expect the bias to be added to each instance as well.
0.4 of all instances that have feature 'a' are positive, and 0.2 of all
instances that have feature 'b' are positive. The labels in the dataset
are ordered to appear shuffled since SDCA expects shuffled data, and
converges faster with this pseudo-random ordering.
If the bias was centered we would expect the weights to be:
bias: 0.3
a: 0.1
b: -0.1
Until b/29339026 is resolved, the bias gets regularized with the same
global value for the other columns, and so the expected weights get
shifted and are:
bias: 0.2
a: 0.2
b: 0.0
Returns:
The test dataset.
"""
num_examples = 200
half = int(num_examples / 2)
return {
'example_id':
constant_op.constant([str(x + 1) for x in range(num_examples)]),
'a':
constant_op.constant([[1]] * int(half) + [[0]] * int(half)),
'b':
constant_op.constant([[0]] * int(half) + [[1]] * int(half)),
}, constant_op.constant(
[[x]
for x in [1, 0, 0, 1, 1, 0, 0, 0, 1, 0] * int(half / 10) +
[0, 1, 0, 0, 0, 0, 0, 0, 1, 0] * int(half / 10)])
sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(
example_id_column='example_id')
regressor = linear.LinearRegressor(
feature_columns=[
feature_column_lib.real_valued_column('a'),
feature_column_lib.real_valued_column('b')
],
optimizer=sdca_optimizer)
regressor.fit(input_fn=input_fn, steps=200)
variable_names = regressor.get_variable_names()
self.assertIn('linear/bias_weight', variable_names)
self.assertIn('linear/a/weight', variable_names)
self.assertIn('linear/b/weight', variable_names)
# TODO(b/29339026): Change the expected results to expect a centered bias.
self.assertNear(
regressor.get_variable_value('linear/bias_weight')[0], 0.2, err=0.05)
self.assertNear(
regressor.get_variable_value('linear/a/weight')[0], 0.2, err=0.05)
self.assertNear(
regressor.get_variable_value('linear/b/weight')[0], 0.0, err=0.05)
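  # Worked check of the docstring's expectation above (illustrative comment,
  # not part of the original test): half of the examples have feature 'a' with
  # P(y=1 | a) = 0.4 and half have feature 'b' with P(y=1 | b) = 0.2. A model
  # with a centered bias would solve
  #   bias + a_weight = 0.4  and  bias + b_weight = 0.2
  # with a zero-mean constraint on (a_weight, b_weight), giving
  #   bias = 0.3, a_weight = 0.1, b_weight = -0.1.
  # With the bias regularized like any other column (b/29339026), the solution
  # shifts to roughly bias = 0.2, a_weight = 0.2, b_weight = 0.0, which is what
  # the assertions above check.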
def testSdcaOptimizerBiasAndOtherColumnsFabricatedCentered(self):
"""Tests LinearClassifier with SDCAOptimizer and validates bias weight."""
def input_fn():
"""Testing the bias weight when there are other features present.
1/2 of the instances in this input have feature 'a', the rest have
feature 'b', and we expect the bias to be added to each instance as well.
0.1 of all instances that have feature 'a' have a label of 1, and 0.1 of
all instances that have feature 'b' have a label of -1.
We can expect the weights to be:
bias: 0.0
a: 0.1
b: -0.1
Returns:
The test dataset.
"""
num_examples = 200
half = int(num_examples / 2)
return {
'example_id':
constant_op.constant([str(x + 1) for x in range(num_examples)]),
'a':
constant_op.constant([[1]] * int(half) + [[0]] * int(half)),
'b':
constant_op.constant([[0]] * int(half) + [[1]] * int(half)),
}, constant_op.constant([[1 if x % 10 == 0 else 0] for x in range(half)] +
[[-1 if x % 10 == 0 else 0] for x in range(half)])
sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(
example_id_column='example_id')
regressor = linear.LinearRegressor(
feature_columns=[
feature_column_lib.real_valued_column('a'),
feature_column_lib.real_valued_column('b')
],
optimizer=sdca_optimizer)
regressor.fit(input_fn=input_fn, steps=100)
variable_names = regressor.get_variable_names()
self.assertIn('linear/bias_weight', variable_names)
self.assertIn('linear/a/weight', variable_names)
self.assertIn('linear/b/weight', variable_names)
self.assertNear(
regressor.get_variable_value('linear/bias_weight')[0], 0.0, err=0.05)
self.assertNear(
regressor.get_variable_value('linear/a/weight')[0], 0.1, err=0.05)
self.assertNear(
regressor.get_variable_value('linear/b/weight')[0], -0.1, err=0.05)
class LinearEstimatorTest(test.TestCase):
def testExperimentIntegration(self):
cont_features = [
feature_column_lib.real_valued_column(
'feature', dimension=4)
]
exp = experiment.Experiment(
estimator=linear.LinearEstimator(feature_columns=cont_features,
head=head_lib.regression_head()),
train_input_fn=test_data.iris_input_logistic_fn,
eval_input_fn=test_data.iris_input_logistic_fn)
exp.test()
def testEstimatorContract(self):
estimator_test_utils.assert_estimator_contract(self,
linear.LinearEstimator)
def testLinearRegression(self):
"""Tests that loss goes down with training."""
def input_fn():
return {
'age':
constant_op.constant([1]),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[10.]])
language = feature_column_lib.sparse_column_with_hash_bucket('language',
100)
age = feature_column_lib.real_valued_column('age')
linear_estimator = linear.LinearEstimator(feature_columns=[age, language],
head=head_lib.regression_head())
linear_estimator.fit(input_fn=input_fn, steps=100)
loss1 = linear_estimator.evaluate(input_fn=input_fn, steps=1)['loss']
linear_estimator.fit(input_fn=input_fn, steps=400)
loss2 = linear_estimator.evaluate(input_fn=input_fn, steps=1)['loss']
self.assertLess(loss2, loss1)
self.assertLess(loss2, 0.5)
def testPoissonRegression(self):
"""Tests that loss goes down with training."""
def input_fn():
return {
'age':
constant_op.constant([1]),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[10.]])
language = feature_column_lib.sparse_column_with_hash_bucket('language',
100)
age = feature_column_lib.real_valued_column('age')
linear_estimator = linear.LinearEstimator(
feature_columns=[age, language],
head=head_lib.poisson_regression_head())
linear_estimator.fit(input_fn=input_fn, steps=10)
loss1 = linear_estimator.evaluate(input_fn=input_fn, steps=1)['loss']
linear_estimator.fit(input_fn=input_fn, steps=100)
loss2 = linear_estimator.evaluate(input_fn=input_fn, steps=1)['loss']
self.assertLess(loss2, loss1)
# Here loss of 2.1 implies a prediction of ~9.9998
self.assertLess(loss2, 2.1)
def testSDCANotSupported(self):
"""Tests that we detect error for SDCA."""
maintenance_cost = feature_column_lib.real_valued_column('maintenance_cost')
sq_footage = feature_column_lib.real_valued_column('sq_footage')
sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(
example_id_column='example_id')
with self.assertRaises(ValueError):
linear.LinearEstimator(
head=head_lib.regression_head(label_dimension=1),
feature_columns=[maintenance_cost, sq_footage],
optimizer=sdca_optimizer,
_joint_weights=True)
def boston_input_fn():
boston = base.load_boston()
features = math_ops.cast(
array_ops.reshape(constant_op.constant(boston.data), [-1, 13]),
dtypes.float32)
labels = math_ops.cast(
array_ops.reshape(constant_op.constant(boston.target), [-1, 1]),
dtypes.float32)
return features, labels
class FeatureColumnTest(test.TestCase):
def testTrain(self):
feature_columns = estimator.infer_real_valued_columns_from_input_fn(
boston_input_fn)
est = linear.LinearRegressor(feature_columns=feature_columns)
est.fit(input_fn=boston_input_fn, steps=1)
_ = est.evaluate(input_fn=boston_input_fn, steps=1)
if __name__ == '__main__':
test.main()
|
apache-2.0
|
phobson/statsmodels
|
statsmodels/tools/tests/test_data.py
|
36
|
1758
|
import pandas
import numpy as np
from statsmodels.tools import data
def test_missing_data_pandas():
"""
Fixes GH: #144
"""
X = np.random.random((10,5))
X[1,2] = np.nan
df = pandas.DataFrame(X)
vals, cnames, rnames = data.interpret_data(df)
np.testing.assert_equal(rnames.tolist(), [0,2,3,4,5,6,7,8,9])
def test_structarray():
X = np.random.random((9,)).view([('var1', 'f8'),
('var2', 'f8'),
('var3', 'f8')])
vals, cnames, rnames = data.interpret_data(X)
np.testing.assert_equal(cnames, X.dtype.names)
np.testing.assert_equal(vals, X.view((float,3)))
np.testing.assert_equal(rnames, None)
def test_recarray():
X = np.random.random((9,)).view([('var1', 'f8'),
('var2', 'f8'),
('var3', 'f8')])
vals, cnames, rnames = data.interpret_data(X.view(np.recarray))
np.testing.assert_equal(cnames, X.dtype.names)
np.testing.assert_equal(vals, X.view((float,3)))
np.testing.assert_equal(rnames, None)
def test_dataframe():
X = np.random.random((10,5))
df = pandas.DataFrame(X)
vals, cnames, rnames = data.interpret_data(df)
np.testing.assert_equal(vals, df.values)
np.testing.assert_equal(rnames.tolist(), df.index.tolist())
np.testing.assert_equal(cnames, df.columns.tolist())
def test_patsy_577():
X = np.random.random((10, 2))
df = pandas.DataFrame(X, columns=["var1", "var2"])
from patsy import dmatrix
endog = dmatrix("var1 - 1", df)
np.testing.assert_(data._is_using_patsy(endog, None))
exog = dmatrix("var2 - 1", df)
np.testing.assert_(data._is_using_patsy(endog, exog))
|
bsd-3-clause
|
lscheinkman/nupic
|
external/linux32/lib/python2.6/site-packages/matplotlib/offsetbox.py
|
69
|
17728
|
"""
The OffsetBox is a simple container artist. The child artists are meant
to be drawn at a relative position to their parent. The [VH]Packer,
DrawingArea and TextArea are derived from the OffsetBox.
The [VH]Packer automatically adjusts the relative positions of its
children, which should be instances of the OffsetBox. This is used to
align similar artists together, e.g., in a legend.
The DrawingArea can contain any Artist as a child. The
DrawingArea has a fixed width and height. The position of children
relative to the parent is fixed. The TextArea contains a single
Text instance. The width and height of the TextArea instance are the
width and height of its child text.
"""
import matplotlib.transforms as mtransforms
import matplotlib.artist as martist
import matplotlib.text as mtext
import numpy as np
from matplotlib.patches import bbox_artist as mbbox_artist
DEBUG=False
# for debugging use
def bbox_artist(*args, **kwargs):
if DEBUG:
mbbox_artist(*args, **kwargs)
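# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module). It assumes a
# matplotlib Axes instance is passed in and shows how the containers defined
# below are typically combined: a TextArea and a DrawingArea are stacked with
# a VPacker and placed at a fixed display-space offset. It is never called
# from within this module.
def _offsetbox_usage_sketch(ax):
    from matplotlib.patches import Circle
    label = TextArea("peak", textprops=dict(size=10))
    area = DrawingArea(20., 20., xdescent=0., ydescent=0.)
    area.add_artist(Circle((10., 10.), 8., fc="0.8"))
    # stack the text above the drawing area, centered horizontally
    box = VPacker(children=[label, area], align="center", pad=2., sep=4.)
    box.set_offset((80., 80.))  # offset in display (pixel) units
    ax.add_artist(box)
    return box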
# _get_packed_offsets() and _get_aligned_offsets() are coded assuming
# that we are packing boxes horizontally, but the same functions are
# also used for vertical packing.
def _get_packed_offsets(wd_list, total, sep, mode="fixed"):
"""
    Given a list of (width, xdescent) of each box, calculate the
    total width and the x-offset position of each item according to
    *mode*. xdescent is analogous to the usual descent, but along the
    x-direction. xdescent values are currently ignored.
    *wd_list* : list of (width, xdescent) of boxes to be packed.
    *total* : intended total length. None if not used.
    *sep* : spacing between boxes.
    *mode* : packing mode. 'fixed', 'expand', or 'equal'.
"""
w_list, d_list = zip(*wd_list)
# d_list is currently not used.
if mode == "fixed":
offsets_ = np.add.accumulate([0]+[w + sep for w in w_list])
offsets = offsets_[:-1]
if total is None:
total = offsets_[-1] - sep
return total, offsets
elif mode == "expand":
sep = (total - sum(w_list))/(len(w_list)-1.)
offsets_ = np.add.accumulate([0]+[w + sep for w in w_list])
offsets = offsets_[:-1]
return total, offsets
elif mode == "equal":
maxh = max(w_list)
if total is None:
total = (maxh+sep)*len(w_list)
else:
sep = float(total)/(len(w_list)) - maxh
offsets = np.array([(maxh+sep)*i for i in range(len(w_list))])
return total, offsets
else:
raise ValueError("Unknown mode : %s" % (mode,))
def _get_aligned_offsets(hd_list, height, align="baseline"):
"""
    Given a list of (height, descent) of each box, align the boxes
    with *align* and calculate the y-offset of each box, as well as the
    total height and the overall descent.
    *hd_list* : list of (height, descent) of boxes to be aligned.
    *height* : intended total height. Calculated from the boxes if None.
    *align* : align mode. 'baseline', 'top', 'bottom', or 'center'.
"""
if height is None:
height = max([h for h, d in hd_list])
if align == "baseline":
height_descent = max([h-d for h, d in hd_list])
descent = max([d for h, d in hd_list])
height = height_descent + descent
offsets = [0. for h, d in hd_list]
elif align in ["left","top"]:
descent=0.
offsets = [d for h, d in hd_list]
elif align in ["right","bottom"]:
descent=0.
offsets = [height-h+d for h, d in hd_list]
elif align == "center":
descent=0.
offsets = [(height-h)*.5+d for h, d in hd_list]
else:
raise ValueError("Unknown Align mode : %s" % (align,))
return height, descent, offsets
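# Illustrative example (not part of the original module): aligning two boxes of
# heights 10 and 20 (both with zero descent) using
#   _get_aligned_offsets([(10, 0), (20, 0)], height=None, align="center")
# gives height=20, descent=0 and offsets=[5.0, 0.0], i.e. the shorter box is
# shifted by half of the height difference.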
class OffsetBox(martist.Artist):
"""
    The OffsetBox is a simple container artist. The child artists are meant
    to be drawn at a relative position to their parent.
"""
def __init__(self, *args, **kwargs):
super(OffsetBox, self).__init__(*args, **kwargs)
self._children = []
self._offset = (0, 0)
def set_figure(self, fig):
"""
Set the figure
        Accepts a :class:`~matplotlib.figure.Figure` instance.
"""
martist.Artist.set_figure(self, fig)
for c in self.get_children():
c.set_figure(fig)
def set_offset(self, xy):
"""
Set the offset
        Accepts an (x, y) tuple or a callable object.
"""
self._offset = xy
def get_offset(self, width, height, xdescent, ydescent):
"""
Get the offset
accepts extent of the box
"""
if callable(self._offset):
return self._offset(width, height, xdescent, ydescent)
else:
return self._offset
def set_width(self, width):
"""
Set the width
accepts float
"""
self.width = width
def set_height(self, height):
"""
Set the height
accepts float
"""
self.height = height
def get_children(self):
"""
Return a list of artists it contains.
"""
return self._children
def get_extent_offsets(self, renderer):
raise Exception("")
def get_extent(self, renderer):
"""
        Return the width, height, xdescent, ydescent of the box.
"""
w, h, xd, yd, offsets = self.get_extent_offsets(renderer)
return w, h, xd, yd
def get_window_extent(self, renderer):
'''
get the bounding box in display space.
'''
w, h, xd, yd, offsets = self.get_extent_offsets(renderer)
px, py = self.get_offset(w, h, xd, yd)
return mtransforms.Bbox.from_bounds(px-xd, py-yd, w, h)
def draw(self, renderer):
"""
Update the location of children if necessary and draw them
to the given *renderer*.
"""
width, height, xdescent, ydescent, offsets = self.get_extent_offsets(renderer)
px, py = self.get_offset(width, height, xdescent, ydescent)
for c, (ox, oy) in zip(self.get_children(), offsets):
c.set_offset((px+ox, py+oy))
c.draw(renderer)
bbox_artist(self, renderer, fill=False, props=dict(pad=0.))
class PackerBase(OffsetBox):
def __init__(self, pad=None, sep=None, width=None, height=None,
align=None, mode=None,
children=None):
"""
*pad* : boundary pad
*sep* : spacing between items
*width*, *height* : width and height of the container box.
calculated if None.
*align* : alignment of boxes
*mode* : packing mode
"""
super(PackerBase, self).__init__()
self.height = height
self.width = width
self.sep = sep
self.pad = pad
self.mode = mode
self.align = align
self._children = children
class VPacker(PackerBase):
"""
    The VPacker has its children packed vertically. It automatically
    adjusts the relative positions of its children at draw time.
"""
def __init__(self, pad=None, sep=None, width=None, height=None,
align="baseline", mode="fixed",
children=None):
"""
*pad* : boundary pad
*sep* : spacing between items
*width*, *height* : width and height of the container box.
calculated if None.
*align* : alignment of boxes
*mode* : packing mode
"""
super(VPacker, self).__init__(pad, sep, width, height,
align, mode,
children)
def get_extent_offsets(self, renderer):
"""
        Update the offsets of the children and return the extent of the box.
"""
whd_list = [c.get_extent(renderer) for c in self.get_children()]
whd_list = [(w, h, xd, (h-yd)) for w, h, xd, yd in whd_list]
wd_list = [(w, xd) for w, h, xd, yd in whd_list]
width, xdescent, xoffsets = _get_aligned_offsets(wd_list,
self.width,
self.align)
pack_list = [(h, yd) for w,h,xd,yd in whd_list]
height, yoffsets_ = _get_packed_offsets(pack_list, self.height,
self.sep, self.mode)
yoffsets = yoffsets_ + [yd for w,h,xd,yd in whd_list]
ydescent = height - yoffsets[0]
yoffsets = height - yoffsets
#w, h, xd, h_yd = whd_list[-1]
yoffsets = yoffsets - ydescent
return width + 2*self.pad, height + 2*self.pad, \
xdescent+self.pad, ydescent+self.pad, \
zip(xoffsets, yoffsets)
class HPacker(PackerBase):
"""
    The HPacker has its children packed horizontally. It automatically
    adjusts the relative positions of its children at draw time.
"""
def __init__(self, pad=None, sep=None, width=None, height=None,
align="baseline", mode="fixed",
children=None):
"""
*pad* : boundary pad
*sep* : spacing between items
*width*, *height* : width and height of the container box.
calculated if None.
*align* : alignment of boxes
*mode* : packing mode
"""
super(HPacker, self).__init__(pad, sep, width, height,
align, mode, children)
def get_extent_offsets(self, renderer):
"""
        Update the offsets of the children and return the extent of the box.
"""
whd_list = [c.get_extent(renderer) for c in self.get_children()]
if self.height is None:
height_descent = max([h-yd for w,h,xd,yd in whd_list])
ydescent = max([yd for w,h,xd,yd in whd_list])
height = height_descent + ydescent
else:
            height = self.height - 2*self.pad  # height w/o pad
hd_list = [(h, yd) for w, h, xd, yd in whd_list]
height, ydescent, yoffsets = _get_aligned_offsets(hd_list,
self.height,
self.align)
pack_list = [(w, xd) for w,h,xd,yd in whd_list]
width, xoffsets_ = _get_packed_offsets(pack_list, self.width,
self.sep, self.mode)
xoffsets = xoffsets_ + [xd for w,h,xd,yd in whd_list]
xdescent=whd_list[0][2]
xoffsets = xoffsets - xdescent
return width + 2*self.pad, height + 2*self.pad, \
xdescent + self.pad, ydescent + self.pad, \
zip(xoffsets, yoffsets)
class DrawingArea(OffsetBox):
"""
The DrawingArea can contain any Artist as a child. The DrawingArea
has a fixed width and height. The position of children relative to
the parent is fixed.
"""
def __init__(self, width, height, xdescent=0.,
ydescent=0., clip=True):
"""
*width*, *height* : width and height of the container box.
*xdescent*, *ydescent* : descent of the box in x- and y-direction.
"""
super(DrawingArea, self).__init__()
self.width = width
self.height = height
self.xdescent = xdescent
self.ydescent = ydescent
self.offset_transform = mtransforms.Affine2D()
self.offset_transform.clear()
self.offset_transform.translate(0, 0)
def get_transform(self):
"""
Return the :class:`~matplotlib.transforms.Transform` applied
to the children
"""
return self.offset_transform
def set_transform(self, t):
"""
set_transform is ignored.
"""
pass
def set_offset(self, xy):
"""
        Set the offset of the container.
        Accepts a tuple of x, y coordinates in display units.
"""
self._offset = xy
self.offset_transform.clear()
self.offset_transform.translate(xy[0], xy[1])
def get_offset(self):
"""
return offset of the container.
"""
return self._offset
def get_window_extent(self, renderer):
'''
get the bounding box in display space.
'''
w, h, xd, yd = self.get_extent(renderer)
ox, oy = self.get_offset() #w, h, xd, yd)
return mtransforms.Bbox.from_bounds(ox-xd, oy-yd, w, h)
def get_extent(self, renderer):
"""
        Return the width, height, xdescent, ydescent of the box.
"""
return self.width, self.height, self.xdescent, self.ydescent
def add_artist(self, a):
'Add any :class:`~matplotlib.artist.Artist` to the container box'
self._children.append(a)
a.set_transform(self.get_transform())
def draw(self, renderer):
"""
Draw the children
"""
for c in self._children:
c.draw(renderer)
bbox_artist(self, renderer, fill=False, props=dict(pad=0.))
class TextArea(OffsetBox):
"""
    The TextArea contains a single Text instance. The text is
    placed at (0, 0) with baseline+left alignment. The width and height
    of the TextArea instance are the width and height of its child
    text.
"""
def __init__(self, s,
textprops=None,
multilinebaseline=None,
minimumdescent=True,
):
"""
*s* : a string to be displayed.
*textprops* : property dictionary for the text
*multilinebaseline* : If True, baseline for multiline text is
                            adjusted so that it is (approximately)
center-aligned with singleline text.
*minimumdescent* : If True, the box has a minimum descent of "p".
"""
if textprops is None:
textprops = {}
if not textprops.has_key("va"):
textprops["va"]="baseline"
self._text = mtext.Text(0, 0, s, **textprops)
OffsetBox.__init__(self)
self._children = [self._text]
self.offset_transform = mtransforms.Affine2D()
self.offset_transform.clear()
self.offset_transform.translate(0, 0)
self._baseline_transform = mtransforms.Affine2D()
self._text.set_transform(self.offset_transform+self._baseline_transform)
self._multilinebaseline = multilinebaseline
self._minimumdescent = minimumdescent
def set_multilinebaseline(self, t):
"""
        Set multilinebaseline.
        If True, the baseline for multiline text is adjusted so that it is
        (approximately) center-aligned with single-line text.
"""
self._multilinebaseline = t
def get_multilinebaseline(self):
"""
get multilinebaseline .
"""
return self._multilinebaseline
def set_minimumdescent(self, t):
"""
        Set minimumdescent.
        If True, the extent of single-line text is adjusted so that
        it has a minimum descent of "p".
"""
self._minimumdescent = t
def get_minimumdescent(self):
"""
get minimumdescent.
"""
return self._minimumdescent
def set_transform(self, t):
"""
set_transform is ignored.
"""
pass
def set_offset(self, xy):
"""
        Set the offset of the container.
        Accepts a tuple of x, y coordinates in display units.
"""
self._offset = xy
self.offset_transform.clear()
self.offset_transform.translate(xy[0], xy[1])
def get_offset(self):
"""
return offset of the container.
"""
return self._offset
def get_window_extent(self, renderer):
'''
get the bounding box in display space.
'''
w, h, xd, yd = self.get_extent(renderer)
ox, oy = self.get_offset() #w, h, xd, yd)
return mtransforms.Bbox.from_bounds(ox-xd, oy-yd, w, h)
def get_extent(self, renderer):
clean_line, ismath = self._text.is_math_text(self._text._text)
_, h_, d_ = renderer.get_text_width_height_descent(
"lp", self._text._fontproperties, ismath=False)
bbox, info = self._text._get_layout(renderer)
w, h = bbox.width, bbox.height
line = info[0][0] # first line
_, hh, dd = renderer.get_text_width_height_descent(
clean_line, self._text._fontproperties, ismath=ismath)
self._baseline_transform.clear()
if len(info) > 1 and self._multilinebaseline: # multi line
d = h-(hh-dd) # the baseline of the first line
d_new = 0.5 * h - 0.5 * (h_ - d_)
self._baseline_transform.translate(0, d - d_new)
d = d_new
else: # single line
h_d = max(h_ - d_, h-dd)
if self.get_minimumdescent():
                # to have a minimum descent, i.e., "l" and "p" have the same
                # descents.
d = max(dd, d_)
else:
d = dd
h = h_d + d
return w, h, 0., d
def draw(self, renderer):
"""
Draw the children
"""
self._text.draw(renderer)
bbox_artist(self, renderer, fill=False, props=dict(pad=0.))
|
agpl-3.0
|
feilchenfeldt/pypopgen
|
modules/splittree.py
|
1
|
5274
|
import numpy as np
import pandas as pd
import scipy
import treetools as tt
import warnings
def get_ne(pwd, mutation_rate, accessible_size, groupings=None):
"""
    If groupings are given, the Ne represents
    the Ne of the (structured) populations
    formed by the groupings.
Returns:
Dictionary of group names and Ne.
"""
assert (pwd.index == pwd.columns).all()
if groupings is None:
pwd_grouped = pwd
else:
pwd_grouped = pwd.groupby(groupings).mean().groupby(groupings,axis=1).mean()
return {k:v for k,v in zip(pwd_grouped.index,
np.diagonal(pwd_grouped/(4.*mutation_rate*accessible_size)))}
def get_group_pwd(pwd, groupings):
"""
    Note the difference to get_ne: get_ne averages across
    comparisons of individuals within groups, whereas this
    function averages over cross-group comparisons.
Returns: DataFrame of pairwise differences
between groups.
"""
return pwd.groupby(groupings)\
.apply(lambda df:df.groupby(groupings,axis=1)\
.apply(np.diagonal)).applymap(np.mean)
def get_samples_per_group(pwd, groupings=None, haploid=True):
"""
Returns the number of samples in each group.
"""
if groupings is None:
return {n:(1+haploid) for n in pwd.index}
else:
return (pwd.groupby(groupings).apply(len)*(1+haploid)).to_dict()
def get_split_diff(pwd):
"""
    Returns an estimate of the
    group split differences by calculating
    pi_between - (pi_within1 + pi_within2)/2.
    This assumes that Ne_ancestral is the average
    of the two present-day Ne's.
"""
return pwd.subtract(np.diagonal(pwd)/2.,
axis=1).subtract(np.diagonal(pwd)/2.,
axis=0)
# d = np.diagonal(pwd)
# h = np.repeat([d],len(d),axis=0)
# v = h.T
# mx = ( v*(v<h) + h*(v>=h))
# mn = scipy.stats.gmean([h,v], axis=0)
# return (pwd - mx)
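# Illustrative example (not part of the original module): with
#   pwd = pd.DataFrame([[2., 5.], [5., 4.]], index=['A', 'B'], columns=['A', 'B'])
# get_split_diff(pwd) has zeros on the diagonal and
#   5 - (2 + 4)/2 = 2
# in the off-diagonal A-B entry, i.e. the excess of between-group diversity
# over the mean within-group diversity.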
def get_split_times(pwd, mutation_rate, accessible_size, generation_time=1,
groupings=None):
"""
Uses a pairwise difference matrix to
infer group split times.
Returns:
split_time ... pd.DataFrame of split times
ne ... dict effective population sizes of each group
n_samples ... dict of number of haploid samples in each group
"""
ne = get_ne(pwd, mutation_rate, accessible_size, groupings=groupings)
n_samples = get_samples_per_group(pwd, groupings=groupings, haploid=True)
if groupings is not None:
pwd = get_group_pwd(pwd, groupings)
split_diff = get_split_diff(pwd)
split_time = split_diff/(2.*mutation_rate*accessible_size/generation_time)
return split_time, ne, n_samples
def get_split_tree(pwd, mutation_rate, accessible_size, generation_time=1,
groupings=None, outgroup=None, prune_outgroup=True):
"""
Uses a pairwise difference matrix to
infer group split times.
A neighbour-joining tree is constructed
from these split times and returned.
    The tree nodes are annotated with branch
    effective population sizes (ne). Ancestral
    ne's are assumed to be the average of the child
    branches (but for groupings the average of individual
    ne's is taken as the basis for this calculation).
Returns:
tree ... pypopgen.modules.treetools.HsTree instance
(heir of ete3.Tree but with additional functionality,
e.g., a .plot() method based on matplotlib)
"""
individual_ne = get_ne(pwd, mutation_rate, accessible_size)
if groupings is not None:
        for k, v in groupings.items():
if v == outgroup:
del individual_ne[k]
else:
del individual_ne[outgroup]
if min(individual_ne.values())*2 < max(individual_ne.values()):
warnings.warn("Inferred effective population sizes differ by a factor more than 2."
" The assumptions used to infer split times are not met. "
"The tree is likely far off from the truth. Branches with smallest Ne will be far too long. "
"Here are the estimates: {}".format(str(individual_ne)))
ne = get_ne(pwd, mutation_rate, accessible_size, groupings=groupings)
n_samples = get_samples_per_group(pwd, groupings=groupings, haploid=True)
if groupings is not None:
pwd = get_group_pwd(pwd, groupings)
split_diff = get_split_diff(pwd)
split_time = split_diff/(2.*mutation_rate*accessible_size/generation_time)
    # the factor 2 comes from the fact that the distance between two leaves is 2*split_time
tree = tt.dm_to_tree(2*split_time, outgroup=outgroup, prune_outgroup=prune_outgroup)
tree.add_property_to_nodes('ne',ne)
tree.add_property_to_nodes('n_samples',n_samples)
for node in tree.iter_descendants('postorder'):
if not hasattr(node, 'ne'):
l,r = node.get_children()
nes = [l.ne, r.ne]
for i,n in enumerate([l,r]):
if n.is_leaf():
nes[i] = pwd.loc[n.name, n.name]/(4.*mutation_rate*accessible_size)
node.ne = sum(nes)/2.
return tree
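# Illustrative usage sketch (not part of the original module); the parameter
# values below are made up for illustration. Given a symmetric DataFrame `pwd`
# of pairwise differences indexed by sample names and a dict `groupings`
# mapping samples to populations, split times and a split tree are obtained as
#
#   split_time, ne, n_samples = get_split_times(
#       pwd, mutation_rate=1.25e-8, accessible_size=2.5e9,
#       generation_time=25, groupings=groupings)
#   tree = get_split_tree(pwd, mutation_rate=1.25e-8, accessible_size=2.5e9,
#                         generation_time=25, groupings=groupings,
#                         outgroup='outgroup_population')
#
# split_time is then expressed in the same time units as generation_time.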
|
mit
|
eclee25/flu-SDI-exploratory-age
|
scripts/OR_seasonseverity.py
|
1
|
2365
|
#!/usr/bin/python
##############################################
###Python template
###Author: Elizabeth Lee
###Date: 7/25/13
###Function: Draw OR by season severity plots where season severity is defined as attack rate per 100,000 in acute care or inpatient facilities in flu peak weeks
###Import data: SQL_export/OR_swk6.csv, SQL_export/seasonseverity.csv
###Command Line: python
##############################################
### notes ###
### packages ###
import matplotlib
import csv
import numpy as np
import matplotlib.pyplot as plt
import scipy.stats as stats
## local packages ##
import ORgenerator as od
### data structures ###
s6dict, ORdict = {},{}
sevdict = {} # season-severity proxy dictionary
### parameters ###
seasons = range(2,11) #seasons for which ORs will be generated
### functions ###
### import data ###
s6in=open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/explore/SQL_export/OR_swk6.csv','r')
s6=csv.reader(s6in, delimiter=',')
sevin=open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/explore/SQL_export/seasonseverity.csv','r')
sev=csv.reader(sevin, delimiter=',')
### program ###
s6dict = od.import_d(s6, 0, 1, 2)
ORdict = od.ORgen_seas(s6dict, seasons)
sevdict = od.import_gen_d(sev, 0, 3)
# plot data
# [1:] drops season 1 data for presentation plot
xvals = [float(sevdict[k]) for k in sorted(sevdict)[1:]]
yvals = [ORdict[k] for k in sorted(ORdict)]
print "sevdict pairs", sorted(sevdict.items())
print "sevdict keys", sorted(sevdict)
print "ORdict pairs", sorted(ORdict.items())
print "ORdict keys", sorted(ORdict)
# values without season 1 data
print "sevdict values", xvals
print "ORdict values", yvals
plt.scatter(xvals, yvals, marker='o', color = 'black', label= "total")
for num, AR, OR in zip(sorted(sevdict)[1:], xvals, yvals):
plt.annotate(num, xy = (AR, OR), xytext = (-2, 5), textcoords = 'offset points', fontsize=18)
# plt.xlabel('Attack Rate per 100,000 in acute care/inpatient at peak weeks')
plt.xlabel('Season Severity\nAttack Rate in inpatient facilities per 100,000 at peak weeks', fontsize=18)
plt.xticks(fontsize=16)
# plt.ylabel('Attack Rate OR, c:a (US pop normalized, peak +/- 6wks)')
plt.ylabel('OR at Peak Weeks, child:adult', fontsize=24)
plt.yticks(fontsize=16)
plt.show()
corr = stats.pearsonr(xvals[:-1], yvals[:-1])
print 'Pearson correlation coefficient', corr
|
mit
|
marcsans/cnn-physics-perception
|
phy/lib/python2.7/site-packages/matplotlib/tests/test_streamplot.py
|
7
|
2093
|
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from matplotlib.externals import six
import numpy as np
from numpy.testing import assert_array_almost_equal
import matplotlib.pyplot as plt
from matplotlib.testing.decorators import image_comparison, cleanup
import matplotlib.transforms as mtransforms
def velocity_field():
Y, X = np.mgrid[-3:3:100j, -3:3:100j]
U = -1 - X**2 + Y
V = 1 + X - Y**2
return X, Y, U, V
@image_comparison(baseline_images=['streamplot_colormap_test_image'])
def test_colormap():
X, Y, U, V = velocity_field()
plt.streamplot(X, Y, U, V, color=U, density=0.6, linewidth=2,
cmap=plt.cm.autumn)
plt.colorbar()
@image_comparison(baseline_images=['streamplot_linewidth_test_image'])
def test_linewidth():
X, Y, U, V = velocity_field()
speed = np.sqrt(U*U + V*V)
lw = 5*speed/speed.max()
df = 25. / 30. # Compatibility factor for old test image
plt.streamplot(X, Y, U, V, density=[0.5 * df, 1. * df], color='k',
linewidth=lw)
@image_comparison(baseline_images=['streamplot_masks_and_nans_test_image'])
def test_masks_and_nans():
X, Y, U, V = velocity_field()
mask = np.zeros(U.shape, dtype=bool)
mask[40:60, 40:60] = 1
U = np.ma.array(U, mask=mask)
U[:20, :20] = np.nan
with np.errstate(invalid='ignore'):
plt.streamplot(X, Y, U, V, color=U, cmap=plt.cm.Blues)
@cleanup
def test_streamplot_limits():
ax = plt.axes()
x = np.linspace(-5, 10, 20)
y = np.linspace(-2, 4, 10)
y, x = np.meshgrid(y, x)
trans = mtransforms.Affine2D().translate(25, 32) + ax.transData
plt.barbs(x, y, np.sin(x), np.cos(y), transform=trans)
# The calculated bounds are approximately the bounds of the original data,
# this is because the entire path is taken into account when updating the
# datalim.
assert_array_almost_equal(ax.dataLim.bounds, (20, 30, 15, 6),
decimal=1)
if __name__=='__main__':
import nose
nose.runmodule()
|
mit
|
BEugen/AI
|
CNTK/cntk-2.py
|
1
|
3227
|
import cntk
from cntk import Trainer
from cntk.learners import sgd
from cntk.ops import *
from cntk.io import *
from cntk.layers import *
from cntk.device import *
import pylab
import pandas as pd
iris = pd.read_csv('data_so2N.csv')
# print(iris)
# Build a mapping between iris class names and class numbers
fwmap = dict(enumerate(set(iris.values[:, 4])))
bkmap = {fwmap[k]: k for k in fwmap}
print(fwmap, bkmap)
def conv(n):
"""
    Convert a class name into a three-dimensional one-hot vector of zeros and ones
"""
return [1 if x == n else 0 for i, x in fwmap.items()]
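# Illustrative example (not part of the original script), assuming
# insertion-ordered dicts: conv(fwmap[0]) -> [1, 0, 0],
# conv(fwmap[1]) -> [0, 1, 0], conv(fwmap[2]) -> [0, 0, 1].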
def dump(seq, fname):
with open(fname, 'w') as f:
for x in seq:
f.write("|label {}|features {} {} {} {}\n".format(" ".join(map(str, conv(x[4]))), x[0], x[1], x[2], x[3]))
data = np.random.permutation(iris.values)
dump(data[0:130], 'iris_train.txt')
dump(data[130:], 'iris_test.txt')
reader_train = MinibatchSource(CTFDeserializer('iris_train.txt',
StreamDefs(
labels=StreamDef(field='label', shape=3),
features=StreamDef(field='features', shape=4))))
reader_test = MinibatchSource(CTFDeserializer('iris_test.txt',
StreamDefs(
labels=StreamDef(field='label', shape=3),
features=StreamDef(field='features', shape=4))))
input_var = input_variable(4)
label_var = input_variable(3)
model = Sequential([Dense(16, init=glorot_uniform(), activation=sigmoid),
Dense(3, init=glorot_uniform(), activation=None)])
z = model(input_var)
ce = cntk.cross_entropy_with_softmax(z, label_var)
pe = cntk.classification_error(z, label_var)
minibatch_size = 16
lr_per_minibatch = cntk.learning_rate_schedule(0.01, cntk.UnitType.minibatch)
pp = cntk.logging.ProgressPrinter()
learner = cntk.adagrad(z.parameters, lr=lr_per_minibatch)
trainer = cntk.Trainer(z, (ce, pe), [learner], [pp])
input_map = {
input_var: reader_train.streams.features,
label_var: reader_train.streams.labels
}
cntk.logging.log_number_of_parameters(z)
progress = []
for x in range(150):
    tloss = 0
    taccuracy = 0
    cnt = 0
for y in range(500):
data = reader_train.next_minibatch(minibatch_size, input_map)
t = trainer.train_minibatch(data)
tloss += trainer.previous_minibatch_loss_average * trainer.previous_minibatch_sample_count
taccuracy += trainer.previous_minibatch_evaluation_average * trainer.previous_minibatch_sample_count
cnt += trainer.previous_minibatch_sample_count
pp.update_with_trainer(trainer, with_metric=True)
progress.append([float(x), tloss / cnt, taccuracy / cnt])
pp.epoch_summary(with_metric=True)
progress = np.array(progress)
print(progress[:, 0], progress[:, 2])
test_size = 20
data = reader_test.next_minibatch(test_size, input_map=input_map)
metric = trainer.test_minibatch(data)
model.save("iris_model.bin")
print("Eval error = {}".format(metric*100))
|
gpl-3.0
|
fengzhyuan/scikit-learn
|
examples/ensemble/plot_adaboost_hastie_10_2.py
|
355
|
3576
|
"""
=============================
Discrete versus Real AdaBoost
=============================
This example is based on Figure 10.2 from Hastie et al 2009 [1] and illustrates
the difference in performance between the discrete SAMME [2] boosting
algorithm and real SAMME.R boosting algorithm. Both algorithms are evaluated
on a binary classification task where the target Y is a non-linear function
of 10 input features.
Discrete SAMME AdaBoost adapts based on errors in predicted class labels
whereas real SAMME.R uses the predicted class probabilities.
.. [1] T. Hastie, R. Tibshirani and J. Friedman, "Elements of Statistical
Learning Ed. 2", Springer, 2009.
.. [2] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", 2009.
"""
print(__doc__)
# Author: Peter Prettenhofer <[email protected]>,
# Noel Dawe <[email protected]>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import zero_one_loss
from sklearn.ensemble import AdaBoostClassifier
n_estimators = 400
# A learning rate of 1. may not be optimal for both SAMME and SAMME.R
learning_rate = 1.
X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
X_test, y_test = X[2000:], y[2000:]
X_train, y_train = X[:2000], y[:2000]
dt_stump = DecisionTreeClassifier(max_depth=1, min_samples_leaf=1)
dt_stump.fit(X_train, y_train)
dt_stump_err = 1.0 - dt_stump.score(X_test, y_test)
dt = DecisionTreeClassifier(max_depth=9, min_samples_leaf=1)
dt.fit(X_train, y_train)
dt_err = 1.0 - dt.score(X_test, y_test)
ada_discrete = AdaBoostClassifier(
base_estimator=dt_stump,
learning_rate=learning_rate,
n_estimators=n_estimators,
algorithm="SAMME")
ada_discrete.fit(X_train, y_train)
ada_real = AdaBoostClassifier(
base_estimator=dt_stump,
learning_rate=learning_rate,
n_estimators=n_estimators,
algorithm="SAMME.R")
ada_real.fit(X_train, y_train)
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot([1, n_estimators], [dt_stump_err] * 2, 'k-',
label='Decision Stump Error')
ax.plot([1, n_estimators], [dt_err] * 2, 'k--',
label='Decision Tree Error')
ada_discrete_err = np.zeros((n_estimators,))
for i, y_pred in enumerate(ada_discrete.staged_predict(X_test)):
ada_discrete_err[i] = zero_one_loss(y_pred, y_test)
ada_discrete_err_train = np.zeros((n_estimators,))
for i, y_pred in enumerate(ada_discrete.staged_predict(X_train)):
ada_discrete_err_train[i] = zero_one_loss(y_pred, y_train)
ada_real_err = np.zeros((n_estimators,))
for i, y_pred in enumerate(ada_real.staged_predict(X_test)):
ada_real_err[i] = zero_one_loss(y_pred, y_test)
ada_real_err_train = np.zeros((n_estimators,))
for i, y_pred in enumerate(ada_real.staged_predict(X_train)):
ada_real_err_train[i] = zero_one_loss(y_pred, y_train)
ax.plot(np.arange(n_estimators) + 1, ada_discrete_err,
label='Discrete AdaBoost Test Error',
color='red')
ax.plot(np.arange(n_estimators) + 1, ada_discrete_err_train,
label='Discrete AdaBoost Train Error',
color='blue')
ax.plot(np.arange(n_estimators) + 1, ada_real_err,
label='Real AdaBoost Test Error',
color='orange')
ax.plot(np.arange(n_estimators) + 1, ada_real_err_train,
label='Real AdaBoost Train Error',
color='green')
ax.set_ylim((0.0, 0.5))
ax.set_xlabel('n_estimators')
ax.set_ylabel('error rate')
leg = ax.legend(loc='upper right', fancybox=True)
leg.get_frame().set_alpha(0.7)
plt.show()
|
bsd-3-clause
|
mne-tools/mne-tools.github.io
|
0.20/_downloads/24805de9a643a052b923c193bd9ce1f9/plot_40_epochs_to_data_frame.py
|
9
|
7008
|
"""
.. _tut-epochs-dataframe:
Exporting Epochs to Pandas DataFrames
=====================================
This tutorial shows how to export the data in :class:`~mne.Epochs` objects to a
:class:`Pandas DataFrame <pandas.DataFrame>`, and applies a typical Pandas
:doc:`split-apply-combine <pandas:user_guide/groupby>` workflow to examine the
latencies of the response maxima across epochs and conditions.
.. contents:: Page contents
:local:
:depth: 2
We'll use the :ref:`sample-dataset` dataset, but load a version of the raw file
that has already been filtered and downsampled, and has an average reference
applied to its EEG channels. As usual we'll start by importing the modules we
need and loading the data:
"""
import os
import seaborn as sns
import mne
sample_data_folder = mne.datasets.sample.data_path()
sample_data_raw_file = os.path.join(sample_data_folder, 'MEG', 'sample',
'sample_audvis_filt-0-40_raw.fif')
raw = mne.io.read_raw_fif(sample_data_raw_file, verbose=False)
###############################################################################
# Next we'll load a list of events from file, map them to condition names with
# an event dictionary, set some signal rejection thresholds (cf.
# :ref:`tut-reject-epochs-section`), and segment the continuous data into
# epochs:
sample_data_events_file = os.path.join(sample_data_folder, 'MEG', 'sample',
'sample_audvis_filt-0-40_raw-eve.fif')
events = mne.read_events(sample_data_events_file)
event_dict = {'auditory/left': 1, 'auditory/right': 2, 'visual/left': 3,
'visual/right': 4}
reject_criteria = dict(mag=3000e-15, # 3000 fT
grad=3000e-13, # 3000 fT/cm
eeg=100e-6, # 100 µV
eog=200e-6) # 200 µV
tmin, tmax = (-0.2, 0.5) # epoch from 200 ms before event to 500 ms after it
baseline = (None, 0) # baseline period from start of epoch to time=0
epochs = mne.Epochs(raw, events, event_dict, tmin, tmax, proj=True,
baseline=baseline, reject=reject_criteria, preload=True)
del raw
###############################################################################
# Converting an ``Epochs`` object to a ``DataFrame``
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#
# Once we have our :class:`~mne.Epochs` object, converting it to a
# :class:`~pandas.DataFrame` is simple: just call :meth:`epochs.to_data_frame()
# <mne.Epochs.to_data_frame>`. Each channel's data will be a column of the new
# :class:`~pandas.DataFrame`, alongside three additional columns of event name,
# epoch number, and sample time. Here we'll just show the first few rows and
# columns:
df = epochs.to_data_frame()
df.iloc[:5, :10]
###############################################################################
# Scaling time and channel values
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# By default, time values are converted from seconds to milliseconds and
# then rounded to the nearest integer; if you don't want this, you can pass
# ``time_format=None`` to keep time as a :class:`float` value in seconds, or
# convert it to a :class:`~pandas.Timedelta` value via
# ``time_format='timedelta'``.
#
# Note also that, by default, channel measurement values are scaled so that EEG
# data are converted to µV, magnetometer data are converted to fT, and
# gradiometer data are converted to fT/cm. These scalings can be customized
# through the ``scalings`` parameter, or suppressed by passing
# ``scalings=dict(eeg=1, mag=1, grad=1)``.
df = epochs.to_data_frame(time_format=None,
scalings=dict(eeg=1, mag=1, grad=1))
df.iloc[:5, :10]
###############################################################################
# Notice that the time values are no longer integers, and the channel values
# have changed by several orders of magnitude compared to the earlier
# DataFrame.
#
#
# Setting the ``index``
# ~~~~~~~~~~~~~~~~~~~~~
#
# It is also possible to move one or more of the indicator columns (event name,
# epoch number, and sample time) into the :ref:`index <pandas:indexing>`, by
# passing a string or list of strings as the ``index`` parameter. We'll also
# demonstrate here the effect of ``time_format='timedelta'``, yielding
# :class:`~pandas.Timedelta` values in the "time" column.
df = epochs.to_data_frame(index=['condition', 'epoch'],
time_format='timedelta')
df.iloc[:5, :10]
###############################################################################
# Wide- versus long-format DataFrames
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Another parameter, ``long_format``, determines whether each channel's data is
# in a separate column of the :class:`~pandas.DataFrame`
# (``long_format=False``), or whether the measured values are pivoted into a
# single ``'value'`` column with an extra indicator column for the channel name
# (``long_format=True``). Passing ``long_format=True`` will also create an
# extra column ``ch_type`` indicating the channel type.
long_df = epochs.to_data_frame(time_format=None, index='condition',
long_format=True)
long_df.head()
###############################################################################
# Generating the :class:`~pandas.DataFrame` in long format can be helpful when
# using other Python modules for subsequent analysis or plotting. For example,
# here we'll take data from the "auditory/left" condition, pick a couple MEG
# channels, and use :func:`seaborn.lineplot` to automatically plot the mean and
# confidence band for each channel, with confidence computed across the epochs
# in the chosen condition:
channels = ['MEG 1332', 'MEG 1342']
data = long_df.loc['auditory/left'].query('channel in @channels')
# convert channel column (CategoryDtype → string; for a nicer-looking legend)
data['channel'] = data['channel'].astype(str)
sns.lineplot(x='time', y='value', hue='channel', data=data)
###############################################################################
# We can also now use all the power of Pandas for grouping and transforming our
# data. Here, we find the latency of peak activation of 2 gradiometers (one
# near auditory cortex and one near visual cortex), and plot the distribution
# of the timing of the peak in each channel as a :func:`~seaborn.violinplot`:
# sphinx_gallery_thumbnail_number = 2
df = epochs.to_data_frame(time_format=None)
peak_latency = (df.filter(regex=r'condition|epoch|MEG 1332|MEG 2123')
.groupby(['condition', 'epoch'])
.aggregate(lambda x: df['time'].iloc[x.idxmax()])
.reset_index()
.melt(id_vars=['condition', 'epoch'],
var_name='channel',
value_name='latency of peak')
)
ax = sns.violinplot(x='channel', y='latency of peak', hue='condition',
data=peak_latency, palette='deep', saturation=1)
|
bsd-3-clause
|
ixaxaar/rosalind
|
Consensus and Profile.py
|
1
|
1220
|
#! /usr/bin/env python
import sys
import pandas as pd
import itertools
def main():
fasta = open(sys.argv[1], 'r')
dim = 1000
profile = {
'A': [0]*dim,
'C': [0]*dim,
'G': [0]*dim,
'T': [0]*dim
}
cur_seq = ''
line = fasta.readline()
while(line != ''):
if (line[0] == '>'):
if (cur_seq):
ctr = 0
for base in cur_seq:
profile[base][ctr] += 1
ctr += 1
cur_seq = ''
else:
cur_seq += line.rstrip()
line = fasta.readline()
ctr = 0
for base in cur_seq:
profile[base][ctr] += 1
ctr += 1
cur_seq = ''
consensus = ''
for ctr in range(dim):
m = max(profile['A'][ctr], profile['C'][ctr], profile['T'][ctr], profile['G'][ctr])
if (profile['A'][ctr] == profile['C'][ctr] == profile['T'][ctr] == profile['G'][ctr] == m):
consensus += ''
profile['A'][ctr] = profile['C'][ctr] = profile['T'][ctr] = profile['G'][ctr] = ''
elif profile['A'][ctr] == m: consensus += 'A'
elif profile['C'][ctr] == m: consensus += 'C'
elif profile['T'][ctr] == m: consensus += 'T'
elif profile['G'][ctr] == m: consensus += 'G'
out = open('out', 'w')
out.write(consensus)
out.write(pd.DataFrame(profile).transpose().to_string())
if __name__ == "__main__":
main()
|
mit
|
jmuhlich/image_explorer
|
image_explorer.py
|
1
|
3080
|
import glob
import warnings
from PIL import Image
import matplotlib.pyplot as plt
import numpy as np
from scipy.stats import scoreatpercentile
def load_field(fileglob='CK1_A01_1_*.tif'):
filenames = sorted(glob.glob(fileglob))
assert len(filenames) > 0, 'tiff files not found'
imlist = [Image.open(f) for f in filenames]
# field will be Width * Height * Channels
field = np.dstack([np.array(im) for im in imlist])
return field
def show_panels(field):
(h, w, d) = field.shape
panels = field.transpose(2,1,0).reshape(w*d,h).transpose(1,0)
plt.imshow(panels, vmin=0, vmax=2**16-1)
def show_pseudocolor(field):
assert field.shape[2] in (2, 3), 'field must have 2 or 3 channels'
# Pad 2-channel images with an empty 3rd channel.
if field.shape[2] == 2:
empty_image = np.zeros(field.shape[0:2], dtype=field.dtype)
field = np.dstack((field, empty_image))
plt.imshow(field/float(2**16), vmin=0.0, vmax=1.0)
plt.gca().xaxis.set_label_position('top')
def check_decimation(field, bits):
k = 2**bits;
for c in range(0, field.shape[2]):
print c, ':', np.histogram(field[:,:,c] & (k-1), k, (0,k))[0]
def build_histograms(field, num_bins):
channels = np.dsplit(field, field.shape[2])
bins = np.linspace(0, 2**16, num_bins+1)
hist = np.array([np.histogram(c, bins)[0] for c in channels]).T
return (bins, hist)
def show_histograms(field):
bins, hist = build_histograms(field, 2**8)
# +1 prevents taking log of 0, and only imperceptibly alters the result
log_hist = np.log(hist+1)
log_hist[log_hist == -np.inf] = 0
color_idx = list(enumerate(('red', 'green', 'blue', 'orange')))
for i, color in color_idx[0:field.shape[2]]:
plt.plot(bins[0:-1], log_hist[:,i], color=color)
plt.xlim(0, bins[-2])
def auto_contrast(field):
(h, w, d) = field.shape
# initial pre-scaling to help background calculation resolution
peak = scoreatpercentile(field.reshape(w*h, d), 99.9)
field_ac = field * (float(2**16-1) / peak) # do scaling to full-scale
# calculate background (bottom of dynamic range) and subtract it, shifting it to zero
bins, hist = build_histograms(field_ac, 2**8)
# take histogram peak as background (works for images seen so far...)
background = hist.argmax(0) * bins[1]
#### alternative background calculation using median values
###background = median(field.reshape(w*h, d), 0)
# do background subtraction
field_ac = field_ac - background
# calculate Nth percentile (top of dynamic range) and rescale it to the
# full-scale intensity value.
# must calculate percentile from new bg-corrected values
peak = scoreatpercentile(field_ac.reshape(w*h, d), 99.9)
# do scaling to full-scale
field_ac = field_ac * (float(2**16-1) / peak)
field_ac.clip(0, 2**16-1, field_ac)
return field_ac.astype('uint16')
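# Illustrative sketch (not part of the original module): load a field, run the
# auto-contrast pipeline described above and compare the intensity histograms
# before and after stretching. The default fileglob matches load_field above.
def _auto_contrast_demo(fileglob='CK1_A01_1_*.tif'):
    field = load_field(fileglob)
    field_ac = auto_contrast(field)
    plt.figure()
    show_histograms(field)     # raw data: narrow histogram, offset background
    plt.figure()
    show_histograms(field_ac)  # stretched: background near 0, peak near 2**16
    return field_ac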
def show_pc_hist(field):
plt.axes((.15, .2, .7, .7))
show_pseudocolor(field)
plt.axes((.1, .1, .8, .09))
show_histograms(field)
|
mit
|
zak-k/cartopy
|
lib/cartopy/tests/mpl/test_mpl_integration.py
|
1
|
19808
|
# (C) British Crown Copyright 2011 - 2016, Met Office
#
# This file is part of cartopy.
#
# cartopy is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# cartopy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with cartopy. If not, see <https://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
import math
import warnings
from nose.tools import assert_equal
try:
from nose.tools import assert_regex
except ImportError:
from nose.tools import assert_regexp_matches as assert_regex
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import six
import cartopy.crs as ccrs
from cartopy.tests.mpl import ImageTesting
_ROB_TOL = 0.5 if ccrs.PROJ4_VERSION < (4, 9) else 0.1
@ImageTesting(['global_contour_wrap'])
def test_global_contour_wrap_new_transform():
ax = plt.axes(projection=ccrs.PlateCarree())
ax.coastlines()
x, y = np.meshgrid(np.linspace(0, 360), np.linspace(-90, 90))
data = np.sin(np.sqrt(x ** 2 + y ** 2))
plt.contour(x, y, data, transform=ccrs.PlateCarree())
@ImageTesting(['global_contour_wrap'])
def test_global_contour_wrap_no_transform():
ax = plt.axes(projection=ccrs.PlateCarree())
ax.coastlines()
x, y = np.meshgrid(np.linspace(0, 360), np.linspace(-90, 90))
data = np.sin(np.sqrt(x ** 2 + y ** 2))
plt.contour(x, y, data)
@ImageTesting(['global_contourf_wrap'])
def test_global_contourf_wrap_new_transform():
ax = plt.axes(projection=ccrs.PlateCarree())
ax.coastlines()
x, y = np.meshgrid(np.linspace(0, 360), np.linspace(-90, 90))
data = np.sin(np.sqrt(x ** 2 + y ** 2))
plt.contourf(x, y, data, transform=ccrs.PlateCarree())
@ImageTesting(['global_contourf_wrap'])
def test_global_contourf_wrap_no_transform():
ax = plt.axes(projection=ccrs.PlateCarree())
ax.coastlines()
x, y = np.meshgrid(np.linspace(0, 360), np.linspace(-90, 90))
data = np.sin(np.sqrt(x ** 2 + y ** 2))
plt.contourf(x, y, data)
global_pcolor_wrap = ('global_pcolor_wrap'
if mpl.__version__ >= '1.5' else
'global_pcolor_wrap_pre_mpl_1.5')
@ImageTesting([global_pcolor_wrap])
def test_global_pcolor_wrap_new_transform():
ax = plt.axes(projection=ccrs.PlateCarree())
ax.coastlines()
x, y = np.meshgrid(np.linspace(0, 360), np.linspace(-90, 90))
data = np.sin(np.sqrt(x ** 2 + y ** 2))
plt.pcolor(x, y, data, transform=ccrs.PlateCarree())
@ImageTesting([global_pcolor_wrap])
def test_global_pcolor_wrap_no_transform():
ax = plt.axes(projection=ccrs.PlateCarree())
ax.coastlines()
x, y = np.meshgrid(np.linspace(0, 360), np.linspace(-90, 90))
data = np.sin(np.sqrt(x ** 2 + y ** 2))
plt.pcolor(x, y, data)
@ImageTesting(['global_scatter_wrap'])
def test_global_scatter_wrap_new_transform():
ax = plt.axes(projection=ccrs.PlateCarree())
# By default the coastline feature will be drawn after patches.
# By setting zorder we can ensure our scatter points are drawn
# after the coastlines.
ax.coastlines(zorder=0)
x, y = np.meshgrid(np.linspace(0, 360), np.linspace(-90, 90))
data = np.sin(np.sqrt(x ** 2 + y ** 2))
plt.scatter(x, y, c=data, transform=ccrs.PlateCarree())
@ImageTesting(['global_scatter_wrap'])
def test_global_scatter_wrap_no_transform():
ax = plt.axes(projection=ccrs.PlateCarree())
ax.coastlines(zorder=0)
x, y = np.meshgrid(np.linspace(0, 360), np.linspace(-90, 90))
data = np.sin(np.sqrt(x ** 2 + y ** 2))
plt.scatter(x, y, c=data)
@ImageTesting(['global_map'],
tolerance=16 if ccrs.PROJ4_VERSION < (4, 9) else 0.1)
def test_global_map():
ax = plt.axes(projection=ccrs.Robinson())
# ax.coastlines()
# ax.gridlines(5)
plt.plot(-0.08, 51.53, 'o', transform=ccrs.PlateCarree())
plt.plot([-0.08, 132], [51.53, 43.17], color='red',
transform=ccrs.PlateCarree())
plt.plot([-0.08, 132], [51.53, 43.17], color='blue',
transform=ccrs.Geodetic())
@ImageTesting(['simple_global'])
def test_simple_global():
plt.axes(projection=ccrs.PlateCarree())
plt.gca().coastlines()
# produces a global map, despite not having needed to set the limits
@ImageTesting(['multiple_projections1'])
def test_multiple_projections():
projections = [ccrs.PlateCarree(),
ccrs.Robinson(),
ccrs.RotatedPole(pole_latitude=45, pole_longitude=180),
ccrs.OSGB(),
ccrs.TransverseMercator(),
ccrs.Mercator(
globe=ccrs.Globe(semimajor_axis=math.degrees(1)),
min_latitude=-85., max_latitude=85.),
ccrs.LambertCylindrical(),
ccrs.Miller(),
ccrs.Gnomonic(),
ccrs.Stereographic(),
ccrs.NorthPolarStereo(),
ccrs.SouthPolarStereo(),
ccrs.Orthographic(),
ccrs.Mollweide(),
ccrs.InterruptedGoodeHomolosine(),
]
fig = plt.figure(figsize=(10, 10))
for i, prj in enumerate(projections, 1):
ax = fig.add_subplot(5, 5, i, projection=prj)
ax.set_global()
ax.coastlines()
plt.plot(-0.08, 51.53, 'o', transform=ccrs.PlateCarree())
plt.plot([-0.08, 132], [51.53, 43.17], color='red',
transform=ccrs.PlateCarree())
plt.plot([-0.08, 132], [51.53, 43.17], color='blue',
transform=ccrs.Geodetic())
def test_cursor_values():
ax = plt.axes(projection=ccrs.NorthPolarStereo())
x, y = np.array([-969100.]), np.array([-4457000.])
r = ax.format_coord(x, y)
assert_equal(r.encode('ascii', 'ignore'),
six.b('-9.691e+05, -4.457e+06 (50.716617N, 12.267069W)'))
ax = plt.axes(projection=ccrs.PlateCarree())
x, y = np.array([-181.5]), np.array([50.])
r = ax.format_coord(x, y)
assert_equal(r.encode('ascii', 'ignore'),
six.b('-181.5, 50 (50.000000N, 178.500000E)'))
ax = plt.axes(projection=ccrs.Robinson())
x, y = np.array([16060595.2]), np.array([2363093.4])
r = ax.format_coord(x, y)
assert_regex(r.encode('ascii', 'ignore'),
six.b('1.606e\\+07, 2.363e\\+06 '
'\\(22.09[0-9]{4}N, 173.70[0-9]{4}E\\)'))
plt.close()
@ImageTesting(['natural_earth_interface'], tolerance=_ROB_TOL)
def test_axes_natural_earth_interface():
rob = ccrs.Robinson()
ax = plt.axes(projection=rob)
with warnings.catch_warnings(record=True) as all_warnings:
warnings.simplefilter('always')
ax.natural_earth_shp('rivers_lake_centerlines', edgecolor='black',
facecolor='none')
ax.natural_earth_shp('lakes', facecolor='blue')
assert_equal(len(all_warnings), 2)
for warning in all_warnings:
msg = str(warning.message)
assert 'deprecated' in msg
assert 'add_feature' in msg
@ImageTesting(['pcolormesh_global_wrap1'])
def test_pcolormesh_global_with_wrap1():
# make up some realistic data with bounds (such as data from the UM)
nx, ny = 36, 18
xbnds = np.linspace(0, 360, nx, endpoint=True)
ybnds = np.linspace(-90, 90, ny, endpoint=True)
x, y = np.meshgrid(xbnds, ybnds)
data = np.exp(np.sin(np.deg2rad(x)) + np.cos(np.deg2rad(y)))
data = data[:-1, :-1]
ax = plt.subplot(211, projection=ccrs.PlateCarree())
plt.pcolormesh(xbnds, ybnds, data, transform=ccrs.PlateCarree())
ax.coastlines()
ax.set_global() # make sure everything is visible
ax = plt.subplot(212, projection=ccrs.PlateCarree(180))
plt.pcolormesh(xbnds, ybnds, data, transform=ccrs.PlateCarree())
ax.coastlines()
ax.set_global() # make sure everything is visible
@ImageTesting(['pcolormesh_global_wrap2'])
def test_pcolormesh_global_with_wrap2():
# make up some realistic data with bounds (such as data from the UM)
nx, ny = 36, 18
xbnds, xstep = np.linspace(0, 360, nx - 1, retstep=True, endpoint=True)
ybnds, ystep = np.linspace(-90, 90, nx - 1, retstep=True, endpoint=True)
xbnds -= xstep / 2
ybnds -= ystep / 2
xbnds = np.append(xbnds, xbnds[-1] + xstep)
ybnds = np.append(ybnds, ybnds[-1] + ystep)
x, y = np.meshgrid(xbnds, ybnds)
data = np.exp(np.sin(np.deg2rad(x)) + np.cos(np.deg2rad(y)))
data = data[:-1, :-1]
ax = plt.subplot(211, projection=ccrs.PlateCarree())
plt.pcolormesh(xbnds, ybnds, data, transform=ccrs.PlateCarree())
ax.coastlines()
ax.set_global() # make sure everything is visible
ax = plt.subplot(212, projection=ccrs.PlateCarree(180))
plt.pcolormesh(xbnds, ybnds, data, transform=ccrs.PlateCarree())
ax.coastlines()
ax.set_global() # make sure everything is visible
@ImageTesting(['pcolormesh_global_wrap3'], tolerance=_ROB_TOL)
def test_pcolormesh_global_with_wrap3():
nx, ny = 33, 17
xbnds = np.linspace(-1.875, 358.125, nx, endpoint=True)
ybnds = np.linspace(91.25, -91.25, ny, endpoint=True)
xbnds, ybnds = np.meshgrid(xbnds, ybnds)
data = np.exp(np.sin(np.deg2rad(xbnds)) + np.cos(np.deg2rad(ybnds)))
# this step is not necessary, but makes the plot even harder to do (i.e.
# it really puts cartopy through its paces)
ybnds = np.append(ybnds, ybnds[:, 1:2], axis=1)
xbnds = np.append(xbnds, xbnds[:, 1:2] + 360, axis=1)
data = np.ma.concatenate([data, data[:, 0:1]], axis=1)
data = data[:-1, :-1]
data = np.ma.masked_greater(data, 2.6)
ax = plt.subplot(311, projection=ccrs.PlateCarree(-45))
c = plt.pcolormesh(xbnds, ybnds, data, transform=ccrs.PlateCarree())
assert c._wrapped_collection_fix is not None, \
'No pcolormesh wrapping was done when it should have been.'
ax.coastlines()
ax.set_global() # make sure everything is visible
ax = plt.subplot(312, projection=ccrs.PlateCarree(-1.87499952))
plt.pcolormesh(xbnds, ybnds, data, transform=ccrs.PlateCarree())
ax.coastlines()
ax.set_global() # make sure everything is visible
ax = plt.subplot(313, projection=ccrs.Robinson(-2))
plt.pcolormesh(xbnds, ybnds, data, transform=ccrs.PlateCarree())
ax.coastlines()
ax.set_global() # make sure everything is visible
@ImageTesting(['pcolormesh_limited_area_wrap'])
def test_pcolormesh_limited_area_wrap():
# make up some realistic data with bounds (such as data from the UM's North
# Atlantic Europe model)
nx, ny = 22, 36
xbnds = np.linspace(311.91998291, 391.11999512, nx, endpoint=True)
ybnds = np.linspace(-23.59000015, 24.81000137, ny, endpoint=True)
x, y = np.meshgrid(xbnds, ybnds)
data = ((np.sin(np.deg2rad(x))) / 10. + np.exp(np.cos(np.deg2rad(y))))
data = data[:-1, :-1]
rp = ccrs.RotatedPole(pole_longitude=177.5, pole_latitude=37.5)
plt.figure(figsize=(10, 6))
ax = plt.subplot(221, projection=ccrs.PlateCarree())
plt.pcolormesh(xbnds, ybnds, data, transform=rp, cmap='Set1')
ax.coastlines()
ax = plt.subplot(222, projection=ccrs.PlateCarree(180))
plt.pcolormesh(xbnds, ybnds, data, transform=rp, cmap='Set1')
ax.coastlines()
ax.set_global()
# draw the same plot, only more zoomed in, and using the 2d versions
# of the coordinates (just to test that 1d and 2d are both suitably
# being fixed)
ax = plt.subplot(223, projection=ccrs.PlateCarree(180))
plt.pcolormesh(x, y, data, transform=rp, cmap='Set1')
ax.coastlines()
ax.set_extent([-70, 0, 0, 80])
ax = plt.subplot(224, projection=rp)
plt.pcolormesh(xbnds, ybnds, data, transform=rp, cmap='Set1')
ax.coastlines()
@ImageTesting(['pcolormesh_goode_wrap'])
def test_pcolormesh_goode_wrap():
# global data on an Interrupted Goode Homolosine projection
# shouldn't spill outside projection boundary
x = np.linspace(0, 360, 73)
y = np.linspace(-87.5, 87.5, 36)
X, Y = np.meshgrid(*[np.deg2rad(c) for c in (x, y)])
Z = np.cos(Y) + 0.375 * np.sin(2. * X)
Z = Z[:-1, :-1]
ax = plt.axes(projection=ccrs.InterruptedGoodeHomolosine())
ax.coastlines()
ax.pcolormesh(x, y, Z, transform=ccrs.PlateCarree())
@ImageTesting(['pcolormesh_mercator_wrap'])
def test_pcolormesh_mercator_wrap():
x = np.linspace(0, 360, 73)
y = np.linspace(-87.5, 87.5, 36)
X, Y = np.meshgrid(*[np.deg2rad(c) for c in (x, y)])
Z = np.cos(Y) + 0.375 * np.sin(2. * X)
Z = Z[:-1, :-1]
ax = plt.axes(projection=ccrs.Mercator())
ax.coastlines()
ax.pcolormesh(x, y, Z, transform=ccrs.PlateCarree())
@ImageTesting(['quiver_plate_carree'])
def test_quiver_plate_carree():
x = np.arange(-60, 42.5, 2.5)
y = np.arange(30, 72.5, 2.5)
x2d, y2d = np.meshgrid(x, y)
u = np.cos(np.deg2rad(y2d))
v = np.cos(2. * np.deg2rad(x2d))
mag = (u**2 + v**2)**.5
plot_extent = [-60, 40, 30, 70]
plt.figure(figsize=(6, 6))
# plot on native projection
ax = plt.subplot(211, projection=ccrs.PlateCarree())
ax.set_extent(plot_extent, crs=ccrs.PlateCarree())
ax.coastlines()
ax.quiver(x, y, u, v, mag)
# plot on a different projection
ax = plt.subplot(212, projection=ccrs.NorthPolarStereo())
ax.set_extent(plot_extent, crs=ccrs.PlateCarree())
ax.coastlines()
ax.quiver(x, y, u, v, mag, transform=ccrs.PlateCarree())
@ImageTesting(['quiver_rotated_pole'])
def test_quiver_rotated_pole():
nx, ny = 22, 36
x = np.linspace(311.91998291, 391.11999512, nx, endpoint=True)
y = np.linspace(-23.59000015, 24.81000137, ny, endpoint=True)
x2d, y2d = np.meshgrid(x, y)
u = np.cos(np.deg2rad(y2d))
v = -2. * np.cos(2. * np.deg2rad(y2d)) * np.sin(np.deg2rad(x2d))
mag = (u**2 + v**2)**.5
rp = ccrs.RotatedPole(pole_longitude=177.5, pole_latitude=37.5)
plot_extent = [x[0], x[-1], y[0], y[-1]]
# plot on native projection
plt.figure(figsize=(6, 6))
ax = plt.subplot(211, projection=rp)
ax.set_extent(plot_extent, crs=rp)
ax.coastlines()
ax.quiver(x, y, u, v, mag)
# plot on different projection
ax = plt.subplot(212, projection=ccrs.PlateCarree())
ax.set_extent(plot_extent, crs=rp)
ax.coastlines()
ax.quiver(x, y, u, v, mag, transform=rp)
@ImageTesting(['quiver_regrid'])
def test_quiver_regrid():
x = np.arange(-60, 42.5, 2.5)
y = np.arange(30, 72.5, 2.5)
x2d, y2d = np.meshgrid(x, y)
u = np.cos(np.deg2rad(y2d))
v = np.cos(2. * np.deg2rad(x2d))
mag = (u**2 + v**2)**.5
plot_extent = [-60, 40, 30, 70]
plt.figure(figsize=(6, 3))
ax = plt.axes(projection=ccrs.NorthPolarStereo())
ax.set_extent(plot_extent, crs=ccrs.PlateCarree())
ax.coastlines()
ax.quiver(x, y, u, v, mag, transform=ccrs.PlateCarree(),
regrid_shape=30)
@ImageTesting(['quiver_regrid_with_extent'])
def test_quiver_regrid_with_extent():
x = np.arange(-60, 42.5, 2.5)
y = np.arange(30, 72.5, 2.5)
x2d, y2d = np.meshgrid(x, y)
u = np.cos(np.deg2rad(y2d))
v = np.cos(2. * np.deg2rad(x2d))
mag = (u**2 + v**2)**.5
plot_extent = [-60, 40, 30, 70]
target_extent = [-3e6, 2e6, -6e6, -2.5e6]
plt.figure(figsize=(6, 3))
ax = plt.axes(projection=ccrs.NorthPolarStereo())
ax.set_extent(plot_extent, crs=ccrs.PlateCarree())
ax.coastlines()
ax.quiver(x, y, u, v, mag, transform=ccrs.PlateCarree(),
regrid_shape=10, target_extent=target_extent)
@ImageTesting(['barbs_plate_carree'])
def test_barbs():
x = np.arange(-60, 45, 5)
y = np.arange(30, 75, 5)
x2d, y2d = np.meshgrid(x, y)
u = 40 * np.cos(np.deg2rad(y2d))
v = 40 * np.cos(2. * np.deg2rad(x2d))
mag = (u**2 + v**2)**.5
plot_extent = [-60, 40, 30, 70]
plt.figure(figsize=(6, 6))
# plot on native projection
ax = plt.subplot(211, projection=ccrs.PlateCarree())
ax.set_extent(plot_extent, crs=ccrs.PlateCarree())
ax.coastlines()
ax.barbs(x, y, u, v, length=4, linewidth=.25)
# plot on a different projection
ax = plt.subplot(212, projection=ccrs.NorthPolarStereo())
ax.set_extent(plot_extent, crs=ccrs.PlateCarree())
ax.coastlines()
ax.barbs(x, y, u, v, transform=ccrs.PlateCarree(), length=4, linewidth=.25)
@ImageTesting(['barbs_regrid'])
def test_barbs_regrid():
x = np.arange(-60, 42.5, 2.5)
y = np.arange(30, 72.5, 2.5)
x2d, y2d = np.meshgrid(x, y)
u = 40 * np.cos(np.deg2rad(y2d))
v = 40 * np.cos(2. * np.deg2rad(x2d))
mag = (u**2 + v**2)**.5
plot_extent = [-60, 40, 30, 70]
plt.figure(figsize=(6, 3))
ax = plt.axes(projection=ccrs.NorthPolarStereo())
ax.set_extent(plot_extent, crs=ccrs.PlateCarree())
ax.coastlines()
ax.barbs(x, y, u, v, mag, transform=ccrs.PlateCarree(),
length=4, linewidth=.4, regrid_shape=20)
@ImageTesting(['barbs_regrid_with_extent'])
def test_barbs_regrid_with_extent():
x = np.arange(-60, 42.5, 2.5)
y = np.arange(30, 72.5, 2.5)
x2d, y2d = np.meshgrid(x, y)
u = 40 * np.cos(np.deg2rad(y2d))
v = 40 * np.cos(2. * np.deg2rad(x2d))
mag = (u**2 + v**2)**.5
plot_extent = [-60, 40, 30, 70]
target_extent = [-3e6, 2e6, -6e6, -2.5e6]
plt.figure(figsize=(6, 3))
ax = plt.axes(projection=ccrs.NorthPolarStereo())
ax.set_extent(plot_extent, crs=ccrs.PlateCarree())
ax.coastlines()
ax.barbs(x, y, u, v, mag, transform=ccrs.PlateCarree(),
length=4, linewidth=.25, regrid_shape=10,
target_extent=target_extent)
@ImageTesting(['barbs_1d'])
def test_barbs_1d():
x = np.array([20., 30., -17., 15.])
y = np.array([-1., 35., 11., 40.])
u = np.array([23., -18., 2., -11.])
v = np.array([5., -4., 19., 11.])
plot_extent = [-21, 40, -5, 45]
plt.figure(figsize=(6, 5))
ax = plt.axes(projection=ccrs.PlateCarree())
ax.set_extent(plot_extent, crs=ccrs.PlateCarree())
ax.coastlines()
ax.barbs(x, y, u, v, transform=ccrs.PlateCarree(),
length=8, linewidth=1, color='#7f7f7f')
@ImageTesting(['barbs_1d_transformed'])
def test_barbs_1d_transformed():
x = np.array([20., 30., -17., 15.])
y = np.array([-1., 35., 11., 40.])
u = np.array([23., -18., 2., -11.])
v = np.array([5., -4., 19., 11.])
plot_extent = [-20, 31, -5, 45]
plt.figure(figsize=(6, 5))
ax = plt.axes(projection=ccrs.NorthPolarStereo())
ax.set_extent(plot_extent, crs=ccrs.PlateCarree())
ax.coastlines()
ax.barbs(x, y, u, v, transform=ccrs.PlateCarree(),
length=8, linewidth=1, color='#7f7f7f')
@ImageTesting(['streamplot'
if mpl.__version__ >= '1.5' else
'streamplot_pre_mpl_1.5'])
def test_streamplot():
x = np.arange(-60, 42.5, 2.5)
y = np.arange(30, 72.5, 2.5)
x2d, y2d = np.meshgrid(x, y)
u = np.cos(np.deg2rad(y2d))
v = np.cos(2. * np.deg2rad(x2d))
mag = (u**2 + v**2)**.5
plot_extent = [-60, 40, 30, 70]
plt.figure(figsize=(6, 3))
ax = plt.axes(projection=ccrs.NorthPolarStereo())
ax.set_extent(plot_extent, crs=ccrs.PlateCarree())
ax.coastlines()
ax.streamplot(x, y, u, v, transform=ccrs.PlateCarree(),
density=(1.5, 2), color=mag, linewidth=2*mag)
if __name__ == '__main__':
import nose
nose.runmodule(argv=['-s', '--with-doctest'], exit=False)
|
lgpl-3.0
|
kelseyoo14/Wander
|
venv_2_7/lib/python2.7/site-packages/pandas/core/panel.py
|
9
|
54607
|
"""
Contains data structures designed for manipulating panel (3-dimensional) data
"""
# pylint: disable=E1103,W0231,W0212,W0621
from __future__ import division
import warnings
import numpy as np
import pandas.computation.expressions as expressions
import pandas.core.common as com
import pandas.core.ops as ops
from pandas import compat
from pandas import lib
from pandas.compat import (map, zip, range, u, OrderedDict,
OrderedDefaultdict)
from pandas.core.categorical import Categorical
from pandas.core.common import (PandasError, _try_sort, _default_index,
_infer_dtype_from_scalar, is_list_like)
from pandas.core.frame import DataFrame
from pandas.core.generic import NDFrame, _shared_docs
from pandas.core.index import (Index, MultiIndex, _ensure_index,
_get_combined_index)
from pandas.core.indexing import maybe_droplevels
from pandas.core.internals import (BlockManager,
create_block_manager_from_arrays,
create_block_manager_from_blocks)
from pandas.core.ops import _op_descriptions
from pandas.core.series import Series
from pandas.tools.util import cartesian_product
from pandas.util.decorators import (deprecate, Appender, deprecate_kwarg)
_shared_doc_kwargs = dict(
axes='items, major_axis, minor_axis',
klass="Panel",
axes_single_arg="{0, 1, 2, 'items', 'major_axis', 'minor_axis'}")
_shared_doc_kwargs['args_transpose'] = ("three positional arguments: each one "
"of\n %s" %
_shared_doc_kwargs['axes_single_arg'])
def _ensure_like_indices(time, panels):
"""
Makes sure that time and panels are conformable
"""
n_time = len(time)
n_panel = len(panels)
u_panels = np.unique(panels) # this sorts!
u_time = np.unique(time)
if len(u_time) == n_time:
time = np.tile(u_time, len(u_panels))
if len(u_panels) == n_panel:
panels = np.repeat(u_panels, len(u_time))
return time, panels
def panel_index(time, panels, names=['time', 'panel']):
"""
Returns a multi-index suitable for a panel-like DataFrame
Parameters
----------
time : array-like
Time index, does not have to repeat
panels : array-like
Panel index, does not have to repeat
names : list, optional
List containing the names of the indices
Returns
-------
multi_index : MultiIndex
Time index is the first level, the panels are the second level.
Examples
--------
>>> years = range(1960,1963)
>>> panels = ['A', 'B', 'C']
>>> panel_idx = panel_index(years, panels)
>>> panel_idx
MultiIndex([(1960, 'A'), (1961, 'A'), (1962, 'A'), (1960, 'B'),
(1961, 'B'), (1962, 'B'), (1960, 'C'), (1961, 'C'),
(1962, 'C')], dtype=object)
or
>>> import numpy as np
>>> years = np.repeat(range(1960,1963), 3)
>>> panels = np.tile(['A', 'B', 'C'], 3)
>>> panel_idx = panel_index(years, panels)
>>> panel_idx
MultiIndex([(1960, 'A'), (1960, 'B'), (1960, 'C'), (1961, 'A'),
(1961, 'B'), (1961, 'C'), (1962, 'A'), (1962, 'B'),
(1962, 'C')], dtype=object)
"""
time, panels = _ensure_like_indices(time, panels)
time_factor = Categorical.from_array(time, ordered=True)
panel_factor = Categorical.from_array(panels, ordered=True)
labels = [time_factor.codes, panel_factor.codes]
levels = [time_factor.categories, panel_factor.categories]
return MultiIndex(levels, labels, sortorder=None, names=names,
verify_integrity=False)
class Panel(NDFrame):
"""
Represents wide format panel data, stored as 3-dimensional array
Parameters
----------
data : ndarray (items x major x minor), or dict of DataFrames
items : Index or array-like
axis=0
major_axis : Index or array-like
axis=1
minor_axis : Index or array-like
axis=2
dtype : dtype, default None
Data type to force, otherwise infer
copy : boolean, default False
Copy data from inputs. Only affects DataFrame / 2d ndarray input
"""
@property
def _constructor(self):
return type(self)
_constructor_sliced = DataFrame
def __init__(self, data=None, items=None, major_axis=None, minor_axis=None,
copy=False, dtype=None):
self._init_data(data=data, items=items, major_axis=major_axis,
minor_axis=minor_axis, copy=copy, dtype=dtype)
def _init_data(self, data, copy, dtype, **kwargs):
"""
Generate ND initialization; axes are passed
as required objects to __init__
"""
if data is None:
data = {}
if dtype is not None:
dtype = self._validate_dtype(dtype)
passed_axes = [kwargs.pop(a, None) for a in self._AXIS_ORDERS]
if kwargs:
raise TypeError('_init_data() got an unexpected keyword '
'argument "{0}"'.format(list(kwargs.keys())[0]))
axes = None
if isinstance(data, BlockManager):
if any(x is not None for x in passed_axes):
axes = [x if x is not None else y
for x, y in zip(passed_axes, data.axes)]
mgr = data
elif isinstance(data, dict):
mgr = self._init_dict(data, passed_axes, dtype=dtype)
copy = False
dtype = None
elif isinstance(data, (np.ndarray, list)):
mgr = self._init_matrix(data, passed_axes, dtype=dtype, copy=copy)
copy = False
dtype = None
elif lib.isscalar(data) and all(x is not None for x in passed_axes):
if dtype is None:
dtype, data = _infer_dtype_from_scalar(data)
values = np.empty([len(x) for x in passed_axes], dtype=dtype)
values.fill(data)
mgr = self._init_matrix(values, passed_axes, dtype=dtype, copy=False)
copy = False
else: # pragma: no cover
raise PandasError('Panel constructor not properly called!')
NDFrame.__init__(self, mgr, axes=axes, copy=copy, dtype=dtype)
def _init_dict(self, data, axes, dtype=None):
haxis = axes.pop(self._info_axis_number)
# prefilter if haxis passed
if haxis is not None:
haxis = _ensure_index(haxis)
data = OrderedDict((k, v) for k, v
in compat.iteritems(data) if k in haxis)
else:
ks = list(data.keys())
if not isinstance(data, OrderedDict):
ks = _try_sort(ks)
haxis = Index(ks)
for k, v in compat.iteritems(data):
if isinstance(v, dict):
data[k] = self._constructor_sliced(v)
# extract axis for remaining axes & create the slicemap
raxes = [self._extract_axis(self, data, axis=i)
if a is None else a for i, a in enumerate(axes)]
raxes_sm = self._extract_axes_for_slice(self, raxes)
# shallow copy
arrays = []
haxis_shape = [len(a) for a in raxes]
for h in haxis:
v = values = data.get(h)
if v is None:
values = np.empty(haxis_shape, dtype=dtype)
values.fill(np.nan)
elif isinstance(v, self._constructor_sliced):
d = raxes_sm.copy()
d['copy'] = False
v = v.reindex(**d)
if dtype is not None:
v = v.astype(dtype)
values = v.values
arrays.append(values)
return self._init_arrays(arrays, haxis, [haxis] + raxes)
def _init_arrays(self, arrays, arr_names, axes):
return create_block_manager_from_arrays(arrays, arr_names, axes)
@classmethod
def from_dict(cls, data, intersect=False, orient='items', dtype=None):
"""
Construct Panel from dict of DataFrame objects
Parameters
----------
data : dict
{field : DataFrame}
intersect : boolean
Intersect indexes of input DataFrames
orient : {'items', 'minor'}, default 'items'
The "orientation" of the data. If the keys of the passed dict
should be the items of the result panel, pass 'items'
            (default). Otherwise pass 'minor' to use the columns of the
            passed DataFrame objects as the items (recommended when the
            DataFrames hold mixed-dtype data)
dtype : dtype, default None
Data type to force, otherwise infer
Returns
-------
Panel
"""
orient = orient.lower()
if orient == 'minor':
new_data = OrderedDefaultdict(dict)
for col, df in compat.iteritems(data):
for item, s in compat.iteritems(df):
new_data[item][col] = s
data = new_data
elif orient != 'items': # pragma: no cover
raise ValueError('Orientation must be one of {items, minor}.')
d = cls._homogenize_dict(cls, data, intersect=intersect, dtype=dtype)
ks = list(d['data'].keys())
if not isinstance(d['data'], OrderedDict):
ks = list(sorted(ks))
d[cls._info_axis_name] = Index(ks)
return cls(**d)
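    # Illustrative sketch (editor's example, kept as a comment so import-time
    # behaviour is unchanged): building a Panel from a dict of DataFrames.
    # The frames below are synthetic.
    #
    #   import numpy as np
    #   import pandas as pd
    #   frames = {'a': pd.DataFrame(np.random.randn(3, 2)),
    #             'b': pd.DataFrame(np.random.randn(3, 2))}
    #   p = pd.Panel.from_dict(frames)                  # items 'a' and 'b'
    #   p.shape                                         # -> (2, 3, 2)
    #   q = pd.Panel.from_dict(frames, orient='minor')  # frame columns become items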
def __getitem__(self, key):
if isinstance(self._info_axis, MultiIndex):
return self._getitem_multilevel(key)
if not (is_list_like(key) or isinstance(key, slice)):
return super(Panel, self).__getitem__(key)
return self.ix[key]
def _getitem_multilevel(self, key):
info = self._info_axis
loc = info.get_loc(key)
if isinstance(loc, (slice, np.ndarray)):
new_index = info[loc]
result_index = maybe_droplevels(new_index, key)
slices = [loc] + [slice(None) for x in range(
self._AXIS_LEN - 1)]
new_values = self.values[slices]
d = self._construct_axes_dict(self._AXIS_ORDERS[1:])
d[self._info_axis_name] = result_index
result = self._constructor(new_values, **d)
return result
else:
return self._get_item_cache(key)
def _init_matrix(self, data, axes, dtype=None, copy=False):
values = self._prep_ndarray(self, data, copy=copy)
if dtype is not None:
try:
values = values.astype(dtype)
except Exception:
raise ValueError('failed to cast to %s' % dtype)
shape = values.shape
fixed_axes = []
for i, ax in enumerate(axes):
if ax is None:
ax = _default_index(shape[i])
else:
ax = _ensure_index(ax)
fixed_axes.append(ax)
return create_block_manager_from_blocks([values], fixed_axes)
# ----------------------------------------------------------------------
# Comparison methods
def _compare_constructor(self, other, func):
if not self._indexed_same(other):
raise Exception('Can only compare identically-labeled '
'same type objects')
new_data = {}
for col in self._info_axis:
new_data[col] = func(self[col], other[col])
d = self._construct_axes_dict(copy=False)
return self._constructor(data=new_data, **d)
# ----------------------------------------------------------------------
# Magic methods
def __unicode__(self):
"""
Return a string representation for a particular Panel
Invoked by unicode(df) in py2 only.
Yields a Unicode String in both py2/py3.
"""
class_name = str(self.__class__)
shape = self.shape
dims = u('Dimensions: %s') % ' x '.join(
["%d (%s)" % (s, a) for a, s in zip(self._AXIS_ORDERS, shape)])
def axis_pretty(a):
v = getattr(self, a)
if len(v) > 0:
return u('%s axis: %s to %s') % (a.capitalize(),
com.pprint_thing(v[0]),
com.pprint_thing(v[-1]))
else:
return u('%s axis: None') % a.capitalize()
output = '\n'.join(
[class_name, dims] + [axis_pretty(a) for a in self._AXIS_ORDERS])
return output
def _get_plane_axes_index(self, axis):
"""
Get my plane axes indexes: these are already
(as compared with higher level planes),
as we are returning a DataFrame axes indexes
"""
axis_name = self._get_axis_name(axis)
if axis_name == 'major_axis':
index = 'minor_axis'
columns = 'items'
if axis_name == 'minor_axis':
index = 'major_axis'
columns = 'items'
elif axis_name == 'items':
index = 'major_axis'
columns = 'minor_axis'
return index, columns
def _get_plane_axes(self, axis):
"""
Get my plane axes indexes: these are already
(as compared with higher level planes),
as we are returning a DataFrame axes
"""
return [self._get_axis(axi) for axi in self._get_plane_axes_index(axis)]
fromDict = from_dict
def to_sparse(self, fill_value=None, kind='block'):
"""
Convert to SparsePanel
Parameters
----------
fill_value : float, default NaN
kind : {'block', 'integer'}
Returns
-------
y : SparseDataFrame
"""
from pandas.core.sparse import SparsePanel
frames = dict(compat.iteritems(self))
return SparsePanel(frames, items=self.items,
major_axis=self.major_axis,
minor_axis=self.minor_axis,
default_kind=kind,
default_fill_value=fill_value)
def to_excel(self, path, na_rep='', engine=None, **kwargs):
"""
Write each DataFrame in Panel to a separate excel sheet
Parameters
----------
path : string or ExcelWriter object
File path or existing ExcelWriter
na_rep : string, default ''
Missing data representation
engine : string, default None
write engine to use - you can also set this via the options
``io.excel.xlsx.writer``, ``io.excel.xls.writer``, and
``io.excel.xlsm.writer``.
Other Parameters
----------------
float_format : string, default None
Format string for floating point numbers
cols : sequence, optional
Columns to write
header : boolean or list of string, default True
Write out column names. If a list of string is given it is
assumed to be aliases for the column names
index : boolean, default True
Write row names (index)
index_label : string or sequence, default None
Column label for index column(s) if desired. If None is given, and
`header` and `index` are True, then the index names are used. A
sequence should be given if the DataFrame uses MultiIndex.
startrow : upper left cell row to dump data frame
startcol : upper left cell column to dump data frame
Notes
-----
Keyword arguments (and na_rep) are passed to the ``to_excel`` method
for each DataFrame written.
"""
from pandas.io.excel import ExcelWriter
if isinstance(path, compat.string_types):
writer = ExcelWriter(path, engine=engine)
else:
writer = path
kwargs['na_rep'] = na_rep
for item, df in compat.iteritems(self):
name = str(item)
df.to_excel(writer, name, **kwargs)
writer.save()
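    # Illustrative sketch (editor's example, commented out because it needs an
    # Excel writer engine installed): each item becomes one worksheet, and the
    # file name below is arbitrary.
    #
    #   import numpy as np
    #   import pandas as pd
    #   p = pd.Panel(np.random.randn(2, 3, 4), items=['first', 'second'])
    #   p.to_excel('panel_demo.xlsx')   # writes sheets 'first' and 'second'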
def as_matrix(self):
self._consolidate_inplace()
return self._data.as_matrix()
# ----------------------------------------------------------------------
# Getting and setting elements
def get_value(self, *args, **kwargs):
"""
Quickly retrieve single value at (item, major, minor) location
Parameters
----------
item : item label (panel item)
major : major axis label (panel item row)
minor : minor axis label (panel item column)
takeable : interpret the passed labels as indexers, default False
Returns
-------
value : scalar value
"""
nargs = len(args)
nreq = self._AXIS_LEN
# require an arg for each axis
if nargs != nreq:
raise TypeError('There must be an argument for each axis, you gave'
' {0} args, but {1} are required'.format(nargs,
nreq))
takeable = kwargs.pop('takeable', None)
if kwargs:
raise TypeError('get_value() got an unexpected keyword '
'argument "{0}"'.format(list(kwargs.keys())[0]))
if takeable is True:
lower = self._iget_item_cache(args[0])
else:
lower = self._get_item_cache(args[0])
return lower.get_value(*args[1:], takeable=takeable)
def set_value(self, *args, **kwargs):
"""
Quickly set single value at (item, major, minor) location
Parameters
----------
item : item label (panel item)
major : major axis label (panel item row)
minor : minor axis label (panel item column)
value : scalar
takeable : interpret the passed labels as indexers, default False
Returns
-------
panel : Panel
If label combo is contained, will be reference to calling Panel,
otherwise a new object
"""
# require an arg for each axis and the value
nargs = len(args)
nreq = self._AXIS_LEN + 1
if nargs != nreq:
raise TypeError('There must be an argument for each axis plus the '
'value provided, you gave {0} args, but {1} are '
'required'.format(nargs, nreq))
takeable = kwargs.pop('takeable', None)
if kwargs:
raise TypeError('set_value() got an unexpected keyword '
'argument "{0}"'.format(list(kwargs.keys())[0]))
try:
if takeable is True:
lower = self._iget_item_cache(args[0])
else:
lower = self._get_item_cache(args[0])
lower.set_value(*args[1:], takeable=takeable)
return self
except KeyError:
axes = self._expand_axes(args)
d = self._construct_axes_dict_from(self, axes, copy=False)
result = self.reindex(**d)
args = list(args)
likely_dtype, args[-1] = _infer_dtype_from_scalar(args[-1])
made_bigger = not np.array_equal(
axes[0], self._info_axis)
# how to make this logic simpler?
if made_bigger:
com._possibly_cast_item(result, args[0], likely_dtype)
return result.set_value(*args)
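    # Illustrative sketch (editor's example with a synthetic Panel): scalar
    # access via get_value / set_value. Kept as a comment so the module has no
    # import-time side effects.
    #
    #   import numpy as np
    #   import pandas as pd
    #   p = pd.Panel(np.zeros((2, 3, 4)), items=['i0', 'i1'],
    #                major_axis=list(range(3)), minor_axis=list('abcd'))
    #   p.set_value('i0', 1, 'b', 99.0)   # labels exist, so this returns p itself
    #   p.get_value('i0', 1, 'b')         # -> 99.0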
def _box_item_values(self, key, values):
if self.ndim == values.ndim:
result = self._constructor(values)
# a dup selection will yield a full ndim
if result._get_axis(0).is_unique:
result = result[key]
return result
d = self._construct_axes_dict_for_slice(self._AXIS_ORDERS[1:])
return self._constructor_sliced(values, **d)
def __setitem__(self, key, value):
shape = tuple(self.shape)
if isinstance(value, self._constructor_sliced):
value = value.reindex(
**self._construct_axes_dict_for_slice(self._AXIS_ORDERS[1:]))
mat = value.values
elif isinstance(value, np.ndarray):
if value.shape != shape[1:]:
raise ValueError(
'shape of value must be {0}, shape of given object was '
'{1}'.format(shape[1:], tuple(map(int, value.shape))))
mat = np.asarray(value)
elif np.isscalar(value):
dtype, value = _infer_dtype_from_scalar(value)
mat = np.empty(shape[1:], dtype=dtype)
mat.fill(value)
else:
raise TypeError('Cannot set item of type: %s' % str(type(value)))
mat = mat.reshape(tuple([1]) + shape[1:])
NDFrame._set_item(self, key, mat)
def _unpickle_panel_compat(self, state): # pragma: no cover
"Unpickle the panel"
_unpickle = com._unpickle_array
vals, items, major, minor = state
items = _unpickle(items)
major = _unpickle(major)
minor = _unpickle(minor)
values = _unpickle(vals)
wp = Panel(values, items, major, minor)
self._data = wp._data
def conform(self, frame, axis='items'):
"""
Conform input DataFrame to align with chosen axis pair.
Parameters
----------
frame : DataFrame
axis : {'items', 'major', 'minor'}
Axis the input corresponds to. E.g., if axis='major', then
the frame's columns would be items, and the index would be
values of the minor axis
Returns
-------
DataFrame
"""
axes = self._get_plane_axes(axis)
return frame.reindex(**self._extract_axes_for_slice(self, axes))
def head(self, n=5):
raise NotImplementedError
def tail(self, n=5):
raise NotImplementedError
def _needs_reindex_multi(self, axes, method, level):
""" don't allow a multi reindex on Panel or above ndim """
return False
def align(self, other, **kwargs):
raise NotImplementedError
def dropna(self, axis=0, how='any', inplace=False):
"""
Drop 2D from panel, holding passed axis constant
Parameters
----------
        axis : int, default 0
            Axis whose entries are dropped. E.g. axis=1 will drop major_axis
            entries that contain NA data (subject to ``how``)
        how : {'all', 'any'}, default 'any'
            'any' : drop an entry if one or more of its values are NA.
            'all' : drop an entry only if all of its values are NA.
inplace : bool, default False
If True, do operation inplace and return None.
Returns
-------
dropped : Panel
"""
axis = self._get_axis_number(axis)
values = self.values
mask = com.notnull(values)
for ax in reversed(sorted(set(range(self._AXIS_LEN)) - set([axis]))):
mask = mask.sum(ax)
per_slice = np.prod(values.shape[:axis] + values.shape[axis + 1:])
if how == 'all':
cond = mask > 0
else:
cond = mask == per_slice
new_ax = self._get_axis(axis)[cond]
result = self.reindex_axis(new_ax, axis=axis)
if inplace:
self._update_inplace(result)
else:
return result
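    # Illustrative sketch (editor's example with synthetic data): with axis=1,
    # major_axis labels whose slice contains missing data are dropped.
    #
    #   import numpy as np
    #   import pandas as pd
    #   data = np.random.randn(2, 4, 3)
    #   data[0, 2, 1] = np.nan
    #   p = pd.Panel(data)
    #   p.dropna(axis=1).shape              # -> (2, 3, 3); major label 2 is dropped
    #   p.dropna(axis=1, how='all').shape   # -> (2, 4, 3); no label is entirely NA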
def _combine(self, other, func, axis=0):
if isinstance(other, Panel):
return self._combine_panel(other, func)
elif isinstance(other, DataFrame):
return self._combine_frame(other, func, axis=axis)
elif np.isscalar(other):
return self._combine_const(other, func)
else:
raise NotImplementedError(str(type(other)) +
' is not supported in combine operation with ' +
str(type(self)))
def _combine_const(self, other, func):
new_values = func(self.values, other)
d = self._construct_axes_dict()
return self._constructor(new_values, **d)
def _combine_frame(self, other, func, axis=0):
index, columns = self._get_plane_axes(axis)
axis = self._get_axis_number(axis)
other = other.reindex(index=index, columns=columns)
if axis == 0:
new_values = func(self.values, other.values)
elif axis == 1:
new_values = func(self.values.swapaxes(0, 1), other.values.T)
new_values = new_values.swapaxes(0, 1)
elif axis == 2:
new_values = func(self.values.swapaxes(0, 2), other.values)
new_values = new_values.swapaxes(0, 2)
return self._constructor(new_values, self.items, self.major_axis,
self.minor_axis)
def _combine_panel(self, other, func):
items = self.items.union(other.items)
major = self.major_axis.union(other.major_axis)
minor = self.minor_axis.union(other.minor_axis)
# could check that everything's the same size, but forget it
this = self.reindex(items=items, major=major, minor=minor)
other = other.reindex(items=items, major=major, minor=minor)
result_values = func(this.values, other.values)
return self._constructor(result_values, items, major, minor)
def major_xs(self, key, copy=None):
"""
Return slice of panel along major axis
Parameters
----------
key : object
Major axis label
copy : boolean [deprecated]
Whether to make a copy of the data
Returns
-------
y : DataFrame
index -> minor axis, columns -> items
Notes
-----
major_xs is only for getting, not setting values.
        MultiIndex Slicers is a generic way to get/set values on any level or levels.
        It is a superset of major_xs functionality; see :ref:`MultiIndex Slicers <advanced.mi_slicers>`
"""
if copy is not None:
warnings.warn("copy keyword is deprecated, "
"default is to return a copy or a view if possible")
return self.xs(key, axis=self._AXIS_LEN - 2)
def minor_xs(self, key, copy=None):
"""
Return slice of panel along minor axis
Parameters
----------
key : object
Minor axis label
copy : boolean [deprecated]
Whether to make a copy of the data
Returns
-------
y : DataFrame
index -> major axis, columns -> items
Notes
-----
minor_xs is only for getting, not setting values.
        MultiIndex Slicers is a generic way to get/set values on any level or levels.
        It is a superset of minor_xs functionality; see :ref:`MultiIndex Slicers <advanced.mi_slicers>`
"""
if copy is not None:
warnings.warn("copy keyword is deprecated, "
"default is to return a copy or a view if possible")
return self.xs(key, axis=self._AXIS_LEN - 1)
def xs(self, key, axis=1, copy=None):
"""
Return slice of panel along selected axis
Parameters
----------
key : object
Label
        axis : {'items', 'major', 'minor'}, default 1/'major'
copy : boolean [deprecated]
Whether to make a copy of the data
Returns
-------
y : ndim(self)-1
Notes
-----
xs is only for getting, not setting values.
        MultiIndex Slicers is a generic way to get/set values on any level or levels.
        It is a superset of xs functionality; see :ref:`MultiIndex Slicers <advanced.mi_slicers>`
"""
if copy is not None:
warnings.warn("copy keyword is deprecated, "
"default is to return a copy or a view if possible")
axis = self._get_axis_number(axis)
if axis == 0:
return self[key]
self._consolidate_inplace()
axis_number = self._get_axis_number(axis)
new_data = self._data.xs(key, axis=axis_number, copy=False)
result = self._construct_return_type(new_data)
copy = new_data.is_mixed_type
result._set_is_copy(self, copy=copy)
return result
_xs = xs
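    # Illustrative sketch (editor's example with a synthetic Panel): slicing a
    # single plane out of a Panel; major_xs/minor_xs are thin wrappers around
    # xs with a fixed axis.
    #
    #   import numpy as np
    #   import pandas as pd
    #   p = pd.Panel(np.random.randn(2, 3, 4), items=['x', 'y'],
    #                major_axis=pd.date_range('2000-01-01', periods=3),
    #                minor_axis=list('abcd'))
    #   p.xs('x', axis=0)            # DataFrame: major_axis rows, minor_axis columns
    #   p.major_xs(p.major_axis[0])  # DataFrame: index -> minor_axis, columns -> items
    #   p.minor_xs('a')              # DataFrame: index -> major_axis, columns -> items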
def _ixs(self, i, axis=0):
"""
i : int, slice, or sequence of integers
axis : int
"""
ax = self._get_axis(axis)
key = ax[i]
# xs cannot handle a non-scalar key, so just reindex here
        # if we have a multi-index and a single tuple, then it's a reduction (GH 7516)
if not (isinstance(ax, MultiIndex) and isinstance(key, tuple)):
if is_list_like(key):
indexer = {self._get_axis_name(axis): key}
return self.reindex(**indexer)
# a reduction
if axis == 0:
values = self._data.iget(i)
return self._box_item_values(key, values)
# xs by position
self._consolidate_inplace()
new_data = self._data.xs(i, axis=axis, copy=True, takeable=True)
return self._construct_return_type(new_data)
def groupby(self, function, axis='major'):
"""
Group data on given axis, returning GroupBy object
Parameters
----------
function : callable
            Mapping function for the chosen axis
axis : {'major', 'minor', 'items'}, default 'major'
Returns
-------
grouped : PanelGroupBy
"""
from pandas.core.groupby import PanelGroupBy
axis = self._get_axis_number(axis)
return PanelGroupBy(self, function, axis=axis)
def to_frame(self, filter_observations=True):
"""
Transform wide format into long (stacked) format as DataFrame whose
columns are the Panel's items and whose index is a MultiIndex formed
of the Panel's major and minor axes.
Parameters
----------
filter_observations : boolean, default True
Drop (major, minor) pairs without a complete set of observations
across all the items
Returns
-------
y : DataFrame
"""
_, N, K = self.shape
if filter_observations:
# shaped like the return DataFrame
mask = com.notnull(self.values).all(axis=0)
# size = mask.sum()
selector = mask.ravel()
else:
# size = N * K
selector = slice(None, None)
data = {}
for item in self.items:
data[item] = self[item].values.ravel()[selector]
def construct_multi_parts(idx, n_repeat, n_shuffle=1):
axis_idx = idx.to_hierarchical(n_repeat, n_shuffle)
labels = [x[selector] for x in axis_idx.labels]
levels = axis_idx.levels
names = axis_idx.names
return labels, levels, names
def construct_index_parts(idx, major=True):
levels = [idx]
if major:
labels = [np.arange(N).repeat(K)[selector]]
names = idx.name or 'major'
else:
labels = np.arange(K).reshape(1, K)[np.zeros(N, dtype=int)]
labels = [labels.ravel()[selector]]
names = idx.name or 'minor'
names = [names]
return labels, levels, names
if isinstance(self.major_axis, MultiIndex):
major_labels, major_levels, major_names = construct_multi_parts(
self.major_axis, n_repeat=K)
else:
major_labels, major_levels, major_names = construct_index_parts(
self.major_axis)
if isinstance(self.minor_axis, MultiIndex):
minor_labels, minor_levels, minor_names = construct_multi_parts(
self.minor_axis, n_repeat=N, n_shuffle=K)
else:
minor_labels, minor_levels, minor_names = construct_index_parts(
self.minor_axis, major=False)
levels = major_levels + minor_levels
labels = major_labels + minor_labels
names = major_names + minor_names
index = MultiIndex(levels=levels, labels=labels,
names=names, verify_integrity=False)
return DataFrame(data, index=index, columns=self.items)
to_long = deprecate('to_long', to_frame)
toLong = deprecate('toLong', to_frame)
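    # Illustrative sketch (editor's example with synthetic data): to_frame
    # stacks the panel into a DataFrame indexed by a (major, minor) MultiIndex,
    # with one column per item.
    #
    #   import numpy as np
    #   import pandas as pd
    #   p = pd.Panel(np.random.randn(2, 3, 4), items=['a', 'b'])
    #   long_df = p.to_frame()
    #   long_df.shape                # -> (12, 2): 3 major x 4 minor rows, 2 item columns
    #   long_df.to_panel()           # round-trips back to a Panel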
def apply(self, func, axis='major', **kwargs):
"""
Applies function along axis (or axes) of the Panel
Parameters
----------
func : function
Function to apply to each combination of 'other' axes
e.g. if axis = 'items', the combination of major_axis/minor_axis
will each be passed as a Series; if axis = ('items', 'major'), DataFrames
of items & major axis will be passed
axis : {'items', 'minor', 'major'}, or {0, 1, 2}, or a tuple with two axes
Additional keyword arguments will be passed as keywords to the function
Examples
--------
Returns a Panel with the square root of each element
>>> p = pd.Panel(np.random.rand(4,3,2))
>>> p.apply(np.sqrt)
Equivalent to p.sum(1), returning a DataFrame
>>> p.apply(lambda x: x.sum(), axis=1)
Equivalent to previous:
>>> p.apply(lambda x: x.sum(), axis='minor')
        Return the shapes of each DataFrame over axis 2 (i.e. the shapes of items x major), as a Series
>>> p.apply(lambda x: x.shape, axis=(0,1))
Returns
-------
result : Panel, DataFrame, or Series
"""
if kwargs and not isinstance(func, np.ufunc):
f = lambda x: func(x, **kwargs)
else:
f = func
# 2d-slabs
if isinstance(axis, (tuple, list)) and len(axis) == 2:
return self._apply_2d(f, axis=axis)
axis = self._get_axis_number(axis)
# try ufunc like
if isinstance(f, np.ufunc):
try:
result = np.apply_along_axis(func, axis, self.values)
return self._wrap_result(result, axis=axis)
except (AttributeError):
pass
# 1d
return self._apply_1d(f, axis=axis)
def _apply_1d(self, func, axis):
axis_name = self._get_axis_name(axis)
ax = self._get_axis(axis)
ndim = self.ndim
values = self.values
# iter thru the axes
slice_axis = self._get_axis(axis)
slice_indexer = [0] * (ndim - 1)
indexer = np.zeros(ndim, 'O')
indlist = list(range(ndim))
indlist.remove(axis)
indexer[axis] = slice(None, None)
indexer.put(indlist, slice_indexer)
planes = [self._get_axis(axi) for axi in indlist]
shape = np.array(self.shape).take(indlist)
# all the iteration points
points = cartesian_product(planes)
results = []
for i in range(np.prod(shape)):
# construct the object
pts = tuple([p[i] for p in points])
indexer.put(indlist, slice_indexer)
obj = Series(values[tuple(indexer)], index=slice_axis, name=pts)
result = func(obj)
results.append(result)
# increment the indexer
slice_indexer[-1] += 1
n = -1
while (slice_indexer[n] >= shape[n]) and (n > (1 - ndim)):
slice_indexer[n - 1] += 1
slice_indexer[n] = 0
n -= 1
# empty object
if not len(results):
return self._constructor(**self._construct_axes_dict())
# same ndim as current
if isinstance(results[0], Series):
arr = np.vstack([r.values for r in results])
arr = arr.T.reshape(tuple([len(slice_axis)] + list(shape)))
tranp = np.array([axis] + indlist).argsort()
arr = arr.transpose(tuple(list(tranp)))
return self._constructor(arr, **self._construct_axes_dict())
# ndim-1 shape
results = np.array(results).reshape(shape)
if results.ndim == 2 and axis_name != self._info_axis_name:
results = results.T
planes = planes[::-1]
return self._construct_return_type(results, planes)
def _apply_2d(self, func, axis):
""" handle 2-d slices, equiv to iterating over the other axis """
ndim = self.ndim
axis = [self._get_axis_number(a) for a in axis]
# construct slabs, in 2-d this is a DataFrame result
indexer_axis = list(range(ndim))
for a in axis:
indexer_axis.remove(a)
indexer_axis = indexer_axis[0]
slicer = [slice(None, None)] * ndim
ax = self._get_axis(indexer_axis)
results = []
for i, e in enumerate(ax):
slicer[indexer_axis] = i
sliced = self.iloc[tuple(slicer)]
obj = func(sliced)
results.append((e, obj))
return self._construct_return_type(dict(results))
def _reduce(self, op, name, axis=0, skipna=True, numeric_only=None,
filter_type=None, **kwds):
if numeric_only:
raise NotImplementedError(
'Panel.{0} does not implement numeric_only.'.format(name))
axis_name = self._get_axis_name(axis)
axis_number = self._get_axis_number(axis_name)
f = lambda x: op(x, axis=axis_number, skipna=skipna, **kwds)
result = f(self.values)
axes = self._get_plane_axes(axis_name)
if result.ndim == 2 and axis_name != self._info_axis_name:
result = result.T
return self._construct_return_type(result, axes)
def _construct_return_type(self, result, axes=None):
""" return the type for the ndim of the result """
ndim = getattr(result, 'ndim', None)
# need to assume they are the same
if ndim is None:
if isinstance(result, dict):
ndim = getattr(list(compat.itervalues(result))[0], 'ndim', 0)
# have a dict, so top-level is +1 dim
if ndim != 0:
ndim += 1
# scalar
if ndim == 0:
return Series(result)
# same as self
elif self.ndim == ndim:
""" return the construction dictionary for these axes """
if axes is None:
return self._constructor(result)
return self._constructor(result, **self._construct_axes_dict())
# sliced
elif self.ndim == ndim + 1:
if axes is None:
return self._constructor_sliced(result)
return self._constructor_sliced(
result, **self._extract_axes_for_slice(self, axes))
raise PandasError('invalid _construct_return_type [self->%s] '
'[result->%s]' % (self, result))
def _wrap_result(self, result, axis):
axis = self._get_axis_name(axis)
axes = self._get_plane_axes(axis)
if result.ndim == 2 and axis != self._info_axis_name:
result = result.T
return self._construct_return_type(result, axes)
@Appender(_shared_docs['reindex'] % _shared_doc_kwargs)
def reindex(self, items=None, major_axis=None, minor_axis=None, **kwargs):
major_axis = (major_axis if major_axis is not None
else kwargs.pop('major', None))
minor_axis = (minor_axis if minor_axis is not None
else kwargs.pop('minor', None))
return super(Panel, self).reindex(items=items, major_axis=major_axis,
minor_axis=minor_axis, **kwargs)
@Appender(_shared_docs['rename'] % _shared_doc_kwargs)
def rename(self, items=None, major_axis=None, minor_axis=None, **kwargs):
major_axis = (major_axis if major_axis is not None
else kwargs.pop('major', None))
minor_axis = (minor_axis if minor_axis is not None
else kwargs.pop('minor', None))
return super(Panel, self).rename(items=items, major_axis=major_axis,
minor_axis=minor_axis, **kwargs)
@Appender(_shared_docs['reindex_axis'] % _shared_doc_kwargs)
def reindex_axis(self, labels, axis=0, method=None, level=None, copy=True,
limit=None, fill_value=np.nan):
return super(Panel, self).reindex_axis(labels=labels, axis=axis,
method=method, level=level,
copy=copy, limit=limit,
fill_value=fill_value)
@Appender(_shared_docs['transpose'] % _shared_doc_kwargs)
def transpose(self, *args, **kwargs):
return super(Panel, self).transpose(*args, **kwargs)
@Appender(_shared_docs['fillna'] % _shared_doc_kwargs)
def fillna(self, value=None, method=None, axis=None, inplace=False,
limit=None, downcast=None, **kwargs):
return super(Panel, self).fillna(value=value, method=method,
axis=axis, inplace=inplace,
limit=limit, downcast=downcast,
**kwargs)
def count(self, axis='major'):
"""
Return number of observations over requested axis.
Parameters
----------
axis : {'items', 'major', 'minor'} or {0, 1, 2}
Returns
-------
count : DataFrame
"""
i = self._get_axis_number(axis)
values = self.values
mask = np.isfinite(values)
result = mask.sum(axis=i, dtype='int64')
return self._wrap_result(result, axis)
@deprecate_kwarg(old_arg_name='lags', new_arg_name='periods')
def shift(self, periods=1, freq=None, axis='major'):
"""
Shift index by desired number of periods with an optional time freq.
The shifted data will not include the dropped periods and the
shifted axis will be smaller than the original. This is different
from the behavior of DataFrame.shift()
Parameters
----------
periods : int
Number of periods to move, can be positive or negative
freq : DateOffset, timedelta, or time rule string, optional
axis : {'items', 'major', 'minor'} or {0, 1, 2}
Returns
-------
shifted : Panel
"""
if freq:
return self.tshift(periods, freq, axis=axis)
return super(Panel, self).slice_shift(periods, axis=axis)
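    # Illustrative sketch (editor's example): unlike DataFrame.shift, the
    # vacated labels are dropped, so the shifted axis shrinks by `periods`.
    #
    #   import numpy as np
    #   import pandas as pd
    #   p = pd.Panel(np.random.randn(2, 5, 3))
    #   p.shift(2).shape             # -> (2, 3, 3); major_axis loses two labels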
def tshift(self, periods=1, freq=None, axis='major'):
return super(Panel, self).tshift(periods, freq, axis)
def join(self, other, how='left', lsuffix='', rsuffix=''):
"""
        Join items with other Panel on the major and minor axes.
Parameters
----------
other : Panel or list of Panels
Index should be similar to one of the columns in this one
how : {'left', 'right', 'outer', 'inner'}
How to handle indexes of the two objects. Default: 'left'
for joining on index, None otherwise
* left: use calling frame's index
* right: use input frame's index
* outer: form union of indexes
* inner: use intersection of indexes
lsuffix : string
Suffix to use from left frame's overlapping columns
rsuffix : string
Suffix to use from right frame's overlapping columns
Returns
-------
joined : Panel
"""
from pandas.tools.merge import concat
if isinstance(other, Panel):
join_major, join_minor = self._get_join_index(other, how)
this = self.reindex(major=join_major, minor=join_minor)
other = other.reindex(major=join_major, minor=join_minor)
merged_data = this._data.merge(other._data, lsuffix, rsuffix)
return self._constructor(merged_data)
else:
if lsuffix or rsuffix:
raise ValueError('Suffixes not supported when passing '
'multiple panels')
if how == 'left':
how = 'outer'
join_axes = [self.major_axis, self.minor_axis]
elif how == 'right':
raise ValueError('Right join not supported with multiple '
'panels')
else:
join_axes = None
return concat([self] + list(other), axis=0, join=how,
join_axes=join_axes, verify_integrity=True)
def update(self, other, join='left', overwrite=True, filter_func=None,
raise_conflict=False):
"""
Modify Panel in place using non-NA values from passed
Panel, or object coercible to Panel. Aligns on items
Parameters
----------
other : Panel, or object coercible to Panel
join : How to join individual DataFrames
{'left', 'right', 'outer', 'inner'}, default 'left'
overwrite : boolean, default True
If True then overwrite values for common keys in the calling panel
filter_func : callable(1d-array) -> 1d-array<boolean>, default None
Can choose to replace values other than NA. Return True for values
that should be updated
raise_conflict : bool
If True, will raise an error if a DataFrame and other both
contain data in the same place.
"""
if not isinstance(other, self._constructor):
other = self._constructor(other)
axis_name = self._info_axis_name
axis_values = self._info_axis
other = other.reindex(**{axis_name: axis_values})
for frame in axis_values:
self[frame].update(other[frame], join, overwrite, filter_func,
raise_conflict)
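    # Illustrative sketch (editor's example with synthetic Panels): update
    # overwrites values in place with non-NA values from the other Panel,
    # aligning on items.
    #
    #   import numpy as np
    #   import pandas as pd
    #   a = pd.Panel(np.zeros((1, 2, 2)))
    #   b = pd.Panel(np.ones((1, 2, 2)))
    #   a.update(b)                  # a now holds the values from b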
def _get_join_index(self, other, how):
if how == 'left':
join_major, join_minor = self.major_axis, self.minor_axis
elif how == 'right':
join_major, join_minor = other.major_axis, other.minor_axis
elif how == 'inner':
join_major = self.major_axis.intersection(other.major_axis)
join_minor = self.minor_axis.intersection(other.minor_axis)
elif how == 'outer':
join_major = self.major_axis.union(other.major_axis)
join_minor = self.minor_axis.union(other.minor_axis)
return join_major, join_minor
# miscellaneous data creation
@staticmethod
def _extract_axes(self, data, axes, **kwargs):
""" return a list of the axis indicies """
return [self._extract_axis(self, data, axis=i, **kwargs) for i, a
in enumerate(axes)]
@staticmethod
def _extract_axes_for_slice(self, axes):
""" return the slice dictionary for these axes """
return dict([(self._AXIS_SLICEMAP[i], a)
for i, a in zip(self._AXIS_ORDERS[self._AXIS_LEN -
len(axes):], axes)])
@staticmethod
def _prep_ndarray(self, values, copy=True):
if not isinstance(values, np.ndarray):
values = np.asarray(values)
# NumPy strings are a pain, convert to object
if issubclass(values.dtype.type, compat.string_types):
values = np.array(values, dtype=object, copy=True)
else:
if copy:
values = values.copy()
if values.ndim != self._AXIS_LEN:
raise ValueError("The number of dimensions required is {0}, "
"but the number of dimensions of the "
"ndarray given was {1}".format(self._AXIS_LEN,
values.ndim))
return values
@staticmethod
def _homogenize_dict(self, frames, intersect=True, dtype=None):
"""
Conform set of _constructor_sliced-like objects to either
an intersection of indices / columns or a union.
Parameters
----------
frames : dict
intersect : boolean, default True
Returns
-------
        dict of aligned results & indices
"""
result = dict()
        # caller may pass a dict or an OrderedDict; preserve that type
if isinstance(frames, OrderedDict):
result = OrderedDict()
adj_frames = OrderedDict()
for k, v in compat.iteritems(frames):
if isinstance(v, dict):
adj_frames[k] = self._constructor_sliced(v)
else:
adj_frames[k] = v
axes = self._AXIS_ORDERS[1:]
axes_dict = dict([(a, ax) for a, ax in zip(axes, self._extract_axes(
self, adj_frames, axes, intersect=intersect))])
reindex_dict = dict(
[(self._AXIS_SLICEMAP[a], axes_dict[a]) for a in axes])
reindex_dict['copy'] = False
for key, frame in compat.iteritems(adj_frames):
if frame is not None:
result[key] = frame.reindex(**reindex_dict)
else:
result[key] = None
axes_dict['data'] = result
axes_dict['dtype'] = dtype
return axes_dict
@staticmethod
def _extract_axis(self, data, axis=0, intersect=False):
index = None
if len(data) == 0:
index = Index([])
elif len(data) > 0:
raw_lengths = []
indexes = []
have_raw_arrays = False
have_frames = False
for v in data.values():
if isinstance(v, self._constructor_sliced):
have_frames = True
indexes.append(v._get_axis(axis))
elif v is not None:
have_raw_arrays = True
raw_lengths.append(v.shape[axis])
if have_frames:
index = _get_combined_index(indexes, intersect=intersect)
if have_raw_arrays:
lengths = list(set(raw_lengths))
if len(lengths) > 1:
raise ValueError('ndarrays must match shape on axis %d' % axis)
if have_frames:
if lengths[0] != len(index):
raise AssertionError('Length of data and index must match')
else:
index = Index(np.arange(lengths[0]))
if index is None:
index = Index([])
return _ensure_index(index)
@classmethod
def _add_aggregate_operations(cls, use_numexpr=True):
""" add the operations to the cls; evaluate the doc strings again """
        # doc string substitutions
_agg_doc = """
Wrapper method for %%s
Parameters
----------
other : %s or %s""" % (cls._constructor_sliced.__name__, cls.__name__) + """
axis : {""" + ', '.join(cls._AXIS_ORDERS) + "}" + """
Axis to broadcast over
Returns
-------
""" + cls.__name__ + "\n"
def _panel_arith_method(op, name, str_rep=None, default_axis=None,
fill_zeros=None, **eval_kwargs):
def na_op(x, y):
try:
result = expressions.evaluate(op, str_rep, x, y,
raise_on_error=True,
**eval_kwargs)
except TypeError:
result = op(x, y)
# handles discrepancy between numpy and numexpr on division/mod
# by 0 though, given that these are generally (always?)
# non-scalars, I'm not sure whether it's worth it at the moment
result = com._fill_zeros(result, x, y, name, fill_zeros)
return result
if name in _op_descriptions:
op_name = name.replace('__', '')
op_desc = _op_descriptions[op_name]
if op_desc['reversed']:
equiv = 'other ' + op_desc['op'] + ' panel'
else:
equiv = 'panel ' + op_desc['op'] + ' other'
_op_doc = """
%%s of series and other, element-wise (binary operator `%%s`).
Equivalent to ``%%s``.
Parameters
----------
other : %s or %s""" % (cls._constructor_sliced.__name__, cls.__name__) + """
axis : {""" + ', '.join(cls._AXIS_ORDERS) + "}" + """
Axis to broadcast over
Returns
-------
""" + cls.__name__ + """
See also
--------
""" + cls.__name__ + ".%s\n"
doc = _op_doc % (op_desc['desc'], op_name, equiv, op_desc['reverse'])
else:
doc = _agg_doc % name
@Appender(doc)
def f(self, other, axis=0):
return self._combine(other, na_op, axis=axis)
f.__name__ = name
return f
# add `div`, `mul`, `pow`, etc..
ops.add_flex_arithmetic_methods(
cls, _panel_arith_method, use_numexpr=use_numexpr,
flex_comp_method=ops._comp_method_PANEL)
Panel._setup_axes(axes=['items', 'major_axis', 'minor_axis'],
info_axis=0,
stat_axis=1,
aliases={'major': 'major_axis',
'minor': 'minor_axis'},
slicers={'major_axis': 'index',
'minor_axis': 'columns'})
ops.add_special_arithmetic_methods(Panel, **ops.panel_special_funcs)
Panel._add_aggregate_operations()
Panel._add_numeric_operations()
# legacy
class WidePanel(Panel):
def __init__(self, *args, **kwargs):
# deprecation, #10892
warnings.warn("WidePanel is deprecated. Please use Panel",
FutureWarning, stacklevel=2)
super(WidePanel, self).__init__(*args, **kwargs)
class LongPanel(DataFrame):
def __init__(self, *args, **kwargs):
# deprecation, #10892
warnings.warn("LongPanel is deprecated. Please use DataFrame",
FutureWarning, stacklevel=2)
super(LongPanel, self).__init__(*args, **kwargs)
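# ---------------------------------------------------------------------
# Legacy usage sketch (illustrative only, not part of the module): the flex
# arithmetic methods installed by _add_aggregate_operations above broadcast
# a DataFrame over a Panel axis. Assumes a pandas version that still ships
# Panel; run directly, never on import.
if __name__ == '__main__':
    import numpy as np
    import pandas as pd

    p = pd.Panel(np.random.randn(2, 3, 4),
                 items=['ItemA', 'ItemB'],
                 major_axis=pd.date_range('2000-01-01', periods=3),
                 minor_axis=['A', 'B', 'C', 'D'])
    df = p['ItemA']
    # add() broadcasts `df` across the items axis of the Panel
    print(p.add(df, axis='items'))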
|
artistic-2.0
|
liuguoyaolgy/Stock
|
m_draw.py
|
1
|
8068
|
import matplotlib.pyplot as plt
import pymysql
pymysql.install_as_MySQLdb()
import pandas as pd
import numpy as np
import talib as ta
from m_db import m_db2
import datetime
#import seaborn as sns
v_hasbuy_f = 0
v_code_f = 0
v_date_f = 0
v_ccibuy_f = 0
v_pregntbuy_f = 0
v_ccisale_f = 0
v_buyprice = 0
v_saleprice = 0
v_tot_zhenfu = 0
arr_shouyiarr = []
cash =0
df = ''
def pre_data(stick_code,ktype='D'):
# ktype in ('D','W','M')
global df
db = m_db2()
try:
if ktype == 'D':
df = db.get_data("select * from t_stick_data_d where code = '"+stick_code+"' and date > '2015-09-01';")#and date>'2015-05-01'
elif ktype == 'W':
df = db.get_data("select * from t_stick_data_w where code = '"+stick_code+"' ;")#and date>'2015-05-01'
elif ktype == 'M':
df = db.get_data("select * from t_stick_data_m where code = '" + stick_code + "' ;") # and date>'2015-05-01'
except Exception as e:
print('ERR:',e)
return
df['cci'] = ta.CCI(df['high'].values.astype('double'),df['low'].values.astype('double'),df['close'].values.astype('double'))
df['diff'],df['dea'],df['macd'] = ta.MACD(df['close'].values.astype('double'),fastperiod=12, slowperiod=26, signalperiod=9)
df['obv'] = ta.OBV(df['close'].values.astype('double'),df['vol'].values.astype('double'))
df['volma5']=ta.MA(df['vol'].values.astype('double'),5);
df['volma20'] = ta.MA(df['vol'].values.astype('double'), 20);
df['MA20'] = ta.MA(df['close'].values.astype('double'), 5)
#print(df)
#i= ta.CDLCONCEALBABYSWALL(df['open'].values.astype('double'),df['high'].values.astype('double'),
# df['low'].values.astype('double'),df['close'].values.astype('double'),)
#print(i)
return df
# draw
def control(df):
db = m_db2()
if df is None:
print('is none')
return
try:
lendf = len(df)
if lendf > 2 and float(df.loc[lendf-1]['vol'])>1.4*float(df.loc[lendf-2]['vol'])\
and float(df.loc[lendf - 2]['open']) > float(df.loc[lendf-1]['close']) \
and float(df.loc[lendf-1]['close']) > float(df.loc[lendf-1]['open']) \
and float(df.loc[lendf-1]['open']) > float(df.loc[lendf - 2]['close']):
print('can buy:',df.loc[lendf-1]['code'])
try:
print("insert ;;;;;;;;;;;;;;;;;;;;;;;;;;",df.loc[lendf-1]['date'],df.loc[lendf-1]['code'])
db.insert_can_buy_code(df.loc[lendf-1]['date'],df.loc[lendf-1]['code'],'1')
db.commit()
except Exception as e:
print('ERR:',e)
except Exception as e:
print('err:', e)
return
def drawkline(df):
try:
lendf = len(df)
except Exception as e:
print('ERR:',e)
return
maxvol = df['vol'].max()
minprice = df['low'].min()
for i in range(lendf):
if i > 1 and float(df.loc[i]['vol'])>1.4*float(df.loc[i-1]['vol'])\
and float(df.loc[i-1]['open'])>float(df.loc[i]['close'])\
and float(df.loc[i]['close'])>float(df.loc[i]['open'])\
and float(df.loc[i]['open'])>float(df.loc[i-1]['close']):
v_pregntbuy_f = 1
saveimg(df.loc[i-1:i+24])
if i > 1 and float(df.loc[i-1]['cci']) > 100 and float(df.loc[i]['cci']) < 100:  # CCI crosses back below 100: sale signal
v_ccisale_f = 1
trade(df.loc[i],trandtype='')
return
def get_canBuy_code(type):
db = m_db2()
if type == 'YES':
db.cur.execute("select code,outstanding from t_all_stickcode where outstanding < 50000 \
and (substr(code,1,1)='0' or substr(code,1,1)='6'); ")
else:
db.cur.execute("select code,outstanding from t_all_stickcode ;")
sqlrs = db.cur.fetchall()
for code in sqlrs:
# print('pre_data:',code[0])
df = pre_data(code[0],'W')
control(df)
return
def saveimg(df,dfw,date):
fig = plt.figure()
ax1 = plt.subplot(211) # create subplot 1 in figure 2
ax2 = plt.subplot(212) # create subplot 2 in figure 2
# ax1 = plt.subplot(511) # create subplot 1 in figure 2
# ax2 = plt.subplot(512) # create subplot 2 in figure 2
# ax3 = plt.subplot(513) # create subplot 3 in figure 2
# ax4 = plt.subplot(514) # create subplot 4 in figure 2
# ax5 = plt.subplot(515) # create subplot 5 in figure 2
try:
lendf = len(df)
except Exception as e:
return
maxvol = df['vol'].max()
minprice = df['low'].min()
beginindex = df.index.values[0]
for i in range(lendf):
df.index.values[i] -= beginindex;
plt.sca(ax1)
for i in df.index.values:
if df.loc[i]['open']<=df.loc[i]['close']:
color = 'red'
else:
color = 'green'
plt.vlines(i, df.loc[i]['low'], df.loc[i]['high'], linewidth=1,colors=color)
plt.vlines(i, df.loc[i]['open'], df.loc[i]['close'], linewidth=5, colors=color)
plt.ylabel(df.loc[df.index.values[0]]['code'])
df = dfw
try:
lendf = len(df)
except Exception as e:
return
maxvol = df['vol'].max()
minprice = df['low'].min()
beginindex = df.index.values[0]
for i in range(lendf):
df.index.values[i] -= beginindex;
plt.sca(ax2)
for i in df.index.values:
if df.loc[i]['open']<=df.loc[i]['close']:
color = 'red'
else:
color = 'green'
plt.vlines(i, df.loc[i]['low'], df.loc[i]['high'], linewidth=1,colors=color)
plt.vlines(i, df.loc[i]['open'], df.loc[i]['close'], linewidth=5, colors=color)
plt.ylabel(df.loc[df.index.values[0]]['code'])
# plt.sca(ax2)
# #plt.plot(df.index, df['diff'])
# plt.plot(df.index, df['dea'])
# plt.plot(df.index, df['macd'])
# plt.hlines(0, 0, len(df) - 1)
# plt.ylabel('macd')
#
# plt.sca(ax3)
# #vol = (float(df.loc[i]['vol']) / float(maxvol)) * float(minprice) / 4
# plt.vlines(df.index, '0', df['vol'], linewidth=5, colors='black')
# plt.plot(df.index,df['volma5'])
# plt.plot(df.index, df['volma20'])
# plt.ylabel('vol')
#
# plt.sca(ax4)
# plt.plot(df.index,df['cci'])
# plt.hlines(100,0,len(df)-1)
# plt.hlines(-100, 0, len(df)-1)
# plt.ylabel('cci')
#
# plt.sca(ax5)
# plt.plot(df.index,df['obv'])
# plt.ylabel('obv')
plt.savefig('/Users/lgy/PycharmProjects/img/'+df.loc[df.index.values[0]]['code']+' '+df.loc[df.index.values[0]]['date']+date+'.png')
#plt.close()
#plt.show()
plt.close('all')
return
def trade(df,trandtype=''):
global v_hasbuy_f
global v_code_f
global v_date_f
global v_ccibuy_f
global v_pregntbuy_f
global v_ccisale_f
global v_buyprice
global v_saleprice
global v_tot_zhenfu
global arr_shouyiarr
global cash
#sale
if 1 == v_hasbuy_f and 1 == v_ccisale_f:
v_saleprice = float(df['close'])
v_tot_zhenfu += (v_saleprice-v_buyprice)/v_buyprice
v_hasbuy_f=0
v_ccibuy_f=0
arr_shouyiarr.append([df['date'],df['close']])
#buy
if 0 == v_hasbuy_f and 1 == v_pregntbuy_f:
v_hasbuy_f=1
v_buyprice = float(df['close'])
#arr_shouyiarr.append(v_tot_zhenfu)
return
def show_canbuy_code(date):
db = m_db2()
return db.get_can_buy_code(date)
# db = m_db2()
# allstick = db.get_all_stick()
# for code in allstick['code'].values:
# print('code:',code)
# df = pre_data(code)
# drawkline(df)
def drawDayWeek(code,date,duadays,ktype='D'):
# code=xxxx date=YYYY-MM-DD
db = m_db2()
beginday=datetime.datetime.strptime(date,'%Y-%m-%d')
endday=beginday + datetime.timedelta(days=duadays)
beforday=beginday - datetime.timedelta(days=9)
befordayW = beginday - datetime.timedelta(days=400)
df = db.pre_data(code,beginday=beforday.strftime('%Y-%m-%d'),endday=endday.strftime('%Y-%m-%d'),ktype=ktype)
dfw = db.pre_data(code,beginday=befordayW.strftime('%Y-%m-%d'),endday=beforday.strftime('%Y-%m-%d'),ktype='W')
saveimg(df,dfw,date)
return
# df = pre_data('600848')
# drawkline(df)
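# ---------------------------------------------------------------------
# Minimal usage sketch (assumes the MySQL tables behind m_db2 are populated
# and that the image output directory used by saveimg exists; the date below
# is an illustrative value, not taken from the original script):
if __name__ == '__main__':
    # scan candidate codes on weekly bars and record buy signals
    get_canBuy_code('YES')
    # render daily + weekly K-line images for one code around a given date
    drawDayWeek('600848', '2016-03-01', 30, ktype='D')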
|
gpl-2.0
|
DonBeo/scikit-learn
|
sklearn/tree/tree.py
|
4
|
33063
|
"""
This module gathers tree-based methods, including decision, regression and
randomized trees. Single and multi-output problems are both handled.
"""
# Authors: Gilles Louppe <[email protected]>
# Peter Prettenhofer <[email protected]>
# Brian Holt <[email protected]>
# Noel Dawe <[email protected]>
# Satrajit Gosh <[email protected]>
# Joly Arnaud <[email protected]>
# Fares Hedayati <[email protected]>
#
# Licence: BSD 3 clause
from __future__ import division
import numbers
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.sparse import issparse
from ..base import BaseEstimator, ClassifierMixin, RegressorMixin
from ..externals import six
from ..feature_selection.from_model import _LearntSelectorMixin
from ..utils import check_array, check_random_state, compute_sample_weight
from ..utils.validation import NotFittedError, check_is_fitted
from ._tree import Criterion
from ._tree import Splitter
from ._tree import DepthFirstTreeBuilder, BestFirstTreeBuilder
from ._tree import Tree
from . import _tree
__all__ = ["DecisionTreeClassifier",
"DecisionTreeRegressor",
"ExtraTreeClassifier",
"ExtraTreeRegressor"]
# =============================================================================
# Types and constants
# =============================================================================
DTYPE = _tree.DTYPE
DOUBLE = _tree.DOUBLE
CRITERIA_CLF = {"gini": _tree.Gini, "entropy": _tree.Entropy}
CRITERIA_REG = {"mse": _tree.MSE, "friedman_mse": _tree.FriedmanMSE}
DENSE_SPLITTERS = {"best": _tree.BestSplitter,
"presort-best": _tree.PresortBestSplitter,
"random": _tree.RandomSplitter}
SPARSE_SPLITTERS = {"best": _tree.BestSparseSplitter,
"random": _tree.RandomSparseSplitter}
# =============================================================================
# Base decision tree
# =============================================================================
class BaseDecisionTree(six.with_metaclass(ABCMeta, BaseEstimator,
_LearntSelectorMixin)):
"""Base class for decision trees.
Warning: This class should not be used directly.
Use derived classes instead.
"""
@abstractmethod
def __init__(self,
criterion,
splitter,
max_depth,
min_samples_split,
min_samples_leaf,
min_weight_fraction_leaf,
max_features,
max_leaf_nodes,
random_state,
class_weight=None):
self.criterion = criterion
self.splitter = splitter
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.random_state = random_state
self.max_leaf_nodes = max_leaf_nodes
self.class_weight = class_weight
self.n_features_ = None
self.n_outputs_ = None
self.classes_ = None
self.n_classes_ = None
self.tree_ = None
self.max_features_ = None
def fit(self, X, y, sample_weight=None, check_input=True):
"""Build a decision tree from the training set (X, y).
Parameters
----------
X : array-like or sparse matrix, shape = [n_samples, n_features]
The training input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csc_matrix``.
y : array-like, shape = [n_samples] or [n_samples, n_outputs]
The target values (class labels in classification, real numbers in
regression). In the regression case, use ``dtype=np.float64`` and
``order='C'`` for maximum efficiency.
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted. Splits
that would create child nodes with net zero or negative weight are
ignored while searching for a split in each node. In the case of
classification, splits are also ignored if they would result in any
single class carrying a negative weight in either child node.
check_input : boolean, (default=True)
Allow to bypass several input checking.
Don't use this parameter unless you know what you do.
Returns
-------
self : object
Returns self.
"""
random_state = check_random_state(self.random_state)
if check_input:
X = check_array(X, dtype=DTYPE, accept_sparse="csc")
if issparse(X):
X.sort_indices()
if X.indices.dtype != np.intc or X.indptr.dtype != np.intc:
raise ValueError("No support for np.int64 index based "
"sparse matrices")
# Determine output settings
n_samples, self.n_features_ = X.shape
is_classification = isinstance(self, ClassifierMixin)
y = np.atleast_1d(y)
expanded_class_weight = None
if y.ndim == 1:
# reshape is necessary to preserve the data contiguity;
# [:, np.newaxis] does not preserve it.
y = np.reshape(y, (-1, 1))
self.n_outputs_ = y.shape[1]
if is_classification:
y = np.copy(y)
self.classes_ = []
self.n_classes_ = []
if self.class_weight is not None:
y_original = np.copy(y)
for k in range(self.n_outputs_):
classes_k, y[:, k] = np.unique(y[:, k], return_inverse=True)
self.classes_.append(classes_k)
self.n_classes_.append(classes_k.shape[0])
if self.class_weight is not None:
expanded_class_weight = compute_sample_weight(
self.class_weight, y_original)
else:
self.classes_ = [None] * self.n_outputs_
self.n_classes_ = [1] * self.n_outputs_
self.n_classes_ = np.array(self.n_classes_, dtype=np.intp)
if getattr(y, "dtype", None) != DOUBLE or not y.flags.contiguous:
y = np.ascontiguousarray(y, dtype=DOUBLE)
# Check parameters
max_depth = ((2 ** 31) - 1 if self.max_depth is None
else self.max_depth)
max_leaf_nodes = (-1 if self.max_leaf_nodes is None
else self.max_leaf_nodes)
if isinstance(self.max_features, six.string_types):
if self.max_features == "auto":
if is_classification:
max_features = max(1, int(np.sqrt(self.n_features_)))
else:
max_features = self.n_features_
elif self.max_features == "sqrt":
max_features = max(1, int(np.sqrt(self.n_features_)))
elif self.max_features == "log2":
max_features = max(1, int(np.log2(self.n_features_)))
else:
raise ValueError(
'Invalid value for max_features. Allowed string '
'values are "auto", "sqrt" or "log2".')
elif self.max_features is None:
max_features = self.n_features_
elif isinstance(self.max_features, (numbers.Integral, np.integer)):
max_features = self.max_features
else: # float
if self.max_features > 0.0:
max_features = max(1, int(self.max_features * self.n_features_))
else:
max_features = 0
self.max_features_ = max_features
if len(y) != n_samples:
raise ValueError("Number of labels=%d does not match "
"number of samples=%d" % (len(y), n_samples))
if self.min_samples_split <= 0:
raise ValueError("min_samples_split must be greater than zero.")
if self.min_samples_leaf <= 0:
raise ValueError("min_samples_leaf must be greater than zero.")
if not 0 <= self.min_weight_fraction_leaf <= 0.5:
raise ValueError("min_weight_fraction_leaf must in [0, 0.5]")
if max_depth <= 0:
raise ValueError("max_depth must be greater than zero. ")
if not (0 < max_features <= self.n_features_):
raise ValueError("max_features must be in (0, n_features]")
if not isinstance(max_leaf_nodes, (numbers.Integral, np.integer)):
raise ValueError("max_leaf_nodes must be integral number but was "
"%r" % max_leaf_nodes)
if -1 < max_leaf_nodes < 2:
raise ValueError(("max_leaf_nodes {0} must be either smaller than "
"0 or larger than 1").format(max_leaf_nodes))
if sample_weight is not None:
if (getattr(sample_weight, "dtype", None) != DOUBLE or
not sample_weight.flags.contiguous):
sample_weight = np.ascontiguousarray(
sample_weight, dtype=DOUBLE)
if len(sample_weight.shape) > 1:
raise ValueError("Sample weights array has more "
"than one dimension: %d" %
len(sample_weight.shape))
if len(sample_weight) != n_samples:
raise ValueError("Number of weights=%d does not match "
"number of samples=%d" %
(len(sample_weight), n_samples))
if expanded_class_weight is not None:
if sample_weight is not None:
sample_weight = sample_weight * expanded_class_weight
else:
sample_weight = expanded_class_weight
# Set min_weight_leaf from min_weight_fraction_leaf
if self.min_weight_fraction_leaf != 0. and sample_weight is not None:
min_weight_leaf = (self.min_weight_fraction_leaf *
np.sum(sample_weight))
else:
min_weight_leaf = 0.
# Set min_samples_split sensibly
min_samples_split = max(self.min_samples_split,
2 * self.min_samples_leaf)
# Build tree
criterion = self.criterion
if not isinstance(criterion, Criterion):
if is_classification:
criterion = CRITERIA_CLF[self.criterion](self.n_outputs_,
self.n_classes_)
else:
criterion = CRITERIA_REG[self.criterion](self.n_outputs_)
SPLITTERS = SPARSE_SPLITTERS if issparse(X) else DENSE_SPLITTERS
splitter = self.splitter
if not isinstance(self.splitter, Splitter):
splitter = SPLITTERS[self.splitter](criterion,
self.max_features_,
self.min_samples_leaf,
min_weight_leaf,
random_state)
self.tree_ = Tree(self.n_features_, self.n_classes_, self.n_outputs_)
# Use BestFirst if max_leaf_nodes given; use DepthFirst otherwise
if max_leaf_nodes < 0:
builder = DepthFirstTreeBuilder(splitter, min_samples_split,
self.min_samples_leaf,
min_weight_leaf,
max_depth)
else:
builder = BestFirstTreeBuilder(splitter, min_samples_split,
self.min_samples_leaf,
min_weight_leaf,
max_depth,
max_leaf_nodes)
builder.build(self.tree_, X, y, sample_weight)
if self.n_outputs_ == 1:
self.n_classes_ = self.n_classes_[0]
self.classes_ = self.classes_[0]
return self
def predict(self, X):
"""Predict class or regression value for X.
For a classification model, the predicted class for each sample in X is
returned. For a regression model, the predicted value based on X is
returned.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
y : array of shape = [n_samples] or [n_samples, n_outputs]
The predicted classes, or the predict values.
"""
X = check_array(X, dtype=DTYPE, accept_sparse="csr")
if issparse(X) and (X.indices.dtype != np.intc or
X.indptr.dtype != np.intc):
raise ValueError("No support for np.int64 index based "
"sparse matrices")
n_samples, n_features = X.shape
if self.tree_ is None:
raise NotFittedError("Tree not initialized. Perform a fit first")
if self.n_features_ != n_features:
raise ValueError("Number of features of the model must "
" match the input. Model n_features is %s and "
" input n_features is %s "
% (self.n_features_, n_features))
proba = self.tree_.predict(X)
# Classification
if isinstance(self, ClassifierMixin):
if self.n_outputs_ == 1:
return self.classes_.take(np.argmax(proba, axis=1), axis=0)
else:
predictions = np.zeros((n_samples, self.n_outputs_))
for k in range(self.n_outputs_):
predictions[:, k] = self.classes_[k].take(
np.argmax(proba[:, k], axis=1),
axis=0)
return predictions
# Regression
else:
if self.n_outputs_ == 1:
return proba[:, 0]
else:
return proba[:, :, 0]
@property
def feature_importances_(self):
"""Return the feature importances.
The importance of a feature is computed as the (normalized) total
reduction of the criterion brought by that feature.
It is also known as the Gini importance.
Returns
-------
feature_importances_ : array, shape = [n_features]
"""
if self.tree_ is None:
raise NotFittedError("Estimator not fitted, call `fit` before"
" `feature_importances_`.")
return self.tree_.compute_feature_importances()
# =============================================================================
# Public estimators
# =============================================================================
class DecisionTreeClassifier(BaseDecisionTree, ClassifierMixin):
"""A decision tree classifier.
Parameters
----------
criterion : string, optional (default="gini")
The function to measure the quality of a split. Supported criteria are
"gini" for the Gini impurity and "entropy" for the information gain.
splitter : string, optional (default="best")
The strategy used to choose the split at each node. Supported
strategies are "best" to choose the best split and "random" to choose
the best random split.
max_features : int, float, string or None, optional (default=None)
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=sqrt(n_features)`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
max_depth : int or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
Ignored if ``max_leaf_nodes`` is not None.
min_samples_split : int, optional (default=2)
The minimum number of samples required to split an internal node.
min_samples_leaf : int, optional (default=1)
The minimum number of samples required to be at a leaf node.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
max_leaf_nodes : int or None, optional (default=None)
Grow a tree with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
class_weight : dict, list of dicts, "auto" or None, optional (default=None)
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one. For
multi-output problems, a list of dicts can be provided in the same
order as the columns of y.
The "auto" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data.
For multi-output, the weights of each column of y will be multiplied.
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
tree_ : Tree object
The underlying Tree object.
max_features_ : int,
The inferred value of max_features.
classes_ : array of shape = [n_classes] or a list of such arrays
The classes labels (single output problem),
or a list of arrays of class labels (multi-output problem).
n_classes_ : int or list
The number of classes (for single output problems),
or a list containing the number of classes for each
output (for multi-output problems).
feature_importances_ : array of shape = [n_features]
The feature importances. The higher, the more important the
feature. The importance of a feature is computed as the (normalized)
total reduction of the criterion brought by that feature. It is also
known as the Gini importance [4]_.
See also
--------
DecisionTreeRegressor
References
----------
.. [1] http://en.wikipedia.org/wiki/Decision_tree_learning
.. [2] L. Breiman, J. Friedman, R. Olshen, and C. Stone, "Classification
and Regression Trees", Wadsworth, Belmont, CA, 1984.
.. [3] T. Hastie, R. Tibshirani and J. Friedman. "Elements of Statistical
Learning", Springer, 2009.
.. [4] L. Breiman, and A. Cutler, "Random Forests",
http://www.stat.berkeley.edu/~breiman/RandomForests/cc_home.htm
Examples
--------
>>> from sklearn.datasets import load_iris
>>> from sklearn.cross_validation import cross_val_score
>>> from sklearn.tree import DecisionTreeClassifier
>>> clf = DecisionTreeClassifier(random_state=0)
>>> iris = load_iris()
>>> cross_val_score(clf, iris.data, iris.target, cv=10)
... # doctest: +SKIP
...
array([ 1. , 0.93..., 0.86..., 0.93..., 0.93...,
0.93..., 0.93..., 1. , 0.93..., 1. ])
"""
def __init__(self,
criterion="gini",
splitter="best",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features=None,
random_state=None,
max_leaf_nodes=None,
class_weight=None):
super(DecisionTreeClassifier, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
class_weight=class_weight,
random_state=random_state)
def predict_proba(self, X):
"""Predict class probabilities of the input samples X.
The predicted class probability is the fraction of samples of the same
class in a leaf.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
such arrays if n_outputs > 1.
The class probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
check_is_fitted(self, 'n_outputs_')
X = check_array(X, dtype=DTYPE, accept_sparse="csr")
if issparse(X) and (X.indices.dtype != np.intc or
X.indptr.dtype != np.intc):
raise ValueError("No support for np.int64 index based "
"sparse matrices")
n_samples, n_features = X.shape
if self.tree_ is None:
raise NotFittedError("Tree not initialized. Perform a fit first.")
if self.n_features_ != n_features:
raise ValueError("Number of features of the model must "
" match the input. Model n_features is %s and "
" input n_features is %s "
% (self.n_features_, n_features))
proba = self.tree_.predict(X)
if self.n_outputs_ == 1:
proba = proba[:, :self.n_classes_]
normalizer = proba.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
proba /= normalizer
return proba
else:
all_proba = []
for k in range(self.n_outputs_):
proba_k = proba[:, k, :self.n_classes_[k]]
normalizer = proba_k.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
proba_k /= normalizer
all_proba.append(proba_k)
return all_proba
def predict_log_proba(self, X):
"""Predict class log-probabilities of the input samples X.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
such arrays if n_outputs > 1.
The class log-probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
proba = self.predict_proba(X)
if self.n_outputs_ == 1:
return np.log(proba)
else:
for k in range(self.n_outputs_):
proba[k] = np.log(proba[k])
return proba
class DecisionTreeRegressor(BaseDecisionTree, RegressorMixin):
"""A decision tree regressor.
Parameters
----------
criterion : string, optional (default="mse")
The function to measure the quality of a split. The only supported
criterion is "mse" for the mean squared error.
splitter : string, optional (default="best")
The strategy used to choose the split at each node. Supported
strategies are "best" to choose the best split and "random" to choose
the best random split.
max_features : int, float, string or None, optional (default=None)
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=n_features`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
max_depth : int or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
Ignored if ``max_leaf_nodes`` is not None.
min_samples_split : int, optional (default=2)
The minimum number of samples required to split an internal node.
min_samples_leaf : int, optional (default=1)
The minimum number of samples required to be at a leaf node.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
max_leaf_nodes : int or None, optional (default=None)
Grow a tree with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
tree_ : Tree object
The underlying Tree object.
max_features_ : int,
The inferred value of max_features.
feature_importances_ : array of shape = [n_features]
The feature importances.
The higher, the more important the feature.
The importance of a feature is computed as the
(normalized) total reduction of the criterion brought
by that feature. It is also known as the Gini importance [4]_.
See also
--------
DecisionTreeClassifier
References
----------
.. [1] http://en.wikipedia.org/wiki/Decision_tree_learning
.. [2] L. Breiman, J. Friedman, R. Olshen, and C. Stone, "Classification
and Regression Trees", Wadsworth, Belmont, CA, 1984.
.. [3] T. Hastie, R. Tibshirani and J. Friedman. "Elements of Statistical
Learning", Springer, 2009.
.. [4] L. Breiman, and A. Cutler, "Random Forests",
http://www.stat.berkeley.edu/~breiman/RandomForests/cc_home.htm
Examples
--------
>>> from sklearn.datasets import load_boston
>>> from sklearn.cross_validation import cross_val_score
>>> from sklearn.tree import DecisionTreeRegressor
>>> boston = load_boston()
>>> regressor = DecisionTreeRegressor(random_state=0)
>>> cross_val_score(regressor, boston.data, boston.target, cv=10)
... # doctest: +SKIP
...
array([ 0.61..., 0.57..., -0.34..., 0.41..., 0.75...,
0.07..., 0.29..., 0.33..., -1.42..., -1.77...])
"""
def __init__(self,
criterion="mse",
splitter="best",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features=None,
random_state=None,
max_leaf_nodes=None):
super(DecisionTreeRegressor, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
random_state=random_state)
class ExtraTreeClassifier(DecisionTreeClassifier):
"""An extremely randomized tree classifier.
Extra-trees differ from classic decision trees in the way they are built.
When looking for the best split to separate the samples of a node into two
groups, random splits are drawn for each of the `max_features` randomly
selected features and the best split among those is chosen. When
`max_features` is set to 1, this amounts to building a totally random
decision tree.
Warning: Extra-trees should only be used within ensemble methods.
See also
--------
ExtraTreeRegressor, ExtraTreesClassifier, ExtraTreesRegressor
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
"""
def __init__(self,
criterion="gini",
splitter="random",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
random_state=None,
max_leaf_nodes=None,
class_weight=None):
super(ExtraTreeClassifier, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
class_weight=class_weight,
random_state=random_state)
class ExtraTreeRegressor(DecisionTreeRegressor):
"""An extremely randomized tree regressor.
Extra-trees differ from classic decision trees in the way they are built.
When looking for the best split to separate the samples of a node into two
groups, random splits are drawn for each of the `max_features` randomly
selected features and the best split among those is chosen. When
`max_features` is set to 1, this amounts to building a totally random
decision tree.
Warning: Extra-trees should only be used within ensemble methods.
See also
--------
ExtraTreeClassifier, ExtraTreesClassifier, ExtraTreesRegressor
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
"""
def __init__(self,
criterion="mse",
splitter="random",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
random_state=None,
max_leaf_nodes=None):
super(ExtraTreeRegressor, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
random_state=random_state)
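# =============================================================================
# Illustrative usage sketch (not part of the library module): fit a
# DecisionTreeClassifier and inspect the leaf-fraction class probabilities
# documented in ``predict_proba`` above. Guarded so it never runs on import.
# =============================================================================
if __name__ == "__main__":
    from sklearn.datasets import load_iris

    iris = load_iris()
    clf = DecisionTreeClassifier(max_depth=3, random_state=0)
    clf.fit(iris.data, iris.target)
    # per-class probabilities sum to one for each sample
    print(clf.predict(iris.data[:5]))
    print(clf.predict_proba(iris.data[:5]))
    print(clf.feature_importances_)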
|
bsd-3-clause
|
eg-zhang/scikit-learn
|
sklearn/linear_model/tests/test_base.py
|
101
|
12205
|
# Author: Alexandre Gramfort <[email protected]>
# Fabian Pedregosa <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.linear_model.base import LinearRegression
from sklearn.linear_model.base import center_data, sparse_center_data, _rescale_data
from sklearn.utils import check_random_state
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_greater
from sklearn.datasets.samples_generator import make_sparse_uncorrelated
from sklearn.datasets.samples_generator import make_regression
def test_linear_regression():
# Test LinearRegression on a simple dataset.
# a simple dataset
X = [[1], [2]]
Y = [1, 2]
clf = LinearRegression()
clf.fit(X, Y)
assert_array_almost_equal(clf.coef_, [1])
assert_array_almost_equal(clf.intercept_, [0])
assert_array_almost_equal(clf.predict(X), [1, 2])
# test it also for degenerate input
X = [[1]]
Y = [0]
clf = LinearRegression()
clf.fit(X, Y)
assert_array_almost_equal(clf.coef_, [0])
assert_array_almost_equal(clf.intercept_, [0])
assert_array_almost_equal(clf.predict(X), [0])
def test_linear_regression_sample_weights():
rng = np.random.RandomState(0)
for n_samples, n_features in ((6, 5), (5, 10)):
y = rng.randn(n_samples)
X = rng.randn(n_samples, n_features)
sample_weight = 1.0 + rng.rand(n_samples)
clf = LinearRegression()
clf.fit(X, y, sample_weight)
coefs1 = clf.coef_
assert_equal(clf.coef_.shape, (X.shape[1], ))
assert_greater(clf.score(X, y), 0.9)
assert_array_almost_equal(clf.predict(X), y)
# Sample weight can be implemented via a simple rescaling
# for the square loss.
scaled_y = y * np.sqrt(sample_weight)
scaled_X = X * np.sqrt(sample_weight)[:, np.newaxis]
clf.fit(scaled_X, scaled_y)
coefs2 = clf.coef_
assert_array_almost_equal(coefs1, coefs2)
def test_raises_value_error_if_sample_weights_greater_than_1d():
# Sample weights must be either scalar or 1D
n_sampless = [2, 3]
n_featuress = [3, 2]
rng = np.random.RandomState(42)
for n_samples, n_features in zip(n_sampless, n_featuress):
X = rng.randn(n_samples, n_features)
y = rng.randn(n_samples)
sample_weights_OK = rng.randn(n_samples) ** 2 + 1
sample_weights_OK_1 = 1.
sample_weights_OK_2 = 2.
clf = LinearRegression()
# make sure the "OK" sample weights actually work
clf.fit(X, y, sample_weights_OK)
clf.fit(X, y, sample_weights_OK_1)
clf.fit(X, y, sample_weights_OK_2)
def test_fit_intercept():
# Test assertions on betas shape.
X2 = np.array([[0.38349978, 0.61650022],
[0.58853682, 0.41146318]])
X3 = np.array([[0.27677969, 0.70693172, 0.01628859],
[0.08385139, 0.20692515, 0.70922346]])
y = np.array([1, 1])
lr2_without_intercept = LinearRegression(fit_intercept=False).fit(X2, y)
lr2_with_intercept = LinearRegression(fit_intercept=True).fit(X2, y)
lr3_without_intercept = LinearRegression(fit_intercept=False).fit(X3, y)
lr3_with_intercept = LinearRegression(fit_intercept=True).fit(X3, y)
assert_equal(lr2_with_intercept.coef_.shape,
lr2_without_intercept.coef_.shape)
assert_equal(lr3_with_intercept.coef_.shape,
lr3_without_intercept.coef_.shape)
assert_equal(lr2_without_intercept.coef_.ndim,
lr3_without_intercept.coef_.ndim)
def test_linear_regression_sparse(random_state=0):
"Test that linear regression also works with sparse data"
random_state = check_random_state(random_state)
for i in range(10):
n = 100
X = sparse.eye(n, n)
beta = random_state.rand(n)
y = X * beta[:, np.newaxis]
ols = LinearRegression()
ols.fit(X, y.ravel())
assert_array_almost_equal(beta, ols.coef_ + ols.intercept_)
assert_array_almost_equal(ols.residues_, 0)
def test_linear_regression_multiple_outcome(random_state=0):
"Test multiple-outcome linear regressions"
X, y = make_regression(random_state=random_state)
Y = np.vstack((y, y)).T
n_features = X.shape[1]
clf = LinearRegression(fit_intercept=True)
clf.fit((X), Y)
assert_equal(clf.coef_.shape, (2, n_features))
Y_pred = clf.predict(X)
clf.fit(X, y)
y_pred = clf.predict(X)
assert_array_almost_equal(np.vstack((y_pred, y_pred)).T, Y_pred, decimal=3)
def test_linear_regression_sparse_multiple_outcome(random_state=0):
"Test multiple-outcome linear regressions with sparse data"
random_state = check_random_state(random_state)
X, y = make_sparse_uncorrelated(random_state=random_state)
X = sparse.coo_matrix(X)
Y = np.vstack((y, y)).T
n_features = X.shape[1]
ols = LinearRegression()
ols.fit(X, Y)
assert_equal(ols.coef_.shape, (2, n_features))
Y_pred = ols.predict(X)
ols.fit(X, y.ravel())
y_pred = ols.predict(X)
assert_array_almost_equal(np.vstack((y_pred, y_pred)).T, Y_pred, decimal=3)
def test_center_data():
n_samples = 200
n_features = 2
rng = check_random_state(0)
X = rng.rand(n_samples, n_features)
y = rng.rand(n_samples)
expected_X_mean = np.mean(X, axis=0)
# XXX: currently scaled to variance=n_samples
expected_X_std = np.std(X, axis=0) * np.sqrt(X.shape[0])
expected_y_mean = np.mean(y, axis=0)
Xt, yt, X_mean, y_mean, X_std = center_data(X, y, fit_intercept=False,
normalize=False)
assert_array_almost_equal(X_mean, np.zeros(n_features))
assert_array_almost_equal(y_mean, 0)
assert_array_almost_equal(X_std, np.ones(n_features))
assert_array_almost_equal(Xt, X)
assert_array_almost_equal(yt, y)
Xt, yt, X_mean, y_mean, X_std = center_data(X, y, fit_intercept=True,
normalize=False)
assert_array_almost_equal(X_mean, expected_X_mean)
assert_array_almost_equal(y_mean, expected_y_mean)
assert_array_almost_equal(X_std, np.ones(n_features))
assert_array_almost_equal(Xt, X - expected_X_mean)
assert_array_almost_equal(yt, y - expected_y_mean)
Xt, yt, X_mean, y_mean, X_std = center_data(X, y, fit_intercept=True,
normalize=True)
assert_array_almost_equal(X_mean, expected_X_mean)
assert_array_almost_equal(y_mean, expected_y_mean)
assert_array_almost_equal(X_std, expected_X_std)
assert_array_almost_equal(Xt, (X - expected_X_mean) / expected_X_std)
assert_array_almost_equal(yt, y - expected_y_mean)
def test_center_data_multioutput():
n_samples = 200
n_features = 3
n_outputs = 2
rng = check_random_state(0)
X = rng.rand(n_samples, n_features)
y = rng.rand(n_samples, n_outputs)
expected_y_mean = np.mean(y, axis=0)
args = [(center_data, X), (sparse_center_data, sparse.csc_matrix(X))]
for center, X in args:
_, yt, _, y_mean, _ = center(X, y, fit_intercept=False,
normalize=False)
assert_array_almost_equal(y_mean, np.zeros(n_outputs))
assert_array_almost_equal(yt, y)
_, yt, _, y_mean, _ = center(X, y, fit_intercept=True,
normalize=False)
assert_array_almost_equal(y_mean, expected_y_mean)
assert_array_almost_equal(yt, y - y_mean)
_, yt, _, y_mean, _ = center(X, y, fit_intercept=True,
normalize=True)
assert_array_almost_equal(y_mean, expected_y_mean)
assert_array_almost_equal(yt, y - y_mean)
def test_center_data_weighted():
n_samples = 200
n_features = 2
rng = check_random_state(0)
X = rng.rand(n_samples, n_features)
y = rng.rand(n_samples)
sample_weight = rng.rand(n_samples)
expected_X_mean = np.average(X, axis=0, weights=sample_weight)
expected_y_mean = np.average(y, axis=0, weights=sample_weight)
# XXX: if normalize=True, should we expect a weighted standard deviation?
# Currently not weighted, but calculated with respect to weighted mean
# XXX: currently scaled to variance=n_samples
expected_X_std = (np.sqrt(X.shape[0]) *
np.mean((X - expected_X_mean) ** 2, axis=0) ** .5)
Xt, yt, X_mean, y_mean, X_std = center_data(X, y, fit_intercept=True,
normalize=False,
sample_weight=sample_weight)
assert_array_almost_equal(X_mean, expected_X_mean)
assert_array_almost_equal(y_mean, expected_y_mean)
assert_array_almost_equal(X_std, np.ones(n_features))
assert_array_almost_equal(Xt, X - expected_X_mean)
assert_array_almost_equal(yt, y - expected_y_mean)
Xt, yt, X_mean, y_mean, X_std = center_data(X, y, fit_intercept=True,
normalize=True,
sample_weight=sample_weight)
assert_array_almost_equal(X_mean, expected_X_mean)
assert_array_almost_equal(y_mean, expected_y_mean)
assert_array_almost_equal(X_std, expected_X_std)
assert_array_almost_equal(Xt, (X - expected_X_mean) / expected_X_std)
assert_array_almost_equal(yt, y - expected_y_mean)
def test_sparse_center_data():
n_samples = 200
n_features = 2
rng = check_random_state(0)
# random_state not supported yet in sparse.rand
X = sparse.rand(n_samples, n_features, density=.5) # , random_state=rng
X = X.tolil()
y = rng.rand(n_samples)
XA = X.toarray()
# XXX: currently scaled to variance=n_samples
expected_X_std = np.std(XA, axis=0) * np.sqrt(X.shape[0])
Xt, yt, X_mean, y_mean, X_std = sparse_center_data(X, y,
fit_intercept=False,
normalize=False)
assert_array_almost_equal(X_mean, np.zeros(n_features))
assert_array_almost_equal(y_mean, 0)
assert_array_almost_equal(X_std, np.ones(n_features))
assert_array_almost_equal(Xt.A, XA)
assert_array_almost_equal(yt, y)
Xt, yt, X_mean, y_mean, X_std = sparse_center_data(X, y,
fit_intercept=True,
normalize=False)
assert_array_almost_equal(X_mean, np.mean(XA, axis=0))
assert_array_almost_equal(y_mean, np.mean(y, axis=0))
assert_array_almost_equal(X_std, np.ones(n_features))
assert_array_almost_equal(Xt.A, XA)
assert_array_almost_equal(yt, y - np.mean(y, axis=0))
Xt, yt, X_mean, y_mean, X_std = sparse_center_data(X, y,
fit_intercept=True,
normalize=True)
assert_array_almost_equal(X_mean, np.mean(XA, axis=0))
assert_array_almost_equal(y_mean, np.mean(y, axis=0))
assert_array_almost_equal(X_std, expected_X_std)
assert_array_almost_equal(Xt.A, XA / expected_X_std)
assert_array_almost_equal(yt, y - np.mean(y, axis=0))
def test_csr_sparse_center_data():
# Test output format of sparse_center_data, when input is csr
X, y = make_regression()
X[X < 2.5] = 0.0
csr = sparse.csr_matrix(X)
csr_, y, _, _, _ = sparse_center_data(csr, y, True)
assert_equal(csr_.getformat(), 'csr')
def test_rescale_data():
n_samples = 200
n_features = 2
rng = np.random.RandomState(0)
sample_weight = 1.0 + rng.rand(n_samples)
X = rng.rand(n_samples, n_features)
y = rng.rand(n_samples)
rescaled_X, rescaled_y = _rescale_data(X, y, sample_weight)
rescaled_X2 = X * np.sqrt(sample_weight)[:, np.newaxis]
rescaled_y2 = y * np.sqrt(sample_weight)
assert_array_almost_equal(rescaled_X, rescaled_X2)
assert_array_almost_equal(rescaled_y, rescaled_y2)
|
bsd-3-clause
|
mne-tools/mne-tools.github.io
|
0.19/_downloads/0c99a413293e0e0befc94b3f0775629b/plot_modifying_data_inplace.py
|
4
|
2788
|
"""
.. _tut_modifying_data_inplace:
Modifying data in-place
=======================
It is often necessary to modify data once you have loaded it into memory.
Common examples of this are signal processing, feature extraction, and data
cleaning. Some functionality is pre-built into MNE-python, though it is also
possible to apply an arbitrary function to the data.
"""
import mne
import os.path as op
import numpy as np
from matplotlib import pyplot as plt
###############################################################################
# Load an example dataset; the preload flag loads the data into memory now
data_path = op.join(mne.datasets.sample.data_path(), 'MEG',
'sample', 'sample_audvis_raw.fif')
raw = mne.io.read_raw_fif(data_path, preload=True)
raw = raw.crop(0, 10)
print(raw)
###############################################################################
# Signal processing
# -----------------
#
# Most MNE objects have in-built methods for filtering:
filt_bands = [(1, 3), (3, 10), (10, 20), (20, 60)]
_, (ax, ax2) = plt.subplots(2, 1, figsize=(15, 10))
data, times = raw[0]
_ = ax.plot(data[0])
for fmin, fmax in filt_bands:
raw_filt = raw.copy()
raw_filt.filter(fmin, fmax, fir_design='firwin')
_ = ax2.plot(raw_filt[0][0][0])
ax2.legend(filt_bands)
ax.set_title('Raw data')
ax2.set_title('Band-pass filtered data')
###############################################################################
# In addition, there are functions for applying the Hilbert transform, which is
# useful to calculate phase / amplitude of your signal.
# Filter signal with a fairly steep filter, then take hilbert transform
raw_band = raw.copy()
raw_band.filter(12, 18, l_trans_bandwidth=2., h_trans_bandwidth=2.,
fir_design='firwin')
raw_hilb = raw_band.copy()
hilb_picks = mne.pick_types(raw_band.info, meg=False, eeg=True)
raw_hilb.apply_hilbert(hilb_picks)
print(raw_hilb[0][0].dtype)
###############################################################################
# Finally, it is possible to apply arbitrary functions to your data to do
# what you want. Here we will use this to take the amplitude and phase of
# the Hilbert-transformed data.
#
# .. note:: You can also use ``envelope=True`` in the call to
#           :meth:`mne.io.Raw.apply_hilbert` to do this automatically.
#
# Take the amplitude and phase
raw_amp = raw_hilb.copy()
raw_amp.apply_function(np.abs, hilb_picks)
raw_phase = raw_hilb.copy()
raw_phase.apply_function(np.angle, hilb_picks)
_, (a1, a2) = plt.subplots(2, 1, figsize=(15, 10))
a1.plot(raw_band[hilb_picks[0]][0][0].real)
a1.plot(raw_amp[hilb_picks[0]][0][0].real)
a2.plot(raw_phase[hilb_picks[0]][0][0].real)
a1.set_title('Amplitude of frequency band')
a2.set_title('Phase of frequency band')
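###############################################################################
# A minimal cross-check sketch (re-using ``raw_band`` and ``hilb_picks`` from
# above, and assuming an MNE version whose ``apply_hilbert`` accepts
# ``envelope=True``): the envelope computed directly should match the
# amplitude obtained above with ``np.abs``, up to numerical precision.
raw_env = raw_band.copy()
raw_env.apply_hilbert(hilb_picks, envelope=True)
print(np.allclose(raw_env[hilb_picks[0]][0],
                  raw_amp[hilb_picks[0]][0].real))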
|
bsd-3-clause
|
MaartenGr/BERTopic
|
bertopic/plotting/_heatmap.py
|
1
|
4174
|
import numpy as np
from typing import List
from scipy.cluster.hierarchy import fcluster, linkage
from sklearn.metrics.pairwise import cosine_similarity
import plotly.express as px
import plotly.graph_objects as go
def visualize_heatmap(topic_model,
topics: List[int] = None,
top_n_topics: int = None,
n_clusters: int = None,
width: int = 800,
height: int = 800) -> go.Figure:
""" Visualize a heatmap of the topic's similarity matrix
Based on the cosine similarity matrix between topic embeddings,
a heatmap is created showing the similarity between topics.
Arguments:
topic_model: A fitted BERTopic instance.
topics: A selection of topics to visualize.
top_n_topics: Only select the top n most frequent topics.
n_clusters: Create n clusters and order the similarity
matrix by those clusters.
width: The width of the figure.
height: The height of the figure.
Returns:
fig: A plotly figure
Usage:
To visualize the similarity matrix of
topics simply run:
```python
topic_model.visualize_heatmap()
```
Or if you want to save the resulting figure:
```python
fig = topic_model.visualize_heatmap()
fig.write_html("path/to/file.html")
```
<iframe src="../../tutorial/visualization/heatmap.html"
style="width:1000px; height: 720px; border: 0px;""></iframe>
"""
# Select topic embeddings
if topic_model.topic_embeddings is not None:
embeddings = np.array(topic_model.topic_embeddings)
else:
embeddings = topic_model.c_tf_idf
# Select topics based on top_n and topics args
if topics is not None:
topics = list(topics)
elif top_n_topics is not None:
topics = sorted(topic_model.get_topic_freq().Topic.to_list()[1:top_n_topics + 1])
else:
topics = sorted(list(topic_model.get_topics().keys()))
# Order heatmap by similar clusters of topics
if n_clusters:
if n_clusters >= len(set(topics)):
raise ValueError("Make sure to set `n_clusters` lower than "
"the total number of unique topics.")
embeddings = embeddings[[topic + 1 for topic in topics]]
distance_matrix = cosine_similarity(embeddings)
Z = linkage(distance_matrix, 'ward')
clusters = fcluster(Z, t=n_clusters, criterion='maxclust')
# Extract new order of topics
mapping = {cluster: [] for cluster in clusters}
for topic, cluster in zip(topics, clusters):
mapping[cluster].append(topic)
mapping = [cluster for cluster in mapping.values()]
sorted_topics = [topic for cluster in mapping for topic in cluster]
else:
sorted_topics = topics
# Select embeddings
indices = np.array([topics.index(topic) for topic in sorted_topics])
embeddings = embeddings[indices]
distance_matrix = cosine_similarity(embeddings)
# Create nicer labels
new_labels = [[[str(topic), None]] + topic_model.get_topic(topic) for topic in sorted_topics]
new_labels = ["_".join([label[0] for label in labels[:4]]) for labels in new_labels]
new_labels = [label if len(label) < 30 else label[:27] + "..." for label in new_labels]
fig = px.imshow(distance_matrix,
labels=dict(color="Similarity Score"),
x=new_labels,
y=new_labels,
color_continuous_scale='GnBu'
)
fig.update_layout(
title={
'text': "<b>Similarity Matrix",
'y': .95,
'x': 0.55,
'xanchor': 'center',
'yanchor': 'top',
'font': dict(
size=22,
color="Black")
},
width=width,
height=height,
hoverlabel=dict(
bgcolor="white",
font_size=16,
font_family="Rockwell"
),
)
fig.update_layout(showlegend=True)
fig.update_layout(legend_title_text='Trend')
return fig
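# ---------------------------------------------------------------------
# Self-contained sketch of the reordering step used above (toy embeddings,
# not real BERTopic output): cluster a cosine-similarity matrix with Ward
# linkage and derive a cluster-ordered topic sequence. Relies only on the
# module's existing imports (numpy, scipy, scikit-learn).
if __name__ == "__main__":
    rng = np.random.RandomState(0)
    toy_topics = list(range(6))
    toy_embeddings = rng.rand(6, 5)
    sim = cosine_similarity(toy_embeddings)
    Z = linkage(sim, 'ward')
    clusters = fcluster(Z, t=2, criterion='maxclust')
    # group topics by cluster label, mirroring the mapping logic above
    ordered = [t for c in sorted(set(clusters))
               for t, tc in zip(toy_topics, clusters) if tc == c]
    print(clusters)
    print(ordered)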
|
mit
|
dsullivan7/scikit-learn
|
examples/cluster/plot_cluster_comparison.py
|
246
|
4684
|
"""
=========================================================
Comparing different clustering algorithms on toy datasets
=========================================================
This example aims at showing characteristics of different
clustering algorithms on datasets that are "interesting"
but still in 2D. The last dataset is an example of a 'null'
situation for clustering: the data is homogeneous, and
there is no good clustering.
While these examples give some intuition about the algorithms,
this intuition might not apply to very high dimensional data.
The results could be improved by tweaking the parameters for
each clustering strategy, for instance setting the number of
clusters for the methods that need this parameter
specified. Note that affinity propagation has a tendency to
create many clusters. Thus in this example its two parameters
(damping and per-point preference) were set to mitigate this
behavior.
"""
print(__doc__)
import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn import cluster, datasets
from sklearn.neighbors import kneighbors_graph
from sklearn.preprocessing import StandardScaler
np.random.seed(0)
# Generate datasets. We choose the size big enough to see the scalability
# of the algorithms, but not too big to avoid too long running times
n_samples = 1500
noisy_circles = datasets.make_circles(n_samples=n_samples, factor=.5,
noise=.05)
noisy_moons = datasets.make_moons(n_samples=n_samples, noise=.05)
blobs = datasets.make_blobs(n_samples=n_samples, random_state=8)
no_structure = np.random.rand(n_samples, 2), None
colors = np.array([x for x in 'bgrcmykbgrcmykbgrcmykbgrcmyk'])
colors = np.hstack([colors] * 20)
clustering_names = [
'MiniBatchKMeans', 'AffinityPropagation', 'MeanShift',
'SpectralClustering', 'Ward', 'AgglomerativeClustering',
'DBSCAN', 'Birch']
plt.figure(figsize=(len(clustering_names) * 2 + 3, 9.5))
plt.subplots_adjust(left=.02, right=.98, bottom=.001, top=.96, wspace=.05,
hspace=.01)
plot_num = 1
datasets = [noisy_circles, noisy_moons, blobs, no_structure]
for i_dataset, dataset in enumerate(datasets):
X, y = dataset
# normalize dataset for easier parameter selection
X = StandardScaler().fit_transform(X)
# estimate bandwidth for mean shift
bandwidth = cluster.estimate_bandwidth(X, quantile=0.3)
# connectivity matrix for structured Ward
connectivity = kneighbors_graph(X, n_neighbors=10, include_self=False)
# make connectivity symmetric
connectivity = 0.5 * (connectivity + connectivity.T)
# create clustering estimators
ms = cluster.MeanShift(bandwidth=bandwidth, bin_seeding=True)
two_means = cluster.MiniBatchKMeans(n_clusters=2)
ward = cluster.AgglomerativeClustering(n_clusters=2, linkage='ward',
connectivity=connectivity)
spectral = cluster.SpectralClustering(n_clusters=2,
eigen_solver='arpack',
affinity="nearest_neighbors")
dbscan = cluster.DBSCAN(eps=.2)
affinity_propagation = cluster.AffinityPropagation(damping=.9,
preference=-200)
average_linkage = cluster.AgglomerativeClustering(
linkage="average", affinity="cityblock", n_clusters=2,
connectivity=connectivity)
birch = cluster.Birch(n_clusters=2)
clustering_algorithms = [
two_means, affinity_propagation, ms, spectral, ward, average_linkage,
dbscan, birch]
for name, algorithm in zip(clustering_names, clustering_algorithms):
# predict cluster memberships
t0 = time.time()
algorithm.fit(X)
t1 = time.time()
if hasattr(algorithm, 'labels_'):
y_pred = algorithm.labels_.astype(np.int)
else:
y_pred = algorithm.predict(X)
# plot
plt.subplot(4, len(clustering_algorithms), plot_num)
if i_dataset == 0:
plt.title(name, size=18)
plt.scatter(X[:, 0], X[:, 1], color=colors[y_pred].tolist(), s=10)
if hasattr(algorithm, 'cluster_centers_'):
centers = algorithm.cluster_centers_
center_colors = colors[:len(centers)]
plt.scatter(centers[:, 0], centers[:, 1], s=100, c=center_colors)
plt.xlim(-2, 2)
plt.ylim(-2, 2)
plt.xticks(())
plt.yticks(())
plt.text(.99, .01, ('%.2fs' % (t1 - t0)).lstrip('0'),
transform=plt.gca().transAxes, size=15,
horizontalalignment='right')
plot_num += 1
plt.show()
|
bsd-3-clause
|
dilanfd/fremont_bridge_bike_analysis
|
module_code/data.py
|
1
|
1055
|
"""This modules helps download data."""
import pandas as pd
import os
from urllib.request import urlretrieve
FREMONT_URL = 'https://data.seattle.gov/api/views/65db-xm6k/rows.csv?accessType=DOWNLOAD'
# We will need to reset and re-run all kernels from time to time.
# If we leave the notebook as is, it will re-download the potentially large
# fremont.csv data set every time it runs. So we want to make sure we only
# download it if it is not already available. For that we create a function.
def get_fremont_data(filename = 'fremont.csv',
url=FREMONT_URL, force_download=False):
""" download and cache fremont bike data set"""
if force_download or not os.path.exists(filename):
urlretrieve(url, filename)
df = pd.read_csv(filename, index_col='Date')
df.index = pd.to_datetime(df.index)
df.columns = ['West', 'East']
df['Total'] = df['West'] + df['East']
return df
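# A minimal usage sketch (the first call needs network access to download the
# CSV; later calls reuse the cached 'fremont.csv'; the weekly resample is just
# an illustration of the DatetimeIndex set up above):
if __name__ == '__main__':
    df = get_fremont_data()
    print(df.head())
    print(df.resample('W').sum().head())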
|
gpl-3.0
|
Rathcke/uni
|
ml/exam/src/q1.py
|
2
|
4966
|
import numpy as np
from numpy.linalg import *
import matplotlib.pyplot as plt
import pylab as P
from sklearn.preprocessing import normalize
### Load data ###
def loadData(fname):
data = []
with open(fname, 'r') as f:
for l in f:
data.append(l.split(","))
f.close
return np.array(data, float)
data = loadData('ML2015TrafficSignsTrain.csv')
classes = data[:, 1568]
data = data[:, 0:1568]
### Normalize data ###
meanVec = np.mean(data, axis=0)
varVec = np.var(data, axis=0)
for i in range(len(data)):
data[i] = data[i] - meanVec
data[i] = data[i] / np.sqrt(varVec)
### Histogram ###
def makeFreqHistogram(size, classes):
P.figure()
n, bins, patches = P.hist(classes, size, normed=1, histtype='bar',
color=['red'],
label=['Traffic sign frequency'])
P.legend()
P.show()
return
makeFreqHistogram(43, classes)
### PCA ###
def pca(data, classes):
data = data[:, 0:1568]
meanV = np.mean(data, axis=0)
covM = np.cov(data, rowvar=0)
eigVal, eigVec = np.linalg.eig(covM)
eigVal = np.real(eigVal)
eigVec = np.real(eigVec)
eigP = []
for i in range(len(eigVal)):
eigP.append((np.abs(eigVal[i]), eigVec[:,i]))
matW = np.hstack((eigP[0][1].reshape(1568, 1), eigP[1][1].reshape(1568,1)))
newData = np.dot(np.transpose(matW), np.transpose(data))
return (eigVal, newData)
### Eigenspectrum plot ###
(eigVal, newData) = pca(data, classes)
eigVal.sort()
plt.yscale('log')
plt.plot(np.arange(1,1569), eigVal[::-1], 'k')
plt.title('Eigenspectrum')
plt.show()
### Number of principal components to explain 90% of variance ###
var90 = np.sum(eigVal)*0.9
b = 0.0
i = 0
for a in eigVal[::-1]:
i += 1
b += a
if (b > var90):
print "Number of principal components to explain 90% of the variance:"
print i
break
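### Cross-check of the component count (illustrative sketch using the same ###
### eigVal as above): the cumulative explained-variance ratio avoids a loop ###
cumRatio = np.cumsum(eigVal[::-1]) / np.sum(eigVal)
print("Cross-check via cumulative variance ratio:")
print(np.searchsorted(cumRatio, 0.9, side='right') + 1)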
### Scatter plot ###
# Class groups used to pick a marker/color for each traffic-sign class
red_classes = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 15, 16, 17,
               32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42]
blue_classes = [11, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31]
for i in range(1275):
    c = classes[i]
    if c in red_classes:
        plt.plot(newData[0, i], newData[1, i], 'ro')
    elif c in blue_classes:
        plt.plot(newData[0, i], newData[1, i], 'b^')
    elif c == 12:
        plt.plot(newData[0, i], newData[1, i], 'gD')
    elif c == 13:
        plt.plot(newData[0, i], newData[1, i], 'kv')
    else:
        plt.plot(newData[0, i], newData[1, i], 'y8')
plt.xlabel('x-values')
plt.ylabel('y-values')
plt.title('Result from PCA')
plt.show()
### K-means Clustering ###
np.random.seed(1)
def kmeans(k, data, classes):
entries = data.shape[0]
centroids = [None] * k
for i in range(k):
rand = np.random.random_integers(0, entries-1)
centroids[i] = data[rand]
centroids = np.array(centroids, float)
dists = np.zeros((entries, k))
labels = np.zeros((entries, 1))
oldCentroids = np.zeros(centroids.shape)
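    # Lloyd-style iterations: recompute all point-to-centroid distances, assign
    # each point to its nearest centroid, then move each centroid to the mean of
    # its assigned points, until the centroids move by less than 1e-4 overall.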
while (np.linalg.norm(centroids - oldCentroids) > 0.0001):
oldCentroids = np.copy(centroids)
for i in range(k):
for j in range(entries):
dists[j, i] = np.linalg.norm(centroids[i] - data[j])
for i in range(entries):
entryDists = dists[i ,:]
labels[i] = np.argmin(entryDists)
for i in range(k):
centroids[i] = np.mean(data[labels[:, 0] == i], axis=0)
return centroids
cents = kmeans(4, data, classes)
modData = np.r_[data, cents]
modClasses = np.append(classes, [99, 99, 99, 99])
### Scatter plot with centroids ###
(eigVal, newData) = pca(modData, modClasses)
for i in range(1279):
    c = modClasses[i]
    if c in red_classes:
        plt.plot(newData[0, i], newData[1, i], 'ro')
    elif c in blue_classes:
        plt.plot(newData[0, i], newData[1, i], 'b^')
    elif c == 12:
        plt.plot(newData[0, i], newData[1, i], 'gD')
    elif c == 13:
        plt.plot(newData[0, i], newData[1, i], 'kv')
    elif c == 99:
        plt.plot(newData[0, i], newData[1, i], 'm*', ms=20)
    else:
        plt.plot(newData[0, i], newData[1, i], 'y8')
plt.xlabel('x-values')
plt.ylabel('y-values')
plt.title('Result from 4-means clustering')
plt.show()
|
gpl-3.0
|
dsm054/pandas
|
pandas/tests/indexing/test_coercion.py
|
1
|
35754
|
# -*- coding: utf-8 -*-
import itertools
import numpy as np
import pytest
import pandas.compat as compat
import pandas as pd
import pandas.util.testing as tm
###############################################################
# Index / Series common tests which may trigger dtype coercions
###############################################################
@pytest.fixture(autouse=True, scope='class')
def check_comprehensiveness(request):
    # Iterate over the combinations of dtype, method and klass
    # and ensure that each is covered by a collected test
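    # For example, the combo ('series', 'int64', 'setitem') counts as covered
    # because a collected test name such as
    # 'TestSetitemCoercion::test_setitem_series_int64[...]' contains all three
    # substrings (illustrative example).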
cls = request.cls
combos = itertools.product(cls.klasses, cls.dtypes, [cls.method])
def has_test(combo):
klass, dtype, method = combo
cls_funcs = request.node.session.items
return any(klass in x.name and dtype in x.name and
method in x.name for x in cls_funcs)
for combo in combos:
if not has_test(combo):
msg = 'test method is not defined: {0}, {1}'
raise AssertionError(msg.format(type(cls), combo))
yield
class CoercionBase(object):
klasses = ['index', 'series']
dtypes = ['object', 'int64', 'float64', 'complex128', 'bool',
'datetime64', 'datetime64tz', 'timedelta64', 'period']
@property
def method(self):
raise NotImplementedError(self)
def _assert(self, left, right, dtype):
# explicitly check dtype to avoid any unexpected result
if isinstance(left, pd.Series):
tm.assert_series_equal(left, right)
elif isinstance(left, pd.Index):
tm.assert_index_equal(left, right)
else:
raise NotImplementedError
assert left.dtype == dtype
assert right.dtype == dtype
class TestSetitemCoercion(CoercionBase):
method = 'setitem'
def _assert_setitem_series_conversion(self, original_series, loc_value,
expected_series, expected_dtype):
""" test series value's coercion triggered by assignment """
temp = original_series.copy()
temp[1] = loc_value
tm.assert_series_equal(temp, expected_series)
# check dtype explicitly for sure
assert temp.dtype == expected_dtype
        # .loc follows a different rule; temporarily disabled
# temp = original_series.copy()
# temp.loc[1] = loc_value
# tm.assert_series_equal(temp, expected_series)
@pytest.mark.parametrize("val,exp_dtype", [
(1, np.object),
(1.1, np.object),
(1 + 1j, np.object),
(True, np.object)])
def test_setitem_series_object(self, val, exp_dtype):
obj = pd.Series(list('abcd'))
assert obj.dtype == np.object
exp = pd.Series(['a', val, 'c', 'd'])
self._assert_setitem_series_conversion(obj, val, exp, exp_dtype)
@pytest.mark.parametrize("val,exp_dtype", [
(1, np.int64),
(1.1, np.float64),
(1 + 1j, np.complex128),
(True, np.object)])
def test_setitem_series_int64(self, val, exp_dtype):
obj = pd.Series([1, 2, 3, 4])
assert obj.dtype == np.int64
if exp_dtype is np.float64:
exp = pd.Series([1, 1, 3, 4])
self._assert_setitem_series_conversion(obj, 1.1, exp, np.int64)
pytest.xfail("GH12747 The result must be float")
exp = pd.Series([1, val, 3, 4])
self._assert_setitem_series_conversion(obj, val, exp, exp_dtype)
@pytest.mark.parametrize("val,exp_dtype", [
(np.int32(1), np.int8),
(np.int16(2**9), np.int16)])
def test_setitem_series_int8(self, val, exp_dtype):
obj = pd.Series([1, 2, 3, 4], dtype=np.int8)
assert obj.dtype == np.int8
if exp_dtype is np.int16:
exp = pd.Series([1, 0, 3, 4], dtype=np.int8)
self._assert_setitem_series_conversion(obj, val, exp, np.int8)
pytest.xfail("BUG: it must be Series([1, 1, 3, 4], dtype=np.int16")
exp = pd.Series([1, val, 3, 4], dtype=np.int8)
self._assert_setitem_series_conversion(obj, val, exp, exp_dtype)
@pytest.mark.parametrize("val,exp_dtype", [
(1, np.float64),
(1.1, np.float64),
(1 + 1j, np.complex128),
(True, np.object)])
def test_setitem_series_float64(self, val, exp_dtype):
obj = pd.Series([1.1, 2.2, 3.3, 4.4])
assert obj.dtype == np.float64
exp = pd.Series([1.1, val, 3.3, 4.4])
self._assert_setitem_series_conversion(obj, val, exp, exp_dtype)
@pytest.mark.parametrize("val,exp_dtype", [
(1, np.complex128),
(1.1, np.complex128),
(1 + 1j, np.complex128),
(True, np.object)])
def test_setitem_series_complex128(self, val, exp_dtype):
obj = pd.Series([1 + 1j, 2 + 2j, 3 + 3j, 4 + 4j])
assert obj.dtype == np.complex128
exp = pd.Series([1 + 1j, val, 3 + 3j, 4 + 4j])
self._assert_setitem_series_conversion(obj, val, exp, exp_dtype)
@pytest.mark.parametrize("val,exp_dtype", [
(1, np.int64),
(3, np.int64),
(1.1, np.float64),
(1 + 1j, np.complex128),
(True, np.bool)])
def test_setitem_series_bool(self, val, exp_dtype):
obj = pd.Series([True, False, True, False])
assert obj.dtype == np.bool
if exp_dtype is np.int64:
exp = pd.Series([True, True, True, False])
self._assert_setitem_series_conversion(obj, val, exp, np.bool)
pytest.xfail("TODO_GH12747 The result must be int")
elif exp_dtype is np.float64:
exp = pd.Series([True, True, True, False])
self._assert_setitem_series_conversion(obj, val, exp, np.bool)
pytest.xfail("TODO_GH12747 The result must be float")
elif exp_dtype is np.complex128:
exp = pd.Series([True, True, True, False])
self._assert_setitem_series_conversion(obj, val, exp, np.bool)
pytest.xfail("TODO_GH12747 The result must be complex")
exp = pd.Series([True, val, True, False])
self._assert_setitem_series_conversion(obj, val, exp, exp_dtype)
@pytest.mark.parametrize("val,exp_dtype", [
(pd.Timestamp('2012-01-01'), 'datetime64[ns]'),
(1, np.object),
('x', np.object)])
def test_setitem_series_datetime64(self, val, exp_dtype):
obj = pd.Series([pd.Timestamp('2011-01-01'),
pd.Timestamp('2011-01-02'),
pd.Timestamp('2011-01-03'),
pd.Timestamp('2011-01-04')])
assert obj.dtype == 'datetime64[ns]'
exp = pd.Series([pd.Timestamp('2011-01-01'),
val,
pd.Timestamp('2011-01-03'),
pd.Timestamp('2011-01-04')])
self._assert_setitem_series_conversion(obj, val, exp, exp_dtype)
@pytest.mark.parametrize("val,exp_dtype", [
(pd.Timestamp('2012-01-01', tz='US/Eastern'),
'datetime64[ns, US/Eastern]'),
(pd.Timestamp('2012-01-01', tz='US/Pacific'), np.object),
(pd.Timestamp('2012-01-01'), np.object),
(1, np.object)])
def test_setitem_series_datetime64tz(self, val, exp_dtype):
tz = 'US/Eastern'
obj = pd.Series([pd.Timestamp('2011-01-01', tz=tz),
pd.Timestamp('2011-01-02', tz=tz),
pd.Timestamp('2011-01-03', tz=tz),
pd.Timestamp('2011-01-04', tz=tz)])
assert obj.dtype == 'datetime64[ns, US/Eastern]'
exp = pd.Series([pd.Timestamp('2011-01-01', tz=tz),
val,
pd.Timestamp('2011-01-03', tz=tz),
pd.Timestamp('2011-01-04', tz=tz)])
self._assert_setitem_series_conversion(obj, val, exp, exp_dtype)
@pytest.mark.parametrize("val,exp_dtype", [
(pd.Timedelta('12 day'), 'timedelta64[ns]'),
(1, np.object),
('x', np.object)])
def test_setitem_series_timedelta64(self, val, exp_dtype):
obj = pd.Series([pd.Timedelta('1 day'),
pd.Timedelta('2 day'),
pd.Timedelta('3 day'),
pd.Timedelta('4 day')])
assert obj.dtype == 'timedelta64[ns]'
exp = pd.Series([pd.Timedelta('1 day'),
val,
pd.Timedelta('3 day'),
pd.Timedelta('4 day')])
self._assert_setitem_series_conversion(obj, val, exp, exp_dtype)
def _assert_setitem_index_conversion(self, original_series, loc_key,
expected_index, expected_dtype):
""" test index's coercion triggered by assign key """
temp = original_series.copy()
temp[loc_key] = 5
exp = pd.Series([1, 2, 3, 4, 5], index=expected_index)
tm.assert_series_equal(temp, exp)
# check dtype explicitly for sure
assert temp.index.dtype == expected_dtype
temp = original_series.copy()
temp.loc[loc_key] = 5
exp = pd.Series([1, 2, 3, 4, 5], index=expected_index)
tm.assert_series_equal(temp, exp)
# check dtype explicitly for sure
assert temp.index.dtype == expected_dtype
@pytest.mark.parametrize("val,exp_dtype", [
('x', np.object),
(5, IndexError),
(1.1, np.object)])
def test_setitem_index_object(self, val, exp_dtype):
obj = pd.Series([1, 2, 3, 4], index=list('abcd'))
assert obj.index.dtype == np.object
if exp_dtype is IndexError:
temp = obj.copy()
with pytest.raises(exp_dtype):
temp[5] = 5
else:
exp_index = pd.Index(list('abcd') + [val])
self._assert_setitem_index_conversion(obj, val, exp_index,
exp_dtype)
@pytest.mark.parametrize("val,exp_dtype", [
(5, np.int64),
(1.1, np.float64),
('x', np.object)])
def test_setitem_index_int64(self, val, exp_dtype):
obj = pd.Series([1, 2, 3, 4])
assert obj.index.dtype == np.int64
exp_index = pd.Index([0, 1, 2, 3, val])
self._assert_setitem_index_conversion(obj, val, exp_index, exp_dtype)
@pytest.mark.parametrize("val,exp_dtype", [
(5, IndexError),
(5.1, np.float64),
('x', np.object)])
def test_setitem_index_float64(self, val, exp_dtype):
obj = pd.Series([1, 2, 3, 4], index=[1.1, 2.1, 3.1, 4.1])
assert obj.index.dtype == np.float64
if exp_dtype is IndexError:
# float + int -> int
temp = obj.copy()
with pytest.raises(exp_dtype):
temp[5] = 5
pytest.xfail("TODO_GH12747 The result must be float")
exp_index = pd.Index([1.1, 2.1, 3.1, 4.1, val])
self._assert_setitem_index_conversion(obj, val, exp_index, exp_dtype)
def test_setitem_series_period(self):
pass
def test_setitem_index_complex128(self):
pass
def test_setitem_index_bool(self):
pass
def test_setitem_index_datetime64(self):
pass
def test_setitem_index_datetime64tz(self):
pass
def test_setitem_index_timedelta64(self):
pass
def test_setitem_index_period(self):
pass
class TestInsertIndexCoercion(CoercionBase):
klasses = ['index']
method = 'insert'
def _assert_insert_conversion(self, original, value,
expected, expected_dtype):
""" test coercion triggered by insert """
target = original.copy()
res = target.insert(1, value)
tm.assert_index_equal(res, expected)
assert res.dtype == expected_dtype
@pytest.mark.parametrize("insert, coerced_val, coerced_dtype", [
(1, 1, np.object),
(1.1, 1.1, np.object),
(False, False, np.object),
('x', 'x', np.object)])
def test_insert_index_object(self, insert, coerced_val, coerced_dtype):
obj = pd.Index(list('abcd'))
assert obj.dtype == np.object
exp = pd.Index(['a', coerced_val, 'b', 'c', 'd'])
self._assert_insert_conversion(obj, insert, exp, coerced_dtype)
@pytest.mark.parametrize("insert, coerced_val, coerced_dtype", [
(1, 1, np.int64),
(1.1, 1.1, np.float64),
(False, 0, np.int64),
('x', 'x', np.object)])
def test_insert_index_int64(self, insert, coerced_val, coerced_dtype):
obj = pd.Int64Index([1, 2, 3, 4])
assert obj.dtype == np.int64
exp = pd.Index([1, coerced_val, 2, 3, 4])
self._assert_insert_conversion(obj, insert, exp, coerced_dtype)
@pytest.mark.parametrize("insert, coerced_val, coerced_dtype", [
(1, 1., np.float64),
(1.1, 1.1, np.float64),
(False, 0., np.float64),
('x', 'x', np.object)])
def test_insert_index_float64(self, insert, coerced_val, coerced_dtype):
obj = pd.Float64Index([1., 2., 3., 4.])
assert obj.dtype == np.float64
exp = pd.Index([1., coerced_val, 2., 3., 4.])
self._assert_insert_conversion(obj, insert, exp, coerced_dtype)
@pytest.mark.parametrize('fill_val,exp_dtype', [
(pd.Timestamp('2012-01-01'), 'datetime64[ns]'),
(pd.Timestamp('2012-01-01', tz='US/Eastern'),
'datetime64[ns, US/Eastern]')],
ids=['datetime64', 'datetime64tz'])
def test_insert_index_datetimes(self, fill_val, exp_dtype):
obj = pd.DatetimeIndex(['2011-01-01', '2011-01-02', '2011-01-03',
'2011-01-04'], tz=fill_val.tz)
assert obj.dtype == exp_dtype
exp = pd.DatetimeIndex(['2011-01-01', fill_val.date(), '2011-01-02',
'2011-01-03', '2011-01-04'], tz=fill_val.tz)
self._assert_insert_conversion(obj, fill_val, exp, exp_dtype)
msg = "Passed item and index have different timezone"
if fill_val.tz:
with pytest.raises(ValueError, match=msg):
obj.insert(1, pd.Timestamp('2012-01-01'))
with pytest.raises(ValueError, match=msg):
obj.insert(1, pd.Timestamp('2012-01-01', tz='Asia/Tokyo'))
msg = "cannot insert DatetimeIndex with incompatible label"
with pytest.raises(TypeError, match=msg):
obj.insert(1, 1)
pytest.xfail("ToDo: must coerce to object")
def test_insert_index_timedelta64(self):
obj = pd.TimedeltaIndex(['1 day', '2 day', '3 day', '4 day'])
assert obj.dtype == 'timedelta64[ns]'
# timedelta64 + timedelta64 => timedelta64
exp = pd.TimedeltaIndex(['1 day', '10 day', '2 day', '3 day', '4 day'])
self._assert_insert_conversion(obj, pd.Timedelta('10 day'),
exp, 'timedelta64[ns]')
# ToDo: must coerce to object
msg = "cannot insert TimedeltaIndex with incompatible label"
with pytest.raises(TypeError, match=msg):
obj.insert(1, pd.Timestamp('2012-01-01'))
# ToDo: must coerce to object
msg = "cannot insert TimedeltaIndex with incompatible label"
with pytest.raises(TypeError, match=msg):
obj.insert(1, 1)
@pytest.mark.parametrize("insert, coerced_val, coerced_dtype", [
(pd.Period('2012-01', freq='M'), '2012-01', 'period[M]'),
(pd.Timestamp('2012-01-01'), pd.Timestamp('2012-01-01'), np.object),
(1, 1, np.object),
('x', 'x', np.object)])
def test_insert_index_period(self, insert, coerced_val, coerced_dtype):
obj = pd.PeriodIndex(['2011-01', '2011-02', '2011-03', '2011-04'],
freq='M')
assert obj.dtype == 'period[M]'
if isinstance(insert, pd.Period):
index_type = pd.PeriodIndex
else:
index_type = pd.Index
exp = index_type([pd.Period('2011-01', freq='M'),
coerced_val,
pd.Period('2011-02', freq='M'),
pd.Period('2011-03', freq='M'),
pd.Period('2011-04', freq='M')], freq='M')
self._assert_insert_conversion(obj, insert, exp, coerced_dtype)
def test_insert_index_complex128(self):
pass
def test_insert_index_bool(self):
pass
class TestWhereCoercion(CoercionBase):
method = 'where'
def _assert_where_conversion(self, original, cond, values,
expected, expected_dtype):
""" test coercion triggered by where """
target = original.copy()
res = target.where(cond, values)
self._assert(res, expected, expected_dtype)
@pytest.mark.parametrize("klass", [pd.Series, pd.Index],
ids=['series', 'index'])
@pytest.mark.parametrize("fill_val,exp_dtype", [
(1, np.object),
(1.1, np.object),
(1 + 1j, np.object),
(True, np.object)])
def test_where_object(self, klass, fill_val, exp_dtype):
obj = klass(list('abcd'))
assert obj.dtype == np.object
cond = klass([True, False, True, False])
if fill_val is True and klass is pd.Series:
ret_val = 1
else:
ret_val = fill_val
exp = klass(['a', ret_val, 'c', ret_val])
self._assert_where_conversion(obj, cond, fill_val, exp, exp_dtype)
if fill_val is True:
values = klass([True, False, True, True])
else:
values = klass(fill_val * x for x in [5, 6, 7, 8])
exp = klass(['a', values[1], 'c', values[3]])
self._assert_where_conversion(obj, cond, values, exp, exp_dtype)
@pytest.mark.parametrize("klass", [pd.Series, pd.Index],
ids=['series', 'index'])
@pytest.mark.parametrize("fill_val,exp_dtype", [
(1, np.int64),
(1.1, np.float64),
(1 + 1j, np.complex128),
(True, np.object)])
def test_where_int64(self, klass, fill_val, exp_dtype):
if klass is pd.Index and exp_dtype is np.complex128:
pytest.skip("Complex Index not supported")
obj = klass([1, 2, 3, 4])
assert obj.dtype == np.int64
cond = klass([True, False, True, False])
exp = klass([1, fill_val, 3, fill_val])
self._assert_where_conversion(obj, cond, fill_val, exp, exp_dtype)
if fill_val is True:
values = klass([True, False, True, True])
else:
values = klass(x * fill_val for x in [5, 6, 7, 8])
exp = klass([1, values[1], 3, values[3]])
self._assert_where_conversion(obj, cond, values, exp, exp_dtype)
@pytest.mark.parametrize("klass", [pd.Series, pd.Index],
ids=['series', 'index'])
@pytest.mark.parametrize("fill_val, exp_dtype", [
(1, np.float64),
(1.1, np.float64),
(1 + 1j, np.complex128),
(True, np.object)])
def test_where_float64(self, klass, fill_val, exp_dtype):
if klass is pd.Index and exp_dtype is np.complex128:
pytest.skip("Complex Index not supported")
obj = klass([1.1, 2.2, 3.3, 4.4])
assert obj.dtype == np.float64
cond = klass([True, False, True, False])
exp = klass([1.1, fill_val, 3.3, fill_val])
self._assert_where_conversion(obj, cond, fill_val, exp, exp_dtype)
if fill_val is True:
values = klass([True, False, True, True])
else:
values = klass(x * fill_val for x in [5, 6, 7, 8])
exp = klass([1.1, values[1], 3.3, values[3]])
self._assert_where_conversion(obj, cond, values, exp, exp_dtype)
@pytest.mark.parametrize("fill_val,exp_dtype", [
(1, np.complex128),
(1.1, np.complex128),
(1 + 1j, np.complex128),
(True, np.object)])
def test_where_series_complex128(self, fill_val, exp_dtype):
obj = pd.Series([1 + 1j, 2 + 2j, 3 + 3j, 4 + 4j])
assert obj.dtype == np.complex128
cond = pd.Series([True, False, True, False])
exp = pd.Series([1 + 1j, fill_val, 3 + 3j, fill_val])
self._assert_where_conversion(obj, cond, fill_val, exp, exp_dtype)
if fill_val is True:
values = pd.Series([True, False, True, True])
else:
values = pd.Series(x * fill_val for x in [5, 6, 7, 8])
exp = pd.Series([1 + 1j, values[1], 3 + 3j, values[3]])
self._assert_where_conversion(obj, cond, values, exp, exp_dtype)
@pytest.mark.parametrize("fill_val,exp_dtype", [
(1, np.object),
(1.1, np.object),
(1 + 1j, np.object),
(True, np.bool)])
def test_where_series_bool(self, fill_val, exp_dtype):
obj = pd.Series([True, False, True, False])
assert obj.dtype == np.bool
cond = pd.Series([True, False, True, False])
exp = pd.Series([True, fill_val, True, fill_val])
self._assert_where_conversion(obj, cond, fill_val, exp, exp_dtype)
if fill_val is True:
values = pd.Series([True, False, True, True])
else:
values = pd.Series(x * fill_val for x in [5, 6, 7, 8])
exp = pd.Series([True, values[1], True, values[3]])
self._assert_where_conversion(obj, cond, values, exp, exp_dtype)
@pytest.mark.parametrize("fill_val,exp_dtype", [
(pd.Timestamp('2012-01-01'), 'datetime64[ns]'),
(pd.Timestamp('2012-01-01', tz='US/Eastern'), np.object)],
ids=['datetime64', 'datetime64tz'])
def test_where_series_datetime64(self, fill_val, exp_dtype):
obj = pd.Series([pd.Timestamp('2011-01-01'),
pd.Timestamp('2011-01-02'),
pd.Timestamp('2011-01-03'),
pd.Timestamp('2011-01-04')])
assert obj.dtype == 'datetime64[ns]'
cond = pd.Series([True, False, True, False])
exp = pd.Series([pd.Timestamp('2011-01-01'), fill_val,
pd.Timestamp('2011-01-03'), fill_val])
self._assert_where_conversion(obj, cond, fill_val, exp, exp_dtype)
values = pd.Series(pd.date_range(fill_val, periods=4))
if fill_val.tz:
exp = pd.Series([pd.Timestamp('2011-01-01'),
pd.Timestamp('2012-01-02 00:00', tz='US/Eastern'),
pd.Timestamp('2011-01-03'),
pd.Timestamp('2012-01-04 00:00',
tz='US/Eastern')])
self._assert_where_conversion(obj, cond, values, exp, exp_dtype)
exp = pd.Series([pd.Timestamp('2011-01-01'), values[1],
pd.Timestamp('2011-01-03'), values[3]])
self._assert_where_conversion(obj, cond, values, exp, exp_dtype)
def test_where_index_datetime(self):
fill_val = pd.Timestamp('2012-01-01')
exp_dtype = 'datetime64[ns]'
obj = pd.Index([pd.Timestamp('2011-01-01'),
pd.Timestamp('2011-01-02'),
pd.Timestamp('2011-01-03'),
pd.Timestamp('2011-01-04')])
assert obj.dtype == 'datetime64[ns]'
cond = pd.Index([True, False, True, False])
msg = ("Index\\(\\.\\.\\.\\) must be called with a collection "
"of some kind")
with pytest.raises(TypeError, match=msg):
obj.where(cond, fill_val)
values = pd.Index(pd.date_range(fill_val, periods=4))
exp = pd.Index([pd.Timestamp('2011-01-01'),
pd.Timestamp('2012-01-02'),
pd.Timestamp('2011-01-03'),
pd.Timestamp('2012-01-04')])
self._assert_where_conversion(obj, cond, values, exp, exp_dtype)
@pytest.mark.xfail(
reason="GH 22839: do not ignore timezone, must be object")
def test_where_index_datetimetz(self):
fill_val = pd.Timestamp('2012-01-01', tz='US/Eastern')
exp_dtype = np.object
obj = pd.Index([pd.Timestamp('2011-01-01'),
pd.Timestamp('2011-01-02'),
pd.Timestamp('2011-01-03'),
pd.Timestamp('2011-01-04')])
assert obj.dtype == 'datetime64[ns]'
cond = pd.Index([True, False, True, False])
msg = ("Index\\(\\.\\.\\.\\) must be called with a collection "
"of some kind")
with pytest.raises(TypeError, match=msg):
obj.where(cond, fill_val)
values = pd.Index(pd.date_range(fill_val, periods=4))
exp = pd.Index([pd.Timestamp('2011-01-01'),
pd.Timestamp('2012-01-02', tz='US/Eastern'),
pd.Timestamp('2011-01-03'),
pd.Timestamp('2012-01-04', tz='US/Eastern')],
dtype=exp_dtype)
self._assert_where_conversion(obj, cond, values, exp, exp_dtype)
def test_where_index_complex128(self):
pass
def test_where_index_bool(self):
pass
def test_where_series_datetime64tz(self):
pass
def test_where_series_timedelta64(self):
pass
def test_where_series_period(self):
pass
def test_where_index_datetime64tz(self):
pass
def test_where_index_timedelta64(self):
pass
def test_where_index_period(self):
pass
class TestFillnaSeriesCoercion(CoercionBase):
    # not indexing, but placed here for consistency
method = 'fillna'
def test_has_comprehensive_tests(self):
pass
def _assert_fillna_conversion(self, original, value,
expected, expected_dtype):
""" test coercion triggered by fillna """
target = original.copy()
res = target.fillna(value)
self._assert(res, expected, expected_dtype)
@pytest.mark.parametrize("klass", [pd.Series, pd.Index],
ids=['series', 'index'])
@pytest.mark.parametrize("fill_val, fill_dtype", [
(1, np.object),
(1.1, np.object),
(1 + 1j, np.object),
(True, np.object)])
def test_fillna_object(self, klass, fill_val, fill_dtype):
obj = klass(['a', np.nan, 'c', 'd'])
assert obj.dtype == np.object
exp = klass(['a', fill_val, 'c', 'd'])
self._assert_fillna_conversion(obj, fill_val, exp, fill_dtype)
@pytest.mark.parametrize("klass", [pd.Series, pd.Index],
ids=['series', 'index'])
@pytest.mark.parametrize("fill_val,fill_dtype", [
(1, np.float64),
(1.1, np.float64),
(1 + 1j, np.complex128),
(True, np.object)])
def test_fillna_float64(self, klass, fill_val, fill_dtype):
obj = klass([1.1, np.nan, 3.3, 4.4])
assert obj.dtype == np.float64
exp = klass([1.1, fill_val, 3.3, 4.4])
# float + complex -> we don't support a complex Index
# complex for Series,
# object for Index
if fill_dtype == np.complex128 and klass == pd.Index:
fill_dtype = np.object
self._assert_fillna_conversion(obj, fill_val, exp, fill_dtype)
@pytest.mark.parametrize("fill_val,fill_dtype", [
(1, np.complex128),
(1.1, np.complex128),
(1 + 1j, np.complex128),
(True, np.object)])
def test_fillna_series_complex128(self, fill_val, fill_dtype):
obj = pd.Series([1 + 1j, np.nan, 3 + 3j, 4 + 4j])
assert obj.dtype == np.complex128
exp = pd.Series([1 + 1j, fill_val, 3 + 3j, 4 + 4j])
self._assert_fillna_conversion(obj, fill_val, exp, fill_dtype)
@pytest.mark.parametrize("klass", [pd.Series, pd.Index],
ids=['series', 'index'])
@pytest.mark.parametrize("fill_val,fill_dtype", [
(pd.Timestamp('2012-01-01'), 'datetime64[ns]'),
(pd.Timestamp('2012-01-01', tz='US/Eastern'), np.object),
(1, np.object), ('x', np.object)],
ids=['datetime64', 'datetime64tz', 'object', 'object'])
def test_fillna_datetime(self, klass, fill_val, fill_dtype):
obj = klass([pd.Timestamp('2011-01-01'),
pd.NaT,
pd.Timestamp('2011-01-03'),
pd.Timestamp('2011-01-04')])
assert obj.dtype == 'datetime64[ns]'
exp = klass([pd.Timestamp('2011-01-01'),
fill_val,
pd.Timestamp('2011-01-03'),
pd.Timestamp('2011-01-04')])
self._assert_fillna_conversion(obj, fill_val, exp, fill_dtype)
@pytest.mark.parametrize("klass", [pd.Series, pd.Index])
@pytest.mark.parametrize("fill_val,fill_dtype", [
(pd.Timestamp('2012-01-01', tz='US/Eastern'),
'datetime64[ns, US/Eastern]'),
(pd.Timestamp('2012-01-01'), np.object),
(pd.Timestamp('2012-01-01', tz='Asia/Tokyo'), np.object),
(1, np.object),
('x', np.object)])
def test_fillna_datetime64tz(self, klass, fill_val, fill_dtype):
tz = 'US/Eastern'
obj = klass([pd.Timestamp('2011-01-01', tz=tz),
pd.NaT,
pd.Timestamp('2011-01-03', tz=tz),
pd.Timestamp('2011-01-04', tz=tz)])
assert obj.dtype == 'datetime64[ns, US/Eastern]'
exp = klass([pd.Timestamp('2011-01-01', tz=tz),
fill_val,
pd.Timestamp('2011-01-03', tz=tz),
pd.Timestamp('2011-01-04', tz=tz)])
self._assert_fillna_conversion(obj, fill_val, exp, fill_dtype)
def test_fillna_series_int64(self):
pass
def test_fillna_index_int64(self):
pass
def test_fillna_series_bool(self):
pass
def test_fillna_index_bool(self):
pass
def test_fillna_series_timedelta64(self):
pass
def test_fillna_series_period(self):
pass
def test_fillna_index_timedelta64(self):
pass
def test_fillna_index_period(self):
pass
class TestReplaceSeriesCoercion(CoercionBase):
klasses = ['series']
method = 'replace'
rep = {}
rep['object'] = ['a', 'b']
rep['int64'] = [4, 5]
rep['float64'] = [1.1, 2.2]
rep['complex128'] = [1 + 1j, 2 + 2j]
rep['bool'] = [True, False]
rep['datetime64[ns]'] = [pd.Timestamp('2011-01-01'),
pd.Timestamp('2011-01-03')]
for tz in ['UTC', 'US/Eastern']:
# to test tz => different tz replacement
key = 'datetime64[ns, {0}]'.format(tz)
rep[key] = [pd.Timestamp('2011-01-01', tz=tz),
pd.Timestamp('2011-01-03', tz=tz)]
rep['timedelta64[ns]'] = [pd.Timedelta('1 day'),
pd.Timedelta('2 day')]
@pytest.mark.parametrize('how', ['dict', 'series'])
@pytest.mark.parametrize('to_key', [
'object', 'int64', 'float64', 'complex128', 'bool', 'datetime64[ns]',
'datetime64[ns, UTC]', 'datetime64[ns, US/Eastern]', 'timedelta64[ns]'
], ids=['object', 'int64', 'float64', 'complex128', 'bool',
'datetime64', 'datetime64tz', 'datetime64tz', 'timedelta64'])
@pytest.mark.parametrize('from_key', [
'object', 'int64', 'float64', 'complex128', 'bool', 'datetime64[ns]',
'datetime64[ns, UTC]', 'datetime64[ns, US/Eastern]', 'timedelta64[ns]']
)
def test_replace_series(self, how, to_key, from_key):
if from_key == 'bool' and how == 'series' and compat.PY3:
# doesn't work in PY3, though ...dict_from_bool works fine
pytest.skip("doesn't work as in PY3")
index = pd.Index([3, 4], name='xxx')
obj = pd.Series(self.rep[from_key], index=index, name='yyy')
assert obj.dtype == from_key
if (from_key.startswith('datetime') and to_key.startswith('datetime')):
# tested below
return
elif from_key in ['datetime64[ns, US/Eastern]', 'datetime64[ns, UTC]']:
# tested below
return
if how == 'dict':
replacer = dict(zip(self.rep[from_key], self.rep[to_key]))
elif how == 'series':
replacer = pd.Series(self.rep[to_key], index=self.rep[from_key])
else:
raise ValueError
result = obj.replace(replacer)
        if ((from_key == 'float64' and to_key in ('int64',)) or
(from_key == 'complex128' and
to_key in ('int64', 'float64'))):
if compat.is_platform_32bit() or compat.is_platform_windows():
pytest.skip("32-bit platform buggy: {0} -> {1}".format
(from_key, to_key))
# Expected: do not downcast by replacement
exp = pd.Series(self.rep[to_key], index=index,
name='yyy', dtype=from_key)
else:
exp = pd.Series(self.rep[to_key], index=index, name='yyy')
assert exp.dtype == to_key
tm.assert_series_equal(result, exp)
# TODO(jbrockmendel) commented out to only have a single xfail printed
@pytest.mark.xfail(reason='GH #18376, tzawareness-compat bug '
'in BlockManager.replace_list')
# @pytest.mark.parametrize('how', ['dict', 'series'])
# @pytest.mark.parametrize('to_key', ['timedelta64[ns]', 'bool', 'object',
# 'complex128', 'float64', 'int64'])
# @pytest.mark.parametrize('from_key', ['datetime64[ns, UTC]',
# 'datetime64[ns, US/Eastern]'])
# def test_replace_series_datetime_tz(self, how, to_key, from_key):
def test_replace_series_datetime_tz(self):
how = 'series'
from_key = 'datetime64[ns, US/Eastern]'
to_key = 'timedelta64[ns]'
index = pd.Index([3, 4], name='xxx')
obj = pd.Series(self.rep[from_key], index=index, name='yyy')
assert obj.dtype == from_key
if how == 'dict':
replacer = dict(zip(self.rep[from_key], self.rep[to_key]))
elif how == 'series':
replacer = pd.Series(self.rep[to_key], index=self.rep[from_key])
else:
raise ValueError
result = obj.replace(replacer)
exp = pd.Series(self.rep[to_key], index=index, name='yyy')
assert exp.dtype == to_key
tm.assert_series_equal(result, exp)
# TODO(jreback) commented out to only have a single xfail printed
@pytest.mark.xfail(reason="different tz, "
"currently mask_missing raises SystemError")
# @pytest.mark.parametrize('how', ['dict', 'series'])
# @pytest.mark.parametrize('to_key', [
# 'datetime64[ns]', 'datetime64[ns, UTC]',
# 'datetime64[ns, US/Eastern]'])
# @pytest.mark.parametrize('from_key', [
# 'datetime64[ns]', 'datetime64[ns, UTC]',
# 'datetime64[ns, US/Eastern]'])
# def test_replace_series_datetime_datetime(self, how, to_key, from_key):
def test_replace_series_datetime_datetime(self):
how = 'dict'
to_key = 'datetime64[ns]'
from_key = 'datetime64[ns]'
index = pd.Index([3, 4], name='xxx')
obj = pd.Series(self.rep[from_key], index=index, name='yyy')
assert obj.dtype == from_key
if how == 'dict':
replacer = dict(zip(self.rep[from_key], self.rep[to_key]))
elif how == 'series':
replacer = pd.Series(self.rep[to_key], index=self.rep[from_key])
else:
raise ValueError
result = obj.replace(replacer)
exp = pd.Series(self.rep[to_key], index=index, name='yyy')
assert exp.dtype == to_key
tm.assert_series_equal(result, exp)
def test_replace_series_period(self):
pass
|
bsd-3-clause
|
TaxIPP-Life/Til
|
til/pgm/run_pension.py
|
2
|
1952
|
# -*- coding: utf-8 -*-
import pandas as pd
import sys
import datetime as dt
from dateutil.relativedelta import relativedelta
from numpy import array, around
import time
from til_pension.pension_data import PensionData
from til_pension.pension_legislation import PensionParam, PensionLegislation
from til_pension.simulation import PensionSimulation
from utils import output_til_to_liam
def get_pension(context, yearleg):
''' return a PensionSimulation '''
sali = context['longitudinal']['sali']
workstate = context['longitudinal']['workstate']
    # compute the birth date in the proper format
datesim = context['period']
datesim_in_month = 12*(datesim // 100) + datesim % 100
datenaiss_in_month = datesim_in_month - context['agem']
naiss = 100*(datenaiss_in_month // 12) + datenaiss_in_month % 12 + 1
naiss = pd.Series(naiss)
naiss = pd.Series(naiss).map(lambda t: dt.date(t // 100, t % 100, 1))
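    # Worked example of the arithmetic above (illustrative values): with
    # datesim = 201401 and agem = 360, datesim_in_month = 12*2014 + 1 = 24169,
    # datenaiss_in_month = 24169 - 360 = 23809, so naiss = 100*1984 + 1 + 1
    # = 198402, which the map above turns into dt.date(1984, 2, 1).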
info_ind = pd.DataFrame({'index':context['id'], 'agem': context['agem'],'naiss': naiss, 'sexe' : context['sexe'],
'nb_enf_all': context['nb_enf'], 'nb_pac': context['nb_pac'], 'nb_enf_RG': context['nb_enf_RG'],
'nb_enf_RSI': context['nb_enf_RSI'], 'nb_enf_FP': context['nb_enf_FP'], 'tauxprime': context['tauxprime']})
info_ind = info_ind.to_records(index=False)
workstate = workstate.loc[workstate['id'].isin(info_ind.index), :].copy()
workstate.set_index('id', inplace=True)
workstate.sort_index(inplace=True)
sali = sali.loc[sali['id'].isin(info_ind.index), :].copy()
sali.set_index('id', inplace=True)
sali.sort_index(inplace=True)
sali.fillna(0, inplace=True)
data = PensionData.from_arrays(workstate, sali, info_ind)
param = PensionParam(yearleg, data)
legislation = PensionLegislation(param)
simulation = PensionSimulation(data, legislation)
simulation.set_config()
return simulation
|
gpl-3.0
|
mjescobar/RF_Estimation
|
STA/helpers/molido/sta_chain_datos0003.py
|
2
|
4119
|
#============================================================
# SPIKE TRIGGERED AVERAGE (STA) ALGORITHM IN CHAIN
# Do STA for a list of Unit Cells.
# AASTUDILLO 2014
#============================================================
#============================================================
# Package import section:
#============================================================
# import matplotlib # Graph and plot library
# matplotlib.use("Agg") # to save images without showing them in a window (for server use)
# import pylab as pl
# import matplotlib.cm as cm # plot lib
# import matplotlib.pyplot as plt # plot lib (for figures)
# import mpl_toolkits.mplot3d.axes3d as p3d # 3D Plot lib
# from matplotlib.pyplot import * # plot lib python
# from matplotlib.ticker import NullFormatter # ticker formatter
#-----------------------------------------------------------
# Methods, Signal processing, etc:
#-----------------------------------------------------------
import scipy # numerical methods lib (like Matlab functions)
import scipy.io # input output lib (for save matlab matrix)
import scipy.signal as signal # signal processing lib
import numpy as np # numerical methods lib
import sys # system lib
import os # operative system lib
import random # Random number methods
import time # System timer options
#import scipy.misc as scim # scientific python package for image basic process
import glob # package for get file names from files in a folder
# import matplotlib.pyplot as plt
#from pylab import * # laboratory and plot methods lib
#from scipy import misc
#from PIL import Image, ImageChops
from sta_functions import *
import gc # garbage collector
#=============================================
# GET SPIKE TIME FILE NAMES
#=============================================
archivosruta = 'D:/Experimentos_CINV/datos_20-11-2013_bga50um_2/'
#
archivosfolder = 'TS_datos0003/'
archivofiltro = '*.txt'
globstring = archivosruta + archivosfolder +'/'+ archivofiltro
archivofilenames = glob.glob(globstring) # get file names from folder
archivofilenames.sort()
print "\t length filenames: ", len(archivofilenames)
print "\t last filenames : ",archivofilenames[len(archivofilenames)-2]
getimagenames = 0
openimagesandwrite = 0
calculatemeanrf = 0
tipoalgoritmo = 2
# =============================================
# SET OTHER OPTIONS
# =============================================
# FOLDER NAME TO SAVE EACH FOLDER RESULTS
stafolder = 'STA_datos0003'
# FOLDER NAME TO LOAD STIMULUS ENSEMBLE: IMAGE STIMULUS FOLDER
imageruta = 'D:/'
imagefolder = 'checkImages'
imagefiltro = '*.png'
# SPIKE TIME STAMPS FOLDER FOR LOAD SPIKE TRAINS
timefolder = archivosfolder #'TS_datos0003_2/'
# SET THE ACQUISITION SAMPLING RATE OF THE RECORDS
samplingRate = 20000 # Hz
# SET THE NUMBER OF FRAMES BEFORE AND AFTER A SPIKE TO ANALYZE:
# number of frames previous to each spike for STA windows
numberframes = 13
# number of frames posterior to each spike for STA windows
numberframespost = 5
# SET THE NAME OF THE STIMULUS SYNCHRONY ANALYSIS FILE
# IT CONTAINS THE INITIAL AND FINAL TIME STAMPS OF EACH FRAME
synchronyfile = 'inicio_fin_frame_datos0003.txt'
#inicio_fin_frame = np.loadtxt(synchronyfile)
# SET THE SIZE OF EACH FRAME IN PIXELS
sizex = 380 #500 #750
sizey = 380 #500 #700
# set if do logarithm analysis for plot:
dolog = 0
#timestampName = archivofilenames[118]
c = 1
inicio = 536+10+10+10+10+10+10+10+10+10+10+10+10+10+10
final = inicio + 10
for unitname in archivofilenames[inicio:final]:
timestampName1 = os.path.basename(unitname)
timestampName = os.path.splitext(timestampName1)[0]
print 'Analyzing Unit ',timestampName, ' loop :', c ,' unit n ', c + inicio
    sta_each(getimagenames, openimagesandwrite, calculatemeanrf, tipoalgoritmo,
             timestampName, stafolder, imageruta, imagefolder, imagefiltro,
             timefolder, samplingRate, numberframes, numberframespost,
             synchronyfile, sizex, sizey, dolog)
c = c +1
gc.collect()
|
gpl-2.0
|
cbertinato/pandas
|
pandas/tests/test_errors.py
|
1
|
1759
|
import pytest
from pandas.errors import AbstractMethodError
import pandas as pd # noqa
@pytest.mark.parametrize(
"exc", ['UnsupportedFunctionCall', 'UnsortedIndexError',
'OutOfBoundsDatetime',
'ParserError', 'PerformanceWarning', 'DtypeWarning',
'EmptyDataError', 'ParserWarning', 'MergeError'])
def test_exception_importable(exc):
from pandas import errors
e = getattr(errors, exc)
assert e is not None
# check that we can raise on them
with pytest.raises(e):
raise e()
def test_catch_oob():
from pandas import errors
try:
pd.Timestamp('15000101')
except errors.OutOfBoundsDatetime:
pass
def test_error_rename():
# see gh-12665
from pandas.errors import ParserError
from pandas.io.common import CParserError
try:
raise CParserError()
except ParserError:
pass
try:
raise ParserError()
except CParserError:
pass
class Foo:
@classmethod
def classmethod(cls):
raise AbstractMethodError(cls, methodtype='classmethod')
@property
def property(self):
raise AbstractMethodError(self, methodtype='property')
def method(self):
raise AbstractMethodError(self)
def test_AbstractMethodError_classmethod():
xpr = "This classmethod must be defined in the concrete class Foo"
with pytest.raises(AbstractMethodError, match=xpr):
Foo.classmethod()
xpr = "This property must be defined in the concrete class Foo"
with pytest.raises(AbstractMethodError, match=xpr):
Foo().property
xpr = "This method must be defined in the concrete class Foo"
with pytest.raises(AbstractMethodError, match=xpr):
Foo().method()
|
bsd-3-clause
|
yanlend/scikit-learn
|
examples/semi_supervised/plot_label_propagation_digits.py
|
268
|
2723
|
"""
===================================================
Label Propagation digits: Demonstrating performance
===================================================
This example demonstrates the power of semisupervised learning by
training a Label Spreading model to classify handwritten digits
with sets of very few labels.
The handwritten digit dataset has 1797 total points. The model is
trained on a subset of 330 of them, of which only 30 are labeled.
The results, in the form of a confusion matrix and a series of
metrics over each class, will be very good.
At the end, the top 10 most uncertain predictions will be shown.
"""
print(__doc__)
# Authors: Clay Woolam <[email protected]>
# Licence: BSD
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
from sklearn import datasets
from sklearn.semi_supervised import label_propagation
from sklearn.metrics import confusion_matrix, classification_report
digits = datasets.load_digits()
rng = np.random.RandomState(0)
indices = np.arange(len(digits.data))
rng.shuffle(indices)
X = digits.data[indices[:330]]
y = digits.target[indices[:330]]
images = digits.images[indices[:330]]
n_total_samples = len(y)
n_labeled_points = 30
indices = np.arange(n_total_samples)
unlabeled_set = indices[n_labeled_points:]
# shuffle everything around
y_train = np.copy(y)
y_train[unlabeled_set] = -1
###############################################################################
# Learn with LabelSpreading
lp_model = label_propagation.LabelSpreading(gamma=0.25, max_iter=5)
lp_model.fit(X, y_train)
predicted_labels = lp_model.transduction_[unlabeled_set]
true_labels = y[unlabeled_set]
cm = confusion_matrix(true_labels, predicted_labels, labels=lp_model.classes_)
print("Label Spreading model: %d labeled & %d unlabeled points (%d total)" %
(n_labeled_points, n_total_samples - n_labeled_points, n_total_samples))
print(classification_report(true_labels, predicted_labels))
print("Confusion matrix")
print(cm)
# calculate uncertainty values for each transduced distribution
pred_entropies = stats.distributions.entropy(lp_model.label_distributions_.T)
# pick the top 10 most uncertain labels
uncertainty_index = np.argsort(pred_entropies)[-10:]
###############################################################################
# plot
f = plt.figure(figsize=(7, 5))
for index, image_index in enumerate(uncertainty_index):
image = images[image_index]
sub = f.add_subplot(2, 5, index + 1)
sub.imshow(image, cmap=plt.cm.gray_r)
plt.xticks([])
plt.yticks([])
sub.set_title('predict: %i\ntrue: %i' % (
lp_model.transduction_[image_index], y[image_index]))
f.suptitle('Learning with small amount of labeled data')
plt.show()
|
bsd-3-clause
|
kushalbhola/MyStuff
|
Practice/PythonApplication/env/Lib/site-packages/pandas/tests/io/parser/test_textreader.py
|
2
|
10638
|
"""
Tests the TextReader class in parsers.pyx, which
is integral to the C engine in parsers.py
"""
from io import BytesIO, StringIO
import os
import numpy as np
from numpy import nan
import pytest
import pandas._libs.parsers as parser
from pandas._libs.parsers import TextReader
from pandas import DataFrame
import pandas.util.testing as tm
from pandas.util.testing import assert_frame_equal
from pandas.io.parsers import TextFileReader, read_csv
class TestTextReader:
@pytest.fixture(autouse=True)
def setup_method(self, datapath):
self.dirpath = datapath("io", "parser", "data")
self.csv1 = os.path.join(self.dirpath, "test1.csv")
self.csv2 = os.path.join(self.dirpath, "test2.csv")
self.xls1 = os.path.join(self.dirpath, "test.xls")
def test_file_handle(self):
with open(self.csv1, "rb") as f:
reader = TextReader(f)
reader.read()
def test_string_filename(self):
reader = TextReader(self.csv1, header=None)
reader.read()
def test_file_handle_mmap(self):
with open(self.csv1, "rb") as f:
reader = TextReader(f, memory_map=True, header=None)
reader.read()
def test_StringIO(self):
with open(self.csv1, "rb") as f:
text = f.read()
src = BytesIO(text)
reader = TextReader(src, header=None)
reader.read()
def test_string_factorize(self):
# should this be optional?
data = "a\nb\na\nb\na"
reader = TextReader(StringIO(data), header=None)
result = reader.read()
assert len(set(map(id, result[0]))) == 2
def test_skipinitialspace(self):
data = "a, b\na, b\na, b\na, b"
reader = TextReader(StringIO(data), skipinitialspace=True, header=None)
result = reader.read()
tm.assert_numpy_array_equal(
result[0], np.array(["a", "a", "a", "a"], dtype=np.object_)
)
tm.assert_numpy_array_equal(
result[1], np.array(["b", "b", "b", "b"], dtype=np.object_)
)
def test_parse_booleans(self):
data = "True\nFalse\nTrue\nTrue"
reader = TextReader(StringIO(data), header=None)
result = reader.read()
assert result[0].dtype == np.bool_
def test_delimit_whitespace(self):
data = 'a b\na\t\t "b"\n"a"\t \t b'
reader = TextReader(StringIO(data), delim_whitespace=True, header=None)
result = reader.read()
tm.assert_numpy_array_equal(
result[0], np.array(["a", "a", "a"], dtype=np.object_)
)
tm.assert_numpy_array_equal(
result[1], np.array(["b", "b", "b"], dtype=np.object_)
)
def test_embedded_newline(self):
data = 'a\n"hello\nthere"\nthis'
reader = TextReader(StringIO(data), header=None)
result = reader.read()
expected = np.array(["a", "hello\nthere", "this"], dtype=np.object_)
tm.assert_numpy_array_equal(result[0], expected)
def test_euro_decimal(self):
data = "12345,67\n345,678"
reader = TextReader(StringIO(data), delimiter=":", decimal=",", header=None)
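        # delimiter=":" never appears in the data, so each line remains a single
        # field and the commas are interpreted as decimal separators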
result = reader.read()
expected = np.array([12345.67, 345.678])
tm.assert_almost_equal(result[0], expected)
def test_integer_thousands(self):
data = "123,456\n12,500"
reader = TextReader(StringIO(data), delimiter=":", thousands=",", header=None)
result = reader.read()
expected = np.array([123456, 12500], dtype=np.int64)
tm.assert_almost_equal(result[0], expected)
def test_integer_thousands_alt(self):
data = "123.456\n12.500"
reader = TextFileReader(
StringIO(data), delimiter=":", thousands=".", header=None
)
result = reader.read()
expected = DataFrame([123456, 12500])
tm.assert_frame_equal(result, expected)
def test_skip_bad_lines(self, capsys):
# too many lines, see #2430 for why
data = "a:b:c\nd:e:f\ng:h:i\nj:k:l:m\nl:m:n\no:p:q:r"
reader = TextReader(StringIO(data), delimiter=":", header=None)
msg = r"Error tokenizing data\. C error: Expected 3 fields in line 4, saw 4"
with pytest.raises(parser.ParserError, match=msg):
reader.read()
reader = TextReader(
StringIO(data),
delimiter=":",
header=None,
error_bad_lines=False,
warn_bad_lines=False,
)
result = reader.read()
expected = {
0: np.array(["a", "d", "g", "l"], dtype=object),
1: np.array(["b", "e", "h", "m"], dtype=object),
2: np.array(["c", "f", "i", "n"], dtype=object),
}
assert_array_dicts_equal(result, expected)
reader = TextReader(
StringIO(data),
delimiter=":",
header=None,
error_bad_lines=False,
warn_bad_lines=True,
)
reader.read()
captured = capsys.readouterr()
assert "Skipping line 4" in captured.err
assert "Skipping line 6" in captured.err
def test_header_not_enough_lines(self):
data = "skip this\nskip this\na,b,c\n1,2,3\n4,5,6"
reader = TextReader(StringIO(data), delimiter=",", header=2)
header = reader.header
expected = [["a", "b", "c"]]
assert header == expected
recs = reader.read()
expected = {
0: np.array([1, 4], dtype=np.int64),
1: np.array([2, 5], dtype=np.int64),
2: np.array([3, 6], dtype=np.int64),
}
assert_array_dicts_equal(recs, expected)
def test_escapechar(self):
data = '\\"hello world"\n' '\\"hello world"\n' '\\"hello world"'
reader = TextReader(StringIO(data), delimiter=",", header=None, escapechar="\\")
result = reader.read()
expected = {0: np.array(['"hello world"'] * 3, dtype=object)}
assert_array_dicts_equal(result, expected)
def test_eof_has_eol(self):
# handling of new line at EOF
pass
def test_na_substitution(self):
pass
def test_numpy_string_dtype(self):
data = """\
a,1
aa,2
aaa,3
aaaa,4
aaaaa,5"""
def _make_reader(**kwds):
return TextReader(StringIO(data), delimiter=",", header=None, **kwds)
reader = _make_reader(dtype="S5,i4")
result = reader.read()
assert result[0].dtype == "S5"
ex_values = np.array(["a", "aa", "aaa", "aaaa", "aaaaa"], dtype="S5")
assert (result[0] == ex_values).all()
assert result[1].dtype == "i4"
reader = _make_reader(dtype="S4")
result = reader.read()
assert result[0].dtype == "S4"
ex_values = np.array(["a", "aa", "aaa", "aaaa", "aaaa"], dtype="S4")
assert (result[0] == ex_values).all()
assert result[1].dtype == "S4"
def test_pass_dtype(self):
data = """\
one,two
1,a
2,b
3,c
4,d"""
def _make_reader(**kwds):
return TextReader(StringIO(data), delimiter=",", **kwds)
reader = _make_reader(dtype={"one": "u1", 1: "S1"})
result = reader.read()
assert result[0].dtype == "u1"
assert result[1].dtype == "S1"
reader = _make_reader(dtype={"one": np.uint8, 1: object})
result = reader.read()
assert result[0].dtype == "u1"
assert result[1].dtype == "O"
reader = _make_reader(dtype={"one": np.dtype("u1"), 1: np.dtype("O")})
result = reader.read()
assert result[0].dtype == "u1"
assert result[1].dtype == "O"
def test_usecols(self):
data = """\
a,b,c
1,2,3
4,5,6
7,8,9
10,11,12"""
def _make_reader(**kwds):
return TextReader(StringIO(data), delimiter=",", **kwds)
reader = _make_reader(usecols=(1, 2))
result = reader.read()
exp = _make_reader().read()
assert len(result) == 2
assert (result[1] == exp[1]).all()
assert (result[2] == exp[2]).all()
def test_cr_delimited(self):
def _test(text, **kwargs):
nice_text = text.replace("\r", "\r\n")
result = TextReader(StringIO(text), **kwargs).read()
expected = TextReader(StringIO(nice_text), **kwargs).read()
assert_array_dicts_equal(result, expected)
data = "a,b,c\r1,2,3\r4,5,6\r7,8,9\r10,11,12"
_test(data, delimiter=",")
data = "a b c\r1 2 3\r4 5 6\r7 8 9\r10 11 12"
_test(data, delim_whitespace=True)
data = "a,b,c\r1,2,3\r4,5,6\r,88,9\r10,11,12"
_test(data, delimiter=",")
sample = (
"A,B,C,D,E,F,G,H,I,J,K,L,M,N,O\r"
"AAAAA,BBBBB,0,0,0,0,0,0,0,0,0,0,0,0,0\r"
",BBBBB,0,0,0,0,0,0,0,0,0,0,0,0,0"
)
_test(sample, delimiter=",")
data = "A B C\r 2 3\r4 5 6"
_test(data, delim_whitespace=True)
data = "A B C\r2 3\r4 5 6"
_test(data, delim_whitespace=True)
def test_empty_field_eof(self):
data = "a,b,c\n1,2,3\n4,,"
result = TextReader(StringIO(data), delimiter=",").read()
expected = {
0: np.array([1, 4], dtype=np.int64),
1: np.array(["2", ""], dtype=object),
2: np.array(["3", ""], dtype=object),
}
assert_array_dicts_equal(result, expected)
# GH5664
a = DataFrame([["b"], [nan]], columns=["a"], index=["a", "c"])
b = DataFrame([[1, 1, 1, 0], [1, 1, 1, 0]], columns=list("abcd"), index=[1, 1])
c = DataFrame(
[[1, 2, 3, 4], [6, nan, nan, nan], [8, 9, 10, 11], [13, 14, nan, nan]],
columns=list("abcd"),
index=[0, 5, 7, 12],
)
for _ in range(100):
df = read_csv(StringIO("a,b\nc\n"), skiprows=0, names=["a"], engine="c")
assert_frame_equal(df, a)
df = read_csv(
StringIO("1,1,1,1,0\n" * 2 + "\n" * 2), names=list("abcd"), engine="c"
)
assert_frame_equal(df, b)
df = read_csv(
StringIO("0,1,2,3,4\n5,6\n7,8,9,10,11\n12,13,14"),
names=list("abcd"),
engine="c",
)
assert_frame_equal(df, c)
def test_empty_csv_input(self):
# GH14867
df = read_csv(StringIO(), chunksize=20, header=None, names=["a", "b", "c"])
assert isinstance(df, TextFileReader)
def assert_array_dicts_equal(left, right):
for k, v in left.items():
tm.assert_numpy_array_equal(np.asarray(v), np.asarray(right[k]))
|
apache-2.0
|
UNH-CORE/turbine-test-bed
|
drag_right/run.py
|
1
|
5330
|
#!/usr/bin/env python
"""
This module will run a calibration on the drag slides from 0 to 500 lbf.
"""
from __future__ import division, print_function
import pandas as pd
import numpy as np
from pxl import timeseries as ts
import time
import daqmx
import os
import sys
import json
import scipy.stats
import matplotlib.pyplot as plt
if sys.version_info[0] == 2:
input = raw_input
side = "right"
test_dur = 30.0 # Seconds
sample_rate = 2000 # Hz
max_force = 500.0 # lbf
min_force = 0.0
steps_ascending = 10
steps_descending = 10
device = "cDAQ9188-16D66BBMod3"
phys_chan = "ai2"
plot = True
def create_dataframe(direction):
df = pd.DataFrame()
if direction == "ascending":
df["nominal_force"] = np.linspace(min_force, max_force, steps_ascending)
elif direction == "descending":
df["nominal_force"] = np.linspace(max_force, min_force, steps_descending)
df["initial_force"] = np.zeros(len(df.nominal_force))
df["final_force"] = np.zeros(len(df.nominal_force))
df["mean_volts_per_volt"] = np.zeros(len(df.nominal_force))
df["std_volts_per_volt"] = np.zeros(len(df.nominal_force))
return df
def collect_data(duration):
"""Collects data from the specified channel for the duration."""
print("\nCollecting data for {} seconds".format(duration))
c = daqmx.channels.AnalogInputBridgeChannel()
c.physical_channel = "{}/{}".format(device, phys_chan)
c.name = "volts_per_volt"
c.voltage_exc_value = 10.0
task = daqmx.tasks.Task()
task.add_channel(c)
task.sample_rate = sample_rate
task.setup_append_data()
task.start()
while len(task.data["time"]) < duration*sample_rate:
time.sleep(0.2)
task.stop()
task.clear()
print("Data collection complete")
return task.data
def regress(applied_force, volts_per_volt):
"""Linearly regress applied force versus V/V"""
results = scipy.stats.linregress(volts_per_volt, applied_force)
slope, intercept, r_value, p_value, std_err = results
return {"slope" : slope,
"intercept" : intercept,
"r_value" : r_value,
"p_value" : p_value,
"std_err" : std_err,
"units" : "N/(V/V)"}
def save_raw_data(data_dict, index, direction):
folder = os.path.join("data", "raw", direction, str(index))
path = os.path.join(folder, "data.h5")
if not os.path.isdir(folder):
os.makedirs(folder)
ts.savehdf(path, data_dict.to_dict("list"), mode="w")
print("Saved raw data to", path)
def save_metadata(metadata):
with open("calibration.json", "w") as f:
json.dump(metadata, f, indent=4)
def run_cal(direction):
print("Running calibration", direction)
df = create_dataframe(direction)
for index, force in enumerate(df.nominal_force):
print("\nSet the applied force to {} lbf".format(force))
initial_force = float(input("What is the current applied force? "))
df.initial_force[index] = initial_force
rawdata = collect_data(test_dur)
save_raw_data(rawdata, index, direction)
df.mean_volts_per_volt[index] = np.mean(rawdata["volts_per_volt"])
df.std_volts_per_volt[index] = np.std(rawdata["volts_per_volt"])
print("Mean measured voltage: {} V/V".format(df.mean_volts_per_volt[index]))
final_force = float(input("What is the current applied force? "))
df.final_force[index] = final_force
df["mean_force_lbf"] = (df.initial_force + df.final_force)/2
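    # convert pounds-force to newtons (1 lbf ~= 4.44822162 N, the factor below)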
df["mean_force_newtons"] = df.mean_force_lbf*4.44822162
print("\n{} calibration complete".format(direction.title()))
print("\nResults:\n")
print(df)
csv_folder = os.path.join("data", "processed")
if not os.path.isdir(csv_folder):
os.makedirs(csv_folder)
csv_path = os.path.join(csv_folder, direction + ".csv")
df.to_csv(csv_path, index=False)
regression = regress(df.mean_force_newtons, df.mean_volts_per_volt)
print("\n{} regression:".format(direction.title()))
for k, v in regression.items():
print(k, ":", v)
return df, regression
def main():
print("Calibrating {} drag slide\n".format(side))
metadata = {}
metadata["side"] = side
metadata["9237 physical channel"] = phys_chan
df_asc, reg_asc = run_cal("ascending")
df_desc, reg_desc = run_cal("descending")
metadata["linear regression ascending"] = reg_asc
metadata["linear regression descending"] = reg_desc
df_all = df_asc.append(df_desc)
reg_all = regress(df_all.mean_force_newtons, df_all.mean_volts_per_volt)
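    # Illustrative sketch of applying the combined calibration (the name
    # 'measured_volts_per_volt' is hypothetical, not defined in this script):
    # force_newtons = reg_all["slope"] * measured_volts_per_volt + reg_all["intercept"]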
metadata["linear regression all"] = reg_all
metadata["timestamp"] = time.asctime()
save_metadata(metadata)
if plot:
plt.style.use("ggplot")
plt.figure()
plt.plot(df_asc.mean_volts_per_volt, df_asc.mean_force_newtons, "ok",
label="Meas. asc.")
plt.plot(df_desc.mean_volts_per_volt, df_desc.mean_force_newtons, "sb",
label="Meas. desc.")
plt.xlabel("V/V")
plt.ylabel("Applied force (N)")
plt.plot(df_all.mean_volts_per_volt, df_all.mean_volts_per_volt*reg_all["slope"] \
+ reg_all["intercept"], label="Lin. reg. all")
plt.legend(loc=2)
plt.grid(True)
plt.show()
if __name__ == "__main__":
main()
|
mit
|
MatthieuBizien/scikit-learn
|
examples/svm/plot_weighted_samples.py
|
95
|
1943
|
"""
=====================
SVM: Weighted samples
=====================
Plot the decision function of a weighted dataset, where the size of each
point is proportional to its weight.
The sample weighting rescales the C parameter, which means that the classifier
puts more emphasis on getting these points right. The effect might often be
subtle.
To emphasize the effect here, we particularly weight outliers, making the
deformation of the decision boundary very visible.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
def plot_decision_function(classifier, sample_weight, axis, title):
# plot the decision function
xx, yy = np.meshgrid(np.linspace(-4, 5, 500), np.linspace(-4, 5, 500))
Z = classifier.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
# plot the line, the points, and the nearest vectors to the plane
axis.contourf(xx, yy, Z, alpha=0.75, cmap=plt.cm.bone)
axis.scatter(X[:, 0], X[:, 1], c=y, s=100 * sample_weight, alpha=0.9,
cmap=plt.cm.bone)
axis.axis('off')
axis.set_title(title)
# we create 20 points
np.random.seed(0)
X = np.r_[np.random.randn(10, 2) + [1, 1], np.random.randn(10, 2)]
y = [1] * 10 + [-1] * 10
sample_weight_last_ten = abs(np.random.randn(len(X)))
sample_weight_constant = np.ones(len(X))
# and bigger weights to some outliers
sample_weight_last_ten[15:] *= 5
sample_weight_last_ten[9] *= 15
# fit the model with sample weights and, for reference, a model without weights
clf_weights = svm.SVC()
clf_weights.fit(X, y, sample_weight=sample_weight_last_ten)
clf_no_weights = svm.SVC()
clf_no_weights.fit(X, y)
fig, axes = plt.subplots(1, 2, figsize=(14, 6))
plot_decision_function(clf_no_weights, sample_weight_constant, axes[0],
"Constant weights")
plot_decision_function(clf_weights, sample_weight_last_ten, axes[1],
"Modified weights")
plt.show()
|
bsd-3-clause
|
mwv/scikit-learn
|
examples/cluster/plot_kmeans_stability_low_dim_dense.py
|
338
|
4324
|
"""
============================================================
Empirical evaluation of the impact of k-means initialization
============================================================
Evaluate the ability of k-means initialization strategies to make
the algorithm convergence robust as measured by the relative standard
deviation of the inertia of the clustering (i.e. the sum of distances
to the nearest cluster center).
The first plot shows the best inertia reached for each combination
of the model (``KMeans`` or ``MiniBatchKMeans``) and the init method
(``init="random"`` or ``init="kmeans++"``) for increasing values of the
``n_init`` parameter that controls the number of initializations.
The second plot demonstrates a single run of the ``MiniBatchKMeans``
estimator using ``init="random"`` and ``n_init=1``. This run leads to
a bad convergence (local optimum) with estimated centers stuck
between ground truth clusters.
The dataset used for evaluation is a 2D grid of isotropic Gaussian
clusters widely spaced.
"""
print(__doc__)
# Author: Olivier Grisel <[email protected]>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from sklearn.utils import shuffle
from sklearn.utils import check_random_state
from sklearn.cluster import MiniBatchKMeans
from sklearn.cluster import KMeans
random_state = np.random.RandomState(0)
# Number of runs (each with a randomly generated dataset) for each strategy so as
# to be able to compute an estimate of the standard deviation
n_runs = 5
# k-means models can do several random inits so as to be able to trade
# CPU time for convergence robustness
n_init_range = np.array([1, 5, 10, 15, 20])
# Datasets generation parameters
n_samples_per_center = 100
grid_size = 3
scale = 0.1
n_clusters = grid_size ** 2
def make_data(random_state, n_samples_per_center, grid_size, scale):
random_state = check_random_state(random_state)
centers = np.array([[i, j]
for i in range(grid_size)
for j in range(grid_size)])
n_clusters_true, n_features = centers.shape
noise = random_state.normal(
scale=scale, size=(n_samples_per_center, centers.shape[1]))
X = np.concatenate([c + noise for c in centers])
y = np.concatenate([[i] * n_samples_per_center
for i in range(n_clusters_true)])
return shuffle(X, y, random_state=random_state)
# Part 1: Quantitative evaluation of various init methods
fig = plt.figure()
plots = []
legends = []
cases = [
(KMeans, 'k-means++', {}),
(KMeans, 'random', {}),
(MiniBatchKMeans, 'k-means++', {'max_no_improvement': 3}),
(MiniBatchKMeans, 'random', {'max_no_improvement': 3, 'init_size': 500}),
]
for factory, init, params in cases:
print("Evaluation of %s with %s init" % (factory.__name__, init))
inertia = np.empty((len(n_init_range), n_runs))
for run_id in range(n_runs):
X, y = make_data(run_id, n_samples_per_center, grid_size, scale)
for i, n_init in enumerate(n_init_range):
km = factory(n_clusters=n_clusters, init=init, random_state=run_id,
n_init=n_init, **params).fit(X)
inertia[i, run_id] = km.inertia_
p = plt.errorbar(n_init_range, inertia.mean(axis=1), inertia.std(axis=1))
plots.append(p[0])
legends.append("%s with %s init" % (factory.__name__, init))
plt.xlabel('n_init')
plt.ylabel('inertia')
plt.legend(plots, legends)
plt.title("Mean inertia for various k-means init across %d runs" % n_runs)
# Part 2: Qualitative visual inspection of the convergence
X, y = make_data(random_state, n_samples_per_center, grid_size, scale)
km = MiniBatchKMeans(n_clusters=n_clusters, init='random', n_init=1,
random_state=random_state).fit(X)
fig = plt.figure()
for k in range(n_clusters):
my_members = km.labels_ == k
color = cm.spectral(float(k) / n_clusters, 1)
plt.plot(X[my_members, 0], X[my_members, 1], 'o', marker='.', c=color)
cluster_center = km.cluster_centers_[k]
plt.plot(cluster_center[0], cluster_center[1], 'o',
markerfacecolor=color, markeredgecolor='k', markersize=6)
plt.title("Example cluster allocation with a single random init\n"
"with MiniBatchKMeans")
plt.show()
|
bsd-3-clause
|
chaluemwut/fbserver
|
venv/lib/python2.7/site-packages/sklearn/utils/fixes.py
|
3
|
10209
|
"""Compatibility fixes for older version of python, numpy and scipy
If you add content to this file, please give the version of the package
at which the fixe is no longer needed.
"""
# Authors: Emmanuelle Gouillart <[email protected]>
# Gael Varoquaux <[email protected]>
# Fabian Pedregosa <[email protected]>
# Lars Buitinck
#
# License: BSD 3 clause
import inspect
import warnings
import numpy as np
import scipy.sparse as sp
np_version = []
for x in np.__version__.split('.'):
try:
np_version.append(int(x))
except ValueError:
# x may be of the form dev-1ea1592
np_version.append(x)
np_version = tuple(np_version)
try:
from scipy.special import expit # SciPy >= 0.10
with np.errstate(invalid='ignore', over='ignore'):
if np.isnan(expit(1000)): # SciPy < 0.14
raise ImportError("no stable expit in scipy.special")
except ImportError:
def expit(x, out=None):
"""Logistic sigmoid function, ``1 / (1 + exp(-x))``.
See sklearn.utils.extmath.log_logistic for the log of this function.
"""
if out is None:
out = np.empty(np.atleast_1d(x).shape, dtype=np.float64)
out[:] = x
# 1 / (1 + exp(-x)) = (1 + tanh(x / 2)) / 2
# This way of computing the logistic is both fast and stable.
out *= .5
np.tanh(out, out)
out += 1
out *= .5
return out.reshape(np.shape(x))
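# (added sanity note) the tanh identity above gives e.g. expit(0.) == 0.5 and
# expit(1000.) == 1.0 exactly in float64, with no overflow warning, which is
# precisely the stability property this ImportError fallback is meant to keep.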
# little dance to see if np.copy has an 'order' keyword argument
if 'order' in inspect.getargspec(np.copy)[0]:
def safe_copy(X):
# Copy, but keep the order
return np.copy(X, order='K')
else:
# Before an 'order' argument was introduced, numpy wouldn't muck with
# the ordering
safe_copy = np.copy
try:
if (not np.allclose(np.divide(.4, 1, casting="unsafe"),
np.divide(.4, 1, casting="unsafe", dtype=np.float))
or not np.allclose(np.divide(.4, 1), .4)):
raise TypeError('Divide not working with dtype: '
'https://github.com/numpy/numpy/issues/3484')
divide = np.divide
except TypeError:
# Compat for old versions of np.divide that do not provide support for
# the dtype args
def divide(x1, x2, out=None, dtype=None):
out_orig = out
if out is None:
out = np.asarray(x1, dtype=dtype)
if out is x1:
out = x1.copy()
else:
if out is not x1:
out[:] = x1
if dtype is not None and out.dtype != dtype:
out = out.astype(dtype)
out /= x2
if out_orig is None and np.isscalar(x1):
out = np.asscalar(out)
return out
try:
np.array(5).astype(float, copy=False)
except TypeError:
# Compat where astype accepted no copy argument
def astype(array, dtype, copy=True):
if array.dtype == dtype:
return array
return array.astype(dtype)
else:
astype = np.ndarray.astype
try:
with warnings.catch_warnings(record=True):
# Don't raise the numpy deprecation warnings that appear in
# 1.9, but avoid Python bug due to simplefilter('ignore')
warnings.simplefilter('always')
sp.csr_matrix([1.0, 2.0, 3.0]).max(axis=0)
except (TypeError, AttributeError):
# in scipy < 0.14.0, sparse matrix min/max doesn't accept an `axis` argument
# the following code is taken from the scipy 0.14 codebase
def _minor_reduce(X, ufunc):
major_index = np.flatnonzero(np.diff(X.indptr))
if X.data.size == 0 and major_index.size == 0:
# Numpy < 1.8.0 doesn't handle empty arrays in reduceat
value = np.zeros_like(X.data)
else:
value = ufunc.reduceat(X.data, X.indptr[major_index])
return major_index, value
def _min_or_max_axis(X, axis, min_or_max):
N = X.shape[axis]
if N == 0:
raise ValueError("zero-size array to reduction operation")
M = X.shape[1 - axis]
mat = X.tocsc() if axis == 0 else X.tocsr()
mat.sum_duplicates()
major_index, value = _minor_reduce(mat, min_or_max)
not_full = np.diff(mat.indptr)[major_index] < N
value[not_full] = min_or_max(value[not_full], 0)
mask = value != 0
major_index = np.compress(mask, major_index)
value = np.compress(mask, value)
from scipy.sparse import coo_matrix
if axis == 0:
res = coo_matrix((value, (np.zeros(len(value)), major_index)),
dtype=X.dtype, shape=(1, M))
else:
res = coo_matrix((value, (major_index, np.zeros(len(value)))),
dtype=X.dtype, shape=(M, 1))
return res.A.ravel()
def _sparse_min_or_max(X, axis, min_or_max):
if axis is None:
if 0 in X.shape:
raise ValueError("zero-size array to reduction operation")
zero = X.dtype.type(0)
if X.nnz == 0:
return zero
m = min_or_max.reduce(X.data.ravel())
if X.nnz != np.product(X.shape):
m = min_or_max(zero, m)
return m
if axis < 0:
axis += 2
if (axis == 0) or (axis == 1):
return _min_or_max_axis(X, axis, min_or_max)
else:
raise ValueError("invalid axis, use 0 for rows, or 1 for columns")
def sparse_min_max(X, axis):
return (_sparse_min_or_max(X, axis, np.minimum),
_sparse_min_or_max(X, axis, np.maximum))
else:
def sparse_min_max(X, axis):
return (X.min(axis=axis).toarray().ravel(),
X.max(axis=axis).toarray().ravel())
try:
from numpy import argpartition
except ImportError:
# numpy.argpartition was introduced in v 1.8.0
def argpartition(a, kth, axis=-1, kind='introselect', order=None):
return np.argsort(a, axis=axis, order=order)
try:
from itertools import combinations_with_replacement
except ImportError:
# Backport of itertools.combinations_with_replacement for Python 2.6,
# from Python 3.4 documentation (http://tinyurl.com/comb-w-r), copyright
# Python Software Foundation (https://docs.python.org/3/license.html)
def combinations_with_replacement(iterable, r):
# combinations_with_replacement('ABC', 2) --> AA AB AC BB BC CC
pool = tuple(iterable)
n = len(pool)
if not n and r:
return
indices = [0] * r
yield tuple(pool[i] for i in indices)
while True:
for i in reversed(range(r)):
if indices[i] != n - 1:
break
else:
return
indices[i:] = [indices[i] + 1] * (r - i)
yield tuple(pool[i] for i in indices)
try:
from numpy import isclose
except ImportError:
def isclose(a, b, rtol=1.e-5, atol=1.e-8, equal_nan=False):
"""
Returns a boolean array where two arrays are element-wise equal within
a tolerance.
This function was added to numpy v1.7.0, and the version you are
running has been backported from numpy v1.8.1. See its documentation
for more details.
"""
def within_tol(x, y, atol, rtol):
with np.errstate(invalid='ignore'):
result = np.less_equal(abs(x-y), atol + rtol * abs(y))
if np.isscalar(a) and np.isscalar(b):
result = bool(result)
return result
x = np.array(a, copy=False, subok=True, ndmin=1)
y = np.array(b, copy=False, subok=True, ndmin=1)
xfin = np.isfinite(x)
yfin = np.isfinite(y)
if all(xfin) and all(yfin):
return within_tol(x, y, atol, rtol)
else:
finite = xfin & yfin
cond = np.zeros_like(finite, subok=True)
# Since we're using boolean indexing, x & y must be the same shape.
# Ideally, we'd just do x, y = broadcast_arrays(x, y). It's in
# lib.stride_tricks, though, so we can't import it here.
x = x * np.ones_like(cond)
y = y * np.ones_like(cond)
# Avoid subtraction with infinite/nan values...
cond[finite] = within_tol(x[finite], y[finite], atol, rtol)
# Check for equality of infinite values...
cond[~finite] = (x[~finite] == y[~finite])
if equal_nan:
# Make NaN == NaN
cond[np.isnan(x) & np.isnan(y)] = True
return cond
if np_version < (1, 8):
def in1d(ar1, ar2, assume_unique=False, invert=False):
# Backport of numpy function in1d 1.8.1 to support numpy 1.6.2
# Ravel both arrays, behavior for the first array could be different
ar1 = np.asarray(ar1).ravel()
ar2 = np.asarray(ar2).ravel()
# This code is significantly faster when the condition is satisfied.
if len(ar2) < 10 * len(ar1) ** 0.145:
if invert:
mask = np.ones(len(ar1), dtype=np.bool)
for a in ar2:
mask &= (ar1 != a)
else:
mask = np.zeros(len(ar1), dtype=np.bool)
for a in ar2:
mask |= (ar1 == a)
return mask
# Otherwise use sorting
if not assume_unique:
ar1, rev_idx = np.unique(ar1, return_inverse=True)
ar2 = np.unique(ar2)
ar = np.concatenate((ar1, ar2))
# We need this to be a stable sort, so always use 'mergesort'
# here. The values from the first array should always come before
# the values from the second array.
order = ar.argsort(kind='mergesort')
sar = ar[order]
if invert:
bool_ar = (sar[1:] != sar[:-1])
else:
bool_ar = (sar[1:] == sar[:-1])
flag = np.concatenate((bool_ar, [invert]))
indx = order.argsort(kind='mergesort')[:len(ar1)]
if assume_unique:
return flag[indx]
else:
return flag[indx][rev_idx]
else:
from numpy import in1d
|
apache-2.0
|
yask123/scikit-learn
|
benchmarks/bench_glmnet.py
|
297
|
3848
|
"""
To run this, you'll need to have installed:
* glmnet-python
* scikit-learn (of course)
It does two benchmarks.
In the first benchmark, we fix the number of features and increase the
number of training samples. Then we plot the computation time as a function
of the number of samples.
In the second benchmark, we fix the number of samples and increase the number
of features (dimensions) of the training set. Then we plot the computation
time as a function of the number of dimensions.
In both cases, only 10% of the features are informative.
"""
import numpy as np
import gc
from time import time
from sklearn.datasets.samples_generator import make_regression
alpha = 0.1
# alpha = 0.01
def rmse(a, b):
return np.sqrt(np.mean((a - b) ** 2))
def bench(factory, X, Y, X_test, Y_test, ref_coef):
gc.collect()
# start time
tstart = time()
clf = factory(alpha=alpha).fit(X, Y)
delta = (time() - tstart)
# stop time
print("duration: %0.3fs" % delta)
print("rmse: %f" % rmse(Y_test, clf.predict(X_test)))
print("mean coef abs diff: %f" % abs(ref_coef - clf.coef_.ravel()).mean())
return delta
if __name__ == '__main__':
from glmnet.elastic_net import Lasso as GlmnetLasso
from sklearn.linear_model import Lasso as ScikitLasso
# Delayed import of pylab
import pylab as pl
scikit_results = []
glmnet_results = []
n = 20
step = 500
n_features = 1000
n_informative = n_features / 10
n_test_samples = 1000
for i in range(1, n + 1):
print('==================')
print('Iteration %s of %s' % (i, n))
print('==================')
X, Y, coef_ = make_regression(
n_samples=(i * step) + n_test_samples, n_features=n_features,
noise=0.1, n_informative=n_informative, coef=True)
X_test = X[-n_test_samples:]
Y_test = Y[-n_test_samples:]
X = X[:(i * step)]
Y = Y[:(i * step)]
print("benchmarking scikit-learn: ")
scikit_results.append(bench(ScikitLasso, X, Y, X_test, Y_test, coef_))
print("benchmarking glmnet: ")
glmnet_results.append(bench(GlmnetLasso, X, Y, X_test, Y_test, coef_))
pl.clf()
xx = range(0, n * step, step)
pl.title('Lasso regression on sample dataset (%d features)' % n_features)
pl.plot(xx, scikit_results, 'b-', label='scikit-learn')
pl.plot(xx, glmnet_results, 'r-', label='glmnet')
pl.legend()
pl.xlabel('number of samples to classify')
pl.ylabel('Time (s)')
pl.show()
# now do a benchmark where the number of points is fixed
# and the variable is the number of features
scikit_results = []
glmnet_results = []
n = 20
step = 100
n_samples = 500
for i in range(1, n + 1):
print('==================')
print('Iteration %02d of %02d' % (i, n))
print('==================')
n_features = i * step
n_informative = n_features / 10
X, Y, coef_ = make_regression(
n_samples=(i * step) + n_test_samples, n_features=n_features,
noise=0.1, n_informative=n_informative, coef=True)
X_test = X[-n_test_samples:]
Y_test = Y[-n_test_samples:]
X = X[:n_samples]
Y = Y[:n_samples]
print("benchmarking scikit-learn: ")
scikit_results.append(bench(ScikitLasso, X, Y, X_test, Y_test, coef_))
print("benchmarking glmnet: ")
glmnet_results.append(bench(GlmnetLasso, X, Y, X_test, Y_test, coef_))
xx = np.arange(100, 100 + n * step, step)
pl.figure('scikit-learn vs. glmnet benchmark results')
pl.title('Regression in high dimensional spaces (%d samples)' % n_samples)
pl.plot(xx, scikit_results, 'b-', label='scikit-learn')
pl.plot(xx, glmnet_results, 'r-', label='glmnet')
pl.legend()
pl.xlabel('number of features')
pl.ylabel('Time (s)')
pl.axis('tight')
pl.show()
|
bsd-3-clause
|
leesavide/pythonista-docs
|
Documentation/matplotlib/mpl_examples/pylab_examples/legend_auto.py
|
3
|
2281
|
"""
This file was written to test matplotlib's autolegend placement
algorithm, but it shows lots of different ways to create legends, so it is
useful as a set of general examples.
Thanks to John Gill and Phil ?? for help at the matplotlib sprint at
pycon 2005 where the auto-legend support was written.
"""
from pylab import *
import sys
rcParams['legend.loc'] = 'best'
N = 100
x = arange(N)
def fig_1():
figure(1)
t = arange(0, 40.0 * pi, 0.1)
l, = plot(t, 100*sin(t), 'r', label='sine')
legend(framealpha=0.5)
def fig_2():
figure(2)
plot(x, 'o', label='x=y')
legend()
def fig_3():
figure(3)
plot(x, -x, 'o', label='x= -y')
legend()
def fig_4():
figure(4)
plot(x, ones(len(x)), 'o', label='y=1')
plot(x, -ones(len(x)), 'o', label='y=-1')
legend()
def fig_5():
figure(5)
n, bins, patches = hist(randn(1000), 40, normed=1)
l, = plot(bins, normpdf(bins, 0.0, 1.0), 'r--', label='fit', linewidth=3)
legend([l, patches[0]], ['fit', 'hist'])
def fig_6():
figure(6)
plot(x, 50-x, 'o', label='y=1')
plot(x, x-50, 'o', label='y=-1')
legend()
def fig_7():
figure(7)
xx = x - (N/2.0)
plot(xx, (xx*xx)-1225, 'bo', label='$y=x^2$')
plot(xx, 25*xx, 'go', label='$y=25x$')
plot(xx, -25*xx, 'mo', label='$y=-25x$')
legend()
def fig_8():
figure(8)
b1 = bar(x, x, color='m')
b2 = bar(x, x[::-1], color='g')
legend([b1[0], b2[0]], ['up', 'down'])
def fig_9():
figure(9)
b1 = bar(x, -x)
b2 = bar(x, -x[::-1], color='r')
legend([b1[0], b2[0]], ['down', 'up'])
def fig_10():
figure(10)
b1 = bar(x, x, bottom=-100, color='m')
b2 = bar(x, x[::-1], bottom=-100, color='g')
b3 = bar(x, -x, bottom=100)
b4 = bar(x, -x[::-1], bottom=100, color='r')
legend([b1[0], b2[0], b3[0], b4[0]], ['bottom right', 'bottom left',
'top left', 'top right'])
if __name__ == '__main__':
nfigs = 10
figures = []
for f in sys.argv[1:]:
try:
figures.append(int(f))
except ValueError:
pass
if len(figures) == 0:
figures = range(1, nfigs+1)
for fig in figures:
fn_name = "fig_%d" % fig
fn = globals()[fn_name]
fn()
show()
|
apache-2.0
|
akrherz/iem
|
htdocs/plotting/auto/scripts100/p161.py
|
1
|
5947
|
"""Max Dewpoint"""
import datetime
from collections import OrderedDict
from pandas.io.sql import read_sql
from pyiem.plot.use_agg import plt
from pyiem.util import get_autoplot_context, get_dbconn
from pyiem.exceptions import NoDataFound
MDICT = OrderedDict(
[
("all", "No Month/Time Limit"),
("spring", "Spring (MAM)"),
("fall", "Fall (SON)"),
("winter", "Winter (DJF)"),
("summer", "Summer (JJA)"),
("jan", "January"),
("feb", "February"),
("mar", "March"),
("apr", "April"),
("may", "May"),
("jun", "June"),
("jul", "July"),
("aug", "August"),
("sep", "September"),
("oct", "October"),
("nov", "November"),
("dec", "December"),
]
)
METRICS = {
"avg_sknt": "Avg Wind Speed (kts)",
"max_sknt": "Max Wind Speed (kts)",
"max_gust": "Max Wind Speed Gust (kts)",
"max_tmpf": "Max Air Temp (F)",
"min_tmpf": "Min Air Temp (F)",
"max_dwpf": "Max Dew Point Temp (F)",
"min_dwpf": "Min Dew Point Temp (F)",
"max_feel": "Max Feels Like Temperature (F)",
"avg_feel": "Avg Feels Like Temperature (F)",
"min_feel": "Min Feels Like Temperature (F)",
"max_rh": "Max Relative Humidity (%)",
"avg_rh": "Avg Relative Humidity (%)",
"min_rh": "Min Relative Humidity (%)",
"pday": "Precipitation (inch)",
}
DIRS = OrderedDict([("aoa", "At or Above"), ("below", "Below")])
def get_description():
""" Return a dict describing how to call this plotter """
desc = dict()
desc["data"] = True
desc["cache"] = 86400
desc[
"description"
] = """This application plots the number of days for a
given month or period of months that a given variable was above or below
some threshold.
"""
desc["arguments"] = [
dict(
type="zstation",
name="zstation",
default="AMW",
network="IA_ASOS",
label="Select Station:",
),
dict(
type="select",
name="var",
default="max_dwpf",
label="Which Variable",
options=METRICS,
),
dict(
type="select",
name="dir",
default="aoa",
label="Threshold Direction:",
options=DIRS,
),
dict(type="int", name="thres", default=65, label="Threshold"),
dict(
type="select",
name="month",
default="all",
label="Month Limiter",
options=MDICT,
),
dict(
type="year",
min=1928,
default=datetime.date.today().year,
label="Year to Highlight",
name="year",
),
]
return desc
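# Illustrative only (keys inferred from the arguments above, values are
# hypothetical): the plotter is driven by a dict such as
# {"zstation": "AMW", "network": "IA_ASOS", "var": "max_dwpf",
#  "dir": "aoa", "thres": 65, "month": "all", "year": 2019}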
def get_context(fdict):
"""Do the processing work"""
pgconn = get_dbconn("iem")
ctx = get_autoplot_context(fdict, get_description())
station = ctx["zstation"]
month = ctx["month"]
varname = ctx["var"]
mydir = ctx["dir"]
threshold = ctx["thres"]
offset = "day"
if month == "all":
months = range(1, 13)
elif month == "fall":
months = [9, 10, 11]
elif month == "winter":
months = [12, 1, 2]
offset = "day + '1 month'::interval"
elif month == "spring":
months = [3, 4, 5]
elif month == "summer":
months = [6, 7, 8]
else:
ts = datetime.datetime.strptime("2000-" + month + "-01", "%Y-%b-%d")
# make sure it is length two for the trick below in SQL
months = [ts.month, 999]
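# (added note) 999 never matches a real month number, so the sentinel only
# pads the list; the "in %s" clause below then always receives a
# multi-element tuple from the database driver.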
opp = ">=" if mydir == "aoa" else "<"
ctx["df"] = read_sql(
f"""
SELECT extract(year from {offset})::int as year,
sum(case when {varname}::int {opp} %s then 1 else 0 end) as count
from summary s JOIN stations t on (s.iemid = t.iemid)
WHERE t.id = %s and t.network = %s and extract(month from day) in %s
and {varname} is not null
GROUP by year ORDER by year ASC
""",
pgconn,
params=(threshold, station, ctx["network"], tuple(months)),
index_col="year",
)
ctx["title"] = "(%s) %s %s %.0f" % (
MDICT[ctx["month"]],
METRICS[ctx["var"]],
DIRS[ctx["dir"]],
ctx["thres"],
)
ctx["subtitle"] = "%s [%s]" % (
ctx["_nt"].sts[ctx["zstation"]]["name"],
ctx["zstation"],
)
return ctx
def highcharts(fdict):
"""Highcharts output"""
ctx = get_context(fdict)
ctx["df"].reset_index(inplace=True)
data = ctx["df"][["year", "count"]].to_json(orient="values")
return (
"""
$("#ap_container").highcharts({
chart: {
type: 'column'
},
yAxis: {title: {text: 'Days'}},
title: {text: '"""
+ ctx["title"]
+ """'},
subtitle: {text: '"""
+ ctx["subtitle"]
+ """'},
series: [{
name: 'Days',
data: """
+ data
+ """
}]
});
"""
)
def plotter(fdict):
""" Go """
ctx = get_context(fdict)
df = ctx["df"]
if df.empty:
raise NoDataFound("Error, no results returned!")
(fig, ax) = plt.subplots(1, 1)
ax.set_title("%s\n%s" % (ctx["title"], ctx["subtitle"]))
ax.bar(
df.index.values, df["count"], align="center", fc="green", ec="green"
)
if ctx["year"] in df.index:
ax.bar(
ctx["year"],
df.at[ctx["year"], "count"],
align="center",
fc="red",
ec="red",
zorder=5,
)
ax.grid(True)
ax.set_ylabel("Days Per Period")
ax.set_xlim(df.index.min() - 0.5, df.index.max() + 0.5)
avgv = df["count"].mean()
ax.axhline(avgv)
ax.text(df.index.max() + 1, avgv, "%.1f" % (avgv,))
return fig, df
if __name__ == "__main__":
plotter(dict(network="IA_ASOS", station="AMW"))
|
mit
|
FRESNA/PyPSA
|
pypsa/contingency.py
|
1
|
9855
|
## Copyright 2016-2017 Tom Brown (FIAS)
## This program is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 3 of the
## License, or (at your option) any later version.
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
## You should have received a copy of the GNU General Public License
## along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Functionality for contingency analysis, such as branch outages.
"""
# make the code as Python 3 compatible as possible
from __future__ import division, absolute_import
__author__ = "Tom Brown (FIAS)"
__copyright__ = "Copyright 2016-2017 Tom Brown (FIAS), GNU GPL 3"
from scipy.sparse import issparse, csr_matrix, csc_matrix, hstack as shstack, vstack as svstack
from numpy import r_, ones, zeros, newaxis
import logging
logger = logging.getLogger(__name__)
import numpy as np
import pandas as pd
import collections
from .pf import calculate_PTDF, _as_snapshots
from .opt import l_constraint
def calculate_BODF(sub_network, skip_pre=False):
"""
Calculate the Branch Outage Distribution Factor (BODF) for
sub_network.
Sets sub_network.BODF as a (dense) numpy array.
The BODF is a num_branch x num_branch 2d array.
For the outage of branch l, the new flow on branch k is
given in terms of the flow before the outage
f_k^after = f_k^before + BODF_{kl} f_l^before
Note that BODF_{ll} = -1.
Parameters
----------
sub_network : pypsa.SubNetwork
skip_pre : bool, default False
Skip the preliminary step of computing the PTDF.
Examples
--------
>>> sub_network.calculate_BODF()
"""
if not skip_pre:
calculate_PTDF(sub_network)
num_branches = sub_network.PTDF.shape[0]
#build LxL version of PTDF
branch_PTDF = sub_network.PTDF*sub_network.K
denominator = csr_matrix((1/(1-np.diag(branch_PTDF)),(r_[:num_branches],r_[:num_branches])))
sub_network.BODF = branch_PTDF*denominator
#make sure the flow on the branch itself is zero
np.fill_diagonal(sub_network.BODF,-1)
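# Illustrative use of the BODF (added comment, mirrors the docstring formula):
# given base-case branch flows f (an array of length num_branches), the flows
# after the outage of branch l are estimated as
#   f_after = f + sub_network.BODF[:, l] * f[l]
# which is exactly how network_lpf_contingency() below applies it.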
def network_lpf_contingency(network, snapshots=None, branch_outages=None):
"""
Computes linear power flow for a selection of branch outages.
Parameters
----------
snapshots : list-like|single snapshot
A subset or an elements of network.snapshots on which to run
the power flow, defaults to network.snapshots
NB: currently this only works for a single snapshot
branch_outages : list-like
A list of passive branches which are to be tested for outages.
If None, it is taken as all network.passive_branches_i()
Returns
-------
p0 : pandas.DataFrame
num_passive_branch x num_branch_outages DataFrame of new power flows
Examples
--------
>>> network.lpf_contingency(snapshot, branch_outages)
"""
if snapshots is None:
snapshots = network.snapshots
if isinstance(snapshots, collections.Iterable):
logger.warning("Apologies LPF contingency, this only works for single snapshots at the moment, taking the first snapshot.")
snapshot = snapshots[0]
else:
snapshot = snapshots
network.lpf(snapshot)
# Store the flows from the base case
passive_branches = network.passive_branches()
if branch_outages is None:
branch_outages = passive_branches.index
p0_base = pd.Series(index=passive_branches.index)
for c in network.passive_branch_components:
pnl = network.pnl(c)
p0_base[c] = pnl.p0.loc[snapshot]
for sn in network.sub_networks.obj:
sn._branches = sn.branches()
sn.calculate_BODF()
p0 = pd.DataFrame(index=passive_branches.index)
p0["base"] = p0_base
for branch in branch_outages:
if type(branch) is not tuple:
logger.warning("No type given for {}, assuming it is a line".format(branch))
branch = ("Line",branch)
sn = network.sub_networks.obj[passive_branches.sub_network[branch]]
branch_i = sn._branches.index.get_loc(branch)
p0_new = p0_base + pd.Series(sn.BODF[:,branch_i]*p0_base[branch],sn._branches.index)
p0[branch] = p0_new
return p0
def network_sclopf(network, snapshots=None, branch_outages=None, solver_name="glpk",
skip_pre=False, extra_functionality=None, solver_options={},
keep_files=False, formulation="angles", ptdf_tolerance=0.):
"""
Computes Security-Constrained Linear Optimal Power Flow (SCLOPF).
This ensures that no branch is overloaded even given the branch outages.
Parameters
----------
snapshots : list or index slice
A list of snapshots to optimise, must be a subset of
network.snapshots, defaults to network.snapshots
branch_outages : list-like
A list of passive branches which are to be tested for outages.
If None, it is taken as all network.passive_branches_i()
solver_name : string
Must be a solver name that pyomo recognises and that is
installed, e.g. "glpk", "gurobi"
skip_pre : bool, default False
Skip the preliminary steps of computing topology, calculating
dependent values and finding bus controls.
extra_functionality : callable function
This function must take two arguments
`extra_functionality(network,snapshots)` and is called after
the model building is complete, but before it is sent to the
solver. It allows the user to add/change constraints and
add/change the objective function.
solver_options : dictionary
A dictionary with additional options that get passed to the solver.
(e.g. {'threads':2} tells gurobi to use only 2 cpus)
keep_files : bool, default False
Keep the files that pyomo constructs from OPF problem
construction, e.g. .lp file - useful for debugging
formulation : string
Formulation of the linear power flow equations to use; must be
one of ["angles","cycles","kirchoff","ptdf"]
ptdf_tolerance : float
Returns
-------
None
Examples
--------
>>> network.sclopf(snapshots, branch_outages=branch_outages)
"""
if not skip_pre:
network.determine_network_topology()
snapshots = _as_snapshots(network, snapshots)
passive_branches = network.passive_branches()
if branch_outages is None:
branch_outages = passive_branches.index
#prepare the sub networks by calculating BODF and preparing helper DataFrames
for sn in network.sub_networks.obj:
sn.calculate_BODF()
sn._branches = sn.branches()
sn._branches["_i"] = range(sn._branches.shape[0])
sn._extendable_branches = sn._branches[sn._branches.s_nom_extendable]
sn._fixed_branches = sn._branches[~ sn._branches.s_nom_extendable]
def add_contingency_constraints(network,snapshots):
#a list of tuples with branch_outage and passive branches in same sub_network
branch_outage_keys = []
flow_upper = {}
flow_lower = {}
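# (added note) for every outaged branch l and every branch b in the same
# sub-network, the constraints built below bound the estimated post-outage flow
# p_b + BODF[b, l] * p_l between -s_nom and +s_nom (with s_nom itself a model
# variable for extendable branches).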
for branch in branch_outages:
if type(branch) is not tuple:
logger.warning("No type given for {}, assuming it is a line".format(branch))
branch = ("Line",branch)
sub = network.sub_networks.at[passive_branches.at[branch,"sub_network"],"obj"]
branch_i = sub._branches.at[branch,"_i"]
branch_outage_keys.extend([(branch[0],branch[1],b[0],b[1]) for b in sub._branches.index])
flow_upper.update({(branch[0],branch[1],b[0],b[1],sn) : [[(1,network.model.passive_branch_p[b[0],b[1],sn]),(sub.BODF[sub._branches.at[b,"_i"],branch_i],network.model.passive_branch_p[branch[0],branch[1],sn])],"<=",sub._fixed_branches.at[b,"s_nom"]] for b in sub._fixed_branches.index for sn in snapshots})
flow_upper.update({(branch[0],branch[1],b[0],b[1],sn) : [[(1,network.model.passive_branch_p[b[0],b[1],sn]),(sub.BODF[sub._branches.at[b,"_i"],branch_i],network.model.passive_branch_p[branch[0],branch[1],sn]),(-1,network.model.passive_branch_s_nom[b[0],b[1]])],"<=",0] for b in sub._extendable_branches.index for sn in snapshots})
flow_lower.update({(branch[0],branch[1],b[0],b[1],sn) : [[(1,network.model.passive_branch_p[b[0],b[1],sn]),(sub.BODF[sub._branches.at[b,"_i"],branch_i],network.model.passive_branch_p[branch[0],branch[1],sn])],">=",-sub._fixed_branches.at[b,"s_nom"]] for b in sub._fixed_branches.index for sn in snapshots})
flow_lower.update({(branch[0],branch[1],b[0],b[1],sn) : [[(1,network.model.passive_branch_p[b[0],b[1],sn]),(sub.BODF[sub._branches.at[b,"_i"],branch_i],network.model.passive_branch_p[branch[0],branch[1],sn]),(1,network.model.passive_branch_s_nom[b[0],b[1]])],">=",0] for b in sub._extendable_branches.index for sn in snapshots})
l_constraint(network.model,"contingency_flow_upper",flow_upper,branch_outage_keys,snapshots)
l_constraint(network.model,"contingency_flow_lower",flow_lower,branch_outage_keys,snapshots)
if extra_functionality is not None:
extra_functionality(network, snapshots)
#need to skip preparation otherwise it recalculates the sub-networks
network.lopf(snapshots=snapshots, solver_name=solver_name, skip_pre=True,
extra_functionality=add_contingency_constraints,
solver_options=solver_options, keep_files=keep_files,
formulation=formulation, ptdf_tolerance=ptdf_tolerance)
|
gpl-3.0
|
nik7273/computational-medical-knowledge
|
src/analyze.py
|
2
|
1910
|
# -*- coding: utf-8 -*-
"""
python analyze.py --pipeline "method1 method2" --input "file1 file2"
"""
import optparse
import itertools
import os
import matplotlib
matplotlib.use('Agg')
import seaborn as sns
import matplotlib.pyplot as plt
import utils as tech
import numpy as np
from sys import argv
from os.path import basename
parser = optparse.OptionParser()
parser.add_option('--pipeline', action="store", default="jaccard", help="List of steps in pipeline")
parser.add_option('--input', action="store", default=".", help="List of input files")
options, args = parser.parse_args()
command_line_arguments = [command.strip().lower() for command in options.pipeline.split()]
pipeline = [getattr(tech, command) for command in command_line_arguments]
if os.path.isdir(options.input):
input_filenames = [os.path.join(options.input,filename.strip().lower()) for filename in os.listdir(options.input)]
input_filenames = [filename for filename in input_filenames if filename.endswith('.txt')]
else:
input_filenames = [filename.strip().lower() for filename in options.input.split()]
data = {basename(filename):tech.filestream_to_word_list(open(filename,'rb')) for filename in input_filenames}
#Analysis methods are pairwise
#Calculate Jaccard similarity
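# (added note) tech.jaccard is assumed to implement the usual Jaccard index,
# |A & B| / |A | B|, on the word sets of the two documents; the loop below only
# fills the strict upper triangle, then mirrors it and sets the diagonal to 1.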
keys = data.keys()
jaccard_similarity = np.zeros((len(keys),len(keys)))
for j in xrange(jaccard_similarity.shape[1]):
for i in xrange(j):
jaccard_similarity[i,j] = tech.jaccard(data[keys[i]],data[keys[j]])
jaccard_similarity += jaccard_similarity.transpose()
jaccard_similarity[np.diag_indices_from(jaccard_similarity)] = 1
np.savetxt('../data/jaccard_similarity.tsv',jaccard_similarity,fmt='%.04f',header = ' '.join(keys))
fig, ax = plt.subplots()
ax = sns.heatmap(jaccard_similarity, annot=True, fmt='.02f', square = True,
xticklabels = keys, yticklabels=keys)
plt.tight_layout()
plt.savefig('../graphs/jaccard_similarity.png')
|
apache-2.0
|
mjudsp/Tsallis
|
sklearn/linear_model/tests/test_least_angle.py
|
42
|
20925
|
from nose.tools import assert_equal
import numpy as np
from scipy import linalg
from sklearn.model_selection import train_test_split
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_no_warnings, assert_warns
from sklearn.utils.testing import TempMemmap
from sklearn.exceptions import ConvergenceWarning
from sklearn import linear_model, datasets
from sklearn.linear_model.least_angle import _lars_path_residues
diabetes = datasets.load_diabetes()
X, y = diabetes.data, diabetes.target
# TODO: use another dataset that has multiple drops
def test_simple():
# Principle of Lars is to keep covariances tied and decreasing
# also test verbose output
from sklearn.externals.six.moves import cStringIO as StringIO
import sys
old_stdout = sys.stdout
try:
sys.stdout = StringIO()
alphas_, active, coef_path_ = linear_model.lars_path(
diabetes.data, diabetes.target, method="lar", verbose=10)
sys.stdout = old_stdout
for (i, coef_) in enumerate(coef_path_.T):
res = y - np.dot(X, coef_)
cov = np.dot(X.T, res)
C = np.max(abs(cov))
eps = 1e-3
ocur = len(cov[C - eps < abs(cov)])
if i < X.shape[1]:
assert_true(ocur == i + 1)
else:
# no more than max_pred variables can go into the active set
assert_true(ocur == X.shape[1])
finally:
sys.stdout = old_stdout
def test_simple_precomputed():
# The same, with precomputed Gram matrix
G = np.dot(diabetes.data.T, diabetes.data)
alphas_, active, coef_path_ = linear_model.lars_path(
diabetes.data, diabetes.target, Gram=G, method="lar")
for i, coef_ in enumerate(coef_path_.T):
res = y - np.dot(X, coef_)
cov = np.dot(X.T, res)
C = np.max(abs(cov))
eps = 1e-3
ocur = len(cov[C - eps < abs(cov)])
if i < X.shape[1]:
assert_true(ocur == i + 1)
else:
# no more than max_pred variables can go into the active set
assert_true(ocur == X.shape[1])
def test_all_precomputed():
# Test that lars_path with precomputed Gram and Xy gives the right answer
X, y = diabetes.data, diabetes.target
G = np.dot(X.T, X)
Xy = np.dot(X.T, y)
for method in 'lar', 'lasso':
output = linear_model.lars_path(X, y, method=method)
output_pre = linear_model.lars_path(X, y, Gram=G, Xy=Xy, method=method)
for expected, got in zip(output, output_pre):
assert_array_almost_equal(expected, got)
def test_lars_lstsq():
# Test that Lars gives least square solution at the end
# of the path
X1 = 3 * diabetes.data # use un-normalized dataset
clf = linear_model.LassoLars(alpha=0.)
clf.fit(X1, y)
coef_lstsq = np.linalg.lstsq(X1, y)[0]
assert_array_almost_equal(clf.coef_, coef_lstsq)
def test_lasso_gives_lstsq_solution():
# Test that Lars Lasso gives least square solution at the end
# of the path
alphas_, active, coef_path_ = linear_model.lars_path(X, y, method="lasso")
coef_lstsq = np.linalg.lstsq(X, y)[0]
assert_array_almost_equal(coef_lstsq, coef_path_[:, -1])
def test_collinearity():
# Check that lars_path is robust to collinearity in input
X = np.array([[3., 3., 1.],
[2., 2., 0.],
[1., 1., 0]])
y = np.array([1., 0., 0])
f = ignore_warnings
_, _, coef_path_ = f(linear_model.lars_path)(X, y, alpha_min=0.01)
assert_true(not np.isnan(coef_path_).any())
residual = np.dot(X, coef_path_[:, -1]) - y
assert_less((residual ** 2).sum(), 1.) # just make sure it's bounded
n_samples = 10
X = np.random.rand(n_samples, 5)
y = np.zeros(n_samples)
_, _, coef_path_ = linear_model.lars_path(X, y, Gram='auto', copy_X=False,
copy_Gram=False, alpha_min=0.,
method='lasso', verbose=0,
max_iter=500)
assert_array_almost_equal(coef_path_, np.zeros_like(coef_path_))
def test_no_path():
# Test that the ``return_path=False`` option returns the correct output
alphas_, active_, coef_path_ = linear_model.lars_path(
diabetes.data, diabetes.target, method="lar")
alpha_, active, coef = linear_model.lars_path(
diabetes.data, diabetes.target, method="lar", return_path=False)
assert_array_almost_equal(coef, coef_path_[:, -1])
assert_true(alpha_ == alphas_[-1])
def test_no_path_precomputed():
# Test that the ``return_path=False`` option with Gram remains correct
G = np.dot(diabetes.data.T, diabetes.data)
alphas_, active_, coef_path_ = linear_model.lars_path(
diabetes.data, diabetes.target, method="lar", Gram=G)
alpha_, active, coef = linear_model.lars_path(
diabetes.data, diabetes.target, method="lar", Gram=G,
return_path=False)
assert_array_almost_equal(coef, coef_path_[:, -1])
assert_true(alpha_ == alphas_[-1])
def test_no_path_all_precomputed():
# Test that the ``return_path=False`` option with Gram and Xy remains
# correct
X, y = 3 * diabetes.data, diabetes.target
G = np.dot(X.T, X)
Xy = np.dot(X.T, y)
alphas_, active_, coef_path_ = linear_model.lars_path(
X, y, method="lasso", Gram=G, Xy=Xy, alpha_min=0.9)
print("---")
alpha_, active, coef = linear_model.lars_path(
X, y, method="lasso", Gram=G, Xy=Xy, alpha_min=0.9, return_path=False)
assert_array_almost_equal(coef, coef_path_[:, -1])
assert_true(alpha_ == alphas_[-1])
def test_singular_matrix():
# Test when input is a singular matrix
X1 = np.array([[1, 1.], [1., 1.]])
y1 = np.array([1, 1])
alphas, active, coef_path = linear_model.lars_path(X1, y1)
assert_array_almost_equal(coef_path.T, [[0, 0], [1, 0]])
def test_rank_deficient_design():
# consistency test that checks that LARS Lasso is handling rank
# deficient input data (with n_features < rank) in the same way
# as coordinate descent Lasso
y = [5, 0, 5]
for X in ([[5, 0],
[0, 5],
[10, 10]],
[[10, 10, 0],
[1e-32, 0, 0],
[0, 0, 1]],
):
# To be able to use the coefs to compute the objective function,
# we need to turn off normalization
lars = linear_model.LassoLars(.1, normalize=False)
coef_lars_ = lars.fit(X, y).coef_
obj_lars = (1. / (2. * 3.)
* linalg.norm(y - np.dot(X, coef_lars_)) ** 2
+ .1 * linalg.norm(coef_lars_, 1))
coord_descent = linear_model.Lasso(.1, tol=1e-6, normalize=False)
coef_cd_ = coord_descent.fit(X, y).coef_
obj_cd = ((1. / (2. * 3.)) * linalg.norm(y - np.dot(X, coef_cd_)) ** 2
+ .1 * linalg.norm(coef_cd_, 1))
assert_less(obj_lars, obj_cd * (1. + 1e-8))
def test_lasso_lars_vs_lasso_cd(verbose=False):
# Test that LassoLars and Lasso using coordinate descent give the
# same results.
X = 3 * diabetes.data
alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso')
lasso_cd = linear_model.Lasso(fit_intercept=False, tol=1e-8)
for c, a in zip(lasso_path.T, alphas):
if a == 0:
continue
lasso_cd.alpha = a
lasso_cd.fit(X, y)
error = linalg.norm(c - lasso_cd.coef_)
assert_less(error, 0.01)
# similar test, with the classifiers
for alpha in np.linspace(1e-2, 1 - 1e-2, 20):
clf1 = linear_model.LassoLars(alpha=alpha, normalize=False).fit(X, y)
clf2 = linear_model.Lasso(alpha=alpha, tol=1e-8,
normalize=False).fit(X, y)
err = linalg.norm(clf1.coef_ - clf2.coef_)
assert_less(err, 1e-3)
# same test, with normalized data
X = diabetes.data
alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso')
lasso_cd = linear_model.Lasso(fit_intercept=False, normalize=True,
tol=1e-8)
for c, a in zip(lasso_path.T, alphas):
if a == 0:
continue
lasso_cd.alpha = a
lasso_cd.fit(X, y)
error = linalg.norm(c - lasso_cd.coef_)
assert_less(error, 0.01)
def test_lasso_lars_vs_lasso_cd_early_stopping(verbose=False):
# Test that LassoLars and Lasso using coordinate descent give the
# same results when early stopping is used.
# (test : before, in the middle, and in the last part of the path)
alphas_min = [10, 0.9, 1e-4]
for alphas_min in alphas_min:
alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso',
alpha_min=0.9)
lasso_cd = linear_model.Lasso(fit_intercept=False, tol=1e-8)
lasso_cd.alpha = alphas[-1]
lasso_cd.fit(X, y)
error = linalg.norm(lasso_path[:, -1] - lasso_cd.coef_)
assert_less(error, 0.01)
alphas_min = [10, 0.9, 1e-4]
# same test, with normalization
for alphas_min in alphas_min:
alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso',
alpha_min=0.9)
lasso_cd = linear_model.Lasso(fit_intercept=True, normalize=True,
tol=1e-8)
lasso_cd.alpha = alphas[-1]
lasso_cd.fit(X, y)
error = linalg.norm(lasso_path[:, -1] - lasso_cd.coef_)
assert_less(error, 0.01)
def test_lasso_lars_path_length():
# Test that the path length of the LassoLars is right
lasso = linear_model.LassoLars()
lasso.fit(X, y)
lasso2 = linear_model.LassoLars(alpha=lasso.alphas_[2])
lasso2.fit(X, y)
assert_array_almost_equal(lasso.alphas_[:3], lasso2.alphas_)
# Also check that the sequence of alphas is always decreasing
assert_true(np.all(np.diff(lasso.alphas_) < 0))
def test_lasso_lars_vs_lasso_cd_ill_conditioned():
# Test lasso lars on a very ill-conditioned design, and check that
# it does not blow up, and stays somewhat close to a solution given
# by the coordinate descent solver
# Also test that lasso_path (using lars_path output style) gives
# the same result as lars_path and previous lasso output style
# under these conditions.
rng = np.random.RandomState(42)
# Generate data
n, m = 70, 100
k = 5
X = rng.randn(n, m)
w = np.zeros((m, 1))
i = np.arange(0, m)
rng.shuffle(i)
supp = i[:k]
w[supp] = np.sign(rng.randn(k, 1)) * (rng.rand(k, 1) + 1)
y = np.dot(X, w)
sigma = 0.2
y += sigma * rng.rand(*y.shape)
y = y.squeeze()
lars_alphas, _, lars_coef = linear_model.lars_path(X, y, method='lasso')
_, lasso_coef2, _ = linear_model.lasso_path(X, y,
alphas=lars_alphas,
tol=1e-6,
fit_intercept=False)
assert_array_almost_equal(lars_coef, lasso_coef2, decimal=1)
def test_lasso_lars_vs_lasso_cd_ill_conditioned2():
# Create an ill-conditioned situation in which the LARS has to go
# far in the path to converge, and check that LARS and coordinate
# descent give the same answers
# Note: it used to be the case that Lars had to use the "drop for good"
# strategy for this, but this is no longer the case with the
# equality_tolerance checks
X = [[1e20, 1e20, 0],
[-1e-32, 0, 0],
[1, 1, 1]]
y = [10, 10, 1]
alpha = .0001
def objective_function(coef):
return (1. / (2. * len(X)) * linalg.norm(y - np.dot(X, coef)) ** 2
+ alpha * linalg.norm(coef, 1))
lars = linear_model.LassoLars(alpha=alpha, normalize=False)
assert_warns(ConvergenceWarning, lars.fit, X, y)
lars_coef_ = lars.coef_
lars_obj = objective_function(lars_coef_)
coord_descent = linear_model.Lasso(alpha=alpha, tol=1e-4, normalize=False)
cd_coef_ = coord_descent.fit(X, y).coef_
cd_obj = objective_function(cd_coef_)
assert_less(lars_obj, cd_obj * (1. + 1e-8))
def test_lars_add_features():
# assure that at least some features get added if necessary
# test for 6d2b4c
# Hilbert matrix
n = 5
H = 1. / (np.arange(1, n + 1) + np.arange(n)[:, np.newaxis])
clf = linear_model.Lars(fit_intercept=False).fit(
H, np.arange(n))
assert_true(np.all(np.isfinite(clf.coef_)))
def test_lars_n_nonzero_coefs(verbose=False):
lars = linear_model.Lars(n_nonzero_coefs=6, verbose=verbose)
lars.fit(X, y)
assert_equal(len(lars.coef_.nonzero()[0]), 6)
# The path should be of length 6 + 1 in a Lars going down to 6
# non-zero coefs
assert_equal(len(lars.alphas_), 7)
@ignore_warnings
def test_multitarget():
# Assure that estimators receiving multidimensional y do the right thing
X = diabetes.data
Y = np.vstack([diabetes.target, diabetes.target ** 2]).T
n_targets = Y.shape[1]
for estimator in (linear_model.LassoLars(), linear_model.Lars()):
estimator.fit(X, Y)
Y_pred = estimator.predict(X)
Y_dec = assert_warns(DeprecationWarning, estimator.decision_function, X)
assert_array_almost_equal(Y_pred, Y_dec)
alphas, active, coef, path = (estimator.alphas_, estimator.active_,
estimator.coef_, estimator.coef_path_)
for k in range(n_targets):
estimator.fit(X, Y[:, k])
y_pred = estimator.predict(X)
assert_array_almost_equal(alphas[k], estimator.alphas_)
assert_array_almost_equal(active[k], estimator.active_)
assert_array_almost_equal(coef[k], estimator.coef_)
assert_array_almost_equal(path[k], estimator.coef_path_)
assert_array_almost_equal(Y_pred[:, k], y_pred)
def test_lars_cv():
# Test the LassoLarsCV object by checking that the optimal alpha
# increases as the number of samples increases.
# This property is not actually guaranteed in general and is just a
# property of the given dataset, with the given steps chosen.
old_alpha = 0
lars_cv = linear_model.LassoLarsCV()
for length in (400, 200, 100):
X = diabetes.data[:length]
y = diabetes.target[:length]
lars_cv.fit(X, y)
np.testing.assert_array_less(old_alpha, lars_cv.alpha_)
old_alpha = lars_cv.alpha_
def test_lasso_lars_ic():
# Test the LassoLarsIC object by checking that
# - some good features are selected.
# - alpha_bic > alpha_aic
# - n_nonzero_bic < n_nonzero_aic
lars_bic = linear_model.LassoLarsIC('bic')
lars_aic = linear_model.LassoLarsIC('aic')
rng = np.random.RandomState(42)
X = diabetes.data
y = diabetes.target
X = np.c_[X, rng.randn(X.shape[0], 4)] # add 4 bad features
lars_bic.fit(X, y)
lars_aic.fit(X, y)
nonzero_bic = np.where(lars_bic.coef_)[0]
nonzero_aic = np.where(lars_aic.coef_)[0]
assert_greater(lars_bic.alpha_, lars_aic.alpha_)
assert_less(len(nonzero_bic), len(nonzero_aic))
assert_less(np.max(nonzero_bic), diabetes.data.shape[1])
# test error on unknown IC
lars_broken = linear_model.LassoLarsIC('<unknown>')
assert_raises(ValueError, lars_broken.fit, X, y)
def test_no_warning_for_zero_mse():
# LassoLarsIC should not warn for log of zero MSE.
y = np.arange(10, dtype=float)
X = y.reshape(-1, 1)
lars = linear_model.LassoLarsIC(normalize=False)
assert_no_warnings(lars.fit, X, y)
assert_true(np.any(np.isinf(lars.criterion_)))
def test_lars_path_readonly_data():
# When using automated memory mapping on large input, the
# fold data is in read-only mode
# This is a non-regression test for:
# https://github.com/scikit-learn/scikit-learn/issues/4597
splitted_data = train_test_split(X, y, random_state=42)
with TempMemmap(splitted_data) as (X_train, X_test, y_train, y_test):
# The following should not fail despite copy=False
_lars_path_residues(X_train, y_train, X_test, y_test, copy=False)
def test_lars_path_positive_constraint():
# this is the main test for the positive parameter on the lars_path method
# the estimator classes just make use of this function
# we do the test on the diabetes dataset
# ensure that we get negative coefficients when positive=False
# and all positive when positive=True
# for method 'lar' (default) and lasso
for method in ['lar', 'lasso']:
alpha, active, coefs = \
linear_model.lars_path(diabetes['data'], diabetes['target'],
return_path=True, method=method,
positive=False)
assert_true(coefs.min() < 0)
alpha, active, coefs = \
linear_model.lars_path(diabetes['data'], diabetes['target'],
return_path=True, method=method,
positive=True)
assert_true(coefs.min() >= 0)
# now we are going to test the positive option for all estimator classes
default_parameter = {'fit_intercept': False}
estimator_parameter_map = {'Lars': {'n_nonzero_coefs': 5},
'LassoLars': {'alpha': 0.1},
'LarsCV': {},
'LassoLarsCV': {},
'LassoLarsIC': {}}
def test_estimatorclasses_positive_constraint():
# testing the transmissibility for the positive option of all estimator
# classes in this same function here
for estname in estimator_parameter_map:
params = default_parameter.copy()
params.update(estimator_parameter_map[estname])
estimator = getattr(linear_model, estname)(positive=False, **params)
estimator.fit(diabetes['data'], diabetes['target'])
assert_true(estimator.coef_.min() < 0)
estimator = getattr(linear_model, estname)(positive=True, **params)
estimator.fit(diabetes['data'], diabetes['target'])
assert_true(min(estimator.coef_) >= 0)
def test_lasso_lars_vs_lasso_cd_positive(verbose=False):
# Test that LassoLars and Lasso using coordinate descent give the
# same results when using the positive option
# This test is basically a copy of the above with the additional positive
# option. However, for the middle part (the comparison of coefficient values
# for a range of alphas) we had to make some adaptations. See below.
# not normalized data
X = 3 * diabetes.data
alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso',
positive=True)
lasso_cd = linear_model.Lasso(fit_intercept=False, tol=1e-8, positive=True)
for c, a in zip(lasso_path.T, alphas):
if a == 0:
continue
lasso_cd.alpha = a
lasso_cd.fit(X, y)
error = linalg.norm(c - lasso_cd.coef_)
assert_less(error, 0.01)
# The range of alphas chosen for coefficient comparison here is restricted
# as compared with the above test without the positive option. This is due
# to the circumstance that the Lars-Lasso algorithm does not converge to
# the least-squares-solution for small alphas, see 'Least Angle Regression'
# by Efron et al 2004. The coefficients are typically in congruence up to
# the smallest alpha reached by the Lars-Lasso algorithm and start to
# diverge thereafter. See
# https://gist.github.com/michigraber/7e7d7c75eca694c7a6ff
for alpha in np.linspace(6e-1, 1 - 1e-2, 20):
clf1 = linear_model.LassoLars(fit_intercept=False, alpha=alpha,
normalize=False, positive=True).fit(X, y)
clf2 = linear_model.Lasso(fit_intercept=False, alpha=alpha, tol=1e-8,
normalize=False, positive=True).fit(X, y)
err = linalg.norm(clf1.coef_ - clf2.coef_)
assert_less(err, 1e-3)
# normalized data
X = diabetes.data
alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso',
positive=True)
lasso_cd = linear_model.Lasso(fit_intercept=False, normalize=True,
tol=1e-8, positive=True)
for c, a in zip(lasso_path.T[:-1], alphas[:-1]): # don't include alpha=0
lasso_cd.alpha = a
lasso_cd.fit(X, y)
error = linalg.norm(c - lasso_cd.coef_)
assert_less(error, 0.01)
|
bsd-3-clause
|
agrawalabhishek/NAOS
|
python/plotEnergyAndAngularMomentum.py
|
1
|
2895
|
'''
Copyright (c) 2016 Abhishek Agrawal ([email protected])
Distributed under the MIT License.
See accompanying file LICENSE.md or copy at http://opensource.org/licenses/MIT
'''
# Set up modules and packages
# I/O
import csv
from pprint import pprint
# Numerical
import numpy as np
import pandas as pd
from scipy.interpolate import griddata
import math
# System
import sys
import time
from tqdm import tqdm
# Get plotting packages
import matplotlib
import matplotlib.colors as colors
import matplotlib.axes
import matplotlib.lines as mlines
import matplotlib.patches as mpatches
import matplotlib.pyplot as plt
import matplotlib.mlab as mlab
from matplotlib import rcParams
from matplotlib import cm
from matplotlib import gridspec
from mpl_toolkits.mplot3d import Axes3D
from mpl_toolkits.mplot3d import axes3d
from mpl_toolkits.basemap import Basemap
from matplotlib.ticker import LinearLocator, FormatStrFormatter
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
import matplotlib.tri as tri
print ""
print "---------------------------------------------------------------------------------"
print " NAOS "
print " "
print " Copyright (c) 2016, A. Agrawal ([email protected]) "
print "---------------------------------------------------------------------------------"
print ""
# Start timer.
start_time = time.time( )
## Operations
# Read data from the csv file. The data is returned as a pandas DataFrame.
data = pd.read_csv( '../data/solutionParticleAroundUniformlyRotatingEllipsoid_orbitalElements.csv' )
t = data[ 'time' ].values
energy = data[ 'energy' ].values
angularMomentum = data[ 'angularMomentum' ].values
# convert time in seconds to earth days
t = t / ( 24.0 * 60.0 * 60.0 )
## Set up the figure
fig = plt.figure( )
ax1 = fig.add_subplot( 211 )
ax2 = fig.add_subplot( 212 )
## plot the energy
ax1.plot( t, energy, color=colors.cnames['purple'] )
ax1.set_xlabel('time [Earth days]')
ax1.set_ylabel('energy')
ax1.ticklabel_format(style='sci', axis='both', scilimits=(0,0), useOffset=False)
ax1.grid( )
## plot angular momentum
ax2.plot( t, angularMomentum, color=colors.cnames['purple'] )
ax2.set_xlabel('time [Earth days]')
ax2.set_ylabel('angular momentum')
ax2.ticklabel_format(style='sci', axis='both', scilimits=(0,0), useOffset=False)
ax2.grid( )
## show plot
plt.tight_layout( )
plt.show( )
# Stop timer
end_time = time.time( )
# Print elapsed time
print "Script time: " + str("{:,g}".format(end_time - start_time)) + "s"
print ""
print "------------------------------------------------------------------"
print " Exited successfully! "
print "------------------------------------------------------------------"
print ""
|
mit
|
samchrisinger/osf.io
|
scripts/analytics/utils.py
|
16
|
2605
|
# -*- coding: utf-8 -*-
import os
import unicodecsv as csv
from bson import ObjectId
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import requests
from website import util
from website import settings as website_settings
def oid_to_datetime(oid):
return ObjectId(oid).generation_time
def mkdirp(path):
try:
os.makedirs(path)
except OSError:
pass
def plot_dates(dates, *args, **kwargs):
"""Plot date histogram."""
if dates is None or len(dates) == 0:
return -1
fig = plt.figure()
ax = fig.add_subplot(111)
ax.hist(
[mdates.date2num(each) for each in dates],
*args, **kwargs
)
fig.autofmt_xdate()
ax.format_xdata = mdates.DateFormatter('%Y-%m-%d')
ax.xaxis.set_major_formatter(mdates.DateFormatter('%Y-%m-%d'))
return fig
def make_csv(fp, rows, headers=None):
writer = csv.writer(fp)
if headers:
writer.writerow(headers)
writer.writerows(rows)
def create_object(name, content_type, node, user, stream=None, kind=None, path='/'):
"""Create an object (file/folder) OSF using WaterButler v1 API
:param str name: The name of the requested file
:param str content_type: Content-Type
:param StringIO stream: file-like stream to be uploaded
:param Node node: Project Node
:param User user: User whose cookie will be used
:param str path: Waterbutler V1 path of the requested file
"""
assert(kind != 'file' or stream != None)
node_id = node._id
cookies = {website_settings.COOKIE_NAME: user.get_or_create_cookie()}
# create or update a file
url = util.waterbutler_api_url_for(node_id, 'osfstorage', path)
resp = requests.get(url, cookies=cookies)
data = resp.json()['data']
existing = None
for item in data:
if item['attributes']['name'] == name:
existing = item
if stream:
stream.seek(0)
# create a new file/folder?
if not existing:
url = util.waterbutler_api_url_for(node_id, 'osfstorage', path, kind=kind, name=name)
resp = requests.put(
url,
data=stream,
headers={'Content-Type': content_type},
cookies=cookies,
)
elif kind == 'file':
url = util.waterbutler_api_url_for(node_id, 'osfstorage', existing['attributes']['path'], kind=kind)
resp = requests.put(
url,
data=stream,
headers={'Content-Type': content_type},
cookies=cookies,
)
else:
return existing
return resp.json()['data']
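# Hypothetical usage (names are illustrative, not taken from this repo):
# create_object('report.csv', 'text/csv', node, user,
#               stream=StringIO(csv_text), kind='file')
# returns the WaterButler metadata dict for the created or existing file.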
|
apache-2.0
|
cgarling/AperPhot-DS9
|
ds9_phot.py
|
1
|
26810
|
install_path='/opt/local/bin/'
import sys
sys.path.append(install_path)
import numpy as np
#from astropy.io import fits
#from astropy.modeling import models,fitting
import os
from scipy.optimize import curve_fit,minimize
import pyds9
import matplotlib as mpl
mpl.use('tkagg')
import matplotlib.pyplot as plt
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2TkAgg
from matplotlib.figure import Figure
import Tkinter as tk
import math
from photutils import CircularAperture,CircularAnnulus,aperture_photometry
from photutils.background import Background2D,MMMBackground
#to do:
#1: Add an interactive way to change the radial profile display, perhaps via a menu: need to add a Moffat profile, which is the default for IRAF imexam -r, and need to be able to compare them.
#3: Once centroid is determined, add interactive way to do aperture photometry, perhaps with photutils: http://photutils.readthedocs.io/en/stable/
#package management:
#in /opt/local/bin, installed matplotlib, numpy, scipy, pyfits, pyds9
#tested with most recent versions of all packages, and works
MAX_REJECT = 0.5
MIN_NPIXELS = 5
GOOD_PIXEL = 0
BAD_PIXEL = 1
KREJ = 2.5
MAX_ITERATIONS = 5
def zscale (image, nsamples=1000, contrast=0.25, bpmask=None, zmask=None):
"""Implement IRAF zscale algorithm
nsamples=1000 and contrast=0.25 are the IRAF display task defaults
bpmask and zmask not implemented yet
image is a 2-d numpy array
returns (z1, z2)
"""
# Sample the image
samples = zsc_sample (image, nsamples, bpmask, zmask)
npix = len(samples)
samples.sort()
zmin = samples[0]
zmax = samples[-1]
# For a zero-indexed array
center_pixel = (npix - 1) / 2
if npix%2 == 1:
median = samples[center_pixel]
else:
median = 0.5 * (samples[center_pixel] + samples[center_pixel + 1])
#
# Fit a line to the sorted array of samples
minpix = max(MIN_NPIXELS, int(npix * MAX_REJECT))
ngrow = max (1, int (npix * 0.01))
ngoodpix, zstart, zslope = zsc_fit_line (samples, npix, KREJ, ngrow,
MAX_ITERATIONS)
if ngoodpix < minpix:
z1 = zmin
z2 = zmax
else:
if contrast > 0: zslope = zslope / contrast
z1 = max (zmin, median - (center_pixel - 1) * zslope)
z2 = min (zmax, median + (npix - center_pixel) * zslope)
return z1, z2
def zsc_sample (image, maxpix, bpmask=None, zmask=None):
# Figure out which pixels to use for the zscale algorithm
# Returns the 1-d array samples
# Don't worry about the bad pixel mask or zmask for the moment
# Sample in a square grid, and return the first maxpix in the sample
nc = image.shape[0]
nl = image.shape[1]
stride = max (1.0, math.sqrt((nc - 1) * (nl - 1) / float(maxpix)))
stride = int (stride)
samples = image[::stride,::stride].flatten()
return samples[:maxpix]
def zsc_fit_line (samples, npix, krej, ngrow, maxiter):
#
# First re-map indices from -1.0 to 1.0
xscale = 2.0 / (npix - 1)
xnorm = np.arange(npix)
xnorm = xnorm * xscale - 1.0
ngoodpix = npix
minpix = max (MIN_NPIXELS, int (npix*MAX_REJECT))
last_ngoodpix = npix + 1
# This is the mask used in k-sigma clipping. 0 is good, 1 is bad
badpix = np.zeros(npix, dtype="int32")
#
# Iterate
for niter in range(maxiter):
if (ngoodpix >= last_ngoodpix) or (ngoodpix < minpix):
break
# Accumulate sums to calculate straight line fit
goodpixels = np.where(badpix == GOOD_PIXEL)
sumx = xnorm[goodpixels].sum()
sumxx = (xnorm[goodpixels]*xnorm[goodpixels]).sum()
sumxy = (xnorm[goodpixels]*samples[goodpixels]).sum()
sumy = samples[goodpixels].sum()
sum = len(goodpixels[0])
delta = sum * sumxx - sumx * sumx
# Slope and intercept
intercept = (sumxx * sumy - sumx * sumxy) / delta
slope = (sum * sumxy - sumx * sumy) / delta
# Subtract fitted line from the data array
fitted = xnorm*slope + intercept
flat = samples - fitted
# Compute the k-sigma rejection threshold
ngoodpix, mean, sigma = zsc_compute_sigma (flat, badpix, npix)
threshold = sigma * krej
# Detect and reject pixels further than k*sigma from the fitted line
lcut = -threshold
hcut = threshold
below = np.where(flat < lcut)
above = np.where(flat > hcut)
badpix[below] = BAD_PIXEL
badpix[above] = BAD_PIXEL
# Convolve with a kernel of length ngrow
kernel = np.ones(ngrow,dtype="int32")
badpix = np.convolve(badpix, kernel, mode='same')
ngoodpix = len(np.where(badpix == GOOD_PIXEL)[0])
niter += 1
# Transform the line coefficients back to the X range [0:npix-1]
zstart = intercept - slope
zslope = slope * xscale
return ngoodpix, zstart, zslope
def zsc_compute_sigma (flat, badpix, npix):
# Compute the rms deviation from the mean of a flattened array.
# Ignore rejected pixels
# Accumulate sum and sum of squares
goodpixels = np.where(badpix == GOOD_PIXEL)
sumz = flat[goodpixels].sum()
sumsq = (flat[goodpixels]*flat[goodpixels]).sum()
ngoodpix = len(goodpixels[0])
if ngoodpix == 0:
mean = None
sigma = None
elif ngoodpix == 1:
mean = sumz
sigma = None
else:
mean = sumz / ngoodpix
temp = sumsq / (ngoodpix - 1) - sumz*sumz / (ngoodpix * (ngoodpix - 1))
if temp < 0:
sigma = 0.0
else:
sigma = math.sqrt (temp)
return ngoodpix, mean, sigma
'''end zscale routines
# The above functions are available at the url
# https://github.com/spacetelescope/stsci.numdisplay/blob/master/lib/stsci/numdisplay/zscale.py
# and are subject to the following license:
#
# Copyright (C) 2005 Association of Universities for Research in Astronomy (AURA)
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# 3. The name of AURA and its representatives may not be used to
# endorse or promote products derived from this software without
# specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY AURA ``AS IS'' AND ANY EXPRESS OR IMPLIED
# WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL AURA BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
# OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
# DAMAGE.
'''
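# --- Editor's illustrative sketch (not part of the original tool) -------------------------
# Quick sanity check of the zscale limits on a synthetic noise frame.  It is guarded by an
# environment variable so it never runs when the GUI is launched normally; the variable name
# APERPHOT_ZSCALE_DEMO is an assumption introduced here for illustration only.
if __name__ == '__main__' and os.environ.get('APERPHOT_ZSCALE_DEMO'):
    _demo_frame = np.random.normal(1000.0, 50.0, size=(128, 128))
    _z1, _z2 = zscale(_demo_frame)
    print('zscale limits on the synthetic frame: %.1f .. %.1f' % (_z1, _z2))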
###############################################################################
###############################################################################
#try new implementation
def gauss1d(x, amplitude, xo, sigma_x, offset):
return amplitude*np.exp(-((x-xo)**2.)/(2.*sigma_x**2.))+offset
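# For this 1-D Gaussian the FWHM is 2*sqrt(2*ln2)*sigma_x ~= 2.35482*sigma_x, the conversion
# used for the "Gaussian FWHM" label in the radial-profile panel below.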
def gauss2d((x, y), amplitude, xo, yo, sigma_x, sigma_y, theta, offset):
xo = float(xo)
yo = float(yo)
a = (np.cos(theta)**2)/(2*sigma_x**2) + (np.sin(theta)**2)/(2*sigma_y**2)
b = -(np.sin(2*theta))/(4*sigma_x**2) + (np.sin(2*theta))/(4*sigma_y**2)
c = (np.sin(theta)**2)/(2*sigma_x**2) + (np.cos(theta)**2)/(2*sigma_y**2)
g = offset + amplitude*np.exp( - (a*((x-xo)**2) + 2*b*(x-xo)*(y-yo)
+ c*((y-yo)**2)))
return g.ravel()
def moffat1d(x,amplitude,xo,alpha,beta,offset):
    # FWHM of this profile is 2*alpha*sqrt(2**(1/beta) - 1)
return (amplitude*(1.+((x-xo)/alpha)**2.)**-beta)+offset
def moffat2d((x,y),amplitude,xo,yo,gamma_x,gamma_y,theta,alpha,offset):
#amplitude,xo,yo,gamma_x,gamma_y,theta,alpha,offset=float(amplitude),float(xo),float(yo),float(gamma_x),float(gamma_y),float(theta),float(alpha),float(offset)
g=amplitude*(1.+(((x-xo)/gamma_x)**2.+((y-yo)/gamma_y)**2.)**-alpha)+offset
return g.ravel()
###############################################################################
###############################################################################
# def radial_profile(data, center):
# y,x = np.indices((data.shape)) # first determine radii of all pixels
# r = np.sqrt((x-center[0])**2+(y-center[1])**2)
# ind = np.argsort(r.flat) # get sorted indices
# sr = r.flat[ind] # sorted radii
# sim = data.flat[ind] # image values sorted by radii
# ri = sr.astype(np.int32) # integer part of radii (bin size = 1)
# # determining distance between changes
# deltar = ri[1:] - ri[:-1] # assume all radii represented
# rind = np.where(deltar)[0] # location of changed radius
# nr = rind[1:] - rind[:-1] # number in radius bin
# csim = np.cumsum(sim, dtype=np.float64) # cumulative sum to figure out sums for each radii bin
# tbin = csim[rind[1:]] - csim[rind[:-1]] # sum for image values in radius bins
# radialprofile = tbin/nr # the answer
# return radialprofile
def radial_profile(data, center):
y,x = np.indices((data.shape)) # first determine radii of all pixels
r = np.sqrt((x-center[0])**2+(y-center[1])**2)
ind = np.argsort(r.flat) # get sorted indices
sr = r.flat[ind] # sorted radii
sim = data.flat[ind] # image values sorted by radii
return sr,sim
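# Unlike the binned version commented out above, this returns every pixel's (radius, value)
# pair sorted by radius; the radial-profile panel scatters these points directly.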
def radial_profile_gauss1d_chisquare((x,y,amplitude,sigma_x,sky),image):
# x=op_vals[0]
# y=op_vals[1]
# amplitude=op_vals[2]
# if amplitude<0 or sigma_x<0 or sky<0:return 50
radial=radial_profile(image,(x,y))
# print 0.5*np.sum((radial[1]-gauss1d(radial[0],amplitude,0,sigma_x,sky))**2.)
return 0.5*np.sum((radial[1]-gauss1d(radial[0],amplitude,0,sigma_x,sky))**2.)
###############################################################################
###############################################################################
#def onmove(event):
#if event.inaxes:
#just update the labeltext
#labeltext.set('X: '+str(int(event.ydata)+x-new_image_halfwidth))
#label2text.set('Y: '+str(int(event.xdata)+y-new_image_halfwidth))
#label3text.set('Value: '+str(d.get('data physical '+str(int(event.ydata)+x-new_image_halfwidth)+' '+str(int(event.xdata)+y-new_image_halfwidth)+' 1 1 yes')))
def onpress(event):
if event.key=='p':
filename=d.get('file')
#hdu=fits.open(filename)
hdu=d.get_pyfits()
data=hdu[0].data
x=d.get('crosshair image')
x,y=x.split()
x,y=int(float(x)),int(float(y))
new_image_halfwidth=int(entry1.get())
aperture_radius=int(entry2.get())
inner_sky_radius=int(entry3.get())
outer_sky_radius=int(entry4.get())
new_image=data[y-new_image_halfwidth:y+new_image_halfwidth,x-new_image_halfwidth:x+new_image_halfwidth]
x_grid=np.arange(x-new_image_halfwidth,x+new_image_halfwidth,1)
y_grid=np.arange(y-new_image_halfwidth,y+new_image_halfwidth,1)
x_grid, y_grid = np.meshgrid(x_grid, y_grid)
guess=(np.amax(new_image)-new_image[0,0],x,y,3,3,0,new_image[0,0])
popt,pcov=curve_fit(gauss2d,(x_grid,y_grid),new_image.ravel(),p0=guess)
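        # popt = (amplitude, xo, yo, sigma_x, sigma_y, theta, offset); popt[1], popt[2] give the refined centroid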
#print popt
x=int(popt[1])
y=int(popt[2])
labeltext.set('X: '+str(x))
label2text.set('Y: '+str(y))
new_image=data[y-new_image_halfwidth:y+new_image_halfwidth,x-new_image_halfwidth:x+new_image_halfwidth]
# for artist in ax1.get_children():
# if hasattr(artist,'get_label') and artist.get_label()=='centroid':
# artist.remove()
ax1.clear()
ax1.matshow(np.flip(new_image,axis=0),cmap='gray',origin='upper',clim=zscale(new_image),zorder=0)
ax1.scatter([new_image_halfwidth+1],[new_image_halfwidth-1],marker='+',s=120,c='k',zorder=1)#ax1.scatter([popt[1]-x+new_image_halfwidth],[popt[2]-y+new_image_halfwidth],marker='+',s=120,c='k',zorder=1)
aperture_circle=plt.Circle((popt[1]-x+new_image_halfwidth,popt[2]-y+new_image_halfwidth),radius=aperture_radius,linewidth=3,color='hotpink',fill=False,lw=3,zorder=2)
ax1.add_patch(aperture_circle)
inner_sky=plt.Circle((popt[1]-x+new_image_halfwidth,popt[2]-y+new_image_halfwidth),radius=inner_sky_radius,linewidth=3,color='lime',fill=False,zorder=2)
ax1.add_patch(inner_sky)
outer_sky=plt.Circle((popt[1]-x+new_image_halfwidth,popt[2]-y+new_image_halfwidth),radius=outer_sky_radius,linewidth=3,color='red',fill=False,zorder=2)
ax1.add_patch(outer_sky)
canvas.draw()
#update the radial plot
ax2.clear()
radial=radial_profile(new_image,[new_image_halfwidth,new_image_halfwidth])
#perform the aperture photometry
#currently 2% different from IDL atv
aperture=CircularAperture((x,y),r=aperture_radius)
annulus=CircularAnnulus((x,y),r_in=inner_sky_radius,r_out=outer_sky_radius)
phot_table=aperture_photometry(data,[aperture,annulus])
#new background estimation
bkg_mask=np.ma.masked_outside(radial[0].reshape(new_image.shape),inner_sky_radius,outer_sky_radius)
bkg_map=Background2D(new_image,tuple(np.array(new_image.shape)/4),mask=bkg_mask.mask,exclude_mesh_method='all')
bkg_map_med=MMMBackground().calc_background(bkg_map.data)#bkg_map_med=np.median(bkg_map.background)
#print 'Map sky mean '+str(bkg_map_med)
#bkg_mean=phot_table['aperture_sum_1']/annulus.area()
#print 'Aperture sky mean '+str(bkg_mean)
#phot_table['residual_aperture_sum']=phot_table['aperture_sum_0']-bkg_mean*aperture.area()
phot_table['residual_aperture_sum']=phot_table['aperture_sum_0']-bkg_map_med*aperture.area()
#print 'Map sky result: '+str(phot_table['aperture_sum_0']-bkg_map_med*aperture.area())
#print "Aperture Photometry Result: "+str(phot_table['residual_aperture_sum'])
label8text.set('Sky Value: '+str(int(bkg_map_med)))
label9text.set('Aperture Counts: '+str(int(phot_table['residual_aperture_sum'][0])))
label10text.set('Mag: '+str(-2.5*np.log10(int(phot_table['residual_aperture_sum'][0]))+25.)[:5])
ax2.scatter(radial[0],radial[1])
if var10.get()==1:
ax2.plot(np.linspace(0,new_image_halfwidth,num=50),gauss1d(np.linspace(0,new_image_halfwidth,num=50),popt[0],0,np.mean([popt[3],popt[4]]),popt[6]),c='k',lw=2)
ax2.text(0.5,0.93,'Gaussian FWHM: '+str(2.35482*np.mean([popt[3],popt[4]]))[:5],transform=ax2.transAxes,fontsize=int(15*scaling))
if var11.get()==1:
moffat1d_guess=(np.amax(new_image)-bkg_map_med,0,3,1,bkg_map_med)
popt2,pcov2=curve_fit(moffat1d,radial[0],radial[1],p0=moffat1d_guess)
ax2.plot(np.linspace(0,new_image_halfwidth,num=50),moffat1d(np.linspace(0,new_image_halfwidth,num=50),popt2[0],popt2[1],popt2[2],popt2[3],popt2[4]),c='r',lw=2)
ax2.text(0.5,0.85,'Moffat FWHM: '+str(2.0*popt2[2]*np.sqrt(2.0**(1./popt2[3])-1.))[:5],transform=ax2.transAxes,fontsize=int(15*scaling))
ax2.grid(True,color='white',linestyle='-',linewidth=1)
ax2.set_axisbelow(True)
ax2.autoscale(False)
ax2.set_xlim([0,new_image_halfwidth])
ax2.set_xlabel('Radius (Pixels)')
ax2.set_ylim([np.amin(radial[1]),np.amax(radial[1])])
ax2.set_axis_bgcolor('0.85')
ax2.axvline(aperture_radius,linewidth=2,color='hotpink')
ax2.axvline(inner_sky_radius,linewidth=2,color='lime')
ax2.axvline(outer_sky_radius,linewidth=2,color='red')
ax2.axhline(bkg_map_med,linewidth=2,color='yellow')
canvas2.draw()
def handler():
open_opt=open(install_path+'phot_vars.py','r')
linedata=open_opt.readlines()
open_opt.close()
linedata[0]='new_image_halfwidth_default='+entry1.get()+'\n'
linedata[1]='aperture_radius='+entry2.get()+'\n'
linedata[2]='inner_sky_radius='+entry3.get()+'\n'
linedata[3]='outer_sky_radius='+entry4.get()+'\n'
new_str=''
for j in linedata:new_str+=j
open_opt=open(install_path+'phot_vars.py','w')
open_opt.write(new_str)
open_opt.close()
def update():
MyObject = type('MyObject', (object,), {})
obj = MyObject()
obj.key = 'p'
handler()
onpress(obj)
####################################main#######################################
#import the user defined display parameters
from phot_vars import *
## tkinter integration
root=tk.Tk()
#set title
root.title("Aperture Photometry")
scaling=0.75
label4text = tk.StringVar()
label4text.set('Half Width of Star Window')
label4 = tk.Label(root, textvariable=label4text, height=1)
label4.config(font=("Verdana", int(16*scaling)))
label4.grid(row=2,column=0,columnspan=1)
#set the width of the entry box column
#root.columnconfigure(2,width=
entry1=tk.Entry(root)
entry1.insert(0,str(new_image_halfwidth_default))
entry1.grid(row=2,column=2)
new_image_halfwidth=int(entry1.get())
label5text = tk.StringVar()
label5text.set('Aperture Radius (pix)')
label5=tk.Label(root, textvariable=label5text, height=1)
label5.config(font=("Verdana", int(16*scaling)))
label5.grid(row=3,column=0,columnspan=1)
entry2=tk.Entry(root)
entry2.insert(0,str(aperture_radius))
entry2.grid(row=3,column=2)
aperture_radius=int(entry2.get())
entry3=tk.Entry(root)
entry3.insert(0,str(inner_sky_radius))
entry3.grid(row=4,column=2)
inner_sky_radius=int(entry3.get())
label6text = tk.StringVar()
label6text.set('Inner Sky Radius (pix)')
label6=tk.Label(root, textvariable=label6text, height=1)
label6.config(font=("Verdana", int(16*scaling)))
label6.grid(row=4,column=0,columnspan=1)
entry4=tk.Entry(root)
entry4.insert(0,str(outer_sky_radius))
entry4.grid(row=5,column=2)
outer_sky_radius=int(entry4.get())
label7text = tk.StringVar()
label7text.set('Outer Sky Radius (pix)')
label7=tk.Label(root, textvariable=label7text, height=1)
label7.config(font=("Verdana", int(16*scaling)))
label7.grid(row=5,column=0,columnspan=1)
#label10text = tk.StringVar()
#label10text.set('Gaussian PSF')
#label10=tk.Label(root,textvariable=label10text,height=1)
#label10.config(font=("Verdana",12))
#label10.grid(row=2,column=3,columnspan=1)
var10 = tk.IntVar()
var10.set(1)
tk.Checkbutton(root, text="Gaussian PDF", font=("Verdana",int(16*scaling)), variable=var10,command=update).grid(row=2, column=3,columnspan=1)
var11 = tk.IntVar()
var11.set(1)
tk.Checkbutton(root, text="Moffat PDF", font=("Verdana",int(16*scaling)), variable=var11,command=update).grid(row=2, column=4,columnspan=1)
b = tk.Button(root, text="Update Parameters",command=update)#width=15
b.grid(row=6,column=1)
d=pyds9.DS9()
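# connect to the running DS9 instance via XPA (pyds9 typically starts one if none is found)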
filename=d.get('file')
#hdu=fits.open(filename)
hdu=d.get_pyfits()
data=hdu[0].data
x=d.get('crosshair image')
x,y=x.split()
x,y=int(float(x)),int(float(y))
new_image=data[y-new_image_halfwidth:y+new_image_halfwidth,x-new_image_halfwidth:x+new_image_halfwidth]
x_grid=np.arange(x-new_image_halfwidth,x+new_image_halfwidth,1)
y_grid=np.arange(y-new_image_halfwidth,y+new_image_halfwidth,1)
x_grid, y_grid = np.meshgrid(x_grid, y_grid)
#guess=(np.amax(new_image)-new_image[0,0],x,y,3,3,0,new_image[0,0])
guess=(np.amax(new_image)-new_image[0,0],x_grid[new_image==np.amax(new_image)][0],y_grid[new_image==np.amax(new_image)][0],3,3,0,new_image[0,0])
#def gauss2d((x, y), amplitude, xo, yo, sigma_x, sigma_y, theta, offset):
popt,pcov=curve_fit(gauss2d,(x_grid,y_grid),new_image.ravel(),p0=guess)
#guess=(x,y,np.amax(new_image)-new_image[0,0],3,new_image[0,0])
#def radial_profile_gauss1d_chisquare((x,y,amplitude,sigma_x,sky),image):
#popt,pcov=curve_fit(gauss1d,x_grid,radial_profile(new_image,[new_image_halfwidth,new_image_halfwidth]),p0=guess)
#popt=minimize(radial_profile_gauss1d_chisquare,guess,args=(new_image),options={'maxiter':500},method = 'BFGS',bounds=((0,2*new_image_halfwidth),(0,2*new_image_halfwidth),(0,65534),(0,np.inf),(0,np.inf)))
#print popt
x=int(popt[1])
y=int(popt[2])
new_image=data[y-new_image_halfwidth:y+new_image_halfwidth,x-new_image_halfwidth:x+new_image_halfwidth]
#plot the star in the image, in zscale
fig=Figure(figsize=(int(6*scaling),int(6*scaling)))
ax1=fig.add_axes([0,0,1,1])
ax1.matshow(np.flip(new_image,axis=0),cmap='gray',origin='upper',clim=zscale(new_image),zorder=0)
ax1.autoscale(False)
ax1.scatter([new_image_halfwidth+1],[new_image_halfwidth-1],marker='+',s=120,c='k',zorder=1)#ax1.scatter([popt[1]-x+new_image_halfwidth],[popt[2]-y+new_image_halfwidth],marker='+',s=120,c='k',label='centroid',zorder=1)
aperture_circle=plt.Circle((popt[1]-x+new_image_halfwidth,popt[2]-y+new_image_halfwidth),radius=aperture_radius,linewidth=3,color='hotpink',fill=False,zorder=2)
ax1.add_patch(aperture_circle)
inner_sky=plt.Circle((popt[1]-x+new_image_halfwidth,popt[2]-y+new_image_halfwidth),radius=inner_sky_radius,linewidth=3,color='lime',fill=False,zorder=2)
ax1.add_patch(inner_sky)
outer_sky=plt.Circle((popt[1]-x+new_image_halfwidth,popt[2]-y+new_image_halfwidth),radius=outer_sky_radius,linewidth=3,color='red',fill=False,zorder=2)
ax1.add_patch(outer_sky)
radial=radial_profile(new_image,[new_image_halfwidth,new_image_halfwidth])
#perform the aperture photometry
#currently 2% different from IDL atv
aperture=CircularAperture((x,y),r=aperture_radius)
annulus=CircularAnnulus((x,y),r_in=inner_sky_radius,r_out=outer_sky_radius)
phot_table=aperture_photometry(data,[aperture,annulus])
#new background estimation
bkg_mask=np.ma.masked_outside(radial[0].reshape(new_image.shape),inner_sky_radius,outer_sky_radius)
bkg_map=Background2D(new_image,tuple(np.array(new_image.shape)/4),mask=bkg_mask.mask,exclude_mesh_method='all')
bkg_map_med=MMMBackground().calc_background(bkg_map.data)#bkg_map_med=np.median(bkg_map.background)
#print 'Map sky mean '+str(bkg_map_med)
#bkg_mean=phot_table['aperture_sum_1']/annulus.area()
#print 'Aperture sky mean '+str(bkg_mean)
#phot_table['residual_aperture_sum']=phot_table['aperture_sum_0']-bkg_mean*aperture.area()
phot_table['residual_aperture_sum']=phot_table['aperture_sum_0']-bkg_map_med*aperture.area()
#print 'Map sky result: '+str(phot_table['aperture_sum_0']-bkg_map_med*aperture.area())
#print "Aperture Photometry Result: "+str(phot_table['residual_aperture_sum'])
fig2=Figure(figsize=(int(6*scaling),int(6*scaling)))
fig2.set_facecolor('0.85')
ax2=fig2.add_axes([0.1,0.1,.9,.9])
ax2.grid(True,color='white',linestyle='-',linewidth=1)
ax2.set_axisbelow(True)
ax2.scatter(radial[0],radial[1])
ax2.plot(np.linspace(0,new_image_halfwidth,num=50),gauss1d(np.linspace(0,new_image_halfwidth,num=50),popt[0],0,np.mean([popt[3],popt[4]]),popt[6]),c='k',lw=2)
#guess=(np.amax(new_image)-new_image[0,0],x,y,3,3,0,new_image[0,0])
#popt,pcov=curve_fit(gauss2d,(x_grid,y_grid),new_image.ravel(),p0=guess)
#moffat1d(x,amplitude,xo,gamma,alpha,offset):
#def moffat1d(x,amplitude,xo,alpha,beta,offset):
#fwhm is 2*gamma
#return (amplitude*(1.+((x-xo)/alpha)**2.)**-beta)+offset
moffat1d_guess=(np.amax(new_image)-bkg_map_med,0,3,1,bkg_map_med)
popt2,pcov2=curve_fit(moffat1d,radial[0],radial[1],p0=moffat1d_guess)
#moffat2d_guess=(np.amax(new_image)-bkg_map_med,x,y,3.0,3.0,10.**-8.,1.,bkg_map_med)
#popt3,pcov3=curve_fit(moffat2d,(x_grid,y_grid),new_image.ravel(),p0=moffat2d_guess)
# plt.scatter(radial[0],radial[1])
# plt.plot(radial[0],moffat1d(radial[0],popt2[0],popt2[1],popt2[2],popt2[3],popt2[4]))
# plt.show()
ax2.plot(np.linspace(0,new_image_halfwidth,num=50),moffat1d(np.linspace(0,new_image_halfwidth,num=50),popt2[0],popt2[1],popt2[2],popt2[3],popt2[4]),c='r',lw=2)
ax2.text(0.5,0.85,'Moffat FWHM: '+str(2.0*popt2[2]*np.sqrt(2.0**(1./popt2[3])-1.))[:5],transform=ax2.transAxes,fontsize=15*scaling)
ax2.autoscale(False)
ax2.set_xlim([0,new_image_halfwidth])
ax2.set_xlabel('Radius (Pixels)')
ax2.set_ylim([np.amin(radial[1]),np.amax(radial[1])])
ax2.text(0.5,0.93,'Gaussian FWHM: '+str(2.35482*np.mean([popt[3],popt[4]]))[:5],transform=ax2.transAxes,fontsize=15*scaling)
ax2.set_axis_bgcolor('0.85')
ax2.axvline(aperture_radius,linewidth=2,color='hotpink')
ax2.axvline(inner_sky_radius,linewidth=2,color='lime')
ax2.axvline(outer_sky_radius,linewidth=2,color='red')
ax2.axhline(bkg_map_med,linewidth=2,color='yellow')
canvas=FigureCanvasTkAgg(fig,master=root)
plot_widget=canvas.get_tk_widget()
plot_widget.grid(row=0,column=0,columnspan=3)
canvas2=FigureCanvasTkAgg(fig2,master=root)
plot_widget=canvas2.get_tk_widget()
plot_widget.grid(row=0,column=3,columnspan=3)
#initialize label object
label0text = tk.StringVar()
#set label value
label0text.set('Centroid: ')
label0 = tk.Label(root, textvariable=label0text, height=1)
label0.config(font=("Verdana", int(16*scaling),'bold'))
label0.grid(row=1,column=0) #adds the label to the program's grid
#initialize label object
labeltext = tk.StringVar()
#set label value
labeltext.set('X: '+str(x))
label1 = tk.Label(root, textvariable=labeltext, height=1)
label1.config(font=("Verdana", int(16*scaling)))
label1.grid(row=1,column=1) #adds the label to the program's grid
#initialize label object
label2text = tk.StringVar()
#set label value
label2text.set('Y: '+str(y))
label2 = tk.Label(root, textvariable=label2text, height=1)
label2.config(font=("Verdana", int(16*scaling)))
label2.grid(row=1,column=2) #adds the label to the program's grid
#initialize label object
# label3text = tk.StringVar()
# label3text.set('Value: '+str(d.get('data physical '+str(x)+' '+str(y)+' 1 1 yes')))
# label3 = tk.Label(root, textvariable=label3text, height=1)
# label3.config(font=("Verdana", 12))
# label3.grid(row=1,column=2)
label8text = tk.StringVar()
label8text.set('Sky Value: '+str(int(bkg_map_med)))
label8=tk.Label(root, textvariable=label8text, height=1)
label8.config(font=("Verdana", int(16*scaling)))
label8.grid(row=1,column=3)
label9text = tk.StringVar()
label9text.set('Aperture Counts: '+str(int(phot_table['residual_aperture_sum'][0])))
label9=tk.Label(root, textvariable=label9text, height=1)
label9.config(font=("Verdana", int(16*scaling)))
label9.grid(row=1,column=4)
label10text = tk.StringVar()
label10text.set('Mag: '+str(-2.5*np.log10(int(phot_table['residual_aperture_sum'][0]))+25.)[:5])
label10=tk.Label(root, textvariable=label10text, height=1)
label10.config(font=("Verdana", int(16*scaling)))
label10.grid(row=1,column=5)
#canvas.mpl_connect('motion_notify_event',onmove)
canvas.mpl_connect('key_press_event',onpress)
#root.protocol('WM_DELETE_WINDOW',handler)
root.mainloop()
|
bsd-3-clause
|
jason-neal/companion_simulations
|
Notebooks/Broadcasting_spectrum_one_component_model.py
|
1
|
3967
|
# coding: utf-8
# # Broadcasting on a spectrum - One component model
# In[ ]:
from astropy.io import fits
import numpy as np
import scipy as sp
from scipy.interpolate import interp1d
from scipy.stats import chisquare
from PyAstronomy.pyasl import dopplerShift
import matplotlib.pyplot as plt
get_ipython().magic('matplotlib')
# In[ ]:
def one_comp_model(wav, model1, gammas):
# Make 1 component simulations, broadcasting over gamma values.
# Enable single scalar inputs (turn to 1d np.array)
if not hasattr(gammas, "__len__"):
gammas = np.asarray(gammas)[np.newaxis]
print(len(gammas))
m1 = model1
print(model1.shape)
    m1g = np.empty(model1.shape + (len(gammas),))  # model grid broadcast over the gamma Doppler shifts
print(m1g.shape)
for j, gamma in enumerate(gammas):
wav_j = (1 + gamma / 299792.458) * wav
m1g[:, j] = interp1d(wav_j, m1, axis=0, bounds_error=False)(wav)
    return interp1d(wav, m1g, axis=0)  # pass it the wavelength values to return
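# one_comp_model: m1g has shape (len(wav), len(gammas)), so the returned interpolator
# evaluates every gamma-shifted model on a new wavelength grid in a single call.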
# In[ ]:
# Load in the data
wav = "/home/jneal/Phd/data/phoenixmodels/WAVE_PHOENIX-ACES-AGSS-COND-2011.fits"
host = "/home/jneal/Phd/data/phoenixmodels/HD30501-lte05200-4.50-0.0.PHOENIX-ACES-AGSS-COND-2011-HiRes.fits"
comp = "/home/jneal/Phd/data/phoenixmodels/HD30501b-lte02500-5.00-0.0.PHOENIX-ACES-AGSS-COND-2011-HiRes.fits"
w = fits.getdata(wav) / 10
h = fits.getdata(host)
c = fits.getdata(comp)
# In[ ]:
mask = (2111 < w) & (w < 2117)
w = w[mask]
h = h[mask]
c = c[mask]
# crude normalization
h = h / np.max(h)
c = c / np.max(c)
# In[ ]:
# Create a simulated spectrum
# Parameters
c_kms = 299792.458 # km/s
# s_alpha = np.array([0.1])
# s_rv = np.array([1.5])
s_gamma = np.array([0.5])
answers = (s_gamma,)
# Compact simulation of one component
# comp = interp1d((1 + s_rv / c_kms) * w, s_alpha * c, bounds_error=False)(w)
Sim_func = interp1d((1 + s_gamma / c_kms) * w, h, bounds_error=False, axis=0)
sim_f_orgw = Sim_func(w)
sim_w = np.linspace(2114, 2115, 1024)
sim_f = Sim_func(sim_w)
# In[ ]:
# Simulate with ocm function
sim_ocm_f = one_comp_model(w, h, s_gamma)(sim_w)
# In[ ]:
plt.close()
plt.plot(w, sim_f_orgw, label="org_w")
plt.plot(sim_w, sim_f, label="sim")
plt.plot(sim_w, np.squeeze(sim_ocm_f), label="ocm sim")
plt.legend()
plt.show()
sim_f.shape
# sim_w, sim_f are the observations to perform chisquared against!
# # Parameters for chi-square map
# In[ ]:
gammas = np.arange(-0.9, 1, 0.015)
print(len(gammas))
# In[ ]:
ocm = one_comp_model(w, h, gammas=gammas)
# In[ ]:
# One component model
ocm_obs = ocm(sim_w) # Interpolate to observed values.
ocm_obs.shape
# # Calculate Chi-Square
# In[ ]:
chi2 = chisquare(sim_f[:, np.newaxis], ocm_obs).statistic
chi2.shape
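# Broadcasting sim_f[:, np.newaxis] (npix, 1) against ocm_obs (npix, n_gammas) yields one
# chi-square value per trial gamma; its argmin below picks the best-fitting shift.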
# In[ ]:
min_indx = np.unravel_index(chi2.argmin(), chi2.shape)
print(gammas[min_indx[0]])
# In[ ]:
# Compare to ocm generated simulation
chi2_ocm = chisquare(sim_ocm_f, ocm_obs).statistic
min_indx_ocm = np.unravel_index(chi2_ocm.argmin(), chi2_ocm.shape)
# ocm_chi2_ocm = chisquare(ocm_sim_f[:, np.newaxis], ocm_obs).statistic
# min_indx_ocm = np.unravel_index(chi2.argmin(), chi2.shape)
print("sim results =", gammas[min_indx[0]])
print("ocm results =", gammas[min_indx_ocm[0]]) # observation simulated with the ocm model
print("answer", answers)
# In[ ]:
# Putting resulted min values back into ocm
res = one_comp_model(w, h, gammas[min_indx[0]])
res_sim = res(sim_w)
res_ocm = one_comp_model(w, h, gammas[min_indx_ocm[0]])
res_sim_ocm = res_ocm(sim_w)
# In[ ]:
print(answers)
plt.plot(sim_w, sim_f, "--", label="Obs")
plt.plot(sim_w, np.squeeze(res_sim) + 0.01, label="1 comp")
plt.plot(sim_w, np.squeeze(res_sim_ocm) + 0.02, label="ocm 1 comp")
plt.legend()
plt.show()
# In[ ]:
plt.close()
plt.figure()
# In[ ]:
plt.figure()
plt.plot(gammas, chi2)
plt.xlabel("gammas")
plt.ylabel("Chisquare")
# In[ ]:
# plt.figure()
# plt.contourf(chi2[:, 1, :])  # leftover from a multi-parameter grid; chi2 is 1-D here, so this call would fail
# In[ ]:
plt.close()
plt.close()
# In[ ]:
|
mit
|
chemreac/chemreac
|
examples/steady_state.py
|
2
|
4970
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
from math import log
import argh
import numpy as np
from chemreac import ReactionDiffusion
from chemreac.integrate import run
from chemreac.util.plotting import plot_solver_linear_error
def efield_cb(x, logx=False):
"""
Returns a flat efield (-1)
"""
return -np.ones_like(x)
def y0_flat_cb(x, logx=False, use_log2=False):
xc = x[:-1] + np.diff(x)/2
if logx:
expb = (lambda arg: 2**arg) if use_log2 else np.exp
x, xc = map(expb, (x, xc))
return 17 - 11*(xc-x[0])/(x[-1]-x[0])
def y0_cylindrical_cb(x, logx=False, use_log2=False):
xc = x[:-1] + np.diff(x)/2
if logx:
expb = (lambda arg: 2**arg) if use_log2 else np.exp
x, xc = map(expb, (x, xc))
return 17 - np.log((xc-x[0])/(x[-1]-x[0]))
def y0_spherical_cb(x, logx=False, use_log2=False):
xc = x[:-1] + np.diff(x)/2
if logx:
expb = (lambda arg: 2**arg) if use_log2 else np.exp
x, xc = map(expb, (x, xc))
return 3 + 0.1/((xc-x[0])/(x[-1]-x[0]))
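# The three y0_*_cb callbacks above build the initial concentration profile on the bin
# centres xc: a linear ramp (flat), a logarithmic profile (cylindrical) and a ~1/r profile
# (spherical), undoing the log-transform of x first when logx is set.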
def integrate_rd(D=2e-3, t0=3., tend=7., x0=0.0, xend=1.0, mu=None, N=32,
nt=25, geom='f', logt=False, logy=False, logx=False,
random=False, nstencil=3, lrefl=False, rrefl=False,
num_jacobian=False, method='bdf', plot=False,
atol=1e-6, rtol=1e-6, efield=False, random_seed=42,
verbose=False, use_log2=False):
if random_seed:
np.random.seed(random_seed)
n = 1
mu = float(mu or x0)
tout = np.linspace(t0, tend, nt)
assert geom in 'fcs'
# Setup the grid
logb = (lambda arg: log(arg)/log(2)) if use_log2 else log
_x0 = logb(x0) if logx else x0
_xend = logb(xend) if logx else xend
x = np.linspace(_x0, _xend, N+1)
if random:
x += (np.random.random(N+1)-0.5)*(_xend-_x0)/(N+2)
mob = 0.3
# Initial conditions
y0 = {
'f': y0_flat_cb,
'c': y0_cylindrical_cb,
's': y0_spherical_cb
}[geom](x, logx)
# Setup the system
stoich_active = []
stoich_prod = []
k = []
assert not lrefl
assert not rrefl
rd = ReactionDiffusion(
n, stoich_active, stoich_prod, k, N,
D=[D],
z_chg=[1],
mobility=[mob],
x=x,
geom=geom,
logy=logy,
logt=logt,
logx=logx,
nstencil=nstencil,
lrefl=lrefl,
rrefl=rrefl,
use_log2=use_log2
)
if efield:
if geom != 'f':
raise ValueError("Only analytic sol. for flat drift implemented.")
rd.efield = efield_cb(rd.xcenters, logx)
# Analytic reference values
t = tout.copy().reshape((nt, 1))
Cref = np.repeat(y0[np.newaxis, :, np.newaxis], nt, axis=0)
if efield:
Cref += t.reshape((nt, 1, 1))*mob
# Run the integration
integr = run(rd, y0, tout, atol=atol, rtol=rtol,
with_jacobian=(not num_jacobian), method=method)
Cout, info = integr.Cout, integr.info
if verbose:
print(info)
def lin_err(i=slice(None), j=slice(None)):
return integr.Cout[i, :, j] - Cref[i, :, j]
rmsd = np.sum(lin_err()**2 / N, axis=1)**0.5
ave_rmsd_over_atol = np.average(rmsd, axis=0)/info['atol']
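    # rmsd is the root-mean-square deviation from the analytic profile at each output time;
    # ave_rmsd_over_atol averages it over time and expresses it in units of the absolute tolerance.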
# Plot results
if plot:
import matplotlib.pyplot as plt
def _plot(y, c, ttl=None, apply_exp_on_y=False):
plt.plot(rd.xcenters, rd.expb(y) if apply_exp_on_y else y, c=c)
if N < 100:
plt.vlines(rd.x, 0, np.ones_like(rd.x)*max(y), linewidth=.1,
colors='gray')
plt.xlabel('x / m')
plt.ylabel('C / M')
if ttl:
plt.title(ttl)
for i in range(nt):
c = 1-tout[i]/tend
c = (1.0-c, .5-c/2, .5-c/2) # over time: dark red -> light red
plt.subplot(4, 1, 1)
_plot(Cout[i, :, 0], c, 'Simulation (N={})'.format(rd.N),
apply_exp_on_y=logy)
plt.subplot(4, 1, 2)
_plot(Cref[i, :, 0], c, 'Analytic', apply_exp_on_y=logy)
ax_err = plt.subplot(4, 1, 3)
plot_solver_linear_error(integr, Cref, ax_err, ti=i,
bi=slice(None),
color=c, fill=(i == 0))
plt.title('Linear rel error / Log abs. tol. (={})'.format(
info['atol']))
plt.subplot(4, 1, 4)
tspan = [tout[0], tout[-1]]
plt.plot(tout, rmsd[:, 0] / info['atol'], 'r')
plt.plot(tspan, [ave_rmsd_over_atol[0]]*2, 'r--')
plt.xlabel('Time / s')
plt.ylabel(r'$\sqrt{\langle E^2 \rangle} / atol$')
plt.tight_layout()
plt.show()
return tout, Cout, info, ave_rmsd_over_atol, rd
if __name__ == '__main__':
argh.dispatch_command(integrate_rd, output_file=None)
|
bsd-2-clause
|
michigraber/scikit-learn
|
benchmarks/bench_glm.py
|
297
|
1493
|
"""
A comparison of different methods in GLM
Data comes from a random square matrix.
"""
from datetime import datetime
import numpy as np
from sklearn import linear_model
from sklearn.utils.bench import total_seconds
if __name__ == '__main__':
import pylab as pl
n_iter = 40
time_ridge = np.empty(n_iter)
time_ols = np.empty(n_iter)
time_lasso = np.empty(n_iter)
dimensions = 500 * np.arange(1, n_iter + 1)
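    # Benchmark Ridge, OLS and LassoLars fits on square random problems of increasing size.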
for i in range(n_iter):
print('Iteration %s of %s' % (i, n_iter))
n_samples, n_features = 10 * i + 3, 10 * i + 3
X = np.random.randn(n_samples, n_features)
Y = np.random.randn(n_samples)
start = datetime.now()
ridge = linear_model.Ridge(alpha=1.)
ridge.fit(X, Y)
time_ridge[i] = total_seconds(datetime.now() - start)
start = datetime.now()
ols = linear_model.LinearRegression()
ols.fit(X, Y)
time_ols[i] = total_seconds(datetime.now() - start)
start = datetime.now()
lasso = linear_model.LassoLars()
lasso.fit(X, Y)
time_lasso[i] = total_seconds(datetime.now() - start)
pl.figure('scikit-learn GLM benchmark results')
pl.xlabel('Dimensions')
pl.ylabel('Time (s)')
pl.plot(dimensions, time_ridge, color='r')
pl.plot(dimensions, time_ols, color='g')
pl.plot(dimensions, time_lasso, color='b')
pl.legend(['Ridge', 'OLS', 'LassoLars'], loc='upper left')
pl.axis('tight')
pl.show()
|
bsd-3-clause
|
kamiseko/factor-test
|
pb-cap-mom1-yoygr-vol-dda.py
|
1
|
17038
|
#!/Tsan/bin/python
# -*- coding: utf-8 -*-
# Libraries To Use
from __future__ import division
from CloudQuant import MiniSimulator
import numpy as np
import pandas as pd
import pdb
import cvxopt as cv
from cvxopt import solvers
from datetime import datetime, date, time
import barraRiskModel as brm
import factorFilterFunctions as ff
# define path
path = ff.data_path
filenameHS300 = 'LZ_GPA_INDXQUOTE_CLOSE.csv'
filenameICWeight = 'ICfactorWeight7factorsPB.csv'
filenameOwnVol = 'Own_Factor_Volatility_90d.csv'  # 90-day return volatility
filenameDDA20 = 'Own_Factor_DDA-20d.csv'  # daily stock turnover (forward-adjusted)
filenameOWNILLIQ = 'Own_Factor_ILLQ-1d.csv'  # illiquidity factor (computed in-house)
# Variables and Constants
Init_Cap = 50000000
Start_Date = '20120101'
End_Date = '20161231'
ShortMA = 12
LongMA = 15
Period = 60
Position_Max = 200
rowsNum = 21
timeStampNum = 2000
thresholdNum = 0.2
indexPool = ['000300', '000905']
# Use own library to get the last day of each month
benchMarkData = pd.read_csv(path+filenameHS300, infer_datetime_format=True, parse_dates=[0], index_col=0)[-timeStampNum:]
startOfMonth, endOfMonth = ff.getLastDayOfMonth(benchMarkData.index)
DDA20df = pd.read_csv(path+filenameDDA20, infer_datetime_format=True, parse_dates=[0], index_col=0)[-timeStampNum:]
Volatilitydf = pd.read_csv(path+filenameOwnVol, infer_datetime_format=True, parse_dates=[0], index_col=0)[-timeStampNum:]
# factor weight
OwnILLQdf = pd.read_csv(path+filenameOWNILLIQ, infer_datetime_format=True, parse_dates=[0], index_col=0)[-timeStampNum:]
factorWeight = pd.read_csv(path+filenameICWeight, infer_datetime_format=True, parse_dates=[0], index_col=0)
def getNewMatrix(inputArray, t, m):
newMatrix = []
n = t-m+1
for i in range(n):
newdata = list(inputArray[i:m+i])
newMatrix.append(newdata)
#newMatrix = np.array(newMatrix).reshape(n,m)
return np.array(newMatrix)
def recreateArray(newMatrix,t,m):
ret = []
n = t - m + 1
for p in range(1, t+1):
if p < m:
alpha = p
elif p > t-m+1:
alpha = t-p+1
else:
alpha = m
sigma = 0
for j in range(1, m+1):
i = p - j + 1
if i > 0 and i < n+1:
sigma += newMatrix[i-1][j-1]
ret.append(sigma/alpha)
return np.array(ret)
def getSVD(inputArray,t,m):
inputmatrix = getNewMatrix(inputArray, t, m)
u, s, v = np.linalg.svd(inputmatrix)
eviNum = 1 if s[0]/s.sum() > 0.99 else 2
sNew = np.zeros((eviNum, eviNum))
np.fill_diagonal(sNew, s[:eviNum])
matrixForts = np.dot(np.dot(u[:, :eviNum].reshape(u.shape[0], eviNum), sNew), v[:eviNum])
newts = recreateArray(matrixForts, t, m)
return newts
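# The three helpers above form an SSA-style smoother: getNewMatrix embeds the series in a
# trajectory matrix, getSVD keeps the one or two leading singular components, and
# recreateArray diagonal-averages the reconstruction back to a series of the original length.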
def initial(sdk):
sdk.prepareData(['LZ_GPA_INDEX_CSI500WEIGHT', 'LZ_GPA_VAL_PB', 'LZ_GPA_FIN_IND_OCFTODEBT', 'LZ_GPA_FIN_IND_QFA_YOYGR',
'LZ_GPA_DERI_Momentum_1M',
'LZ_GPA_CMFTR_CUM_FACTOR', 'LZ_GPA_QUOTE_TCLOSE', 'LZ_GPA_INDXQUOTE_CLOSE'])
dateList = map(lambda x: x.date().strftime("%Y%m%d"), endOfMonth) # change time stamp to string
sdk.setGlobal('dateList', dateList)
sdk.setGlobal('sellSignal', [True])
DDA20df.columns = sdk.getStockList()
Volatilitydf.columns = sdk.getStockList()
OwnILLQdf.columns = sdk.getStockList()
print DDA20df.head().iloc[:, :5]
print len(sdk.getStockList())
def initPerDay(sdk):
today = sdk.getNowDate()
dateList = sdk.getGlobal('dateList')
if today in dateList: # judge whether today is the last day of the month
today = datetime.strptime(today, '%Y%m%d')
stockPool = pd.DataFrame(np.array(sdk.getFieldData('LZ_GPA_INDEX_CSI500WEIGHT')[-20:]), columns=sdk.getStockList())
        stockPool = stockPool.iloc[-1].dropna(how='any').index.tolist()  # get today's CSI 500 constituent pool
# PBData
PBDF = pd.DataFrame(np.array(sdk.getFieldData('LZ_GPA_VAL_PB')[-20:]), columns=sdk.getStockList())
PBDF = PBDF[stockPool].fillna(method='ffill').fillna(method='bfill').iloc[-1]
PBSlice = (PBDF - PBDF.mean())/PBDF.std() # normalize
# OCFTODEBT Data
OCFTODEBT = pd.DataFrame(np.array(sdk.getFieldData('LZ_GPA_FIN_IND_OCFTODEBT')[-20:]), columns=sdk.getStockList())
OCFTODEBT = OCFTODEBT[stockPool].fillna(method='ffill').fillna(method='bfill').iloc[-1]
OCFTODEBTSlice = (OCFTODEBT - OCFTODEBT.mean())/OCFTODEBT.std()
# MOM1MData
MOM1MDF = pd.DataFrame(np.array(sdk.getFieldData('LZ_GPA_DERI_Momentum_1M')[-20:]), columns=sdk.getStockList())
MOM1MDF = MOM1MDF[stockPool].fillna(method='ffill').fillna(method='bfill').iloc[-1]
MOM1MSlice = (MOM1MDF - MOM1MDF.mean())/MOM1MDF.std()
# YOYGRData
YOYGRDF = pd.DataFrame(np.array(sdk.getFieldData('LZ_GPA_FIN_IND_QFA_YOYGR')[-20:]), columns=sdk.getStockList())
YOYGRDF = YOYGRDF[stockPool].fillna(method='ffill').fillna(method='bfill').iloc[-1]
YOYGRSlice = (YOYGRDF-YOYGRDF.mean())/YOYGRDF.std()
# ILLIQData
#ILLIQDF = pd.DataFrame(np.array(sdk.getFieldData('LZ_GPA_DERI_ILLIQ')[-20:]), columns=sdk.getStockList())
DDA20 = DDA20df.loc[:today]
DDA20 = DDA20.iloc[-20:]
DDA20 = DDA20[stockPool].fillna(method='ffill').fillna(method='bfill').iloc[-1]
DDA20Slice = (DDA20 - DDA20.mean())/DDA20.std()
# Volatility
VOL = Volatilitydf.loc[:today]
VOL = VOL.iloc[-20:]
VOL = VOL[stockPool].fillna(method='ffill').fillna(method='bfill').iloc[-1]
VOLSlice = (VOL -VOL.mean())/VOL.std()
# OwnILLQData
OwnILLQ = OwnILLQdf.loc[:today]
OwnILLQ = OwnILLQ.iloc[-20:]
OwnILLQ = OwnILLQ[stockPool].fillna(method='ffill').fillna(method='bfill').iloc[-1]
OwnILLQSlice = (OwnILLQ - OwnILLQ.mean()) / OwnILLQ.std()
# Select the corresponding factor weight
WeightSlice = factorWeight.loc[today]
finalIndictor = WeightSlice['PB'] * PBSlice + WeightSlice['OCFTODEBT'] * OCFTODEBTSlice + \
WeightSlice['MOM_1M'] * MOM1MSlice + WeightSlice['YOYGR'] * YOYGRSlice + \
WeightSlice['VOLATILITY'] * VOLSlice + WeightSlice['DDA20'] * DDA20Slice \
+ WeightSlice['OWNILLIQ'] * OwnILLQSlice
stocksTobuy = finalIndictor.sort_values(ascending=False).index.tolist()[50:50+Position_Max]
sdk.setGlobal('buyList', stocksTobuy)
# To calculate the realized volatility matrix
CPData = np.array(sdk.getFieldData('LZ_GPA_QUOTE_TCLOSE')[-121:])
ClosePriceDF = pd.DataFrame(CPData, columns=sdk.getStockList())[stocksTobuy]
ADfactor = np.array(sdk.getFieldData('LZ_GPA_CMFTR_CUM_FACTOR')[-121:])
ADfactorDF = pd.DataFrame(ADfactor, columns=sdk.getStockList())[stocksTobuy]
AdForward = ADfactorDF / ADfactorDF.max()
adjustedPrice = (AdForward * ClosePriceDF)
adjustedPrice = adjustedPrice.loc[:, adjustedPrice.isnull().sum() < (len(adjustedPrice) * thresholdNum)]
adjustedPrice = adjustedPrice.fillna(method='ffill').fillna(method='bfill')
print adjustedPrice.shape[1]
#covMatrix = adjustedPrice.ewm(ignore_na=True, min_periods=0, com=10).cov(pairwise = True)[-60:].iloc[-1]
adjustedPrice = np.log(adjustedPrice/adjustedPrice.shift(1))[-120:] # calculate daily log return of each stock
#covMatrix = brm.calEWMcovariance(adjustedPrice)
covMatrix = adjustedPrice.cov(1)
print 'Nan Num:', covMatrix.isnull().sum().sum()
#covMatrix.values
# Quadratic Optimizaiton
stkNum = covMatrix.shape[1]
P = cv.matrix(covMatrix.values)
q = cv.matrix(0.0, (stkNum, 1))
G = cv.matrix(np.concatenate((np.diag(np.ones(stkNum)), - np.diag(np.ones(stkNum)))))
h = cv.matrix(np.append(0.01 * np.ones(stkNum), np.zeros(stkNum)))
A = cv.matrix(np.ones(stkNum)).T
b = cv.matrix(1.0).T
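        # Minimum-variance weights: minimize w' P w subject to 0 <= w_i <= 0.01 (rows of G, h)
        # and sum(w) = 1 (A, b); q = 0 so there is no linear term in the objective.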
sol = solvers.qp(P, q, G, h, A, b)
print sol['x'] # this shows the desired x weight
# Add percentage of total Asset to reduce max drawdowm
stocksWeightTarget = pd.Series(data=sol['x'] * sdk.getAccountAsset().previousAsset, index=covMatrix.columns)
quotes = sdk.getQuotes(covMatrix.columns)
noNoneData = list(set(covMatrix.columns) & set(quotes.keys()))
stocksWeightTarget = stocksWeightTarget.loc[noNoneData]
for stk in stocksWeightTarget.index:
stocksWeightTarget.loc[stk] = stocksWeightTarget.loc[stk] / quotes[stk].open
positionIndicator = sdk.getPositions()
# stockPrices = pd.Series(data = np.array([i.open for i in positionIndicator]), index = covMatrix.columns)
# stocksWeightTarget = stocksWeightTarget / stockPrices
stocksWeightCurrently = pd.Series(data=[i.optPosition for i in positionIndicator], index=[i.code for i in positionIndicator])
stocksTarget = stocksWeightTarget.index.tolist()
stocksCurrent = stocksWeightCurrently.index.tolist()
finalWeight = pd.Series(index=list(set(stocksTarget) | set(stocksCurrent)), data=None)
intersection = list(set(stocksCurrent) & set(stocksTarget))
differenceSell = list(set(stocksCurrent) - set(stocksTarget))
differenceBuy = list(set(stocksTarget) - set(stocksCurrent))
finalWeight.loc[intersection] = stocksWeightTarget.loc[intersection] - stocksWeightCurrently.loc[intersection]
finalWeight.loc[differenceSell] = - stocksWeightCurrently.loc[differenceSell]
finalWeight.loc[differenceBuy] = stocksWeightTarget.loc[differenceBuy]
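        # finalWeight now holds the rebalancing deltas in shares: positive entries are shares
        # still to buy, negative entries are shares to sell relative to the current positions.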
sdk.setGlobal('finalWeight', finalWeight)
#adjustedPrice.dropna(axis=1, how='any', inplace=True)
# quote all stocks
positionStocks = [i.code for i in sdk.getPositions()] ## Stocks we currently hold
totalPool = np.array(list(set(stocksTobuy) | set(positionStocks)))
# print (stockPool)
# sdk.subscribeQuote(stockPool) ##subscribe
sdk.setGlobal('POOL', totalPool)
#print stockPool
#print today
# ret = []
# for stock in sdk.getGlobal("POOL"):
# late = [i.close for i in sdk.getLatest(code=stock, count=period)]
# mean = np.nanmean(late)
# ret.append(mean)
# return np.array(ret)
## Trading function
def buyStocks(sdk, finalWeight, quotes): # stockToBuy is a pd.Series
quoteStocks = quotes.keys()
stockToBuy = list(set(finalWeight[finalWeight > 0].index.tolist()) & set(quoteStocks))
asset = sdk.getAccountInfo()
codeList = [i.code for i in sdk.getPositions()]
# print sdk.getPositions()
if stockToBuy and asset:
if len(codeList) < Position_Max:
if len(codeList) + len(stockToBuy) > Position_Max:
frozenStk = frozenset(stockToBuy)
spareList = sdk.getGlobal('buyList')
stockToBuy = [x for x in spareList if x in frozenStk][:Position_Max - len(codeList)]
orders = []
for stock in stockToBuy:
#assert np.any(quotes) # judge whether quotes is None
buyPrice = quotes[stock].open
buyAmount = finalWeight.loc[stock] # Can also buy half of the amount
if buyPrice > 0 and buyAmount > 100:
orders.append([stock, buyPrice, int(np.floor(buyAmount / 100)) * 100, 'BUY'])
sdk.makeOrders(orders)
sdk.sdklog(orders, 'buy') # Add this trade into log
def sellStocks(sdk, finalWeight, quotes):
quoteStocks = quotes.keys()
stockToSell = list(set(finalWeight[finalWeight < 0].index.tolist()) & set(quoteStocks))
if stockToSell:
orders = []
        positions = sdk.getPositions()  # check the amount of stock we currently hold
for pos in positions:
if pos.code in stockToSell:
sellPrice = quotes[pos.code].open
sellAmount = pos.optPosition
if sellPrice > 0 and sellAmount > 100:
orders.append([pos.code, sellPrice, sellAmount, 'SELL'])
sdk.makeOrders(orders)
sdk.sdklog(orders, 'sell') ## Add this trade into log
# def cancelAllOrdersInq(sdk):
# ods = sdk.getQueueOrders()
def sellAllStocks(sdk, stockToSell, quotes):
quoteStocks = quotes.keys()
stockToSell = list(set(stockToSell) & set(quoteStocks))
if stockToSell:
orders = []
        positions = sdk.getPositions()  # check the amount of stock we currently hold
for pos in positions:
if pos.code in stockToSell:
sellPrice = quotes[pos.code].open
sellAmount = pos.optPosition
if sellPrice > 0 and sellAmount > 100:
orders.append([pos.code, sellPrice, sellAmount, 'SELL'])
sdk.makeOrders(orders)
sdk.sdklog(orders, 'sell') ## Add this trade into log
'''def cutLose(sdk, thresholdNum, period): # cut lose function
codeList = [i.code for i in sdk.getPositions()]
quotes = sdk.getQuotes(codeList)
closePriceDF = pd.DataFrame(np.array(sdk.getFieldData('LZ_GPA_QUOTE_TCLOSE')[-period:]),
columns=sdk.getStockList())[quotes.keys()]
ADfactor = np.array(sdk.getFieldData('LZ_GPA_CMFTR_CUM_FACTOR')[-period:])
ADfactorDF = pd.DataFrame(ADfactor, columns=sdk.getStockList())[quotes.keys()]
AdForward = ADfactorDF / ADfactorDF.max()
closePriceDF = (AdForward * closePriceDF)
maxPrice = closePriceDF.iloc[:-1].max()
return closePriceDF.loc[:, (closePriceDF.iloc[-1] < (1-thresholdNum) * maxPrice).values].columns.tolist()'''
# Main section
def strategy(sdk):
    '''Market timing by SVD: check whether the HS300 and ZZ500 indices are surging or have plunged.'''
sellSignalList = sdk.getGlobal('sellSignal')
indexDF = pd.DataFrame(np.array(sdk.getFieldData('LZ_GPA_INDXQUOTE_CLOSE')[-22:]), columns=sdk.getIndexList())
# print sdk.getIndexList()
indexDF = indexDF[indexPool]
indexDF = indexDF.fillna(method='ffill').fillna(method='bfill')[-LongMA:]
# if (indexDF.loc[:, indexDF.iloc[-1] == indexDF.iloc[-5:].min()].shape[1] == 2) and \
# (indexDF.loc[:, indexDF.iloc[-1] < indexDF.iloc[-12:].mean()].shape[1] == 2):
SH300SVD = getSVD(indexDF[indexPool[0]].values, len(indexDF), ShortMA)
ZZ500SVD = getSVD(indexDF[indexPool[1]].values, len(indexDF), ShortMA)
    # index market timing: sell everything when the SVD-smoothed series crosses above the raw index on both HS300 and ZZ500
if SH300SVD[-2] < indexDF[indexPool[0]].iloc[-2] and SH300SVD[-1] > indexDF[indexPool[0]].iloc[-1] and \
ZZ500SVD[-2] < indexDF[indexPool[1]].iloc[-2] and ZZ500SVD[-1] > indexDF[indexPool[1]].iloc[-1]:
sellAllFlag = True
else:
sellAllFlag = False
sellSignalList.append(sellAllFlag)
sdk.setGlobal('sellSignal', sellSignalList) # add sell signal to list
if sellSignalList[-2] and sellSignalList[-1]:
HoldingList = [i.code for i in sdk.getPositions()]
quotes = sdk.getQuotes(HoldingList)
stockToSell = HoldingList
if stockToSell:
sellAllStocks(sdk, stockToSell, quotes)
else: # if sell-all function not triggered, normally trade
today = sdk.getNowDate()
dateList = sdk.getGlobal('dateList')
#cutLoseList = cutLose(sdk, 0.13 , 5) # cut lose
#quotes = sdk.getQuotes(cutLoseList)
#if cutLoseList:
# print cutLoseList
# sellAllStocks(sdk, cutLoseList, quotes)
if today in dateList: # if today is the last day of the month
sdk.sdklog(sdk.getNowDate(), 'now')
tempoPool = sdk.getGlobal('POOL')
quotes = sdk.getQuotes(tempoPool)
finalWeight = sdk.getGlobal('finalWeight')
if len(finalWeight[finalWeight < 0]) > 0: # sell stocks with negative weights
sellStocks(sdk, finalWeight, quotes)
#sdk.setGlobal('StkCurrentlyHolding', [i.code for i in sdk.getPositions()])
if len(finalWeight[finalWeight > 0]) > 0:
buyStocks(sdk, finalWeight, quotes)
def main():
config = {'username': 'toriphy', 'password': '1991toriphy', 'rootpath': 'D:/cStrategy/',
'initCapitalStock': Init_Cap,
'startDate': Start_Date, 'endDate': End_Date, 'cycle': 1, 'executeMode': 'D', 'feeRate': 0.0015,
'feeLimit': 5, 'strategyName': '50+7fa-Nocutloss-MA12-15-cov120', 'logfile': 'ma.log', 'dealByVolume': True,
'memorySize': 5,
'initial': initial, 'preparePerDay': initPerDay, 'strategy': strategy}
MiniSimulator(**config).run()
if __name__ == "__main__":
main()
|
mit
|
justacec/bokeh
|
bokeh/charts/attributes.py
|
6
|
14622
|
from __future__ import absolute_import
from copy import copy
from itertools import cycle
import pandas as pd
from bokeh.charts import DEFAULT_PALETTE
from bokeh.charts.properties import ColumnLabel
from bokeh.charts.utils import marker_types
from bokeh.charts.data_source import ChartDataSource
from bokeh.charts.stats import Bins
from bokeh.core.enums import DashPattern
from bokeh.models.sources import ColumnDataSource
from bokeh.core.properties import (HasProps, String, List, Instance, Either, Any, Dict,
Bool, Override)
class AttrSpec(HasProps):
"""A container for assigning attributes to values and retrieving them as needed.
    One convenience this provides is automatic handling of cases where the provided
    iterable is shorter than the number of distinct values it must cover.
Once created as attr_spec, you can do attr_spec[data_label], where data_label must
be a one dimensional tuple of values, representing the unique group in the data.
See the :meth:`AttrSpec.setup` method for the primary way to provide an existing
AttrSpec with data and column values and update all derived property values.
"""
data = Instance(ColumnDataSource)
iterable = List(Any, default=None)
attrname = String(help='Name of the attribute the spec provides.')
columns = Either(ColumnLabel, List(ColumnLabel), help="""
The label or list of column labels that correspond to the columns that will be
used to find all distinct values (single column) or combination of values (
multiple columns) to then assign a unique attribute to. If not enough unique
attribute values are found, then the attribute values will be cycled.
""")
default = Any(default=None, help="""
The default value for the attribute, which is used if no column is assigned to
the attribute for plotting. If the default value is not provided, the first
value in the `iterable` property is used.
""")
attr_map = Dict(Any, Any, help="""
Created by the attribute specification when `iterable` and `data` are
available. The `attr_map` will include a mapping between the distinct value(s)
found in `columns` and the attribute value that has been assigned.
""")
items = Any(default=None, help="""
The attribute specification calculates this list of distinct values that are
found in `columns` of `data`.
""")
sort = Bool(default=True, help="""
A boolean flag to tell the attribute specification to sort `items`, when it is
calculated. This affects which value of `iterable` is assigned to each distinct
value in `items`.
""")
ascending = Bool(default=True, help="""
A boolean flag to tell the attribute specification how to sort `items` if the
`sort` property is set to `True`. The default setting for `ascending` is `True`.
""")
bins = Instance(Bins, help="""
If an attribute spec is binning data, so that we can map one value in the
`iterable` to one value in `items`, then this attribute will contain an instance
of the Bins stat. This is used to create unique labels for each bin, which is
then used for `items` instead of the actual unique values in `columns`.
""")
def __init__(self, columns=None, df=None, iterable=None, default=None,
items=None, **properties):
"""Create a lazy evaluated attribute specification.
Args:
columns: a list of column labels
df(:class:`~pandas.DataFrame`): the data source for the attribute spec.
iterable: an iterable of distinct attribute values
default: a value to use as the default attribute when no columns are passed
items: the distinct values in columns. If items is provided as input,
then the values provided are used instead of being calculated. This can
be used to force a specific order for assignment.
**properties: other properties to pass to parent :class:`HasProps`
"""
properties['columns'] = self._ensure_list(columns)
if df is not None:
properties['data'] = ColumnDataSource(df)
if default is None and iterable is not None:
default_iter = copy(iterable)
properties['default'] = next(iter(default_iter))
elif default is not None:
properties['default'] = default
if iterable is not None:
properties['iterable'] = iterable
if items is not None:
properties['items'] = items
super(AttrSpec, self).__init__(**properties)
if self.default is None and self.iterable is not None:
self.default = next(iter(copy(self.iterable)))
if self.data is not None and self.columns is not None:
if df is None:
df = self.data.to_df()
self._generate_items(df, columns=self.columns)
if self.items is not None and self.iterable is not None:
self.attr_map = self._create_attr_map()
@staticmethod
def _ensure_list(attr):
"""Always returns a list with the provided value. Returns the value if a list."""
if isinstance(attr, str):
return [attr]
elif isinstance(attr, tuple):
return list(attr)
else:
return attr
@staticmethod
def _ensure_tuple(attr):
"""Return tuple with the provided value. Returns the value if a tuple."""
if not isinstance(attr, tuple):
return (attr,)
else:
return attr
def _setup_default(self):
"""Stores the first value of iterable into `default` property."""
self.default = next(self._setup_iterable())
def _setup_iterable(self):
"""Default behavior is to copy and cycle the provided iterable."""
return cycle(copy(self.iterable))
def _generate_items(self, df, columns):
"""Produce list of unique tuples that identify each item."""
if self.sort:
# TODO (fpliger): this handles pandas API change so users do not experience
# the related annoying deprecation warning. This is probably worth
# removing when pandas deprecated version (0.16) is "old" enough
try:
df = df.sort_values(by=columns, ascending=self.ascending)
except AttributeError:
df = df.sort(columns=columns, ascending=self.ascending)
items = df[columns].drop_duplicates()
self.items = [tuple(x) for x in items.to_records(index=False)]
def _create_attr_map(self, df=None, columns=None):
"""Creates map between unique values and available attributes."""
if df is not None and columns is not None:
self._generate_items(df, columns)
iterable = self._setup_iterable()
return {item: next(iterable) for item in self._item_tuples()}
def _item_tuples(self):
return [self._ensure_tuple(item) for item in self.items]
def set_columns(self, columns):
"""Set columns property and update derived properties as needed."""
columns = self._ensure_list(columns)
if all([col in self.data.column_names for col in columns]):
self.columns = columns
else:
# we have input values other than columns
# assume this is now the iterable at this point
self.iterable = columns
self._setup_default()
def setup(self, data=None, columns=None):
"""Set the data and update derived properties as needed."""
if data is not None:
self.data = data
if columns is not None and self.data is not None:
self.set_columns(columns)
if self.columns is not None and self.data is not None:
self.attr_map = self._create_attr_map(self.data.to_df(), self.columns)
def update_data(self, data):
self.setup(data=data, columns=self.columns)
def __getitem__(self, item):
"""Lookup the attribute to use for the given unique group label."""
if not self.attr_map:
return self.default
elif self._ensure_tuple(item) not in self.attr_map.keys():
# make sure we have attr map
self.setup()
return self.attr_map[self._ensure_tuple(item)]
@property
def series(self):
if not self.attr_map:
return pd.Series()
else:
index = pd.MultiIndex.from_tuples(self._item_tuples(), names=self.columns)
return pd.Series(list(self.attr_map.values()), index=index)
class ColorAttr(AttrSpec):
"""An attribute specification for mapping unique data values to colors.
.. note::
Should be expanded to support more complex coloring options.
"""
attrname = Override(default='color')
iterable = Override(default=DEFAULT_PALETTE)
bin = Bool(default=False)
def __init__(self, **kwargs):
iterable = kwargs.pop('palette', None)
if iterable is not None:
kwargs['iterable'] = iterable
super(ColorAttr, self).__init__(**kwargs)
def _generate_items(self, df, columns):
"""Produce list of unique tuples that identify each item."""
if not self.bin:
super(ColorAttr, self)._generate_items(df, columns)
else:
if len(columns) == 1 and ChartDataSource.is_number(df[columns[0]]):
self.bins = Bins(source=ColumnDataSource(df), column=columns[0],
bins=len(self.iterable), aggregate=False)
if self.sort:
self.bins.sort(ascending=self.ascending)
self.items = [bin.label[0] for bin in self.bins]
else:
raise ValueError('Binned colors can only be created for one column of \
numerical data.')
def add_bin_labels(self, data):
col = self.columns[0]
# save original values into new column
data._data[col + '_values'] = data._data[col]
for bin in self.bins:
# set all rows associated to each bin to the bin label being mapped to colors
data._data.ix[data._data[col + '_values'].isin(bin.values),
col] = bin.label[0]
data._data[col] = pd.Categorical(data._data[col], categories=list(self.items),
ordered=self.sort)
class MarkerAttr(AttrSpec):
"""An attribute specification for mapping unique data values to markers."""
attrname = Override(default='marker')
iterable = Override(default=list(marker_types.keys()))
def __init__(self, **kwargs):
iterable = kwargs.pop('markers', None)
if iterable is not None:
kwargs['iterable'] = iterable
super(MarkerAttr, self).__init__(**kwargs)
dashes = DashPattern._values
class DashAttr(AttrSpec):
"""An attribute specification for mapping unique data values to line dashes."""
attrname = Override(default='dash')
iterable = Override(default=dashes)
def __init__(self, **kwargs):
iterable = kwargs.pop('dash', None)
if iterable is not None:
kwargs['iterable'] = iterable
super(DashAttr, self).__init__(**kwargs)
class IdAttr(AttrSpec):
"""An attribute specification for mapping unique data values to line dashes."""
attrname = Override(default='id')
def _setup_iterable(self):
return iter(range(0, len(self.items)))
class CatAttr(AttrSpec):
"""An attribute specification for mapping unique data values to labels.
.. note::
this is a special attribute specification, which is used for defining which
labels are used for one aspect of a chart (grouping) vs another (stacking or
legend)
"""
attrname = Override(default='nest')
def __init__(self, **kwargs):
super(CatAttr, self).__init__(**kwargs)
def _setup_iterable(self):
return iter(self.items)
def get_levels(self, columns):
"""Provides a list of levels the attribute represents."""
if self.columns is not None:
levels = [columns.index(col) for col in self.columns]
return levels
else:
return []
""" Attribute Spec Functions
Convenient functions for producing attribute specifications. These would be
the interface used by end users when providing attribute specs as inputs
to the Chart.
"""
def color(columns=None, palette=None, bin=False, **kwargs):
"""Produces a ColorAttr specification for coloring groups of data based on columns.
Args:
columns (str or list(str), optional): a column or list of columns for coloring
palette (list(str), optional): a list of colors to use for assigning to unique
values in `columns`.
**kwargs: any keyword, arg supported by :class:`AttrSpec`
Returns:
a `ColorAttr` object
"""
if palette is not None:
kwargs['palette'] = palette
kwargs['columns'] = columns
kwargs['bin'] = bin
return ColorAttr(**kwargs)
def marker(columns=None, markers=None, **kwargs):
""" Specifies detailed configuration for a marker attribute.
Args:
        columns (list or str): a column or list of columns used to assign markers
markers (list(str) or str): a custom list of markers. Must exist within
:data:`marker_types`.
**kwargs: any keyword, arg supported by :class:`AttrSpec`
Returns:
a `MarkerAttr` object
"""
if markers is not None:
kwargs['markers'] = markers
kwargs['columns'] = columns
return MarkerAttr(**kwargs)
def cat(columns=None, cats=None, sort=True, ascending=True, **kwargs):
""" Specifies detailed configuration for a chart attribute that uses categoricals.
Args:
columns (list or str): the columns used to generate the categorical variable
cats (list, optional): overrides the values derived from columns
sort (bool, optional): whether to sort the categorical values (default=True)
        ascending (bool, optional): whether to sort in ascending order (default=True)
**kwargs: any keyword, arg supported by :class:`AttrSpec`
Returns:
a `CatAttr` object
"""
if cats is not None:
kwargs['cats'] = cats
kwargs['columns'] = columns
kwargs['sort'] = sort
kwargs['ascending'] = ascending
return CatAttr(**kwargs)
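# A minimal, hedged usage sketch for the spec functions above, kept as a
# comment so it does not execute on import. It assumes the bokeh.charts
# high-level interface (e.g. Scatter) accepts these specs through its
# color/marker keywords; the DataFrame and column names are hypothetical.
#
#   from bokeh.charts import Scatter
#   import pandas as pd
#
#   df = pd.DataFrame({'hp': [130, 95, 70, 110],
#                      'mpg': [18, 24, 31, 22],
#                      'origin': ['US', 'EU', 'JP', 'US']})
#   p = Scatter(df, x='hp', y='mpg',
#               color=color('origin', palette=['red', 'green', 'blue']),
#               marker=marker('origin'))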
|
bsd-3-clause
|
yonglehou/scikit-learn
|
sklearn/ensemble/tests/test_gradient_boosting.py
|
127
|
37672
|
"""
Testing for the gradient boosting module (sklearn.ensemble.gradient_boosting).
"""
import warnings
import numpy as np
from sklearn import datasets
from sklearn.base import clone
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.ensemble.gradient_boosting import ZeroEstimator
from sklearn.metrics import mean_squared_error
from sklearn.utils import check_random_state, tosequence
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_warns
from sklearn.utils.validation import DataConversionWarning
from sklearn.utils.validation import NotFittedError
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [-1, 1, 1]
rng = np.random.RandomState(0)
# also load the boston dataset
# and randomly permute it
boston = datasets.load_boston()
perm = rng.permutation(boston.target.size)
boston.data = boston.data[perm]
boston.target = boston.target[perm]
# also load the iris dataset
# and randomly permute it
iris = datasets.load_iris()
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
def test_classification_toy():
# Check classification on a toy dataset.
for loss in ('deviance', 'exponential'):
clf = GradientBoostingClassifier(loss=loss, n_estimators=10,
random_state=1)
assert_raises(ValueError, clf.predict, T)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(10, len(clf.estimators_))
deviance_decrease = (clf.train_score_[:-1] - clf.train_score_[1:])
assert np.any(deviance_decrease >= 0.0), \
"Train deviance does not monotonically decrease."
def test_parameter_checks():
# Check input parameter validation.
assert_raises(ValueError,
GradientBoostingClassifier(n_estimators=0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(n_estimators=-1).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(learning_rate=0.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(learning_rate=-1.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(loss='foobar').fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_samples_split=0.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_samples_split=-1.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_samples_leaf=0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_samples_leaf=-1.).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_weight_fraction_leaf=-1.).fit,
X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_weight_fraction_leaf=0.6).fit,
X, y)
assert_raises(ValueError,
GradientBoostingClassifier(subsample=0.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(subsample=1.1).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(subsample=-0.1).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(max_depth=-0.1).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(max_depth=0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(init={}).fit, X, y)
# test fit before feature importance
assert_raises(ValueError,
lambda: GradientBoostingClassifier().feature_importances_)
# deviance requires ``n_classes >= 2``.
assert_raises(ValueError,
lambda X, y: GradientBoostingClassifier(
loss='deviance').fit(X, y),
X, [0, 0, 0, 0])
def test_loss_function():
assert_raises(ValueError,
GradientBoostingClassifier(loss='ls').fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(loss='lad').fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(loss='quantile').fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(loss='huber').fit, X, y)
assert_raises(ValueError,
GradientBoostingRegressor(loss='deviance').fit, X, y)
assert_raises(ValueError,
GradientBoostingRegressor(loss='exponential').fit, X, y)
def test_classification_synthetic():
# Test GradientBoostingClassifier on synthetic dataset used by
# Hastie et al. in ESLII Example 12.7.
X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
X_train, X_test = X[:2000], X[2000:]
y_train, y_test = y[:2000], y[2000:]
for loss in ('deviance', 'exponential'):
gbrt = GradientBoostingClassifier(n_estimators=100, min_samples_split=1,
max_depth=1, loss=loss,
learning_rate=1.0, random_state=0)
gbrt.fit(X_train, y_train)
error_rate = (1.0 - gbrt.score(X_test, y_test))
assert error_rate < 0.09, \
"GB(loss={}) failed with error {}".format(loss, error_rate)
gbrt = GradientBoostingClassifier(n_estimators=200, min_samples_split=1,
max_depth=1,
learning_rate=1.0, subsample=0.5,
random_state=0)
gbrt.fit(X_train, y_train)
error_rate = (1.0 - gbrt.score(X_test, y_test))
assert error_rate < 0.08, ("Stochastic GradientBoostingClassifier(loss={}) "
"failed with error {}".format(loss, error_rate))
def test_boston():
# Check consistency on dataset boston house prices with least squares
# and least absolute deviation.
for loss in ("ls", "lad", "huber"):
for subsample in (1.0, 0.5):
last_y_pred = None
for i, sample_weight in enumerate(
(None, np.ones(len(boston.target)),
2 * np.ones(len(boston.target)))):
clf = GradientBoostingRegressor(n_estimators=100, loss=loss,
max_depth=4, subsample=subsample,
min_samples_split=1,
random_state=1)
assert_raises(ValueError, clf.predict, boston.data)
clf.fit(boston.data, boston.target,
sample_weight=sample_weight)
y_pred = clf.predict(boston.data)
mse = mean_squared_error(boston.target, y_pred)
assert mse < 6.0, "Failed with loss %s and " \
"mse = %.4f" % (loss, mse)
if last_y_pred is not None:
np.testing.assert_array_almost_equal(
last_y_pred, y_pred,
                    err_msg="pred_%d doesn't match last pred_%d for loss %r and subsample %r. "
% (i, i - 1, loss, subsample))
last_y_pred = y_pred
def test_iris():
# Check consistency on dataset iris.
for subsample in (1.0, 0.5):
for sample_weight in (None, np.ones(len(iris.target))):
clf = GradientBoostingClassifier(n_estimators=100, loss='deviance',
random_state=1, subsample=subsample)
clf.fit(iris.data, iris.target, sample_weight=sample_weight)
score = clf.score(iris.data, iris.target)
assert score > 0.9, "Failed with subsample %.1f " \
"and score = %f" % (subsample, score)
def test_regression_synthetic():
# Test on synthetic regression datasets used in Leo Breiman,
    # "Bagging Predictors". Machine Learning 24(2): 123-140 (1996).
random_state = check_random_state(1)
regression_params = {'n_estimators': 100, 'max_depth': 4,
'min_samples_split': 1, 'learning_rate': 0.1,
'loss': 'ls'}
# Friedman1
X, y = datasets.make_friedman1(n_samples=1200,
random_state=random_state, noise=1.0)
X_train, y_train = X[:200], y[:200]
X_test, y_test = X[200:], y[200:]
clf = GradientBoostingRegressor()
clf.fit(X_train, y_train)
mse = mean_squared_error(y_test, clf.predict(X_test))
assert mse < 5.0, "Failed on Friedman1 with mse = %.4f" % mse
# Friedman2
X, y = datasets.make_friedman2(n_samples=1200, random_state=random_state)
X_train, y_train = X[:200], y[:200]
X_test, y_test = X[200:], y[200:]
clf = GradientBoostingRegressor(**regression_params)
clf.fit(X_train, y_train)
mse = mean_squared_error(y_test, clf.predict(X_test))
assert mse < 1700.0, "Failed on Friedman2 with mse = %.4f" % mse
# Friedman3
X, y = datasets.make_friedman3(n_samples=1200, random_state=random_state)
X_train, y_train = X[:200], y[:200]
X_test, y_test = X[200:], y[200:]
clf = GradientBoostingRegressor(**regression_params)
clf.fit(X_train, y_train)
mse = mean_squared_error(y_test, clf.predict(X_test))
assert mse < 0.015, "Failed on Friedman3 with mse = %.4f" % mse
def test_feature_importances():
X = np.array(boston.data, dtype=np.float32)
y = np.array(boston.target, dtype=np.float32)
clf = GradientBoostingRegressor(n_estimators=100, max_depth=5,
min_samples_split=1, random_state=1)
clf.fit(X, y)
#feature_importances = clf.feature_importances_
assert_true(hasattr(clf, 'feature_importances_'))
X_new = clf.transform(X, threshold="mean")
assert_less(X_new.shape[1], X.shape[1])
feature_mask = clf.feature_importances_ > clf.feature_importances_.mean()
assert_array_almost_equal(X_new, X[:, feature_mask])
# true feature importance ranking
# true_ranking = np.array([3, 1, 8, 2, 10, 9, 4, 11, 0, 6, 7, 5, 12])
# assert_array_equal(true_ranking, feature_importances.argsort())
def test_probability_log():
# Predict probabilities.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
assert_raises(ValueError, clf.predict_proba, T)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
# check if probabilities are in [0, 1].
y_proba = clf.predict_proba(T)
assert np.all(y_proba >= 0.0)
assert np.all(y_proba <= 1.0)
# derive predictions from probabilities
y_pred = clf.classes_.take(y_proba.argmax(axis=1), axis=0)
assert_array_equal(y_pred, true_result)
def test_check_inputs():
# Test input checks (shape and type of X and y).
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
assert_raises(ValueError, clf.fit, X, y + [0, 1])
from scipy import sparse
X_sparse = sparse.csr_matrix(X)
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
assert_raises(TypeError, clf.fit, X_sparse, y)
clf = GradientBoostingClassifier().fit(X, y)
assert_raises(TypeError, clf.predict, X_sparse)
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
assert_raises(ValueError, clf.fit, X, y,
sample_weight=([1] * len(y)) + [0, 1])
def test_check_inputs_predict():
# X has wrong shape
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X, y)
x = np.array([1.0, 2.0])[:, np.newaxis]
assert_raises(ValueError, clf.predict, x)
x = np.array([])
assert_raises(ValueError, clf.predict, x)
x = np.array([1.0, 2.0, 3.0])[:, np.newaxis]
assert_raises(ValueError, clf.predict, x)
clf = GradientBoostingRegressor(n_estimators=100, random_state=1)
clf.fit(X, rng.rand(len(X)))
x = np.array([1.0, 2.0])[:, np.newaxis]
assert_raises(ValueError, clf.predict, x)
x = np.array([])
assert_raises(ValueError, clf.predict, x)
x = np.array([1.0, 2.0, 3.0])[:, np.newaxis]
assert_raises(ValueError, clf.predict, x)
def test_check_max_features():
# test if max_features is valid.
clf = GradientBoostingRegressor(n_estimators=100, random_state=1,
max_features=0)
assert_raises(ValueError, clf.fit, X, y)
clf = GradientBoostingRegressor(n_estimators=100, random_state=1,
max_features=(len(X[0]) + 1))
assert_raises(ValueError, clf.fit, X, y)
clf = GradientBoostingRegressor(n_estimators=100, random_state=1,
max_features=-0.1)
assert_raises(ValueError, clf.fit, X, y)
def test_max_feature_regression():
# Test to make sure random state is set properly.
X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
X_train, X_test = X[:2000], X[2000:]
y_train, y_test = y[:2000], y[2000:]
gbrt = GradientBoostingClassifier(n_estimators=100, min_samples_split=5,
max_depth=2, learning_rate=.1,
max_features=2, random_state=1)
gbrt.fit(X_train, y_train)
deviance = gbrt.loss_(y_test, gbrt.decision_function(X_test))
assert_true(deviance < 0.5, "GB failed with deviance %.4f" % deviance)
def test_max_feature_auto():
# Test if max features is set properly for floats and str.
X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
_, n_features = X.shape
X_train = X[:2000]
y_train = y[:2000]
gbrt = GradientBoostingClassifier(n_estimators=1, max_features='auto')
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, int(np.sqrt(n_features)))
gbrt = GradientBoostingRegressor(n_estimators=1, max_features='auto')
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, n_features)
gbrt = GradientBoostingRegressor(n_estimators=1, max_features=0.3)
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, int(n_features * 0.3))
gbrt = GradientBoostingRegressor(n_estimators=1, max_features='sqrt')
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, int(np.sqrt(n_features)))
gbrt = GradientBoostingRegressor(n_estimators=1, max_features='log2')
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, int(np.log2(n_features)))
gbrt = GradientBoostingRegressor(n_estimators=1,
max_features=0.01 / X.shape[1])
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, 1)
def test_staged_predict():
# Test whether staged decision function eventually gives
# the same prediction.
X, y = datasets.make_friedman1(n_samples=1200,
random_state=1, noise=1.0)
X_train, y_train = X[:200], y[:200]
X_test = X[200:]
clf = GradientBoostingRegressor()
# test raise ValueError if not fitted
assert_raises(ValueError, lambda X: np.fromiter(
clf.staged_predict(X), dtype=np.float64), X_test)
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
# test if prediction for last stage equals ``predict``
for y in clf.staged_predict(X_test):
assert_equal(y.shape, y_pred.shape)
assert_array_equal(y_pred, y)
def test_staged_predict_proba():
# Test whether staged predict proba eventually gives
# the same prediction.
X, y = datasets.make_hastie_10_2(n_samples=1200,
random_state=1)
X_train, y_train = X[:200], y[:200]
X_test, y_test = X[200:], y[200:]
clf = GradientBoostingClassifier(n_estimators=20)
# test raise NotFittedError if not fitted
assert_raises(NotFittedError, lambda X: np.fromiter(
clf.staged_predict_proba(X), dtype=np.float64), X_test)
clf.fit(X_train, y_train)
# test if prediction for last stage equals ``predict``
for y_pred in clf.staged_predict(X_test):
assert_equal(y_test.shape, y_pred.shape)
assert_array_equal(clf.predict(X_test), y_pred)
# test if prediction for last stage equals ``predict_proba``
for staged_proba in clf.staged_predict_proba(X_test):
assert_equal(y_test.shape[0], staged_proba.shape[0])
assert_equal(2, staged_proba.shape[1])
assert_array_equal(clf.predict_proba(X_test), staged_proba)
def test_staged_functions_defensive():
# test that staged_functions make defensive copies
rng = np.random.RandomState(0)
X = rng.uniform(size=(10, 3))
y = (4 * X[:, 0]).astype(np.int) + 1 # don't predict zeros
for estimator in [GradientBoostingRegressor(),
GradientBoostingClassifier()]:
estimator.fit(X, y)
for func in ['predict', 'decision_function', 'predict_proba']:
staged_func = getattr(estimator, "staged_" + func, None)
if staged_func is None:
# regressor has no staged_predict_proba
continue
with warnings.catch_warnings(record=True):
staged_result = list(staged_func(X))
staged_result[1][:] = 0
assert_true(np.all(staged_result[0] != 0))
def test_serialization():
# Check model serialization.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
try:
import cPickle as pickle
except ImportError:
import pickle
serialized_clf = pickle.dumps(clf, protocol=pickle.HIGHEST_PROTOCOL)
clf = None
clf = pickle.loads(serialized_clf)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
def test_degenerate_targets():
# Check if we can fit even though all targets are equal.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
# classifier should raise exception
assert_raises(ValueError, clf.fit, X, np.ones(len(X)))
clf = GradientBoostingRegressor(n_estimators=100, random_state=1)
clf.fit(X, np.ones(len(X)))
clf.predict(rng.rand(2))
assert_array_equal(np.ones((1,), dtype=np.float64),
clf.predict(rng.rand(2)))
def test_quantile_loss():
# Check if quantile loss with alpha=0.5 equals lad.
clf_quantile = GradientBoostingRegressor(n_estimators=100, loss='quantile',
max_depth=4, alpha=0.5,
random_state=7)
clf_quantile.fit(boston.data, boston.target)
y_quantile = clf_quantile.predict(boston.data)
clf_lad = GradientBoostingRegressor(n_estimators=100, loss='lad',
max_depth=4, random_state=7)
clf_lad.fit(boston.data, boston.target)
y_lad = clf_lad.predict(boston.data)
assert_array_almost_equal(y_quantile, y_lad, decimal=4)
def test_symbol_labels():
# Test with non-integer class labels.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
symbol_y = tosequence(map(str, y))
clf.fit(X, symbol_y)
assert_array_equal(clf.predict(T), tosequence(map(str, true_result)))
assert_equal(100, len(clf.estimators_))
def test_float_class_labels():
# Test with float class labels.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
float_y = np.asarray(y, dtype=np.float32)
clf.fit(X, float_y)
assert_array_equal(clf.predict(T),
np.asarray(true_result, dtype=np.float32))
assert_equal(100, len(clf.estimators_))
def test_shape_y():
# Test with float class labels.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
y_ = np.asarray(y, dtype=np.int32)
y_ = y_[:, np.newaxis]
# This will raise a DataConversionWarning that we want to
# "always" raise, elsewhere the warnings gets ignored in the
# later tests, and the tests that check for this warning fail
assert_warns(DataConversionWarning, clf.fit, X, y_)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
def test_mem_layout():
# Test with different memory layouts of X and y
X_ = np.asfortranarray(X)
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X_, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
X_ = np.ascontiguousarray(X)
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X_, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
y_ = np.asarray(y, dtype=np.int32)
y_ = np.ascontiguousarray(y_)
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X, y_)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
y_ = np.asarray(y, dtype=np.int32)
y_ = np.asfortranarray(y_)
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X, y_)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
def test_oob_improvement():
# Test if oob improvement has correct shape and regression test.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1,
subsample=0.5)
clf.fit(X, y)
assert clf.oob_improvement_.shape[0] == 100
# hard-coded regression test - change if modification in OOB computation
assert_array_almost_equal(clf.oob_improvement_[:5],
np.array([0.19, 0.15, 0.12, -0.12, -0.11]),
decimal=2)
def test_oob_improvement_raise():
# Test if oob improvement has correct shape.
clf = GradientBoostingClassifier(n_estimators=100, random_state=1,
subsample=1.0)
clf.fit(X, y)
assert_raises(AttributeError, lambda: clf.oob_improvement_)
def test_oob_multiclass_iris():
# Check OOB improvement on multi-class dataset.
clf = GradientBoostingClassifier(n_estimators=100, loss='deviance',
random_state=1, subsample=0.5)
clf.fit(iris.data, iris.target)
score = clf.score(iris.data, iris.target)
assert score > 0.9, "Failed with subsample %.1f " \
"and score = %f" % (0.5, score)
assert clf.oob_improvement_.shape[0] == clf.n_estimators
# hard-coded regression test - change if modification in OOB computation
# FIXME: the following snippet does not yield the same results on 32 bits
# assert_array_almost_equal(clf.oob_improvement_[:5],
# np.array([12.68, 10.45, 8.18, 6.43, 5.13]),
# decimal=2)
def test_verbose_output():
# Check verbose=1 does not cause error.
from sklearn.externals.six.moves import cStringIO as StringIO
import sys
old_stdout = sys.stdout
sys.stdout = StringIO()
clf = GradientBoostingClassifier(n_estimators=100, random_state=1,
verbose=1, subsample=0.8)
clf.fit(X, y)
verbose_output = sys.stdout
sys.stdout = old_stdout
# check output
verbose_output.seek(0)
header = verbose_output.readline().rstrip()
# with OOB
true_header = ' '.join(['%10s'] + ['%16s'] * 3) % (
'Iter', 'Train Loss', 'OOB Improve', 'Remaining Time')
assert_equal(true_header, header)
n_lines = sum(1 for l in verbose_output.readlines())
    # one line per iteration for iterations 1-10, then one line every 10 iterations up to 100
assert_equal(10 + 9, n_lines)
def test_more_verbose_output():
# Check verbose=2 does not cause error.
from sklearn.externals.six.moves import cStringIO as StringIO
import sys
old_stdout = sys.stdout
sys.stdout = StringIO()
clf = GradientBoostingClassifier(n_estimators=100, random_state=1,
verbose=2)
clf.fit(X, y)
verbose_output = sys.stdout
sys.stdout = old_stdout
# check output
verbose_output.seek(0)
header = verbose_output.readline().rstrip()
# no OOB
true_header = ' '.join(['%10s'] + ['%16s'] * 2) % (
'Iter', 'Train Loss', 'Remaining Time')
assert_equal(true_header, header)
n_lines = sum(1 for l in verbose_output.readlines())
# 100 lines for n_estimators==100
assert_equal(100, n_lines)
def test_warm_start():
# Test if warm start equals fit.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=200, max_depth=1)
est.fit(X, y)
est_ws = Cls(n_estimators=100, max_depth=1, warm_start=True)
est_ws.fit(X, y)
est_ws.set_params(n_estimators=200)
est_ws.fit(X, y)
assert_array_almost_equal(est_ws.predict(X), est.predict(X))
def test_warm_start_n_estimators():
# Test if warm start equals fit - set n_estimators.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=300, max_depth=1)
est.fit(X, y)
est_ws = Cls(n_estimators=100, max_depth=1, warm_start=True)
est_ws.fit(X, y)
est_ws.set_params(n_estimators=300)
est_ws.fit(X, y)
assert_array_almost_equal(est_ws.predict(X), est.predict(X))
def test_warm_start_max_depth():
# Test if possible to fit trees of different depth in ensemble.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1, warm_start=True)
est.fit(X, y)
est.set_params(n_estimators=110, max_depth=2)
est.fit(X, y)
# last 10 trees have different depth
assert est.estimators_[0, 0].max_depth == 1
for i in range(1, 11):
assert est.estimators_[-i, 0].max_depth == 2
def test_warm_start_clear():
# Test if fit clears state.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1)
est.fit(X, y)
est_2 = Cls(n_estimators=100, max_depth=1, warm_start=True)
est_2.fit(X, y) # inits state
est_2.set_params(warm_start=False)
est_2.fit(X, y) # clears old state and equals est
assert_array_almost_equal(est_2.predict(X), est.predict(X))
def test_warm_start_zero_n_estimators():
# Test if warm start with zero n_estimators raises error
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1, warm_start=True)
est.fit(X, y)
est.set_params(n_estimators=0)
assert_raises(ValueError, est.fit, X, y)
def test_warm_start_smaller_n_estimators():
# Test if warm start with smaller n_estimators raises error
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1, warm_start=True)
est.fit(X, y)
est.set_params(n_estimators=99)
assert_raises(ValueError, est.fit, X, y)
def test_warm_start_equal_n_estimators():
# Test if warm start with equal n_estimators does nothing
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1)
est.fit(X, y)
est2 = clone(est)
est2.set_params(n_estimators=est.n_estimators, warm_start=True)
est2.fit(X, y)
assert_array_almost_equal(est2.predict(X), est.predict(X))
def test_warm_start_oob_switch():
# Test if oob can be turned on during warm start.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1, warm_start=True)
est.fit(X, y)
est.set_params(n_estimators=110, subsample=0.5)
est.fit(X, y)
assert_array_equal(est.oob_improvement_[:100], np.zeros(100))
# the last 10 are not zeros
assert_array_equal(est.oob_improvement_[-10:] == 0.0,
np.zeros(10, dtype=np.bool))
def test_warm_start_oob():
# Test if warm start OOB equals fit.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=200, max_depth=1, subsample=0.5,
random_state=1)
est.fit(X, y)
est_ws = Cls(n_estimators=100, max_depth=1, subsample=0.5,
random_state=1, warm_start=True)
est_ws.fit(X, y)
est_ws.set_params(n_estimators=200)
est_ws.fit(X, y)
assert_array_almost_equal(est_ws.oob_improvement_[:100],
est.oob_improvement_[:100])
def early_stopping_monitor(i, est, locals):
"""Returns True on the 10th iteration. """
if i == 9:
return True
else:
return False
def test_monitor_early_stopping():
# Test if monitor return value works.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=20, max_depth=1, random_state=1, subsample=0.5)
est.fit(X, y, monitor=early_stopping_monitor)
assert_equal(est.n_estimators, 20) # this is not altered
assert_equal(est.estimators_.shape[0], 10)
assert_equal(est.train_score_.shape[0], 10)
assert_equal(est.oob_improvement_.shape[0], 10)
# try refit
est.set_params(n_estimators=30)
est.fit(X, y)
assert_equal(est.n_estimators, 30)
assert_equal(est.estimators_.shape[0], 30)
assert_equal(est.train_score_.shape[0], 30)
est = Cls(n_estimators=20, max_depth=1, random_state=1, subsample=0.5,
warm_start=True)
est.fit(X, y, monitor=early_stopping_monitor)
assert_equal(est.n_estimators, 20)
assert_equal(est.estimators_.shape[0], 10)
assert_equal(est.train_score_.shape[0], 10)
assert_equal(est.oob_improvement_.shape[0], 10)
# try refit
est.set_params(n_estimators=30, warm_start=False)
est.fit(X, y)
assert_equal(est.n_estimators, 30)
assert_equal(est.train_score_.shape[0], 30)
assert_equal(est.estimators_.shape[0], 30)
assert_equal(est.oob_improvement_.shape[0], 30)
def test_complete_classification():
# Test greedy trees with max_depth + 1 leafs.
from sklearn.tree._tree import TREE_LEAF
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
k = 4
est = GradientBoostingClassifier(n_estimators=20, max_depth=None,
random_state=1, max_leaf_nodes=k + 1)
est.fit(X, y)
tree = est.estimators_[0, 0].tree_
assert_equal(tree.max_depth, k)
assert_equal(tree.children_left[tree.children_left == TREE_LEAF].shape[0],
k + 1)
def test_complete_regression():
# Test greedy trees with max_depth + 1 leafs.
from sklearn.tree._tree import TREE_LEAF
k = 4
est = GradientBoostingRegressor(n_estimators=20, max_depth=None,
random_state=1, max_leaf_nodes=k + 1)
est.fit(boston.data, boston.target)
tree = est.estimators_[-1, 0].tree_
assert_equal(tree.children_left[tree.children_left == TREE_LEAF].shape[0],
k + 1)
def test_zero_estimator_reg():
# Test if ZeroEstimator works for regression.
est = GradientBoostingRegressor(n_estimators=20, max_depth=1,
random_state=1, init=ZeroEstimator())
est.fit(boston.data, boston.target)
y_pred = est.predict(boston.data)
mse = mean_squared_error(boston.target, y_pred)
assert_almost_equal(mse, 33.0, decimal=0)
est = GradientBoostingRegressor(n_estimators=20, max_depth=1,
random_state=1, init='zero')
est.fit(boston.data, boston.target)
y_pred = est.predict(boston.data)
mse = mean_squared_error(boston.target, y_pred)
assert_almost_equal(mse, 33.0, decimal=0)
est = GradientBoostingRegressor(n_estimators=20, max_depth=1,
random_state=1, init='foobar')
assert_raises(ValueError, est.fit, boston.data, boston.target)
def test_zero_estimator_clf():
# Test if ZeroEstimator works for classification.
X = iris.data
y = np.array(iris.target)
est = GradientBoostingClassifier(n_estimators=20, max_depth=1,
random_state=1, init=ZeroEstimator())
est.fit(X, y)
assert est.score(X, y) > 0.96
est = GradientBoostingClassifier(n_estimators=20, max_depth=1,
random_state=1, init='zero')
est.fit(X, y)
assert est.score(X, y) > 0.96
# binary clf
mask = y != 0
y[mask] = 1
y[~mask] = 0
est = GradientBoostingClassifier(n_estimators=20, max_depth=1,
random_state=1, init='zero')
est.fit(X, y)
assert est.score(X, y) > 0.96
est = GradientBoostingClassifier(n_estimators=20, max_depth=1,
random_state=1, init='foobar')
assert_raises(ValueError, est.fit, X, y)
def test_max_leaf_nodes_max_depth():
    # Test precedence of max_leaf_nodes over max_depth.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
all_estimators = [GradientBoostingRegressor,
GradientBoostingClassifier]
k = 4
for GBEstimator in all_estimators:
est = GBEstimator(max_depth=1, max_leaf_nodes=k).fit(X, y)
tree = est.estimators_[0, 0].tree_
assert_greater(tree.max_depth, 1)
est = GBEstimator(max_depth=1).fit(X, y)
tree = est.estimators_[0, 0].tree_
assert_equal(tree.max_depth, 1)
def test_warm_start_wo_nestimators_change():
# Test if warm_start does nothing if n_estimators is not changed.
# Regression test for #3513.
clf = GradientBoostingClassifier(n_estimators=10, warm_start=True)
clf.fit([[0, 1], [2, 3]], [0, 1])
assert clf.estimators_.shape[0] == 10
clf.fit([[0, 1], [2, 3]], [0, 1])
assert clf.estimators_.shape[0] == 10
def test_probability_exponential():
# Predict probabilities.
clf = GradientBoostingClassifier(loss='exponential',
n_estimators=100, random_state=1)
assert_raises(ValueError, clf.predict_proba, T)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
# check if probabilities are in [0, 1].
y_proba = clf.predict_proba(T)
assert np.all(y_proba >= 0.0)
assert np.all(y_proba <= 1.0)
score = clf.decision_function(T).ravel()
assert_array_almost_equal(y_proba[:, 1],
1.0 / (1.0 + np.exp(-2 * score)))
# derive predictions from probabilities
y_pred = clf.classes_.take(y_proba.argmax(axis=1), axis=0)
assert_array_equal(y_pred, true_result)
def test_non_uniform_weights_toy_edge_case_reg():
X = [[1, 0],
[1, 0],
[1, 0],
[0, 1]]
y = [0, 0, 1, 0]
# ignore the first 2 training samples by setting their weight to 0
sample_weight = [0, 0, 1, 1]
for loss in ('huber', 'ls', 'lad', 'quantile'):
gb = GradientBoostingRegressor(learning_rate=1.0, n_estimators=2, loss=loss)
gb.fit(X, y, sample_weight=sample_weight)
assert_greater(gb.predict([[1, 0]])[0], 0.5)
def test_non_uniform_weights_toy_min_weight_leaf():
# Regression test for issue #4447
X = [[1, 0],
[1, 0],
[1, 0],
[0, 1],
]
y = [0, 0, 1, 0]
# ignore the first 2 training samples by setting their weight to 0
sample_weight = [0, 0, 1, 1]
gb = GradientBoostingRegressor(n_estimators=5, min_weight_fraction_leaf=0.1)
gb.fit(X, y, sample_weight=sample_weight)
assert_true(gb.predict([[1, 0]])[0] > 0.5)
assert_almost_equal(gb.estimators_[0, 0].splitter.min_weight_leaf, 0.2)
def test_non_uniform_weights_toy_edge_case_clf():
X = [[1, 0],
[1, 0],
[1, 0],
[0, 1]]
y = [0, 0, 1, 0]
# ignore the first 2 training samples by setting their weight to 0
sample_weight = [0, 0, 1, 1]
for loss in ('deviance', 'exponential'):
        gb = GradientBoostingClassifier(loss=loss, n_estimators=5)
gb.fit(X, y, sample_weight=sample_weight)
assert_array_equal(gb.predict([[1, 0]]), [1])
|
bsd-3-clause
|
lewisc/spark-tk
|
regression-tests/sparktkregtests/testcases/dicom/dicom_export_dcm_test.py
|
11
|
4051
|
# vim: set encoding=utf-8
# Copyright (c) 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""tests export_dcm functionality"""
import unittest
from sparktkregtests.lib import sparktk_test
import numpy
from lxml import etree
class ExportDicomTest(sparktk_test.SparkTKTestCase):
def setUp(self):
"""import dicom data for testing"""
super(ExportDicomTest, self).setUp()
self.dataset = self.get_file("dicom_uncompressed")
self.dicom = self.context.dicom.import_dcm(self.dataset)
self.xml_directory = self.get_local_dataset("dicom_xml/")
self.image_directory = self.get_local_dataset("dicom_uncompressed/")
@unittest.skip("Sparktk: dicom export to dcm throws error")
def test_export_to_dcm(self):
# first we will set aside the original data so that we can use it to
# compare with later
original_metadata = self.dicom.metadata.to_pandas()["metadata"]
original_formatted = []
# we will remove the bulkdata tag from our original metadata
# since the bulkdata tag records source of dicom and should
# be ignored during comparison
for metadata in original_metadata:
ascii_metadata = metadata.encode("ascii", "ignore")
xml_root = etree.fromstring(ascii_metadata)
bulk_data_tag = xml_root.xpath("//BulkData")[0]
bulk_data_tag.getparent().remove(bulk_data_tag)
original_formatted.append(etree.tostring(xml_root))
original_imagedata = self.dicom.pixeldata.to_pandas()["imagematrix"]
# now we export the dicom object
# we use our QA libraries to generate a path
# we save the dicom to that path
dcm_export_path = self.get_export_file(self.get_name("DICOM_EXPORT"))
self.dicom.export_to_dcm(dcm_export_path)
# Now we will load back the data we just saved into a new dicom object
# so that we can ensure the data is the same
loaded_dicom = self.context.dicom.import_dcm(dcm_export_path)
# get the loaded metadata and imagedata
loaded_metadata = loaded_dicom.metadata.to_pandas()["metadata"]
loaded_imagedata = loaded_dicom.pixeldata.to_pandas()["imagematrix"]
# ensure that the loaded metadata and imagedata is of the same len
# as the original data,
# then iterate through the records and ensure they are the same
self.assertEqual(len(loaded_metadata), len(original_metadata))
self.assertEqual(len(loaded_imagedata), len(original_imagedata))
for actual in loaded_metadata:
# for each metadata we will remove the bulkdata tag before we
# compare it with the original data since the bulk data tag
# records the source for the dcm and may differ since
# we are loading from a different location than the original dicom
actual_root = etree.fromstring(actual.encode("ascii", "ignore"))
actual_bulk_data_tag = actual_root.xpath("//BulkData")[0]
actual_bulk_data_tag.getparent().remove(actual_bulk_data_tag)
actual = etree.tostring(actual_root)
self.assertTrue(actual in original_formatted)
for actual in loaded_imagedata:
result = any(numpy.array_equal(actual, original) for original in original_imagedata)
self.assertTrue(result)
if __name__ == "__main__":
unittest.main()
|
apache-2.0
|
lazywei/scikit-learn
|
examples/ensemble/plot_adaboost_twoclass.py
|
347
|
3268
|
"""
==================
Two-class AdaBoost
==================
This example fits an AdaBoosted decision stump on a non-linearly separable
classification dataset composed of two "Gaussian quantiles" clusters
(see :func:`sklearn.datasets.make_gaussian_quantiles`) and plots the decision
boundary and decision scores. The distributions of decision scores are shown
separately for samples of class A and B. The predicted class label for each
sample is determined by the sign of the decision score. Samples with decision
scores greater than zero are classified as B, and are otherwise classified
as A. The magnitude of a decision score indicates the degree of confidence in
the predicted class label. Additionally, a new dataset could be constructed
containing a desired purity of class B, for example, by only selecting samples
with a decision score above some value.
"""
print(__doc__)
# Author: Noel Dawe <[email protected]>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.ensemble import AdaBoostClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.datasets import make_gaussian_quantiles
# Construct dataset
X1, y1 = make_gaussian_quantiles(cov=2.,
n_samples=200, n_features=2,
n_classes=2, random_state=1)
X2, y2 = make_gaussian_quantiles(mean=(3, 3), cov=1.5,
n_samples=300, n_features=2,
n_classes=2, random_state=1)
X = np.concatenate((X1, X2))
y = np.concatenate((y1, - y2 + 1))
# Create and fit an AdaBoosted decision tree
bdt = AdaBoostClassifier(DecisionTreeClassifier(max_depth=1),
algorithm="SAMME",
n_estimators=200)
bdt.fit(X, y)
plot_colors = "br"
plot_step = 0.02
class_names = "AB"
plt.figure(figsize=(10, 5))
# Plot the decision boundaries
plt.subplot(121)
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, plot_step),
np.arange(y_min, y_max, plot_step))
Z = bdt.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
cs = plt.contourf(xx, yy, Z, cmap=plt.cm.Paired)
plt.axis("tight")
# Plot the training points
for i, n, c in zip(range(2), class_names, plot_colors):
idx = np.where(y == i)
plt.scatter(X[idx, 0], X[idx, 1],
c=c, cmap=plt.cm.Paired,
label="Class %s" % n)
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.legend(loc='upper right')
plt.xlabel('x')
plt.ylabel('y')
plt.title('Decision Boundary')
# Plot the two-class decision scores
twoclass_output = bdt.decision_function(X)
plot_range = (twoclass_output.min(), twoclass_output.max())
plt.subplot(122)
for i, n, c in zip(range(2), class_names, plot_colors):
plt.hist(twoclass_output[y == i],
bins=10,
range=plot_range,
facecolor=c,
label='Class %s' % n,
alpha=.5)
x1, x2, y1, y2 = plt.axis()
plt.axis((x1, x2, y1, y2 * 1.2))
plt.legend(loc='upper right')
plt.ylabel('Samples')
plt.xlabel('Score')
plt.title('Decision Scores')
plt.tight_layout()
plt.subplots_adjust(wspace=0.35)
plt.show()
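# As noted in the docstring above, a purer subset of class B can be built by
# keeping only samples whose decision score exceeds some cutoff. A minimal
# sketch; the 90th-percentile cutoff is an arbitrary illustrative choice.
purity_cutoff = np.percentile(twoclass_output, 90)
pure_b_mask = twoclass_output > purity_cutoff
print("Kept %d of %d samples; fraction truly in class B: %.2f"
      % (pure_b_mask.sum(), len(y), np.mean(y[pure_b_mask] == 1)))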
|
bsd-3-clause
|
lakshayg/tensorflow
|
tensorflow/examples/learn/text_classification_character_rnn.py
|
38
|
4036
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of recurrent neural networks over characters for DBpedia dataset.
This model is similar to one described in this paper:
"Character-level Convolutional Networks for Text Classification"
http://arxiv.org/abs/1509.01626
and serves as an alternative to the Lua code here:
https://github.com/zhangxiangxiao/Crepe
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
import numpy as np
import pandas
import tensorflow as tf
FLAGS = None
MAX_DOCUMENT_LENGTH = 100
HIDDEN_SIZE = 20
MAX_LABEL = 15
CHARS_FEATURE = 'chars' # Name of the input character feature.
def char_rnn_model(features, labels, mode):
"""Character level recurrent neural network model to predict classes."""
byte_vectors = tf.one_hot(features[CHARS_FEATURE], 256, 1., 0.)
byte_list = tf.unstack(byte_vectors, axis=1)
cell = tf.nn.rnn_cell.GRUCell(HIDDEN_SIZE)
_, encoding = tf.nn.static_rnn(cell, byte_list, dtype=tf.float32)
logits = tf.layers.dense(encoding, MAX_LABEL, activation=None)
predicted_classes = tf.argmax(logits, 1)
if mode == tf.estimator.ModeKeys.PREDICT:
return tf.estimator.EstimatorSpec(
mode=mode,
predictions={
'class': predicted_classes,
'prob': tf.nn.softmax(logits)
})
loss = tf.losses.sparse_softmax_cross_entropy(labels=labels, logits=logits)
if mode == tf.estimator.ModeKeys.TRAIN:
optimizer = tf.train.AdamOptimizer(learning_rate=0.01)
train_op = optimizer.minimize(loss, global_step=tf.train.get_global_step())
return tf.estimator.EstimatorSpec(mode, loss=loss, train_op=train_op)
eval_metric_ops = {
'accuracy': tf.metrics.accuracy(
labels=labels, predictions=predicted_classes)
}
return tf.estimator.EstimatorSpec(
mode=mode, loss=loss, eval_metric_ops=eval_metric_ops)
def main(unused_argv):
# Prepare training and testing data
dbpedia = tf.contrib.learn.datasets.load_dataset(
'dbpedia', test_with_fake_data=FLAGS.test_with_fake_data)
x_train = pandas.DataFrame(dbpedia.train.data)[1]
y_train = pandas.Series(dbpedia.train.target)
x_test = pandas.DataFrame(dbpedia.test.data)[1]
y_test = pandas.Series(dbpedia.test.target)
# Process vocabulary
char_processor = tf.contrib.learn.preprocessing.ByteProcessor(
MAX_DOCUMENT_LENGTH)
x_train = np.array(list(char_processor.fit_transform(x_train)))
x_test = np.array(list(char_processor.transform(x_test)))
# Build model
classifier = tf.estimator.Estimator(model_fn=char_rnn_model)
# Train.
train_input_fn = tf.estimator.inputs.numpy_input_fn(
x={CHARS_FEATURE: x_train},
y=y_train,
batch_size=128,
num_epochs=None,
shuffle=True)
classifier.train(input_fn=train_input_fn, steps=100)
# Eval.
test_input_fn = tf.estimator.inputs.numpy_input_fn(
x={CHARS_FEATURE: x_test},
y=y_test,
num_epochs=1,
shuffle=False)
scores = classifier.evaluate(input_fn=test_input_fn)
print('Accuracy: {0:f}'.format(scores['accuracy']))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--test_with_fake_data',
default=False,
help='Test the example code with fake data.',
action='store_true')
FLAGS, unparsed = parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
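# Example invocation (an illustrative sketch; the flag below is the argparse
# option defined above and lets the example run on fake data instead of the
# full DBpedia download):
#
#   python text_classification_character_rnn.py --test_with_fake_data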
|
apache-2.0
|
priyanshsaxena/techmeet
|
backup_stuff/text_classification/prepare_train_data.py
|
2
|
2844
|
# -*- coding: utf-8 -*-
import pandas as pd
import numpy as np
import re
import string
from nltk.tokenize import word_tokenize
from nltk.corpus import stopwords
import pysentiment as ps
from nltk.stem.porter import PorterStemmer
from nltk.stem.snowball import SnowballStemmer
from nltk.stem.wordnet import WordNetLemmatizer
def stemming(tokenized_docs_no_stopwords):
wnl = WordNetLemmatizer()
porter = PorterStemmer()
snowball = SnowballStemmer('english')
wordnet = WordNetLemmatizer()
preprocessed_docs = []
final_doc = []
for doc in tokenized_docs_no_stopwords:
final_doc.append(porter.stem(doc))
return final_doc
def sentiment(text):
hiv4 = ps.HIV4()
score = hiv4.get_score(text)
return score
def data_cleaning(text):
tokenized_text = word_tokenize(text)
# ----------------------------------- Remove punctuation--------------------------------------------------------------------
regex = re.compile('[%s]' % re.escape(string.punctuation)) #see documentation here: http://docs.python.org/2/library/string.html
tokenized_text_no_punctuation = []
new_review = []
for token in tokenized_text:
new_token = regex.sub(u'', token)
if not new_token == u'':
new_token = new_token.lower()
new_review.append(new_token)
tokenized_text_no_punctuation = new_review
# -------------------------------------------- Remove stopwords ----------------------------------------------------------
new_term_vector = []
for doc in tokenized_text_no_punctuation:
if not doc in stopwords.words('english'):
new_term_vector.append(doc)
text_no_stopwords = new_term_vector
text_no_stopwords = stemming(text_no_stopwords)
# print (text_no_stopwords)
return text_no_stopwords
def process(company_name , article):
# ----- check if company name is present ----------
company_name = company_name.lower()
fh_syn=open("companies.txt","r")
list_company_names = fh_syn.read().split('\n')
fh_syn.close()
company_name_present = 0
if company_name in list_company_names:
company_name_present = 1
article = data_cleaning(article)
company_name = data_cleaning(company_name)
score = sentiment(article)
fh_syn=open("words.txt","r")
list_of_relevant_words=fh_syn.read().split('\n')
fh_syn.close()
    cnt = 0
for i in article:
if i in list_of_relevant_words:
cnt = cnt + 1
cnt = float(cnt)/len(article)
# form of ---> vector = [company_name_present , cnt , polarity , positive , negative , subjectivity]
vector = [company_name_present , cnt]
score = score.values()
vector.append(score[0])
return vector
if __name__ == '__main__':
company_name = "McDonalds"
head_lines = "Discerning Google Stock Is as Simple as a Trend Line"
article = "Google!!! simple walking profiling stock$ has^!@#%%^&**(())_ been, a great. performing investment susan"
vector = process(company_name , article)
|
gpl-3.0
|
numpy/numpy-refactor
|
doc/source/conf.py
|
4
|
8615
|
# -*- coding: utf-8 -*-
import sys, os, re
# Check Sphinx version
import sphinx
if sphinx.__version__ < "1.0.1":
raise RuntimeError("Sphinx 1.0.1 or newer required")
needs_sphinx = '1.0'
# -----------------------------------------------------------------------------
# General configuration
# -----------------------------------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
sys.path.insert(0, os.path.abspath('../sphinxext'))
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.pngmath', 'numpydoc',
'sphinx.ext.intersphinx', 'sphinx.ext.coverage',
'sphinx.ext.doctest', 'sphinx.ext.autosummary',
'plot_directive']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The master toctree document.
#master_doc = 'index'
# General substitutions.
project = 'NumPy'
copyright = '2008-2009, The Scipy community'
# The default replacements for |version| and |release|, also used in various
# other places throughout the built documents.
#
import numpy
# The short X.Y version (including .devXXXX, rcX, b1 suffixes if present)
version = re.sub(r'(\d+\.\d+)\.\d+(.*)', r'\1\2', numpy.__version__)
version = re.sub(r'(\.dev\d+).*?$', r'\1', version)
# The full version, including alpha/beta/rc tags.
release = numpy.__version__
print version, release
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# The reST default role (used for this markup: `text`) to use for all documents.
default_role = "autolink"
# List of directories, relative to source directories, that shouldn't be searched
# for source files.
exclude_dirs = []
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = False
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -----------------------------------------------------------------------------
# HTML output
# -----------------------------------------------------------------------------
# The style sheet to use for HTML and HTML Help pages. A file of that name
# must exist either in Sphinx' static/ path, or in one of the custom paths
# given in html_static_path.
html_style = 'scipy.css'
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = "%s v%s Manual (DRAFT)" % (project, version)
# The name of an image file (within the static path) to place at the top of
# the sidebar.
html_logo = 'scipyshiny_small.png'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
html_sidebars = {
'index': 'indexsidebar.html'
}
# Additional templates that should be rendered to pages, maps page names to
# template names.
html_additional_pages = {
'index': 'indexcontent.html',
}
# If false, no module index is generated.
html_use_modindex = True
# If true, the reST sources are included in the HTML build as _sources/<name>.
#html_copy_source = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".html").
#html_file_suffix = '.html'
# Output file base name for HTML help builder.
htmlhelp_basename = 'numpy'
# Pngmath should try to align formulas properly
pngmath_use_preview = True
# -----------------------------------------------------------------------------
# LaTeX output
# -----------------------------------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, document class [howto/manual]).
_stdauthor = 'Written by the NumPy community'
latex_documents = [
('reference/index', 'numpy-ref.tex', 'NumPy Reference',
_stdauthor, 'manual'),
('user/index', 'numpy-user.tex', 'NumPy User Guide',
_stdauthor, 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
latex_preamble = r'''
\usepackage{amsmath}
\DeclareUnicodeCharacter{00A0}{\nobreakspace}
% In the parameters section, place a newline after the Parameters
% header
\usepackage{expdlist}
\let\latexdescription=\description
\def\description{\latexdescription{}{} \breaklabel}
% Make Examples/etc section headers smaller and more compact
\makeatletter
\titleformat{\paragraph}{\normalsize\py@HeaderFamily}%
{\py@TitleColor}{0em}{\py@TitleColor}{\py@NormalColor}
\titlespacing*{\paragraph}{0pt}{1ex}{0pt}
\makeatother
% Fix footer/header
\renewcommand{\chaptermark}[1]{\markboth{\MakeUppercase{\thechapter.\ #1}}{}}
\renewcommand{\sectionmark}[1]{\markright{\MakeUppercase{\thesection.\ #1}}}
'''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
latex_use_modindex = False
# -----------------------------------------------------------------------------
# Intersphinx configuration
# -----------------------------------------------------------------------------
intersphinx_mapping = {'http://docs.python.org/dev': None}
# -----------------------------------------------------------------------------
# Numpy extensions
# -----------------------------------------------------------------------------
# If we want to do a phantom import from an XML file for all autodocs
phantom_import_file = 'dump.xml'
# Make numpydoc generate plots for example sections
numpydoc_use_plots = True
# -----------------------------------------------------------------------------
# Autosummary
# -----------------------------------------------------------------------------
import glob
autosummary_generate = glob.glob("reference/*.rst")
# -----------------------------------------------------------------------------
# Coverage checker
# -----------------------------------------------------------------------------
coverage_ignore_modules = r"""
""".split()
coverage_ignore_functions = r"""
test($|_) (some|all)true bitwise_not cumproduct pkgload
generic\.
""".split()
coverage_ignore_classes = r"""
""".split()
coverage_c_path = []
coverage_c_regexes = {}
coverage_ignore_c_items = {}
# -----------------------------------------------------------------------------
# Plots
# -----------------------------------------------------------------------------
plot_pre_code = """
import numpy as np
np.random.seed(0)
"""
plot_include_source = True
plot_formats = [('png', 100), 'pdf']
import math
phi = (math.sqrt(5) + 1)/2
import matplotlib
matplotlib.rcParams.update({
'font.size': 8,
'axes.titlesize': 8,
'axes.labelsize': 8,
'xtick.labelsize': 8,
'ytick.labelsize': 8,
'legend.fontsize': 8,
'figure.figsize': (3*phi, 3),
'figure.subplot.bottom': 0.2,
'figure.subplot.left': 0.2,
'figure.subplot.right': 0.9,
'figure.subplot.top': 0.85,
'figure.subplot.wspace': 0.4,
'text.usetex': False,
})
|
bsd-3-clause
|