repo_name | path | copies | size | content | license
---|---|---|---|---|---|
bigdataelephants/scikit-learn
|
examples/exercises/plot_cv_digits.py
|
232
|
1206
|
"""
=============================================
Cross-validation on Digits Dataset Exercise
=============================================
A tutorial exercise using Cross-validation with an SVM on the Digits dataset.
This exercise is used in the :ref:`cv_generators_tut` part of the
:ref:`model_selection_tut` section of the :ref:`stat_learn_tut_index`.
"""
print(__doc__)
import numpy as np
from sklearn import cross_validation, datasets, svm
digits = datasets.load_digits()
X = digits.data
y = digits.target
svc = svm.SVC(kernel='linear')
C_s = np.logspace(-10, 0, 10)
scores = list()
scores_std = list()
for C in C_s:
svc.C = C
this_scores = cross_validation.cross_val_score(svc, X, y, n_jobs=1)
scores.append(np.mean(this_scores))
scores_std.append(np.std(this_scores))
# Do the plotting
import matplotlib.pyplot as plt
plt.figure(1, figsize=(4, 3))
plt.clf()
plt.semilogx(C_s, scores)
plt.semilogx(C_s, np.array(scores) + np.array(scores_std), 'b--')
plt.semilogx(C_s, np.array(scores) - np.array(scores_std), 'b--')
locs, labels = plt.yticks()
plt.yticks(locs, list(map(lambda x: "%g" % x, locs)))
plt.ylabel('CV score')
plt.xlabel('Parameter C')
plt.ylim(0, 1.1)
plt.show()
|
bsd-3-clause
|
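The `cross_validation` module used above was removed in scikit-learn 0.20; a minimal sketch of the same loop against the current `sklearn.model_selection` API (assuming a recent scikit-learn, not part of the original file) could look like:

```python
# Sketch only: same cross-validation exercise with the modern API
# (assumes scikit-learn >= 0.20, where cross_val_score lives in model_selection).
import numpy as np
from sklearn import datasets, svm
from sklearn.model_selection import cross_val_score

digits = datasets.load_digits()
X, y = digits.data, digits.target

svc = svm.SVC(kernel='linear')
C_s = np.logspace(-10, 0, 10)
scores, scores_std = [], []
for C in C_s:
    svc.C = C
    this_scores = cross_val_score(svc, X, y, n_jobs=1)
    scores.append(np.mean(this_scores))
    scores_std.append(np.std(this_scores))
print(scores)
```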
abhishekgahlot/scikit-learn
|
examples/classification/plot_classification_probability.py
|
242
|
2624
|
"""
===============================
Plot classification probability
===============================
Plot the classification probability for different classifiers. We use a
3-class dataset, and we classify it with a Support Vector classifier, and with
L1- and L2-penalized logistic regression in either a one-vs-rest or a
multinomial setting.
Logistic regression is not a multiclass classifier out of the box; it handles
the three classes here through the one-vs-rest scheme or the multinomial
formulation.
"""
print(__doc__)
# Author: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from sklearn import datasets
iris = datasets.load_iris()
X = iris.data[:, 0:2] # we only take the first two features for visualization
y = iris.target
n_features = X.shape[1]
C = 1.0
# Create different classifiers. The logistic regression cannot do
# multiclass out of the box.
classifiers = {'L1 logistic': LogisticRegression(C=C, penalty='l1'),
'L2 logistic (OvR)': LogisticRegression(C=C, penalty='l2'),
'Linear SVC': SVC(kernel='linear', C=C, probability=True,
random_state=0),
'L2 logistic (Multinomial)': LogisticRegression(
C=C, solver='lbfgs', multi_class='multinomial'
)}
n_classifiers = len(classifiers)
plt.figure(figsize=(3 * 2, n_classifiers * 2))
plt.subplots_adjust(bottom=.2, top=.95)
xx = np.linspace(3, 9, 100)
yy = np.linspace(1, 5, 100).T
xx, yy = np.meshgrid(xx, yy)
Xfull = np.c_[xx.ravel(), yy.ravel()]
for index, (name, classifier) in enumerate(classifiers.items()):
classifier.fit(X, y)
y_pred = classifier.predict(X)
classif_rate = np.mean(y_pred.ravel() == y.ravel()) * 100
print("classif_rate for %s : %f " % (name, classif_rate))
# View probabilities
probas = classifier.predict_proba(Xfull)
n_classes = np.unique(y_pred).size
for k in range(n_classes):
plt.subplot(n_classifiers, n_classes, index * n_classes + k + 1)
plt.title("Class %d" % k)
if k == 0:
plt.ylabel(name)
imshow_handle = plt.imshow(probas[:, k].reshape((100, 100)),
extent=(3, 9, 1, 5), origin='lower')
plt.xticks(())
plt.yticks(())
idx = (y_pred == k)
if idx.any():
plt.scatter(X[idx, 0], X[idx, 1], marker='o', c='k')
ax = plt.axes([0.15, 0.04, 0.7, 0.05])
plt.title("Probability")
plt.colorbar(imshow_handle, cax=ax, orientation='horizontal')
plt.show()
|
bsd-3-clause
|
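In newer scikit-learn releases the L1 penalty needs an explicitly compatible solver; a hedged sketch of the probability computation with the current `LogisticRegression` API (version assumptions noted in the comments) is:

```python
# Sketch only: probability estimates with the current LogisticRegression API.
# Assumption: scikit-learn >= 0.22, where penalty='l1' requires the 'saga'
# (or 'liblinear') solver; parameter values mirror the example above.
from sklearn import datasets
from sklearn.linear_model import LogisticRegression

iris = datasets.load_iris()
X, y = iris.data[:, :2], iris.target

classifiers = {
    'L1 logistic': LogisticRegression(C=1.0, penalty='l1', solver='saga',
                                      max_iter=10000),
    'L2 logistic': LogisticRegression(C=1.0, penalty='l2', solver='lbfgs',
                                      max_iter=10000),
}
for name, clf in classifiers.items():
    clf.fit(X, y)
    # predict_proba returns one column per class, each row summing to 1.
    print(name, clf.predict_proba(X[:1]).round(3))
```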
hansbrenna/NetCDF_postprocessor
|
restart_file_modifier.py
|
1
|
1549
|
# -*- coding: utf-8 -*-
"""
Created on Tue Jun 16 10:05:03 2015
@author: hanbre
"""
from __future__ import print_function
import sys
import numpy as np
import pandas as pd
import xray
import netCDF4
def read_data(id_in):
data = xray.open_dataset(id_in)
return data
def modify_data(ds,le,la,lo,value):
ds.HCL[dict(lat=la,lon=lo,lev=le)]=ds.HCL[dict(lat=la,lon=lo,lev=le)]*100
ds.HBR[dict(lat=la,lon=lo,lev=le)]=ds.HBR[dict(lat=la,lon=lo,lev=le)]*100
return ds
id_in = sys.argv[1]
case_id = id_in.split('/')
le = int(sys.argv[2]); la = int(sys.argv[3]); lo = int(sys.argv[4])
value = float(sys.argv[5])
ds = read_data(id_in)
print(ds.HCL[dict(lat=la,lon=lo,lev=le)].values)
ds=modify_data(ds,le,la,lo,value)
print(ds.HCL[dict(lat=la,lon=lo,lev=le)].values)
if 'modified' in case_id[0] or 'modified' in case_id[1]:
ds.to_netcdf(path='{0}/modified/{1}'.format(case_id[0],case_id[2]),mode='w',)
f1=open('{0}/modified/{1}_README'.format(case_id[0],case_id[2]),'a')
print( 'a')
else:
ds.to_netcdf(path='{0}/modified/{1}'.format(case_id[0],case_id[1]),mode='w',)
f1=open('{0}/modified/{1}_README'.format(case_id[0],case_id[1]),'a')
print( 'b')
print('file {0}/modified/{1} has been modified by program restart_file_modifier.py'.format(case_id[0],case_id[1]),file=f1)
print('The changes take effect at lev={0}, lat={1}, lon={2} and the changed values are HCL={3} and HBR={4}'.format(le,la,lo,ds.HCL[dict(lat=la,lon=lo,lev=le)].values,ds.HBR[dict(lat=la,lon=lo,lev=le)].values),file=f1)
f1.close()
|
gpl-3.0
|
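The `xray` package imported above was later renamed `xarray`. A hedged sketch of the same in-place scaling with xarray follows; the variable names HCL/HBR and the lev/lat/lon dimensions are assumptions carried over from the script, not guaranteed for other files:

```python
# Sketch only: xray was renamed xarray; variable names (HCL, HBR) and
# dimension names (lev, lat, lon) are assumptions taken from the script above.
import xarray as xr

def scale_point(path_in, path_out, le, la, lo, factor=100.0):
    """Multiply HCL and HBR at one (lev, lat, lon) grid point and save a copy."""
    ds = xr.open_dataset(path_in)
    idx = dict(lev=le, lat=la, lon=lo)
    ds["HCL"][idx] = ds["HCL"][idx].values * factor
    ds["HBR"][idx] = ds["HBR"][idx].values * factor
    ds.to_netcdf(path_out, mode="w")
    return ds
```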
devanshdalal/scikit-learn
|
examples/applications/plot_model_complexity_influence.py
|
323
|
6372
|
"""
==========================
Model Complexity Influence
==========================
Demonstrate how model complexity influences both prediction accuracy and
computational performance.
The dataset is the Boston Housing dataset (resp. 20 Newsgroups) for
regression (resp. classification).
For each class of models we make the model complexity vary through the choice
of relevant model parameters and measure the influence on both computational
performance (latency) and predictive power (MSE or Hamming Loss).
"""
print(__doc__)
# Author: Eustache Diemert <[email protected]>
# License: BSD 3 clause
import time
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1.parasite_axes import host_subplot
from mpl_toolkits.axisartist.axislines import Axes
from scipy.sparse.csr import csr_matrix
from sklearn import datasets
from sklearn.utils import shuffle
from sklearn.metrics import mean_squared_error
from sklearn.svm.classes import NuSVR
from sklearn.ensemble.gradient_boosting import GradientBoostingRegressor
from sklearn.linear_model.stochastic_gradient import SGDClassifier
from sklearn.metrics import hamming_loss
###############################################################################
# Routines
# initialize random generator
np.random.seed(0)
def generate_data(case, sparse=False):
"""Generate regression/classification data."""
bunch = None
if case == 'regression':
bunch = datasets.load_boston()
elif case == 'classification':
bunch = datasets.fetch_20newsgroups_vectorized(subset='all')
X, y = shuffle(bunch.data, bunch.target)
offset = int(X.shape[0] * 0.8)
X_train, y_train = X[:offset], y[:offset]
X_test, y_test = X[offset:], y[offset:]
if sparse:
X_train = csr_matrix(X_train)
X_test = csr_matrix(X_test)
else:
X_train = np.array(X_train)
X_test = np.array(X_test)
y_test = np.array(y_test)
y_train = np.array(y_train)
data = {'X_train': X_train, 'X_test': X_test, 'y_train': y_train,
'y_test': y_test}
return data
def benchmark_influence(conf):
"""
Benchmark influence of :changing_param: on both MSE and latency.
"""
prediction_times = []
prediction_powers = []
complexities = []
for param_value in conf['changing_param_values']:
conf['tuned_params'][conf['changing_param']] = param_value
estimator = conf['estimator'](**conf['tuned_params'])
print("Benchmarking %s" % estimator)
estimator.fit(conf['data']['X_train'], conf['data']['y_train'])
conf['postfit_hook'](estimator)
complexity = conf['complexity_computer'](estimator)
complexities.append(complexity)
start_time = time.time()
for _ in range(conf['n_samples']):
y_pred = estimator.predict(conf['data']['X_test'])
elapsed_time = (time.time() - start_time) / float(conf['n_samples'])
prediction_times.append(elapsed_time)
pred_score = conf['prediction_performance_computer'](
conf['data']['y_test'], y_pred)
prediction_powers.append(pred_score)
print("Complexity: %d | %s: %.4f | Pred. Time: %fs\n" % (
complexity, conf['prediction_performance_label'], pred_score,
elapsed_time))
return prediction_powers, prediction_times, complexities
def plot_influence(conf, mse_values, prediction_times, complexities):
"""
Plot influence of model complexity on both accuracy and latency.
"""
plt.figure(figsize=(12, 6))
host = host_subplot(111, axes_class=Axes)
plt.subplots_adjust(right=0.75)
par1 = host.twinx()
host.set_xlabel('Model Complexity (%s)' % conf['complexity_label'])
y1_label = conf['prediction_performance_label']
y2_label = "Time (s)"
host.set_ylabel(y1_label)
par1.set_ylabel(y2_label)
p1, = host.plot(complexities, mse_values, 'b-', label="prediction error")
p2, = par1.plot(complexities, prediction_times, 'r-',
label="latency")
host.legend(loc='upper right')
host.axis["left"].label.set_color(p1.get_color())
par1.axis["right"].label.set_color(p2.get_color())
plt.title('Influence of Model Complexity - %s' % conf['estimator'].__name__)
plt.show()
def _count_nonzero_coefficients(estimator):
a = estimator.coef_.toarray()
return np.count_nonzero(a)
###############################################################################
# main code
regression_data = generate_data('regression')
classification_data = generate_data('classification', sparse=True)
configurations = [
{'estimator': SGDClassifier,
'tuned_params': {'penalty': 'elasticnet', 'alpha': 0.001, 'loss':
'modified_huber', 'fit_intercept': True},
'changing_param': 'l1_ratio',
'changing_param_values': [0.25, 0.5, 0.75, 0.9],
'complexity_label': 'non_zero coefficients',
'complexity_computer': _count_nonzero_coefficients,
'prediction_performance_computer': hamming_loss,
'prediction_performance_label': 'Hamming Loss (Misclassification Ratio)',
'postfit_hook': lambda x: x.sparsify(),
'data': classification_data,
'n_samples': 30},
{'estimator': NuSVR,
'tuned_params': {'C': 1e3, 'gamma': 2 ** -15},
'changing_param': 'nu',
'changing_param_values': [0.1, 0.25, 0.5, 0.75, 0.9],
'complexity_label': 'n_support_vectors',
'complexity_computer': lambda x: len(x.support_vectors_),
'data': regression_data,
'postfit_hook': lambda x: x,
'prediction_performance_computer': mean_squared_error,
'prediction_performance_label': 'MSE',
'n_samples': 30},
{'estimator': GradientBoostingRegressor,
'tuned_params': {'loss': 'ls'},
'changing_param': 'n_estimators',
'changing_param_values': [10, 50, 100, 200, 500],
'complexity_label': 'n_trees',
'complexity_computer': lambda x: x.n_estimators,
'data': regression_data,
'postfit_hook': lambda x: x,
'prediction_performance_computer': mean_squared_error,
'prediction_performance_label': 'MSE',
'n_samples': 30},
]
for conf in configurations:
prediction_performances, prediction_times, complexities = \
benchmark_influence(conf)
plot_influence(conf, prediction_performances, prediction_times,
complexities)
|
bsd-3-clause
|
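The same pattern — vary one complexity parameter, time the predictions, record the error — can be sketched in a few self-contained lines. Synthetic data from `make_regression` stands in for the Boston Housing set here, an assumption made because `load_boston` is not shipped with recent scikit-learn releases:

```python
# Sketch of the benchmark loop above on synthetic data (assumption: recent
# scikit-learn without load_boston, so make_regression replaces Boston Housing).
import time
from sklearn.datasets import make_regression
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import train_test_split

X, y = make_regression(n_samples=1000, n_features=20, noise=10.0, random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)

for n_estimators in [10, 50, 100, 200]:
    est = GradientBoostingRegressor(n_estimators=n_estimators, random_state=0)
    est.fit(X_train, y_train)
    start = time.time()
    y_pred = est.predict(X_test)
    latency = time.time() - start
    print("n_estimators=%d | MSE: %.1f | Pred. Time: %fs"
          % (n_estimators, mean_squared_error(y_test, y_pred), latency))
```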
wiki2014/Learning-Summary
|
alps/cts/apps/CameraITS/tests/sensor_fusion/test_sensor_fusion.py
|
1
|
17359
|
# Copyright 2014 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import its.image
import its.device
import its.objects
import its.caps
import time
import math
import pylab
import os.path
import matplotlib
import matplotlib.pyplot
import json
import Image
import numpy
import cv2
import bisect
import scipy.spatial
import sys
NAME = os.path.basename(__file__).split(".")[0]
# Capture 210 VGA frames (which is 7s at 30fps)
N = 210
W,H = 640,480
FEATURE_MARGIN = H * 0.20 / 2 # Only take feature points from the center 20%
                              # so that the measured rotation is much less
                              # affected by rolling shutter.
MIN_FEATURE_PTS = 30 # Minimum number of feature points required to
# perform rotation analysis
MAX_CAM_FRM_RANGE_SEC = 9.0 # Maximum allowed camera frame range. When this
                            # number is significantly larger than 7 seconds,
                            # the system is usually in a busy/bad state.
MIN_GYRO_SMP_RATE = 100.0 # Minimum gyro sample rate
FEATURE_PARAMS = dict( maxCorners = 240,
qualityLevel = 0.3,
minDistance = 7,
blockSize = 7 )
LK_PARAMS = dict( winSize = (15, 15),
maxLevel = 2,
criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT,
10, 0.03))
# Constants to convert between different time units (for clarity).
SEC_TO_NSEC = 1000*1000*1000.0
SEC_TO_MSEC = 1000.0
MSEC_TO_NSEC = 1000*1000.0
MSEC_TO_SEC = 1/1000.0
NSEC_TO_SEC = 1/(1000*1000*1000.0)
NSEC_TO_MSEC = 1/(1000*1000.0)
# Pass/fail thresholds.
THRESH_MAX_CORR_DIST = 0.005
THRESH_MAX_SHIFT_MS = 2
THRESH_MIN_ROT = 0.001
# lens facing
FACING_FRONT = 0
FACING_BACK = 1
FACING_EXTERNAL = 2
def main():
"""Test if image and motion sensor events are well synchronized.
The instructions for running this test are in the SensorFusion.pdf file in
the same directory as this test.
The command-line argument "replay" may be optionally provided. Without this
argument, the test will collect a new set of camera+gyro data from the
device and then analyze it (and it will also dump this data to files in the
current directory). If the "replay" argument is provided, then the script
will instead load the dumped data from a previous run and analyze that
instead. This can be helpful for developers who are digging for additional
information on their measurements.
"""
# Collect or load the camera+gyro data. All gyro events as well as camera
# timestamps are in the "events" dictionary, and "frames" is a list of
# RGB images as numpy arrays.
if "replay" not in sys.argv:
events, frames = collect_data()
else:
events, frames = load_data()
# Sanity check camera timestamps are enclosed by sensor timestamps
# This will catch bugs where camera and gyro timestamps go completely out
# of sync
cam_times = get_cam_times(events["cam"])
min_cam_time = min(cam_times) * NSEC_TO_SEC
max_cam_time = max(cam_times) * NSEC_TO_SEC
gyro_times = [e["time"] for e in events["gyro"]]
min_gyro_time = min(gyro_times) * NSEC_TO_SEC
max_gyro_time = max(gyro_times) * NSEC_TO_SEC
if not (min_cam_time > min_gyro_time and max_cam_time < max_gyro_time):
print "Test failed: camera timestamps [%f,%f] " \
"are not enclosed by gyro timestamps [%f, %f]" % (
min_cam_time, max_cam_time, min_gyro_time, max_gyro_time)
assert(0)
cam_frame_range = max_cam_time - min_cam_time
gyro_time_range = max_gyro_time - min_gyro_time
gyro_smp_per_sec = len(gyro_times) / gyro_time_range
print "Camera frame range", max_cam_time - min_cam_time
print "Gyro samples per second", gyro_smp_per_sec
assert(cam_frame_range < MAX_CAM_FRM_RANGE_SEC)
assert(gyro_smp_per_sec > MIN_GYRO_SMP_RATE)
# Compute the camera rotation displacements (rad) between each pair of
# adjacent frames.
cam_rots = get_cam_rotations(frames, events["facing"])
if max(abs(cam_rots)) < THRESH_MIN_ROT:
print "Device wasn't moved enough"
assert(0)
# Find the best offset (time-shift) to align the gyro and camera motion
# traces; this function integrates the shifted gyro data between camera
# samples for a range of candidate shift values, and returns the shift that
# results in the best correlation.
offset = get_best_alignment_offset(cam_times, cam_rots, events["gyro"])
# Plot the camera and gyro traces after applying the best shift.
cam_times = cam_times + offset*SEC_TO_NSEC
gyro_rots = get_gyro_rotations(events["gyro"], cam_times)
plot_rotations(cam_rots, gyro_rots)
# Pass/fail based on the offset and also the correlation distance.
dist = scipy.spatial.distance.correlation(cam_rots, gyro_rots)
print "Best correlation of %f at shift of %.2fms"%(dist, offset*SEC_TO_MSEC)
assert(dist < THRESH_MAX_CORR_DIST)
assert(abs(offset) < THRESH_MAX_SHIFT_MS*MSEC_TO_SEC)
def get_best_alignment_offset(cam_times, cam_rots, gyro_events):
"""Find the best offset to align the camera and gyro traces.
Uses a correlation distance metric between the curves, where a smaller
value means that the curves are better-correlated.
Args:
cam_times: Array of N camera times, one for each frame.
cam_rots: Array of N-1 camera rotation displacements (rad).
gyro_events: List of gyro event objects.
Returns:
Offset (seconds) of the best alignment.
"""
# Measure the corr. dist. over a shift of up to +/- 50ms (0.5ms step size).
# Get the shift corresponding to the best (lowest) score.
candidates = numpy.arange(-50,50.5,0.5).tolist()
dists = []
for shift in candidates:
times = cam_times + shift*MSEC_TO_NSEC
gyro_rots = get_gyro_rotations(gyro_events, times)
dists.append(scipy.spatial.distance.correlation(cam_rots, gyro_rots))
best_corr_dist = min(dists)
best_shift = candidates[dists.index(best_corr_dist)]
print "Best shift without fitting is ", best_shift, "ms"
# Fit a curve to the corr. dist. data to measure the minima more
# accurately, by looking at the correlation distances within a range of
# +/- 10ms from the measured best score; note that this will use fewer
# than the full +/- 10 range for the curve fit if the measured score
# (which is used as the center of the fit) is within 10ms of the edge of
# the +/- 50ms candidate range.
i = dists.index(best_corr_dist)
candidates = candidates[i-20:i+21]
dists = dists[i-20:i+21]
a,b,c = numpy.polyfit(candidates, dists, 2)
exact_best_shift = -b/(2*a)
if abs(best_shift - exact_best_shift) > 2.0 or a <= 0 or c <= 0:
print "Test failed; bad fit to time-shift curve"
print "best_shift %f, exact_best_shift %f, a %f, c %f" % (best_shift,
exact_best_shift, a, c)
assert(0)
xfit = numpy.arange(candidates[0], candidates[-1], 0.05).tolist()
yfit = [a*x*x+b*x+c for x in xfit]
fig = matplotlib.pyplot.figure()
pylab.plot(candidates, dists, 'r', label="data")
pylab.plot(xfit, yfit, 'b', label="fit")
pylab.plot([exact_best_shift+x for x in [-0.1,0,0.1]], [0,0.01,0], 'b')
pylab.xlabel("Relative horizontal shift between curves (ms)")
pylab.ylabel("Correlation distance")
pylab.legend()
matplotlib.pyplot.savefig("%s_plot_shifts.png" % (NAME))
return exact_best_shift * MSEC_TO_SEC
def plot_rotations(cam_rots, gyro_rots):
"""Save a plot of the camera vs. gyro rotational measurements.
Args:
cam_rots: Array of N-1 camera rotation measurements (rad).
gyro_rots: Array of N-1 gyro rotation measurements (rad).
"""
# For the plot, scale the rotations to be in degrees.
scale = 360/(2*math.pi)
fig = matplotlib.pyplot.figure()
cam_rots = cam_rots * scale
gyro_rots = gyro_rots * scale
pylab.plot(range(len(cam_rots)), cam_rots, 'r', label="camera")
pylab.plot(range(len(gyro_rots)), gyro_rots, 'b', label="gyro")
pylab.legend()
pylab.xlabel("Camera frame number")
pylab.ylabel("Angular displacement between adjacent camera frames (deg)")
pylab.xlim([0, len(cam_rots)])
matplotlib.pyplot.savefig("%s_plot.png" % (NAME))
def get_gyro_rotations(gyro_events, cam_times):
"""Get the rotation values of the gyro.
Integrates the gyro data between each camera frame to compute an angular
displacement.
Args:
gyro_events: List of gyro event objects.
cam_times: Array of N camera times, one for each frame.
Returns:
Array of N-1 gyro rotation measurements (rad).
"""
all_times = numpy.array([e["time"] for e in gyro_events])
all_rots = numpy.array([e["z"] for e in gyro_events])
gyro_rots = []
# Integrate the gyro data between each pair of camera frame times.
for icam in range(len(cam_times)-1):
# Get the window of gyro samples within the current pair of frames.
tcam0 = cam_times[icam]
tcam1 = cam_times[icam+1]
igyrowindow0 = bisect.bisect(all_times, tcam0)
igyrowindow1 = bisect.bisect(all_times, tcam1)
sgyro = 0
# Integrate samples within the window.
for igyro in range(igyrowindow0, igyrowindow1):
vgyro = all_rots[igyro+1]
tgyro0 = all_times[igyro]
tgyro1 = all_times[igyro+1]
deltatgyro = (tgyro1 - tgyro0) * NSEC_TO_SEC
sgyro += vgyro * deltatgyro
# Handle the fractional intervals at the sides of the window.
for side,igyro in enumerate([igyrowindow0-1, igyrowindow1]):
vgyro = all_rots[igyro+1]
tgyro0 = all_times[igyro]
tgyro1 = all_times[igyro+1]
deltatgyro = (tgyro1 - tgyro0) * NSEC_TO_SEC
if side == 0:
f = (tcam0 - tgyro0) / (tgyro1 - tgyro0)
sgyro += vgyro * deltatgyro * (1.0 - f)
else:
f = (tcam1 - tgyro0) / (tgyro1 - tgyro0)
sgyro += vgyro * deltatgyro * f
gyro_rots.append(sgyro)
gyro_rots = numpy.array(gyro_rots)
return gyro_rots
def get_cam_rotations(frames, facing):
"""Get the rotations of the camera between each pair of frames.
Takes N frames and returns N-1 angular displacements corresponding to the
rotations between adjacent pairs of frames, in radians.
Args:
frames: List of N images (as RGB numpy arrays).
facing: Lens-facing constant (FACING_FRONT or FACING_BACK).
Returns:
Array of N-1 camera rotation measurements (rad).
"""
gframes = []
for frame in frames:
frame = (frame * 255.0).astype(numpy.uint8)
gframes.append(cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY))
rots = []
ymin = H/2 - FEATURE_MARGIN
ymax = H/2 + FEATURE_MARGIN
for i in range(1,len(gframes)):
gframe0 = gframes[i-1]
gframe1 = gframes[i]
p0 = cv2.goodFeaturesToTrack(gframe0, mask=None, **FEATURE_PARAMS)
# p0's shape is N * 1 * 2
mask = (p0[:,0,1] >= ymin) & (p0[:,0,1] <= ymax)
p0_filtered = p0[mask]
if len(p0_filtered) < MIN_FEATURE_PTS:
print "Not enough feature points in frame", i
print "Need at least %d features, got %d" % (
MIN_FEATURE_PTS, len(p0_filtered))
assert(0)
p1,st,_ = cv2.calcOpticalFlowPyrLK(gframe0, gframe1, p0_filtered, None,
**LK_PARAMS)
tform = procrustes_rotation(p0_filtered[st==1], p1[st==1])
if facing == FACING_BACK:
rot = -math.atan2(tform[0, 1], tform[0, 0])
elif facing == FACING_FRONT:
rot = math.atan2(tform[0, 1], tform[0, 0])
else:
print "Unknown lens facing", facing
assert(0)
rots.append(rot)
if i == 1:
# Save a debug visualization of the features that are being
# tracked in the first frame.
frame = frames[i]
for x,y in p0_filtered[st==1]:
cv2.circle(frame, (x,y), 3, (100,100,255), -1)
its.image.write_image(frame, "%s_features.png"%(NAME))
return numpy.array(rots)
def get_cam_times(cam_events):
"""Get the camera frame times.
Args:
cam_events: List of (start_exposure, exposure_time, readout_duration)
tuples, one per captured frame, with times in nanoseconds.
Returns:
frame_times: Array of N times, one corresponding to the "middle" of
the exposure of each frame.
"""
# Assign a time to each frame that assumes that the image is instantly
# captured in the middle of its exposure.
starts = numpy.array([start for start,exptime,readout in cam_events])
exptimes = numpy.array([exptime for start,exptime,readout in cam_events])
readouts = numpy.array([readout for start,exptime,readout in cam_events])
frame_times = starts + (exptimes + readouts) / 2.0
return frame_times
def load_data():
"""Load a set of previously captured data.
Returns:
events: Dictionary containing all gyro events and cam timestamps.
frames: List of RGB images as numpy arrays.
"""
with open("%s_events.txt"%(NAME), "r") as f:
events = json.loads(f.read())
n = len(events["cam"])
frames = []
for i in range(n):
img = Image.open("%s_frame%03d.png"%(NAME,i))
w,h = img.size[0:2]
frames.append(numpy.array(img).reshape(h,w,3) / 255.0)
return events, frames
def collect_data():
"""Capture a new set of data from the device.
Captures both motion data and camera frames, while the user is moving
the device in a prescribed manner.
Returns:
events: Dictionary containing all gyro events and cam timestamps.
frames: List of RGB images as numpy arrays.
"""
with its.device.ItsSession() as cam:
props = cam.get_camera_properties()
its.caps.skip_unless(its.caps.sensor_fusion(props) and
its.caps.manual_sensor(props) and
props['android.lens.facing'] != FACING_EXTERNAL)
print "Starting sensor event collection"
cam.start_sensor_events()
# Sleep a while for gyro events to stabilize.
time.sleep(0.5)
# TODO: Ensure that OIS is disabled; set to DISABLE and wait some time.
# Capture the frames.
facing = props['android.lens.facing']
if facing != FACING_FRONT and facing != FACING_BACK:
print "Unknown lens facing", facing
assert(0)
fmt = {"format":"yuv", "width":W, "height":H}
s,e,_,_,_ = cam.do_3a(get_results=True, do_af=False)
req = its.objects.manual_capture_request(s, e)
print "Capturing %dx%d with sens. %d, exp. time %.1fms" % (
W, H, s, e*NSEC_TO_MSEC)
caps = cam.do_capture([req]*N, fmt)
# Get the gyro events.
print "Reading out sensor events"
gyro = cam.get_sensor_events()["gyro"]
print "Number of gyro samples", len(gyro)
# Combine the events into a single structure.
print "Dumping event data"
starts = [c["metadata"]["android.sensor.timestamp"] for c in caps]
exptimes = [c["metadata"]["android.sensor.exposureTime"] for c in caps]
readouts = [c["metadata"]["android.sensor.rollingShutterSkew"]
for c in caps]
events = {"gyro": gyro, "cam": zip(starts,exptimes,readouts),
"facing": facing}
with open("%s_events.txt"%(NAME), "w") as f:
f.write(json.dumps(events))
# Convert the frames to RGB.
print "Dumping frames"
frames = []
for i,c in enumerate(caps):
img = its.image.convert_capture_to_rgb_image(c)
frames.append(img)
its.image.write_image(img, "%s_frame%03d.png"%(NAME,i))
return events, frames
def procrustes_rotation(X, Y):
"""
Procrustes analysis determines a linear transformation (translation,
reflection, orthogonal rotation and scaling) of the points in Y to best
conform them to the points in matrix X, using the sum of squared errors
as the goodness of fit criterion.
Args:
X, Y: Matrices of target and input coordinates.
Returns:
The rotation component of the transformation that maps X to Y.
"""
X0 = (X-X.mean(0)) / numpy.sqrt(((X-X.mean(0))**2.0).sum())
Y0 = (Y-Y.mean(0)) / numpy.sqrt(((Y-Y.mean(0))**2.0).sum())
U,s,Vt = numpy.linalg.svd(numpy.dot(X0.T, Y0),full_matrices=False)
return numpy.dot(Vt.T, U.T)
if __name__ == '__main__':
main()
|
gpl-3.0
|
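The geometric core of the test above is `procrustes_rotation`. A small, dependency-free numpy check (illustrative only, not part of the CTS test) that the SVD-based estimate recovers a known rotation angle:

```python
# Self-contained numpy check of the SVD-based rotation estimate used above:
# rotate a 2-D point cloud by a known small angle and recover that angle.
import math
import numpy as np

def procrustes_rotation(X, Y):
    # Same math as the test: orthogonal factor of the cross-covariance.
    X0 = (X - X.mean(0)) / np.sqrt(((X - X.mean(0)) ** 2.0).sum())
    Y0 = (Y - Y.mean(0)) / np.sqrt(((Y - Y.mean(0)) ** 2.0).sum())
    U, s, Vt = np.linalg.svd(np.dot(X0.T, Y0), full_matrices=False)
    return np.dot(Vt.T, U.T)

rng = np.random.RandomState(0)
pts = rng.rand(100, 2)
angle = 0.01  # rad; roughly the magnitude seen between adjacent video frames
R = np.array([[math.cos(angle), -math.sin(angle)],
              [math.sin(angle),  math.cos(angle)]])
tform = procrustes_rotation(pts, pts.dot(R.T))
# atan2 gives -angle here; the test negates it for back-facing cameras.
print("recovered:", -math.atan2(tform[0, 1], tform[0, 0]), "expected:", angle)
```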
Vimos/scikit-learn
|
benchmarks/bench_sample_without_replacement.py
|
397
|
8008
|
"""
Benchmarks for sampling integers without replacement.
"""
from __future__ import division
from __future__ import print_function
import gc
import sys
import optparse
from datetime import datetime
import operator
import matplotlib.pyplot as plt
import numpy as np
import random
from sklearn.externals.six.moves import xrange
from sklearn.utils.random import sample_without_replacement
def compute_time(t_start, delta):
mu_second = 0.0 + 10 ** 6 # number of microseconds in a second
return delta.seconds + delta.microseconds / mu_second
def bench_sample(sampling, n_population, n_samples):
gc.collect()
# start time
t_start = datetime.now()
sampling(n_population, n_samples)
delta = (datetime.now() - t_start)
# stop time
time = compute_time(t_start, delta)
return time
if __name__ == "__main__":
###########################################################################
# Option parser
###########################################################################
op = optparse.OptionParser()
op.add_option("--n-times",
dest="n_times", default=5, type=int,
help="Benchmark results are average over n_times experiments")
op.add_option("--n-population",
dest="n_population", default=100000, type=int,
help="Size of the population to sample from.")
op.add_option("--n-step",
dest="n_steps", default=5, type=int,
help="Number of step interval between 0 and n_population.")
default_algorithms = "custom-tracking-selection,custom-auto," \
"custom-reservoir-sampling,custom-pool,"\
"python-core-sample,numpy-permutation"
op.add_option("--algorithm",
dest="selected_algorithm",
default=default_algorithms,
type=str,
help="Comma-separated list of transformer to benchmark. "
"Default: %default. \nAvailable: %default")
# op.add_option("--random-seed",
# dest="random_seed", default=13, type=int,
# help="Seed used by the random number generators.")
(opts, args) = op.parse_args()
if len(args) > 0:
op.error("this script takes no arguments.")
sys.exit(1)
selected_algorithm = opts.selected_algorithm.split(',')
for key in selected_algorithm:
if key not in default_algorithms.split(','):
raise ValueError("Unknown sampling algorithm \"%s\" not in (%s)."
% (key, default_algorithms))
###########################################################################
# List sampling algorithm
###########################################################################
# We assume that sampling algorithm has the following signature:
# sample(n_population, n_sample)
#
sampling_algorithm = {}
###########################################################################
# Set Python core input
sampling_algorithm["python-core-sample"] = \
lambda n_population, n_sample: \
random.sample(xrange(n_population), n_sample)
###########################################################################
# Set custom automatic method selection
sampling_algorithm["custom-auto"] = \
lambda n_population, n_samples, random_state=None: \
sample_without_replacement(n_population,
n_samples,
method="auto",
random_state=random_state)
###########################################################################
# Set custom tracking based method
sampling_algorithm["custom-tracking-selection"] = \
lambda n_population, n_samples, random_state=None: \
sample_without_replacement(n_population,
n_samples,
method="tracking_selection",
random_state=random_state)
###########################################################################
# Set custom reservoir based method
sampling_algorithm["custom-reservoir-sampling"] = \
lambda n_population, n_samples, random_state=None: \
sample_without_replacement(n_population,
n_samples,
method="reservoir_sampling",
random_state=random_state)
###########################################################################
# Set custom pool based method
sampling_algorithm["custom-pool"] = \
lambda n_population, n_samples, random_state=None: \
sample_without_replacement(n_population,
n_samples,
method="pool",
random_state=random_state)
###########################################################################
# Numpy permutation based
sampling_algorithm["numpy-permutation"] = \
lambda n_population, n_sample: \
np.random.permutation(n_population)[:n_sample]
###########################################################################
# Remove unspecified algorithm
sampling_algorithm = dict((key, value)
for key, value in sampling_algorithm.items()
if key in selected_algorithm)
###########################################################################
# Perform benchmark
###########################################################################
time = {}
n_samples = np.linspace(start=0, stop=opts.n_population,
num=opts.n_steps).astype(np.int)
ratio = n_samples / opts.n_population
print('Benchmarks')
print("===========================")
for name in sorted(sampling_algorithm):
print("Perform benchmarks for %s..." % name, end="")
time[name] = np.zeros(shape=(opts.n_steps, opts.n_times))
for step in xrange(opts.n_steps):
for it in xrange(opts.n_times):
time[name][step, it] = bench_sample(sampling_algorithm[name],
opts.n_population,
n_samples[step])
print("done")
print("Averaging results...", end="")
for name in sampling_algorithm:
time[name] = np.mean(time[name], axis=1)
print("done\n")
# Print results
###########################################################################
print("Script arguments")
print("===========================")
arguments = vars(opts)
print("%s \t | %s " % ("Arguments".ljust(16),
"Value".center(12),))
print(25 * "-" + ("|" + "-" * 14) * 1)
for key, value in arguments.items():
print("%s \t | %s " % (str(key).ljust(16),
str(value).strip().center(12)))
print("")
print("Sampling algorithm performance:")
print("===============================")
print("Results are averaged over %s repetition(s)." % opts.n_times)
print("")
fig = plt.figure('scikit-learn sample w/o replacement benchmark results')
plt.title("n_population = %s, n_times = %s" %
(opts.n_population, opts.n_times))
ax = fig.add_subplot(111)
for name in sampling_algorithm:
ax.plot(ratio, time[name], label=name)
ax.set_xlabel('ratio of n_sample / n_population')
ax.set_ylabel('Time (s)')
ax.legend()
# Sort legend labels
handles, labels = ax.get_legend_handles_labels()
hl = sorted(zip(handles, labels), key=operator.itemgetter(1))
handles2, labels2 = zip(*hl)
ax.legend(handles2, labels2, loc=0)
plt.show()
|
bsd-3-clause
|
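A minimal, hedged sketch of the core comparison without the `sklearn.externals.six` compatibility layer (removed in scikit-learn 0.23), timing just two of the strategies with `time.perf_counter`; the population and sample sizes are illustrative:

```python
# Sketch only: two of the benchmarked strategies, without the removed
# sklearn.externals.six shims; population/sample sizes are illustrative.
import random
import time
from sklearn.utils.random import sample_without_replacement

n_population, n_samples = 100000, 50000

t0 = time.perf_counter()
sample_without_replacement(n_population, n_samples, method="auto")
print("custom-auto:        %.4fs" % (time.perf_counter() - t0))

t0 = time.perf_counter()
random.sample(range(n_population), n_samples)
print("python-core-sample: %.4fs" % (time.perf_counter() - t0))
```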
adrian-soto/Waterpack
|
examples/H-b_network/analyze.py
|
1
|
1706
|
import numpy as np
import matplotlib.pyplot as plt
import networkx as nx
Afile='trash.dat'
A = np.loadtxt(Afile)
A = A.astype(int)
G=nx.from_numpy_matrix(A)
plt.matshow(A)
#plt.show()
NCC=nx.number_connected_components(G)
print "# of connected components =", NCC
nx.draw(G, with_labels=True)
# Prepare subplots
fig = plt.figure()
axD=fig.add_subplot(221)
axA=fig.add_subplot(222)
axL=fig.add_subplot(223)
axC=fig.add_subplot(224)
edges=np.arange(0,max(nx.degree(G).values())+2)
axD.hist(sorted(nx.degree(G).values()), edges)
axD.set_xlabel('Node degree')
axD.set_ylabel('Frequency')
axD.title.set_text('Node degree distribution of A')
A=nx.to_numpy_matrix(G)
axA.plot(np.sort(np.linalg.eigvals( A ) ) )
axA.set_xlim(0,np.shape(A)[0])
axA.set_xlabel('eigenvector index')
axA.set_ylabel('eigenvalue')
axA.title.set_text('Eigenvalue spectrum of A')
L=nx.normalized_laplacian_matrix(G)
axL.plot(np.sort(np.linalg.eigvals( L.todense() ) ) )
axL.set_xlim(0,np.shape(L)[0])
axL.set_xlabel('eigenvector')
axL.set_ylabel('eigenvalue')
axL.title.set_text('Eigenvalue spectrum of L_n')
#plt.show()
C=nx.degree_centrality(G)
axC.plot(np.sort(C.values()) )
axC.set_xlim(0,np.shape(L)[0])
axC.set_xlabel('node index')
axC.set_ylabel('centrality')
axC.title.set_text('Degree centrality spectrum')
plt.tight_layout()
#plt.show()
A1 =A.astype(int)
A2 =np.dot(A1 , A1)
A3 =np.dot(A2 , A1)
A4 =np.dot(A3 , A1)
A5 =np.dot(A4 , A1)
A6 =np.dot(A5 , A1)
A7 =np.dot(A6 , A1)
A8 =np.dot(A7 , A1)
A9 =np.dot(A8 , A1)
A10=np.dot(A9 , A1)
Nmol=A.shape
Nmol=Nmol[0]
print Nmol
for i in range(0, Nmol):
print i, A1[i,i], A2[i,i], A3[i,i], A4[i,i], A5[i,i], A6[i,i], A7[i,i], A8[i,i], A9[i,i], A10[i,i]
|
gpl-2.0
|
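The script above targets Python 2 and networkx 1.x (dict-returning `degree()`, `from_numpy_matrix`). A hedged sketch of the same graph summaries against networkx 2.x/3.x, using a random adjacency matrix since `trash.dat` is not available here:

```python
# Sketch only: same graph summaries with networkx >= 2.x / Python 3.
# Assumption: a random symmetric 0/1 adjacency matrix stands in for trash.dat.
import numpy as np
import networkx as nx

rng = np.random.default_rng(0)
A = (rng.random((20, 20)) < 0.15).astype(int)
A = np.triu(A, 1)
A = A + A.T                      # symmetric adjacency, no self-loops

G = nx.from_numpy_array(A)       # from_numpy_matrix was removed in networkx 3.0
print("# of connected components =", nx.number_connected_components(G))

degrees = [d for _, d in G.degree()]          # DegreeView, not a dict anymore
L = nx.normalized_laplacian_matrix(G).todense()
print("max node degree:", max(degrees))
print("largest L_n eigenvalue:", np.sort(np.linalg.eigvalsh(L))[-1])
print("degree centrality (sorted):", sorted(nx.degree_centrality(G).values()))
```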
untom/scikit-learn
|
sklearn/ensemble/partial_dependence.py
|
251
|
15097
|
"""Partial dependence plots for tree ensembles. """
# Authors: Peter Prettenhofer
# License: BSD 3 clause
from itertools import count
import numbers
import numpy as np
from scipy.stats.mstats import mquantiles
from ..utils.extmath import cartesian
from ..externals.joblib import Parallel, delayed
from ..externals import six
from ..externals.six.moves import map, range, zip
from ..utils import check_array
from ..tree._tree import DTYPE
from ._gradient_boosting import _partial_dependence_tree
from .gradient_boosting import BaseGradientBoosting
def _grid_from_X(X, percentiles=(0.05, 0.95), grid_resolution=100):
"""Generate a grid of points based on the ``percentiles of ``X``.
The grid is generated by placing ``grid_resolution`` equally
spaced points between the ``percentiles`` of each column
of ``X``.
Parameters
----------
X : ndarray
The data
percentiles : tuple of floats
The percentiles which are used to construct the extreme
values of the grid axes.
grid_resolution : int
The number of equally spaced points that are placed
on the grid.
Returns
-------
grid : ndarray
All data points on the grid; ``grid.shape[1] == X.shape[1]``
and ``grid.shape[0] == grid_resolution * X.shape[1]``.
axes : seq of ndarray
The axes with which the grid has been created.
"""
if len(percentiles) != 2:
raise ValueError('percentile must be tuple of len 2')
if not all(0. <= x <= 1. for x in percentiles):
raise ValueError('percentile values must be in [0, 1]')
axes = []
for col in range(X.shape[1]):
uniques = np.unique(X[:, col])
if uniques.shape[0] < grid_resolution:
# feature has low resolution use unique vals
axis = uniques
else:
emp_percentiles = mquantiles(X, prob=percentiles, axis=0)
# create axis based on percentiles and grid resolution
axis = np.linspace(emp_percentiles[0, col],
emp_percentiles[1, col],
num=grid_resolution, endpoint=True)
axes.append(axis)
return cartesian(axes), axes
def partial_dependence(gbrt, target_variables, grid=None, X=None,
percentiles=(0.05, 0.95), grid_resolution=100):
"""Partial dependence of ``target_variables``.
Partial dependence plots show the dependence between the joint values
of the ``target_variables`` and the function represented
by the ``gbrt``.
Read more in the :ref:`User Guide <partial_dependence>`.
Parameters
----------
gbrt : BaseGradientBoosting
A fitted gradient boosting model.
target_variables : array-like, dtype=int
The target features for which the partial dependency should be
computed (size should be smaller than 3 for visual renderings).
grid : array-like, shape=(n_points, len(target_variables))
The grid of ``target_variables`` values for which the
partial dependency should be evaluated (either ``grid`` or ``X``
must be specified).
X : array-like, shape=(n_samples, n_features)
The data on which ``gbrt`` was trained. It is used to generate
a ``grid`` for the ``target_variables``. The ``grid`` comprises
``grid_resolution`` equally spaced points between the two
``percentiles``.
percentiles : (low, high), default=(0.05, 0.95)
The lower and upper percentile used to create the extreme values
for the ``grid``. Only used if ``X`` is not None.
grid_resolution : int, default=100
The number of equally spaced points on the ``grid``.
Returns
-------
pdp : array, shape=(n_classes, n_points)
The partial dependence function evaluated on the ``grid``.
For regression and binary classification ``n_classes==1``.
axes : seq of ndarray or None
The axes with which the grid has been created or None if
the grid has been given.
Examples
--------
>>> samples = [[0, 0, 2], [1, 0, 0]]
>>> labels = [0, 1]
>>> from sklearn.ensemble import GradientBoostingClassifier
>>> gb = GradientBoostingClassifier(random_state=0).fit(samples, labels)
>>> kwargs = dict(X=samples, percentiles=(0, 1), grid_resolution=2)
>>> partial_dependence(gb, [0], **kwargs) # doctest: +SKIP
(array([[-4.52..., 4.52...]]), [array([ 0., 1.])])
"""
if not isinstance(gbrt, BaseGradientBoosting):
raise ValueError('gbrt has to be an instance of BaseGradientBoosting')
if gbrt.estimators_.shape[0] == 0:
raise ValueError('Call %s.fit before partial_dependence' %
gbrt.__class__.__name__)
if (grid is None and X is None) or (grid is not None and X is not None):
raise ValueError('Either grid or X must be specified')
target_variables = np.asarray(target_variables, dtype=np.int32,
order='C').ravel()
if any([not (0 <= fx < gbrt.n_features) for fx in target_variables]):
raise ValueError('target_variables must be in [0, %d]'
% (gbrt.n_features - 1))
if X is not None:
X = check_array(X, dtype=DTYPE, order='C')
grid, axes = _grid_from_X(X[:, target_variables], percentiles,
grid_resolution)
else:
assert grid is not None
# don't return axes if grid is given
axes = None
# grid must be 2d
if grid.ndim == 1:
grid = grid[:, np.newaxis]
if grid.ndim != 2:
raise ValueError('grid must be 2d but is %dd' % grid.ndim)
grid = np.asarray(grid, dtype=DTYPE, order='C')
assert grid.shape[1] == target_variables.shape[0]
n_trees_per_stage = gbrt.estimators_.shape[1]
n_estimators = gbrt.estimators_.shape[0]
pdp = np.zeros((n_trees_per_stage, grid.shape[0],), dtype=np.float64,
order='C')
for stage in range(n_estimators):
for k in range(n_trees_per_stage):
tree = gbrt.estimators_[stage, k].tree_
_partial_dependence_tree(tree, grid, target_variables,
gbrt.learning_rate, pdp[k])
return pdp, axes
def plot_partial_dependence(gbrt, X, features, feature_names=None,
label=None, n_cols=3, grid_resolution=100,
percentiles=(0.05, 0.95), n_jobs=1,
verbose=0, ax=None, line_kw=None,
contour_kw=None, **fig_kw):
"""Partial dependence plots for ``features``.
The ``len(features)`` plots are arranged in a grid with ``n_cols``
columns. Two-way partial dependence plots are plotted as contour
plots.
Read more in the :ref:`User Guide <partial_dependence>`.
Parameters
----------
gbrt : BaseGradientBoosting
A fitted gradient boosting model.
X : array-like, shape=(n_samples, n_features)
The data on which ``gbrt`` was trained.
features : seq of tuples or ints
If seq[i] is an int or a tuple with one int value, a one-way
PDP is created; if seq[i] is a tuple of two ints, a two-way
PDP is created.
feature_names : seq of str
Name of each feature; feature_names[i] holds
the name of the feature with index i.
label : object
The class label for which the PDPs should be computed.
Only if gbrt is a multi-class model. Must be in ``gbrt.classes_``.
n_cols : int
The number of columns in the grid plot (default: 3).
percentiles : (low, high), default=(0.05, 0.95)
The lower and upper percentile used to create the extreme values
for the PDP axes.
grid_resolution : int, default=100
The number of equally spaced points on the axes.
n_jobs : int
The number of CPUs to use to compute the PDs. -1 means 'all CPUs'.
Defaults to 1.
verbose : int
Verbose output during PD computations. Defaults to 0.
ax : Matplotlib axis object, default None
An axis object onto which the plots will be drawn.
line_kw : dict
Dict with keywords passed to the ``pylab.plot`` call.
For one-way partial dependence plots.
contour_kw : dict
Dict with keywords passed to the ``pylab.contourf`` call.
For two-way partial dependence plots.
fig_kw : dict
Dict with keywords passed to the figure() call.
Note that all keywords not recognized above will be automatically
included here.
Returns
-------
fig : figure
The Matplotlib Figure object.
axs : seq of Axis objects
A seq of Axis objects, one for each subplot.
Examples
--------
>>> from sklearn.datasets import make_friedman1
>>> from sklearn.ensemble import GradientBoostingRegressor
>>> X, y = make_friedman1()
>>> clf = GradientBoostingRegressor(n_estimators=10).fit(X, y)
>>> fig, axs = plot_partial_dependence(clf, X, [0, (0, 1)]) #doctest: +SKIP
...
"""
import matplotlib.pyplot as plt
from matplotlib import transforms
from matplotlib.ticker import MaxNLocator
from matplotlib.ticker import ScalarFormatter
if not isinstance(gbrt, BaseGradientBoosting):
raise ValueError('gbrt has to be an instance of BaseGradientBoosting')
if gbrt.estimators_.shape[0] == 0:
raise ValueError('Call %s.fit before partial_dependence' %
gbrt.__class__.__name__)
# set label_idx for multi-class GBRT
if hasattr(gbrt, 'classes_') and np.size(gbrt.classes_) > 2:
if label is None:
raise ValueError('label is not given for multi-class PDP')
label_idx = np.searchsorted(gbrt.classes_, label)
if gbrt.classes_[label_idx] != label:
raise ValueError('label %s not in ``gbrt.classes_``' % str(label))
else:
# regression and binary classification
label_idx = 0
X = check_array(X, dtype=DTYPE, order='C')
if gbrt.n_features != X.shape[1]:
raise ValueError('X.shape[1] does not match gbrt.n_features')
if line_kw is None:
line_kw = {'color': 'green'}
if contour_kw is None:
contour_kw = {}
# convert feature_names to list
if feature_names is None:
# if not feature_names use fx indices as name
feature_names = [str(i) for i in range(gbrt.n_features)]
elif isinstance(feature_names, np.ndarray):
feature_names = feature_names.tolist()
def convert_feature(fx):
if isinstance(fx, six.string_types):
try:
fx = feature_names.index(fx)
except ValueError:
raise ValueError('Feature %s not in feature_names' % fx)
return fx
# convert features into a seq of int tuples
tmp_features = []
for fxs in features:
if isinstance(fxs, (numbers.Integral,) + six.string_types):
fxs = (fxs,)
try:
fxs = np.array([convert_feature(fx) for fx in fxs], dtype=np.int32)
except TypeError:
raise ValueError('features must be either int, str, or tuple '
'of int/str')
if not (1 <= np.size(fxs) <= 2):
raise ValueError('target features must be either one or two')
tmp_features.append(fxs)
features = tmp_features
names = []
try:
for fxs in features:
l = []
# explicit loop so "i" is bound for exception below
for i in fxs:
l.append(feature_names[i])
names.append(l)
except IndexError:
raise ValueError('features[i] must be in [0, n_features) '
'but was %d' % i)
# compute PD functions
pd_result = Parallel(n_jobs=n_jobs, verbose=verbose)(
delayed(partial_dependence)(gbrt, fxs, X=X,
grid_resolution=grid_resolution,
percentiles=percentiles)
for fxs in features)
# get global min and max values of PD grouped by plot type
pdp_lim = {}
for pdp, axes in pd_result:
min_pd, max_pd = pdp[label_idx].min(), pdp[label_idx].max()
n_fx = len(axes)
old_min_pd, old_max_pd = pdp_lim.get(n_fx, (min_pd, max_pd))
min_pd = min(min_pd, old_min_pd)
max_pd = max(max_pd, old_max_pd)
pdp_lim[n_fx] = (min_pd, max_pd)
# create contour levels for two-way plots
if 2 in pdp_lim:
Z_level = np.linspace(*pdp_lim[2], num=8)
if ax is None:
fig = plt.figure(**fig_kw)
else:
fig = ax.get_figure()
fig.clear()
n_cols = min(n_cols, len(features))
n_rows = int(np.ceil(len(features) / float(n_cols)))
axs = []
for i, fx, name, (pdp, axes) in zip(count(), features, names,
pd_result):
ax = fig.add_subplot(n_rows, n_cols, i + 1)
if len(axes) == 1:
ax.plot(axes[0], pdp[label_idx].ravel(), **line_kw)
else:
# make contour plot
assert len(axes) == 2
XX, YY = np.meshgrid(axes[0], axes[1])
Z = pdp[label_idx].reshape(list(map(np.size, axes))).T
CS = ax.contour(XX, YY, Z, levels=Z_level, linewidths=0.5,
colors='k')
ax.contourf(XX, YY, Z, levels=Z_level, vmax=Z_level[-1],
vmin=Z_level[0], alpha=0.75, **contour_kw)
ax.clabel(CS, fmt='%2.2f', colors='k', fontsize=10, inline=True)
# plot data deciles + axes labels
deciles = mquantiles(X[:, fx[0]], prob=np.arange(0.1, 1.0, 0.1))
trans = transforms.blended_transform_factory(ax.transData,
ax.transAxes)
ylim = ax.get_ylim()
ax.vlines(deciles, [0], 0.05, transform=trans, color='k')
ax.set_xlabel(name[0])
ax.set_ylim(ylim)
# prevent x-axis ticks from overlapping
ax.xaxis.set_major_locator(MaxNLocator(nbins=6, prune='lower'))
tick_formatter = ScalarFormatter()
tick_formatter.set_powerlimits((-3, 4))
ax.xaxis.set_major_formatter(tick_formatter)
if len(axes) > 1:
# two-way PDP - y-axis deciles + labels
deciles = mquantiles(X[:, fx[1]], prob=np.arange(0.1, 1.0, 0.1))
trans = transforms.blended_transform_factory(ax.transAxes,
ax.transData)
xlim = ax.get_xlim()
ax.hlines(deciles, [0], 0.05, transform=trans, color='k')
ax.set_ylabel(name[1])
# hline erases xlim
ax.set_xlim(xlim)
else:
ax.set_ylabel('Partial dependence')
if len(axes) == 1:
ax.set_ylim(pdp_lim[1])
axs.append(ax)
fig.subplots_adjust(bottom=0.15, top=0.7, left=0.1, right=0.95, wspace=0.4,
hspace=0.3)
return fig, axs
|
bsd-3-clause
|
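This `ensemble.partial_dependence` module was deprecated and later removed in favor of `sklearn.inspection`. A hedged sketch of the equivalent calls with the modern API (assuming scikit-learn >= 1.0; the exact return keys differ slightly across versions):

```python
# Sketch only: modern replacement for the functions above (assumes
# scikit-learn >= 1.0, where sklearn.inspection provides both APIs).
from sklearn.datasets import make_friedman1
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.inspection import PartialDependenceDisplay, partial_dependence

X, y = make_friedman1(random_state=0)
clf = GradientBoostingRegressor(n_estimators=10).fit(X, y)

# Raw partial dependence of the prediction on feature 0, on a coarse grid.
pd_result = partial_dependence(clf, X, features=[0], grid_resolution=5)
print(pd_result["average"].shape)

# One-way and two-way plots, analogous to plot_partial_dependence above.
PartialDependenceDisplay.from_estimator(clf, X, features=[0, (0, 1)])
```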
nrhine1/scikit-learn
|
examples/cluster/plot_digits_agglomeration.py
|
377
|
1694
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Feature agglomeration
=========================================================
These images show how similar features are merged together using
feature agglomeration.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets, cluster
from sklearn.feature_extraction.image import grid_to_graph
digits = datasets.load_digits()
images = digits.images
X = np.reshape(images, (len(images), -1))
connectivity = grid_to_graph(*images[0].shape)
agglo = cluster.FeatureAgglomeration(connectivity=connectivity,
n_clusters=32)
agglo.fit(X)
X_reduced = agglo.transform(X)
X_restored = agglo.inverse_transform(X_reduced)
images_restored = np.reshape(X_restored, images.shape)
plt.figure(1, figsize=(4, 3.5))
plt.clf()
plt.subplots_adjust(left=.01, right=.99, bottom=.01, top=.91)
for i in range(4):
plt.subplot(3, 4, i + 1)
plt.imshow(images[i], cmap=plt.cm.gray, vmax=16, interpolation='nearest')
plt.xticks(())
plt.yticks(())
if i == 1:
plt.title('Original data')
plt.subplot(3, 4, 4 + i + 1)
plt.imshow(images_restored[i], cmap=plt.cm.gray, vmax=16,
interpolation='nearest')
if i == 1:
plt.title('Agglomerated data')
plt.xticks(())
plt.yticks(())
plt.subplot(3, 4, 10)
plt.imshow(np.reshape(agglo.labels_, images[0].shape),
interpolation='nearest', cmap=plt.cm.spectral)
plt.xticks(())
plt.yticks(())
plt.title('Labels')
plt.show()
|
bsd-3-clause
|
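A quick, hedged shape check of the agglomeration round-trip used above: the 64 pixel features are pooled into 32 clusters and then projected back onto the pixel grid.

```python
# Minimal shape check of the agglomeration round-trip in the example above.
from sklearn import cluster, datasets
from sklearn.feature_extraction.image import grid_to_graph

digits = datasets.load_digits()
images = digits.images
X = images.reshape(len(images), -1)                    # (n_samples, 64)

connectivity = grid_to_graph(*images[0].shape)
agglo = cluster.FeatureAgglomeration(connectivity=connectivity, n_clusters=32)
X_reduced = agglo.fit_transform(X)                     # (n_samples, 32)
X_restored = agglo.inverse_transform(X_reduced)        # (n_samples, 64)
print(X.shape, X_reduced.shape, X_restored.shape)
```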
LohithBlaze/scikit-learn
|
examples/ensemble/plot_random_forest_embedding.py
|
286
|
3531
|
"""
=========================================================
Hashing feature transformation using Totally Random Trees
=========================================================
RandomTreesEmbedding provides a way to map data to a
very high-dimensional, sparse representation, which might
be beneficial for classification.
The mapping is completely unsupervised and very efficient.
This example visualizes the partitions given by several
trees and shows how the transformation can also be used for
non-linear dimensionality reduction or non-linear classification.
Neighboring points often share the same leaf of a tree and therefore share
large parts of their hashed representation. This makes it possible to
separate two concentric circles simply based on the principal components of
the transformed data.
In high-dimensional spaces, linear classifiers often achieve
excellent accuracy. For sparse binary data, BernoulliNB
is particularly well-suited. The bottom row compares the
decision boundary obtained by BernoulliNB in the transformed
space with an ExtraTreesClassifier forest learned on the
original data.
"""
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_circles
from sklearn.ensemble import RandomTreesEmbedding, ExtraTreesClassifier
from sklearn.decomposition import TruncatedSVD
from sklearn.naive_bayes import BernoulliNB
# make a synthetic dataset
X, y = make_circles(factor=0.5, random_state=0, noise=0.05)
# use RandomTreesEmbedding to transform data
hasher = RandomTreesEmbedding(n_estimators=10, random_state=0, max_depth=3)
X_transformed = hasher.fit_transform(X)
# Visualize result after dimensionality reduction using truncated SVD
pca = TruncatedSVD(n_components=2)
X_reduced = pca.fit_transform(X_transformed)
# Learn a Naive Bayes classifier on the transformed data
nb = BernoulliNB()
nb.fit(X_transformed, y)
# Learn an ExtraTreesClassifier for comparison
trees = ExtraTreesClassifier(max_depth=3, n_estimators=10, random_state=0)
trees.fit(X, y)
# scatter plot of original and reduced data
fig = plt.figure(figsize=(9, 8))
ax = plt.subplot(221)
ax.scatter(X[:, 0], X[:, 1], c=y, s=50)
ax.set_title("Original Data (2d)")
ax.set_xticks(())
ax.set_yticks(())
ax = plt.subplot(222)
ax.scatter(X_reduced[:, 0], X_reduced[:, 1], c=y, s=50)
ax.set_title("PCA reduction (2d) of transformed data (%dd)" %
X_transformed.shape[1])
ax.set_xticks(())
ax.set_yticks(())
# Plot the decision in original space. For that, we will assign a color to each
# point in the mesh [x_min, x_max] x [y_min, y_max].
h = .01
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
# transform grid using RandomTreesEmbedding
transformed_grid = hasher.transform(np.c_[xx.ravel(), yy.ravel()])
y_grid_pred = nb.predict_proba(transformed_grid)[:, 1]
ax = plt.subplot(223)
ax.set_title("Naive Bayes on Transformed data")
ax.pcolormesh(xx, yy, y_grid_pred.reshape(xx.shape))
ax.scatter(X[:, 0], X[:, 1], c=y, s=50)
ax.set_ylim(-1.4, 1.4)
ax.set_xlim(-1.4, 1.4)
ax.set_xticks(())
ax.set_yticks(())
# transform grid using ExtraTreesClassifier
y_grid_pred = trees.predict_proba(np.c_[xx.ravel(), yy.ravel()])[:, 1]
ax = plt.subplot(224)
ax.set_title("ExtraTrees predictions")
ax.pcolormesh(xx, yy, y_grid_pred.reshape(xx.shape))
ax.scatter(X[:, 0], X[:, 1], c=y, s=50)
ax.set_ylim(-1.4, 1.4)
ax.set_xlim(-1.4, 1.4)
ax.set_xticks(())
ax.set_yticks(())
plt.tight_layout()
plt.show()
|
bsd-3-clause
|
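A short, hedged check of the hashing claim above: the embedding is a sparse binary indicator matrix with one column per leaf across all trees.

```python
# Quick check of the hashing idea above: the transformed data is a sparse,
# binary indicator matrix (one column per leaf over all trees).
from sklearn.datasets import make_circles
from sklearn.ensemble import RandomTreesEmbedding

X, y = make_circles(factor=0.5, random_state=0, noise=0.05)
hasher = RandomTreesEmbedding(n_estimators=10, random_state=0, max_depth=3)
X_transformed = hasher.fit_transform(X)

print("embedding shape:", X_transformed.shape)              # (n_samples, n_leaves)
print("all stored values are 1.0:", set(X_transformed.data) == {1.0})
```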
blond-admin/BLonD
|
__EXAMPLES/mpi_main_files/EX_05_Wake_impedance.py
|
2
|
12145
|
# Copyright 2014-2017 CERN. This software is distributed under the
# terms of the GNU General Public Licence version 3 (GPL Version 3),
# copied verbatim in the file LICENCE.md.
# In applying this licence, CERN does not waive the privileges and immunities
# granted to it by virtue of its status as an Intergovernmental Organization or
# submit itself to any jurisdiction.
# Project website: http://blond.web.cern.ch/
'''
SPS simulation with intensity effects in time and frequency domains using
a table of resonators. The input beam has been cloned to show that the
methods are equivalent (compare the figure folders). Note that to create an
exact clone of the beam, the option seed=0 in the generation has been used.
This script also shows an example of how to use the SliceMonitor class (check
the corresponding h5 files).
:Authors: **Danilo Quartullo**
'''
from __future__ import division, print_function
import numpy as np
import matplotlib.pyplot as plt
from blond.input_parameters.ring import Ring
from blond.input_parameters.rf_parameters import RFStation
from blond.trackers.tracker import RingAndRFTracker
from blond.beam.distributions import bigaussian
from blond.monitors.monitors import BunchMonitor
from blond.beam.profile import Profile, CutOptions, FitOptions
from blond.impedances.impedance import InducedVoltageTime, InducedVoltageFreq
from blond.impedances.impedance import InducedVoltageResonator, TotalInducedVoltage
from blond.impedances.induced_voltage_analytical import analytical_gaussian_resonator
from blond.beam.beam import Beam, Proton
from blond.plots.plot import Plot
from blond.plots.plot_impedance import plot_induced_voltage_vs_bin_centers
from blond.impedances.impedance_sources import Resonators
import os
from blond.utils import bmath as bm
from blond.utils.mpi_config import worker, mpiprint
bm.use_mpi()
print = mpiprint
this_directory = os.path.dirname(os.path.realpath(__file__)) + '/'
try:
os.mkdir(this_directory + '../mpi_output_files')
except:
pass
try:
os.mkdir(this_directory + '../mpi_output_files/EX_05_fig')
except:
pass
# SIMULATION PARAMETERS -------------------------------------------------------
# Beam parameters
n_particles = 1e10
n_macroparticles = 5*1e6
tau_0 = 2e-9 # [s]
# Machine and RF parameters
gamma_transition = 1/np.sqrt(0.00192) # [1]
C = 6911.56 # [m]
# Tracking details
n_turns = 2
dt_plt = 1
# Derived parameters
sync_momentum = 25.92e9 # [eV / c]
momentum_compaction = 1 / gamma_transition**2 # [1]
# Cavities parameters
n_rf_systems = 1
harmonic_number = 4620
voltage_program = 0.9e6 # [V]
phi_offset = 0.0
# DEFINE RING------------------------------------------------------------------
general_params = Ring(C, momentum_compaction,
sync_momentum, Proton(), n_turns)
general_params_freq = Ring(C, momentum_compaction,
sync_momentum, Proton(), n_turns)
general_params_res = Ring(C, momentum_compaction,
sync_momentum, Proton(), n_turns)
RF_sct_par = RFStation(general_params, [harmonic_number],
[voltage_program], [phi_offset], n_rf_systems)
RF_sct_par_freq = RFStation(general_params_freq,
[harmonic_number], [voltage_program],
[phi_offset], n_rf_systems)
RF_sct_par_res = RFStation(general_params_res,
[harmonic_number], [voltage_program],
[phi_offset], n_rf_systems)
my_beam = Beam(general_params, n_macroparticles, n_particles)
my_beam_freq = Beam(general_params_freq, n_macroparticles, n_particles)
my_beam_res = Beam(general_params_res, n_macroparticles, n_particles)
ring_RF_section = RingAndRFTracker(RF_sct_par, my_beam)
ring_RF_section_freq = RingAndRFTracker(RF_sct_par_freq, my_beam_freq)
ring_RF_section_res = RingAndRFTracker(RF_sct_par_res, my_beam_res)
# DEFINE BEAM------------------------------------------------------------------
bigaussian(general_params, RF_sct_par, my_beam, tau_0/4,
seed=1)
bigaussian(general_params_freq, RF_sct_par_freq, my_beam_freq,
tau_0/4, seed=1)
bigaussian(general_params_res, RF_sct_par_res, my_beam_res,
tau_0/4, seed=1)
number_slices = 2**8
cut_options = CutOptions(cut_left= 0, cut_right=2*np.pi, n_slices=number_slices,
RFSectionParameters=RF_sct_par, cuts_unit = 'rad')
slice_beam = Profile(my_beam, cut_options, FitOptions(fit_option='gaussian'))
cut_options_freq = CutOptions(cut_left= 0, cut_right=2*np.pi, n_slices=number_slices,
RFSectionParameters=RF_sct_par_freq, cuts_unit = 'rad')
slice_beam_freq = Profile(my_beam_freq, cut_options_freq, FitOptions(fit_option='gaussian'))
cut_options_res = CutOptions(cut_left= 0, cut_right=2*np.pi, n_slices=number_slices,
RFSectionParameters=ring_RF_section_res, cuts_unit = 'rad')
slice_beam_res = Profile(my_beam_res, cut_options_res, FitOptions(fit_option='gaussian'))
slice_beam.track()
slice_beam_freq.track()
slice_beam_res.track()
# LOAD IMPEDANCE TABLE--------------------------------------------------------
table = np.loadtxt(this_directory + '../input_files/EX_05_new_HQ_table.dat', comments = '!')
R_shunt = table[:, 2] * 10**6
f_res = table[:, 0] * 10**9
Q_factor = table[:, 1]
resonator = Resonators(R_shunt, f_res, Q_factor)
ind_volt_time = InducedVoltageTime(my_beam, slice_beam, [resonator])
ind_volt_freq = InducedVoltageFreq(my_beam_freq, slice_beam_freq, [resonator], 1e5)
ind_volt_res = InducedVoltageResonator(my_beam_res,slice_beam_res,resonator)
tot_vol = TotalInducedVoltage(my_beam, slice_beam, [ind_volt_time])
tot_vol_freq = TotalInducedVoltage(my_beam_freq, slice_beam_freq,
[ind_volt_freq])
tot_vol_res = TotalInducedVoltage(my_beam_res, slice_beam_res,
[ind_volt_res])
# Analytic result-----------------------------------------------------------
VindGauss = np.zeros(len(slice_beam.bin_centers))
for r in range(len(Q_factor)):
    # Note that the time argument of analytical_gaussian_resonator is shifted by
    # mean(slice_beam.bin_centers), because the analytical equation assumes the
    # Gaussian to be centered at t=0, whereas the line density is centered at
    # mean(slice_beam.bin_centers)
tmp = analytical_gaussian_resonator(tau_0/4, \
Q_factor[r],R_shunt[r],2*np.pi*f_res[r], \
slice_beam.bin_centers - np.mean(slice_beam.bin_centers), \
my_beam.intensity)
VindGauss += tmp.real
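# Editor's sketch (not part of the original example): a small helper, defined but not
# called here, to quantify how closely a numerically computed induced voltage matches
# the analytic Gaussian-resonator result accumulated in VindGauss above, e.g.
# _relative_rms_difference(tot_vol.induced_voltage, VindGauss) after tracking.
def _relative_rms_difference(v_numeric, v_analytic):
    """Return ||v_numeric - v_analytic|| / ||v_analytic|| as a plain float."""
    v_numeric = np.asarray(v_numeric, dtype=float)
    v_analytic = np.asarray(v_analytic, dtype=float)
    return float(np.linalg.norm(v_numeric - v_analytic) / np.linalg.norm(v_analytic))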
# ACCELERATION MAP-------------------------------------------------------------
map_ = [tot_vol] + [ring_RF_section] + [slice_beam]
map_freq = [tot_vol_freq] + [ring_RF_section_freq] + [slice_beam_freq]
map_res = [tot_vol_res] + [ring_RF_section_res] + [slice_beam_res]
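# Editor's note: each map above is a plain list of trackable objects; in the tracking
# loop further down, every element's track() method is called once per turn, in list
# order (induced-voltage update, RF tracker, then beam profile).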
if worker.isMaster:
# MONITOR----------------------------------------------------------------------
bunchmonitor = BunchMonitor(general_params, ring_RF_section, my_beam,
this_directory + '../mpi_output_files/EX_05_output_data',
Profile=slice_beam, buffer_time=1)
bunchmonitor_freq = BunchMonitor(general_params_freq, ring_RF_section_freq,
my_beam_freq, this_directory + '../mpi_output_files/EX_05_output_data_freq',
Profile=slice_beam_freq, buffer_time=1)
bunchmonitor_res = BunchMonitor(general_params_res, ring_RF_section_res,
my_beam_res, this_directory + '../mpi_output_files/EX_05_output_data_res',
Profile=slice_beam_res, buffer_time=1)
# PLOTS
format_options = {'dirname': this_directory + '../mpi_output_files/EX_05_fig/1', 'linestyle': '.'}
plots = Plot(general_params, RF_sct_par, my_beam, dt_plt, n_turns, 0,
0.0014*harmonic_number, -1.5e8, 1.5e8, xunit='rad',
separatrix_plot=True, Profile=slice_beam,
h5file=this_directory + '../mpi_output_files/EX_05_output_data',
histograms_plot=True, sampling=50, format_options=format_options)
format_options = {'dirname': this_directory + '../mpi_output_files/EX_05_fig/2', 'linestyle': '.'}
plots_freq = Plot(general_params_freq, RF_sct_par_freq, my_beam_freq, dt_plt,
n_turns, 0, 0.0014*harmonic_number, -1.5e8, 1.5e8,
xunit='rad', separatrix_plot=True, Profile=slice_beam_freq,
h5file=this_directory + '../mpi_output_files/EX_05_output_data_freq',
histograms_plot=True, sampling=50,
format_options=format_options)
format_options = {'dirname': this_directory + '../mpi_output_files/EX_05_fig/3', 'linestyle': '.'}
plots_res = Plot(general_params_res, RF_sct_par_res, my_beam_res, dt_plt,
n_turns, 0, 0.0014*harmonic_number, -1.5e8, 1.5e8,
xunit='rad', separatrix_plot=True, Profile=slice_beam_res,
h5file=this_directory + '../mpi_output_files/EX_05_output_data_res',
histograms_plot=True, sampling=50,
format_options=format_options)
map_ += [bunchmonitor, plots]
map_freq += [bunchmonitor_freq, plots_freq]
map_res += [bunchmonitor_res, plots_res]
# For testing purposes
test_string = ''
test_string += '{:<17}\t{:<17}\t{:<17}\t{:<17}\n'.format(
'mean_dE', 'std_dE', 'mean_dt', 'std_dt')
test_string += '{:+10.10e}\t{:+10.10e}\t{:+10.10e}\t{:+10.10e}\n'.format(
np.mean(my_beam.dE), np.std(my_beam.dE), np.mean(my_beam.dt), np.std(my_beam.dt))
# TRACKING + PLOTS-------------------------------------------------------------
my_beam.split()
my_beam_freq.split()
my_beam_res.split()
for i in np.arange(1, n_turns+1):
print(i)
for m in map_:
m.track()
for m in map_freq:
m.track()
for m in map_res:
m.track()
# Plots
if (i % dt_plt) == 0 and (worker.isMaster):
plot_induced_voltage_vs_bin_centers(i, general_params, tot_vol,
style='.', dirname=this_directory + '../mpi_output_files/EX_05_fig/1')
plot_induced_voltage_vs_bin_centers(i, general_params_freq,
tot_vol_freq, style='.', dirname=this_directory + '../mpi_output_files/EX_05_fig/2')
plot_induced_voltage_vs_bin_centers(i, general_params_res,
tot_vol_res, style='.', dirname=this_directory + '../mpi_output_files/EX_05_fig/3')
my_beam.gather()
my_beam_freq.gather()
my_beam_res.gather()
worker.finalize()
# Plotting induced voltages---------------------------------------------------
plt.clf()
plt.ylabel("induced voltage [arb. unit]")
plt.xlabel("time [ns]")
plt.plot(1e9*slice_beam.bin_centers,tot_vol.induced_voltage,label='Time')
plt.plot(1e9*slice_beam_freq.bin_centers,tot_vol_freq.induced_voltage,\
label='Freq')
plt.plot(1e9*slice_beam_res.bin_centers,tot_vol_res.induced_voltage,\
label='Resonator')
plt.plot(1e9*slice_beam.bin_centers,VindGauss,label='Analytic')
plt.legend()
dirname=this_directory + '../mpi_output_files/EX_05_fig'
fign = dirname +'/comparison_induced_voltage.png'
plt.savefig(fign)
# For testing purposes
test_string += '{:+10.10e}\t{:+10.10e}\t{:+10.10e}\t{:+10.10e}\n'.format(
np.mean(my_beam.dE), np.std(my_beam.dE), np.mean(my_beam.dt), np.std(my_beam.dt))
with open(this_directory + '../mpi_output_files/EX_05_test_data.txt', 'w') as f:
f.write(test_string)
print("Done!")
|
gpl-3.0
|
sanketloke/scikit-learn
|
examples/classification/plot_classifier_comparison.py
|
45
|
5123
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=====================
Classifier comparison
=====================
A comparison of several classifiers in scikit-learn on synthetic datasets.
The point of this example is to illustrate the nature of decision boundaries
of different classifiers.
This should be taken with a grain of salt, as the intuition conveyed by
these examples does not necessarily carry over to real datasets.
Particularly in high-dimensional spaces, data can more easily be separated
linearly and the simplicity of classifiers such as naive Bayes and linear SVMs
might lead to better generalization than is achieved by other classifiers.
The plots show training points in solid colors and testing points
semi-transparent. The lower right shows the classification accuracy on the test
set.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Andreas Müller
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import make_moons, make_circles, make_classification
from sklearn.neural_network import MLPClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import RBF
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
h = .02 # step size in the mesh
names = ["Nearest Neighbors", "Linear SVM", "RBF SVM", "Gaussian Process",
"Decision Tree", "Random Forest", "Neural Net", "AdaBoost",
"Naive Bayes", "QDA"]
classifiers = [
KNeighborsClassifier(3),
SVC(kernel="linear", C=0.025),
SVC(gamma=2, C=1),
GaussianProcessClassifier(1.0 * RBF(1.0), warm_start=True),
DecisionTreeClassifier(max_depth=5),
RandomForestClassifier(max_depth=5, n_estimators=10, max_features=1),
MLPClassifier(alpha=1),
AdaBoostClassifier(),
GaussianNB(),
QuadraticDiscriminantAnalysis()]
X, y = make_classification(n_features=2, n_redundant=0, n_informative=2,
random_state=1, n_clusters_per_class=1)
rng = np.random.RandomState(2)
X += 2 * rng.uniform(size=X.shape)
linearly_separable = (X, y)
datasets = [make_moons(noise=0.3, random_state=0),
make_circles(noise=0.2, factor=0.5, random_state=1),
linearly_separable
]
figure = plt.figure(figsize=(27, 9))
i = 1
# iterate over datasets
for ds_cnt, ds in enumerate(datasets):
# preprocess dataset, split into training and test part
X, y = ds
X = StandardScaler().fit_transform(X)
X_train, X_test, y_train, y_test = \
train_test_split(X, y, test_size=.4, random_state=42)
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
# just plot the dataset first
cm = plt.cm.RdBu
cm_bright = ListedColormap(['#FF0000', '#0000FF'])
ax = plt.subplot(len(datasets), len(classifiers) + 1, i)
if ds_cnt == 0:
ax.set_title("Input data")
# Plot the training points
ax.scatter(X_train[:, 0], X_train[:, 1], c=y_train, cmap=cm_bright)
# and testing points
ax.scatter(X_test[:, 0], X_test[:, 1], c=y_test, cmap=cm_bright, alpha=0.6)
ax.set_xlim(xx.min(), xx.max())
ax.set_ylim(yy.min(), yy.max())
ax.set_xticks(())
ax.set_yticks(())
i += 1
# iterate over classifiers
for name, clf in zip(names, classifiers):
ax = plt.subplot(len(datasets), len(classifiers) + 1, i)
clf.fit(X_train, y_train)
score = clf.score(X_test, y_test)
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
if hasattr(clf, "decision_function"):
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
else:
Z = clf.predict_proba(np.c_[xx.ravel(), yy.ravel()])[:, 1]
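        # Editor's note: decision_function returns a signed distance to the separating
        # surface, while predict_proba[:, 1] returns the probability of the positive
        # class; either one gives a scalar field whose level set traces the boundary.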
# Put the result into a color plot
Z = Z.reshape(xx.shape)
ax.contourf(xx, yy, Z, cmap=cm, alpha=.8)
# Plot also the training points
ax.scatter(X_train[:, 0], X_train[:, 1], c=y_train, cmap=cm_bright)
# and testing points
ax.scatter(X_test[:, 0], X_test[:, 1], c=y_test, cmap=cm_bright,
alpha=0.6)
ax.set_xlim(xx.min(), xx.max())
ax.set_ylim(yy.min(), yy.max())
ax.set_xticks(())
ax.set_yticks(())
if ds_cnt == 0:
ax.set_title(name)
ax.text(xx.max() - .3, yy.min() + .3, ('%.2f' % score).lstrip('0'),
size=15, horizontalalignment='right')
i += 1
plt.tight_layout()
plt.show()
|
bsd-3-clause
|
Fireblend/scikit-learn
|
sklearn/utils/tests/test_estimator_checks.py
|
202
|
3757
|
import scipy.sparse as sp
import numpy as np
import sys
from sklearn.externals.six.moves import cStringIO as StringIO
from sklearn.base import BaseEstimator, ClassifierMixin
from sklearn.utils.testing import assert_raises_regex, assert_true
from sklearn.utils.estimator_checks import check_estimator
from sklearn.utils.estimator_checks import check_estimators_unfitted
from sklearn.linear_model import LogisticRegression
from sklearn.utils.validation import check_X_y, check_array
class CorrectNotFittedError(ValueError):
"""Exception class to raise if estimator is used before fitting.
Like NotFittedError, it inherits from ValueError, but not from
AttributeError. Used for testing only.
"""
class BaseBadClassifier(BaseEstimator, ClassifierMixin):
def fit(self, X, y):
return self
def predict(self, X):
return np.ones(X.shape[0])
class NoCheckinPredict(BaseBadClassifier):
def fit(self, X, y):
X, y = check_X_y(X, y)
return self
class NoSparseClassifier(BaseBadClassifier):
def fit(self, X, y):
X, y = check_X_y(X, y, accept_sparse=['csr', 'csc'])
if sp.issparse(X):
raise ValueError("Nonsensical Error")
return self
def predict(self, X):
X = check_array(X)
return np.ones(X.shape[0])
class CorrectNotFittedErrorClassifier(BaseBadClassifier):
def fit(self, X, y):
X, y = check_X_y(X, y)
self.coef_ = np.ones(X.shape[1])
return self
def predict(self, X):
if not hasattr(self, 'coef_'):
raise CorrectNotFittedError("estimator is not fitted yet")
X = check_array(X)
return np.ones(X.shape[0])
def test_check_estimator():
# tests that the estimator actually fails on "bad" estimators.
# not a complete test of all checks, which are very extensive.
# check that we have a set_params and can clone
msg = "it does not implement a 'get_params' methods"
assert_raises_regex(TypeError, msg, check_estimator, object)
# check that we have a fit method
msg = "object has no attribute 'fit'"
assert_raises_regex(AttributeError, msg, check_estimator, BaseEstimator)
# check that fit does input validation
msg = "TypeError not raised by fit"
assert_raises_regex(AssertionError, msg, check_estimator, BaseBadClassifier)
# check that predict does input validation (doesn't accept dicts in input)
msg = "Estimator doesn't check for NaN and inf in predict"
assert_raises_regex(AssertionError, msg, check_estimator, NoCheckinPredict)
# check for sparse matrix input handling
msg = "Estimator type doesn't seem to fail gracefully on sparse data"
# the check for sparse input handling prints to the stdout,
# instead of raising an error, so as not to remove the original traceback.
# that means we need to jump through some hoops to catch it.
old_stdout = sys.stdout
string_buffer = StringIO()
sys.stdout = string_buffer
try:
check_estimator(NoSparseClassifier)
except:
pass
finally:
sys.stdout = old_stdout
assert_true(msg in string_buffer.getvalue())
# doesn't error on actual estimator
check_estimator(LogisticRegression)
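# Editor's sketch (illustrative, not part of the original test module): the
# stdout-capturing pattern used in test_check_estimator above, factored into a
# standalone helper.
def _run_capturing_stdout(func, *args, **kwargs):
    """Run func, swallowing any exception, and return whatever it printed."""
    old_stdout = sys.stdout
    string_buffer = StringIO()
    sys.stdout = string_buffer
    try:
        func(*args, **kwargs)
    except Exception:
        pass
    finally:
        sys.stdout = old_stdout
    return string_buffer.getvalue()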
def test_check_estimators_unfitted():
# check that a ValueError/AttributeError is raised when calling predict
# on an unfitted estimator
msg = "AttributeError or ValueError not raised by predict"
assert_raises_regex(AssertionError, msg, check_estimators_unfitted,
"estimator", NoSparseClassifier)
# check that CorrectNotFittedError inherit from either ValueError
# or AttributeError
check_estimators_unfitted("estimator", CorrectNotFittedErrorClassifier)
|
bsd-3-clause
|
Minhmo/tardis
|
tardis/model.py
|
1
|
18244
|
# This module contains the model class
import logging
import os
import numpy as np
import pandas as pd
from astropy import constants, units as u
from util import intensity_black_body
from tardis.plasma.standard_plasmas import LegacyPlasmaArray
logger = logging.getLogger(__name__)
c = constants.c.cgs.value
h = constants.h.cgs.value
kb = constants.k_B.cgs.value
class Radial1DModel(object):
"""
    Class to hold the states of the individual shells: the state of the plasma (as a `~plasma.BasePlasma` object or one of its subclasses),
    the plasma parameters (e.g. temperature, dilution factor), and the dimensions of the shells.
Parameters
----------
tardis_configuration : `tardis.config_reader.Configuration`
velocities : `np.ndarray`
        an array with n+1 (for n shells) velocities (in cm/s) for each of the boundaries (velocities[0] describing
        the inner boundary and velocities[-1] the outer boundary)
densities : `np.ndarray`
an array with n densities - being the density mid-shell (assumed for the whole shell)
abundances : `list` or `dict`
a dictionary for uniform abundances throughout all shells, e.g. dict(Fe=0.5, Si=0.5)
For a different abundance for each shell list of abundance dictionaries.
time_explosion : `float`
time since explosion in seconds
atom_data : `~tardis.atom_data.AtomData` class or subclass
Containing the atom data needed for the plasma calculations
ws : `None` or `list`-like
        ws can only be specified for plasma_type 'nebular'. If `None` is specified at first initialization, the class
        calculates an initial geometric dilution factor. When a list is given, positive values are accepted as-is,
        whereas negative values trigger the geometric calculation
plasma_type : `str`
plasma type currently supports 'lte' (using `tardis.plasma.LTEPlasma`)
or 'nebular' (using `tardis.plasma.NebularPlasma`)
initial_t_rad : `float`-like or `list`-like
initial radiative temperature for each shell, if a scalar is specified it initializes with a uniform
temperature for all shells
"""
@classmethod
def from_h5(cls, buffer_or_fname):
raise NotImplementedError("This is currently not implemented")
def __init__(self, tardis_config):
#final preparation for configuration object
self.tardis_config = tardis_config
self.atom_data = tardis_config.atom_data
selected_atomic_numbers = self.tardis_config.abundances.index
self.atom_data.prepare_atom_data(
selected_atomic_numbers,
line_interaction_type=tardis_config.plasma.line_interaction_type,
nlte_species=tardis_config.plasma.nlte.species)
if tardis_config.plasma.ionization == 'nebular':
if not self.atom_data.has_zeta_data:
raise ValueError("Requiring Recombination coefficients Zeta "
"for 'nebular' plasma ionization")
self.t_inner = tardis_config.plasma.t_inner
self.ws = self.calculate_geometric_w(
tardis_config.structure.r_middle,
tardis_config.structure.r_inner[0])
if tardis_config.plasma.t_rads is None:
self.t_rads = self._init_t_rad(
self.t_inner, tardis_config.structure.v_inner[0], self.v_middle)
else:
self.t_rads = tardis_config.plasma.t_rads
heating_rate_data_file = getattr(
tardis_config.plasma, 'heating_rate_data_file', None)
self.plasma_array = LegacyPlasmaArray(
tardis_config.number_densities, tardis_config.atom_data,
tardis_config.supernova.time_explosion.to('s').value,
nlte_config=tardis_config.plasma.nlte,
delta_treatment=tardis_config.plasma.delta_treatment,
ionization_mode=tardis_config.plasma.ionization,
excitation_mode=tardis_config.plasma.excitation,
line_interaction_type=tardis_config.plasma.line_interaction_type,
link_t_rad_t_electron=0.9,
helium_treatment=tardis_config.plasma.helium_treatment,
heating_rate_data_file=heating_rate_data_file,
v_inner=tardis_config.structure.v_inner,
v_outer=tardis_config.structure.v_outer)
self.spectrum = TARDISSpectrum(
tardis_config.spectrum.frequency, tardis_config.supernova.distance)
self.spectrum_virtual = TARDISSpectrum(
tardis_config.spectrum.frequency, tardis_config.supernova.distance)
self.spectrum_reabsorbed = TARDISSpectrum(
tardis_config.spectrum.frequency, tardis_config.supernova.distance)
self.calculate_j_blues(init_detailed_j_blues=True)
self.update_plasmas(initialize_nlte=True)
@property
def line_interaction_type(self):
return self._line_interaction_type
@line_interaction_type.setter
def line_interaction_type(self, value):
if value in ['scatter', 'downbranch', 'macroatom']:
self._line_interaction_type = value
self.tardis_config.plasma.line_interaction_type = value
#final preparation for atom_data object - currently building data
self.atom_data.prepare_atom_data(
self.tardis_config.number_densities.columns,
line_interaction_type=self.line_interaction_type,
max_ion_number=None,
nlte_species=self.tardis_config.plasma.nlte.species)
else:
raise ValueError('line_interaction_type can only be '
'"scatter", "downbranch", or "macroatom"')
@property
def t_inner(self):
return self._t_inner
@property
def v_middle(self):
structure = self.tardis_config.structure
return 0.5 * (structure.v_inner + structure.v_outer)
@t_inner.setter
def t_inner(self, value):
self._t_inner = value
self.luminosity_inner = (
4 * np.pi * constants.sigma_sb.cgs *
self.tardis_config.structure.r_inner[0] ** 2
* self.t_inner ** 4).to('erg/s')
self.time_of_simulation = (1.0 * u.erg / self.luminosity_inner)
self.j_blues_norm_factor = (
constants.c.cgs * self.tardis_config.supernova.time_explosion /
(4 * np.pi * self.time_of_simulation *
self.tardis_config.structure.volumes))
@staticmethod
def calculate_geometric_w(r, r_inner):
return 0.5 * (1 - np.sqrt(1 - (r_inner ** 2 / r ** 2).to(1).value))
@staticmethod
def _init_t_rad(t_inner, v_boundary, v_middle):
lambda_wien_inner = constants.b_wien / t_inner
return constants.b_wien / (
lambda_wien_inner * (1 + (v_middle - v_boundary) / constants.c))
def calculate_j_blues(self, init_detailed_j_blues=False):
nus = self.atom_data.lines.nu.values
radiative_rates_type = self.tardis_config.plasma.radiative_rates_type
w_epsilon = self.tardis_config.plasma.w_epsilon
if radiative_rates_type == 'blackbody':
            logger.info('Calculating J_blues for radiative_rates_type=blackbody')
j_blues = intensity_black_body(nus[np.newaxis].T, self.t_rads.value)
self.j_blues = pd.DataFrame(
j_blues, index=self.atom_data.lines.index,
columns=np.arange(len(self.t_rads)))
elif radiative_rates_type == 'dilute-blackbody' or init_detailed_j_blues:
logger.info('Calculating J_blues for radiative_rates_type=dilute-blackbody')
j_blues = self.ws * intensity_black_body(nus[np.newaxis].T, self.t_rads.value)
self.j_blues = pd.DataFrame(
j_blues, index=self.atom_data.lines.index,
columns=np.arange(len(self.t_rads)))
elif radiative_rates_type == 'detailed':
            logger.info('Calculating J_blues for radiative_rates_type=detailed')
self.j_blues = pd.DataFrame(
self.j_blue_estimators *
self.j_blues_norm_factor.value,
index=self.atom_data.lines.index,
columns=np.arange(len(self.t_rads)))
for i in xrange(self.tardis_config.structure.no_of_shells):
zero_j_blues = self.j_blues[i] == 0.0
self.j_blues[i][zero_j_blues] = (
w_epsilon * intensity_black_body(
self.atom_data.lines.nu[zero_j_blues].values,
self.t_rads.value[i]))
else:
            raise ValueError('radiative_rates_type unknown - %s' % radiative_rates_type)
def update_plasmas(self, initialize_nlte=False):
self.plasma_array.update_radiationfield(
self.t_rads.value, self.ws, self.j_blues,
self.tardis_config.plasma.nlte, initialize_nlte=initialize_nlte,
n_e_convergence_threshold=0.05)
if self.tardis_config.plasma.line_interaction_type in ('downbranch',
'macroatom'):
self.transition_probabilities = (
self.plasma_array.transition_probabilities)
def save_spectra(self, fname):
self.spectrum.to_ascii(fname)
self.spectrum_virtual.to_ascii('virtual_' + fname)
def to_hdf5(self, buffer_or_fname, path='', close_h5=True):
"""
        This allows the model to be written to an HDF5 file for later analysis. Currently, the saved properties
        are hard-coded in include_from_model_in_hdf5. This is a dict whose keys are the property names and whose
        values describe how each property is saved. If the value is None, the property is dumped via its own
        to_hdf attribute or, failing that, by converting it to a pandas object. For more complex properties
        which cannot simply be dumped to an HDF file, the value can be a function, which is called with
        the parameters key, path and hdf_store and should write the data to the given hdf_store object.
        To dump properties of sub-properties of the model, use a dict as the value; it is then treated
        in the same way as described above.
Parameters
----------
buffer_or_fname: buffer or ~str
buffer or filename for HDF5 file (see pandas.HDFStore for description)
path: ~str, optional
path in the HDF5 file
close_h5: ~bool
close the HDF5 file or not.
"""
        # Functions to save properties of the model without a to_hdf attribute and with no simple conversion to a pd.DataFrame.
        # These functions are always called with the parameters key, path and hdf_store.
def _save_luminosity_density(key, path, hdf_store):
luminosity_density = pd.DataFrame.from_dict(dict(wave=self.spectrum.wavelength.value,
flux=self.spectrum.luminosity_density_lambda.value))
luminosity_density.to_hdf(hdf_store, os.path.join(path, key))
def _save_spectrum_virtual(key, path, hdf_store):
if self.spectrum_virtual.luminosity_density_lambda is not None:
luminosity_density_virtual = pd.DataFrame.from_dict(dict(wave=self.spectrum_virtual.wavelength.value,
flux=self.spectrum_virtual.luminosity_density_lambda.value))
luminosity_density_virtual.to_hdf(hdf_store, os.path.join(path, key))
def _save_configuration_dict(key, path, hdf_store):
configuration_dict = dict(t_inner=self.t_inner.value,time_of_simulation=self.time_of_simulation)
configuration_dict_path = os.path.join(path, 'configuration')
pd.Series(configuration_dict).to_hdf(hdf_store, configuration_dict_path)
include_from_plasma_ = {'level_number_density': None, 'ion_number_density': None, 'tau_sobolevs': None,
'electron_densities': None,
't_rad': None, 'w': None}
include_from_runner_ = {'virt_packet_last_interaction_type': None, 'virt_packet_last_line_interaction_in_id': None,
'virt_packet_last_line_interaction_out_id': None, 'virt_packet_last_interaction_in_nu': None,
'virt_packet_nus': None, 'virt_packet_energies': None}
include_from_model_in_hdf5 = {'plasma_array': include_from_plasma_, 'j_blues': None,
'runner': include_from_runner_,
'last_line_interaction_in_id': None,
'last_line_interaction_out_id': None,
'last_line_interaction_shell_id': None, 'montecarlo_nu': None,
'luminosity_density': _save_luminosity_density,
'luminosity_density_virtual': _save_spectrum_virtual,
'configuration_dict': _save_configuration_dict,
'last_line_interaction_angstrom': None}
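        # Editor's note (grounded in the handling below): a value of None means the
        # property is dumped via its own to_hdf method or a pandas conversion, a
        # callable is invoked as fn(key, path, hdf_store), and a nested dict walks one
        # level into the named sub-property.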
if isinstance(buffer_or_fname, basestring):
hdf_store = pd.HDFStore(buffer_or_fname)
elif isinstance(buffer_or_fname, pd.HDFStore):
hdf_store = buffer_or_fname
else:
raise IOError('Please specify either a filename or an HDFStore')
logger.info('Writing to path %s', path)
def _get_hdf5_path(path, property_name):
return os.path.join(path, property_name)
def _to_smallest_pandas(object):
try:
return pd.Series(object)
except Exception:
return pd.DataFrame(object)
def _save_model_property(object, property_name, path, hdf_store):
property_path = _get_hdf5_path(path, property_name)
try:
object.to_hdf(hdf_store, property_path)
except AttributeError:
_to_smallest_pandas(object).to_hdf(hdf_store, property_path)
for key in include_from_model_in_hdf5:
if include_from_model_in_hdf5[key] is None:
_save_model_property(getattr(self, key), key, path, hdf_store)
elif callable(include_from_model_in_hdf5[key]):
include_from_model_in_hdf5[key](key, path, hdf_store)
else:
try:
for subkey in include_from_model_in_hdf5[key]:
if include_from_model_in_hdf5[key][subkey] is None:
_save_model_property(getattr(getattr(self, key), subkey), subkey, os.path.join(path, key),
hdf_store)
elif callable(include_from_model_in_hdf5[key][subkey]):
include_from_model_in_hdf5[key][subkey](subkey, os.path.join(path, key), hdf_store)
else:
logger.critical('Can not save %s', str(os.path.join(path, key, subkey)))
                except Exception:
logger.critical('An error occurred while dumping %s to HDF.', str(os.path.join(path, key)))
hdf_store.flush()
if close_h5:
hdf_store.close()
else:
return hdf_store
class TARDISSpectrum(object):
"""
TARDIS Spectrum object
"""
def __init__(self, frequency, distance=None):
self._frequency = frequency
self.wavelength = self.frequency.to('angstrom', u.spectral())
self.distance = distance
self.delta_frequency = frequency[1] - frequency[0]
self._flux_nu = np.zeros_like(frequency.value) * u.Unit('erg / (s Hz cm^2)')
self._flux_lambda = np.zeros_like(frequency.value) * u.Unit('erg / (s Angstrom cm^2)')
self.luminosity_density_nu = np.zeros_like(self.frequency) * u.Unit('erg / (s Hz)')
self.luminosity_density_lambda = np.zeros_like(self.frequency) * u.Unit('erg / (s Angstrom)')
@property
def frequency(self):
return self._frequency[:-1]
@property
def flux_nu(self):
if self.distance is None:
raise AttributeError('supernova distance not supplied - flux calculation impossible')
else:
return self._flux_nu
@property
def flux_lambda(self):
if self.distance is None:
raise AttributeError('supernova distance not supplied - flux calculation impossible')
return self._flux_lambda
def update_luminosity(self, spectrum_luminosity):
self.luminosity_density_nu = (spectrum_luminosity / self.delta_frequency).to('erg / (s Hz)')
self.luminosity_density_lambda = self.f_nu_to_f_lambda(self.luminosity_density_nu.value) \
* u.Unit('erg / (s Angstrom)')
if self.distance is not None:
self._flux_nu = (self.luminosity_density_nu / (4 * np.pi * self.distance.to('cm')**2))
self._flux_lambda = self.f_nu_to_f_lambda(self.flux_nu.value) * u.Unit('erg / (s Angstrom cm^2)')
def f_nu_to_f_lambda(self, f_nu):
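        # F_lambda = F_nu * nu**2 / c (cgs); the final 1e8 converts per-cm to per-Angstrom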
return f_nu * self.frequency.value**2 / constants.c.cgs.value / 1e8
def plot(self, ax, mode='wavelength'):
if mode == 'wavelength':
ax.plot(self.wavelength.value, self.flux_lambda.value)
ax.set_xlabel('Wavelength [%s]' % self.wavelength.unit._repr_latex_())
ax.set_ylabel('Flux [%s]' % self.flux_lambda.unit._repr_latex_())
def to_ascii(self, fname, mode='luminosity_density'):
if mode == 'luminosity_density':
np.savetxt(fname, zip(self.wavelength.value, self.luminosity_density_lambda.value))
elif mode == 'flux':
np.savetxt(fname, zip(self.wavelength.value, self.flux_lambda.value))
else:
raise NotImplementedError('only mode "luminosity_density" and "flux" are implemented')
|
bsd-3-clause
|
jeicher/cobrapy
|
setup.py
|
1
|
7781
|
from os.path import isfile, abspath, dirname, join
from sys import argv, path
# To temporarily modify sys.path
SETUP_DIR = abspath(dirname(__file__))
try:
from setuptools import setup, find_packages
except ImportError:
path.insert(0, SETUP_DIR)
import ez_setup
path.pop(0)
ez_setup.use_setuptools()
from setuptools import setup, find_packages
# for running parallel tests due to a bug in python 2.7.3
# http://bugs.python.org/issue15881#msg170215
try:
import multiprocessing
except:
None
# import version to get the version string
path.insert(0, join(SETUP_DIR, "cobra"))
from version import get_version, update_release_version
path.pop(0)
version = get_version(pep440=True)
# If building something for distribution, ensure the VERSION
# file is up to date
if "sdist" in argv or "bdist_wheel" in argv:
update_release_version()
# cython is optional for building. The c file can be used directly. However,
# for certain functions, the c file must be generated, which requires cython.
try:
from Cython.Build import cythonize
from distutils.version import StrictVersion
import Cython
try:
cython_version = StrictVersion(Cython.__version__)
except ValueError:
raise ImportError("Cython version not parseable")
else:
if cython_version < StrictVersion("0.21"):
raise ImportError("Cython version too old to use")
except ImportError:
cythonize = None
for k in ["sdist", "develop"]:
if k in argv:
raise Exception("Cython >= 0.21 required for " + k)
# Begin constructing arguments for building
setup_kwargs = {}
# for building the cglpk solver
try:
from distutils.extension import Extension
from distutils.command.build_ext import build_ext
from os import name
    from platform import system
    from warnings import warn
class FailBuild(build_ext):
"""allow building of the C extension to fail"""
def run(self):
try:
build_ext.run(self)
except Exception as e:
warn(e)
def build_extension(self, ext):
try:
build_ext.build_extension(self, ext)
except:
None
build_args = {}
setup_kwargs["cmdclass"] = {"build_ext": FailBuild}
# MAC OS X needs some additional configuration tweaks
# Build should be run with the python.org python
# Cython will output C which could generate warnings in clang
# due to the addition of additional unneeded functions. Because
# this is a known phenomenon, these warnings are silenced to
# make other potential warnings which do signal errors stand
# out.
if system() == "Darwin":
build_args["extra_compile_args"] = ["-Wno-unused-function"]
build_args["libraries"] = ["glpk"]
# It is possible to statically link libglpk to the built extension. This
# allows for simplified installation without the need to install libglpk to
    # the system, and is also useful when installing a particular version of
    # glpk which conflicts with the system version. A static libglpk.a can be
    # built by running configure with export CFLAGS="-fPIC" and copying the
# file from src/.libs to either the default lib directory or to the build
# directory. For an example script, see
# https://gist.github.com/aebrahim/94a2b231d86821f7f225
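    # For example (editor's illustration; the copy destination is a placeholder for
    # either location mentioned above):
    #     export CFLAGS="-fPIC"
    #     ./configure && make
    #     cp src/.libs/libglpk.a /path/to/cobrapy/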
include_dirs = []
library_dirs = []
if isfile("libglpk.a"):
library_dirs.append(abspath("."))
if isfile("glpk.h"):
include_dirs.append(abspath("."))
# if the glpk files are not in the current directory attempt to
# auto-detect their location by finding the location of the glpsol
# command
if name == "posix" and len(include_dirs) == 0 and len(library_dirs) == 0:
from subprocess import check_output
try:
glpksol_path = check_output(["which", "glpsol"],
universal_newlines=True).strip()
glpk_path = abspath(join(dirname(glpksol_path), ".."))
include_dirs.append(join(glpk_path, "include"))
library_dirs.append(join(glpk_path, "lib"))
except Exception as e:
print('Could not autodetect include and library dirs: ' + str(e))
if len(include_dirs) > 0:
build_args["include_dirs"] = include_dirs
if len(library_dirs) > 0:
build_args["library_dirs"] = library_dirs
# use cython if present, otherwise use c file
if cythonize:
ext_modules = cythonize([Extension("cobra.solvers.cglpk",
["cobra/solvers/cglpk.pyx"],
**build_args)],
force=True)
else:
ext_modules = [Extension("cobra.solvers.cglpk",
["cobra/solvers/cglpk.c"], **build_args)]
except Exception as e:
print('Could not build CGLPK: {}'.format(e))
ext_modules = None
extras = {
'matlab': ["pymatbridge"],
'sbml': ["python-libsbml", "lxml"],
'array': ["numpy>=1.6", "scipy>=0.11.0"],
'test': ["pytest", "pytest-benchmark"],
'display': ["matplotlib", "palettable", "pandas>=0.17.0", "tabulate"]
}
all_extras = {'Cython>=0.21'}
for extra in extras.values():
all_extras.update(extra)
extras["all"] = sorted(list(all_extras))
# If using bdist_wininst, the installer will not get dependencies like
# a setuptools installation does. Therefore, for the one external dependency,
# which is six.py, we can just download it here and include it in the
# installer.
# The file six.py will need to be manually downloaded and placed in the
# same directory as setup.py.
if "bdist_wininst" in argv:
setup_kwargs["py_modules"] = ["six"]
try:
with open('README.rst') as handle:
readme = handle.read()
with open('INSTALL.rst') as handle:
install = handle.read()
setup_kwargs["long_description"] = readme + "\n\n" + install
except:
setup_kwargs["long_description"] = ''
setup(
name="cobra",
version=version,
packages=find_packages(exclude=['cobra.oven', 'cobra.oven*']),
setup_requires=[],
install_requires=["six"],
tests_require=["jsonschema > 2.5"],
extras_require=extras,
ext_modules=ext_modules,
package_data={
'': ['test/data/*',
'VERSION',
'mlab/matlab_scripts/*m']},
author="Daniel Robert Hyduke <[email protected]>, "
"Ali Ebrahim <[email protected]>",
author_email="[email protected]",
description="COBRApy is a package for constraints-based modeling of "
"biological networks",
license="LGPL/GPL v2+",
keywords="metabolism biology linear programming optimization flux"
" balance analysis fba",
url="https://opencobra.github.io/cobrapy",
test_suite="cobra.test.suite",
download_url='https://pypi.python.org/pypi/cobra',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: GNU Lesser General Public License v2'
' or later (LGPLv2+)',
'License :: OSI Approved :: GNU General Public License v2'
' or later (GPLv2+)',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Cython',
'Programming Language :: Python :: Implementation :: CPython',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Bio-Informatics'
],
platforms="GNU/Linux, Mac OS X >= 10.7, Microsoft Windows >= 7",
**setup_kwargs)
|
lgpl-2.1
|
jrkerns/pylinac
|
pylinac/vmat.py
|
1
|
19256
|
# -*- coding: utf-8 -*-
"""The VMAT module consists of the class VMAT, which is capable of loading an EPID DICOM Open field image and MLC field image and analyzing the
images according to the Varian RapidArc QA tests and procedures, specifically the Dose-Rate & Gantry-Speed (DRGS)
and Dose-Rate & MLC speed (DRMLC) tests.
Features:
* **Do both tests** - Pylinac can handle either DRGS or DRMLC tests.
* **Automatic offset correction** - Older VMAT tests had the ROIs offset; newer ones are centered. No worries, pylinac finds the ROIs automatically.
* **Automatic open/DMLC identification** - Pass in both images--don't worry about naming. Pylinac will automatically identify the right images.
"""
import dataclasses
import enum
import typing
from dataclasses import dataclass
from io import BytesIO
from typing import Union, List, Tuple, Sequence, Optional
import argue
import matplotlib.pyplot as plt
import numpy as np
from .core import image
from .core.geometry import Point, Rectangle
from .core.image import ImageLike
from .core.io import get_url, TemporaryZipDirectory, retrieve_demo_file
from .core.pdf import PylinacCanvas
from .core.profile import SingleProfile, Interpolation, Edge
from .core.utilities import open_path, ResultBase
from .settings import get_dicom_cmap
class ImageType(enum.Enum):
"""The image type options"""
DMLC = 'dmlc' #:
OPEN = 'open' #:
PROFILE = 'profile' #:
@dataclass
class SegmentResult:
"""An individual segment/ROI result"""
passed: bool #:
x_position_mm: float #:
r_corr: float #:
r_dev: float #:
center_x_y: float #:
@dataclass
class VMATResult(ResultBase):
"""This class should not be called directly. It is returned by the ``results_data()`` method.
It is a dataclass under the hood and thus comes with all the dunder magic.
Use the following attributes as normal class attributes."""
test_type: str #:
tolerance_percent: float #:
max_deviation_percent: float #:
abs_mean_deviation: float #:
passed: bool #:
segment_data: typing.Iterable[SegmentResult] #:
class VMATBase:
_url_suffix: str
_result_header: str
_result_short_header: str
SEGMENT_X_POSITIONS_MM: Tuple
dmlc_image: image.DicomImage
open_image: image.DicomImage
segments: List
_tolerance: float
def __init__(self, image_paths: Sequence[Union[str, BytesIO]]):
"""
Parameters
----------
image_paths : iterable (list, tuple, etc)
A sequence of paths to the image files.
"""
if len(image_paths) != 2:
raise ValueError("Exactly 2 images (open, DMLC) must be passed")
image1, image2 = self._load_images(image_paths)
image1, image2 = self._check_img_inversion(image1, image2)
self._identify_images(image1, image2)
self.segments = []
self._tolerance = 0
@classmethod
def from_url(cls, url: str):
"""Load a ZIP archive from a URL. Must follow the naming convention.
Parameters
----------
url : str
Must point to a valid URL that is a ZIP archive of two VMAT images.
"""
zfile = get_url(url)
return cls.from_zip(zfile)
@classmethod
def from_zip(cls, path: str):
"""Load VMAT images from a ZIP file that contains both images. Must follow the naming convention.
Parameters
----------
path : str
Path to the ZIP archive which holds the VMAT image files.
"""
with TemporaryZipDirectory(path) as tmpzip:
image_files = image.retrieve_image_files(tmpzip)
return cls(image_paths=image_files)
@classmethod
def from_demo_images(cls):
"""Construct a VMAT instance using the demo images."""
demo_file = retrieve_demo_file(url=cls._url_suffix)
return cls.from_zip(demo_file)
@argue.bounds(tolerance=(0, 8))
def analyze(self, tolerance: Union[float, int] = 1.5, segment_size_mm: Tuple = (5, 100)):
"""Analyze the open and DMLC field VMAT images, according to 1 of 2 possible tests.
Parameters
----------
tolerance : float, int, optional
The tolerance of the sample deviations in percent. Default is 1.5.
Must be between 0 and 8.
segment_size_mm : tuple(int, int)
The (width, height) of the ROI segments in mm.
"""
self._tolerance = tolerance/100
"""Analysis"""
points = self._calculate_segment_centers()
Segment._nominal_width_mm = segment_size_mm[0]
Segment._nominal_height_mm = segment_size_mm[1]
self._construct_segments(points)
@staticmethod
def _load_images(image_paths: Sequence[Union[str, BytesIO]]) -> Tuple[ImageLike, ImageLike]:
image1 = image.load(image_paths[0])
image2 = image.load(image_paths[1])
image1.ground()
image2.ground()
return image1, image2
@staticmethod
def _check_img_inversion(image1: ImageLike, image2: ImageLike) -> Tuple[ImageLike, ImageLike]:
"""Check that the images are correctly inverted."""
for image in [image1, image2]:
image.check_inversion()
return image1, image2
def _identify_images(self, image1: ImageLike, image2: ImageLike):
"""Identify which image is the DMLC and which is the open field."""
profile1, profile2 = self._median_profiles((image1, image2))
field_profile1 = profile1.field_data()['field values']
field_profile2 = profile2.field_data()['field values']
if np.std(field_profile1) > np.std(field_profile2):
self.dmlc_image = image1
self.open_image = image2
else:
self.dmlc_image = image2
self.open_image = image1
def results(self) -> str:
"""A string of the summary of the analysis results.
Returns
-------
str
The results string showing the overall result and deviation statistics by segment.
"""
if self.passed:
passfail_str = 'PASS'
else:
passfail_str = 'FAIL'
string = f'{self._result_header}\nTest Results (Tol. +/-{self._tolerance*100:2.2}%): {passfail_str}\n'
string += f'Max Deviation: {self.max_r_deviation:2.3}%\nAbsolute Mean Deviation: {self.avg_abs_r_deviation:2.3}%'
return string
def results_data(self, as_dict=False) -> Union[VMATResult, dict]:
"""Present the results data and metadata as a dataclass or dict.
The default return type is a dataclass."""
segment_data = []
for idx, segment in enumerate(self.segments):
segment_data.append(SegmentResult(passed=segment.passed,
r_corr=segment.r_corr,
r_dev=segment.r_dev,
center_x_y=segment.center.as_array(),
x_position_mm=self.SEGMENT_X_POSITIONS_MM[idx]))
data = VMATResult(
test_type=self._result_header,
tolerance_percent=self._tolerance*100,
max_deviation_percent=self.max_r_deviation,
abs_mean_deviation=self.avg_abs_r_deviation,
passed=self.passed,
segment_data=segment_data,
)
if as_dict:
return dataclasses.asdict(data)
return data
def _calculate_segment_centers(self) -> List[Point]:
"""Construct the center points of the segments based on the field center and known x-offsets."""
points = []
dmlc_prof, _ = self._median_profiles((self.dmlc_image, self.open_image))
x_field_center = dmlc_prof.beam_center()['index (rounded)']
for x_offset_mm in self.SEGMENT_X_POSITIONS_MM:
y = self.open_image.center.y
x_offset_pixels = x_offset_mm * self.open_image.dpmm
x = x_field_center + x_offset_pixels
points.append(Point(x, y))
return points
def _construct_segments(self, points: List[Point]):
for point in points:
segment = Segment(point, self.open_image, self.dmlc_image, self._tolerance)
self.segments.append(segment)
# post-analysis to update R_corr values
self._update_r_corrs()
def _update_r_corrs(self):
"""After the Segment constructions, the R_corr must be set for each segment."""
avg_r_corr = np.array([segment.r_corr for segment in self.segments]).mean()
for segment in self.segments:
segment.r_dev = ((segment.r_corr / avg_r_corr) * 100) - 100
@property
def passed(self) -> bool:
return all(segment.passed for segment in self.segments)
@property
def r_devs(self) -> np.ndarray:
"""Return the deviations of all segments as an array."""
return np.array([segment.r_dev for segment in self.segments])
@property
def avg_abs_r_deviation(self) -> float:
"""Return the average of the absolute R_deviation values."""
return np.abs(self.r_devs).mean()
@property
def avg_r_deviation(self) -> float:
"""Return the average of the R_deviation values, including the sign."""
return self.r_devs.mean()
@property
def max_r_deviation(self) -> float:
"""Return the value of the maximum R_deviation segment."""
return np.max(np.abs(self.r_devs))
def plot_analyzed_image(self, show: bool=True):
"""Plot the analyzed images. Shows the open and dmlc images with the segments drawn; also plots the median
profiles of the two images for visual comparison.
Parameters
----------
show : bool
Whether to actually show the image.
"""
fig, axes = plt.subplots(ncols=3, sharex=True)
subimages = (ImageType.OPEN, ImageType.DMLC, ImageType.PROFILE)
titles = ('Open', 'DMLC', 'Median Profiles')
for subimage, axis, title in zip(subimages, axes, titles):
self._plot_analyzed_subimage(subimage=subimage, ax=axis, show=False)
axis.set_title(title)
axis.set_ylabel('Normalized Response')
axis.legend(loc='lower center')
if show:
plt.tight_layout(h_pad=1.5)
plt.show()
def _save_analyzed_subimage(self, filename: Union[str, BytesIO], subimage: ImageType, **kwargs):
"""Save the analyzed images as a png file.
Parameters
----------
filename : str, file-object
Where to save the file to.
kwargs
Passed to matplotlib.
"""
self._plot_analyzed_subimage(subimage=subimage, show=False)
plt.savefig(filename, **kwargs)
def _plot_analyzed_subimage(self, subimage: ImageType, show: bool=True, ax: Optional[plt.Axes]=None):
"""Plot an individual piece of the VMAT analysis.
Parameters
----------
subimage : str
Specifies which image to plot.
show : bool
Whether to actually plot the image.
ax : matplotlib Axes, None
If None (default), creates a new figure to plot to, otherwise plots to the given axes.
"""
plt.ioff()
if ax is None:
fig, ax = plt.subplots()
# plot DMLC or OPEN image
if subimage in (ImageType.DMLC, ImageType.OPEN):
if subimage == ImageType.DMLC:
img = self.dmlc_image
elif subimage == ImageType.OPEN:
img = self.open_image
ax.imshow(img, cmap=get_dicom_cmap())
self._draw_segments(ax)
plt.sca(ax)
plt.axis('off')
plt.tight_layout()
# plot profile
elif subimage == ImageType.PROFILE:
dmlc_prof, open_prof = self._median_profiles((self.dmlc_image, self.open_image))
ax.plot(dmlc_prof.values, label='DMLC')
ax.plot(open_prof.values, label='Open')
ax.autoscale(axis='x', tight=True)
ax.legend(loc=8, fontsize='large')
ax.grid()
if show:
plt.show()
def _draw_segments(self, axis: plt.Axes):
"""Draw the segments onto a plot.
Parameters
----------
axis : matplotlib.axes.Axes
The plot to draw the objects on.
"""
for segment in self.segments:
color = segment.get_bg_color()
segment.plot2axes(axis, edgecolor=color)
@staticmethod
def _median_profiles(images) -> Tuple[SingleProfile, SingleProfile]:
"""Return two median profiles from the open and dmlc image. For visual comparison."""
profile1 = SingleProfile(np.mean(images[0], axis=0), interpolation=Interpolation.NONE, edge_detection_method=Edge.INFLECTION_DERIVATIVE)
profile1.stretch()
profile2 = SingleProfile(np.mean(images[1], axis=0), interpolation=Interpolation.NONE, edge_detection_method=Edge.INFLECTION_DERIVATIVE)
profile2.stretch()
# normalize the profiles to approximately the same value
norm_val = np.percentile(profile1.values, 90)
profile1.normalize(norm_val)
norm_val = np.percentile(profile2.values, 90)
profile2.normalize(norm_val)
return profile1, profile2
def publish_pdf(self, filename: str, notes: str=None, open_file: bool=False, metadata: Optional[dict]=None):
"""Publish (print) a PDF containing the analysis, images, and quantitative results.
Parameters
----------
filename : (str, file-like object}
The file to write the results to.
notes : str, list of strings
Text; if str, prints single line.
If list of strings, each list item is printed on its own line.
open_file : bool
Whether to open the file using the default program after creation.
metadata : dict
Extra data to be passed and shown in the PDF. The key and value will be shown with a colon.
E.g. passing {'Author': 'James', 'Unit': 'TrueBeam'} would result in text in the PDF like:
--------------
Author: James
Unit: TrueBeam
--------------
"""
canvas = PylinacCanvas(filename=filename, page_title=f"{self._result_short_header} VMAT Analysis", metadata=metadata)
        for y, x, width, img in zip((9, 9, -2), (1, 11, 3), (9, 9, 14),
                                    (ImageType.OPEN, ImageType.DMLC, ImageType.PROFILE)):
data = BytesIO()
self._save_analyzed_subimage(data, subimage=img)
canvas.add_image(data, location=(x, y), dimensions=(width, 18))
# canvas.add_text(text=f"{img} Image", location=(x + 2, y + 10), font_size=18)
canvas.add_text(text='Open Image', location=(4, 22), font_size=18)
canvas.add_text(text=f'{self.open_image.base_path}', location=(4, 21.5))
canvas.add_text(text='DMLC Image', location=(14, 22), font_size=18)
canvas.add_text(text=f'{self.dmlc_image.base_path}', location=(14, 21.5))
canvas.add_text(text='Median profiles', location=(8, 12), font_size=18)
text = [f'{self._result_header} VMAT results:',
f'Source-to-Image Distance (mm): {self.open_image.sid:2.0f}',
f'Tolerance (%): {self._tolerance*100:2.1f}',
f'Absolute mean deviation (%): {self.avg_abs_r_deviation:2.2f}',
f'Maximum deviation (%): {self.max_r_deviation:2.2f}',
]
canvas.add_text(text=text, location=(10, 25.5))
if notes is not None:
canvas.add_text(text="Notes:", location=(1, 5.5), font_size=14)
canvas.add_text(text=notes, location=(1, 5))
canvas.finish()
if open_file:
open_path(filename)
class DRGS(VMATBase):
"""Class representing a Dose-Rate, Gantry-speed VMAT test. Will accept, analyze, and return the results."""
_url_suffix = 'drgs.zip'
_result_header = 'Dose Rate & Gantry Speed'
_result_short_header = 'DR/GS'
SEGMENT_X_POSITIONS_MM = (-60, -40, -20, 0, 20, 40, 60)
@staticmethod
def run_demo():
"""Run the demo for the Dose Rate & Gantry Speed test."""
vmat = DRGS.from_demo_images()
vmat.analyze() # old images (rev1, not new rev2's), which are offset
print(vmat.results())
vmat.plot_analyzed_image()
class DRMLC(VMATBase):
"""Class representing a Dose-Rate, MLC speed VMAT test. Will accept, analyze, and return the results."""
_url_suffix = 'drmlc.zip'
_result_header = 'Dose Rate & MLC Speed'
_result_short_header = 'DR/MLCS'
SEGMENT_X_POSITIONS_MM = (-45, -15, 15, 45)
@staticmethod
def run_demo():
"""Run the demo for the MLC leaf speed test."""
vmat = DRMLC.from_demo_images()
vmat.analyze()
print(vmat.results())
vmat.plot_analyzed_image()
class Segment(Rectangle):
"""A class for holding and analyzing segment data of VMAT tests.
For VMAT tests, there are either 4 or 7 'segments', which represents a section of the image that received
radiation under the same conditions.
Attributes
----------
r_dev : float
The reading deviation (R_dev) from the average readings of all the segments. See RTD for equation info.
r_corr : float
The corrected reading (R_corr) of the pixel values. See RTD for explanation and equation info.
passed : boolean
Specifies where the segment reading deviation was under tolerance.
"""
    # nominal width (i.e. parallel to MLC motion) and height of the segment, in mm, under reference conditions
_nominal_width_mm: int
_nominal_height_mm: int
def __init__(self, center_point: Point, open_image: image.DicomImage, dmlc_image: image.DicomImage,
tolerance: Union[float, int]):
self.r_dev: float = 0.0 # is assigned after all segments constructed
self._tolerance = tolerance
self._open_image = open_image
self._dmlc_image = dmlc_image
width = self._nominal_width_mm * dmlc_image.dpmm
height = self._nominal_height_mm * dmlc_image.dpmm
super().__init__(width, height, center=center_point, as_int=True)
@property
def r_corr(self) -> float:
"""Return the ratio of the mean pixel values of DMLC/OPEN images."""
dmlc_value = self._dmlc_image.array[self.bl_corner.y:self.bl_corner.y + self.height,
self.bl_corner.x: self.bl_corner.x + self.width].mean()
open_value = self._open_image.array[self.bl_corner.y:self.bl_corner.y + self.height,
self.bl_corner.x: self.bl_corner.x + self.width].mean()
ratio = (dmlc_value / open_value) * 100
return ratio
@property
def passed(self) -> bool:
"""Return whether the segment passed or failed."""
return abs(self.r_dev) < self._tolerance * 100
def get_bg_color(self) -> str:
"""Get the background color of the segment when plotted, based on the pass/fail status."""
return 'blue' if self.passed else 'red'
|
mit
|
applecool/DataScience
|
Azure ML Studio Experiments/Energy Efficiencies/VisualizeEE.py
|
1
|
3171
|
## The main function with a single argument, a Pandas data frame
## from the first input port of the Execute Python Script module.
def azureml_main(frame1):
## import libraries
import matplotlib
matplotlib.use('agg') # Set backend
from pandas.tools.plotting import scatter_matrix
import pandas.tools.rplot as rplot
import matplotlib.pyplot as plt
import numpy as np
## Create a pair-wise scatter plot
Azure = False
## If in Azure, frame1 is passed to function
if(Azure == False):
frame1 = eeframe
fig1 = plt.figure(1, figsize=(10, 10))
ax = fig1.gca()
scatter_matrix(frame1, alpha=0.3,
diagonal='kde', ax = ax)
plt.show()
if(Azure == True): fig1.savefig('scatter1.png')
## Create conditioned scatter plots.
col_list = ["Relative Compactness",
"Surface Area",
"Wall Area",
"Roof Area",
'Glazing Area',
"Glazing Area Distribution"]
indx = 0
for col in col_list:
if(frame1[col].dtype in [np.int64, np.int32, np.float64]):
indx += 1
fig = plt.figure(figsize = (12,6))
fig.clf()
ax = fig.gca()
plot = rplot.RPlot(frame1, x = col, y = 'Heating Load')
plot.add(rplot.TrellisGrid(['Overall Height','Orientation']))
plot.add(rplot.GeomScatter())
plot.add(rplot.GeomPolyFit(degree=2))
ax.set_xlabel(col)
ax.set_ylabel('Heating Load')
plot.render(plt.gcf())
if(Azure == True): fig.savefig('scatter' + col + '.png')
## Histograms of features by Overall Height
col_list = ["Relative Compactness",
"Surface Area",
"Wall Area",
"Roof Area",
'Glazing Area',
"Glazing Area Distribution",
"Heating Load"]
for col in col_list:
temp7 = frame1.ix[frame1['Overall Height'] == 7, col].as_matrix()
temp35 = frame1.ix[frame1['Overall Height'] == 3.5, col].as_matrix()
fig = plt.figure(figsize = (12,6))
fig.clf()
ax7 = fig.add_subplot(1, 2, 1)
ax35 = fig.add_subplot(1, 2, 2)
ax7.hist(temp7, bins = 20)
        ax7.set_title('Histogram of ' + col + '\n for Overall Height of 7')
ax35.hist(temp35, bins = 20)
        ax35.set_title('Histogram of ' + col + '\n for Overall Height of 3.5')
if(Azure == True): fig.savefig('hists_' + col + '.png')
## Create boxplots.
for col in col_list:
if(frame1[col].dtype in [np.int64, np.int32, np.float64]):
fig = plt.figure(figsize = (6,6))
fig.clf()
ax = fig.gca()
frame1[[col, 'Overall Height']].boxplot(column = [col], ax = ax, by = ['Overall Height'])
ax.set_xlabel('')
if(Azure == True): fig.savefig('box_' + col + '.png')
## In Azure, the function returns the data frame
return frame1
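## Editor's sketch (illustrative, not part of the original script): exercising
## azureml_main locally with a small synthetic frame. The column names follow the ones
## referenced above, the values are arbitrary, and the legacy pandas APIs used inside
## the function (pandas.tools.rplot, .ix, .as_matrix) are assumed to be available.
if __name__ == '__main__':
    import numpy as np
    import pandas as pd
    rng = np.random.RandomState(0)
    n = 50
    eeframe = pd.DataFrame({
        'Relative Compactness': rng.uniform(0.6, 1.0, n),
        'Surface Area': rng.uniform(500.0, 800.0, n),
        'Wall Area': rng.uniform(250.0, 420.0, n),
        'Roof Area': rng.uniform(110.0, 220.0, n),
        'Glazing Area': rng.uniform(0.0, 0.4, n),
        'Glazing Area Distribution': rng.randint(0, 6, n),
        'Overall Height': rng.choice([3.5, 7.0], n),
        'Orientation': rng.randint(2, 6, n),
        'Heating Load': rng.uniform(6.0, 43.0, n),
    })
    azureml_main(eeframe)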
|
mit
|
hernick-qc/dRonin
|
python/ins/compare.py
|
11
|
5497
|
from cins import CINS
from pyins import PyINS
import unittest
from sympy import symbols, lambdify, sqrt
from sympy import MatrixSymbol, Matrix
from numpy import cos, sin, power
from sympy.matrices import *
from quaternions import *
import numpy
import math
import ins
VISUALIZE = False
class CompareFunctions(unittest.TestCase):
def setUp(self):
self.c_sim = CINS()
self.py_sim = PyINS()
self.c_sim.prepare()
self.py_sim.prepare()
def run_static(self, accel=[0.0,0.0,-PyINS.GRAV],
gyro=[0.0,0.0,0.0], mag=[400,0,1600],
pos=[0,0,0], vel=[0,0,0],
noise=False, STEPS=200000):
""" simulate a static set of inputs and measurements
"""
c_sim = self.c_sim
py_sim = self.py_sim
dT = 1.0 / 666.0
numpy.random.seed(1)
c_history = numpy.zeros((STEPS,16))
c_history_rpy = numpy.zeros((STEPS,3))
py_history = numpy.zeros((STEPS,16))
py_history_rpy = numpy.zeros((STEPS,3))
times = numpy.zeros((STEPS,1))
for k in range(STEPS):
            print(k)
ng = numpy.zeros(3,)
na = numpy.zeros(3,)
np = numpy.zeros(3,)
nv = numpy.zeros(3,)
nm = numpy.zeros(3,)
if noise:
ng = numpy.random.randn(3,) * 1e-3
na = numpy.random.randn(3,) * 1e-3
np = numpy.random.randn(3,) * 1e-3
nv = numpy.random.randn(3,) * 1e-3
nm = numpy.random.randn(3,) * 10.0
c_sim.predict(gyro+ng, accel+na, dT=dT)
py_sim.predict(gyro+ng, accel+na, dT=dT)
times[k] = k * dT
c_history[k,:] = c_sim.state
c_history_rpy[k,:] = quat_rpy(c_sim.state[6:10])
py_history[k,:] = py_sim.state
py_history_rpy[k,:] = quat_rpy(py_sim.state[6:10])
if False and k % 60 == 59:
c_sim.correction(pos=pos+np)
py_sim.correction(pos=pos+np)
if False and k % 60 == 59:
c_sim.correction(vel=vel+nv)
py_sim.correction(vel=vel+nv)
if True and k % 20 == 8:
c_sim.correction(baro=-pos[2]+np[2])
py_sim.correction(baro=-pos[2]+np[2])
if True and k % 20 == 15:
c_sim.correction(mag=mag+nm)
py_sim.correction(mag=mag+nm)
self.assertState(c_sim.state, py_sim.state)
if VISUALIZE:
from numpy import cos, sin
import matplotlib.pyplot as plt
fig, ax = plt.subplots(2,2)
k = STEPS
ax[0][0].cla()
ax[0][0].plot(times[0:k:4],c_history[0:k:4,0:3])
ax[0][0].set_title('Position')
plt.sca(ax[0][0])
plt.ylabel('m')
ax[0][1].cla()
ax[0][1].plot(times[0:k:4],c_history[0:k:4,3:6])
ax[0][1].set_title('Velocity')
plt.sca(ax[0][1])
plt.ylabel('m/s')
#plt.ylim(-2,2)
ax[1][0].cla()
ax[1][0].plot(times[0:k:4],c_history_rpy[0:k:4,:])
ax[1][0].set_title('Attitude')
plt.sca(ax[1][0])
plt.ylabel('Angle (Deg)')
plt.xlabel('Time (s)')
#plt.ylim(-1.1,1.1)
ax[1][1].cla()
ax[1][1].plot(times[0:k:4],c_history[0:k:4,10:])
ax[1][1].set_title('Biases')
plt.sca(ax[1][1])
plt.ylabel('Bias (rad/s)')
plt.xlabel('Time (s)')
plt.suptitle(unittest.TestCase.shortDescription(self))
plt.show()
        return c_sim.state, c_history, times
def assertState(self, c_state, py_state):
""" check that the state is near a desired position
"""
# check position
self.assertAlmostEqual(c_state[0],py_state[0],places=1)
self.assertAlmostEqual(c_state[1],py_state[1],places=1)
self.assertAlmostEqual(c_state[2],py_state[2],places=1)
# check velocity
self.assertAlmostEqual(c_state[3],py_state[3],places=1)
self.assertAlmostEqual(c_state[4],py_state[4],places=1)
self.assertAlmostEqual(c_state[5],py_state[5],places=1)
        # check attitude (quaternion components)
        self.assertAlmostEqual(c_state[6],py_state[6],places=0)
        self.assertAlmostEqual(c_state[7],py_state[7],places=0)
        self.assertAlmostEqual(c_state[8],py_state[8],places=0)
        self.assertAlmostEqual(c_state[9],py_state[9],places=0)
# check bias terms (gyros and accels)
self.assertAlmostEqual(c_state[10],py_state[10],places=2)
self.assertAlmostEqual(c_state[11],py_state[11],places=2)
self.assertAlmostEqual(c_state[12],py_state[12],places=2)
self.assertAlmostEqual(c_state[13],py_state[13],places=2)
self.assertAlmostEqual(c_state[14],py_state[14],places=2)
self.assertAlmostEqual(c_state[15],py_state[15],places=2)
def test_face_west(self):
""" test convergence to face west
"""
mag = [0,-400,1600]
state, history, times = self.run_static(mag=mag, STEPS=50000)
        self.assertAlmostEqual(quat_rpy(state[6:10])[2], 90, places=0)
if __name__ == '__main__':
selected_test = None
if selected_test is not None:
VISUALIZE = True
suite = unittest.TestSuite()
suite.addTest(CompareFunctions(selected_test))
unittest.TextTestRunner().run(suite)
else:
unittest.main()
|
gpl-3.0
|
jereze/scikit-learn
|
sklearn/metrics/tests/test_pairwise.py
|
71
|
25104
|
import numpy as np
from numpy import linalg
from scipy.sparse import dok_matrix, csr_matrix, issparse
from scipy.spatial.distance import cosine, cityblock, minkowski, wminkowski
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regexp
from sklearn.utils.testing import assert_true
from sklearn.externals.six import iteritems
from sklearn.metrics.pairwise import euclidean_distances
from sklearn.metrics.pairwise import manhattan_distances
from sklearn.metrics.pairwise import linear_kernel
from sklearn.metrics.pairwise import chi2_kernel, additive_chi2_kernel
from sklearn.metrics.pairwise import polynomial_kernel
from sklearn.metrics.pairwise import rbf_kernel
from sklearn.metrics.pairwise import sigmoid_kernel
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.metrics.pairwise import cosine_distances
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.metrics.pairwise import pairwise_distances_argmin_min
from sklearn.metrics.pairwise import pairwise_distances_argmin
from sklearn.metrics.pairwise import pairwise_kernels
from sklearn.metrics.pairwise import PAIRWISE_KERNEL_FUNCTIONS
from sklearn.metrics.pairwise import PAIRWISE_DISTANCE_FUNCTIONS
from sklearn.metrics.pairwise import PAIRED_DISTANCES
from sklearn.metrics.pairwise import check_pairwise_arrays
from sklearn.metrics.pairwise import check_paired_arrays
from sklearn.metrics.pairwise import _parallel_pairwise
from sklearn.metrics.pairwise import paired_distances
from sklearn.metrics.pairwise import paired_euclidean_distances
from sklearn.metrics.pairwise import paired_manhattan_distances
from sklearn.preprocessing import normalize
def test_pairwise_distances():
    # Test the pairwise_distances helper function.
rng = np.random.RandomState(0)
# Euclidean distance should be equivalent to calling the function.
X = rng.random_sample((5, 4))
S = pairwise_distances(X, metric="euclidean")
S2 = euclidean_distances(X)
assert_array_almost_equal(S, S2)
# Euclidean distance, with Y != X.
Y = rng.random_sample((2, 4))
S = pairwise_distances(X, Y, metric="euclidean")
S2 = euclidean_distances(X, Y)
assert_array_almost_equal(S, S2)
# Test with tuples as X and Y
X_tuples = tuple([tuple([v for v in row]) for row in X])
Y_tuples = tuple([tuple([v for v in row]) for row in Y])
S2 = pairwise_distances(X_tuples, Y_tuples, metric="euclidean")
assert_array_almost_equal(S, S2)
# "cityblock" uses sklearn metric, cityblock (function) is scipy.spatial.
S = pairwise_distances(X, metric="cityblock")
S2 = pairwise_distances(X, metric=cityblock)
assert_equal(S.shape[0], S.shape[1])
assert_equal(S.shape[0], X.shape[0])
assert_array_almost_equal(S, S2)
# The manhattan metric should be equivalent to cityblock.
S = pairwise_distances(X, Y, metric="manhattan")
S2 = pairwise_distances(X, Y, metric=cityblock)
assert_equal(S.shape[0], X.shape[0])
assert_equal(S.shape[1], Y.shape[0])
assert_array_almost_equal(S, S2)
# Low-level function for manhattan can divide in blocks to avoid
# using too much memory during the broadcasting
S3 = manhattan_distances(X, Y, size_threshold=10)
assert_array_almost_equal(S, S3)
# Test cosine as a string metric versus cosine callable
# "cosine" uses sklearn metric, cosine (function) is scipy.spatial
S = pairwise_distances(X, Y, metric="cosine")
S2 = pairwise_distances(X, Y, metric=cosine)
assert_equal(S.shape[0], X.shape[0])
assert_equal(S.shape[1], Y.shape[0])
assert_array_almost_equal(S, S2)
# Test with sparse X and Y,
# currently only supported for Euclidean, L1 and cosine.
X_sparse = csr_matrix(X)
Y_sparse = csr_matrix(Y)
S = pairwise_distances(X_sparse, Y_sparse, metric="euclidean")
S2 = euclidean_distances(X_sparse, Y_sparse)
assert_array_almost_equal(S, S2)
S = pairwise_distances(X_sparse, Y_sparse, metric="cosine")
S2 = cosine_distances(X_sparse, Y_sparse)
assert_array_almost_equal(S, S2)
S = pairwise_distances(X_sparse, Y_sparse.tocsc(), metric="manhattan")
S2 = manhattan_distances(X_sparse.tobsr(), Y_sparse.tocoo())
assert_array_almost_equal(S, S2)
S2 = manhattan_distances(X, Y)
assert_array_almost_equal(S, S2)
# Test with scipy.spatial.distance metric, with a kwd
kwds = {"p": 2.0}
S = pairwise_distances(X, Y, metric="minkowski", **kwds)
S2 = pairwise_distances(X, Y, metric=minkowski, **kwds)
assert_array_almost_equal(S, S2)
# same with Y = None
kwds = {"p": 2.0}
S = pairwise_distances(X, metric="minkowski", **kwds)
S2 = pairwise_distances(X, metric=minkowski, **kwds)
assert_array_almost_equal(S, S2)
# Test that scipy distance metrics throw an error if sparse matrix given
assert_raises(TypeError, pairwise_distances, X_sparse, metric="minkowski")
assert_raises(TypeError, pairwise_distances, X, Y_sparse,
metric="minkowski")
    # Test that a ValueError is raised if the metric is unknown
assert_raises(ValueError, pairwise_distances, X, Y, metric="blah")
def test_pairwise_precomputed():
for func in [pairwise_distances, pairwise_kernels]:
# Test correct shape
assert_raises_regexp(ValueError, '.* shape .*',
func, np.zeros((5, 3)), metric='precomputed')
# with two args
assert_raises_regexp(ValueError, '.* shape .*',
func, np.zeros((5, 3)), np.zeros((4, 4)),
metric='precomputed')
        # even if shape[1] agrees (although the second arg is spurious)
assert_raises_regexp(ValueError, '.* shape .*',
func, np.zeros((5, 3)), np.zeros((4, 3)),
metric='precomputed')
# Test not copied (if appropriate dtype)
S = np.zeros((5, 5))
S2 = func(S, metric="precomputed")
assert_true(S is S2)
# with two args
S = np.zeros((5, 3))
S2 = func(S, np.zeros((3, 3)), metric="precomputed")
assert_true(S is S2)
# Test always returns float dtype
S = func(np.array([[1]], dtype='int'), metric='precomputed')
assert_equal('f', S.dtype.kind)
# Test converts list to array-like
S = func([[1]], metric='precomputed')
assert_true(isinstance(S, np.ndarray))
def check_pairwise_parallel(func, metric, kwds):
rng = np.random.RandomState(0)
for make_data in (np.array, csr_matrix):
X = make_data(rng.random_sample((5, 4)))
Y = make_data(rng.random_sample((3, 4)))
try:
S = func(X, metric=metric, n_jobs=1, **kwds)
except (TypeError, ValueError) as exc:
# Not all metrics support sparse input
# ValueError may be triggered by bad callable
if make_data is csr_matrix:
assert_raises(type(exc), func, X, metric=metric,
n_jobs=2, **kwds)
continue
else:
raise
S2 = func(X, metric=metric, n_jobs=2, **kwds)
assert_array_almost_equal(S, S2)
S = func(X, Y, metric=metric, n_jobs=1, **kwds)
S2 = func(X, Y, metric=metric, n_jobs=2, **kwds)
assert_array_almost_equal(S, S2)
def test_pairwise_parallel():
wminkowski_kwds = {'w': np.arange(1, 5).astype('double'), 'p': 1}
metrics = [(pairwise_distances, 'euclidean', {}),
(pairwise_distances, wminkowski, wminkowski_kwds),
(pairwise_distances, 'wminkowski', wminkowski_kwds),
(pairwise_kernels, 'polynomial', {'degree': 1}),
(pairwise_kernels, callable_rbf_kernel, {'gamma': .1}),
]
for func, metric, kwds in metrics:
yield check_pairwise_parallel, func, metric, kwds
def test_pairwise_callable_nonstrict_metric():
    # pairwise_distances should allow a callable metric where metric(x, x) != 0
# Knowing that the callable is a strict metric would allow the diagonal to
# be left uncalculated and set to 0.
assert_equal(pairwise_distances([[1]], metric=lambda x, y: 5)[0, 0], 5)
def callable_rbf_kernel(x, y, **kwds):
# Callable version of pairwise.rbf_kernel.
K = rbf_kernel(np.atleast_2d(x), np.atleast_2d(y), **kwds)
return K
def test_pairwise_kernels():
# Test the pairwise_kernels helper function.
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
Y = rng.random_sample((2, 4))
# Test with all metrics that should be in PAIRWISE_KERNEL_FUNCTIONS.
test_metrics = ["rbf", "sigmoid", "polynomial", "linear", "chi2",
"additive_chi2"]
for metric in test_metrics:
function = PAIRWISE_KERNEL_FUNCTIONS[metric]
# Test with Y=None
K1 = pairwise_kernels(X, metric=metric)
K2 = function(X)
assert_array_almost_equal(K1, K2)
# Test with Y=Y
K1 = pairwise_kernels(X, Y=Y, metric=metric)
K2 = function(X, Y=Y)
assert_array_almost_equal(K1, K2)
# Test with tuples as X and Y
X_tuples = tuple([tuple([v for v in row]) for row in X])
Y_tuples = tuple([tuple([v for v in row]) for row in Y])
K2 = pairwise_kernels(X_tuples, Y_tuples, metric=metric)
assert_array_almost_equal(K1, K2)
# Test with sparse X and Y
X_sparse = csr_matrix(X)
Y_sparse = csr_matrix(Y)
if metric in ["chi2", "additive_chi2"]:
# these don't support sparse matrices yet
assert_raises(ValueError, pairwise_kernels,
X_sparse, Y=Y_sparse, metric=metric)
continue
K1 = pairwise_kernels(X_sparse, Y=Y_sparse, metric=metric)
assert_array_almost_equal(K1, K2)
# Test with a callable function, with given keywords.
metric = callable_rbf_kernel
kwds = {}
kwds['gamma'] = 0.1
K1 = pairwise_kernels(X, Y=Y, metric=metric, **kwds)
K2 = rbf_kernel(X, Y=Y, **kwds)
assert_array_almost_equal(K1, K2)
# callable function, X=Y
K1 = pairwise_kernels(X, Y=X, metric=metric, **kwds)
K2 = rbf_kernel(X, Y=X, **kwds)
assert_array_almost_equal(K1, K2)
def test_pairwise_kernels_filter_param():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
Y = rng.random_sample((2, 4))
K = rbf_kernel(X, Y, gamma=0.1)
params = {"gamma": 0.1, "blabla": ":)"}
K2 = pairwise_kernels(X, Y, metric="rbf", filter_params=True, **params)
assert_array_almost_equal(K, K2)
assert_raises(TypeError, pairwise_kernels, X, Y, "rbf", **params)
def test_paired_distances():
    # Test the paired_distances helper function.
rng = np.random.RandomState(0)
# Euclidean distance should be equivalent to calling the function.
X = rng.random_sample((5, 4))
# Euclidean distance, with Y != X.
Y = rng.random_sample((5, 4))
for metric, func in iteritems(PAIRED_DISTANCES):
S = paired_distances(X, Y, metric=metric)
S2 = func(X, Y)
assert_array_almost_equal(S, S2)
S3 = func(csr_matrix(X), csr_matrix(Y))
assert_array_almost_equal(S, S3)
if metric in PAIRWISE_DISTANCE_FUNCTIONS:
            # Check that the pairwise_distances implementation
# gives the same value
distances = PAIRWISE_DISTANCE_FUNCTIONS[metric](X, Y)
distances = np.diag(distances)
assert_array_almost_equal(distances, S)
# Check the callable implementation
S = paired_distances(X, Y, metric='manhattan')
S2 = paired_distances(X, Y, metric=lambda x, y: np.abs(x - y).sum(axis=0))
assert_array_almost_equal(S, S2)
    # Test that a ValueError is raised when the lengths of X and Y differ
Y = rng.random_sample((3, 4))
assert_raises(ValueError, paired_distances, X, Y)
def test_pairwise_distances_argmin_min():
# Check pairwise minimum distances computation for any metric
X = [[0], [1]]
Y = [[-1], [2]]
Xsp = dok_matrix(X)
Ysp = csr_matrix(Y, dtype=np.float32)
# euclidean metric
D, E = pairwise_distances_argmin_min(X, Y, metric="euclidean")
D2 = pairwise_distances_argmin(X, Y, metric="euclidean")
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(D2, [0, 1])
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(E, [1., 1.])
# sparse matrix case
Dsp, Esp = pairwise_distances_argmin_min(Xsp, Ysp, metric="euclidean")
assert_array_equal(Dsp, D)
assert_array_equal(Esp, E)
# We don't want np.matrix here
assert_equal(type(Dsp), np.ndarray)
assert_equal(type(Esp), np.ndarray)
# Non-euclidean sklearn metric
D, E = pairwise_distances_argmin_min(X, Y, metric="manhattan")
D2 = pairwise_distances_argmin(X, Y, metric="manhattan")
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(D2, [0, 1])
assert_array_almost_equal(E, [1., 1.])
D, E = pairwise_distances_argmin_min(Xsp, Ysp, metric="manhattan")
D2 = pairwise_distances_argmin(Xsp, Ysp, metric="manhattan")
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(E, [1., 1.])
# Non-euclidean Scipy distance (callable)
D, E = pairwise_distances_argmin_min(X, Y, metric=minkowski,
metric_kwargs={"p": 2})
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(E, [1., 1.])
# Non-euclidean Scipy distance (string)
D, E = pairwise_distances_argmin_min(X, Y, metric="minkowski",
metric_kwargs={"p": 2})
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(E, [1., 1.])
# Compare with naive implementation
rng = np.random.RandomState(0)
X = rng.randn(97, 149)
Y = rng.randn(111, 149)
dist = pairwise_distances(X, Y, metric="manhattan")
dist_orig_ind = dist.argmin(axis=0)
dist_orig_val = dist[dist_orig_ind, range(len(dist_orig_ind))]
dist_chunked_ind, dist_chunked_val = pairwise_distances_argmin_min(
X, Y, axis=0, metric="manhattan", batch_size=50)
np.testing.assert_almost_equal(dist_orig_ind, dist_chunked_ind, decimal=7)
np.testing.assert_almost_equal(dist_orig_val, dist_chunked_val, decimal=7)
def test_euclidean_distances():
# Check the pairwise Euclidean distances computation
X = [[0]]
Y = [[1], [2]]
D = euclidean_distances(X, Y)
assert_array_almost_equal(D, [[1., 2.]])
X = csr_matrix(X)
Y = csr_matrix(Y)
D = euclidean_distances(X, Y)
assert_array_almost_equal(D, [[1., 2.]])
rng = np.random.RandomState(0)
X = rng.random_sample((10, 4))
Y = rng.random_sample((20, 4))
X_norm_sq = (X ** 2).sum(axis=1).reshape(1, -1)
Y_norm_sq = (Y ** 2).sum(axis=1).reshape(1, -1)
# check that we still get the right answers with {X,Y}_norm_squared
D1 = euclidean_distances(X, Y)
D2 = euclidean_distances(X, Y, X_norm_squared=X_norm_sq)
D3 = euclidean_distances(X, Y, Y_norm_squared=Y_norm_sq)
D4 = euclidean_distances(X, Y, X_norm_squared=X_norm_sq,
Y_norm_squared=Y_norm_sq)
assert_array_almost_equal(D2, D1)
assert_array_almost_equal(D3, D1)
assert_array_almost_equal(D4, D1)
# check we get the wrong answer with wrong {X,Y}_norm_squared
X_norm_sq *= 0.5
Y_norm_sq *= 0.5
wrong_D = euclidean_distances(X, Y,
X_norm_squared=np.zeros_like(X_norm_sq),
Y_norm_squared=np.zeros_like(Y_norm_sq))
assert_greater(np.max(np.abs(wrong_D - D1)), .01)
# Paired distances
def test_paired_euclidean_distances():
# Check the paired Euclidean distances computation
X = [[0], [0]]
Y = [[1], [2]]
D = paired_euclidean_distances(X, Y)
assert_array_almost_equal(D, [1., 2.])
def test_paired_manhattan_distances():
# Check the paired manhattan distances computation
X = [[0], [0]]
Y = [[1], [2]]
D = paired_manhattan_distances(X, Y)
assert_array_almost_equal(D, [1., 2.])
def test_chi_square_kernel():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
Y = rng.random_sample((10, 4))
K_add = additive_chi2_kernel(X, Y)
gamma = 0.1
K = chi2_kernel(X, Y, gamma=gamma)
assert_equal(K.dtype, np.float)
for i, x in enumerate(X):
for j, y in enumerate(Y):
chi2 = -np.sum((x - y) ** 2 / (x + y))
chi2_exp = np.exp(gamma * chi2)
assert_almost_equal(K_add[i, j], chi2)
assert_almost_equal(K[i, j], chi2_exp)
# check diagonal is ones for data with itself
K = chi2_kernel(Y)
assert_array_equal(np.diag(K), 1)
# check off-diagonal is < 1 but > 0:
assert_true(np.all(K > 0))
assert_true(np.all(K - np.diag(np.diag(K)) < 1))
# check that float32 is preserved
X = rng.random_sample((5, 4)).astype(np.float32)
Y = rng.random_sample((10, 4)).astype(np.float32)
K = chi2_kernel(X, Y)
assert_equal(K.dtype, np.float32)
# check integer type gets converted,
# check that zeros are handled
X = rng.random_sample((10, 4)).astype(np.int32)
K = chi2_kernel(X, X)
assert_true(np.isfinite(K).all())
assert_equal(K.dtype, np.float)
# check that kernel of similar things is greater than dissimilar ones
X = [[.3, .7], [1., 0]]
Y = [[0, 1], [.9, .1]]
K = chi2_kernel(X, Y)
assert_greater(K[0, 0], K[0, 1])
assert_greater(K[1, 1], K[1, 0])
# test negative input
assert_raises(ValueError, chi2_kernel, [[0, -1]])
assert_raises(ValueError, chi2_kernel, [[0, -1]], [[-1, -1]])
assert_raises(ValueError, chi2_kernel, [[0, 1]], [[-1, -1]])
# different n_features in X and Y
assert_raises(ValueError, chi2_kernel, [[0, 1]], [[.2, .2, .6]])
# sparse matrices
assert_raises(ValueError, chi2_kernel, csr_matrix(X), csr_matrix(Y))
assert_raises(ValueError, additive_chi2_kernel,
csr_matrix(X), csr_matrix(Y))
def test_kernel_symmetry():
# Valid kernels should be symmetric
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
for kernel in (linear_kernel, polynomial_kernel, rbf_kernel,
sigmoid_kernel, cosine_similarity):
K = kernel(X, X)
assert_array_almost_equal(K, K.T, 15)
def test_kernel_sparse():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
X_sparse = csr_matrix(X)
for kernel in (linear_kernel, polynomial_kernel, rbf_kernel,
sigmoid_kernel, cosine_similarity):
K = kernel(X, X)
K2 = kernel(X_sparse, X_sparse)
assert_array_almost_equal(K, K2)
def test_linear_kernel():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
K = linear_kernel(X, X)
# the diagonal elements of a linear kernel are their squared norm
assert_array_almost_equal(K.flat[::6], [linalg.norm(x) ** 2 for x in X])
def test_rbf_kernel():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
K = rbf_kernel(X, X)
# the diagonal elements of a rbf kernel are 1
assert_array_almost_equal(K.flat[::6], np.ones(5))
def test_cosine_similarity_sparse_output():
# Test if cosine_similarity correctly produces sparse output.
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
Y = rng.random_sample((3, 4))
Xcsr = csr_matrix(X)
Ycsr = csr_matrix(Y)
K1 = cosine_similarity(Xcsr, Ycsr, dense_output=False)
assert_true(issparse(K1))
K2 = pairwise_kernels(Xcsr, Y=Ycsr, metric="cosine")
assert_array_almost_equal(K1.todense(), K2)
def test_cosine_similarity():
# Test the cosine_similarity.
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
Y = rng.random_sample((3, 4))
Xcsr = csr_matrix(X)
Ycsr = csr_matrix(Y)
for X_, Y_ in ((X, None), (X, Y),
(Xcsr, None), (Xcsr, Ycsr)):
        # Test that the cosine kernel is equal to a linear kernel when data
# has been previously normalized by L2-norm.
K1 = pairwise_kernels(X_, Y=Y_, metric="cosine")
X_ = normalize(X_)
if Y_ is not None:
Y_ = normalize(Y_)
K2 = pairwise_kernels(X_, Y=Y_, metric="linear")
assert_array_almost_equal(K1, K2)
def test_check_dense_matrices():
# Ensure that pairwise array check works for dense matrices.
# Check that if XB is None, XB is returned as reference to XA
XA = np.resize(np.arange(40), (5, 8))
XA_checked, XB_checked = check_pairwise_arrays(XA, None)
assert_true(XA_checked is XB_checked)
assert_array_equal(XA, XA_checked)
def test_check_XB_returned():
# Ensure that if XA and XB are given correctly, they return as equal.
# Check that if XB is not None, it is returned equal.
# Note that the second dimension of XB is the same as XA.
XA = np.resize(np.arange(40), (5, 8))
XB = np.resize(np.arange(32), (4, 8))
XA_checked, XB_checked = check_pairwise_arrays(XA, XB)
assert_array_equal(XA, XA_checked)
assert_array_equal(XB, XB_checked)
XB = np.resize(np.arange(40), (5, 8))
XA_checked, XB_checked = check_paired_arrays(XA, XB)
assert_array_equal(XA, XA_checked)
assert_array_equal(XB, XB_checked)
def test_check_different_dimensions():
# Ensure an error is raised if the dimensions are different.
XA = np.resize(np.arange(45), (5, 9))
XB = np.resize(np.arange(32), (4, 8))
assert_raises(ValueError, check_pairwise_arrays, XA, XB)
XB = np.resize(np.arange(4 * 9), (4, 9))
assert_raises(ValueError, check_paired_arrays, XA, XB)
def test_check_invalid_dimensions():
# Ensure an error is raised on 1D input arrays.
    # The modified tests are not 1D. In the old test, the array was internally
    # converted to 2D anyway.
XA = np.arange(45).reshape(9, 5)
XB = np.arange(32).reshape(4, 8)
assert_raises(ValueError, check_pairwise_arrays, XA, XB)
XA = np.arange(45).reshape(9, 5)
XB = np.arange(32).reshape(4, 8)
assert_raises(ValueError, check_pairwise_arrays, XA, XB)
def test_check_sparse_arrays():
# Ensures that checks return valid sparse matrices.
rng = np.random.RandomState(0)
XA = rng.random_sample((5, 4))
XA_sparse = csr_matrix(XA)
XB = rng.random_sample((5, 4))
XB_sparse = csr_matrix(XB)
XA_checked, XB_checked = check_pairwise_arrays(XA_sparse, XB_sparse)
# compare their difference because testing csr matrices for
# equality with '==' does not work as expected.
assert_true(issparse(XA_checked))
assert_equal(abs(XA_sparse - XA_checked).sum(), 0)
assert_true(issparse(XB_checked))
assert_equal(abs(XB_sparse - XB_checked).sum(), 0)
XA_checked, XA_2_checked = check_pairwise_arrays(XA_sparse, XA_sparse)
assert_true(issparse(XA_checked))
assert_equal(abs(XA_sparse - XA_checked).sum(), 0)
assert_true(issparse(XA_2_checked))
assert_equal(abs(XA_2_checked - XA_checked).sum(), 0)
def tuplify(X):
# Turns a numpy matrix (any n-dimensional array) into tuples.
s = X.shape
if len(s) > 1:
# Tuplify each sub-array in the input.
return tuple(tuplify(row) for row in X)
else:
# Single dimension input, just return tuple of contents.
return tuple(r for r in X)
def test_check_tuple_input():
# Ensures that checks return valid tuples.
rng = np.random.RandomState(0)
XA = rng.random_sample((5, 4))
XA_tuples = tuplify(XA)
XB = rng.random_sample((5, 4))
XB_tuples = tuplify(XB)
XA_checked, XB_checked = check_pairwise_arrays(XA_tuples, XB_tuples)
assert_array_equal(XA_tuples, XA_checked)
assert_array_equal(XB_tuples, XB_checked)
def test_check_preserve_type():
# Ensures that type float32 is preserved.
XA = np.resize(np.arange(40), (5, 8)).astype(np.float32)
XB = np.resize(np.arange(40), (5, 8)).astype(np.float32)
XA_checked, XB_checked = check_pairwise_arrays(XA, None)
assert_equal(XA_checked.dtype, np.float32)
# both float32
XA_checked, XB_checked = check_pairwise_arrays(XA, XB)
assert_equal(XA_checked.dtype, np.float32)
assert_equal(XB_checked.dtype, np.float32)
# mismatched A
XA_checked, XB_checked = check_pairwise_arrays(XA.astype(np.float),
XB)
assert_equal(XA_checked.dtype, np.float)
assert_equal(XB_checked.dtype, np.float)
# mismatched B
XA_checked, XB_checked = check_pairwise_arrays(XA,
XB.astype(np.float))
assert_equal(XA_checked.dtype, np.float)
assert_equal(XB_checked.dtype, np.float)
|
bsd-3-clause
|
kohr-h/odl
|
examples/tomo/checks/check_axes_parallel3d_fp.py
|
2
|
6353
|
"""Parallel 3D example for checking that orientations are handled correctly.
Due to differing axis conventions between ODL and the ray transform
back-ends, a check is needed to confirm that the translation steps are
done correctly.
All pairs of plots of ODL projections and NumPy axis sums should look
the same in the sense that they should show the same features in the
right arrangement (not flipped, rotated, etc.).
"""
# %% Set up the things that never change
import matplotlib.pyplot as plt
import numpy as np
import odl
# Set back-end here (for `None` the fastest available is chosen)
impl = None
# Set a volume shift. This should move the projections in the same direction.
shift = np.array([0.0, 25.0, 0.0])
vol_shape = (100, 150, 200)
vol_max_pt = np.array(vol_shape, dtype=float) / 2
vol_min_pt = -vol_max_pt
reco_space = odl.uniform_discr(vol_min_pt + shift, vol_max_pt + shift,
vol_shape, dtype='float32')
phantom = odl.phantom.indicate_proj_axis(reco_space)
assert np.allclose(reco_space.cell_sides, 1)
# Check projections at 0, 90, 180 and 270 degrees
grid = odl.RectGrid([0, np.pi / 2, np.pi, 3 * np.pi / 2])
angle_partition = odl.uniform_partition_fromgrid(grid)
# Make detector large enough to cover the object
det_size = np.floor(1.1 * np.sqrt(np.sum(np.square(vol_shape))))
det_shape = (int(det_size), int(det_size))
det_max_pt = np.array([det_size / 2, det_size / 2])
det_min_pt = -det_max_pt
detector_partition = odl.uniform_partition(det_min_pt, det_max_pt, det_shape)
assert np.allclose(detector_partition.cell_sides, 1)
# Sum manually using Numpy
sum_along_x = np.sum(phantom, axis=0)
sum_along_y = np.sum(phantom, axis=1)
sum_along_z = np.sum(phantom, axis=2)
# %% Test case 1: axis = [0, 0, 1] -- setup
geometry = odl.tomo.Parallel3dAxisGeometry(angle_partition, detector_partition,
axis=[0, 0, 1])
# Check initial configuration
assert np.allclose(geometry.det_axes_init[0], [1, 0, 0])
assert np.allclose(geometry.det_axes_init[1], [0, 0, 1])
assert np.allclose(geometry.det_pos_init, [0, 1, 0])
# Create projections
ray_trafo = odl.tomo.RayTransform(reco_space, geometry, impl=impl)
proj_data = ray_trafo(phantom)
# %% axis = [0, 0, 1], projection along y axis
# Axes in this image are (x, z). This corresponds to
# axis = [0, 0, 1], 0 degrees
proj_data.show(indices=[0, None, None],
title='Projection at 0 Degrees, Axis [0, 0, 1], u = x, v = z')
sum_along_y.show('Sum Along Y Axis')
# Check axes in geometry
axes_sum_y = geometry.det_axes(np.deg2rad(0))
assert np.allclose(axes_sum_y[0], [1, 0, 0])
assert np.allclose(axes_sum_y[1], [0, 0, 1])
# %% axis = [0, 0, 1], projection along x axis
# Axes in this image are (y, z). This corresponds to
# axis = [0, 0, 1], 90 degrees
proj_data.show(indices=[1, None, None],
title='Projection at 90 Degrees, Axis [0, 0, 1], u = y, v = z')
sum_along_x.show('Sum Along X Axis')
# Check axes in geometry
axes_sum_x = geometry.det_axes(np.deg2rad(90))
assert np.allclose(axes_sum_x[0], [0, 1, 0])
assert np.allclose(axes_sum_x[1], [0, 0, 1])
# %% Test case 2: axis = [0, 1, 0] -- setup
geometry = odl.tomo.Parallel3dAxisGeometry(angle_partition, detector_partition,
axis=[0, 1, 0])
# Check initial configuration
assert np.allclose(geometry.det_axes_init[0], [1, 0, 0])
assert np.allclose(geometry.det_axes_init[1], [0, 1, 0])
assert np.allclose(geometry.det_pos_init, [0, 0, -1])
# Create projections
ray_trafo = odl.tomo.RayTransform(reco_space, geometry, impl=impl)
proj_data = ray_trafo(phantom)
# %% axis = [0, 1, 0], projection along z axis
# Axes in this image are (x, y). This corresponds to:
# axis = [0, 1, 0], 0 degrees
proj_data.show(indices=[0, None, None],
title='Projection at 0 Degrees, Axis [0, 1, 0], u = x, v = y')
sum_along_z.show('Sum Along Z Axis')
# Check geometry axes
axes_sum_z = geometry.det_axes(np.deg2rad(0))
assert np.allclose(axes_sum_z[0], [1, 0, 0])
assert np.allclose(axes_sum_z[1], [0, 1, 0])
# %% axis = [0, 1, 0], projection along x axis
# Axes in this image are (z, y). This corresponds to
# axis = [0, 1, 0], 270 degrees
proj_data.show(indices=[3, None, None],
title='Projection at 270 Degrees, Axis [0, 1, 0], u = z, v = y')
fig, ax = plt.subplots()
ax.imshow(sum_along_x, cmap='bone', origin='lower')
ax.set_xlabel('z')
ax.set_ylabel('y')
plt.title('Sum Along X Axis, Transposed')
plt.show()
# Check geometry axes
axes_sum_x_T = geometry.det_axes(np.deg2rad(270))
assert np.allclose(axes_sum_x_T[0], [0, 0, 1])
assert np.allclose(axes_sum_x_T[1], [0, 1, 0])
# %% Test case 3: axis = [1, 0, 0] -- setup
geometry = odl.tomo.Parallel3dAxisGeometry(angle_partition, detector_partition,
axis=[1, 0, 0])
# Check initial configuration
assert np.allclose(geometry.det_axes_init[0], [0, 0, -1])
assert np.allclose(geometry.det_axes_init[1], [1, 0, 0])
assert np.allclose(geometry.det_pos_init, [0, 1, 0])
# Create projections
ray_trafo = odl.tomo.RayTransform(reco_space, geometry, impl=impl)
proj_data = ray_trafo(phantom)
# %% axis = [1, 0, 0], projection along z axis
# Axes in this image are (y, x). This corresponds to
# axis = [1, 0, 0], 90 degrees
proj_data.show(indices=[1, None, None],
title='Projection at 90 Degrees, Axis [1, 0, 0], u = y, v = x')
fig, ax = plt.subplots()
ax.imshow(sum_along_z, cmap='bone', origin='lower')
ax.set_xlabel('y')
ax.set_ylabel('x')
plt.title('Sum Along Z Axis, Transposed')
plt.show()
# Check geometry axes
axes_sum_z_T = geometry.det_axes(np.deg2rad(90))
assert np.allclose(axes_sum_z_T[0], [0, 1, 0])
assert np.allclose(axes_sum_z_T[1], [1, 0, 0])
# %% axis = [1, 0, 0], projection along y axis
# Axes in this image are (z, x). This corresponds to
# axis = [1, 0, 0], 180 degrees
proj_data.show(indices=[2, None, None],
title='Projection at 180 Degrees, Axis [1, 0, 0], u = z, v = x')
fig, ax = plt.subplots()
ax.imshow(sum_along_y, cmap='bone', origin='lower')
ax.set_xlabel('z')
ax.set_ylabel('x')
plt.title('Sum Along Y Axis, Transposed')
plt.show()
# Check geometry axes
axes_sum_y = geometry.det_axes(np.deg2rad(180))
assert np.allclose(axes_sum_y[0], [0, 0, 1])
assert np.allclose(axes_sum_y[1], [1, 0, 0])
|
mpl-2.0
|
AntoineRiaud/Tweezer_design
|
Tweezer_design/filtered_der.py
|
1
|
1285
|
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 15 22:33:49 2016
@author: Antoine
"""
from numpy import append as npappend
from numpy import diff,interp
from numpy import pi as np_pi
#from scipy.fftpack import diff as fftpackdiff
from scipy.signal import butter as signalbutter
from scipy.signal import filtfilt as signalfiltfilt
import matplotlib.pyplot as plt
from copy import deepcopy
twopi = 2*np_pi
def periodic_derivative(x,y,max_periods):
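    # Smoothed derivative of a periodic signal: midpoint interpolation and a
    # finite difference, followed by a zero-phase low-pass (8th-order
    # Butterworth with cutoff set by max_periods) applied via filtfilt.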
plot = False
Ns =len(x)
b,a = signalbutter(8,2.0*max_periods/Ns)
ymid =interp(x+0.5*(x[1]-x[0]),x,y,period=2*np_pi)
yder = diff(ymid)/diff(x)
#yder = Ns/(max(x)-min(x))*fftpackdiff(y,1,Ns)
yder_filt = deepcopy(yder)
x_filt = deepcopy(x)
x_filt = npappend(x_filt,x_filt[-1]+x_filt[1]-x_filt[0])
yder_filt = signalfiltfilt(b,a,npappend(yder_filt,yder_filt[0]))
if plot:
plt.figure(1)
plt.subplot(311)
plt.plot(x, y)
plt.subplot(312)
plt.plot(x[0:-1],yder)
plt.subplot(313)
plt.plot(x_filt[0:-1],yder_filt)
plt.show()
return yder_filt
if __name__ == '__main__':
    # Quick self-check of periodic_derivative on a sum of two sines
    # (set plot=True inside the function to inspect the figures).
    import numpy
    x = numpy.array(range(100))
    y = numpy.sin(twopi*x/100) + numpy.sin(twopi*x/10)
    periodic_derivative(x, y, 4)
|
gpl-3.0
|
marco-mariotti/selenoprofiles
|
libraries/networkx/readwrite/gml.py
|
1
|
11393
|
"""
Read graphs in GML format.
"GML, the G>raph Modelling Language, is our proposal for a portable
file format for graphs. GML's key features are portability, simple
syntax, extensibility and flexibility. A GML file consists of
hierarchical key-value lists. Graphs can be annotated with arbitrary
data structures. The idea for a common file format was born at the
GD'95; this proposal is the outcome of many discussions. GML is the
standard file format in the Graphlet graph editor system. It has been
overtaken and adapted by several other systems for drawing graphs."
See http://www.infosun.fim.uni-passau.de/Graphlet/GML/gml-tr.html
Requires pyparsing: http://pyparsing.wikispaces.com/
Format
------
See http://www.infosun.fim.uni-passau.de/Graphlet/GML/gml-tr.html
for format specification.
Example graphs in GML format:
http://www-personal.umich.edu/~mejn/netdata/
"""
__author__ = """Aric Hagberg ([email protected])"""
# Copyright (C) 2008-2010 by
# Aric Hagberg <[email protected]>
# Dan Schult <[email protected]>
# Pieter Swart <[email protected]>
# All rights reserved.
# BSD license.
__all__ = ['read_gml', 'parse_gml', 'generate_gml', 'write_gml']
import networkx as nx
from networkx.exception import NetworkXException, NetworkXError
from networkx.utils import _get_fh, is_string_like
def read_gml(path,encoding='UTF-8',labels=True):
"""Read graph in GML format from path.
Parameters
----------
path : filename or filehandle
The filename or filehandle to read from.
encoding : string, optional
Text encoding.
labels : bool, optional
If True use the GML node label attribute for node names otherwise use
the node id.
Returns
-------
G : MultiGraph or MultiDiGraph
Raises
------
ImportError
If the pyparsing module is not available.
See Also
--------
write_gml, parse_gml
Notes
-----
Requires pyparsing: http://pyparsing.wikispaces.com/
References
----------
GML specification:
http://www.infosun.fim.uni-passau.de/Graphlet/GML/gml-tr.html
Examples
--------
>>> G=nx.path_graph(4)
>>> nx.write_gml(G,'test.gml')
>>> H=nx.read_gml('test.gml')
"""
fh=_get_fh(path,'rb')
lines=(line.decode(encoding) for line in fh)
G=parse_gml(lines,labels=labels)
fh.close()
return G
def parse_gml(lines, labels=True):
"""Parse GML graph from a string or iterable.
Parameters
----------
lines : string or iterable
Data in GML format.
labels : bool, optional
If True use the GML node label attribute for node names otherwise use
the node id.
Returns
-------
G : MultiGraph or MultiDiGraph
Raises
------
ImportError
If the pyparsing module is not available.
See Also
--------
write_gml, read_gml
Notes
-----
This stores nested GML attributes as dictionaries in the
NetworkX graph, node, and edge attribute structures.
Requires pyparsing: http://pyparsing.wikispaces.com/
References
----------
GML specification:
http://www.infosun.fim.uni-passau.de/Graphlet/GML/gml-tr.html
"""
try:
from pyparsing import ParseException
except ImportError:
try:
from matplotlib.pyparsing import ParseException
except:
raise ImportError('Import Error: not able to import pyparsing:',
'http://pyparsing.wikispaces.com/')
try:
data = "".join(lines)
gml = pyparse_gml()
tokens =gml.parseString(data)
except ParseException as err:
print((err.line))
print((" "*(err.column-1) + "^"))
print(err)
raise
# function to recursively make dicts of key/value pairs
def wrap(tok):
listtype=type(tok)
result={}
for k,v in tok:
if type(v)==listtype:
result[k]=wrap(v)
else:
result[k]=v
return result
# Set flag
multigraph=False
# but assume multigraphs to start
if tokens.directed==1:
G=nx.MultiDiGraph()
else:
G=nx.MultiGraph()
for k,v in tokens.asList():
if k=="node":
vdict=wrap(v)
node=vdict['id']
G.add_node(node,attr_dict=vdict)
elif k=="edge":
vdict=wrap(v)
source=vdict.pop('source')
target=vdict.pop('target')
if G.has_edge(source,target):
multigraph=True
G.add_edge(source,target,attr_dict=vdict)
# switch to Graph or DiGraph if no parallel edges were found.
if not multigraph:
if G.is_directed():
G=nx.DiGraph(G)
else:
G=nx.Graph(G)
if labels:
mapping=dict((n,d['label']) for n,d in G.node.items())
G=nx.relabel_nodes(G,mapping)
return G
def pyparse_gml():
"""A pyparsing tokenizer for GML graph format.
This is not intended to be called directly.
See Also
--------
write_gml, read_gml, parse_gml
"""
try:
from pyparsing import \
Literal, CaselessLiteral, Word, Forward,\
ZeroOrMore, Group, Dict, Optional, Combine,\
ParseException, restOfLine, White, alphas, alphanums, nums,\
OneOrMore,quotedString,removeQuotes,dblQuotedString, Regex
except ImportError:
try:
from matplotlib.pyparsing import \
Literal, CaselessLiteral, Word, Forward,\
ZeroOrMore, Group, Dict, Optional, Combine,\
ParseException, restOfLine, White, alphas, alphanums, nums,\
OneOrMore,quotedString,removeQuotes,dblQuotedString, Regex
except:
raise ImportError('pyparsing not found',
'http://pyparsing.wikispaces.com/')
lbrack = Literal("[").suppress()
rbrack = Literal("]").suppress()
pound = ("#")
comment = pound + Optional( restOfLine )
integer = Word(nums+'-').setParseAction(lambda s,l,t:[ int(t[0])])
real = Regex(r"[+-]?\d+\.\d*([eE][+-]?\d+)?").setParseAction(
lambda s,l,t:[ float(t[0]) ])
key = Word(alphas,alphanums+'_')
value_atom = (real | integer | Word(alphanums) | dblQuotedString)
value = Forward() # to be defined later with << operator
keyvalue = Group(key+value)
value << (value_atom | Group( lbrack + ZeroOrMore(keyvalue) + rbrack ))
node = Group(Literal("node") + lbrack + Group(OneOrMore(keyvalue)) + rbrack)
edge = Group(Literal("edge") + lbrack + Group(OneOrMore(keyvalue)) + rbrack)
creator = Group(Literal("Creator")+ Optional( restOfLine ))
version = Group(Literal("Version")+ Optional( restOfLine ))
graphkey = Literal("graph").suppress()
graph = Dict (Optional(creator)+Optional(version)+\
graphkey + lbrack + ZeroOrMore( (node|edge|keyvalue) ) + rbrack )
graph.ignore(comment)
return graph
def generate_gml(G):
"""Generate a single entry of the graph G in GML format.
Parameters
----------
G : NetworkX graph
Yields
------
lines: string
Lines in GML format.
Notes
-----
This implementation does not support all Python data types as GML
data. Nodes, node attributes, edge attributes, and graph
    attributes must be either dictionaries or single strings or
    numbers. If they are not, an attempt is made to represent them as
strings. For example, a list as edge data
G[1][2]['somedata']=[1,2,3], will be represented in the GML file
as::
edge [
source 1
target 2
somedata "[1, 2, 3]"
]
"""
# recursively make dicts into gml brackets
dicttype=type({})
def listify(d,indent,indentlevel):
result='[ \n'
dicttype=type({})
for k,v in d.items():
if type(v)==dicttype:
v=listify(v,indent,indentlevel+1)
result += indentlevel*indent+"%s %s\n"%(k,v)
return result+indentlevel*indent+"]"
# check for attributes or assign empty dict
if hasattr(G,'graph_attr'):
graph_attr=G.graph_attr
else:
graph_attr={}
if hasattr(G,'node_attr'):
node_attr=G.node_attr
else:
node_attr={}
indent=2*' '
count=iter(range(len(G)))
node_id={}
yield "graph ["
if G.is_directed():
yield indent+"directed 1"
# write graph attributes
for k,v in list(G.graph.items()):
if type(v)==dicttype:
v=listify(v,indent,2)
elif not is_string_like(v):
v='"%s"'%v
yield indent+"%s %s"%(k,v)
# write nodes
for n in G:
yield indent+"node ["
# get id or assign number
nid=G.node[n].get('id',next(count))
node_id[n]=nid
yield 2*indent+"id %s"%nid
yield 2*indent+"label %s"%n
if n in G:
for k,v in list(G.node[n].items()):
if k=='id': continue
if type(v)==dicttype:
v=listify(v,indent,3)
elif not is_string_like(v):
v='"%s"'%v
yield 2*indent+"%s %s"%(k,v)
yield indent+"]"
# write edges
for u,v,edgedata in G.edges_iter(data=True):
# try to guess what is on the edge and do something reasonable
yield indent+"edge ["
yield 2*indent+"source %s"%node_id[u]
yield 2*indent+"target %s"%node_id[v]
for k,v in list(edgedata.items()):
if k=='source': continue
if k=='target': continue
if type(v)==dicttype:
v=listify(v,indent,3)
elif not is_string_like(v):
v='"%s"'%v
yield 2*indent+"%s %s"%(k,v)
yield indent+"]"
yield "]"
def write_gml(G, path):
"""
Write the graph G in GML format to the file or file handle path.
Parameters
----------
path : filename or filehandle
The filename or filehandle to write. Filenames ending in
       .gz or .bz2 will be compressed.
See Also
--------
read_gml, parse_gml
Notes
-----
    GML specifications indicate that the file should only use
    7-bit ASCII text encoding: iso8859-1 (latin-1).
This implementation does not support all Python data types as GML
data. Nodes, node attributes, edge attributes, and graph
    attributes must be either dictionaries or single strings or
    numbers. If they are not, an attempt is made to represent them as
strings. For example, a list as edge data
G[1][2]['somedata']=[1,2,3], will be represented in the GML file
as::
edge [
source 1
target 2
somedata "[1, 2, 3]"
]
Examples
---------
>>> G=nx.path_graph(4)
>>> nx.write_gml(G,"test.gml")
Filenames ending in .gz or .bz2 will be compressed.
>>> nx.write_gml(G,"test.gml.gz")
"""
fh=_get_fh(path,mode='wb')
for line in generate_gml(G):
line+='\n'
fh.write(line.encode('latin-1'))
# fixture for nose tests
def setup_module(module):
from nose import SkipTest
try:
import pyparsing
except:
try:
import matplotlib.pyparsing
except:
raise SkipTest("pyparsing not available")
|
gpl-2.0
|
OpenWeavers/openanalysis
|
openanalysis/searching.py
|
2
|
5161
|
import matplotlib.pyplot as plt
import numpy as np
__all__ = ['SearchAnalyzer', 'SearchingAlgorithm']
class SearchingAlgorithm:
"""
Base class for all Searching algorithms
    Increment the number of basic comparisons, 'self.count', in the inner-most
loop of your algorithmic implementation every time the control enters the
loop to obtain correct visualization
"""
def __init__(self, name):
"""
Constructor
:param name: Name of Searching algorithm being implemented
"""
self.count = 0
self.name = name
def search(self, arr, key):
"""
The core search method
        :param arr: numpy array, in which the search is performed
:param key: the element to be searched
:return: True if key in arr else False
"""
self.count = 0
pass
# Do search in derived classes
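# Illustrative sketch (not part of the original module): a minimal concrete
# subclass showing how 'self.count' is meant to be incremented in the
# innermost loop, as described in the SearchingAlgorithm docstring above.
# The class and display names are made up for this example.
class ExampleLinearSearch(SearchingAlgorithm):
    def __init__(self):
        SearchingAlgorithm.__init__(self, 'Example Linear Search')

    def search(self, arr, key):
        self.count = 0
        for item in arr:
            self.count += 1  # one basic comparison per loop iteration
            if item == key:
                return True
        return False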
class SearchAnalyzer:
"""
Class for Visualizing Search algorithms
"""
def __init__(self, searcher):
"""
Constructor for visualizer
:param searcher: Implementation of a Searching Algorithm
"""
self.searcher = searcher() # Instantiate
self.fig = plt.figure()
def analyze(self, maxpts=1000, progress=True):
"""
        Plots the running time of the searching algorithm
        Four cases are checked: key at the first position, key at the middle
        position, key not present in the array, and key at a random position
        Analysis is done by inputting integer arrays with sizes starting from 100, and varying
        up to maxpts in steps of 100, and counting the number of basic operations
        :param maxpts: Maximum number of elements in the array using which analysis is done
:param progress: Boolean indicating whether to show the progress bar or not
"""
# x Number of elements
# y[0] number of comparisons when First Element is the key
# y[1] number of comparisons when Middle Element is the key
# y[2] number of comparisons when key is not present in the array
# y[3] number of comparisons when key is a randomly chosen element
x, y = np.array([0]), [np.array([0]), np.array([0]), np.array([0]), np.array([0])]
labels = ['First Element is the key', 'Middle Element is the key',
'Key not in array', 'Key at random position in the array']
print('Please wait while analyzing {} Algorithm'.format(self.searcher.name))
if progress:
import progressbar
count = 0
max_count = (maxpts - 100) // 100
bar = progressbar.ProgressBar(max_value=max_count)
for i in range(100, maxpts, 100):
if progress:
count += 1
bar.update(count)
x = np.vstack((x, [i]))
arr = np.arange(0, i, 1)
keys = [0, i // 2, i + 1, np.random.randint(0, i)]
for j in range(4):
self.searcher.search(arr, keys[j])
y[j] = np.vstack((y[j], [self.searcher.count]))
plt.suptitle(self.searcher.name + " Analysis", size=19)
for i in range(4):
plt.subplot(2, 2, i + 1)
plt.title(labels[i])
plt.xlabel("No. of Elements")
plt.ylabel("No. of Basic Operations")
plt.scatter(x, y[i])
plt.tight_layout()
plt.show()
@staticmethod
def compare(algorithms, pts=2000, maxrun=5, progress=True):
"""
Compares the given list of Searching algorithms and Plots a bar chart
:param algorithms: List of Searching algorithms
:param pts: Number of elements in testing array
:param maxrun: Number of iterations to take average
:param progress: Whether to show Progress bar or not
"""
arr = np.arange(pts)
algorithms = [x() for x in algorithms]
operations = {x.name: 0 for x in algorithms}
print('Please wait while comparing Searching Algorithms')
if progress:
import progressbar
count = 0
max_count = maxrun * len(algorithms)
bar = progressbar.ProgressBar(max_value=max_count)
for _ in range(maxrun):
key = np.random.randint(0, 2000)
for algorithm in algorithms:
if progress:
count += 1
bar.update(count)
algorithm.search(arr, key)
operations[algorithm.name] += algorithm.count
operations = [(k, v / maxrun) for k, v in operations.items()]
        plt.suptitle('Searching Algorithm Comparison\nAveraged over {} loops'.format(maxrun))
rects = plt.bar(left=np.arange(len(operations)), height=[y for (x, y) in operations])
plt.xticks(np.arange(len(operations)), [x for (x, y) in operations])
ax = plt.axes()
for rect in rects:
height = rect.get_height()
ax.text(rect.get_x() + rect.get_width() / 2., 1.05 * height,
'%d' % int(height),
ha='center', va='bottom')
plt.ylabel('Average number of basic operations')
plt.show()
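# Usage sketch (illustrative, not part of the original module); it assumes a
# concrete SearchingAlgorithm subclass such as ExampleLinearSearch above:
#
#     analyzer = SearchAnalyzer(ExampleLinearSearch)
#     analyzer.analyze(maxpts=1000, progress=False)
#     SearchAnalyzer.compare([ExampleLinearSearch], pts=2000, maxrun=5,
#                            progress=False)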
|
gpl-3.0
|
estnltk/textclassifier
|
textclassifier/tests/test_report.py
|
1
|
1349
|
from __future__ import unicode_literals, print_function, absolute_import
from ..classifier import ClfBase
from ..settings import Settings
from ..reportgenerator import ReportGenerator, ReportGeneratorData
from ..featureextractor import FeatureExtractor
from ..paths import TEST_PATH
import unittest
import os
import pandas as pd
class ReportGeneratorAcceptanceTest(unittest.TestCase):
def test_generation(self):
# 1. read the settings
settings = Settings.read(os.path.join(TEST_PATH, 'weather.def'),
os.path.join(TEST_PATH, 'weather.txt'))
# 2. load the dataframe
dataframe = pd.read_excel(os.path.join(TEST_PATH, 'weather.xlsx'), 'Sheet1')
# 3. generate cross-validation statistics
base = ClfBase(FeatureExtractor(settings, dataframe))
self.assertEqual(len(base.cv_stats), 4)
for stat in base.cv_stats:
self.assertTrue(stat is not None)
        # 4. train classifier to obtain coefficients
clf = base.get_new_classifier()
clf.fit(base._fe.X, base._fe.y)
# 5. generate report
repgen = ReportGenerator(ReportGeneratorData(base, clf.coef_))
self.assertTrue(len(repgen.classification_report) > 100)
self.assertTrue(len(repgen.misclassified_data) > 100)
|
gpl-2.0
|
kenshay/ImageScript
|
ProgramData/SystemFiles/Python/Lib/site-packages/dask/dataframe/tests/test_dataframe.py
|
2
|
110599
|
import textwrap
from distutils.version import LooseVersion
from itertools import product
from operator import add
import pandas as pd
import pandas.util.testing as tm
import numpy as np
import pytest
import dask
import dask.array as da
import dask.dataframe as dd
from dask.base import compute_as_if_collection
from dask.compatibility import PY2
from dask.utils import put_lines, M
from dask.dataframe.core import repartition_divisions, aca, _concat, Scalar
from dask.dataframe import methods
from dask.dataframe.utils import (assert_eq, make_meta, assert_max_deps,
PANDAS_VERSION)
if PANDAS_VERSION >= '0.20.0':
from pandas.io.formats import format as pandas_format
else:
from pandas.formats import format as pandas_format
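# `dsk` below is a low-level dask graph: keys are (name, partition-index)
# tuples mapping to pandas partitions, and the divisions [0, 5, 9, 9] passed
# to dd.DataFrame give the index boundaries of the three partitions.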
dsk = {('x', 0): pd.DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6]},
index=[0, 1, 3]),
('x', 1): pd.DataFrame({'a': [4, 5, 6], 'b': [3, 2, 1]},
index=[5, 6, 8]),
('x', 2): pd.DataFrame({'a': [7, 8, 9], 'b': [0, 0, 0]},
index=[9, 9, 9])}
meta = make_meta({'a': 'i8', 'b': 'i8'}, index=pd.Index([], 'i8'))
d = dd.DataFrame(dsk, 'x', meta, [0, 5, 9, 9])
full = d.compute()
def test_Dataframe():
expected = pd.Series([2, 3, 4, 5, 6, 7, 8, 9, 10],
index=[0, 1, 3, 5, 6, 8, 9, 9, 9],
name='a')
assert_eq(d['a'] + 1, expected)
tm.assert_index_equal(d.columns, pd.Index(['a', 'b']))
assert_eq(d[d['b'] > 2], full[full['b'] > 2])
assert_eq(d[['a', 'b']], full[['a', 'b']])
assert_eq(d.a, full.a)
assert d.b.mean().compute() == full.b.mean()
assert np.allclose(d.b.var().compute(), full.b.var())
assert np.allclose(d.b.std().compute(), full.b.std())
assert d.index._name == d.index._name # this is deterministic
assert repr(d)
def test_head_tail():
assert_eq(d.head(2), full.head(2))
assert_eq(d.head(3), full.head(3))
assert_eq(d.head(2), dsk[('x', 0)].head(2))
assert_eq(d['a'].head(2), full['a'].head(2))
assert_eq(d['a'].head(3), full['a'].head(3))
assert_eq(d['a'].head(2), dsk[('x', 0)]['a'].head(2))
assert (sorted(d.head(2, compute=False).dask) ==
sorted(d.head(2, compute=False).dask))
assert (sorted(d.head(2, compute=False).dask) !=
sorted(d.head(3, compute=False).dask))
assert_eq(d.tail(2), full.tail(2))
assert_eq(d.tail(3), full.tail(3))
assert_eq(d.tail(2), dsk[('x', 2)].tail(2))
assert_eq(d['a'].tail(2), full['a'].tail(2))
assert_eq(d['a'].tail(3), full['a'].tail(3))
assert_eq(d['a'].tail(2), dsk[('x', 2)]['a'].tail(2))
assert (sorted(d.tail(2, compute=False).dask) ==
sorted(d.tail(2, compute=False).dask))
assert (sorted(d.tail(2, compute=False).dask) !=
sorted(d.tail(3, compute=False).dask))
def test_head_npartitions():
assert_eq(d.head(5, npartitions=2), full.head(5))
assert_eq(d.head(5, npartitions=2, compute=False), full.head(5))
assert_eq(d.head(5, npartitions=-1), full.head(5))
assert_eq(d.head(7, npartitions=-1), full.head(7))
assert_eq(d.head(2, npartitions=-1), full.head(2))
with pytest.raises(ValueError):
d.head(2, npartitions=5)
def test_head_npartitions_warn():
with pytest.warns(None):
d.head(100)
with pytest.warns(None):
d.head(7)
with pytest.warns(None):
d.head(7, npartitions=2)
def test_index_head():
assert_eq(d.index.head(2), full.index[:2])
assert_eq(d.index.head(3), full.index[:3])
def test_Series():
assert isinstance(d.a, dd.Series)
assert isinstance(d.a + 1, dd.Series)
assert_eq((d + 1), full + 1)
def test_Index():
for case in [pd.DataFrame(np.random.randn(10, 5), index=list('abcdefghij')),
pd.DataFrame(np.random.randn(10, 5),
index=pd.date_range('2011-01-01', freq='D',
periods=10))]:
ddf = dd.from_pandas(case, 3)
assert_eq(ddf.index, case.index)
pytest.raises(AttributeError, lambda: ddf.index.index)
def test_Scalar():
val = np.int64(1)
s = Scalar({('a', 0): val}, 'a', 'i8')
assert hasattr(s, 'dtype')
assert 'dtype' in dir(s)
assert_eq(s, val)
assert repr(s) == "dd.Scalar<a, dtype=int64>"
val = pd.Timestamp('2001-01-01')
s = Scalar({('a', 0): val}, 'a', val)
assert not hasattr(s, 'dtype')
assert 'dtype' not in dir(s)
assert_eq(s, val)
assert repr(s) == "dd.Scalar<a, type=Timestamp>"
def test_attributes():
assert 'a' in dir(d)
assert 'foo' not in dir(d)
pytest.raises(AttributeError, lambda: d.foo)
df = dd.from_pandas(pd.DataFrame({'a b c': [1, 2, 3]}), npartitions=2)
assert 'a b c' not in dir(df)
df = dd.from_pandas(pd.DataFrame({'a': [1, 2], 5: [1, 2]}), npartitions=2)
assert 'a' in dir(df)
assert 5 not in dir(df)
df = dd.from_pandas(tm.makeTimeDataFrame(), npartitions=3)
pytest.raises(AttributeError, lambda: df.foo)
def test_column_names():
tm.assert_index_equal(d.columns, pd.Index(['a', 'b']))
tm.assert_index_equal(d[['b', 'a']].columns, pd.Index(['b', 'a']))
assert d['a'].name == 'a'
assert (d['a'] + 1).name == 'a'
assert (d['a'] + d['b']).name is None
def test_index_names():
assert d.index.name is None
idx = pd.Index([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], name='x')
df = pd.DataFrame(np.random.randn(10, 5), idx)
ddf = dd.from_pandas(df, 3)
assert ddf.index.name == 'x'
assert ddf.index.compute().name == 'x'
@pytest.mark.parametrize(
'npartitions',
[1, pytest.mark.xfail(2, reason='pandas join removes freq')]
)
def test_timezone_freq(npartitions):
s_naive = pd.Series(pd.date_range('20130101', periods=10))
s_aware = pd.Series(pd.date_range('20130101', periods=10, tz='US/Eastern'))
pdf = pd.DataFrame({'tz': s_aware, 'notz': s_naive})
ddf = dd.from_pandas(pdf, npartitions=npartitions)
assert pdf.tz[0].freq == ddf.compute().tz[0].freq == ddf.tz.compute()[0].freq
def test_rename_columns():
# GH 819
df = pd.DataFrame({'a': [1, 2, 3, 4, 5, 6, 7],
'b': [7, 6, 5, 4, 3, 2, 1]})
ddf = dd.from_pandas(df, 2)
ddf.columns = ['x', 'y']
df.columns = ['x', 'y']
tm.assert_index_equal(ddf.columns, pd.Index(['x', 'y']))
tm.assert_index_equal(ddf._meta.columns, pd.Index(['x', 'y']))
assert_eq(ddf, df)
msg = r"Length mismatch: Expected axis has 2 elements, new values have 4 elements"
with pytest.raises(ValueError) as err:
ddf.columns = [1, 2, 3, 4]
assert msg in str(err.value)
# Multi-index columns
df = pd.DataFrame({('A', '0') : [1, 2, 2, 3], ('B', 1) : [1, 2, 3, 4]})
ddf = dd.from_pandas(df, npartitions=2)
df.columns = ['x', 'y']
ddf.columns = ['x', 'y']
tm.assert_index_equal(ddf.columns, pd.Index(['x', 'y']))
tm.assert_index_equal(ddf._meta.columns, pd.Index(['x', 'y']))
assert_eq(ddf, df)
def test_rename_series():
# GH 819
s = pd.Series([1, 2, 3, 4, 5, 6, 7], name='x')
ds = dd.from_pandas(s, 2)
s.name = 'renamed'
ds.name = 'renamed'
assert s.name == 'renamed'
assert_eq(ds, s)
ind = s.index
dind = ds.index
ind.name = 'renamed'
dind.name = 'renamed'
assert ind.name == 'renamed'
assert_eq(dind, ind)
def test_rename_series_method():
# Series name
s = pd.Series([1, 2, 3, 4, 5, 6, 7], name='x')
ds = dd.from_pandas(s, 2)
assert_eq(ds.rename('y'), s.rename('y'))
assert ds.name == 'x' # no mutation
assert_eq(ds.rename(), s.rename())
ds.rename('z', inplace=True)
s.rename('z', inplace=True)
assert ds.name == 'z'
assert_eq(ds, s)
# Series index
s = pd.Series(['a', 'b', 'c', 'd', 'e', 'f', 'g'], name='x')
ds = dd.from_pandas(s, 2)
for is_sorted in [True, False]:
res = ds.rename(lambda x: x ** 2, sorted_index=is_sorted)
assert_eq(res, s.rename(lambda x: x ** 2))
assert res.known_divisions == is_sorted
res = ds.rename(s, sorted_index=is_sorted)
assert_eq(res, s.rename(s))
assert res.known_divisions == is_sorted
with pytest.raises(ValueError):
ds.rename(lambda x: -x, sorted_index=True)
assert_eq(ds.rename(lambda x: -x), s.rename(lambda x: -x))
res = ds.rename(ds)
assert_eq(res, s.rename(s))
assert not res.known_divisions
ds2 = ds.clear_divisions()
res = ds2.rename(lambda x: x**2, sorted_index=True)
assert_eq(res, s.rename(lambda x: x**2))
assert not res.known_divisions
res = ds.rename(lambda x: x**2, inplace=True, sorted_index=True)
assert res is ds
s.rename(lambda x: x**2, inplace=True)
assert_eq(ds, s)
def test_describe():
    # prepare a test case in which the approximate quantiles equal the exact ones
s = pd.Series(list(range(20)) * 4)
df = pd.DataFrame({'a': list(range(20)) * 4, 'b': list(range(4)) * 20})
ds = dd.from_pandas(s, 4)
ddf = dd.from_pandas(df, 4)
assert_eq(s.describe(), ds.describe())
assert_eq(df.describe(), ddf.describe())
assert_eq(s.describe(), ds.describe(split_every=2))
assert_eq(df.describe(), ddf.describe(split_every=2))
assert ds.describe(split_every=2)._name != ds.describe()._name
assert ddf.describe(split_every=2)._name != ddf.describe()._name
# remove string columns
df = pd.DataFrame({'a': list(range(20)) * 4, 'b': list(range(4)) * 20,
'c': list('abcd') * 20})
ddf = dd.from_pandas(df, 4)
assert_eq(df.describe(), ddf.describe())
assert_eq(df.describe(), ddf.describe(split_every=2))
def test_describe_empty():
# https://github.com/dask/dask/issues/2326
ddf = dd.from_pandas(pd.DataFrame({"A": ['a', 'b']}), 2)
with pytest.raises(ValueError) as rec:
ddf.describe()
assert 'DataFrame contains only non-numeric data.' in str(rec)
with pytest.raises(ValueError) as rec:
ddf.A.describe()
assert 'Cannot compute ``describe`` on object dtype.' in str(rec)
def test_cumulative():
df = pd.DataFrame(np.random.randn(100, 5), columns=list('abcde'))
df_out = pd.DataFrame(np.random.randn(100, 5), columns=list('abcde'))
ddf = dd.from_pandas(df, 5)
ddf_out = dd.from_pandas(df_out, 5)
assert_eq(ddf.cumsum(), df.cumsum())
assert_eq(ddf.cumprod(), df.cumprod())
assert_eq(ddf.cummin(), df.cummin())
assert_eq(ddf.cummax(), df.cummax())
assert_eq(ddf.cumsum(axis=1), df.cumsum(axis=1))
assert_eq(ddf.cumprod(axis=1), df.cumprod(axis=1))
assert_eq(ddf.cummin(axis=1), df.cummin(axis=1))
assert_eq(ddf.cummax(axis=1), df.cummax(axis=1))
    # test the `out` parameter where it is supported
if LooseVersion(np.__version__) >= '1.13.0':
np.cumsum(ddf, out=ddf_out)
assert_eq(ddf_out, df.cumsum())
np.cumprod(ddf, out=ddf_out)
assert_eq(ddf_out, df.cumprod())
ddf.cummin(out=ddf_out)
assert_eq(ddf_out, df.cummin())
ddf.cummax(out=ddf_out)
assert_eq(ddf_out, df.cummax())
np.cumsum(ddf, out=ddf_out, axis=1)
assert_eq(ddf_out, df.cumsum(axis=1))
np.cumprod(ddf, out=ddf_out, axis=1)
assert_eq(ddf_out, df.cumprod(axis=1))
ddf.cummin(out=ddf_out, axis=1)
assert_eq(ddf_out, df.cummin(axis=1))
ddf.cummax(out=ddf_out, axis=1)
assert_eq(ddf_out, df.cummax(axis=1))
assert_eq(ddf.a.cumsum(), df.a.cumsum())
assert_eq(ddf.a.cumprod(), df.a.cumprod())
assert_eq(ddf.a.cummin(), df.a.cummin())
assert_eq(ddf.a.cummax(), df.a.cummax())
# With NaNs
df = pd.DataFrame({'a': [1, 2, np.nan, 4, 5, 6, 7, 8],
'b': [1, 2, np.nan, np.nan, np.nan, 5, np.nan, np.nan],
'c': [np.nan] * 8})
ddf = dd.from_pandas(df, 3)
assert_eq(df.cumsum(), ddf.cumsum())
assert_eq(df.cummin(), ddf.cummin())
assert_eq(df.cummax(), ddf.cummax())
assert_eq(df.cumprod(), ddf.cumprod())
assert_eq(df.cumsum(skipna=False), ddf.cumsum(skipna=False))
assert_eq(df.cummin(skipna=False), ddf.cummin(skipna=False))
assert_eq(df.cummax(skipna=False), ddf.cummax(skipna=False))
assert_eq(df.cumprod(skipna=False), ddf.cumprod(skipna=False))
assert_eq(df.cumsum(axis=1), ddf.cumsum(axis=1))
assert_eq(df.cummin(axis=1), ddf.cummin(axis=1))
assert_eq(df.cummax(axis=1), ddf.cummax(axis=1))
assert_eq(df.cumprod(axis=1), ddf.cumprod(axis=1))
assert_eq(df.cumsum(axis=1, skipna=False), ddf.cumsum(axis=1, skipna=False))
assert_eq(df.cummin(axis=1, skipna=False), ddf.cummin(axis=1, skipna=False))
assert_eq(df.cummax(axis=1, skipna=False), ddf.cummax(axis=1, skipna=False))
assert_eq(df.cumprod(axis=1, skipna=False), ddf.cumprod(axis=1, skipna=False))
def test_dropna():
df = pd.DataFrame({'x': [np.nan, 2, 3, 4, np.nan, 6],
'y': [1, 2, np.nan, 4, np.nan, np.nan],
'z': [1, 2, 3, 4, np.nan, np.nan]},
index=[10, 20, 30, 40, 50, 60])
ddf = dd.from_pandas(df, 3)
assert_eq(ddf.x.dropna(), df.x.dropna())
assert_eq(ddf.y.dropna(), df.y.dropna())
assert_eq(ddf.z.dropna(), df.z.dropna())
assert_eq(ddf.dropna(), df.dropna())
assert_eq(ddf.dropna(how='all'), df.dropna(how='all'))
assert_eq(ddf.dropna(subset=['x']), df.dropna(subset=['x']))
assert_eq(ddf.dropna(subset=['y', 'z']), df.dropna(subset=['y', 'z']))
assert_eq(ddf.dropna(subset=['y', 'z'], how='all'),
df.dropna(subset=['y', 'z'], how='all'))
@pytest.mark.parametrize('lower, upper', [(2, 5), (2.5, 3.5)])
def test_clip(lower, upper):
df = pd.DataFrame({'a': [1, 2, 3, 4, 5, 6, 7, 8, 9],
'b': [3, 5, 2, 5, 7, 2, 4, 2, 4]})
ddf = dd.from_pandas(df, 3)
s = pd.Series([1, 2, 3, 4, 5, 6, 7, 8, 9])
ds = dd.from_pandas(s, 3)
assert_eq(ddf.clip(lower=lower, upper=upper),
df.clip(lower=lower, upper=upper))
assert_eq(ddf.clip(lower=lower), df.clip(lower=lower))
assert_eq(ddf.clip(upper=upper), df.clip(upper=upper))
assert_eq(ds.clip(lower=lower, upper=upper),
s.clip(lower=lower, upper=upper))
assert_eq(ds.clip(lower=lower), s.clip(lower=lower))
assert_eq(ds.clip(upper=upper), s.clip(upper=upper))
assert_eq(ddf.clip_lower(lower), df.clip_lower(lower))
assert_eq(ddf.clip_lower(upper), df.clip_lower(upper))
assert_eq(ddf.clip_upper(lower), df.clip_upper(lower))
assert_eq(ddf.clip_upper(upper), df.clip_upper(upper))
assert_eq(ds.clip_lower(lower), s.clip_lower(lower))
assert_eq(ds.clip_lower(upper), s.clip_lower(upper))
assert_eq(ds.clip_upper(lower), s.clip_upper(lower))
assert_eq(ds.clip_upper(upper), s.clip_upper(upper))
def test_squeeze():
df = pd.DataFrame({'x': [1, 3, 6]})
df2 = pd.DataFrame({'x': [0]})
s = pd.Series({'test': 0, 'b': 100})
ddf = dd.from_pandas(df, 3)
ddf2 = dd.from_pandas(df2, 3)
ds = dd.from_pandas(s, 2)
assert_eq(df.squeeze(), ddf.squeeze())
assert_eq(pd.Series([0], name='x'), ddf2.squeeze())
assert_eq(ds.squeeze(), s.squeeze())
with pytest.raises(NotImplementedError) as info:
ddf.squeeze(axis=0)
msg = "{0} does not support squeeze along axis 0".format(type(ddf))
assert msg in str(info.value)
with pytest.raises(ValueError) as info:
ddf.squeeze(axis=2)
msg = 'No axis {0} for object type {1}'.format(2, type(ddf))
assert msg in str(info.value)
with pytest.raises(ValueError) as info:
ddf.squeeze(axis='test')
msg = 'No axis test for object type {0}'.format(type(ddf))
assert msg in str(info.value)
def test_where_mask():
pdf1 = pd.DataFrame({'a': [1, 2, 3, 4, 5, 6, 7, 8, 9],
'b': [3, 5, 2, 5, 7, 2, 4, 2, 4]})
ddf1 = dd.from_pandas(pdf1, 2)
pdf2 = pd.DataFrame({'a': [True, False, True] * 3,
'b': [False, False, True] * 3})
ddf2 = dd.from_pandas(pdf2, 2)
# different index
pdf3 = pd.DataFrame({'a': [1, 2, 3, 4, 5, 6, 7, 8, 9],
'b': [3, 5, 2, 5, 7, 2, 4, 2, 4]},
index=[0, 1, 2, 3, 4, 5, 6, 7, 8])
ddf3 = dd.from_pandas(pdf3, 2)
pdf4 = pd.DataFrame({'a': [True, False, True] * 3,
'b': [False, False, True] * 3},
index=[5, 6, 7, 8, 9, 10, 11, 12, 13])
ddf4 = dd.from_pandas(pdf4, 2)
# different columns
pdf5 = pd.DataFrame({'a': [1, 2, 3, 4, 5, 6, 7, 8, 9],
'b': [9, 4, 2, 6, 2, 3, 1, 6, 2],
'c': [5, 6, 7, 8, 9, 10, 11, 12, 13]},
index=[0, 1, 2, 3, 4, 5, 6, 7, 8])
ddf5 = dd.from_pandas(pdf5, 2)
pdf6 = pd.DataFrame({'a': [True, False, True] * 3,
'b': [False, False, True] * 3,
'c': [False] * 9,
'd': [True] * 9},
index=[5, 6, 7, 8, 9, 10, 11, 12, 13])
ddf6 = dd.from_pandas(pdf6, 2)
cases = [(ddf1, ddf2, pdf1, pdf2),
(ddf1.repartition([0, 3, 6, 8]), ddf2, pdf1, pdf2),
(ddf1, ddf4, pdf3, pdf4),
(ddf3.repartition([0, 4, 6, 8]), ddf4.repartition([5, 9, 10, 13]),
pdf3, pdf4),
(ddf5, ddf6, pdf5, pdf6),
(ddf5.repartition([0, 4, 7, 8]), ddf6, pdf5, pdf6),
# use pd.DataFrame as cond
(ddf1, pdf2, pdf1, pdf2),
(ddf1, pdf4, pdf3, pdf4),
(ddf5, pdf6, pdf5, pdf6)]
for ddf, ddcond, pdf, pdcond in cases:
assert isinstance(ddf, dd.DataFrame)
assert isinstance(ddcond, (dd.DataFrame, pd.DataFrame))
assert isinstance(pdf, pd.DataFrame)
assert isinstance(pdcond, pd.DataFrame)
assert_eq(ddf.where(ddcond), pdf.where(pdcond))
assert_eq(ddf.mask(ddcond), pdf.mask(pdcond))
assert_eq(ddf.where(ddcond, -ddf), pdf.where(pdcond, -pdf))
assert_eq(ddf.mask(ddcond, -ddf), pdf.mask(pdcond, -pdf))
assert_eq(ddf.where(ddcond.a, -ddf), pdf.where(pdcond.a, -pdf))
assert_eq(ddf.mask(ddcond.a, -ddf), pdf.mask(pdcond.a, -pdf))
assert_eq(ddf.a.where(ddcond.a), pdf.a.where(pdcond.a))
assert_eq(ddf.a.mask(ddcond.a), pdf.a.mask(pdcond.a))
assert_eq(ddf.a.where(ddcond.a, -ddf.a), pdf.a.where(pdcond.a, -pdf.a))
assert_eq(ddf.a.mask(ddcond.a, -ddf.a), pdf.a.mask(pdcond.a, -pdf.a))
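# NOTE: ``meta`` describes the schema of a lazy result (an empty pandas object,
# or a ``(name, dtype)`` pair for a Series) so dask can build the graph without
# computing anything; when it is omitted, dask infers it by calling the function
# on a tiny dummy partition, which several tests below rely on. A minimal
# sketch reusing the module-level ``d``/``full`` fixtures:
#
#     d.map_partitions(lambda pdf: pdf.assign(c=pdf.a + pdf.b),
#                      meta=full.assign(c=full.a + full.b).iloc[:0])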
def test_map_partitions_multi_argument():
assert_eq(dd.map_partitions(lambda a, b: a + b, d.a, d.b),
full.a + full.b)
assert_eq(dd.map_partitions(lambda a, b, c: a + b + c, d.a, d.b, 1),
full.a + full.b + 1)
def test_map_partitions():
assert_eq(d.map_partitions(lambda df: df, meta=d), full)
assert_eq(d.map_partitions(lambda df: df), full)
result = d.map_partitions(lambda df: df.sum(axis=1))
assert_eq(result, full.sum(axis=1))
assert_eq(d.map_partitions(lambda df: 1), pd.Series([1, 1, 1], dtype=np.int64),
check_divisions=False)
x = Scalar({('x', 0): 1}, 'x', int)
result = dd.map_partitions(lambda x: 2, x)
assert result.dtype in (np.int32, np.int64) and result.compute() == 2
result = dd.map_partitions(lambda x: 4.0, x)
assert result.dtype == np.float64 and result.compute() == 4.0
def test_map_partitions_names():
func = lambda x: x
assert (sorted(dd.map_partitions(func, d, meta=d).dask) ==
sorted(dd.map_partitions(func, d, meta=d).dask))
assert (sorted(dd.map_partitions(lambda x: x, d, meta=d, token=1).dask) ==
sorted(dd.map_partitions(lambda x: x, d, meta=d, token=1).dask))
func = lambda x, y: x
assert (sorted(dd.map_partitions(func, d, d, meta=d).dask) ==
sorted(dd.map_partitions(func, d, d, meta=d).dask))
def test_map_partitions_column_info():
df = pd.DataFrame({'x': [1, 2, 3, 4], 'y': [5, 6, 7, 8]})
a = dd.from_pandas(df, npartitions=2)
b = dd.map_partitions(lambda x: x, a, meta=a)
tm.assert_index_equal(b.columns, a.columns)
assert_eq(df, b)
b = dd.map_partitions(lambda x: x, a.x, meta=a.x)
assert b.name == a.x.name
assert_eq(df.x, b)
b = dd.map_partitions(lambda x: x, a.x, meta=a.x)
assert b.name == a.x.name
assert_eq(df.x, b)
b = dd.map_partitions(lambda df: df.x + df.y, a)
assert isinstance(b, dd.Series)
assert b.dtype == 'i8'
b = dd.map_partitions(lambda df: df.x + 1, a, meta=('x', 'i8'))
assert isinstance(b, dd.Series)
assert b.name == 'x'
assert b.dtype == 'i8'
def test_map_partitions_method_names():
df = pd.DataFrame({'x': [1, 2, 3, 4], 'y': [5, 6, 7, 8]})
a = dd.from_pandas(df, npartitions=2)
b = a.map_partitions(lambda x: x)
assert isinstance(b, dd.DataFrame)
tm.assert_index_equal(b.columns, a.columns)
b = a.map_partitions(lambda df: df.x + 1)
assert isinstance(b, dd.Series)
assert b.dtype == 'i8'
b = a.map_partitions(lambda df: df.x + 1, meta=('x', 'i8'))
assert isinstance(b, dd.Series)
assert b.name == 'x'
assert b.dtype == 'i8'
def test_map_partitions_keeps_kwargs_readable():
df = pd.DataFrame({'x': [1, 2, 3, 4], 'y': [5, 6, 7, 8]})
a = dd.from_pandas(df, npartitions=2)
def f(s, x=1):
return s + x
b = a.x.map_partitions(f, x=5)
# NOTE: we'd like to ensure that we keep the keyword arguments readable
# in the dask graph
assert "['x', 5]" in str(b.dask)
assert_eq(df.x + 5, b)
assert a.x.map_partitions(f, x=5)._name != a.x.map_partitions(f, x=6)._name
def test_metadata_inference_single_partition_aligned_args():
# https://github.com/dask/dask/issues/3034
# Previously broadcastable series functionality broke this
df = pd.DataFrame({'x': [1, 2, 3, 4, 5]})
ddf = dd.from_pandas(df, npartitions=1)
def check(df, df_x):
assert len(df) == len(df_x)
assert len(df) > 0
return df
res = dd.map_partitions(check, ddf, ddf.x)
assert_eq(res, ddf)
def test_drop_duplicates():
res = d.drop_duplicates()
res2 = d.drop_duplicates(split_every=2)
sol = full.drop_duplicates()
assert_eq(res, sol)
assert_eq(res2, sol)
assert res._name != res2._name
res = d.a.drop_duplicates()
res2 = d.a.drop_duplicates(split_every=2)
sol = full.a.drop_duplicates()
assert_eq(res, sol)
assert_eq(res2, sol)
assert res._name != res2._name
res = d.index.drop_duplicates()
res2 = d.index.drop_duplicates(split_every=2)
sol = full.index.drop_duplicates()
assert_eq(res, sol)
assert_eq(res2, sol)
assert res._name != res2._name
with pytest.raises(NotImplementedError):
d.drop_duplicates(keep=False)
def test_drop_duplicates_subset():
df = pd.DataFrame({'x': [1, 2, 3, 1, 2, 3],
'y': ['a', 'a', 'b', 'b', 'c', 'c']})
ddf = dd.from_pandas(df, npartitions=2)
for kwarg in [{'keep': 'first'}, {'keep': 'last'}]:
assert_eq(df.x.drop_duplicates(**kwarg),
ddf.x.drop_duplicates(**kwarg))
for ss in [['x'], 'y', ['x', 'y']]:
assert_eq(df.drop_duplicates(subset=ss, **kwarg),
ddf.drop_duplicates(subset=ss, **kwarg))
def test_get_partition():
pdf = pd.DataFrame(np.random.randn(10, 5), columns=list('abcde'))
ddf = dd.from_pandas(pdf, 3)
assert ddf.divisions == (0, 4, 8, 9)
# DataFrame
div1 = ddf.get_partition(0)
assert isinstance(div1, dd.DataFrame)
assert_eq(div1, pdf.loc[0:3])
div2 = ddf.get_partition(1)
assert_eq(div2, pdf.loc[4:7])
div3 = ddf.get_partition(2)
assert_eq(div3, pdf.loc[8:9])
assert len(div1) + len(div2) + len(div3) == len(pdf)
# Series
div1 = ddf.a.get_partition(0)
assert isinstance(div1, dd.Series)
assert_eq(div1, pdf.a.loc[0:3])
div2 = ddf.a.get_partition(1)
assert_eq(div2, pdf.a.loc[4:7])
div3 = ddf.a.get_partition(2)
assert_eq(div3, pdf.a.loc[8:9])
assert len(div1) + len(div2) + len(div3) == len(pdf.a)
with pytest.raises(ValueError):
ddf.get_partition(-1)
with pytest.raises(ValueError):
ddf.get_partition(3)
def test_ndim():
assert (d.ndim == 2)
assert (d.a.ndim == 1)
assert (d.index.ndim == 1)
def test_dtype():
assert (d.dtypes == full.dtypes).all()
def test_value_counts():
df = pd.DataFrame({'x': [1, 2, 1, 3, 3, 1, 4]})
ddf = dd.from_pandas(df, npartitions=3)
result = ddf.x.value_counts()
expected = df.x.value_counts()
assert_eq(result, expected)
result2 = ddf.x.value_counts(split_every=2)
assert_eq(result2, expected)
assert result._name != result2._name
def test_unique():
pdf = pd.DataFrame({'x': [1, 2, 1, 3, 3, 1, 4, 2, 3, 1],
'y': ['a', 'c', 'b', np.nan, 'c',
'b', 'a', 'd', np.nan, 'a']})
ddf = dd.from_pandas(pdf, npartitions=3)
assert_eq(ddf.x.unique(), pd.Series(pdf.x.unique(), name='x'))
assert_eq(ddf.y.unique(), pd.Series(pdf.y.unique(), name='y'))
assert_eq(ddf.x.unique(split_every=2),
pd.Series(pdf.x.unique(), name='x'))
assert_eq(ddf.y.unique(split_every=2),
pd.Series(pdf.y.unique(), name='y'))
assert ddf.x.unique(split_every=2)._name != ddf.x.unique()._name
def test_isin():
# Series test
assert_eq(d.a.isin([0, 1, 2]), full.a.isin([0, 1, 2]))
assert_eq(d.a.isin(pd.Series([0, 1, 2])),
full.a.isin(pd.Series([0, 1, 2])))
# DataFrame test
assert_eq(d.isin([0, 1, 2]), full.isin([0, 1, 2]))
def test_len():
assert len(d) == len(full)
assert len(d.a) == len(full.a)
def test_size():
assert_eq(d.size, full.size)
assert_eq(d.a.size, full.a.size)
assert_eq(d.index.size, full.index.size)
def test_shape():
result = d.shape
assert_eq((result[0].compute(), result[1]), (len(full), len(full.columns)))
assert_eq(dd.compute(result)[0], (len(full), len(full.columns)))
result = d.a.shape
assert_eq(result[0].compute(), len(full.a))
assert_eq(dd.compute(result)[0], (len(full.a),))
def test_nbytes():
assert_eq(d.a.nbytes, full.a.nbytes)
assert_eq(d.index.nbytes, full.index.nbytes)
def test_quantile():
# series / multiple
result = d.b.quantile([.3, .7])
exp = full.b.quantile([.3, .7])  # result may differ
assert len(result) == 2
assert result.divisions == (.3, .7)
assert_eq(result.index, exp.index)
assert isinstance(result, dd.Series)
result = result.compute()
assert isinstance(result, pd.Series)
assert result.iloc[0] == 0
assert 5 < result.iloc[1] < 6
# index
s = pd.Series(np.arange(10), index=np.arange(10))
ds = dd.from_pandas(s, 2)
result = ds.index.quantile([.3, .7])
exp = s.quantile([.3, .7])
assert len(result) == 2
assert result.divisions == (.3, .7)
assert_eq(result.index, exp.index)
assert isinstance(result, dd.Series)
result = result.compute()
assert isinstance(result, pd.Series)
assert 1 < result.iloc[0] < 2
assert 7 < result.iloc[1] < 8
# series / single
result = d.b.quantile(.5)
exp = full.b.quantile(.5)  # result may differ
assert isinstance(result, dd.core.Scalar)
result = result.compute()
assert 4 < result < 6
def test_quantile_missing():
df = pd.DataFrame({"A": [0, np.nan, 2]})
ddf = dd.from_pandas(df, 2)
expected = df.quantile()
result = ddf.quantile()
assert_eq(result, expected)
expected = df.A.quantile()
result = ddf.A.quantile()
assert_eq(result, expected)
def test_empty_quantile():
result = d.b.quantile([])
exp = full.b.quantile([])
assert result.divisions == (None, None)
assert result.name == 'b'
assert result.compute().name == 'b'
assert_eq(result, exp)
def test_dataframe_quantile():
# column X is used to test column order and the resulting divisions
df = pd.DataFrame({'A': np.arange(20),
'X': np.arange(20, 40),
'B': np.arange(10, 30),
'C': ['a', 'b', 'c', 'd'] * 5},
columns=['A', 'X', 'B', 'C'])
ddf = dd.from_pandas(df, 3)
result = ddf.quantile()
assert result.npartitions == 1
assert result.divisions == ('A', 'X')
result = result.compute()
assert isinstance(result, pd.Series)
assert result.name == 0.5
tm.assert_index_equal(result.index, pd.Index(['A', 'X', 'B']))
assert (result > pd.Series([16, 36, 26], index=['A', 'X', 'B'])).all()
assert (result < pd.Series([17, 37, 27], index=['A', 'X', 'B'])).all()
result = ddf.quantile([0.25, 0.75])
assert result.npartitions == 1
assert result.divisions == (0.25, 0.75)
result = result.compute()
assert isinstance(result, pd.DataFrame)
tm.assert_index_equal(result.index, pd.Index([0.25, 0.75]))
tm.assert_index_equal(result.columns, pd.Index(['A', 'X', 'B']))
minexp = pd.DataFrame([[1, 21, 11], [17, 37, 27]],
index=[0.25, 0.75], columns=['A', 'X', 'B'])
assert (result > minexp).all().all()
maxexp = pd.DataFrame([[2, 22, 12], [18, 38, 28]],
index=[0.25, 0.75], columns=['A', 'X', 'B'])
assert (result < maxexp).all().all()
assert_eq(ddf.quantile(axis=1), df.quantile(axis=1))
pytest.raises(ValueError, lambda: ddf.quantile([0.25, 0.75], axis=1))
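# NOTE: quantiles in dask.dataframe are computed approximately (per-partition
# percentiles are merged), which is why the assertions above and in
# test_quantile only bound the result within a range instead of comparing
# exactly against pandas. E.g.:
#
#     ddf.quantile([0.25, 0.75])   # lazy result; divisions are the q values themselves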
def test_index():
assert_eq(d.index, full.index)
def test_assign():
d_unknown = dd.from_pandas(full, npartitions=3, sort=False)
assert not d_unknown.known_divisions
res = d.assign(c=1,
d='string',
e=d.a.sum(),
f=d.a + d.b,
g=lambda x: x.a + x.b,
dt=pd.Timestamp(2018, 2, 13))
res_unknown = d_unknown.assign(c=1,
d='string',
e=d_unknown.a.sum(),
f=d_unknown.a + d_unknown.b,
g=lambda x: x.a + x.b,
dt=pd.Timestamp(2018, 2, 13))
sol = full.assign(c=1,
d='string',
e=full.a.sum(),
f=full.a + full.b,
g=lambda x: x.a + x.b,
dt=pd.Timestamp(2018, 2, 13))
assert_eq(res, sol)
assert_eq(res_unknown, sol)
res = d.assign(c=full.a + 1)
assert_eq(res, full.assign(c=full.a + 1))
# assigning a pandas object fails when divisions are unknown
with pytest.raises(ValueError):
d_unknown.assign(c=full.a + 1)
# unsupported type
with pytest.raises(TypeError):
d.assign(c=list(range(9)))
# Fails when assigning known divisions to unknown divisions
with pytest.raises(ValueError):
d_unknown.assign(foo=d.a)
# Fails when assigning unknown divisions to known divisions
with pytest.raises(ValueError):
d.assign(foo=d_unknown.a)
def test_assign_callable():
df = dd.from_pandas(pd.DataFrame({"A": range(10)}), npartitions=2)
a = df.assign(B=df.A.shift())
b = df.assign(B=lambda x: x.A.shift())
assert_eq(a, b)
def test_map():
assert_eq(d.a.map(lambda x: x + 1), full.a.map(lambda x: x + 1))
lk = dict((v, v + 1) for v in full.a.values)
assert_eq(d.a.map(lk), full.a.map(lk))
assert_eq(d.b.map(lk), full.b.map(lk))
lk = pd.Series(lk)
assert_eq(d.a.map(lk), full.a.map(lk))
assert_eq(d.b.map(lk), full.b.map(lk))
assert_eq(d.b.map(lk, meta=d.b), full.b.map(lk))
assert_eq(d.b.map(lk, meta=('b', 'i8')), full.b.map(lk))
pytest.raises(TypeError, lambda: d.a.map(d.b))
def test_concat():
x = _concat([pd.DataFrame(columns=['a', 'b']),
pd.DataFrame(columns=['a', 'b'])])
assert list(x.columns) == ['a', 'b']
assert len(x) == 0
def test_args():
e = d.assign(c=d.a + 1)
f = type(e)(*e._args)
assert_eq(e, f)
assert_eq(d.a, type(d.a)(*d.a._args))
assert_eq(d.a.sum(), type(d.a.sum())(*d.a.sum()._args))
def test_known_divisions():
assert d.known_divisions
df = dd.DataFrame(dsk, 'x', meta, divisions=[None, None, None])
assert not df.known_divisions
def test_unknown_divisions():
dsk = {('x', 0): pd.DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6]}),
('x', 1): pd.DataFrame({'a': [4, 5, 6], 'b': [3, 2, 1]}),
('x', 2): pd.DataFrame({'a': [7, 8, 9], 'b': [0, 0, 0]})}
meta = make_meta({'a': 'i8', 'b': 'i8'})
d = dd.DataFrame(dsk, 'x', meta, [None, None, None, None])
full = d.compute(scheduler='sync')
assert_eq(d.a.sum(), full.a.sum())
assert_eq(d.a + d.b + 1, full.a + full.b + 1)
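# NOTE: ``divisions`` are the index values at the partition boundaries. When
# they are known, dask can align partitions and answer index-based queries
# cheaply; when they are all ``None`` (as in the frame above), element-wise work
# still succeeds, it just cannot rely on the index layout. For example,
# test_get_partition above builds a 10-row frame with
# ``dd.from_pandas(pdf, 3)`` whose divisions are ``(0, 4, 8, 9)``: four boundary
# index values delimiting three partitions.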
@pytest.mark.parametrize('join', ['inner', 'outer', 'left', 'right'])
def test_align(join):
df1a = pd.DataFrame({'A': np.random.randn(10),
'B': np.random.randn(10)},
index=[1, 12, 5, 6, 3, 9, 10, 4, 13, 11])
df1b = pd.DataFrame({'A': np.random.randn(10),
'B': np.random.randn(10)},
index=[0, 3, 2, 10, 5, 6, 7, 8, 12, 13])
ddf1a = dd.from_pandas(df1a, 3)
ddf1b = dd.from_pandas(df1b, 3)
# DataFrame
res1, res2 = ddf1a.align(ddf1b, join=join)
exp1, exp2 = df1a.align(df1b, join=join)
assert assert_eq(res1, exp1)
assert assert_eq(res2, exp2)
# Series
res1, res2 = ddf1a['A'].align(ddf1b['B'], join=join)
exp1, exp2 = df1a['A'].align(df1b['B'], join=join)
assert assert_eq(res1, exp1)
assert assert_eq(res2, exp2)
# DataFrame with fill_value
res1, res2 = ddf1a.align(ddf1b, join=join, fill_value=1)
exp1, exp2 = df1a.align(df1b, join=join, fill_value=1)
assert assert_eq(res1, exp1)
assert assert_eq(res2, exp2)
# Series
res1, res2 = ddf1a['A'].align(ddf1b['B'], join=join, fill_value=1)
exp1, exp2 = df1a['A'].align(df1b['B'], join=join, fill_value=1)
assert assert_eq(res1, exp1)
assert assert_eq(res2, exp2)
@pytest.mark.parametrize('join', ['inner', 'outer', 'left', 'right'])
def test_align_axis(join):
df1a = pd.DataFrame({'A': np.random.randn(10),
'B': np.random.randn(10),
'C': np.random.randn(10)},
index=[1, 12, 5, 6, 3, 9, 10, 4, 13, 11])
df1b = pd.DataFrame({'B': np.random.randn(10),
'C': np.random.randn(10),
'D': np.random.randn(10)},
index=[0, 3, 2, 10, 5, 6, 7, 8, 12, 13])
ddf1a = dd.from_pandas(df1a, 3)
ddf1b = dd.from_pandas(df1b, 3)
res1, res2 = ddf1a.align(ddf1b, join=join, axis=0)
exp1, exp2 = df1a.align(df1b, join=join, axis=0)
assert assert_eq(res1, exp1)
assert assert_eq(res2, exp2)
res1, res2 = ddf1a.align(ddf1b, join=join, axis=1)
exp1, exp2 = df1a.align(df1b, join=join, axis=1)
assert assert_eq(res1, exp1)
assert assert_eq(res2, exp2)
res1, res2 = ddf1a.align(ddf1b, join=join, axis='index')
exp1, exp2 = df1a.align(df1b, join=join, axis='index')
assert assert_eq(res1, exp1)
assert assert_eq(res2, exp2)
res1, res2 = ddf1a.align(ddf1b, join=join, axis='columns')
exp1, exp2 = df1a.align(df1b, join=join, axis='columns')
assert assert_eq(res1, exp1)
assert assert_eq(res2, exp2)
# invalid
with pytest.raises(ValueError):
ddf1a.align(ddf1b, join=join, axis='XXX')
with pytest.raises(ValueError):
ddf1a['A'].align(ddf1b['B'], join=join, axis=1)
def test_combine():
df1 = pd.DataFrame({'A': np.random.choice([1, 2, np.nan], 100),
'B': np.random.choice(['a', 'b', np.nan], 100)})
df2 = pd.DataFrame({'A': np.random.choice([1, 2, 3], 100),
'B': np.random.choice(['a', 'b', 'c'], 100)})
ddf1 = dd.from_pandas(df1, 4)
ddf2 = dd.from_pandas(df2, 5)
first = lambda a, b: a
# DataFrame
for dda, ddb, a, b in [(ddf1, ddf2, df1, df2),
(ddf1.A, ddf2.A, df1.A, df2.A),
(ddf1.B, ddf2.B, df1.B, df2.B)]:
for func, fill_value in [(add, None), (add, 100), (first, None)]:
sol = a.combine(b, func, fill_value=fill_value)
assert_eq(dda.combine(ddb, func, fill_value=fill_value), sol)
assert_eq(dda.combine(b, func, fill_value=fill_value), sol)
assert_eq(ddf1.combine(ddf2, add, overwrite=False),
df1.combine(df2, add, overwrite=False))
assert dda.combine(ddb, add)._name == dda.combine(ddb, add)._name
def test_combine_first():
df1 = pd.DataFrame({'A': np.random.choice([1, 2, np.nan], 100),
'B': np.random.choice(['a', 'b', np.nan], 100)})
df2 = pd.DataFrame({'A': np.random.choice([1, 2, 3], 100),
'B': np.random.choice(['a', 'b', 'c'], 100)})
ddf1 = dd.from_pandas(df1, 4)
ddf2 = dd.from_pandas(df2, 5)
# DataFrame
assert_eq(ddf1.combine_first(ddf2), df1.combine_first(df2))
assert_eq(ddf1.combine_first(df2), df1.combine_first(df2))
# Series
assert_eq(ddf1.A.combine_first(ddf2.A), df1.A.combine_first(df2.A))
assert_eq(ddf1.A.combine_first(df2.A), df1.A.combine_first(df2.A))
assert_eq(ddf1.B.combine_first(ddf2.B), df1.B.combine_first(df2.B))
assert_eq(ddf1.B.combine_first(df2.B), df1.B.combine_first(df2.B))
def test_dataframe_picklable():
from pickle import loads, dumps
cloudpickle = pytest.importorskip('cloudpickle')
cp_dumps = cloudpickle.dumps
d = tm.makeTimeDataFrame()
df = dd.from_pandas(d, npartitions=3)
df = df + 2
# dataframe
df2 = loads(dumps(df))
assert_eq(df, df2)
df2 = loads(cp_dumps(df))
assert_eq(df, df2)
# series
a2 = loads(dumps(df.A))
assert_eq(df.A, a2)
a2 = loads(cp_dumps(df.A))
assert_eq(df.A, a2)
# index
i2 = loads(dumps(df.index))
assert_eq(df.index, i2)
i2 = loads(cp_dumps(df.index))
assert_eq(df.index, i2)
# scalar
# lambdas are present, so only test cloudpickle
s = df.A.sum()
s2 = loads(cp_dumps(s))
assert_eq(s, s2)
def test_random_partitions():
a, b = d.random_split([0.5, 0.5], 42)
assert isinstance(a, dd.DataFrame)
assert isinstance(b, dd.DataFrame)
assert a._name != b._name
assert len(a.compute()) + len(b.compute()) == len(full)
a2, b2 = d.random_split([0.5, 0.5], 42)
assert a2._name == a._name
assert b2._name == b._name
parts = d.random_split([0.4, 0.5, 0.1], 42)
names = set([p._name for p in parts])
names.update([a._name, b._name])
assert len(names) == 5
with pytest.raises(ValueError):
d.random_split([0.4, 0.5], 42)
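# NOTE: ``random_split`` is seeded: the same fractions with the same random
# state reproduce the same partitions (identical ``_name``s above), while
# fractions that do not sum to 1 are rejected (the ``[0.4, 0.5]`` case above).
# Sketch:
#
#     left, right = d.random_split([0.25, 0.75], 123)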
def test_series_round():
ps = pd.Series([1.123, 2.123, 3.123, 1.234, 2.234, 3.234], name='a')
s = dd.from_pandas(ps, npartitions=3)
assert_eq(s.round(), ps.round())
@pytest.mark.slow
def test_repartition():
def _check_split_data(orig, d):
"""Check data is split properly"""
keys = [k for k in d.dask if k[0].startswith('repartition-split')]
keys = sorted(keys)
sp = pd.concat([compute_as_if_collection(dd.DataFrame, d.dask, k)
for k in keys])
assert_eq(orig, sp)
assert_eq(orig, d)
df = pd.DataFrame({'x': [1, 2, 3, 4, 5, 6], 'y': list('abdabd')},
index=[10, 20, 30, 40, 50, 60])
a = dd.from_pandas(df, 2)
b = a.repartition(divisions=[10, 20, 50, 60])
assert b.divisions == (10, 20, 50, 60)
assert_eq(a, b)
assert_eq(compute_as_if_collection(dd.DataFrame, b.dask, (b._name, 0)),
df.iloc[:1])
for div in [[20, 60], [10, 50], [1], # first / last element mismatch
[0, 60], [10, 70], # do not allow to expand divisions by default
[10, 50, 20, 60], # not sorted
[10, 10, 20, 60]]: # not unique (last element can be duplicated)
pytest.raises(ValueError, lambda: a.repartition(divisions=div))
pdf = pd.DataFrame(np.random.randn(7, 5), columns=list('abxyz'))
for p in range(1, 7):
ddf = dd.from_pandas(pdf, p)
assert_eq(ddf, pdf)
for div in [[0, 6], [0, 6, 6], [0, 5, 6], [0, 4, 6, 6],
[0, 2, 6], [0, 2, 6, 6],
[0, 2, 3, 6, 6], [0, 1, 2, 3, 4, 5, 6, 6]]:
rddf = ddf.repartition(divisions=div)
_check_split_data(ddf, rddf)
assert rddf.divisions == tuple(div)
assert_eq(pdf, rddf)
rds = ddf.x.repartition(divisions=div)
_check_split_data(ddf.x, rds)
assert rds.divisions == tuple(div)
assert_eq(pdf.x, rds)
# expand divisions
for div in [[-5, 10], [-2, 3, 5, 6], [0, 4, 5, 9, 10]]:
rddf = ddf.repartition(divisions=div, force=True)
_check_split_data(ddf, rddf)
assert rddf.divisions == tuple(div)
assert_eq(pdf, rddf)
rds = ddf.x.repartition(divisions=div, force=True)
_check_split_data(ddf.x, rds)
assert rds.divisions == tuple(div)
assert_eq(pdf.x, rds)
pdf = pd.DataFrame({'x': [0, 1, 2, 3, 4, 5, 6, 7, 8, 9],
'y': [9, 8, 7, 6, 5, 4, 3, 2, 1, 0]},
index=list('abcdefghij'))
for p in range(1, 7):
ddf = dd.from_pandas(pdf, p)
assert_eq(ddf, pdf)
for div in [list('aj'), list('ajj'), list('adj'),
list('abfj'), list('ahjj'), list('acdj'), list('adfij'),
list('abdefgij'), list('abcdefghij')]:
rddf = ddf.repartition(divisions=div)
_check_split_data(ddf, rddf)
assert rddf.divisions == tuple(div)
assert_eq(pdf, rddf)
rds = ddf.x.repartition(divisions=div)
_check_split_data(ddf.x, rds)
assert rds.divisions == tuple(div)
assert_eq(pdf.x, rds)
# expand divisions
for div in [list('Yadijm'), list('acmrxz'), list('Yajz')]:
rddf = ddf.repartition(divisions=div, force=True)
_check_split_data(ddf, rddf)
assert rddf.divisions == tuple(div)
assert_eq(pdf, rddf)
rds = ddf.x.repartition(divisions=div, force=True)
_check_split_data(ddf.x, rds)
assert rds.divisions == tuple(div)
assert_eq(pdf.x, rds)
def test_repartition_divisions():
result = repartition_divisions([0, 6], [0, 6, 6], 'a', 'b', 'c')
assert result == {('b', 0): (methods.boundary_slice, ('a', 0), 0, 6, False),
('b', 1): (methods.boundary_slice, ('a', 0), 6, 6, True),
('c', 0): ('b', 0),
('c', 1): ('b', 1)}
result = repartition_divisions([1, 3, 7], [1, 4, 6, 7], 'a', 'b', 'c')
assert result == {('b', 0): (methods.boundary_slice, ('a', 0), 1, 3, False),
('b', 1): (methods.boundary_slice, ('a', 1), 3, 4, False),
('b', 2): (methods.boundary_slice, ('a', 1), 4, 6, False),
('b', 3): (methods.boundary_slice, ('a', 1), 6, 7, True),
('c', 0): (methods.concat, [('b', 0), ('b', 1)]),
('c', 1): ('b', 2),
('c', 2): ('b', 3)}
def test_repartition_on_pandas_dataframe():
df = pd.DataFrame({'x': [1, 2, 3, 4, 5, 6], 'y': list('abdabd')},
index=[10, 20, 30, 40, 50, 60])
ddf = dd.repartition(df, divisions=[10, 20, 50, 60])
assert isinstance(ddf, dd.DataFrame)
assert ddf.divisions == (10, 20, 50, 60)
assert_eq(ddf, df)
ddf = dd.repartition(df.y, divisions=[10, 20, 50, 60])
assert isinstance(ddf, dd.Series)
assert ddf.divisions == (10, 20, 50, 60)
assert_eq(ddf, df.y)
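# NOTE: ``repartition`` accepts three alternative targets, all exercised in the
# surrounding tests: explicit ``divisions`` (which must keep the same endpoints
# unless ``force=True``), a number of ``npartitions`` (adjacent partitions are
# split or merged), or a ``freq`` (datetime index only). Roughly, reusing the
# 6-row frame above:
#
#     a = dd.from_pandas(df, 2)
#     a.repartition(npartitions=1)            # merge adjacent partitions
#     a.repartition(divisions=[10, 30, 60])   # same endpoints as a.divisions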
@pytest.mark.parametrize('use_index', [True, False])
@pytest.mark.parametrize('n', [1, 2, 4, 5])
@pytest.mark.parametrize('k', [1, 2, 4, 5])
@pytest.mark.parametrize('dtype', [int, float, 'M8[ns]'])
@pytest.mark.parametrize('transform', [lambda df: df, lambda df: df.x])
def test_repartition_npartitions(use_index, n, k, dtype, transform):
df = pd.DataFrame({'x': [1, 2, 3, 4, 5, 6] * 10,
'y': list('abdabd') * 10},
index=pd.Series([10, 20, 30, 40, 50, 60] * 10, dtype=dtype))
df = transform(df)
a = dd.from_pandas(df, npartitions=n, sort=use_index)
b = a.repartition(npartitions=k)
assert_eq(a, b)
assert b.npartitions == k
parts = dask.get(b.dask, b.__dask_keys__())
assert all(map(len, parts))
def test_repartition_npartitions_same_limits():
df = pd.DataFrame({'x': [1, 2, 3]},
index=[pd.Timestamp('2017-05-09 00:00:00.006000'),
pd.Timestamp('2017-05-09 02:45:00.017999'),
pd.Timestamp('2017-05-09 05:59:58.938999')])
ddf = dd.from_pandas(df, npartitions=2)
ddf.repartition(npartitions=10)
def test_repartition_object_index():
df = pd.DataFrame({'x': [1, 2, 3, 4, 5, 6] * 10},
index=list('abdabd') * 10)
a = dd.from_pandas(df, npartitions=5)
b = a.repartition(npartitions=2)
assert b.npartitions == 2
assert_eq(b, df)
b = a.repartition(npartitions=10)
assert b.npartitions == 10
assert_eq(b, df)
assert not b.known_divisions
@pytest.mark.slow
@pytest.mark.parametrize('npartitions', [1, 20, 243])
@pytest.mark.parametrize('freq', ['1D', '7D', '28h', '1h'])
@pytest.mark.parametrize('end', ['2000-04-15', '2000-04-15 12:37:01', '2000-01-01 12:37:00'])
@pytest.mark.parametrize('start', ['2000-01-01', '2000-01-01 12:30:00', '2000-01-01 12:30:00'])
def test_repartition_freq(npartitions, freq, start, end):
start = pd.Timestamp(start)
end = pd.Timestamp(end)
ind = pd.DatetimeIndex(start=start, end=end, freq='60s')
df = pd.DataFrame({'x': np.arange(len(ind))}, index=ind)
ddf = dd.from_pandas(df, npartitions=npartitions, name='x')
ddf2 = ddf.repartition(freq=freq)
assert_eq(ddf2, df)
def test_repartition_freq_divisions():
df = pd.DataFrame({'x': np.random.random(10)},
index=pd.DatetimeIndex(np.random.random(10) * 100e9))
ddf = dd.from_pandas(df, npartitions=3)
ddf2 = ddf.repartition(freq='15s')
for div in ddf2.divisions[1:-1]:
assert div == div.round('15s')
assert ddf2.divisions[0] == df.index.min()
assert ddf2.divisions[-1] == df.index.max()
assert_eq(ddf2, ddf2)
def test_repartition_freq_errors():
df = pd.DataFrame({'x': [1, 2, 3]})
ddf = dd.from_pandas(df, npartitions=1)
with pytest.raises(TypeError) as info:
ddf.repartition(freq='1s')
assert 'only' in str(info.value)
assert 'timeseries' in str(info.value)
def test_repartition_freq_month():
ts = pd.date_range("2015-01-01 00:00", "2015-05-01 23:50", freq="10min")
df = pd.DataFrame(np.random.randint(0, 100, size=(len(ts), 4)),
columns=list('ABCD'), index=ts)
ddf = dd.from_pandas(df, npartitions=1).repartition(freq='1M')
assert_eq(df, ddf)
assert 2 < ddf.npartitions <= 6
def test_embarrassingly_parallel_operations():
df = pd.DataFrame({'x': [1, 2, 3, 4, None, 6], 'y': list('abdabd')},
index=[10, 20, 30, 40, 50, 60])
a = dd.from_pandas(df, 2)
assert_eq(a.x.astype('float32'), df.x.astype('float32'))
assert a.x.astype('float32').compute().dtype == 'float32'
assert_eq(a.x.dropna(), df.x.dropna())
assert_eq(a.x.between(2, 4), df.x.between(2, 4))
assert_eq(a.x.clip(2, 4), df.x.clip(2, 4))
assert_eq(a.x.notnull(), df.x.notnull())
assert_eq(a.x.isnull(), df.x.isnull())
assert_eq(a.notnull(), df.notnull())
assert_eq(a.isnull(), df.isnull())
assert len(a.sample(frac=0.5).compute()) < len(df)
def test_fillna():
df = tm.makeMissingDataframe(0.8, 42)
ddf = dd.from_pandas(df, npartitions=5, sort=False)
assert_eq(ddf.fillna(100), df.fillna(100))
assert_eq(ddf.A.fillna(100), df.A.fillna(100))
assert_eq(ddf.fillna(method='pad'), df.fillna(method='pad'))
assert_eq(ddf.A.fillna(method='pad'), df.A.fillna(method='pad'))
assert_eq(ddf.fillna(method='bfill'), df.fillna(method='bfill'))
assert_eq(ddf.A.fillna(method='bfill'), df.A.fillna(method='bfill'))
assert_eq(ddf.fillna(method='pad', limit=2),
df.fillna(method='pad', limit=2))
assert_eq(ddf.A.fillna(method='pad', limit=2),
df.A.fillna(method='pad', limit=2))
assert_eq(ddf.fillna(method='bfill', limit=2),
df.fillna(method='bfill', limit=2))
assert_eq(ddf.A.fillna(method='bfill', limit=2),
df.A.fillna(method='bfill', limit=2))
assert_eq(ddf.fillna(100, axis=1), df.fillna(100, axis=1))
assert_eq(ddf.fillna(method='pad', axis=1), df.fillna(method='pad', axis=1))
assert_eq(ddf.fillna(method='pad', limit=2, axis=1),
df.fillna(method='pad', limit=2, axis=1))
pytest.raises(ValueError, lambda: ddf.A.fillna(0, axis=1))
pytest.raises(NotImplementedError, lambda: ddf.fillna(0, limit=10))
pytest.raises(NotImplementedError, lambda: ddf.fillna(0, limit=10, axis=1))
df = tm.makeMissingDataframe(0.2, 42)
ddf = dd.from_pandas(df, npartitions=5, sort=False)
pytest.raises(ValueError, lambda: ddf.fillna(method='pad').compute())
assert_eq(df.fillna(method='pad', limit=3),
ddf.fillna(method='pad', limit=3))
def test_fillna_multi_dataframe():
df = tm.makeMissingDataframe(0.8, 42)
ddf = dd.from_pandas(df, npartitions=5, sort=False)
assert_eq(ddf.A.fillna(ddf.B), df.A.fillna(df.B))
assert_eq(ddf.B.fillna(ddf.A), df.B.fillna(df.A))
def test_ffill_bfill():
df = tm.makeMissingDataframe(0.8, 42)
ddf = dd.from_pandas(df, npartitions=5, sort=False)
assert_eq(ddf.ffill(), df.ffill())
assert_eq(ddf.bfill(), df.bfill())
assert_eq(ddf.ffill(axis=1), df.ffill(axis=1))
assert_eq(ddf.bfill(axis=1), df.bfill(axis=1))
def test_fillna_series_types():
# https://github.com/dask/dask/issues/2809
df = pd.DataFrame({"A": [1, np.nan, 3], "B": [1, np.nan, 3]})
ddf = dd.from_pandas(df, npartitions=2)
fill_value = pd.Series([1, 10], index=['A', 'C'])
assert_eq(ddf.fillna(fill_value), df.fillna(fill_value))
def test_sample():
df = pd.DataFrame({'x': [1, 2, 3, 4, None, 6], 'y': list('abdabd')},
index=[10, 20, 30, 40, 50, 60])
a = dd.from_pandas(df, 2)
b = a.sample(frac=0.5)
assert_eq(b, b)
c = a.sample(frac=0.5, random_state=1234)
d = a.sample(frac=0.5, random_state=1234)
assert_eq(c, d)
assert a.sample(frac=0.5)._name != a.sample(frac=0.5)._name
def test_sample_without_replacement():
df = pd.DataFrame({'x': [1, 2, 3, 4, None, 6], 'y': list('abdabd')},
index=[10, 20, 30, 40, 50, 60])
a = dd.from_pandas(df, 2)
b = a.sample(frac=0.7, replace=False)
bb = b.index.compute()
assert len(bb) == len(set(bb))
def test_sample_raises():
df = pd.DataFrame({'x': [1, 2, 3, 4, None, 6], 'y': list('abdabd')},
index=[10, 20, 30, 40, 50, 60])
a = dd.from_pandas(df, 2)
# Make sure frac is replaced with n when 0 <= n <= 1
# This is so existing code (e.g. ddf.sample(0.5)) won't break
with pytest.warns(UserWarning):
b = a.sample(0.5, random_state=1234)
c = a.sample(frac=0.5, random_state=1234)
assert_eq(b, c)
with pytest.raises(ValueError):
a.sample(n=10)
# Make sure frac is provided
with pytest.raises(ValueError):
a.sample(frac=None)
def test_datetime_accessor():
df = pd.DataFrame({'x': [1, 2, 3, 4]})
df['x'] = df.x.astype('M8[us]')
a = dd.from_pandas(df, 2)
assert 'date' in dir(a.x.dt)
# pandas loses Series.name via datetime accessor
# see https://github.com/pydata/pandas/issues/10712
assert_eq(a.x.dt.date, df.x.dt.date, check_names=False)
# to_pydatetime returns a numpy array in pandas, but a Series in dask
assert_eq(a.x.dt.to_pydatetime(),
pd.Series(df.x.dt.to_pydatetime(), index=df.index, dtype=object))
assert set(a.x.dt.date.dask) == set(a.x.dt.date.dask)
assert set(a.x.dt.to_pydatetime().dask) == set(a.x.dt.to_pydatetime().dask)
def test_str_accessor():
df = pd.DataFrame({'x': ['abc', 'bcd', 'cdef', 'DEFG'], 'y': [1, 2, 3, 4]},
index=['E', 'f', 'g', 'h'])
ddf = dd.from_pandas(df, 2)
# Check that str not in dir/hasattr for non-object columns
assert 'str' not in dir(ddf.y)
assert not hasattr(ddf.y, 'str')
# methods that are not implemented don't show up
assert 'get_dummies' not in dir(ddf.x.str)
assert not hasattr(ddf.x.str, 'get_dummies')
assert 'upper' in dir(ddf.x.str)
assert_eq(ddf.x.str.upper(), df.x.str.upper())
assert set(ddf.x.str.upper().dask) == set(ddf.x.str.upper().dask)
assert 'upper' in dir(ddf.index.str)
assert_eq(ddf.index.str.upper(), df.index.str.upper())
assert set(ddf.index.str.upper().dask) == set(ddf.index.str.upper().dask)
# make sure args & kwargs are passed through
assert 'contains' in dir(ddf.x.str)
assert_eq(ddf.x.str.contains('a'), df.x.str.contains('a'))
assert set(ddf.x.str.contains('a').dask) == set(ddf.x.str.contains('a').dask)
assert_eq(ddf.x.str.contains('d', case=False), df.x.str.contains('d', case=False))
assert (set(ddf.x.str.contains('d', case=False).dask) ==
set(ddf.x.str.contains('d', case=False).dask))
for na in [True, False]:
assert_eq(ddf.x.str.contains('a', na=na), df.x.str.contains('a', na=na))
assert (set(ddf.x.str.contains('a', na=na).dask) ==
set(ddf.x.str.contains('a', na=na).dask))
for regex in [True, False]:
assert_eq(ddf.x.str.contains('a', regex=regex), df.x.str.contains('a', regex=regex))
assert (set(ddf.x.str.contains('a', regex=regex).dask) ==
set(ddf.x.str.contains('a', regex=regex).dask))
assert_eq(ddf.x.str[:2], df.x.str[:2])
assert_eq(ddf.x.str[1], df.x.str[1])
# str.extractall
assert_eq(ddf.x.str.extractall('(.*)b(.*)'),
df.x.str.extractall('(.*)b(.*)'))
# str.cat
sol = df.x.str.cat(df.x.str.upper(), sep=':')
assert_eq(ddf.x.str.cat(ddf.x.str.upper(), sep=':'), sol)
assert_eq(ddf.x.str.cat(df.x.str.upper(), sep=':'), sol)
assert_eq(ddf.x.str.cat([ddf.x.str.upper(), df.x.str.lower()], sep=':'),
df.x.str.cat([df.x.str.upper(), df.x.str.lower()], sep=':'))
for o in ['foo', ['foo']]:
with pytest.raises(TypeError):
ddf.x.str.cat(o)
with pytest.raises(NotImplementedError):
ddf.x.str.cat(sep=':')
def test_empty_max():
meta = make_meta({'x': 'i8'})
a = dd.DataFrame({('x', 0): pd.DataFrame({'x': [1]}),
('x', 1): pd.DataFrame({'x': []})}, 'x',
meta, [None, None, None])
assert_eq(a.x.max(), 1)
def test_query():
pytest.importorskip('numexpr')
df = pd.DataFrame({'x': [1, 2, 3, 4], 'y': [5, 6, 7, 8]})
ddf = dd.from_pandas(df, npartitions=2)
assert_eq(ddf.query('x**2 > y'),
df.query('x**2 > y'))
assert_eq(ddf.query('x**2 > @value', local_dict={'value': 4}),
df.query('x**2 > @value', local_dict={'value': 4}))
def test_eval():
pytest.importorskip('numexpr')
p = pd.DataFrame({'x': [1, 2, 3, 4], 'y': [5, 6, 7, 8]})
d = dd.from_pandas(p, npartitions=2)
assert_eq(p.eval('x + y'), d.eval('x + y'))
assert_eq(p.eval('z = x + y', inplace=False),
d.eval('z = x + y', inplace=False))
with pytest.raises(NotImplementedError):
d.eval('z = x + y', inplace=True)
# catch FutureWarning from pandas about assignment in eval
with pytest.warns(None):
if PANDAS_VERSION < '0.21.0':
if p.eval('z = x + y', inplace=None) is None:
with pytest.raises(NotImplementedError):
d.eval('z = x + y', inplace=None)
@pytest.mark.parametrize('include, exclude', [
([int], None),
(None, [int]),
([np.number, object], [float]),
(['datetime'], None)
])
def test_select_dtypes(include, exclude):
n = 10
df = pd.DataFrame({'cint': [1] * n,
'cstr': ['a'] * n,
'cfloat': [1.] * n,
'cdt': pd.date_range('2016-01-01', periods=n)})
a = dd.from_pandas(df, npartitions=2)
result = a.select_dtypes(include=include, exclude=exclude)
expected = df.select_dtypes(include=include, exclude=exclude)
assert_eq(result, expected)
# count dtypes
tm.assert_series_equal(a.get_dtype_counts(), df.get_dtype_counts())
tm.assert_series_equal(result.get_dtype_counts(),
expected.get_dtype_counts())
if PANDAS_VERSION >= '0.23.0':
ctx = pytest.warns(FutureWarning)
else:
ctx = pytest.warns(None)
with ctx:
tm.assert_series_equal(a.get_ftype_counts(), df.get_ftype_counts())
tm.assert_series_equal(result.get_ftype_counts(),
expected.get_ftype_counts())
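# NOTE: ``aca`` (apply_concat_apply) is the chunk -> combine -> aggregate tree
# reduction used throughout dask.dataframe: ``chunk`` runs once per partition,
# ``combine`` (defaulting to ``aggregate``) reduces groups of at most
# ``split_every`` intermediate results, and ``aggregate`` finishes the tree.
# With 15 partitions and ``split_every=3`` that is ceil(15/3)=5 plus
# ceil(5/3)=2, i.e. 7 combine calls -- the ``7 *`` factors in the arithmetic
# checks further down.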
def test_deterministic_apply_concat_apply_names():
df = pd.DataFrame({'x': [1, 2, 3, 4], 'y': [5, 6, 7, 8]})
a = dd.from_pandas(df, npartitions=2)
assert sorted(a.x.nlargest(2).dask) == sorted(a.x.nlargest(2).dask)
assert sorted(a.x.nlargest(2).dask) != sorted(a.x.nlargest(3).dask)
assert (sorted(a.x.drop_duplicates().dask) ==
sorted(a.x.drop_duplicates().dask))
assert (sorted(a.groupby('x').y.mean().dask) ==
sorted(a.groupby('x').y.mean().dask))
# Test aca without passing in token string
f = lambda a: a.nlargest(5)
f2 = lambda a: a.nlargest(3)
assert (sorted(aca(a.x, f, f, a.x._meta).dask) !=
sorted(aca(a.x, f2, f2, a.x._meta).dask))
assert (sorted(aca(a.x, f, f, a.x._meta).dask) ==
sorted(aca(a.x, f, f, a.x._meta).dask))
# Test aca with keywords
def chunk(x, c_key=0, both_key=0):
return x.sum() + c_key + both_key
def agg(x, a_key=0, both_key=0):
return pd.Series(x).sum() + a_key + both_key
c_key = 2
a_key = 3
both_key = 4
res = aca(a.x, chunk=chunk, aggregate=agg, chunk_kwargs={'c_key': c_key},
aggregate_kwargs={'a_key': a_key}, both_key=both_key)
assert (sorted(res.dask) ==
sorted(aca(a.x, chunk=chunk, aggregate=agg,
chunk_kwargs={'c_key': c_key},
aggregate_kwargs={'a_key': a_key},
both_key=both_key).dask))
assert (sorted(res.dask) !=
sorted(aca(a.x, chunk=chunk, aggregate=agg,
chunk_kwargs={'c_key': c_key},
aggregate_kwargs={'a_key': a_key},
both_key=0).dask))
assert_eq(res, df.x.sum() + 2 * (c_key + both_key) + a_key + both_key)
def test_aca_meta_infer():
df = pd.DataFrame({'x': [1, 2, 3, 4],
'y': [5, 6, 7, 8]})
ddf = dd.from_pandas(df, npartitions=2)
def chunk(x, y, constant=1.0):
return (x + y + constant).head()
def agg(x):
return x.head()
res = aca([ddf, 2.0], chunk=chunk, aggregate=agg,
chunk_kwargs=dict(constant=2.0))
sol = (df + 2.0 + 2.0).head()
assert_eq(res, sol)
# Should infer as a scalar
res = aca([ddf.x], chunk=lambda x: pd.Series([x.sum()]),
aggregate=lambda x: x.sum())
assert isinstance(res, Scalar)
assert res.compute() == df.x.sum()
def test_aca_split_every():
df = pd.DataFrame({'x': [1] * 60})
ddf = dd.from_pandas(df, npartitions=15)
def chunk(x, y, constant=0):
return x.sum() + y + constant
def combine(x, constant=0):
return x.sum() + constant + 1
def agg(x, constant=0):
return x.sum() + constant + 2
f = lambda n: aca([ddf, 2.0], chunk=chunk, aggregate=agg, combine=combine,
chunk_kwargs=dict(constant=1.0),
combine_kwargs=dict(constant=2.0),
aggregate_kwargs=dict(constant=3.0),
split_every=n)
assert_max_deps(f(3), 3)
assert_max_deps(f(4), 4, False)
assert_max_deps(f(5), 5)
assert set(f(15).dask.keys()) == set(f(ddf.npartitions).dask.keys())
r3 = f(3)
r4 = f(4)
assert r3._name != r4._name
# Only intersect on reading operations
assert len(set(r3.dask.keys()) & set(r4.dask.keys())) == len(ddf.dask.keys())
# Keywords are different for each step
assert f(3).compute() == 60 + 15 * (2 + 1) + 7 * (2 + 1) + (3 + 2)
# Keywords are same for each step
res = aca([ddf, 2.0], chunk=chunk, aggregate=agg, combine=combine,
constant=3.0, split_every=3)
assert res.compute() == 60 + 15 * (2 + 3) + 7 * (3 + 1) + (3 + 2)
# No combine provided, combine is agg
res = aca([ddf, 2.0], chunk=chunk, aggregate=agg, constant=3, split_every=3)
assert res.compute() == 60 + 15 * (2 + 3) + 8 * (3 + 2)
# split_every must be >= 2
with pytest.raises(ValueError):
f(1)
# combine_kwargs with no combine provided
with pytest.raises(ValueError):
aca([ddf, 2.0], chunk=chunk, aggregate=agg, split_every=3,
chunk_kwargs=dict(constant=1.0),
combine_kwargs=dict(constant=2.0),
aggregate_kwargs=dict(constant=3.0))
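# NOTE: ``.reduction()`` below is the public counterpart of the ``aca`` helper
# above: the same chunk -> combine -> aggregate tree, with the same
# ``split_every`` and ``*_kwargs`` routing, so the 15-partition arithmetic
# mirrors the aca test. A one-line sketch:
#
#     ddf.x.reduction(lambda s: s.max(), aggregate=lambda s: s.max())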
def test_reduction_method():
df = pd.DataFrame({'x': range(50), 'y': range(50, 100)})
ddf = dd.from_pandas(df, npartitions=4)
chunk = lambda x, val=0: (x >= val).sum()
agg = lambda x: x.sum()
# Output of chunk is a scalar
res = ddf.x.reduction(chunk, aggregate=agg)
assert_eq(res, df.x.count())
# Output of chunk is a series
res = ddf.reduction(chunk, aggregate=agg)
assert res._name == ddf.reduction(chunk, aggregate=agg)._name
assert_eq(res, df.count())
# Test with keywords
res2 = ddf.reduction(chunk, aggregate=agg, chunk_kwargs={'val': 25})
assert (res2._name ==
ddf.reduction(chunk, aggregate=agg, chunk_kwargs={'val': 25})._name)
assert res2._name != res._name
assert_eq(res2, (df >= 25).sum())
# Output of chunk is a dataframe
def sum_and_count(x):
return pd.DataFrame({'sum': x.sum(), 'count': x.count()})
res = ddf.reduction(sum_and_count,
aggregate=lambda x: x.groupby(level=0).sum())
assert_eq(res, pd.DataFrame({'sum': df.sum(), 'count': df.count()}))
def test_reduction_method_split_every():
df = pd.Series([1] * 60)
ddf = dd.from_pandas(df, npartitions=15)
def chunk(x, constant=0):
return x.sum() + constant
def combine(x, constant=0):
return x.sum() + constant + 1
def agg(x, constant=0):
return x.sum() + constant + 2
f = lambda n: ddf.reduction(chunk, aggregate=agg, combine=combine,
chunk_kwargs=dict(constant=1.0),
combine_kwargs=dict(constant=2.0),
aggregate_kwargs=dict(constant=3.0),
split_every=n)
assert_max_deps(f(3), 3)
assert_max_deps(f(4), 4, False)
assert_max_deps(f(5), 5)
assert set(f(15).dask.keys()) == set(f(ddf.npartitions).dask.keys())
r3 = f(3)
r4 = f(4)
assert r3._name != r4._name
# Only intersect on reading operations
assert len(set(r3.dask.keys()) & set(r4.dask.keys())) == len(ddf.dask.keys())
# Keywords are different for each step
assert f(3).compute() == 60 + 15 + 7 * (2 + 1) + (3 + 2)
# Keywords are same for each step
res = ddf.reduction(chunk, aggregate=agg, combine=combine, constant=3.0,
split_every=3)
assert res.compute() == 60 + 15 * 3 + 7 * (3 + 1) + (3 + 2)
# No combine provided, combine is agg
res = ddf.reduction(chunk, aggregate=agg, constant=3.0, split_every=3)
assert res.compute() == 60 + 15 * 3 + 8 * (3 + 2)
# split_every must be >= 2
with pytest.raises(ValueError):
f(1)
# combine_kwargs with no combine provided
with pytest.raises(ValueError):
ddf.reduction(chunk, aggregate=agg, split_every=3,
chunk_kwargs=dict(constant=1.0),
combine_kwargs=dict(constant=2.0),
aggregate_kwargs=dict(constant=3.0))
def test_pipe():
df = pd.DataFrame({'x': range(50), 'y': range(50, 100)})
ddf = dd.from_pandas(df, npartitions=4)
def f(x, y, z=0):
return x + y + z
assert_eq(ddf.pipe(f, 1, z=2), f(ddf, 1, z=2))
assert_eq(ddf.x.pipe(f, 1, z=2), f(ddf.x, 1, z=2))
def test_gh_517():
arr = np.random.randn(100, 2)
df = pd.DataFrame(arr, columns=['a', 'b'])
ddf = dd.from_pandas(df, 2)
assert ddf.index.nunique().compute() == 100
ddf2 = dd.from_pandas(pd.concat([df, df]), 5)
assert ddf2.index.nunique().compute() == 100
def test_drop_axis_1():
df = pd.DataFrame({'x': [1, 2, 3, 4],
'y': [5, 6, 7, 8],
'z': [9, 10, 11, 12]})
ddf = dd.from_pandas(df, npartitions=2)
assert_eq(ddf.drop('y', axis=1), df.drop('y', axis=1))
assert_eq(ddf.drop(['y', 'z'], axis=1), df.drop(['y', 'z'], axis=1))
with pytest.raises(ValueError):
ddf.drop(['a', 'x'], axis=1)
assert_eq(ddf.drop(['a', 'x'], axis=1, errors='ignore'),
df.drop(['a', 'x'], axis=1, errors='ignore'))
def test_gh580():
df = pd.DataFrame({'x': np.arange(10, dtype=float)})
ddf = dd.from_pandas(df, 2)
assert_eq(np.cos(df['x']), np.cos(ddf['x']))
assert_eq(np.cos(df['x']), np.cos(ddf['x']))
def test_rename_dict():
renamer = {'a': 'A', 'b': 'B'}
assert_eq(d.rename(columns=renamer),
full.rename(columns=renamer))
def test_rename_function():
renamer = lambda x: x.upper()
assert_eq(d.rename(columns=renamer),
full.rename(columns=renamer))
def test_rename_index():
renamer = {0: 1}
pytest.raises(ValueError, lambda: d.rename(index=renamer))
def test_to_timestamp():
index = pd.PeriodIndex(freq='A', start='1/1/2001', end='12/1/2004')
df = pd.DataFrame({'x': [1, 2, 3, 4], 'y': [10, 20, 30, 40]}, index=index)
ddf = dd.from_pandas(df, npartitions=3)
assert_eq(ddf.to_timestamp(), df.to_timestamp())
assert_eq(ddf.to_timestamp(freq='M', how='s').compute(),
df.to_timestamp(freq='M', how='s'))
assert_eq(ddf.x.to_timestamp(), df.x.to_timestamp())
assert_eq(ddf.x.to_timestamp(freq='M', how='s').compute(),
df.x.to_timestamp(freq='M', how='s'))
def test_to_frame():
s = pd.Series([1, 2, 3], name='foo')
a = dd.from_pandas(s, npartitions=2)
assert_eq(s.to_frame(), a.to_frame())
assert_eq(s.to_frame('bar'), a.to_frame('bar'))
@pytest.mark.parametrize('as_frame', [False, True])
def test_to_dask_array_raises(as_frame):
s = pd.Series([1, 2, 3, 4, 5, 6], name='foo')
a = dd.from_pandas(s, npartitions=2)
if as_frame:
a = a.to_frame()
with pytest.raises(ValueError, message="4 != 2"):
a.to_dask_array((1, 2, 3, 4))
with pytest.raises(ValueError, message="Unexpected value"):
a.to_dask_array(5)
@pytest.mark.parametrize('as_frame', [False, False])
def test_to_dask_array_unknown(as_frame):
s = pd.Series([1, 2, 3, 4, 5], name='foo')
a = dd.from_pandas(s, chunksize=2)
if as_frame:
a = a.to_frame()
result = a.to_dask_array()
assert isinstance(result, da.Array)
result = result.chunks
if as_frame:
assert result[1] == (1,)
assert len(result) == 1
result = result[0]
assert len(result) == 2
assert all(np.isnan(x) for x in result)
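# NOTE: partition lengths are generally unknown to dask, so ``to_dask_array()``
# defaults to NaN chunk sizes along the rows (checked above); ``lengths=True``
# computes them, and an explicit sequence supplies them, as the next test
# covers:
#
#     a.to_dask_array(lengths=True).chunks   # e.g. ((2, 3),) for the 5-row series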
@pytest.mark.parametrize('lengths', [
[2, 3],
True,
])
@pytest.mark.parametrize('as_frame', [False, True])
def test_to_dask_array(as_frame, lengths):
s = pd.Series([1, 2, 3, 4, 5], name='foo')
a = dd.from_pandas(s, chunksize=2)
if as_frame:
a = a.to_frame()
result = a.to_dask_array(lengths=lengths)
assert isinstance(result, da.Array)
expected_chunks = ((2, 3),)
if as_frame:
expected_chunks = expected_chunks + ((1,),)
assert result.chunks == expected_chunks
def test_apply():
df = pd.DataFrame({'x': [1, 2, 3, 4], 'y': [10, 20, 30, 40]})
ddf = dd.from_pandas(df, npartitions=2)
func = lambda row: row['x'] + row['y']
assert_eq(ddf.x.apply(lambda x: x + 1, meta=("x", int)),
df.x.apply(lambda x: x + 1))
# specify meta
assert_eq(ddf.apply(lambda xy: xy[0] + xy[1], axis=1, meta=(None, int)),
df.apply(lambda xy: xy[0] + xy[1], axis=1))
assert_eq(ddf.apply(lambda xy: xy[0] + xy[1], axis='columns', meta=(None, int)),
df.apply(lambda xy: xy[0] + xy[1], axis='columns'))
# inference
with pytest.warns(None):
assert_eq(ddf.apply(lambda xy: xy[0] + xy[1], axis=1),
df.apply(lambda xy: xy[0] + xy[1], axis=1))
with pytest.warns(None):
assert_eq(ddf.apply(lambda xy: xy, axis=1),
df.apply(lambda xy: xy, axis=1))
# specify meta
func = lambda x: pd.Series([x, x])
assert_eq(ddf.x.apply(func, meta=[(0, int), (1, int)]), df.x.apply(func))
# inference
with pytest.warns(None):
assert_eq(ddf.x.apply(func), df.x.apply(func))
# axis=0
with pytest.raises(NotImplementedError):
ddf.apply(lambda xy: xy, axis=0)
with pytest.raises(NotImplementedError):
ddf.apply(lambda xy: xy, axis='index')
@pytest.mark.skipif(PY2,
reason="Global filter is applied by another library, and "
"not reset properly.")
def test_apply_warns():
df = pd.DataFrame({'x': [1, 2, 3, 4], 'y': [10, 20, 30, 40]})
ddf = dd.from_pandas(df, npartitions=2)
func = lambda row: row['x'] + row['y']
with pytest.warns(UserWarning) as w:
ddf.apply(func, axis=1)
assert len(w) == 1
with pytest.warns(None) as w:
ddf.apply(func, axis=1, meta=(None, int))
assert len(w) == 0
def test_applymap():
df = pd.DataFrame({'x': [1, 2, 3, 4], 'y': [10, 20, 30, 40]})
ddf = dd.from_pandas(df, npartitions=2)
assert_eq(ddf.applymap(lambda x: x + 1), df.applymap(lambda x: x + 1))
assert_eq(ddf.applymap(lambda x: (x, x)), df.applymap(lambda x: (x, x)))
def test_abs():
df = pd.DataFrame({'A': [1, -2, 3, -4, 5],
'B': [-6., -7, -8, -9, 10],
'C': ['a', 'b', 'c', 'd', 'e']})
ddf = dd.from_pandas(df, npartitions=2)
assert_eq(ddf.A.abs(), df.A.abs())
assert_eq(ddf[['A', 'B']].abs(), df[['A', 'B']].abs())
pytest.raises(ValueError, lambda: ddf.C.abs())
pytest.raises(TypeError, lambda: ddf.abs())
def test_round():
df = pd.DataFrame({'col1': [1.123, 2.123, 3.123],
'col2': [1.234, 2.234, 3.234]})
ddf = dd.from_pandas(df, npartitions=2)
assert_eq(ddf.round(), df.round())
assert_eq(ddf.round(2), df.round(2))
def test_cov():
# DataFrame
df = pd.util.testing.makeMissingDataframe(0.3, 42)
ddf = dd.from_pandas(df, npartitions=6)
res = ddf.cov()
res2 = ddf.cov(split_every=2)
res3 = ddf.cov(10)
res4 = ddf.cov(10, split_every=2)
sol = df.cov()
sol2 = df.cov(10)
assert_eq(res, sol)
assert_eq(res2, sol)
assert_eq(res3, sol2)
assert_eq(res4, sol2)
assert res._name == ddf.cov()._name
assert res._name != res2._name
assert res3._name != res4._name
assert res._name != res3._name
# Series
a = df.A
b = df.B
da = dd.from_pandas(a, npartitions=6)
db = dd.from_pandas(b, npartitions=7)
res = da.cov(db)
res2 = da.cov(db, split_every=2)
res3 = da.cov(db, 10)
res4 = da.cov(db, 10, split_every=2)
sol = a.cov(b)
sol2 = a.cov(b, 10)
assert_eq(res, sol)
assert_eq(res2, sol)
assert_eq(res3, sol2)
assert_eq(res4, sol2)
assert res._name == da.cov(db)._name
assert res._name != res2._name
assert res3._name != res4._name
assert res._name != res3._name
def test_corr():
# DataFrame
df = pd.util.testing.makeMissingDataframe(0.3, 42)
ddf = dd.from_pandas(df, npartitions=6)
res = ddf.corr()
res2 = ddf.corr(split_every=2)
res3 = ddf.corr(min_periods=10)
res4 = ddf.corr(min_periods=10, split_every=2)
sol = df.corr()
sol2 = df.corr(min_periods=10)
assert_eq(res, sol)
assert_eq(res2, sol)
assert_eq(res3, sol2)
assert_eq(res4, sol2)
assert res._name == ddf.corr()._name
assert res._name != res2._name
assert res3._name != res4._name
assert res._name != res3._name
pytest.raises(NotImplementedError, lambda: ddf.corr(method='spearman'))
# Series
a = df.A
b = df.B
da = dd.from_pandas(a, npartitions=6)
db = dd.from_pandas(b, npartitions=7)
res = da.corr(db)
res2 = da.corr(db, split_every=2)
res3 = da.corr(db, min_periods=10)
res4 = da.corr(db, min_periods=10, split_every=2)
sol = da.corr(db)
sol2 = da.corr(db, min_periods=10)
assert_eq(res, sol)
assert_eq(res2, sol)
assert_eq(res3, sol2)
assert_eq(res4, sol2)
assert res._name == da.corr(db)._name
assert res._name != res2._name
assert res3._name != res4._name
assert res._name != res3._name
pytest.raises(NotImplementedError, lambda: da.corr(db, method='spearman'))
pytest.raises(TypeError, lambda: da.corr(ddf))
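# NOTE: cov/corr are, roughly, tree reductions over per-partition counts, sums
# and co-moments (hence the ``split_every`` variants above), so they agree with
# pandas only up to floating point; test_cov_corr_stable below stresses exactly
# that accumulation. Sketch:
#
#     ddf.a.cov(ddf.b, split_every=2)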
def test_cov_corr_meta():
df = pd.DataFrame({'a': np.array([1, 2, 3]),
'b': np.array([1.0, 2.0, 3.0], dtype='f4'),
'c': np.array([1.0, 2.0, 3.0])},
index=pd.Index([1, 2, 3], name='myindex'))
ddf = dd.from_pandas(df, npartitions=2)
assert_eq(ddf.corr(), df.corr())
assert_eq(ddf.cov(), df.cov())
assert ddf.a.cov(ddf.b)._meta.dtype == 'f8'
assert ddf.a.corr(ddf.b)._meta.dtype == 'f8'
@pytest.mark.slow
def test_cov_corr_stable():
df = pd.DataFrame(np.random.uniform(-1, 1, (20000000, 2)), columns=['a', 'b'])
ddf = dd.from_pandas(df, npartitions=50)
assert_eq(ddf.cov(split_every=8), df.cov())
assert_eq(ddf.corr(split_every=8), df.corr())
def test_cov_corr_mixed():
size = 1000
d = {'dates' : pd.date_range('2015-01-01', periods=size, freq='1T'),
'unique_id' : np.arange(0, size),
'ints' : np.random.randint(0, size, size=size),
'floats' : np.random.randn(size),
'bools' : np.random.choice([0, 1], size=size),
'int_nans' : np.random.choice([0, 1, np.nan], size=size),
'float_nans' : np.random.choice([0.0, 1.0, np.nan], size=size),
'constant' : 1,
'int_categorical' : np.random.choice([10, 20, 30, 40, 50], size=size),
'categorical_binary' : np.random.choice(['a', 'b'], size=size),
'categorical_nans' : np.random.choice(['a', 'b', 'c'], size=size)}
df = pd.DataFrame(d)
df['hardbools'] = df['bools'] == 1
df['categorical_nans'] = df['categorical_nans'].replace('c', np.nan)
df['categorical_binary'] = df['categorical_binary'].astype('category')
df['unique_id'] = df['unique_id'].astype(str)
ddf = dd.from_pandas(df, npartitions=20)
assert_eq(ddf.corr(split_every=4), df.corr(), check_divisions=False)
assert_eq(ddf.cov(split_every=4), df.cov(), check_divisions=False)
def test_autocorr():
x = pd.Series(np.random.random(100))
dx = dd.from_pandas(x, npartitions=10)
assert_eq(dx.autocorr(2), x.autocorr(2))
assert_eq(dx.autocorr(0), x.autocorr(0))
assert_eq(dx.autocorr(-2), x.autocorr(-2))
assert_eq(dx.autocorr(2, split_every=3), x.autocorr(2))
pytest.raises(TypeError, lambda: dx.autocorr(1.5))
def test_apply_infer_columns():
df = pd.DataFrame({'x': [1, 2, 3, 4], 'y': [10, 20, 30, 40]})
ddf = dd.from_pandas(df, npartitions=2)
def return_df(x):
# returns a Series whose index becomes the ['sum', 'mean'] columns of the resulting DataFrame
return pd.Series([x.sum(), x.mean()], index=['sum', 'mean'])
# DataFrame to completely different DataFrame
with pytest.warns(None):
result = ddf.apply(return_df, axis=1)
assert isinstance(result, dd.DataFrame)
tm.assert_index_equal(result.columns, pd.Index(['sum', 'mean']))
assert_eq(result, df.apply(return_df, axis=1))
# DataFrame to Series
with pytest.warns(None):
result = ddf.apply(lambda x: 1, axis=1)
assert isinstance(result, dd.Series)
assert result.name is None
assert_eq(result, df.apply(lambda x: 1, axis=1))
def return_df2(x):
return pd.Series([x * 2, x * 3], index=['x2', 'x3'])
# Series to completely different DataFrame
with pytest.warns(None):
result = ddf.x.apply(return_df2)
assert isinstance(result, dd.DataFrame)
tm.assert_index_equal(result.columns, pd.Index(['x2', 'x3']))
assert_eq(result, df.x.apply(return_df2))
# Series to Series
with pytest.warns(None):
result = ddf.x.apply(lambda x: 1)
assert isinstance(result, dd.Series)
assert result.name == 'x'
assert_eq(result, df.x.apply(lambda x: 1))
def test_index_time_properties():
i = tm.makeTimeSeries()
a = dd.from_pandas(i, npartitions=3)
assert 'day' in dir(a.index)
# returns a numpy array in pandas, but an Index in dask
assert_eq(a.index.day, pd.Index(i.index.day))
assert_eq(a.index.month, pd.Index(i.index.month))
def test_nlargest_nsmallest():
from string import ascii_lowercase
df = pd.DataFrame({'a': np.random.permutation(20),
'b': list(ascii_lowercase[:20]),
'c': np.random.permutation(20).astype('float64')})
ddf = dd.from_pandas(df, npartitions=3)
for m in ['nlargest', 'nsmallest']:
f = lambda df, *args, **kwargs: getattr(df, m)(*args, **kwargs)
res = f(ddf, 5, 'a')
res2 = f(ddf, 5, 'a', split_every=2)
sol = f(df, 5, 'a')
assert_eq(res, sol)
assert_eq(res2, sol)
assert res._name != res2._name
res = f(ddf, 5, ['a', 'c'])
res2 = f(ddf, 5, ['a', 'c'], split_every=2)
sol = f(df, 5, ['a', 'c'])
assert_eq(res, sol)
assert_eq(res2, sol)
assert res._name != res2._name
res = f(ddf.a, 5)
res2 = f(ddf.a, 5, split_every=2)
sol = f(df.a, 5)
assert_eq(res, sol)
assert_eq(res2, sol)
assert res._name != res2._name
def test_reset_index():
df = pd.DataFrame({'x': [1, 2, 3, 4], 'y': [10, 20, 30, 40]})
ddf = dd.from_pandas(df, npartitions=2)
sol = df.reset_index()
res = ddf.reset_index()
assert all(d is None for d in res.divisions)
assert_eq(res, sol, check_index=False)
sol = df.reset_index(drop=True)
res = ddf.reset_index(drop=True)
assert all(d is None for d in res.divisions)
assert_eq(res, sol, check_index=False)
sol = df.x.reset_index()
res = ddf.x.reset_index()
assert all(d is None for d in res.divisions)
assert_eq(res, sol, check_index=False)
sol = df.x.reset_index(drop=True)
res = ddf.x.reset_index(drop=True)
assert all(d is None for d in res.divisions)
assert_eq(res, sol, check_index=False)
def test_dataframe_compute_forward_kwargs():
x = dd.from_pandas(pd.DataFrame({'a': range(10)}), npartitions=2).a.sum()
x.compute(bogus_keyword=10)
def test_series_iteritems():
df = pd.DataFrame({'x': [1, 2, 3, 4]})
ddf = dd.from_pandas(df, npartitions=2)
for (a, b) in zip(df['x'].iteritems(), ddf['x'].iteritems()):
assert a == b
def test_dataframe_iterrows():
df = pd.DataFrame({'x': [1, 2, 3, 4], 'y': [10, 20, 30, 40]})
ddf = dd.from_pandas(df, npartitions=2)
for (a, b) in zip(df.iterrows(), ddf.iterrows()):
tm.assert_series_equal(a[1], b[1])
def test_dataframe_itertuples():
df = pd.DataFrame({'x': [1, 2, 3, 4], 'y': [10, 20, 30, 40]})
ddf = dd.from_pandas(df, npartitions=2)
for (a, b) in zip(df.itertuples(), ddf.itertuples()):
assert a == b
def test_astype():
df = pd.DataFrame({'x': [1, 2, 3, None], 'y': [10, 20, 30, 40]},
index=[10, 20, 30, 40])
a = dd.from_pandas(df, 2)
assert_eq(a.astype(float), df.astype(float))
assert_eq(a.x.astype(float), df.x.astype(float))
def test_astype_categoricals():
df = pd.DataFrame({'x': ['a', 'b', 'c', 'b', 'c'],
'y': ['x', 'y', 'z', 'x', 'y'],
'z': [1, 2, 3, 4, 5]})
df = df.astype({'y': 'category'})
ddf = dd.from_pandas(df, 2)
assert ddf.y.cat.known
ddf2 = ddf.astype({'x': 'category'})
assert not ddf2.x.cat.known
assert ddf2.y.cat.known
assert ddf2.x.dtype == 'category'
assert ddf2.compute().x.dtype == 'category'
dx = ddf.x.astype('category')
assert not dx.cat.known
assert dx.dtype == 'category'
assert dx.compute().dtype == 'category'
@pytest.mark.skipif(PANDAS_VERSION < '0.21.0',
reason="No CategoricalDtype with categories")
def test_astype_categoricals_known():
df = pd.DataFrame({'x': ['a', 'b', 'c', 'b', 'c'],
'y': ['x', 'y', 'z', 'y', 'z'],
'z': ['b', 'b', 'b', 'c', 'b'],
'other': [1, 2, 3, 4, 5]})
ddf = dd.from_pandas(df, 2)
abc = pd.api.types.CategoricalDtype(['a', 'b', 'c'])
category = pd.api.types.CategoricalDtype()
# DataFrame
ddf2 = ddf.astype({'x': abc,
'y': category,
'z': 'category',
'other': 'f8'})
for col, known in [('x', True), ('y', False), ('z', False)]:
x = getattr(ddf2, col)
assert pd.api.types.is_categorical_dtype(x.dtype)
assert x.cat.known == known
# Series
for dtype, known in [('category', False), (category, False), (abc, True)]:
dx2 = ddf.x.astype(dtype)
assert pd.api.types.is_categorical_dtype(dx2.dtype)
assert dx2.cat.known == known
def test_groupby_callable():
a = pd.DataFrame({'x': [1, 2, 3, None], 'y': [10, 20, 30, 40]},
index=[1, 2, 3, 4])
b = dd.from_pandas(a, 2)
def iseven(x):
return x % 2 == 0
assert_eq(a.groupby(iseven).y.sum(),
b.groupby(iseven).y.sum())
assert_eq(a.y.groupby(iseven).sum(),
b.y.groupby(iseven).sum())
def test_methods_tokenize_differently():
df = pd.DataFrame({'x': [1, 2, 3, 4]})
df = dd.from_pandas(df, npartitions=1)
assert (df.x.map_partitions(lambda x: pd.Series(x.min()))._name !=
df.x.map_partitions(lambda x: pd.Series(x.max()))._name)
def _assert_info(df, ddf, memory_usage=True):
from io import StringIO
assert isinstance(df, pd.DataFrame)
assert isinstance(ddf, dd.DataFrame)
buf_pd, buf_da = StringIO(), StringIO()
df.info(buf=buf_pd, memory_usage=memory_usage)
ddf.info(buf=buf_da, verbose=True, memory_usage=memory_usage)
stdout_pd = buf_pd.getvalue()
stdout_da = buf_da.getvalue()
stdout_da = stdout_da.replace(str(type(ddf)), str(type(df)))
assert stdout_pd == stdout_da
def test_info():
from io import StringIO
from dask.compatibility import unicode
pandas_format._put_lines = put_lines
test_frames = [
pd.DataFrame({'x': [1, 2, 3, 4], 'y': [1, 0, 1, 0]}, index=pd.Int64Index(range(4))), # No RangeIndex in dask
pd.DataFrame()
]
for df in test_frames:
ddf = dd.from_pandas(df, npartitions=4)
_assert_info(df, ddf)
buf = StringIO()
ddf = dd.from_pandas(pd.DataFrame({'x': [1, 2, 3, 4], 'y': [1, 0, 1, 0]}, index=range(4)), npartitions=4)
# Verbose=False
ddf.info(buf=buf, verbose=False)
assert buf.getvalue() == unicode("<class 'dask.dataframe.core.DataFrame'>\n"
"Columns: 2 entries, x to y\n"
"dtypes: int64(2)")
# buf=None
assert ddf.info(buf=None) is None
def test_groupby_multilevel_info():
# GH 1844
from io import StringIO
from dask.compatibility import unicode
pandas_format._put_lines = put_lines
df = pd.DataFrame({'A': [1, 1, 2, 2],
'B': [1, 2, 3, 4],
'C': [1, 2, 3, 4]})
ddf = dd.from_pandas(df, npartitions=2)
g = ddf.groupby(['A', 'B']).sum()
# slight difference between memory repr (single additional space)
_assert_info(g.compute(), g, memory_usage=False)
buf = StringIO()
g.info(buf, verbose=False)
assert buf.getvalue() == unicode("""<class 'dask.dataframe.core.DataFrame'>
Columns: 1 entries, C to C
dtypes: int64(1)""")
# multilevel
g = ddf.groupby(['A', 'B']).agg(['count', 'sum'])
_assert_info(g.compute(), g, memory_usage=False)
buf = StringIO()
g.info(buf, verbose=False)
expected = unicode(textwrap.dedent("""\
<class 'dask.dataframe.core.DataFrame'>
Columns: 2 entries, ('C', 'count') to ('C', 'sum')
dtypes: int64(2)"""))
assert buf.getvalue() == expected
def test_categorize_info():
# assert that we can call info after categorize
# workaround for: https://github.com/pydata/pandas/issues/14368
from io import StringIO
from dask.compatibility import unicode
pandas_format._put_lines = put_lines
df = pd.DataFrame({'x': [1, 2, 3, 4],
'y': pd.Series(list('aabc')),
'z': pd.Series(list('aabc'))},
index=pd.Int64Index(range(4))) # No RangeIndex in dask
ddf = dd.from_pandas(df, npartitions=4).categorize(['y'])
# Verbose=False
buf = StringIO()
ddf.info(buf=buf, verbose=True)
expected = unicode("<class 'dask.dataframe.core.DataFrame'>\n"
"Int64Index: 4 entries, 0 to 3\n"
"Data columns (total 3 columns):\n"
"x 4 non-null int64\n"
"y 4 non-null category\n"
"z 4 non-null object\n"
"dtypes: category(1), object(1), int64(1)")
assert buf.getvalue() == expected
def test_gh_1301():
df = pd.DataFrame([['1', '2'], ['3', '4']])
ddf = dd.from_pandas(df, npartitions=2)
ddf2 = ddf.assign(y=ddf[1].astype(int))
assert_eq(ddf2, df.assign(y=df[1].astype(int)))
assert ddf2.dtypes['y'] == np.dtype(int)
def test_timeseries_sorted():
df = tm.makeTimeDataFrame()
ddf = dd.from_pandas(df.reset_index(), npartitions=2)
df.index.name = 'index'
assert_eq(ddf.set_index('index', sorted=True, drop=True), df)
def test_column_assignment():
df = pd.DataFrame({'x': [1, 2, 3, 4], 'y': [1, 0, 1, 0]})
ddf = dd.from_pandas(df, npartitions=2)
orig = ddf.copy()
ddf['z'] = ddf.x + ddf.y
df['z'] = df.x + df.y
assert_eq(df, ddf)
assert 'z' not in orig.columns
def test_columns_assignment():
df = pd.DataFrame({'x': [1, 2, 3, 4]})
ddf = dd.from_pandas(df, npartitions=2)
df2 = df.assign(y=df.x + 1, z=df.x - 1)
df[['a', 'b']] = df2[['y', 'z']]
ddf2 = ddf.assign(y=ddf.x + 1, z=ddf.x - 1)
ddf[['a', 'b']] = ddf2[['y', 'z']]
assert_eq(df, ddf)
def test_attribute_assignment():
df = pd.DataFrame({'x': [1, 2, 3, 4, 5],
'y': [1., 2., 3., 4., 5.]})
ddf = dd.from_pandas(df, npartitions=2)
ddf.y = ddf.x + ddf.y
assert_eq(ddf, df.assign(y=df.x + df.y))
def test_setitem_triggering_realign():
a = dd.from_pandas(pd.DataFrame({"A": range(12)}), npartitions=3)
b = dd.from_pandas(pd.Series(range(12), name='B'), npartitions=4)
a['C'] = b
assert len(a) == 12
def test_inplace_operators():
df = pd.DataFrame({'x': [1, 2, 3, 4, 5],
'y': [1., 2., 3., 4., 5.]})
ddf = dd.from_pandas(df, npartitions=2)
ddf.y **= 0.5
assert_eq(ddf.y, df.y ** 0.5)
assert_eq(ddf, df.assign(y=df.y ** 0.5))
@pytest.mark.parametrize("skipna", [True, False])
@pytest.mark.parametrize("idx", [
np.arange(100),
sorted(np.random.random(size=100)),
pd.date_range('20150101', periods=100)
])
def test_idxmaxmin(idx, skipna):
df = pd.DataFrame(np.random.randn(100, 5), columns=list('abcde'), index=idx)
df.b.iloc[31] = np.nan
df.d.iloc[78] = np.nan
ddf = dd.from_pandas(df, npartitions=3)
assert_eq(df.idxmax(axis=1, skipna=skipna),
ddf.idxmax(axis=1, skipna=skipna))
assert_eq(df.idxmin(axis=1, skipna=skipna),
ddf.idxmin(axis=1, skipna=skipna))
assert_eq(df.idxmax(skipna=skipna), ddf.idxmax(skipna=skipna))
assert_eq(df.idxmax(skipna=skipna),
ddf.idxmax(skipna=skipna, split_every=2))
assert (ddf.idxmax(skipna=skipna)._name !=
ddf.idxmax(skipna=skipna, split_every=2)._name)
assert_eq(df.idxmin(skipna=skipna), ddf.idxmin(skipna=skipna))
assert_eq(df.idxmin(skipna=skipna),
ddf.idxmin(skipna=skipna, split_every=2))
assert (ddf.idxmin(skipna=skipna)._name !=
ddf.idxmin(skipna=skipna, split_every=2)._name)
assert_eq(df.a.idxmax(skipna=skipna), ddf.a.idxmax(skipna=skipna))
assert_eq(df.a.idxmax(skipna=skipna),
ddf.a.idxmax(skipna=skipna, split_every=2))
assert (ddf.a.idxmax(skipna=skipna)._name !=
ddf.a.idxmax(skipna=skipna, split_every=2)._name)
assert_eq(df.a.idxmin(skipna=skipna), ddf.a.idxmin(skipna=skipna))
assert_eq(df.a.idxmin(skipna=skipna),
ddf.a.idxmin(skipna=skipna, split_every=2))
assert (ddf.a.idxmin(skipna=skipna)._name !=
ddf.a.idxmin(skipna=skipna, split_every=2)._name)
def test_idxmaxmin_empty_partitions():
df = pd.DataFrame({'a': [1, 2, 3],
'b': [1.5, 2, 3],
'c': [np.NaN] * 3,
'd': [1, 2, np.NaN]})
empty = df.iloc[:0]
ddf = dd.concat([dd.from_pandas(df, npartitions=1)] +
[dd.from_pandas(empty, npartitions=1)] * 10)
for skipna in [True, False]:
assert_eq(ddf.idxmin(skipna=skipna, split_every=3),
df.idxmin(skipna=skipna))
assert_eq(ddf[['a', 'b', 'd']].idxmin(skipna=skipna, split_every=3),
df[['a', 'b', 'd']].idxmin(skipna=skipna))
assert_eq(ddf.b.idxmax(split_every=3), df.b.idxmax())
# Completely empty raises
ddf = dd.concat([dd.from_pandas(empty, npartitions=1)] * 10)
with pytest.raises(ValueError):
ddf.idxmax().compute()
with pytest.raises(ValueError):
ddf.b.idxmax().compute()
def test_getitem_meta():
data = {'col1': ['a', 'a', 'b'],
'col2': [0, 1, 0]}
df = pd.DataFrame(data=data, columns=['col1', 'col2'])
ddf = dd.from_pandas(df, npartitions=1)
assert_eq(df.col2[df.col1 == 'a'], ddf.col2[ddf.col1 == 'a'])
def test_getitem_multilevel():
pdf = pd.DataFrame({('A', '0') : [1,2,2], ('B', '1') : [1,2,3]})
ddf = dd.from_pandas(pdf, npartitions=3)
assert_eq(pdf['A', '0'], ddf['A', '0'])
assert_eq(pdf[[('A', '0'), ('B', '1')]], ddf[[('A', '0'), ('B', '1')]])
def test_getitem_string_subclass():
df = pd.DataFrame({'column_1': list(range(10))})
ddf = dd.from_pandas(df, npartitions=3)
class string_subclass(str):
pass
column_1 = string_subclass('column_1')
assert_eq(df[column_1], ddf[column_1])
@pytest.mark.parametrize('col_type', [list, np.array, pd.Series, pd.Index])
def test_getitem_column_types(col_type):
df = pd.DataFrame({'A': [1, 2], 'B': [3, 4], 'C': [5, 6]})
ddf = dd.from_pandas(df, 2)
cols = col_type(['C', 'A', 'B'])
assert_eq(df[cols], ddf[cols])
def test_ipython_completion():
df = pd.DataFrame({'a': [1], 'b': [2]})
ddf = dd.from_pandas(df, npartitions=1)
completions = ddf._ipython_key_completions_()
assert 'a' in completions
assert 'b' in completions
assert 'c' not in completions
def test_diff():
df = pd.DataFrame(np.random.randn(100, 5), columns=list('abcde'))
ddf = dd.from_pandas(df, 5)
assert_eq(ddf.diff(), df.diff())
assert_eq(ddf.diff(0), df.diff(0))
assert_eq(ddf.diff(2), df.diff(2))
assert_eq(ddf.diff(-2), df.diff(-2))
assert_eq(ddf.diff(2, axis=1), df.diff(2, axis=1))
assert_eq(ddf.a.diff(), df.a.diff())
assert_eq(ddf.a.diff(0), df.a.diff(0))
assert_eq(ddf.a.diff(2), df.a.diff(2))
assert_eq(ddf.a.diff(-2), df.a.diff(-2))
assert ddf.diff(2)._name == ddf.diff(2)._name
assert ddf.diff(2)._name != ddf.diff(3)._name
pytest.raises(TypeError, lambda: ddf.diff(1.5))
def test_shift():
df = tm.makeTimeDataFrame()
ddf = dd.from_pandas(df, npartitions=4)
# DataFrame
assert_eq(ddf.shift(), df.shift())
assert_eq(ddf.shift(0), df.shift(0))
assert_eq(ddf.shift(2), df.shift(2))
assert_eq(ddf.shift(-2), df.shift(-2))
assert_eq(ddf.shift(2, axis=1), df.shift(2, axis=1))
# Series
assert_eq(ddf.A.shift(), df.A.shift())
assert_eq(ddf.A.shift(0), df.A.shift(0))
assert_eq(ddf.A.shift(2), df.A.shift(2))
assert_eq(ddf.A.shift(-2), df.A.shift(-2))
with pytest.raises(TypeError):
ddf.shift(1.5)
def test_shift_with_freq():
df = tm.makeTimeDataFrame(30)
# DatetimeIndex
for data_freq, divs1 in [('B', False), ('D', True), ('H', True)]:
df = df.set_index(tm.makeDateIndex(30, freq=data_freq))
ddf = dd.from_pandas(df, npartitions=4)
for freq, divs2 in [('S', True), ('W', False),
(pd.Timedelta(10, unit='h'), True)]:
for d, p in [(ddf, df), (ddf.A, df.A), (ddf.index, df.index)]:
res = d.shift(2, freq=freq)
assert_eq(res, p.shift(2, freq=freq))
assert res.known_divisions == divs2
# Index shifts also work with freq=None
res = ddf.index.shift(2)
assert_eq(res, df.index.shift(2))
assert res.known_divisions == divs1
# PeriodIndex
for data_freq, divs in [('B', False), ('D', True), ('H', True)]:
df = df.set_index(pd.period_range('2000-01-01', periods=30,
freq=data_freq))
ddf = dd.from_pandas(df, npartitions=4)
for d, p in [(ddf, df), (ddf.A, df.A)]:
res = d.shift(2, freq=data_freq)
assert_eq(res, p.shift(2, freq=data_freq))
assert res.known_divisions == divs
# PeriodIndex.shift doesn't have `freq` parameter
res = ddf.index.shift(2)
assert_eq(res, df.index.shift(2))
assert res.known_divisions == divs
with pytest.raises(ValueError):
ddf.index.shift(2, freq='D') # freq keyword not supported
# TimedeltaIndex
for data_freq in ['T', 'D', 'H']:
df = df.set_index(tm.makeTimedeltaIndex(30, freq=data_freq))
ddf = dd.from_pandas(df, npartitions=4)
for freq in ['S', pd.Timedelta(10, unit='h')]:
for d, p in [(ddf, df), (ddf.A, df.A), (ddf.index, df.index)]:
res = d.shift(2, freq=freq)
assert_eq(res, p.shift(2, freq=freq))
assert res.known_divisions
# Index shifts also work with freq=None
res = ddf.index.shift(2)
assert_eq(res, df.index.shift(2))
assert res.known_divisions
# Other index types error
df = tm.makeDataFrame()
ddf = dd.from_pandas(df, npartitions=4)
pytest.raises(NotImplementedError, lambda: ddf.shift(2, freq='S'))
pytest.raises(NotImplementedError, lambda: ddf.A.shift(2, freq='S'))
pytest.raises(NotImplementedError, lambda: ddf.index.shift(2))
@pytest.mark.parametrize('method', ['first', 'last'])
def test_first_and_last(method):
f = lambda x, offset: getattr(x, method)(offset)
freqs = ['12h', 'D']
offsets = ['0d', '100h', '20d', '20B', '3W', '3M', '400d', '13M']
for freq in freqs:
index = pd.date_range('1/1/2000', '1/1/2001', freq=freq)[::4]
df = pd.DataFrame(np.random.random((len(index), 4)), index=index,
columns=['A', 'B', 'C', 'D'])
ddf = dd.from_pandas(df, npartitions=10)
for offset in offsets:
assert_eq(f(ddf, offset), f(df, offset))
assert_eq(f(ddf.A, offset), f(df.A, offset))
@pytest.mark.parametrize('npartitions', [1, 4, 20])
@pytest.mark.parametrize('split_every', [2, 5])
@pytest.mark.parametrize('split_out', [None, 1, 5, 20])
def test_hash_split_unique(npartitions, split_every, split_out):
from string import ascii_lowercase
s = pd.Series(np.random.choice(list(ascii_lowercase), 1000, replace=True))
ds = dd.from_pandas(s, npartitions=npartitions)
dropped = ds.unique(split_every=split_every, split_out=split_out)
dsk = dropped.__dask_optimize__(dropped.dask, dropped.__dask_keys__())
from dask.core import get_deps
dependencies, dependents = get_deps(dsk)
assert len([k for k, v in dependencies.items() if not v]) == npartitions
assert dropped.npartitions == (split_out or 1)
assert sorted(dropped.compute(scheduler='sync')) == sorted(s.unique())
@pytest.mark.parametrize('split_every', [None, 2])
def test_split_out_drop_duplicates(split_every):
x = np.concatenate([np.arange(10)] * 100)[:, None]
y = x.copy()
z = np.concatenate([np.arange(20)] * 50)[:, None]
rs = np.random.RandomState(1)
rs.shuffle(x)
rs.shuffle(y)
rs.shuffle(z)
df = pd.DataFrame(np.concatenate([x, y, z], axis=1), columns=['x', 'y', 'z'])
ddf = dd.from_pandas(df, npartitions=20)
for subset, keep in product([None, ['x', 'z']], ['first', 'last']):
sol = df.drop_duplicates(subset=subset, keep=keep)
res = ddf.drop_duplicates(subset=subset, keep=keep,
split_every=split_every, split_out=10)
assert res.npartitions == 10
assert_eq(sol, res)
@pytest.mark.parametrize('split_every', [None, 2])
def test_split_out_value_counts(split_every):
df = pd.DataFrame({'x': [1, 2, 3] * 100})
ddf = dd.from_pandas(df, npartitions=5)
assert ddf.x.value_counts(split_out=10, split_every=split_every).npartitions == 10
assert_eq(ddf.x.value_counts(split_out=10, split_every=split_every), df.x.value_counts())
def test_values():
from dask.array.utils import assert_eq
df = pd.DataFrame({'x': ['a', 'b', 'c', 'd'],
'y': [2, 3, 4, 5]},
index=pd.Index([1., 2., 3., 4.], name='ind'))
ddf = dd.from_pandas(df, 2)
assert_eq(df.values, ddf.values)
assert_eq(df.x.values, ddf.x.values)
assert_eq(df.y.values, ddf.y.values)
assert_eq(df.index.values, ddf.index.values)
def test_copy():
df = pd.DataFrame({'x': [1, 2, 3]})
a = dd.from_pandas(df, npartitions=2)
b = a.copy()
a['y'] = a.x * 2
assert_eq(b, df)
df['y'] = df.x * 2
def test_del():
df = pd.DataFrame({'x': ['a', 'b', 'c', 'd'],
'y': [2, 3, 4, 5]},
index=pd.Index([1., 2., 3., 4.], name='ind'))
a = dd.from_pandas(df, 2)
b = a.copy()
del a['x']
assert_eq(b, df)
del df['x']
assert_eq(a, df)
@pytest.mark.parametrize('index', [True, False])
@pytest.mark.parametrize('deep', [True, False])
def test_memory_usage(index, deep):
df = pd.DataFrame({'x': [1, 2, 3],
'y': [1.0, 2.0, 3.0],
'z': ['a', 'b', 'c']})
ddf = dd.from_pandas(df, npartitions=2)
assert_eq(df.memory_usage(index=index, deep=deep),
ddf.memory_usage(index=index, deep=deep))
assert (df.x.memory_usage(index=index, deep=deep) ==
ddf.x.memory_usage(index=index, deep=deep).compute())
@pytest.mark.parametrize('reduction', ['sum', 'mean', 'std', 'var', 'count',
'min', 'max', 'idxmin', 'idxmax',
'prod', 'all', 'sem'])
def test_dataframe_reductions_arithmetic(reduction):
df = pd.DataFrame({'x': [1, 2, 3, 4, 5],
'y': [1.1, 2.2, 3.3, 4.4, 5.5]})
ddf = dd.from_pandas(df, npartitions=3)
assert_eq(ddf - (getattr(ddf, reduction)() + 1),
df - (getattr(df, reduction)() + 1))
def test_datetime_loc_open_slicing():
dtRange = pd.date_range('01.01.2015','05.05.2015')
df = pd.DataFrame(np.random.random((len(dtRange), 2)), index=dtRange)
ddf = dd.from_pandas(df, npartitions=5)
assert_eq(df.loc[:'02.02.2015'], ddf.loc[:'02.02.2015'])
assert_eq(df.loc['02.02.2015':], ddf.loc['02.02.2015':])
assert_eq(df[0].loc[:'02.02.2015'], ddf[0].loc[:'02.02.2015'])
assert_eq(df[0].loc['02.02.2015':], ddf[0].loc['02.02.2015':])
def test_to_datetime():
df = pd.DataFrame({'year': [2015, 2016],
'month': [2, 3],
'day': [4, 5]})
ddf = dd.from_pandas(df, npartitions=2)
assert_eq(pd.to_datetime(df), dd.to_datetime(ddf))
s = pd.Series(['3/11/2000', '3/12/2000', '3/13/2000'] * 100)
ds = dd.from_pandas(s, npartitions=10)
assert_eq(pd.to_datetime(s, infer_datetime_format=True),
dd.to_datetime(ds, infer_datetime_format=True))
def test_to_timedelta():
s = pd.Series(range(10))
ds = dd.from_pandas(s, npartitions=2)
assert_eq(pd.to_timedelta(s), dd.to_timedelta(ds))
assert_eq(pd.to_timedelta(s, unit='h'), dd.to_timedelta(ds, unit='h'))
s = pd.Series([1, 2, 'this will error'])
ds = dd.from_pandas(s, npartitions=2)
assert_eq(pd.to_timedelta(s, errors='coerce'),
dd.to_timedelta(ds, errors='coerce'))
@pytest.mark.skipif(PANDAS_VERSION < '0.22.0',
reason="No isna method")
@pytest.mark.parametrize('values', [[np.NaN, 0], [1, 1]])
def test_isna(values):
s = pd.Series(values)
ds = dd.from_pandas(s, npartitions=2)
assert_eq(pd.isna(s), dd.isna(ds))
@pytest.mark.parametrize('drop', [0, 9])
def test_slice_on_filtered_boundary(drop):
# https://github.com/dask/dask/issues/2211
x = np.arange(10)
x[[5, 6]] -= 2
df = pd.DataFrame({"A": x, "B": np.arange(len(x))})
pdf = df.set_index("A").query("B != {}".format(drop))
ddf = dd.from_pandas(df, 1).set_index("A").query("B != {}".format(drop))
result = dd.concat([ddf, ddf.rename(columns={"B": "C"})], axis=1)
expected = pd.concat([pdf, pdf.rename(columns={"B": "C"})], axis=1)
assert_eq(result, expected)
def test_boundary_slice_nonmonotonic():
x = np.array([-1, -2, 2, 4, 3])
df = pd.DataFrame({"B": range(len(x))}, index=x)
result = methods.boundary_slice(df, 0, 4)
expected = df.iloc[2:]
tm.assert_frame_equal(result, expected)
result = methods.boundary_slice(df, -1, 4)
expected = df.drop(-2)
tm.assert_frame_equal(result, expected)
result = methods.boundary_slice(df, -2, 3)
expected = df.drop(4)
tm.assert_frame_equal(result, expected)
result = methods.boundary_slice(df, -2, 3.5)
expected = df.drop(4)
tm.assert_frame_equal(result, expected)
result = methods.boundary_slice(df, -2, 4)
expected = df
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize('start, stop, right_boundary, left_boundary, drop', [
(-1, None, False, False, [-1, -2]),
(-1, None, False, True, [-2]),
(None, 3, False, False, [3, 4]),
(None, 3, True, False, [4]),
# Missing keys
(-.5, None, False, False, [-1, -2]),
(-.5, None, False, True, [-1, -2]),
(-1.5, None, False, True, [-2]),
(None, 3.5, False, False, [4]),
(None, 3.5, True, False, [4]),
(None, 2.5, False, False, [3, 4]),
])
def test_with_boundary(start, stop, right_boundary, left_boundary, drop):
x = np.array([-1, -2, 2, 4, 3])
df = pd.DataFrame({"B": range(len(x))}, index=x)
result = methods.boundary_slice(df, start, stop, right_boundary, left_boundary)
expected = df.drop(drop)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize('index, left, right', [
(range(10), 0, 9),
(range(10), -1, None),
(range(10), None, 10),
([-1, 0, 2, 1], None, None),
([-1, 0, 2, 1], -1, None),
([-1, 0, 2, 1], None, 2),
([-1, 0, 2, 1], -2, 3),
(pd.date_range("2017", periods=10), None, None),
(pd.date_range("2017", periods=10), pd.Timestamp("2017"), None),
(pd.date_range("2017", periods=10), None, pd.Timestamp("2017-01-10")),
(pd.date_range("2017", periods=10), pd.Timestamp("2016"), None),
(pd.date_range("2017", periods=10), None, pd.Timestamp("2018")),
])
def test_boundary_slice_same(index, left, right):
df = pd.DataFrame({"A": range(len(index))}, index=index)
result = methods.boundary_slice(df, left, right)
tm.assert_frame_equal(result, df)
def test_better_errors_object_reductions():
# GH2452
s = pd.Series(['a', 'b', 'c', 'd'])
ds = dd.from_pandas(s, npartitions=2)
with pytest.raises(ValueError) as err:
ds.mean()
assert str(err.value) == "`mean` not supported with object series"
def test_sample_empty_partitions():
@dask.delayed
def make_df(n):
return pd.DataFrame(np.zeros((n, 4)), columns=list('abcd'))
ddf = dd.from_delayed([make_df(0), make_df(100), make_df(0)])
ddf2 = ddf.sample(frac=0.2)
# smoke test sample on empty partitions
res = ddf2.compute()
assert res.dtypes.equals(ddf2.dtypes)
def test_coerce():
df = pd.DataFrame(np.arange(100).reshape((10,10)))
ddf = dd.from_pandas(df, npartitions=2)
funcs = (int, float, complex)
for d,t in product(funcs,(ddf, ddf[0])):
pytest.raises(TypeError, lambda: t(d))
def test_bool():
df = pd.DataFrame(np.arange(100).reshape((10,10)))
ddf = dd.from_pandas(df, npartitions=2)
conditions = [ddf, ddf[0], ddf == ddf, ddf[0] == ddf[0]]
for cond in conditions:
with pytest.raises(ValueError):
bool(cond)
def test_cumulative_multiple_columns():
# GH 3037
df = pd.DataFrame(np.random.randn(100, 5), columns=list('abcde'))
ddf = dd.from_pandas(df, 5)
for d in [ddf, df]:
for c in df.columns:
d[c + 'cs'] = d[c].cumsum()
d[c + 'cmin'] = d[c].cummin()
d[c + 'cmax'] = d[c].cummax()
d[c + 'cp'] = d[c].cumprod()
assert_eq(ddf, df)
@pytest.mark.parametrize('func', [np.asarray, M.to_records])
def test_map_partition_array(func):
from dask.array.utils import assert_eq
df = pd.DataFrame({'x': [1, 2, 3, 4, 5],
'y': [6.0, 7.0, 8.0, 9.0, 10.0]},
index=['a', 'b', 'c', 'd', 'e'])
ddf = dd.from_pandas(df, npartitions=2)
for pre in [lambda a: a,
lambda a: a.x,
lambda a: a.y,
lambda a: a.index]:
try:
expected = func(pre(df))
except Exception:
continue
x = pre(ddf).map_partitions(func)
assert_eq(x, expected)
assert isinstance(x, da.Array)
assert x.chunks[0] == (np.nan, np.nan)
def test_map_partition_sparse():
sparse = pytest.importorskip('sparse')
df = pd.DataFrame({'x': [1, 2, 3, 4, 5],
'y': [6.0, 7.0, 8.0, 9.0, 10.0]},
index=['a', 'b', 'c', 'd', 'e'])
ddf = dd.from_pandas(df, npartitions=2)
def f(d):
return sparse.COO(np.array(d))
for pre in [lambda a: a,
lambda a: a.x]:
expected = f(pre(df))
result = pre(ddf).map_partitions(f)
assert isinstance(result, da.Array)
computed = result.compute()
assert (computed.data == expected.data).all()
assert (computed.coords == expected.coords).all()
def test_mixed_dask_array_operations():
df = pd.DataFrame({'x': [1, 2, 3]}, index=[4, 5, 6])
ddf = dd.from_pandas(df, npartitions=2)
assert_eq(df.x + df.x.values,
ddf.x + ddf.x.values)
assert_eq(df.x.values + df.x,
ddf.x.values + ddf.x)
assert_eq(df.x + df.index.values,
ddf.x + ddf.index.values)
assert_eq(df.index.values + df.x,
ddf.index.values + ddf.x)
def test_mixed_dask_array_operations_errors():
df = pd.DataFrame({'x': [1, 2, 3, 4, 5]}, index=[4, 5, 6, 7, 8])
ddf = dd.from_pandas(df, npartitions=2)
x = da.arange(5, chunks=((1, 4),))
x._chunks = ((np.nan, np.nan),)
with pytest.raises(ValueError):
(ddf.x + x).compute()
x = da.arange(5, chunks=((2, 2, 1),))
with pytest.raises(ValueError) as info:
ddf.x + x
assert 'add' in str(info.value)
def test_mixed_dask_array_multi_dimensional():
df = pd.DataFrame({'x': [1, 2, 3, 4, 5],
'y': [5., 6., 7., 8., 9.]},
columns=['x', 'y'])
ddf = dd.from_pandas(df, npartitions=2)
x = (df.values + 1).astype(float)
dx = (ddf.values + 1).astype(float)
assert_eq(ddf + dx + 1, df + x + 1)
assert_eq(ddf + dx.rechunk((None, 1)) + 1, df + x + 1)
assert_eq(ddf[['y', 'x']] + dx + 1, df[['y', 'x']] + x + 1)
def test_meta_raises():
    # Raise when we use a user defined function
s = pd.Series(['abcd', 'abcd'])
ds = dd.from_pandas(s, npartitions=2)
try:
ds.map(lambda x: x[3])
except ValueError as e:
assert "meta=" in str(e)
# But not otherwise
df = pd.DataFrame({'a': ['x', 'y', 'y'],
'b': ['x', 'y', 'z'],
'c': [1, 2, 3]})
ddf = dd.from_pandas(df, npartitions=1)
with pytest.raises(Exception) as info:
ddf.a + ddf.c
assert "meta=" not in str(info.value)
def test_dask_dataframe_holds_scipy_sparse_containers():
sparse = pytest.importorskip('scipy.sparse')
da = pytest.importorskip('dask.array')
x = da.random.random((1000, 10), chunks=(100, 10))
x[x < 0.9] = 0
df = dd.from_dask_array(x)
y = df.map_partitions(sparse.csr_matrix)
assert isinstance(y, da.Array)
vs = y.to_delayed().flatten().tolist()
values = dask.compute(*vs, scheduler='single-threaded')
assert all(isinstance(v, sparse.csr_matrix) for v in values)
|
gpl-3.0
|
mohanprasath/Course-Work
|
certifications/code/titanic_survival_exploration/visuals.py
|
10
|
5825
|
###########################################
# Suppress matplotlib user warnings
# Necessary for newer version of matplotlib
import warnings
warnings.filterwarnings("ignore", category = UserWarning, module = "matplotlib")
#
# Display inline matplotlib plots with IPython
from IPython import get_ipython
get_ipython().run_line_magic('matplotlib', 'inline')
###########################################
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
def filter_data(data, condition):
"""
    Remove rows of the data that do not match the condition provided.
    Takes a DataFrame and a single condition string and returns the filtered DataFrame.
    Each condition is a string of the following format:
    '<field> <op> <value>'
    where the following operations are valid: >, <, >=, <=, ==, !=
    Example conditions: "Sex == 'male'", 'Age < 18'
"""
field, op, value = condition.split(" ")
# convert value into number or strip excess quotes if string
try:
value = float(value)
    except ValueError:
value = value.strip("\'\"")
# get booleans for filtering
if op == ">":
matches = data[field] > value
elif op == "<":
matches = data[field] < value
elif op == ">=":
matches = data[field] >= value
elif op == "<=":
matches = data[field] <= value
elif op == "==":
matches = data[field] == value
elif op == "!=":
matches = data[field] != value
else: # catch invalid operation codes
raise Exception("Invalid comparison operator. Only >, <, >=, <=, ==, != allowed.")
# filter data and outcomes
data = data[matches].reset_index(drop = True)
return data
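# Illustrative usage sketch (not part of the original module): filter_data handles one
# condition at a time, so a list of conditions -- like the `filters` argument accepted
# by survival_stats below -- can be applied by chaining calls. The example column names
# ('Sex', 'Age') are assumptions based on the Titanic dataset used with this module.
def _example_chained_filters(data):
    # Keep only male passengers under 18, applying one condition per call.
    for condition in ["Sex == 'male'", 'Age < 18']:
        data = filter_data(data, condition)
    return data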
def survival_stats(data, outcomes, key, filters = []):
"""
Print out selected statistics regarding survival, given a feature of
interest and any number of filters (including no filters)
"""
# Check that the key exists
if key not in data.columns.values :
print "'{}' is not a feature of the Titanic data. Did you spell something wrong?".format(key)
return False
    # Return before visualizing if 'Cabin', 'PassengerId', or 'Ticket'
    # is selected: too many unique categories to display
if(key == 'Cabin' or key == 'PassengerId' or key == 'Ticket'):
print "'{}' has too many unique categories to display! Try a different feature.".format(key)
return False
# Merge data and outcomes into single dataframe
all_data = pd.concat([data, outcomes], axis = 1)
# Apply filters to data
for condition in filters:
all_data = filter_data(all_data, condition)
# Create outcomes DataFrame
all_data = all_data[[key, 'Survived']]
# Create plotting figure
plt.figure(figsize=(8,6))
# 'Numerical' features
if(key == 'Age' or key == 'Fare'):
        # Remove NaN values from the selected feature's data
all_data = all_data[~np.isnan(all_data[key])]
# Divide the range of data into bins and count survival rates
min_value = all_data[key].min()
max_value = all_data[key].max()
value_range = max_value - min_value
        # 'Fare' has a larger range of values than 'Age', so use wider bins
if(key == 'Fare'):
bins = np.arange(0, all_data['Fare'].max() + 20, 20)
if(key == 'Age'):
bins = np.arange(0, all_data['Age'].max() + 10, 10)
# Overlay each bin's survival rates
nonsurv_vals = all_data[all_data['Survived'] == 0][key].reset_index(drop = True)
surv_vals = all_data[all_data['Survived'] == 1][key].reset_index(drop = True)
plt.hist(nonsurv_vals, bins = bins, alpha = 0.6,
color = 'red', label = 'Did not survive')
plt.hist(surv_vals, bins = bins, alpha = 0.6,
color = 'green', label = 'Survived')
# Add legend to plot
plt.xlim(0, bins.max())
plt.legend(framealpha = 0.8)
# 'Categorical' features
else:
# Set the various categories
if(key == 'Pclass'):
values = np.arange(1,4)
if(key == 'Parch' or key == 'SibSp'):
values = np.arange(0,np.max(data[key]) + 1)
if(key == 'Embarked'):
values = ['C', 'Q', 'S']
if(key == 'Sex'):
values = ['male', 'female']
# Create DataFrame containing categories and count of each
frame = pd.DataFrame(index = np.arange(len(values)), columns=(key,'Survived','NSurvived'))
for i, value in enumerate(values):
frame.loc[i] = [value, \
len(all_data[(all_data['Survived'] == 1) & (all_data[key] == value)]), \
len(all_data[(all_data['Survived'] == 0) & (all_data[key] == value)])]
# Set the width of each bar
bar_width = 0.4
# Display each category's survival rates
for i in np.arange(len(frame)):
nonsurv_bar = plt.bar(i-bar_width, frame.loc[i]['NSurvived'], width = bar_width, color = 'r')
surv_bar = plt.bar(i, frame.loc[i]['Survived'], width = bar_width, color = 'g')
plt.xticks(np.arange(len(frame)), values)
plt.legend((nonsurv_bar[0], surv_bar[0]),('Did not survive', 'Survived'), framealpha = 0.8)
# Common attributes for plot formatting
plt.xlabel(key)
plt.ylabel('Number of Passengers')
plt.title('Passenger Survival Statistics With \'%s\' Feature'%(key))
plt.show()
# Report number of passengers with missing values
if sum(pd.isnull(all_data[key])):
nan_outcomes = all_data[pd.isnull(all_data[key])]['Survived']
print "Passengers with missing '{}' values: {} ({} survived, {} did not survive)".format( \
key, len(nan_outcomes), sum(nan_outcomes == 1), sum(nan_outcomes == 0))
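# Illustrative usage sketch (an assumption about how this helper module is driven, not
# part of the original file): `data` and `outcomes` would be the Titanic feature
# DataFrame and the 'Survived' Series loaded in the accompanying notebook.
def _example_survival_stats_usage(data, outcomes):
    # Survival counts by sex, restricted to passengers younger than 18.
    survival_stats(data, outcomes, 'Sex', ["Age < 18"])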
|
gpl-3.0
|
wzbozon/scikit-learn
|
sklearn/ensemble/__init__.py
|
217
|
1307
|
"""
The :mod:`sklearn.ensemble` module includes ensemble-based methods for
classification and regression.
"""
from .base import BaseEnsemble
from .forest import RandomForestClassifier
from .forest import RandomForestRegressor
from .forest import RandomTreesEmbedding
from .forest import ExtraTreesClassifier
from .forest import ExtraTreesRegressor
from .bagging import BaggingClassifier
from .bagging import BaggingRegressor
from .weight_boosting import AdaBoostClassifier
from .weight_boosting import AdaBoostRegressor
from .gradient_boosting import GradientBoostingClassifier
from .gradient_boosting import GradientBoostingRegressor
from .voting_classifier import VotingClassifier
from . import bagging
from . import forest
from . import weight_boosting
from . import gradient_boosting
from . import partial_dependence
__all__ = ["BaseEnsemble",
"RandomForestClassifier", "RandomForestRegressor",
"RandomTreesEmbedding", "ExtraTreesClassifier",
"ExtraTreesRegressor", "BaggingClassifier",
"BaggingRegressor", "GradientBoostingClassifier",
"GradientBoostingRegressor", "AdaBoostClassifier",
"AdaBoostRegressor", "VotingClassifier",
"bagging", "forest", "gradient_boosting",
"partial_dependence", "weight_boosting"]
|
bsd-3-clause
|
matthew-tucker/mne-python
|
examples/preprocessing/plot_find_ecg_artifacts.py
|
19
|
1304
|
"""
==================
Find ECG artifacts
==================
Locate QRS component of ECG.
"""
# Authors: Alexandre Gramfort <[email protected]>
#
# License: BSD (3-clause)
import numpy as np
import matplotlib.pyplot as plt
import mne
from mne import io
from mne.datasets import sample
print(__doc__)
data_path = sample.data_path()
###############################################################################
# Set parameters
raw_fname = data_path + '/MEG/sample/sample_audvis_raw.fif'
# Setup for reading the raw data
raw = io.Raw(raw_fname)
event_id = 999
ecg_events, _, _ = mne.preprocessing.find_ecg_events(raw, event_id,
ch_name='MEG 1531')
# Read epochs
picks = mne.pick_types(raw.info, meg=False, eeg=False, stim=False, eog=False,
include=['MEG 1531'], exclude='bads')
tmin, tmax = -0.1, 0.1
epochs = mne.Epochs(raw, ecg_events, event_id, tmin, tmax, picks=picks,
proj=False)
data = epochs.get_data()
print("Number of detected ECG artifacts : %d" % len(data))
###############################################################################
# Plot ECG artifacts
plt.plot(1e3 * epochs.times, np.squeeze(data).T)
plt.xlabel('Times (ms)')
plt.ylabel('ECG')
plt.show()
|
bsd-3-clause
|
ThomasMiconi/nupic.research
|
projects/wavelet_dataAggregation/runDataAggregationExperiment.py
|
11
|
21206
|
from os.path import isfile, join, exists
import pandas as pd
import numpy as np
from scipy import signal
import numpy.matlib
import csv
import os
import time
os.environ['TZ'] = 'GMT'
time.tzset()
display = True
if display:
import matplotlib.pyplot as plt
plt.close('all')
plt.ion()
def plotWaveletPower(sig, cwtmatr, time_scale, x_range=None, title=''):
"""
Display wavelet transformations along with the original data
  :param sig: original signal
:param cwtmatr: cwt coefficients
:param time_scale: time scales of wavelets
:param x_range: x range of the plot
:param title: title of the plot
"""
if x_range is None:
x_range = range(0, cwtmatr.shape[1])
fig, ax = plt.subplots(nrows=2, ncols=1)
y_time_scale_tick = ['1-sec', '1mins', '5mins', '30mins', '60mins', '2hrs', '4hrs', '12hrs', '1day', '1week']
y_time_scale = [1, 60, 300, 1800, 3600, 7200, 14400, 43200, 86400, 604800]
y_tick = (np.log10(y_time_scale) - np.log10(time_scale[0]) ) / \
(np.log10(time_scale[-1]) - np.log10(time_scale[0])) * (len(time_scale)-1)
good_tick = np.where(np.logical_and(y_tick >= 0, y_tick < len(time_scale)))[0]
y_tick = y_tick[good_tick]
y_time_scale_tick = [y_time_scale_tick[i] for i in good_tick]
ax[0].imshow(np.abs(cwtmatr[:, x_range]), aspect='auto')
ax[0].set_yticks(y_tick)
ax[0].set_yticklabels(y_time_scale_tick)
ax[0].set_xlabel(' Time ')
ax[0].set_title(title)
ax[1].plot(sig[x_range])
ax[1].set_xlabel(' Time ')
ax[1].autoscale(tight=True)
plt.show()
def calculate_cwt(sampling_interval, sig, figDir='./', fileName='./', display=True):
"""
Calculate continuous wavelet transformation (CWT)
  Return the variance of the cwt coefficients over time and its cumulative
distribution
:param sampling_interval: sampling interval of the time series
:param sig: value of the time series
:param figDir: directory of cwt plots
:param fileName: name of the dataset, used for determining figDir
:param display: whether to create the cwt plot
"""
t = np.array(range(len(sig)))*sampling_interval
widths = np.logspace(0, np.log10(len(sig)/20), 50)
T = int(widths[-1])
  # continuous wavelet transformation with the Ricker wavelet
cwtmatr = signal.cwt(sig, signal.ricker, widths)
cwtmatr = cwtmatr[:, 4*T:-4*T]
sig = sig[4*T:-4*T]
t = t[4*T:-4*T]
freq = 1/widths.astype('float') / sampling_interval / 4
time_scale = widths * sampling_interval * 4
# variance of wavelet power
cwt_var = np.var(np.abs(cwtmatr), axis=1)
cwt_var = cwt_var/np.sum(cwt_var)
cum_cwt_var = np.cumsum(cwt_var)
(useTimeOfDay, useDayOfWeek, local_min, local_max, strong_local_max) = get_local_maxima(cwt_var, time_scale)
if not exists(figDir):
os.makedirs(figDir)
if display:
# plot wavelet coefficients along with the raw signal
plt.close('all')
plotWaveletPower(sig, cwtmatr, time_scale)
plt.savefig(join(figDir, fileName + 'wavelet_transform.pdf'))
fig, axs = plt.subplots(nrows=2, ncols=1)
ax = axs[0]
ax.plot(time_scale, cwt_var, '-o')
ax.axvline(x=86400, color='c')
ax.axvline(x=604800, color='c')
for _ in xrange(len(local_max)):
ax.axvline(x=time_scale[local_max[_]], color='r')
for _ in xrange(len(strong_local_max)):
ax.axvline(x=time_scale[strong_local_max[_]], color='k')
for _ in xrange(len(local_min)):
ax.axvline(x=time_scale[local_min[_]], color='b')
ax.set_xscale('log')
ax.set_xlabel(' Time Scale (sec) ')
ax.set_ylabel(' Variance of Power')
    ax.autoscale(tight=True)
ax.set_title(fileName)
ax = axs[1]
ax.plot(time_scale, cum_cwt_var, '-o')
ax.set_xscale('log')
ax.set_xlabel(' Time Scale (sec) ')
ax.set_ylabel(' Accumulated Variance of Power')
    ax.autoscale(tight=True)
plt.title(['useTimeOfDay: '+str(useTimeOfDay)+' useDayOfWeek: '+str(useDayOfWeek)])
plt.savefig(join(figDir, fileName + 'aggregation_time_scale.pdf'))
return cum_cwt_var, cwt_var, time_scale
def get_local_maxima(cwt_var, time_scale):
"""
Find local maxima from the wavelet coefficient variance spectrum
  A strong local maximum is defined as one that is
  (1) at least 10% higher than the nearest local minimum, and
  (2) above the baseline value
"""
# peak & valley detection
local_min = (np.diff(np.sign(np.diff(cwt_var))) > 0).nonzero()[0] + 1
local_max = (np.diff(np.sign(np.diff(cwt_var))) < 0).nonzero()[0] + 1
baseline_value = 1.0/len(cwt_var)
dayPeriod = 86400.0
weekPeriod = 604800.0
cwt_var_at_dayPeriod = np.interp(dayPeriod, time_scale, cwt_var)
cwt_var_at_weekPeriod = np.interp(weekPeriod, time_scale, cwt_var)
useTimeOfDay = False
useDayOfWeek = False
strong_local_max = []
for i in xrange(len(local_max)):
left_local_min = np.where(np.less(local_min, local_max[i]))[0]
if len(left_local_min) == 0:
left_local_min = 0
left_local_min_value = cwt_var[0]
else:
left_local_min = local_min[left_local_min[-1]]
left_local_min_value = cwt_var[left_local_min]
right_local_min = np.where(np.greater(local_min, local_max[i]))[0]
if len(right_local_min) == 0:
right_local_min = len(cwt_var)-1
right_local_min_value = cwt_var[-1]
else:
right_local_min = local_min[right_local_min[0]]
right_local_min_value = cwt_var[right_local_min]
local_max_value = cwt_var[local_max[i]]
    nearest_local_min_value = max(left_local_min_value, right_local_min_value)
if ( (local_max_value - nearest_local_min_value)/nearest_local_min_value > 0.1 and
local_max_value > baseline_value):
strong_local_max.append(local_max[i])
if (time_scale[left_local_min] < dayPeriod and
dayPeriod < time_scale[right_local_min] and
cwt_var_at_dayPeriod > local_max_value/2.0):
# if np.abs(dayPeriod - time_scale[local_max[i]])/dayPeriod < 0.5:
useTimeOfDay = True
if (time_scale[left_local_min] < weekPeriod and
weekPeriod < time_scale[right_local_min] and
cwt_var_at_weekPeriod > local_max_value/2.0):
# if np.abs(weekPeriod - time_scale[local_max[i]])/weekPeriod < 0.5:
useDayOfWeek = True
return useTimeOfDay, useDayOfWeek, local_min, local_max, strong_local_max
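# Minimal sketch (not part of the original pipeline): the peak/valley detection in
# get_local_maxima relies on sign changes of the first difference. On the toy spectrum
# below the same expressions mark indices 1 and 3 as peaks and index 2 as a valley.
def _toy_peak_detection_example():
  toy_var = np.array([0.1, 0.3, 0.2, 0.4, 0.1])
  peaks = (np.diff(np.sign(np.diff(toy_var))) < 0).nonzero()[0] + 1    # array([1, 3])
  valleys = (np.diff(np.sign(np.diff(toy_var))) > 0).nonzero()[0] + 1  # array([2])
  return peaks, valleys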
def get_suggested_timescale_and_encoder(timestamp, sig, thresh=0.2):
dt = np.median(np.diff(timestamp))
dt_sec = dt.astype('float32')
# resample the data with homogeneous sampling intervals
(timestamp, sig) = resample_data(timestamp, sig, dt, display=True)
(cum_cwt_var, cwt_var, time_scale) = calculate_cwt(dt_sec, sig)
(useTimeOfDay, useDayOfWeek, local_min, local_max, strong_local_max) = get_local_maxima(cwt_var, time_scale)
cutoff_time_scale = time_scale[np.where(cum_cwt_var >= thresh)[0][0]]
aggregation_time_scale = cutoff_time_scale/10.0
if aggregation_time_scale < dt_sec*4:
aggregation_time_scale = dt_sec*4
new_sampling_interval = str(int(aggregation_time_scale/4))+'S'
return (new_sampling_interval, useTimeOfDay, useDayOfWeek)
def readCSVfiles(fileName):
"""
  Read a csv data file; the file must have two columns
  with headers "timestamp" and "value"
"""
fileReader = csv.reader(open(fileName, 'r'))
fileReader.next() # skip header line
timestamps = []
values = []
for row in fileReader:
timestamps.append(row[0])
values.append(row[1])
timestamps = np.array(timestamps, dtype='datetime64')
values = np.array(values, dtype='float32')
return (timestamps, values)
def writeCSVfiles(fileName, timestamp, value):
"""
  Write data to a csv file;
  the file will have two columns with headers "timestamp" and "value"
"""
fileWriter = csv.writer(open(fileName, 'w'))
fileWriter.writerow(['timestamp', 'value'])
for i in xrange(len(timestamp)):
fileWriter.writerow([timestamp[i].astype('O').strftime("%Y-%m-%d %H:%M:%S"),
value[i]])
def resample_data(timestamp, sig, new_sampling_interval, display=False):
"""
Resample time series data at new sampling interval using linear interpolation
  Note: the resampling function uses interpolation, so it may not be appropriate for aggregation purposes
:param timestamp: timestamp in numpy datetime64 type
:param sig: value of the time series
  :param new_sampling_interval: new sampling interval
"""
nSampleNew = np.floor((timestamp[-1] - timestamp[0])/new_sampling_interval).astype('int') + 1
timestamp_new = np.empty(nSampleNew, dtype='datetime64[s]')
for sampleI in xrange(nSampleNew):
timestamp_new[sampleI] = timestamp[0] + sampleI * new_sampling_interval
sig_new = np.interp((timestamp_new-timestamp[0]).astype('float32'),
(timestamp-timestamp[0]).astype('float32'), sig)
if display:
plt.figure(3)
plt.plot(timestamp, sig)
plt.plot(timestamp_new, sig_new)
plt.legend(['before resampling', 'after resampling'])
return (timestamp_new, sig_new)
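# Minimal usage sketch (an assumption, not part of the original module): resample a
# series onto a homogeneous 5-minute grid; the pipeline above does the same thing with
# the median sampling interval before running the wavelet analysis.
def _example_resample_to_five_minutes(timestamp, sig):
  five_minutes = np.timedelta64(300, 's')
  return resample_data(timestamp, sig, five_minutes, display=False)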
def aggregate_data(thresh_list, dataFile, aggregatedDataPath, waveletDir='./wavelet/', display=False, verbose=0):
"""
  Aggregate an individual dataset; the aggregated data will be saved under aggregatedDataPath
  :param thresh_list: list of aggregation thresholds
  :param dataFile: path of the original datafile
  :param aggregatedDataPath: directory where the aggregated datafiles are saved
:param waveletDir: path of wavelet transformations (for visual inspection)
"""
data_file_dir = dataFile.split('/')
(timestamp, sig) = readCSVfiles(dataFile)
# dt = (timestamp[len(sig)-1] - timestamp[0])/(len(sig)-1)
dt = np.median(np.diff(timestamp))
dt_sec = dt.astype('float32')
# resample the data with homogeneous sampling intervals
(timestamp, sig) = resample_data(timestamp, sig, dt, display=True)
(cum_cwt_var, cwt_var, time_scale) = calculate_cwt(dt_sec, sig,
display=display,
figDir=join(waveletDir, data_file_dir[-2]),
fileName=data_file_dir[-1])
for thresh in thresh_list:
new_data_dir = join(aggregatedDataPath, 'thresh='+str(thresh), data_file_dir[-2])
if not exists(new_data_dir):
os.makedirs(new_data_dir)
new_data_file = join(new_data_dir, data_file_dir[-1])
# determine aggregation time scale
cutoff_time_scale = time_scale[np.where(cum_cwt_var >= thresh)[0][0]]
aggregation_time_scale = cutoff_time_scale/10.0
if aggregation_time_scale < dt_sec*4:
aggregation_time_scale = dt_sec*4
new_sampling_interval = np.timedelta64(int(aggregation_time_scale/4 * 1000), 'ms')
nSampleNew = np.floor((timestamp[-1] - timestamp[0])/new_sampling_interval).astype('int') + 1
timestamp_new = np.empty(nSampleNew, dtype='datetime64[s]')
value_new = np.empty(nSampleNew, dtype='float32')
left_sampleI = 0
new_sampleI = 0
for sampleI in xrange(len(sig)):
if timestamp[sampleI] >= timestamp[0] + new_sampleI * new_sampling_interval:
timestamp_new[new_sampleI] = timestamp[0] + new_sampleI * new_sampling_interval
value_new[new_sampleI] = (np.mean(sig[left_sampleI:sampleI+1]))
left_sampleI = sampleI+1
new_sampleI += 1
writeCSVfiles(new_data_file, timestamp_new, value_new)
if verbose > 0:
print " original length: ", len(sig), "\t file: ", dataFile
print "\t\tthreshold: ", thresh, "\t new length: ", len(value_new)
def aggregate_nab_data(thresh_list, dataPath='data/',
aggregatedDataPath='data_aggregate/',
waveletDir='wavelet/',
verbose=0):
"""
Aggregate all NAB data using the wavelet transformation based algorithm
  :param thresh_list: list of aggregation thresholds, each a number in [0, 1)
:param dataPath: path of the original NAB data
:param aggregatedDataPath: path of the aggregated NAB data
:param waveletDir: path of wavelet transformations (for visual inspection)
"""
if not exists(aggregatedDataPath):
os.makedirs(aggregatedDataPath)
dataDirs = [join(dataPath, f) for f in os.listdir(dataPath) if not isfile(join(dataPath, f))]
for dir in dataDirs:
datafiles = [join(dir, f) for f in os.listdir(dir) if isfile(join(dir, f))]
for i in range(len(datafiles)):
aggregate_data(thresh_list, datafiles[i], aggregatedDataPath, waveletDir, verbose=verbose)
def get_pre_aggregated_anomaly_score(data_path, result_folder, result_folder_pre_aggregate):
"""
This function transforms anomaly scores on the aggregated data file (in result_folder)
to the original sampling rate of the data (in data_path) before aggregation. The new anomaly
score will be saved to result_folder_pre_aggregate
"""
dataDirs = [join(result_folder, f) for f in os.listdir(result_folder) if not isfile(join(result_folder, f))]
for dir in dataDirs:
resultfiles = [join(dir, f) for f in os.listdir(dir) if isfile(join(dir, f))]
for i in range(len(resultfiles)):
result_file_dir = resultfiles[i].split('/')
original_data_file = join(data_path, result_file_dir[-2], result_file_dir[-1][8:])
dat = pd.read_csv(original_data_file, header=0, names=['timestamp', 'value'])
result = pd.read_csv(resultfiles[i], header=0,
names=['timestamp', 'value', 'anomaly_score', 'raw_score', 'label'])
time_stamp_pre_aggregation = pd.to_datetime(dat.timestamp)
time_stamp_after_aggregation = pd.to_datetime(result.timestamp)
binary_anomaly_score_pre_aggregation = np.zeros(shape=(len(dat),))
binary_anomaly_score_after_aggregation = np.zeros(shape=(len(result),))
for j in range(len(result)):
if result.anomaly_score[j] > .5:
binary_anomaly_score_after_aggregation[j] = 1
idx_original = np.argmin(abs(time_stamp_pre_aggregation - time_stamp_after_aggregation[j]))
binary_anomaly_score_pre_aggregation[idx_original] = 1
value_pre_aggregation = dat.value.values
raw_score_pre_aggregation = np.zeros(shape=(len(dat),))
label_pre_aggregation = np.zeros(shape=(len(dat),))
# raw_score_pre_aggregation = np.interp(time_stamp_original, time_stamp_after_aggregation, result.raw_score.values)
result_pre_aggregate = pd.DataFrame(np.transpose(np.array([time_stamp_pre_aggregation,
value_pre_aggregation,
binary_anomaly_score_pre_aggregation,
raw_score_pre_aggregation,
label_pre_aggregation])),
columns=['timestamp', 'value', 'anomaly_score', 'raw_score', 'label'])
result_file_dir_pre_aggregate = join(result_folder_pre_aggregate, result_file_dir[-2])
if not exists(result_file_dir_pre_aggregate):
os.makedirs(result_file_dir_pre_aggregate)
result_file_pre_aggregate = join(result_file_dir_pre_aggregate, result_file_dir[-1])
result_pre_aggregate.to_csv(result_file_pre_aggregate, index=False)
print " write pre-aggregated file to ", result_file_pre_aggregate
# compare anomaly scores before and after aggregations for individual files
# plt.figure(2)
# plt.plot(time_stamp_after_aggregation, binary_anomaly_score_after_aggregation)
# plt.plot(time_stamp_pre_aggregation, binary_anomaly_score_pre_aggregation)
def runTimeVsDataLength(dataPath):
"""
Plot Data Aggregation Algorithm Runtime vs length of the data
"""
dataDirs = [join(dataPath, f) for f in os.listdir(dataPath) if not isfile(join(dataPath, f))]
thresh = 0.2
dataLength = []
runTime = []
for dir in dataDirs:
datafiles = [join(dir, f) for f in os.listdir(dir) if isfile(join(dir, f))]
for i in range(len(datafiles)):
(timestamp, sig) = readCSVfiles(datafiles[i])
dataLength.append(len(sig))
start_time = time.time()
aggregate_data([thresh], datafiles[i], aggregatedDataPath='data_aggregate/', display=False)
end_time = time.time()
print " length: ", len(sig), " file: ", datafiles[i], " Time: ", (end_time - start_time)
runTime.append(end_time - start_time)
plt.figure()
plt.plot(dataLength, runTime, '*')
plt.xlabel(' Dataset Size (# Record)')
plt.ylabel(' Runtime (seconds) ')
plt.savefig('RuntimeVsDatasetSize.pdf')
return (dataLength, runTime)
if __name__ == "__main__":
NABPath = '/Users/ycui/nta/NAB/'
currentPath = os.getcwd()
thresh_list = [0, 0.02, 0.04, 0.06, 0.08, 0.1, 0.12, 0.14, 0.16, 0.18, 0.2,
0.22, 0.24, 0.26, 0.28, 0.3, 0.32, 0.34, 0.36, 0.38, 0.40]
# step 1: aggregate NAB data with different threshold
print " aggregating NAB data ..."
aggregate_nab_data(thresh_list, dataPath=NABPath+'data/', verbose=2)
# step 2: run HTM on aggregated NAB data
for thresh in thresh_list:
resultsAggregatePath = currentPath + "/results_aggregate/thresh=" + str(thresh) + "/numenta"
if not os.path.exists(resultsAggregatePath):
      os.makedirs(resultsAggregatePath)
print " run HTM on aggregated data with threshold " + str(thresh)
os.system("python " + NABPath + "run.py -d numenta --detect --dataDir " + currentPath + "/data_aggregate/thresh=" + str(thresh) + \
"/ --resultsDir "+ currentPath + "/results_aggregate/thresh=" + str(thresh) + " --skipConfirmation")
# step 3: get pre-aggregated anomaly score
for thresh in thresh_list:
preresultAggregatePath = currentPath + "/results_pre_aggregate/thresh=" + str(thresh) + "/numenta"
if not os.path.exists(preresultAggregatePath):
      os.makedirs(preresultAggregatePath)
get_pre_aggregated_anomaly_score(data_path=NABPath+'data/',
result_folder='results_aggregate/thresh=' + str(thresh) + '/numenta',
result_folder_pre_aggregate='results_pre_aggregate/thresh=' + str(thresh) + '/numenta')
# step 4: run NAB scoring
for thresh in thresh_list:
print " run scoring on aggregated data with threshold " + str(thresh)
os.system("python " + NABPath + "run.py -d numenta --score --skipConfirmation " +
"--thresholdsFile " + NABPath + "config/thresholds.json " +
"--resultsDir " + currentPath + "/results_pre_aggregate/thresh="+str(thresh)+"/")
# step 5: read & compare scores
standard_score = []
data_length_all = []
for thresh in thresh_list:
scorefile = "./results_pre_aggregate/thresh=" + str(thresh) + "/numenta/numenta_standard_scores.csv"
scoredf = pd.read_csv(scorefile, header=0)
scoredf = scoredf.sort('File')
scoredf.index = range(len(scoredf))
standard_score.append(scoredf.Score.values[:-1])
data_length = []
for i in xrange(len(scoredf.File)-1):
datafile = './data_aggregate/thresh=' + str(thresh) + '/' + scoredf.File[i]
dat = pd.read_csv(datafile, header=0, names=['timestamp', 'value'])
data_length.append(len(dat))
data_length_all.append(data_length)
data_length_all = np.array(data_length_all)
standard_score = np.array(standard_score)
short_dat = np.where(data_length_all[0, :] < 1000)[0]
long_dat = np.where(data_length_all[0, :] > 1000)[0]
use_dat = np.array(range(data_length_all.shape[1]))
use_dat = long_dat
# plt.imshow(data_length_all, interpolation='nearest', aspect='auto')
# plot anomaly score vs aggregation threshold
anomaly_score_diff = standard_score[:, long_dat] - numpy.matlib.repmat(standard_score[0, long_dat], len(thresh_list), 1)
shortFileName = []
for i in range(len(scoredf.File.values[:-1])):
file = scoredf.File.values[i]
fileName = file.split('/')[-1]
fileName = fileName[:-4]
shortFileName.append(fileName)
fig=plt.figure()
plt.imshow(anomaly_score_diff, interpolation='nearest', aspect='auto')
ytickLoc = range(len(thresh_list))
plt.yticks(ytickLoc, thresh_list)
plt.xticks(range(len(scoredf.File)-1), shortFileName, rotation='vertical')
plt.subplots_adjust(bottom=0.6)
plt.ylabel(' Threshold')
plt.title(' Anomaly Score Relative to BaseLine')
plt.colorbar()
plt.clim(-2, 2)
plt.savefig('AnomalyScore_Vs_AggregationThreshold_EachFile.pdf')
plt.figure()
plt.subplot(2, 1, 1)
plt.plot(np.array(thresh_list)*100, np.median(standard_score[:, use_dat], 1), '-o')
plt.plot(np.array(thresh_list)*100, np.mean(standard_score[:, use_dat], 1), '-o')
plt.legend(['Median', 'Mean'])
plt.xlabel(' Threshold (%)')
plt.ylabel(' Median Anomaly Score ')
plt.subplot(2, 1, 2)
plt.plot(np.array(thresh_list)*100, np.median(data_length_all[:, use_dat], 1), '-o')
plt.plot(np.array(thresh_list)*100, np.mean(data_length_all[:, use_dat], 1), '-o')
plt.xlabel(' Threshold (%)')
plt.ylabel(' Data Length ')
plt.legend(['Median', 'Mean'])
plt.savefig('AnomalyScore_Vs_AggregationThreshold.pdf')
num_better_anomaly_score = []
for i in xrange(len(thresh_list)-1):
num_better_anomaly_score.append(len(np.where(standard_score[i+1, :] > standard_score[0, :])[0]))
(dataLength, runTime) = runTimeVsDataLength(dataPath=NABPath+'data/')
|
agpl-3.0
|
quantumlib/Cirq
|
examples/examples_test.py
|
1
|
8983
|
import itertools
import numpy as np
import pytest
import matplotlib.pyplot as plt
import cirq
import examples.basic_arithmetic
import examples.bb84
import examples.bell_inequality
import examples.bernstein_vazirani
import examples.bcs_mean_field
import examples.cross_entropy_benchmarking_example
import examples.deutsch
import examples.grover
import examples.heatmaps
import examples.hello_qubit
import examples.hhl
import examples.hidden_shift_algorithm
import examples.noisy_simulation_example
import examples.phase_estimator
import examples.place_on_bristlecone
import examples.qaoa
import examples.quantum_fourier_transform
import examples.quantum_teleportation
import examples.qubit_characterizations_example
import examples.shor
import examples.simon_algorithm
import examples.superdense_coding
import examples.swap_networks
from examples.shors_code import OneQubitShorsCode
def test_example_runs_bernstein_vazirani():
examples.bernstein_vazirani.main(qubit_count=3)
# Check empty oracle case. Cover both biases.
a = cirq.NamedQubit('a')
assert list(examples.bernstein_vazirani.make_oracle([], a, [], False)) == []
assert list(examples.bernstein_vazirani.make_oracle([], a, [], True)) == [cirq.X(a)]
def test_example_runs_simon():
examples.simon_algorithm.main()
def test_example_runs_hidden_shift():
examples.hidden_shift_algorithm.main()
def test_example_runs_deutsch():
examples.deutsch.main()
def test_example_runs_hello_line():
pytest.importorskip("cirq_google")
examples.place_on_bristlecone.main()
def test_example_runs_hello_qubit():
examples.hello_qubit.main()
def test_example_runs_bell_inequality():
examples.bell_inequality.main()
def test_example_runs_bb84():
examples.bb84.main()
def test_example_runs_quantum_fourier_transform():
examples.quantum_fourier_transform.main()
def test_example_runs_bcs_mean_field():
pytest.importorskip("cirq_google")
examples.bcs_mean_field.main()
def test_example_runs_grover():
examples.grover.main()
def test_example_runs_basic_arithmetic():
examples.basic_arithmetic.main(n=2)
def test_example_runs_phase_estimator():
examples.phase_estimator.main(qnums=(2,), repetitions=2)
def test_example_heatmaps():
pytest.importorskip("cirq_google")
plt.switch_backend('agg')
examples.heatmaps.main()
def test_example_runs_qaoa():
examples.qaoa.main(repetitions=10, maxiter=5)
def test_example_runs_quantum_teleportation():
_, teleported = examples.quantum_teleportation.main(seed=12)
assert np.allclose(np.array([0.07023552, -0.9968105, -0.03788921]), teleported)
def test_example_runs_superdense_coding():
examples.superdense_coding.main()
def test_example_runs_hhl():
examples.hhl.main()
def test_example_runs_qubit_characterizations():
examples.qubit_characterizations_example.main(
minimum_cliffords=2, maximum_cliffords=6, cliffords_step=2
)
def test_example_swap_networks():
examples.swap_networks.main()
def test_example_cross_entropy_benchmarking():
examples.cross_entropy_benchmarking_example.main(
repetitions=10, num_circuits=2, cycles=[2, 3, 4]
)
def test_example_noisy_simulation():
examples.noisy_simulation_example.main()
def test_example_shor_modular_exp_register_size():
with pytest.raises(ValueError):
_ = examples.shor.ModularExp(
target=cirq.LineQubit.range(2), exponent=cirq.LineQubit.range(2, 5), base=4, modulus=5
)
def test_example_shor_modular_exp_register_type():
operation = examples.shor.ModularExp(
target=cirq.LineQubit.range(3), exponent=cirq.LineQubit.range(3, 5), base=4, modulus=5
)
with pytest.raises(ValueError):
_ = operation.with_registers(cirq.LineQubit.range(3))
with pytest.raises(ValueError):
_ = operation.with_registers(1, cirq.LineQubit.range(3, 6), 4, 5)
with pytest.raises(ValueError):
_ = operation.with_registers(
cirq.LineQubit.range(3), cirq.LineQubit.range(3, 6), cirq.LineQubit.range(6, 9), 5
)
with pytest.raises(ValueError):
_ = operation.with_registers(
cirq.LineQubit.range(3), cirq.LineQubit.range(3, 6), 4, cirq.LineQubit.range(6, 9)
)
def test_example_shor_modular_exp_registers():
target = cirq.LineQubit.range(3)
exponent = cirq.LineQubit.range(3, 5)
operation = examples.shor.ModularExp(target, exponent, 4, 5)
assert operation.registers() == (target, exponent, 4, 5)
new_target = cirq.LineQubit.range(5, 8)
new_exponent = cirq.LineQubit.range(8, 12)
new_operation = operation.with_registers(new_target, new_exponent, 6, 7)
assert new_operation.registers() == (new_target, new_exponent, 6, 7)
def test_example_shor_modular_exp_diagram():
target = cirq.LineQubit.range(3)
exponent = cirq.LineQubit.range(3, 5)
operation = examples.shor.ModularExp(target, exponent, 4, 5)
circuit = cirq.Circuit(operation)
cirq.testing.assert_has_diagram(
circuit,
"""
0: ───ModularExp(t*4**e % 5)───
│
1: ───t1───────────────────────
│
2: ───t2───────────────────────
│
3: ───e0───────────────────────
│
4: ───e1───────────────────────
""",
)
operation = operation.with_registers(target, 2, 4, 5)
circuit = cirq.Circuit(operation)
cirq.testing.assert_has_diagram(
circuit,
"""
0: ───ModularExp(t*4**2 % 5)───
│
1: ───t1───────────────────────
│
2: ───t2───────────────────────
""",
)
def assert_order(r: int, x: int, n: int) -> None:
"""Assert that r is the order of x modulo n."""
y = x
for _ in range(1, r):
assert y % n != 1
y *= x
assert y % n == 1
@pytest.mark.parametrize(
'x, n', ((2, 3), (5, 6), (2, 7), (6, 7), (5, 8), (6, 11), (6, 49), (7, 810))
)
def test_example_shor_naive_order_finder(x, n):
r = examples.shor.naive_order_finder(x, n)
assert_order(r, x, n)
@pytest.mark.parametrize('x, n', ((2, 3), (5, 6), (2, 7), (6, 7)))
def test_example_shor_quantum_order_finder(x, n):
r = None
for _ in range(15):
r = examples.shor.quantum_order_finder(x, n)
if r is not None:
break
assert_order(r, x, n)
@pytest.mark.parametrize('x, n', ((1, 7), (7, 7)))
def test_example_shor_naive_order_finder_invalid_x(x, n):
with pytest.raises(ValueError):
_ = examples.shor.naive_order_finder(x, n)
@pytest.mark.parametrize('x, n', ((1, 7), (7, 7)))
def test_example_shor_quantum_order_finder_invalid_x(x, n):
with pytest.raises(ValueError):
_ = examples.shor.quantum_order_finder(x, n)
@pytest.mark.parametrize('n', (4, 6, 15, 125, 101 * 103, 127 * 127))
def test_example_shor_find_factor_with_composite_n_and_naive_order_finder(n):
d = examples.shor.find_factor(n, examples.shor.naive_order_finder)
assert 1 < d < n
assert n % d == 0
@pytest.mark.parametrize('n', (4, 6, 15, 125))
def test_example_shor_find_factor_with_composite_n_and_quantum_order_finder(n):
d = examples.shor.find_factor(n, examples.shor.quantum_order_finder)
assert 1 < d < n
assert n % d == 0
@pytest.mark.parametrize(
'n, order_finder',
itertools.product(
(2, 3, 5, 11, 101, 127, 907),
(examples.shor.naive_order_finder, examples.shor.quantum_order_finder),
),
)
def test_example_shor_find_factor_with_prime_n(n, order_finder):
d = examples.shor.find_factor(n, order_finder)
assert d is None
@pytest.mark.parametrize('n', (2, 3, 15, 17, 2 ** 89 - 1))
def test_example_runs_shor_valid(n):
examples.shor.main(n=n)
@pytest.mark.parametrize('n', (-1, 0, 1))
def test_example_runs_shor_invalid(n):
with pytest.raises(ValueError):
examples.shor.main(n=n)
def test_example_qec_single_qubit():
mycode1 = OneQubitShorsCode()
my_circuit1 = cirq.Circuit(mycode1.encode())
my_circuit1 += cirq.Circuit(mycode1.correct())
my_circuit1 += cirq.measure(mycode1.physical_qubits[0])
sim1 = cirq.DensityMatrixSimulator()
result1 = sim1.run(my_circuit1, repetitions=1)
assert result1.measurements['0'] == [[0]]
mycode2 = OneQubitShorsCode()
my_circuit2 = cirq.Circuit(mycode2.apply_gate(cirq.X, 0))
with pytest.raises(IndexError):
mycode2.apply_gate(cirq.Z, 89)
my_circuit2 += cirq.Circuit(mycode2.encode())
my_circuit2 += cirq.Circuit(mycode2.correct())
my_circuit2 += cirq.measure(mycode2.physical_qubits[0])
sim2 = cirq.DensityMatrixSimulator()
result2 = sim2.run(my_circuit2, repetitions=1)
assert result2.measurements['0'] == [[1]]
|
apache-2.0
|
asnorkin/sentiment_analysis
|
site/lib/python2.7/site-packages/sklearn/tests/test_common.py
|
39
|
6031
|
"""
General tests for all estimators in sklearn.
"""
# Authors: Andreas Mueller <[email protected]>
# Gael Varoquaux [email protected]
# License: BSD 3 clause
from __future__ import print_function
import os
import warnings
import sys
import re
import pkgutil
from sklearn.externals.six import PY3
from sklearn.utils.testing import assert_false, clean_warning_registry
from sklearn.utils.testing import all_estimators
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import _named_check
import sklearn
from sklearn.cluster.bicluster import BiclusterMixin
from sklearn.linear_model.base import LinearClassifierMixin
from sklearn.utils.estimator_checks import (
_yield_all_checks,
check_parameters_default_constructible,
check_class_weight_balanced_linear_classifier)
def test_all_estimator_no_base_class():
# test that all_estimators doesn't find abstract classes.
for name, Estimator in all_estimators():
msg = ("Base estimators such as {0} should not be included"
" in all_estimators").format(name)
assert_false(name.lower().startswith('base'), msg=msg)
def test_all_estimators():
# Test that estimators are default-constructible, cloneable
# and have working repr.
estimators = all_estimators(include_meta_estimators=True)
# Meta sanity-check to make sure that the estimator introspection runs
# properly
assert_greater(len(estimators), 0)
for name, Estimator in estimators:
# some can just not be sensibly default constructed
yield (_named_check(check_parameters_default_constructible, name),
name, Estimator)
def test_non_meta_estimators():
# input validation etc for non-meta estimators
estimators = all_estimators()
for name, Estimator in estimators:
if issubclass(Estimator, BiclusterMixin):
continue
if name.startswith("_"):
continue
for check in _yield_all_checks(name, Estimator):
yield _named_check(check, name), name, Estimator
def test_configure():
    # Smoke test the 'configure' step of setup; this exercises all the
    # 'configure' functions in the setup.py files of the scikit.
cwd = os.getcwd()
setup_path = os.path.abspath(os.path.join(sklearn.__path__[0], '..'))
setup_filename = os.path.join(setup_path, 'setup.py')
if not os.path.exists(setup_filename):
return
try:
os.chdir(setup_path)
old_argv = sys.argv
sys.argv = ['setup.py', 'config']
clean_warning_registry()
with warnings.catch_warnings():
# The configuration spits out warnings when not finding
# Blas/Atlas development headers
warnings.simplefilter('ignore', UserWarning)
if PY3:
with open('setup.py') as f:
exec(f.read(), dict(__name__='__main__'))
else:
execfile('setup.py', dict(__name__='__main__'))
finally:
sys.argv = old_argv
os.chdir(cwd)
def test_class_weight_balanced_linear_classifiers():
classifiers = all_estimators(type_filter='classifier')
clean_warning_registry()
with warnings.catch_warnings(record=True):
linear_classifiers = [
(name, clazz)
for name, clazz in classifiers
if ('class_weight' in clazz().get_params().keys() and
issubclass(clazz, LinearClassifierMixin))]
for name, Classifier in linear_classifiers:
yield _named_check(check_class_weight_balanced_linear_classifier,
name), name, Classifier
@ignore_warnings
def test_import_all_consistency():
# Smoke test to check that any name in a __all__ list is actually defined
# in the namespace of the module or package.
pkgs = pkgutil.walk_packages(path=sklearn.__path__, prefix='sklearn.',
onerror=lambda _: None)
submods = [modname for _, modname, _ in pkgs]
for modname in submods + ['sklearn']:
if ".tests." in modname:
continue
package = __import__(modname, fromlist="dummy")
for name in getattr(package, '__all__', ()):
if getattr(package, name, None) is None:
raise AttributeError(
"Module '{0}' has no attribute '{1}'".format(
modname, name))
def test_root_import_all_completeness():
EXCEPTIONS = ('utils', 'tests', 'base', 'setup')
for _, modname, _ in pkgutil.walk_packages(path=sklearn.__path__,
onerror=lambda _: None):
if '.' in modname or modname.startswith('_') or modname in EXCEPTIONS:
continue
assert_in(modname, sklearn.__all__)
def test_all_tests_are_importable():
# Ensure that for each contentful subpackage, there is a test directory
# within it that is also a subpackage (i.e. a directory with __init__.py)
HAS_TESTS_EXCEPTIONS = re.compile(r'''(?x)
\.externals(\.|$)|
\.tests(\.|$)|
\._
''')
lookup = dict((name, ispkg)
for _, name, ispkg
in pkgutil.walk_packages(sklearn.__path__,
prefix='sklearn.'))
missing_tests = [name for name, ispkg in lookup.items()
if ispkg
and not HAS_TESTS_EXCEPTIONS.search(name)
and name + '.tests' not in lookup]
assert_equal(missing_tests, [],
'{0} do not have `tests` subpackages. Perhaps they require '
'__init__.py or an add_subpackage directive in the parent '
'setup.py'.format(missing_tests))
|
mit
|
pkruskal/scikit-learn
|
sklearn/feature_extraction/text.py
|
110
|
50157
|
# -*- coding: utf-8 -*-
# Authors: Olivier Grisel <[email protected]>
# Mathieu Blondel <[email protected]>
# Lars Buitinck <[email protected]>
# Robert Layton <[email protected]>
# Jochen Wersdörfer <[email protected]>
# Roman Sinayev <[email protected]>
#
# License: BSD 3 clause
"""
The :mod:`sklearn.feature_extraction.text` submodule gathers utilities to
build feature vectors from text documents.
"""
from __future__ import unicode_literals
import array
from collections import Mapping, defaultdict
import numbers
from operator import itemgetter
import re
import unicodedata
import numpy as np
import scipy.sparse as sp
from ..base import BaseEstimator, TransformerMixin
from ..externals import six
from ..externals.six.moves import xrange
from ..preprocessing import normalize
from .hashing import FeatureHasher
from .stop_words import ENGLISH_STOP_WORDS
from ..utils import deprecated
from ..utils.fixes import frombuffer_empty, bincount
from ..utils.validation import check_is_fitted
__all__ = ['CountVectorizer',
'ENGLISH_STOP_WORDS',
'TfidfTransformer',
'TfidfVectorizer',
'strip_accents_ascii',
'strip_accents_unicode',
'strip_tags']
def strip_accents_unicode(s):
"""Transform accentuated unicode symbols into their simple counterpart
Warning: the python-level loop and join operations make this
implementation 20 times slower than the strip_accents_ascii basic
normalization.
See also
--------
strip_accents_ascii
Remove accentuated char for any unicode symbol that has a direct
ASCII equivalent.
"""
return ''.join([c for c in unicodedata.normalize('NFKD', s)
if not unicodedata.combining(c)])
def strip_accents_ascii(s):
"""Transform accentuated unicode symbols into ascii or nothing
Warning: this solution is only suited for languages that have a direct
transliteration to ASCII symbols.
See also
--------
strip_accents_unicode
Remove accentuated char for any unicode symbol.
"""
nkfd_form = unicodedata.normalize('NFKD', s)
return nkfd_form.encode('ASCII', 'ignore').decode('ASCII')
def strip_tags(s):
"""Basic regexp based HTML / XML tag stripper function
For serious HTML/XML preprocessing you should rather use an external
library such as lxml or BeautifulSoup.
"""
return re.compile(r"<([^>]+)>", flags=re.UNICODE).sub(" ", s)
def _check_stop_list(stop):
if stop == "english":
return ENGLISH_STOP_WORDS
elif isinstance(stop, six.string_types):
raise ValueError("not a built-in stop list: %s" % stop)
elif stop is None:
return None
else: # assume it's a collection
return frozenset(stop)
class VectorizerMixin(object):
"""Provides common code for text vectorizers (tokenization logic)."""
_white_spaces = re.compile(r"\s\s+")
def decode(self, doc):
"""Decode the input into a string of unicode symbols
The decoding strategy depends on the vectorizer parameters.
"""
if self.input == 'filename':
with open(doc, 'rb') as fh:
doc = fh.read()
elif self.input == 'file':
doc = doc.read()
if isinstance(doc, bytes):
doc = doc.decode(self.encoding, self.decode_error)
if doc is np.nan:
raise ValueError("np.nan is an invalid document, expected byte or "
"unicode string.")
return doc
def _word_ngrams(self, tokens, stop_words=None):
"""Turn tokens into a sequence of n-grams after stop words filtering"""
# handle stop words
if stop_words is not None:
tokens = [w for w in tokens if w not in stop_words]
# handle token n-grams
min_n, max_n = self.ngram_range
if max_n != 1:
original_tokens = tokens
tokens = []
n_original_tokens = len(original_tokens)
for n in xrange(min_n,
min(max_n + 1, n_original_tokens + 1)):
for i in xrange(n_original_tokens - n + 1):
tokens.append(" ".join(original_tokens[i: i + n]))
return tokens
def _char_ngrams(self, text_document):
"""Tokenize text_document into a sequence of character n-grams"""
# normalize white spaces
text_document = self._white_spaces.sub(" ", text_document)
text_len = len(text_document)
ngrams = []
min_n, max_n = self.ngram_range
for n in xrange(min_n, min(max_n + 1, text_len + 1)):
for i in xrange(text_len - n + 1):
ngrams.append(text_document[i: i + n])
return ngrams
def _char_wb_ngrams(self, text_document):
"""Whitespace sensitive char-n-gram tokenization.
Tokenize text_document into a sequence of character n-grams
excluding any whitespace (operating only inside word boundaries)"""
# normalize white spaces
text_document = self._white_spaces.sub(" ", text_document)
min_n, max_n = self.ngram_range
ngrams = []
for w in text_document.split():
w = ' ' + w + ' '
w_len = len(w)
for n in xrange(min_n, max_n + 1):
offset = 0
ngrams.append(w[offset:offset + n])
while offset + n < w_len:
offset += 1
ngrams.append(w[offset:offset + n])
if offset == 0: # count a short word (w_len < n) only once
break
return ngrams
def build_preprocessor(self):
"""Return a function to preprocess the text before tokenization"""
if self.preprocessor is not None:
return self.preprocessor
# unfortunately python functools package does not have an efficient
# `compose` function that would have allowed us to chain a dynamic
# number of functions. However the cost of a lambda call is a few
        # hundred nanoseconds, which is negligible when compared to the
# cost of tokenizing a string of 1000 chars for instance.
noop = lambda x: x
# accent stripping
if not self.strip_accents:
strip_accents = noop
elif callable(self.strip_accents):
strip_accents = self.strip_accents
elif self.strip_accents == 'ascii':
strip_accents = strip_accents_ascii
elif self.strip_accents == 'unicode':
strip_accents = strip_accents_unicode
else:
raise ValueError('Invalid value for "strip_accents": %s' %
self.strip_accents)
if self.lowercase:
return lambda x: strip_accents(x.lower())
else:
return strip_accents
def build_tokenizer(self):
"""Return a function that splits a string into a sequence of tokens"""
if self.tokenizer is not None:
return self.tokenizer
token_pattern = re.compile(self.token_pattern)
return lambda doc: token_pattern.findall(doc)
def get_stop_words(self):
"""Build or fetch the effective stop words list"""
return _check_stop_list(self.stop_words)
def build_analyzer(self):
"""Return a callable that handles preprocessing and tokenization"""
if callable(self.analyzer):
return self.analyzer
preprocess = self.build_preprocessor()
if self.analyzer == 'char':
return lambda doc: self._char_ngrams(preprocess(self.decode(doc)))
elif self.analyzer == 'char_wb':
return lambda doc: self._char_wb_ngrams(
preprocess(self.decode(doc)))
elif self.analyzer == 'word':
stop_words = self.get_stop_words()
tokenize = self.build_tokenizer()
return lambda doc: self._word_ngrams(
tokenize(preprocess(self.decode(doc))), stop_words)
else:
raise ValueError('%s is not a valid tokenization scheme/analyzer' %
self.analyzer)
def _validate_vocabulary(self):
vocabulary = self.vocabulary
if vocabulary is not None:
if not isinstance(vocabulary, Mapping):
vocab = {}
for i, t in enumerate(vocabulary):
if vocab.setdefault(t, i) != i:
msg = "Duplicate term in vocabulary: %r" % t
raise ValueError(msg)
vocabulary = vocab
else:
indices = set(six.itervalues(vocabulary))
if len(indices) != len(vocabulary):
raise ValueError("Vocabulary contains repeated indices.")
for i in xrange(len(vocabulary)):
if i not in indices:
msg = ("Vocabulary of size %d doesn't contain index "
"%d." % (len(vocabulary), i))
raise ValueError(msg)
if not vocabulary:
raise ValueError("empty vocabulary passed to fit")
self.fixed_vocabulary_ = True
self.vocabulary_ = dict(vocabulary)
else:
self.fixed_vocabulary_ = False
def _check_vocabulary(self):
"""Check if vocabulary is empty or missing (not fit-ed)"""
msg = "%(name)s - Vocabulary wasn't fitted."
        check_is_fitted(self, 'vocabulary_', msg=msg)
if len(self.vocabulary_) == 0:
raise ValueError("Vocabulary is empty")
@property
@deprecated("The `fixed_vocabulary` attribute is deprecated and will be "
"removed in 0.18. Please use `fixed_vocabulary_` instead.")
def fixed_vocabulary(self):
return self.fixed_vocabulary_
class HashingVectorizer(BaseEstimator, VectorizerMixin):
"""Convert a collection of text documents to a matrix of token occurrences
It turns a collection of text documents into a scipy.sparse matrix holding
token occurrence counts (or binary occurrence information), possibly
normalized as token frequencies if norm='l1' or projected on the euclidean
unit sphere if norm='l2'.
This text vectorizer implementation uses the hashing trick to find the
token string name to feature integer index mapping.
This strategy has several advantages:
- it is very low memory scalable to large datasets as there is no need to
store a vocabulary dictionary in memory
- it is fast to pickle and un-pickle as it holds no state besides the
constructor parameters
- it can be used in a streaming (partial fit) or parallel pipeline as there
is no state computed during fit.
There are also a couple of cons (vs using a CountVectorizer with an
in-memory vocabulary):
- there is no way to compute the inverse transform (from feature indices to
string feature names) which can be a problem when trying to introspect
which features are most important to a model.
- there can be collisions: distinct tokens can be mapped to the same
feature index. However in practice this is rarely an issue if n_features
is large enough (e.g. 2 ** 18 for text classification problems).
- no IDF weighting as this would render the transformer stateful.
The hash function employed is the signed 32-bit version of Murmurhash3.
Read more in the :ref:`User Guide <text_feature_extraction>`.
Parameters
----------
input : string {'filename', 'file', 'content'}
If 'filename', the sequence passed as an argument to fit is
expected to be a list of filenames that need reading to fetch
the raw content to analyze.
If 'file', the sequence items must have a 'read' method (file-like
object) that is called to fetch the bytes in memory.
        Otherwise the input is expected to be a sequence of items that
        can be of type string or bytes, to be analyzed directly.
encoding : string, default='utf-8'
If bytes or files are given to analyze, this encoding is used to
decode.
decode_error : {'strict', 'ignore', 'replace'}
Instruction on what to do if a byte sequence is given to analyze that
contains characters not of the given `encoding`. By default, it is
'strict', meaning that a UnicodeDecodeError will be raised. Other
values are 'ignore' and 'replace'.
strip_accents : {'ascii', 'unicode', None}
Remove accents during the preprocessing step.
'ascii' is a fast method that only works on characters that have
        a direct ASCII mapping.
'unicode' is a slightly slower method that works on any characters.
None (default) does nothing.
analyzer : string, {'word', 'char', 'char_wb'} or callable
Whether the feature should be made of word or character n-grams.
Option 'char_wb' creates character n-grams only from text inside
word boundaries.
If a callable is passed it is used to extract the sequence of features
out of the raw, unprocessed input.
preprocessor : callable or None (default)
Override the preprocessing (string transformation) stage while
preserving the tokenizing and n-grams generation steps.
tokenizer : callable or None (default)
Override the string tokenization step while preserving the
preprocessing and n-grams generation steps.
Only applies if ``analyzer == 'word'``.
ngram_range : tuple (min_n, max_n), default=(1, 1)
The lower and upper boundary of the range of n-values for different
n-grams to be extracted. All values of n such that min_n <= n <= max_n
will be used.
stop_words : string {'english'}, list, or None (default)
If 'english', a built-in stop word list for English is used.
If a list, that list is assumed to contain stop words, all of which
will be removed from the resulting tokens.
Only applies if ``analyzer == 'word'``.
lowercase : boolean, default=True
Convert all characters to lowercase before tokenizing.
token_pattern : string
Regular expression denoting what constitutes a "token", only used
if ``analyzer == 'word'``. The default regexp selects tokens of 2
or more alphanumeric characters (punctuation is completely ignored
and always treated as a token separator).
n_features : integer, default=(2 ** 20)
The number of features (columns) in the output matrices. Small numbers
of features are likely to cause hash collisions, but large numbers
will cause larger coefficient dimensions in linear learners.
norm : 'l1', 'l2' or None, optional
Norm used to normalize term vectors. None for no normalization.
binary: boolean, default=False.
If True, all non zero counts are set to 1. This is useful for discrete
probabilistic models that model binary events rather than integer
counts.
dtype: type, optional
Type of the matrix returned by fit_transform() or transform().
non_negative : boolean, default=False
Whether output matrices should contain non-negative values only;
effectively calls abs on the matrix prior to returning it.
When True, output values can be interpreted as frequencies.
When False, output values will have expected value zero.
See also
--------
CountVectorizer, TfidfVectorizer
"""
def __init__(self, input='content', encoding='utf-8',
decode_error='strict', strip_accents=None,
lowercase=True, preprocessor=None, tokenizer=None,
stop_words=None, token_pattern=r"(?u)\b\w\w+\b",
ngram_range=(1, 1), analyzer='word', n_features=(2 ** 20),
binary=False, norm='l2', non_negative=False,
dtype=np.float64):
self.input = input
self.encoding = encoding
self.decode_error = decode_error
self.strip_accents = strip_accents
self.preprocessor = preprocessor
self.tokenizer = tokenizer
self.analyzer = analyzer
self.lowercase = lowercase
self.token_pattern = token_pattern
self.stop_words = stop_words
self.n_features = n_features
self.ngram_range = ngram_range
self.binary = binary
self.norm = norm
self.non_negative = non_negative
self.dtype = dtype
def partial_fit(self, X, y=None):
"""Does nothing: this transformer is stateless.
This method is just there to mark the fact that this transformer
can work in a streaming setup.
"""
return self
def fit(self, X, y=None):
"""Does nothing: this transformer is stateless."""
# triggers a parameter validation
self._get_hasher().fit(X, y=y)
return self
def transform(self, X, y=None):
"""Transform a sequence of documents to a document-term matrix.
Parameters
----------
X : iterable over raw text documents, length = n_samples
Samples. Each sample must be a text document (either bytes or
unicode strings, file name or file object depending on the
constructor argument) which will be tokenized and hashed.
y : (ignored)
Returns
-------
X : scipy.sparse matrix, shape = (n_samples, self.n_features)
Document-term matrix.
"""
analyzer = self.build_analyzer()
X = self._get_hasher().transform(analyzer(doc) for doc in X)
if self.binary:
X.data.fill(1)
if self.norm is not None:
X = normalize(X, norm=self.norm, copy=False)
return X
# Alias transform to fit_transform for convenience
fit_transform = transform
def _get_hasher(self):
return FeatureHasher(n_features=self.n_features,
input_type='string', dtype=self.dtype,
non_negative=self.non_negative)
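# Illustrative usage sketch for HashingVectorizer (the corpus below is made
# up; the shape follows from n_features): stateless hashing of raw documents
# into a fixed-width sparse matrix, with no fit step required.
def _demo_hashing_vectorizer():  # pragma: no cover
    docs = ['the quick brown fox', 'jumped over the lazy dog']
    vectorizer = HashingVectorizer(n_features=2 ** 8)
    X = vectorizer.transform(docs)
    # X is a 2 x 256 scipy.sparse matrix, L2-normalized by default.
    return X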
def _document_frequency(X):
"""Count the number of non-zero values for each feature in sparse X."""
if sp.isspmatrix_csr(X):
return bincount(X.indices, minlength=X.shape[1])
else:
return np.diff(sp.csc_matrix(X, copy=False).indptr)
class CountVectorizer(BaseEstimator, VectorizerMixin):
"""Convert a collection of text documents to a matrix of token counts
This implementation produces a sparse representation of the counts using
scipy.sparse.coo_matrix.
If you do not provide an a-priori dictionary and you do not use an analyzer
that does some kind of feature selection then the number of features will
be equal to the vocabulary size found by analyzing the data.
Read more in the :ref:`User Guide <text_feature_extraction>`.
Parameters
----------
input : string {'filename', 'file', 'content'}
If 'filename', the sequence passed as an argument to fit is
expected to be a list of filenames that need reading to fetch
the raw content to analyze.
If 'file', the sequence items must have a 'read' method (file-like
object) that is called to fetch the bytes in memory.
        Otherwise the input is expected to be a sequence of items that
        can be of type string or bytes, to be analyzed directly.
encoding : string, 'utf-8' by default.
If bytes or files are given to analyze, this encoding is used to
decode.
decode_error : {'strict', 'ignore', 'replace'}
Instruction on what to do if a byte sequence is given to analyze that
contains characters not of the given `encoding`. By default, it is
'strict', meaning that a UnicodeDecodeError will be raised. Other
values are 'ignore' and 'replace'.
strip_accents : {'ascii', 'unicode', None}
Remove accents during the preprocessing step.
'ascii' is a fast method that only works on characters that have
        a direct ASCII mapping.
'unicode' is a slightly slower method that works on any characters.
None (default) does nothing.
analyzer : string, {'word', 'char', 'char_wb'} or callable
Whether the feature should be made of word or character n-grams.
Option 'char_wb' creates character n-grams only from text inside
word boundaries.
If a callable is passed it is used to extract the sequence of features
out of the raw, unprocessed input.
Only applies if ``analyzer == 'word'``.
preprocessor : callable or None (default)
Override the preprocessing (string transformation) stage while
preserving the tokenizing and n-grams generation steps.
tokenizer : callable or None (default)
Override the string tokenization step while preserving the
preprocessing and n-grams generation steps.
Only applies if ``analyzer == 'word'``.
ngram_range : tuple (min_n, max_n)
The lower and upper boundary of the range of n-values for different
n-grams to be extracted. All values of n such that min_n <= n <= max_n
will be used.
stop_words : string {'english'}, list, or None (default)
If 'english', a built-in stop word list for English is used.
If a list, that list is assumed to contain stop words, all of which
will be removed from the resulting tokens.
Only applies if ``analyzer == 'word'``.
If None, no stop words will be used. max_df can be set to a value
in the range [0.7, 1.0) to automatically detect and filter stop
words based on intra corpus document frequency of terms.
lowercase : boolean, True by default
Convert all characters to lowercase before tokenizing.
token_pattern : string
Regular expression denoting what constitutes a "token", only used
        if ``analyzer == 'word'``. The default regexp selects tokens of 2
or more alphanumeric characters (punctuation is completely ignored
and always treated as a token separator).
max_df : float in range [0.0, 1.0] or int, default=1.0
When building the vocabulary ignore terms that have a document
frequency strictly higher than the given threshold (corpus-specific
stop words).
If float, the parameter represents a proportion of documents, integer
absolute counts.
This parameter is ignored if vocabulary is not None.
min_df : float in range [0.0, 1.0] or int, default=1
When building the vocabulary ignore terms that have a document
frequency strictly lower than the given threshold. This value is also
called cut-off in the literature.
If float, the parameter represents a proportion of documents, integer
absolute counts.
This parameter is ignored if vocabulary is not None.
max_features : int or None, default=None
If not None, build a vocabulary that only consider the top
max_features ordered by term frequency across the corpus.
This parameter is ignored if vocabulary is not None.
vocabulary : Mapping or iterable, optional
Either a Mapping (e.g., a dict) where keys are terms and values are
indices in the feature matrix, or an iterable over terms. If not
given, a vocabulary is determined from the input documents. Indices
in the mapping should not be repeated and should not have any gap
between 0 and the largest index.
binary : boolean, default=False
If True, all non zero counts are set to 1. This is useful for discrete
probabilistic models that model binary events rather than integer
counts.
dtype : type, optional
Type of the matrix returned by fit_transform() or transform().
Attributes
----------
vocabulary_ : dict
A mapping of terms to feature indices.
stop_words_ : set
Terms that were ignored because they either:
- occurred in too many documents (`max_df`)
- occurred in too few documents (`min_df`)
- were cut off by feature selection (`max_features`).
This is only available if no vocabulary was given.
See also
--------
HashingVectorizer, TfidfVectorizer
Notes
-----
The ``stop_words_`` attribute can get large and increase the model size
when pickling. This attribute is provided only for introspection and can
be safely removed using delattr or set to None before pickling.
"""
def __init__(self, input='content', encoding='utf-8',
decode_error='strict', strip_accents=None,
lowercase=True, preprocessor=None, tokenizer=None,
stop_words=None, token_pattern=r"(?u)\b\w\w+\b",
ngram_range=(1, 1), analyzer='word',
max_df=1.0, min_df=1, max_features=None,
vocabulary=None, binary=False, dtype=np.int64):
self.input = input
self.encoding = encoding
self.decode_error = decode_error
self.strip_accents = strip_accents
self.preprocessor = preprocessor
self.tokenizer = tokenizer
self.analyzer = analyzer
self.lowercase = lowercase
self.token_pattern = token_pattern
self.stop_words = stop_words
self.max_df = max_df
self.min_df = min_df
if max_df < 0 or min_df < 0:
raise ValueError("negative value for max_df of min_df")
self.max_features = max_features
if max_features is not None:
if (not isinstance(max_features, numbers.Integral) or
max_features <= 0):
raise ValueError(
"max_features=%r, neither a positive integer nor None"
% max_features)
self.ngram_range = ngram_range
self.vocabulary = vocabulary
self.binary = binary
self.dtype = dtype
def _sort_features(self, X, vocabulary):
"""Sort features by name
Returns a reordered matrix and modifies the vocabulary in place
"""
sorted_features = sorted(six.iteritems(vocabulary))
map_index = np.empty(len(sorted_features), dtype=np.int32)
for new_val, (term, old_val) in enumerate(sorted_features):
map_index[new_val] = old_val
vocabulary[term] = new_val
return X[:, map_index]
def _limit_features(self, X, vocabulary, high=None, low=None,
limit=None):
"""Remove too rare or too common features.
Prune features that are non zero in more samples than high or less
documents than low, modifying the vocabulary, and restricting it to
at most the limit most frequent.
This does not prune samples with zero features.
"""
if high is None and low is None and limit is None:
return X, set()
# Calculate a mask based on document frequencies
dfs = _document_frequency(X)
tfs = np.asarray(X.sum(axis=0)).ravel()
mask = np.ones(len(dfs), dtype=bool)
if high is not None:
mask &= dfs <= high
if low is not None:
mask &= dfs >= low
if limit is not None and mask.sum() > limit:
mask_inds = (-tfs[mask]).argsort()[:limit]
new_mask = np.zeros(len(dfs), dtype=bool)
new_mask[np.where(mask)[0][mask_inds]] = True
mask = new_mask
new_indices = np.cumsum(mask) - 1 # maps old indices to new
removed_terms = set()
for term, old_index in list(six.iteritems(vocabulary)):
if mask[old_index]:
vocabulary[term] = new_indices[old_index]
else:
del vocabulary[term]
removed_terms.add(term)
kept_indices = np.where(mask)[0]
if len(kept_indices) == 0:
raise ValueError("After pruning, no terms remain. Try a lower"
" min_df or a higher max_df.")
return X[:, kept_indices], removed_terms
def _count_vocab(self, raw_documents, fixed_vocab):
"""Create sparse feature matrix, and vocabulary where fixed_vocab=False
"""
if fixed_vocab:
vocabulary = self.vocabulary_
else:
# Add a new value when a new vocabulary item is seen
vocabulary = defaultdict()
vocabulary.default_factory = vocabulary.__len__
analyze = self.build_analyzer()
j_indices = _make_int_array()
indptr = _make_int_array()
indptr.append(0)
for doc in raw_documents:
for feature in analyze(doc):
try:
j_indices.append(vocabulary[feature])
except KeyError:
# Ignore out-of-vocabulary items for fixed_vocab=True
continue
indptr.append(len(j_indices))
if not fixed_vocab:
# disable defaultdict behaviour
vocabulary = dict(vocabulary)
if not vocabulary:
raise ValueError("empty vocabulary; perhaps the documents only"
" contain stop words")
j_indices = frombuffer_empty(j_indices, dtype=np.intc)
indptr = np.frombuffer(indptr, dtype=np.intc)
values = np.ones(len(j_indices))
X = sp.csr_matrix((values, j_indices, indptr),
shape=(len(indptr) - 1, len(vocabulary)),
dtype=self.dtype)
X.sum_duplicates()
return vocabulary, X
def fit(self, raw_documents, y=None):
"""Learn a vocabulary dictionary of all tokens in the raw documents.
Parameters
----------
raw_documents : iterable
An iterable which yields either str, unicode or file objects.
Returns
-------
self
"""
self.fit_transform(raw_documents)
return self
def fit_transform(self, raw_documents, y=None):
"""Learn the vocabulary dictionary and return term-document matrix.
This is equivalent to fit followed by transform, but more efficiently
implemented.
Parameters
----------
raw_documents : iterable
An iterable which yields either str, unicode or file objects.
Returns
-------
X : array, [n_samples, n_features]
Document-term matrix.
"""
# We intentionally don't call the transform method to make
# fit_transform overridable without unwanted side effects in
# TfidfVectorizer.
self._validate_vocabulary()
max_df = self.max_df
min_df = self.min_df
max_features = self.max_features
vocabulary, X = self._count_vocab(raw_documents,
self.fixed_vocabulary_)
if self.binary:
X.data.fill(1)
if not self.fixed_vocabulary_:
X = self._sort_features(X, vocabulary)
n_doc = X.shape[0]
max_doc_count = (max_df
if isinstance(max_df, numbers.Integral)
else max_df * n_doc)
min_doc_count = (min_df
if isinstance(min_df, numbers.Integral)
else min_df * n_doc)
if max_doc_count < min_doc_count:
raise ValueError(
"max_df corresponds to < documents than min_df")
X, self.stop_words_ = self._limit_features(X, vocabulary,
max_doc_count,
min_doc_count,
max_features)
self.vocabulary_ = vocabulary
return X
def transform(self, raw_documents):
"""Transform documents to document-term matrix.
Extract token counts out of raw text documents using the vocabulary
fitted with fit or the one provided to the constructor.
Parameters
----------
raw_documents : iterable
An iterable which yields either str, unicode or file objects.
Returns
-------
X : sparse matrix, [n_samples, n_features]
Document-term matrix.
"""
if not hasattr(self, 'vocabulary_'):
self._validate_vocabulary()
self._check_vocabulary()
# use the same matrix-building strategy as fit_transform
_, X = self._count_vocab(raw_documents, fixed_vocab=True)
if self.binary:
X.data.fill(1)
return X
def inverse_transform(self, X):
"""Return terms per document with nonzero entries in X.
Parameters
----------
X : {array, sparse matrix}, shape = [n_samples, n_features]
Returns
-------
X_inv : list of arrays, len = n_samples
List of arrays of terms.
"""
self._check_vocabulary()
if sp.issparse(X):
# We need CSR format for fast row manipulations.
X = X.tocsr()
else:
# We need to convert X to a matrix, so that the indexing
# returns 2D objects
X = np.asmatrix(X)
n_samples = X.shape[0]
terms = np.array(list(self.vocabulary_.keys()))
indices = np.array(list(self.vocabulary_.values()))
inverse_vocabulary = terms[np.argsort(indices)]
return [inverse_vocabulary[X[i, :].nonzero()[1]].ravel()
for i in range(n_samples)]
def get_feature_names(self):
"""Array mapping from feature integer indices to feature name"""
self._check_vocabulary()
return [t for t, i in sorted(six.iteritems(self.vocabulary_),
key=itemgetter(1))]
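# Illustrative usage sketch for CountVectorizer (made-up corpus): learn a
# vocabulary with fit_transform, then reuse it on unseen documents.
def _demo_count_vectorizer():  # pragma: no cover
    train = ['apple banana apple', 'banana cherry']
    vectorizer = CountVectorizer()
    X_train = vectorizer.fit_transform(train)
    # vocabulary_ maps terms to column indices, sorted alphabetically:
    # {'apple': 0, 'banana': 1, 'cherry': 2}
    X_new = vectorizer.transform(['cherry apple'])
    return X_train, X_new, vectorizer.vocabulary_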
def _make_int_array():
"""Construct an array.array of a type suitable for scipy.sparse indices."""
return array.array(str("i"))
class TfidfTransformer(BaseEstimator, TransformerMixin):
"""Transform a count matrix to a normalized tf or tf-idf representation
Tf means term-frequency while tf-idf means term-frequency times inverse
document-frequency. This is a common term weighting scheme in information
retrieval, that has also found good use in document classification.
The goal of using tf-idf instead of the raw frequencies of occurrence of a
token in a given document is to scale down the impact of tokens that occur
very frequently in a given corpus and that are hence empirically less
informative than features that occur in a small fraction of the training
corpus.
The actual formula used for tf-idf is tf * (idf + 1) = tf + tf * idf,
instead of tf * idf. The effect of this is that terms with zero idf, i.e.
that occur in all documents of a training set, will not be entirely
ignored. The formulas used to compute tf and idf depend on parameter
settings that correspond to the SMART notation used in IR, as follows:
Tf is "n" (natural) by default, "l" (logarithmic) when sublinear_tf=True.
Idf is "t" when use_idf is given, "n" (none) otherwise.
Normalization is "c" (cosine) when norm='l2', "n" (none) when norm=None.
Read more in the :ref:`User Guide <text_feature_extraction>`.
Parameters
----------
norm : 'l1', 'l2' or None, optional
Norm used to normalize term vectors. None for no normalization.
use_idf : boolean, default=True
Enable inverse-document-frequency reweighting.
smooth_idf : boolean, default=True
Smooth idf weights by adding one to document frequencies, as if an
extra document was seen containing every term in the collection
exactly once. Prevents zero divisions.
sublinear_tf : boolean, default=False
Apply sublinear tf scaling, i.e. replace tf with 1 + log(tf).
References
----------
.. [Yates2011] `R. Baeza-Yates and B. Ribeiro-Neto (2011). Modern
Information Retrieval. Addison Wesley, pp. 68-74.`
.. [MRS2008] `C.D. Manning, P. Raghavan and H. Schuetze (2008).
Introduction to Information Retrieval. Cambridge University
Press, pp. 118-120.`
"""
def __init__(self, norm='l2', use_idf=True, smooth_idf=True,
sublinear_tf=False):
self.norm = norm
self.use_idf = use_idf
self.smooth_idf = smooth_idf
self.sublinear_tf = sublinear_tf
def fit(self, X, y=None):
"""Learn the idf vector (global term weights)
Parameters
----------
X : sparse matrix, [n_samples, n_features]
a matrix of term/token counts
"""
if not sp.issparse(X):
X = sp.csc_matrix(X)
if self.use_idf:
n_samples, n_features = X.shape
df = _document_frequency(X)
# perform idf smoothing if required
df += int(self.smooth_idf)
n_samples += int(self.smooth_idf)
# log+1 instead of log makes sure terms with zero idf don't get
# suppressed entirely.
idf = np.log(float(n_samples) / df) + 1.0
self._idf_diag = sp.spdiags(idf,
diags=0, m=n_features, n=n_features)
return self
def transform(self, X, copy=True):
"""Transform a count matrix to a tf or tf-idf representation
Parameters
----------
X : sparse matrix, [n_samples, n_features]
a matrix of term/token counts
copy : boolean, default True
Whether to copy X and operate on the copy or perform in-place
operations.
Returns
-------
vectors : sparse matrix, [n_samples, n_features]
"""
if hasattr(X, 'dtype') and np.issubdtype(X.dtype, np.float):
# preserve float family dtype
X = sp.csr_matrix(X, copy=copy)
else:
# convert counts or binary occurrences to floats
X = sp.csr_matrix(X, dtype=np.float64, copy=copy)
n_samples, n_features = X.shape
if self.sublinear_tf:
np.log(X.data, X.data)
X.data += 1
if self.use_idf:
check_is_fitted(self, '_idf_diag', 'idf vector is not fitted')
expected_n_features = self._idf_diag.shape[0]
if n_features != expected_n_features:
raise ValueError("Input has n_features=%d while the model"
" has been trained with n_features=%d" % (
n_features, expected_n_features))
# *= doesn't work
X = X * self._idf_diag
if self.norm:
X = normalize(X, norm=self.norm, copy=False)
return X
@property
def idf_(self):
if hasattr(self, "_idf_diag"):
return np.ravel(self._idf_diag.sum(axis=0))
else:
return None
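# Illustrative usage sketch for TfidfTransformer (made-up count matrix):
# reweight term counts; with the defaults each row comes out L2-normalized.
def _demo_tfidf_transformer():  # pragma: no cover
    counts = np.array([[3, 0, 1],
                       [2, 0, 0],
                       [3, 0, 2]])
    transformer = TfidfTransformer()
    tfidf = transformer.fit_transform(counts)
    # transformer.idf_ holds the learned global term weights.
    return tfidf, transformer.idf_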
class TfidfVectorizer(CountVectorizer):
"""Convert a collection of raw documents to a matrix of TF-IDF features.
Equivalent to CountVectorizer followed by TfidfTransformer.
Read more in the :ref:`User Guide <text_feature_extraction>`.
Parameters
----------
input : string {'filename', 'file', 'content'}
If 'filename', the sequence passed as an argument to fit is
expected to be a list of filenames that need reading to fetch
the raw content to analyze.
If 'file', the sequence items must have a 'read' method (file-like
object) that is called to fetch the bytes in memory.
        Otherwise the input is expected to be a sequence of items that
        can be of type string or bytes, to be analyzed directly.
encoding : string, 'utf-8' by default.
If bytes or files are given to analyze, this encoding is used to
decode.
decode_error : {'strict', 'ignore', 'replace'}
Instruction on what to do if a byte sequence is given to analyze that
contains characters not of the given `encoding`. By default, it is
'strict', meaning that a UnicodeDecodeError will be raised. Other
values are 'ignore' and 'replace'.
strip_accents : {'ascii', 'unicode', None}
Remove accents during the preprocessing step.
'ascii' is a fast method that only works on characters that have
        a direct ASCII mapping.
'unicode' is a slightly slower method that works on any characters.
None (default) does nothing.
analyzer : string, {'word', 'char'} or callable
Whether the feature should be made of word or character n-grams.
If a callable is passed it is used to extract the sequence of features
out of the raw, unprocessed input.
preprocessor : callable or None (default)
Override the preprocessing (string transformation) stage while
preserving the tokenizing and n-grams generation steps.
tokenizer : callable or None (default)
Override the string tokenization step while preserving the
preprocessing and n-grams generation steps.
Only applies if ``analyzer == 'word'``.
ngram_range : tuple (min_n, max_n)
The lower and upper boundary of the range of n-values for different
n-grams to be extracted. All values of n such that min_n <= n <= max_n
will be used.
stop_words : string {'english'}, list, or None (default)
If a string, it is passed to _check_stop_list and the appropriate stop
list is returned. 'english' is currently the only supported string
value.
If a list, that list is assumed to contain stop words, all of which
will be removed from the resulting tokens.
Only applies if ``analyzer == 'word'``.
If None, no stop words will be used. max_df can be set to a value
in the range [0.7, 1.0) to automatically detect and filter stop
words based on intra corpus document frequency of terms.
lowercase : boolean, default True
Convert all characters to lowercase before tokenizing.
token_pattern : string
Regular expression denoting what constitutes a "token", only used
if ``analyzer == 'word'``. The default regexp selects tokens of 2
or more alphanumeric characters (punctuation is completely ignored
and always treated as a token separator).
max_df : float in range [0.0, 1.0] or int, default=1.0
When building the vocabulary ignore terms that have a document
frequency strictly higher than the given threshold (corpus-specific
stop words).
If float, the parameter represents a proportion of documents, integer
absolute counts.
This parameter is ignored if vocabulary is not None.
min_df : float in range [0.0, 1.0] or int, default=1
When building the vocabulary ignore terms that have a document
frequency strictly lower than the given threshold. This value is also
called cut-off in the literature.
If float, the parameter represents a proportion of documents, integer
absolute counts.
This parameter is ignored if vocabulary is not None.
max_features : int or None, default=None
If not None, build a vocabulary that only consider the top
max_features ordered by term frequency across the corpus.
This parameter is ignored if vocabulary is not None.
vocabulary : Mapping or iterable, optional
Either a Mapping (e.g., a dict) where keys are terms and values are
indices in the feature matrix, or an iterable over terms. If not
given, a vocabulary is determined from the input documents.
binary : boolean, default=False
If True, all non-zero term counts are set to 1. This does not mean
outputs will have only 0/1 values, only that the tf term in tf-idf
is binary. (Set idf and normalization to False to get 0/1 outputs.)
dtype : type, optional
Type of the matrix returned by fit_transform() or transform().
norm : 'l1', 'l2' or None, optional
Norm used to normalize term vectors. None for no normalization.
use_idf : boolean, default=True
Enable inverse-document-frequency reweighting.
smooth_idf : boolean, default=True
Smooth idf weights by adding one to document frequencies, as if an
extra document was seen containing every term in the collection
exactly once. Prevents zero divisions.
sublinear_tf : boolean, default=False
Apply sublinear tf scaling, i.e. replace tf with 1 + log(tf).
Attributes
----------
idf_ : array, shape = [n_features], or None
The learned idf vector (global term weights)
when ``use_idf`` is set to True, None otherwise.
stop_words_ : set
Terms that were ignored because they either:
- occurred in too many documents (`max_df`)
- occurred in too few documents (`min_df`)
- were cut off by feature selection (`max_features`).
This is only available if no vocabulary was given.
See also
--------
CountVectorizer
Tokenize the documents and count the occurrences of token and return
them as a sparse matrix
TfidfTransformer
Apply Term Frequency Inverse Document Frequency normalization to a
sparse matrix of occurrence counts.
Notes
-----
The ``stop_words_`` attribute can get large and increase the model size
when pickling. This attribute is provided only for introspection and can
be safely removed using delattr or set to None before pickling.
"""
def __init__(self, input='content', encoding='utf-8',
decode_error='strict', strip_accents=None, lowercase=True,
preprocessor=None, tokenizer=None, analyzer='word',
stop_words=None, token_pattern=r"(?u)\b\w\w+\b",
ngram_range=(1, 1), max_df=1.0, min_df=1,
max_features=None, vocabulary=None, binary=False,
dtype=np.int64, norm='l2', use_idf=True, smooth_idf=True,
sublinear_tf=False):
super(TfidfVectorizer, self).__init__(
input=input, encoding=encoding, decode_error=decode_error,
strip_accents=strip_accents, lowercase=lowercase,
preprocessor=preprocessor, tokenizer=tokenizer, analyzer=analyzer,
stop_words=stop_words, token_pattern=token_pattern,
ngram_range=ngram_range, max_df=max_df, min_df=min_df,
max_features=max_features, vocabulary=vocabulary, binary=binary,
dtype=dtype)
self._tfidf = TfidfTransformer(norm=norm, use_idf=use_idf,
smooth_idf=smooth_idf,
sublinear_tf=sublinear_tf)
# Broadcast the TF-IDF parameters to the underlying transformer instance
# for easy grid search and repr
@property
def norm(self):
return self._tfidf.norm
@norm.setter
def norm(self, value):
self._tfidf.norm = value
@property
def use_idf(self):
return self._tfidf.use_idf
@use_idf.setter
def use_idf(self, value):
self._tfidf.use_idf = value
@property
def smooth_idf(self):
return self._tfidf.smooth_idf
@smooth_idf.setter
def smooth_idf(self, value):
self._tfidf.smooth_idf = value
@property
def sublinear_tf(self):
return self._tfidf.sublinear_tf
@sublinear_tf.setter
def sublinear_tf(self, value):
self._tfidf.sublinear_tf = value
@property
def idf_(self):
return self._tfidf.idf_
def fit(self, raw_documents, y=None):
"""Learn vocabulary and idf from training set.
Parameters
----------
raw_documents : iterable
an iterable which yields either str, unicode or file objects
Returns
-------
self : TfidfVectorizer
"""
X = super(TfidfVectorizer, self).fit_transform(raw_documents)
self._tfidf.fit(X)
return self
def fit_transform(self, raw_documents, y=None):
"""Learn vocabulary and idf, return term-document matrix.
This is equivalent to fit followed by transform, but more efficiently
implemented.
Parameters
----------
raw_documents : iterable
an iterable which yields either str, unicode or file objects
Returns
-------
X : sparse matrix, [n_samples, n_features]
Tf-idf-weighted document-term matrix.
"""
X = super(TfidfVectorizer, self).fit_transform(raw_documents)
self._tfidf.fit(X)
# X is already a transformed view of raw_documents so
# we set copy to False
return self._tfidf.transform(X, copy=False)
def transform(self, raw_documents, copy=True):
"""Transform documents to document-term matrix.
Uses the vocabulary and document frequencies (df) learned by fit (or
fit_transform).
Parameters
----------
raw_documents : iterable
an iterable which yields either str, unicode or file objects
copy : boolean, default True
Whether to copy X and operate on the copy or perform in-place
operations.
Returns
-------
X : sparse matrix, [n_samples, n_features]
Tf-idf-weighted document-term matrix.
"""
check_is_fitted(self, '_tfidf', 'The tfidf vector is not fitted')
X = super(TfidfVectorizer, self).transform(raw_documents)
return self._tfidf.transform(X, copy=False)
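# Illustrative end-to-end sketch for TfidfVectorizer (made-up corpus):
# equivalent to CountVectorizer followed by TfidfTransformer in one step.
def _demo_tfidf_vectorizer():  # pragma: no cover
    docs = ['the cat sat on the mat', 'the dog sat on the log']
    vectorizer = TfidfVectorizer(stop_words='english')
    X = vectorizer.fit_transform(docs)
    # One L2-normalized row per document, one column per vocabulary term.
    return X, vectorizer.get_feature_names()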
|
bsd-3-clause
|
lthurlow/Network-Grapher
|
proj/external/matplotlib-1.2.1/examples/units/bar_demo2.py
|
9
|
1062
|
"""
plot using a variety of cm vs inches conversions. The example shows
how default unit introspection works (ax1), how various keywords can
be used to set the x and y units to override the defaults (ax2, ax3,
ax4) and how one can set the xlimits using scalars (ax3, current units
assumed) or units (conversions applied to get the numbers to current
units)
"""
import numpy as np
from basic_units import cm, inch
import matplotlib.pyplot as plt
cms = cm * np.arange(0, 10, 2)
bottom = 0 * cm
width = 0.8 * cm
fig = plt.figure()
ax1 = fig.add_subplot(2,2,1)
ax1.bar(cms, cms, bottom=bottom)
ax2 = fig.add_subplot(2,2,2)
ax2.bar(cms, cms, bottom=bottom, width=width, xunits=cm, yunits=inch)
ax3 = fig.add_subplot(2,2,3)
ax3.bar(cms, cms, bottom=bottom, width=width, xunits=inch, yunits=cm)
ax3.set_xlim(2, 6) # scalars are interpreted in current units
ax4 = fig.add_subplot(2,2,4)
ax4.bar(cms, cms, bottom=bottom, width=width, xunits=inch, yunits=inch)
#fig.savefig('simple_conversion_plot.png')
ax4.set_xlim(2*cm, 6*cm) # cm are converted to inches
plt.show()
|
mit
|
MartinSavc/scikit-learn
|
doc/datasets/mldata_fixture.py
|
367
|
1183
|
"""Fixture module to skip the datasets loading when offline
Mock urllib2 access to mldata.org and create a temporary data folder.
"""
from os import makedirs
from os.path import join
import numpy as np
import tempfile
import shutil
from sklearn import datasets
from sklearn.utils.testing import install_mldata_mock
from sklearn.utils.testing import uninstall_mldata_mock
def globs(globs):
# Create a temporary folder for the data fetcher
global custom_data_home
custom_data_home = tempfile.mkdtemp()
makedirs(join(custom_data_home, 'mldata'))
globs['custom_data_home'] = custom_data_home
return globs
def setup_module():
# setup mock urllib2 module to avoid downloading from mldata.org
install_mldata_mock({
'mnist-original': {
'data': np.empty((70000, 784)),
'label': np.repeat(np.arange(10, dtype='d'), 7000),
},
'iris': {
'data': np.empty((150, 4)),
},
'datasets-uci-iris': {
'double0': np.empty((150, 4)),
'class': np.empty((150,)),
},
})
def teardown_module():
uninstall_mldata_mock()
shutil.rmtree(custom_data_home)
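# Illustrative sketch (assumption: this mirrors how the narrative docs consume
# the fixture). With the mock installed by setup_module(), fetching works
# offline against the fake payloads defined above, e.g.:
#
#     from sklearn.datasets import fetch_mldata
#     mnist = fetch_mldata('MNIST original', data_home=custom_data_home)
#     # mnist.data has the mocked shape (70000, 784)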
|
bsd-3-clause
|
fredmorcos/attic
|
projects/pyfuzz-2/plotwidget.py
|
1
|
2297
|
""" Frederic-Gerald Morcos <[email protected]> """
from matplotlib.figure import Figure
from matplotlib.lines import Line2D as Line
from matplotlib.backends.backend_gtkagg import FigureCanvasGTKAgg as FigureCanvas
class PlotWidget:
""" gtk widget class for plotting a graph, based on the matplotlib python
library
"""
def __init__(self, varlist = []):
""" constructor, the first item in the variables list is the original graph,
the rest is just for plotting other functions over the original one such
as the result of the implication function
"""
self.figure = Figure()
self.variables = varlist
def add_var(self, var):
""" adds a variable to the list of variables to be plotted, used to add
the product/minimums of the centroids so they can be plotted too
"""
self.variables.append(var)
def clear(self):
""" clears the list of variables and keeps the first item """
self.figure = Figure()
del self.variables[1:]
def load_plot(self):
""" loads the plot lines from the variables assigned to the class """
x = []
y = []
for v in self.variables:
for i in v.get_all_points():
x.append(i[0])
y.append(i[1])
x.sort()
y.sort()
sp = self.figure.add_subplot(111, title = self.variables[0].label)
""" create a set of points that represent continuous lines
ex: [(x1,y1),(x2,y2)], [(x2,y2),(x3,y3)]
"""
for k, v in enumerate(self.variables):
for i, f in enumerate(v.functions):
fx = []
fy = []
for p in f.points:
fx.append(p[0])
fy.append(p[1])
if i == len(v.functions) - 1:
fx.append(fx[len(fx) - 1] + 10)
fy.append(f.membership(fx[len(fx) - 1]))
if k != 0:
line = Line(fx, fy, linewidth = 2, c = [1, 0, 0])
else:
line = Line(fx, fy, linewidth = 2)
sp.add_line(line)
sp.plot()
sp.axis([x[0], x[len(x) - 1] + 10, y[0], y[len(y) - 1] + 0.5])
def get_canvas_widget(self):
""" returns a gtk widget including the plot to be directly used """
self.load_plot()
canvas = FigureCanvas(self.figure)
canvas.set_size_request(365, 250)
return canvas
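# Illustrative usage sketch (assumption: `fuzzy_var` is a pyfuzz variable
# object exposing label, functions and get_all_points() as used by load_plot
# above; `container` is any GTK container):
#
#     widget = PlotWidget([fuzzy_var])
#     canvas = widget.get_canvas_widget()  # 365x250 matplotlib GTK canvas
#     container.add(canvas)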
|
isc
|
jmargeta/scikit-learn
|
examples/manifold/plot_mds.py
|
261
|
2616
|
"""
=========================
Multi-dimensional scaling
=========================
An illustration of the metric and non-metric MDS on generated noisy data.
The reconstructed points using the metric MDS and non metric MDS are slightly
shifted to avoid overlapping.
"""
# Author: Nelle Varoquaux <[email protected]>
# Licence: BSD
print(__doc__)
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.collections import LineCollection
from sklearn import manifold
from sklearn.metrics import euclidean_distances
from sklearn.decomposition import PCA
n_samples = 20
seed = np.random.RandomState(seed=3)
X_true = seed.randint(0, 20, 2 * n_samples).astype(np.float)
X_true = X_true.reshape((n_samples, 2))
# Center the data
X_true -= X_true.mean()
similarities = euclidean_distances(X_true)
# Add noise to the similarities
noise = np.random.rand(n_samples, n_samples)
noise = noise + noise.T
noise[np.arange(noise.shape[0]), np.arange(noise.shape[0])] = 0
similarities += noise
mds = manifold.MDS(n_components=2, max_iter=3000, eps=1e-9, random_state=seed,
dissimilarity="precomputed", n_jobs=1)
pos = mds.fit(similarities).embedding_
nmds = manifold.MDS(n_components=2, metric=False, max_iter=3000, eps=1e-12,
dissimilarity="precomputed", random_state=seed, n_jobs=1,
n_init=1)
npos = nmds.fit_transform(similarities, init=pos)
# Rescale the data
pos *= np.sqrt((X_true ** 2).sum()) / np.sqrt((pos ** 2).sum())
npos *= np.sqrt((X_true ** 2).sum()) / np.sqrt((npos ** 2).sum())
# Rotate the data
clf = PCA(n_components=2)
X_true = clf.fit_transform(X_true)
pos = clf.fit_transform(pos)
npos = clf.fit_transform(npos)
fig = plt.figure(1)
ax = plt.axes([0., 0., 1., 1.])
plt.scatter(X_true[:, 0], X_true[:, 1], c='r', s=20)
plt.scatter(pos[:, 0], pos[:, 1], s=20, c='g')
plt.scatter(npos[:, 0], npos[:, 1], s=20, c='b')
plt.legend(('True position', 'MDS', 'NMDS'), loc='best')
similarities = similarities.max() / similarities * 100
similarities[np.isinf(similarities)] = 0
# Plot the edges
start_idx, end_idx = np.where(pos)
#a sequence of (*line0*, *line1*, *line2*), where::
# linen = (x0, y0), (x1, y1), ... (xm, ym)
segments = [[X_true[i, :], X_true[j, :]]
for i in range(len(pos)) for j in range(len(pos))]
values = np.abs(similarities)
lc = LineCollection(segments,
zorder=0, cmap=plt.cm.hot_r,
norm=plt.Normalize(0, values.max()))
lc.set_array(similarities.flatten())
lc.set_linewidths(0.5 * np.ones(len(segments)))
ax.add_collection(lc)
plt.show()
|
bsd-3-clause
|
dr-leo/pandaSDMX
|
pandasdmx/tests/writer/conftest.py
|
1
|
1161
|
import pytest
from pandasdmx.message import StructureMessage
from pandasdmx.model import Agency, Annotation, Code, Codelist
CL_ITEMS = [
dict(id="A", name={"en": "Average of observations through period"}),
dict(id="B", name={"en": "Beginning of period"}),
dict(id="B1", name={"en": "Child code of B"}),
]
@pytest.fixture
def codelist():
"""A Codelist for writer testing."""
ECB = Agency(id="ECB")
cl = Codelist(
id="CL_COLLECTION",
version="1.0",
is_final=False,
is_external_reference=False,
maintainer=ECB,
name={"en": "Collection indicator code list"},
)
# Add items
for info in CL_ITEMS:
cl.items[info["id"]] = Code(**info)
# Add a hierarchical relationship
cl.items["B"].append_child(cl.items["B1"])
# Add an annotation
cl.items["A"].annotations.append(
Annotation(id="A1", type="NOTE", text={"en": "Text annotation on Code A."})
)
return cl
@pytest.fixture
def structuremessage(codelist):
"""A StructureMessage for writer testing."""
sm = StructureMessage()
sm.codelist[codelist.id] = codelist
return sm
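# Illustrative sketch (not part of the original fixtures): a writer test could
# consume the fixtures above roughly as follows, checking the structure that is
# set up here:
#
#   def test_codelist_fixture(codelist, structuremessage):
#       assert len(codelist.items) == 3
#       assert codelist.items["A"].annotations[0].id == "A1"
#       assert structuremessage.codelist["CL_COLLECTION"] is codelist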
|
apache-2.0
|
huongttlan/statsmodels
|
statsmodels/sandbox/examples/try_multiols.py
|
33
|
1243
|
# -*- coding: utf-8 -*-
"""
Created on Sun May 26 13:23:40 2013
Author: Josef Perktold, based on Enrico Giampieri's multiOLS
"""
#import numpy as np
import pandas as pd
import statsmodels.api as sm
from statsmodels.sandbox.multilinear import multiOLS, multigroup
data = sm.datasets.longley.load_pandas()
df = data.exog
df['TOTEMP'] = data.endog
#This will perform the specified linear model on all the
#other columns of the dataframe
res0 = multiOLS('GNP + 1', df)
#This select only a certain subset of the columns
res = multiOLS('GNP + 0', df, ['GNPDEFL', 'TOTEMP', 'POP'])
print(res.to_string())
url = "http://vincentarelbundock.github.com/"
url = url + "Rdatasets/csv/HistData/Guerry.csv"
df = pd.read_csv(url, index_col=1) #'dept')
#evaluate the relationship between the various parameters and Wealth
pvals = multiOLS('Wealth', df)['adj_pvals', '_f_test']
#define the groups
groups = {}
groups['crime'] = ['Crime_prop', 'Infanticide',
'Crime_parents', 'Desertion', 'Crime_pers']
groups['religion'] = ['Donation_clergy', 'Clergy', 'Donations']
groups['wealth'] = ['Commerce', 'Lottery', 'Instruction', 'Literacy']
#do the analysis of the significance
res3 = multigroup(pvals < 0.05, groups)
print(res3)
|
bsd-3-clause
|
pv/scikit-learn
|
sklearn/tests/test_lda.py
|
77
|
6258
|
import numpy as np
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raise_message
from sklearn.datasets import make_blobs
from sklearn import lda
# Data is just 6 separable points in the plane
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]], dtype='f')
y = np.array([1, 1, 1, 2, 2, 2])
y3 = np.array([1, 1, 2, 2, 3, 3])
# Degenerate data with only one feature (still should be separable)
X1 = np.array([[-2, ], [-1, ], [-1, ], [1, ], [1, ], [2, ]], dtype='f')
solver_shrinkage = [('svd', None), ('lsqr', None), ('eigen', None),
('lsqr', 'auto'), ('lsqr', 0), ('lsqr', 0.43),
('eigen', 'auto'), ('eigen', 0), ('eigen', 0.43)]
def test_lda_predict():
# Test LDA classification.
# This checks that LDA implements fit and predict and returns correct values
# for simple toy data.
for test_case in solver_shrinkage:
solver, shrinkage = test_case
clf = lda.LDA(solver=solver, shrinkage=shrinkage)
y_pred = clf.fit(X, y).predict(X)
assert_array_equal(y_pred, y, 'solver %s' % solver)
# Assert that it works with 1D data
y_pred1 = clf.fit(X1, y).predict(X1)
assert_array_equal(y_pred1, y, 'solver %s' % solver)
# Test probability estimates
y_proba_pred1 = clf.predict_proba(X1)
assert_array_equal((y_proba_pred1[:, 1] > 0.5) + 1, y,
'solver %s' % solver)
y_log_proba_pred1 = clf.predict_log_proba(X1)
assert_array_almost_equal(np.exp(y_log_proba_pred1), y_proba_pred1,
8, 'solver %s' % solver)
# Primarily test for commit 2f34950 -- "reuse" of priors
y_pred3 = clf.fit(X, y3).predict(X)
# LDA shouldn't be able to separate those
assert_true(np.any(y_pred3 != y3), 'solver %s' % solver)
# Test invalid shrinkages
clf = lda.LDA(solver="lsqr", shrinkage=-0.2231)
assert_raises(ValueError, clf.fit, X, y)
clf = lda.LDA(solver="eigen", shrinkage="dummy")
assert_raises(ValueError, clf.fit, X, y)
clf = lda.LDA(solver="svd", shrinkage="auto")
assert_raises(NotImplementedError, clf.fit, X, y)
# Test unknown solver
clf = lda.LDA(solver="dummy")
assert_raises(ValueError, clf.fit, X, y)
def test_lda_coefs():
# Test if the coefficients of the solvers are approximately the same.
n_features = 2
n_classes = 2
n_samples = 1000
X, y = make_blobs(n_samples=n_samples, n_features=n_features,
centers=n_classes, random_state=11)
clf_lda_svd = lda.LDA(solver="svd")
clf_lda_lsqr = lda.LDA(solver="lsqr")
clf_lda_eigen = lda.LDA(solver="eigen")
clf_lda_svd.fit(X, y)
clf_lda_lsqr.fit(X, y)
clf_lda_eigen.fit(X, y)
assert_array_almost_equal(clf_lda_svd.coef_, clf_lda_lsqr.coef_, 1)
assert_array_almost_equal(clf_lda_svd.coef_, clf_lda_eigen.coef_, 1)
assert_array_almost_equal(clf_lda_eigen.coef_, clf_lda_lsqr.coef_, 1)
def test_lda_transform():
# Test LDA transform.
clf = lda.LDA(solver="svd", n_components=1)
X_transformed = clf.fit(X, y).transform(X)
assert_equal(X_transformed.shape[1], 1)
clf = lda.LDA(solver="eigen", n_components=1)
X_transformed = clf.fit(X, y).transform(X)
assert_equal(X_transformed.shape[1], 1)
clf = lda.LDA(solver="lsqr", n_components=1)
clf.fit(X, y)
msg = "transform not implemented for 'lsqr'"
assert_raise_message(NotImplementedError, msg, clf.transform, X)
def test_lda_orthogonality():
# arrange four classes with their means in a kite-shaped pattern
# the longer distance should be transformed to the first component, and
# the shorter distance to the second component.
means = np.array([[0, 0, -1], [0, 2, 0], [0, -2, 0], [0, 0, 5]])
# We construct perfectly symmetric distributions, so the LDA can estimate
# precise means.
scatter = np.array([[0.1, 0, 0], [-0.1, 0, 0], [0, 0.1, 0], [0, -0.1, 0],
[0, 0, 0.1], [0, 0, -0.1]])
X = (means[:, np.newaxis, :] + scatter[np.newaxis, :, :]).reshape((-1, 3))
y = np.repeat(np.arange(means.shape[0]), scatter.shape[0])
# Fit LDA and transform the means
clf = lda.LDA(solver="svd").fit(X, y)
means_transformed = clf.transform(means)
d1 = means_transformed[3] - means_transformed[0]
d2 = means_transformed[2] - means_transformed[1]
d1 /= np.sqrt(np.sum(d1 ** 2))
d2 /= np.sqrt(np.sum(d2 ** 2))
# the transformed within-class covariance should be the identity matrix
assert_almost_equal(np.cov(clf.transform(scatter).T), np.eye(2))
# the means of classes 0 and 3 should lie on the first component
assert_almost_equal(np.abs(np.dot(d1[:2], [1, 0])), 1.0)
# the means of classes 1 and 2 should lie on the second component
assert_almost_equal(np.abs(np.dot(d2[:2], [0, 1])), 1.0)
def test_lda_scaling():
# Test if classification works correctly with differently scaled features.
n = 100
rng = np.random.RandomState(1234)
# use uniform distribution of features to make sure there is absolutely no
# overlap between classes.
x1 = rng.uniform(-1, 1, (n, 3)) + [-10, 0, 0]
x2 = rng.uniform(-1, 1, (n, 3)) + [10, 0, 0]
x = np.vstack((x1, x2)) * [1, 100, 10000]
y = [-1] * n + [1] * n
for solver in ('svd', 'lsqr', 'eigen'):
clf = lda.LDA(solver=solver)
# should be able to separate the data perfectly
assert_equal(clf.fit(x, y).score(x, y), 1.0,
'using covariance: %s' % solver)
def test_covariance():
x, y = make_blobs(n_samples=100, n_features=5,
centers=1, random_state=42)
# make features correlated
x = np.dot(x, np.arange(x.shape[1] ** 2).reshape(x.shape[1], x.shape[1]))
c_e = lda._cov(x, 'empirical')
assert_almost_equal(c_e, c_e.T)
c_s = lda._cov(x, 'auto')
assert_almost_equal(c_s, c_s.T)
|
bsd-3-clause
|
google/telluride_decoding
|
test/attention_decoder_test.py
|
1
|
12551
|
# Copyright 2020 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for telluride_decoding.attention_decoder."""
from absl.testing import absltest
from absl.testing import parameterized
import matplotlib.axes as axes
import matplotlib.pyplot as plt
import mock
import numpy as np
from telluride_decoding import attention_decoder
class AttentionDecoderPlotTest(absltest.TestCase):
@mock.patch.object(axes.Axes, 'add_patch')
@mock.patch.object(plt, 'fill_between')
@mock.patch.object(plt, 'title')
@mock.patch.object(plt, 'ylabel')
@mock.patch.object(plt, 'xlabel')
@mock.patch.object(plt, 'plot', wraps=plt.plot)
def test_basic(self, mock_plot, mock_xlabel, mock_ylabel, mock_title,
mock_fill_between, mock_add_patch):
ones = np.ones(10)
decision = ones*0.5
xlabel = 'Mock Time (frames)'
ylabel = 'Mock Prob of Speaker 1'
title = 'Mock AAD Decoding Result'
attention_decoder.plot_aad_results(decision, attention_flag=None,
decision_upper=None, decision_lower=None,
t=None, xlabel=xlabel,
ylabel=ylabel,
title=title)
mock_plot.assert_called_once_with(mock.ANY, mock.ANY, 'blue')
mock_xlabel.assert_called_once_with(xlabel)
mock_ylabel.assert_called_once_with(ylabel)
mock_title.assert_called_once_with(title)
mock_fill_between.assert_not_called()
attention_flag = np.array([0, 0, 1, 1, 0, 0, 1, 1, 0, 0])
attention_decoder.plot_aad_results(decision,
attention_flag=attention_flag,
decision_upper=ones*.4,
decision_lower=ones*.6,
t=None, xlabel=xlabel,
ylabel=ylabel,
title=title)
mock_fill_between.assert_called_once_with(mock.ANY, mock.ANY, mock.ANY,
color='lightblue')
    # There are two separate segments in the attention_flag array above where
    # attention_flag == 1, so we expect exactly two gray patches in the plot.
self.assertEqual(mock_add_patch.call_count, 2)
class AttentionDecoderPlotErrorTest(parameterized.TestCase):
ones = np.ones(10)
@parameterized.named_parameters(
('bad_decision', True, ones, ones, ones, ones, 'decision'), # pylint: disable=bad-whitespace
('bad_attention', ones, True, ones, ones, ones, 'attention_flag'), # pylint: disable=bad-whitespace
('bad_upper', ones, ones, True, ones, ones, 'decision_upper'), # pylint: disable=bad-whitespace
('bad_lower', ones, ones, ones, True, ones, 'decision_lower'), # pylint: disable=bad-whitespace
('bad_t', ones, ones, ones, ones, True, 't'), # pylint: disable=bad-whitespace
)
def test_bad_param(self, decision, attention, upper, lower, t, var):
with self.assertRaisesRegex(TypeError,
'Argument %s must be an np array, not' % var):
attention_decoder.plot_aad_results(decision=decision,
attention_flag=attention,
decision_upper=upper,
decision_lower=lower,
t=t)
part = ones[:5]
@parameterized.named_parameters(
('bad_attention', ones, part, ones, ones, ones, 'attention_flag'), # pylint: disable=bad-whitespace
('bad_upper', ones, ones, part, ones, ones, 'decision_upper'), # pylint: disable=bad-whitespace
('bad_lower', ones, ones, ones, part, ones, 'decision_lower'), # pylint: disable=bad-whitespace
('bad_t', ones, ones, ones, ones, part, 't'), # pylint: disable=bad-whitespace
)
def test_short_param(self, decision, attention, upper, lower, t, var):
with self.assertRaisesRegex(TypeError,
'Input %s must match length of decision' % var):
attention_decoder.plot_aad_results(decision=decision,
attention_flag=attention,
decision_upper=upper,
decision_lower=lower,
t=t)
class AttentionDecoder(absltest.TestCase):
def test_basics(self):
ad = attention_decoder.AttentionDecoder()
self.assertTrue(ad.attention(0.6, 0.4)[0])
self.assertFalse(ad.attention(0.4, 0.6)[0])
self.assertTrue(ad.attention(0.6*np.ones(5), 0.4*np.ones(5))[0])
self.assertFalse(ad.attention(0.4*np.ones(5), 0.6*np.ones(5))[0])
cor1 = [2, 2, 2, 2, 2, 2, 2, 0, 0, 0, 0, 0, 0]
cor2 = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
desi = [1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0]
ad.tune(cor1, cor2)
results = [ad.attention(r1, r2)[0] for (r1, r2) in zip(cor1, cor2)]
np.testing.assert_array_equal(desi, results)
class StepAttentionDecoder(absltest.TestCase):
def test_basics(self):
cor1 = [2, 2, 2, 2, 2, 2, 2, 0, 0, 0, 0, 0, 0]
cor2 = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
desi = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0]
ad = attention_decoder.StepAttentionDecoder()
results = [ad.attention(r1, r2)[0] for (r1, r2) in zip(cor1, cor2)]
np.testing.assert_array_equal(desi, results)
def test_short_switch(self):
cor1 = [2, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
cor2 = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
desi = [1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
ad = attention_decoder.StepAttentionDecoder()
results = [ad.attention(r1, r2)[0] for (r1, r2) in zip(cor1, cor2)]
np.testing.assert_array_equal(desi, results)
class StateAttentionDecoderTest(absltest.TestCase):
def get_synth_corr(self, seconds, fs, state_switch, corr_scale=0.3):
"""Synth data to approximate attentional switch every state_switch seconds.
Args:
seconds: number of seconds total for generated sample.
fs: fictitious sample rate.
state_switch: the interval in seconds at which the attentional state
        switches, e.g. 10 means the attentional state switches between s1 and s2
every ten seconds.
corr_scale: scale the correlation measures for more realistic behaviour.
Returns:
true_state: the true underlying attentional state.
correlations: the simulated correlation values.
"""
max_t = fs*seconds
noise_std = 0.5
t = np.arange(max_t)
true_state = np.zeros((max_t,))
true_state += (np.floor(t/(state_switch*fs))%2)+1
correlations = np.zeros((max_t, 2))
correlations[:, 0] = np.random.randn(max_t)*noise_std + (2-true_state)
correlations[:, 1] = np.random.randn(max_t)*noise_std + (true_state-1)
correlations *= corr_scale
correlations = np.minimum(1, np.maximum(-1, correlations))
return true_state, correlations
def test_attention_decoder(self):
"""Test StateSpaceAttentionDecoder can decode the simple synthetic data."""
print('\n\n**********test_attention_decoder starting... *******')
# fictitious data comprising 1000 seconds with attentional switch every 300s
# starting with speaker 1.
fs_corr = 1 # Hertz
len_data = 1000 # seconds
switch_interval = 300 # seconds
init_interval = 100 # seconds
init_frames = int(fs_corr*init_interval) # frames
true_state, correlations = self.get_synth_corr(len_data, fs_corr,
switch_interval)
# Create attention decoder object
outer_iter = 20
inner_iter = 1
newton_iter = 10
ad = attention_decoder.StateSpaceAttentionDecoder(outer_iter,
inner_iter,
newton_iter, fs_corr)
# Tune log normal priors using first 100s of data
ad.tune_log_normal_priors(correlations[:init_frames, 0],
correlations[:init_frames, 1])
# Decode attention based on each set of correlations
results = np.zeros((correlations.shape[0]-ad.k_w-init_frames, 3))
for i in range(correlations[init_frames:, :].shape[0]):
res = ad.attention(correlations[i+init_frames, 0],
correlations[i+init_frames, 1])
if res is not None:
results[i-ad.k_w] = res
print('ADD result: ', res)
s1 = results[:, 0] > 0.5 # classified as speaker 1
# true_state is a vector of 1,2 for speaker 1,2 respectively
# s1 is true if classified as attending to speaker 1
    # Here we check that the error rate of a naive decision rule (classify as
    # attending to speaker 1 whenever the point estimate results[:, 0] is above
    # 0.5) stays low. We use the first 100 observations to tune the model's
    # hyperparameters via tune_log_normal_priors above, so we drop them from
    # the subsequent analysis. ad.k_w is the window size, i.e. the number of correlation
# values that are used to make a single attentional state decision so the
# first decoded attentional state corresponds to the 100+ad.k_w true_state
# (this is a bit complicated when using forward lags because the first
# decoded attentional state would correspond to the 100+ad.k_w-ad.k_f th
# true_state).
# The true_state vector is a vector of 1s and 2s corresponding to the labels
# speaker 1 and speaker 2, so we convert to a boolean where True indicates
# attending to speaker 1 and False to attending to speaker 2.
error = np.mean(np.abs(s1 != (true_state[init_frames+ad.k_w:] < 2)))
self.assertLess(error, 0.15)
def test_log_normal_initialization(self):
# Pick two different sets of parameters and make sure the initialization
# code recovers the correct Gaussian parameters
# Number of observations
num_data = 1000000
# Generate attended speaker data (log-normal)
mu_a = 0.2
var_a = 0.1
data_a = np.exp(np.random.randn(num_data)*var_a + mu_a)
# Generate unattended speaker data (log-normal)
mu_b = 0.0
var_b = 0.1
data_b = np.exp(np.random.randn(num_data)*var_b + mu_b)
# Create attention decoder object
ad = attention_decoder.StateSpaceAttentionDecoder(20, 1, 10, 1, 1)
# Tune log-normal priors
ad.tune(data_a, data_b)
# Log-transform and normalize between [0,1]
log_data_a = np.log(np.absolute(data_a))
log_data_b = np.log(np.absolute(data_b))
# Compute mean and precision
mu_a = np.mean(log_data_a)
mu_b = np.mean(log_data_b)
rho_a = np.var(log_data_a)
rho_b = np.var(log_data_b)
self.assertAlmostEqual(ad.mu_d[0], mu_a, delta=0.0001)
self.assertAlmostEqual(ad.mu_d[1], mu_b, delta=0.0001)
self.assertAlmostEqual(ad.rho_d[0], 1.0/rho_a, delta=5)
self.assertAlmostEqual(ad.rho_d[1], 1.0/rho_b, delta=5)
# Create new attention decoder object
ad = attention_decoder.StateSpaceAttentionDecoder(20, 1, 10, 1, 1,
offset=1.0)
# Tune log-normal priors
ad.tune(data_a, data_b)
# Make sure the mu value gets bigger with a positive offset.
self.assertGreater(ad.mu_d[0], mu_a + 0.01)
class CreateTest(absltest.TestCase):
def test_all(self):
ad = attention_decoder.create_attention_decoder('wta')
self.assertIsInstance(ad, attention_decoder.AttentionDecoder)
ad = attention_decoder.create_attention_decoder('stepped')
self.assertIsInstance(ad, attention_decoder.StepAttentionDecoder)
ad = attention_decoder.create_attention_decoder('ssd')
self.assertIsInstance(ad, attention_decoder.StateSpaceAttentionDecoder)
with self.assertRaisesRegex(ValueError, 'Unknown type'):
ad = attention_decoder.create_attention_decoder('bad type name')
if __name__ == '__main__':
absltest.main()
|
apache-2.0
|
tomsilver/nupic
|
external/linux32/lib/python2.6/site-packages/matplotlib/backends/backend_gdk.py
|
69
|
15968
|
from __future__ import division
import math
import os
import sys
import warnings
def fn_name(): return sys._getframe(1).f_code.co_name
import gobject
import gtk; gdk = gtk.gdk
import pango
pygtk_version_required = (2,2,0)
if gtk.pygtk_version < pygtk_version_required:
raise ImportError ("PyGTK %d.%d.%d is installed\n"
"PyGTK %d.%d.%d or later is required"
% (gtk.pygtk_version + pygtk_version_required))
del pygtk_version_required
import numpy as npy
import matplotlib
from matplotlib._pylab_helpers import Gcf
from matplotlib.backend_bases import RendererBase, GraphicsContextBase, \
FigureManagerBase, FigureCanvasBase
from matplotlib.cbook import is_string_like
from matplotlib.figure import Figure
from matplotlib.mathtext import MathTextParser
from matplotlib.transforms import Affine2D
from matplotlib.backends._backend_gdk import pixbuf_get_pixels_array
backend_version = "%d.%d.%d" % gtk.pygtk_version
_debug = False
# Image formats that this backend supports - for FileChooser and print_figure()
IMAGE_FORMAT = ['eps', 'jpg', 'png', 'ps', 'svg'] + ['bmp'] # , 'raw', 'rgb']
IMAGE_FORMAT.sort()
IMAGE_FORMAT_DEFAULT = 'png'
class RendererGDK(RendererBase):
fontweights = {
100 : pango.WEIGHT_ULTRALIGHT,
200 : pango.WEIGHT_LIGHT,
300 : pango.WEIGHT_LIGHT,
400 : pango.WEIGHT_NORMAL,
500 : pango.WEIGHT_NORMAL,
600 : pango.WEIGHT_BOLD,
700 : pango.WEIGHT_BOLD,
800 : pango.WEIGHT_HEAVY,
900 : pango.WEIGHT_ULTRABOLD,
'ultralight' : pango.WEIGHT_ULTRALIGHT,
'light' : pango.WEIGHT_LIGHT,
'normal' : pango.WEIGHT_NORMAL,
'medium' : pango.WEIGHT_NORMAL,
'semibold' : pango.WEIGHT_BOLD,
'bold' : pango.WEIGHT_BOLD,
'heavy' : pango.WEIGHT_HEAVY,
'ultrabold' : pango.WEIGHT_ULTRABOLD,
'black' : pango.WEIGHT_ULTRABOLD,
}
# cache for efficiency, these must be at class, not instance level
layoutd = {} # a map from text prop tups to pango layouts
rotated = {} # a map from text prop tups to rotated text pixbufs
def __init__(self, gtkDA, dpi):
# widget gtkDA is used for:
# '<widget>.create_pango_layout(s)'
        #  '<widget>.get_colormap()'  (the cmap line below)
self.gtkDA = gtkDA
self.dpi = dpi
self._cmap = gtkDA.get_colormap()
self.mathtext_parser = MathTextParser("Agg")
def set_pixmap (self, pixmap):
self.gdkDrawable = pixmap
def set_width_height (self, width, height):
"""w,h is the figure w,h not the pixmap w,h
"""
self.width, self.height = width, height
def draw_path(self, gc, path, transform, rgbFace=None):
transform = transform + Affine2D(). \
scale(1.0, -1.0).translate(0, self.height)
polygons = path.to_polygons(transform, self.width, self.height)
for polygon in polygons:
# draw_polygon won't take an arbitrary sequence -- it must be a list
# of tuples
polygon = [(int(round(x)), int(round(y))) for x, y in polygon]
if rgbFace is not None:
saveColor = gc.gdkGC.foreground
gc.gdkGC.foreground = gc.rgb_to_gdk_color(rgbFace)
self.gdkDrawable.draw_polygon(gc.gdkGC, True, polygon)
gc.gdkGC.foreground = saveColor
if gc.gdkGC.line_width > 0:
self.gdkDrawable.draw_lines(gc.gdkGC, polygon)
def draw_image(self, x, y, im, bbox, clippath=None, clippath_trans=None):
if bbox != None:
l,b,w,h = bbox.bounds
#rectangle = (int(l), self.height-int(b+h),
# int(w), int(h))
# set clip rect?
im.flipud_out()
rows, cols, image_str = im.as_rgba_str()
image_array = npy.fromstring(image_str, npy.uint8)
image_array.shape = rows, cols, 4
pixbuf = gtk.gdk.Pixbuf(gtk.gdk.COLORSPACE_RGB,
has_alpha=True, bits_per_sample=8,
width=cols, height=rows)
array = pixbuf_get_pixels_array(pixbuf)
array[:,:,:] = image_array
gc = self.new_gc()
y = self.height-y-rows
try: # new in 2.2
# can use None instead of gc.gdkGC, if don't need clipping
self.gdkDrawable.draw_pixbuf (gc.gdkGC, pixbuf, 0, 0,
int(x), int(y), cols, rows,
gdk.RGB_DITHER_NONE, 0, 0)
except AttributeError:
# deprecated in 2.2
pixbuf.render_to_drawable(self.gdkDrawable, gc.gdkGC, 0, 0,
int(x), int(y), cols, rows,
gdk.RGB_DITHER_NONE, 0, 0)
# unflip
im.flipud_out()
def draw_text(self, gc, x, y, s, prop, angle, ismath):
x, y = int(x), int(y)
if x <0 or y <0: # window has shrunk and text is off the edge
return
if angle not in (0,90):
warnings.warn('backend_gdk: unable to draw text at angles ' +
'other than 0 or 90')
elif ismath:
self._draw_mathtext(gc, x, y, s, prop, angle)
elif angle==90:
self._draw_rotated_text(gc, x, y, s, prop, angle)
else:
layout, inkRect, logicalRect = self._get_pango_layout(s, prop)
l, b, w, h = inkRect
self.gdkDrawable.draw_layout(gc.gdkGC, x, y-h-b, layout)
def _draw_mathtext(self, gc, x, y, s, prop, angle):
ox, oy, width, height, descent, font_image, used_characters = \
self.mathtext_parser.parse(s, self.dpi, prop)
if angle==90:
width, height = height, width
x -= width
y -= height
imw = font_image.get_width()
imh = font_image.get_height()
N = imw * imh
# a numpixels by num fonts array
Xall = npy.zeros((N,1), npy.uint8)
image_str = font_image.as_str()
Xall[:,0] = npy.fromstring(image_str, npy.uint8)
# get the max alpha at each pixel
Xs = npy.amax(Xall,axis=1)
        # convert it to its proper shape
Xs.shape = imh, imw
pixbuf = gtk.gdk.Pixbuf(gtk.gdk.COLORSPACE_RGB, has_alpha=True,
bits_per_sample=8, width=imw, height=imh)
array = pixbuf_get_pixels_array(pixbuf)
rgb = gc.get_rgb()
array[:,:,0]=int(rgb[0]*255)
array[:,:,1]=int(rgb[1]*255)
array[:,:,2]=int(rgb[2]*255)
array[:,:,3]=Xs
try: # new in 2.2
# can use None instead of gc.gdkGC, if don't need clipping
self.gdkDrawable.draw_pixbuf (gc.gdkGC, pixbuf, 0, 0,
int(x), int(y), imw, imh,
gdk.RGB_DITHER_NONE, 0, 0)
except AttributeError:
# deprecated in 2.2
pixbuf.render_to_drawable(self.gdkDrawable, gc.gdkGC, 0, 0,
int(x), int(y), imw, imh,
gdk.RGB_DITHER_NONE, 0, 0)
def _draw_rotated_text(self, gc, x, y, s, prop, angle):
"""
Draw the text rotated 90 degrees, other angles are not supported
"""
# this function (and its called functions) is a bottleneck
# Pango 1.6 supports rotated text, but pygtk 2.4.0 does not yet have
# wrapper functions
# GTK+ 2.6 pixbufs support rotation
gdrawable = self.gdkDrawable
ggc = gc.gdkGC
layout, inkRect, logicalRect = self._get_pango_layout(s, prop)
l, b, w, h = inkRect
x = int(x-h)
y = int(y-w)
if x < 0 or y < 0: # window has shrunk and text is off the edge
return
key = (x,y,s,angle,hash(prop))
imageVert = self.rotated.get(key)
if imageVert != None:
gdrawable.draw_image(ggc, imageVert, 0, 0, x, y, h, w)
return
imageBack = gdrawable.get_image(x, y, w, h)
imageVert = gdrawable.get_image(x, y, h, w)
imageFlip = gtk.gdk.Image(type=gdk.IMAGE_FASTEST,
visual=gdrawable.get_visual(),
width=w, height=h)
if imageFlip == None or imageBack == None or imageVert == None:
warnings.warn("Could not renderer vertical text")
return
imageFlip.set_colormap(self._cmap)
for i in range(w):
for j in range(h):
imageFlip.put_pixel(i, j, imageVert.get_pixel(j,w-i-1) )
gdrawable.draw_image(ggc, imageFlip, 0, 0, x, y, w, h)
gdrawable.draw_layout(ggc, x, y-b, layout)
imageIn = gdrawable.get_image(x, y, w, h)
for i in range(w):
for j in range(h):
imageVert.put_pixel(j, i, imageIn.get_pixel(w-i-1,j) )
gdrawable.draw_image(ggc, imageBack, 0, 0, x, y, w, h)
gdrawable.draw_image(ggc, imageVert, 0, 0, x, y, h, w)
self.rotated[key] = imageVert
def _get_pango_layout(self, s, prop):
"""
Create a pango layout instance for Text 's' with properties 'prop'.
Return - pango layout (from cache if already exists)
Note that pango assumes a logical DPI of 96
Ref: pango/fonts.c/pango_font_description_set_size() manual page
"""
# problem? - cache gets bigger and bigger, is never cleared out
# two (not one) layouts are created for every text item s (then they
# are cached) - why?
key = self.dpi, s, hash(prop)
value = self.layoutd.get(key)
if value != None:
return value
size = prop.get_size_in_points() * self.dpi / 96.0
size = round(size)
font_str = '%s, %s %i' % (prop.get_name(), prop.get_style(), size,)
font = pango.FontDescription(font_str)
# later - add fontweight to font_str
font.set_weight(self.fontweights[prop.get_weight()])
layout = self.gtkDA.create_pango_layout(s)
layout.set_font_description(font)
inkRect, logicalRect = layout.get_pixel_extents()
self.layoutd[key] = layout, inkRect, logicalRect
return layout, inkRect, logicalRect
def flipy(self):
return True
def get_canvas_width_height(self):
return self.width, self.height
def get_text_width_height_descent(self, s, prop, ismath):
if ismath:
ox, oy, width, height, descent, font_image, used_characters = \
self.mathtext_parser.parse(s, self.dpi, prop)
return width, height, descent
layout, inkRect, logicalRect = self._get_pango_layout(s, prop)
l, b, w, h = inkRect
return w, h+1, h + 1
def new_gc(self):
return GraphicsContextGDK(renderer=self)
def points_to_pixels(self, points):
return points/72.0 * self.dpi
class GraphicsContextGDK(GraphicsContextBase):
# a cache shared by all class instances
_cached = {} # map: rgb color -> gdk.Color
_joind = {
'bevel' : gdk.JOIN_BEVEL,
'miter' : gdk.JOIN_MITER,
'round' : gdk.JOIN_ROUND,
}
_capd = {
'butt' : gdk.CAP_BUTT,
'projecting' : gdk.CAP_PROJECTING,
'round' : gdk.CAP_ROUND,
}
def __init__(self, renderer):
GraphicsContextBase.__init__(self)
self.renderer = renderer
self.gdkGC = gtk.gdk.GC(renderer.gdkDrawable)
self._cmap = renderer._cmap
def rgb_to_gdk_color(self, rgb):
"""
rgb - an RGB tuple (three 0.0-1.0 values)
return an allocated gtk.gdk.Color
"""
try:
return self._cached[tuple(rgb)]
except KeyError:
color = self._cached[tuple(rgb)] = \
self._cmap.alloc_color(
int(rgb[0]*65535),int(rgb[1]*65535),int(rgb[2]*65535))
return color
#def set_antialiased(self, b):
# anti-aliasing is not supported by GDK
def set_capstyle(self, cs):
GraphicsContextBase.set_capstyle(self, cs)
self.gdkGC.cap_style = self._capd[self._capstyle]
def set_clip_rectangle(self, rectangle):
GraphicsContextBase.set_clip_rectangle(self, rectangle)
if rectangle is None:
return
l,b,w,h = rectangle.bounds
rectangle = (int(l), self.renderer.height-int(b+h)+1,
int(w), int(h))
#rectangle = (int(l), self.renderer.height-int(b+h),
# int(w+1), int(h+2))
self.gdkGC.set_clip_rectangle(rectangle)
def set_dashes(self, dash_offset, dash_list):
GraphicsContextBase.set_dashes(self, dash_offset, dash_list)
if dash_list == None:
self.gdkGC.line_style = gdk.LINE_SOLID
else:
pixels = self.renderer.points_to_pixels(npy.asarray(dash_list))
dl = [max(1, int(round(val))) for val in pixels]
self.gdkGC.set_dashes(dash_offset, dl)
self.gdkGC.line_style = gdk.LINE_ON_OFF_DASH
def set_foreground(self, fg, isRGB=False):
GraphicsContextBase.set_foreground(self, fg, isRGB)
self.gdkGC.foreground = self.rgb_to_gdk_color(self.get_rgb())
def set_graylevel(self, frac):
GraphicsContextBase.set_graylevel(self, frac)
self.gdkGC.foreground = self.rgb_to_gdk_color(self.get_rgb())
def set_joinstyle(self, js):
GraphicsContextBase.set_joinstyle(self, js)
self.gdkGC.join_style = self._joind[self._joinstyle]
def set_linewidth(self, w):
GraphicsContextBase.set_linewidth(self, w)
if w == 0:
self.gdkGC.line_width = 0
else:
pixels = self.renderer.points_to_pixels(w)
self.gdkGC.line_width = max(1, int(round(pixels)))
def new_figure_manager(num, *args, **kwargs):
"""
Create a new figure manager instance
"""
FigureClass = kwargs.pop('FigureClass', Figure)
thisFig = FigureClass(*args, **kwargs)
canvas = FigureCanvasGDK(thisFig)
manager = FigureManagerBase(canvas, num)
# equals:
#manager = FigureManagerBase (FigureCanvasGDK (Figure(*args, **kwargs),
# num)
return manager
class FigureCanvasGDK (FigureCanvasBase):
def __init__(self, figure):
FigureCanvasBase.__init__(self, figure)
self._renderer_init()
def _renderer_init(self):
self._renderer = RendererGDK (gtk.DrawingArea(), self.figure.dpi)
def _render_figure(self, pixmap, width, height):
self._renderer.set_pixmap (pixmap)
self._renderer.set_width_height (width, height)
self.figure.draw (self._renderer)
filetypes = FigureCanvasBase.filetypes.copy()
filetypes['jpg'] = 'JPEG'
filetypes['jpeg'] = 'JPEG'
def print_jpeg(self, filename, *args, **kwargs):
return self._print_image(filename, 'jpeg')
print_jpg = print_jpeg
def print_png(self, filename, *args, **kwargs):
return self._print_image(filename, 'png')
def _print_image(self, filename, format, *args, **kwargs):
width, height = self.get_width_height()
pixmap = gtk.gdk.Pixmap (None, width, height, depth=24)
self._render_figure(pixmap, width, height)
# jpg colors don't match the display very well, png colors match
# better
pixbuf = gtk.gdk.Pixbuf(gtk.gdk.COLORSPACE_RGB, 0, 8,
width, height)
pixbuf.get_from_drawable(pixmap, pixmap.get_colormap(),
0, 0, 0, 0, width, height)
pixbuf.save(filename, format)
def get_default_filetype(self):
return 'png'
|
gpl-3.0
|
nrhine1/scikit-learn
|
examples/ensemble/plot_voting_probas.py
|
316
|
2824
|
"""
===========================================================
Plot class probabilities calculated by the VotingClassifier
===========================================================
Plot the class probabilities of the first sample in a toy dataset
predicted by three different classifiers and averaged by the
`VotingClassifier`.
First, three exemplary classifiers are initialized (`LogisticRegression`,
`GaussianNB`, and `RandomForestClassifier`) and used to initialize a
soft-voting `VotingClassifier` with weights `[1, 1, 5]`, which means that
the predicted probabilities of the `RandomForestClassifier` are weighted 5
times as much as those of the other classifiers when the averaged
probability is calculated.
To visualize the probability weighting, we fit each classifier on the training
set and plot the predicted class probabilities for the first sample in this
example dataset.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import GaussianNB
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import VotingClassifier
clf1 = LogisticRegression(random_state=123)
clf2 = RandomForestClassifier(random_state=123)
clf3 = GaussianNB()
X = np.array([[-1.0, -1.0], [-1.2, -1.4], [-3.4, -2.2], [1.1, 1.2]])
y = np.array([1, 1, 2, 2])
eclf = VotingClassifier(estimators=[('lr', clf1), ('rf', clf2), ('gnb', clf3)],
voting='soft',
weights=[1, 1, 5])
# predict class probabilities for all classifiers
probas = [c.fit(X, y).predict_proba(X) for c in (clf1, clf2, clf3, eclf)]
# get class probabilities for the first sample in the dataset
class1_1 = [pr[0, 0] for pr in probas]
class2_1 = [pr[0, 1] for pr in probas]
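# Illustrative check (not part of the original example): with soft voting, the
# ensemble's predicted probabilities are the weighted average of the individual
# classifiers' probabilities, so a manual weighted average of the first three
# entries of ``probas`` should match the VotingClassifier output (up to
# floating point error).
manual_avg = np.average(np.array([pr[0] for pr in probas[:3]]), axis=0,
                        weights=[1, 1, 5])
# ``manual_avg`` should be close to probas[3][0], the probabilities the
# VotingClassifier predicts for the first sample.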
# plotting
N = 4 # number of groups
ind = np.arange(N) # group positions
width = 0.35 # bar width
fig, ax = plt.subplots()
# bars for classifier 1-3
p1 = ax.bar(ind, np.hstack(([class1_1[:-1], [0]])), width, color='green')
p2 = ax.bar(ind + width, np.hstack(([class2_1[:-1], [0]])), width, color='lightgreen')
# bars for VotingClassifier
p3 = ax.bar(ind, [0, 0, 0, class1_1[-1]], width, color='blue')
p4 = ax.bar(ind + width, [0, 0, 0, class2_1[-1]], width, color='steelblue')
# plot annotations
plt.axvline(2.8, color='k', linestyle='dashed')
ax.set_xticks(ind + width)
ax.set_xticklabels(['LogisticRegression\nweight 1',
'GaussianNB\nweight 1',
'RandomForestClassifier\nweight 5',
'VotingClassifier\n(average probabilities)'],
rotation=40,
ha='right')
plt.ylim([0, 1])
plt.title('Class probabilities for sample 1 by different classifiers')
plt.legend([p1[0], p2[0]], ['class 1', 'class 2'], loc='upper left')
plt.show()
|
bsd-3-clause
|
jcasner/nupic
|
external/linux32/lib/python2.6/site-packages/matplotlib/font_manager.py
|
69
|
42655
|
"""
A module for finding, managing, and using fonts across platforms.
This module provides a single :class:`FontManager` instance that can
be shared across backends and platforms. The :func:`findfont`
function returns the best TrueType (TTF) font file in the local or
system font path that matches the specified :class:`FontProperties`
instance. The :class:`FontManager` also handles Adobe Font Metrics
(AFM) font files for use by the PostScript backend.
The design is based on the `W3C Cascading Style Sheet, Level 1 (CSS1)
font specification <http://www.w3.org/TR/1998/REC-CSS2-19980512/>`_.
Future versions may implement the Level 2 or 2.1 specifications.
Experimental support is included for using `fontconfig
<http://www.fontconfig.org>`_ on Unix variant platforms (Linux, OS X,
Solaris). To enable it, set the constant ``USE_FONTCONFIG`` in this
file to ``True``. Fontconfig has the advantage that it is the
standard way to look up fonts on X11 platforms, so if a font is
installed, it is much more likely to be found.
"""
"""
KNOWN ISSUES
- documentation
- font variant is untested
- font stretch is incomplete
- font size is incomplete
- font size_adjust is incomplete
- default font algorithm needs improvement and testing
- setWeights function needs improvement
- 'light' is an invalid weight value, remove it.
- update_fonts not implemented
Authors : John Hunter <[email protected]>
Paul Barrett <[email protected]>
Michael Droettboom <[email protected]>
Copyright : John Hunter (2004,2005), Paul Barrett (2004,2005)
License : matplotlib license (PSF compatible)
The font directory code is from ttfquery,
see license/LICENSE_TTFQUERY.
"""
import os, sys, glob
try:
set
except NameError:
from sets import Set as set
import matplotlib
from matplotlib import afm
from matplotlib import ft2font
from matplotlib import rcParams, get_configdir
from matplotlib.cbook import is_string_like
from matplotlib.fontconfig_pattern import \
parse_fontconfig_pattern, generate_fontconfig_pattern
try:
import cPickle as pickle
except ImportError:
import pickle
USE_FONTCONFIG = False
verbose = matplotlib.verbose
font_scalings = {
'xx-small' : 0.579,
'x-small' : 0.694,
'small' : 0.833,
'medium' : 1.0,
'large' : 1.200,
'x-large' : 1.440,
'xx-large' : 1.728,
'larger' : 1.2,
'smaller' : 0.833,
None : 1.0}
stretch_dict = {
'ultra-condensed' : 100,
'extra-condensed' : 200,
'condensed' : 300,
'semi-condensed' : 400,
'normal' : 500,
'semi-expanded' : 600,
'expanded' : 700,
'extra-expanded' : 800,
'ultra-expanded' : 900}
weight_dict = {
'ultralight' : 100,
'light' : 200,
'normal' : 400,
'regular' : 400,
'book' : 400,
'medium' : 500,
'roman' : 500,
'semibold' : 600,
'demibold' : 600,
'demi' : 600,
'bold' : 700,
'heavy' : 800,
'extra bold' : 800,
'black' : 900}
font_family_aliases = set([
'serif',
'sans-serif',
'cursive',
'fantasy',
'monospace',
'sans'])
# OS Font paths
MSFolders = \
r'Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders'
MSFontDirectories = [
r'SOFTWARE\Microsoft\Windows NT\CurrentVersion\Fonts',
r'SOFTWARE\Microsoft\Windows\CurrentVersion\Fonts']
X11FontDirectories = [
# an old standard installation point
"/usr/X11R6/lib/X11/fonts/TTF/",
# here is the new standard location for fonts
"/usr/share/fonts/",
# documented as a good place to install new fonts
"/usr/local/share/fonts/",
# common application, not really useful
"/usr/lib/openoffice/share/fonts/truetype/",
]
OSXFontDirectories = [
"/Library/Fonts/",
"/Network/Library/Fonts/",
"/System/Library/Fonts/"
]
if not USE_FONTCONFIG:
home = os.environ.get('HOME')
if home is not None:
# user fonts on OSX
path = os.path.join(home, 'Library', 'Fonts')
OSXFontDirectories.append(path)
path = os.path.join(home, '.fonts')
X11FontDirectories.append(path)
def get_fontext_synonyms(fontext):
"""
    Return a list of file extensions that are synonyms for
    the given file extension *fontext*.
"""
return {'ttf': ('ttf', 'otf'),
'otf': ('ttf', 'otf'),
'afm': ('afm',)}[fontext]
def win32FontDirectory():
"""
Return the user-specified font directory for Win32. This is
looked up from the registry key::
\\HKEY_CURRENT_USER\Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders\Fonts
If the key is not found, $WINDIR/Fonts will be returned.
"""
try:
import _winreg
except ImportError:
pass # Fall through to default
else:
try:
user = _winreg.OpenKey(_winreg.HKEY_CURRENT_USER, MSFolders)
try:
try:
return _winreg.QueryValueEx(user, 'Fonts')[0]
except OSError:
pass # Fall through to default
finally:
_winreg.CloseKey(user)
except OSError:
pass # Fall through to default
return os.path.join(os.environ['WINDIR'], 'Fonts')
def win32InstalledFonts(directory=None, fontext='ttf'):
"""
Search for fonts in the specified font directory, or use the
system directories if none given. A list of TrueType font
filenames are returned by default, or AFM fonts if *fontext* ==
'afm'.
"""
import _winreg
if directory is None:
directory = win32FontDirectory()
fontext = get_fontext_synonyms(fontext)
key, items = None, {}
for fontdir in MSFontDirectories:
try:
local = _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE, fontdir)
except OSError:
continue
if not local:
files = []
for ext in fontext:
files.extend(glob.glob(os.path.join(directory, '*.'+ext)))
return files
try:
for j in range(_winreg.QueryInfoKey(local)[1]):
try:
key, direc, any = _winreg.EnumValue( local, j)
if not os.path.dirname(direc):
direc = os.path.join(directory, direc)
direc = os.path.abspath(direc).lower()
if os.path.splitext(direc)[1][1:] in fontext:
items[direc] = 1
except EnvironmentError:
continue
except WindowsError:
continue
return items.keys()
finally:
_winreg.CloseKey(local)
return None
def OSXFontDirectory():
"""
Return the system font directories for OS X. This is done by
starting at the list of hardcoded paths in
:attr:`OSXFontDirectories` and returning all nested directories
within them.
"""
fontpaths = []
def add(arg,directory,files):
fontpaths.append(directory)
for fontdir in OSXFontDirectories:
try:
if os.path.isdir(fontdir):
os.path.walk(fontdir, add, None)
except (IOError, OSError, TypeError, ValueError):
pass
return fontpaths
def OSXInstalledFonts(directory=None, fontext='ttf'):
"""
Get list of font files on OS X - ignores font suffix by default.
"""
if directory is None:
directory = OSXFontDirectory()
fontext = get_fontext_synonyms(fontext)
files = []
for path in directory:
if fontext is None:
files.extend(glob.glob(os.path.join(path,'*')))
else:
for ext in fontext:
files.extend(glob.glob(os.path.join(path, '*.'+ext)))
files.extend(glob.glob(os.path.join(path, '*.'+ext.upper())))
return files
def x11FontDirectory():
"""
Return the system font directories for X11. This is done by
starting at the list of hardcoded paths in
:attr:`X11FontDirectories` and returning all nested directories
within them.
"""
fontpaths = []
def add(arg,directory,files):
fontpaths.append(directory)
for fontdir in X11FontDirectories:
try:
if os.path.isdir(fontdir):
os.path.walk(fontdir, add, None)
except (IOError, OSError, TypeError, ValueError):
pass
return fontpaths
def get_fontconfig_fonts(fontext='ttf'):
"""
Grab a list of all the fonts that are being tracked by fontconfig
by making a system call to ``fc-list``. This is an easy way to
grab all of the fonts the user wants to be made available to
    applications, without needing to know where all of them reside.
"""
try:
import commands
except ImportError:
return {}
fontext = get_fontext_synonyms(fontext)
fontfiles = {}
status, output = commands.getstatusoutput("fc-list file")
if status == 0:
for line in output.split('\n'):
fname = line.split(':')[0]
if (os.path.splitext(fname)[1][1:] in fontext and
os.path.exists(fname)):
fontfiles[fname] = 1
return fontfiles
def findSystemFonts(fontpaths=None, fontext='ttf'):
"""
Search for fonts in the specified font paths. If no paths are
given, will use a standard set of system paths, as well as the
list of fonts tracked by fontconfig if fontconfig is installed and
    available. A list of TrueType fonts is returned by default, with
AFM fonts as an option.
"""
fontfiles = {}
fontexts = get_fontext_synonyms(fontext)
if fontpaths is None:
if sys.platform == 'win32':
fontdir = win32FontDirectory()
fontpaths = [fontdir]
# now get all installed fonts directly...
for f in win32InstalledFonts(fontdir):
base, ext = os.path.splitext(f)
if len(ext)>1 and ext[1:].lower() in fontexts:
fontfiles[f] = 1
else:
fontpaths = x11FontDirectory()
# check for OS X & load its fonts if present
if sys.platform == 'darwin':
for f in OSXInstalledFonts(fontext=fontext):
fontfiles[f] = 1
for f in get_fontconfig_fonts(fontext):
fontfiles[f] = 1
elif isinstance(fontpaths, (str, unicode)):
fontpaths = [fontpaths]
for path in fontpaths:
files = []
for ext in fontexts:
files.extend(glob.glob(os.path.join(path, '*.'+ext)))
files.extend(glob.glob(os.path.join(path, '*.'+ext.upper())))
for fname in files:
fontfiles[os.path.abspath(fname)] = 1
return [fname for fname in fontfiles.keys() if os.path.exists(fname)]
def weight_as_number(weight):
"""
Return the weight property as a numeric value. String values
are converted to their corresponding numeric value.
"""
if isinstance(weight, str):
try:
weight = weight_dict[weight.lower()]
except KeyError:
weight = 400
elif weight in range(100, 1000, 100):
pass
else:
raise ValueError, 'weight not a valid integer'
return weight
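# Illustrative examples for weight_as_number (not in the original source):
#   weight_as_number('bold')  -> 700  (looked up in weight_dict)
#   weight_as_number(600)     -> 600  (already a valid multiple of 100)
#   weight_as_number('junk')  -> 400  (unknown strings fall back to normal)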
class FontEntry(object):
"""
A class for storing Font properties. It is used when populating
the font lookup dictionary.
"""
def __init__(self,
fname ='',
name ='',
style ='normal',
variant='normal',
weight ='normal',
stretch='normal',
size ='medium',
):
self.fname = fname
self.name = name
self.style = style
self.variant = variant
self.weight = weight
self.stretch = stretch
try:
self.size = str(float(size))
except ValueError:
self.size = size
def ttfFontProperty(font):
"""
    A function for populating a :class:`FontEntry` instance by extracting
information from the TrueType font file.
*font* is a :class:`FT2Font` instance.
"""
name = font.family_name
# Styles are: italic, oblique, and normal (default)
sfnt = font.get_sfnt()
sfnt2 = sfnt.get((1,0,0,2))
sfnt4 = sfnt.get((1,0,0,4))
if sfnt2:
sfnt2 = sfnt2.lower()
else:
sfnt2 = ''
if sfnt4:
sfnt4 = sfnt4.lower()
else:
sfnt4 = ''
if sfnt4.find('oblique') >= 0:
style = 'oblique'
elif sfnt4.find('italic') >= 0:
style = 'italic'
elif sfnt2.find('regular') >= 0:
style = 'normal'
elif font.style_flags & ft2font.ITALIC:
style = 'italic'
else:
style = 'normal'
# Variants are: small-caps and normal (default)
# !!!! Untested
if name.lower() in ['capitals', 'small-caps']:
variant = 'small-caps'
else:
variant = 'normal'
# Weights are: 100, 200, 300, 400 (normal: default), 500 (medium),
# 600 (semibold, demibold), 700 (bold), 800 (heavy), 900 (black)
# lighter and bolder are also allowed.
weight = None
for w in weight_dict.keys():
if sfnt4.find(w) >= 0:
weight = w
break
if not weight:
if font.style_flags & ft2font.BOLD:
weight = 700
else:
weight = 400
weight = weight_as_number(weight)
# Stretch can be absolute and relative
# Absolute stretches are: ultra-condensed, extra-condensed, condensed,
# semi-condensed, normal, semi-expanded, expanded, extra-expanded,
# and ultra-expanded.
# Relative stretches are: wider, narrower
# Child value is: inherit
# !!!! Incomplete
if sfnt4.find('narrow') >= 0 or sfnt4.find('condensed') >= 0 or \
sfnt4.find('cond') >= 0:
stretch = 'condensed'
elif sfnt4.find('demi cond') >= 0:
stretch = 'semi-condensed'
elif sfnt4.find('wide') >= 0 or sfnt4.find('expanded') >= 0:
stretch = 'expanded'
else:
stretch = 'normal'
# Sizes can be absolute and relative.
# Absolute sizes are: xx-small, x-small, small, medium, large, x-large,
# and xx-large.
# Relative sizes are: larger, smaller
# Length value is an absolute font size, e.g. 12pt
# Percentage values are in 'em's. Most robust specification.
# !!!! Incomplete
if font.scalable:
size = 'scalable'
else:
size = str(float(font.get_fontsize()))
# !!!! Incomplete
size_adjust = None
return FontEntry(font.fname, name, style, variant, weight, stretch, size)
def afmFontProperty(fontpath, font):
"""
    A function for populating a :class:`FontEntry` instance by
extracting information from the AFM font file.
*font* is a class:`AFM` instance.
"""
name = font.get_familyname()
# Styles are: italic, oblique, and normal (default)
if font.get_angle() != 0 or name.lower().find('italic') >= 0:
style = 'italic'
elif name.lower().find('oblique') >= 0:
style = 'oblique'
else:
style = 'normal'
# Variants are: small-caps and normal (default)
# !!!! Untested
if name.lower() in ['capitals', 'small-caps']:
variant = 'small-caps'
else:
variant = 'normal'
# Weights are: 100, 200, 300, 400 (normal: default), 500 (medium),
# 600 (semibold, demibold), 700 (bold), 800 (heavy), 900 (black)
# lighter and bolder are also allowed.
weight = weight_as_number(font.get_weight().lower())
# Stretch can be absolute and relative
# Absolute stretches are: ultra-condensed, extra-condensed, condensed,
# semi-condensed, normal, semi-expanded, expanded, extra-expanded,
# and ultra-expanded.
# Relative stretches are: wider, narrower
# Child value is: inherit
# !!!! Incomplete
stretch = 'normal'
# Sizes can be absolute and relative.
# Absolute sizes are: xx-small, x-small, small, medium, large, x-large,
# and xx-large.
# Relative sizes are: larger, smaller
# Length value is an absolute font size, e.g. 12pt
# Percentage values are in 'em's. Most robust specification.
# All AFM fonts are apparently scalable.
size = 'scalable'
# !!!! Incomplete
size_adjust = None
return FontEntry(fontpath, name, style, variant, weight, stretch, size)
def createFontList(fontfiles, fontext='ttf'):
"""
A function to create a font lookup list. The default is to create
a list of TrueType fonts. An AFM font list can optionally be
created.
"""
fontlist = []
# Add fonts from list of known font files.
seen = {}
for fpath in fontfiles:
verbose.report('createFontDict: %s' % (fpath), 'debug')
fname = os.path.split(fpath)[1]
if fname in seen: continue
else: seen[fname] = 1
if fontext == 'afm':
try:
fh = open(fpath, 'r')
except:
verbose.report("Could not open font file %s" % fpath)
continue
try:
try:
font = afm.AFM(fh)
finally:
fh.close()
except RuntimeError:
verbose.report("Could not parse font file %s"%fpath)
continue
prop = afmFontProperty(fpath, font)
else:
try:
font = ft2font.FT2Font(str(fpath))
except RuntimeError:
verbose.report("Could not open font file %s"%fpath)
continue
except UnicodeError:
verbose.report("Cannot handle unicode filenames")
#print >> sys.stderr, 'Bad file is', fpath
continue
try: prop = ttfFontProperty(font)
except: continue
fontlist.append(prop)
return fontlist
class FontProperties(object):
"""
A class for storing and manipulating font properties.
The font properties are those described in the `W3C Cascading
Style Sheet, Level 1
<http://www.w3.org/TR/1998/REC-CSS2-19980512/>`_ font
specification. The six properties are:
- family: A list of font names in decreasing order of priority.
The items may include a generic font family name, either
'serif', 'sans-serif', 'cursive', 'fantasy', or 'monospace'.
In that case, the actual font to be used will be looked up
from the associated rcParam in :file:`matplotlibrc`.
- style: Either 'normal', 'italic' or 'oblique'.
- variant: Either 'normal' or 'small-caps'.
- stretch: A numeric value in the range 0-1000 or one of
'ultra-condensed', 'extra-condensed', 'condensed',
'semi-condensed', 'normal', 'semi-expanded', 'expanded',
'extra-expanded' or 'ultra-expanded'
- weight: A numeric value in the range 0-1000 or one of
'ultralight', 'light', 'normal', 'regular', 'book', 'medium',
'roman', 'semibold', 'demibold', 'demi', 'bold', 'heavy',
'extra bold', 'black'
      - size: Either a relative value of 'xx-small', 'x-small',
'small', 'medium', 'large', 'x-large', 'xx-large' or an
absolute font size, e.g. 12
The default font property for TrueType fonts (as specified in the
default :file:`matplotlibrc` file) is::
sans-serif, normal, normal, normal, normal, scalable.
Alternatively, a font may be specified using an absolute path to a
.ttf file, by using the *fname* kwarg.
The preferred usage of font sizes is to use the relative values,
e.g. 'large', instead of absolute font sizes, e.g. 12. This
approach allows all text sizes to be made larger or smaller based
on the font manager's default font size, i.e. by using the
:meth:`FontManager.set_default_size` method.
This class will also accept a `fontconfig
<http://www.fontconfig.org/>`_ pattern, if it is the only argument
provided. See the documentation on `fontconfig patterns
<http://www.fontconfig.org/fontconfig-user.html>`_. This support
does not require fontconfig to be installed. We are merely
borrowing its pattern syntax for use here.
Note that matplotlib's internal font manager and fontconfig use a
different algorithm to lookup fonts, so the results of the same pattern
may be different in matplotlib than in other applications that use
fontconfig.
"""
def __init__(self,
family = None,
style = None,
variant= None,
weight = None,
stretch= None,
size = None,
fname = None, # if this is set, it's a hardcoded filename to use
_init = None # used only by copy()
):
self._family = None
self._slant = None
self._variant = None
self._weight = None
self._stretch = None
self._size = None
self._file = None
# This is used only by copy()
if _init is not None:
self.__dict__.update(_init.__dict__)
return
if is_string_like(family):
# Treat family as a fontconfig pattern if it is the only
# parameter provided.
if (style is None and
variant is None and
weight is None and
stretch is None and
size is None and
fname is None):
self.set_fontconfig_pattern(family)
return
self.set_family(family)
self.set_style(style)
self.set_variant(variant)
self.set_weight(weight)
self.set_stretch(stretch)
self.set_file(fname)
self.set_size(size)
def _parse_fontconfig_pattern(self, pattern):
return parse_fontconfig_pattern(pattern)
def __hash__(self):
l = self.__dict__.items()
l.sort()
return hash(repr(l))
def __str__(self):
return self.get_fontconfig_pattern()
def get_family(self):
"""
Return a list of font names that comprise the font family.
"""
if self._family is None:
family = rcParams['font.family']
if is_string_like(family):
return [family]
return family
return self._family
def get_name(self):
"""
Return the name of the font that best matches the font
properties.
"""
return ft2font.FT2Font(str(findfont(self))).family_name
def get_style(self):
"""
Return the font style. Values are: 'normal', 'italic' or
'oblique'.
"""
if self._slant is None:
return rcParams['font.style']
return self._slant
get_slant = get_style
def get_variant(self):
"""
Return the font variant. Values are: 'normal' or
'small-caps'.
"""
if self._variant is None:
return rcParams['font.variant']
return self._variant
def get_weight(self):
"""
        Return the font weight. Options are: A numeric value in the
range 0-1000 or one of 'light', 'normal', 'regular', 'book',
'medium', 'roman', 'semibold', 'demibold', 'demi', 'bold',
'heavy', 'extra bold', 'black'
"""
if self._weight is None:
return rcParams['font.weight']
return self._weight
def get_stretch(self):
"""
Return the font stretch or width. Options are: 'ultra-condensed',
'extra-condensed', 'condensed', 'semi-condensed', 'normal',
'semi-expanded', 'expanded', 'extra-expanded', 'ultra-expanded'.
"""
if self._stretch is None:
return rcParams['font.stretch']
return self._stretch
def get_size(self):
"""
Return the font size.
"""
if self._size is None:
return rcParams['font.size']
return self._size
def get_size_in_points(self):
if self._size is not None:
try:
return float(self._size)
except ValueError:
pass
default_size = fontManager.get_default_size()
return default_size * font_scalings.get(self._size)
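    # Illustrative example for get_size_in_points (not in the original source):
    # with a default size of 12 points, a relative size of 'large' resolves to
    # 12 * font_scalings['large'] == 12 * 1.2 == 14.4 points.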
def get_file(self):
"""
Return the filename of the associated font.
"""
return self._file
def get_fontconfig_pattern(self):
"""
Get a fontconfig pattern suitable for looking up the font as
specified with fontconfig's ``fc-match`` utility.
See the documentation on `fontconfig patterns
<http://www.fontconfig.org/fontconfig-user.html>`_.
This support does not require fontconfig to be installed or
support for it to be enabled. We are merely borrowing its
pattern syntax for use here.
"""
return generate_fontconfig_pattern(self)
def set_family(self, family):
"""
Change the font family. May be either an alias (generic name
        in CSS parlance), such as: 'serif', 'sans-serif', 'cursive',
'fantasy', or 'monospace', or a real font name.
"""
if family is None:
self._family = None
else:
if is_string_like(family):
family = [family]
self._family = family
set_name = set_family
def set_style(self, style):
"""
Set the font style. Values are: 'normal', 'italic' or
'oblique'.
"""
if style not in ('normal', 'italic', 'oblique', None):
raise ValueError("style must be normal, italic or oblique")
self._slant = style
set_slant = set_style
def set_variant(self, variant):
"""
Set the font variant. Values are: 'normal' or 'small-caps'.
"""
if variant not in ('normal', 'small-caps', None):
raise ValueError("variant must be normal or small-caps")
self._variant = variant
def set_weight(self, weight):
"""
Set the font weight. May be either a numeric value in the
range 0-1000 or one of 'ultralight', 'light', 'normal',
'regular', 'book', 'medium', 'roman', 'semibold', 'demibold',
'demi', 'bold', 'heavy', 'extra bold', 'black'
"""
if weight is not None:
try:
weight = int(weight)
if weight < 0 or weight > 1000:
raise ValueError()
except ValueError:
if weight not in weight_dict:
raise ValueError("weight is invalid")
self._weight = weight
def set_stretch(self, stretch):
"""
Set the font stretch or width. Options are: 'ultra-condensed',
'extra-condensed', 'condensed', 'semi-condensed', 'normal',
'semi-expanded', 'expanded', 'extra-expanded' or
'ultra-expanded', or a numeric value in the range 0-1000.
"""
if stretch is not None:
try:
stretch = int(stretch)
if stretch < 0 or stretch > 1000:
raise ValueError()
except ValueError:
if stretch not in stretch_dict:
raise ValueError("stretch is invalid")
self._stretch = stretch
def set_size(self, size):
"""
        Set the font size. Either a relative value of 'xx-small',
'x-small', 'small', 'medium', 'large', 'x-large', 'xx-large'
or an absolute font size, e.g. 12.
"""
if size is not None:
try:
size = float(size)
except ValueError:
if size is not None and size not in font_scalings:
raise ValueError("size is invalid")
self._size = size
def set_file(self, file):
"""
Set the filename of the fontfile to use. In this case, all
other properties will be ignored.
"""
self._file = file
def set_fontconfig_pattern(self, pattern):
"""
Set the properties by parsing a fontconfig *pattern*.
See the documentation on `fontconfig patterns
<http://www.fontconfig.org/fontconfig-user.html>`_.
This support does not require fontconfig to be installed or
support for it to be enabled. We are merely borrowing its
pattern syntax for use here.
"""
for key, val in self._parse_fontconfig_pattern(pattern).items():
if type(val) == list:
getattr(self, "set_" + key)(val[0])
else:
getattr(self, "set_" + key)(val)
def copy(self):
"""Return a deep copy of self"""
return FontProperties(_init = self)
def ttfdict_to_fnames(d):
"""
flatten a ttfdict to all the filenames it contains
"""
fnames = []
for named in d.values():
for styled in named.values():
for variantd in styled.values():
for weightd in variantd.values():
for stretchd in weightd.values():
for fname in stretchd.values():
fnames.append(fname)
return fnames
def pickle_dump(data, filename):
"""
Equivalent to pickle.dump(data, open(filename, 'w'))
but closes the file to prevent filehandle leakage.
"""
fh = open(filename, 'w')
try:
pickle.dump(data, fh)
finally:
fh.close()
def pickle_load(filename):
"""
Equivalent to pickle.load(open(filename, 'r'))
but closes the file to prevent filehandle leakage.
"""
fh = open(filename, 'r')
try:
data = pickle.load(fh)
finally:
fh.close()
return data
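# Illustrative sketch (not part of the original module): the two helpers above
# are a plain dump/load pair that always close their file handles.  The
# temporary path below is created only for the demonstration.
def _demo_pickle_roundtrip():
    import tempfile
    path = os.path.join(tempfile.mkdtemp(), 'demo.pickle')
    pickle_dump({'answer': 42}, path)
    return pickle_load(path)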
class FontManager:
"""
On import, the :class:`FontManager` singleton instance creates a
list of TrueType fonts based on the font properties: name, style,
variant, weight, stretch, and size. The :meth:`findfont` method
does a nearest neighbor search to find the font that most closely
matches the specification. If no good enough match is found, a
default font is returned.
"""
def __init__(self, size=None, weight='normal'):
self.__default_weight = weight
self.default_size = size
paths = [os.path.join(rcParams['datapath'], 'fonts', 'ttf'),
os.path.join(rcParams['datapath'], 'fonts', 'afm')]
# Create list of font paths
for pathname in ['TTFPATH', 'AFMPATH']:
if pathname in os.environ:
ttfpath = os.environ[pathname]
if ttfpath.find(';') >= 0: #win32 style
paths.extend(ttfpath.split(';'))
elif ttfpath.find(':') >= 0: # unix style
paths.extend(ttfpath.split(':'))
else:
paths.append(ttfpath)
verbose.report('font search path %s'%(str(paths)))
# Load TrueType fonts and create font dictionary.
self.ttffiles = findSystemFonts(paths) + findSystemFonts()
for fname in self.ttffiles:
verbose.report('trying fontname %s' % fname, 'debug')
if fname.lower().find('vera.ttf')>=0:
self.defaultFont = fname
break
else:
# use anything
self.defaultFont = self.ttffiles[0]
self.ttflist = createFontList(self.ttffiles)
if rcParams['pdf.use14corefonts']:
# Load only the 14 PDF core fonts. These fonts do not need to be
# embedded; every PDF viewing application is required to have them:
# Helvetica, Helvetica-Bold, Helvetica-Oblique, Helvetica-BoldOblique,
# Courier, Courier-Bold, Courier-Oblique, Courier-BoldOblique,
# Times-Roman, Times-Bold, Times-Italic, Times-BoldItalic, Symbol,
# ZapfDingbats.
afmpath = os.path.join(rcParams['datapath'],'fonts','pdfcorefonts')
afmfiles = findSystemFonts(afmpath, fontext='afm')
self.afmlist = createFontList(afmfiles, fontext='afm')
else:
self.afmfiles = findSystemFonts(paths, fontext='afm') + \
findSystemFonts(fontext='afm')
self.afmlist = createFontList(self.afmfiles, fontext='afm')
self.ttf_lookup_cache = {}
self.afm_lookup_cache = {}
def get_default_weight(self):
"""
Return the default font weight.
"""
return self.__default_weight
def get_default_size(self):
"""
Return the default font size.
"""
if self.default_size is None:
return rcParams['font.size']
return self.default_size
def set_default_weight(self, weight):
"""
Set the default font weight. The initial value is 'normal'.
"""
self.__default_weight = weight
def set_default_size(self, size):
"""
Set the default font size in points. The initial value is set
by ``font.size`` in rc.
"""
self.default_size = size
def update_fonts(self, filenames):
"""
Update the font dictionary with new font files.
Currently not implemented.
"""
# !!!! Needs implementing
raise NotImplementedError
# Each of the scoring functions below should return a value between
# 0.0 (perfect match) and 1.0 (terrible match)
def score_family(self, families, family2):
"""
Returns a match score between the list of font families in
*families* and the font family name *family2*.
        An exact match anywhere in the list returns 0.0.
        A match by generic font family name returns a score between
        0.0 and 0.1, depending on the font's position in the
        corresponding rcParams list.
        No match will return 1.0.
"""
for i, family1 in enumerate(families):
if family1.lower() in font_family_aliases:
if family1 == 'sans':
                    family1 = 'sans-serif'
options = rcParams['font.' + family1]
if family2 in options:
idx = options.index(family2)
return 0.1 * (float(idx) / len(options))
elif family1.lower() == family2.lower():
return 0.0
return 1.0
def score_style(self, style1, style2):
"""
Returns a match score between *style1* and *style2*.
An exact match returns 0.0.
A match between 'italic' and 'oblique' returns 0.1.
No match returns 1.0.
"""
if style1 == style2:
return 0.0
elif style1 in ('italic', 'oblique') and \
style2 in ('italic', 'oblique'):
return 0.1
return 1.0
def score_variant(self, variant1, variant2):
"""
Returns a match score between *variant1* and *variant2*.
An exact match returns 0.0, otherwise 1.0.
"""
if variant1 == variant2:
return 0.0
else:
return 1.0
def score_stretch(self, stretch1, stretch2):
"""
Returns a match score between *stretch1* and *stretch2*.
The result is the absolute value of the difference between the
CSS numeric values of *stretch1* and *stretch2*, normalized
between 0.0 and 1.0.
"""
try:
stretchval1 = int(stretch1)
except ValueError:
stretchval1 = stretch_dict.get(stretch1, 500)
try:
stretchval2 = int(stretch2)
except ValueError:
stretchval2 = stretch_dict.get(stretch2, 500)
return abs(stretchval1 - stretchval2) / 1000.0
def score_weight(self, weight1, weight2):
"""
Returns a match score between *weight1* and *weight2*.
The result is the absolute value of the difference between the
CSS numeric values of *weight1* and *weight2*, normalized
between 0.0 and 1.0.
"""
try:
weightval1 = int(weight1)
except ValueError:
weightval1 = weight_dict.get(weight1, 500)
try:
weightval2 = int(weight2)
except ValueError:
weightval2 = weight_dict.get(weight2, 500)
return abs(weightval1 - weightval2) / 1000.0
def score_size(self, size1, size2):
"""
Returns a match score between *size1* and *size2*.
If *size2* (the size specified in the font file) is 'scalable', this
function always returns 0.0, since any font size can be generated.
Otherwise, the result is the absolute distance between *size1* and
*size2*, normalized so that the usual range of font sizes (6pt -
72pt) will lie between 0.0 and 1.0.
"""
if size2 == 'scalable':
return 0.0
        # Size value should have already been validated by set_size(), so it
        # is either a float or a known relative-size keyword.
try:
sizeval1 = float(size1)
except ValueError:
            sizeval1 = self.default_size * font_scalings[size1]
try:
sizeval2 = float(size2)
except ValueError:
return 1.0
return abs(sizeval1 - sizeval2) / 72.0
def findfont(self, prop, fontext='ttf'):
"""
Search the font list for the font that most closely matches
the :class:`FontProperties` *prop*.
        :meth:`findfont` performs a nearest neighbor search.  Each
        font is given a similarity score to the target font
        properties, where 0.0 is a perfect match.  The font with the
        lowest (best) score is returned.  If no score falls below a
        certain threshold, the default font (usually Vera Sans) is
        returned.
The result is cached, so subsequent lookups don't have to
perform the O(n) nearest neighbor search.
See the `W3C Cascading Style Sheet, Level 1
<http://www.w3.org/TR/1998/REC-CSS2-19980512/>`_ documentation
for a description of the font finding algorithm.
"""
debug = False
if prop is None:
return self.defaultFont
if is_string_like(prop):
prop = FontProperties(prop)
fname = prop.get_file()
if fname is not None:
verbose.report('findfont returning %s'%fname, 'debug')
return fname
if fontext == 'afm':
font_cache = self.afm_lookup_cache
fontlist = self.afmlist
else:
font_cache = self.ttf_lookup_cache
fontlist = self.ttflist
cached = font_cache.get(hash(prop))
if cached:
return cached
best_score = 1e64
best_font = None
for font in fontlist:
# Matching family should have highest priority, so it is multiplied
# by 10.0
score = \
self.score_family(prop.get_family(), font.name) * 10.0 + \
self.score_style(prop.get_style(), font.style) + \
self.score_variant(prop.get_variant(), font.variant) + \
self.score_weight(prop.get_weight(), font.weight) + \
self.score_stretch(prop.get_stretch(), font.stretch) + \
self.score_size(prop.get_size(), font.size)
if score < best_score:
best_score = score
best_font = font
if score == 0:
break
if best_font is None or best_score >= 10.0:
verbose.report('findfont: Could not match %s. Returning %s' %
(prop, self.defaultFont))
result = self.defaultFont
else:
verbose.report('findfont: Matching %s to %s (%s) with score of %f' %
(prop, best_font.name, best_font.fname, best_score))
result = best_font.fname
font_cache[hash(prop)] = result
return result
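# Illustrative sketch (not part of the original module): ask for the font file
# that best matches a bold sans-serif request via the module-level findfont()
# helper defined further down in this file.
def _demo_findfont_lookup():
    prop = FontProperties(family='sans-serif', weight='bold')
    return findfont(prop)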
_is_opentype_cff_font_cache = {}
def is_opentype_cff_font(filename):
"""
Returns True if the given font is a Postscript Compact Font Format
Font embedded in an OpenType wrapper. Used by the PostScript and
PDF backends that can not subset these fonts.
"""
if os.path.splitext(filename)[1].lower() == '.otf':
result = _is_opentype_cff_font_cache.get(filename)
if result is None:
fd = open(filename, 'rb')
tag = fd.read(4)
fd.close()
result = (tag == 'OTTO')
_is_opentype_cff_font_cache[filename] = result
return result
return False
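# Illustrative sketch (not part of the original module): the check above only
# inspects the first four bytes of an ``.otf`` file; ``path_to_otf`` is a
# caller-supplied path, not a font bundled with this module.
def _demo_otf_tag(path_to_otf):
    fd = open(path_to_otf, 'rb')
    try:
        tag = fd.read(4)
    finally:
        fd.close()
    return tag == 'OTTO'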
# The experimental fontconfig-based backend.
if USE_FONTCONFIG and sys.platform != 'win32':
import re
def fc_match(pattern, fontext):
import commands
fontexts = get_fontext_synonyms(fontext)
ext = "." + fontext
status, output = commands.getstatusoutput('fc-match -sv "%s"' % pattern)
if status == 0:
for match in _fc_match_regex.finditer(output):
file = match.group(1)
if os.path.splitext(file)[1][1:] in fontexts:
return file
return None
_fc_match_regex = re.compile(r'\sfile:\s+"([^"]*)"')
_fc_match_cache = {}
def findfont(prop, fontext='ttf'):
if not is_string_like(prop):
prop = prop.get_fontconfig_pattern()
cached = _fc_match_cache.get(prop)
if cached is not None:
return cached
result = fc_match(prop, fontext)
if result is None:
result = fc_match(':', fontext)
_fc_match_cache[prop] = result
return result
else:
_fmcache = os.path.join(get_configdir(), 'fontList.cache')
fontManager = None
def _rebuild():
global fontManager
fontManager = FontManager()
pickle_dump(fontManager, _fmcache)
verbose.report("generated new fontManager")
try:
fontManager = pickle_load(_fmcache)
fontManager.default_size = None
verbose.report("Using fontManager instance from %s" % _fmcache)
except:
_rebuild()
def findfont(prop, **kw):
global fontManager
font = fontManager.findfont(prop, **kw)
if not os.path.exists(font):
verbose.report("%s returned by pickled fontManager does not exist" % font)
_rebuild()
font = fontManager.findfont(prop, **kw)
return font
|
agpl-3.0
|
rexshihaoren/scikit-learn
|
examples/decomposition/plot_pca_vs_fa_model_selection.py
|
78
|
4510
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
===============================================================
Model selection with Probabilistic PCA and Factor Analysis (FA)
===============================================================
Probabilistic PCA and Factor Analysis are probabilistic models.
The consequence is that the likelihood of new data can be used
for model selection and covariance estimation.
Here we compare PCA and FA with cross-validation on low rank data corrupted
with homoscedastic noise (noise variance
is the same for each feature) or heteroscedastic noise (noise variance
is the different for each feature). In a second step we compare the model
likelihood to the likelihoods obtained from shrinkage covariance estimators.
One can observe that with homoscedastic noise both FA and PCA succeed
in recovering the size of the low rank subspace. The likelihood with PCA
is higher than FA in this case. However PCA fails and overestimates
the rank when heteroscedastic noise is present. Under appropriate
circumstances the low rank models are more likely than shrinkage models.
The automatic estimation from
Automatic Choice of Dimensionality for PCA. NIPS 2000: 598-604
by Thomas P. Minka is also compared.
"""
print(__doc__)
# Authors: Alexandre Gramfort
# Denis A. Engemann
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from scipy import linalg
from sklearn.decomposition import PCA, FactorAnalysis
from sklearn.covariance import ShrunkCovariance, LedoitWolf
from sklearn.cross_validation import cross_val_score
from sklearn.grid_search import GridSearchCV
###############################################################################
# Create the data
n_samples, n_features, rank = 1000, 50, 10
sigma = 1.
rng = np.random.RandomState(42)
U, _, _ = linalg.svd(rng.randn(n_features, n_features))
X = np.dot(rng.randn(n_samples, rank), U[:, :rank].T)
# Adding homoscedastic noise
X_homo = X + sigma * rng.randn(n_samples, n_features)
# Adding heteroscedastic noise
sigmas = sigma * rng.rand(n_features) + sigma / 2.
X_hetero = X + rng.randn(n_samples, n_features) * sigmas
###############################################################################
# Fit the models
n_components = np.arange(0, n_features, 5) # options for n_components
def compute_scores(X):
pca = PCA()
fa = FactorAnalysis()
pca_scores, fa_scores = [], []
for n in n_components:
pca.n_components = n
fa.n_components = n
pca_scores.append(np.mean(cross_val_score(pca, X)))
fa_scores.append(np.mean(cross_val_score(fa, X)))
return pca_scores, fa_scores
def shrunk_cov_score(X):
shrinkages = np.logspace(-2, 0, 30)
cv = GridSearchCV(ShrunkCovariance(), {'shrinkage': shrinkages})
return np.mean(cross_val_score(cv.fit(X).best_estimator_, X))
def lw_score(X):
return np.mean(cross_val_score(LedoitWolf(), X))
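# Illustrative sketch (not in the original example): score a single, fixed
# shrinkage value directly instead of the grid search in shrunk_cov_score
# above; handy as a quick sanity check against the CV-selected shrinkage.
def fixed_shrinkage_score(X, shrinkage=0.1):
    return np.mean(cross_val_score(ShrunkCovariance(shrinkage=shrinkage), X))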
for X, title in [(X_homo, 'Homoscedastic Noise'),
(X_hetero, 'Heteroscedastic Noise')]:
pca_scores, fa_scores = compute_scores(X)
n_components_pca = n_components[np.argmax(pca_scores)]
n_components_fa = n_components[np.argmax(fa_scores)]
pca = PCA(n_components='mle')
pca.fit(X)
n_components_pca_mle = pca.n_components_
print("best n_components by PCA CV = %d" % n_components_pca)
print("best n_components by FactorAnalysis CV = %d" % n_components_fa)
print("best n_components by PCA MLE = %d" % n_components_pca_mle)
plt.figure()
plt.plot(n_components, pca_scores, 'b', label='PCA scores')
plt.plot(n_components, fa_scores, 'r', label='FA scores')
plt.axvline(rank, color='g', label='TRUTH: %d' % rank, linestyle='-')
plt.axvline(n_components_pca, color='b',
label='PCA CV: %d' % n_components_pca, linestyle='--')
plt.axvline(n_components_fa, color='r',
label='FactorAnalysis CV: %d' % n_components_fa, linestyle='--')
plt.axvline(n_components_pca_mle, color='k',
label='PCA MLE: %d' % n_components_pca_mle, linestyle='--')
# compare with other covariance estimators
plt.axhline(shrunk_cov_score(X), color='violet',
label='Shrunk Covariance MLE', linestyle='-.')
plt.axhline(lw_score(X), color='orange',
                label='LedoitWolf MLE', linestyle='-.')
plt.xlabel('nb of components')
plt.ylabel('CV scores')
plt.legend(loc='lower right')
plt.title(title)
plt.show()
|
bsd-3-clause
|
HesselTjeerdsma/Cyber-Physical-Pacman-Game
|
Algor/flask/lib/python2.7/site-packages/scipy/stats/_binned_statistic.py
|
14
|
26457
|
from __future__ import division, print_function, absolute_import
import warnings
import numpy as np
from scipy._lib.six import callable, xrange
from collections import namedtuple
__all__ = ['binned_statistic',
'binned_statistic_2d',
'binned_statistic_dd']
BinnedStatisticResult = namedtuple('BinnedStatisticResult',
('statistic', 'bin_edges', 'binnumber'))
def binned_statistic(x, values, statistic='mean',
bins=10, range=None):
"""
Compute a binned statistic for one or more sets of data.
This is a generalization of a histogram function. A histogram divides
the space into bins, and returns the count of the number of points in
each bin. This function allows the computation of the sum, mean, median,
or other statistic of the values (or set of values) within each bin.
Parameters
----------
x : (N,) array_like
A sequence of values to be binned.
values : (N,) array_like or list of (N,) array_like
The data on which the statistic will be computed. This must be
the same shape as `x`, or a set of sequences - each the same shape as
`x`. If `values` is a set of sequences, the statistic will be computed
on each independently.
statistic : string or callable, optional
The statistic to compute (default is 'mean').
The following statistics are available:
* 'mean' : compute the mean of values for points within each bin.
Empty bins will be represented by NaN.
* 'median' : compute the median of values for points within each
bin. Empty bins will be represented by NaN.
* 'count' : compute the count of points within each bin. This is
identical to an unweighted histogram. `values` array is not
referenced.
* 'sum' : compute the sum of values for points within each bin.
This is identical to a weighted histogram.
* 'min' : compute the minimum of values for points within each bin.
Empty bins will be represented by NaN.
      * 'max' : compute the maximum of values for points within each bin.
Empty bins will be represented by NaN.
* function : a user-defined function which takes a 1D array of
values, and outputs a single numerical statistic. This function
will be called on the values in each bin. Empty bins will be
represented by function([]), or NaN if this returns an error.
bins : int or sequence of scalars, optional
If `bins` is an int, it defines the number of equal-width bins in the
given range (10 by default). If `bins` is a sequence, it defines the
bin edges, including the rightmost edge, allowing for non-uniform bin
widths. Values in `x` that are smaller than lowest bin edge are
assigned to bin number 0, values beyond the highest bin are assigned to
``bins[-1]``. If the bin edges are specified, the number of bins will
be, (nx = len(bins)-1).
range : (float, float) or [(float, float)], optional
The lower and upper range of the bins. If not provided, range
is simply ``(x.min(), x.max())``. Values outside the range are
ignored.
Returns
-------
statistic : array
The values of the selected statistic in each bin.
bin_edges : array of dtype float
Return the bin edges ``(length(statistic)+1)``.
binnumber: 1-D ndarray of ints
Indices of the bins (corresponding to `bin_edges`) in which each value
of `x` belongs. Same length as `values`. A binnumber of `i` means the
corresponding value is between (bin_edges[i-1], bin_edges[i]).
See Also
--------
numpy.digitize, numpy.histogram, binned_statistic_2d, binned_statistic_dd
Notes
-----
All but the last (righthand-most) bin is half-open. In other words, if
`bins` is ``[1, 2, 3, 4]``, then the first bin is ``[1, 2)`` (including 1,
but excluding 2) and the second ``[2, 3)``. The last bin, however, is
``[3, 4]``, which *includes* 4.
.. versionadded:: 0.11.0
Examples
--------
>>> from scipy import stats
>>> import matplotlib.pyplot as plt
First some basic examples:
Create two evenly spaced bins in the range of the given sample, and sum the
corresponding values in each of those bins:
>>> values = [1.0, 1.0, 2.0, 1.5, 3.0]
>>> stats.binned_statistic([1, 1, 2, 5, 7], values, 'sum', bins=2)
(array([ 4. , 4.5]), array([ 1., 4., 7.]), array([1, 1, 1, 2, 2]))
Multiple arrays of values can also be passed. The statistic is calculated
on each set independently:
>>> values = [[1.0, 1.0, 2.0, 1.5, 3.0], [2.0, 2.0, 4.0, 3.0, 6.0]]
>>> stats.binned_statistic([1, 1, 2, 5, 7], values, 'sum', bins=2)
(array([[ 4. , 4.5], [ 8. , 9. ]]), array([ 1., 4., 7.]),
array([1, 1, 1, 2, 2]))
>>> stats.binned_statistic([1, 2, 1, 2, 4], np.arange(5), statistic='mean',
... bins=3)
(array([ 1., 2., 4.]), array([ 1., 2., 3., 4.]),
array([1, 2, 1, 2, 3]))
As a second example, we now generate some random data of sailing boat speed
as a function of wind speed, and then determine how fast our boat is for
certain wind speeds:
>>> windspeed = 8 * np.random.rand(500)
>>> boatspeed = .3 * windspeed**.5 + .2 * np.random.rand(500)
>>> bin_means, bin_edges, binnumber = stats.binned_statistic(windspeed,
... boatspeed, statistic='median', bins=[1,2,3,4,5,6,7])
>>> plt.figure()
>>> plt.plot(windspeed, boatspeed, 'b.', label='raw data')
>>> plt.hlines(bin_means, bin_edges[:-1], bin_edges[1:], colors='g', lw=5,
... label='binned statistic of data')
>>> plt.legend()
Now we can use ``binnumber`` to select all datapoints with a windspeed
below 1:
>>> low_boatspeed = boatspeed[binnumber == 0]
As a final example, we will use ``bin_edges`` and ``binnumber`` to make a
plot of a distribution that shows the mean and distribution around that
mean per bin, on top of a regular histogram and the probability
distribution function:
>>> x = np.linspace(0, 5, num=500)
>>> x_pdf = stats.maxwell.pdf(x)
>>> samples = stats.maxwell.rvs(size=10000)
>>> bin_means, bin_edges, binnumber = stats.binned_statistic(x, x_pdf,
... statistic='mean', bins=25)
>>> bin_width = (bin_edges[1] - bin_edges[0])
>>> bin_centers = bin_edges[1:] - bin_width/2
>>> plt.figure()
>>> plt.hist(samples, bins=50, normed=True, histtype='stepfilled',
... alpha=0.2, label='histogram of data')
>>> plt.plot(x, x_pdf, 'r-', label='analytical pdf')
>>> plt.hlines(bin_means, bin_edges[:-1], bin_edges[1:], colors='g', lw=2,
... label='binned statistic of data')
>>> plt.plot((binnumber - 0.5) * bin_width, x_pdf, 'g.', alpha=0.5)
>>> plt.legend(fontsize=10)
>>> plt.show()
"""
try:
N = len(bins)
except TypeError:
N = 1
if N != 1:
bins = [np.asarray(bins, float)]
if range is not None:
if len(range) == 2:
range = [range]
medians, edges, binnumbers = binned_statistic_dd(
[x], values, statistic, bins, range)
return BinnedStatisticResult(medians, edges[0], binnumbers)
BinnedStatistic2dResult = namedtuple('BinnedStatistic2dResult',
('statistic', 'x_edge', 'y_edge',
'binnumber'))
def binned_statistic_2d(x, y, values, statistic='mean',
bins=10, range=None, expand_binnumbers=False):
"""
Compute a bidimensional binned statistic for one or more sets of data.
This is a generalization of a histogram2d function. A histogram divides
the space into bins, and returns the count of the number of points in
each bin. This function allows the computation of the sum, mean, median,
or other statistic of the values (or set of values) within each bin.
Parameters
----------
x : (N,) array_like
A sequence of values to be binned along the first dimension.
y : (N,) array_like
A sequence of values to be binned along the second dimension.
values : (N,) array_like or list of (N,) array_like
The data on which the statistic will be computed. This must be
the same shape as `x`, or a list of sequences - each with the same
shape as `x`. If `values` is such a list, the statistic will be
computed on each independently.
statistic : string or callable, optional
The statistic to compute (default is 'mean').
The following statistics are available:
* 'mean' : compute the mean of values for points within each bin.
Empty bins will be represented by NaN.
* 'median' : compute the median of values for points within each
bin. Empty bins will be represented by NaN.
* 'count' : compute the count of points within each bin. This is
identical to an unweighted histogram. `values` array is not
referenced.
* 'sum' : compute the sum of values for points within each bin.
This is identical to a weighted histogram.
* 'min' : compute the minimum of values for points within each bin.
Empty bins will be represented by NaN.
      * 'max' : compute the maximum of values for points within each bin.
Empty bins will be represented by NaN.
* function : a user-defined function which takes a 1D array of
values, and outputs a single numerical statistic. This function
will be called on the values in each bin. Empty bins will be
represented by function([]), or NaN if this returns an error.
bins : int or [int, int] or array_like or [array, array], optional
The bin specification:
* the number of bins for the two dimensions (nx = ny = bins),
* the number of bins in each dimension (nx, ny = bins),
* the bin edges for the two dimensions (x_edge = y_edge = bins),
* the bin edges in each dimension (x_edge, y_edge = bins).
If the bin edges are specified, the number of bins will be,
(nx = len(x_edge)-1, ny = len(y_edge)-1).
range : (2,2) array_like, optional
The leftmost and rightmost edges of the bins along each dimension
(if not specified explicitly in the `bins` parameters):
[[xmin, xmax], [ymin, ymax]]. All values outside of this range will be
considered outliers and not tallied in the histogram.
expand_binnumbers : bool, optional
'False' (default): the returned `binnumber` is a shape (N,) array of
linearized bin indices.
'True': the returned `binnumber` is 'unraveled' into a shape (2,N)
ndarray, where each row gives the bin numbers in the corresponding
dimension.
See the `binnumber` returned value, and the `Examples` section.
.. versionadded:: 0.17.0
Returns
-------
statistic : (nx, ny) ndarray
The values of the selected statistic in each two-dimensional bin.
x_edge : (nx + 1) ndarray
The bin edges along the first dimension.
y_edge : (ny + 1) ndarray
The bin edges along the second dimension.
binnumber : (N,) array of ints or (2,N) ndarray of ints
This assigns to each element of `sample` an integer that represents the
bin in which this observation falls. The representation depends on the
`expand_binnumbers` argument. See `Notes` for details.
See Also
--------
numpy.digitize, numpy.histogram2d, binned_statistic, binned_statistic_dd
Notes
-----
Binedges:
All but the last (righthand-most) bin is half-open. In other words, if
`bins` is ``[1, 2, 3, 4]``, then the first bin is ``[1, 2)`` (including 1,
but excluding 2) and the second ``[2, 3)``. The last bin, however, is
``[3, 4]``, which *includes* 4.
`binnumber`:
This returned argument assigns to each element of `sample` an integer that
represents the bin in which it belongs. The representation depends on the
`expand_binnumbers` argument. If 'False' (default): The returned
`binnumber` is a shape (N,) array of linearized indices mapping each
element of `sample` to its corresponding bin (using row-major ordering).
If 'True': The returned `binnumber` is a shape (2,N) ndarray where
each row indicates bin placements for each dimension respectively. In each
dimension, a binnumber of `i` means the corresponding value is between
(D_edge[i-1], D_edge[i]), where 'D' is either 'x' or 'y'.
.. versionadded:: 0.11.0
Examples
--------
>>> from scipy import stats
Calculate the counts with explicit bin-edges:
>>> x = [0.1, 0.1, 0.1, 0.6]
>>> y = [2.1, 2.6, 2.1, 2.1]
>>> binx = [0.0, 0.5, 1.0]
>>> biny = [2.0, 2.5, 3.0]
>>> ret = stats.binned_statistic_2d(x, y, None, 'count', bins=[binx,biny])
>>> ret.statistic
array([[ 2., 1.],
[ 1., 0.]])
The bin in which each sample is placed is given by the `binnumber`
returned parameter. By default, these are the linearized bin indices:
>>> ret.binnumber
array([5, 6, 5, 9])
The bin indices can also be expanded into separate entries for each
dimension using the `expand_binnumbers` parameter:
>>> ret = stats.binned_statistic_2d(x, y, None, 'count', bins=[binx,biny],
... expand_binnumbers=True)
>>> ret.binnumber
array([[1, 1, 1, 2],
[1, 2, 1, 1]])
Which shows that the first three elements belong in the xbin 1, and the
fourth into xbin 2; and so on for y.
"""
# This code is based on np.histogram2d
try:
N = len(bins)
except TypeError:
N = 1
if N != 1 and N != 2:
xedges = yedges = np.asarray(bins, float)
bins = [xedges, yedges]
medians, edges, binnumbers = binned_statistic_dd(
[x, y], values, statistic, bins, range,
expand_binnumbers=expand_binnumbers)
return BinnedStatistic2dResult(medians, edges[0], edges[1], binnumbers)
BinnedStatisticddResult = namedtuple('BinnedStatisticddResult',
('statistic', 'bin_edges',
'binnumber'))
def binned_statistic_dd(sample, values, statistic='mean',
bins=10, range=None, expand_binnumbers=False):
"""
Compute a multidimensional binned statistic for a set of data.
This is a generalization of a histogramdd function. A histogram divides
the space into bins, and returns the count of the number of points in
each bin. This function allows the computation of the sum, mean, median,
or other statistic of the values within each bin.
Parameters
----------
sample : array_like
Data to histogram passed as a sequence of D arrays of length N, or
as an (N,D) array.
values : (N,) array_like or list of (N,) array_like
The data on which the statistic will be computed. This must be
the same shape as `x`, or a list of sequences - each with the same
shape as `x`. If `values` is such a list, the statistic will be
computed on each independently.
statistic : string or callable, optional
The statistic to compute (default is 'mean').
The following statistics are available:
* 'mean' : compute the mean of values for points within each bin.
Empty bins will be represented by NaN.
* 'median' : compute the median of values for points within each
bin. Empty bins will be represented by NaN.
* 'count' : compute the count of points within each bin. This is
identical to an unweighted histogram. `values` array is not
referenced.
* 'sum' : compute the sum of values for points within each bin.
This is identical to a weighted histogram.
* 'min' : compute the minimum of values for points within each bin.
Empty bins will be represented by NaN.
      * 'max' : compute the maximum of values for points within each bin.
Empty bins will be represented by NaN.
* function : a user-defined function which takes a 1D array of
values, and outputs a single numerical statistic. This function
will be called on the values in each bin. Empty bins will be
represented by function([]), or NaN if this returns an error.
bins : sequence or int, optional
The bin specification must be in one of the following forms:
* A sequence of arrays describing the bin edges along each dimension.
* The number of bins for each dimension (nx, ny, ... = bins).
* The number of bins for all dimensions (nx = ny = ... = bins).
range : sequence, optional
A sequence of lower and upper bin edges to be used if the edges are
        not given explicitly in `bins`. Defaults to the minimum and maximum
values along each dimension.
expand_binnumbers : bool, optional
'False' (default): the returned `binnumber` is a shape (N,) array of
linearized bin indices.
'True': the returned `binnumber` is 'unraveled' into a shape (D,N)
ndarray, where each row gives the bin numbers in the corresponding
dimension.
See the `binnumber` returned value, and the `Examples` section of
`binned_statistic_2d`.
.. versionadded:: 0.17.0
Returns
-------
statistic : ndarray, shape(nx1, nx2, nx3,...)
        The values of the selected statistic in each bin.
bin_edges : list of ndarrays
A list of D arrays describing the (nxi + 1) bin edges for each
dimension.
binnumber : (N,) array of ints or (D,N) ndarray of ints
This assigns to each element of `sample` an integer that represents the
bin in which this observation falls. The representation depends on the
`expand_binnumbers` argument. See `Notes` for details.
See Also
--------
numpy.digitize, numpy.histogramdd, binned_statistic, binned_statistic_2d
Notes
-----
Binedges:
All but the last (righthand-most) bin is half-open in each dimension. In
other words, if `bins` is ``[1, 2, 3, 4]``, then the first bin is
``[1, 2)`` (including 1, but excluding 2) and the second ``[2, 3)``. The
last bin, however, is ``[3, 4]``, which *includes* 4.
`binnumber`:
This returned argument assigns to each element of `sample` an integer that
represents the bin in which it belongs. The representation depends on the
`expand_binnumbers` argument. If 'False' (default): The returned
`binnumber` is a shape (N,) array of linearized indices mapping each
element of `sample` to its corresponding bin (using row-major ordering).
If 'True': The returned `binnumber` is a shape (D,N) ndarray where
each row indicates bin placements for each dimension respectively. In each
dimension, a binnumber of `i` means the corresponding value is between
(bin_edges[D][i-1], bin_edges[D][i]), for each dimension 'D'.
.. versionadded:: 0.11.0
"""
    known_stats = ['mean', 'median', 'count', 'sum', 'std', 'min', 'max']
if not callable(statistic) and statistic not in known_stats:
raise ValueError('invalid statistic %r' % (statistic,))
# `Ndim` is the number of dimensions (e.g. `2` for `binned_statistic_2d`)
# `Dlen` is the length of elements along each dimension.
# This code is based on np.histogramdd
try:
# `sample` is an ND-array.
Dlen, Ndim = sample.shape
except (AttributeError, ValueError):
# `sample` is a sequence of 1D arrays.
sample = np.atleast_2d(sample).T
Dlen, Ndim = sample.shape
# Store initial shape of `values` to preserve it in the output
values = np.asarray(values)
input_shape = list(values.shape)
# Make sure that `values` is 2D to iterate over rows
values = np.atleast_2d(values)
Vdim, Vlen = values.shape
# Make sure `values` match `sample`
if(statistic != 'count' and Vlen != Dlen):
raise AttributeError('The number of `values` elements must match the '
'length of each `sample` dimension.')
nbin = np.empty(Ndim, int) # Number of bins in each dimension
edges = Ndim * [None] # Bin edges for each dim (will be 2D array)
dedges = Ndim * [None] # Spacing between edges (will be 2D array)
try:
M = len(bins)
if M != Ndim:
raise AttributeError('The dimension of bins must be equal '
'to the dimension of the sample x.')
except TypeError:
bins = Ndim * [bins]
# Select range for each dimension
# Used only if number of bins is given.
if range is None:
smin = np.atleast_1d(np.array(sample.min(axis=0), float))
smax = np.atleast_1d(np.array(sample.max(axis=0), float))
else:
smin = np.zeros(Ndim)
smax = np.zeros(Ndim)
for i in xrange(Ndim):
smin[i], smax[i] = range[i]
# Make sure the bins have a finite width.
for i in xrange(len(smin)):
if smin[i] == smax[i]:
smin[i] = smin[i] - .5
smax[i] = smax[i] + .5
# Create edge arrays
for i in xrange(Ndim):
if np.isscalar(bins[i]):
nbin[i] = bins[i] + 2 # +2 for outlier bins
edges[i] = np.linspace(smin[i], smax[i], nbin[i] - 1)
else:
edges[i] = np.asarray(bins[i], float)
nbin[i] = len(edges[i]) + 1 # +1 for outlier bins
dedges[i] = np.diff(edges[i])
nbin = np.asarray(nbin)
# Compute the bin number each sample falls into, in each dimension
sampBin = {}
for i in xrange(Ndim):
sampBin[i] = np.digitize(sample[:, i], edges[i])
# Using `digitize`, values that fall on an edge are put in the right bin.
# For the rightmost bin, we want values equal to the right
# edge to be counted in the last bin, and not as an outlier.
for i in xrange(Ndim):
# Find the rounding precision
decimal = int(-np.log10(dedges[i].min())) + 6
# Find which points are on the rightmost edge.
on_edge = np.where(np.around(sample[:, i], decimal) ==
np.around(edges[i][-1], decimal))[0]
# Shift these points one bin to the left.
sampBin[i][on_edge] -= 1
# Compute the sample indices in the flattened statistic matrix.
ni = nbin.argsort()
# `binnumbers` is which bin (in linearized `Ndim` space) each sample goes
binnumbers = np.zeros(Dlen, int)
for i in xrange(0, Ndim - 1):
binnumbers += sampBin[ni[i]] * nbin[ni[i + 1:]].prod()
binnumbers += sampBin[ni[-1]]
result = np.empty([Vdim, nbin.prod()], float)
if statistic == 'mean':
result.fill(np.nan)
flatcount = np.bincount(binnumbers, None)
a = flatcount.nonzero()
for vv in xrange(Vdim):
flatsum = np.bincount(binnumbers, values[vv])
result[vv, a] = flatsum[a] / flatcount[a]
elif statistic == 'std':
result.fill(0)
flatcount = np.bincount(binnumbers, None)
a = flatcount.nonzero()
for vv in xrange(Vdim):
flatsum = np.bincount(binnumbers, values[vv])
flatsum2 = np.bincount(binnumbers, values[vv] ** 2)
result[vv, a] = np.sqrt(flatsum2[a] / flatcount[a] -
(flatsum[a] / flatcount[a]) ** 2)
elif statistic == 'count':
result.fill(0)
flatcount = np.bincount(binnumbers, None)
a = np.arange(len(flatcount))
result[:, a] = flatcount[np.newaxis, :]
elif statistic == 'sum':
result.fill(0)
for vv in xrange(Vdim):
flatsum = np.bincount(binnumbers, values[vv])
a = np.arange(len(flatsum))
result[vv, a] = flatsum
elif statistic == 'median':
result.fill(np.nan)
for i in np.unique(binnumbers):
for vv in xrange(Vdim):
result[vv, i] = np.median(values[vv, binnumbers == i])
elif statistic == 'min':
result.fill(np.nan)
for i in np.unique(binnumbers):
for vv in xrange(Vdim):
result[vv, i] = np.min(values[vv, binnumbers == i])
elif statistic == 'max':
result.fill(np.nan)
for i in np.unique(binnumbers):
for vv in xrange(Vdim):
result[vv, i] = np.max(values[vv, binnumbers == i])
elif callable(statistic):
with warnings.catch_warnings():
            # Numpy generates a warning for mean/std/... with an empty list
warnings.filterwarnings('ignore', category=RuntimeWarning)
old = np.seterr(invalid='ignore')
try:
null = statistic([])
except:
null = np.nan
np.seterr(**old)
result.fill(null)
for i in np.unique(binnumbers):
for vv in xrange(Vdim):
result[vv, i] = statistic(values[vv, binnumbers == i])
# Shape into a proper matrix
result = result.reshape(np.append(Vdim, np.sort(nbin)))
for i in xrange(nbin.size):
j = ni.argsort()[i]
        # Accommodate the extra `Vdim` dimension-zero with `+1`
result = result.swapaxes(i+1, j+1)
ni[i], ni[j] = ni[j], ni[i]
# Remove outliers (indices 0 and -1 for each bin-dimension).
core = [slice(None)] + Ndim * [slice(1, -1)]
result = result[core]
# Unravel binnumbers into an ndarray, each row the bins for each dimension
if(expand_binnumbers and Ndim > 1):
binnumbers = np.asarray(np.unravel_index(binnumbers, nbin))
if np.any(result.shape[1:] != nbin - 2):
raise RuntimeError('Internal Shape Error')
    # Reshape to have output (`result`) match input (`values`) shape
result = result.reshape(input_shape[:-1] + list(nbin-2))
return BinnedStatisticddResult(result, edges, binnumbers)
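# Illustrative sketch (not part of the original module): bin 2-D sample points
# on a 3x3 grid and take the mean of the associated values in each cell; the
# random data below is made up purely for demonstration.
def _demo_binned_statistic_dd():
    rng = np.random.RandomState(0)
    sample = rng.rand(100, 2)   # N points in D=2 dimensions
    values = rng.rand(100)      # one value per point
    return binned_statistic_dd(sample, values, statistic='mean', bins=[3, 3])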
|
apache-2.0
|
yaukwankiu/twstocks
|
mark2.py
|
1
|
16302
|
# -*- coding: utf8 -*-
"""
USE:
from mark2 import *
main()
"""
############################
# imports
#from matplotlib import ion
#ion()
import time
import datetime
import urllib2
import re
import sys
import os
#import random
import pickle
import numpy as np
import matplotlib.pyplot as plt
############################
# defining the parameters
currentPriceRegex = re.compile(r'(?<=\<td\ align\=\"center\"\ bgcolor\=\"\#FFFfff\"\ nowrap\>\<b\>)\d*\.\d*(?=\<\/b\>\<\/td\>)')
#companyNameRegex = re.compile( ur'(?<=\<TITLE\>).+(?=-公司資料-奇摩股市\<\/TITLE\>)',re.UNICODE) #doesn't work somehow
companyNameRegex = re.compile( ur'\<TITLE.+TITLE\>', re.UNICODE)
companyPageUrlRegex = re.compile(ur"(?<=\' target\=\'_NONE\'\>)http\:\/\/.+?\/" )
stockSymbolsList = []
outputFolder = "c:/chen chen/stocks/"
stockSymbolsFile='stockSymbols.pydump'
pricesFolder = outputFolder+ "prices/"
stocksFolder = outputFolder +"stocks/"
foldersList = [stocksFolder, pricesFolder]
numberOfPricesToShow = 10
stocksList=[]
############################
#
############################
# defining the classes
class stock:
def __init__(self, symbol):
"""e.g.
https://tw.stock.yahoo.com/d/s/company_1473.html
"""
symbol= ('000'+str(symbol))[-4:]
self.symbol = symbol
self.yahooFrontPageUrl = 'https://tw.stock.yahoo.com/d/s/company_' + symbol + '.html'
self.yahooCurrentPageUrl = 'https://tw.stock.yahoo.com/q/q?s=' + symbol
# get some basic information from the front page
self.name = str(symbol) #default
try:
yahooFrontPage = urllib2.urlopen(self.yahooFrontPageUrl)
raw_text = yahooFrontPage.read()
self.name = companyNameRegex.findall(raw_text)[0]
self.name = self.name[7:-26]
except:
print "Can't open yahooFrontPage for symbol ", symbol
self.pricesList = []
#return self
def __call__(self, numberOfPricesToShow=numberOfPricesToShow):
outputString = ""
#outputString += self.symbol + '\n' #unnecessary
outputString += self.name + '\n'
outputString += self.yahooCurrentPageUrl + '\n'
outputString += '\n'.join([time.asctime(time.localtime((v['pingTime'])))+ ": $" + str(v['price']) for v in self.pricesList][-numberOfPricesToShow:])
print outputString
return self
def openYahooCurrentPage(self):
self.yahooCurrentPage = urllib2.urlopen(self.yahooCurrentPageUrl)
def getCurrentPrice(self, verbose=True, showResponseTime=True):
self.openYahooCurrentPage()
t0 = time.time()
raw_text = self.yahooCurrentPage.read()
t1 = time.time()
self.yahooCurrentPage.close()
currentPrice = float(currentPriceRegex.findall(raw_text)[0])
self.currentPricePingTime = t0
self.currentPricePingReturnTime = t1
self.currentPrice = currentPrice
if verbose:
print "Time: ", time.asctime(time.localtime(t0)),
if showResponseTime:
print "(response time: ", t1-t0, ")",
#print self.symbol, #unnecessary
print self.name, "Price:", currentPrice
self.pricesList.append({'price' : currentPrice,
'pingTime' : t0,
'responseTime' : t1-t0,
})
return currentPrice, t0, t1-t0
def writeCurrentPrice(self, verbose=True):
P = self.pricesList[-1] # the last one
currentPrice = P['price']
t0 = P['pingTime']
dt = P['responseTime']
outputString= ''
if not os.path.exists(pricesFolder+self.name+'.dat'):
outputString = "#time, price, response time\n"
else:
outputString = ""
outputString += str(t0) + ", " + str(currentPrice)
if dt>1:
outputString += ", " + str(int(dt))
outputString += '\n'
open(pricesFolder+self.name+'.dat','a').write(outputString)
if verbose:
print self.name, outputString
def getPriceList(self, throttle=0.3, repetitions=-999, verbose=True):
count = 0
while count!= repetitions:
count +=1
            # getCurrentPrice() already appends the quote to self.pricesList,
            # so it is not recorded a second time here.
            p, t0, dt = self.getCurrentPrice(verbose=verbose)
if throttle>0:
time.sleep(throttle)
def loadPrices(self, pricesPath="", eraseOld=True, verbose=False):
if eraseOld:
self.pricesList = []
if pricesPath == "":
pricesPath = pricesFolder + self.name + ".dat"
if not os.path.exists(pricesPath):
return []
raw_text = open(pricesPath, 'r').read()
x = raw_text.split('\n')[1:]
xx = [v.split(',') for v in x]
for u in xx:
if verbose:
print u
if len(u) ==2:
self.pricesList.append({'price' : float(u[1]),
'pingTime' : float(u[0]) ,
'responseTime': 0
})
elif len(u) ==3:
self.pricesList.append({'price' : float(u[1]),
'pingTime' : float(u[0]) ,
'responseTime': float(u[2])
})
return self
def load(self, *args, **kwargs):
return self.loadPrices(*args, **kwargs)
def plot(self, display=True, block=False):
plt.close()
y = [v['price'] for v in self.pricesList]
x = [v['pingTime'] for v in self.pricesList]
plt.plot(x,y)
plt.title(self.symbol+":" + time.asctime(time.localtime()))
#plt.title(self.symbol+":" + time.asctime(time.localtime()) +\
# getCompanyPageUrl(self))
if display:
plt.show(block=block)
return self
############################
# defining the functions
def loadStock(symbol, folder=stocksFolder, verbose=True):
symbol = str(symbol)
L = os.listdir(folder)
L = [v for v in L if symbol in v]
    if len(L) == 0:
        print symbol, "not found!!"
        return None
    if verbose:
        print "Folder:", folder+L[0]
    st = pickle.load(open(folder + L[0], 'r'))
    print st.name, "loaded"
    return st
def getStockSymbolsList1():
for N in range(9999):
try:
s = stock(N)
stockSymbolsList.append(N)
print N, s.name, "<-------------added"
except:
print N, "doesn't exist!"
    return stockSymbolsList
def getStockSymbolsList2(url="http://sheet1688.blogspot.tw/2008/11/blog-post_18.html"):
raw_text = urllib2.urlopen(url).read()
symbols = re.findall(ur'(?<=num\>)\d\d\d\d(?=\<\/td\>)', raw_text, re.UNICODE)
symbols.sort()
pickle.dump(symbols, open(outputFolder+stockSymbolsFile,'w'))
stockSymbolsList = symbols
return symbols
def loadStockSymbolsList(path=outputFolder+stockSymbolsFile):
stockSymbolsList = pickle.load(open(path,'r'))
return stockSymbolsList
def makeStocksList(inPath=outputFolder+stockSymbolsFile,
outputFolder=stocksFolder):
symbols = loadStockSymbolsList()
for N in symbols:
try:
st = stock(N)
pickle.dump(st, open(outputFolder+st.name+'.pydump','w'))
print st.name, "-->", outputFolder+st.name+'.pydump'
except:
print "stock symbol", N, "not found!!!!"
def loadStocksList(inputFolder=stocksFolder):
stocksList = []
L = os.listdir(inputFolder)
L.sort(key=lambda v: v[-13:-7])
for fileName in L:
stocksList.append(pickle.load(open(inputFolder+fileName,'r')))
return stocksList
def writeCurrentStockPrices(verbose=True):
stocksList = loadStocksList()
for st in stocksList:
try:
st.getCurrentPrice()
#if verbose:
# print st.name, st.pricesList[-1]
except:
print st.name, "<-- can't get current price!"
try:
st.writeCurrentPrice(verbose=verbose)
except:
print " ", st.name, "<-- no price to write!"
time.sleep(0.5)
def isTradingHour():
"""determine if it is trading Hour"""
#return ((time.localtime(time.time()).tm_hour >8 and time.localtime(time.time()).tm_hour <13) or\
# (time.localtime(time.time()).tm_hour==13 and time.localtime(time.time()).tm_min<=40) or\
return ((time.localtime(time.time()).tm_hour >8 and time.localtime(time.time()).tm_hour <14) or\
(time.localtime(time.time()).tm_hour==14 and time.localtime(time.time()).tm_min<=30) or\
(time.localtime(time.time()).tm_hour==8 and time.localtime(time.time()).tm_min>=30))\
and\
( time.localtime(time.time()).tm_wday <=4)
def clearStockPrices(stocksList=stocksList):
for st in stocksList:
st.pricesList = []
def initialise(toGetSymbols=False, toMakeStockObjects=True ):
"""to initialise the project, setting up the folders etc"""
# creating the folders
for path in foldersList:
if not os.path.exists(path):
os.makedirs(path)
# getting the stock index lists
# constructing the stocks objects
if toGetSymbols:
symbols = getStockSymbolsList2()
else:
symbols = loadStockSymbolsList()
if toMakeStockObjects:
makeStocksList()
def summary(stocks=""):
if stocks =="":
try:
stocks=stocksList
except:
stocks = examples()
for st in stocks:
st.load()
st()
st.plot()
return stocks
def find(key1=""):
L = [v for v in stocksList if key1 in v.name]
if len(L)==1:
L=L[0]
return L
def check(symbol):
return stock(symbol)().load()().plot()
def getCompanyPageUrl(st):
    yahooFrontPage = urllib2.urlopen(st.yahooFrontPageUrl)
raw_text = yahooFrontPage.read()
try:
companyPageUrl = companyPageUrlRegex.findall(raw_text)[0]
except:
companyPageUrl = ""
return companyPageUrl
###
############################
# test run
def main0():
for st in stocksList:
st()
st.getPriceList(repetitions=5, throttle=0.3)
def main1():
for st in stocksList:
st()
print "=================="
while True:
for st in stocksList:
st.getCurrentPrice()
time.sleep(.5)
def main2(#toWatch="fixed",
toWatch="random",
#toWatch="both",
timeSleep=5,
verbose=False):
print "=================="
print time.asctime(time.localtime(time.time()))
#symbols = loadStockSymbolsList()
if not isTradingHour() and (verbose=="yes" or verbose=="true" or verbose):
print "not trading hour!"
for st in stocksList:
st.load()
for st in stocksList:
st()
time.sleep(1)
writeCurrentStockPrices() #if after hour, do it once
stocks = loadStocksList()
randomPosition = int(np.random.random()*len(stocks))
stocks = stocks[randomPosition:] + stocks[:randomPosition]
while True:
time0= time.time()
time1= time0
#print "loading stocks"
print time.asctime(time.localtime(time.time()))
#stocks = loadStocksList() #clean up every day
while not isTradingHour():
if toWatch =='random':
watchRandom(stocks=stocks)
elif toWatch =='fixed':
watch()
else:
watchRandom(stocks=stocks, timeSleep=timeSleep)
watchRandom(stocks=stocks, timeSleep=timeSleep)
watchRandom(stocks=stocks, timeSleep=timeSleep)
watch(timeSleep=timeSleep)
watchRandom(stocks=stocks, timeSleep=timeSleep)
while isTradingHour():
for st in stocks:
                if time.time()-time0 > 600: #every 10 minutes
                    # use a separate loop variable so the outer `st` from
                    # `for st in stocks` is not clobbered
                    for watched in stocksList:
                        watched() # watch selected stocks
                    time0 = time.time()
if (time.time() - time0) % timeSleep < 0.7:
plt.close()
stockRandom = stocks[int(np.random.random()*len(stocks))]
try:
stockRandom.load()
stockRandom.getCurrentPrice()
#stockRandom.writeCurrentPrice()
stockRandom.plot()
except:
print "Can't get data for: ",stockRandom.name
try:
currentPrice, t0, dt = st.getCurrentPrice()
if not os.path.exists(pricesFolder+st.name+'.dat'):
outputString = "#time, price, response time\n"
else:
outputString = ""
outputString += str(t0) + ", " + str(currentPrice)
if dt>1:
outputString += ", " + str(int(dt))
outputString += '\n'
open(pricesFolder+st.name+'.dat','a').write(outputString)
time.sleep(.3)
except:
print "ERROR!! <------ ", st.name
T = time.localtime()
print time.asctime(T)
#if T.tm_hour < 9 or T.tm_hour>=13 and T.tm_min>=30:
# time.sleep(86400 - (13-9)*3600 - 30*60)
#print "End of the trading session of the day!"
def main(*args, **kwargs):
main2(*args, **kwargs)
def getWatchList():
############################
# constructing examples
symbols = [1473,
#2926, #no data
2374, 1319, 2498, 2511]
tainam = ""
chenpinsen = ""
ganung = ""
tungyang = ""
htc = ""
prince = ""
stocksList = []
try:
tainam = stock(symbol='1473')
#chenpinsen = stock(symbol=2926) #no data
ganung = stock(symbol=2374)
tungyang = stock(symbol=1319)
htc = stock(2498)
prince = stock(2511)
stocksList = [tainam,
#chenpinsen,
ganung, tungyang, htc, prince]
except:
print "Error constructing the %dth example!" % (len(stocksList)+1)
    for s in symbols:
        try:
            stocksList.append(stock(symbol=s))
        except:
            print "Error constructing stock with symbol " + str(s)
##############################
return stocksList
def watch(L="", load=True, display=True, timeSleep=5):
if L =="":
L = getWatchList()
for st in L:
if load:
st.load()
st(30)
if display:
st.plot()
time.sleep(timeSleep)
def watchRandom(stocks="", timeSleep=10):
if stocks=="":
stocks = loadStocksList()
print '...............'
print time.asctime(time.localtime(time.time()))
N = int(len(stocks)* np.random.random())
st = stocks[N]
st.load(verbose=False)
st(5)
st.plot()
seconds = time.localtime().tm_sec
#time.sleep(60-seconds-0.05)
time.sleep(timeSleep)
if __name__=="__main__":
print "sleeping 5 seconds"
time.sleep(5)
tainam = stock(symbol='1473')
chenpinsen = stock(symbol=2926)
ganung = stock(symbol=2374)
tungyang = stock(symbol=1319)
htc = stock(2498)
prince = stock(2511)
stocksList = [tainam, chenpinsen, ganung, tungyang, htc, prince]
# test run
main(*sys.argv[1:])
#######################################
# examples
#if __name__ != "__main__":
# stocksList = loadStocksList()
# examples = examples()
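# Illustrative sketch (not part of the original script): review the cached
# price history of one stock without polling Yahoo again; it assumes a prices
# file was already written by writeCurrentStockPrices().
def exampleOfflineReview(symbol=2498):
    st = stock(symbol)
    st.loadPrices()
    st(numberOfPricesToShow=5)
    return st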
|
cc0-1.0
|
mistercrunch/panoramix
|
superset/models/helpers.py
|
2
|
16383
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""a collection of model-related helper classes and functions"""
import json
import logging
import re
import uuid
from datetime import datetime, timedelta
from json.decoder import JSONDecodeError
from typing import Any, Dict, List, Optional, Set, Union
import humanize
import pandas as pd
import pytz
import sqlalchemy as sa
import yaml
from flask import escape, g, Markup
from flask_appbuilder.models.decorators import renders
from flask_appbuilder.models.mixins import AuditMixin
from flask_appbuilder.security.sqla.models import User
from sqlalchemy import and_, or_, UniqueConstraint
from sqlalchemy.ext.declarative import declared_attr
from sqlalchemy.orm import Mapper, Session
from sqlalchemy.orm.exc import MultipleResultsFound
from sqlalchemy_utils import UUIDType
from superset.utils.core import QueryStatus
logger = logging.getLogger(__name__)
def json_to_dict(json_str: str) -> Dict[Any, Any]:
if json_str:
val = re.sub(",[ \t\r\n]+}", "}", json_str)
val = re.sub(",[ \t\r\n]+\\]", "]", val)
return json.loads(val)
return {}
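# Illustrative sketch (not part of the original module): json_to_dict strips a
# trailing comma before a closing brace or bracket, so slightly sloppy JSON
# from stored ``params`` blobs still loads; the input string below is made up.
def _demo_json_to_dict() -> Dict[Any, Any]:
    sloppy = '{"metric": "count", "groupby": ["name",\n ]\n}'
    return json_to_dict(sloppy)  # -> {'metric': 'count', 'groupby': ['name']}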
def convert_uuids(obj: Any) -> Any:
"""
Convert UUID objects to str so we can use yaml.safe_dump
"""
if isinstance(obj, uuid.UUID):
return str(obj)
if isinstance(obj, list):
return [convert_uuids(el) for el in obj]
if isinstance(obj, dict):
return {k: convert_uuids(v) for k, v in obj.items()}
return obj
class ImportExportMixin:
uuid = sa.Column(
UUIDType(binary=True), primary_key=False, unique=True, default=uuid.uuid4
)
export_parent: Optional[str] = None
# The name of the attribute
# with the SQL Alchemy back reference
export_children: List[str] = []
# List of (str) names of attributes
# with the SQL Alchemy forward references
export_fields: List[str] = []
# The names of the attributes
# that are available for import and export
extra_import_fields: List[str] = []
# Additional fields that should be imported,
# even though they were not exported
__mapper__: Mapper
@classmethod
def _unique_constrains(cls) -> List[Set[str]]:
"""Get all (single column and multi column) unique constraints"""
unique = [
{c.name for c in u.columns}
for u in cls.__table_args__ # type: ignore
if isinstance(u, UniqueConstraint)
]
unique.extend(
{c.name} for c in cls.__table__.columns if c.unique # type: ignore
)
return unique
@classmethod
def parent_foreign_key_mappings(cls) -> Dict[str, str]:
"""Get a mapping of foreign name to the local name of foreign keys"""
parent_rel = cls.__mapper__.relationships.get(cls.export_parent)
if parent_rel:
return {l.name: r.name for (l, r) in parent_rel.local_remote_pairs}
return {}
@classmethod
def export_schema(
cls, recursive: bool = True, include_parent_ref: bool = False
) -> Dict[str, Any]:
"""Export schema as a dictionary"""
parent_excludes = set()
if not include_parent_ref:
parent_ref = cls.__mapper__.relationships.get(cls.export_parent)
if parent_ref:
parent_excludes = {column.name for column in parent_ref.local_columns}
def formatter(column: sa.Column) -> str:
return (
"{0} Default ({1})".format(str(column.type), column.default.arg)
if column.default
else str(column.type)
)
schema: Dict[str, Any] = {
column.name: formatter(column)
for column in cls.__table__.columns # type: ignore
if (column.name in cls.export_fields and column.name not in parent_excludes)
}
if recursive:
for column in cls.export_children:
child_class = cls.__mapper__.relationships[column].argument.class_
schema[column] = [
child_class.export_schema(
recursive=recursive, include_parent_ref=include_parent_ref
)
]
return schema
@classmethod
def import_from_dict(
# pylint: disable=too-many-arguments,too-many-branches,too-many-locals
cls,
session: Session,
dict_rep: Dict[Any, Any],
parent: Optional[Any] = None,
recursive: bool = True,
sync: Optional[List[str]] = None,
) -> Any:
"""Import obj from a dictionary"""
if sync is None:
sync = []
parent_refs = cls.parent_foreign_key_mappings()
export_fields = (
set(cls.export_fields)
| set(cls.extra_import_fields)
| set(parent_refs.keys())
| {"uuid"}
)
new_children = {c: dict_rep[c] for c in cls.export_children if c in dict_rep}
unique_constrains = cls._unique_constrains()
filters = [] # Using these filters to check if obj already exists
# Remove fields that should not get imported
for k in list(dict_rep):
if k not in export_fields and k not in parent_refs:
del dict_rep[k]
if not parent:
if cls.export_parent:
for prnt in parent_refs.keys():
if prnt not in dict_rep:
raise RuntimeError(
"{0}: Missing field {1}".format(cls.__name__, prnt)
)
else:
# Set foreign keys to parent obj
for k, v in parent_refs.items():
dict_rep[k] = getattr(parent, v)
# Add filter for parent obj
filters.extend([getattr(cls, k) == dict_rep.get(k) for k in parent_refs.keys()])
# Add filter for unique constraints
ucs = [
and_(
*[
getattr(cls, k) == dict_rep.get(k)
for k in cs
if dict_rep.get(k) is not None
]
)
for cs in unique_constrains
]
filters.append(or_(*ucs))
# Check if object already exists in DB, break if more than one is found
try:
obj_query = session.query(cls).filter(and_(*filters))
obj = obj_query.one_or_none()
except MultipleResultsFound as ex:
logger.error(
"Error importing %s \n %s \n %s",
cls.__name__,
str(obj_query),
yaml.safe_dump(dict_rep),
exc_info=True,
)
raise ex
if not obj:
is_new_obj = True
# Create new DB object
obj = cls(**dict_rep) # type: ignore
logger.info("Importing new %s %s", obj.__tablename__, str(obj))
if cls.export_parent and parent:
setattr(obj, cls.export_parent, parent)
session.add(obj)
else:
is_new_obj = False
logger.info("Updating %s %s", obj.__tablename__, str(obj))
# Update columns
for k, v in dict_rep.items():
setattr(obj, k, v)
# Recursively create children
if recursive:
for child in cls.export_children:
child_class = cls.__mapper__.relationships[child].argument.class_
added = []
for c_obj in new_children.get(child, []):
added.append(
child_class.import_from_dict(
session=session, dict_rep=c_obj, parent=obj, sync=sync
)
)
# If children should get synced, delete the ones that did not
# get updated.
if child in sync and not is_new_obj:
back_refs = child_class.parent_foreign_key_mappings()
delete_filters = [
getattr(child_class, k) == getattr(obj, back_refs.get(k))
for k in back_refs.keys()
]
to_delete = set(
session.query(child_class).filter(and_(*delete_filters))
).difference(set(added))
for o in to_delete:
logger.info("Deleting %s %s", child, str(obj))
session.delete(o)
return obj
def export_to_dict(
self,
recursive: bool = True,
include_parent_ref: bool = False,
include_defaults: bool = False,
export_uuids: bool = False,
) -> Dict[Any, Any]:
"""Export obj to dictionary"""
export_fields = set(self.export_fields)
if export_uuids:
export_fields.add("uuid")
if "id" in export_fields:
export_fields.remove("id")
cls = self.__class__
parent_excludes = set()
if recursive and not include_parent_ref:
parent_ref = cls.__mapper__.relationships.get(cls.export_parent)
if parent_ref:
parent_excludes = {c.name for c in parent_ref.local_columns}
dict_rep = {
c.name: getattr(self, c.name)
for c in cls.__table__.columns # type: ignore
if (
c.name in export_fields
and c.name not in parent_excludes
and (
include_defaults
or (
getattr(self, c.name) is not None
and (not c.default or getattr(self, c.name) != c.default.arg)
)
)
)
}
# sort according to export_fields using DSU (decorate, sort, undecorate)
order = {field: i for i, field in enumerate(self.export_fields)}
decorated_keys = [(order.get(k, len(order)), k) for k in dict_rep]
decorated_keys.sort()
dict_rep = {k: dict_rep[k] for _, k in decorated_keys}
if recursive:
for cld in self.export_children:
# sorting to make lists of children stable
dict_rep[cld] = sorted(
[
child.export_to_dict(
recursive=recursive,
include_parent_ref=include_parent_ref,
include_defaults=include_defaults,
)
for child in getattr(self, cld)
],
key=lambda k: sorted(str(k.items())),
)
return convert_uuids(dict_rep)
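    # Typical round trip (illustrative; `SomeModel` stands for any model using
    # this mixin): `payload = obj.export_to_dict(recursive=True)` yields plain
    # dicts/lists suitable for YAML/JSON serialization, and
    # `SomeModel.import_from_dict(session, payload)` recreates or updates the
    # corresponding object graph on another instance.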
def override(self, obj: Any) -> None:
"""Overrides the plain fields of the dashboard."""
for field in obj.__class__.export_fields:
setattr(self, field, getattr(obj, field))
def copy(self) -> Any:
"""Creates a copy of the dashboard without relationships."""
new_obj = self.__class__()
new_obj.override(self)
return new_obj
def alter_params(self, **kwargs: Any) -> None:
params = self.params_dict
params.update(kwargs)
self.params = json.dumps(params)
def remove_params(self, param_to_remove: str) -> None:
params = self.params_dict
params.pop(param_to_remove, None)
self.params = json.dumps(params)
def reset_ownership(self) -> None:
""" object will belong to the user the current user """
# make sure the object doesn't have relations to a user
# it will be filled by appbuilder on save
self.created_by = None
self.changed_by = None
# flask global context might not exist (in cli or tests for example)
self.owners = []
if g and hasattr(g, "user"):
self.owners = [g.user]
@property
def params_dict(self) -> Dict[Any, Any]:
return json_to_dict(self.params)
@property
def template_params_dict(self) -> Dict[Any, Any]:
return json_to_dict(self.template_params) # type: ignore
def _user_link(user: User) -> Union[Markup, str]:
if not user:
return ""
url = "/superset/profile/{}/".format(user.username)
return Markup('<a href="{}">{}</a>'.format(url, escape(user) or ""))
class AuditMixinNullable(AuditMixin):
"""Altering the AuditMixin to use nullable fields
Allows creating objects programmatically outside of CRUD
"""
created_on = sa.Column(sa.DateTime, default=datetime.now, nullable=True)
changed_on = sa.Column(
sa.DateTime, default=datetime.now, onupdate=datetime.now, nullable=True
)
@declared_attr
def created_by_fk(self) -> sa.Column:
return sa.Column(
sa.Integer,
sa.ForeignKey("ab_user.id"),
default=self.get_user_id,
nullable=True,
)
@declared_attr
def changed_by_fk(self) -> sa.Column:
return sa.Column(
sa.Integer,
sa.ForeignKey("ab_user.id"),
default=self.get_user_id,
onupdate=self.get_user_id,
nullable=True,
)
@property
def changed_by_name(self) -> str:
if self.changed_by:
return escape("{}".format(self.changed_by))
return ""
@renders("created_by")
def creator(self) -> Union[Markup, str]:
return _user_link(self.created_by)
@property
def changed_by_(self) -> Union[Markup, str]:
return _user_link(self.changed_by)
@renders("changed_on")
def changed_on_(self) -> Markup:
return Markup(f'<span class="no-wrap">{self.changed_on}</span>')
@renders("changed_on")
def changed_on_delta_humanized(self) -> str:
return self.changed_on_humanized
@renders("changed_on")
def changed_on_utc(self) -> str:
# Convert naive datetime to UTC
return self.changed_on.astimezone(pytz.utc).strftime("%Y-%m-%dT%H:%M:%S.%f%z")
@property
def changed_on_humanized(self) -> str:
return humanize.naturaltime(datetime.now() - self.changed_on)
@renders("changed_on")
def modified(self) -> Markup:
return Markup(f'<span class="no-wrap">{self.changed_on_humanized}</span>')
class QueryResult: # pylint: disable=too-few-public-methods
"""Object returned by the query interface"""
def __init__( # pylint: disable=too-many-arguments
self,
df: pd.DataFrame,
query: str,
duration: timedelta,
status: str = QueryStatus.SUCCESS,
error_message: Optional[str] = None,
errors: Optional[List[Dict[str, Any]]] = None,
) -> None:
self.df = df
self.query = query
self.duration = duration
self.status = status
self.error_message = error_message
self.errors = errors or []
class ExtraJSONMixin:
"""Mixin to add an `extra` column (JSON) and utility methods"""
extra_json = sa.Column(sa.Text, default="{}")
@property
def extra(self) -> Dict[str, Any]:
try:
return json.loads(self.extra_json)
except (TypeError, JSONDecodeError) as exc:
logger.error(
"Unable to load an extra json: %r. Leaving empty.", exc, exc_info=True
)
return {}
def set_extra_json(self, extras: Dict[str, Any]) -> None:
self.extra_json = json.dumps(extras)
def set_extra_json_key(self, key: str, value: Any) -> None:
extra = self.extra
extra[key] = value
self.extra_json = json.dumps(extra)
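# Minimal usage sketch of ExtraJSONMixin (illustrative only; `_ExtraDemo` is a
# hypothetical, unmapped class used purely to exercise the JSON helpers).
if __name__ == "__main__":
    class _ExtraDemo(ExtraJSONMixin):
        pass
    _demo = _ExtraDemo()
    _demo.set_extra_json({"owner": "alice"})
    _demo.set_extra_json_key("ttl", 3600)
    assert _demo.extra == {"owner": "alice", "ttl": 3600}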
|
apache-2.0
|
jmargeta/scikit-learn
|
examples/applications/plot_outlier_detection_housing.py
|
4
|
5348
|
"""
====================================
Outlier detection on a real data set
====================================
This example illustrates the need for robust covariance estimation
on a real data set. It is useful both for outlier detection and for
a better understanding of the data structure.
We selected two sets of two variables from the boston housing data set
as an illustration of the kind of analysis that can be done with several
outlier detection tools. For the purpose of visualization, we are working
with two-dimensional examples, but one should be aware that things are
not so trivial in high dimension, as will be pointed out below.
In both examples below, the main result is that the empirical covariance
estimate, being non-robust, is highly influenced by the heterogeneous
structure of the observations. The robust covariance estimate is able to
focus on the main mode of the data distribution, but it sticks to the
assumption that the data should be Gaussian distributed, yielding a somewhat
biased estimate of the data structure that is nevertheless accurate to some
extent. The One-Class SVM algorithm, which does not assume any parametric
form for the data distribution, is shown for comparison.
First example
-------------
The first example illustrates how robust covariance estimation can help
concentrating on a relevant cluster when another one exists. Here, many
observations are confounded into one and break down the empirical covariance
estimation.
Of course, some screening tools would have pointed out the presence of two
clusters (Support Vector Machines, Gaussian Mixture Models, univariate
outlier detection, ...). But had it been a high-dimensional example, none
of these could be applied that easily.
Second example
--------------
The second example shows the ability of the Minimum Covariance Determinant
robust estimator of covariance to concentrate on the main mode of the data
distribution: the location seems to be well estimated, although the covariance
is hard to estimate due to the banana-shaped distribution. In any case, we can
get rid of some outlying observations.
The One-Class SVM is able to capture the real data structure, but the
difficulty is to adjust its kernel bandwidth parameter so as to obtain
a good compromise between the shape of the data scatter matrix and the
risk of over-fitting the data.
"""
print(__doc__)
# Author: Virgile Fritsch <[email protected]>
# License: BSD
import numpy as np
from sklearn.covariance import EllipticEnvelope
from sklearn.svm import OneClassSVM
import matplotlib.pyplot as plt
import matplotlib.font_manager
from sklearn.datasets import load_boston
# Get data
X1 = load_boston()['data'][:, [8, 10]] # two clusters
X2 = load_boston()['data'][:, [5, 12]] # "banana"-shaped
# Define "classifiers" to be used
classifiers = {
"Empirical Covariance": EllipticEnvelope(support_fraction=1.,
contamination=0.261),
"Robust Covariance (Minimum Covariance Determinant)":
EllipticEnvelope(contamination=0.261),
"OCSVM": OneClassSVM(nu=0.261, gamma=0.05)}
colors = ['m', 'g', 'b']
legend1 = {}
legend2 = {}
# Learn a frontier for outlier detection with several classifiers
xx1, yy1 = np.meshgrid(np.linspace(-8, 28, 500), np.linspace(3, 40, 500))
xx2, yy2 = np.meshgrid(np.linspace(3, 10, 500), np.linspace(-5, 45, 500))
for i, (clf_name, clf) in enumerate(classifiers.items()):
plt.figure(1)
clf.fit(X1)
Z1 = clf.decision_function(np.c_[xx1.ravel(), yy1.ravel()])
Z1 = Z1.reshape(xx1.shape)
legend1[clf_name] = plt.contour(
xx1, yy1, Z1, levels=[0], linewidths=2, colors=colors[i])
plt.figure(2)
clf.fit(X2)
Z2 = clf.decision_function(np.c_[xx2.ravel(), yy2.ravel()])
Z2 = Z2.reshape(xx2.shape)
legend2[clf_name] = plt.contour(
xx2, yy2, Z2, levels=[0], linewidths=2, colors=colors[i])
# Plot the results (= shape of the data points cloud)
plt.figure(1) # two clusters
plt.title("Outlier detection on a real data set (boston housing)")
plt.scatter(X1[:, 0], X1[:, 1], color='black')
bbox_args = dict(boxstyle="round", fc="0.8")
arrow_args = dict(arrowstyle="->")
plt.annotate("several confounded points", xy=(24, 19),
xycoords="data", textcoords="data",
xytext=(13, 10), bbox=bbox_args, arrowprops=arrow_args)
plt.xlim((xx1.min(), xx1.max()))
plt.ylim((yy1.min(), yy1.max()))
legend1_values_list = list(legend1.values())
legend1_keys_list = list(legend1.keys())
plt.legend((legend1_values_list[0].collections[0],
            legend1_values_list[1].collections[0],
            legend1_values_list[2].collections[0]),
           (legend1_keys_list[0], legend1_keys_list[1], legend1_keys_list[2]),
           loc="upper center",
           prop=matplotlib.font_manager.FontProperties(size=12))
plt.ylabel("accessibility to radial highways")
plt.xlabel("pupil-teatcher ratio by town")
plt.figure(2) # "banana" shape
plt.title("Outlier detection on a real data set (boston housing)")
plt.scatter(X2[:, 0], X2[:, 1], color='black')
plt.xlim((xx2.min(), xx2.max()))
plt.ylim((yy2.min(), yy2.max()))
legend2_values_list = list(legend2.values())
legend2_keys_list = list(legend2.keys())
plt.legend((legend2_values_list[0].collections[0],
            legend2_values_list[1].collections[0],
            legend2_values_list[2].collections[0]),
           (legend2_keys_list[0], legend2_keys_list[1], legend2_keys_list[2]),
           loc="upper center",
           prop=matplotlib.font_manager.FontProperties(size=12))
plt.ylabel("% lower status of the population")
plt.xlabel("average number of rooms per dwelling")
plt.show()
|
bsd-3-clause
|
roofit-dev/parallel-roofit-scripts
|
scaling/unbinned_scaling_analysis_4_serverloop.py
|
1
|
5781
|
# -*- coding: utf-8 -*-
# @Author: Patrick Bos
# @Date: 2016-11-16 16:23:55
# @Last Modified by: Patrick Bos
# @Last Modified time: 2017-01-16 10:41:57
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import glob
import os
from functools import reduce
pd.set_option("display.width", None)
# v4
# including branching timings in more detail
# ... but now measured cleanly
def json_liststr_from_naked_json_liststr(naked_json_liststr):
json_array_text = "[" + naked_json_liststr[:-2] + "]" # :-1 removes ,\n
return json_array_text
def df_from_sloppy_json_list_str(json_inside_text):
json_array_text = json_liststr_from_naked_json_liststr(json_inside_text)
df = pd.read_json(json_array_text)
return df
def df_from_broken_sloppy_json_list_str(json_inside_text, verbose=True):
"""
Removes broken lines from json list string.
"""
import json
import re
broken_line_nrs = []
# first try removing last line, which is often wrong
json_inside_text_lines = json_inside_text.split('\n')
json_inside_text_wo_last = "\n".join(json_inside_text_lines[:-1]) + "\n"
json_array_text = json_liststr_from_naked_json_liststr(json_inside_text_wo_last)
try:
json.loads(json_array_text)
if verbose:
print("Last line removed.")
return df_from_sloppy_json_list_str(json_inside_text_wo_last)
except Exception as e:
print(e)
print(json_array_text)
pass
# if not the last line, then filter out broken lines as we encounter them
while True:
try:
json_array_text = json_liststr_from_naked_json_liststr(json_inside_text)
json.loads(json_array_text)
except Exception as e:
try:
remove_line_nr = int(re.findall('line ([0-9]+)', e.args[0])[0])
except Exception as ee:
print(e, ee)
raise Exception("stuff")
broken_line_nrs.append(remove_line_nr +
len(broken_line_nrs) # -> line number before removing lines
)
json_inside_text_lines = json_inside_text.split('\n')
json_inside_text_lines.pop(remove_line_nr - 1)
json_inside_text = "\n".join(json_inside_text_lines) + "\n"
continue
break
if verbose:
print("Broken line numbers removed: ", broken_line_nrs)
return df_from_sloppy_json_list_str(json_inside_text)
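# Illustrative sketch: the "sloppy" format handled above is a newline-separated
# run of JSON objects, each line ending in a comma; stripping the trailing
# ",\n" and wrapping the result in [...] yields a valid JSON array.
_sloppy_example = '{"pid": 1, "walltime": 0.3},\n{"pid": 2, "walltime": 0.5},\n'
assert df_from_sloppy_json_list_str(_sloppy_example).shape == (2, 2)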
def df_from_sloppy_json_list_file(fn, verbose=True):
with open(fn, 'r') as fh:
json_array_inside_text = fh.read()
try:
df = df_from_sloppy_json_list_str(json_array_inside_text)
except:
df = df_from_broken_sloppy_json_list_str(json_array_inside_text,
verbose=verbose)
return df
def merge_dataframes(*dataframes):
return reduce(pd.merge, dataframes)
def concat_dataframes(*dataframes):
return reduce(pd.concat, dataframes)
def df_from_json_incl_meta(fn, fn_meta=None,
drop_meta=['N_gaussians', 'N_observables',
'N_parameters', 'parallel_interleave',
'seed'],
drop_nan=False):
if fn_meta is None:
fn_meta = os.path.join(os.path.dirname(fn), 'timing_meta.json')
main_df = df_from_sloppy_json_list_file(fn)
meta_df = df_from_sloppy_json_list_file(fn_meta).drop(drop_meta, axis=1)
# not just single process runs, also master processes in multi-process runs:
single_process = pd.merge(main_df, meta_df, how='left', on='pid')
if 'ppid' in main_df.columns:
single_process = single_process.drop('ppid', axis=1)
multi_process = pd.merge(main_df, meta_df, how='left',
left_on='ppid', right_on='pid').drop('pid_y', axis=1)
multi_process.rename(columns={'pid_x': 'pid'}, inplace=True)
result = [single_process, multi_process]
else:
result = [single_process]
if drop_nan:
result = [df.dropna() for df in result]
return result
"""
cd ~/projects/apcocsm/code/scaling/unbinned_scaling_4
rsync -av --progress nikhef:"/user/pbos/project_atlas/apcocsm_code/scaling/unbinned_scaling_4/176664*.allier.nikhef.nl" ./
"""
savefig_dn = '/home/patrick/projects/apcocsm/code/scaling/unbinned_scaling_4/analysis/'
#### LOAD DATA FROM FILES
dnlist = sorted(glob.glob("unbinned_scaling_4/176664*.allier.nikhef.nl"))
dnlist = [dn for dn in dnlist if len(glob.glob(dn + '/*.json')) > 1]
fnlist = reduce(lambda x, y: x + y, [glob.glob(dn + '/*.json') for dn in dnlist])
fnlist = [fn for fn in fnlist if 'timing_meta.json' not in fn]
uniquefns = np.unique([fn.split('/')[-1] for fn in fnlist]).tolist()
# - 7 -> also remove the pid appendices
dfkeys = np.unique([u[7:-5 - 7] for u in uniquefns]).tolist()
dfs_split = {fn: df_from_json_incl_meta(fn) for fn in fnlist}
dfs_split_sp = {fn: dflist[0] for fn, dflist in dfs_split.items()}
dfs_split_mp = {fn: dflist[1] for fn, dflist in dfs_split.items() if len(dflist) > 1}
dfs_sp = {k: pd.concat([df for fn, df in dfs_split_sp.items() if k in fn]) for k in dfkeys}
dfs_mp = {k: pd.concat([df for fn, df in dfs_split_mp.items() if k in fn]) for k in dfkeys if k in "".join(dfs_split_mp.keys())}
# in this case we only have the serverloop_while files, so get rid of the dict
dfs_sp = list(dfs_sp.values())[0]
dfs_mp = list(dfs_mp.values())[0]
# in fact, the sp-case is empty, since servers are only in mp-mode, so we end up with just:
df = dfs_mp
#### ANALYSIS
df_23700 = df[df.ppid == 23700]
# the index is significant if you want to compare serverloops
df_23700['index'] = df_23700.index
|
apache-2.0
|
dsm054/pandas
|
pandas/tests/scalar/timedelta/test_formats.py
|
9
|
1068
|
# -*- coding: utf-8 -*-
import pytest
from pandas import Timedelta
@pytest.mark.parametrize('td, expected_repr', [
(Timedelta(10, unit='d'), "Timedelta('10 days 00:00:00')"),
(Timedelta(10, unit='s'), "Timedelta('0 days 00:00:10')"),
(Timedelta(10, unit='ms'), "Timedelta('0 days 00:00:00.010000')"),
(Timedelta(-10, unit='ms'), "Timedelta('-1 days +23:59:59.990000')")])
def test_repr(td, expected_repr):
assert repr(td) == expected_repr
@pytest.mark.parametrize('td, expected_iso', [
(Timedelta(days=6, minutes=50, seconds=3, milliseconds=10, microseconds=10,
nanoseconds=12), 'P6DT0H50M3.010010012S'),
(Timedelta(days=4, hours=12, minutes=30, seconds=5), 'P4DT12H30M5S'),
(Timedelta(nanoseconds=123), 'P0DT0H0M0.000000123S'),
# trim nano
(Timedelta(microseconds=10), 'P0DT0H0M0.00001S'),
# trim micro
(Timedelta(milliseconds=1), 'P0DT0H0M0.001S'),
# don't strip every 0
(Timedelta(minutes=1), 'P0DT0H1M0S')])
def test_isoformat(td, expected_iso):
assert td.isoformat() == expected_iso
|
bsd-3-clause
|
samnashi/howdoflawsgetlonger
|
plotmaker.py
|
1
|
9798
|
import numpy as np
#import matplotlib.pyplot as mpl.pyplot
#import seaborn as sns
import os
from random import shuffle
import matplotlib as mpl
import matplotlib.pyplot as plt
import sklearn.preprocessing
'''SCALE THE TRUTH FIRST!!!!!!!!!!! '''
mpl.rcParams['agg.path.chunksize']=100000000000
identifier = "_3_firstrun_fv2_"
Base_Path = "./"
train_path = "/home/ihsan/Documents/thesis_models/train/"
test_path = "/home/ihsan/Documents/thesis_models/test/"
generator_batch_size = 1024
# load data multiple times.
#predictions_filenames = os.listdir(test_path + "predictions")
predictions_filenames = os.listdir('/media/ihsan/LID_FLASH_1/Thesis/Preds_FV1b_Convnet/')
# print("before sorting, data_filenames: {}".format(data_filenames))
predictions_filenames.sort()
# print("after sorting, data_filenames: {}".format(data_filenames))
#label_filenames = os.listdir(test_path + "label")
label_filenames = os.listdir(test_path + "label")
label_filenames.sort()
# print("label_filenames: {}".format(data_filenames))
assert len(predictions_filenames) == len(label_filenames)
combined_filenames = zip(predictions_filenames, label_filenames)
# print("before shuffling: {}".format(combined_filenames))
#shuffle(combined_filenames)
print("after shuffling: {}".format(combined_filenames)) # shuffling works ok.
i=0
#TODO: still only saves single results.
scaler = sklearn.preprocessing.StandardScaler()
for files in combined_filenames:
i=i+1
#predictions_load_path = test_path + 'predictions/' + files[0]
predictions_load_path = '/media/ihsan/LID_FLASH_1/Thesis/Preds_FV1b_Convnet/' + files[0]
label_load_path = test_path + 'label/' + files[1]
# print("data/label load path: {} \n {}".format(data_load_path,label_load_path))
y_preds_to_reshape = np.load(predictions_load_path)
y_truth_unscaled = np.load(label_load_path)[:, 1:]
print("before reshaping preds/label shape: {}, {}".format(y_preds_to_reshape.shape, y_truth_unscaled.shape))
y_preds = np.reshape(y_preds_to_reshape,newshape=(y_preds_to_reshape.shape[1],y_preds_to_reshape.shape[2]))
#y_truth_unscaled = np.reshape(y_truth_unscaled,newshape=(y_truth_unscaled.shape[0],y_truth_unscaled.shape[1]))
y_truth = scaler.fit_transform(X=y_truth_unscaled, y=None)
predictions_length = generator_batch_size * (y_truth.shape[0] // generator_batch_size)
print("before resampling preds/label shape: {}, {}".format(y_preds.shape, y_truth.shape))
#--------COMMENTED OUT BECAUSE OF SCALER IN THE GENERATOR-----------------------------------
#test_array = np.reshape(test_array, (1, test_array.shape[0], test_array.shape[1]))
#label_array[0:,0:predictions_length,:] = np.reshape(label_array[0:,0:predictions_length,:],(1,label_array[0:,0:predictions_length,:].shape[0],label_array[0:,0:predictions_length,:].shape[1])) #label doesn't need to be 3D
# cut the label to be the same length as the predictions.
y_truth = y_truth[0:y_preds.shape[0],:]
#intervals = np.linspace(start=0, stop=y_preds.shape[0],num=2000)
resample_interval = 8
axis_option = 'log'
y_preds=y_preds[::resample_interval,:]
y_truth = y_truth[::resample_interval,:]
print("filename: {}, preds/label shape: {}, {}".format(str(files[0]),y_preds.shape, y_truth.shape))
#predictions_length = generator_batch_size * (y_truth.shape[0]//generator_batch_size)
#largest integer multiple of the generator batch size that fits into the length of the sequence.
#print("array to print's shape: {}".format(y_preds[int(0.75*y_preds.shape[0]):,:].shape))
print("array to print's shape: {}".format(y_preds.shape))
#np.save(file=('predictions_lstm_'+str(files[0])),arr=y_truth[0:,0:predictions_length,:])
#x_range= np.arange(start=0, stop=label_array[0:,0:predictions_length,:].shape[1])
# mpl.pyplot.scatter(x=x_range,y=y_pred[0,:,0])
# mpl.pyplot.scatter(x=x_range,y=label_array[0:,0:predictions_length,:][0,:,0])
mpl.pyplot.cla()
mpl.pyplot.clf()
mpl.pyplot.close()
# mpl.pyplot.plot(y_preds[0, int(0.95*predictions_length):, 0],'o')
# mpl.pyplot.plot(y_truth[0, int(0.95*predictions_length):, 0],'^')
# mpl.pyplot.plot(y_preds[0, int(0.95*predictions_length):, 1],'o')
# mpl.pyplot.plot(y_truth[0, int(0.95*predictions_length):, 1],'^')
#mpl.pyplot.scatter(x=y_preds[:, 0],y=y_truth[:, 0])
    # Plot predictions vs. ground truth for each of the four flaws over the
    # last 25% of the (resampled) sequence, saving one figure per flaw.
    for flaw_idx in range(4):
        mpl.pyplot.plot(y_preds[int(0.75 * float(y_preds.shape[0])):, flaw_idx], "o")
        mpl.pyplot.plot(y_truth[int(0.75 * float(y_truth.shape[0])):, flaw_idx], "^")
        mpl.pyplot.yscale(axis_option)
        mpl.pyplot.xscale('log')
        mpl.pyplot.title('pred vs. y_truth')
        mpl.pyplot.ylabel('crack growth rate, normalized and centered, in/cycle')
        mpl.pyplot.xlabel('cycles * ' + str(resample_interval))
        mpl.pyplot.legend(['pred[{}]'.format(flaw_idx), 'true[{}]'.format(flaw_idx)], loc='upper left')
        #mpl.pyplot.show()
        mpl.pyplot.savefig(test_path + str(files[0])[:-4] + '_conv_detail_results_flaw_' + str(flaw_idx) + '.png', bbox_inches='tight')
        mpl.pyplot.cla()
        mpl.pyplot.clf()
        mpl.pyplot.close()
#
# # mpl.pyplot.scatter(x= x_range,y=y_pred[0, :, 2])
# # mpl.pyplot.scatter(x=x_range, y=label_array[0:,0:predictions_length,:][0, :, 2])
# mpl.pyplot.plot(y_pred[0, :, 2])
# mpl.pyplot.plot(label_array[0:,0:predictions_length,:][0, :, 2])
# mpl.pyplot.yscale((axis_option))
# mpl.pyplot.xscale((axis_option))
# mpl.pyplot.title('pred vs. label_array[0:,0:predictions_length,:]')
# mpl.pyplot.ylabel('crack growth rate, normalized and centered, in/cycle')
# mpl.pyplot.xlabel('epoch')
# #mpl.pyplot.legend(['pred[0]', 'true[0]','pred[1]', 'true[1]','pred[2]', 'true[2]','pred[3]','true[3]'], loc='upper left')
# mpl.pyplot.legend(['pred[1]', 'true[1]'], loc='upper left')
#
# mpl.pyplot.savefig(str(files[0]) + '_results_flaw_2' + '.png', bbox_inches='tight')
# mpl.pyplot.clf()
# mpl.pyplot.cla()
# mpl.pyplot.close()
#
# # mpl.pyplot.scatter(x= x_range,y=y_pred[0, :, 3])
# # mpl.pyplot.scatter(x=x_range,y=label_array[0:,0:predictions_length,:][0, :, 3])
# mpl.pyplot.plot(y_pred[0, :, 3])
# mpl.pyplot.plot(label_array[0:,0:predictions_length,:][0, :, 3])
# mpl.pyplot.yscale((axis_option))
# mpl.pyplot.xscale((axis_option))
# mpl.pyplot.title('pred vs. label_array[0:,0:predictions_length,:]')
# mpl.pyplot.ylabel('crack growth rate, normalized and centered, in/cycle')
# mpl.pyplot.xlabel('epoch')
# #mpl.pyplot.legend(['pred[0]', 'true[0]','pred[1]', 'true[1]','pred[2]', 'true[2]','pred[3]','true[3]'], loc='upper left')
# mpl.pyplot.legend(['pred[1]', 'true[1]'], loc='upper left')
#
# mpl.pyplot.savefig(str(files[0]) + '_results_flaw_3' + '.png', bbox_inches='tight')
# mpl.pyplot.clf()
# mpl.pyplot.cla()
# mpl.pyplot.close()
#print("Score: {}".format(score)) #test_array.shape[0]//generator_batch_size
# #predictions = model.predict_generator(test_generator, steps=(1*test_array.shape[0]//generator_batch_size),max_queue_size=test_array.shape[0],use_multiprocessing=True)
# print("scores: {}".format(score))
# np.savetxt(Base_Path + 'results/TestResult_' + str(num_sequence_draws) + identifier + '.txt', np.asarray(score),
# fmt='%5.6f', delimiter=' ', newline='\n', header='loss, acc',
# footer=str(), comments='# ')
|
gpl-3.0
|
wazeerzulfikar/scikit-learn
|
sklearn/ensemble/tests/test_weight_boosting.py
|
7
|
17975
|
"""Testing for the boost module (sklearn.ensemble.boost)."""
import numpy as np
from sklearn.utils.testing import assert_array_equal, assert_array_less
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal, assert_true, assert_greater
from sklearn.utils.testing import assert_raises, assert_raises_regexp
from sklearn.base import BaseEstimator
from sklearn.model_selection import train_test_split
from sklearn.model_selection import GridSearchCV
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import AdaBoostRegressor
from sklearn.ensemble import weight_boosting
from scipy.sparse import csc_matrix
from scipy.sparse import csr_matrix
from scipy.sparse import coo_matrix
from scipy.sparse import dok_matrix
from scipy.sparse import lil_matrix
from sklearn.svm import SVC, SVR
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.utils import shuffle
from sklearn import datasets
# Common random state
rng = np.random.RandomState(0)
# Toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y_class = ["foo", "foo", "foo", 1, 1, 1] # test string class labels
y_regr = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
y_t_class = ["foo", 1, 1]
y_t_regr = [-1, 1, 1]
# Load the iris dataset and randomly permute it
iris = datasets.load_iris()
perm = rng.permutation(iris.target.size)
iris.data, iris.target = shuffle(iris.data, iris.target, random_state=rng)
# Load the boston dataset and randomly permute it
boston = datasets.load_boston()
boston.data, boston.target = shuffle(boston.data, boston.target,
random_state=rng)
def test_samme_proba():
# Test the `_samme_proba` helper function.
# Define some example (bad) `predict_proba` output.
probs = np.array([[1, 1e-6, 0],
[0.19, 0.6, 0.2],
[-999, 0.51, 0.5],
[1e-6, 1, 1e-9]])
probs /= np.abs(probs.sum(axis=1))[:, np.newaxis]
# _samme_proba calls estimator.predict_proba.
# Make a mock object so I can control what gets returned.
class MockEstimator(object):
def predict_proba(self, X):
assert_array_equal(X.shape, probs.shape)
return probs
mock = MockEstimator()
samme_proba = weight_boosting._samme_proba(mock, 3, np.ones_like(probs))
assert_array_equal(samme_proba.shape, probs.shape)
assert_true(np.isfinite(samme_proba).all())
# Make sure that the correct elements come out as smallest --
# `_samme_proba` should preserve the ordering in each example.
assert_array_equal(np.argmin(samme_proba, axis=1), [2, 0, 0, 2])
assert_array_equal(np.argmax(samme_proba, axis=1), [0, 1, 1, 1])
def test_oneclass_adaboost_proba():
# Test predict_proba robustness for one class label input.
# In response to issue #7501
# https://github.com/scikit-learn/scikit-learn/issues/7501
y_t = np.ones(len(X))
clf = AdaBoostClassifier().fit(X, y_t)
assert_array_equal(clf.predict_proba(X), np.ones((len(X), 1)))
def test_classification_toy():
# Check classification on a toy dataset.
for alg in ['SAMME', 'SAMME.R']:
clf = AdaBoostClassifier(algorithm=alg, random_state=0)
clf.fit(X, y_class)
assert_array_equal(clf.predict(T), y_t_class)
assert_array_equal(np.unique(np.asarray(y_t_class)), clf.classes_)
assert_equal(clf.predict_proba(T).shape, (len(T), 2))
assert_equal(clf.decision_function(T).shape, (len(T),))
def test_regression_toy():
# Check classification on a toy dataset.
clf = AdaBoostRegressor(random_state=0)
clf.fit(X, y_regr)
assert_array_equal(clf.predict(T), y_t_regr)
def test_iris():
# Check consistency on dataset iris.
classes = np.unique(iris.target)
clf_samme = prob_samme = None
for alg in ['SAMME', 'SAMME.R']:
clf = AdaBoostClassifier(algorithm=alg)
clf.fit(iris.data, iris.target)
assert_array_equal(classes, clf.classes_)
proba = clf.predict_proba(iris.data)
if alg == "SAMME":
clf_samme = clf
prob_samme = proba
assert_equal(proba.shape[1], len(classes))
assert_equal(clf.decision_function(iris.data).shape[1], len(classes))
score = clf.score(iris.data, iris.target)
assert score > 0.9, "Failed with algorithm %s and score = %f" % \
(alg, score)
# Check we used multiple estimators
assert_greater(len(clf.estimators_), 1)
# Check for distinct random states (see issue #7408)
assert_equal(len(set(est.random_state for est in clf.estimators_)),
len(clf.estimators_))
# Somewhat hacky regression test: prior to
# ae7adc880d624615a34bafdb1d75ef67051b8200,
# predict_proba returned SAMME.R values for SAMME.
clf_samme.algorithm = "SAMME.R"
assert_array_less(0,
np.abs(clf_samme.predict_proba(iris.data) - prob_samme))
def test_boston():
# Check consistency on dataset boston house prices.
reg = AdaBoostRegressor(random_state=0)
reg.fit(boston.data, boston.target)
score = reg.score(boston.data, boston.target)
assert score > 0.85
# Check we used multiple estimators
assert_true(len(reg.estimators_) > 1)
# Check for distinct random states (see issue #7408)
assert_equal(len(set(est.random_state for est in reg.estimators_)),
len(reg.estimators_))
def test_staged_predict():
# Check staged predictions.
rng = np.random.RandomState(0)
iris_weights = rng.randint(10, size=iris.target.shape)
boston_weights = rng.randint(10, size=boston.target.shape)
# AdaBoost classification
for alg in ['SAMME', 'SAMME.R']:
clf = AdaBoostClassifier(algorithm=alg, n_estimators=10)
clf.fit(iris.data, iris.target, sample_weight=iris_weights)
predictions = clf.predict(iris.data)
staged_predictions = [p for p in clf.staged_predict(iris.data)]
proba = clf.predict_proba(iris.data)
staged_probas = [p for p in clf.staged_predict_proba(iris.data)]
score = clf.score(iris.data, iris.target, sample_weight=iris_weights)
staged_scores = [
s for s in clf.staged_score(
iris.data, iris.target, sample_weight=iris_weights)]
assert_equal(len(staged_predictions), 10)
assert_array_almost_equal(predictions, staged_predictions[-1])
assert_equal(len(staged_probas), 10)
assert_array_almost_equal(proba, staged_probas[-1])
assert_equal(len(staged_scores), 10)
assert_array_almost_equal(score, staged_scores[-1])
# AdaBoost regression
clf = AdaBoostRegressor(n_estimators=10, random_state=0)
clf.fit(boston.data, boston.target, sample_weight=boston_weights)
predictions = clf.predict(boston.data)
staged_predictions = [p for p in clf.staged_predict(boston.data)]
score = clf.score(boston.data, boston.target, sample_weight=boston_weights)
staged_scores = [
s for s in clf.staged_score(
boston.data, boston.target, sample_weight=boston_weights)]
assert_equal(len(staged_predictions), 10)
assert_array_almost_equal(predictions, staged_predictions[-1])
assert_equal(len(staged_scores), 10)
assert_array_almost_equal(score, staged_scores[-1])
def test_gridsearch():
# Check that base trees can be grid-searched.
# AdaBoost classification
boost = AdaBoostClassifier(base_estimator=DecisionTreeClassifier())
parameters = {'n_estimators': (1, 2),
'base_estimator__max_depth': (1, 2),
'algorithm': ('SAMME', 'SAMME.R')}
clf = GridSearchCV(boost, parameters)
clf.fit(iris.data, iris.target)
# AdaBoost regression
boost = AdaBoostRegressor(base_estimator=DecisionTreeRegressor(),
random_state=0)
parameters = {'n_estimators': (1, 2),
'base_estimator__max_depth': (1, 2)}
clf = GridSearchCV(boost, parameters)
clf.fit(boston.data, boston.target)
def test_pickle():
# Check pickability.
import pickle
# Adaboost classifier
for alg in ['SAMME', 'SAMME.R']:
obj = AdaBoostClassifier(algorithm=alg)
obj.fit(iris.data, iris.target)
score = obj.score(iris.data, iris.target)
s = pickle.dumps(obj)
obj2 = pickle.loads(s)
assert_equal(type(obj2), obj.__class__)
score2 = obj2.score(iris.data, iris.target)
assert_equal(score, score2)
# Adaboost regressor
obj = AdaBoostRegressor(random_state=0)
obj.fit(boston.data, boston.target)
score = obj.score(boston.data, boston.target)
s = pickle.dumps(obj)
obj2 = pickle.loads(s)
assert_equal(type(obj2), obj.__class__)
score2 = obj2.score(boston.data, boston.target)
assert_equal(score, score2)
def test_importances():
# Check variable importances.
X, y = datasets.make_classification(n_samples=2000,
n_features=10,
n_informative=3,
n_redundant=0,
n_repeated=0,
shuffle=False,
random_state=1)
for alg in ['SAMME', 'SAMME.R']:
clf = AdaBoostClassifier(algorithm=alg)
clf.fit(X, y)
importances = clf.feature_importances_
assert_equal(importances.shape[0], 10)
assert_equal((importances[:3, np.newaxis] >= importances[3:]).all(),
True)
def test_error():
# Test that it gives proper exception on deficient input.
assert_raises(ValueError,
AdaBoostClassifier(learning_rate=-1).fit,
X, y_class)
assert_raises(ValueError,
AdaBoostClassifier(algorithm="foo").fit,
X, y_class)
assert_raises(ValueError,
AdaBoostClassifier().fit,
X, y_class, sample_weight=np.asarray([-1]))
def test_base_estimator():
# Test different base estimators.
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import SVC
# XXX doesn't work with y_class because RF doesn't support classes_
# Shouldn't AdaBoost run a LabelBinarizer?
clf = AdaBoostClassifier(RandomForestClassifier())
clf.fit(X, y_regr)
clf = AdaBoostClassifier(SVC(), algorithm="SAMME")
clf.fit(X, y_class)
from sklearn.ensemble import RandomForestRegressor
from sklearn.svm import SVR
clf = AdaBoostRegressor(RandomForestRegressor(), random_state=0)
clf.fit(X, y_regr)
clf = AdaBoostRegressor(SVR(), random_state=0)
clf.fit(X, y_regr)
# Check that an empty discrete ensemble fails in fit, not predict.
X_fail = [[1, 1], [1, 1], [1, 1], [1, 1]]
y_fail = ["foo", "bar", 1, 2]
clf = AdaBoostClassifier(SVC(), algorithm="SAMME")
assert_raises_regexp(ValueError, "worse than random",
clf.fit, X_fail, y_fail)
def test_sample_weight_missing():
from sklearn.cluster import KMeans
clf = AdaBoostClassifier(KMeans(), algorithm="SAMME")
assert_raises(ValueError, clf.fit, X, y_regr)
clf = AdaBoostRegressor(KMeans())
assert_raises(ValueError, clf.fit, X, y_regr)
def test_sparse_classification():
# Check classification with sparse input.
class CustomSVC(SVC):
"""SVC variant that records the nature of the training set."""
def fit(self, X, y, sample_weight=None):
"""Modification on fit caries data type for later verification."""
super(CustomSVC, self).fit(X, y, sample_weight=sample_weight)
self.data_type_ = type(X)
return self
X, y = datasets.make_multilabel_classification(n_classes=1, n_samples=15,
n_features=5,
random_state=42)
# Flatten y to a 1d array
y = np.ravel(y)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
for sparse_format in [csc_matrix, csr_matrix, lil_matrix, coo_matrix,
dok_matrix]:
X_train_sparse = sparse_format(X_train)
X_test_sparse = sparse_format(X_test)
# Trained on sparse format
sparse_classifier = AdaBoostClassifier(
base_estimator=CustomSVC(probability=True),
random_state=1,
algorithm="SAMME"
).fit(X_train_sparse, y_train)
# Trained on dense format
dense_classifier = AdaBoostClassifier(
base_estimator=CustomSVC(probability=True),
random_state=1,
algorithm="SAMME"
).fit(X_train, y_train)
# predict
sparse_results = sparse_classifier.predict(X_test_sparse)
dense_results = dense_classifier.predict(X_test)
assert_array_equal(sparse_results, dense_results)
# decision_function
sparse_results = sparse_classifier.decision_function(X_test_sparse)
dense_results = dense_classifier.decision_function(X_test)
assert_array_equal(sparse_results, dense_results)
# predict_log_proba
sparse_results = sparse_classifier.predict_log_proba(X_test_sparse)
dense_results = dense_classifier.predict_log_proba(X_test)
assert_array_equal(sparse_results, dense_results)
# predict_proba
sparse_results = sparse_classifier.predict_proba(X_test_sparse)
dense_results = dense_classifier.predict_proba(X_test)
assert_array_equal(sparse_results, dense_results)
# score
sparse_results = sparse_classifier.score(X_test_sparse, y_test)
dense_results = dense_classifier.score(X_test, y_test)
assert_array_equal(sparse_results, dense_results)
# staged_decision_function
sparse_results = sparse_classifier.staged_decision_function(
X_test_sparse)
dense_results = dense_classifier.staged_decision_function(X_test)
for sprase_res, dense_res in zip(sparse_results, dense_results):
assert_array_equal(sprase_res, dense_res)
# staged_predict
sparse_results = sparse_classifier.staged_predict(X_test_sparse)
dense_results = dense_classifier.staged_predict(X_test)
for sprase_res, dense_res in zip(sparse_results, dense_results):
assert_array_equal(sprase_res, dense_res)
# staged_predict_proba
sparse_results = sparse_classifier.staged_predict_proba(X_test_sparse)
dense_results = dense_classifier.staged_predict_proba(X_test)
for sprase_res, dense_res in zip(sparse_results, dense_results):
assert_array_equal(sprase_res, dense_res)
# staged_score
sparse_results = sparse_classifier.staged_score(X_test_sparse,
y_test)
dense_results = dense_classifier.staged_score(X_test, y_test)
for sprase_res, dense_res in zip(sparse_results, dense_results):
assert_array_equal(sprase_res, dense_res)
# Verify sparsity of data is maintained during training
types = [i.data_type_ for i in sparse_classifier.estimators_]
assert all([(t == csc_matrix or t == csr_matrix)
for t in types])
def test_sparse_regression():
# Check regression with sparse input.
class CustomSVR(SVR):
"""SVR variant that records the nature of the training set."""
def fit(self, X, y, sample_weight=None):
"""Modification on fit caries data type for later verification."""
super(CustomSVR, self).fit(X, y, sample_weight=sample_weight)
self.data_type_ = type(X)
return self
X, y = datasets.make_regression(n_samples=15, n_features=50, n_targets=1,
random_state=42)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
for sparse_format in [csc_matrix, csr_matrix, lil_matrix, coo_matrix,
dok_matrix]:
X_train_sparse = sparse_format(X_train)
X_test_sparse = sparse_format(X_test)
# Trained on sparse format
sparse_classifier = AdaBoostRegressor(
base_estimator=CustomSVR(),
random_state=1
).fit(X_train_sparse, y_train)
# Trained on dense format
dense_classifier = dense_results = AdaBoostRegressor(
base_estimator=CustomSVR(),
random_state=1
).fit(X_train, y_train)
# predict
sparse_results = sparse_classifier.predict(X_test_sparse)
dense_results = dense_classifier.predict(X_test)
assert_array_equal(sparse_results, dense_results)
# staged_predict
sparse_results = sparse_classifier.staged_predict(X_test_sparse)
dense_results = dense_classifier.staged_predict(X_test)
for sprase_res, dense_res in zip(sparse_results, dense_results):
assert_array_equal(sprase_res, dense_res)
types = [i.data_type_ for i in sparse_classifier.estimators_]
assert all([(t == csc_matrix or t == csr_matrix)
for t in types])
def test_sample_weight_adaboost_regressor():
"""
AdaBoostRegressor should work without sample_weights in the base estimator
The random weighted sampling is done internally in the _boost method in
AdaBoostRegressor.
"""
class DummyEstimator(BaseEstimator):
def fit(self, X, y):
pass
def predict(self, X):
return np.zeros(X.shape[0])
boost = AdaBoostRegressor(DummyEstimator(), n_estimators=3)
boost.fit(X, y_regr)
assert_equal(len(boost.estimator_weights_), len(boost.estimator_errors_))
|
bsd-3-clause
|
jbaker92/logistic_control_variate
|
hmc/lrhmc.py
|
1
|
5260
|
import pkg_resources
import pickle
from pystan import StanModel
import numpy as np
from sklearn.metrics import log_loss
class LRHMC:
"""
Methods for performing Bayesian logistic regression using Hamiltonian Monte Carlo
Used to try and debug ZV control variates for sgld
References:
1. Hamiltonian Monte Carlo - https://arxiv.org/pdf/1206.1901.pdf
2. ZV control variates for Hamiltonian Monte Carlo -
https://projecteuclid.org/download/pdfview_1/euclid.ba/1393251772
"""
def __init__(self,X_train,X_test,y_train,y_test):
"""
Initialise LRHMC object
Parameters:
X_train - matrix of explanatory variables for training (assumes numpy array of floats)
        X_test - matrix of explanatory variables for testing (assumes numpy array of floats)
y_train - vector of response variables for training (assumes numpy array of ints)
y_test - vector of response variables for testing (assumes numpy array of ints)
"""
self.X = X_train
self.y = y_train
self.X_test = X_test
self.y_test = y_test
# Load STAN model object, if it doesn't exist create a new one
try:
self.stan_pkl = pkg_resources.resource_filename('logistic_control_variate', 'hmc/lr.pkl')
with open( self.stan_pkl ) as stanbin:
self.stan = pickle.load(stanbin)
except ( IOError, EOFError ):
stan_code = pkg_resources.resource_filename('logistic_control_variate', 'hmc/lr.stan')
self.stan = StanModel( stan_code )
# Set dimension constants
self.N = self.X.shape[0]
self.d = self.X.shape[1]
self.test_size = self.X_test.shape[0]
# Get data in the right format for STAN
self.data = { 'N' : self.N,
'D' : self.d,
'y' : self.y,
'X' : self.X }
# Initialise data for fitting
self.fitted = None
self.sample = None
self.logpost_sample = None
self.n_iters = None
def fit(self,n_iters=1000):
"""
Fit HMC model to LogisticRegression object using STAN
Parameters:
lr - LogisticRegression object
Modifies:
self.fitted - updates to STAN fitted object
self.sample - updates to the sampled MCMC chain
self.logpost_sample - updates to the gradient at each point in the chain
"""
self.n_iters = n_iters
self.fitted = self.stan.sampling( data = self.data, iter = 2*self.n_iters, chains = 1 )
# Dump model file once fit to avoid recompiling
with open( self.stan_pkl, 'w' ) as stanbin:
pickle.dump(self.stan, stanbin)
self.sample = self.fitted.extract()['beta']
self.logpost_sample = np.zeros( self.sample.shape )
for i in range(self.n_iters):
self.logpost_sample[i,:] = self.fitted.grad_log_prob( self.sample[i,:] )
temp_file = pkg_resources.resource_filename(
'logistic_control_variate', 'data/hmc_temp/fitted.pkl')
with open(temp_file, 'w') as outfile:
pickle.dump(self, outfile)
def postprocess(self):
"""
Postprocess MCMC chain with ZV control variates
Requires:
Fitted model - self.fitted, self.sample, self.logpost_sample is not None
Modifies:
self.sample - updates with postprocessed chain
"""
        pot_energy = - 1 / 2.0 * self.logpost_sample
        sample_mean = np.mean( self.sample, axis = 0 )
        grad_mean = np.mean( pot_energy, axis = 0 )
        var_grad_inv = np.linalg.inv( np.cov( pot_energy, rowvar = 0 ) )
        n_init = 10**3
        # Initialise variables
        cov_params = np.zeros( self.d )
        a_current = np.zeros( self.d )
        new_sample = np.zeros( self.sample.shape )
        # Calculate covariance for each parameter
        for j in range(self.d):
            cov_params = np.zeros(self.d)
            a_current = np.zeros(self.d)
            for i in range(n_init):
                cov_params += 1 / float( n_init - 1 ) * (
                    self.sample[i,j] - sample_mean[j] ) * ( pot_energy[i,j] - grad_mean[j] )
            # Update sample for current dimension
            a_current = - np.matmul( var_grad_inv, cov_params )
            for i in range(self.n_iters):
                new_sample[i,j] = self.sample[i,j] + np.dot( a_current, pot_energy[i,:] )
        # Compare the variance of the postprocessed and raw chains
        print( np.var( new_sample[n_init:,1] ) )
        print( np.var( self.sample[n_init:,1] ) )
def logloss(self,sample):
"""
        Calculate the average log loss on the test set over the sampled parameter values
        Parameters:
        sample - matrix of sampled logistic regression parameters (float array, n_iters x d)
"""
logloss = 0
for m in range(self.n_iters):
y_pred = np.zeros(self.test_size, dtype = int)
beta = np.squeeze( np.copy( sample[m,:] ) )
for i in range(self.test_size):
x = np.squeeze( np.copy( self.X_test[i,:] ) )
y_pred[i] = int( np.dot( beta, x ) >= 0.0 )
logloss += log_loss( self.y_test, y_pred ) / float( self.n_iters )
return logloss
|
mit
|
chuckchen/spark
|
examples/src/main/python/sql/arrow.py
|
23
|
9242
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
A simple example demonstrating Arrow in Spark.
Run with:
./bin/spark-submit examples/src/main/python/sql/arrow.py
"""
# NOTE that this file is imported in user guide in PySpark documentation.
# The codes are referred via line numbers. See also `literalinclude` directive in Sphinx.
from pyspark.sql import SparkSession
from pyspark.sql.pandas.utils import require_minimum_pandas_version, require_minimum_pyarrow_version
require_minimum_pandas_version()
require_minimum_pyarrow_version()
def dataframe_with_arrow_example(spark):
import numpy as np # type: ignore[import]
import pandas as pd # type: ignore[import]
# Enable Arrow-based columnar data transfers
spark.conf.set("spark.sql.execution.arrow.pyspark.enabled", "true")
# Generate a Pandas DataFrame
pdf = pd.DataFrame(np.random.rand(100, 3))
# Create a Spark DataFrame from a Pandas DataFrame using Arrow
df = spark.createDataFrame(pdf)
# Convert the Spark DataFrame back to a Pandas DataFrame using Arrow
result_pdf = df.select("*").toPandas()
print("Pandas DataFrame result statistics:\n%s\n" % str(result_pdf.describe()))
def ser_to_frame_pandas_udf_example(spark):
import pandas as pd
from pyspark.sql.functions import pandas_udf
@pandas_udf("col1 string, col2 long")
def func(s1: pd.Series, s2: pd.Series, s3: pd.DataFrame) -> pd.DataFrame:
s3['col2'] = s1 + s2.str.len()
return s3
# Create a Spark DataFrame that has three columns including a struct column.
df = spark.createDataFrame(
[[1, "a string", ("a nested string",)]],
"long_col long, string_col string, struct_col struct<col1:string>")
df.printSchema()
# root
    # |-- long_col: long (nullable = true)
    # |-- string_col: string (nullable = true)
    # |-- struct_col: struct (nullable = true)
# | |-- col1: string (nullable = true)
df.select(func("long_col", "string_col", "struct_col")).printSchema()
# |-- func(long_col, string_col, struct_col): struct (nullable = true)
# | |-- col1: string (nullable = true)
# | |-- col2: long (nullable = true)
def ser_to_ser_pandas_udf_example(spark):
import pandas as pd
from pyspark.sql.functions import col, pandas_udf
from pyspark.sql.types import LongType
# Declare the function and create the UDF
def multiply_func(a: pd.Series, b: pd.Series) -> pd.Series:
return a * b
multiply = pandas_udf(multiply_func, returnType=LongType())
# The function for a pandas_udf should be able to execute with local Pandas data
x = pd.Series([1, 2, 3])
print(multiply_func(x, x))
# 0 1
# 1 4
# 2 9
# dtype: int64
# Create a Spark DataFrame, 'spark' is an existing SparkSession
df = spark.createDataFrame(pd.DataFrame(x, columns=["x"]))
# Execute function as a Spark vectorized UDF
df.select(multiply(col("x"), col("x"))).show()
# +-------------------+
# |multiply_func(x, x)|
# +-------------------+
# | 1|
# | 4|
# | 9|
# +-------------------+
def iter_ser_to_iter_ser_pandas_udf_example(spark):
from typing import Iterator
import pandas as pd
from pyspark.sql.functions import pandas_udf
pdf = pd.DataFrame([1, 2, 3], columns=["x"])
df = spark.createDataFrame(pdf)
# Declare the function and create the UDF
@pandas_udf("long")
def plus_one(iterator: Iterator[pd.Series]) -> Iterator[pd.Series]:
for x in iterator:
yield x + 1
df.select(plus_one("x")).show()
# +-----------+
# |plus_one(x)|
# +-----------+
# | 2|
# | 3|
# | 4|
# +-----------+
def iter_sers_to_iter_ser_pandas_udf_example(spark):
from typing import Iterator, Tuple
import pandas as pd
from pyspark.sql.functions import pandas_udf
pdf = pd.DataFrame([1, 2, 3], columns=["x"])
df = spark.createDataFrame(pdf)
# Declare the function and create the UDF
@pandas_udf("long")
def multiply_two_cols(
iterator: Iterator[Tuple[pd.Series, pd.Series]]) -> Iterator[pd.Series]:
for a, b in iterator:
yield a * b
df.select(multiply_two_cols("x", "x")).show()
# +-----------------------+
# |multiply_two_cols(x, x)|
# +-----------------------+
# | 1|
# | 4|
# | 9|
# +-----------------------+
def ser_to_scalar_pandas_udf_example(spark):
import pandas as pd
from pyspark.sql.functions import pandas_udf
from pyspark.sql import Window
df = spark.createDataFrame(
[(1, 1.0), (1, 2.0), (2, 3.0), (2, 5.0), (2, 10.0)],
("id", "v"))
# Declare the function and create the UDF
@pandas_udf("double")
def mean_udf(v: pd.Series) -> float:
return v.mean()
df.select(mean_udf(df['v'])).show()
# +-----------+
# |mean_udf(v)|
# +-----------+
# | 4.2|
# +-----------+
df.groupby("id").agg(mean_udf(df['v'])).show()
# +---+-----------+
# | id|mean_udf(v)|
# +---+-----------+
# | 1| 1.5|
# | 2| 6.0|
# +---+-----------+
w = Window \
.partitionBy('id') \
.rowsBetween(Window.unboundedPreceding, Window.unboundedFollowing)
df.withColumn('mean_v', mean_udf(df['v']).over(w)).show()
# +---+----+------+
# | id| v|mean_v|
# +---+----+------+
# | 1| 1.0| 1.5|
# | 1| 2.0| 1.5|
# | 2| 3.0| 6.0|
# | 2| 5.0| 6.0|
# | 2|10.0| 6.0|
# +---+----+------+
def grouped_apply_in_pandas_example(spark):
df = spark.createDataFrame(
[(1, 1.0), (1, 2.0), (2, 3.0), (2, 5.0), (2, 10.0)],
("id", "v"))
def subtract_mean(pdf):
# pdf is a pandas.DataFrame
v = pdf.v
return pdf.assign(v=v - v.mean())
df.groupby("id").applyInPandas(subtract_mean, schema="id long, v double").show()
# +---+----+
# | id| v|
# +---+----+
# | 1|-0.5|
# | 1| 0.5|
# | 2|-3.0|
# | 2|-1.0|
# | 2| 4.0|
# +---+----+
def map_in_pandas_example(spark):
df = spark.createDataFrame([(1, 21), (2, 30)], ("id", "age"))
def filter_func(iterator):
for pdf in iterator:
yield pdf[pdf.id == 1]
df.mapInPandas(filter_func, schema=df.schema).show()
# +---+---+
# | id|age|
# +---+---+
# | 1| 21|
# +---+---+
def cogrouped_apply_in_pandas_example(spark):
import pandas as pd
df1 = spark.createDataFrame(
[(20000101, 1, 1.0), (20000101, 2, 2.0), (20000102, 1, 3.0), (20000102, 2, 4.0)],
("time", "id", "v1"))
df2 = spark.createDataFrame(
[(20000101, 1, "x"), (20000101, 2, "y")],
("time", "id", "v2"))
def asof_join(l, r):
return pd.merge_asof(l, r, on="time", by="id")
df1.groupby("id").cogroup(df2.groupby("id")).applyInPandas(
asof_join, schema="time int, id int, v1 double, v2 string").show()
# +--------+---+---+---+
# | time| id| v1| v2|
# +--------+---+---+---+
# |20000101| 1|1.0| x|
# |20000102| 1|3.0| x|
# |20000101| 2|2.0| y|
# |20000102| 2|4.0| y|
# +--------+---+---+---+
if __name__ == "__main__":
spark = SparkSession \
.builder \
.appName("Python Arrow-in-Spark example") \
.getOrCreate()
print("Running Pandas to/from conversion example")
dataframe_with_arrow_example(spark)
print("Running pandas_udf example: Series to Frame")
ser_to_frame_pandas_udf_example(spark)
print("Running pandas_udf example: Series to Series")
ser_to_ser_pandas_udf_example(spark)
print("Running pandas_udf example: Iterator of Series to Iterator of Series")
iter_ser_to_iter_ser_pandas_udf_example(spark)
print("Running pandas_udf example: Iterator of Multiple Series to Iterator of Series")
iter_sers_to_iter_ser_pandas_udf_example(spark)
print("Running pandas_udf example: Series to Scalar")
ser_to_scalar_pandas_udf_example(spark)
print("Running pandas function example: Grouped Map")
grouped_apply_in_pandas_example(spark)
print("Running pandas function example: Map")
map_in_pandas_example(spark)
print("Running pandas function example: Co-grouped Map")
cogrouped_apply_in_pandas_example(spark)
spark.stop()
|
apache-2.0
|
w1kke/pylearn2
|
pylearn2/scripts/datasets/step_through_small_norb.py
|
49
|
3123
|
#! /usr/bin/env python
"""
A script for sequentially stepping through SmallNORB, viewing each image and
its label.
Intended as a demonstration of how to iterate through NORB images,
and as a way of testing SmallNORB's StereoViewConverter.
If you just want an image viewer, consider
pylearn2/scripts/show_binocular_grayscale_images.py,
which is not specific to SmallNORB.
"""
__author__ = "Matthew Koichi Grimes"
__copyright__ = "Copyright 2010-2014, Universite de Montreal"
__credits__ = __author__
__license__ = "3-clause BSD"
__maintainer__ = __author__
__email__ = "mkg alum mit edu (@..)"
import argparse, pickle, sys
from matplotlib import pyplot
from pylearn2.datasets.norb import SmallNORB
from pylearn2.utils import safe_zip
def main():
def parse_args():
parser = argparse.ArgumentParser(
description="Step-through visualizer for SmallNORB dataset")
parser.add_argument("--which_set",
default='train',
required=True,
help=("'train', 'test', or the path to a "
"SmallNORB .pkl file"))
return parser.parse_args()
def load_norb(args):
if args.which_set in ('test', 'train'):
return SmallNORB(args.which_set, True)
else:
norb_file = open(args.which_set)
return pickle.load(norb_file)
args = parse_args()
norb = load_norb(args)
topo_space = norb.view_converter.topo_space # does not include label space
vec_space = norb.get_data_specs()[0].components[0]
figure, axes = pyplot.subplots(1, 2, squeeze=True)
figure.suptitle("Press space to step through, or 'q' to quit.")
def draw_and_increment(iterator):
"""
Draws the image pair currently pointed at by the iterator,
then increments the iterator.
"""
def draw(batch_pair):
for axis, image_batch in safe_zip(axes, batch_pair):
assert image_batch.shape[0] == 1
grayscale_image = image_batch[0, :, :, 0]
axis.imshow(grayscale_image, cmap='gray')
figure.canvas.draw()
def get_values_and_increment(iterator):
try:
                vec_stereo_pair, labels = next(iterator)
except StopIteration:
return (None, None)
topo_stereo_pair = vec_space.np_format_as(vec_stereo_pair,
topo_space)
return topo_stereo_pair, labels
batch_pair, labels = get_values_and_increment(norb_iter)
draw(batch_pair)
norb_iter = norb.iterator(mode='sequential',
batch_size=1,
data_specs=norb.get_data_specs())
def on_key_press(event):
if event.key == ' ':
draw_and_increment(norb_iter)
if event.key == 'q':
sys.exit(0)
figure.canvas.mpl_connect('key_press_event', on_key_press)
draw_and_increment(norb_iter)
pyplot.show()
if __name__ == "__main__":
main()
|
bsd-3-clause
|
akrherz/iem
|
htdocs/plotting/auto/scripts/p78.py
|
1
|
6214
|
"""Avg dew point at temperature."""
from collections import OrderedDict
import datetime
from pandas.io.sql import read_sql
from metpy.units import units
import metpy.calc as mcalc
from pyiem.util import get_autoplot_context, get_dbconn
from pyiem.plot.use_agg import plt
from pyiem.exceptions import NoDataFound
MDICT = OrderedDict(
[
("all", "No Month/Time Limit"),
("spring", "Spring (MAM)"),
("fall", "Fall (SON)"),
("winter", "Winter (DJF)"),
("summer", "Summer (JJA)"),
("jan", "January"),
("feb", "February"),
("mar", "March"),
("apr", "April"),
("may", "May"),
("jun", "June"),
("jul", "July"),
("aug", "August"),
("sep", "September"),
("oct", "October"),
("nov", "November"),
("dec", "December"),
]
)
def get_description():
""" Return a dict describing how to call this plotter """
desc = dict()
desc["data"] = True
desc["cache"] = 86400
desc[
"description"
] = """This plot displays the average dew point at
a given air temperature along with the envelope between the 5th and 95th
percentile. The average dew point is computed by taking the
observations of mixing ratio, averaging those, and then back computing
the dew point temperature. With that averaged dew point temperature a
relative humidity value is computed."""
desc["arguments"] = [
dict(
type="zstation",
name="zstation",
default="DSM",
label="Select Station:",
network="IA_ASOS",
),
dict(
type="select",
name="month",
default="all",
label="Month Limiter",
options=MDICT,
),
]
return desc
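# A minimal, uncalled sketch of the averaging approach described above and
# implemented in plotter() below: average the mixing ratio at a fixed air
# temperature, then back out the dew point from the mean vapor pressure.
# The numeric values are made up purely for illustration.
def _mean_dewpoint_sketch():
    """Return the back-computed dew point for a handful of fake obs."""
    import numpy as np
    pres = np.array([1000.0, 998.0, 1002.0]) * units("millibars")
    tmpf = np.array([70.0, 70.0, 70.0]) * units("degF")
    relh = np.array([50.0, 60.0, 55.0]) * units("percent")
    # same call chain as plotter(): mixing ratio -> vapor pressure -> dew point
    mixr = mcalc.mixing_ratio_from_relative_humidity(pres, tmpf, relh)
    vapp = mcalc.vapor_pressure(pres, mixr).to(units("kPa"))
    return mcalc.dewpoint(np.mean(vapp)).to(units("degF"))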
def plotter(fdict):
""" Go """
pgconn = get_dbconn("asos")
ctx = get_autoplot_context(fdict, get_description())
station = ctx["zstation"]
month = ctx["month"]
if month == "all":
months = range(1, 13)
elif month == "fall":
months = [9, 10, 11]
elif month == "winter":
months = [12, 1, 2]
elif month == "spring":
months = [3, 4, 5]
elif month == "summer":
months = [6, 7, 8]
else:
ts = datetime.datetime.strptime("2000-" + month + "-01", "%Y-%b-%d")
# make sure it is length two for the trick below in SQL
months = [ts.month, 999]
df = read_sql(
"""
SELECT tmpf::int as tmpf, dwpf, relh,
coalesce(mslp, alti * 33.8639, 1013.25) as slp
from alldata where station = %s
and drct is not null and dwpf is not null and dwpf <= tmpf
and relh is not null
and extract(month from valid) in %s
and report_type = 2
""",
pgconn,
params=(station, tuple(months)),
)
if df.empty:
raise NoDataFound("No Data Found.")
# Cull any low ob count data points
counts = df.groupby("tmpf").count()
drops = []
for tmpf, row in counts.iterrows():
if row["slp"] < 6:
drops.append(tmpf)
# Convert sea level pressure to station pressure
df["pressure"] = mcalc.add_height_to_pressure(
df["slp"].values * units("millibars"),
ctx["_nt"].sts[station]["elevation"] * units("m"),
).to(units("millibar"))
# compute mixing ratio
df["mixingratio"] = mcalc.mixing_ratio_from_relative_humidity(
df["pressure"].values * units("millibars"),
df["tmpf"].values * units("degF"),
df["relh"].values * units("percent"),
)
    # compute vapor pressure
df["vapor_pressure"] = mcalc.vapor_pressure(
df["pressure"].values * units("millibars"),
df["mixingratio"].values * units("kg/kg"),
).to(units("kPa"))
qtiles = df.groupby("tmpf").quantile([0.05, 0.25, 0.5, 0.75, 0.95]).copy()
qtiles = qtiles.reset_index()
# Remove low counts
qtiles = qtiles[~qtiles["tmpf"].isin(drops)]
# compute dewpoint now
qtiles["dwpf"] = (
mcalc.dewpoint(qtiles["vapor_pressure"].values * units("kPa"))
.to(units("degF"))
.m
)
# compute RH again
qtiles["relh"] = (
mcalc.relative_humidity_from_dewpoint(
qtiles["tmpf"].values * units("degF"),
qtiles["dwpf"].values * units("degF"),
)
* 100.0
)
(fig, ax) = plt.subplots(1, 1, figsize=(8, 6))
means = qtiles[qtiles["level_1"] == 0.5]
for l0, l1, color in zip(
[0.05, 0.25], [0.95, 0.75], ["lightgreen", "violet"]
):
ax.fill_between(
qtiles[qtiles["level_1"] == l0]["tmpf"].values,
qtiles[qtiles["level_1"] == l0]["dwpf"].values,
qtiles[qtiles["level_1"] == l1]["dwpf"].values,
color=color,
label="%.0f-%.0f %%tile" % (l0 * 100, l1 * 100),
)
ax.plot(
means["tmpf"].values, means["dwpf"].values, c="blue", lw=3, label="Avg"
)
ax.grid(True, zorder=11)
ab = ctx["_nt"].sts[station]["archive_begin"]
if ab is None:
raise NoDataFound("Unknown station metadata.")
ax.set_title(
(
"%s [%s]\nDew Point Distribution by Air Temperature (month=%s) "
"(%s-%s), n=%.0f\n"
"(must have 6+ hourly observations at the given temperature)"
)
% (
ctx["_nt"].sts[station]["name"],
station,
month.upper(),
ab.year,
datetime.datetime.now().year,
len(df.index),
),
size=10,
)
xmin, xmax = means["tmpf"].min() - 2, means["tmpf"].max() + 2
ax.plot([xmin, xmax], [xmin, xmax], color="tan", lw=1.5)
ax.legend(loc=4, ncol=3)
ax.set_ylabel("Dew Point [F]")
y2 = ax.twinx()
y2.plot(means["tmpf"].values, means["relh"].values, color="k")
y2.set_ylabel("Relative Humidity [%] (black line)")
y2.set_yticks([0, 5, 10, 25, 50, 75, 90, 95, 100])
y2.set_ylim(0, 100)
ax.set_ylim(xmin, xmax)
ax.set_xlim(xmin, xmax)
ax.set_xlabel(r"Air Temperature $^\circ$F")
return fig, means[["tmpf", "dwpf", "relh"]]
if __name__ == "__main__":
plotter(dict(month="nov"))
|
mit
|
mrshu/scikit-learn
|
sklearn/datasets/base.py
|
2
|
17317
|
"""
Base IO code for all datasets
"""
# Copyright (c) 2007 David Cournapeau <[email protected]>
# 2010 Fabian Pedregosa <[email protected]>
# 2010 Olivier Grisel <[email protected]>
# License: Simplified BSD
import os
import csv
import shutil
from os import environ
from os.path import dirname
from os.path import join
from os.path import exists
from os.path import expanduser
from os.path import isdir
from os import listdir
from os import makedirs
import numpy as np
from ..utils import check_random_state
class Bunch(dict):
"""Container object for datasets: dictionary-like object that
exposes its keys as attributes."""
def __init__(self, **kwargs):
dict.__init__(self, kwargs)
self.__dict__ = self
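# Minimal usage sketch for Bunch, kept as an uncalled helper so importing the
# module stays side-effect free: keys and attributes share one dict underneath,
# so new attributes also show up as keys.
def _bunch_usage_sketch():
    """
    >>> b = Bunch(data=[1, 2, 3], target_names=['a', 'b'])
    >>> b.data is b['data']
    True
    >>> b.description = 'attributes and keys stay in sync'
    >>> b['description']
    'attributes and keys stay in sync'
    """
    pass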
def get_data_home(data_home=None):
"""Return the path of the scikit-learn data dir.
This folder is used by some large dataset loaders to avoid
downloading the data several times.
By default the data dir is set to a folder named 'scikit_learn_data'
in the user home folder.
    Alternatively, it can be set by the 'SCIKIT_LEARN_DATA' environment
    variable or programmatically by giving an explicit folder path. The
    '~' symbol is expanded to the user home folder.
If the folder does not already exist, it is automatically created.
"""
if data_home is None:
data_home = environ.get('SCIKIT_LEARN_DATA',
join('~', 'scikit_learn_data'))
data_home = expanduser(data_home)
if not exists(data_home):
makedirs(data_home)
return data_home
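# Usage sketch for get_data_home (uncalled; the paths shown are hypothetical
# examples of what a user might see, not values produced by this module).
def _data_home_usage_sketch():
    """
    >>> get_data_home()                      # doctest: +SKIP
    '/home/user/scikit_learn_data'
    >>> get_data_home('~/my_sklearn_cache')  # doctest: +SKIP
    '/home/user/my_sklearn_cache'
    """
    pass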
def clear_data_home(data_home=None):
"""Delete all the content of the data home cache."""
data_home = get_data_home(data_home)
shutil.rmtree(data_home)
def load_files(container_path, description=None, categories=None,
               load_content=True, shuffle=True, charset=None,
               charset_error='strict', random_state=0):
"""Load text files with categories as subfolder names.
Individual samples are assumed to be files stored a two levels folder
structure such as the following:
container_folder/
category_1_folder/
file_1.txt
file_2.txt
...
file_42.txt
category_2_folder/
file_43.txt
file_44.txt
...
    The folder names are used as supervised signal label names. The individual
    file names are not important.
This function does not try to extract features into a numpy array or
scipy sparse matrix. In addition, if load_content is false it
does not try to load the files in memory.
To use utf-8 text files in a scikit-learn classification or clustering
algorithm you will first need to use the `sklearn.features.text`
module to build a feature extraction transformer that suits your
problem.
    Similar feature extractors should be built for other kinds of unstructured
    data input such as images, audio, video, ...
Parameters
----------
container_path : string or unicode
Path to the main folder holding one subfolder per category
description: string or unicode, optional (default=None)
A paragraph describing the characteristic of the dataset: its source,
reference, etc.
categories : A collection of strings or None, optional (default=None)
If None (default), load all the categories.
If not None, list of category names to load (other categories ignored).
load_content : boolean, optional (default=True)
        Whether or not to load the content of the different files. If
        true, a 'data' attribute containing the text information is present
        in the returned data structure. If not, a 'filenames' attribute
        gives the path to the files.
charset : string or None (default is None)
If None, do not try to decode the content of the files (e.g. for
images or other non-text content).
If not None, charset to use to decode text files if load_content is
True.
charset_error: {'strict', 'ignore', 'replace'}
Instruction on what to do if a byte sequence is given to analyze that
contains characters not of the given `charset`. By default, it is
'strict', meaning that a UnicodeDecodeError will be raised. Other
values are 'ignore' and 'replace'.
shuffle : bool, optional (default=True)
Whether or not to shuffle the data: might be important for models that
make the assumption that the samples are independent and identically
distributed (i.i.d.), such as stochastic gradient descent.
random_state : int, RandomState instance or None, optional (default=0)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are: either
        'data', the raw text data to learn, or 'filenames', the files
holding it, 'target', the classification labels (integer index),
'target_names', the meaning of the labels, and 'DESCR', the full
description of the dataset.
"""
target = []
target_names = []
filenames = []
folders = [f for f in sorted(listdir(container_path))
if isdir(join(container_path, f))]
if categories is not None:
folders = [f for f in folders if f in categories]
for label, folder in enumerate(folders):
target_names.append(folder)
folder_path = join(container_path, folder)
documents = [join(folder_path, d)
for d in sorted(listdir(folder_path))]
target.extend(len(documents) * [label])
filenames.extend(documents)
# convert to array for fancy indexing
filenames = np.array(filenames)
target = np.array(target)
if shuffle:
random_state = check_random_state(random_state)
indices = np.arange(filenames.shape[0])
random_state.shuffle(indices)
filenames = filenames[indices]
target = target[indices]
if load_content:
data = [open(filename).read() for filename in filenames]
if charset is not None:
            data = [d.decode(charset, charset_error) for d in data]
return Bunch(data=data,
filenames=filenames,
target_names=target_names,
target=target,
DESCR=description)
return Bunch(filenames=filenames,
target_names=target_names,
target=target,
DESCR=description)
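# Usage sketch for load_files (uncalled; the directory layout and category
# names are hypothetical -- any folder with one subfolder per class works).
def _load_files_usage_sketch():
    """
    >>> dataset = load_files('/data/reviews',             # doctest: +SKIP
    ...                      categories=['pos', 'neg'],
    ...                      charset='utf-8')
    >>> dataset.target_names                              # doctest: +SKIP
    ['neg', 'pos']
    >>> len(dataset.data) == len(dataset.target)          # doctest: +SKIP
    True
    """
    pass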
def load_iris():
"""Load and return the iris dataset (classification).
The iris dataset is a classic and very easy multi-class classification
dataset.
================= ==============
Classes 3
Samples per class 50
Samples total 150
Dimensionality 4
Features real, positive
================= ==============
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
'data', the data to learn, 'target', the classification labels,
'target_names', the meaning of the labels, 'feature_names', the
meaning of the features, and 'DESCR', the
full description of the dataset.
Examples
--------
Let's say you are interested in the samples 10, 25, and 50, and want to
know their class name.
>>> from sklearn.datasets import load_iris
>>> data = load_iris()
>>> data.target[[10, 25, 50]]
array([0, 0, 1])
>>> list(data.target_names)
['setosa', 'versicolor', 'virginica']
"""
module_path = dirname(__file__)
data_file = csv.reader(open(join(module_path, 'data', 'iris.csv')))
fdescr = open(join(module_path, 'descr', 'iris.rst'))
temp = next(data_file)
n_samples = int(temp[0])
n_features = int(temp[1])
target_names = np.array(temp[2:])
data = np.empty((n_samples, n_features))
target = np.empty((n_samples,), dtype=np.int)
for i, ir in enumerate(data_file):
data[i] = np.asarray(ir[:-1], dtype=np.float)
target[i] = np.asarray(ir[-1], dtype=np.int)
return Bunch(data=data, target=target,
target_names=target_names,
DESCR=fdescr.read(),
feature_names=['sepal length (cm)', 'sepal width (cm)',
'petal length (cm)', 'petal width (cm)'])
def load_digits(n_class=10):
"""Load and return the digits dataset (classification).
Each datapoint is a 8x8 image of a digit.
================= ==============
Classes 10
Samples per class ~180
Samples total 1797
Dimensionality 64
Features integers 0-16
================= ==============
Parameters
----------
n_class : integer, between 0 and 10, optional (default=10)
The number of classes to return.
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
'data', the data to learn, 'images', the images corresponding
to each sample, 'target', the classification labels for each
sample, 'target_names', the meaning of the labels, and 'DESCR',
the full description of the dataset.
Examples
--------
To load the data and visualize the images::
>>> from sklearn.datasets import load_digits
>>> digits = load_digits()
>>> print digits.data.shape
(1797, 64)
>>> import pylab as pl #doctest: +SKIP
>>> pl.gray() #doctest: +SKIP
>>> pl.matshow(digits.images[0]) #doctest: +SKIP
>>> pl.show() #doctest: +SKIP
"""
module_path = dirname(__file__)
data = np.loadtxt(join(module_path, 'data', 'digits.csv.gz'),
delimiter=',')
descr = open(join(module_path, 'descr', 'digits.rst')).read()
target = data[:, -1]
flat_data = data[:, :-1]
images = flat_data.view()
images.shape = (-1, 8, 8)
if n_class < 10:
idx = target < n_class
flat_data, target = flat_data[idx], target[idx]
images = images[idx]
return Bunch(data=flat_data,
target=target.astype(np.int),
target_names=np.arange(10),
images=images,
DESCR=descr)
def load_diabetes():
"""Load and return the diabetes dataset (regression).
============== ==================
Samples total 442
Dimensionality 10
Features real, -.2 < x < .2
Targets integer 25 - 346
============== ==================
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
'data', the data to learn and 'target', the regression target for each
sample.
"""
base_dir = join(dirname(__file__), 'data')
data = np.loadtxt(join(base_dir, 'diabetes_data.csv.gz'))
target = np.loadtxt(join(base_dir, 'diabetes_target.csv.gz'))
return Bunch(data=data, target=target)
def load_linnerud():
"""Load and return the linnerud dataset (multivariate regression).
Samples total: 20
Dimensionality: 3 for both data and targets
Features: integer
Targets: integer
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are: 'data' and
'targets', the two multivariate datasets, with 'data' corresponding to
the exercise and 'targets' corresponding to the physiological
measurements, as well as 'feature_names' and 'target_names'.
"""
base_dir = join(dirname(__file__), 'data/')
# Read data
data_exercise = np.loadtxt(base_dir + 'linnerud_exercise.csv', skiprows=1)
data_physiological = np.loadtxt(base_dir + 'linnerud_physiological.csv',
skiprows=1)
# Read header
with open(base_dir + 'linnerud_exercise.csv') as f:
header_exercise = f.readline().split()
with open(base_dir + 'linnerud_physiological.csv') as f:
header_physiological = f.readline().split()
with open(dirname(__file__) + '/descr/linnerud.rst') as f:
descr = f.read()
return Bunch(data=data_exercise, feature_names=header_exercise,
target=data_physiological,
target_names=header_physiological,
DESCR=descr)
def load_boston():
"""Load and return the boston house-prices dataset (regression).
============== ==============
Samples total 506
Dimensionality 13
Features real, positive
Targets real 5. - 50.
============== ==============
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
'data', the data to learn, 'target', the regression targets,
'target_names', the meaning of the labels, and 'DESCR', the
full description of the dataset.
Examples
--------
>>> from sklearn.datasets import load_boston
>>> boston = load_boston()
>>> print boston.data.shape
(506, 13)
"""
module_path = dirname(__file__)
data_file = csv.reader(open(join(module_path, 'data',
'boston_house_prices.csv')))
fdescr = open(join(module_path, 'descr', 'boston_house_prices.rst'))
    temp = next(data_file)
n_samples = int(temp[0])
n_features = int(temp[1])
data = np.empty((n_samples, n_features))
target = np.empty((n_samples,))
    temp = next(data_file)  # names of features
feature_names = np.array(temp)
for i, d in enumerate(data_file):
data[i] = np.asarray(d[:-1], dtype=np.float)
target[i] = np.asarray(d[-1], dtype=np.float)
return Bunch(data=data,
target=target,
feature_names=feature_names,
DESCR=fdescr.read())
def load_sample_images():
"""Load sample images for image manipulation.
    Loads both ``china`` and ``flower``.
Returns
-------
data : Bunch
Dictionary-like object with the following attributes :
'images', the two sample images, 'filenames', the file
names for the images, and 'DESCR'
the full description of the dataset.
Examples
--------
To load the data and visualize the images:
>>> from sklearn.datasets import load_sample_images
>>> dataset = load_sample_images() #doctest: +SKIP
>>> len(dataset.images) #doctest: +SKIP
2
>>> first_img_data = dataset.images[0] #doctest: +SKIP
>>> first_img_data.shape #doctest: +SKIP
(427, 640, 3)
>>> first_img_data.dtype #doctest: +SKIP
dtype('uint8')
"""
# Try to import imread from scipy. We do this lazily here to prevent
# this module from depending on PIL.
try:
try:
from scipy.misc import imread
except ImportError:
from scipy.misc.pilutil import imread
except ImportError:
raise ImportError("The Python Imaging Library (PIL) "
"is required to load data from jpeg files")
module_path = join(dirname(__file__), "images")
with open(join(module_path, 'README.txt')) as f:
descr = f.read()
filenames = [join(module_path, filename)
for filename in os.listdir(module_path)
if filename.endswith(".jpg")]
# Load image data for each image in the source folder.
images = [imread(filename) for filename in filenames]
return Bunch(images=images,
filenames=filenames,
DESCR=descr)
def load_sample_image(image_name):
"""Load the numpy array of a single sample image
Parameters
-----------
image_name: {`china.jpg`, `flower.jpg`}
The name of the sample image loaded
Returns
-------
img: 3D array
The image as a numpy array: height x width x color
Examples
---------
>>> from sklearn.datasets import load_sample_image
>>> china = load_sample_image('china.jpg') # doctest: +SKIP
>>> china.dtype # doctest: +SKIP
dtype('uint8')
>>> china.shape # doctest: +SKIP
(427, 640, 3)
>>> flower = load_sample_image('flower.jpg') # doctest: +SKIP
>>> flower.dtype # doctest: +SKIP
dtype('uint8')
>>> flower.shape # doctest: +SKIP
(427, 640, 3)
"""
images = load_sample_images()
index = None
for i, filename in enumerate(images.filenames):
if filename.endswith(image_name):
index = i
break
if index is None:
raise AttributeError("Cannot find sample image: %s" % image_name)
return images.images[index]
|
bsd-3-clause
|
myyc/cruijff
|
cruijff/espn/dbutils.py
|
1
|
6400
|
import getpass
import logging as log
import pandas as pd
from sqlalchemy import *
from cruijff.constants import YEAR
from .getters import get_comps, get_clubs, get_games
def _get_engine():
u = "mysql+mysqldb://{user}@localhost/espn?charset=utf8"
return create_engine(u.format(user=getpass.getuser()))
def _get_md():
return MetaData(_get_engine())
def eq(q):
return pd.read_sql(q, _get_engine())
def connect():
return _get_engine().begin()
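# Uncalled usage sketch: eq() reads an ad-hoc query straight into a pandas
# DataFrame through the shared engine. The query text here is illustrative; it
# only assumes the espn.comps table created and filled by comps_table() below.
def _eq_usage_sketch():
    return eq("select id, tid, name, league from espn.comps")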
def any_table(t, action="get"):
e = _get_engine()
m = MetaData(e)
name = t.name
if action == "drop":
if e.has_table(name):
Table(name, m).drop()
else:
raise KeyError("Table '{}' not found".format(name))
if action == "create":
t.create(e)
if action == "get" or action == "create":
if e.has_table(name):
return Table(name, m)
else:
raise KeyError("Table '{}' not found".format(name))
def comps_table(action="get"):
m = _get_md()
t = Table("comps", m,
Column("id", Integer, primary_key=True,
nullable=False, autoincrement=False),
Column("tid", Text),
Column("name", Text),
Column("league", Boolean),
Column("href", Text),
) if action == "create" else Table("comps", m)
if action == "fill":
with _get_engine().begin() as conn:
conn.execute("delete from espn.comps")
leagues = get_comps(fmt="sql")["data"]
cups = get_comps("cups", fmt="sql")["data"]
q = "insert into espn.comps values (%s, %s, %s, %s, %s)"
for r in leagues + cups:
try:
conn.execute(q, r)
except Exception as e:
log.warning(e)
pass
else:
return any_table(t, action)
def clubs_table(action="get", league=None, year=YEAR):
m = _get_md()
t = Table("clubs", m,
Column("id", Integer, primary_key=True,
nullable=False, autoincrement=False),
Column("tid", Text),
Column("name", Text),
Column("href", Text),
) if action == "create" else Table("clubs", m)
t2 = Table("comps_year", m,
Column("id", Integer, nullable=False, autoincrement=False),
Column("lid", Integer, nullable=False),
Column("year", Integer, nullable=False)
)
if action == "fill":
if league is None:
raise ValueError("'league' can not be empty for 'fill'.")
with _get_engine().begin() as conn:
d = get_clubs(fmt="sql", year=year, league=league)
q = "insert into espn.clubs values (%s, %s, %s, %s)"
q2 = "insert into espn.comps_year values (%s, %s, %s)"
for r in d["data"]:
try:
conn.execute(q2, *(r[0], league, year))
conn.execute(q, r)
except Exception as e:
log.warning(e)
pass
else:
return any_table(t, action)
def games_table(action="get", lid=None, cid=None, year=YEAR):
m = _get_md()
t = Table("games", m,
Column("id", Integer, primary_key=True,
nullable=False, autoincrement=False),
Column("time", DateTime),
Column("status", Text(length=16)),
Column("year", Integer),
Column("comp_id", Integer),
Column("comp_tid", Text),
Column("comp_name", Text),
Column("home_id", Integer),
Column("home_name", Text),
Column("away_id", Integer),
Column("away_name", Text),
Column("home_score", Integer),
Column("away_score", Integer),
Column("home_score_pens", Integer),
Column("away_score_pens", Integer),
) if action == "create" else Table("games", m)
if action == "fill":
if lid is None and cid is None:
raise ValueError("I need at least 'lid' or 'cid'.")
with _get_engine().begin() as conn:
if cid is None:
ids = cids(lid=lid, year=year)
if len(ids) == 0:
raise ValueError("No clubs found for year {}. "
"Fill the table maybe?".format(year))
elif lid is None:
ids = [cid]
lid = 0
for cid in ids:
conn.execute("delete from espn.games where (away_id = %s "
"or home_id = %s) and comp_id = %s "
"and status is null and year = %s", cid, cid,
lid, year)
d = get_games(fmt="sql", year=year, cid=cid, lid=lid)
q = ("insert into espn.games values (%s, %s, %s, %s, %s, %s,"
" %s, %s, %s, %s, %s, %s, %s, %s, %s)")
for r in d["data"]:
try:
conn.execute(q, r)
except Exception as e:
log.warning(e)
pass
if year != YEAR:
conn.execute("delete from espn.games where year = %s and "
"status is null", year)
else:
return any_table(t, action)
def gids(lid, cid=None, year=YEAR):
with _get_engine().begin() as conn:
if cid is None:
s = text("select id from espn.games "
"where comp_id = :c and status is not null "
"and year = :y")
return {i[0] for i in conn.execute(s, c=lid, y=year).fetchall()}
else:
s = text("select id from espn.games "
"where (away_id = :p or home_id = :p) and comp_id = :c "
"and status is not null and year = :y")
return {i[0] for i in conn.execute(s, p=cid, c=lid,
y=year).fetchall()}
def cids(lid, year=YEAR):
with _get_engine().begin() as conn:
s = text("select id from espn.comps_year "
"where year = :y and lid = :c")
return {i[0] for i in conn.execute(s, c=lid, y=year).fetchall()}
|
bsd-3-clause
|
fitzgen/servo
|
tests/heartbeats/process_logs.py
|
139
|
16143
|
#!/usr/bin/env python
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import argparse
import matplotlib.pyplot as plt
import numpy as np
import os
from os import path
import sys
import warnings
HB_LOG_IDX_START_TIME = 7
HB_LOG_IDX_END_TIME = HB_LOG_IDX_START_TIME + 1
HB_LOG_IDX_START_ENERGY = 14
HB_LOG_IDX_END_ENERGY = HB_LOG_IDX_START_ENERGY + 1
ENERGY_PROFILER_NAME = 'ApplicationHeartbeat'
SUMMARY_OUTPUT = "summary.txt"
SUMMARY_TIME_IDX = 8
SUMMARY_ENERGY_IDX = SUMMARY_TIME_IDX + 1
SUMMARY_POWER_IDX = SUMMARY_ENERGY_IDX + 1
def autolabel(rects, ax):
"""Attach some text labels.
"""
for rect in rects:
ax.text(rect.get_x() + rect.get_width() / 2., 1.05 * rect.get_height(), '', ha='center', va='bottom')
def plot_raw_totals(config, plot_data, max_time, max_time_std, max_energy, max_energy_std, output_dir, normalize):
"""Plot the raw totals for a configuration.
Keyword arguments:
config -- configuration name
plot_data -- (profiler name, total_time, total_time_std, total_energy, total_energy_std)
max_time, max_time_std, max_energy, max_energy_std -- single values
normalize -- True/False
"""
plot_data = sorted(plot_data)
keys = [p for (p, tt, tts, te, tes) in plot_data]
total_times = [tt for (p, tt, tts, te, tes) in plot_data]
total_times_std = [tts for (p, tt, tts, te, tes) in plot_data]
total_energies = [te for (p, tt, tts, te, tes) in plot_data]
total_energies_std = [tes for (p, tt, tts, te, tes) in plot_data]
fig, ax1 = plt.subplots()
ind = np.arange(len(keys)) # the x locations for the groups
width = 0.35 # the width of the bars
# add some text for labels, title and axes ticks
ax1.set_title('Time/Energy Data for Configuration ' + config)
ax1.set_xticks(ind + width)
ax1.set_xticklabels(keys, rotation=45)
fig.set_tight_layout(True)
fig.set_size_inches(len(plot_data) / 1.5, 8)
ax2 = ax1.twinx()
# Normalize
if normalize:
total_times_std /= np.sum(total_times)
total_times /= np.sum(total_times)
total_energies_std /= np.sum(total_energies)
total_energies /= np.sum(total_energies)
ax1.set_ylabel('Time (Normalized)')
ax2.set_ylabel('Energy (Normalized)')
else:
        # convert times from ns to ms and energies from uJ to J
total_times_std /= np.array(1000000.0)
total_times /= np.array(1000000.0)
total_energies_std /= np.array(1000000.0)
total_energies /= np.array(1000000.0)
ax1.set_ylabel('Time (ms)')
ax2.set_ylabel('Energy (Joules)')
rects1 = ax1.bar(ind, total_times, width, color='r', yerr=total_times_std)
rects2 = ax2.bar(ind + width, total_energies, width, color='y', yerr=total_energies_std)
ax1.legend([rects1[0], rects2[0]], ['Time', 'Energy'])
# set axis
x1, x2, y1, y2 = plt.axis()
if normalize:
ax1.set_ylim(ymin=0, ymax=1)
ax2.set_ylim(ymin=0, ymax=1)
else:
ax1.set_ylim(ymin=0, ymax=((max_time + max_time_std) * 1.25 / 1000000.0))
ax2.set_ylim(ymin=0, ymax=((max_energy + max_energy_std) * 1.25 / 1000000.0))
autolabel(rects1, ax1)
autolabel(rects2, ax2)
# plt.show()
plt.savefig(path.join(output_dir, config + ".png"))
plt.close(fig)
def create_raw_total_data(config_data):
"""Get the raw data to plot for a configuration
Return: [(profiler, time_mean, time_stddev, energy_mean, energy_stddev)]
Keyword arguments:
config_data -- (trial, trial_data)
"""
# We can't assume that the same number of heartbeats are always issued across trials
# key: profiler name; value: list of timing sums for each trial
profiler_total_times = {}
# key: profiler name; value: list of energy sums for each trial
profiler_total_energies = {}
for (t, td) in config_data:
for (profiler, ts, te, es, ee) in td:
# sum the total times and energies for each profiler in this trial
total_time = np.sum(te - ts)
total_energy = np.sum(ee - es)
# add to list to be averaged later
time_list = profiler_total_times.get(profiler, [])
time_list.append(total_time)
profiler_total_times[profiler] = time_list
energy_list = profiler_total_energies.get(profiler, [])
energy_list.append(total_energy)
profiler_total_energies[profiler] = energy_list
# Get mean and stddev for time and energy totals
return [(profiler,
np.mean(profiler_total_times[profiler]),
np.std(profiler_total_times[profiler]),
np.mean(profiler_total_energies[profiler]),
np.std(profiler_total_energies[profiler]))
for profiler in profiler_total_times.keys()]
def plot_all_raw_totals(config_list, output_dir):
"""Plot column charts of the raw total time/energy spent in each profiler category.
Keyword arguments:
config_list -- [(config, result of process_config_dir(...))]
output_dir -- where to write plots to
"""
raw_total_norm_out_dir = path.join(output_dir, 'raw_totals_normalized')
os.makedirs(raw_total_norm_out_dir)
raw_total_out_dir = path.join(output_dir, 'raw_totals')
os.makedirs(raw_total_out_dir)
# (name, (profiler, (time_mean, time_stddev, energy_mean, energy_stddev)))
raw_totals_data = [(config, create_raw_total_data(config_data)) for (config, config_data) in config_list]
mean_times = []
mean_times_std = []
mean_energies = []
mean_energies_std = []
for profiler_tup in [config_tup[1] for config_tup in raw_totals_data]:
for (p, tt, tts, te, tes) in profiler_tup:
mean_times.append(tt)
mean_times_std.append(tts)
mean_energies.append(te)
mean_energies_std.append(tes)
# get consistent max time/energy values across plots
max_t = np.max(mean_times)
max_t_std = np.max(mean_times_std)
max_e = np.max(mean_energies)
max_e_std = np.max(mean_energies_std)
[plot_raw_totals(data[0], data[1], max_t, max_t_std, max_e, max_e_std, raw_total_norm_out_dir, True)
for data in raw_totals_data]
[plot_raw_totals(data[0], data[1], max_t, max_t_std, max_e, max_e_std, raw_total_out_dir, False)
for data in raw_totals_data]
def plot_trial_time_series(config, trial, trial_data, max_end_time, max_power, output_dir):
"""Plot time series for a single trial.
Keyword arguments:
config -- the config name
trial -- the trial name
trial_data -- [(profiler, [start times], [end times], [start energies], [end energies])]
max_end_time -- single value to use as max X axis value (for consistency across trials)
output_dir -- the output directory
"""
# TODO: Some profilers may have parallel tasks - need to identify this on plots
max_end_time = max_end_time / 1000000.0
trial_data = sorted(trial_data)
fig, ax1 = plt.subplots()
keys = [p for (p, ts, te, es, ee) in trial_data]
# add some text for labels, title and axes ticks
ax1.set_title('Profiler Activity for ' + config + ', ' + trial)
ax1.set_xlabel('Time (ms)')
ax1.grid(True)
width = 8 # the width of the bars
ax1.set_yticks(10 * np.arange(1, len(keys) + 2))
ax1.set_yticklabels(keys)
ax1.set_ylim(ymin=0, ymax=((len(trial_data) + 1) * 10))
ax1.set_xlim(xmin=0, xmax=max_end_time)
fig.set_tight_layout(True)
fig.set_size_inches(16, len(trial_data) / 3)
i = 10
for (p, ts, te, es, ee) in trial_data:
xranges = [(ts[j] / 1000000.0, (te[j] - ts[j]) / 1000000.0) for j in xrange(len(ts))]
ax1.broken_barh(xranges, (i - 0.5 * width, width))
i += 10
# place a vbar at the final time for this trial
last_profiler_times = map(np.nanmax, filter(lambda x: len(x) > 0, [te for (p, ts, te, es, ee) in trial_data]))
plt.axvline(np.max(last_profiler_times) / 1000000.0, color='black')
power_times = []
power_values = []
for (p, ts, te, es, ee) in trial_data:
if p == ENERGY_PROFILER_NAME:
power_times = te / 1000000.0
power_values = (ee - es) / ((te - ts) / 1000.0)
ax2 = ax1.twinx()
ax2.set_xlim(xmin=0, xmax=max_end_time)
ax2.set_ylim(ymin=0, ymax=max_power)
ax2.set_ylabel('Power (Watts)')
ax2.plot(power_times, power_values, color='r')
# plt.show()
plt.savefig(path.join(output_dir, "ts_" + config + "_" + trial + ".png"))
plt.close(fig)
def hb_energy_times_to_power(es, ee, ts, te):
"""Compute power from start and end energy and times.
Return: power values
"""
return (ee - es) / ((te - ts) / 1000.0)
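# Uncalled sanity-check sketch for the power formula above. Assuming heartbeat
# timestamps are nanoseconds and energy readings are microjoules (the ns->ms
# and uJ->J conversions elsewhere in this script suggest exactly that),
# (ee - es) / ((te - ts) / 1000.0) comes out in Watts:
# 1000 uJ spent over 2000000 ns (= 2000 us) -> 0.5 uJ/us = 0.5 W.
def _power_units_sketch():
    return hb_energy_times_to_power(np.array([0.0]), np.array([1000.0]),
                                    np.array([0.0]), np.array([2000000.0]))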
def plot_all_time_series(config_list, output_dir):
"""Plot column charts of the raw total time/energy spent in each profiler category.
Keyword arguments:
config_list -- [(config, result of process_config_dir(...))]
output_dir -- where to write plots to
"""
time_series_out_dir = path.join(output_dir, 'time_series')
os.makedirs(time_series_out_dir)
max_end_times = []
max_power_values = []
for (c, cd) in config_list:
for (t, td) in cd:
trial_max_end_times = map(np.nanmax, filter(lambda x: len(x) > 0, [te for (p, ts, te, es, ee) in td]))
max_end_times.append(np.nanmax(trial_max_end_times))
for (p, ts, te, es, ee) in td:
# We only care about the energy profiler (others aren't reliable for instant power anyway)
if p == ENERGY_PROFILER_NAME and len(te) > 0:
max_power_values.append(np.nanmax(hb_energy_times_to_power(es, ee, ts, te)))
max_time = np.nanmax(max_end_times)
max_power = np.nanmax(np.array(max_power_values)) * 1.2 # leave a little space at the top
for (config, config_data) in config_list:
[plot_trial_time_series(config, trial, trial_data, max_time, max_power, time_series_out_dir)
for (trial, trial_data) in config_data]
def read_heartbeat_log(profiler_hb_log):
"""Read a heartbeat log file.
Return: (profiler name, [start times], [end times], [start energies], [end energies], [instant powers])
Keyword arguments:
profiler_hb_log -- the file to read
"""
with warnings.catch_warnings():
try:
warnings.simplefilter("ignore")
time_start, time_end, energy_start, energy_end = \
np.loadtxt(profiler_hb_log,
dtype=np.dtype('uint64'),
skiprows=1,
usecols=(HB_LOG_IDX_START_TIME,
HB_LOG_IDX_END_TIME,
HB_LOG_IDX_START_ENERGY,
HB_LOG_IDX_END_ENERGY),
unpack=True,
ndmin=1)
except ValueError:
time_start, time_end, energy_start, energy_end = [], [], [], []
name = path.split(profiler_hb_log)[1].split('-')[1].split('.')[0]
return (name,
np.atleast_1d(time_start),
np.atleast_1d(time_end),
np.atleast_1d(energy_start),
np.atleast_1d(energy_end))
def process_trial_dir(trial_dir):
"""Process trial directory.
Return: [(profiler name, [start times], [end times], [start energies], [end energies])]
Time and energy are normalized to 0 start values.
Keyword arguments:
trial_dir -- the directory for this trial
"""
log_data = map(lambda h: read_heartbeat_log(path.join(trial_dir, h)),
filter(lambda f: f.endswith(".log"), os.listdir(trial_dir)))
# Find the earliest timestamps and energy readings
min_t = np.nanmin(map(np.nanmin, filter(lambda x: len(x) > 0, [ts for (profiler, ts, te, es, ee) in log_data])))
min_e = np.nanmin(map(np.nanmin, filter(lambda x: len(x) > 0, [es for (profiler, ts, te, es, ee) in log_data])))
# Normalize timing/energy data to start values of 0
return [(profiler, ts - min_t, te - min_t, es - min_e, ee - min_e) for (profiler, ts, te, es, ee) in log_data]
def process_config_dir(config_dir):
"""Process a configuration directory.
Return: [(trial, [(profiler name, [start times], [end times], [start energies], [end energies])])]
Keyword arguments:
config_dir -- the directory for this configuration - contains subdirectories for each trial
"""
return [(trial_dir, process_trial_dir(path.join(config_dir, trial_dir))) for trial_dir in os.listdir(config_dir)]
def process_logs(log_dir):
"""Process log directory.
Return: [(config, [(trial, [(profiler name, [start times], [end times], [start energies], [end energies])])])]
Keyword arguments:
log_dir -- the log directory to process - contains subdirectories for each configuration
"""
return [((config_dir.split('_')[1], process_config_dir(path.join(log_dir, config_dir))))
for config_dir in os.listdir(log_dir)]
def find_best_executions(log_dir):
"""Get the best time, energy, and power from the characterization summaries.
Return: ((config, trial, min_time), (config, trial, min_energy), (config, trial, min_power))
Keyword arguments:
results -- the results from process_logs(...).
"""
DEFAULT = ('', '', 1000000000.0)
min_time = DEFAULT
min_energy = DEFAULT
min_power = DEFAULT
for config_dir in os.listdir(log_dir):
for trial_dir in os.listdir(path.join(log_dir, config_dir)):
with open(path.join(log_dir, config_dir, trial_dir, SUMMARY_OUTPUT), "r") as s:
lines = s.readlines()
time = float(lines[SUMMARY_TIME_IDX].split(':')[1])
energy = int(lines[SUMMARY_ENERGY_IDX].split(':')[1])
power = float(lines[SUMMARY_POWER_IDX].split(':')[1])
if time < min_time[2]:
min_time = (config_dir, trial_dir, time)
if energy < min_energy[2]:
min_energy = (config_dir, trial_dir, energy)
                if power < min_power[2]:
min_power = (config_dir, trial_dir, power)
return (min_time, min_energy, min_power)
def main():
"""This script processes the log files from the "characterize.py" script and produces visualizations.
"""
# Default log directory
directory = 'heartbeat_logs'
# Default output directory
output_dir = 'plots'
# Default android
android = False
# Parsing the input of the script
parser = argparse.ArgumentParser(description="Process Heartbeat log files from characterization")
parser.add_argument("-d", "--directory",
default=directory,
help="Heartbeat log directory \"-d heartbeat_logs\"")
parser.add_argument("-o", "--output",
default=output_dir,
help="Specify the log output directory, for example \"-o plots\"")
parser.add_argument("--android",
action="store_true",
dest="android",
default=False,
help="Specify if processing results from Android")
args = parser.parse_args()
if args.directory:
directory = args.directory
if args.output:
output_dir = args.output
if args.android:
android = args.android
if not os.path.exists(directory):
print "Input directory does not exist: " + directory
sys.exit(1)
if os.path.exists(output_dir):
print "Output directory already exists: " + output_dir
sys.exit(1)
res = process_logs(directory)
if not android:
best = find_best_executions(directory)
print 'Best time:', best[0]
print 'Best energy:', best[1]
print 'Best power:', best[2]
os.makedirs(output_dir)
plot_all_raw_totals(res, output_dir)
plot_all_time_series(res, output_dir)
if __name__ == "__main__":
main()
|
mpl-2.0
|
mbaumBielefeld/popkin
|
examples/example_mmc_network.py
|
1
|
7580
|
#!/usr/local/bin/python
# -*- coding: utf-8 -*-
from popkin.model.mmc_network import MMCNetwork
from popkin.visualization.placefieldvisualizer import PlaceFieldVisualizer
from popkin.visualization.armestimationvisualizer import ArmEstimationVisualizer
from popkin.utility.misc import include_overwrite
from popkin.utility.data_generator import TrainingDataGenerator
import numpy as np
import pickle
import networkx as nx
import matplotlib.pyplot as plt
import argparse
import datetime
import random
parser = argparse.ArgumentParser(description='Plot Errors under varying parameters')
parser.add_argument('-configuration', action='store', dest='configuration')
parser.add_argument('-load', action='store', dest='load_filename')
parser.add_argument('-overwrite', action='store', dest='overwrite')
parser.add_argument('-noviz', action='store_true', dest='no_visualization')
parser.add_argument('-learn_inverse', action='store', type=int, dest='learn_inverse')
parser.add_argument('-learn_forward', action='store', type=int, dest='learn_forward')
parser.add_argument('-output_directory', action='store', dest='output_directory')
parser.add_argument("-v","--verbose", help="increase output verbosity",
action="store_true", dest='verbose')
args=parser.parse_args()
#Take the space divided list in args.overwrite.
#every 1st element is a configuration name
#every 2nd element is a variable name
#every 3rd element is a variable value
if hasattr(args,'configuration') and args.configuration != None:
splitted=[]
if hasattr(args,'overwrite') and args.overwrite != None:
splitted=args.overwrite.split(' ')
ow_conf_names=splitted[0::4]
ow_conf_variables=splitted[1::4]
ow_conf_types=splitted[2::4]
ow_conf_values=splitted[3::4]
overwrite=zip(ow_conf_names, ow_conf_variables, ow_conf_types, ow_conf_values)
#configuration_filename='configurations/2seg_stripe.pkl'
c=pickle.load(open(args.configuration))
include_overwrite('main',c,overwrite)
configname=args.configuration.split('/')[-1].split('.')[0]
mmc=MMCNetwork()
mmc.construct_from_configuration(configname,c,overwrite)
mmc.initialize()
else:
mmc=pickle.load(open(args.load_filename))
c=mmc.configuration
def set_evidence(representation, position):
mmc.evidences[representation]=position
mmc.stimulate()
pfv.refreshPlot()
aev.refreshPlot()
def simulate_one():
mmc.simulate(1)
pfv.refreshPlot()
aev.refreshPlot()
print "---End-Effector---"
seg_ends=[[0.0,0.0]]
    # estimate each segment end locally
for i_segment,name in enumerate(mmc.segment_names):
segment=mmc.representations[name]
seg_ends.append(segment.estimate())
seg_ends=np.array(seg_ends)
    # compute segment ends in global coordinates
for i in range(mmc.n_segments-1,-1,-1):
print i
seg_ends[i+1:,:]+=seg_ends[i,:]
target=mmc.evidences[mmc.end_effector]
estima=seg_ends[-1]
differ=np.sum((target-estima)**2.0)**0.5
grid_res=mmc.end_effector.estimation_grid_resolution
ee_radius=mmc.end_effector.radius_max
lower=ee_radius*2.0/float(grid_res)*0.5
print "Target:",target
print "Estima:",estima
print "Differ:",differ
print "Lower:",lower
def simulate_ten():
mmc.simulate(10)
pfv.refreshPlot()
aev.refreshPlot()
def simulate_hundred():
mmc.simulate(100)
pfv.refreshPlot()
aev.refreshPlot()
def clear_evidences():
mmc.evidences={}
def randomize_activations():
mmc.initialize()
def learn_forward_1():
for triangle_name in c['triangular_relations'].keys():
n_samples=100
n_epochs=1
transformation_specifier=triangle_name,'sum'
mmc.learn(transformation_specifier, n_samples, n_epochs)
#pfv.refreshPlot()
#aev.refreshPlot()
def showTrainingDataRandom():
data_generator=TrainingDataGenerator(mmc.representations['m0'],mmc.representations['s2'],mmc.representations['l0'])
s0_activations,s1_activations,m0_activations=data_generator.activation_data_with_repair_data(50, 'sum')
i_random=random.randint(0,50-1)
mmc.representations['m0'].activations[:]=s0_activations[i_random,:]
mmc.representations['s2'].activations[:]=s1_activations[i_random,:]
mmc.representations['l0'].activations[:]=m0_activations[i_random,:]
pfv.refreshPlot()
aev.refreshPlot()
def learn_forward_100():
for i in range(100):
if i%10==0: print i
learn_forward_1()
def learn_forward_1000():
for i in range(1000):
if i%100==0: print i
learn_forward_1()
def learn_forward_10000():
for i in range(10000):
if i%100==0: print i
learn_forward_1()
def learn_dynamically_1():
for triangle_name in c['triangular_relations'].keys():
n_timesteps=c['simdepth']
n_other_ideal_poses=3
mmc.learn_dynamically(triangle_name, n_other_ideal_poses, n_timesteps)
def learn_dynamically_100():
for i in range(100):
if i%10==0: print i
learn_dynamically_1()
def learn_dynamically_1000():
for i in range(1000):
if i%100==0: print i
learn_dynamically_1()
def learn_dynamically_10000():
for i in range(10000):
if i%100==0: print i
learn_dynamically_1()
def write_to_file():
output_dirname=args.output_directory
datestring=datetime.datetime.today().isoformat('_')
if args.load_filename:
specifier=args.load_filename.split('/')[-1].split('2015')[0]
else:
config_name=mmc.configname
datestring=datetime.datetime.today().isoformat('_')
overwrite_string=""
for ow in overwrite:
overwrite_string+=ow[0]+"-"+ow[1]+"-"+ow[3]
specifier=config_name+'_'+overwrite_string+'_'
    if not output_dirname.endswith('/'):
        output_dirname += '/'
output_filename=output_dirname+specifier+datestring+'.pkl'
print "WRITE to file: "+output_filename
with open(output_filename, 'w') as f:
pickle.dump(mmc, f, 0)
if hasattr(args, 'learn_inverse') and args.learn_inverse:
print "-Learning Inverse for epochs:",args.learn_inverse
for i in range(args.learn_inverse):
if i%100==0:
print i,args.overwrite
learn_dynamically_1()
if hasattr(args, 'learn_forward') and args.learn_forward:
print "-Learning Forward for epochs:",args.learn_forward
for i in range(args.learn_forward):
learn_forward_1()
if hasattr(args, 'output_directory') and args.output_directory and (args.learn_forward or args.learn_inverse):
write_to_file()
if not args.no_visualization:
pfv=PlaceFieldVisualizer(mmc.representations.values())
pfv.addCallbackClick(1,set_evidence)
pfv.addCallback('b',simulate_hundred)
pfv.addCallback('n',simulate_ten)
pfv.addCallback('m',simulate_one)
pfv.addCallback('r',randomize_activations)
pfv.addCallback('c',clear_evidences)
pfv.addCallback('l',learn_dynamically_1)
pfv.addCallback('j',learn_dynamically_100)
pfv.addCallback('h',learn_dynamically_1000)
pfv.addCallback('g',learn_dynamically_10000)
pfv.addCallback('p',learn_forward_1)
pfv.addCallback('i',learn_forward_100)
pfv.addCallback('u',learn_forward_1000)
pfv.addCallback('z',learn_forward_10000)
pfv.addCallback('w',write_to_file)
pfv.addCallback('x',showTrainingDataRandom)
pfv.refreshPlot()
aev=ArmEstimationVisualizer(mmc)
aev.refreshPlot()
#plt.figure()
#nx.draw(mmc.graph, with_labels=True)
plt.show()
|
gpl-2.0
|
smorad/ast119
|
hw5.py
|
1
|
2857
|
from numpy import *
import numpy as np  # the np.linalg.norm calls below need the explicit alias
from matplotlib.pyplot import *
import scipy.constants as sc
import copy
import scipy.integrate as integ
# test sun/earth with hw5(1.989e30,5.972e24,149.6e6,0.0167,1000)
def hw5(m1, m2, a, e, tmax, tstep=0.001, tplot=0.025, method='leapfrog'):
if method != 'leapfrog' and method != 'odeint':
print("That's not a method")
return()
# initialize commonly used variables
period = sqrt((4*(pi**2)*(a**3)) / (sc.G*(m1 + m2)))
dt = period*tstep
# initialize objects at time 0
q = m1 / m2
r0 = (1-e)*a/(1+q)
v0 = (1/(1+q))*sqrt((1+e)/(1-e))*sqrt(sc.G*(m1+m2)/a)
rv = array([r0, 0, 0, v0, -q*r0, 0, 0, -q*v0])
# set up figure
figure(1)
gca().set_aspect('equal')
xlim([-2*a, 2*a])
ylim([-2*a, 2*a])
rv_list = []
if method == 'leapfrog':
timeCounter = 0
frameCounter = 0
while timeCounter < tmax:
# plot positions if tplot time has passed
if frameCounter >= tplot:
frameCounter = 0
rv_list.append(copy.deepcopy(rv))
# calc positions
rv[0] = rv[0] + rv[2]*dt
rv[1] = rv[1] + rv[3]*dt
rv[4] = rv[4] + rv[6]*dt
rv[5] = rv[5] + rv[7]*dt
# calc acceleration
r = array([rv[0] - rv[4], rv[1] - rv[5]])
force = ((sc.G*m1*m2)/(np.linalg.norm(r)**2))*(r/np.linalg.norm(r))
# calc velocity
rv[2] = rv[2] - (force[0]/m1)*dt
rv[3] = rv[3] - (force[1]/m1)*dt
rv[6] = rv[6] + (force[0]/m2)*dt
rv[7] = rv[7] + (force[1]/m2)*dt
# increment counters
timeCounter += tstep
frameCounter += tstep
# plot final position
rv_list.append(copy.deepcopy(rv))
rv_list_plot = rv_list
else:
# odeint
rv_list = integ.odeint(deriv, rv, arange(0, tmax*period, dt), (m1, m2))
        # we integrated with a step of tstep, but only want to plot the state
        # every tplot interval, so thin the solution by an integer stride
        t_interval = int(round(tplot / tstep))
        rv_list_plot = rv_list[::t_interval]
# plot
for i in range(len(rv_list_plot)):
plot(rv_list_plot[i][0],rv_list_plot[i][1],'bo')
plot(rv_list_plot[i][4],rv_list_plot[i][5],'go')
draw()
def deriv(rv, dt, m1, m2):
# calc position deriv
rv_copy = zeros(8)
rv_copy[0] = rv[2]
rv_copy[1] = rv[3]
rv_copy[4] = rv[6]
rv_copy[5] = rv[7]
# calc velocity deriv
r = array([rv[0] - rv[4], rv[1] - rv[5]])
force = ((sc.G*m1*m2)/(np.linalg.norm(r)**2))*(r/np.linalg.norm(r))
rv_copy[2] = - (force[0]/m1)
rv_copy[3] = - (force[1]/m1)
rv_copy[6] = + (force[0]/m2)
rv_copy[7] = + (force[1]/m2)
return rv_copy
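# Quick-look entry point using the sun/earth test values from the comment at
# the top of this file; guarded so importing hw5 stays side-effect free.
# Note that tmax=1000 with the default tstep runs ~10^6 leapfrog steps.
if __name__ == '__main__':
    hw5(1.989e30, 5.972e24, 149.6e6, 0.0167, 1000)
    show()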
|
gpl-2.0
|
kenshay/ImageScripter
|
ProgramData/SystemFiles/Python/share/doc/networkx-2.2/examples/drawing/plot_sampson.py
|
5
|
1451
|
#!/usr/bin/env python
"""
=======
Sampson
=======
Sampson's monastery data.
Shows how to read data from a zip file and plot multiple frames.
"""
# Author: Aric Hagberg ([email protected])
# Copyright (C) 2010-2018 by
# Aric Hagberg <[email protected]>
# Dan Schult <[email protected]>
# Pieter Swart <[email protected]>
# All rights reserved.
# BSD license.
import zipfile
try:
from cStringIO import StringIO
except ImportError:
from io import BytesIO as StringIO
import matplotlib.pyplot as plt
import networkx as nx
zf = zipfile.ZipFile('sampson_data.zip') # zipfile object
e1 = StringIO(zf.read('samplike1.txt')) # read info file
e2 = StringIO(zf.read('samplike2.txt')) # read info file
e3 = StringIO(zf.read('samplike3.txt')) # read info file
G1 = nx.read_edgelist(e1, delimiter='\t')
G2 = nx.read_edgelist(e2, delimiter='\t')
G3 = nx.read_edgelist(e3, delimiter='\t')
pos = nx.spring_layout(G3, iterations=100)
plt.clf()
plt.subplot(221)
plt.title('samplike1')
nx.draw(G1, pos, node_size=50, with_labels=False)
plt.subplot(222)
plt.title('samplike2')
nx.draw(G2, pos, node_size=50, with_labels=False)
plt.subplot(223)
plt.title('samplike3')
nx.draw(G3, pos, node_size=50, with_labels=False)
plt.subplot(224)
plt.title('samplike1,2,3')
nx.draw(G3, pos, edgelist=list(G3.edges()), node_size=50, with_labels=False)
nx.draw_networkx_edges(G1, pos, alpha=0.25)
nx.draw_networkx_edges(G2, pos, alpha=0.25)
plt.show()
|
gpl-3.0
|
manulera/ModellingCourse
|
ReAct/Python/Example_Repressilator_time2stabilize.py
|
1
|
3825
|
import numpy as np
from Gilles import *
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
# Initial conditions
user_input = ['TetR_site', 1,
'TetR_site_b', 0,
'TetR_mRNA', 10,
'TetR_Prot', 0,
'TetR_Prot2', 0,
'LacI_site', 0,
'LacI_site_b', 1,
'LacI_mRNA', 0,
'LacI_Prot', 0,
'LacI_Prot2', 0,
'Gammacl_site', 0,
'Gammacl_site_b', 1,
'Gammacl_mRNA', 0,
'Gammacl_Prot', 0,
'Gammacl_Prot2', 0,
'GFP_site', 0,
'GFP_site_b', 1,
'GFP_mRNA', 0,
'GFP_Prot', 0
]
# Reaction template ((stoch_1,reactant_1,stoch_2,reactant_2),(stoch_1,product_1,stoch_2,product_2),k)
k = (100, 50, 400, 20, 0.3, 0.1, 0.3, 0.1)
reactions = (
(-1,'TetR_site'), (1, 'TetR_mRNA'), k[0] , # mRNA transcription
(1,'TetR_mRNA'),(), k[1], # mRNA degradation
(-1, 'TetR_mRNA'), (1, 'TetR_Prot'), k[2], # Translation
(1, 'TetR_Prot'), (), k[3], # Protein degradation
(2, 'TetR_Prot'), (1, 'TetR_Prot2'), k[4],
(1, 'TetR_Prot2'), (2, 'TetR_Prot'), k[5],
(1, 'TetR_Prot2',1,'Gammacl_site'), (1,'Gammacl_site_b'), k[6], # Binding of the repressor
(1, 'Gammacl_site_b'), (1, 'TetR_Prot2',1,'Gammacl_site'), k[7], # Unbinding of the repressor
# ------------------------------------------------------------------------------------------------------------------
(-1, 'Gammacl_site'), (1, 'Gammacl_mRNA'), k[0], # mRNA transcription
(1, 'Gammacl_mRNA'), (), k[1], # mRNA degradation
(-1, 'Gammacl_mRNA'), (1, 'Gammacl_Prot'), k[2], # Translation
(1, 'Gammacl_Prot'), (), k[3], # Protein degradation
(2, 'Gammacl_Prot'), (1, 'Gammacl_Prot2'), k[4],
(1, 'Gammacl_Prot2'), (2, 'Gammacl_Prot'), k[5],
(1, 'Gammacl_Prot2', 1, 'LacI_site'), (1, 'LacI_site_b'), k[6], # Binding of the repressor
(1, 'LacI_site_b'), (1, 'Gammacl_Prot2', 1, 'LacI_site'), k[7], # Unbinding of the repressor
# ------------------------------------------------------------------------------------------------------------------
(-1, 'LacI_site'), (1, 'LacI_mRNA'), k[0], # mRNA transcription
(1, 'LacI_mRNA'), (), k[1], # mRNA degradation
(-1, 'LacI_mRNA'), (1, 'LacI_Prot'), k[2], # Translation
(1, 'LacI_Prot'), (), k[3], # Protein degradation
(2, 'LacI_Prot'), (1, 'LacI_Prot2'), k[4],
(1, 'LacI_Prot2'), (2, 'LacI_Prot'), k[5],
(1, 'LacI_Prot2', 1, 'TetR_site'), (1, 'TetR_site_b'), k[6], # Binding of the repressor
(1, 'TetR_site_b'), (1, 'LacI_Prot2', 1, 'TetR_site'), k[7], # Unbinding of the repressor
# ------------------------------------------------------------------------------------------------------------------
(-1, 'GFP_site'), (1, 'GFP_mRNA'), k[0]*3, # mRNA transcription
(1, 'GFP_mRNA'), (), k[1], # mRNA degradation
(-1, 'GFP_mRNA'), (1, 'GFP_Prot'), k[2], # Translation
(1, 'GFP_Prot'), (), k[3], # Protein degradation
(1, 'TetR_Prot2',1,'GFP_site'), (1,'GFP_site_b'), k[4], # Binding of the repressor
(1, 'GFP_site_b'), (1, 'TetR_Prot2',1,'GFP_site'), k[5], # Unbinding of the repressor
)
# dt is the time step used for the deterministic calculation and the t grid below
dt=1
t = np.arange(0, 2000, dt)
(solution,(tgill, valsgill, _, _),rows,mode)=ReAct(user_input,reactions,t,mode=1)
Gillesplot(solution,t,tgill, valsgill,rows,mode,which2plot=['TetR_Prot','Gammacl_Prot','LacI_Prot','GFP_Prot'])
#plt.plot(valsgill[0][0,:], valsgill[0][1,:], linewidth=2)
fig = plt.figure()
ax = fig.gca(projection='3d')
plt.plot(solution[:,rows['TetR_Prot']],solution[:,rows['Gammacl_Prot']],solution[:,rows['LacI_Prot']])
plt.show()
|
gpl-3.0
|
CISprague/Astro.IQ
|
src/JIT_Trial.py
|
1
|
4531
|
''' -----------------------
Landing using JIT and PyGMO
Numba JIT is rather
restrictive in defining
classes, so functions are
defined extrenally. We use
here the Hermite-Simpson
seperated transcription.
------------------------'''
import numba
from numpy import *
from PyGMO.problem import base
from scipy.integrate import odeint
import matplotlib.pyplot as plt
# NOTE: This didn't work; should just finally migrate to C++
c1 = float64(44000)
c2 = float64(311.*9.81)
a = float64(0)
CD = float64(2.2)
rho = float64(1.271)
A = float64(0.01)
g0 = float64(9.81)
g = g0
sdim = 5
cdim = 2
nnodes = 100
nsegs = nnodes - 1
nlpdim = 1 + sdim + cdim + (sdim + cdim)*2*nsegs
condim = sdim*2 - 1 + sdim*2*nsegs
slb = array([-3000, 0, -200, -200, 10], float64)
sub = array([3000, 2000, 200, 200, 11000], float64)
tlb = float64(1)
tub = float64(200)
clb = array([0, 0], float64)
cub = array([1, 2*pi], float64)
nlplb = hstack(([tlb], slb, clb))
nlpub = hstack(([tub], sub, cub))
for i in range(nsegs):
nlplb = hstack((nlplb, slb, clb, slb, clb))
nlpub = hstack((nlpub, sub, cub, sub, cub))
@numba.jit(numba.float64[:](numba.float64[:], numba.float64[:]), nopython=True, cache=True, nogil=True)
def EOM_State(state, control):
x, y, vx, vy, m = state
u, theta = control
x0 = c1*u/m
x1 = abs(vx)**2
x2 = abs(vy)**2
x3 = A*CD*rho*(x1/2. + x2/2.)/sqrt(x1 + x2)
return array([
vx,
vy,
-vx*x3 + x0*sin(theta),
-g - vy*x3 + x0*cos(theta),
-c1*u/c2
], float64)
@numba.jit(
numba.types.Tuple((
numba.float64,
numba.float64[:,:],
numba.float64[:,:],
numba.float64[:,:],
numba.float64[:,:]
))(numba.float64[:]), cache=True, nogil=True)
def HSS_Decode(z):
tf = float64(z[0])
z = array(z[1:], float64)
b1 = zeros(sdim + cdim)
z = hstack((b1, z))
z = z.reshape((nnodes, (sdim + cdim)*2))
i, j = 0, sdim
sb = z[:,i:j]
i, j = j, j + cdim
cb = z[:,i:j]
i, j = j, j + sdim
s = z[:,i:j]
i, j = j, j + cdim
c = z[:,i:j]
return tf, sb, cb, s, c
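# Layout sketch of the decision vector z consumed by HSS_Decode (matching
# nlpdim = 1 + sdim + cdim + (sdim + cdim)*2*nsegs above):
#   z = [tf,
#        s_0 (sdim), c_0 (cdim),
#        sbar_1 (sdim), cbar_1 (cdim), s_1 (sdim), c_1 (cdim),
#        ...,
#        sbar_nsegs, cbar_nsegs, s_nsegs, c_nsegs]
# i.e. the first node carries only its own state/control, and every later node
# is preceded by the Hermite midpoint ("bar") values of the segment ending at
# it; the zero padding prepended in HSS_Decode stands in for the unused
# sbar_0/cbar_0 slot.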
@numba.jit(numba.float64[:](numba.float64[:], numba.float64[:], numba.float64[:]), cache=True, nogil=True)
def HSS_Defects(z, si, st):
tf, sb, cb, s, c = HSS_Decode(z)
h = tf/nnodes
# Boundary conditions
ceq = s[0] - si
ceq = hstack((ceq, s[-1,:-1] - st[:-1]))
# Dynamic Constraints
for k in range(nsegs):
f1 = EOM_State(s[k], c[k])
f2 = EOM_State(s[k+1], c[k+1])
fb2 = EOM_State(sb[k+1], cb[k+1])
# Hermite interpolation
ceq1 = sb[k+1] - 0.5*(s[k+1] + s[k]) - h/8.*(f1-f2)
# Simpson quadrature
ceq2 = s[k+1] - s[k] - h/6.*(f2 + 4*fb2 + f1)
ceq = hstack((ceq, ceq1, ceq2))
return ceq
@numba.jit(numba.float64(numba.float64[:]), cache=True, nogil=True)
def Objective(z):
tf, sb, cb, s, c = HSS_Decode(z)
return -s[-1, -1]
@numba.jit(numba.float64[:](numba.float64[:], numba.float64))
def EOM(state, t):
control = array([0, 0], float64)
return EOM_State(state, control)
@numba.jit(numba.float64[:,:](numba.float64[:], numba.float64))
def Propagate(si, tf):
return odeint(EOM, si, linspace(0, tf, nnodes), rtol=1e-12, atol=1e-12)
@numba.jit(numba.float64[:](numba.float64[:,:], numba.float64), nogil=True, cache=True)
def Code_Ballistic(states, tf):
controls = zeros((nnodes, cdim))
z = hstack((states, controls, states, controls))
z = z.flatten()
z = z[sdim+cdim:]
z = hstack((tf, z))
return z
class HSS_Trajectory(base):
def __init__(
self,
si = array([0, 1000, 20, -5, 10000], float64),
st = array([0, 0, 0, 0, 9000], float64)
):
base.__init__(self, nlpdim, 0, 1, condim, 0, 1e-8)
self.set_bounds(nlplb, nlpub)
self.si = si
self.st = st
def _objfun_impl(self, z):
return (Objective(z),)
def _compute_constraints_impl(self, z):
si, st = self.si, self.st
return HSS_Defects(z, si, st)
if __name__ == "__main__":
s = array([0, 1000, 20, -5, 10000], float64)
st = array([0, 0, 0, 0, 9000], float64)
c = array([0, 0], float64)
tf = float64(20)
z = [tf] + list(s) + list(c) + (list(s) + list(c))*2*nsegs
z = array(z, float64)
s = Propagate(s, tf)
z = Code_Ballistic(s, tf)
tf, sb, cb, s, c = HSS_Decode(z)
plt.plot(s[:,0], s[:,1], 'k.-')
plt.show()
|
mit
|
CDSherrill/psi4
|
psi4/driver/qcdb/psivardefs.py
|
3
|
15690
|
#
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2019 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This file is part of Psi4.
#
# Psi4 is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, version 3.
#
# Psi4 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with Psi4; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
import collections
def sapt_psivars():
"""Returns dictionary of PsiVariable definitions.
"""
pv1 = collections.OrderedDict()
pv1['SAPT EXCHSCAL1'] = {'func': lambda x: 1.0 if x[0] < 1.0e-5 else x[0] / x[1], 'args': ['SAPT EXCH10 ENERGY', 'SAPT EXCH10(S^2) ENERGY']} # special treatment in pandas
pv1['SAPT EXCHSCAL3'] = {'func': lambda x: x[0] ** 3, 'args': ['SAPT EXCHSCAL1']}
pv1['SAPT EXCHSCAL'] = {'func': lambda x: x[0] ** x[1], 'args': ['SAPT EXCHSCAL1', 'SAPT ALPHA']}
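    # Each definition is meant to be consumed by resolving every name in
    # 'args' to its numeric value (bare literals such as 0.66 below pass
    # through unchanged) and applying 'func' to that list, e.g.
    #   pv1['SAPT EXCHSCAL']['func']([1.02, 3.0])  ->  1.02 ** 3.0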
pv1['SAPT HF(2) ALPHA=0.0 ENERGY'] = {'func': lambda x: x[0] - (x[1] + x[2] + x[3] + x[4]),
'args': ['SAPT HF TOTAL ENERGY', 'SAPT ELST10,R ENERGY', 'SAPT EXCH10 ENERGY',
'SAPT IND20,R ENERGY', 'SAPT EXCH-IND20,R ENERGY']}
pv1['SAPT HF(2),U ALPHA=0.0 ENERGY'] = {'func': lambda x: x[0] - (x[1] + x[2] + x[3] + x[4]),
'args': ['SAPT HF TOTAL ENERGY', 'SAPT ELST10,R ENERGY', 'SAPT EXCH10 ENERGY',
'SAPT IND20,U ENERGY', 'SAPT EXCH-IND20,U ENERGY']}
pv1['SAPT HF(2) ENERGY'] = {'func': lambda x: x[1] + (1.0 - x[0]) * x[2],
'args': ['SAPT EXCHSCAL', 'SAPT HF(2) ALPHA=0.0 ENERGY', 'SAPT EXCH-IND20,R ENERGY']}
pv1['SAPT HF(2),U ENERGY'] = {'func': lambda x: x[1] + (1.0 - x[0]) * x[2],
'args': ['SAPT EXCHSCAL', 'SAPT HF(2),U ALPHA=0.0 ENERGY', 'SAPT EXCH-IND20,U ENERGY']}
pv1['SAPT HF(3) ENERGY'] = {'func': lambda x: x[1] - (x[2] + x[0] * x[3]),
'args': ['SAPT EXCHSCAL', 'SAPT HF(2) ENERGY', 'SAPT IND30,R ENERGY', 'SAPT EXCH-IND30,R ENERGY']}
pv1['SAPT MP2(2) ENERGY'] = {'func': lambda x: x[1] - (x[2] + x[3] + x[4] + x[0] * (x[5] + x[6] + x[7] + x[8])),
'args': ['SAPT EXCHSCAL', 'SAPT MP2 CORRELATION ENERGY', 'SAPT ELST12,R ENERGY', # MP2 CORRELATION ENERGY renamed here from pandas since this is IE # renamed again SA --> SAPT
'SAPT IND22 ENERGY', 'SAPT DISP20 ENERGY', 'SAPT EXCH11(S^2) ENERGY',
'SAPT EXCH12(S^2) ENERGY', 'SAPT EXCH-IND22 ENERGY', 'SAPT EXCH-DISP20 ENERGY']}
pv1['SAPT MP2(3) ENERGY'] = {'func': lambda x: x[1] - (x[2] + x[0] * x[3]),
'args': ['SAPT EXCHSCAL', 'SAPT MP2(2) ENERGY', 'SAPT IND-DISP30 ENERGY', 'SAPT EXCH-IND-DISP30 ENERGY']}
pv1['SAPT MP4 DISP'] = {'func': lambda x: x[0] * x[1] + x[2] + x[3] + x[4] + x[5],
'args': ['SAPT EXCHSCAL', 'SAPT EXCH-DISP20 ENERGY', 'SAPT DISP20 ENERGY',
'SAPT DISP21 ENERGY', 'SAPT DISP22(SDQ) ENERGY', 'SAPT EST.DISP22(T) ENERGY']}
pv1['SAPT CCD DISP'] = {'func': lambda x: x[0] * x[1] + x[2] + x[3] + x[4],
'args': ['SAPT EXCHSCAL', 'SAPT EXCH-DISP20 ENERGY', 'SAPT DISP2(CCD) ENERGY',
'SAPT DISP22(S)(CCD) ENERGY', 'SAPT EST.DISP22(T)(CCD) ENERGY']}
pv1['SAPT0 ELST ENERGY'] = {'func': sum, 'args': ['SAPT ELST10,R ENERGY']}
pv1['SAPT0 EXCH ENERGY'] = {'func': sum, 'args': ['SAPT EXCH10 ENERGY']}
pv1['SAPT0 IND ENERGY'] = {'func': lambda x: x[1] + x[2] + x[0] * x[3],
'args': ['SAPT EXCHSCAL', 'SAPT HF(2) ENERGY', 'SAPT IND20,R ENERGY', 'SAPT EXCH-IND20,R ENERGY']}
pv1['SAPT0 IND,U ENERGY'] = {'func': lambda x: x[1] + x[2] + x[0] * x[3],
'args': ['SAPT EXCHSCAL', 'SAPT HF(2),U ENERGY', 'SAPT IND20,U ENERGY', 'SAPT EXCH-IND20,U ENERGY']}
pv1['SAPT0 DISP ENERGY'] = {'func': lambda x: x[0] * x[1] + x[2],
'args': ['SAPT EXCHSCAL', 'SAPT EXCH-DISP20 ENERGY', 'SAPT DISP20 ENERGY']}
pv1['SAPT0 TOTAL ENERGY'] = {'func': sum, 'args': ['SAPT0 ELST ENERGY', 'SAPT0 EXCH ENERGY', 'SAPT0 IND ENERGY', 'SAPT0 DISP ENERGY']}
pv1['SSAPT0 ELST ENERGY'] = {'func': sum, 'args': ['SAPT0 ELST ENERGY']}
pv1['SSAPT0 EXCH ENERGY'] = {'func': sum, 'args': ['SAPT0 EXCH ENERGY']}
pv1['SSAPT0 IND ENERGY'] = {'func': lambda x: x[1] + (x[0] - 1.0) * x[2],
'args': ['SAPT EXCHSCAL3', 'SAPT0 IND ENERGY', 'SAPT EXCH-IND20,R ENERGY']}
pv1['SSAPT0 IND,U ENERGY'] = {'func': lambda x: x[1] + (x[0] - 1.0) * x[2],
'args': ['SAPT EXCHSCAL3', 'SAPT0 IND,U ENERGY', 'SAPT EXCH-IND20,U ENERGY']}
pv1['SSAPT0 DISP ENERGY'] = {'func': lambda x: x[0] * x[1] + x[2],
'args': ['SAPT EXCHSCAL3', 'SAPT EXCH-DISP20 ENERGY', 'SAPT DISP20 ENERGY']}
pv1['SSAPT0 TOTAL ENERGY'] = {'func': sum, 'args': ['SSAPT0 ELST ENERGY', 'SSAPT0 EXCH ENERGY', 'SSAPT0 IND ENERGY', 'SSAPT0 DISP ENERGY']}
pv1['SCS-SAPT0 ELST ENERGY'] = {'func': sum, 'args': ['SAPT0 ELST ENERGY']}
pv1['SCS-SAPT0 EXCH ENERGY'] = {'func': sum, 'args': ['SAPT0 EXCH ENERGY']}
pv1['SCS-SAPT0 IND ENERGY'] = {'func': sum, 'args': ['SAPT0 IND ENERGY']}
pv1['SCS-SAPT0 IND,U ENERGY'] = {'func': sum, 'args': ['SAPT0 IND,U ENERGY']}
pv1['SCS-SAPT0 DISP ENERGY'] = {'func': lambda x: (x[0] - x[3]) * (x[1] + x[2]) + x[3] * (x[4] + x[5]),
'args': [0.66, 'SAPT SAME-SPIN EXCH-DISP20 ENERGY', 'SAPT SAME-SPIN DISP20 ENERGY',
1.2, 'SAPT EXCH-DISP20 ENERGY', 'SAPT DISP20 ENERGY']} # note no xs for SCS disp
pv1['SCS-SAPT0 TOTAL ENERGY'] = {'func': sum, 'args': ['SCS-SAPT0 ELST ENERGY', 'SCS-SAPT0 EXCH ENERGY', 'SCS-SAPT0 IND ENERGY', 'SCS-SAPT0 DISP ENERGY']}
pv1['SAPT2 ELST ENERGY'] = {'func': sum, 'args': ['SAPT ELST10,R ENERGY', 'SAPT ELST12,R ENERGY']}
pv1['SAPT2 EXCH ENERGY'] = {'func': lambda x: x[1] + x[0] * (x[2] + x[3]),
'args': ['SAPT EXCHSCAL', 'SAPT EXCH10 ENERGY', 'SAPT EXCH11(S^2) ENERGY', 'SAPT EXCH12(S^2) ENERGY']}
pv1['SAPT2 IND ENERGY'] = {'func': lambda x: x[1] + x[2] + x[0] * x[3] + x[4] + x[0] * x[5],
'args': ['SAPT EXCHSCAL', 'SAPT HF(2) ENERGY', 'SAPT IND20,R ENERGY', 'SAPT EXCH-IND20,R ENERGY',
'SAPT IND22 ENERGY', 'SAPT EXCH-IND22 ENERGY']}
pv1['SAPT2 DISP ENERGY'] = {'func': lambda x: x[0] * x[1] + x[2],
'args': ['SAPT EXCHSCAL', 'SAPT EXCH-DISP20 ENERGY', 'SAPT DISP20 ENERGY']}
pv1['SAPT2 TOTAL ENERGY'] = {'func': sum, 'args': ['SAPT2 ELST ENERGY', 'SAPT2 EXCH ENERGY', 'SAPT2 IND ENERGY', 'SAPT2 DISP ENERGY']}
pv1['SAPT2+ ELST ENERGY'] = {'func': sum, 'args': ['SAPT ELST10,R ENERGY', 'SAPT ELST12,R ENERGY']}
pv1['SAPT2+ EXCH ENERGY'] = {'func': lambda x: x[1] + x[0] * (x[2] + x[3]),
'args': ['SAPT EXCHSCAL', 'SAPT EXCH10 ENERGY', 'SAPT EXCH11(S^2) ENERGY', 'SAPT EXCH12(S^2) ENERGY']}
pv1['SAPT2+ IND ENERGY'] = {'func': lambda x: x[1] + x[2] + x[0] * x[3] + x[4] + x[0] * x[5],
'args': ['SAPT EXCHSCAL', 'SAPT HF(2) ENERGY', 'SAPT IND20,R ENERGY', 'SAPT EXCH-IND20,R ENERGY',
'SAPT IND22 ENERGY', 'SAPT EXCH-IND22 ENERGY']}
pv1['SAPT2+ DISP ENERGY'] = {'func': sum, 'args': ['SAPT MP4 DISP']}
pv1['SAPT2+ TOTAL ENERGY'] = {'func': sum, 'args': ['SAPT2+ ELST ENERGY', 'SAPT2+ EXCH ENERGY', 'SAPT2+ IND ENERGY', 'SAPT2+ DISP ENERGY']}
pv1['SAPT2+(CCD) ELST ENERGY'] = {'func': sum, 'args': ['SAPT2+ ELST ENERGY']}
pv1['SAPT2+(CCD) EXCH ENERGY'] = {'func': sum, 'args': ['SAPT2+ EXCH ENERGY']}
pv1['SAPT2+(CCD) IND ENERGY'] = {'func': sum, 'args': ['SAPT2+ IND ENERGY']}
pv1['SAPT2+(CCD) DISP ENERGY'] = {'func': sum, 'args': ['SAPT CCD DISP']}
pv1['SAPT2+(CCD) TOTAL ENERGY'] = {'func': sum, 'args': ['SAPT2+(CCD) ELST ENERGY', 'SAPT2+(CCD) EXCH ENERGY', 'SAPT2+(CCD) IND ENERGY', 'SAPT2+(CCD) DISP ENERGY']}
pv1['SAPT2+DMP2 ELST ENERGY'] = {'func': sum, 'args': ['SAPT2+ ELST ENERGY']}
pv1['SAPT2+DMP2 EXCH ENERGY'] = {'func': sum, 'args': ['SAPT2+ EXCH ENERGY']}
pv1['SAPT2+DMP2 IND ENERGY'] = {'func': sum, 'args': ['SAPT2+ IND ENERGY', 'SAPT MP2(2) ENERGY']}
pv1['SAPT2+DMP2 DISP ENERGY'] = {'func': sum, 'args': ['SAPT2+ DISP ENERGY']}
pv1['SAPT2+DMP2 TOTAL ENERGY'] = {'func': sum, 'args': ['SAPT2+DMP2 ELST ENERGY', 'SAPT2+DMP2 EXCH ENERGY', 'SAPT2+DMP2 IND ENERGY', 'SAPT2+DMP2 DISP ENERGY']}
pv1['SAPT2+(CCD)DMP2 ELST ENERGY'] = {'func': sum, 'args': ['SAPT2+ ELST ENERGY']}
pv1['SAPT2+(CCD)DMP2 EXCH ENERGY'] = {'func': sum, 'args': ['SAPT2+ EXCH ENERGY']}
pv1['SAPT2+(CCD)DMP2 IND ENERGY'] = {'func': sum, 'args': ['SAPT2+DMP2 IND ENERGY']}
pv1['SAPT2+(CCD)DMP2 DISP ENERGY'] = {'func': sum, 'args': ['SAPT2+(CCD) DISP ENERGY']}
pv1['SAPT2+(CCD)DMP2 TOTAL ENERGY'] = {'func': sum, 'args': ['SAPT2+(CCD)DMP2 ELST ENERGY', 'SAPT2+(CCD)DMP2 EXCH ENERGY', 'SAPT2+(CCD)DMP2 IND ENERGY', 'SAPT2+(CCD)DMP2 DISP ENERGY']}
pv1['SAPT2+(3) ELST ENERGY'] = {'func': sum, 'args': ['SAPT ELST10,R ENERGY', 'SAPT ELST12,R ENERGY', 'SAPT ELST13,R ENERGY']}
pv1['SAPT2+(3) EXCH ENERGY'] = {'func': lambda x: x[1] + x[0] * (x[2] + x[3]),
'args': ['SAPT EXCHSCAL', 'SAPT EXCH10 ENERGY', 'SAPT EXCH11(S^2) ENERGY', 'SAPT EXCH12(S^2) ENERGY']}
pv1['SAPT2+(3) IND ENERGY'] = {'func': lambda x: x[1] + x[2] + x[0] * x[3] + x[4] + x[0] * x[5],
'args': ['SAPT EXCHSCAL', 'SAPT HF(2) ENERGY', 'SAPT IND20,R ENERGY', 'SAPT EXCH-IND20,R ENERGY',
'SAPT IND22 ENERGY', 'SAPT EXCH-IND22 ENERGY']}
pv1['SAPT2+(3) DISP ENERGY'] = {'func': sum, 'args': ['SAPT MP4 DISP', 'SAPT DISP30 ENERGY']}
pv1['SAPT2+(3) TOTAL ENERGY'] = {'func': sum, 'args': ['SAPT2+(3) ELST ENERGY', 'SAPT2+(3) EXCH ENERGY', 'SAPT2+(3) IND ENERGY', 'SAPT2+(3) DISP ENERGY']}
pv1['SAPT2+(3)(CCD) ELST ENERGY'] = {'func': sum, 'args': ['SAPT2+(3) ELST ENERGY']}
pv1['SAPT2+(3)(CCD) EXCH ENERGY'] = {'func': sum, 'args': ['SAPT2+(3) EXCH ENERGY']}
pv1['SAPT2+(3)(CCD) IND ENERGY'] = {'func': sum, 'args': ['SAPT2+(3) IND ENERGY']}
pv1['SAPT2+(3)(CCD) DISP ENERGY'] = {'func': sum, 'args': ['SAPT CCD DISP', 'SAPT DISP30 ENERGY']}
pv1['SAPT2+(3)(CCD) TOTAL ENERGY'] = {'func': sum, 'args': ['SAPT2+(3)(CCD) ELST ENERGY', 'SAPT2+(3)(CCD) EXCH ENERGY', 'SAPT2+(3)(CCD) IND ENERGY', 'SAPT2+(3)(CCD) DISP ENERGY']}
pv1['SAPT2+(3)DMP2 ELST ENERGY'] = {'func': sum, 'args': ['SAPT2+(3) ELST ENERGY']}
pv1['SAPT2+(3)DMP2 EXCH ENERGY'] = {'func': sum, 'args': ['SAPT2+(3) EXCH ENERGY']}
pv1['SAPT2+(3)DMP2 IND ENERGY'] = {'func': sum, 'args': ['SAPT2+(3) IND ENERGY', 'SAPT MP2(2) ENERGY']}
pv1['SAPT2+(3)DMP2 DISP ENERGY'] = {'func': sum, 'args': ['SAPT2+(3) DISP ENERGY']}
pv1['SAPT2+(3)DMP2 TOTAL ENERGY'] = {'func': sum, 'args': ['SAPT2+(3)DMP2 ELST ENERGY', 'SAPT2+(3)DMP2 EXCH ENERGY', 'SAPT2+(3)DMP2 IND ENERGY', 'SAPT2+(3)DMP2 DISP ENERGY']}
pv1['SAPT2+(3)(CCD)DMP2 ELST ENERGY'] = {'func': sum, 'args': ['SAPT2+(3) ELST ENERGY']}
pv1['SAPT2+(3)(CCD)DMP2 EXCH ENERGY'] = {'func': sum, 'args': ['SAPT2+(3) EXCH ENERGY']}
pv1['SAPT2+(3)(CCD)DMP2 IND ENERGY'] = {'func': sum, 'args': ['SAPT2+(3)DMP2 IND ENERGY']}
pv1['SAPT2+(3)(CCD)DMP2 DISP ENERGY'] = {'func': sum, 'args': ['SAPT2+(3)(CCD) DISP ENERGY']}
pv1['SAPT2+(3)(CCD)DMP2 TOTAL ENERGY'] = {'func': sum, 'args': ['SAPT2+(3)(CCD)DMP2 ELST ENERGY', 'SAPT2+(3)(CCD)DMP2 EXCH ENERGY', 'SAPT2+(3)(CCD)DMP2 IND ENERGY', 'SAPT2+(3)(CCD)DMP2 DISP ENERGY']}
pv1['SAPT2+3 ELST ENERGY'] = {'func': sum, 'args': ['SAPT ELST10,R ENERGY', 'SAPT ELST12,R ENERGY', 'SAPT ELST13,R ENERGY']}
pv1['SAPT2+3 EXCH ENERGY'] = {'func': lambda x: x[1] + x[0] * (x[2] + x[3]),
'args': ['SAPT EXCHSCAL', 'SAPT EXCH10 ENERGY', 'SAPT EXCH11(S^2) ENERGY', 'SAPT EXCH12(S^2) ENERGY']}
pv1['SAPT2+3 IND ENERGY'] = {'func': lambda x: x[1] + x[2] + x[0] * x[3] + x[4] + x[0] * x[5] + x[6] + x[0] * x[7],
'args': ['SAPT EXCHSCAL', 'SAPT HF(3) ENERGY', 'SAPT IND20,R ENERGY', 'SAPT EXCH-IND20,R ENERGY',
'SAPT IND22 ENERGY', 'SAPT EXCH-IND22 ENERGY', 'SAPT IND30,R ENERGY', 'SAPT EXCH-IND30,R ENERGY']}
pv1['SAPT2+3 DISP ENERGY'] = {'func': lambda x: x[1] + x[2] + x[0] * x[3] + x[4] + x[0] * x[5],
'args': ['SAPT EXCHSCAL', 'SAPT MP4 DISP', 'SAPT DISP30 ENERGY', 'SAPT EXCH-DISP30 ENERGY',
'SAPT IND-DISP30 ENERGY', 'SAPT EXCH-IND-DISP30 ENERGY']}
pv1['SAPT2+3 TOTAL ENERGY'] = {'func': sum, 'args': ['SAPT2+3 ELST ENERGY', 'SAPT2+3 EXCH ENERGY', 'SAPT2+3 IND ENERGY', 'SAPT2+3 DISP ENERGY']}
pv1['SAPT2+3(CCD) ELST ENERGY'] = {'func': sum, 'args': ['SAPT2+3 ELST ENERGY']}
pv1['SAPT2+3(CCD) EXCH ENERGY'] = {'func': sum, 'args': ['SAPT2+3 EXCH ENERGY']}
pv1['SAPT2+3(CCD) IND ENERGY'] = {'func': sum, 'args': ['SAPT2+3 IND ENERGY']}
pv1['SAPT2+3(CCD) DISP ENERGY'] = {'func': lambda x: x[1] + x[2] + x[0] * x[3] + x[4] + x[0] * x[5],
'args': ['SAPT EXCHSCAL', 'SAPT CCD DISP', 'SAPT DISP30 ENERGY', 'SAPT EXCH-DISP30 ENERGY',
'SAPT IND-DISP30 ENERGY', 'SAPT EXCH-IND-DISP30 ENERGY']}
pv1['SAPT2+3(CCD) TOTAL ENERGY'] = {'func': sum, 'args': ['SAPT2+3(CCD) ELST ENERGY', 'SAPT2+3(CCD) EXCH ENERGY', 'SAPT2+3(CCD) IND ENERGY', 'SAPT2+3(CCD) DISP ENERGY']}
pv1['SAPT2+3DMP2 ELST ENERGY'] = {'func': sum, 'args': ['SAPT2+3 ELST ENERGY']}
pv1['SAPT2+3DMP2 EXCH ENERGY'] = {'func': sum, 'args': ['SAPT2+3 EXCH ENERGY']}
pv1['SAPT2+3DMP2 IND ENERGY'] = {'func': sum, 'args': ['SAPT2+3 IND ENERGY', 'SAPT MP2(3) ENERGY']}
pv1['SAPT2+3DMP2 DISP ENERGY'] = {'func': sum, 'args': ['SAPT2+3 DISP ENERGY']}
pv1['SAPT2+3DMP2 TOTAL ENERGY'] = {'func': sum, 'args': ['SAPT2+3DMP2 ELST ENERGY', 'SAPT2+3DMP2 EXCH ENERGY', 'SAPT2+3DMP2 IND ENERGY', 'SAPT2+3DMP2 DISP ENERGY']}
pv1['SAPT2+3(CCD)DMP2 ELST ENERGY'] = {'func': sum, 'args': ['SAPT2+3 ELST ENERGY']}
pv1['SAPT2+3(CCD)DMP2 EXCH ENERGY'] = {'func': sum, 'args': ['SAPT2+3 EXCH ENERGY']}
pv1['SAPT2+3(CCD)DMP2 IND ENERGY'] = {'func': sum, 'args': ['SAPT2+3DMP2 IND ENERGY']}
pv1['SAPT2+3(CCD)DMP2 DISP ENERGY'] = {'func': sum, 'args': ['SAPT2+3(CCD) DISP ENERGY']}
pv1['SAPT2+3(CCD)DMP2 TOTAL ENERGY'] = {'func': sum, 'args': ['SAPT2+3(CCD)DMP2 ELST ENERGY', 'SAPT2+3(CCD)DMP2 EXCH ENERGY', 'SAPT2+3(CCD)DMP2 IND ENERGY', 'SAPT2+3(CCD)DMP2 DISP ENERGY']}
return pv1
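# Illustrative sketch only (not part of psivardefs.py): one possible way to
# resolve a derived PsiVariable from the dictionary above, given a plain dict
# of already-computed base values. The helper name and recursion strategy are
# assumptions for illustration, not Psi4 API.
def _example_eval_psivar(name, defs, base):
    """Recursively evaluate `name` using `defs` (from sapt_psivars) and `base`."""
    if name in base:
        return base[name]
    entry = defs[name]
    # args may be other PsiVariable names or plain numeric weights (e.g. 0.66)
    vals = [a if isinstance(a, float) else _example_eval_psivar(a, defs, base)
            for a in entry['args']]
    return entry['func'](vals)
# e.g. _example_eval_psivar('SAPT0 TOTAL ENERGY', sapt_psivars(), base_values)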
|
lgpl-3.0
|
maheshakya/scikit-learn
|
examples/linear_model/plot_lasso_and_elasticnet.py
|
249
|
1982
|
"""
========================================
Lasso and Elastic Net for Sparse Signals
========================================
Estimates Lasso and Elastic-Net regression models on a manually generated
sparse signal corrupted with an additive noise. Estimated coefficients are
compared with the ground-truth.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import r2_score
###############################################################################
# generate some sparse data to play with
np.random.seed(42)
n_samples, n_features = 50, 200
X = np.random.randn(n_samples, n_features)
coef = 3 * np.random.randn(n_features)
inds = np.arange(n_features)
np.random.shuffle(inds)
coef[inds[10:]] = 0 # sparsify coef
y = np.dot(X, coef)
# add noise
y += 0.01 * np.random.normal(size=n_samples)
# Split data in train set and test set
n_samples = X.shape[0]
X_train, y_train = X[:n_samples // 2], y[:n_samples // 2]
X_test, y_test = X[n_samples // 2:], y[n_samples // 2:]
###############################################################################
# Lasso
from sklearn.linear_model import Lasso
alpha = 0.1
lasso = Lasso(alpha=alpha)
y_pred_lasso = lasso.fit(X_train, y_train).predict(X_test)
r2_score_lasso = r2_score(y_test, y_pred_lasso)
print(lasso)
print("r^2 on test data : %f" % r2_score_lasso)
###############################################################################
# ElasticNet
from sklearn.linear_model import ElasticNet
enet = ElasticNet(alpha=alpha, l1_ratio=0.7)
y_pred_enet = enet.fit(X_train, y_train).predict(X_test)
r2_score_enet = r2_score(y_test, y_pred_enet)
print(enet)
print("r^2 on test data : %f" % r2_score_enet)
plt.plot(enet.coef_, label='Elastic net coefficients')
plt.plot(lasso.coef_, label='Lasso coefficients')
plt.plot(coef, '--', label='original coefficients')
plt.legend(loc='best')
plt.title("Lasso R^2: %f, Elastic Net R^2: %f"
% (r2_score_lasso, r2_score_enet))
plt.show()
|
bsd-3-clause
|
TaxIPP-Life/Til
|
til/pgm/Archives/of2liam.py
|
2
|
2107
|
# -*- coding:utf-8 -*-
'''
Created on 25 Apr 2013
@author: alexis_e
'''
from pandas import HDFStore, merge # DataFrame
import numpy as np
import pdb
import time
from src.lib.simulation import SurveySimulation
from src.parametres.paramData import XmlReader, Tree2Object
import pandas as pd
import datetime as dt
import pandas.rpy.common as com
from rpy2.robjects import r
import os
import tables
def main(period=None):
temps = time.clock()
input_tab = "C:/openfisca/output/liam/"+"LiamLeg.h5"
output_tab = "C:/Myliam2/Model/SimulTest.h5"
store = HDFStore(input_tab)
goal = HDFStore(output_tab)
name_convertion = {'ind':'person','foy':'declar','men':'menage', 'fam':'menage'}
    # we first work on all the tables, then select each year
# step 1
for ent in ('ind','men','foy','fam'):
dest = name_convertion[ent]
tab_in = store[ent]
tab_out = goal['entities/'+dest]
        # pick the variables to keep
        # TODO: move this up to the of_on_liam level, but it is convenient here
        # because of the other table
ident = 'id'+ent
if ent=='ind':
ident='noi'
        # keep the initial values
to_remove = [x for x in tab_in.columns if x in tab_out.columns]
        # remove the identifiers except the one that will become id
list_id = ['idmen','idfoy','idfam','id','quifoy','quifam','quimen','noi']
list_id.remove(ident)
to_remove = to_remove + [x for x in tab_in.columns if x in list_id]
        # do not forget to keep period
to_remove.remove('period')
tab_in = tab_in.drop(to_remove,axis=1)
tab_in = tab_in.rename(columns={ident:'id'})
tab_out = merge(tab_in, tab_out , how='right', on=['id','period'], sort=False)
goal.remove('entities/'+dest)
goal.append('entities/'+dest, tab_out)
# new_tab = np.array(tab_out.to_records())
store.close()
goal.close()
# output_file = tables.openFile(output_tab)
if __name__ == "__main__":
main()
|
gpl-3.0
|
ammarkhann/FinalSeniorCode
|
lib/python2.7/site-packages/pandas/core/ops.py
|
3
|
53961
|
"""
Arithmetic operations for PandasObjects
This is not a public API.
"""
# necessary to enforce truediv in Python 2.X
from __future__ import division
import operator
import warnings
import numpy as np
import pandas as pd
import datetime
from pandas._libs import (lib, index as libindex,
tslib as libts, algos as libalgos, iNaT)
from pandas import compat
from pandas.util._decorators import Appender
import pandas.core.computation.expressions as expressions
from pandas.compat import bind_method
import pandas.core.missing as missing
from pandas.errors import PerformanceWarning
from pandas.core.common import _values_from_object, _maybe_match_name
from pandas.core.dtypes.missing import notnull, isnull
from pandas.core.dtypes.common import (
needs_i8_conversion,
is_datetimelike_v_numeric,
is_integer_dtype, is_categorical_dtype,
is_object_dtype, is_timedelta64_dtype,
is_datetime64_dtype, is_datetime64tz_dtype,
is_bool_dtype, is_datetimetz,
is_list_like,
is_scalar,
_ensure_object)
from pandas.core.dtypes.cast import maybe_upcast_putmask, find_common_type
from pandas.core.dtypes.generic import ABCSeries, ABCIndex, ABCPeriodIndex
# -----------------------------------------------------------------------------
# Functions that add arithmetic methods to objects, given arithmetic factory
# methods
def _create_methods(arith_method, comp_method, bool_method,
use_numexpr, special=False, default_axis='columns',
have_divmod=False):
# creates actual methods based upon arithmetic, comp and bool method
# constructors.
# NOTE: Only frame cares about default_axis, specifically: special methods
# have default axis None, whereas flex methods have default axis 'columns'
# if we're not using numexpr, then don't pass a str_rep
if use_numexpr:
op = lambda x: x
else:
op = lambda x: None
if special:
def names(x):
if x[-1] == "_":
return "__%s_" % x
else:
return "__%s__" % x
else:
names = lambda x: x
    # In frame, all special methods have default_axis=None, flex methods have
# default_axis set to the default (columns)
# yapf: disable
new_methods = dict(
add=arith_method(operator.add, names('add'), op('+'),
default_axis=default_axis),
radd=arith_method(lambda x, y: y + x, names('radd'), op('+'),
default_axis=default_axis),
sub=arith_method(operator.sub, names('sub'), op('-'),
default_axis=default_axis),
mul=arith_method(operator.mul, names('mul'), op('*'),
default_axis=default_axis),
truediv=arith_method(operator.truediv, names('truediv'), op('/'),
truediv=True, fill_zeros=np.inf,
default_axis=default_axis),
floordiv=arith_method(operator.floordiv, names('floordiv'), op('//'),
default_axis=default_axis, fill_zeros=np.inf),
# Causes a floating point exception in the tests when numexpr enabled,
# so for now no speedup
mod=arith_method(operator.mod, names('mod'), None,
default_axis=default_axis, fill_zeros=np.nan),
pow=arith_method(operator.pow, names('pow'), op('**'),
default_axis=default_axis),
# not entirely sure why this is necessary, but previously was included
# so it's here to maintain compatibility
rmul=arith_method(operator.mul, names('rmul'), op('*'),
default_axis=default_axis, reversed=True),
rsub=arith_method(lambda x, y: y - x, names('rsub'), op('-'),
default_axis=default_axis, reversed=True),
rtruediv=arith_method(lambda x, y: operator.truediv(y, x),
names('rtruediv'), op('/'), truediv=True,
fill_zeros=np.inf, default_axis=default_axis,
reversed=True),
rfloordiv=arith_method(lambda x, y: operator.floordiv(y, x),
names('rfloordiv'), op('//'),
default_axis=default_axis, fill_zeros=np.inf,
reversed=True),
rpow=arith_method(lambda x, y: y**x, names('rpow'), op('**'),
default_axis=default_axis, reversed=True),
rmod=arith_method(lambda x, y: y % x, names('rmod'), op('%'),
default_axis=default_axis, fill_zeros=np.nan,
reversed=True),)
# yapf: enable
new_methods['div'] = new_methods['truediv']
new_methods['rdiv'] = new_methods['rtruediv']
# Comp methods never had a default axis set
if comp_method:
new_methods.update(dict(
eq=comp_method(operator.eq, names('eq'), op('==')),
ne=comp_method(operator.ne, names('ne'), op('!='), masker=True),
lt=comp_method(operator.lt, names('lt'), op('<')),
gt=comp_method(operator.gt, names('gt'), op('>')),
le=comp_method(operator.le, names('le'), op('<=')),
ge=comp_method(operator.ge, names('ge'), op('>=')), ))
if bool_method:
new_methods.update(
dict(and_=bool_method(operator.and_, names('and_'), op('&')),
or_=bool_method(operator.or_, names('or_'), op('|')),
# For some reason ``^`` wasn't used in original.
xor=bool_method(operator.xor, names('xor'), op('^')),
rand_=bool_method(lambda x, y: operator.and_(y, x),
names('rand_'), op('&')),
ror_=bool_method(lambda x, y: operator.or_(y, x),
names('ror_'), op('|')),
rxor=bool_method(lambda x, y: operator.xor(y, x),
names('rxor'), op('^'))))
if have_divmod:
# divmod doesn't have an op that is supported by numexpr
new_methods['divmod'] = arith_method(
divmod,
names('divmod'),
None,
default_axis=default_axis,
construct_result=_construct_divmod_result,
)
new_methods = dict((names(k), v) for k, v in new_methods.items())
return new_methods
def add_methods(cls, new_methods, force, select, exclude):
if select and exclude:
raise TypeError("May only pass either select or exclude")
methods = new_methods
if select:
select = set(select)
methods = {}
for key, method in new_methods.items():
if key in select:
methods[key] = method
if exclude:
for k in exclude:
new_methods.pop(k, None)
for name, method in new_methods.items():
if force or name not in cls.__dict__:
bind_method(cls, name, method)
# ----------------------------------------------------------------------
# Arithmetic
def add_special_arithmetic_methods(cls, arith_method=None,
comp_method=None, bool_method=None,
use_numexpr=True, force=False, select=None,
exclude=None, have_divmod=False):
"""
Adds the full suite of special arithmetic methods (``__add__``,
``__sub__``, etc.) to the class.
Parameters
----------
arith_method : function (optional)
factory for special arithmetic methods, with op string:
f(op, name, str_rep, default_axis=None, fill_zeros=None, **eval_kwargs)
comp_method : function, optional,
factory for rich comparison - signature: f(op, name, str_rep)
use_numexpr : bool, default True
whether to accelerate with numexpr, defaults to True
force : bool, default False
if False, checks whether function is defined **on ``cls.__dict__``**
before defining if True, always defines functions on class base
select : iterable of strings (optional)
if passed, only sets functions with names in select
exclude : iterable of strings (optional)
if passed, will not set functions with names in exclude
have_divmod : bool, (optional)
should a divmod method be added? this method is special because it
returns a tuple of cls instead of a single element of type cls
"""
# in frame, special methods have default_axis = None, comp methods use
# 'columns'
new_methods = _create_methods(arith_method, comp_method,
bool_method, use_numexpr, default_axis=None,
special=True, have_divmod=have_divmod)
# inplace operators (I feel like these should get passed an `inplace=True`
    # or just be removed)
def _wrap_inplace_method(method):
"""
return an inplace wrapper for this method
"""
def f(self, other):
result = method(self, other)
# this makes sure that we are aligned like the input
# we are updating inplace so we want to ignore is_copy
self._update_inplace(result.reindex_like(self, copy=False)._data,
verify_is_copy=False)
return self
return f
new_methods.update(
dict(__iadd__=_wrap_inplace_method(new_methods["__add__"]),
__isub__=_wrap_inplace_method(new_methods["__sub__"]),
__imul__=_wrap_inplace_method(new_methods["__mul__"]),
__itruediv__=_wrap_inplace_method(new_methods["__truediv__"]),
__ipow__=_wrap_inplace_method(new_methods["__pow__"]), ))
if not compat.PY3:
new_methods["__idiv__"] = new_methods["__div__"]
add_methods(cls, new_methods=new_methods, force=force, select=select,
exclude=exclude)
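# Illustrative note (assumed wiring, not shown in this module): the pandas
# class modules are expected to call these factories with the per-class
# dictionaries defined near the bottom of this file, roughly as:
#
#   add_special_arithmetic_methods(Series, **series_special_funcs)
#   add_flex_arithmetic_methods(Series, **series_flex_funcs)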
def add_flex_arithmetic_methods(cls, flex_arith_method,
flex_comp_method=None, flex_bool_method=None,
use_numexpr=True, force=False, select=None,
exclude=None):
"""
Adds the full suite of flex arithmetic methods (``pow``, ``mul``, ``add``)
to the class.
Parameters
----------
flex_arith_method : function
factory for special arithmetic methods, with op string:
f(op, name, str_rep, default_axis=None, fill_zeros=None, **eval_kwargs)
flex_comp_method : function, optional,
factory for rich comparison - signature: f(op, name, str_rep)
use_numexpr : bool, default True
whether to accelerate with numexpr, defaults to True
force : bool, default False
if False, checks whether function is defined **on ``cls.__dict__``**
before defining if True, always defines functions on class base
select : iterable of strings (optional)
if passed, only sets functions with names in select
exclude : iterable of strings (optional)
if passed, will not set functions with names in exclude
"""
# in frame, default axis is 'columns', doesn't matter for series and panel
new_methods = _create_methods(flex_arith_method,
flex_comp_method, flex_bool_method,
use_numexpr, default_axis='columns',
special=False)
new_methods.update(dict(multiply=new_methods['mul'],
subtract=new_methods['sub'],
divide=new_methods['div']))
# opt out of bool flex methods for now
for k in ('ror_', 'rxor', 'rand_'):
if k in new_methods:
new_methods.pop(k)
add_methods(cls, new_methods=new_methods, force=force, select=select,
exclude=exclude)
class _Op(object):
"""
Wrapper around Series arithmetic operations.
Generally, you should use classmethod ``_Op.get_op`` as an entry point.
This validates and coerces lhs and rhs depending on its dtype and
based on op. See _TimeOp also.
Parameters
----------
left : Series
lhs of op
right : object
rhs of op
name : str
name of op
na_op : callable
a function which wraps op
"""
fill_value = np.nan
wrap_results = staticmethod(lambda x: x)
dtype = None
def __init__(self, left, right, name, na_op):
self.left = left
self.right = right
self.name = name
self.na_op = na_op
self.lvalues = left
self.rvalues = right
@classmethod
def get_op(cls, left, right, name, na_op):
"""
Get op dispatcher, returns _Op or _TimeOp.
If ``left`` and ``right`` are appropriate for datetime arithmetic with
operation ``name``, processes them and returns a ``_TimeOp`` object
that stores all the required values. Otherwise, it will generate
either a ``_Op``, indicating that the operation is performed via
normal numpy path.
"""
is_timedelta_lhs = is_timedelta64_dtype(left)
is_datetime_lhs = (is_datetime64_dtype(left) or
is_datetime64tz_dtype(left))
if not (is_datetime_lhs or is_timedelta_lhs):
return _Op(left, right, name, na_op)
else:
return _TimeOp(left, right, name, na_op)
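# Illustrative sketch (mirrors the use in _arith_method_SERIES further below):
# callers dispatch through the classmethod entry point instead of choosing the
# wrapper class themselves, e.g.
#
#   converted = _Op.get_op(left, right, '__add__', na_op)
#   lvalues, rvalues = converted.lvalues, converted.rvalues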
class _TimeOp(_Op):
"""
Wrapper around Series datetime/time/timedelta arithmetic operations.
Generally, you should use classmethod ``_Op.get_op`` as an entry point.
"""
fill_value = iNaT
def __init__(self, left, right, name, na_op):
super(_TimeOp, self).__init__(left, right, name, na_op)
lvalues = self._convert_to_array(left, name=name)
rvalues = self._convert_to_array(right, name=name, other=lvalues)
# left
self.is_offset_lhs = self._is_offset(left)
self.is_timedelta_lhs = is_timedelta64_dtype(lvalues)
self.is_datetime64_lhs = is_datetime64_dtype(lvalues)
self.is_datetime64tz_lhs = is_datetime64tz_dtype(lvalues)
self.is_datetime_lhs = (self.is_datetime64_lhs or
self.is_datetime64tz_lhs)
self.is_integer_lhs = left.dtype.kind in ['i', 'u']
self.is_floating_lhs = left.dtype.kind == 'f'
# right
self.is_offset_rhs = self._is_offset(right)
self.is_datetime64_rhs = is_datetime64_dtype(rvalues)
self.is_datetime64tz_rhs = is_datetime64tz_dtype(rvalues)
self.is_datetime_rhs = (self.is_datetime64_rhs or
self.is_datetime64tz_rhs)
self.is_timedelta_rhs = is_timedelta64_dtype(rvalues)
self.is_integer_rhs = rvalues.dtype.kind in ('i', 'u')
self.is_floating_rhs = rvalues.dtype.kind == 'f'
self._validate(lvalues, rvalues, name)
self.lvalues, self.rvalues = self._convert_for_datetime(lvalues,
rvalues)
def _validate(self, lvalues, rvalues, name):
# timedelta and integer mul/div
if ((self.is_timedelta_lhs and
(self.is_integer_rhs or self.is_floating_rhs)) or
(self.is_timedelta_rhs and
(self.is_integer_lhs or self.is_floating_lhs))):
if name not in ('__div__', '__truediv__', '__mul__', '__rmul__'):
raise TypeError("can only operate on a timedelta and an "
"integer or a float for division and "
"multiplication, but the operator [%s] was"
"passed" % name)
# 2 timedeltas
elif ((self.is_timedelta_lhs and
(self.is_timedelta_rhs or self.is_offset_rhs)) or
(self.is_timedelta_rhs and
(self.is_timedelta_lhs or self.is_offset_lhs))):
if name not in ('__div__', '__rdiv__', '__truediv__',
'__rtruediv__', '__add__', '__radd__', '__sub__',
'__rsub__'):
raise TypeError("can only operate on a timedeltas for "
"addition, subtraction, and division, but the"
" operator [%s] was passed" % name)
# datetime and timedelta/DateOffset
elif (self.is_datetime_lhs and
(self.is_timedelta_rhs or self.is_offset_rhs)):
if name not in ('__add__', '__radd__', '__sub__'):
raise TypeError("can only operate on a datetime with a rhs of "
"a timedelta/DateOffset for addition and "
"subtraction, but the operator [%s] was "
"passed" % name)
elif (self.is_datetime_rhs and
(self.is_timedelta_lhs or self.is_offset_lhs)):
if name not in ('__add__', '__radd__', '__rsub__'):
raise TypeError("can only operate on a timedelta/DateOffset "
"with a rhs of a datetime for addition, "
"but the operator [%s] was passed" % name)
# 2 datetimes
elif self.is_datetime_lhs and self.is_datetime_rhs:
if name not in ('__sub__', '__rsub__'):
raise TypeError("can only operate on a datetimes for"
" subtraction, but the operator [%s] was"
" passed" % name)
# if tz's must be equal (same or None)
if getattr(lvalues, 'tz', None) != getattr(rvalues, 'tz', None):
raise ValueError("Incompatible tz's on datetime subtraction "
"ops")
elif ((self.is_timedelta_lhs or self.is_offset_lhs) and
self.is_datetime_rhs):
if name not in ('__add__', '__radd__'):
raise TypeError("can only operate on a timedelta/DateOffset "
"and a datetime for addition, but the "
"operator [%s] was passed" % name)
else:
raise TypeError('cannot operate on a series without a rhs '
'of a series/ndarray of type datetime64[ns] '
'or a timedelta')
def _convert_to_array(self, values, name=None, other=None):
"""converts values to ndarray"""
from pandas.core.tools.timedeltas import to_timedelta
ovalues = values
supplied_dtype = None
if not is_list_like(values):
values = np.array([values])
# if this is a Series that contains relevant dtype info, then use this
# instead of the inferred type; this avoids coercing Series([NaT],
# dtype='datetime64[ns]') to Series([NaT], dtype='timedelta64[ns]')
elif (isinstance(values, pd.Series) and
(is_timedelta64_dtype(values) or is_datetime64_dtype(values))):
supplied_dtype = values.dtype
inferred_type = lib.infer_dtype(values)
if (inferred_type in ('datetime64', 'datetime', 'date', 'time') or
is_datetimetz(inferred_type)):
            # if we have an other of timedelta, but use pd.NaT here,
            # we are in the wrong path
if (supplied_dtype is None and other is not None and
(other.dtype in ('timedelta64[ns]', 'datetime64[ns]')) and
isnull(values).all()):
values = np.empty(values.shape, dtype='timedelta64[ns]')
values[:] = iNaT
# a datelike
elif isinstance(values, pd.DatetimeIndex):
values = values.to_series()
# datetime with tz
elif (isinstance(ovalues, datetime.datetime) and
hasattr(ovalues, 'tzinfo')):
values = pd.DatetimeIndex(values)
# datetime array with tz
elif is_datetimetz(values):
if isinstance(values, ABCSeries):
values = values._values
elif not (isinstance(values, (np.ndarray, ABCSeries)) and
is_datetime64_dtype(values)):
values = libts.array_to_datetime(values)
elif inferred_type in ('timedelta', 'timedelta64'):
            # have a timedelta, convert to ns here
values = to_timedelta(values, errors='coerce', box=False)
elif inferred_type == 'integer':
# py3 compat where dtype is 'm' but is an integer
if values.dtype.kind == 'm':
values = values.astype('timedelta64[ns]')
elif isinstance(values, pd.PeriodIndex):
values = values.to_timestamp().to_series()
elif name not in ('__truediv__', '__div__', '__mul__', '__rmul__'):
raise TypeError("incompatible type for a datetime/timedelta "
"operation [{0}]".format(name))
elif inferred_type == 'floating':
if (isnull(values).all() and
name in ('__add__', '__radd__', '__sub__', '__rsub__')):
values = np.empty(values.shape, dtype=other.dtype)
values[:] = iNaT
return values
elif self._is_offset(values):
return values
else:
raise TypeError("incompatible type [{0}] for a datetime/timedelta"
" operation".format(np.array(values).dtype))
return values
def _convert_for_datetime(self, lvalues, rvalues):
from pandas.core.tools.timedeltas import to_timedelta
mask = isnull(lvalues) | isnull(rvalues)
# datetimes require views
if self.is_datetime_lhs or self.is_datetime_rhs:
# datetime subtraction means timedelta
if self.is_datetime_lhs and self.is_datetime_rhs:
if self.name in ('__sub__', '__rsub__'):
self.dtype = 'timedelta64[ns]'
else:
self.dtype = 'datetime64[ns]'
elif self.is_datetime64tz_lhs:
self.dtype = lvalues.dtype
elif self.is_datetime64tz_rhs:
self.dtype = rvalues.dtype
else:
self.dtype = 'datetime64[ns]'
# if adding single offset try vectorized path
# in DatetimeIndex; otherwise elementwise apply
def _offset(lvalues, rvalues):
if len(lvalues) == 1:
rvalues = pd.DatetimeIndex(rvalues)
lvalues = lvalues[0]
else:
warnings.warn("Adding/subtracting array of DateOffsets to "
"Series not vectorized", PerformanceWarning)
rvalues = rvalues.astype('O')
# pass thru on the na_op
self.na_op = lambda x, y: getattr(x, self.name)(y)
return lvalues, rvalues
if self.is_offset_lhs:
lvalues, rvalues = _offset(lvalues, rvalues)
elif self.is_offset_rhs:
rvalues, lvalues = _offset(rvalues, lvalues)
else:
# with tz, convert to UTC
if self.is_datetime64tz_lhs:
lvalues = lvalues.tz_convert('UTC').tz_localize(None)
if self.is_datetime64tz_rhs:
rvalues = rvalues.tz_convert('UTC').tz_localize(None)
lvalues = lvalues.view(np.int64)
rvalues = rvalues.view(np.int64)
# otherwise it's a timedelta
else:
self.dtype = 'timedelta64[ns]'
# convert Tick DateOffset to underlying delta
if self.is_offset_lhs:
lvalues = to_timedelta(lvalues, box=False)
if self.is_offset_rhs:
rvalues = to_timedelta(rvalues, box=False)
lvalues = lvalues.astype(np.int64)
if not self.is_floating_rhs:
rvalues = rvalues.astype(np.int64)
# time delta division -> unit less
# integer gets converted to timedelta in np < 1.6
if ((self.is_timedelta_lhs and self.is_timedelta_rhs) and
not self.is_integer_rhs and not self.is_integer_lhs and
self.name in ('__div__', '__truediv__')):
self.dtype = 'float64'
self.fill_value = np.nan
lvalues = lvalues.astype(np.float64)
rvalues = rvalues.astype(np.float64)
# if we need to mask the results
if mask.any():
def f(x):
# datetime64[ns]/timedelta64[ns] masking
try:
x = np.array(x, dtype=self.dtype)
except TypeError:
x = np.array(x, dtype='datetime64[ns]')
np.putmask(x, mask, self.fill_value)
return x
self.wrap_results = f
return lvalues, rvalues
def _is_offset(self, arr_or_obj):
""" check if obj or all elements of list-like is DateOffset """
if isinstance(arr_or_obj, pd.DateOffset):
return True
elif is_list_like(arr_or_obj) and len(arr_or_obj):
return all(isinstance(x, pd.DateOffset) for x in arr_or_obj)
return False
def _align_method_SERIES(left, right, align_asobject=False):
""" align lhs and rhs Series """
# ToDo: Different from _align_method_FRAME, list, tuple and ndarray
# are not coerced here
# because Series has inconsistencies described in #13637
if isinstance(right, ABCSeries):
# avoid repeated alignment
if not left.index.equals(right.index):
if align_asobject:
# to keep original value's dtype for bool ops
left = left.astype(object)
right = right.astype(object)
left, right = left.align(right, copy=False)
return left, right
def _construct_result(left, result, index, name, dtype):
return left._constructor(result, index=index, name=name, dtype=dtype)
def _construct_divmod_result(left, result, index, name, dtype):
"""divmod returns a tuple of like indexed series instead of a single series.
"""
constructor = left._constructor
return (
constructor(result[0], index=index, name=name, dtype=dtype),
constructor(result[1], index=index, name=name, dtype=dtype),
)
def _arith_method_SERIES(op, name, str_rep, fill_zeros=None, default_axis=None,
construct_result=_construct_result, **eval_kwargs):
"""
Wrapper function for Series arithmetic operations, to avoid
code duplication.
"""
def na_op(x, y):
try:
result = expressions.evaluate(op, str_rep, x, y,
raise_on_error=True, **eval_kwargs)
except TypeError:
if isinstance(y, (np.ndarray, ABCSeries, pd.Index)):
dtype = find_common_type([x.dtype, y.dtype])
result = np.empty(x.size, dtype=dtype)
mask = notnull(x) & notnull(y)
result[mask] = op(x[mask], _values_from_object(y[mask]))
elif isinstance(x, np.ndarray):
result = np.empty(len(x), dtype=x.dtype)
mask = notnull(x)
result[mask] = op(x[mask], y)
else:
raise TypeError("{typ} cannot perform the operation "
"{op}".format(typ=type(x).__name__,
op=str_rep))
result, changed = maybe_upcast_putmask(result, ~mask, np.nan)
result = missing.fill_zeros(result, x, y, name, fill_zeros)
return result
def safe_na_op(lvalues, rvalues):
try:
with np.errstate(all='ignore'):
return na_op(lvalues, rvalues)
except Exception:
if isinstance(rvalues, ABCSeries):
if is_object_dtype(rvalues):
# if dtype is object, try elementwise op
return libalgos.arrmap_object(rvalues,
lambda x: op(lvalues, x))
else:
if is_object_dtype(lvalues):
return libalgos.arrmap_object(lvalues,
lambda x: op(x, rvalues))
raise
def wrapper(left, right, name=name, na_op=na_op):
if isinstance(right, pd.DataFrame):
return NotImplemented
left, right = _align_method_SERIES(left, right)
converted = _Op.get_op(left, right, name, na_op)
left, right = converted.left, converted.right
lvalues, rvalues = converted.lvalues, converted.rvalues
dtype = converted.dtype
wrap_results = converted.wrap_results
na_op = converted.na_op
if isinstance(rvalues, ABCSeries):
name = _maybe_match_name(left, rvalues)
lvalues = getattr(lvalues, 'values', lvalues)
rvalues = getattr(rvalues, 'values', rvalues)
# _Op aligns left and right
else:
name = left.name
if (hasattr(lvalues, 'values') and
not isinstance(lvalues, pd.DatetimeIndex)):
lvalues = lvalues.values
result = wrap_results(safe_na_op(lvalues, rvalues))
return construct_result(
left,
result,
index=left.index,
name=name,
dtype=dtype,
)
return wrapper
def _comp_method_OBJECT_ARRAY(op, x, y):
if isinstance(y, list):
y = lib.list_to_object_array(y)
if isinstance(y, (np.ndarray, ABCSeries, ABCIndex)):
if not is_object_dtype(y.dtype):
y = y.astype(np.object_)
if isinstance(y, (ABCSeries, ABCIndex)):
y = y.values
result = lib.vec_compare(x, y, op)
else:
result = lib.scalar_compare(x, y, op)
return result
def _comp_method_SERIES(op, name, str_rep, masker=False):
"""
Wrapper function for Series arithmetic operations, to avoid
code duplication.
"""
def na_op(x, y):
# dispatch to the categorical if we have a categorical
# in either operand
if is_categorical_dtype(x):
return op(x, y)
elif is_categorical_dtype(y) and not is_scalar(y):
return op(y, x)
if is_object_dtype(x.dtype):
result = _comp_method_OBJECT_ARRAY(op, x, y)
else:
# we want to compare like types
# we only want to convert to integer like if
# we are not NotImplemented, otherwise
# we would allow datetime64 (but viewed as i8) against
# integer comparisons
if is_datetimelike_v_numeric(x, y):
raise TypeError("invalid type comparison")
# numpy does not like comparisons vs None
if is_scalar(y) and isnull(y):
if name == '__ne__':
return np.ones(len(x), dtype=bool)
else:
return np.zeros(len(x), dtype=bool)
# we have a datetime/timedelta and may need to convert
mask = None
if (needs_i8_conversion(x) or
(not is_scalar(y) and needs_i8_conversion(y))):
if is_scalar(y):
mask = isnull(x)
y = libindex.convert_scalar(x, _values_from_object(y))
else:
mask = isnull(x) | isnull(y)
y = y.view('i8')
x = x.view('i8')
try:
with np.errstate(all='ignore'):
result = getattr(x, name)(y)
if result is NotImplemented:
raise TypeError("invalid type comparison")
except AttributeError:
result = op(x, y)
if mask is not None and mask.any():
result[mask] = masker
return result
def wrapper(self, other, axis=None):
# Validate the axis parameter
if axis is not None:
self._get_axis_number(axis)
if isinstance(other, ABCSeries):
name = _maybe_match_name(self, other)
if not self._indexed_same(other):
msg = 'Can only compare identically-labeled Series objects'
raise ValueError(msg)
return self._constructor(na_op(self.values, other.values),
index=self.index, name=name)
elif isinstance(other, pd.DataFrame): # pragma: no cover
return NotImplemented
elif isinstance(other, (np.ndarray, pd.Index)):
# do not check length of zerodim array
# as it will broadcast
if (not is_scalar(lib.item_from_zerodim(other)) and
len(self) != len(other)):
raise ValueError('Lengths must match to compare')
if isinstance(other, ABCPeriodIndex):
# temp workaround until fixing GH 13637
# tested in test_nat_comparisons
# (pandas.tests.series.test_operators.TestSeriesOperators)
return self._constructor(na_op(self.values,
other.asobject.values),
index=self.index)
return self._constructor(na_op(self.values, np.asarray(other)),
index=self.index).__finalize__(self)
elif isinstance(other, pd.Categorical):
if not is_categorical_dtype(self):
msg = ("Cannot compare a Categorical for op {op} with Series "
"of dtype {typ}.\nIf you want to compare values, use "
"'series <op> np.asarray(other)'.")
raise TypeError(msg.format(op=op, typ=self.dtype))
if is_categorical_dtype(self):
# cats are a special case as get_values() would return an ndarray,
# which would then not take categories ordering into account
# we can go directly to op, as the na_op would just test again and
# dispatch to it.
with np.errstate(all='ignore'):
res = op(self.values, other)
else:
values = self.get_values()
if isinstance(other, (list, np.ndarray)):
other = np.asarray(other)
with np.errstate(all='ignore'):
res = na_op(values, other)
if is_scalar(res):
raise TypeError('Could not compare %s type with Series' %
type(other))
# always return a full value series here
res = _values_from_object(res)
res = pd.Series(res, index=self.index, name=self.name, dtype='bool')
return res
return wrapper
def _bool_method_SERIES(op, name, str_rep):
"""
Wrapper function for Series arithmetic operations, to avoid
code duplication.
"""
def na_op(x, y):
try:
result = op(x, y)
except TypeError:
if isinstance(y, list):
y = lib.list_to_object_array(y)
if isinstance(y, (np.ndarray, ABCSeries)):
if (is_bool_dtype(x.dtype) and is_bool_dtype(y.dtype)):
result = op(x, y) # when would this be hit?
else:
x = _ensure_object(x)
y = _ensure_object(y)
result = lib.vec_binop(x, y, op)
else:
try:
# let null fall thru
if not isnull(y):
y = bool(y)
result = lib.scalar_binop(x, y, op)
except:
raise TypeError("cannot compare a dtyped [{0}] array with "
"a scalar of type [{1}]".format(
x.dtype, type(y).__name__))
return result
def wrapper(self, other):
is_self_int_dtype = is_integer_dtype(self.dtype)
fill_int = lambda x: x.fillna(0)
fill_bool = lambda x: x.fillna(False).astype(bool)
self, other = _align_method_SERIES(self, other, align_asobject=True)
if isinstance(other, ABCSeries):
name = _maybe_match_name(self, other)
is_other_int_dtype = is_integer_dtype(other.dtype)
other = fill_int(other) if is_other_int_dtype else fill_bool(other)
filler = (fill_int if is_self_int_dtype and is_other_int_dtype
else fill_bool)
return filler(self._constructor(na_op(self.values, other.values),
index=self.index, name=name))
elif isinstance(other, pd.DataFrame):
return NotImplemented
else:
# scalars, list, tuple, np.array
filler = (fill_int if is_self_int_dtype and
is_integer_dtype(np.asarray(other)) else fill_bool)
return filler(self._constructor(
na_op(self.values, other),
index=self.index)).__finalize__(self)
return wrapper
_op_descriptions = {'add': {'op': '+',
'desc': 'Addition',
'reversed': False,
'reverse': 'radd'},
'sub': {'op': '-',
'desc': 'Subtraction',
'reversed': False,
'reverse': 'rsub'},
'mul': {'op': '*',
'desc': 'Multiplication',
'reversed': False,
'reverse': 'rmul'},
'mod': {'op': '%',
'desc': 'Modulo',
'reversed': False,
'reverse': 'rmod'},
'pow': {'op': '**',
'desc': 'Exponential power',
'reversed': False,
'reverse': 'rpow'},
'truediv': {'op': '/',
'desc': 'Floating division',
'reversed': False,
'reverse': 'rtruediv'},
'floordiv': {'op': '//',
'desc': 'Integer division',
'reversed': False,
'reverse': 'rfloordiv'},
'divmod': {'op': 'divmod',
'desc': 'Integer division and modulo',
'reversed': False,
'reverse': None},
'eq': {'op': '==',
'desc': 'Equal to',
'reversed': False,
'reverse': None},
'ne': {'op': '!=',
'desc': 'Not equal to',
'reversed': False,
'reverse': None},
'lt': {'op': '<',
'desc': 'Less than',
'reversed': False,
'reverse': None},
'le': {'op': '<=',
'desc': 'Less than or equal to',
'reversed': False,
'reverse': None},
'gt': {'op': '>',
'desc': 'Greater than',
'reversed': False,
'reverse': None},
'ge': {'op': '>=',
'desc': 'Greater than or equal to',
'reversed': False,
'reverse': None}}
_op_names = list(_op_descriptions.keys())
for k in _op_names:
reverse_op = _op_descriptions[k]['reverse']
_op_descriptions[reverse_op] = _op_descriptions[k].copy()
_op_descriptions[reverse_op]['reversed'] = True
_op_descriptions[reverse_op]['reverse'] = k
_flex_doc_SERIES = """
%s of series and other, element-wise (binary operator `%s`).
Equivalent to ``%s``, but with support to substitute a fill_value for
missing data in one of the inputs.
Parameters
----------
other : Series or scalar value
fill_value : None or float value, default None (NaN)
Fill missing (NaN) values with this value. If both Series are
missing, the result will be missing
level : int or name
Broadcast across a level, matching Index values on the
passed MultiIndex level
Returns
-------
result : Series
See also
--------
Series.%s
"""
def _flex_method_SERIES(op, name, str_rep, default_axis=None, fill_zeros=None,
**eval_kwargs):
op_name = name.replace('__', '')
op_desc = _op_descriptions[op_name]
if op_desc['reversed']:
equiv = 'other ' + op_desc['op'] + ' series'
else:
equiv = 'series ' + op_desc['op'] + ' other'
doc = _flex_doc_SERIES % (op_desc['desc'], op_name, equiv,
op_desc['reverse'])
@Appender(doc)
def flex_wrapper(self, other, level=None, fill_value=None, axis=0):
# validate axis
if axis is not None:
self._get_axis_number(axis)
if isinstance(other, ABCSeries):
return self._binop(other, op, level=level, fill_value=fill_value)
elif isinstance(other, (np.ndarray, list, tuple)):
if len(other) != len(self):
raise ValueError('Lengths must be equal')
return self._binop(self._constructor(other, self.index), op,
level=level, fill_value=fill_value)
else:
if fill_value is not None:
self = self.fillna(fill_value)
return self._constructor(op(self, other),
self.index).__finalize__(self)
flex_wrapper.__name__ = name
return flex_wrapper
series_flex_funcs = dict(flex_arith_method=_flex_method_SERIES,
flex_comp_method=_flex_method_SERIES)
series_special_funcs = dict(arith_method=_arith_method_SERIES,
comp_method=_comp_method_SERIES,
bool_method=_bool_method_SERIES,
have_divmod=True)
_arith_doc_FRAME = """
Binary operator %s with support to substitute a fill_value for missing data in
one of the inputs
Parameters
----------
other : Series, DataFrame, or constant
axis : {0, 1, 'index', 'columns'}
For Series input, axis to match Series index on
fill_value : None or float value, default None
Fill missing (NaN) values with this value. If both DataFrame locations are
missing, the result will be missing
level : int or name
Broadcast across a level, matching Index values on the
passed MultiIndex level
Notes
-----
Mismatched indices will be unioned together
Returns
-------
result : DataFrame
"""
_flex_doc_FRAME = """
%s of dataframe and other, element-wise (binary operator `%s`).
Equivalent to ``%s``, but with support to substitute a fill_value for
missing data in one of the inputs.
Parameters
----------
other : Series, DataFrame, or constant
axis : {0, 1, 'index', 'columns'}
For Series input, axis to match Series index on
fill_value : None or float value, default None
Fill missing (NaN) values with this value. If both DataFrame
locations are missing, the result will be missing
level : int or name
Broadcast across a level, matching Index values on the
passed MultiIndex level
Notes
-----
Mismatched indices will be unioned together
Returns
-------
result : DataFrame
See also
--------
DataFrame.%s
"""
def _align_method_FRAME(left, right, axis):
""" convert rhs to meet lhs dims if input is list, tuple or np.ndarray """
def to_series(right):
msg = 'Unable to coerce to Series, length must be {0}: given {1}'
if axis is not None and left._get_axis_name(axis) == 'index':
if len(left.index) != len(right):
raise ValueError(msg.format(len(left.index), len(right)))
right = left._constructor_sliced(right, index=left.index)
else:
if len(left.columns) != len(right):
raise ValueError(msg.format(len(left.columns), len(right)))
right = left._constructor_sliced(right, index=left.columns)
return right
if isinstance(right, (list, tuple)):
right = to_series(right)
elif isinstance(right, np.ndarray) and right.ndim: # skips np scalar
if right.ndim == 1:
right = to_series(right)
elif right.ndim == 2:
if left.shape != right.shape:
msg = ("Unable to coerce to DataFrame, "
"shape must be {0}: given {1}")
raise ValueError(msg.format(left.shape, right.shape))
right = left._constructor(right, index=left.index,
columns=left.columns)
else:
msg = 'Unable to coerce to Series/DataFrame, dim must be <= 2: {0}'
raise ValueError(msg.format(right.shape, ))
return right
def _arith_method_FRAME(op, name, str_rep=None, default_axis='columns',
fill_zeros=None, **eval_kwargs):
def na_op(x, y):
try:
result = expressions.evaluate(op, str_rep, x, y,
raise_on_error=True, **eval_kwargs)
except TypeError:
xrav = x.ravel()
if isinstance(y, (np.ndarray, ABCSeries)):
dtype = np.find_common_type([x.dtype, y.dtype], [])
result = np.empty(x.size, dtype=dtype)
yrav = y.ravel()
mask = notnull(xrav) & notnull(yrav)
xrav = xrav[mask]
# we may need to manually
# broadcast a 1 element array
if yrav.shape != mask.shape:
yrav = np.empty(mask.shape, dtype=yrav.dtype)
yrav.fill(yrav.item())
yrav = yrav[mask]
if np.prod(xrav.shape) and np.prod(yrav.shape):
with np.errstate(all='ignore'):
result[mask] = op(xrav, yrav)
elif hasattr(x, 'size'):
result = np.empty(x.size, dtype=x.dtype)
mask = notnull(xrav)
xrav = xrav[mask]
if np.prod(xrav.shape):
with np.errstate(all='ignore'):
result[mask] = op(xrav, y)
else:
raise TypeError("cannot perform operation {op} between "
"objects of type {x} and {y}".format(
op=name, x=type(x), y=type(y)))
result, changed = maybe_upcast_putmask(result, ~mask, np.nan)
result = result.reshape(x.shape)
result = missing.fill_zeros(result, x, y, name, fill_zeros)
return result
if name in _op_descriptions:
op_name = name.replace('__', '')
op_desc = _op_descriptions[op_name]
if op_desc['reversed']:
equiv = 'other ' + op_desc['op'] + ' dataframe'
else:
equiv = 'dataframe ' + op_desc['op'] + ' other'
doc = _flex_doc_FRAME % (op_desc['desc'], op_name, equiv,
op_desc['reverse'])
else:
doc = _arith_doc_FRAME % name
@Appender(doc)
def f(self, other, axis=default_axis, level=None, fill_value=None):
other = _align_method_FRAME(self, other, axis)
if isinstance(other, pd.DataFrame): # Another DataFrame
return self._combine_frame(other, na_op, fill_value, level)
elif isinstance(other, ABCSeries):
return self._combine_series(other, na_op, fill_value, axis, level)
else:
if fill_value is not None:
self = self.fillna(fill_value)
return self._combine_const(other, na_op)
f.__name__ = name
return f
# Masker unused for now
def _flex_comp_method_FRAME(op, name, str_rep=None, default_axis='columns',
masker=False):
def na_op(x, y):
try:
result = op(x, y)
except TypeError:
xrav = x.ravel()
result = np.empty(x.size, dtype=bool)
if isinstance(y, (np.ndarray, ABCSeries)):
yrav = y.ravel()
mask = notnull(xrav) & notnull(yrav)
result[mask] = op(np.array(list(xrav[mask])),
np.array(list(yrav[mask])))
else:
mask = notnull(xrav)
result[mask] = op(np.array(list(xrav[mask])), y)
if op == operator.ne: # pragma: no cover
np.putmask(result, ~mask, True)
else:
np.putmask(result, ~mask, False)
result = result.reshape(x.shape)
return result
@Appender('Wrapper for flexible comparison methods %s' % name)
def f(self, other, axis=default_axis, level=None):
other = _align_method_FRAME(self, other, axis)
if isinstance(other, pd.DataFrame): # Another DataFrame
return self._flex_compare_frame(other, na_op, str_rep, level)
elif isinstance(other, ABCSeries):
return self._combine_series(other, na_op, None, axis, level)
else:
return self._combine_const(other, na_op)
f.__name__ = name
return f
def _comp_method_FRAME(func, name, str_rep, masker=False):
@Appender('Wrapper for comparison method %s' % name)
def f(self, other):
if isinstance(other, pd.DataFrame): # Another DataFrame
return self._compare_frame(other, func, str_rep)
elif isinstance(other, ABCSeries):
return self._combine_series_infer(other, func)
else:
            # straight boolean comparisons we want to allow all columns
# (regardless of dtype to pass thru) See #4537 for discussion.
res = self._combine_const(other, func, raise_on_error=False)
return res.fillna(True).astype(bool)
f.__name__ = name
return f
frame_flex_funcs = dict(flex_arith_method=_arith_method_FRAME,
flex_comp_method=_flex_comp_method_FRAME)
frame_special_funcs = dict(arith_method=_arith_method_FRAME,
comp_method=_comp_method_FRAME,
bool_method=_arith_method_FRAME)
def _arith_method_PANEL(op, name, str_rep=None, fill_zeros=None,
default_axis=None, **eval_kwargs):
# copied from Series na_op above, but without unnecessary branch for
# non-scalar
def na_op(x, y):
try:
result = expressions.evaluate(op, str_rep, x, y,
raise_on_error=True, **eval_kwargs)
except TypeError:
# TODO: might need to find_common_type here?
result = np.empty(len(x), dtype=x.dtype)
mask = notnull(x)
result[mask] = op(x[mask], y)
result, changed = maybe_upcast_putmask(result, ~mask, np.nan)
result = missing.fill_zeros(result, x, y, name, fill_zeros)
return result
# work only for scalars
def f(self, other):
if not is_scalar(other):
raise ValueError('Simple arithmetic with %s can only be '
'done with scalar values' %
self._constructor.__name__)
return self._combine(other, op)
f.__name__ = name
return f
def _comp_method_PANEL(op, name, str_rep=None, masker=False):
def na_op(x, y):
try:
result = expressions.evaluate(op, str_rep, x, y,
raise_on_error=True)
except TypeError:
xrav = x.ravel()
result = np.empty(x.size, dtype=bool)
if isinstance(y, np.ndarray):
yrav = y.ravel()
mask = notnull(xrav) & notnull(yrav)
result[mask] = op(np.array(list(xrav[mask])),
np.array(list(yrav[mask])))
else:
mask = notnull(xrav)
result[mask] = op(np.array(list(xrav[mask])), y)
if op == operator.ne: # pragma: no cover
np.putmask(result, ~mask, True)
else:
np.putmask(result, ~mask, False)
result = result.reshape(x.shape)
return result
@Appender('Wrapper for comparison method %s' % name)
def f(self, other, axis=None):
# Validate the axis parameter
if axis is not None:
axis = self._get_axis_number(axis)
if isinstance(other, self._constructor):
return self._compare_constructor(other, na_op)
elif isinstance(other, (self._constructor_sliced, pd.DataFrame,
ABCSeries)):
raise Exception("input needs alignment for this object [%s]" %
self._constructor)
else:
return self._combine_const(other, na_op)
f.__name__ = name
return f
panel_special_funcs = dict(arith_method=_arith_method_PANEL,
comp_method=_comp_method_PANEL,
bool_method=_arith_method_PANEL)
|
mit
|
ilo10/scikit-learn
|
sklearn/cluster/setup.py
|
263
|
1449
|
# Author: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
import os
from os.path import join
import numpy
from sklearn._build_utils import get_blas_info
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration
cblas_libs, blas_info = get_blas_info()
libraries = []
if os.name == 'posix':
cblas_libs.append('m')
libraries.append('m')
config = Configuration('cluster', parent_package, top_path)
config.add_extension('_dbscan_inner',
sources=['_dbscan_inner.cpp'],
include_dirs=[numpy.get_include()],
language="c++")
config.add_extension('_hierarchical',
sources=['_hierarchical.cpp'],
language="c++",
include_dirs=[numpy.get_include()],
libraries=libraries)
config.add_extension(
'_k_means',
libraries=cblas_libs,
sources=['_k_means.c'],
include_dirs=[join('..', 'src', 'cblas'),
numpy.get_include(),
blas_info.pop('include_dirs', [])],
extra_compile_args=blas_info.pop('extra_compile_args', []),
**blas_info
)
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
|
bsd-3-clause
|
sfalkner/pySMAC
|
examples/sklearn_example.py
|
1
|
3509
|
from __future__ import print_function, division
import pysmac
import sklearn.ensemble
import sklearn.datasets
import sklearn.cross_validation
# First, let us generate a random classification problem. Note that, due to the
# way pysmac implements parallelism, the data is either a global variable, or
# the function loads it itself. Please refer to the Python manual on the
# multiprocessing module for its limitations. In the future, we might include additional
# parameters to the function, but for now that is not possible.
X,Y = sklearn.datasets.make_classification(1000, 20, random_state=2) # seed yields a mediocre initial accuracy on my machine
X_train, X_test, Y_train, Y_test = sklearn.cross_validation.train_test_split(X,Y, test_size=0.33, random_state=1)
# The function to be minimized for this example is the mean accuracy of a random
# forest on the test data set. Note: because SMAC minimizes the objective, we return
# the negative accuracy in order to maximize it.
def random_forest(n_estimators,criterion, max_features, max_depth):
predictor = sklearn.ensemble.RandomForestClassifier(n_estimators, criterion, max_features, max_depth)
predictor.fit(X_train, Y_train)
return -predictor.score(X_test, Y_test)
parameter_definition=dict(\
max_depth =("integer", [1,10], 4),
max_features=("integer", [1,20], 10),
n_estimators=("integer", [10,100], 10, 'log'),
criterion =("categorical", ['gini', 'entropy'], 'entropy'),
)
# A little bit of explanation: the first two lines define integer parameters
# ranging from 1 to 10/20 with some default values. The third line also defines
# an integer parameter, but the additional 'log' string tells SMAC to vary it
# uniformly on a logarithmic scale. Here it means that 1<=n_estimators<=10 is
# as likely as 10<n_estimators<=100.
# The last line defines a categorical parameter. For now, the values are always
# treated as strings. This means you would have to cast them inside your function
# when this is not appropriate, e.g., when discretizing an interval.
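# A small sketch of such a cast (hypothetical objective, not used below): if a
# parameter were declared as ("categorical", ['10', '20', '50'], '20'), its value
# would arrive as a string and has to be converted back to a number before use.
def categorical_cast_example(n_bins):
    n_bins = int(n_bins)  # pysmac passes categorical values as strings
    return n_bins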
# Now we create the optimizer object again. This time with some parameters
opt = pysmac.SMAC_optimizer( working_directory = '/tmp/pysmac_test/',# the folder where SMAC generates output
persistent_files=False, # whether the output will persist beyond the python object's lifetime
debug = False # if something goes wrong, enable this for diagnostic output
)
# first we try the sklearn default, so we can see if SMAC can improve the performance
predictor = sklearn.ensemble.RandomForestClassifier()
predictor.fit(X_train, Y_train)
print(('The default accuracy is %f'%predictor.score(X_test, Y_test)))
# The minimize method also has optional arguments
value, parameters = opt.minimize(random_forest,
100 , parameter_definition, # in a real setting, you probably want to do more than 100 evaluations here
num_runs = 2, # number of independent SMAC runs
seed = 2, # the random seed used. can be an int or a list of ints of length num_runs
num_procs = 2, # pysmac can harness multicore architecture. Specify the number of processes to use here.
            mem_limit_function_mb=1000, # There are built-in mechanisms to limit the resources available to each function call:
            t_limit_function_s = 20 # You can limit the memory available and the wallclock time for each function call
)
print(('The highest accuracy found: %f'%(-value)))
print(('Parameter setting %s'%parameters))
|
agpl-3.0
|
warrenmcg/spladder
|
python/spladder_test.py
|
1
|
35821
|
import scipy as sp
import scipy.io as scio
import statsmodels.api as sm
import statsmodels.sandbox as sms
import h5py
import sys
import os
import pdb
import cPickle
import warnings
import time
import datetime
from scipy.optimize import minimize_scalar
from scipy.special import polygamma
from scipy.stats import chi2,nbinom
import numpy.random as npr
from modules.classes.gene import Gene
import modules.alt_splice.quantify as quantify
import modules.testing.likelihood as likelihood
import modules.settings as settings
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import multiprocessing as mp
import signal as sig
TIME0 = time.time()
class Dummy():
"""Dummy class to mimic matlab structs"""
pass
def parse_options(argv):
"""Parses options from the command line """
from optparse import OptionParser, OptionGroup
parser = OptionParser()
required = OptionGroup(parser, 'MANDATORY')
required.add_option('-o', '--outdir', dest='outdir', metavar='DIR', help='spladder output directory', default='-')
required.add_option('-a', '--conditionA', dest='conditionA', metavar='idA1,idA2,idA3,...', help='comma separated list of samples files for condition A', default='-')
required.add_option('-b', '--conditionB', dest='conditionB', metavar='idB1,idB2,idB3,...', help='comma separated list of samples files for condition B', default='-')
input = OptionGroup(parser, 'INPUT OPTIONS')
input.add_option('-n', '--readlen', dest='readlen', metavar='INT', type='int', help='read length [50]', default=50)
input.add_option('-c', '--confidence', dest='confidence', metavar='INT', type='int', help='confidence level (0 lowest to 3 highest) [3]', default=3)
    input.add_option('-t', '--event_types', dest='event_types', metavar='TYPE1,TYPE2,...', help='list of alternative splicing events to be tested [exon_skip,intron_retention,alt_3prime,alt_5prime,mult_exon_skip]', default='exon_skip,intron_retention,alt_3prime,alt_5prime,mult_exon_skip')
input.add_option('-M', '--merge_strat', dest='merge', metavar='<STRAT>', help='merge strategy, where <STRAT> is one of: merge_bams, merge_graphs, merge_all [merge_graphs]', default='merge_graphs')
input.add_option('-V', '--validate_sg', dest='validate_sg', metavar='y|n', help='validate splice graph [n]', default='n')
input.add_option('-m', '--matlab', dest='matlab', metavar='y|n', help='input data was generated with matlab version [n]', default='n')
input.add_option('-s', '--subset_samples', dest='subset_samples', metavar='y|n', help='gene expression counting will be only done on the tested subset of samples [n]', default='n')
testing = OptionGroup(parser, 'TESTING OPTIONS')
testing.add_option('-C', '--correction', dest='correction', metavar='STR', help='method for multiple testing correction (BH, Bonferroni, Holm, Hochberg, BY, TSBH) [BH]', default='BH')
testing.add_option('-0', '--max_zero_frac', dest='max_0_frac', metavar='FLOAT', type='float', help='max fraction of 0 values per event isoform quantification over all tested samples [0.5]', default=0.5)
testing.add_option('-i', '--min_count', dest='min_count', metavar='INT', help='min read count sum over all samples for an event isoform to be tested [10]', default=10)
output = OptionGroup(parser, 'OUTPUT OPTIONS')
output.add_option('-v', '--verbose', dest='verbose', metavar='y|n', help='verbosity', default='n')
output.add_option('-d', '--debug', dest='debug', metavar='y|n', help='use debug mode [n]', default='n')
output.add_option('--timestamp', dest='timestamp', metavar='y|n', help='add timestamp to output directory [n]', default='n')
output.add_option('--labelA', dest='labelA', metavar='STRING', help='label for condition A (used for output naming)', default='condA')
output.add_option('--labelB', dest='labelB', metavar='STRING', help='label for condition B (used for output naming)', default='condB')
experimental = OptionGroup(parser, 'EXPERIMENTAL - BETA STATE')
experimental.add_option('', '--parallel', dest='parallel', metavar='<INT>', type='int', help='use multiple processors [1]', default=1)
parser.add_option_group(required)
parser.add_option_group(input)
parser.add_option_group(testing)
parser.add_option_group(output)
parser.add_option_group(experimental)
(options, args) = parser.parse_args()
if len(argv) < 2:
parser.print_help()
sys.exit(2)
options.parser = parser
return options
def get_non_alt_seg_ids_matlab(gene):
tmp = sp.ones((gene.segmentgraph[0, 2].shape[0],), dtype='bool')
for i in range(gene.segmentgraph[0, 2].shape[0] - 1):
### get index of last acceptor
idx = sp.where(gene.segmentgraph[0, 2][i, i + 1:])[0]
### mask all segments between current segment and acceptor
if idx.shape[0] > 0:
tmp[i + 1:idx[-1] + i + 1] = 0
return sp.where(tmp)[0]
def get_gene_expression(CFG, fn_out=None, strain_subset=None):
if CFG['verbose']:
sys.stdout.write('Quantifying gene expression ...\n')
### load gene information
if CFG['is_matlab']:
genes = scio.loadmat(CFG['fname_genes'], struct_as_record=False)['genes'][0, :]
numgenes = len(genes)
else:
genes = cPickle.load(open(CFG['fname_genes'], 'r'))[0]
numgenes = genes.shape[0]
### open hdf5 file containing graph count information
IN = h5py.File(CFG['fname_count_in'], 'r')
strains = IN['strains'][:].astype('str')
if strain_subset is None:
strain_idx = sp.arange(strains.shape[0])
else:
strain_idx = sp.where(sp.in1d(strains, strain_subset))[0]
gene_counts = sp.zeros((numgenes, strain_idx.shape[0]), dtype='float')
gene_names = sp.array([x.name for x in genes], dtype='str')
if CFG['is_matlab']:
seg_lens = IN['seg_len'][:, 0]
gene_ids_segs = IN['gene_ids_segs'][0, :].astype('int') - 1
else:
seg_lens = IN['seg_len'][:]
gene_ids_segs = IN['gene_ids_segs'][:].astype('int')
### no longer assume that the gene_ids_segs are sorted by gene ID
s_idx = sp.argsort(gene_ids_segs[:, 0], kind='mergesort')
_, u_idx = sp.unique(gene_ids_segs[s_idx, 0], return_index=True)
s_idx = s_idx[u_idx]
### iterate over genes
#seg_offset = 0
#tut = sp.where(gene_names == 'ENSG00000163812.9')[0]
#for gidx in tut:
for gidx, iidx in enumerate(s_idx):
if CFG['verbose']:
log_progress(gidx, numgenes, 100)
### get idx of non alternative segments
if CFG['is_matlab']:
#non_alt_idx = get_non_alt_seg_ids_matlab(genes[gidx])
#seg_idx = sp.arange(seg_offset, seg_offset + genes[gidx].segmentgraph[0, 2].shape[0])
seg_idx = sp.arange(iidx, iidx + genes[gidx].segmentgraph[0, 2].shape[0])
if len(seg_idx) == 0:
continue
else:
#non_alt_idx = genes[gidx].get_non_alt_seg_ids()
#seg_idx = sp.arange(seg_offset, seg_offset + genes[gidx].segmentgraph.seg_edges.shape[0])
seg_idx = sp.arange(iidx, iidx + genes[gidx].segmentgraph.seg_edges.shape[0])
gene_idx = gene_ids_segs[seg_idx]
if len(gene_idx.shape) > 0:
gene_idx = gene_idx[0]
if CFG['is_matlab']:
assert(IN['gene_names'][gene_idx] == genes[gidx].name)
else:
assert(IN['gene_names'][:][gene_idx] == genes[gidx].name)
assert(genes[gidx].name == gene_names[gidx])
#seg_idx = seg_idx[non_alt_idx]
### compute gene expression as the read count over all non alternative segments
if CFG['is_matlab']:
#gene_counts[gidx, :] = sp.dot(IN['segments'][:, seg_idx], IN['seg_len'][seg_idx, 0]) / sp.sum(IN['seg_len'][seg_idx, 0])
gene_counts[gidx, :] = sp.dot(IN['segments'][:, seg_idx][strain_idx], seg_lens[seg_idx]) / CFG['read_length']
#seg_offset += genes[gidx].segmentgraph[0, 2].shape[0]
else:
#gene_counts[gidx, :] = sp.dot(IN['segments'][seg_idx, :].T, IN['seg_len'][:][seg_idx]) / sp.sum(IN['seg_len'][:][seg_idx])
if seg_idx.shape[0] > 1:
gene_counts[gidx, :] = sp.dot(IN['segments'][seg_idx, :][:, strain_idx].T, seg_lens[seg_idx, 0]) / CFG['read_length']
else:
gene_counts[gidx, :] = IN['segments'][seg_idx, :][strain_idx] * seg_lens[seg_idx, 0] / CFG['read_length']
#seg_offset += genes[gidx].segmentgraph.seg_edges.shape[0]
IN.close()
if CFG['verbose']:
sys.stdout.write('\n... done.\n')
### write results to hdf5
if fn_out is not None:
OUT = h5py.File(fn_out, 'w')
OUT.create_dataset(name='strains', data=strains[strain_idx])
OUT.create_dataset(name='genes', data=gene_names)
OUT.create_dataset(name='raw_count', data=gene_counts, compression="gzip")
OUT.close()
return (gene_counts, strains, gene_names)
def get_size_factors(gene_counts, CFG):
if CFG['verbose']:
print 'Estimating size factors'
### take geometric mean of counts
gmean = sp.exp(sp.mean(sp.log(gene_counts + 1), axis=1))
size_factors = []
for i in xrange(gene_counts.shape[1]):
idx = gene_counts[:, i] > 0
size_factors.append(sp.median(gene_counts[idx, i] / gmean[idx]))
size_factors = sp.array(size_factors, dtype='float')
return size_factors
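# Minimal numeric sketch (hypothetical helper, not called by this script) of the
# median-of-ratios idea implemented in get_size_factors above: sample 2 contains
# every gene at exactly twice the depth of sample 1, so its size factor comes out
# twice as large.
def _size_factor_sketch():
    counts = sp.array([[10., 20.], [100., 200.], [5., 10.]])
    gmean = sp.exp(sp.mean(sp.log(counts + 1), axis=1))
    sf = []
    for i in range(counts.shape[1]):
        idx = counts[:, i] > 0
        sf.append(sp.median(counts[idx, i] / gmean[idx]))
    return sp.array(sf, dtype='float')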
def re_quantify_events(CFG):
"""This is more a legacy function for testing that requantifies events on a given graph"""
### load events
if CFG['is_matlab']:
if CFG['fname_events'].endswith('mat'):
try:
ev = scio.loadmat(CFG['fname_events'], struct_as_record=False)['events_all'][0, :]
except NotImplementedError:
print >> sys.stderr, 'The event file in matlab format is too big to be loaded with python correctly. Please use the script events_to_hdf5.m in the matlab/src part of SplAdder to convert your event file to HDF5 and use it here instead.'
sys.exit(1)
else:
ev = []
IN = h5py.File(CFG['fname_events'], 'r', driver='core')
shp = IN['chr'].shape[0]
for i in range(shp):
if CFG['verbose']:
sys.stderr.write('.')
sys.stderr.flush()
if (i + 1) % 100 == 0:
sys.stderr.write('%i/%i\n' % (i + 1, shp + 1))
tmp = Dummy()
for k in IN.keys():
if IN[k].shape[0] == shp:
exec('tmp.%s = IN[\'%s\'][%i]' % (k, k, i))
elif IN[k].shape[1] == shp:
exec('tmp.%s = IN[\'%s\'][:, %i]' % (k, k, i))
elif IN[k].shape[2] == shp:
exec('tmp.%s = IN[\'%s\'][:, :, %i]' % (k, k, i))
if k == 'gene_idx':
tmp.gene_idx = int(tmp.gene_idx[0])
ev.append(tmp)
IN.close()
ev = sp.array(ev, dtype='object')
else:
ev = cPickle.load(open(CFG['fname_events'], 'r'))[0]
cov = quantify.quantify_from_graph(ev, sp.arange(1000), 'exon_skip', CFG, fn_merge=sys.argv[1])
return cov
def log_progress(idx, total, bins=50):
global TIME0
binsize = max(total / bins, 1)
if idx % binsize == 0:
time1 = time.time()
if idx == 0:
TIME0 = time1
progress = idx / binsize
sys.stdout.write('\r[' + ('#' * progress) + (' ' * (bins - progress)) + ']' + ' %i / %i (%.0f%%)' % (idx, total, float(idx) / max(total, 1) * 100) + ' - took %i sec (ETA: %i sec)' % (time1 - TIME0, int((bins - progress) * float(time1 - TIME0) / max(progress, 1))))
sys.stdout.flush()
def estimate_dispersion_chunk(gene_counts, matrix, sf, CFG, idx, log=False):
disp_raw = sp.empty((idx.shape[0], 1), dtype='float')
disp_raw.fill(sp.nan)
disp_raw_conv = sp.zeros((idx.shape[0], 1), dtype='bool')
for i in range(idx.shape[0]):
if log:
log_progress(i, idx.shape[0])
disp = 0.1
resp = gene_counts[i, :].astype('int')
if sum(resp / sf) < CFG['min_count'] or sp.mean(resp == 0) > 0.6:
continue
for j in range(10):
modNB = sm.GLM(resp, matrix, family=sm.families.NegativeBinomial(alpha=disp), offset=sp.log(sf))
result = modNB.fit()
last_disp = disp
yhat = result.mu
sign = -1.0
with warnings.catch_warnings():
warnings.simplefilter("ignore")
res = minimize_scalar(likelihood.adj_loglikelihood_scalar, args=(matrix, resp, yhat, sign), method='Bounded', bounds=(0, 10.0), tol=1e-5)
disp = res.x
if abs(sp.log(disp) - sp.log(last_disp)) < 1e-4:
disp_raw[i] = disp
disp_raw_conv[i] = True
break
else:
disp_raw[i] = disp
disp_raw_conv[i] = False
if log:
log_progress(idx.shape[0], idx.shape[0])
return (disp_raw, disp_raw_conv, idx)
def estimate_dispersion(gene_counts, matrix, sf, CFG):
if CFG['verbose']:
print 'Estimating raw dispersions'
if CFG['parallel'] > 1:
disp_raw = sp.empty((gene_counts.shape[0], 1), dtype='float')
disp_raw.fill(sp.nan)
disp_raw_conv = sp.zeros((gene_counts.shape[0], 1), dtype='bool')
pool = mp.Pool(processes=CFG['parallel'], initializer=lambda: sig.signal(sig.SIGINT, sig.SIG_IGN))
binsize = 30
idx_chunks = [sp.arange(x, min(x + binsize, gene_counts.shape[0])) for x in range(0, gene_counts.shape[0], binsize)]
try:
result = [pool.apply_async(estimate_dispersion_chunk, args=(gene_counts[idx, :], matrix, sf, CFG, idx,)) for idx in idx_chunks]
res_cnt = 0
while result:
tmp = result.pop(0).get()
for i, j in enumerate(tmp[2]):
if CFG['verbose']:
log_progress(res_cnt, gene_counts.shape[0])
res_cnt += 1
disp_raw[j] = tmp[0][i]
disp_raw_conv[j] = tmp[1][i]
if CFG['verbose']:
log_progress(gene_counts.shape[0], gene_counts.shape[0])
print ''
pool.terminate()
pool.join()
except KeyboardInterrupt:
print >> sys.stderr, 'Keyboard Interrupt - exiting'
pool.terminate()
pool.join()
sys.exit(1)
else:
(disp_raw, disp_raw_conv, _) = estimate_dispersion_chunk(gene_counts, matrix, sf, CFG, sp.arange(gene_counts.shape[0]), log=CFG['verbose'])
if CFG['debug']:
fig = plt.figure(figsize=(8, 6), dpi=100)
ax = fig.add_subplot(111)
idx = sp.where(~sp.isnan(disp_raw))[0]
ax.plot(sp.mean(sp.log10(gene_counts + 1), axis=1)[idx], disp_raw[idx], 'bo')
ax.set_title('Raw Dispersion Estimate')
ax.set_xlabel('Mean expression count')
ax.set_ylabel('Dispersion')
plt.savefig('dispersion_raw.pdf', format='pdf', bbox_inches='tight')
plt.close(fig)
return (disp_raw, disp_raw_conv)
def fit_dispersion(counts, disp_raw, disp_conv, sf, CFG):
mean_count = sp.mean(counts / sf, axis=1)[:, sp.newaxis]
index = sp.where(disp_conv)[0]
lowerBound = sp.percentile(sp.unique(disp_raw[index]), 1)
upperBound = sp.percentile(sp.unique(disp_raw[index]), 99)
idx = sp.where((disp_raw > lowerBound) & (disp_raw < upperBound))[0]
matrix = sp.ones((idx.shape[0], 2), dtype='float')
matrix[:, 0] /= mean_count[idx].ravel()
modGamma = sm.GLM(disp_raw[idx], matrix, family=sm.families.Gamma(sm.families.links.identity))
res = modGamma.fit()
Lambda = res.params
disp_fitted = disp_raw.copy()
ok_idx = sp.where(~sp.isnan(disp_fitted))[0]
disp_fitted[ok_idx] = Lambda[0] / mean_count[ok_idx] + Lambda[1]
if sp.sum(disp_fitted > 0) > 0:
print "Found dispersion fit"
if CFG['debug']:
fig = plt.figure(figsize=(8, 6), dpi=100)
ax = fig.add_subplot(111)
idx = sp.where(~sp.isnan(disp_fitted))[0]
ax.plot(sp.mean(sp.log10(counts + 1), axis=1)[idx], disp_fitted[idx], 'bo')
ax.set_title('Fitted Dispersion Estimate')
ax.set_xlabel('Mean expression count')
ax.set_ylabel('Dispersion')
plt.savefig('dispersion_fitted.pdf', format='pdf', bbox_inches='tight')
plt.close(fig)
return (disp_fitted, Lambda, idx)
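# Note on fit_dispersion (an interpretation added for clarity, not original
# documentation): the Gamma GLM above regresses the raw dispersions on
# [1/mean, 1], so the fitted trend has the parametric form
#     disp(mean) ~ Lambda[0] / mean + Lambda[1]
# i.e. the dispersion-mean relationship also used by DESeq.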
def adj_loglikelihood_shrink_scalar_onedisper(disp, explanatory, response, yhat, dispFitted, varPrior, sign):
"""
"""
loglik_adj = adj_loglikelihood_scalar(disp, explanatory, response, yhat, 1.0)
logprior = (sp.log(disp) - sp.log(dispFitted)) ** 2 / (2 * varPrior ** 2)
loglik_adj_shrk = loglik_adj - logprior
return loglik_adj_shrk * sign
def adj_loglikelihood_scalar(disp, X, y, mu, sign):
n = 1 / disp
p = n / (n + mu)
loglik = sum(nbinom.logpmf(y, n, p))
diagVec = mu / (1 + mu * disp)
diagWM = sp.diag(diagVec)
xtwx = sp.dot(sp.dot(X.T, diagWM), X)
coxreid = 0.5 * sp.log(sp.linalg.det(xtwx))
return (loglik - coxreid) * sign
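# Reading of adj_loglikelihood_scalar (an interpretation added for clarity): it
# evaluates the Cox-Reid adjusted negative binomial log-likelihood
#     l_CR(disp) = sum_i log NB(y_i; n=1/disp, p=n/(n+mu_i)) - 0.5 * log det(X'WX)
# with W = diag(mu / (1 + mu * disp)); the 'sign' argument lets a minimizer
# maximize it by passing -1.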
def adjust_dispersion_chunk(counts, dmatrix1, disp_raw, disp_fitted, varPrior, sf, CFG, idx, log=False):
disp_adj = sp.empty((counts.shape[0], 1))
disp_adj.fill(sp.nan)
disp_adj_conv = sp.zeros_like(disp_adj, dtype='bool')
for i in range(idx.shape[0]):
if log:
log_progress(i, idx.shape[0])
if not sp.isnan(disp_raw[i]):
### init dispersion and response
disp = 0.1
resp = counts[i, :].astype('int')
### run for max 10 iterations
for j in range(10):
modNB = sm.GLM(resp, dmatrix1, family=sm.families.NegativeBinomial(alpha=disp), offset=sp.log(sf))
result = modNB.fit()
dispBef = disp
yhat = result.mu
sign = -1.0
with warnings.catch_warnings():
warnings.simplefilter("ignore")
res = minimize_scalar(adj_loglikelihood_shrink_scalar_onedisper, args=(dmatrix1, resp, yhat, disp_fitted[i], varPrior, sign), method='Bounded', bounds=(0, 10.0), tol=1e-5)
disp = res.x
if abs(sp.log(disp) - sp.log(dispBef)) < 1e-4:
disp_adj[i] = disp
disp_adj_conv[i] = True
break
else:
disp_adj[i] = disp
disp_adj_conv[i] = False
if log:
log_progress(idx.shape[0], idx.shape[0])
print ''
return (disp_adj, disp_adj_conv, idx)
def adjust_dispersion(counts, dmatrix1, disp_raw, disp_fitted, idx, sf, CFG):
if CFG['verbose']:
print 'Start to estimate adjusted dispersions.'
varLogDispSamp = polygamma(1, (dmatrix1.shape[0] - dmatrix1.shape[1] ) / 2) ## number of samples - number of coefficients
varPrior = calculate_varPrior(disp_raw, disp_fitted, idx, varLogDispSamp)
if CFG['parallel'] > 1:
disp_adj = sp.empty((counts.shape[0], 1))
disp_adj.fill(sp.nan)
disp_adj_conv = sp.zeros_like(disp_adj, dtype='bool')
pool = mp.Pool(processes=CFG['parallel'], initializer=lambda: sig.signal(sig.SIGINT, sig.SIG_IGN))
binsize = 30
idx_chunks = [sp.arange(x, min(x + binsize, counts.shape[0])) for x in range(0, counts.shape[0], binsize)]
try:
result = [pool.apply_async(adjust_dispersion_chunk, args=(counts[cidx, :], dmatrix1, disp_raw[cidx], disp_fitted[cidx], varPrior, sf, CFG, cidx,)) for cidx in idx_chunks]
res_cnt = 0
while result:
tmp = result.pop(0).get()
for i, j in enumerate(tmp[2]):
if CFG['verbose']:
log_progress(res_cnt, counts.shape[0])
res_cnt += 1
disp_adj[j] = tmp[0][i]
disp_adj_conv[j] = tmp[1][i]
if CFG['verbose']:
log_progress(counts.shape[0], counts.shape[0])
print ''
pool.terminate()
pool.join()
except KeyboardInterrupt:
print >> sys.stderr, 'Keyboard Interrupt - exiting'
pool.terminate()
pool.join()
sys.exit(1)
else:
(disp_adj, disp_adj_conv, _) = adjust_dispersion_chunk(counts, dmatrix1, disp_raw, disp_fitted, varPrior, sf, CFG, sp.arange(counts.shape[0]), log=CFG['verbose'])
if CFG['debug']:
fig = plt.figure(figsize=(8, 6), dpi=100)
ax = fig.add_subplot(111)
idx = sp.where(~sp.isnan(disp_adj))[0]
ax.plot(sp.mean(sp.log10(counts + 1), axis=1)[idx], disp_adj[idx], 'bo')
ax.set_title('Adjusted Dispersion Estimate')
ax.set_xlabel('Mean expression count')
ax.set_ylabel('Dispersion')
plt.savefig('dispersion_adjusted.pdf', format='pdf', bbox_inches='tight')
plt.close(fig)
return (disp_adj, disp_adj_conv)
def test_count_chunk(gene_counts, disp_adj, sf, dmatrix0, dmatrix1, CFG, idx, log=False):
pval = sp.zeros((gene_counts.shape[0], 1), dtype='float')
pval.fill(sp.nan)
for i in xrange(idx.shape[0]):
if log:
log_progress(i, idx.shape[0])
if sp.isnan(disp_adj[i]):
continue
response = gene_counts[i, :].astype('int')
if sp.sum(response[:response.shape[0] / 2] == 0) >= CFG['max_0_frac'] * response.shape[0] / 2:
pval[i] = 1
continue
modNB0 = sm.GLM(response, dmatrix0, family=sm.families.NegativeBinomial(alpha=disp_adj[i]), offset=sp.log(sf))
modNB1 = sm.GLM(response, dmatrix1, family=sm.families.NegativeBinomial(alpha=disp_adj[i]), offset=sp.log(sf))
result0 = modNB0.fit()
result1 = modNB1.fit()
pval[i] = 1 - chi2.cdf(result0.deviance - result1.deviance, dmatrix1.shape[1] - dmatrix0.shape[1])
if log:
log_progress(idx.shape[0], idx.shape[0])
print ''
return (pval, idx)
def test_count(gene_counts, disp_adj, sf, dmatrix0, dmatrix1, CFG):
if CFG['verbose']:
print 'Start the statistical test.'
if CFG['parallel'] > 1:
pval = sp.zeros((gene_counts.shape[0], 1), dtype='float')
pval.fill(sp.nan)
pool = mp.Pool(processes=CFG['parallel'], initializer=lambda: sig.signal(sig.SIGINT, sig.SIG_IGN))
binsize = 30
idx_chunks = [sp.arange(x, min(x + binsize, gene_counts.shape[0])) for x in range(0, gene_counts.shape[0], binsize)]
try:
result = [pool.apply_async(test_count_chunk, args=(gene_counts[cidx, :], disp_adj[cidx], sf, dmatrix0, dmatrix1, CFG, cidx,)) for cidx in idx_chunks]
res_cnt = 0
while result:
tmp = result.pop(0).get()
for i, j in enumerate(tmp[1]):
if CFG['verbose']:
log_progress(res_cnt, gene_counts.shape[0])
res_cnt += 1
pval[j] = tmp[0][i]
if CFG['verbose']:
log_progress(gene_counts.shape[0], gene_counts.shape[0])
print ''
pool.terminate()
pool.join()
except KeyboardInterrupt:
print >> sys.stderr, 'Keyboard Interrupt - exiting'
pool.terminate()
pool.join()
sys.exit(1)
else:
(pval, _) = test_count_chunk(gene_counts, disp_adj, sf, dmatrix0, dmatrix1, CFG, sp.arange(gene_counts.shape[0]), log=CFG['verbose'])
if CFG['verbose']:
print ''
return pval
def adj_pval(pvals, CFG):
"""
Perform multiple testing correction.
"""
pvals_adj = pvals.copy()
idx = ~sp.isnan(pvals)
if CFG['multiTest'] == 'BH':
method = 'fdr_bh'
elif CFG['multiTest'] == 'Bonferroni':
method = 'bonferroni'
elif CFG['multiTest'] == 'Holm':
method = 'holm'
elif CFG['multiTest'] == 'Hochberg':
method = 'simes-hochberg'
elif CFG['multiTest'] == 'Hommel':
method = 'hommel'
elif CFG['multiTest'] == 'BY':
method = 'fdr_by'
elif CFG['multiTest'] == 'TSBH':
method = 'tsbh'
else:
sys.stderr.write('ERROR: The methods for multiple test correction can only accept \'Bonferroni\', \'Holm\', \'Hochberg\', \'Hommel\', \'BH\', \'BY\' or \'TSBH\' as its input.\n')
sys.exit()
mtc = sms.stats.multicomp.multipletests(pvals[idx], alpha=0.1, method=method, returnsorted=False)
pvals_adj[idx] = mtc[1]
return pvals_adj
def calculate_varPrior(disp_raw, disp_fitted, idx, varLogDispSamp):
logRes = sp.log(disp_raw[idx]) - sp.log(disp_fitted[idx])
    stdLogRes = sp.median(abs(logRes - sp.median(logRes))) * 1.4826  # MAD scaled to a consistent estimate of the standard deviation
varLogRes = stdLogRes ** 2
varPrior = varLogRes - varLogDispSamp
return max(varPrior, 0.1)
def run_testing(cov, dmatrix0, dmatrix1, sf, CFG):
### estimate dispersion
(disp_raw, disp_raw_conv) = estimate_dispersion(cov, dmatrix1, sf, CFG)
### fit dispersion
(disp_fitted, Lambda, disp_idx) = fit_dispersion(cov, disp_raw, disp_raw_conv, sf, CFG)
### adjust dispersion estimates
(disp_adj, disp_adj_conv) = adjust_dispersion(cov, dmatrix1, disp_raw, disp_fitted, disp_idx, sf, CFG)
### do test
pvals = test_count(cov, disp_adj, sf, dmatrix0, dmatrix1, CFG)
    ### reshape and adjust p-values (take the smaller of the two isoform p-values per event and double it)
pvals = 2 * pvals.reshape((2, pvals.shape[0] / 2)).T.min(axis=1)
pvals[pvals > 1] = 1
return pvals
def main():
### get command line options
options = parse_options(sys.argv)
### parse parameters from options object
CFG = settings.parse_args(options, identity='test')
### generate output directory
outdir = os.path.join(options.outdir, 'testing')
if options.timestamp == 'y':
outdir = '%s_%s' % (outdir, str(datetime.datetime.now()).replace(' ', '_'))
if options.labelA != 'condA' and options.labelB != 'condB':
outdir = '%s_%s_vs_%s' % (outdir, options.labelA, options.labelB)
if not os.path.exists(outdir):
os.makedirs(outdir)
if CFG['debug']:
print "Generating simulated dataset"
npr.seed(23)
CFG['is_matlab'] = False
#cov = npr.permutation(20000-20).astype('float').reshape(999, 20)
#cov = sp.r_[cov, sp.c_[sp.ones((1, 10)) *10, sp.ones((1, 10)) * 500000] + npr.normal(10, 1, 20)]
#sf = sp.ones((cov.shape[1], ), dtype='float')
setsize = 50
### diff event counts
cov = sp.zeros((500, 2 * setsize), dtype='int')
for i in range(10):
cov[i, :setsize] = nbinom.rvs(30, 0.8, size=setsize)
cov[i, setsize:] = nbinom.rvs(10, 0.8, size=setsize)
for i in range(10, cov.shape[0]):
cov[i, :] = nbinom.rvs(30, 0.8, size=2*setsize)
### diff gene expression
cov2 = sp.zeros((500, 2 * setsize), dtype='int')
for i in range(20):
cov2[i, :setsize] = nbinom.rvs(2000, 0.2, size=setsize)
cov2[i, setsize:] = nbinom.rvs(2000, 0.3, size=setsize)
for i in range(20, cov2.shape[0]):
cov2[i, :] = nbinom.rvs(2000, 0.3, size=2*setsize)
cov = sp.c_[cov, cov2] * 10000
tidx = sp.arange(setsize)
sf = npr.uniform(0, 5, 2*setsize)
sf = sp.r_[sf, sf]
#dmatrix0 = sp.ones((cov.shape[1], 3), dtype='bool')
dmatrix1 = sp.zeros((cov.shape[1], 4), dtype='float')
dmatrix1[:, 0] = 1
dmatrix1[tidx, 1] = 1
#dmatrix1[tidx, 2] = 1
dmatrix1[tidx + (2*setsize), 2] = 1
dmatrix1[(2*setsize):, 3] = 1
#dmatrix1[:, 4] = sp.log(sf)
dmatrix0 = dmatrix1[:, [0, 2, 3]]
cov = cov * sf
#sf = sp.ones((cov.shape[1], ), dtype='float')
pvals = run_testing(cov, dmatrix0, dmatrix1, sf, CFG)
pvals_adj = adj_pval(pvals, CFG)
pdb.set_trace()
else:
val_tag = ''
if CFG['validate_splicegraphs']:
val_tag = '.validated'
if CFG['is_matlab']:
CFG['fname_genes'] = os.path.join(CFG['out_dirname'], 'spladder', 'genes_graph_conf%i.%s%s.mat' % (CFG['confidence_level'], CFG['merge_strategy'], val_tag))
CFG['fname_count_in'] = os.path.join(CFG['out_dirname'], 'spladder', 'genes_graph_conf%i.%s%s.count.mat' % (CFG['confidence_level'], CFG['merge_strategy'], val_tag))
else:
CFG['fname_genes'] = os.path.join(CFG['out_dirname'], 'spladder', 'genes_graph_conf%i.%s%s.pickle' % (CFG['confidence_level'], CFG['merge_strategy'], val_tag))
CFG['fname_count_in'] = os.path.join(CFG['out_dirname'], 'spladder', 'genes_graph_conf%i.%s%s.count.pickle' % (CFG['confidence_level'], CFG['merge_strategy'], val_tag))
condition_strains = None
CFG['fname_exp_hdf5'] = os.path.join(CFG['out_dirname'], 'spladder', 'genes_graph_conf%i.%s%s.gene_exp.hdf5' % (CFG['confidence_level'], CFG['merge_strategy'], val_tag))
if os.path.exists(CFG['fname_exp_hdf5']):
if CFG['verbose']:
print 'Loading expression counts from %s' % CFG['fname_exp_hdf5']
IN = h5py.File(CFG['fname_exp_hdf5'], 'r')
gene_counts = IN['raw_count'][:]
gene_strains = IN['strains'][:]
gene_ids = IN['genes'][:]
IN.close()
else:
if options.subset_samples == 'y':
condition_strains = sp.unique(sp.r_[sp.array(CFG['conditionA']), sp.array(CFG['conditionB'])])
CFG['fname_exp_hdf5'] = os.path.join(CFG['out_dirname'], 'spladder', 'genes_graph_conf%i.%s%s.gene_exp.%i.hdf5' % (CFG['confidence_level'], CFG['merge_strategy'], val_tag, hash(tuple(sp.unique(condition_strains))) * -1))
if os.path.exists(CFG['fname_exp_hdf5']):
if CFG['verbose']:
print 'Loading expression counts from %s' % CFG['fname_exp_hdf5']
IN = h5py.File(CFG['fname_exp_hdf5'], 'r')
gene_counts = IN['raw_count'][:]
gene_strains = IN['strains'][:]
gene_ids = IN['genes'][:]
IN.close()
else:
gene_counts, gene_strains, gene_ids = get_gene_expression(CFG, fn_out=CFG['fname_exp_hdf5'], strain_subset=condition_strains)
gene_strains = sp.array([x.split(':')[1] if ':' in x else x for x in gene_strains])
### estimate size factors for library size normalization
sf = get_size_factors(gene_counts, CFG)
### get index of samples for difftest
idx1 = sp.where(sp.in1d(gene_strains, CFG['conditionA']))[0]
idx2 = sp.where(sp.in1d(gene_strains, CFG['conditionB']))[0]
### for TESTING
#setsize = 100
#idx1 = sp.arange(0, setsize / 2)
#idx2 = sp.arange(setsize / 2, setsize)
### subset expression counts to tested samples
gene_counts = gene_counts[:, sp.r_[idx1, idx2]]
sf = sf[sp.r_[idx1, idx2]]
sf = sp.r_[sf, sf]
### test each event type individually
for event_type in CFG['event_types']:
if CFG['verbose']:
print 'Testing %s events' % event_type
CFG['fname_events'] = os.path.join(CFG['out_dirname'], 'merge_graphs_%s_C%i.counts.hdf5' % (event_type, CFG['confidence_level']))
### quantify events
(cov, gene_idx, event_idx, event_strains) = quantify.quantify_from_counted_events(CFG['fname_events'], sp.r_[idx1, idx2], event_type, CFG)
assert(sp.all(gene_strains == event_strains))
### map gene expression to event order
curr_gene_counts = gene_counts[gene_idx, :]
### filter for min expression
if event_type == 'intron_retention':
k_idx = sp.where((sp.mean(cov[0] == 0, axis=1) < CFG['max_0_frac']) | (sp.mean(cov[1] == 0, axis=1) < CFG['max_0_frac']))[0]
else:
k_idx = sp.where(((sp.mean(cov[0] == 0, axis=1) < CFG['max_0_frac']) | (sp.mean(cov[1] == 0, axis=1) < CFG['max_0_frac'])) & (sp.mean(sp.c_[cov[0][:, :idx1.shape[0]], cov[1][:, :idx1.shape[0]]] == 0, axis=1) < CFG['max_0_frac']) & (sp.mean(sp.c_[cov[0][:, idx2.shape[0]:], cov[1][:, idx2.shape[0]:]] == 0, axis=1) < CFG['max_0_frac']))[0]
if CFG['verbose']:
print 'Exclude %i of %i %s events (%.2f percent) from testing due to low coverage' % (cov[0].shape[0] - k_idx.shape[0], cov[0].shape[0], event_type, (1 - float(k_idx.shape[0]) / cov[0].shape[0]) * 100)
if k_idx.shape[0] == 0:
print 'All events of type %s were filtered out due to low coverage. Please try re-running with less stringent filter criteria' % event_type
continue
# k_idx = sp.where((sp.mean(sp.c_[cov[0], cov[1]], axis=1) > 2))[0]
# k_idx = sp.where((sp.mean(cov[0], axis=1) > 2) & (sp.mean(cov[1], axis=1) > 2))[0]
cov[0] = cov[0][k_idx, :]
cov[1] = cov[1][k_idx, :]
curr_gene_counts = curr_gene_counts[k_idx, :]
event_idx = event_idx[k_idx]
gene_idx = gene_idx[k_idx]
cov[0] = sp.around(sp.hstack([cov[0], curr_gene_counts]))
cov[1] = sp.around(sp.hstack([cov[1], curr_gene_counts]))
cov = sp.vstack(cov)
tidx = sp.arange(idx1.shape[0])
#if CFG['debug']:
# for i in range(cov.shape[0]):
# fig = plt.figure(figsize=(8, 6), dpi=100)
# ax = fig.add_subplot(111)
# ax.hist(cov[i, :] * sf, 50, histtype='bar', rwidth=0.8)
# #ax.plot(sp.arange(cov.shape[1]), sorted(cov[i, :]), 'bo')
# ax.set_title('Count Distribution - Sample %i' % i )
# plt.savefig('count_dist.%i.pdf' % i, format='pdf', bbox_inches='tight')
# plt.close(fig)
### build design matrix for testing
dmatrix1 = sp.zeros((cov.shape[1], 4), dtype='bool')
dmatrix1[:, 0] = 1 # intercept
dmatrix1[tidx, 1] = 1 # delta a
dmatrix1[tidx, 2] = 1 # delta g
dmatrix1[tidx + (idx1.shape[0] + idx2.shape[0]), 2] = 1 # delta g
dmatrix1[(idx1.shape[0] + idx2.shape[0]):, 3] = 1 # is g
dmatrix0 = dmatrix1[:, [0, 2, 3]]
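            # Reading of this design (an interpretation added for clarity): each
            # response row concatenates the event counts with the matching gene
            # counts; column 1 ('delta a') is a condition effect on the event
            # counts only, while column 2 models a condition effect shared by
            # both blocks. Dropping column 1 in dmatrix0 means the likelihood
            # ratio test below asks whether the condition changes the event
            # relative to overall gene expression.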
pvals = run_testing(cov, dmatrix0, dmatrix1, sf, CFG)
pvals_adj = adj_pval(pvals, CFG)
### write output
out_fname = os.path.join(outdir, 'test_results_C%i_%s.tsv' % (options.confidence, event_type))
if CFG['verbose']:
print 'Writing test results to %s' % out_fname
s_idx = sp.argsort(pvals_adj)
header = sp.array(['event_id', 'gene', 'p_val', 'p_val_adj'])
event_ids = sp.array(['%s_%i' % (event_type, i + 1) for i in event_idx], dtype='str')
if CFG['is_matlab']:
data_out = sp.c_[event_ids[s_idx], gene_ids[gene_idx[s_idx], 0], pvals[s_idx].astype('str'), pvals_adj[s_idx].astype('str')]
else:
data_out = sp.c_[event_ids[s_idx], gene_ids[gene_idx[s_idx]], pvals[s_idx].astype('str'), pvals_adj[s_idx].astype('str')]
data_out = sp.r_[header[sp.newaxis, :], data_out]
sp.savetxt(out_fname, data_out, delimiter='\t', fmt='%s')
if __name__ == "__main__":
main()
|
bsd-3-clause
|
qifeigit/scikit-learn
|
examples/linear_model/plot_sgd_weighted_samples.py
|
344
|
1458
|
"""
=====================
SGD: Weighted samples
=====================
Plot the decision function of a weighted dataset, where the size of the points
is proportional to their weight.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
# we create 20 points
np.random.seed(0)
X = np.r_[np.random.randn(10, 2) + [1, 1], np.random.randn(10, 2)]
y = [1] * 10 + [-1] * 10
sample_weight = 100 * np.abs(np.random.randn(20))
# and assign a bigger weight to the first 10 samples
sample_weight[:10] *= 10
# plot the weighted data points
xx, yy = np.meshgrid(np.linspace(-4, 5, 500), np.linspace(-4, 5, 500))
plt.figure()
plt.scatter(X[:, 0], X[:, 1], c=y, s=sample_weight, alpha=0.9,
cmap=plt.cm.bone)
## fit the unweighted model
clf = linear_model.SGDClassifier(alpha=0.01, n_iter=100)
clf.fit(X, y)
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
no_weights = plt.contour(xx, yy, Z, levels=[0], linestyles=['solid'])
## fit the weighted model
clf = linear_model.SGDClassifier(alpha=0.01, n_iter=100)
clf.fit(X, y, sample_weight=sample_weight)
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
samples_weights = plt.contour(xx, yy, Z, levels=[0], linestyles=['dashed'])
plt.legend([no_weights.collections[0], samples_weights.collections[0]],
["no weights", "with weights"], loc="lower left")
plt.xticks(())
plt.yticks(())
plt.show()
|
bsd-3-clause
|
wrichert/BuildingMachineLearningSystemsWithPython
|
ch06/03_clean.py
|
6
|
5976
|
# This code is supporting material for the book
# Building Machine Learning Systems with Python
# by Willi Richert and Luis Pedro Coelho
# published by PACKT Publishing
#
# It is made available under the MIT License
#
# This script tries to improve the classifier by cleaning the tweets a bit
#
import time
start_time = time.time()
import re
import numpy as np
from sklearn.metrics import precision_recall_curve, roc_curve, auc
from sklearn.cross_validation import ShuffleSplit
from sklearn.pipeline import Pipeline
from utils import plot_pr
from utils import load_sanders_data
from utils import tweak_labels
from utils import log_false_positives
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.naive_bayes import MultinomialNB
from utils import load_sent_word_net
sent_word_net = load_sent_word_net()
phase = "03"
emo_repl = {
# positive emoticons
"<3": " good ",
":d": " good ", # :D in lower case
":dd": " good ", # :DD in lower case
"8)": " good ",
":-)": " good ",
":)": " good ",
";)": " good ",
"(-:": " good ",
"(:": " good ",
# negative emoticons:
":/": " bad ",
":>": " sad ",
":')": " sad ",
":-(": " bad ",
":(": " bad ",
":S": " bad ",
":-S": " bad ",
}
emo_repl_order = [k for (k_len, k) in reversed(
sorted([(len(k), k) for k in emo_repl.keys()]))]
re_repl = {
r"\br\b": "are",
r"\bu\b": "you",
r"\bhaha\b": "ha",
r"\bhahaha\b": "ha",
r"\bdon't\b": "do not",
r"\bdoesn't\b": "does not",
r"\bdidn't\b": "did not",
r"\bhasn't\b": "has not",
r"\bhaven't\b": "have not",
r"\bhadn't\b": "had not",
r"\bwon't\b": "will not",
r"\bwouldn't\b": "would not",
r"\bcan't\b": "can not",
r"\bcannot\b": "can not",
}
def create_ngram_model(params=None):
def preprocessor(tweet):
global emoticons_replaced
tweet = tweet.lower()
for k in emo_repl_order:
tweet = tweet.replace(k, emo_repl[k])
for r, repl in re_repl.iteritems():
tweet = re.sub(r, repl, tweet)
return tweet
tfidf_ngrams = TfidfVectorizer(preprocessor=preprocessor,
analyzer="word")
clf = MultinomialNB()
pipeline = Pipeline([('tfidf', tfidf_ngrams), ('clf', clf)])
if params:
pipeline.set_params(**params)
return pipeline
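# Standalone sketch of the cleaning step (mirrors the preprocessor defined inside
# create_ngram_model; the example tweet is made up): the text is lower-cased,
# emoticons are mapped to sentiment words and contractions are expanded, e.g.
# _clean_tweet_demo("I don't like it :(") -> "i do not like it  bad ".
def _clean_tweet_demo(tweet):
    tweet = tweet.lower()
    for k in emo_repl_order:
        tweet = tweet.replace(k, emo_repl[k])
    for r, repl in re_repl.iteritems():
        tweet = re.sub(r, repl, tweet)
    return tweet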
def train_model(clf, X, Y, name="NB ngram", plot=False):
# create it again for plotting
cv = ShuffleSplit(
n=len(X), n_iter=10, test_size=0.3, indices=True, random_state=0)
train_errors = []
test_errors = []
scores = []
pr_scores = []
precisions, recalls, thresholds = [], [], []
clfs = [] # just to later get the median
for train, test in cv:
X_train, y_train = X[train], Y[train]
X_test, y_test = X[test], Y[test]
clf.fit(X_train, y_train)
clfs.append(clf)
train_score = clf.score(X_train, y_train)
test_score = clf.score(X_test, y_test)
train_errors.append(1 - train_score)
test_errors.append(1 - test_score)
scores.append(test_score)
proba = clf.predict_proba(X_test)
fpr, tpr, roc_thresholds = roc_curve(y_test, proba[:, 1])
precision, recall, pr_thresholds = precision_recall_curve(
y_test, proba[:, 1])
pr_scores.append(auc(recall, precision))
precisions.append(precision)
recalls.append(recall)
thresholds.append(pr_thresholds)
if plot:
scores_to_sort = pr_scores
median = np.argsort(scores_to_sort)[len(scores_to_sort) / 2]
plot_pr(pr_scores[median], name, phase, precisions[median],
recalls[median], label=name)
log_false_positives(clfs[median], X_test, y_test, name)
summary = (np.mean(scores), np.std(scores),
np.mean(pr_scores), np.std(pr_scores))
print "%.3f\t%.3f\t%.3f\t%.3f\t" % summary
return np.mean(train_errors), np.mean(test_errors)
def print_incorrect(clf, X, Y):
Y_hat = clf.predict(X)
wrong_idx = Y_hat != Y
X_wrong = X[wrong_idx]
Y_wrong = Y[wrong_idx]
Y_hat_wrong = Y_hat[wrong_idx]
for idx in xrange(len(X_wrong)):
print "clf.predict('%s')=%i instead of %i" %\
(X_wrong[idx], Y_hat_wrong[idx], Y_wrong[idx])
def get_best_model():
best_params = dict(tfidf__ngram_range=(1, 2),
tfidf__min_df=1,
tfidf__stop_words=None,
tfidf__smooth_idf=False,
tfidf__use_idf=False,
tfidf__sublinear_tf=True,
tfidf__binary=False,
clf__alpha=0.01,
)
best_clf = create_ngram_model(best_params)
return best_clf
if __name__ == "__main__":
X_orig, Y_orig = load_sanders_data()
classes = np.unique(Y_orig)
for c in classes:
print "#%s: %i" % (c, sum(Y_orig == c))
print "== Pos vs. neg =="
pos_neg = np.logical_or(Y_orig == "positive", Y_orig == "negative")
X = X_orig[pos_neg]
Y = Y_orig[pos_neg]
Y = tweak_labels(Y, ["positive"])
train_model(get_best_model(), X, Y, name="pos vs neg", plot=True)
print "== Pos/neg vs. irrelevant/neutral =="
X = X_orig
Y = tweak_labels(Y_orig, ["positive", "negative"])
# best_clf = grid_search_model(create_union_model, X, Y, name="sent vs
# rest", plot=True)
train_model(get_best_model(), X, Y, name="pos+neg vs rest", plot=True)
print "== Pos vs. rest =="
X = X_orig
Y = tweak_labels(Y_orig, ["positive"])
train_model(get_best_model(), X, Y, name="pos vs rest",
plot=True)
print "== Neg vs. rest =="
X = X_orig
Y = tweak_labels(Y_orig, ["negative"])
train_model(get_best_model(), X, Y, name="neg vs rest",
plot=True)
print "time spent:", time.time() - start_time
|
mit
|
elieux/kaira
|
gui/charts.py
|
3
|
34203
|
#
# Copyright (C) 2012 Martin Surkovsky,
# Stanislav Bohm
#
# This file is part of Kaira.
#
# Kaira is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3 of the License, or
# (at your option) any later version.
#
# Kaira is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Kaira. If not, see <http://www.gnu.org/licenses/>.
import gtk
import os
import paths
import utils
import events as evt
import matplotlib.cm as cm
from matplotlib.axes import Axes as mpl_Axes
from matplotlib.container import Container as mpl_Container
from matplotlib.artist import Artist as mpl_Artist
from matplotlib.lines import Line2D as mpl_Line
from matplotlib.patches import Rectangle as mpl_Rectangle
from matplotlib.text import Annotation as mpl_Annotation
from matplotlib.transforms import IdentityTransform as mpl_IdentityTransform
from matplotlib.ticker import FuncFormatter as mpl_FuncFormatter
from matplotlib.projections import register_projection \
as mpl_register_projection
from matplotlib.figure import Figure as mpl_Figure
from matplotlib.backends.backend_gtkagg import FigureCanvasGTKAgg \
as mpl_FigureCanvas
class LineConfig:
""" Information about concrete line in a chart.
Keyword arguments:
mpl_line1 -- an instance of matplotlib.lines.Line2D, which is used in
the original chart.
x_values -- x-values of the line.
y_values -- y-values of the line.
color -- used color.
"""
def __init__(
self,
mpl_line1,
x_values,
y_values,
color):
self.mpl_line1 = mpl_line1
self.x_values = x_values
self.y_values = y_values
self.color = color
# instance of line used in legend
self.mpl_legline = None
        # alternative view of line1. If they are the same, then line2 = line1.
self.mpl_line2 = None
        # line1 is visible by default
self.mpl_line1_visible = True
        # line2 is invisible by default; the display switches between the two.
self.mpl_line2_visible = False
def get_mpl_line1(self):
return self.mpl_line1
def copy_mpl_line1(self):
        # TODO: make more general. The first line is not always of this type!
l = mpl_Line(
self.x_values, self.y_values, marker='o',
drawstyle='steps-post', color=self.color)
return l
def get_x_values(self):
return self.x_values
def get_y_values(self):
return self.y_values
def get_color(self):
return self.color
def get_mpl_legline(self):
return self.mpl_legline
def set_mpl_legline(self, mpl_legline):
self.mpl_legline = mpl_legline
def get_mpl_line2(self):
return self.mpl_line2
def set_mpl_line2(self, mpl_line2):
self.mpl_line2 = mpl_line2
def get_mpl_line1_visible(self):
return self.mpl_line1_visible
def set_mpl_line1_visible(self, visible):
self.mpl_line1_visible = visible
self.__set_visible(visible, self.mpl_line1)
def get_mpl_line2_visible(self):
        return self.mpl_line2_visible
def set_mpl_line2_visible(self, visible):
if self.mpl_line2 is not None:
self.mpl_line2_visible = visible
self.__set_visible(visible, self.mpl_line2)
else:
raise Exception("Line 2 is not created!")
def __set_visible(self, visible, line):
if line is None:
raise Exception("Line is None!")
if isinstance(line, mpl_Artist):
line.set_visible(visible)
elif isinstance(line, mpl_Container):
for child in line.get_children():
self.__set_visible(visible, child)
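# Usage sketch (hypothetical plotting code, only to illustrate the class above):
#
#   line, = axes.plot(x_values, y_values, color="#ff0000")
#   config = LineConfig(line, x_values, y_values, "#ff0000")
#   config.set_mpl_line2(config.copy_mpl_line1())
#   axes.add_line(config.get_mpl_line2())
#   config.set_mpl_line2_visible(True)  # show the alternative stepped view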
class DrawLinesConfig:
''' The class which stores information about how to show selected lines.'''
def __init__(self, lines_config):
self.lines_config = lines_config
self.count_of_changes = 0
def change_lines_config(self, change, change_legline_fn=utils.empty_fn):
old_count_of_changes = self.count_of_changes
if not change.get_mpl_line2_visible():
self.count_of_changes += 1
change_legline_fn(change.get_mpl_legline(), 'on')
else:
self.count_of_changes -= 1
change_legline_fn(change.get_mpl_legline(), 'off')
# all lines will be shown (default state)
if old_count_of_changes == 1 and self.count_of_changes == 0:
for line_config in self.lines_config:
line_config.set_mpl_line1_visible(True)
legline = line_config.get_mpl_legline()
change_legline_fn(legline, 'original')
if line_config.get_mpl_line2() is not None:
line_config.set_mpl_line2_visible(False)
        # one selected line will be shown and the other lines will be hidden
elif old_count_of_changes == 0 and self.count_of_changes == 1:
for line_config in self.lines_config:
line_config.set_mpl_line1_visible(False)
legline = line_config.get_mpl_legline()
change_legline_fn(legline, 'off')
change_legline_fn(change.get_mpl_legline(), 'on')
        # the next selected line will be shown
if self.count_of_changes > 0:
if change.get_mpl_line2() is not None:
change.set_mpl_line2_visible(not change.get_mpl_line2_visible())
else:
change.set_mpl_line2('create_new')
class BasicChart(mpl_Axes, evt.EventSource):
name = 'basic_chart'
def __init__(self,
fig,
rec,
axisbg=None, # defaults to rc axes.facecolor
frameon=True,
sharex=None,
sharey=None,
label="",
xscale=None,
yscale=None,
**kwargs):
mpl_Axes.__init__(
self, fig, rec, axisbg, frameon, sharex, sharey, label,
xscale, yscale, **kwargs)
evt.EventSource.__init__(self)
# zoom properties
self.zoom_stack = []
self.zoom_rect = None
# move properties
self.xypress = None
self.original_view_dim = None
# legend
self.plegend = None
self.mouse_on_legend = False
# locking axes
self.xlock = False
self.ylock = False
# move with canvas
self.moving_flag = False
# redraw properties (backgrounds)
self.cross_bg = None
self.rect_bg = None
        # connect standard features for Kaira graphs
        # update background after the window changes
fig.canvas.mpl_connect("draw_event", self._update_background)
# register left button click
fig.canvas.mpl_connect("button_press_event", self._drag_point)
fig.canvas.mpl_connect("button_release_event", self._drop_point)
# register drawing of position cross
fig.canvas.mpl_connect("motion_notify_event", self._draw_cross)
# register zooming methods
fig.canvas.mpl_connect("motion_notify_event", self._draw_rectangle)
fig.canvas.mpl_connect("button_release_event", self._zoom_in)
fig.canvas.mpl_connect("button_press_event", self._zoom_out)
# register moving events
fig.canvas.mpl_connect("button_press_event", self._move_start)
fig.canvas.mpl_connect("motion_notify_event", self._moving)
fig.canvas.mpl_connect(
"key_press_event", self._switch_moving_flag_action)
# register axes locking events
fig.canvas.mpl_connect("key_press_event", self._switch_xlock_action)
fig.canvas.mpl_connect("key_release_event", self._switch_xlock_action)
fig.canvas.mpl_connect("key_press_event", self._switch_ylock_action)
fig.canvas.mpl_connect("key_release_event", self._switch_ylock_action)
        # register event which stops drawing the cross if the cursor is over the legend
fig.canvas.mpl_connect("motion_notify_event", self._mouse_over_legend)
def __convert_axes_to_data(self, x, y):
xdisplay, ydisplay = self.transAxes.transform((x,y))
return self.transData.inverted().transform((xdisplay, ydisplay))
def _update_background(self, event):
self.cross_bg = self.figure.canvas.copy_from_bbox(self.bbox)
def _drag_point(self, event):
if event.button == 1:
self.xypress = (event.x, event.y)
def _drop_point(self, event):
if event.button == 1:
self.xypress = None
def _draw_cross(self, event, select_bg=None):
def crop_coordinate(c_axes, flag):
"""
It crops the coordinate and returns two values:
- coordinate in axes format <0, 1>
- coordinate in data format
            c_axes -- coordinate in axes format
            flag -- says which coordinate is required; possible
            values: 'x' or 'y'.
returns (<0,1>, <min_value, max_value>)
"""
idx = 0 if flag == 'x' else 1
if c_axes < 0:
return (0, self.__convert_axes_to_data(0, 0)[idx])
elif c_axes > 1:
return (1, self.__convert_axes_to_data(1, 1)[idx])
else:
return (c_axes,
self.__convert_axes_to_data(c_axes, c_axes)[idx])
def format_value(value, formatter):
if value is not None and isinstance(formatter, mpl_FuncFormatter):
return formatter.format_data(value)
else:
return str(value)
if not self.mouse_on_legend and \
(self.xypress is None or select_bg is not None):
if self.cross_bg is None:
self.cross_bg = self.figure.canvas.copy_from_bbox(self.bbox)
if select_bg is not None:
self.figure.canvas.restore_region(select_bg)
elif self.cross_bg is not None:
self.figure.canvas.restore_region(self.cross_bg)
inv = self.transAxes.inverted()
x, y = inv.transform((event.x, event.y))
(x, xdata) = crop_coordinate(x, 'x')
(y, ydata) = crop_coordinate(y, 'y')
xtext = format_value(xdata, self.xaxis.get_major_formatter())
ytext = format_value(ydata, self.yaxis.get_major_formatter())
            # the coefficient 7 is a good result from an experiment :)
xtext_pos = -7 * len(xtext) - 10 if x > 0.5 else 10
ytext_pos = -20 if y > 0.5 else 30
if not self.xlock:
l1 = mpl_Line(
[x, x], [0, 1], c="#ff0000", lw=1,
transform=self.transAxes, figure=self.figure)
self.draw_artist(l1)
a1 = mpl_Annotation(
xtext,
xy=(x, y), xycoords='axes fraction',
xytext=(xtext_pos, ytext_pos),
textcoords='offset points',
bbox=dict(boxstyle="round", fc="#ffff00"))
a1.set_transform(mpl_IdentityTransform())
self._set_artist_props(a1)
self.draw_artist(a1)
if not self.ylock:
l2 = mpl_Line(
[0, 1], [y, y], c="#ff0000", lw=1,
transform=self.transAxes, figure=self.figure)
self.draw_artist(l2)
if self.xlock:
ytext_pos = -20 if y > 0.5 else 10
else:
ytext_pos -= 20
a2 = mpl_Annotation(
ytext,
xy=(x, y), xycoords='axes fraction',
xytext=(xtext_pos, ytext_pos),
textcoords='offset points',
bbox=dict(boxstyle="round", fc="#ffff00"))
a2.set_transform(mpl_IdentityTransform())
self._set_artist_props(a2)
self.draw_artist(a2)
self.figure.canvas.blit(self.bbox)
def _draw_rectangle(self, event):
if not self.moving_flag \
and not self.mouse_on_legend \
and self.xypress is not None:
x_start, y_start = self.xypress
x_end, y_end = event.x, event.y
if self.rect_bg is None:
self.rect_bg = self.figure.canvas.copy_from_bbox(self.bbox)
else:
self.figure.canvas.restore_region(self.rect_bg)
inv = self.transData.inverted()
ax_x_start, ax_y_start = inv.transform((x_start, y_start))
ax_x_end, ax_y_end = inv.transform((x_end, y_end))
if self.xlock:
ax_x_start = self.__convert_axes_to_data(0, 0)[0]
ax_x_end = self.__convert_axes_to_data(1, 1)[0]
if self.ylock:
ax_y_start = self.__convert_axes_to_data(0, 0)[1]
ax_y_end = self.__convert_axes_to_data(1, 1)[1]
self.zoom_rect = (
min(ax_x_start, ax_x_end),
min(ax_y_start, ax_y_end),
max(ax_x_start, ax_x_end),
max(ax_y_start, ax_y_end))
rec = mpl_Rectangle(
(ax_x_start, ax_y_start),
width=(ax_x_end - ax_x_start),
height=(ax_y_end - ax_y_start),
fc="#0000ff", ec="#000000", alpha=0.1, lw=1,
transform=self.transData, figure=self.figure)
self.draw_artist(rec)
self.figure.canvas.blit(self.bbox)
# draw ending cross
select_bg = self.figure.canvas.copy_from_bbox(self.bbox)
self._draw_cross(event, select_bg)
def _zoom_in(self, event):
if self.zoom_rect is not None:
vmin_x, vmax_x = self.xaxis.get_view_interval()
vmin_y, vmax_y = self.yaxis.get_view_interval()
self.zoom_stack.append((vmin_x, vmax_x, vmin_y, vmax_y))
xmin, ymin, xmax, ymax = self.zoom_rect
self.set_xlim(xmin, xmax)
self.set_ylim(ymin, ymax)
self.zoom_rect = None
self.cross_bg = None
self.rect_bg = None
self.figure.canvas.draw_idle()
def _zoom_out(self, event):
if event.button == 3:
xmin, xmax = None, None
ymin, ymax = None, None
if len(self.zoom_stack) == 0:
if self.original_view_dim is not None:
xmin, xmax, ymin, ymax = self.original_view_dim
self.original_view_dim = None
else:
xmin, xmax, ymin, ymax = self.zoom_stack.pop()
if xmin is not None and xmax is not None and \
ymin is not None and ymax is not None:
self.set_xlim(xmin, xmax)
self.set_ylim(ymin, ymax)
self.figure.canvas.draw_idle()
def _move_start(self, event):
        ''' Save the original view dimensions while that is still possible,
        i.e. while the zoom has not been used yet.'''
if len(self.zoom_stack) == 0 and self.original_view_dim is None:
# Save original view for restoring a chart.
vmin_x, vmax_x = self.xaxis.get_view_interval()
vmin_y, vmax_y = self.yaxis.get_view_interval()
self.original_view_dim = (vmin_x, vmax_x, vmin_y, vmax_y)
def _moving(self, event):
        ''' Moving the chart. Coordinates must be transformed
        between the two coordinate systems, because pixel
        coordinates are better suited for moving the chart. '''
if self.moving_flag and self.xypress is not None:
xpress, ypress = self.xypress
x, y = event.x, event.y
diffx = xpress - x
diffy = ypress - y
# coordinates in display (pixels) view
xmin, ymin = self.transAxes.transform((0,0))
xmax, ymax = self.transAxes.transform((1,1))
shift_xmin, shift_xmax = xmin + diffx, xmax + diffx
shift_ymin, shift_ymax = ymin + diffy, ymax + diffy
# coordinates in data view
inv = self.transData.inverted()
data_xmin, data_ymin = inv.transform((shift_xmin, shift_ymin))
data_xmax, data_ymax = inv.transform((shift_xmax, shift_ymax))
# set new view dimension
self.set_xlim(data_xmin, data_xmax)
self.set_ylim(data_ymin, data_ymax)
# shift for next step
self.xypress = (x, y)
self.figure.canvas.draw_idle()
def _switch_xlock_action(self, event):
        # hint: ctrl+control is returned after releasing the ctrl key.
        # It could be a bug in matplotlib.
if not self.moving_flag and \
event.guiEvent.keyval == gtk.keysyms.Control_L:
self.set_xlock(not self.xlock)
if event.x is not None and event.y is not None:
self._draw_cross(event)
def _switch_ylock_action(self, event):
if not self.moving_flag and \
event.guiEvent.keyval == gtk.keysyms.Shift_L:
self.set_ylock(not self.ylock)
if event.x is not None and event.y is not None:
self._draw_cross(event)
def _switch_moving_flag_action(self, event):
if event.key == 'm':
self.set_moving_flag(not self.moving_flag)
def _mouse_over_legend(self, event):
if self.plegend is not None and self.plegend.get_visible():
bbox = self.plegend.get_frame()
x, y = bbox.get_x(), bbox.get_y()
width, height = bbox.get_width(), bbox.get_height()
if event.x >= x and event.x <= x + width and \
event.y >= y and event.y <= y + height:
if self.cross_bg is not None:
self.figure.canvas.restore_region(self.cross_bg)
self.figure.canvas.blit(self.bbox)
self.mouse_on_legend = True
else:
self.mouse_on_legend = False
def set_xlock(self, lock):
self.xlock = lock
self.emit_event("xlock_changed", lock)
def set_ylock(self, lock):
self.ylock = lock
self.emit_event("ylock_changed", lock)
def set_moving_flag(self, move):
self.moving_flag = move
self.emit_event("moving_flag_changed", move)
def hide_legend(self, hide):
if self.plegend is not None:
self.plegend.set_visible(not(hide))
self.figure.canvas.draw_idle()
def register_pick_legend(self, legend, lines_config):
def change_legline_fn(legline, action='original'):
if action == 'original' or action == 'on':
legline.set_alpha(1.0)
legline._legmarker.set_alpha(1.0)
elif action == 'off':
legline.set_alpha(0.3)
legline._legmarker.set_alpha(0.3)
else:
raise Exception(
'Unexpected parameter \'which_use={0}\''.format(action))
lined = dict()
for legline, line_config in zip(legend.get_lines(), lines_config):
legline.set_picker(5)
lined[legline] = line_config
line_config.set_mpl_legline(legline)
dlc = DrawLinesConfig(lines_config)
def on_pick(event):
legline = event.artist
line_config = lined[legline]
dlc.change_lines_config(line_config, change_legline_fn)
if line_config.get_mpl_line2() == 'create_new':
line2 = line_config.copy_mpl_line1()
self.add_line(line2)
line_config.set_mpl_line2(line2)
line_config.set_mpl_line2_visible(True)
self.figure.canvas.draw_idle()
self.figure.canvas.mpl_connect('pick_event', on_pick)
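# Instantiation sketch (an assumption based on the 'name' attribute and the
# register_projection import above; the actual wiring happens elsewhere in Kaira):
#
#   mpl_register_projection(BasicChart)
#   figure = mpl_Figure()
#   canvas = mpl_FigureCanvas(figure)
#   ax = figure.add_subplot(111, projection=BasicChart.name)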
class TimeChart(BasicChart):
    '''This chart is connected to the replay. It is realized through the
    x or y (time) axis. It is important that the time axis corresponds
    to the replay slider!'''
name = 'time_chart'
def __init__(self,
fig,
rec,
axisbg=None, # defaults to rc axes.facecolor
frameon=True,
sharex=None,
sharey=None,
label="",
xscale=None,
yscale=None,
**kwargs):
        BasicChart.__init__(
            self, fig, rec, axisbg, frameon, sharex, sharey, label,
            xscale, yscale, **kwargs)
        # Connect to the replay slider. The event is connected through
        # gtk connect, not mpl_connect, because the canvas extends gtk.DrawingArea.
fig.canvas.mpl_connect("button_press_event", self._double_click)
def set_time_axis(self, axis):
pass
def _double_click(self, event):
'''Connect to the replay window.'''
if event.button == 1 and event.guiEvent.type == gtk.gdk._2BUTTON_PRESS:
print 'double click'
# self.emit_event("change_slider", event.xdata)
class ChartWidget(gtk.VBox):
def __init__(self, figure, with_legend=True, xlock=False, ylock=False):
gtk.VBox.__init__(self)
self.figure = figure
ax = figure.gca()
# chart toolbar
toolbar = self._chart_toolbar(ax, with_legend)
self.pack_start(toolbar, False, False)
        # It is necessary to set those lock arguments after creating the toolbar.
ax.set_xlock(xlock)
ax.set_ylock(ylock)
# set size of canvas
w, h = self.figure.get_figwidth(), self.figure.get_figheight()
dpi = self.figure.get_dpi()
self.figure.canvas.set_size_request(int(w * dpi), int(h * dpi))
sc = gtk.ScrolledWindow()
sc.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
sc.add_with_viewport(self.figure.canvas)
self.pack_start(sc, True, True, 0)
def get_figure(self):
        return self.figure
def _chart_toolbar(self, ax, with_legend):
toolbar = gtk.Toolbar()
toolbar.set_icon_size(gtk.ICON_SIZE_SMALL_TOOLBAR)
toolbar.set_tooltips(True)
btn_save = gtk.ToolButton()
btn_save.connect("clicked", self._btn_save_action)
btn_save.set_stock_id(gtk.STOCK_SAVE)
btn_save.set_tooltip_text("Save graph")
toolbar.add(btn_save)
toolbar.add(gtk.SeparatorToolItem())
btn_restore = gtk.ToolButton()
btn_restore.connect(
"clicked",
lambda w: self._btn_restore_view_action(self.figure.gca()))
btn_restore.set_stock_id(gtk.STOCK_ZOOM_100)
btn_restore.set_tooltip_text("Restore view")
toolbar.add(btn_restore)
toolbar.add(gtk.SeparatorToolItem())
icon_hide_legend = gtk.image_new_from_file(
os.path.join(paths.ICONS_DIR, "hide_legend.svg"))
btn_hide_legend = gtk.ToggleToolButton()
btn_hide_legend.set_icon_widget(icon_hide_legend)
btn_hide_legend.set_tooltip_text("Hide legend")
btn_hide_legend.connect("toggled", self._btn_hide_legend_action)
btn_hide_legend.set_sensitive(with_legend)
toolbar.add(btn_hide_legend)
toolbar.add(gtk.SeparatorToolItem())
icon_xlock = gtk.image_new_from_file(
os.path.join(paths.ICONS_DIR, "xlock.svg"))
btn_xlock = gtk.ToggleToolButton()
btn_xlock.set_icon_widget(icon_xlock)
btn_xlock.set_tooltip_text("Lock X-axis (keep CTRL)")
btn_xlock.connect("toggled", self._btn_xlock_action)
ax.set_callback(
"xlock_changed", lambda xlock: btn_xlock.set_active(xlock))
toolbar.add(btn_xlock)
icon_ylock = gtk.image_new_from_file(
os.path.join(paths.ICONS_DIR, "ylock.svg"))
btn_ylock = gtk.ToggleToolButton()
btn_ylock.set_icon_widget(icon_ylock)
btn_ylock.set_tooltip_text("Lock Y-axis (keep CTRL)")
btn_ylock.connect("toggled", self._btn_ylock_action)
ax.set_callback(
"ylock_changed", lambda ylock: btn_ylock.set_active(ylock))
toolbar.add(btn_ylock)
icon_moving = gtk.image_new_from_file(
os.path.join(paths.ICONS_DIR, "moving.svg"))
btn_moving = gtk.ToggleToolButton()
btn_moving.set_icon_widget(icon_moving)
btn_moving.set_tooltip_text("Catch canvas (press key 'm')")
btn_moving.connect("toggled", self._btn_moving_action)
ax.set_callback(
"moving_flag_changed",
lambda moving_flag: self._moving_flag_changed(
moving_flag, btn_moving, btn_xlock, btn_ylock))
toolbar.add(btn_moving)
return toolbar
def _moving_flag_changed(
self, moving_flag, btn_moving, btn_xlock, btn_ylock):
btn_moving.set_active(moving_flag)
btn_xlock.set_sensitive(not moving_flag)
btn_ylock.set_sensitive(not moving_flag)
def _btn_moving_action(self, widget):
ax = self.figure.gca()
moving_flag = widget.get_active()
ax.set_moving_flag(moving_flag)
def _btn_xlock_action(self, widget):
ax = self.figure.gca()
lock = widget.get_active()
ax.set_xlock(lock)
def _btn_ylock_action(self, widget):
ax = self.figure.gca()
lock = widget.get_active()
ax.set_ylock(lock)
def _btn_hide_legend_action(self, widget):
ax = self.figure.gca()
hide = widget.get_active()
ax.hide_legend(hide)
def _btn_restore_view_action(self, ax):
restore = False
if ax.original_view_dim is not None:
xmin, xmax, ymin, ymax = ax.original_view_dim
restore = True
elif len(ax.zoom_stack) > 0:
xmin, xmax, ymin, ymax = ax.zoom_stack[0]
restore = True
if restore:
ax.original_view_dim = None
ax.zoom_stack = []
ax.set_xlim(xmin, xmax)
ax.set_ylim(ymin, ymax)
ax.figure.canvas.draw_idle()
def _btn_save_action(self, widget):
        # TODO: design the save dialog properly!!
dialog = gtk.FileChooserDialog(
"Save graph", None, gtk.FILE_CHOOSER_ACTION_SAVE,
(gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL,
gtk.STOCK_SAVE, gtk.RESPONSE_OK))
dialog.set_default_response(gtk.RESPONSE_OK)
svg_filter = gtk.FileFilter()
svg_filter.set_name("Vector images")
svg_filter.add_mime_type("image/svg")
svg_filter.add_pattern("*.svg")
dialog.add_filter(svg_filter)
raster_filter = gtk.FileFilter()
raster_filter.set_name("Raster images")
raster_filter.add_mime_type("image/png")
raster_filter.add_mime_type("image/jpeg")
raster_filter.add_mime_type("image/gif")
raster_filter.add_pattern("*.png")
raster_filter.add_pattern("*.jpg")
raster_filter.add_pattern("*.gif")
dialog.add_filter(raster_filter)
response = dialog.run()
if response == gtk.RESPONSE_OK:
self.figure.savefig(dialog.get_filename())
dialog.destroy()
#*******************************************************************************
# Defined method for "standard" graphs:
def _empty_chart(title="", xlabel="", ylabel=""):
figure = mpl_Figure()
canvas = mpl_FigureCanvas(figure)
figure.set_canvas(canvas)
ax = figure.add_subplot(111, projection=BasicChart.name)
ax.text(0.5, 0.5, 'No measured data.', color='#aa0000', fontsize=36,
ha='center', va='center', alpha=1.0, transform=ax.transAxes)
ax.set_title(title)
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
return ChartWidget(figure)
def _register_histogram_pick_legend(ax, legend, lines_config):
    def change_legline_fn(legline, action='original'):
if action == 'original':
legline.set_linewidth(1.0)
legline.set_alpha(1.0)
elif action == 'on':
legline.set_linewidth(6.0)
legline.set_alpha(1.0)
elif action == 'off':
legline.set_linewidth(6.0)
legline.set_alpha(0.3)
else:
raise Exception('Unexpected parameter \'{0}\''.format(action))
lined = dict()
for legline, line_config in zip(legend.get_lines(), lines_config):
legline.set_picker(5)
lined[legline] = line_config
line_config.set_mpl_legline(legline)
dlc = DrawLinesConfig(lines_config)
def on_pick(event):
legline = event.artist
line_config = lined[legline]
dlc.change_lines_config(line_config, change_legline_fn)
if line_config.get_mpl_line2() == 'create_new':
bar = ax.bar(line_config.get_x_values(), line_config.get_y_values(),
color=line_config.get_color(), alpha=0.6)
line_config.set_mpl_line2(bar)
line_config.set_mpl_line2_visible(True)
ax.figure.canvas.draw_idle()
ax.figure.canvas.mpl_connect('pick_event', on_pick)
def histogram(names, values, title="", xlabel="", ylabel=""):
if not names or not values:
return _empty_chart(title, xlabel, ylabel)
figure = mpl_Figure()
canvas = mpl_FigureCanvas(figure)
figure.set_canvas(canvas)
ax = figure.add_subplot(111, projection=BasicChart.name)
colors = [cm.hsv(float(i)/len(values)) for i in xrange(len(values))]
n, bins, patches = ax.hist(
values, 10, normed=0, histtype="bar", label=names, color=colors)
for label in ax.xaxis.get_ticklabels():
label.set_rotation(-35)
label.set_horizontalalignment('left')
ax.plegend = ax.legend(loc="upper right", fancybox=True, shadow=True)
ax.xaxis.set_major_formatter(mpl_FuncFormatter(
lambda time, pos: utils.time_to_string(time)[:-7]))
ax.set_xlim(xmin=0)
ax.set_title(title)
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
return ChartWidget(figure, xlock=True)
def utilization_chart(names,
values,
title="",
xlabel="",
ylabel="",
idles=None):
if not names or not values:
return _empty_chart(title, xlabel, ylabel)
figure = mpl_Figure()
canvas = mpl_FigureCanvas(figure)
figure.set_canvas(canvas)
# TODO: Change it to TimeChart
ax = figure.add_subplot(111, projection=BasicChart.name)
ywidth = 2
yticks = []
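    # Each series occupies a horizontal band of height `ywidth` separated by
    # a one-unit gap; idle intervals (if given) are drawn into the same band
    # as the running intervals of that series.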
if idles is not None:
for i, lidle in enumerate(idles):
y = ((i+1) * ywidth) + (i+1)
ax.broken_barh(
lidle, (y, ywidth),
edgecolor='face', facecolor='#EAA769')
for i, ldata in enumerate(values):
        y = (ywidth + 1) * (i + 1)
yticks.append(y + ywidth/2)
ax.broken_barh(
ldata, (y, ywidth),
edgecolor='face', facecolor='green')
ax.set_yticks(yticks)
ax.set_yticklabels(names)
for label in ax.xaxis.get_ticklabels():
label.set_rotation(-35)
label.set_horizontalalignment('left')
for i, label in enumerate(ax.yaxis.get_ticklabels()):
        # add three spaces at the beginning of the name
names[i] = " %s" % names[i]
label.set_horizontalalignment("left")
label.set_verticalalignment('center')
p = mpl_Rectangle((0, 0), 1, 1, edgecolor='green', fc='green', alpha=0.75)
if idles is not None:
        idle_leg = mpl_Rectangle(
            (0, 0), 1, 1, edgecolor='#eaa769', fc='#eaa769', alpha=0.75)
        ax.plegend = ax.legend(
            [p, idle_leg], ["Running", "Idle"], loc="upper left",
            fancybox=True, shadow=True)
else:
ax.plegend = ax.legend(
[p], ["Running"], loc="upper left", fancybox=True, shadow=True)
ax.xaxis.grid(True, linestyle="-", which='major', color='black', alpha=0.7)
ax.xaxis.set_major_formatter(mpl_FuncFormatter(
lambda time, pos: utils.time_to_string(time)[:-7]))
ax.set_xlim(xmin=0)
ax.get_figure().tight_layout()
ax.set_title(title)
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
# resize figure
w, h = figure.get_size_inches()
figure.set_size_inches(w, len(values) * 0.4)
return ChartWidget(figure, ylock=True)
def place_chart(names, values, title="", xlabel="", ylabel=""):
if not names or not values:
return _empty_chart(title, xlabel, ylabel)
figure = mpl_Figure()
canvas = mpl_FigureCanvas(figure)
figure.set_canvas(canvas)
ax = figure.add_subplot(111, projection=BasicChart.name)
# fill data
lines_config = []
for i, (xvalues, yvalues) in enumerate(values):
line, = ax.plot(
xvalues, yvalues, 'o-', drawstyle="steps-post", label=names[i])
lines_config.append(
LineConfig(line, xvalues, yvalues, line.get_color()))
for label in ax.xaxis.get_ticklabels():
label.set_rotation(-35)
label.set_horizontalalignment('left')
# set legend
ax.plegend = ax.legend(loc="upper left", fancybox=True, shadow=True)
ax.register_pick_legend(ax.plegend, lines_config)
ax.xaxis.set_major_formatter(mpl_FuncFormatter(
lambda time, pos: utils.time_to_string(time)[:-7]))
# set basic properties
    ax.set_xlim(xmin=0)
ax.get_figure().tight_layout()
ax.set_title(title)
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
return ChartWidget(figure)
def _register_new_types_charts():
mpl_register_projection(BasicChart)
mpl_register_projection(TimeChart)
_register_new_types_charts()
|
gpl-3.0
|
macks22/scikit-learn
|
examples/mixture/plot_gmm_pdf.py
|
284
|
1528
|
"""
=============================================
Density Estimation for a mixture of Gaussians
=============================================
Plot the density estimation of a mixture of two Gaussians. Data is
generated from two Gaussians with different centers and covariance
matrices.
"""
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm
from sklearn import mixture
n_samples = 300
# generate random sample, two components
np.random.seed(0)
# generate spherical data centered on (20, 20)
shifted_gaussian = np.random.randn(n_samples, 2) + np.array([20, 20])
# generate zero centered stretched Gaussian data
C = np.array([[0., -0.7], [3.5, .7]])
stretched_gaussian = np.dot(np.random.randn(n_samples, 2), C)
# concatenate the two datasets into the final training set
X_train = np.vstack([shifted_gaussian, stretched_gaussian])
# fit a Gaussian Mixture Model with two components
clf = mixture.GMM(n_components=2, covariance_type='full')
clf.fit(X_train)
# display predicted scores by the model as a contour plot
x = np.linspace(-20.0, 30.0)
y = np.linspace(-20.0, 40.0)
X, Y = np.meshgrid(x, y)
XX = np.array([X.ravel(), Y.ravel()]).T
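# In this (older, since-deprecated) GMM API, score_samples returns a tuple of
# (per-sample log-likelihood, responsibilities); keep the log-likelihoods and
# negate them so the contours show negative log-likelihood.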
Z = -clf.score_samples(XX)[0]
Z = Z.reshape(X.shape)
CS = plt.contour(X, Y, Z, norm=LogNorm(vmin=1.0, vmax=1000.0),
levels=np.logspace(0, 3, 10))
CB = plt.colorbar(CS, shrink=0.8, extend='both')
plt.scatter(X_train[:, 0], X_train[:, 1], .8)
plt.title('Negative log-likelihood predicted by a GMM')
plt.axis('tight')
plt.show()
|
bsd-3-clause
|
glennq/scikit-learn
|
sklearn/tests/test_dummy.py
|
186
|
17778
|
from __future__ import division
import numpy as np
import scipy.sparse as sp
from sklearn.base import clone
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.stats import _weighted_percentile
from sklearn.dummy import DummyClassifier, DummyRegressor
@ignore_warnings
def _check_predict_proba(clf, X, y):
proba = clf.predict_proba(X)
# We know that we can have division by zero
log_proba = clf.predict_log_proba(X)
y = np.atleast_1d(y)
if y.ndim == 1:
y = np.reshape(y, (-1, 1))
n_outputs = y.shape[1]
n_samples = len(X)
if n_outputs == 1:
proba = [proba]
log_proba = [log_proba]
for k in range(n_outputs):
assert_equal(proba[k].shape[0], n_samples)
assert_equal(proba[k].shape[1], len(np.unique(y[:, k])))
assert_array_equal(proba[k].sum(axis=1), np.ones(len(X)))
# We know that we can have division by zero
assert_array_equal(np.log(proba[k]), log_proba[k])
def _check_behavior_2d(clf):
# 1d case
X = np.array([[0], [0], [0], [0]]) # ignored
y = np.array([1, 2, 1, 1])
est = clone(clf)
est.fit(X, y)
y_pred = est.predict(X)
assert_equal(y.shape, y_pred.shape)
# 2d case
y = np.array([[1, 0],
[2, 0],
[1, 0],
[1, 3]])
est = clone(clf)
est.fit(X, y)
y_pred = est.predict(X)
assert_equal(y.shape, y_pred.shape)
def _check_behavior_2d_for_constant(clf):
# 2d case only
X = np.array([[0], [0], [0], [0]]) # ignored
y = np.array([[1, 0, 5, 4, 3],
[2, 0, 1, 2, 5],
[1, 0, 4, 5, 2],
[1, 3, 3, 2, 0]])
est = clone(clf)
est.fit(X, y)
y_pred = est.predict(X)
assert_equal(y.shape, y_pred.shape)
def _check_equality_regressor(statistic, y_learn, y_pred_learn,
y_test, y_pred_test):
assert_array_equal(np.tile(statistic, (y_learn.shape[0], 1)),
y_pred_learn)
assert_array_equal(np.tile(statistic, (y_test.shape[0], 1)),
y_pred_test)
def test_most_frequent_and_prior_strategy():
X = [[0], [0], [0], [0]] # ignored
y = [1, 2, 1, 1]
for strategy in ("most_frequent", "prior"):
clf = DummyClassifier(strategy=strategy, random_state=0)
clf.fit(X, y)
assert_array_equal(clf.predict(X), np.ones(len(X)))
_check_predict_proba(clf, X, y)
if strategy == "prior":
assert_array_equal(clf.predict_proba([X[0]]),
clf.class_prior_.reshape((1, -1)))
else:
assert_array_equal(clf.predict_proba([X[0]]),
clf.class_prior_.reshape((1, -1)) > 0.5)
def test_most_frequent_and_prior_strategy_multioutput():
X = [[0], [0], [0], [0]] # ignored
y = np.array([[1, 0],
[2, 0],
[1, 0],
[1, 3]])
n_samples = len(X)
for strategy in ("prior", "most_frequent"):
clf = DummyClassifier(strategy=strategy, random_state=0)
clf.fit(X, y)
assert_array_equal(clf.predict(X),
np.hstack([np.ones((n_samples, 1)),
np.zeros((n_samples, 1))]))
_check_predict_proba(clf, X, y)
_check_behavior_2d(clf)
def test_stratified_strategy():
X = [[0]] * 5 # ignored
y = [1, 2, 1, 1, 2]
clf = DummyClassifier(strategy="stratified", random_state=0)
clf.fit(X, y)
X = [[0]] * 500
y_pred = clf.predict(X)
p = np.bincount(y_pred) / float(len(X))
assert_almost_equal(p[1], 3. / 5, decimal=1)
assert_almost_equal(p[2], 2. / 5, decimal=1)
_check_predict_proba(clf, X, y)
def test_stratified_strategy_multioutput():
X = [[0]] * 5 # ignored
y = np.array([[2, 1],
[2, 2],
[1, 1],
[1, 2],
[1, 1]])
clf = DummyClassifier(strategy="stratified", random_state=0)
clf.fit(X, y)
X = [[0]] * 500
y_pred = clf.predict(X)
for k in range(y.shape[1]):
p = np.bincount(y_pred[:, k]) / float(len(X))
assert_almost_equal(p[1], 3. / 5, decimal=1)
assert_almost_equal(p[2], 2. / 5, decimal=1)
_check_predict_proba(clf, X, y)
_check_behavior_2d(clf)
def test_uniform_strategy():
X = [[0]] * 4 # ignored
y = [1, 2, 1, 1]
clf = DummyClassifier(strategy="uniform", random_state=0)
clf.fit(X, y)
X = [[0]] * 500
y_pred = clf.predict(X)
p = np.bincount(y_pred) / float(len(X))
assert_almost_equal(p[1], 0.5, decimal=1)
assert_almost_equal(p[2], 0.5, decimal=1)
_check_predict_proba(clf, X, y)
def test_uniform_strategy_multioutput():
X = [[0]] * 4 # ignored
y = np.array([[2, 1],
[2, 2],
[1, 2],
[1, 1]])
clf = DummyClassifier(strategy="uniform", random_state=0)
clf.fit(X, y)
X = [[0]] * 500
y_pred = clf.predict(X)
for k in range(y.shape[1]):
p = np.bincount(y_pred[:, k]) / float(len(X))
assert_almost_equal(p[1], 0.5, decimal=1)
assert_almost_equal(p[2], 0.5, decimal=1)
_check_predict_proba(clf, X, y)
_check_behavior_2d(clf)
def test_string_labels():
X = [[0]] * 5
y = ["paris", "paris", "tokyo", "amsterdam", "berlin"]
clf = DummyClassifier(strategy="most_frequent")
clf.fit(X, y)
assert_array_equal(clf.predict(X), ["paris"] * 5)
def test_classifier_exceptions():
clf = DummyClassifier(strategy="unknown")
assert_raises(ValueError, clf.fit, [], [])
assert_raises(ValueError, clf.predict, [])
assert_raises(ValueError, clf.predict_proba, [])
def test_mean_strategy_regressor():
random_state = np.random.RandomState(seed=1)
X = [[0]] * 4 # ignored
y = random_state.randn(4)
reg = DummyRegressor()
reg.fit(X, y)
assert_array_equal(reg.predict(X), [np.mean(y)] * len(X))
def test_mean_strategy_multioutput_regressor():
random_state = np.random.RandomState(seed=1)
X_learn = random_state.randn(10, 10)
y_learn = random_state.randn(10, 5)
mean = np.mean(y_learn, axis=0).reshape((1, -1))
X_test = random_state.randn(20, 10)
y_test = random_state.randn(20, 5)
# Correctness oracle
est = DummyRegressor()
est.fit(X_learn, y_learn)
y_pred_learn = est.predict(X_learn)
y_pred_test = est.predict(X_test)
_check_equality_regressor(mean, y_learn, y_pred_learn, y_test, y_pred_test)
_check_behavior_2d(est)
def test_regressor_exceptions():
reg = DummyRegressor()
assert_raises(ValueError, reg.predict, [])
def test_median_strategy_regressor():
random_state = np.random.RandomState(seed=1)
X = [[0]] * 5 # ignored
y = random_state.randn(5)
reg = DummyRegressor(strategy="median")
reg.fit(X, y)
assert_array_equal(reg.predict(X), [np.median(y)] * len(X))
def test_median_strategy_multioutput_regressor():
random_state = np.random.RandomState(seed=1)
X_learn = random_state.randn(10, 10)
y_learn = random_state.randn(10, 5)
median = np.median(y_learn, axis=0).reshape((1, -1))
X_test = random_state.randn(20, 10)
y_test = random_state.randn(20, 5)
# Correctness oracle
est = DummyRegressor(strategy="median")
est.fit(X_learn, y_learn)
y_pred_learn = est.predict(X_learn)
y_pred_test = est.predict(X_test)
_check_equality_regressor(
median, y_learn, y_pred_learn, y_test, y_pred_test)
_check_behavior_2d(est)
def test_quantile_strategy_regressor():
random_state = np.random.RandomState(seed=1)
X = [[0]] * 5 # ignored
y = random_state.randn(5)
reg = DummyRegressor(strategy="quantile", quantile=0.5)
reg.fit(X, y)
assert_array_equal(reg.predict(X), [np.median(y)] * len(X))
reg = DummyRegressor(strategy="quantile", quantile=0)
reg.fit(X, y)
assert_array_equal(reg.predict(X), [np.min(y)] * len(X))
reg = DummyRegressor(strategy="quantile", quantile=1)
reg.fit(X, y)
assert_array_equal(reg.predict(X), [np.max(y)] * len(X))
reg = DummyRegressor(strategy="quantile", quantile=0.3)
reg.fit(X, y)
assert_array_equal(reg.predict(X), [np.percentile(y, q=30)] * len(X))
def test_quantile_strategy_multioutput_regressor():
random_state = np.random.RandomState(seed=1)
X_learn = random_state.randn(10, 10)
y_learn = random_state.randn(10, 5)
median = np.median(y_learn, axis=0).reshape((1, -1))
quantile_values = np.percentile(y_learn, axis=0, q=80).reshape((1, -1))
X_test = random_state.randn(20, 10)
y_test = random_state.randn(20, 5)
# Correctness oracle
est = DummyRegressor(strategy="quantile", quantile=0.5)
est.fit(X_learn, y_learn)
y_pred_learn = est.predict(X_learn)
y_pred_test = est.predict(X_test)
_check_equality_regressor(
median, y_learn, y_pred_learn, y_test, y_pred_test)
_check_behavior_2d(est)
# Correctness oracle
est = DummyRegressor(strategy="quantile", quantile=0.8)
est.fit(X_learn, y_learn)
y_pred_learn = est.predict(X_learn)
y_pred_test = est.predict(X_test)
_check_equality_regressor(
quantile_values, y_learn, y_pred_learn, y_test, y_pred_test)
_check_behavior_2d(est)
def test_quantile_invalid():
X = [[0]] * 5 # ignored
y = [0] * 5 # ignored
est = DummyRegressor(strategy="quantile")
assert_raises(ValueError, est.fit, X, y)
est = DummyRegressor(strategy="quantile", quantile=None)
assert_raises(ValueError, est.fit, X, y)
est = DummyRegressor(strategy="quantile", quantile=[0])
assert_raises(ValueError, est.fit, X, y)
est = DummyRegressor(strategy="quantile", quantile=-0.1)
assert_raises(ValueError, est.fit, X, y)
est = DummyRegressor(strategy="quantile", quantile=1.1)
assert_raises(ValueError, est.fit, X, y)
est = DummyRegressor(strategy="quantile", quantile='abc')
assert_raises(TypeError, est.fit, X, y)
def test_quantile_strategy_empty_train():
est = DummyRegressor(strategy="quantile", quantile=0.4)
assert_raises(ValueError, est.fit, [], [])
def test_constant_strategy_regressor():
random_state = np.random.RandomState(seed=1)
X = [[0]] * 5 # ignored
y = random_state.randn(5)
reg = DummyRegressor(strategy="constant", constant=[43])
reg.fit(X, y)
assert_array_equal(reg.predict(X), [43] * len(X))
reg = DummyRegressor(strategy="constant", constant=43)
reg.fit(X, y)
assert_array_equal(reg.predict(X), [43] * len(X))
def test_constant_strategy_multioutput_regressor():
random_state = np.random.RandomState(seed=1)
X_learn = random_state.randn(10, 10)
y_learn = random_state.randn(10, 5)
# test with 2d array
constants = random_state.randn(5)
X_test = random_state.randn(20, 10)
y_test = random_state.randn(20, 5)
# Correctness oracle
est = DummyRegressor(strategy="constant", constant=constants)
est.fit(X_learn, y_learn)
y_pred_learn = est.predict(X_learn)
y_pred_test = est.predict(X_test)
_check_equality_regressor(
constants, y_learn, y_pred_learn, y_test, y_pred_test)
_check_behavior_2d_for_constant(est)
def test_y_mean_attribute_regressor():
X = [[0]] * 5
y = [1, 2, 4, 6, 8]
# when strategy = 'mean'
est = DummyRegressor(strategy='mean')
est.fit(X, y)
assert_equal(est.constant_, np.mean(y))
def test_unknown_strategey_regressor():
X = [[0]] * 5
y = [1, 2, 4, 6, 8]
est = DummyRegressor(strategy='gona')
assert_raises(ValueError, est.fit, X, y)
def test_constants_not_specified_regressor():
X = [[0]] * 5
y = [1, 2, 4, 6, 8]
est = DummyRegressor(strategy='constant')
assert_raises(TypeError, est.fit, X, y)
def test_constant_size_multioutput_regressor():
random_state = np.random.RandomState(seed=1)
X = random_state.randn(10, 10)
y = random_state.randn(10, 5)
est = DummyRegressor(strategy='constant', constant=[1, 2, 3, 4])
assert_raises(ValueError, est.fit, X, y)
def test_constant_strategy():
X = [[0], [0], [0], [0]] # ignored
y = [2, 1, 2, 2]
clf = DummyClassifier(strategy="constant", random_state=0, constant=1)
clf.fit(X, y)
assert_array_equal(clf.predict(X), np.ones(len(X)))
_check_predict_proba(clf, X, y)
X = [[0], [0], [0], [0]] # ignored
y = ['two', 'one', 'two', 'two']
clf = DummyClassifier(strategy="constant", random_state=0, constant='one')
clf.fit(X, y)
assert_array_equal(clf.predict(X), np.array(['one'] * 4))
_check_predict_proba(clf, X, y)
def test_constant_strategy_multioutput():
X = [[0], [0], [0], [0]] # ignored
y = np.array([[2, 3],
[1, 3],
[2, 3],
[2, 0]])
n_samples = len(X)
clf = DummyClassifier(strategy="constant", random_state=0,
constant=[1, 0])
clf.fit(X, y)
assert_array_equal(clf.predict(X),
np.hstack([np.ones((n_samples, 1)),
np.zeros((n_samples, 1))]))
_check_predict_proba(clf, X, y)
def test_constant_strategy_exceptions():
X = [[0], [0], [0], [0]] # ignored
y = [2, 1, 2, 2]
clf = DummyClassifier(strategy="constant", random_state=0)
assert_raises(ValueError, clf.fit, X, y)
clf = DummyClassifier(strategy="constant", random_state=0,
constant=[2, 0])
assert_raises(ValueError, clf.fit, X, y)
def test_classification_sample_weight():
X = [[0], [0], [1]]
y = [0, 1, 0]
sample_weight = [0.1, 1., 0.1]
clf = DummyClassifier().fit(X, y, sample_weight)
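    # Weighted class counts: class 0 gets 0.1 + 0.1 = 0.2, class 1 gets 1.0,
    # so the expected priors are 0.2 / 1.2 and 1.0 / 1.2.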
assert_array_almost_equal(clf.class_prior_, [0.2 / 1.2, 1. / 1.2])
def test_constant_strategy_sparse_target():
X = [[0]] * 5 # ignored
y = sp.csc_matrix(np.array([[0, 1],
[4, 0],
[1, 1],
[1, 4],
[1, 1]]))
n_samples = len(X)
clf = DummyClassifier(strategy="constant", random_state=0, constant=[1, 0])
clf.fit(X, y)
y_pred = clf.predict(X)
assert_true(sp.issparse(y_pred))
assert_array_equal(y_pred.toarray(), np.hstack([np.ones((n_samples, 1)),
np.zeros((n_samples, 1))]))
def test_uniform_strategy_sparse_target_warning():
X = [[0]] * 5 # ignored
y = sp.csc_matrix(np.array([[2, 1],
[2, 2],
[1, 4],
[4, 2],
[1, 1]]))
clf = DummyClassifier(strategy="uniform", random_state=0)
assert_warns_message(UserWarning,
"the uniform strategy would not save memory",
clf.fit, X, y)
X = [[0]] * 500
y_pred = clf.predict(X)
for k in range(y.shape[1]):
p = np.bincount(y_pred[:, k]) / float(len(X))
assert_almost_equal(p[1], 1 / 3, decimal=1)
assert_almost_equal(p[2], 1 / 3, decimal=1)
assert_almost_equal(p[4], 1 / 3, decimal=1)
def test_stratified_strategy_sparse_target():
X = [[0]] * 5 # ignored
y = sp.csc_matrix(np.array([[4, 1],
[0, 0],
[1, 1],
[1, 4],
[1, 1]]))
clf = DummyClassifier(strategy="stratified", random_state=0)
clf.fit(X, y)
X = [[0]] * 500
y_pred = clf.predict(X)
assert_true(sp.issparse(y_pred))
y_pred = y_pred.toarray()
for k in range(y.shape[1]):
p = np.bincount(y_pred[:, k]) / float(len(X))
assert_almost_equal(p[1], 3. / 5, decimal=1)
assert_almost_equal(p[0], 1. / 5, decimal=1)
assert_almost_equal(p[4], 1. / 5, decimal=1)
def test_most_frequent_and_prior_strategy_sparse_target():
X = [[0]] * 5 # ignored
y = sp.csc_matrix(np.array([[1, 0],
[1, 3],
[4, 0],
[0, 1],
[1, 0]]))
n_samples = len(X)
y_expected = np.hstack([np.ones((n_samples, 1)), np.zeros((n_samples, 1))])
for strategy in ("most_frequent", "prior"):
clf = DummyClassifier(strategy=strategy, random_state=0)
clf.fit(X, y)
y_pred = clf.predict(X)
assert_true(sp.issparse(y_pred))
assert_array_equal(y_pred.toarray(), y_expected)
def test_dummy_regressor_sample_weight(n_samples=10):
random_state = np.random.RandomState(seed=1)
X = [[0]] * n_samples
y = random_state.rand(n_samples)
sample_weight = random_state.rand(n_samples)
est = DummyRegressor(strategy="mean").fit(X, y, sample_weight)
assert_equal(est.constant_, np.average(y, weights=sample_weight))
est = DummyRegressor(strategy="median").fit(X, y, sample_weight)
assert_equal(est.constant_, _weighted_percentile(y, sample_weight, 50.))
est = DummyRegressor(strategy="quantile", quantile=.95).fit(X, y,
sample_weight)
assert_equal(est.constant_, _weighted_percentile(y, sample_weight, 95.))
|
bsd-3-clause
|
lol/BCI-BO-old
|
calculate_accuracies.py
|
1
|
5471
|
import numpy as np
import sys
import os
import json
sys.path.append('./BCI_Framework')
import Configuration_BCI
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import SVC
if __name__ == '__main__':
##################################################input values##########################################################################
dataset = 'BCICIII3b'
classifier_name = 'LogisticRegression'
feature_extractor_name = 'BP'
#########################################################################################################################################
true_labels_folder = 'calc_results_labels/'
config = Configuration_BCI.Configuration_BCI('BCI_Framework', dataset, 'ALL')
opt_res_folder = os.path.join(config.configuration["results_opt_path_str"],classifier_name)
opt_res_folder = os.path.join(opt_res_folder, feature_extractor_name)
res_folder = os.path.join(config.configuration["results_path_str"],classifier_name)
res_folder = os.path.join(res_folder, feature_extractor_name)
opt_file_names = [ f for f in os.listdir(opt_res_folder) if os.path.isfile(os.path.join(opt_res_folder,f)) ]
# O3_Y_train = np.array([1,2,2,1,1,1,2,1,1,2,1,2,1,1,2,1,2,1,2,2,1,1,1,1,2,2,2,2,2,2,2,2,2,2,2,2,1,1,1,2,1,2,2,2,1,2,1,1,2,2,2,2,2,1,1,1,1,1,1,2,1,2,2,1,2,2,2,2,1,1,2,1,1,1,1,1,1,1,2,2,2,2,2,2,2,1,1,2,1,2,1,1,2,1,1,2,1,1,1,1,2,1,2,2,1,1,2,1,1,2,2,1,1,1,1,1,2,1,2,2,2,2,2,1,2,2,2,2,1,2,2,2,1,1,1,1,1,1,2,2,2,2,1,1,2,1,2,1,1,2,1,2,2,2,2,1,2,1,1,1,2,1,1,2,1,2,1,1,1,2,1,2,2,1,2,2,2,1,2,2,1,1,1,2,1,1,2,1,2,1,2,2,2,1,2,2,1,1,1,2,1,1,2,2,2,1,1,1,2,2,1,2,2,1,1,1,2,1,1,2,1,2,1,1,2,2,1,2,2,2,1,1,1,2,1,1,1,2,2,1,1,1])
# S4_Y_train = np.array([2,2,1,2,2,2,1,2,1,2,1,2,1,1,1,1,2,1,1,2,2,1,2,1,1,2,2,1,2,2,1,2,2,1,2,1,2,2,1,1,2,2,1,2,1,1,1,1,2,1,2,1,2,2,1,1,1,1,2,2,1,2,1,2,1,2,2,2,2,1,2,2,1,1,2,1,2,1,1,1,2,1,2,2,1,1,2,1,2,2,2,1,2,2,2,1,2,1,1,1,1,2,1,2,1,2,2,1,2,2,1,1,2,1,1,2,1,2,2,2,2,2,2,2,2,1,2,2,1,1,2,1,2,1,1,2,1,1,1,1,1,1,2,2,2,2,1,2,2,2,1,2,1,2,2,1,1,1,2,2,1,2,2,1,1,1,1,1,2,1,2,1,2,2,2,2,1,2,2,1,2,1,2,2,1,1,1,1,1,1,1,2,1,2,1,1,1,2,2,1,2,2,2,1,1,2,1,1,1,2,2,2,2,2,2,1,2,1,1,1,2,2,2,2,2,2,1,1,1,1,2,1,1,2,1,2,1,2,2,2,2,1,2,2,2,1,1,1,2,2,1,2,2,2,1,1,2,2,2,2,1,1,2,1,1,2,1,1,2,2,1,2,2,2,1,2,2,1,2,2,1,2,1,1,2,1,1,1,2,2,2,1,1,2,1,1,1,1,2,2,1,2,1,2,1,2,1,1,2,2,2,2,2,2,1,1,2,2,2,1,1,2,2,1,1,1,2,2,2,1,1,1,2,1,2,2,2,1,1,2,1,2,2,1,1,2,1,1,1,1,2,1,1,2,2,2,2,1,1,2,1,2,1,1,2,1,1,1,1,1,2,1,1,1,1,2,1,2,2,2,1,1,2,2,1,1,1,2,2,1,1,2,2,2,2,1,2,1,2,1,1,1,2,2,1,1,2,1,1,1,1,2,1,2,1,1,1,2,1,2,2,2,1,1,1,1,1,1,2,1,1,1,1,2,2,2,2,1,1,2,1,1,2,2,1,1,2,1,2,1,1,2,2,1,1,2,1,1,2,2,2,2,2,1,1,2,1,2,2,2,1,2,1,2,2,2,1,1,2,2,2,1,2,1,2,2,2,1,2,2,1,1,1,1,1,2,1,1,1,2,1,1,1,2,2,2,1,1,2,2,1,1,1,2,1,1,2,2,2,2,2,2,1,1,2,1,1,2,2,2,1,1,1,2,1,1,2,2,2,1])
# X11_Y_train = np.array([1,1,2,1,2,1,1,1,1,2,2,1,2,2,2,1,2,2,2,2,2,1,2,2,1,2,1,1,1,1,2,1,2,1,1,1,1,1,1,2,1,2,1,1,1,2,2,2,1,2,1,2,1,1,1,2,2,2,1,2,2,1,2,2,1,2,1,1,1,2,2,1,2,2,1,1,1,1,1,2,2,1,2,2,1,1,2,1,2,2,1,1,1,1,1,1,2,2,1,2,1,1,2,1,2,2,2,2,1,1,2,2,2,2,1,1,2,2,2,1,2,1,1,1,1,2,2,1,2,2,1,2,2,1,1,1,2,2,1,1,2,2,1,2,2,1,1,1,2,1,2,1,2,1,2,2,2,2,1,1,2,1,1,2,2,1,1,1,1,2,1,1,1,1,2,2,1,2,1,1,1,2,1,2,1,2,2,2,1,2,2,2,2,1,2,1,2,1,1,1,2,2,1,1,1,2,2,2,1,2,2,1,1,2,2,1,1,2,1,1,1,1,2,2,2,2,2,1,1,1,1,2,2,1,1,2,2,2,2,1,1,2,1,1,2,2,1,1,1,1,1,1,2,2,1,2,1,1,2,2,1,1,2,2,1,1,1,2,2,1,2,2,2,2,2,1,1,2,1,2,2,1,1,2,2,2,2,2,1,2,2,1,2,2,1,1,1,2,1,2,1,2,2,1,1,1,1,1,1,2,2,1,2,1,2,2,1,2,2,1,2,1,2,2,2,1,1,1,1,2,1,1,2,1,2,1,1,2,1,1,1,2,1,2,1,2,2,2,2,2,1,2,1,2,1,1,1,1,2,1,1,1,2,2,2,1,2,2,2,2,2,2,2,1,1,1,2,2,1,2,1,1,2,1,2,1,1,2,1,1,1,1,2,1,1,2,1,2,1,2,2,1,1,1,2,2,1,2,1,1,1,2,2,1,2,1,1,2,2,1,1,1,1,1,2,2,1,1,1,2,1,2,1,2,1,1,2,2,1,1,2,2,1,1,2,1,2,2,2,1,1,2,2,2,1,2,1,2,2,2,1,2,1,2,2,1,2,1,1,1,2,2,2,2,1,2,2,2,2,2,1,2,1,1,2,1,2,1,1,1,2,2,2,1,1,1,2,2,2,2,1,1,2,2,2,1,2,1,2,1,2,2,1,1,2,2,1,2,2,2,1,1,2,2,1,1,2,2,2,1,2,2,2,2,1,1,1,1,2,2])
true_dict = {}
all_subjects_test_probs_dict = dict()
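    # Test-set class probabilities of every stored model are accumulated per
    # subject, weighted by that model's cross-validation accuracy; the final
    # prediction (last loop below) is the argmax of this weighted sum.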
for subject in config.configuration["subject_names_str"]:
true_labels = np.loadtxt(os.path.join(true_labels_folder, subject + '_Y_test.txt'))
true_dict[subject] = true_labels
all_subjects_test_probs_dict[subject] = np.zeros((len(true_dict[subject]),2))
for subject in config.configuration["subject_names_str"]:
X_train = []
X_test = []
y_train = []
cv_accs = []
opt_accs = []
for opt_file_name in opt_file_names:
if (subject + '.npz') in opt_file_name:
opt_file_path = os.path.join(opt_res_folder,opt_file_name)
file_path = os.path.join(res_folder,opt_file_name)
cv_file_path = file_path[0:-4]
with open(cv_file_path,'r') as file:
all_res = json.load(file)
cv_acc = 1.0 - all_res['error']
npzfile = np.load(opt_file_path)
probs_test = npzfile['probs_test']
all_subjects_test_probs_dict[subject] += cv_acc * probs_test
for subject in config.configuration["subject_names_str"]:
        pred = np.argmax(all_subjects_test_probs_dict[subject], axis=1)
print 'subject ' + subject + ' weighted avg result: ', 100.0*sum(pred == true_dict[subject])/float(len(true_dict[subject]))
|
gpl-3.0
|
yl565/statsmodels
|
statsmodels/graphics/gofplots.py
|
29
|
26714
|
from statsmodels.compat.python import lzip, string_types
import numpy as np
from scipy import stats
from statsmodels.regression.linear_model import OLS
from statsmodels.tools.tools import add_constant
from statsmodels.tools.decorators import (resettable_cache,
cache_readonly,
cache_writable)
from . import utils
__all__ = ['qqplot', 'qqplot_2samples', 'qqline', 'ProbPlot']
class ProbPlot(object):
"""
Class for convenient construction of Q-Q, P-P, and probability plots.
Can take arguments specifying the parameters for dist or fit them
automatically. (See fit under kwargs.)
Parameters
----------
data : array-like
1d data array
dist : A scipy.stats or statsmodels distribution
Compare x against dist. The default is
scipy.stats.distributions.norm (a standard normal).
distargs : tuple
A tuple of arguments passed to dist to specify it fully
so dist.ppf may be called.
loc : float
Location parameter for dist
a : float
Offset for the plotting position of an expected order
statistic, for example. The plotting positions are given
by (i - a)/(nobs - 2*a + 1) for i in range(0,nobs+1)
scale : float
Scale parameter for dist
fit : boolean
If fit is false, loc, scale, and distargs are passed to the
distribution. If fit is True then the parameters for dist
are fit automatically using dist.fit. The quantiles are formed
from the standardized data, after subtracting the fitted loc
and dividing by the fitted scale.
See Also
--------
scipy.stats.probplot
Notes
-----
1) Depends on matplotlib.
2) If `fit` is True then the parameters are fit using the
distribution's `fit()` method.
3) The call signatures for the `qqplot`, `ppplot`, and `probplot`
methods are similar, so examples 1 through 4 apply to all
three methods.
4) The three plotting methods are summarized below:
ppplot : Probability-Probability plot
Compares the sample and theoretical probabilities (percentiles).
qqplot : Quantile-Quantile plot
Compares the sample and theoretical quantiles
probplot : Probability plot
Same as a Q-Q plot, however probabilities are shown in the scale of
the theoretical distribution (x-axis) and the y-axis contains
unscaled quantiles of the sample data.
Examples
--------
>>> import statsmodels.api as sm
>>> from matplotlib import pyplot as plt
>>> # example 1
>>> data = sm.datasets.longley.load()
>>> data.exog = sm.add_constant(data.exog)
>>> model = sm.OLS(data.endog, data.exog)
>>> mod_fit = model.fit()
>>> res = mod_fit.resid # residuals
>>> probplot = sm.ProbPlot(res)
>>> probplot.qqplot()
>>> plt.show()
qqplot of the residuals against quantiles of t-distribution with 4
degrees of freedom:
>>> # example 2
>>> import scipy.stats as stats
>>> probplot = sm.ProbPlot(res, stats.t, distargs=(4,))
>>> fig = probplot.qqplot()
>>> plt.show()
qqplot against same as above, but with mean 3 and std 10:
>>> # example 3
>>> probplot = sm.ProbPlot(res, stats.t, distargs=(4,), loc=3, scale=10)
>>> fig = probplot.qqplot()
>>> plt.show()
Automatically determine parameters for t distribution including the
loc and scale:
>>> # example 4
>>> probplot = sm.ProbPlot(res, stats.t, fit=True)
>>> fig = probplot.qqplot(line='45')
>>> plt.show()
    A second `ProbPlot` object can be used to compare two separate sample
sets by using the `other` kwarg in the `qqplot` and `ppplot` methods.
>>> # example 5
>>> import numpy as np
>>> x = np.random.normal(loc=8.25, scale=2.75, size=37)
>>> y = np.random.normal(loc=8.75, scale=3.25, size=37)
>>> pp_x = sm.ProbPlot(x, fit=True)
>>> pp_y = sm.ProbPlot(y, fit=True)
>>> fig = pp_x.qqplot(line='45', other=pp_y)
>>> plt.show()
The following plot displays some options, follow the link to see the
code.
.. plot:: plots/graphics_gofplots_qqplot.py
"""
def __init__(self, data, dist=stats.norm, fit=False,
distargs=(), a=0, loc=0, scale=1):
self.data = data
self.a = a
self.nobs = data.shape[0]
self.distargs = distargs
self.fit = fit
if isinstance(dist, string_types):
dist = getattr(stats, dist)
self.fit_params = dist.fit(data)
if fit:
self.loc = self.fit_params[-2]
self.scale = self.fit_params[-1]
if len(self.fit_params) > 2:
self.dist = dist(*self.fit_params[:-2],
**dict(loc = 0, scale = 1))
else:
self.dist = dist(loc=0, scale=1)
elif distargs or loc == 0 or scale == 1:
self.dist = dist(*distargs, **dict(loc=loc, scale=scale))
self.loc = loc
self.scale = scale
else:
self.dist = dist
self.loc = loc
self.scale = scale
        # properties
self._cache = resettable_cache()
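        # The quantile/percentile attributes below are cached properties and
        # are only computed on first access.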
@cache_readonly
def theoretical_percentiles(self):
return plotting_pos(self.nobs, self.a)
@cache_readonly
def theoretical_quantiles(self):
try:
return self.dist.ppf(self.theoretical_percentiles)
except TypeError:
            msg = '{0} requires more parameters to compute ppf'.format(
                self.dist.name)
            raise TypeError(msg)
except:
msg = 'failed to compute the ppf of {0}'.format(self.dist.name,)
raise
@cache_readonly
def sorted_data(self):
sorted_data = np.array(self.data, copy=True)
sorted_data.sort()
return sorted_data
@cache_readonly
def sample_quantiles(self):
if self.fit and self.loc != 0 and self.scale != 1:
return (self.sorted_data-self.loc)/self.scale
else:
return self.sorted_data
@cache_readonly
def sample_percentiles(self):
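        # Standardize the sorted data with the fitted loc/scale, then map it
        # through the distribution's CDF to get sample probabilities.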
quantiles = \
(self.sorted_data - self.fit_params[-2])/self.fit_params[-1]
return self.dist.cdf(quantiles)
def ppplot(self, xlabel=None, ylabel=None, line=None, other=None,
ax=None, **plotkwargs):
"""
P-P plot of the percentiles (probabilities) of x versus the
        probabilities (percentiles) of a distribution.
Parameters
----------
xlabel, ylabel : str or None, optional
            User-provided labels for the x-axis and y-axis. If None (default),
other values are used depending on the status of the kwarg `other`.
        line : str {'45', 's', 'r', 'q'} or None, optional
Options for the reference line to which the data is compared:
- '45' - 45-degree line
- 's' - standardized line, the expected order statistics are scaled
by the standard deviation of the given sample and have the mean
added to them
- 'r' - A regression line is fit
- 'q' - A line is fit through the quartiles.
- None - by default no reference line is added to the plot.
other : `ProbPlot` instance, array-like, or None, optional
If provided, the sample quantiles of this `ProbPlot` instance are
plotted against the sample quantiles of the `other` `ProbPlot`
instance. If an array-like object is provided, it will be turned
into a `ProbPlot` instance using default parameters. If not provided
(default), the theoretical quantiles are used.
ax : Matplotlib AxesSubplot instance, optional
If given, this subplot is used to plot in instead of a new figure
being created.
**plotkwargs : additional matplotlib arguments to be passed to the
`plot` command.
Returns
-------
fig : Matplotlib figure instance
If `ax` is None, the created figure. Otherwise the figure to which
`ax` is connected.
"""
if other is not None:
check_other = isinstance(other, ProbPlot)
if not check_other:
other = ProbPlot(other)
fig, ax = _do_plot(other.sample_percentiles,
self.sample_percentiles,
self.dist, ax=ax, line=line,
**plotkwargs)
if xlabel is None:
xlabel = 'Probabilities of 2nd Sample'
if ylabel is None:
ylabel = 'Probabilities of 1st Sample'
else:
fig, ax = _do_plot(self.theoretical_percentiles,
self.sample_percentiles,
self.dist, ax=ax, line=line,
**plotkwargs)
if xlabel is None:
xlabel = "Theoretical Probabilities"
if ylabel is None:
ylabel = "Sample Probabilities"
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
ax.set_xlim([0.0, 1.0])
ax.set_ylim([0.0, 1.0])
return fig
def qqplot(self, xlabel=None, ylabel=None, line=None, other=None,
ax=None, **plotkwargs):
"""
Q-Q plot of the quantiles of x versus the quantiles/ppf of a
distribution or the quantiles of another `ProbPlot` instance.
Parameters
----------
xlabel, ylabel : str or None, optional
            User-provided labels for the x-axis and y-axis. If None (default),
            other values are used depending on the status of the kwarg `other`.
        line : str {'45', 's', 'r', 'q'} or None, optional
Options for the reference line to which the data is compared:
- '45' - 45-degree line
- 's' - standardized line, the expected order statistics are scaled
by the standard deviation of the given sample and have the mean
added to them
- 'r' - A regression line is fit
- 'q' - A line is fit through the quartiles.
- None - by default no reference line is added to the plot.
other : `ProbPlot` instance, array-like, or None, optional
If provided, the sample quantiles of this `ProbPlot` instance are
plotted against the sample quantiles of the `other` `ProbPlot`
instance. If an array-like object is provided, it will be turned
into a `ProbPlot` instance using default parameters. If not
provided (default), the theoretical quantiles are used.
ax : Matplotlib AxesSubplot instance, optional
If given, this subplot is used to plot in instead of a new figure
being created.
**plotkwargs : additional matplotlib arguments to be passed to the
`plot` command.
Returns
-------
fig : Matplotlib figure instance
If `ax` is None, the created figure. Otherwise the figure to which
`ax` is connected.
"""
if other is not None:
check_other = isinstance(other, ProbPlot)
if not check_other:
other = ProbPlot(other)
fig, ax = _do_plot(other.sample_quantiles,
self.sample_quantiles,
self.dist, ax=ax, line=line,
**plotkwargs)
if xlabel is None:
xlabel = 'Quantiles of 2nd Sample'
if ylabel is None:
ylabel = 'Quantiles of 1st Sample'
else:
fig, ax = _do_plot(self.theoretical_quantiles,
self.sample_quantiles,
self.dist, ax=ax, line=line,
**plotkwargs)
if xlabel is None:
xlabel = "Theoretical Quantiles"
if ylabel is None:
ylabel = "Sample Quantiles"
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
return fig
def probplot(self, xlabel=None, ylabel=None, line=None,
exceed=False, ax=None, **plotkwargs):
"""
Probability plot of the unscaled quantiles of x versus the
        probabilities of a distribution (not to be confused with a P-P plot).
The x-axis is scaled linearly with the quantiles, but the probabilities
are used to label the axis.
Parameters
----------
xlabel, ylabel : str or None, optional
            User-provided labels for the x-axis and y-axis. If None (default),
            other values are used depending on the status of the kwarg `other`.
        line : str {'45', 's', 'r', 'q'} or None, optional
Options for the reference line to which the data is compared:
- '45' - 45-degree line
- 's' - standardized line, the expected order statistics are scaled
by the standard deviation of the given sample and have the mean
added to them
- 'r' - A regression line is fit
- 'q' - A line is fit through the quartiles.
- None - by default no reference line is added to the plot.
exceed : boolean, optional
- If False (default) the raw sample quantiles are plotted against
              the theoretical quantiles, showing the probability that a sample
will not exceed a given value
- If True, the theoretical quantiles are flipped such that the
figure displays the probability that a sample will exceed a
given value.
ax : Matplotlib AxesSubplot instance, optional
If given, this subplot is used to plot in instead of a new figure
being created.
**plotkwargs : additional matplotlib arguments to be passed to the
`plot` command.
Returns
-------
fig : Matplotlib figure instance
If `ax` is None, the created figure. Otherwise the figure to which
`ax` is connected.
"""
if exceed:
fig, ax = _do_plot(self.theoretical_quantiles[::-1],
self.sorted_data,
self.dist, ax=ax, line=line,
**plotkwargs)
if xlabel is None:
xlabel = 'Probability of Exceedance (%)'
else:
fig, ax = _do_plot(self.theoretical_quantiles,
self.sorted_data,
self.dist, ax=ax, line=line,
**plotkwargs)
if xlabel is None:
xlabel = 'Non-exceedance Probability (%)'
if ylabel is None:
ylabel = "Sample Quantiles"
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
_fmt_probplot_axis(ax, self.dist, self.nobs)
return fig
def qqplot(data, dist=stats.norm, distargs=(), a=0, loc=0, scale=1, fit=False,
line=None, ax=None):
"""
Q-Q plot of the quantiles of x versus the quantiles/ppf of a distribution.
Can take arguments specifying the parameters for dist or fit them
automatically. (See fit under Parameters.)
Parameters
----------
data : array-like
1d data array
dist : A scipy.stats or statsmodels distribution
Compare x against dist. The default
is scipy.stats.distributions.norm (a standard normal).
distargs : tuple
A tuple of arguments passed to dist to specify it fully
so dist.ppf may be called.
loc : float
Location parameter for dist
a : float
Offset for the plotting position of an expected order statistic, for
example. The plotting positions are given by (i - a)/(nobs - 2*a + 1)
for i in range(0,nobs+1)
scale : float
Scale parameter for dist
fit : boolean
If fit is false, loc, scale, and distargs are passed to the
distribution. If fit is True then the parameters for dist
are fit automatically using dist.fit. The quantiles are formed
from the standardized data, after subtracting the fitted loc
and dividing by the fitted scale.
    line : str {'45', 's', 'r', 'q'} or None
Options for the reference line to which the data is compared:
- '45' - 45-degree line
- 's' - standardized line, the expected order statistics are scaled
by the standard deviation of the given sample and have the mean
added to them
- 'r' - A regression line is fit
- 'q' - A line is fit through the quartiles.
- None - by default no reference line is added to the plot.
ax : Matplotlib AxesSubplot instance, optional
If given, this subplot is used to plot in instead of a new figure being
created.
Returns
-------
fig : Matplotlib figure instance
If `ax` is None, the created figure. Otherwise the figure to which
`ax` is connected.
See Also
--------
scipy.stats.probplot
Examples
--------
>>> import statsmodels.api as sm
>>> from matplotlib import pyplot as plt
>>> data = sm.datasets.longley.load()
>>> data.exog = sm.add_constant(data.exog)
>>> mod_fit = sm.OLS(data.endog, data.exog).fit()
>>> res = mod_fit.resid # residuals
>>> fig = sm.qqplot(res)
>>> plt.show()
qqplot of the residuals against quantiles of t-distribution with 4 degrees
of freedom:
>>> import scipy.stats as stats
>>> fig = sm.qqplot(res, stats.t, distargs=(4,))
>>> plt.show()
qqplot against same as above, but with mean 3 and std 10:
>>> fig = sm.qqplot(res, stats.t, distargs=(4,), loc=3, scale=10)
>>> plt.show()
Automatically determine parameters for t distribution including the
loc and scale:
>>> fig = sm.qqplot(res, stats.t, fit=True, line='45')
>>> plt.show()
The following plot displays some options, follow the link to see the code.
.. plot:: plots/graphics_gofplots_qqplot.py
Notes
-----
Depends on matplotlib. If `fit` is True then the parameters are fit using
the distribution's fit() method.
"""
probplot = ProbPlot(data, dist=dist, distargs=distargs,
fit=fit, a=a, loc=loc, scale=scale)
fig = probplot.qqplot(ax=ax, line=line)
return fig
def qqplot_2samples(data1, data2, xlabel=None, ylabel=None, line=None, ax=None):
"""
Q-Q Plot of two samples' quantiles.
Can take either two `ProbPlot` instances or two array-like objects. In the
case of the latter, both inputs will be converted to `ProbPlot` instances
using only the default values - so use `ProbPlot` instances if
finer-grained control of the quantile computations is required.
Parameters
----------
data1, data2 : array-like (1d) or `ProbPlot` instances
xlabel, ylabel : str or None
User-provided labels for the x-axis and y-axis. If None (default),
other values are used.
    line : str {'45', 's', 'r', 'q'} or None
Options for the reference line to which the data is compared:
- '45' - 45-degree line
- 's' - standardized line, the expected order statistics are scaled
by the standard deviation of the given sample and have the mean
added to them
- 'r' - A regression line is fit
- 'q' - A line is fit through the quartiles.
- None - by default no reference line is added to the plot.
ax : Matplotlib AxesSubplot instance, optional
If given, this subplot is used to plot in instead of a new figure being
created.
Returns
-------
fig : Matplotlib figure instance
If `ax` is None, the created figure. Otherwise the figure to which
`ax` is connected.
See Also
--------
scipy.stats.probplot
Examples
--------
>>> x = np.random.normal(loc=8.5, scale=2.5, size=37)
>>> y = np.random.normal(loc=8.0, scale=3.0, size=37)
>>> pp_x = sm.ProbPlot(x)
>>> pp_y = sm.ProbPlot(y)
    >>> fig = qqplot_2samples(pp_x, pp_y)
Notes
-----
1) Depends on matplotlib.
2) If `data1` and `data2` are not `ProbPlot` instances, instances will be
created using the default parameters. Therefore, it is recommended to use
`ProbPlot` instance if fine-grained control is needed in the computation
of the quantiles.
"""
check_data1 = isinstance(data1, ProbPlot)
check_data2 = isinstance(data2, ProbPlot)
if not check_data1 and not check_data2:
data1 = ProbPlot(data1)
data2 = ProbPlot(data2)
fig = data1.qqplot(xlabel=xlabel, ylabel=ylabel,
line=line, other=data2, ax=ax)
return fig
def qqline(ax, line, x=None, y=None, dist=None, fmt='r-'):
"""
Plot a reference line for a qqplot.
Parameters
----------
ax : matplotlib axes instance
The axes on which to plot the line
line : str {'45','r','s','q'}
        Options for the reference line to which the data is compared:
- '45' - 45-degree line
- 's' - standardized line, the expected order statistics are scaled by
the standard deviation of the given sample and have the mean
added to them
- 'r' - A regression line is fit
- 'q' - A line is fit through the quartiles.
- None - By default no reference line is added to the plot.
x : array
X data for plot. Not needed if line is '45'.
y : array
Y data for plot. Not needed if line is '45'.
dist : scipy.stats.distribution
A scipy.stats distribution, needed if line is 'q'.
Notes
-----
There is no return value. The line is plotted on the given `ax`.
"""
if line == '45':
end_pts = lzip(ax.get_xlim(), ax.get_ylim())
end_pts[0] = min(end_pts[0])
end_pts[1] = max(end_pts[1])
ax.plot(end_pts, end_pts, fmt)
ax.set_xlim(end_pts)
ax.set_ylim(end_pts)
return # does this have any side effects?
if x is None and y is None:
raise ValueError("If line is not 45, x and y cannot be None.")
elif line == 'r':
# could use ax.lines[0].get_xdata(), get_ydata(),
# but don't know axes are 'clean'
y = OLS(y, add_constant(x)).fit().fittedvalues
        ax.plot(x, y, fmt)
    elif line == 's':
        m, b = y.std(), y.mean()
        ref_line = x * m + b
ax.plot(x, ref_line, fmt)
elif line == 'q':
_check_for_ppf(dist)
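        # Fit a line through the first and third quartiles: the slope is the
        # sample IQR over the theoretical IQR; the intercept anchors the line
        # at the 25th-percentile point.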
q25 = stats.scoreatpercentile(y, 25)
q75 = stats.scoreatpercentile(y, 75)
theoretical_quartiles = dist.ppf([0.25, 0.75])
m = (q75 - q25) / np.diff(theoretical_quartiles)
b = q25 - m*theoretical_quartiles[0]
ax.plot(x, m*x + b, fmt)
#about 10x faster than plotting_position in sandbox and mstats
def plotting_pos(nobs, a):
"""
Generates sequence of plotting positions
Parameters
----------
nobs : int
Number of probability points to plot
a : float
Offset for the plotting position of an expected order statistic, for
example.
Returns
-------
plotting_positions : array
The plotting positions
Notes
-----
The plotting positions are given by (i - a)/(nobs - 2*a + 1) for i in
range(0,nobs+1)
See also
--------
scipy.stats.mstats.plotting_positions
"""
    return (np.arange(1., nobs + 1) - a) / (nobs - 2 * a + 1)
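# Worked example (illustrative): with nobs=5 and a=0.5 the positions are
# (i - 0.5) / 5 for i in 1..5, i.e. [0.1, 0.3, 0.5, 0.7, 0.9].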
def _fmt_probplot_axis(ax, dist, nobs):
"""
Formats a theoretical quantile axis to display the corresponding
probabilities on the quantiles' scale.
    Parameters
    ----------
ax : Matplotlib AxesSubplot instance, optional
The axis to be formatted
nobs : scalar
        Number of observations in the sample
dist : scipy.stats.distribution
        A scipy.stats distribution sufficiently specified to implement its
ppf() method.
Returns
-------
There is no return value. This operates on `ax` in place
"""
_check_for_ppf(dist)
if nobs < 50:
axis_probs = np.array([1,2,5,10,20,30,40,50,60,
70,80,90,95,98,99,])/100.0
elif nobs < 500:
axis_probs = np.array([0.1,0.2,0.5,1,2,5,10,20,30,40,50,60,70,
80,90,95,98,99,99.5,99.8,99.9])/100.0
else:
axis_probs = np.array([0.01,0.02,0.05,0.1,0.2,0.5,1,2,5,10,
20,30,40,50,60,70,80,90,95,98,99,99.5,
99.8,99.9,99.95,99.98,99.99])/100.0
axis_qntls = dist.ppf(axis_probs)
ax.set_xticks(axis_qntls)
ax.set_xticklabels(axis_probs*100, rotation=45,
rotation_mode='anchor',
horizontalalignment='right',
verticalalignment='center')
ax.set_xlim([axis_qntls.min(), axis_qntls.max()])
def _do_plot(x, y, dist=None, line=False, ax=None, fmt='bo', **kwargs):
"""
Boiler plate plotting function for the `ppplot`, `qqplot`, and
`probplot` methods of the `ProbPlot` class
    Parameters
    ----------
x, y : array-like
Data to be plotted
dist : scipy.stats.distribution
A scipy.stats distribution, needed if `line` is 'q'.
    line : str {'45', 's', 'r', 'q'} or None
Options for the reference line to which the data is compared.
ax : Matplotlib AxesSubplot instance, optional
If given, this subplot is used to plot in instead of a new figure being
created.
fmt : str, optional
matplotlib-compatible formatting string for the data markers
kwargs : keywords
These are passed to matplotlib.plot
Returns
-------
fig : Matplotlib Figure instance
ax : Matplotlib AxesSubplot instance (see Parameters)
"""
fig, ax = utils.create_mpl_ax(ax)
ax.set_xmargin(0.02)
ax.plot(x, y, fmt, **kwargs)
if line:
if line not in ['r','q','45','s']:
msg = "%s option for line not understood" % line
raise ValueError(msg)
qqline(ax, line, x=x, y=y, dist=dist)
return fig, ax
def _check_for_ppf(dist):
if not hasattr(dist, 'ppf'):
raise ValueError("distribution must have a ppf method")
|
bsd-3-clause
|
glennq/scikit-learn
|
sklearn/tests/test_naive_bayes.py
|
72
|
19944
|
import pickle
from io import BytesIO
import numpy as np
import scipy.sparse
from sklearn.datasets import load_digits, load_iris
from sklearn.model_selection import train_test_split
from sklearn.model_selection import cross_val_score
from sklearn.externals.six.moves import zip
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_greater
from sklearn.naive_bayes import GaussianNB, BernoulliNB, MultinomialNB
# Data is just 6 separable points in the plane
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]])
y = np.array([1, 1, 1, 2, 2, 2])
# A bit more random tests
rng = np.random.RandomState(0)
X1 = rng.normal(size=(10, 3))
y1 = (rng.normal(size=(10)) > 0).astype(np.int)
# Data is 6 random integer points in a 100 dimensional space classified to
# three classes.
X2 = rng.randint(5, size=(6, 100))
y2 = np.array([1, 1, 2, 2, 3, 3])
def test_gnb():
# Gaussian Naive Bayes classification.
# This checks that GaussianNB implements fit and predict and returns
# correct values for a simple toy dataset.
clf = GaussianNB()
y_pred = clf.fit(X, y).predict(X)
assert_array_equal(y_pred, y)
y_pred_proba = clf.predict_proba(X)
y_pred_log_proba = clf.predict_log_proba(X)
assert_array_almost_equal(np.log(y_pred_proba), y_pred_log_proba, 8)
# Test whether label mismatch between target y and classes raises
# an Error
# FIXME Remove this test once the more general partial_fit tests are merged
assert_raises(ValueError, GaussianNB().partial_fit, X, y, classes=[0, 1])
def test_gnb_prior():
# Test whether class priors are properly set.
clf = GaussianNB().fit(X, y)
assert_array_almost_equal(np.array([3, 3]) / 6.0,
clf.class_prior_, 8)
clf.fit(X1, y1)
# Check that the class priors sum to 1
assert_array_almost_equal(clf.class_prior_.sum(), 1)
def test_gnb_sample_weight():
"""Test whether sample weights are properly used in GNB. """
# Sample weights all being 1 should not change results
sw = np.ones(6)
clf = GaussianNB().fit(X, y)
clf_sw = GaussianNB().fit(X, y, sw)
assert_array_almost_equal(clf.theta_, clf_sw.theta_)
assert_array_almost_equal(clf.sigma_, clf_sw.sigma_)
# Fitting twice with half sample-weights should result
# in same result as fitting once with full weights
sw = rng.rand(y.shape[0])
clf1 = GaussianNB().fit(X, y, sample_weight=sw)
clf2 = GaussianNB().partial_fit(X, y, classes=[1, 2], sample_weight=sw / 2)
clf2.partial_fit(X, y, sample_weight=sw / 2)
assert_array_almost_equal(clf1.theta_, clf2.theta_)
assert_array_almost_equal(clf1.sigma_, clf2.sigma_)
# Check that duplicate entries and correspondingly increased sample
# weights yield the same result
ind = rng.randint(0, X.shape[0], 20)
sample_weight = np.bincount(ind, minlength=X.shape[0])
clf_dupl = GaussianNB().fit(X[ind], y[ind])
clf_sw = GaussianNB().fit(X, y, sample_weight)
assert_array_almost_equal(clf_dupl.theta_, clf_sw.theta_)
assert_array_almost_equal(clf_dupl.sigma_, clf_sw.sigma_)
def test_gnb_neg_priors():
"""Test whether an error is raised in case of negative priors"""
clf = GaussianNB(priors=np.array([-1., 2.]))
assert_raises(ValueError, clf.fit, X, y)
def test_gnb_priors():
"""Test whether the class prior override is properly used"""
clf = GaussianNB(priors=np.array([0.3, 0.7])).fit(X, y)
assert_array_almost_equal(clf.predict_proba([[-0.1, -0.1]]),
np.array([[0.825303662161683,
0.174696337838317]]), 8)
assert_array_equal(clf.class_prior_, np.array([0.3, 0.7]))
def test_gnb_wrong_nb_priors():
""" Test whether an error is raised if the number of prior is different
from the number of class"""
clf = GaussianNB(priors=np.array([.25, .25, .25, .25]))
assert_raises(ValueError, clf.fit, X, y)
def test_gnb_prior_greater_one():
"""Test if an error is raised if the sum of prior greater than one"""
clf = GaussianNB(priors=np.array([2., 1.]))
assert_raises(ValueError, clf.fit, X, y)
def test_gnb_prior_large_bias():
"""Test if good prediction when class prior favor largely one class"""
clf = GaussianNB(priors=np.array([0.01, 0.99]))
clf.fit(X, y)
assert_equal(clf.predict([[-0.1, -0.1]]), np.array([2]))
def test_check_update_with_no_data():
""" Test when the partial fit is called without any data"""
# Create an empty array
prev_points = 100
mean = 0.
var = 1.
x_empty = np.empty((0, X.shape[1]))
tmean, tvar = GaussianNB._update_mean_variance(prev_points, mean,
var, x_empty)
assert_equal(tmean, mean)
assert_equal(tvar, var)
def test_gnb_pfit_wrong_nb_features():
"""Test whether an error is raised when the number of feature changes
between two partial fit"""
clf = GaussianNB()
# Fit for the first time the GNB
clf.fit(X, y)
    # Partial fit a second time with an X that has a different number of features
assert_raises(ValueError, clf.partial_fit, np.hstack((X, X)), y)
def test_discrete_prior():
# Test whether class priors are properly set.
for cls in [BernoulliNB, MultinomialNB]:
clf = cls().fit(X2, y2)
assert_array_almost_equal(np.log(np.array([2, 2, 2]) / 6.0),
clf.class_log_prior_, 8)
def test_mnnb():
# Test Multinomial Naive Bayes classification.
# This checks that MultinomialNB implements fit and predict and returns
# correct values for a simple toy dataset.
for X in [X2, scipy.sparse.csr_matrix(X2)]:
# Check the ability to predict the learning set.
clf = MultinomialNB()
assert_raises(ValueError, clf.fit, -X, y2)
y_pred = clf.fit(X, y2).predict(X)
assert_array_equal(y_pred, y2)
# Verify that np.log(clf.predict_proba(X)) gives the same results as
# clf.predict_log_proba(X)
y_pred_proba = clf.predict_proba(X)
y_pred_log_proba = clf.predict_log_proba(X)
assert_array_almost_equal(np.log(y_pred_proba), y_pred_log_proba, 8)
# Check that incremental fitting yields the same results
clf2 = MultinomialNB()
clf2.partial_fit(X[:2], y2[:2], classes=np.unique(y2))
clf2.partial_fit(X[2:5], y2[2:5])
clf2.partial_fit(X[5:], y2[5:])
y_pred2 = clf2.predict(X)
assert_array_equal(y_pred2, y2)
y_pred_proba2 = clf2.predict_proba(X)
y_pred_log_proba2 = clf2.predict_log_proba(X)
assert_array_almost_equal(np.log(y_pred_proba2), y_pred_log_proba2, 8)
assert_array_almost_equal(y_pred_proba2, y_pred_proba)
assert_array_almost_equal(y_pred_log_proba2, y_pred_log_proba)
# Partial fit on the whole data at once should be the same as fit too
clf3 = MultinomialNB()
clf3.partial_fit(X, y2, classes=np.unique(y2))
y_pred3 = clf3.predict(X)
assert_array_equal(y_pred3, y2)
y_pred_proba3 = clf3.predict_proba(X)
y_pred_log_proba3 = clf3.predict_log_proba(X)
assert_array_almost_equal(np.log(y_pred_proba3), y_pred_log_proba3, 8)
assert_array_almost_equal(y_pred_proba3, y_pred_proba)
assert_array_almost_equal(y_pred_log_proba3, y_pred_log_proba)
def check_partial_fit(cls):
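    # fit on the full batch, partial_fit on the full batch, and partial_fit
    # in two chunks should all accumulate identical sufficient statistics
    # (class_count_ and feature_count_), which the checks below verify.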
clf1 = cls()
clf1.fit([[0, 1], [1, 0]], [0, 1])
clf2 = cls()
clf2.partial_fit([[0, 1], [1, 0]], [0, 1], classes=[0, 1])
assert_array_equal(clf1.class_count_, clf2.class_count_)
assert_array_equal(clf1.feature_count_, clf2.feature_count_)
clf3 = cls()
clf3.partial_fit([[0, 1]], [0], classes=[0, 1])
clf3.partial_fit([[1, 0]], [1])
assert_array_equal(clf1.class_count_, clf3.class_count_)
assert_array_equal(clf1.feature_count_, clf3.feature_count_)
def test_discretenb_partial_fit():
for cls in [MultinomialNB, BernoulliNB]:
yield check_partial_fit, cls
def test_gnb_partial_fit():
clf = GaussianNB().fit(X, y)
clf_pf = GaussianNB().partial_fit(X, y, np.unique(y))
assert_array_almost_equal(clf.theta_, clf_pf.theta_)
assert_array_almost_equal(clf.sigma_, clf_pf.sigma_)
assert_array_almost_equal(clf.class_prior_, clf_pf.class_prior_)
clf_pf2 = GaussianNB().partial_fit(X[0::2, :], y[0::2], np.unique(y))
clf_pf2.partial_fit(X[1::2], y[1::2])
assert_array_almost_equal(clf.theta_, clf_pf2.theta_)
assert_array_almost_equal(clf.sigma_, clf_pf2.sigma_)
assert_array_almost_equal(clf.class_prior_, clf_pf2.class_prior_)
def test_discretenb_pickle():
# Test picklability of discrete naive Bayes classifiers
for cls in [BernoulliNB, MultinomialNB, GaussianNB]:
clf = cls().fit(X2, y2)
y_pred = clf.predict(X2)
store = BytesIO()
pickle.dump(clf, store)
clf = pickle.load(BytesIO(store.getvalue()))
assert_array_equal(y_pred, clf.predict(X2))
if cls is not GaussianNB:
# TODO re-enable me when partial_fit is implemented for GaussianNB
# Test pickling of estimator trained with partial_fit
clf2 = cls().partial_fit(X2[:3], y2[:3], classes=np.unique(y2))
clf2.partial_fit(X2[3:], y2[3:])
store = BytesIO()
pickle.dump(clf2, store)
clf2 = pickle.load(BytesIO(store.getvalue()))
assert_array_equal(y_pred, clf2.predict(X2))
def test_input_check_fit():
# Test input checks for the fit method
for cls in [BernoulliNB, MultinomialNB, GaussianNB]:
# check shape consistency for number of samples at fit time
assert_raises(ValueError, cls().fit, X2, y2[:-1])
# check shape consistency for number of input features at predict time
clf = cls().fit(X2, y2)
assert_raises(ValueError, clf.predict, X2[:, :-1])
def test_input_check_partial_fit():
for cls in [BernoulliNB, MultinomialNB]:
# check shape consistency
assert_raises(ValueError, cls().partial_fit, X2, y2[:-1],
classes=np.unique(y2))
# classes is required for first call to partial fit
assert_raises(ValueError, cls().partial_fit, X2, y2)
# check consistency of consecutive classes values
clf = cls()
clf.partial_fit(X2, y2, classes=np.unique(y2))
assert_raises(ValueError, clf.partial_fit, X2, y2,
classes=np.arange(42))
# check consistency of input shape for partial_fit
assert_raises(ValueError, clf.partial_fit, X2[:, :-1], y2)
# check consistency of input shape for predict
assert_raises(ValueError, clf.predict, X2[:, :-1])
def test_discretenb_predict_proba():
# Test discrete NB classes' probability scores
# The 100s below distinguish Bernoulli from multinomial.
# FIXME: write a test to show this.
X_bernoulli = [[1, 100, 0], [0, 1, 0], [0, 100, 1]]
X_multinomial = [[0, 1], [1, 3], [4, 0]]
# test binary case (1-d output)
y = [0, 0, 2] # 2 is regression test for binary case, 02e673
for cls, X in zip([BernoulliNB, MultinomialNB],
[X_bernoulli, X_multinomial]):
clf = cls().fit(X, y)
assert_equal(clf.predict(X[-1:]), 2)
assert_equal(clf.predict_proba([X[0]]).shape, (1, 2))
assert_array_almost_equal(clf.predict_proba(X[:2]).sum(axis=1),
np.array([1., 1.]), 6)
# test multiclass case (2-d output, must sum to one)
y = [0, 1, 2]
for cls, X in zip([BernoulliNB, MultinomialNB],
[X_bernoulli, X_multinomial]):
clf = cls().fit(X, y)
assert_equal(clf.predict_proba(X[0:1]).shape, (1, 3))
assert_equal(clf.predict_proba(X[:2]).shape, (2, 3))
assert_almost_equal(np.sum(clf.predict_proba([X[1]])), 1)
assert_almost_equal(np.sum(clf.predict_proba([X[-1]])), 1)
assert_almost_equal(np.sum(np.exp(clf.class_log_prior_)), 1)
assert_almost_equal(np.sum(np.exp(clf.intercept_)), 1)
def test_discretenb_uniform_prior():
# Test whether discrete NB classes fit a uniform prior
# when fit_prior=False and class_prior=None
for cls in [BernoulliNB, MultinomialNB]:
clf = cls()
clf.set_params(fit_prior=False)
clf.fit([[0], [0], [1]], [0, 0, 1])
prior = np.exp(clf.class_log_prior_)
assert_array_equal(prior, np.array([.5, .5]))
def test_discretenb_provide_prior():
# Test whether discrete NB classes use provided prior
for cls in [BernoulliNB, MultinomialNB]:
clf = cls(class_prior=[0.5, 0.5])
clf.fit([[0], [0], [1]], [0, 0, 1])
prior = np.exp(clf.class_log_prior_)
assert_array_equal(prior, np.array([.5, .5]))
# Inconsistent number of classes with prior
assert_raises(ValueError, clf.fit, [[0], [1], [2]], [0, 1, 2])
assert_raises(ValueError, clf.partial_fit, [[0], [1]], [0, 1],
classes=[0, 1, 1])
def test_discretenb_provide_prior_with_partial_fit():
# Test whether discrete NB classes use provided prior
# when using partial_fit
iris = load_iris()
iris_data1, iris_data2, iris_target1, iris_target2 = train_test_split(
iris.data, iris.target, test_size=0.4, random_state=415)
for cls in [BernoulliNB, MultinomialNB]:
for prior in [None, [0.3, 0.3, 0.4]]:
clf_full = cls(class_prior=prior)
clf_full.fit(iris.data, iris.target)
clf_partial = cls(class_prior=prior)
clf_partial.partial_fit(iris_data1, iris_target1,
classes=[0, 1, 2])
clf_partial.partial_fit(iris_data2, iris_target2)
assert_array_almost_equal(clf_full.class_log_prior_,
clf_partial.class_log_prior_)
def test_sample_weight_multiclass():
for cls in [BernoulliNB, MultinomialNB]:
# check shape consistency for number of samples at fit time
yield check_sample_weight_multiclass, cls
def check_sample_weight_multiclass(cls):
X = [
[0, 0, 1],
[0, 1, 1],
[0, 1, 1],
[1, 0, 0],
]
y = [0, 0, 1, 2]
sample_weight = np.array([1, 1, 2, 2], dtype=np.float64)
sample_weight /= sample_weight.sum()
clf = cls().fit(X, y, sample_weight=sample_weight)
assert_array_equal(clf.predict(X), [0, 1, 1, 2])
# Check sample weight using the partial_fit method
clf = cls()
clf.partial_fit(X[:2], y[:2], classes=[0, 1, 2],
sample_weight=sample_weight[:2])
clf.partial_fit(X[2:3], y[2:3], sample_weight=sample_weight[2:3])
clf.partial_fit(X[3:], y[3:], sample_weight=sample_weight[3:])
assert_array_equal(clf.predict(X), [0, 1, 1, 2])
def test_sample_weight_mnb():
clf = MultinomialNB()
clf.fit([[1, 2], [1, 2], [1, 0]],
[0, 0, 1],
sample_weight=[1, 1, 4])
assert_array_equal(clf.predict([[1, 0]]), [1])
positive_prior = np.exp(clf.intercept_[0])
assert_array_almost_equal([1 - positive_prior, positive_prior],
[1 / 3., 2 / 3.])
def test_coef_intercept_shape():
# coef_ and intercept_ should have shapes as in other linear models.
# Non-regression test for issue #2127.
X = [[1, 0, 0], [1, 1, 1]]
y = [1, 2] # binary classification
for clf in [MultinomialNB(), BernoulliNB()]:
clf.fit(X, y)
assert_equal(clf.coef_.shape, (1, 3))
assert_equal(clf.intercept_.shape, (1,))
def test_check_accuracy_on_digits():
    # Non-regression test to make sure that any further refactoring /
    # optimization of the NB models does not harm the performance on a
    # slightly non-linearly separable dataset
digits = load_digits()
X, y = digits.data, digits.target
binary_3v8 = np.logical_or(digits.target == 3, digits.target == 8)
X_3v8, y_3v8 = X[binary_3v8], y[binary_3v8]
# Multinomial NB
scores = cross_val_score(MultinomialNB(alpha=10), X, y, cv=10)
assert_greater(scores.mean(), 0.86)
scores = cross_val_score(MultinomialNB(alpha=10), X_3v8, y_3v8, cv=10)
assert_greater(scores.mean(), 0.94)
# Bernoulli NB
scores = cross_val_score(BernoulliNB(alpha=10), X > 4, y, cv=10)
assert_greater(scores.mean(), 0.83)
scores = cross_val_score(BernoulliNB(alpha=10), X_3v8 > 4, y_3v8, cv=10)
assert_greater(scores.mean(), 0.92)
# Gaussian NB
scores = cross_val_score(GaussianNB(), X, y, cv=10)
assert_greater(scores.mean(), 0.77)
scores = cross_val_score(GaussianNB(), X_3v8, y_3v8, cv=10)
assert_greater(scores.mean(), 0.86)
def test_feature_log_prob_bnb():
# Test for issue #4268.
# Tests that the feature log prob value computed by BernoulliNB when
# alpha=1.0 is equal to the expression given in Manning, Raghavan,
# and Schuetze's "Introduction to Information Retrieval" book:
# http://nlp.stanford.edu/IR-book/html/htmledition/the-bernoulli-model-1.html
X = np.array([[0, 0, 0], [1, 1, 0], [0, 1, 0], [1, 0, 1], [0, 1, 0]])
Y = np.array([0, 0, 1, 2, 2])
# Fit Bernoulli NB w/ alpha = 1.0
clf = BernoulliNB(alpha=1.0)
clf.fit(X, Y)
# Manually form the (log) numerator and denominator that
# constitute P(feature presence | class)
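    # With Laplace smoothing (alpha=1) the Bernoulli estimate is
    #   P(t | c) = (count(t, c) + 1) / (count(c) + 2),
    # so feature_log_prob_ should equal log(feature_count_ + 1) minus
    # log(class_count_ + 2), which is what num and denom compute below.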
num = np.log(clf.feature_count_ + 1.0)
denom = np.tile(np.log(clf.class_count_ + 2.0), (X.shape[1], 1)).T
# Check manual estimate matches
assert_array_equal(clf.feature_log_prob_, (num - denom))
def test_bnb():
# Tests that BernoulliNB when alpha=1.0 gives the same values as
# those given for the toy example in Manning, Raghavan, and
# Schuetze's "Introduction to Information Retrieval" book:
# http://nlp.stanford.edu/IR-book/html/htmledition/the-bernoulli-model-1.html
# Training data points are:
# Chinese Beijing Chinese (class: China)
# Chinese Chinese Shanghai (class: China)
# Chinese Macao (class: China)
# Tokyo Japan Chinese (class: Japan)
# Features are Beijing, Chinese, Japan, Macao, Shanghai, and Tokyo
X = np.array([[1, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 1, 0],
[0, 1, 0, 1, 0, 0],
[0, 1, 1, 0, 0, 1]])
# Classes are China (0), Japan (1)
Y = np.array([0, 0, 0, 1])
    # Fit BernoulliNB w/ alpha = 1.0
clf = BernoulliNB(alpha=1.0)
clf.fit(X, Y)
# Check the class prior is correct
class_prior = np.array([0.75, 0.25])
assert_array_almost_equal(np.exp(clf.class_log_prior_), class_prior)
# Check the feature probabilities are correct
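    # e.g. "Chinese" (feature index 1) occurs in all 3 China documents, so
    # P(Chinese | China) = (3 + 1) / (3 + 2) = 0.8 with alpha=1 smoothing.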
feature_prob = np.array([[0.4, 0.8, 0.2, 0.4, 0.4, 0.2],
[1/3.0, 2/3.0, 2/3.0, 1/3.0, 1/3.0, 2/3.0]])
assert_array_almost_equal(np.exp(clf.feature_log_prob_), feature_prob)
# Testing data point is:
# Chinese Chinese Chinese Tokyo Japan
X_test = np.array([[0, 1, 1, 0, 0, 1]])
# Check the predictive probabilities are correct
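    # Sanity check of the Bernoulli arithmetic (prior times P(t | c) for
    # present terms and 1 - P(t | c) for absent terms):
    #   China: 0.75 * 0.8 * 0.2 * 0.2 * 0.6 ** 3 = 0.005184
    #   Japan: 0.25 * (2 / 3.) ** 6 = 16 / 729 ~ 0.0219479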
unnorm_predict_proba = np.array([[0.005183999999999999,
0.02194787379972565]])
predict_proba = unnorm_predict_proba / np.sum(unnorm_predict_proba)
assert_array_almost_equal(clf.predict_proba(X_test), predict_proba)
def test_naive_bayes_scale_invariance():
# Scaling the data should not change the prediction results
iris = load_iris()
X, y = iris.data, iris.target
labels = [GaussianNB().fit(f * X, y).predict(f * X)
for f in [1E-10, 1, 1E10]]
assert_array_equal(labels[0], labels[1])
assert_array_equal(labels[1], labels[2])
|
bsd-3-clause
|
kevin-intel/scikit-learn
|
sklearn/metrics/tests/test_score_objects.py
|
5
|
39582
|
from copy import deepcopy
import pickle
import tempfile
import shutil
import os
import numbers
from unittest.mock import Mock
from functools import partial
import numpy as np
import pytest
import joblib
from numpy.testing import assert_allclose
from sklearn.utils._testing import assert_almost_equal
from sklearn.utils._testing import assert_array_equal
from sklearn.utils._testing import ignore_warnings
from sklearn.base import BaseEstimator
from sklearn.metrics import (
accuracy_score,
balanced_accuracy_score,
average_precision_score,
brier_score_loss,
f1_score,
fbeta_score,
jaccard_score,
log_loss,
precision_score,
r2_score,
recall_score,
roc_auc_score,
top_k_accuracy_score
)
from sklearn.metrics import cluster as cluster_module
from sklearn.metrics import check_scoring
from sklearn.metrics._scorer import (_PredictScorer, _passthrough_scorer,
_MultimetricScorer,
_check_multimetric_scoring)
from sklearn.metrics import make_scorer, get_scorer, SCORERS
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import LinearSVC
from sklearn.pipeline import make_pipeline
from sklearn.cluster import KMeans
from sklearn.linear_model import Ridge, LogisticRegression, Perceptron
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.datasets import make_blobs
from sklearn.datasets import make_classification, make_regression
from sklearn.datasets import make_multilabel_classification
from sklearn.datasets import load_diabetes
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.model_selection import GridSearchCV
from sklearn.multiclass import OneVsRestClassifier
REGRESSION_SCORERS = ['explained_variance', 'r2',
'neg_mean_absolute_error', 'neg_mean_squared_error',
'neg_mean_absolute_percentage_error',
'neg_mean_squared_log_error',
'neg_median_absolute_error',
'neg_root_mean_squared_error',
'mean_absolute_error',
'mean_absolute_percentage_error',
'mean_squared_error', 'median_absolute_error',
'max_error', 'neg_mean_poisson_deviance',
'neg_mean_gamma_deviance']
CLF_SCORERS = ['accuracy', 'balanced_accuracy', 'top_k_accuracy',
'f1', 'f1_weighted', 'f1_macro', 'f1_micro',
'roc_auc', 'average_precision', 'precision',
'precision_weighted', 'precision_macro', 'precision_micro',
'recall', 'recall_weighted', 'recall_macro', 'recall_micro',
'neg_log_loss', 'neg_brier_score',
'jaccard', 'jaccard_weighted', 'jaccard_macro',
'jaccard_micro', 'roc_auc_ovr', 'roc_auc_ovo',
'roc_auc_ovr_weighted', 'roc_auc_ovo_weighted']
# All supervised cluster scorers (they behave like classification metrics)
CLUSTER_SCORERS = ["adjusted_rand_score",
"rand_score",
"homogeneity_score",
"completeness_score",
"v_measure_score",
"mutual_info_score",
"adjusted_mutual_info_score",
"normalized_mutual_info_score",
"fowlkes_mallows_score"]
MULTILABEL_ONLY_SCORERS = ['precision_samples', 'recall_samples', 'f1_samples',
'jaccard_samples']
REQUIRE_POSITIVE_Y_SCORERS = ['neg_mean_poisson_deviance',
'neg_mean_gamma_deviance']
def _require_positive_y(y):
"""Make targets strictly positive"""
offset = abs(y.min()) + 1
y = y + offset
return y
def _make_estimators(X_train, y_train, y_ml_train):
# Make estimators that make sense to test various scoring methods
sensible_regr = DecisionTreeRegressor(random_state=0)
    # some of the regression scorers require strictly positive input.
sensible_regr.fit(X_train, _require_positive_y(y_train))
sensible_clf = DecisionTreeClassifier(random_state=0)
sensible_clf.fit(X_train, y_train)
sensible_ml_clf = DecisionTreeClassifier(random_state=0)
sensible_ml_clf.fit(X_train, y_ml_train)
return dict(
[(name, sensible_regr) for name in REGRESSION_SCORERS] +
[(name, sensible_clf) for name in CLF_SCORERS] +
[(name, sensible_clf) for name in CLUSTER_SCORERS] +
[(name, sensible_ml_clf) for name in MULTILABEL_ONLY_SCORERS]
)
X_mm, y_mm, y_ml_mm = None, None, None
ESTIMATORS = None
TEMP_FOLDER = None
def setup_module():
# Create some memory mapped data
global X_mm, y_mm, y_ml_mm, TEMP_FOLDER, ESTIMATORS
TEMP_FOLDER = tempfile.mkdtemp(prefix='sklearn_test_score_objects_')
X, y = make_classification(n_samples=30, n_features=5, random_state=0)
_, y_ml = make_multilabel_classification(n_samples=X.shape[0],
random_state=0)
filename = os.path.join(TEMP_FOLDER, 'test_data.pkl')
joblib.dump((X, y, y_ml), filename)
X_mm, y_mm, y_ml_mm = joblib.load(filename, mmap_mode='r')
ESTIMATORS = _make_estimators(X_mm, y_mm, y_ml_mm)
def teardown_module():
global X_mm, y_mm, y_ml_mm, TEMP_FOLDER, ESTIMATORS
# GC closes the mmap file descriptors
X_mm, y_mm, y_ml_mm, ESTIMATORS = None, None, None, None
shutil.rmtree(TEMP_FOLDER)
class EstimatorWithoutFit:
"""Dummy estimator to test scoring validators"""
pass
class EstimatorWithFit(BaseEstimator):
"""Dummy estimator to test scoring validators"""
def fit(self, X, y):
return self
class EstimatorWithFitAndScore:
"""Dummy estimator to test scoring validators"""
def fit(self, X, y):
return self
def score(self, X, y):
return 1.0
class EstimatorWithFitAndPredict:
"""Dummy estimator to test scoring validators"""
def fit(self, X, y):
self.y = y
return self
def predict(self, X):
return self.y
class DummyScorer:
"""Dummy scorer that always returns 1."""
def __call__(self, est, X, y):
return 1
def test_all_scorers_repr():
# Test that all scorers have a working repr
for name, scorer in SCORERS.items():
repr(scorer)
def check_scoring_validator_for_single_metric_usecases(scoring_validator):
# Test all branches of single metric usecases
estimator = EstimatorWithoutFit()
pattern = (r"estimator should be an estimator implementing 'fit' method,"
r" .* was passed")
with pytest.raises(TypeError, match=pattern):
scoring_validator(estimator)
estimator = EstimatorWithFitAndScore()
estimator.fit([[1]], [1])
scorer = scoring_validator(estimator)
assert scorer is _passthrough_scorer
assert_almost_equal(scorer(estimator, [[1]], [1]), 1.0)
estimator = EstimatorWithFitAndPredict()
estimator.fit([[1]], [1])
pattern = (r"If no scoring is specified, the estimator passed should have"
r" a 'score' method\. The estimator .* does not\.")
with pytest.raises(TypeError, match=pattern):
scoring_validator(estimator)
scorer = scoring_validator(estimator, scoring="accuracy")
assert_almost_equal(scorer(estimator, [[1]], [1]), 1.0)
estimator = EstimatorWithFit()
scorer = scoring_validator(estimator, scoring="accuracy")
assert isinstance(scorer, _PredictScorer)
# Test the allow_none parameter for check_scoring alone
if scoring_validator is check_scoring:
estimator = EstimatorWithFit()
scorer = scoring_validator(estimator, allow_none=True)
assert scorer is None
@pytest.mark.parametrize(
"scoring",
(
('accuracy', ), ['precision'],
{'acc': 'accuracy', 'precision': 'precision'},
('accuracy', 'precision'),
['precision', 'accuracy'],
{'accuracy': make_scorer(accuracy_score),
'precision': make_scorer(precision_score)}
), ids=["single_tuple", "single_list", "dict_str",
"multi_tuple", "multi_list", "dict_callable"])
def test_check_scoring_and_check_multimetric_scoring(scoring):
check_scoring_validator_for_single_metric_usecases(check_scoring)
# To make sure the check_scoring is correctly applied to the constituent
# scorers
estimator = LinearSVC(random_state=0)
estimator.fit([[1], [2], [3]], [1, 1, 0])
scorers = _check_multimetric_scoring(estimator, scoring)
assert isinstance(scorers, dict)
assert sorted(scorers.keys()) == sorted(list(scoring))
assert all([isinstance(scorer, _PredictScorer)
for scorer in list(scorers.values())])
if 'acc' in scoring:
assert_almost_equal(scorers['acc'](
estimator, [[1], [2], [3]], [1, 0, 0]), 2. / 3.)
if 'accuracy' in scoring:
assert_almost_equal(scorers['accuracy'](
estimator, [[1], [2], [3]], [1, 0, 0]), 2. / 3.)
if 'precision' in scoring:
assert_almost_equal(scorers['precision'](
estimator, [[1], [2], [3]], [1, 0, 0]), 0.5)
@pytest.mark.parametrize("scoring", [
((make_scorer(precision_score), make_scorer(accuracy_score)),
"One or more of the elements were callables"),
([5], "Non-string types were found"),
((make_scorer(precision_score), ),
"One of mor eof the elements were callables"),
((), "Empty list was given"),
(('f1', 'f1'), "Duplicate elements were found"),
({4: 'accuracy'}, "Non-string types were found in the keys"),
({}, "An empty dict was passed"),
], ids=[
"tuple of callables", "list of int",
"tuple of one callable", "empty tuple",
"non-unique str", "non-string key dict",
"empty dict"])
def test_check_scoring_and_check_multimetric_scoring_errors(scoring):
    # Make sure it raises errors when the scoring parameter is not valid.
    # More weird corner cases are tested in test_validation.py
estimator = EstimatorWithFitAndPredict()
estimator.fit([[1]], [1])
error_message_regexp = ".*must be unique strings.*"
with pytest.raises(ValueError, match=error_message_regexp):
_check_multimetric_scoring(estimator, scoring=scoring)
def test_check_scoring_gridsearchcv():
# test that check_scoring works on GridSearchCV and pipeline.
# slightly redundant non-regression test.
grid = GridSearchCV(LinearSVC(), param_grid={'C': [.1, 1]}, cv=3)
scorer = check_scoring(grid, scoring="f1")
assert isinstance(scorer, _PredictScorer)
pipe = make_pipeline(LinearSVC())
scorer = check_scoring(pipe, scoring="f1")
assert isinstance(scorer, _PredictScorer)
# check that cross_val_score definitely calls the scorer
# and doesn't make any assumptions about the estimator apart from having a
# fit.
scores = cross_val_score(EstimatorWithFit(), [[1], [2], [3]], [1, 0, 1],
scoring=DummyScorer(), cv=3)
assert_array_equal(scores, 1)
def test_make_scorer():
# Sanity check on the make_scorer factory function.
f = lambda *args: 0
with pytest.raises(ValueError):
make_scorer(f, needs_threshold=True, needs_proba=True)
@pytest.mark.parametrize('scorer_name, metric', [
('f1', f1_score),
('f1_weighted', partial(f1_score, average='weighted')),
('f1_macro', partial(f1_score, average='macro')),
('f1_micro', partial(f1_score, average='micro')),
('precision', precision_score),
('precision_weighted', partial(precision_score, average='weighted')),
('precision_macro', partial(precision_score, average='macro')),
('precision_micro', partial(precision_score, average='micro')),
('recall', recall_score),
('recall_weighted', partial(recall_score, average='weighted')),
('recall_macro', partial(recall_score, average='macro')),
('recall_micro', partial(recall_score, average='micro')),
('jaccard', jaccard_score),
('jaccard_weighted', partial(jaccard_score, average='weighted')),
('jaccard_macro', partial(jaccard_score, average='macro')),
('jaccard_micro', partial(jaccard_score, average='micro')),
('top_k_accuracy', top_k_accuracy_score),
])
def test_classification_binary_scores(scorer_name, metric):
# check consistency between score and scorer for scores supporting
# binary classification.
X, y = make_blobs(random_state=0, centers=2)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf = LinearSVC(random_state=0)
clf.fit(X_train, y_train)
score = SCORERS[scorer_name](clf, X_test, y_test)
expected_score = metric(y_test, clf.predict(X_test))
assert_almost_equal(score, expected_score)
@pytest.mark.parametrize('scorer_name, metric', [
('accuracy', accuracy_score),
('balanced_accuracy', balanced_accuracy_score),
('f1_weighted', partial(f1_score, average='weighted')),
('f1_macro', partial(f1_score, average='macro')),
('f1_micro', partial(f1_score, average='micro')),
('precision_weighted', partial(precision_score, average='weighted')),
('precision_macro', partial(precision_score, average='macro')),
('precision_micro', partial(precision_score, average='micro')),
('recall_weighted', partial(recall_score, average='weighted')),
('recall_macro', partial(recall_score, average='macro')),
('recall_micro', partial(recall_score, average='micro')),
('jaccard_weighted', partial(jaccard_score, average='weighted')),
('jaccard_macro', partial(jaccard_score, average='macro')),
('jaccard_micro', partial(jaccard_score, average='micro')),
])
def test_classification_multiclass_scores(scorer_name, metric):
# check consistency between score and scorer for scores supporting
# multiclass classification.
X, y = make_classification(
n_classes=3, n_informative=3, n_samples=30, random_state=0
)
# use `stratify` = y to ensure train and test sets capture all classes
X_train, X_test, y_train, y_test = train_test_split(
X, y, random_state=0, stratify=y
)
clf = DecisionTreeClassifier(random_state=0)
clf.fit(X_train, y_train)
score = SCORERS[scorer_name](clf, X_test, y_test)
expected_score = metric(y_test, clf.predict(X_test))
assert score == pytest.approx(expected_score)
def test_custom_scorer_pickling():
# test that custom scorer can be pickled
X, y = make_blobs(random_state=0, centers=2)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf = LinearSVC(random_state=0)
clf.fit(X_train, y_train)
scorer = make_scorer(fbeta_score, beta=2)
score1 = scorer(clf, X_test, y_test)
unpickled_scorer = pickle.loads(pickle.dumps(scorer))
score2 = unpickled_scorer(clf, X_test, y_test)
assert score1 == pytest.approx(score2)
# smoke test the repr:
repr(fbeta_score)
def test_regression_scorers():
# Test regression scorers.
diabetes = load_diabetes()
X, y = diabetes.data, diabetes.target
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf = Ridge()
clf.fit(X_train, y_train)
score1 = get_scorer('r2')(clf, X_test, y_test)
score2 = r2_score(y_test, clf.predict(X_test))
assert_almost_equal(score1, score2)
def test_thresholded_scorers():
# Test scorers that take thresholds.
X, y = make_blobs(random_state=0, centers=2)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf = LogisticRegression(random_state=0)
clf.fit(X_train, y_train)
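    # ROC AUC depends only on the ranking of the scores, and for a binary
    # LogisticRegression the decision function is a monotonic (logit)
    # transform of predict_proba[:, 1], so both should give the same AUC.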
score1 = get_scorer('roc_auc')(clf, X_test, y_test)
score2 = roc_auc_score(y_test, clf.decision_function(X_test))
score3 = roc_auc_score(y_test, clf.predict_proba(X_test)[:, 1])
assert_almost_equal(score1, score2)
assert_almost_equal(score1, score3)
logscore = get_scorer('neg_log_loss')(clf, X_test, y_test)
logloss = log_loss(y_test, clf.predict_proba(X_test))
assert_almost_equal(-logscore, logloss)
# same for an estimator without decision_function
clf = DecisionTreeClassifier()
clf.fit(X_train, y_train)
score1 = get_scorer('roc_auc')(clf, X_test, y_test)
score2 = roc_auc_score(y_test, clf.predict_proba(X_test)[:, 1])
assert_almost_equal(score1, score2)
# test with a regressor (no decision_function)
reg = DecisionTreeRegressor()
reg.fit(X_train, y_train)
score1 = get_scorer('roc_auc')(reg, X_test, y_test)
score2 = roc_auc_score(y_test, reg.predict(X_test))
assert_almost_equal(score1, score2)
# Test that an exception is raised on more than two classes
X, y = make_blobs(random_state=0, centers=3)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf.fit(X_train, y_train)
with pytest.raises(ValueError, match="multiclass format is not supported"):
get_scorer('roc_auc')(clf, X_test, y_test)
# test error is raised with a single class present in model
# (predict_proba shape is not suitable for binary auc)
X, y = make_blobs(random_state=0, centers=2)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf = DecisionTreeClassifier()
clf.fit(X_train, np.zeros_like(y_train))
with pytest.raises(ValueError, match="need classifier with two classes"):
get_scorer('roc_auc')(clf, X_test, y_test)
# for proba scorers
with pytest.raises(ValueError, match="need classifier with two classes"):
get_scorer('neg_log_loss')(clf, X_test, y_test)
def test_thresholded_scorers_multilabel_indicator_data():
    # Test that the scorers work with multilabel-indicator format
# for multilabel and multi-output multi-class classifier
X, y = make_multilabel_classification(allow_unlabeled=False,
random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
# Multi-output multi-class predict_proba
clf = DecisionTreeClassifier()
clf.fit(X_train, y_train)
y_proba = clf.predict_proba(X_test)
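    # For a multi-output tree, predict_proba returns a list with one
    # (n_samples, 2) array per output; stacking the positive-class column of
    # each output gives the (n_samples, n_outputs) score matrix expected by
    # roc_auc_score, which is what the expression below builds.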
score1 = get_scorer('roc_auc')(clf, X_test, y_test)
score2 = roc_auc_score(y_test, np.vstack([p[:, -1] for p in y_proba]).T)
assert_almost_equal(score1, score2)
# Multi-output multi-class decision_function
# TODO Is there any yet?
clf = DecisionTreeClassifier()
clf.fit(X_train, y_train)
clf._predict_proba = clf.predict_proba
clf.predict_proba = None
clf.decision_function = lambda X: [p[:, 1] for p in clf._predict_proba(X)]
y_proba = clf.decision_function(X_test)
score1 = get_scorer('roc_auc')(clf, X_test, y_test)
score2 = roc_auc_score(y_test, np.vstack([p for p in y_proba]).T)
assert_almost_equal(score1, score2)
# Multilabel predict_proba
clf = OneVsRestClassifier(DecisionTreeClassifier())
clf.fit(X_train, y_train)
score1 = get_scorer('roc_auc')(clf, X_test, y_test)
score2 = roc_auc_score(y_test, clf.predict_proba(X_test))
assert_almost_equal(score1, score2)
# Multilabel decision function
clf = OneVsRestClassifier(LinearSVC(random_state=0))
clf.fit(X_train, y_train)
score1 = get_scorer('roc_auc')(clf, X_test, y_test)
score2 = roc_auc_score(y_test, clf.decision_function(X_test))
assert_almost_equal(score1, score2)
def test_supervised_cluster_scorers():
# Test clustering scorers against gold standard labeling.
X, y = make_blobs(random_state=0, centers=2)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
km = KMeans(n_clusters=3)
km.fit(X_train)
for name in CLUSTER_SCORERS:
score1 = get_scorer(name)(km, X_test, y_test)
score2 = getattr(cluster_module, name)(y_test, km.predict(X_test))
assert_almost_equal(score1, score2)
@ignore_warnings
def test_raises_on_score_list():
# Test that when a list of scores is returned, we raise proper errors.
X, y = make_blobs(random_state=0)
f1_scorer_no_average = make_scorer(f1_score, average=None)
clf = DecisionTreeClassifier()
with pytest.raises(ValueError):
cross_val_score(clf, X, y, scoring=f1_scorer_no_average)
grid_search = GridSearchCV(clf, scoring=f1_scorer_no_average,
param_grid={'max_depth': [1, 2]})
with pytest.raises(ValueError):
grid_search.fit(X, y)
@ignore_warnings
def test_classification_scorer_sample_weight():
# Test that classification scorers support sample_weight or raise sensible
# errors
# Unlike the metrics invariance test, in the scorer case it's harder
# to ensure that, on the classifier output, weighted and unweighted
# scores really should be unequal.
X, y = make_classification(random_state=0)
_, y_ml = make_multilabel_classification(n_samples=X.shape[0],
random_state=0)
split = train_test_split(X, y, y_ml, random_state=0)
X_train, X_test, y_train, y_test, y_ml_train, y_ml_test = split
sample_weight = np.ones_like(y_test)
sample_weight[:10] = 0
# get sensible estimators for each metric
estimator = _make_estimators(X_train, y_train, y_ml_train)
for name, scorer in SCORERS.items():
if name in REGRESSION_SCORERS:
# skip the regression scores
continue
if name == 'top_k_accuracy':
# in the binary case k > 1 will always lead to a perfect score
scorer._kwargs = {'k': 1}
if name in MULTILABEL_ONLY_SCORERS:
target = y_ml_test
else:
target = y_test
try:
weighted = scorer(estimator[name], X_test, target,
sample_weight=sample_weight)
ignored = scorer(estimator[name], X_test[10:], target[10:])
unweighted = scorer(estimator[name], X_test, target)
assert weighted != unweighted, (
f"scorer {name} behaves identically when called with "
f"sample weights: {weighted} vs {unweighted}")
assert_almost_equal(weighted, ignored,
err_msg=f"scorer {name} behaves differently "
f"when ignoring samples and setting "
f"sample_weight to 0: {weighted} vs {ignored}")
except TypeError as e:
assert "sample_weight" in str(e), (
f"scorer {name} raises unhelpful exception when called "
f"with sample weights: {str(e)}")
@ignore_warnings
def test_regression_scorer_sample_weight():
# Test that regression scorers support sample_weight or raise sensible
# errors
    # An odd number of test samples is required for neg_median_absolute_error
X, y = make_regression(n_samples=101, n_features=20, random_state=0)
y = _require_positive_y(y)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
sample_weight = np.ones_like(y_test)
    # An odd number is required for neg_median_absolute_error
sample_weight[:11] = 0
reg = DecisionTreeRegressor(random_state=0)
reg.fit(X_train, y_train)
for name, scorer in SCORERS.items():
if name not in REGRESSION_SCORERS:
# skip classification scorers
continue
try:
weighted = scorer(reg, X_test, y_test,
sample_weight=sample_weight)
ignored = scorer(reg, X_test[11:], y_test[11:])
unweighted = scorer(reg, X_test, y_test)
assert weighted != unweighted, (
f"scorer {name} behaves identically when called with "
f"sample weights: {weighted} vs {unweighted}")
assert_almost_equal(weighted, ignored,
err_msg=f"scorer {name} behaves differently "
f"when ignoring samples and setting "
f"sample_weight to 0: {weighted} vs {ignored}")
except TypeError as e:
assert "sample_weight" in str(e), (
f"scorer {name} raises unhelpful exception when called "
f"with sample weights: {str(e)}")
@pytest.mark.parametrize('name', SCORERS)
def test_scorer_memmap_input(name):
# Non-regression test for #6147: some score functions would
# return singleton memmap when computed on memmap data instead of scalar
# float values.
if name in REQUIRE_POSITIVE_Y_SCORERS:
y_mm_1 = _require_positive_y(y_mm)
y_ml_mm_1 = _require_positive_y(y_ml_mm)
else:
y_mm_1, y_ml_mm_1 = y_mm, y_ml_mm
# UndefinedMetricWarning for P / R scores
with ignore_warnings():
scorer, estimator = SCORERS[name], ESTIMATORS[name]
if name in MULTILABEL_ONLY_SCORERS:
score = scorer(estimator, X_mm, y_ml_mm_1)
else:
score = scorer(estimator, X_mm, y_mm_1)
assert isinstance(score, numbers.Number), name
def test_scoring_is_not_metric():
with pytest.raises(ValueError, match='make_scorer'):
check_scoring(LogisticRegression(), scoring=f1_score)
with pytest.raises(ValueError, match='make_scorer'):
check_scoring(LogisticRegression(), scoring=roc_auc_score)
with pytest.raises(ValueError, match='make_scorer'):
check_scoring(Ridge(), scoring=r2_score)
with pytest.raises(ValueError, match='make_scorer'):
check_scoring(KMeans(), scoring=cluster_module.adjusted_rand_score)
with pytest.raises(ValueError, match='make_scorer'):
check_scoring(KMeans(), scoring=cluster_module.rand_score)
@pytest.mark.parametrize(
("scorers,expected_predict_count,"
"expected_predict_proba_count,expected_decision_func_count"),
[({'a1': 'accuracy', 'a2': 'accuracy',
'll1': 'neg_log_loss', 'll2': 'neg_log_loss',
'ra1': 'roc_auc', 'ra2': 'roc_auc'}, 1, 1, 1),
(['roc_auc', 'accuracy'], 1, 0, 1),
(['neg_log_loss', 'accuracy'], 1, 1, 0)])
def test_multimetric_scorer_calls_method_once(scorers, expected_predict_count,
expected_predict_proba_count,
expected_decision_func_count):
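    # The expected counts encode that each prediction method should be
    # computed at most once and its result reused across all the scorers that
    # need it (the caching behaviour _MultimetricScorer is meant to provide);
    # the mocks below make this observable via call_count.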
X, y = np.array([[1], [1], [0], [0], [0]]), np.array([0, 1, 1, 1, 0])
mock_est = Mock()
fit_func = Mock(return_value=mock_est)
predict_func = Mock(return_value=y)
pos_proba = np.random.rand(X.shape[0])
proba = np.c_[1 - pos_proba, pos_proba]
predict_proba_func = Mock(return_value=proba)
decision_function_func = Mock(return_value=pos_proba)
mock_est.fit = fit_func
mock_est.predict = predict_func
mock_est.predict_proba = predict_proba_func
mock_est.decision_function = decision_function_func
# add the classes that would be found during fit
mock_est.classes_ = np.array([0, 1])
scorer_dict = _check_multimetric_scoring(LogisticRegression(), scorers)
multi_scorer = _MultimetricScorer(**scorer_dict)
results = multi_scorer(mock_est, X, y)
assert set(scorers) == set(results) # compare dict keys
assert predict_func.call_count == expected_predict_count
assert predict_proba_func.call_count == expected_predict_proba_count
assert decision_function_func.call_count == expected_decision_func_count
def test_multimetric_scorer_calls_method_once_classifier_no_decision():
predict_proba_call_cnt = 0
class MockKNeighborsClassifier(KNeighborsClassifier):
def predict_proba(self, X):
nonlocal predict_proba_call_cnt
predict_proba_call_cnt += 1
return super().predict_proba(X)
X, y = np.array([[1], [1], [0], [0], [0]]), np.array([0, 1, 1, 1, 0])
# no decision function
clf = MockKNeighborsClassifier(n_neighbors=1)
clf.fit(X, y)
scorers = ['roc_auc', 'neg_log_loss']
scorer_dict = _check_multimetric_scoring(clf, scorers)
scorer = _MultimetricScorer(**scorer_dict)
scorer(clf, X, y)
assert predict_proba_call_cnt == 1
def test_multimetric_scorer_calls_method_once_regressor_threshold():
predict_called_cnt = 0
class MockDecisionTreeRegressor(DecisionTreeRegressor):
def predict(self, X):
nonlocal predict_called_cnt
predict_called_cnt += 1
return super().predict(X)
X, y = np.array([[1], [1], [0], [0], [0]]), np.array([0, 1, 1, 1, 0])
# no decision function
clf = MockDecisionTreeRegressor()
clf.fit(X, y)
scorers = {'neg_mse': 'neg_mean_squared_error', 'r2': 'roc_auc'}
scorer_dict = _check_multimetric_scoring(clf, scorers)
scorer = _MultimetricScorer(**scorer_dict)
scorer(clf, X, y)
assert predict_called_cnt == 1
def test_multimetric_scorer_sanity_check():
# scoring dictionary returned is the same as calling each scorer separately
scorers = {'a1': 'accuracy', 'a2': 'accuracy',
'll1': 'neg_log_loss', 'll2': 'neg_log_loss',
'ra1': 'roc_auc', 'ra2': 'roc_auc'}
X, y = make_classification(random_state=0)
clf = DecisionTreeClassifier()
clf.fit(X, y)
scorer_dict = _check_multimetric_scoring(clf, scorers)
multi_scorer = _MultimetricScorer(**scorer_dict)
result = multi_scorer(clf, X, y)
separate_scores = {
name: get_scorer(name)(clf, X, y)
for name in ['accuracy', 'neg_log_loss', 'roc_auc']}
for key, value in result.items():
score_name = scorers[key]
assert_allclose(value, separate_scores[score_name])
@pytest.mark.parametrize('scorer_name, metric', [
('roc_auc_ovr', partial(roc_auc_score, multi_class='ovr')),
('roc_auc_ovo', partial(roc_auc_score, multi_class='ovo')),
('roc_auc_ovr_weighted', partial(roc_auc_score, multi_class='ovr',
average='weighted')),
('roc_auc_ovo_weighted', partial(roc_auc_score, multi_class='ovo',
average='weighted'))])
def test_multiclass_roc_proba_scorer(scorer_name, metric):
scorer = get_scorer(scorer_name)
X, y = make_classification(n_classes=3, n_informative=3, n_samples=20,
random_state=0)
lr = LogisticRegression(multi_class="multinomial").fit(X, y)
y_proba = lr.predict_proba(X)
expected_score = metric(y, y_proba)
assert scorer(lr, X, y) == pytest.approx(expected_score)
def test_multiclass_roc_proba_scorer_label():
scorer = make_scorer(roc_auc_score, multi_class='ovo',
labels=[0, 1, 2], needs_proba=True)
X, y = make_classification(n_classes=3, n_informative=3, n_samples=20,
random_state=0)
lr = LogisticRegression(multi_class="multinomial").fit(X, y)
y_proba = lr.predict_proba(X)
y_binary = y == 0
expected_score = roc_auc_score(y_binary, y_proba,
multi_class='ovo',
labels=[0, 1, 2])
assert scorer(lr, X, y_binary) == pytest.approx(expected_score)
@pytest.mark.parametrize('scorer_name', [
'roc_auc_ovr', 'roc_auc_ovo',
'roc_auc_ovr_weighted', 'roc_auc_ovo_weighted'])
def test_multiclass_roc_no_proba_scorer_errors(scorer_name):
# Perceptron has no predict_proba
scorer = get_scorer(scorer_name)
X, y = make_classification(n_classes=3, n_informative=3, n_samples=20,
random_state=0)
lr = Perceptron().fit(X, y)
msg = "'Perceptron' object has no attribute 'predict_proba'"
with pytest.raises(AttributeError, match=msg):
scorer(lr, X, y)
@pytest.fixture
def string_labeled_classification_problem():
"""Train a classifier on binary problem with string target.
The classifier is trained on a binary classification problem where the
minority class of interest has a string label that is intentionally not the
greatest class label using the lexicographic order. In this case, "cancer"
is the positive label, and `classifier.classes_` is
`["cancer", "not cancer"]`.
In addition, the dataset is imbalanced to better identify problems when
using non-symmetric performance metrics such as f1-score, average precision
and so on.
Returns
-------
classifier : estimator object
Trained classifier on the binary problem.
X_test : ndarray of shape (n_samples, n_features)
Data to be used as testing set in tests.
y_test : ndarray of shape (n_samples,), dtype=object
Binary target where labels are strings.
y_pred : ndarray of shape (n_samples,), dtype=object
Prediction of `classifier` when predicting for `X_test`.
y_pred_proba : ndarray of shape (n_samples, 2), dtype=np.float64
Probabilities of `classifier` when predicting for `X_test`.
y_pred_decision : ndarray of shape (n_samples,), dtype=np.float64
Decision function values of `classifier` when predicting on `X_test`.
"""
from sklearn.datasets import load_breast_cancer
from sklearn.utils import shuffle
X, y = load_breast_cancer(return_X_y=True)
    # create a highly imbalanced classification task
idx_positive = np.flatnonzero(y == 1)
idx_negative = np.flatnonzero(y == 0)
idx_selected = np.hstack([idx_negative, idx_positive[:25]])
X, y = X[idx_selected], y[idx_selected]
X, y = shuffle(X, y, random_state=42)
# only use 2 features to make the problem even harder
X = X[:, :2]
y = np.array(
["cancer" if c == 1 else "not cancer" for c in y], dtype=object
)
X_train, X_test, y_train, y_test = train_test_split(
X, y, stratify=y, random_state=0,
)
classifier = LogisticRegression().fit(X_train, y_train)
y_pred = classifier.predict(X_test)
y_pred_proba = classifier.predict_proba(X_test)
y_pred_decision = classifier.decision_function(X_test)
return classifier, X_test, y_test, y_pred, y_pred_proba, y_pred_decision
def test_average_precision_pos_label(string_labeled_classification_problem):
# check that _ThresholdScorer will lead to the right score when passing
# `pos_label`. Currently, only `average_precision_score` is defined to
# be such a scorer.
clf, X_test, y_test, _, y_pred_proba, y_pred_decision = \
string_labeled_classification_problem
pos_label = "cancer"
# we need to select the positive column or reverse the decision values
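    # `clf.classes_` is ["cancer", "not cancer"] (asserted just below), so the
    # first predict_proba column holds the probability of the positive label,
    # while the decision function is oriented towards classes_[1]; negating
    # it re-orients the scores towards "cancer".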
y_pred_proba = y_pred_proba[:, 0]
y_pred_decision = y_pred_decision * -1
assert clf.classes_[0] == pos_label
# check that when calling the scoring function, probability estimates and
# decision values lead to the same results
ap_proba = average_precision_score(
y_test, y_pred_proba, pos_label=pos_label
)
ap_decision_function = average_precision_score(
y_test, y_pred_decision, pos_label=pos_label
)
assert ap_proba == pytest.approx(ap_decision_function)
# create a scorer which would require to pass a `pos_label`
# check that it fails if `pos_label` is not provided
average_precision_scorer = make_scorer(
average_precision_score, needs_threshold=True,
)
err_msg = "pos_label=1 is not a valid label. It should be one of "
with pytest.raises(ValueError, match=err_msg):
average_precision_scorer(clf, X_test, y_test)
    # otherwise, the scorer should give the same results as calling the
# scoring function
average_precision_scorer = make_scorer(
average_precision_score, needs_threshold=True, pos_label=pos_label
)
ap_scorer = average_precision_scorer(clf, X_test, y_test)
assert ap_scorer == pytest.approx(ap_proba)
# The above scorer call is using `clf.decision_function`. We will force
# it to use `clf.predict_proba`.
clf_without_predict_proba = deepcopy(clf)
def _predict_proba(self, X):
raise NotImplementedError
clf_without_predict_proba.predict_proba = partial(
_predict_proba, clf_without_predict_proba
)
# sanity check
with pytest.raises(NotImplementedError):
clf_without_predict_proba.predict_proba(X_test)
ap_scorer = average_precision_scorer(
clf_without_predict_proba, X_test, y_test
)
assert ap_scorer == pytest.approx(ap_proba)
def test_brier_score_loss_pos_label(string_labeled_classification_problem):
# check that _ProbaScorer leads to the right score when `pos_label` is
# provided. Currently only the `brier_score_loss` is defined to be such
# a scorer.
clf, X_test, y_test, _, y_pred_proba, _ = \
string_labeled_classification_problem
pos_label = "cancer"
assert clf.classes_[0] == pos_label
# brier score loss is symmetric
brier_pos_cancer = brier_score_loss(
y_test, y_pred_proba[:, 0], pos_label="cancer"
)
brier_pos_not_cancer = brier_score_loss(
y_test, y_pred_proba[:, 1], pos_label="not cancer"
)
assert brier_pos_cancer == pytest.approx(brier_pos_not_cancer)
brier_scorer = make_scorer(
brier_score_loss, needs_proba=True, pos_label=pos_label,
)
assert brier_scorer(clf, X_test, y_test) == pytest.approx(brier_pos_cancer)
@pytest.mark.parametrize(
"score_func", [f1_score, precision_score, recall_score, jaccard_score]
)
def test_non_symmetric_metric_pos_label(
score_func, string_labeled_classification_problem
):
# check that _PredictScorer leads to the right score when `pos_label` is
# provided. We check for all possible metric supported.
# Note: At some point we may end up having "scorer tags".
clf, X_test, y_test, y_pred, _, _ = string_labeled_classification_problem
pos_label = "cancer"
assert clf.classes_[0] == pos_label
score_pos_cancer = score_func(y_test, y_pred, pos_label="cancer")
score_pos_not_cancer = score_func(y_test, y_pred, pos_label="not cancer")
assert score_pos_cancer != pytest.approx(score_pos_not_cancer)
scorer = make_scorer(score_func, pos_label=pos_label)
assert scorer(clf, X_test, y_test) == pytest.approx(score_pos_cancer)
@pytest.mark.parametrize(
"scorer",
[
make_scorer(
average_precision_score, needs_threshold=True, pos_label="xxx"
),
make_scorer(brier_score_loss, needs_proba=True, pos_label="xxx"),
make_scorer(f1_score, pos_label="xxx")
],
ids=["ThresholdScorer", "ProbaScorer", "PredictScorer"],
)
def test_scorer_select_proba_error(scorer):
    # check that we raise the proper error when passing an unknown
# pos_label
X, y = make_classification(
n_classes=2, n_informative=3, n_samples=20, random_state=0
)
lr = LogisticRegression().fit(X, y)
assert scorer._kwargs["pos_label"] not in np.unique(y).tolist()
err_msg = "is not a valid label"
with pytest.raises(ValueError, match=err_msg):
scorer(lr, X, y)
def test_scorer_no_op_multiclass_select_proba():
    # check that calling a ProbaScorer on a multiclass problem does not raise
# even if `y_true` would be binary during the scoring.
# `_select_proba_binary` should not be called in this case.
X, y = make_classification(
n_classes=3, n_informative=3, n_samples=20, random_state=0
)
lr = LogisticRegression().fit(X, y)
mask_last_class = y == lr.classes_[-1]
X_test, y_test = X[~mask_last_class], y[~mask_last_class]
assert_array_equal(np.unique(y_test), lr.classes_[:-1])
scorer = make_scorer(
roc_auc_score, needs_proba=True, multi_class="ovo", labels=lr.classes_,
)
scorer(lr, X_test, y_test)
|
bsd-3-clause
|
wangmiao1981/spark
|
python/pyspark/pandas/tests/test_dataframe_spark_io.py
|
14
|
19999
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from distutils.version import LooseVersion
import unittest
import glob
import os
import numpy as np
import pandas as pd
import pyarrow as pa
from pyspark import pandas as ps
from pyspark.testing.pandasutils import PandasOnSparkTestCase, TestUtils
class DataFrameSparkIOTest(PandasOnSparkTestCase, TestUtils):
"""Test cases for big data I/O using Spark."""
@property
def test_column_order(self):
return ["i32", "i64", "f", "bhello"]
@property
def test_pdf(self):
pdf = pd.DataFrame(
{
"i32": np.arange(20, dtype=np.int32) % 3,
"i64": np.arange(20, dtype=np.int64) % 5,
"f": np.arange(20, dtype=np.float64),
"bhello": np.random.choice(["hello", "yo", "people"], size=20).astype("O"),
},
columns=self.test_column_order,
index=np.random.rand(20),
)
return pdf
def test_parquet_read(self):
with self.temp_dir() as tmp:
data = self.test_pdf
self.spark.createDataFrame(data, "i32 int, i64 long, f double, bhello string").coalesce(
1
).write.parquet(tmp, mode="overwrite")
def check(columns, expected):
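                # On pandas >= 0.21.1 the expectation is recomputed with
                # pd.read_parquet so that pandas' own column-selection
                # semantics serve as the reference; otherwise the pre-built
                # frame passed in as `expected` is kept.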
if LooseVersion("0.21.1") <= LooseVersion(pd.__version__):
expected = pd.read_parquet(tmp, columns=columns)
actual = ps.read_parquet(tmp, columns=columns)
self.assertPandasEqual(expected, actual.to_pandas())
check(None, data)
check(["i32", "i64"], data[["i32", "i64"]])
check(["i64", "i32"], data[["i64", "i32"]])
if LooseVersion(pa.__version__) < LooseVersion("1.0.0"):
# TODO: `pd.read_parquet()` changed the behavior due to PyArrow 1.0.0.
# We might want to adjust the behavior. Let's see how pandas handles it.
check(("i32", "i64"), data[["i32", "i64"]])
check(["a", "b", "i32", "i64"], data[["i32", "i64"]])
check([], pd.DataFrame([]))
check(["a"], pd.DataFrame([]))
check("i32", pd.DataFrame([]))
check("float", data[["f"]])
# check with pyspark patch.
if LooseVersion("0.21.1") <= LooseVersion(pd.__version__):
expected = pd.read_parquet(tmp)
else:
expected = data
actual = ps.read_parquet(tmp)
self.assertPandasEqual(expected, actual.to_pandas())
# When index columns are known
pdf = self.test_pdf
expected = ps.DataFrame(pdf)
expected_idx = expected.set_index("bhello")[["f", "i32", "i64"]]
actual_idx = ps.read_parquet(tmp, index_col="bhello")[["f", "i32", "i64"]]
self.assert_eq(
actual_idx.sort_values(by="f").to_spark().toPandas(),
expected_idx.sort_values(by="f").to_spark().toPandas(),
)
def test_parquet_read_with_pandas_metadata(self):
with self.temp_dir() as tmp:
expected1 = self.test_pdf
path1 = "{}/file1.parquet".format(tmp)
expected1.to_parquet(path1)
self.assert_eq(ps.read_parquet(path1, pandas_metadata=True), expected1)
expected2 = expected1.reset_index()
path2 = "{}/file2.parquet".format(tmp)
expected2.to_parquet(path2)
self.assert_eq(ps.read_parquet(path2, pandas_metadata=True), expected2)
expected3 = expected2.set_index("index", append=True)
path3 = "{}/file3.parquet".format(tmp)
expected3.to_parquet(path3)
self.assert_eq(ps.read_parquet(path3, pandas_metadata=True), expected3)
def test_parquet_write(self):
with self.temp_dir() as tmp:
pdf = self.test_pdf
expected = ps.DataFrame(pdf)
# Write out partitioned by one column
expected.to_parquet(tmp, mode="overwrite", partition_cols="i32")
# Reset column order, as once the data is written out, Spark rearranges partition
# columns to appear first.
actual = ps.read_parquet(tmp)
self.assertFalse((actual.columns == self.test_column_order).all())
actual = actual[self.test_column_order]
self.assert_eq(
actual.sort_values(by="f").to_spark().toPandas(),
expected.sort_values(by="f").to_spark().toPandas(),
)
# Write out partitioned by two columns
expected.to_parquet(tmp, mode="overwrite", partition_cols=["i32", "bhello"])
# Reset column order, as once the data is written out, Spark rearranges partition
# columns to appear first.
actual = ps.read_parquet(tmp)
self.assertFalse((actual.columns == self.test_column_order).all())
actual = actual[self.test_column_order]
self.assert_eq(
actual.sort_values(by="f").to_spark().toPandas(),
expected.sort_values(by="f").to_spark().toPandas(),
)
def test_table(self):
with self.table("test_table"):
pdf = self.test_pdf
expected = ps.DataFrame(pdf)
# Write out partitioned by one column
expected.spark.to_table("test_table", mode="overwrite", partition_cols="i32")
# Reset column order, as once the data is written out, Spark rearranges partition
# columns to appear first.
actual = ps.read_table("test_table")
self.assertFalse((actual.columns == self.test_column_order).all())
actual = actual[self.test_column_order]
self.assert_eq(
actual.sort_values(by="f").to_spark().toPandas(),
expected.sort_values(by="f").to_spark().toPandas(),
)
# Write out partitioned by two columns
expected.to_table("test_table", mode="overwrite", partition_cols=["i32", "bhello"])
# Reset column order, as once the data is written out, Spark rearranges partition
# columns to appear first.
actual = ps.read_table("test_table")
self.assertFalse((actual.columns == self.test_column_order).all())
actual = actual[self.test_column_order]
self.assert_eq(
actual.sort_values(by="f").to_spark().toPandas(),
expected.sort_values(by="f").to_spark().toPandas(),
)
# When index columns are known
expected_idx = expected.set_index("bhello")[["f", "i32", "i64"]]
actual_idx = ps.read_table("test_table", index_col="bhello")[["f", "i32", "i64"]]
self.assert_eq(
actual_idx.sort_values(by="f").to_spark().toPandas(),
expected_idx.sort_values(by="f").to_spark().toPandas(),
)
expected_idx = expected.set_index(["bhello"])[["f", "i32", "i64"]]
actual_idx = ps.read_table("test_table", index_col=["bhello"])[["f", "i32", "i64"]]
self.assert_eq(
actual_idx.sort_values(by="f").to_spark().toPandas(),
expected_idx.sort_values(by="f").to_spark().toPandas(),
)
expected_idx = expected.set_index(["i32", "bhello"])[["f", "i64"]]
actual_idx = ps.read_table("test_table", index_col=["i32", "bhello"])[["f", "i64"]]
self.assert_eq(
actual_idx.sort_values(by="f").to_spark().toPandas(),
expected_idx.sort_values(by="f").to_spark().toPandas(),
)
def test_spark_io(self):
with self.temp_dir() as tmp:
pdf = self.test_pdf
expected = ps.DataFrame(pdf)
# Write out partitioned by one column
expected.to_spark_io(tmp, format="json", mode="overwrite", partition_cols="i32")
# Reset column order, as once the data is written out, Spark rearranges partition
# columns to appear first.
actual = ps.read_spark_io(tmp, format="json")
self.assertFalse((actual.columns == self.test_column_order).all())
actual = actual[self.test_column_order]
self.assert_eq(
actual.sort_values(by="f").to_spark().toPandas(),
expected.sort_values(by="f").to_spark().toPandas(),
)
# Write out partitioned by two columns
expected.to_spark_io(
tmp, format="json", mode="overwrite", partition_cols=["i32", "bhello"]
)
# Reset column order, as once the data is written out, Spark rearranges partition
# columns to appear first.
actual = ps.read_spark_io(path=tmp, format="json")
self.assertFalse((actual.columns == self.test_column_order).all())
actual = actual[self.test_column_order]
self.assert_eq(
actual.sort_values(by="f").to_spark().toPandas(),
expected.sort_values(by="f").to_spark().toPandas(),
)
# When index columns are known
pdf = self.test_pdf
expected = ps.DataFrame(pdf)
col_order = ["f", "i32", "i64"]
expected_idx = expected.set_index("bhello")[col_order]
actual_idx = ps.read_spark_io(tmp, format="json", index_col="bhello")[col_order]
self.assert_eq(
actual_idx.sort_values(by="f").to_spark().toPandas(),
expected_idx.sort_values(by="f").to_spark().toPandas(),
)
@unittest.skip("openpyxl")
def test_read_excel(self):
with self.temp_dir() as tmp:
path1 = "{}/file1.xlsx".format(tmp)
self.test_pdf[["i32"]].to_excel(path1)
self.assert_eq(ps.read_excel(open(path1, "rb")), pd.read_excel(open(path1, "rb")))
self.assert_eq(
ps.read_excel(open(path1, "rb"), index_col=0),
pd.read_excel(open(path1, "rb"), index_col=0),
)
self.assert_eq(
ps.read_excel(open(path1, "rb"), index_col=0, squeeze=True),
pd.read_excel(open(path1, "rb"), index_col=0, squeeze=True),
)
self.assert_eq(ps.read_excel(path1), pd.read_excel(path1))
self.assert_eq(ps.read_excel(path1, index_col=0), pd.read_excel(path1, index_col=0))
self.assert_eq(
ps.read_excel(path1, index_col=0, squeeze=True),
pd.read_excel(path1, index_col=0, squeeze=True),
)
self.assert_eq(ps.read_excel(tmp), pd.read_excel(path1))
path2 = "{}/file2.xlsx".format(tmp)
self.test_pdf[["i32"]].to_excel(path2)
self.assert_eq(
ps.read_excel(tmp, index_col=0).sort_index(),
pd.concat(
[pd.read_excel(path1, index_col=0), pd.read_excel(path2, index_col=0)]
).sort_index(),
)
self.assert_eq(
ps.read_excel(tmp, index_col=0, squeeze=True).sort_index(),
pd.concat(
[
pd.read_excel(path1, index_col=0, squeeze=True),
pd.read_excel(path2, index_col=0, squeeze=True),
]
).sort_index(),
)
with self.temp_dir() as tmp:
path1 = "{}/file1.xlsx".format(tmp)
with pd.ExcelWriter(path1) as writer:
self.test_pdf.to_excel(writer, sheet_name="Sheet_name_1")
self.test_pdf[["i32"]].to_excel(writer, sheet_name="Sheet_name_2")
sheet_names = [["Sheet_name_1", "Sheet_name_2"], None]
pdfs1 = pd.read_excel(open(path1, "rb"), sheet_name=None, index_col=0)
pdfs1_squeezed = pd.read_excel(
open(path1, "rb"), sheet_name=None, index_col=0, squeeze=True
)
for sheet_name in sheet_names:
psdfs = ps.read_excel(open(path1, "rb"), sheet_name=sheet_name, index_col=0)
self.assert_eq(psdfs["Sheet_name_1"], pdfs1["Sheet_name_1"])
self.assert_eq(psdfs["Sheet_name_2"], pdfs1["Sheet_name_2"])
psdfs = ps.read_excel(
open(path1, "rb"), sheet_name=sheet_name, index_col=0, squeeze=True
)
self.assert_eq(psdfs["Sheet_name_1"], pdfs1_squeezed["Sheet_name_1"])
self.assert_eq(psdfs["Sheet_name_2"], pdfs1_squeezed["Sheet_name_2"])
self.assert_eq(
ps.read_excel(tmp, index_col=0, sheet_name="Sheet_name_2"),
pdfs1["Sheet_name_2"],
)
for sheet_name in sheet_names:
psdfs = ps.read_excel(tmp, sheet_name=sheet_name, index_col=0)
self.assert_eq(psdfs["Sheet_name_1"], pdfs1["Sheet_name_1"])
self.assert_eq(psdfs["Sheet_name_2"], pdfs1["Sheet_name_2"])
psdfs = ps.read_excel(tmp, sheet_name=sheet_name, index_col=0, squeeze=True)
self.assert_eq(psdfs["Sheet_name_1"], pdfs1_squeezed["Sheet_name_1"])
self.assert_eq(psdfs["Sheet_name_2"], pdfs1_squeezed["Sheet_name_2"])
path2 = "{}/file2.xlsx".format(tmp)
with pd.ExcelWriter(path2) as writer:
self.test_pdf.to_excel(writer, sheet_name="Sheet_name_1")
self.test_pdf[["i32"]].to_excel(writer, sheet_name="Sheet_name_2")
pdfs2 = pd.read_excel(path2, sheet_name=None, index_col=0)
pdfs2_squeezed = pd.read_excel(path2, sheet_name=None, index_col=0, squeeze=True)
self.assert_eq(
ps.read_excel(tmp, sheet_name="Sheet_name_2", index_col=0).sort_index(),
pd.concat([pdfs1["Sheet_name_2"], pdfs2["Sheet_name_2"]]).sort_index(),
)
self.assert_eq(
ps.read_excel(
tmp, sheet_name="Sheet_name_2", index_col=0, squeeze=True
).sort_index(),
pd.concat(
[pdfs1_squeezed["Sheet_name_2"], pdfs2_squeezed["Sheet_name_2"]]
).sort_index(),
)
for sheet_name in sheet_names:
psdfs = ps.read_excel(tmp, sheet_name=sheet_name, index_col=0)
self.assert_eq(
psdfs["Sheet_name_1"].sort_index(),
pd.concat([pdfs1["Sheet_name_1"], pdfs2["Sheet_name_1"]]).sort_index(),
)
self.assert_eq(
psdfs["Sheet_name_2"].sort_index(),
pd.concat([pdfs1["Sheet_name_2"], pdfs2["Sheet_name_2"]]).sort_index(),
)
psdfs = ps.read_excel(tmp, sheet_name=sheet_name, index_col=0, squeeze=True)
self.assert_eq(
psdfs["Sheet_name_1"].sort_index(),
pd.concat(
[pdfs1_squeezed["Sheet_name_1"], pdfs2_squeezed["Sheet_name_1"]]
).sort_index(),
)
self.assert_eq(
psdfs["Sheet_name_2"].sort_index(),
pd.concat(
[pdfs1_squeezed["Sheet_name_2"], pdfs2_squeezed["Sheet_name_2"]]
).sort_index(),
)
def test_read_orc(self):
with self.temp_dir() as tmp:
path = "{}/file1.orc".format(tmp)
data = self.test_pdf
self.spark.createDataFrame(data, "i32 int, i64 long, f double, bhello string").coalesce(
1
).write.orc(path, mode="overwrite")
            # `spark.write.orc` creates a directory containing distributed ORC files,
            # but pandas can only read a single file, not a directory, so we need one ORC file path.
orc_file_path = glob.glob(os.path.join(path, "*.orc"))[0]
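            # For illustration only (not executed here): with pyarrow installed, that single
            # file could also be read with plain pandas via pd.read_orc(orc_file_path).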
expected = data.reset_index()[data.columns]
actual = ps.read_orc(path)
self.assertPandasEqual(expected, actual.to_pandas())
# columns
columns = ["i32", "i64"]
expected = data.reset_index()[columns]
actual = ps.read_orc(path, columns=columns)
self.assertPandasEqual(expected, actual.to_pandas())
# index_col
expected = data.set_index("i32")
actual = ps.read_orc(path, index_col="i32")
self.assert_eq(actual, expected)
expected = data.set_index(["i32", "f"])
actual = ps.read_orc(path, index_col=["i32", "f"])
self.assert_eq(actual, expected)
# index_col with columns
expected = data.set_index("i32")[["i64", "bhello"]]
actual = ps.read_orc(path, index_col=["i32"], columns=["i64", "bhello"])
self.assert_eq(actual, expected)
expected = data.set_index(["i32", "f"])[["bhello", "i64"]]
actual = ps.read_orc(path, index_col=["i32", "f"], columns=["bhello", "i64"])
self.assert_eq(actual, expected)
msg = "Unknown column name 'i'"
with self.assertRaises(ValueError, msg=msg):
ps.read_orc(path, columns="i32")
msg = "Unknown column name 'i34'"
with self.assertRaises(ValueError, msg=msg):
ps.read_orc(path, columns=["i34", "i64"])
def test_orc_write(self):
with self.temp_dir() as tmp:
pdf = self.test_pdf
expected = ps.DataFrame(pdf)
# Write out partitioned by one column
expected.to_orc(tmp, mode="overwrite", partition_cols="i32")
# Reset column order, as once the data is written out, Spark rearranges partition
# columns to appear first.
actual = ps.read_orc(tmp)
self.assertFalse((actual.columns == self.test_column_order).all())
actual = actual[self.test_column_order]
self.assert_eq(
actual.sort_values(by="f").to_spark().toPandas(),
expected.sort_values(by="f").to_spark().toPandas(),
)
# Write out partitioned by two columns
expected.to_orc(tmp, mode="overwrite", partition_cols=["i32", "bhello"])
# Reset column order, as once the data is written out, Spark rearranges partition
# columns to appear first.
actual = ps.read_orc(tmp)
self.assertFalse((actual.columns == self.test_column_order).all())
actual = actual[self.test_column_order]
self.assert_eq(
actual.sort_values(by="f").to_spark().toPandas(),
expected.sort_values(by="f").to_spark().toPandas(),
)
if __name__ == "__main__":
from pyspark.pandas.tests.test_dataframe_spark_io import * # noqa: F401
try:
import xmlrunner # type: ignore[import]
testRunner = xmlrunner.XMLTestRunner(output="target/test-reports", verbosity=2)
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
|
apache-2.0
|
RobertABT/heightmap
|
build/matplotlib/lib/mpl_toolkits/mplot3d/axis3d.py
|
6
|
16960
|
#!/usr/bin/python
# axis3d.py, original mplot3d version by John Porter
# Created: 23 Sep 2005
# Parts rewritten by Reinier Heeres <[email protected]>
import math
import copy
from matplotlib import lines as mlines, axis as maxis, \
patches as mpatches
import art3d
import proj3d
import numpy as np
def get_flip_min_max(coord, index, mins, maxs):
if coord[index] == mins[index]:
return maxs[index]
else:
return mins[index]
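# Illustrative example (values assumed): with mins = (0, 0, 0) and maxs = (1, 1, 1),
# get_flip_min_max((0, 1, 0), 0, mins, maxs) returns 1, because coordinate 0 sits at its
# minimum and is therefore flipped to the opposite (maximum) extreme.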
def move_from_center(coord, centers, deltas, axmask=(True, True, True)):
'''Return a coordinate that is moved by "deltas" away from the center.'''
coord = copy.copy(coord)
#print coord, centers, deltas, axmask
for i in range(3):
if not axmask[i]:
continue
if coord[i] < centers[i]:
coord[i] -= deltas[i]
else:
coord[i] += deltas[i]
return coord
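# Illustrative example (values assumed):
#   move_from_center(np.array([0., 0., 0.]), centers=np.array([1., 1., 1.]),
#                    deltas=np.array([0.5, 0.5, 0.5]))
#   -> array([-0.5, -0.5, -0.5]); each coordinate lying below its center is pushed
#      further from the center by the corresponding delta.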
def tick_update_position(tick, tickxs, tickys, labelpos):
'''Update tick line and label position and style.'''
for (label, on) in ((tick.label1, tick.label1On), \
(tick.label2, tick.label2On)):
if on:
label.set_position(labelpos)
tick.tick1On, tick.tick2On = True, False
tick.tick1line.set_linestyle('-')
tick.tick1line.set_marker('')
tick.tick1line.set_data(tickxs, tickys)
tick.gridline.set_data(0, 0)
class Axis(maxis.XAxis):
# These points from the unit cube make up the x, y and z-planes
_PLANES = (
(0, 3, 7, 4), (1, 2, 6, 5), # yz planes
(0, 1, 5, 4), (3, 2, 6, 7), # xz planes
(0, 1, 2, 3), (4, 5, 6, 7), # xy planes
)
# Some properties for the axes
_AXINFO = {
'x': {'i': 0, 'tickdir': 1, 'juggled': (1, 0, 2),
'color': (0.95, 0.95, 0.95, 0.5)},
'y': {'i': 1, 'tickdir': 0, 'juggled': (0, 1, 2),
'color': (0.90, 0.90, 0.90, 0.5)},
'z': {'i': 2, 'tickdir': 0, 'juggled': (0, 2, 1),
'color': (0.925, 0.925, 0.925, 0.5)},
}
def __init__(self, adir, v_intervalx, d_intervalx, axes, *args, **kwargs):
# adir identifies which axes this is
self.adir = adir
# data and viewing intervals for this direction
self.d_interval = d_intervalx
self.v_interval = v_intervalx
# This is a temporary member variable.
# Do not depend on this existing in future releases!
self._axinfo = self._AXINFO[adir].copy()
self._axinfo.update({'label' : {'space_factor': 1.6,
'va': 'center',
'ha': 'center'},
'tick' : {'inward_factor': 0.2,
'outward_factor': 0.1},
'ticklabel': {'space_factor': 0.7},
'axisline': {'linewidth': 0.75,
'color': (0, 0, 0, 1)},
'grid' : {'color': (0.9, 0.9, 0.9, 1),
'linewidth': 1.0},
})
maxis.XAxis.__init__(self, axes, *args, **kwargs)
self.set_rotate_label(kwargs.get('rotate_label', None))
def init3d(self):
self.line = mlines.Line2D(xdata=(0, 0), ydata=(0, 0),
linewidth=self._axinfo['axisline']['linewidth'],
color=self._axinfo['axisline']['color'],
antialiased=True,
)
# Store dummy data in Polygon object
self.pane = mpatches.Polygon(np.array([[0,0], [0,1], [1,0], [0,0]]),
closed=False,
alpha=0.8,
facecolor=(1,1,1,0),
edgecolor=(1,1,1,0))
self.set_pane_color(self._axinfo['color'])
self.axes._set_artist_props(self.line)
self.axes._set_artist_props(self.pane)
self.gridlines = art3d.Line3DCollection([], )
self.axes._set_artist_props(self.gridlines)
self.axes._set_artist_props(self.label)
self.axes._set_artist_props(self.offsetText)
# Need to be able to place the label at the correct location
self.label._transform = self.axes.transData
self.offsetText._transform = self.axes.transData
def get_tick_positions(self):
majorLocs = self.major.locator()
self.major.formatter.set_locs(majorLocs)
majorLabels = [self.major.formatter(val, i) for i, val in enumerate(majorLocs)]
return majorLabels, majorLocs
def get_major_ticks(self, numticks=None):
ticks = maxis.XAxis.get_major_ticks(self, numticks)
for t in ticks:
t.tick1line.set_transform(self.axes.transData)
t.tick2line.set_transform(self.axes.transData)
t.gridline.set_transform(self.axes.transData)
t.label1.set_transform(self.axes.transData)
t.label2.set_transform(self.axes.transData)
return ticks
def set_pane_pos(self, xys):
xys = np.asarray(xys)
xys = xys[:,:2]
self.pane.xy = xys
def set_pane_color(self, color):
        '''Set pane color to an RGBA tuple'''
self._axinfo['color'] = color
self.pane.set_edgecolor(color)
self.pane.set_facecolor(color)
self.pane.set_alpha(color[-1])
def set_rotate_label(self, val):
'''
Whether to rotate the axis label: True, False or None.
If set to None the label will be rotated if longer than 4 chars.
'''
self._rotate_label = val
def get_rotate_label(self, text):
if self._rotate_label is not None:
return self._rotate_label
else:
return len(text) > 4
def _get_coord_info(self, renderer):
minx, maxx, miny, maxy, minz, maxz = self.axes.get_w_lims()
if minx > maxx:
minx, maxx = maxx, minx
if miny > maxy:
miny, maxy = maxy, miny
if minz > maxz:
minz, maxz = maxz, minz
mins = np.array((minx, miny, minz))
maxs = np.array((maxx, maxy, maxz))
centers = (maxs + mins) / 2.
deltas = (maxs - mins) / 12.
mins = mins - deltas / 4.
maxs = maxs + deltas / 4.
vals = mins[0], maxs[0], mins[1], maxs[1], mins[2], maxs[2]
tc = self.axes.tunit_cube(vals, renderer.M)
avgz = [tc[p1][2] + tc[p2][2] + tc[p3][2] + tc[p4][2] for \
p1, p2, p3, p4 in self._PLANES]
highs = np.array([avgz[2*i] < avgz[2*i+1] for i in range(3)])
return mins, maxs, centers, deltas, tc, highs
def draw_pane(self, renderer):
renderer.open_group('pane3d')
mins, maxs, centers, deltas, tc, highs = self._get_coord_info(renderer)
info = self._axinfo
index = info['i']
if not highs[index]:
plane = self._PLANES[2 * index]
else:
plane = self._PLANES[2 * index + 1]
xys = [tc[p] for p in plane]
self.set_pane_pos(xys)
self.pane.draw(renderer)
renderer.close_group('pane3d')
def draw(self, renderer):
self.label._transform = self.axes.transData
renderer.open_group('axis3d')
# code from XAxis
majorTicks = self.get_major_ticks()
majorLocs = self.major.locator()
info = self._axinfo
index = info['i']
# filter locations here so that no extra grid lines are drawn
locmin, locmax = self.get_view_interval()
if locmin > locmax:
locmin, locmax = locmax, locmin
# Rudimentary clipping
majorLocs = [loc for loc in majorLocs if
locmin <= loc <= locmax]
self.major.formatter.set_locs(majorLocs)
majorLabels = [self.major.formatter(val, i)
for i, val in enumerate(majorLocs)]
mins, maxs, centers, deltas, tc, highs = self._get_coord_info(renderer)
# Determine grid lines
minmax = np.where(highs, maxs, mins)
# Draw main axis line
juggled = info['juggled']
edgep1 = minmax.copy()
edgep1[juggled[0]] = get_flip_min_max(edgep1, juggled[0], mins, maxs)
edgep2 = edgep1.copy()
edgep2[juggled[1]] = get_flip_min_max(edgep2, juggled[1], mins, maxs)
pep = proj3d.proj_trans_points([edgep1, edgep2], renderer.M)
centpt = proj3d.proj_transform(centers[0], centers[1], centers[2], renderer.M)
self.line.set_data((pep[0][0], pep[0][1]), (pep[1][0], pep[1][1]))
self.line.draw(renderer)
# Grid points where the planes meet
xyz0 = []
for val in majorLocs:
coord = minmax.copy()
coord[index] = val
xyz0.append(coord)
# Draw labels
peparray = np.asanyarray(pep)
# The transAxes transform is used because the Text object
# rotates the text relative to the display coordinate system.
# Therefore, if we want the labels to remain parallel to the
# axis regardless of the aspect ratio, we need to convert the
# edge points of the plane to display coordinates and calculate
# an angle from that.
# TODO: Maybe Text objects should handle this themselves?
dx, dy = (self.axes.transAxes.transform(peparray[0:2, 1]) -
self.axes.transAxes.transform(peparray[0:2, 0]))
lxyz = 0.5*(edgep1 + edgep2)
labeldeltas = info['label']['space_factor'] * deltas
axmask = [True, True, True]
axmask[index] = False
lxyz = move_from_center(lxyz, centers, labeldeltas, axmask)
tlx, tly, tlz = proj3d.proj_transform(lxyz[0], lxyz[1], lxyz[2], \
renderer.M)
self.label.set_position((tlx, tly))
if self.get_rotate_label(self.label.get_text()):
angle = art3d.norm_text_angle(math.degrees(math.atan2(dy, dx)))
self.label.set_rotation(angle)
self.label.set_va(info['label']['va'])
self.label.set_ha(info['label']['ha'])
self.label.draw(renderer)
# Draw Offset text
# Which of the two edge points do we want to
# use for locating the offset text?
if juggled[2] == 2 :
outeredgep = edgep1
outerindex = 0
else :
outeredgep = edgep2
outerindex = 1
pos = copy.copy(outeredgep)
pos = move_from_center(pos, centers, labeldeltas, axmask)
olx, oly, olz = proj3d.proj_transform(pos[0], pos[1], pos[2], renderer.M)
self.offsetText.set_text( self.major.formatter.get_offset() )
self.offsetText.set_position( (olx, oly) )
angle = art3d.norm_text_angle(math.degrees(math.atan2(dy, dx)))
self.offsetText.set_rotation(angle)
# Must set rotation mode to "anchor" so that
# the alignment point is used as the "fulcrum" for rotation.
self.offsetText.set_rotation_mode('anchor')
#-----------------------------------------------------------------------
        # Note: the following statements determine the proper alignment of
        # the offset text. This was determined entirely by trial-and-error
# and should not be in any way considered as "the way". There are
# still some edge cases where alignment is not quite right, but
# this seems to be more of a geometry issue (in other words, I
# might be using the wrong reference points).
#
# (TT, FF, TF, FT) are the shorthand for the tuple of
# (centpt[info['tickdir']] <= peparray[info['tickdir'], outerindex],
# centpt[index] <= peparray[index, outerindex])
#
# Three-letters (e.g., TFT, FTT) are short-hand for the array
# of bools from the variable 'highs'.
# ---------------------------------------------------------------------
if centpt[info['tickdir']] > peparray[info['tickdir'], outerindex] :
# if FT and if highs has an even number of Trues
if (centpt[index] <= peparray[index, outerindex]
and ((len(highs.nonzero()[0]) % 2) == 0)) :
# Usually, this means align right, except for the FTT case,
# in which offset for axis 1 and 2 are aligned left.
if highs.tolist() == [False, True, True] and index in (1, 2) :
align = 'left'
else :
align = 'right'
else :
# The FF case
align = 'left'
else :
# if TF and if highs has an even number of Trues
if (centpt[index] > peparray[index, outerindex]
and ((len(highs.nonzero()[0]) % 2) == 0)) :
                # Usually this means align left, except if it is axis 2
if index == 2 :
align = 'right'
else :
align = 'left'
else :
# The TT case
align = 'right'
self.offsetText.set_va('center')
self.offsetText.set_ha(align)
self.offsetText.draw(renderer)
# Draw grid lines
if len(xyz0) > 0:
# Grid points at end of one plane
xyz1 = copy.deepcopy(xyz0)
newindex = (index + 1) % 3
newval = get_flip_min_max(xyz1[0], newindex, mins, maxs)
for i in range(len(majorLocs)):
xyz1[i][newindex] = newval
# Grid points at end of the other plane
xyz2 = copy.deepcopy(xyz0)
newindex = (index + 2) % 3
newval = get_flip_min_max(xyz2[0], newindex, mins, maxs)
for i in range(len(majorLocs)):
xyz2[i][newindex] = newval
lines = zip(xyz1, xyz0, xyz2)
if self.axes._draw_grid:
self.gridlines.set_segments(lines)
self.gridlines.set_color([info['grid']['color']] * len(lines))
self.gridlines.draw(renderer, project=True)
# Draw ticks
tickdir = info['tickdir']
tickdelta = deltas[tickdir]
if highs[tickdir]:
ticksign = 1
else:
ticksign = -1
for tick, loc, label in zip(majorTicks, majorLocs, majorLabels):
if tick is None:
continue
# Get tick line positions
pos = copy.copy(edgep1)
pos[index] = loc
pos[tickdir] = edgep1[tickdir] + info['tick']['outward_factor'] * \
ticksign * tickdelta
x1, y1, z1 = proj3d.proj_transform(pos[0], pos[1], pos[2], \
renderer.M)
pos[tickdir] = edgep1[tickdir] - info['tick']['inward_factor'] * \
ticksign * tickdelta
x2, y2, z2 = proj3d.proj_transform(pos[0], pos[1], pos[2], \
renderer.M)
# Get position of label
labeldeltas = [info['ticklabel']['space_factor'] * x for
x in deltas]
axmask = [True, True, True]
axmask[index] = False
pos[tickdir] = edgep1[tickdir]
pos = move_from_center(pos, centers, labeldeltas, axmask)
lx, ly, lz = proj3d.proj_transform(pos[0], pos[1], pos[2], \
renderer.M)
tick_update_position(tick, (x1, x2), (y1, y2), (lx, ly))
tick.set_label1(label)
tick.set_label2(label)
tick.draw(renderer)
renderer.close_group('axis3d')
def get_view_interval(self):
"""return the Interval instance for this 3d axis view limits"""
return self.v_interval
def set_view_interval(self, vmin, vmax, ignore=False):
if ignore:
self.v_interval = vmin, vmax
else:
Vmin, Vmax = self.get_view_interval()
self.v_interval = min(vmin, Vmin), max(vmax, Vmax)
# TODO: Get this to work properly when mplot3d supports
# the transforms framework.
def get_tightbbox(self, renderer) :
# Currently returns None so that Axis.get_tightbbox
# doesn't return junk info.
return None
# Use classes to look at different data limits
class XAxis(Axis):
def get_data_interval(self):
'return the Interval instance for this axis data limits'
return self.axes.xy_dataLim.intervalx
class YAxis(Axis):
def get_data_interval(self):
'return the Interval instance for this axis data limits'
return self.axes.xy_dataLim.intervaly
class ZAxis(Axis):
def get_data_interval(self):
'return the Interval instance for this axis data limits'
return self.axes.zz_dataLim.intervalx
|
mit
|
gouthambs/karuth-source
|
code/stock_market_volatility.py
|
1
|
3219
|
# -*- coding: utf-8 -*-
"""
Created on Sat Jan 11 14:52:13 2014
@author: Goutham
"""
from pandas.io.data import DataReader
from datetime import date
import matplotlib.pyplot as plt
import pandas as pd
import math
dfsp = DataReader('^GSPC','yahoo',date(1980,1,1),date(2013,12,31))
dfsp["Returns"] = dfsp["Adj Close"]/dfsp["Adj Close"].shift(1) - 1
#dfspmon = dfsp500.resample('M',how='last')
dfsp["Volatility"] = 100*math.sqrt(252)*pd.rolling_std(dfsp["Returns"],45)
#fig = dfsp[["Volatility","Close"]].plot(subplots=True)
#di = DataReader('IQ12110','fred',datetime.date(1980,1,1),date(2014,1,1))
#ax =plt.gca()
#ax.annotate('Crash',xy=(datetime.date(2008,9,1),41.0),xytext=((datetime.date(2002,9,1),81.0)))
f, axarr = plt.subplots(2, sharex=True)
axarr[0].plot(dfsp.index, dfsp["Close"],color='k')
axarr[0].set_ylabel("Index Level")
axarr[0].set_title('S&P 500 Index')
axarr[0].grid(True)
axarr[1].plot(dfsp.index,dfsp["Volatility"],color='k')
axarr[1].set_ylabel("Volatility (%)")
axarr[1].set_xlabel("Date")
axarr[1].grid(True)
axarr[0].axvspan(date(1981,7,1), date(1982,11,30), facecolor='g', alpha=0.5)
axarr[0].axvspan(date(1990,7,1), date(1991,3,30), facecolor='g', alpha=0.5)
axarr[0].axvspan(date(2001,3,1), date(2001,11,30), facecolor='g', alpha=0.5)
axarr[0].axvspan(date(2007,12,1), date(2009,6,30), facecolor='g', alpha=0.5)
axarr[0].annotate('Black Monday',xy=(date(1987,10,19),350.0),\
xytext=((date(1983,10,19),700.0)),arrowprops=dict(arrowstyle="->",
connectionstyle="arc3"))
axarr[0].annotate('2008 Crash',xy=(date(2008,9,16),1500.0),\
xytext=((date(2002,9,16),1800.0)),arrowprops=dict(arrowstyle="->",
connectionstyle="arc3"))
axarr[1].axvspan(date(1981,7,1), date(1982,11,30), facecolor='g', alpha=0.5)
axarr[1].axvspan(date(1990,7,1), date(1991,3,30), facecolor='g', alpha=0.5)
axarr[1].axvspan(date(2001,3,1), date(2001,11,30), facecolor='g', alpha=0.5)
axarr[1].axvspan(date(2007,12,1), date(2009,6,30), facecolor='g', alpha=0.5)
axarr[1].annotate('Black Monday',xy=(date(1987,10,19),41.0),\
xytext=((date(1983,10,19),71.0)),arrowprops=dict(arrowstyle="->",
connectionstyle="arc3"))
axarr[1].annotate('2008 Crash',xy=(date(2008,9,16),41.0),\
xytext=((date(2002,9,16),71.0)),arrowprops=dict(arrowstyle="->",
connectionstyle="arc3"))
axarr[1].annotate('Downgrade',xy=(date(2011,8,5),41.0),\
xytext=((date(2009,2,16),71.0)),arrowprops=dict(arrowstyle="->",
connectionstyle="arc3"))
#dfsp[["Close","Volatility"]].plot(subplots=True)
"""
fig, ax1 = plt.subplots()
ax1.plot(dfsp.index, dfsp["Close"], 'b-')
ax1.set_xlabel('Date')
# Make the y-axis label and tick labels match the line color.
ax1.set_ylabel('Index Level', color='b')
ax2 = ax1.twinx()
ax2.plot(dfsp.index,dfsp["Volatility"],'r-')
plt.show()
"""
"""
plt.axvspan(date(1981,7,1), date(1982,11,30), facecolor='g', alpha=0.5)
plt.axvspan(date(1990,7,1), date(1991,3,30), facecolor='g', alpha=0.5)
plt.axvspan(date(2001,3,1), date(2001,11,30), facecolor='g', alpha=0.5)
plt.axvspan(date(2007,12,1), date(2009,6,30), facecolor='g', alpha=0.5)
"""
|
artistic-2.0
|
boomsbloom/dtm-fmri
|
DTM/for_gensim/lib/python2.7/site-packages/sklearn/tests/test_isotonic.py
|
34
|
14159
|
import warnings
import numpy as np
import pickle
import copy
from sklearn.isotonic import (check_increasing, isotonic_regression,
IsotonicRegression)
from sklearn.utils.testing import (assert_raises, assert_array_equal,
assert_true, assert_false, assert_equal,
assert_array_almost_equal,
assert_warns_message, assert_no_warnings)
from sklearn.utils import shuffle
def test_permutation_invariance():
# check that fit is permutation invariant.
# regression test of missing sorting of sample-weights
ir = IsotonicRegression()
x = [1, 2, 3, 4, 5, 6, 7]
y = [1, 41, 51, 1, 2, 5, 24]
sample_weight = [1, 2, 3, 4, 5, 6, 7]
x_s, y_s, sample_weight_s = shuffle(x, y, sample_weight, random_state=0)
y_transformed = ir.fit_transform(x, y, sample_weight=sample_weight)
y_transformed_s = ir.fit(x_s, y_s, sample_weight=sample_weight_s).transform(x)
assert_array_equal(y_transformed, y_transformed_s)
def test_check_increasing_up():
x = [0, 1, 2, 3, 4, 5]
y = [0, 1.5, 2.77, 8.99, 8.99, 50]
# Check that we got increasing=True and no warnings
is_increasing = assert_no_warnings(check_increasing, x, y)
assert_true(is_increasing)
def test_check_increasing_up_extreme():
x = [0, 1, 2, 3, 4, 5]
y = [0, 1, 2, 3, 4, 5]
# Check that we got increasing=True and no warnings
is_increasing = assert_no_warnings(check_increasing, x, y)
assert_true(is_increasing)
def test_check_increasing_down():
x = [0, 1, 2, 3, 4, 5]
y = [0, -1.5, -2.77, -8.99, -8.99, -50]
# Check that we got increasing=False and no warnings
is_increasing = assert_no_warnings(check_increasing, x, y)
assert_false(is_increasing)
def test_check_increasing_down_extreme():
x = [0, 1, 2, 3, 4, 5]
y = [0, -1, -2, -3, -4, -5]
# Check that we got increasing=False and no warnings
is_increasing = assert_no_warnings(check_increasing, x, y)
assert_false(is_increasing)
def test_check_ci_warn():
x = [0, 1, 2, 3, 4, 5]
y = [0, -1, 2, -3, 4, -5]
# Check that we got increasing=False and CI interval warning
is_increasing = assert_warns_message(UserWarning, "interval",
check_increasing,
x, y)
assert_false(is_increasing)
def test_isotonic_regression():
y = np.array([3, 7, 5, 9, 8, 7, 10])
y_ = np.array([3, 6, 6, 8, 8, 8, 10])
assert_array_equal(y_, isotonic_regression(y))
y = np.array([10, 0, 2])
y_ = np.array([4, 4, 4])
assert_array_equal(y_, isotonic_regression(y))
x = np.arange(len(y))
ir = IsotonicRegression(y_min=0., y_max=1.)
ir.fit(x, y)
assert_array_equal(ir.fit(x, y).transform(x), ir.fit_transform(x, y))
assert_array_equal(ir.transform(x), ir.predict(x))
# check that it is immune to permutation
perm = np.random.permutation(len(y))
ir = IsotonicRegression(y_min=0., y_max=1.)
assert_array_equal(ir.fit_transform(x[perm], y[perm]),
ir.fit_transform(x, y)[perm])
assert_array_equal(ir.transform(x[perm]), ir.transform(x)[perm])
# check we don't crash when all x are equal:
ir = IsotonicRegression()
assert_array_equal(ir.fit_transform(np.ones(len(x)), y), np.mean(y))
def test_isotonic_regression_ties_min():
# Setup examples with ties on minimum
x = [1, 1, 2, 3, 4, 5]
y = [1, 2, 3, 4, 5, 6]
y_true = [1.5, 1.5, 3, 4, 5, 6]
# Check that we get identical results for fit/transform and fit_transform
ir = IsotonicRegression()
ir.fit(x, y)
assert_array_equal(ir.fit(x, y).transform(x), ir.fit_transform(x, y))
assert_array_equal(y_true, ir.fit_transform(x, y))
def test_isotonic_regression_ties_max():
# Setup examples with ties on maximum
x = [1, 2, 3, 4, 5, 5]
y = [1, 2, 3, 4, 5, 6]
y_true = [1, 2, 3, 4, 5.5, 5.5]
# Check that we get identical results for fit/transform and fit_transform
ir = IsotonicRegression()
ir.fit(x, y)
assert_array_equal(ir.fit(x, y).transform(x), ir.fit_transform(x, y))
assert_array_equal(y_true, ir.fit_transform(x, y))
def test_isotonic_regression_ties_secondary_():
"""
Test isotonic regression fit, transform and fit_transform
against the "secondary" ties method and "pituitary" data from R
"isotone" package, as detailed in: J. d. Leeuw, K. Hornik, P. Mair,
Isotone Optimization in R: Pool-Adjacent-Violators Algorithm
(PAVA) and Active Set Methods
Set values based on pituitary example and
the following R command detailed in the paper above:
> library("isotone")
> data("pituitary")
> res1 <- gpava(pituitary$age, pituitary$size, ties="secondary")
> res1$x
`isotone` version: 1.0-2, 2014-09-07
R version: R version 3.1.1 (2014-07-10)
"""
x = [8, 8, 8, 10, 10, 10, 12, 12, 12, 14, 14]
y = [21, 23.5, 23, 24, 21, 25, 21.5, 22, 19, 23.5, 25]
y_true = [22.22222, 22.22222, 22.22222, 22.22222, 22.22222, 22.22222,
22.22222, 22.22222, 22.22222, 24.25, 24.25]
# Check fit, transform and fit_transform
ir = IsotonicRegression()
ir.fit(x, y)
assert_array_almost_equal(ir.transform(x), y_true, 4)
assert_array_almost_equal(ir.fit_transform(x, y), y_true, 4)
def test_isotonic_regression_reversed():
y = np.array([10, 9, 10, 7, 6, 6.1, 5])
y_ = IsotonicRegression(increasing=False).fit_transform(
np.arange(len(y)), y)
assert_array_equal(np.ones(y_[:-1].shape), ((y_[:-1] - y_[1:]) >= 0))
def test_isotonic_regression_auto_decreasing():
# Set y and x for decreasing
y = np.array([10, 9, 10, 7, 6, 6.1, 5])
x = np.arange(len(y))
# Create model and fit_transform
ir = IsotonicRegression(increasing='auto')
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
y_ = ir.fit_transform(x, y)
# work-around for pearson divide warnings in scipy <= 0.17.0
assert_true(all(["invalid value encountered in "
in str(warn.message) for warn in w]))
# Check that relationship decreases
is_increasing = y_[0] < y_[-1]
assert_false(is_increasing)
def test_isotonic_regression_auto_increasing():
    # Set y and x for increasing
y = np.array([5, 6.1, 6, 7, 10, 9, 10])
x = np.arange(len(y))
# Create model and fit_transform
ir = IsotonicRegression(increasing='auto')
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
y_ = ir.fit_transform(x, y)
# work-around for pearson divide warnings in scipy <= 0.17.0
assert_true(all(["invalid value encountered in "
in str(warn.message) for warn in w]))
# Check that relationship increases
is_increasing = y_[0] < y_[-1]
assert_true(is_increasing)
def test_assert_raises_exceptions():
ir = IsotonicRegression()
rng = np.random.RandomState(42)
assert_raises(ValueError, ir.fit, [0, 1, 2], [5, 7, 3], [0.1, 0.6])
assert_raises(ValueError, ir.fit, [0, 1, 2], [5, 7])
assert_raises(ValueError, ir.fit, rng.randn(3, 10), [0, 1, 2])
assert_raises(ValueError, ir.transform, rng.randn(3, 10))
def test_isotonic_sample_weight_parameter_default_value():
# check if default value of sample_weight parameter is one
ir = IsotonicRegression()
# random test data
rng = np.random.RandomState(42)
n = 100
x = np.arange(n)
y = rng.randint(-50, 50, size=(n,)) + 50. * np.log(1 + np.arange(n))
# check if value is correctly used
weights = np.ones(n)
y_set_value = ir.fit_transform(x, y, sample_weight=weights)
y_default_value = ir.fit_transform(x, y)
assert_array_equal(y_set_value, y_default_value)
def test_isotonic_min_max_boundaries():
# check if min value is used correctly
ir = IsotonicRegression(y_min=2, y_max=4)
n = 6
x = np.arange(n)
y = np.arange(n)
y_test = [2, 2, 2, 3, 4, 4]
y_result = np.round(ir.fit_transform(x, y))
assert_array_equal(y_result, y_test)
def test_isotonic_sample_weight():
ir = IsotonicRegression()
x = [1, 2, 3, 4, 5, 6, 7]
y = [1, 41, 51, 1, 2, 5, 24]
sample_weight = [1, 2, 3, 4, 5, 6, 7]
expected_y = [1, 13.95, 13.95, 13.95, 13.95, 13.95, 24]
received_y = ir.fit_transform(x, y, sample_weight=sample_weight)
assert_array_equal(expected_y, received_y)
def test_isotonic_regression_oob_raise():
# Set y and x
y = np.array([3, 7, 5, 9, 8, 7, 10])
x = np.arange(len(y))
# Create model and fit
ir = IsotonicRegression(increasing='auto', out_of_bounds="raise")
ir.fit(x, y)
# Check that an exception is thrown
assert_raises(ValueError, ir.predict, [min(x) - 10, max(x) + 10])
def test_isotonic_regression_oob_clip():
# Set y and x
y = np.array([3, 7, 5, 9, 8, 7, 10])
x = np.arange(len(y))
# Create model and fit
ir = IsotonicRegression(increasing='auto', out_of_bounds="clip")
ir.fit(x, y)
# Predict from training and test x and check that min/max match.
y1 = ir.predict([min(x) - 10, max(x) + 10])
y2 = ir.predict(x)
assert_equal(max(y1), max(y2))
assert_equal(min(y1), min(y2))
def test_isotonic_regression_oob_nan():
# Set y and x
y = np.array([3, 7, 5, 9, 8, 7, 10])
x = np.arange(len(y))
# Create model and fit
ir = IsotonicRegression(increasing='auto', out_of_bounds="nan")
ir.fit(x, y)
# Predict from training and test x and check that we have two NaNs.
y1 = ir.predict([min(x) - 10, max(x) + 10])
assert_equal(sum(np.isnan(y1)), 2)
def test_isotonic_regression_oob_bad():
# Set y and x
y = np.array([3, 7, 5, 9, 8, 7, 10])
x = np.arange(len(y))
# Create model and fit
ir = IsotonicRegression(increasing='auto', out_of_bounds="xyz")
# Make sure that we throw an error for bad out_of_bounds value
assert_raises(ValueError, ir.fit, x, y)
def test_isotonic_regression_oob_bad_after():
# Set y and x
y = np.array([3, 7, 5, 9, 8, 7, 10])
x = np.arange(len(y))
# Create model and fit
ir = IsotonicRegression(increasing='auto', out_of_bounds="raise")
# Make sure that we throw an error for bad out_of_bounds value in transform
ir.fit(x, y)
ir.out_of_bounds = "xyz"
assert_raises(ValueError, ir.transform, x)
def test_isotonic_regression_pickle():
y = np.array([3, 7, 5, 9, 8, 7, 10])
x = np.arange(len(y))
# Create model and fit
ir = IsotonicRegression(increasing='auto', out_of_bounds="clip")
ir.fit(x, y)
ir_ser = pickle.dumps(ir, pickle.HIGHEST_PROTOCOL)
ir2 = pickle.loads(ir_ser)
np.testing.assert_array_equal(ir.predict(x), ir2.predict(x))
def test_isotonic_duplicate_min_entry():
x = [0, 0, 1]
y = [0, 0, 1]
ir = IsotonicRegression(increasing=True, out_of_bounds="clip")
ir.fit(x, y)
all_predictions_finite = np.all(np.isfinite(ir.predict(x)))
assert_true(all_predictions_finite)
def test_isotonic_ymin_ymax():
# Test from @NelleV's issue:
# https://github.com/scikit-learn/scikit-learn/issues/6921
x = np.array([1.263, 1.318, -0.572, 0.307, -0.707, -0.176, -1.599, 1.059,
1.396, 1.906, 0.210, 0.028, -0.081, 0.444, 0.018, -0.377,
-0.896, -0.377, -1.327, 0.180])
y = isotonic_regression(x, y_min=0., y_max=0.1)
assert(np.all(y >= 0))
assert(np.all(y <= 0.1))
# Also test decreasing case since the logic there is different
y = isotonic_regression(x, y_min=0., y_max=0.1, increasing=False)
assert(np.all(y >= 0))
assert(np.all(y <= 0.1))
# Finally, test with only one bound
y = isotonic_regression(x, y_min=0., increasing=False)
assert(np.all(y >= 0))
def test_isotonic_zero_weight_loop():
# Test from @ogrisel's issue:
# https://github.com/scikit-learn/scikit-learn/issues/4297
# Get deterministic RNG with seed
rng = np.random.RandomState(42)
# Create regression and samples
regression = IsotonicRegression()
n_samples = 50
x = np.linspace(-3, 3, n_samples)
y = x + rng.uniform(size=n_samples)
# Get some random weights and zero out
w = rng.uniform(size=n_samples)
w[5:8] = 0
regression.fit(x, y, sample_weight=w)
# This will hang in failure case.
regression.fit(x, y, sample_weight=w)
def test_fast_predict():
# test that the faster prediction change doesn't
# affect out-of-sample predictions:
# https://github.com/scikit-learn/scikit-learn/pull/6206
rng = np.random.RandomState(123)
n_samples = 10 ** 3
# X values over the -10,10 range
X_train = 20.0 * rng.rand(n_samples) - 10
y_train = np.less(
rng.rand(n_samples),
1.0 / (1.0 + np.exp(-X_train))
).astype('int64')
weights = rng.rand(n_samples)
# we also want to test that everything still works when some weights are 0
weights[rng.rand(n_samples) < 0.1] = 0
slow_model = IsotonicRegression(y_min=0, y_max=1, out_of_bounds="clip")
fast_model = IsotonicRegression(y_min=0, y_max=1, out_of_bounds="clip")
# Build interpolation function with ALL input data, not just the
# non-redundant subset. The following 2 lines are taken from the
# .fit() method, without removing unnecessary points
X_train_fit, y_train_fit = slow_model._build_y(X_train, y_train,
sample_weight=weights,
trim_duplicates=False)
slow_model._build_f(X_train_fit, y_train_fit)
# fit with just the necessary data
fast_model.fit(X_train, y_train, sample_weight=weights)
X_test = 20.0 * rng.rand(n_samples) - 10
y_pred_slow = slow_model.predict(X_test)
y_pred_fast = fast_model.predict(X_test)
assert_array_equal(y_pred_slow, y_pred_fast)
def test_isotonic_copy_before_fit():
# https://github.com/scikit-learn/scikit-learn/issues/6628
ir = IsotonicRegression()
copy.copy(ir)
|
mit
|
benitesf/Skin-Lesion-Analysis-Towards-Melanoma-Detection
|
test_prepro.py
|
1
|
7275
|
from skimage.color import rgb2hsv, hsv2rgb
from matplotlib import pyplot as plt
from scipy.misc import imread
import numpy as np
def sampling_from_corners(margin, extract, shape):
"""
Sampling from four corners
Parameters
----------
margin: scalar
A margin from the corners
extract: scalar
Size of the block sampling, extract x extract
shape: [a,b] list
Shape of the image to sampling
Returns
-------
    Y, X: 1D arrays of the row and column pixel indices of the sampled corner blocks
"""
# Left-top corner
y = list(range(margin, margin + extract))
x = list(range(margin, margin + extract))
Xlt, Ylt = np.meshgrid(x, y, copy=False)
# Right-top corner
y = list(range(margin, margin + extract))
x = list(range(shape[1] - margin - extract, shape[1] - margin))
Xrt, Yrt = np.meshgrid(x, y, copy=False)
# Left-bottom corner
y = list(range(shape[0] - margin - extract, shape[0] - margin))
x = list(range(margin, margin + extract))
Xlb, Ylb = np.meshgrid(x, y, copy=False)
# Right-bottom corner
y = list(range(shape[0] - margin - extract, shape[0] - margin))
x = list(range(shape[1] - margin - extract, shape[1] - margin))
Xrb, Yrb = np.meshgrid(x, y, copy=False)
X = np.vstack((Xlt, Xrt, Xlb, Xrb)).flatten()
Y = np.vstack((Ylt, Yrt, Ylb, Yrb)).flatten()
return Y, X
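# Illustrative usage (hypothetical image shape):
#   Y, X = sampling_from_corners(margin=20, extract=80, shape=(512, 768))
#   Y.shape == X.shape == (4 * 80 * 80,)   # four extract-by-extract corner blocks, flattened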
def sampling_from_frames(margin, extract, shape):
"""
Sampling from the frames
Parameters
----------
margin: scalar
A margin from the corners
extract: scalar
Size of the frame sampling
shape: [a,b] list
Shape of the image to sampling
Returns
-------
    Y, X: 1D arrays of the row and column pixel indices of the sampled frame
"""
# Top frame
y = list(range(margin, margin + extract))
x = list(range(margin, shape[1] - margin))
Xt, Yt = np.meshgrid(x, y, copy=False)
# Right frame
y = list(range(margin + extract, shape[0] - margin - extract))
x = list(range(shape[1] - margin - extract, shape[1] - margin))
Xr, Yr = np.meshgrid(x, y, copy=False)
# Bottom frame
y = list(range(shape[0] - margin - extract, shape[0] - margin))
x = list(range(margin, shape[1] - margin))
Xb, Yb = np.meshgrid(x, y, copy=False)
# Left frame
y = list(range(margin + extract, shape[0] - margin - extract))
x = list(range(margin, margin + extract))
Xl, Yl = np.meshgrid(x, y, copy=False)
X = np.concatenate((Xt.flatten(), Xr.flatten(), Xb.flatten(), Xl.flatten()))
Y = np.concatenate((Yt.flatten(), Yr.flatten(), Yb.flatten(), Yl.flatten()))
return Y, X
def quadratic_polynomial_function(Y, X):
return np.array([X ** 2, Y ** 2, X * Y, X, Y, X * 0 + 1]).T
def cubic_polynomial_function(Y, X):
return np.array([X ** 3, Y ** 3, (X ** 2) * Y, X * (Y ** 2), X ** 2, Y ** 2, X * Y, X, Y, X * 0 + 1]).T
def apply_quadratic_function(V, coeff):
Vproc = np.copy(V)
shape = Vproc.shape
for y in range(0, shape[0]):
for x in range(0, shape[1]):
Vproc[y, x] /= coeff[0] * (x ** 2) + coeff[1] * (y ** 2) + coeff[2] * x * y + coeff[3] * x + coeff[4] * y + \
coeff[5]
return Vproc
def apply_cubic_function(V, coeff):
Vproc = np.copy(V)
shape = Vproc.shape
for y in range(0, shape[0]):
for x in range(0, shape[1]):
Vproc[y, x] /= coeff[0] * (x ** 3) + coeff[1] * (y ** 3) + coeff[2] * (x ** 2) * y + coeff[3] * x * (y ** 2) + \
coeff[4] * (x ** 2) + coeff[5] * (y ** 2) + coeff[6] * x * y + coeff[7] * x + coeff[8] * y + coeff[9]
return Vproc
def in_range(X):
min = X.min()
max = X.max()
return (X - min) / (max - min)
def retrieve_color(X, muorig):
mu = X.mean()
return X*muorig/mu
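# Quick sanity examples for the two helpers above (values assumed, not executed):
#   in_range(np.array([2., 4., 6.]))          -> array([0. , 0.5, 1. ])
#   retrieve_color(np.array([0.2, 0.4]), 0.6) -> array([0.4, 0.8])   # mean rescaled to 0.6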
"""
Fitting polynomial function
---------------------------
"""
melanoma_path = 'image/ISIC-2017_Training_Data_Clean/'
melanoma_extension = 'jpg'
image = imread(melanoma_path + 'ISIC_0000386.jpg')
hsv = rgb2hsv(image)
V = np.copy(hsv[:, :, 2])
extract = 80 # Side length (in pixels) of the square block sampled at each corner / along each frame
margin = 20 # Margin from the borders
shape = image.shape[0:2]
"""
Sampling pixels
---------------
"""
Yc, Xc = sampling_from_corners(margin=margin, extract=extract, shape=shape)
Yf, Xf = sampling_from_frames(margin=margin, extract=extract, shape=shape)
Zc = np.zeros((Xc.shape))
Zf = np.zeros((Xf.shape))
for j in range(0, Zc.shape[0]):
Zc[j] = np.copy(V[Yc[j], Xc[j]])
for j in range(0, Zf.shape[0]):
Zf[j] = np.copy(V[Yf[j], Xf[j]])
"""
Quadratic and cubic polynomial
--------------------
"""
Ac2 = quadratic_polynomial_function(Yc, Xc)
Af2 = quadratic_polynomial_function(Yf, Xf)
Ac3 = cubic_polynomial_function(Yc, Xc)
Af3 = cubic_polynomial_function(Yf, Xf)
"""
Fitting polynomial
------------------
"""
coeffc2 = np.linalg.lstsq(Ac2, Zc)[0]
coefff2 = np.linalg.lstsq(Af2, Zf)[0]
coeffc3 = np.linalg.lstsq(Ac3, Zc)[0]
coefff3 = np.linalg.lstsq(Af3, Zf)[0]
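# Each np.linalg.lstsq call above solves min ||A @ coeff - Z||^2 for the polynomial
# coefficients; the fitted surface models the slowly varying illumination, which the
# apply_*_function helpers then divide out of V. A residual check could look like
# (illustrative only):
#   resid_c2 = Zc - Ac2 @ coeffc2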
"""
Processed
---------
"""
Vprocc2 = apply_quadratic_function(V, coeffc2)
Vprocf2 = apply_quadratic_function(V, coefff2)
Vprocc3 = apply_cubic_function(V, coeffc3)
Vprocf3 = apply_cubic_function(V, coefff3)
# Convert Value into the range 0-1
Vprocc2 = in_range(Vprocc2)
Vprocf2 = in_range(Vprocf2)
Vprocc3 = in_range(Vprocc3)
Vprocf3 = in_range(Vprocf3)
# ****************************************
# Images without retrieve color
fhsvc2 = np.copy(hsv)
fhsvf2 = np.copy(hsv)
fhsvc3 = np.copy(hsv)
fhsvf3 = np.copy(hsv)
fhsvc2[:, :, 2] = np.copy(Vprocc2)
fhsvf2[:, :, 2] = np.copy(Vprocf2)
fhsvc3[:, :, 2] = np.copy(Vprocc3)
fhsvf3[:, :, 2] = np.copy(Vprocf3)
fattenuatedc2 = hsv2rgb(fhsvc2)
fattenuatedf2 = hsv2rgb(fhsvf2)
fattenuatedc3 = hsv2rgb(fhsvc3)
fattenuatedf3 = hsv2rgb(fhsvf3)
# ****************************************
# Retrieve true color to skin
muorig = V.mean()
Vnewc2 = retrieve_color(Vprocc2, muorig)
Vnewf2 = retrieve_color(Vprocf2, muorig)
Vnewc3 = retrieve_color(Vprocc3, muorig)
Vnewf3 = retrieve_color(Vprocf3, muorig)
# Convert Value into the range 0-1
Vnewc2 = in_range(Vnewc2)
Vnewf2 = in_range(Vnewf2)
Vnewc3 = in_range(Vnewc3)
Vnewf3 = in_range(Vnewf3)
hsvc2 = np.copy(hsv)
hsvf2 = np.copy(hsv)
hsvc3 = np.copy(hsv)
hsvf3 = np.copy(hsv)
hsvc2[:, :, 2] = np.copy(Vnewc2)
hsvf2[:, :, 2] = np.copy(Vnewf2)
hsvc3[:, :, 2] = np.copy(Vnewc3)
hsvf3[:, :, 2] = np.copy(Vnewf3)
attenuatedc2 = hsv2rgb(hsvc2)
attenuatedf2 = hsv2rgb(hsvf2)
attenuatedc3 = hsv2rgb(hsvc3)
attenuatedf3 = hsv2rgb(hsvf3)
fig, axes = plt.subplots(nrows=2, ncols=5, figsize=(6, 4))
ax = axes[0][0]
ax.imshow(image)
ax.axis('off')
# ****************************
ax = axes[0][1]
ax.imshow(fattenuatedc2)
ax.axis('off')
ax = axes[0][2]
ax.imshow(fattenuatedf2)
ax.axis('off')
ax = axes[0][3]
ax.imshow(fattenuatedc3)
ax.axis('off')
ax = axes[0][4]
ax.imshow(fattenuatedf3)
ax.axis('off')
# ****************************
axes[1][0].axis('off') # unused slot below the original image
ax = axes[1][1]
ax.imshow(attenuatedc2)
ax.axis('off')
ax = axes[1][2]
ax.imshow(attenuatedf2)
ax.axis('off')
ax = axes[1][3]
ax.imshow(attenuatedc3)
ax.axis('off')
ax = axes[1][4]
ax.imshow(attenuatedf3)
ax.axis('off')
plt.show()
"""
Print the corners
-----------------
for y,x in zip(Y, X):
image[y,x,0] = 255
image[y,x,1:3] = 0
io.imshow(image)
io.show()
"""
|
mit
|
CGATOxford/CGATPipelines
|
obsolete/pipeline_transcriptdiffexpression/trackers/Isoform.py
|
1
|
5494
|
import pandas as pd
import numpy as np
from CGATReport.Tracker import SingleTableTrackerRows
from CGATReport.Tracker import SingleTableTrackerHistogram
from CGATReport.Tracker import *
from CGATReport.Utils import PARAMS as P
from IsoformReport import *
class imagesTracker(TrackerImages):
    '''Convenience Tracker for globbing images for gallery plot'''
def __init__(self, *args, **kwargs):
Tracker.__init__(self, *args, **kwargs)
if "glob" not in kwargs:
raise ValueError("TrackerImages requires a:glob: parameter")
self.glob = kwargs["glob"]
class TranscriptBiotypeSummary(IsoformTracker):
pattern = "(.*)_DEresults$"
direction = ""
def __call__(self, track, slice=None):
statement = '''SELECT transcript_id, transcript_biotype, significant
FROM %(track)s_DEresults
WHERE l2fold %(direction)s 0;'''
df = pd.DataFrame(self.getAll(statement))
# this should be extracted from PARAMS!
keep_biotypes = ["protein_coding", "retained_intron",
"processed_transcript", "nonsense_mediated_decay",
"lincRNA", "IG_C_gene", "IG_D_gene", "IG_J_gene",
"IG_LV_gene", "IG_V_gene"]
df = df[[x in keep_biotypes for x in df['transcript_biotype']]]
grouped = df.groupby(['significant', 'transcript_biotype'])
df_agg = grouped.aggregate({"transcript_id": 'count'})
df_agg.columns = ["Count"]
# TS: must be able to do this more succinctly!
fraction = []
for l in df_agg.groupby(level=0).agg(
lambda x: list(x/float(np.sum(x)))).values.flatten():
fraction.extend(l)
cumsum_values = []
for l in df_agg.groupby(level=0).agg(
lambda x: list(np.cumsum(x)/float(np.sum(x)))).values.flatten():
cumsum_values.extend(l)
previous = 0
cumsum_centred = []
for value in cumsum_values:
if previous == 1:
previous = 0
cumsum_centred.append((previous+value)/2)
previous = value
df_agg['fraction'] = fraction
df_agg['cumsum'] = cumsum_values
df_agg['cumsum_centres'] = cumsum_centred
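        # A more succinct alternative to the two loops above (sketch, assuming the
        # ('significant', 'transcript_biotype') MultiIndex is still in place):
        #   df_agg['fraction'] = df_agg.groupby(level=0)['Count'].transform(lambda x: x / x.sum())
        #   df_agg['cumsum'] = df_agg.groupby(level=0)['Count'].transform(lambda x: x.cumsum() / x.sum())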
df_agg.reset_index(inplace=True)
df_agg['significant'] = ["Significant" if x == 1
else "Not significant"
for x in df_agg['significant']]
return df_agg
class TranscriptBiotypeSummaryUp(TranscriptBiotypeSummary):
direction = ">"
class TranscriptBiotypeSummaryDown(TranscriptBiotypeSummary):
direction = "<"
class TranscriptExpressionOrdered(IsoformTracker):
pattern = "(.*)_tpm$"
def __call__(self, track, slice=None):
def ordered_log(array):
array = sorted(array, reverse=True)
return np.log10(array)
statement = '''SELECT * FROM %(track)s_tpm;'''
df = self.getDataFrame(statement)
df.drop(["gene_id", "gene_name",
"transcript_biotype", "transcript_id"],
axis=1, inplace=True)
df = df.apply(ordered_log, axis=0)
df["index"] = df.index
df = pd.melt(df, id_vars=["index"])
df = df.replace([np.inf, -np.inf], np.nan).dropna()
return df
class TranscriptExpressionOrderedAnnotated(IsoformTracker):
pattern = "(.*)_tpm$"
def __call__(self, track, slice=None):
def ordered_log(array):
array = sorted(array, reverse=True)
return np.log10(array)
statement = '''SELECT * FROM %(track)s_tpm;'''
df = self.getDataFrame(statement)
df.drop(["gene_id", "gene_name",
"transcript_biotype", "transcript_id"],
axis=1, inplace=True)
df = df.apply(ordered_log, axis=0)
df["index"] = df.index
df = pd.melt(df, id_vars=["index"])
df = df.replace([np.inf, -np.inf], np.nan).dropna()
df.set_index("variable", inplace=True, drop=False)
statement2 = '''SELECT track, _group FROM %(track)s_design;'''
df_design = self.getDataFrame(statement2)
df_design['track'] = [x.replace("-", "_") for x in df_design['track']]
df_design.set_index("track", inplace=True)
df = df.join(df_design, how="left")
df.columns = [x.replace("_group", "group") for x in df.columns]
return df
class TranscriptNumberSamplesExpressed(IsoformTracker):
pattern = "(.*)_tpm$"
def __call__(self, track, slice=None):
statement = '''SELECT * FROM %(track)s_tpm;'''
df = pd.DataFrame(self.getAll(statement))
df = df.set_index(["transcript_id"])
df.drop(["gene_id", "gene_name", "transcript_biotype"],
axis=1, inplace=True)
final_df = pd.DataFrame()
for threshold in (0.01, 0.1, 1):
df_tmp = pd.DataFrame(
{"Count": df.apply(func=lambda row:
sum([x > threshold for x in row]), axis=1),
"No_transcripts": list(range(0, len(df.index)))})
df_tmp = df_tmp.ix[df_tmp['Count'] > 0, :]
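            # Note: .ix was removed in later pandas; an equivalent selection (sketch) is
            #   df_tmp = df_tmp.loc[df_tmp['Count'] > 0, :]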
df_tmp = df_tmp.groupby(["Count"]).count()
df_tmp.reset_index(inplace=True)
df_tmp["threshold"] = [threshold]*len(df_tmp.index)
final_df = pd.concat([final_df, df_tmp])
return final_df
|
mit
|
aleksandr-bakanov/astropy
|
astropy/io/misc/pandas/connect.py
|
5
|
3527
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# This file connects the readers/writers to the astropy.table.Table class
import functools
from astropy.table import Table
import astropy.io.registry as io_registry
__all__ = ['PANDAS_FMTS']
# Astropy users normally expect to not have an index, so default to turn
# off writing the index. This structure allows for astropy-specific
# customization of all options.
PANDAS_FMTS = {'csv': {'read': {},
'write': {'index': False}},
'fwf': {'read': {}}, # No writer
'html': {'read': {},
'write': {'index': False}},
'json': {'read': {},
'write': {}}}
PANDAS_PREFIX = 'pandas.'
# Imports for reading HTML
_IMPORTS = False
_HAS_BS4 = False
_HAS_LXML = False
_HAS_HTML5LIB = False
def import_html_libs():
"""Try importing dependencies for reading HTML.
This is copied from pandas.io.html
"""
# import things we need
# but make this done on a first use basis
global _IMPORTS
if _IMPORTS:
return
global _HAS_BS4, _HAS_LXML, _HAS_HTML5LIB
try:
import bs4 # noqa
_HAS_BS4 = True
except ImportError:
pass
try:
import lxml # noqa
_HAS_LXML = True
except ImportError:
pass
try:
import html5lib # noqa
_HAS_HTML5LIB = True
except ImportError:
pass
_IMPORTS = True
def _pandas_read(fmt, filespec, **kwargs):
"""Provide io Table connector to read table using pandas.
"""
try:
import pandas
except ImportError:
raise ImportError('pandas must be installed to use pandas table reader')
pandas_fmt = fmt[len(PANDAS_PREFIX):] # chop the 'pandas.' in front
read_func = getattr(pandas, 'read_' + pandas_fmt)
# Get defaults and then override with user-supplied values
read_kwargs = PANDAS_FMTS[pandas_fmt]['read'].copy()
read_kwargs.update(kwargs)
# Special case: pandas defaults to HTML lxml for reading, but does not attempt
# to fall back to bs4 + html5lib. So do that now for convenience if user has
# not specifically selected a flavor. If things go wrong the pandas exception
# with instruction to install a library will come up.
if pandas_fmt == 'html' and 'flavor' not in kwargs:
import_html_libs()
if (not _HAS_LXML and _HAS_HTML5LIB and _HAS_BS4):
read_kwargs['flavor'] = 'bs4'
df = read_func(filespec, **read_kwargs)
# Special case for HTML
if pandas_fmt == 'html':
df = df[0]
return Table.from_pandas(df)
def _pandas_write(fmt, tbl, filespec, **kwargs):
"""Provide io Table connector to write table using pandas.
"""
pandas_fmt = fmt[len(PANDAS_PREFIX):] # chop the 'pandas.' in front
# Get defaults and then override with user-supplied values
write_kwargs = PANDAS_FMTS[pandas_fmt]['write'].copy()
write_kwargs.update(kwargs)
df = tbl.to_pandas()
write_method = getattr(df, 'to_' + pandas_fmt)
return write_method(filespec, **write_kwargs)
for pandas_fmt, defaults in PANDAS_FMTS.items():
fmt = PANDAS_PREFIX + pandas_fmt # Full format specifier
if 'read' in defaults:
func = functools.partial(_pandas_read, fmt)
io_registry.register_reader(fmt, Table, func)
if 'write' in defaults:
func = functools.partial(_pandas_write, fmt)
io_registry.register_writer(fmt, Table, func)
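# Illustrative usage of the connectors registered above ('data.csv' is a hypothetical file):
#   from astropy.table import Table
#   t = Table.read('data.csv', format='pandas.csv')
#   t.write('out.html', format='pandas.html')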
|
bsd-3-clause
|
parenthetical-e/wheelerdata
|
preprocess/archive/metadata_fh_rt.py
|
1
|
3016
|
#! /usr/local/bin/python
"""A program that takes a .fidl file from the Face/House dataset and converts then to a csv file suitable for import and use as labels in a
sklearn classfication experiemnt, and also produces a .mat file suitable
for use in SPM.
Usage: metadata_butterfly_rt fidl
"""
import os
import sys
import pandas as pd
import numpy as np
from fidl.convert import (fidl_to_csv, fuzzy_label, tr_time, fill_tr_gaps,
nod_mat)
# ----
# 1. Get the name of the file to process
if len(sys.argv[1:]) > 1:
raise ValueError("Too many arguments.")
fidlfile = sys.argv[1]
basename = os.path.splitext(fidlfile)[0]
## drop the fidl extension
# ----
# 0. "Globals"
triallen = 5 ## The length (in TR) of each trial
condcol = 2 ## The location in csv_f of the cond names
trialcol = 4 ## The location in csv_f of the trial designator
# and convert the fidl to csv,
# and write that csv to disk.
csv_f = basename + ".csv"
fidl_to_csv(fidlfile, csv_f, 0)
# ----
# 2. Separate trials from events we want to ignore
triallab = {
"1FaceCor1" : "trial",
"2FaceCor2" : "trial",
"3FaceCor3" : "trial",
"4FaceCor4" : "trial",
"5FaceCor5" : "trial",
"6HouseCor1" : "trial",
"7HouseCor2" : "trial",
"8HouseCor3" : "trial",
"9HouseCor4" : "trial",
"10HouseCor5" : "trial",
"11NoiseResp1" : "trial",
"12NoiseResp2" : "trial",
"13NoiseResp3" : "trial",
"14NoiseResp4" : "trial",
"15NoiseResp5" : "trial",
"16NoiseResp5" : "trial",
"16MultiResp" : "trial",
"17NoResp": "trial"}
fuzzy_label(csv_f, condcol, triallab, "trial", header=True)
# ----
# 4. Add labels for rt independent of stim type
rtlab = {
"Cor1" : "rt1",
"Cor2" : "rt2",
"Cor3" : "rt3",
"Cor4" : "rt4",
"Resp1" : "rt1",
"Resp2" : "rt2",
"Resp3" : "rt3",
"Resp4" : "rt4"}
## Dropping RT5 as it is missing from some
## SS and has low counts overall anyway
fuzzy_label(csv_f, condcol, rtlab, "rt", header=True)
# ----
# 5. Add labels for stim type
explab = {
"Face" : "face",
"House" : "house",
"Noise" : "noise"}
fuzzy_label(csv_f, condcol, explab, "exp", header=True)
# ----
# 6. Add labels for subject/fidl
sublab = {"trial" : basename}
fuzzy_label(csv_f, trialcol, sublab, "scode", header=True)
# ----
# 7. Expand labels so they cover every TR
final_ncol = 8
tdur = {"trial" : triallen}
tr_time(csv_f, trialcol, tdur, drop=True, header=True)
fill_tr_gaps("trtime_" + csv_f, final_ncol)
## As trial labels were added first, following
## csv conversion, the trial label lives in col 3
# ----
# 8. Create the NOD mat file
#
# Get the csv file"s data
# then get onsets, names and
# create durations
csvdata = pd.read_csv(csv_f)
names = csvdata["rt"]
trials = csvdata["trial"]
onsets = csvdata["TR"]
durations = np.array([triallen, ] * len(trials))
nod_mat(names, onsets, durations, "nod_" + basename + ".mat")
|
bsd-2-clause
|
ilyes14/scikit-learn
|
sklearn/decomposition/tests/test_nmf.py
|
47
|
8566
|
import numpy as np
from scipy import linalg
from sklearn.decomposition import nmf
from scipy.sparse import csc_matrix
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import ignore_warnings
from sklearn.base import clone
random_state = np.random.mtrand.RandomState(0)
def test_initialize_nn_output():
# Test that initialization does not return negative values
data = np.abs(random_state.randn(10, 10))
for init in ('random', 'nndsvd', 'nndsvda', 'nndsvdar'):
W, H = nmf._initialize_nmf(data, 10, init=init, random_state=0)
assert_false((W < 0).any() or (H < 0).any())
@ignore_warnings
def test_parameter_checking():
A = np.ones((2, 2))
name = 'spam'
msg = "Invalid solver parameter: got 'spam' instead of one of"
assert_raise_message(ValueError, msg, nmf.NMF(solver=name).fit, A)
msg = "Invalid init parameter: got 'spam' instead of one of"
assert_raise_message(ValueError, msg, nmf.NMF(init=name).fit, A)
msg = "Invalid sparseness parameter: got 'spam' instead of one of"
assert_raise_message(ValueError, msg, nmf.NMF(sparseness=name).fit, A)
msg = "Negative values in data passed to"
assert_raise_message(ValueError, msg, nmf.NMF().fit, -A)
assert_raise_message(ValueError, msg, nmf._initialize_nmf, -A,
2, 'nndsvd')
clf = nmf.NMF(2, tol=0.1).fit(A)
assert_raise_message(ValueError, msg, clf.transform, -A)
def test_initialize_close():
# Test NNDSVD error
# Test that _initialize_nmf error is less than the standard deviation of
# the entries in the matrix.
A = np.abs(random_state.randn(10, 10))
W, H = nmf._initialize_nmf(A, 10, init='nndsvd')
error = linalg.norm(np.dot(W, H) - A)
sdev = linalg.norm(A - A.mean())
assert_true(error <= sdev)
def test_initialize_variants():
# Test NNDSVD variants correctness
# Test that the variants 'nndsvda' and 'nndsvdar' differ from basic
# 'nndsvd' only where the basic version has zeros.
data = np.abs(random_state.randn(10, 10))
W0, H0 = nmf._initialize_nmf(data, 10, init='nndsvd')
Wa, Ha = nmf._initialize_nmf(data, 10, init='nndsvda')
War, Har = nmf._initialize_nmf(data, 10, init='nndsvdar',
random_state=0)
for ref, evl in ((W0, Wa), (W0, War), (H0, Ha), (H0, Har)):
assert_true(np.allclose(evl[ref != 0], ref[ref != 0]))
@ignore_warnings
def test_nmf_fit_nn_output():
# Test that the decomposition does not contain negative values
A = np.c_[5 * np.ones(5) - np.arange(1, 6),
5 * np.ones(5) + np.arange(1, 6)]
for solver in ('pg', 'cd'):
for init in (None, 'nndsvd', 'nndsvda', 'nndsvdar'):
model = nmf.NMF(n_components=2, solver=solver, init=init,
random_state=0)
transf = model.fit_transform(A)
assert_false((model.components_ < 0).any() or
(transf < 0).any())
@ignore_warnings
def test_nmf_fit_close():
# Test that the fit is not too far away
for solver in ('pg', 'cd'):
pnmf = nmf.NMF(5, solver=solver, init='nndsvd', random_state=0)
X = np.abs(random_state.randn(6, 5))
assert_less(pnmf.fit(X).reconstruction_err_, 0.05)
def test_nls_nn_output():
# Test that NLS solver doesn't return negative values
A = np.arange(1, 5).reshape(1, -1)
Ap, _, _ = nmf._nls_subproblem(np.dot(A.T, -A), A.T, A, 0.001, 100)
assert_false((Ap < 0).any())
def test_nls_close():
# Test that the NLS results should be close
A = np.arange(1, 5).reshape(1, -1)
Ap, _, _ = nmf._nls_subproblem(np.dot(A.T, A), A.T, np.zeros_like(A),
0.001, 100)
assert_true((np.abs(Ap - A) < 0.01).all())
@ignore_warnings
def test_nmf_transform():
# Test that NMF.transform returns close values
A = np.abs(random_state.randn(6, 5))
for solver in ('pg', 'cd'):
m = nmf.NMF(solver=solver, n_components=4, init='nndsvd',
random_state=0)
ft = m.fit_transform(A)
t = m.transform(A)
assert_array_almost_equal(ft, t, decimal=2)
@ignore_warnings
def test_n_components_greater_n_features():
# Smoke test for the case of more components than features.
A = np.abs(random_state.randn(30, 10))
nmf.NMF(n_components=15, random_state=0, tol=1e-2).fit(A)
@ignore_warnings
def test_projgrad_nmf_sparseness():
# Test sparseness
# Test that sparsity constraints actually increase sparseness in the
# part where they are applied.
tol = 1e-2
A = np.abs(random_state.randn(10, 10))
m = nmf.ProjectedGradientNMF(n_components=5, random_state=0,
tol=tol).fit(A)
data_sp = nmf.ProjectedGradientNMF(n_components=5, sparseness='data',
random_state=0,
tol=tol).fit(A).data_sparseness_
comp_sp = nmf.ProjectedGradientNMF(n_components=5, sparseness='components',
random_state=0,
tol=tol).fit(A).comp_sparseness_
assert_greater(data_sp, m.data_sparseness_)
assert_greater(comp_sp, m.comp_sparseness_)
@ignore_warnings
def test_sparse_input():
# Test that sparse matrices are accepted as input
from scipy.sparse import csc_matrix
A = np.abs(random_state.randn(10, 10))
A[:, 2 * np.arange(5)] = 0
A_sparse = csc_matrix(A)
for solver in ('pg', 'cd'):
est1 = nmf.NMF(solver=solver, n_components=5, init='random',
random_state=0, tol=1e-2)
est2 = clone(est1)
W1 = est1.fit_transform(A)
W2 = est2.fit_transform(A_sparse)
H1 = est1.components_
H2 = est2.components_
assert_array_almost_equal(W1, W2)
assert_array_almost_equal(H1, H2)
@ignore_warnings
def test_sparse_transform():
# Test that transform works on sparse data. Issue #2124
A = np.abs(random_state.randn(3, 2))
A[A > 1.0] = 0
A = csc_matrix(A)
for solver in ('pg', 'cd'):
model = nmf.NMF(solver=solver, random_state=0, tol=1e-4,
n_components=2)
A_fit_tr = model.fit_transform(A)
A_tr = model.transform(A)
assert_array_almost_equal(A_fit_tr, A_tr, decimal=1)
@ignore_warnings
def test_non_negative_factorization_consistency():
    # Test that calling non_negative_factorization directly or through the
    # NMF class gives consistent results
A = np.abs(random_state.randn(10, 10))
A[:, 2 * np.arange(5)] = 0
for solver in ('pg', 'cd'):
W_nmf, H, _ = nmf.non_negative_factorization(
A, solver=solver, random_state=1, tol=1e-2)
W_nmf_2, _, _ = nmf.non_negative_factorization(
A, H=H, update_H=False, solver=solver, random_state=1, tol=1e-2)
model_class = nmf.NMF(solver=solver, random_state=1, tol=1e-2)
W_cls = model_class.fit_transform(A)
W_cls_2 = model_class.transform(A)
assert_array_almost_equal(W_nmf, W_cls, decimal=10)
assert_array_almost_equal(W_nmf_2, W_cls_2, decimal=10)
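# Illustrative sketch (not part of the original test-suite; the helper name
# is hypothetical): the equivalence exercised above, written out with
# explicit parameters.
def _nmf_consistency_sketch(A):
    W_func, _, _ = nmf.non_negative_factorization(
        A, n_components=2, init='nndsvd', solver='cd', tol=1e-2,
        random_state=0)
    W_cls = nmf.NMF(n_components=2, init='nndsvd', solver='cd', tol=1e-2,
                    random_state=0).fit_transform(A)
    # Both code paths should produce (nearly) identical factor matrices.
    return np.allclose(W_func, W_cls)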
@ignore_warnings
def test_non_negative_factorization_checking():
A = np.ones((2, 2))
    # Test parameter checking in the public function
nnmf = nmf.non_negative_factorization
msg = "Number of components must be positive; got (n_components='2')"
assert_raise_message(ValueError, msg, nnmf, A, A, A, '2')
msg = "Negative values in data passed to NMF (input H)"
assert_raise_message(ValueError, msg, nnmf, A, A, -A, 2, 'custom')
msg = "Negative values in data passed to NMF (input W)"
assert_raise_message(ValueError, msg, nnmf, A, -A, A, 2, 'custom')
msg = "Array passed to NMF (input H) is full of zeros"
assert_raise_message(ValueError, msg, nnmf, A, A, 0 * A, 2, 'custom')
def test_safe_compute_error():
A = np.abs(random_state.randn(10, 10))
A[:, 2 * np.arange(5)] = 0
A_sparse = csc_matrix(A)
W, H = nmf._initialize_nmf(A, 5, init='random', random_state=0)
error = nmf._safe_compute_error(A, W, H)
error_sparse = nmf._safe_compute_error(A_sparse, W, H)
assert_almost_equal(error, error_sparse)
|
bsd-3-clause
|
ephes/scikit-learn
|
sklearn/feature_selection/tests/test_base.py
|
170
|
3666
|
import numpy as np
from scipy import sparse as sp
from nose.tools import assert_raises, assert_equal
from numpy.testing import assert_array_equal
from sklearn.base import BaseEstimator
from sklearn.feature_selection.base import SelectorMixin
from sklearn.utils import check_array
class StepSelector(SelectorMixin, BaseEstimator):
"""Retain every `step` features (beginning with 0)"""
def __init__(self, step=2):
self.step = step
def fit(self, X, y=None):
X = check_array(X, 'csc')
self.n_input_feats = X.shape[1]
return self
def _get_support_mask(self):
mask = np.zeros(self.n_input_feats, dtype=bool)
mask[::self.step] = True
return mask
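# Illustrative note (not part of the original tests): with the default
# ``step`` of 2, StepSelector keeps columns 0, 2, 4, ... of its input, so for
# the 10-column X defined below, get_support(indices=True) returns
# [0, 2, 4, 6, 8].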
support = [True, False] * 5
support_inds = [0, 2, 4, 6, 8]
X = np.arange(20).reshape(2, 10)
Xt = np.arange(0, 20, 2).reshape(2, 5)
Xinv = X.copy()
Xinv[:, 1::2] = 0
y = [0, 1]
feature_names = list('ABCDEFGHIJ')
feature_names_t = feature_names[::2]
feature_names_inv = np.array(feature_names)
feature_names_inv[1::2] = ''
def test_transform_dense():
sel = StepSelector()
Xt_actual = sel.fit(X, y).transform(X)
Xt_actual2 = StepSelector().fit_transform(X, y)
assert_array_equal(Xt, Xt_actual)
assert_array_equal(Xt, Xt_actual2)
# Check dtype matches
assert_equal(np.int32, sel.transform(X.astype(np.int32)).dtype)
assert_equal(np.float32, sel.transform(X.astype(np.float32)).dtype)
# Check 1d list and other dtype:
names_t_actual = sel.transform(feature_names)
assert_array_equal(feature_names_t, names_t_actual.ravel())
# Check wrong shape raises error
assert_raises(ValueError, sel.transform, np.array([[1], [2]]))
def test_transform_sparse():
sparse = sp.csc_matrix
sel = StepSelector()
Xt_actual = sel.fit(sparse(X)).transform(sparse(X))
Xt_actual2 = sel.fit_transform(sparse(X))
assert_array_equal(Xt, Xt_actual.toarray())
assert_array_equal(Xt, Xt_actual2.toarray())
# Check dtype matches
assert_equal(np.int32, sel.transform(sparse(X).astype(np.int32)).dtype)
assert_equal(np.float32, sel.transform(sparse(X).astype(np.float32)).dtype)
# Check wrong shape raises error
assert_raises(ValueError, sel.transform, np.array([[1], [2]]))
def test_inverse_transform_dense():
sel = StepSelector()
Xinv_actual = sel.fit(X, y).inverse_transform(Xt)
assert_array_equal(Xinv, Xinv_actual)
# Check dtype matches
assert_equal(np.int32,
sel.inverse_transform(Xt.astype(np.int32)).dtype)
assert_equal(np.float32,
sel.inverse_transform(Xt.astype(np.float32)).dtype)
# Check 1d list and other dtype:
names_inv_actual = sel.inverse_transform(feature_names_t)
assert_array_equal(feature_names_inv, names_inv_actual.ravel())
# Check wrong shape raises error
assert_raises(ValueError, sel.inverse_transform, np.array([[1], [2]]))
def test_inverse_transform_sparse():
sparse = sp.csc_matrix
sel = StepSelector()
Xinv_actual = sel.fit(sparse(X)).inverse_transform(sparse(Xt))
assert_array_equal(Xinv, Xinv_actual.toarray())
# Check dtype matches
assert_equal(np.int32,
sel.inverse_transform(sparse(Xt).astype(np.int32)).dtype)
assert_equal(np.float32,
sel.inverse_transform(sparse(Xt).astype(np.float32)).dtype)
# Check wrong shape raises error
assert_raises(ValueError, sel.inverse_transform, np.array([[1], [2]]))
def test_get_support():
sel = StepSelector()
sel.fit(X, y)
assert_array_equal(support, sel.get_support())
assert_array_equal(support_inds, sel.get_support(indices=True))
|
bsd-3-clause
|
Britefury/scikit-image
|
doc/examples/plot_glcm.py
|
26
|
3307
|
"""
=====================
GLCM Texture Features
=====================
This example illustrates texture classification using grey level
co-occurrence matrices (GLCMs).
A GLCM is a histogram of co-occurring greyscale values at a given
offset over an image.
In this example, samples of two different textures are extracted from
an image: grassy areas and sky areas. For each patch, a GLCM with
a horizontal offset of 5 is computed. Next, two features of the
GLCM matrices are computed: dissimilarity and correlation. These are
plotted to illustrate that the classes form clusters in feature space.
In a typical classification problem, the final step (not included in
this example) would be to train a classifier, such as logistic
regression, to label image patches from new images.
"""
import matplotlib.pyplot as plt
from skimage.feature import greycomatrix, greycoprops
from skimage import data
PATCH_SIZE = 21
# open the camera image
image = data.camera()
# select some patches from grassy areas of the image
grass_locations = [(474, 291), (440, 433), (466, 18), (462, 236)]
grass_patches = []
for loc in grass_locations:
grass_patches.append(image[loc[0]:loc[0] + PATCH_SIZE,
loc[1]:loc[1] + PATCH_SIZE])
# select some patches from sky areas of the image
sky_locations = [(54, 48), (21, 233), (90, 380), (195, 330)]
sky_patches = []
for loc in sky_locations:
sky_patches.append(image[loc[0]:loc[0] + PATCH_SIZE,
loc[1]:loc[1] + PATCH_SIZE])
# compute some GLCM properties for each patch
xs = []
ys = []
for patch in (grass_patches + sky_patches):
glcm = greycomatrix(patch, [5], [0], 256, symmetric=True, normed=True)
xs.append(greycoprops(glcm, 'dissimilarity')[0, 0])
ys.append(greycoprops(glcm, 'correlation')[0, 0])
# create the figure
fig = plt.figure(figsize=(8, 8))
# display original image with locations of patches
ax = fig.add_subplot(3, 2, 1)
ax.imshow(image, cmap=plt.cm.gray, interpolation='nearest',
vmin=0, vmax=255)
for (y, x) in grass_locations:
ax.plot(x + PATCH_SIZE / 2, y + PATCH_SIZE / 2, 'gs')
for (y, x) in sky_locations:
ax.plot(x + PATCH_SIZE / 2, y + PATCH_SIZE / 2, 'bs')
ax.set_xlabel('Original Image')
ax.set_xticks([])
ax.set_yticks([])
ax.axis('image')
# for each patch, plot (dissimilarity, correlation)
ax = fig.add_subplot(3, 2, 2)
ax.plot(xs[:len(grass_patches)], ys[:len(grass_patches)], 'go',
label='Grass')
ax.plot(xs[len(grass_patches):], ys[len(grass_patches):], 'bo',
label='Sky')
ax.set_xlabel('GLCM Dissimilarity')
ax.set_ylabel('GLCM Correlation')
ax.legend()
# display the image patches
for i, patch in enumerate(grass_patches):
ax = fig.add_subplot(3, len(grass_patches), len(grass_patches)*1 + i + 1)
ax.imshow(patch, cmap=plt.cm.gray, interpolation='nearest',
vmin=0, vmax=255)
ax.set_xlabel('Grass %d' % (i + 1))
for i, patch in enumerate(sky_patches):
ax = fig.add_subplot(3, len(sky_patches), len(sky_patches)*2 + i + 1)
ax.imshow(patch, cmap=plt.cm.gray, interpolation='nearest',
vmin=0, vmax=255)
ax.set_xlabel('Sky %d' % (i + 1))
# display the patches and plot
fig.suptitle('Grey level co-occurrence matrix features', fontsize=14)
plt.show()
|
bsd-3-clause
|
louispotok/pandas
|
pandas/tests/categorical/test_algos.py
|
4
|
4074
|
import pytest
import numpy as np
import pandas as pd
import pandas.util.testing as tm
@pytest.mark.parametrize('ordered', [True, False])
@pytest.mark.parametrize('categories', [
['b', 'a', 'c'],
['a', 'b', 'c', 'd'],
])
def test_factorize(categories, ordered):
cat = pd.Categorical(['b', 'b', 'a', 'c', None],
categories=categories,
ordered=ordered)
labels, uniques = pd.factorize(cat)
expected_labels = np.array([0, 0, 1, 2, -1], dtype=np.intp)
expected_uniques = pd.Categorical(['b', 'a', 'c'],
categories=categories,
ordered=ordered)
tm.assert_numpy_array_equal(labels, expected_labels)
tm.assert_categorical_equal(uniques, expected_uniques)
def test_factorized_sort():
cat = pd.Categorical(['b', 'b', None, 'a'])
labels, uniques = pd.factorize(cat, sort=True)
expected_labels = np.array([1, 1, -1, 0], dtype=np.intp)
expected_uniques = pd.Categorical(['a', 'b'])
tm.assert_numpy_array_equal(labels, expected_labels)
tm.assert_categorical_equal(uniques, expected_uniques)
def test_factorized_sort_ordered():
cat = pd.Categorical(['b', 'b', None, 'a'],
categories=['c', 'b', 'a'],
ordered=True)
labels, uniques = pd.factorize(cat, sort=True)
expected_labels = np.array([0, 0, -1, 1], dtype=np.intp)
expected_uniques = pd.Categorical(['b', 'a'],
categories=['c', 'b', 'a'],
ordered=True)
tm.assert_numpy_array_equal(labels, expected_labels)
tm.assert_categorical_equal(uniques, expected_uniques)
def test_isin_cats():
# GH2003
cat = pd.Categorical(["a", "b", np.nan])
result = cat.isin(["a", np.nan])
expected = np.array([True, False, True], dtype=bool)
tm.assert_numpy_array_equal(expected, result)
result = cat.isin(["a", "c"])
expected = np.array([True, False, False], dtype=bool)
tm.assert_numpy_array_equal(expected, result)
@pytest.mark.parametrize("empty", [[], pd.Series(), np.array([])])
def test_isin_empty(empty):
s = pd.Categorical(["a", "b"])
expected = np.array([False, False], dtype=bool)
result = s.isin(empty)
tm.assert_numpy_array_equal(expected, result)
class TestTake(object):
# https://github.com/pandas-dev/pandas/issues/20664
def test_take_warns(self):
cat = pd.Categorical(['a', 'b'])
with tm.assert_produces_warning(FutureWarning):
cat.take([0, -1])
def test_take_positive_no_warning(self):
cat = pd.Categorical(['a', 'b'])
with tm.assert_produces_warning(None):
cat.take([0, 0])
def test_take_bounds(self, allow_fill):
# https://github.com/pandas-dev/pandas/issues/20664
cat = pd.Categorical(['a', 'b', 'a'])
with pytest.raises(IndexError):
cat.take([4, 5], allow_fill=allow_fill)
def test_take_empty(self, allow_fill):
# https://github.com/pandas-dev/pandas/issues/20664
cat = pd.Categorical([], categories=['a', 'b'])
with pytest.raises(IndexError):
cat.take([0], allow_fill=allow_fill)
def test_positional_take(self, ordered):
cat = pd.Categorical(['a', 'a', 'b', 'b'], categories=['b', 'a'],
ordered=ordered)
result = cat.take([0, 1, 2], allow_fill=False)
expected = pd.Categorical(['a', 'a', 'b'], categories=cat.categories,
ordered=ordered)
tm.assert_categorical_equal(result, expected)
def test_positional_take_unobserved(self, ordered):
cat = pd.Categorical(['a', 'b'], categories=['a', 'b', 'c'],
ordered=ordered)
result = cat.take([1, 0], allow_fill=False)
expected = pd.Categorical(['b', 'a'], categories=cat.categories,
ordered=ordered)
tm.assert_categorical_equal(result, expected)
|
bsd-3-clause
|
qifeigit/scikit-learn
|
examples/applications/plot_species_distribution_modeling.py
|
254
|
7434
|
"""
=============================
Species distribution modeling
=============================
Modeling species' geographic distributions is an important
problem in conservation biology. In this example we
model the geographic distribution of two south american
mammals given past observations and 14 environmental
variables. Since we have only positive examples (there are
no unsuccessful observations), we cast this problem as a
density estimation problem and use the `OneClassSVM` provided
by the package `sklearn.svm` as our modeling tool.
The dataset is provided by Phillips et al. (2006).
If available, the example uses
`basemap <http://matplotlib.sourceforge.net/basemap/doc/html/>`_
to plot the coast lines and national boundaries of South America.
The two species are:
- `"Bradypus variegatus"
<http://www.iucnredlist.org/apps/redlist/details/3038/0>`_ ,
the Brown-throated Sloth.
- `"Microryzomys minutus"
<http://www.iucnredlist.org/apps/redlist/details/13408/0>`_ ,
also known as the Forest Small Rice Rat, a rodent that lives in Peru,
   Colombia, Ecuador, and Venezuela.
References
----------
* `"Maximum entropy modeling of species geographic distributions"
<http://www.cs.princeton.edu/~schapire/papers/ecolmod.pdf>`_
S. J. Phillips, R. P. Anderson, R. E. Schapire - Ecological Modelling,
190:231-259, 2006.
"""
# Authors: Peter Prettenhofer <[email protected]>
# Jake Vanderplas <[email protected]>
#
# License: BSD 3 clause
from __future__ import print_function
from time import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets.base import Bunch
from sklearn.datasets import fetch_species_distributions
from sklearn.datasets.species_distributions import construct_grids
from sklearn import svm, metrics
# if basemap is available, we'll use it.
# otherwise, we'll improvise later...
try:
from mpl_toolkits.basemap import Basemap
basemap = True
except ImportError:
basemap = False
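# --- Illustrative sketch (not part of the original example) -----------------
# The density-estimation idea from the docstring in isolation: fit a
# OneClassSVM on standardized covariates of the presence points only and rank
# candidate locations by decision_function (higher means more "species-like").
# The helper name and arguments are hypothetical.
def _one_class_density_sketch(cov_train, cov_candidates):
    mean, std = cov_train.mean(axis=0), cov_train.std(axis=0)
    clf = svm.OneClassSVM(nu=0.1, kernel="rbf", gamma=0.5)
    clf.fit((cov_train - mean) / std)
    return clf.decision_function((cov_candidates - mean) / std)
# -----------------------------------------------------------------------------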
print(__doc__)
def create_species_bunch(species_name, train, test, coverages, xgrid, ygrid):
"""Create a bunch with information about a particular organism
This will use the test/train record arrays to extract the
data specific to the given species name.
"""
bunch = Bunch(name=' '.join(species_name.split("_")[:2]))
species_name = species_name.encode('ascii')
points = dict(test=test, train=train)
for label, pts in points.items():
# choose points associated with the desired species
pts = pts[pts['species'] == species_name]
bunch['pts_%s' % label] = pts
# determine coverage values for each of the training & testing points
ix = np.searchsorted(xgrid, pts['dd long'])
iy = np.searchsorted(ygrid, pts['dd lat'])
bunch['cov_%s' % label] = coverages[:, -iy, ix].T
return bunch
def plot_species_distribution(species=("bradypus_variegatus_0",
"microryzomys_minutus_0")):
"""
Plot the species distribution.
"""
if len(species) > 2:
print("Note: when more than two species are provided,"
" only the first two will be used")
t0 = time()
# Load the compressed data
data = fetch_species_distributions()
# Set up the data grid
xgrid, ygrid = construct_grids(data)
# The grid in x,y coordinates
X, Y = np.meshgrid(xgrid, ygrid[::-1])
# create a bunch for each species
BV_bunch = create_species_bunch(species[0],
data.train, data.test,
data.coverages, xgrid, ygrid)
MM_bunch = create_species_bunch(species[1],
data.train, data.test,
data.coverages, xgrid, ygrid)
# background points (grid coordinates) for evaluation
np.random.seed(13)
background_points = np.c_[np.random.randint(low=0, high=data.Ny,
size=10000),
np.random.randint(low=0, high=data.Nx,
size=10000)].T
# We'll make use of the fact that coverages[6] has measurements at all
# land points. This will help us decide between land and water.
land_reference = data.coverages[6]
# Fit, predict, and plot for each species.
for i, species in enumerate([BV_bunch, MM_bunch]):
print("_" * 80)
print("Modeling distribution of species '%s'" % species.name)
# Standardize features
mean = species.cov_train.mean(axis=0)
std = species.cov_train.std(axis=0)
train_cover_std = (species.cov_train - mean) / std
# Fit OneClassSVM
print(" - fit OneClassSVM ... ", end='')
clf = svm.OneClassSVM(nu=0.1, kernel="rbf", gamma=0.5)
clf.fit(train_cover_std)
print("done.")
# Plot map of South America
plt.subplot(1, 2, i + 1)
if basemap:
print(" - plot coastlines using basemap")
m = Basemap(projection='cyl', llcrnrlat=Y.min(),
urcrnrlat=Y.max(), llcrnrlon=X.min(),
urcrnrlon=X.max(), resolution='c')
m.drawcoastlines()
m.drawcountries()
else:
print(" - plot coastlines from coverage")
plt.contour(X, Y, land_reference,
levels=[-9999], colors="k",
linestyles="solid")
plt.xticks([])
plt.yticks([])
print(" - predict species distribution")
# Predict species distribution using the training data
Z = np.ones((data.Ny, data.Nx), dtype=np.float64)
# We'll predict only for the land points.
idx = np.where(land_reference > -9999)
coverages_land = data.coverages[:, idx[0], idx[1]].T
pred = clf.decision_function((coverages_land - mean) / std)[:, 0]
Z *= pred.min()
Z[idx[0], idx[1]] = pred
levels = np.linspace(Z.min(), Z.max(), 25)
Z[land_reference == -9999] = -9999
# plot contours of the prediction
plt.contourf(X, Y, Z, levels=levels, cmap=plt.cm.Reds)
plt.colorbar(format='%.2f')
# scatter training/testing points
plt.scatter(species.pts_train['dd long'], species.pts_train['dd lat'],
s=2 ** 2, c='black',
marker='^', label='train')
plt.scatter(species.pts_test['dd long'], species.pts_test['dd lat'],
s=2 ** 2, c='black',
marker='x', label='test')
plt.legend()
plt.title(species.name)
plt.axis('equal')
# Compute AUC with regards to background points
pred_background = Z[background_points[0], background_points[1]]
pred_test = clf.decision_function((species.cov_test - mean)
/ std)[:, 0]
scores = np.r_[pred_test, pred_background]
y = np.r_[np.ones(pred_test.shape), np.zeros(pred_background.shape)]
fpr, tpr, thresholds = metrics.roc_curve(y, scores)
roc_auc = metrics.auc(fpr, tpr)
plt.text(-35, -70, "AUC: %.3f" % roc_auc, ha="right")
print("\n Area under the ROC curve : %f" % roc_auc)
print("\ntime elapsed: %.2fs" % (time() - t0))
plot_species_distribution()
plt.show()
|
bsd-3-clause
|
PatrickOReilly/scikit-learn
|
sklearn/tests/test_base.py
|
3
|
7698
|
# Author: Gael Varoquaux
# License: BSD 3 clause
import sys
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_raises
from sklearn.base import BaseEstimator, clone, is_classifier
from sklearn.svm import SVC
from sklearn.pipeline import Pipeline
from sklearn.model_selection import GridSearchCV
from sklearn.utils import deprecated
#############################################################################
# A few test classes
class MyEstimator(BaseEstimator):
def __init__(self, l1=0, empty=None):
self.l1 = l1
self.empty = empty
class K(BaseEstimator):
def __init__(self, c=None, d=None):
self.c = c
self.d = d
class T(BaseEstimator):
def __init__(self, a=None, b=None):
self.a = a
self.b = b
class DeprecatedAttributeEstimator(BaseEstimator):
def __init__(self, a=None, b=None):
self.a = a
if b is not None:
DeprecationWarning("b is deprecated and renamed 'a'")
self.a = b
@property
@deprecated("Parameter 'b' is deprecated and renamed to 'a'")
def b(self):
return self._b
class Buggy(BaseEstimator):
" A buggy estimator that does not set its parameters right. "
def __init__(self, a=None):
self.a = 1
class NoEstimator(object):
def __init__(self):
pass
def fit(self, X=None, y=None):
return self
def predict(self, X=None):
return None
class VargEstimator(BaseEstimator):
"""scikit-learn estimators shouldn't have vargs."""
def __init__(self, *vargs):
pass
#############################################################################
# The tests
def test_clone():
# Tests that clone creates a correct deep copy.
# We create an estimator, make a copy of its original state
# (which, in this case, is the current state of the estimator),
# and check that the obtained copy is a correct deep copy.
from sklearn.feature_selection import SelectFpr, f_classif
selector = SelectFpr(f_classif, alpha=0.1)
new_selector = clone(selector)
assert_true(selector is not new_selector)
assert_equal(selector.get_params(), new_selector.get_params())
selector = SelectFpr(f_classif, alpha=np.zeros((10, 2)))
new_selector = clone(selector)
assert_true(selector is not new_selector)
def test_clone_2():
# Tests that clone doesn't copy everything.
# We first create an estimator, give it an own attribute, and
# make a copy of its original state. Then we check that the copy doesn't
# have the specific attribute we manually added to the initial estimator.
from sklearn.feature_selection import SelectFpr, f_classif
selector = SelectFpr(f_classif, alpha=0.1)
selector.own_attribute = "test"
new_selector = clone(selector)
assert_false(hasattr(new_selector, "own_attribute"))
def test_clone_buggy():
# Check that clone raises an error on buggy estimators.
buggy = Buggy()
buggy.a = 2
assert_raises(RuntimeError, clone, buggy)
no_estimator = NoEstimator()
assert_raises(TypeError, clone, no_estimator)
varg_est = VargEstimator()
assert_raises(RuntimeError, clone, varg_est)
def test_clone_empty_array():
# Regression test for cloning estimators with empty arrays
clf = MyEstimator(empty=np.array([]))
clf2 = clone(clf)
assert_array_equal(clf.empty, clf2.empty)
clf = MyEstimator(empty=sp.csr_matrix(np.array([[0]])))
clf2 = clone(clf)
assert_array_equal(clf.empty.data, clf2.empty.data)
def test_clone_nan():
# Regression test for cloning estimators with default parameter as np.nan
clf = MyEstimator(empty=np.nan)
clf2 = clone(clf)
assert_true(clf.empty is clf2.empty)
def test_clone_sparse_matrices():
sparse_matrix_classes = [
getattr(sp, name)
for name in dir(sp) if name.endswith('_matrix')]
PY26 = sys.version_info[:2] == (2, 6)
if PY26:
# sp.dok_matrix can not be deepcopied in Python 2.6
sparse_matrix_classes.remove(sp.dok_matrix)
for cls in sparse_matrix_classes:
sparse_matrix = cls(np.eye(5))
clf = MyEstimator(empty=sparse_matrix)
clf_cloned = clone(clf)
assert_true(clf.empty.__class__ is clf_cloned.empty.__class__)
assert_array_equal(clf.empty.toarray(), clf_cloned.empty.toarray())
def test_repr():
# Smoke test the repr of the base estimator.
my_estimator = MyEstimator()
repr(my_estimator)
test = T(K(), K())
assert_equal(
repr(test),
"T(a=K(c=None, d=None), b=K(c=None, d=None))"
)
some_est = T(a=["long_params"] * 1000)
assert_equal(len(repr(some_est)), 415)
def test_str():
# Smoke test the str of the base estimator
my_estimator = MyEstimator()
str(my_estimator)
def test_get_params():
test = T(K(), K())
assert_true('a__d' in test.get_params(deep=True))
assert_true('a__d' not in test.get_params(deep=False))
test.set_params(a__d=2)
assert_true(test.a.d == 2)
assert_raises(ValueError, test.set_params, a__a=2)
def test_get_params_deprecated():
# deprecated attribute should not show up as params
est = DeprecatedAttributeEstimator(a=1)
assert_true('a' in est.get_params())
assert_true('a' in est.get_params(deep=True))
assert_true('a' in est.get_params(deep=False))
assert_true('b' not in est.get_params())
assert_true('b' not in est.get_params(deep=True))
assert_true('b' not in est.get_params(deep=False))
def test_is_classifier():
svc = SVC()
assert_true(is_classifier(svc))
assert_true(is_classifier(GridSearchCV(svc, {'C': [0.1, 1]})))
assert_true(is_classifier(Pipeline([('svc', svc)])))
assert_true(is_classifier(Pipeline([('svc_cv',
GridSearchCV(svc, {'C': [0.1, 1]}))])))
def test_set_params():
# test nested estimator parameter setting
clf = Pipeline([("svc", SVC())])
# non-existing parameter in svc
assert_raises(ValueError, clf.set_params, svc__stupid_param=True)
# non-existing parameter of pipeline
assert_raises(ValueError, clf.set_params, svm__stupid_param=True)
# we don't currently catch if the things in pipeline are estimators
# bad_pipeline = Pipeline([("bad", NoEstimator())])
# assert_raises(AttributeError, bad_pipeline.set_params,
# bad__stupid_param=True)
def test_score_sample_weight():
from sklearn.tree import DecisionTreeClassifier
from sklearn.tree import DecisionTreeRegressor
from sklearn import datasets
rng = np.random.RandomState(0)
# test both ClassifierMixin and RegressorMixin
estimators = [DecisionTreeClassifier(max_depth=2),
DecisionTreeRegressor(max_depth=2)]
sets = [datasets.load_iris(),
datasets.load_boston()]
for est, ds in zip(estimators, sets):
est.fit(ds.data, ds.target)
# generate random sample weights
sample_weight = rng.randint(1, 10, size=len(ds.target))
# check that the score with and without sample weights are different
assert_not_equal(est.score(ds.data, ds.target),
est.score(ds.data, ds.target,
sample_weight=sample_weight),
msg="Unweighted and weighted scores "
"are unexpectedly equal")
|
bsd-3-clause
|
wbinventor/openmc
|
openmc/filter.py
|
1
|
59893
|
from abc import ABCMeta
from collections import OrderedDict
from collections.abc import Iterable
import copy
import hashlib
from itertools import product
from numbers import Real, Integral
from xml.etree import ElementTree as ET
import numpy as np
import pandas as pd
import openmc
import openmc.checkvalue as cv
from .cell import Cell
from .material import Material
from .mixin import IDManagerMixin
from .surface import Surface
from .universe import Universe
_FILTER_TYPES = (
'universe', 'material', 'cell', 'cellborn', 'surface', 'mesh', 'energy',
'energyout', 'mu', 'polar', 'azimuthal', 'distribcell', 'delayedgroup',
'energyfunction', 'cellfrom', 'legendre', 'spatiallegendre',
'sphericalharmonics', 'zernike', 'zernikeradial', 'particle'
)
_CURRENT_NAMES = (
'x-min out', 'x-min in', 'x-max out', 'x-max in',
'y-min out', 'y-min in', 'y-max out', 'y-max in',
'z-min out', 'z-min in', 'z-max out', 'z-max in'
)
_PARTICLE_IDS = {'neutron': 1, 'photon': 2, 'electron': 3, 'positron': 4}
class FilterMeta(ABCMeta):
def __new__(cls, name, bases, namespace, **kwargs):
# Check the class name.
if not name.endswith('Filter'):
raise ValueError("All filter class names must end with 'Filter'")
# Create a 'short_name' attribute that removes the 'Filter' suffix.
namespace['short_name'] = name[:-6]
# Subclass methods can sort of inherit the docstring of parent class
# methods. If a function is defined without a docstring, most (all?)
# Python interpreters will search through the parent classes to see if
# there is a docstring for a function with the same name, and they will
# use that docstring. However, Sphinx does not have that functionality.
# This chunk of code handles this docstring inheritance manually so that
# the autodocumentation will pick it up.
if name != 'Filter':
# Look for newly-defined functions that were also in Filter.
for func_name in namespace:
if func_name in Filter.__dict__:
# Inherit the docstring from Filter if not defined.
if isinstance(namespace[func_name],
(classmethod, staticmethod)):
new_doc = namespace[func_name].__func__.__doc__
old_doc = Filter.__dict__[func_name].__func__.__doc__
if new_doc is None and old_doc is not None:
namespace[func_name].__func__.__doc__ = old_doc
else:
new_doc = namespace[func_name].__doc__
old_doc = Filter.__dict__[func_name].__doc__
if new_doc is None and old_doc is not None:
namespace[func_name].__doc__ = old_doc
# Make the class.
return super().__new__(cls, name, bases, namespace, **kwargs)
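# Illustrative note (not part of the original module): the net effect of
# FilterMeta is that every subclass gains a ``short_name`` derived from its
# class name, e.g. ``EnergyFilter.short_name == 'Energy'``, while a class
# whose name does not end in 'Filter' raises ValueError at class creation
# time.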
class Filter(IDManagerMixin, metaclass=FilterMeta):
"""Tally modifier that describes phase-space and other characteristics.
Parameters
----------
bins : Integral or Iterable of Integral or Iterable of Real
The bins for the filter. This takes on different meaning for different
        filters. See the docstrings for subclasses of this filter or the online
documentation for more details.
filter_id : int
Unique identifier for the filter
Attributes
----------
bins : Integral or Iterable of Integral or Iterable of Real
The bins for the filter
id : int
Unique identifier for the filter
num_bins : Integral
The number of filter bins
"""
next_id = 1
used_ids = set()
def __init__(self, bins, filter_id=None):
self.bins = bins
self.id = filter_id
def __eq__(self, other):
if type(self) is not type(other):
return False
elif len(self.bins) != len(other.bins):
return False
else:
return np.allclose(self.bins, other.bins)
def __ne__(self, other):
return not self == other
def __gt__(self, other):
if type(self) is not type(other):
if self.short_name in _FILTER_TYPES and \
other.short_name in _FILTER_TYPES:
delta = _FILTER_TYPES.index(self.short_name) - \
_FILTER_TYPES.index(other.short_name)
return delta > 0
else:
return False
else:
return max(self.bins) > max(other.bins)
def __lt__(self, other):
return not self > other
def __hash__(self):
string = type(self).__name__ + '\n'
string += '{: <16}=\t{}\n'.format('\tBins', self.bins)
return hash(string)
def __repr__(self):
string = type(self).__name__ + '\n'
string += '{: <16}=\t{}\n'.format('\tBins', self.bins)
string += '{: <16}=\t{}\n'.format('\tID', self.id)
return string
@classmethod
def _recursive_subclasses(cls):
"""Return all subclasses and their subclasses, etc."""
all_subclasses = []
for subclass in cls.__subclasses__():
all_subclasses.append(subclass)
all_subclasses.extend(subclass._recursive_subclasses())
return all_subclasses
@classmethod
def from_hdf5(cls, group, **kwargs):
"""Construct a new Filter instance from HDF5 data.
Parameters
----------
group : h5py.Group
HDF5 group to read from
Keyword arguments
-----------------
meshes : dict
Dictionary mapping integer IDs to openmc.Mesh objects. Only used
for openmc.MeshFilter objects.
"""
filter_id = int(group.name.split('/')[-1].lstrip('filter '))
# If the HDF5 'type' variable matches this class's short_name, then
        # there is no overridden from_hdf5 method. Pass the bins to __init__.
if group['type'][()].decode() == cls.short_name.lower():
out = cls(group['bins'][()], filter_id=filter_id)
out._num_bins = group['n_bins'][()]
return out
# Search through all subclasses and find the one matching the HDF5
# 'type'. Call that class's from_hdf5 method.
for subclass in cls._recursive_subclasses():
if group['type'][()].decode() == subclass.short_name.lower():
return subclass.from_hdf5(group, **kwargs)
raise ValueError("Unrecognized Filter class: '"
+ group['type'][()].decode() + "'")
@property
def bins(self):
return self._bins
@bins.setter
def bins(self, bins):
self.check_bins(bins)
self._bins = bins
@property
def num_bins(self):
return len(self.bins)
def check_bins(self, bins):
"""Make sure given bins are valid for this filter.
Raises
------
TypeError
ValueError
"""
pass
def to_xml_element(self):
"""Return XML Element representing the Filter.
Returns
-------
element : xml.etree.ElementTree.Element
XML element containing filter data
"""
element = ET.Element('filter')
element.set('id', str(self.id))
element.set('type', self.short_name.lower())
subelement = ET.SubElement(element, 'bins')
subelement.text = ' '.join(str(b) for b in self.bins)
return element
def can_merge(self, other):
"""Determine if filter can be merged with another.
Parameters
----------
other : openmc.Filter
Filter to compare with
Returns
-------
bool
Whether the filter can be merged
"""
return type(self) is type(other)
def merge(self, other):
"""Merge this filter with another.
Parameters
----------
other : openmc.Filter
Filter to merge with
Returns
-------
merged_filter : openmc.Filter
Filter resulting from the merge
"""
if not self.can_merge(other):
msg = 'Unable to merge "{0}" with "{1}" '.format(
type(self), type(other))
raise ValueError(msg)
# Merge unique filter bins
merged_bins = np.concatenate((self.bins, other.bins))
merged_bins = np.unique(merged_bins)
# Create a new filter with these bins and a new auto-generated ID
return type(self)(merged_bins)
def is_subset(self, other):
"""Determine if another filter is a subset of this filter.
If all of the bins in the other filter are included as bins in this
filter, then it is a subset of this filter.
Parameters
----------
other : openmc.Filter
The filter to query as a subset of this filter
Returns
-------
bool
Whether or not the other filter is a subset of this filter
"""
if type(self) is not type(other):
return False
for bin in other.bins:
if bin not in self.bins:
return False
return True
def get_bin_index(self, filter_bin):
"""Returns the index in the Filter for some bin.
Parameters
----------
filter_bin : int or tuple
The bin is the integer ID for 'material', 'surface', 'cell',
'cellborn', and 'universe' Filters. The bin is an integer for the
cell instance ID for 'distribcell' Filters. The bin is a 2-tuple of
floats for 'energy' and 'energyout' filters corresponding to the
energy boundaries of the bin of interest. The bin is an (x,y,z)
3-tuple for 'mesh' filters corresponding to the mesh cell of
interest.
Returns
-------
filter_index : int
The index in the Tally data array for this filter bin.
"""
if filter_bin not in self.bins:
msg = 'Unable to get the bin index for Filter since "{0}" ' \
'is not one of the bins'.format(filter_bin)
raise ValueError(msg)
if isinstance(self.bins, np.ndarray):
return np.where(self.bins == filter_bin)[0][0]
else:
return self.bins.index(filter_bin)
def get_pandas_dataframe(self, data_size, stride, **kwargs):
"""Builds a Pandas DataFrame for the Filter's bins.
This method constructs a Pandas DataFrame object for the filter with
columns annotated by filter bin information. This is a helper method for
:meth:`Tally.get_pandas_dataframe`.
Parameters
----------
data_size : int
The total number of bins in the tally corresponding to this filter
stride : int
Stride in memory for the filter
Keyword arguments
-----------------
paths : bool
Only used for DistribcellFilter. If True (default), expand
distribcell indices into multi-index columns describing the path
to that distribcell through the CSG tree. NOTE: This option assumes
that all distribcell paths are of the same length and do not have
the same universes and cells but different lattice cell indices.
Returns
-------
pandas.DataFrame
A Pandas DataFrame with columns of strings that characterize the
filter's bins. The number of rows in the DataFrame is the same as
the total number of bins in the corresponding tally, with the filter
bin appropriately tiled to map to the corresponding tally bins.
See also
--------
Tally.get_pandas_dataframe(), CrossFilter.get_pandas_dataframe()
"""
# Initialize Pandas DataFrame
df = pd.DataFrame()
filter_bins = np.repeat(self.bins, stride)
tile_factor = data_size // len(filter_bins)
filter_bins = np.tile(filter_bins, tile_factor)
df = pd.concat([df, pd.DataFrame(
{self.short_name.lower(): filter_bins})])
return df
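# Illustration (assumption, not part of the original module): for a filter
# with bins [10, 20], stride=2 and data_size=8, the column built in
# Filter.get_pandas_dataframe above is
#     np.tile(np.repeat([10, 20], 2), 8 // 4) -> [10, 10, 20, 20, 10, 10, 20, 20]
# i.e. each bin is repeated ``stride`` times and the pattern is tiled to
# cover every tally row.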
class WithIDFilter(Filter):
"""Abstract parent for filters of types with IDs (Cell, Material, etc.)."""
def __init__(self, bins, filter_id=None):
bins = np.atleast_1d(bins)
# Make sure bins are either integers or appropriate objects
cv.check_iterable_type('filter bins', bins,
(Integral, self.expected_type))
# Extract ID values
bins = np.array([b if isinstance(b, Integral) else b.id
for b in bins])
self.bins = bins
self.id = filter_id
def check_bins(self, bins):
# Check the bin values.
for edge in bins:
cv.check_greater_than('filter bin', edge, 0, equality=True)
class UniverseFilter(WithIDFilter):
"""Bins tally event locations based on the Universe they occured in.
Parameters
----------
bins : openmc.Universe, int, or iterable thereof
The Universes to tally. Either openmc.Universe objects or their
Integral ID numbers can be used.
filter_id : int
Unique identifier for the filter
Attributes
----------
bins : Iterable of Integral
openmc.Universe IDs.
id : int
Unique identifier for the filter
num_bins : Integral
The number of filter bins
"""
expected_type = Universe
class MaterialFilter(WithIDFilter):
"""Bins tally event locations based on the Material they occured in.
Parameters
----------
bins : openmc.Material, Integral, or iterable thereof
The Materials to tally. Either openmc.Material objects or their
Integral ID numbers can be used.
filter_id : int
Unique identifier for the filter
Attributes
----------
bins : Iterable of Integral
openmc.Material IDs.
id : int
Unique identifier for the filter
num_bins : Integral
The number of filter bins
"""
expected_type = Material
class CellFilter(WithIDFilter):
"""Bins tally event locations based on the Cell they occured in.
Parameters
----------
bins : openmc.Cell, int, or iterable thereof
The cells to tally. Either openmc.Cell objects or their ID numbers can
be used.
filter_id : int
Unique identifier for the filter
Attributes
----------
bins : Iterable of Integral
openmc.Cell IDs.
id : int
Unique identifier for the filter
num_bins : Integral
The number of filter bins
"""
expected_type = Cell
class CellFromFilter(WithIDFilter):
"""Bins tally on which Cell the neutron came from.
Parameters
----------
bins : openmc.Cell, Integral, or iterable thereof
The Cell(s) to tally. Either openmc.Cell objects or their
Integral ID numbers can be used.
filter_id : int
Unique identifier for the filter
Attributes
----------
bins : Integral or Iterable of Integral
openmc.Cell IDs.
id : int
Unique identifier for the filter
num_bins : Integral
The number of filter bins
"""
expected_type = Cell
class CellbornFilter(WithIDFilter):
"""Bins tally events based on which Cell the neutron was born in.
Parameters
----------
bins : openmc.Cell, Integral, or iterable thereof
The birth Cells to tally. Either openmc.Cell objects or their
Integral ID numbers can be used.
filter_id : int
Unique identifier for the filter
Attributes
----------
bins : Iterable of Integral
openmc.Cell IDs.
id : int
Unique identifier for the filter
num_bins : Integral
The number of filter bins
"""
expected_type = Cell
class SurfaceFilter(WithIDFilter):
"""Filters particles by surface crossing
Parameters
----------
bins : openmc.Surface, int, or iterable of Integral
The surfaces to tally over. Either openmc.Surface objects or their ID
numbers can be used.
filter_id : int
Unique identifier for the filter
Attributes
----------
bins : Iterable of Integral
The surfaces to tally over. Either openmc.Surface objects or their ID
numbers can be used.
id : int
Unique identifier for the filter
num_bins : Integral
The number of filter bins
"""
expected_type = Surface
class ParticleFilter(Filter):
"""Bins tally events based on the Particle type.
Parameters
----------
bins : str, int, or iterable of Integral
The Particles to tally. Either str with particle type or their
ID numbers can be used ('neutron' = 1, 'photon' = 2, 'electron' = 3,
'positron' = 4).
filter_id : int
Unique identifier for the filter
Attributes
----------
bins : Iterable of Integral
The Particles to tally
id : int
Unique identifier for the filter
num_bins : Integral
The number of filter bins
"""
@property
def bins(self):
return self._bins
@bins.setter
def bins(self, bins):
bins = np.atleast_1d(bins)
cv.check_iterable_type('filter bins', bins, (Integral, str))
for edge in bins:
if isinstance(edge, Integral):
cv.check_value('filter bin', edge, _PARTICLE_IDS.values())
else:
cv.check_value('filter bin', edge, _PARTICLE_IDS.keys())
bins = np.atleast_1d([b if isinstance(b, Integral) else _PARTICLE_IDS[b]
for b in bins])
self._bins = bins
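# Illustrative note: given _PARTICLE_IDS above,
# ParticleFilter(['neutron', 'photon']).bins becomes array([1, 2]); integer
# bins within _PARTICLE_IDS.values() are accepted as-is, so
# ParticleFilter([1, 2]) is equivalent.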
class MeshFilter(Filter):
"""Bins tally event locations onto a regular, rectangular mesh.
Parameters
----------
mesh : openmc.Mesh
The Mesh object that events will be tallied onto
filter_id : int
Unique identifier for the filter
Attributes
----------
mesh : openmc.Mesh
The Mesh object that events will be tallied onto
id : int
Unique identifier for the filter
bins : list of tuple
A list of mesh indices for each filter bin, e.g. [(1, 1, 1), (2, 1, 1),
...]
num_bins : Integral
The number of filter bins
"""
def __init__(self, mesh, filter_id=None):
self.mesh = mesh
self.id = filter_id
def __repr__(self):
string = type(self).__name__ + '\n'
string += '{: <16}=\t{}\n'.format('\tMesh ID', self.mesh.id)
string += '{: <16}=\t{}\n'.format('\tID', self.id)
return string
@classmethod
def from_hdf5(cls, group, **kwargs):
if group['type'][()].decode() != cls.short_name.lower():
raise ValueError("Expected HDF5 data for filter type '"
+ cls.short_name.lower() + "' but got '"
                             + group['type'][()].decode() + "' instead")
if 'meshes' not in kwargs:
raise ValueError(cls.__name__ + " requires a 'meshes' keyword "
"argument.")
mesh_id = group['bins'][()]
mesh_obj = kwargs['meshes'][mesh_id]
filter_id = int(group.name.split('/')[-1].lstrip('filter '))
out = cls(mesh_obj, filter_id=filter_id)
return out
@property
def mesh(self):
return self._mesh
@mesh.setter
def mesh(self, mesh):
cv.check_type('filter mesh', mesh, openmc.Mesh)
self._mesh = mesh
self.bins = list(mesh.indices)
def can_merge(self, other):
# Mesh filters cannot have more than one bin
return False
def get_pandas_dataframe(self, data_size, stride, **kwargs):
"""Builds a Pandas DataFrame for the Filter's bins.
This method constructs a Pandas DataFrame object for the filter with
columns annotated by filter bin information. This is a helper method for
:meth:`Tally.get_pandas_dataframe`.
Parameters
----------
data_size : int
The total number of bins in the tally corresponding to this filter
stride : int
Stride in memory for the filter
Returns
-------
pandas.DataFrame
A Pandas DataFrame with three columns describing the x,y,z mesh
cell indices corresponding to each filter bin. The number of rows
in the DataFrame is the same as the total number of bins in the
corresponding tally, with the filter bin appropriately tiled to map
to the corresponding tally bins.
See also
--------
Tally.get_pandas_dataframe(), CrossFilter.get_pandas_dataframe()
"""
# Initialize Pandas DataFrame
df = pd.DataFrame()
# Initialize dictionary to build Pandas Multi-index column
filter_dict = {}
# Append Mesh ID as outermost index of multi-index
mesh_key = 'mesh {}'.format(self.mesh.id)
# Find mesh dimensions - use 3D indices for simplicity
n_dim = len(self.mesh.dimension)
if n_dim == 3:
nx, ny, nz = self.mesh.dimension
elif n_dim == 2:
nx, ny = self.mesh.dimension
nz = 1
else:
nx = self.mesh.dimension
ny = nz = 1
# Generate multi-index sub-column for x-axis
filter_bins = np.arange(1, nx + 1)
repeat_factor = stride
filter_bins = np.repeat(filter_bins, repeat_factor)
tile_factor = data_size // len(filter_bins)
filter_bins = np.tile(filter_bins, tile_factor)
filter_dict[(mesh_key, 'x')] = filter_bins
# Generate multi-index sub-column for y-axis
filter_bins = np.arange(1, ny + 1)
repeat_factor = nx * stride
filter_bins = np.repeat(filter_bins, repeat_factor)
tile_factor = data_size // len(filter_bins)
filter_bins = np.tile(filter_bins, tile_factor)
filter_dict[(mesh_key, 'y')] = filter_bins
# Generate multi-index sub-column for z-axis
filter_bins = np.arange(1, nz + 1)
repeat_factor = nx * ny * stride
filter_bins = np.repeat(filter_bins, repeat_factor)
tile_factor = data_size // len(filter_bins)
filter_bins = np.tile(filter_bins, tile_factor)
filter_dict[(mesh_key, 'z')] = filter_bins
# Initialize a Pandas DataFrame from the mesh dictionary
df = pd.concat([df, pd.DataFrame(filter_dict)])
return df
def to_xml_element(self):
"""Return XML Element representing the Filter.
Returns
-------
element : xml.etree.ElementTree.Element
XML element containing filter data
"""
element = super().to_xml_element()
element[0].text = str(self.mesh.id)
return element
class MeshSurfaceFilter(MeshFilter):
"""Filter events by surface crossings on a regular, rectangular mesh.
Parameters
----------
mesh : openmc.Mesh
The Mesh object that events will be tallied onto
filter_id : int
Unique identifier for the filter
Attributes
----------
mesh : openmc.Mesh
The Mesh object that events will be tallied onto
id : int
Unique identifier for the filter
bins : list of tuple
A list of mesh indices / surfaces for each filter bin, e.g. [(1, 1,
'x-min out'), (1, 1, 'x-min in'), ...]
num_bins : Integral
The number of filter bins
"""
@MeshFilter.mesh.setter
def mesh(self, mesh):
cv.check_type('filter mesh', mesh, openmc.Mesh)
self._mesh = mesh
# Take the product of mesh indices and current names
n_dim = len(mesh.dimension)
self.bins = [mesh_tuple + (surf,) for mesh_tuple, surf in
product(mesh.indices, _CURRENT_NAMES[:4*n_dim])]
def get_pandas_dataframe(self, data_size, stride, **kwargs):
"""Builds a Pandas DataFrame for the Filter's bins.
This method constructs a Pandas DataFrame object for the filter with
columns annotated by filter bin information. This is a helper method for
:meth:`Tally.get_pandas_dataframe`.
Parameters
----------
data_size : int
The total number of bins in the tally corresponding to this filter
stride : int
Stride in memory for the filter
Returns
-------
pandas.DataFrame
A Pandas DataFrame with three columns describing the x,y,z mesh
cell indices corresponding to each filter bin. The number of rows
in the DataFrame is the same as the total number of bins in the
corresponding tally, with the filter bin appropriately tiled to map
to the corresponding tally bins.
See also
--------
Tally.get_pandas_dataframe(), CrossFilter.get_pandas_dataframe()
"""
# Initialize Pandas DataFrame
df = pd.DataFrame()
# Initialize dictionary to build Pandas Multi-index column
filter_dict = {}
# Append Mesh ID as outermost index of multi-index
mesh_key = 'mesh {}'.format(self.mesh.id)
# Find mesh dimensions - use 3D indices for simplicity
n_surfs = 4 * len(self.mesh.dimension)
if len(self.mesh.dimension) == 3:
nx, ny, nz = self.mesh.dimension
elif len(self.mesh.dimension) == 2:
nx, ny = self.mesh.dimension
nz = 1
else:
nx = self.mesh.dimension
ny = nz = 1
# Generate multi-index sub-column for x-axis
filter_bins = np.arange(1, nx + 1)
repeat_factor = n_surfs * stride
filter_bins = np.repeat(filter_bins, repeat_factor)
tile_factor = data_size // len(filter_bins)
filter_bins = np.tile(filter_bins, tile_factor)
filter_dict[(mesh_key, 'x')] = filter_bins
# Generate multi-index sub-column for y-axis
if len(self.mesh.dimension) > 1:
filter_bins = np.arange(1, ny + 1)
repeat_factor = n_surfs * nx * stride
filter_bins = np.repeat(filter_bins, repeat_factor)
tile_factor = data_size // len(filter_bins)
filter_bins = np.tile(filter_bins, tile_factor)
filter_dict[(mesh_key, 'y')] = filter_bins
# Generate multi-index sub-column for z-axis
if len(self.mesh.dimension) > 2:
filter_bins = np.arange(1, nz + 1)
repeat_factor = n_surfs * nx * ny * stride
filter_bins = np.repeat(filter_bins, repeat_factor)
tile_factor = data_size // len(filter_bins)
filter_bins = np.tile(filter_bins, tile_factor)
filter_dict[(mesh_key, 'z')] = filter_bins
# Generate multi-index sub-column for surface
repeat_factor = stride
filter_bins = np.repeat(_CURRENT_NAMES[:n_surfs], repeat_factor)
tile_factor = data_size // len(filter_bins)
filter_bins = np.tile(filter_bins, tile_factor)
filter_dict[(mesh_key, 'surf')] = filter_bins
# Initialize a Pandas DataFrame from the mesh dictionary
return pd.concat([df, pd.DataFrame(filter_dict)])
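# Illustrative note: for a 2-D mesh the ``mesh`` setter above pairs every
# mesh index tuple with the first 8 entries of _CURRENT_NAMES, producing bins
# such as (1, 1, 'x-min out'), (1, 1, 'x-min in'), ..., (1, 1, 'y-max in'),
# then the same eight surfaces for each remaining mesh cell.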
class RealFilter(Filter):
"""Tally modifier that describes phase-space and other characteristics
Parameters
----------
values : iterable of float
A list of values for which each successive pair constitutes a range of
values for a single bin
filter_id : int
Unique identifier for the filter
Attributes
----------
values : numpy.ndarray
An array of values for which each successive pair constitutes a range of
values for a single bin
id : int
Unique identifier for the filter
bins : numpy.ndarray
An array of shape (N, 2) where each row is a pair of values indicating a
filter bin range
num_bins : int
The number of filter bins
"""
def __init__(self, values, filter_id=None):
self.values = np.asarray(values)
self.bins = np.vstack((self.values[:-1], self.values[1:])).T
self.id = filter_id
def __gt__(self, other):
if type(self) is type(other):
# Compare largest/smallest bin edges in filters
# This logic is used when merging tallies with real filters
return self.values[0] >= other.values[-1]
else:
return super().__gt__(other)
def __repr__(self):
string = type(self).__name__ + '\n'
string += '{: <16}=\t{}\n'.format('\tValues', self.values)
string += '{: <16}=\t{}\n'.format('\tID', self.id)
return string
@Filter.bins.setter
def bins(self, bins):
Filter.bins.__set__(self, np.asarray(bins))
def check_bins(self, bins):
for v0, v1 in bins:
# Values should be real
cv.check_type('filter value', v0, Real)
cv.check_type('filter value', v1, Real)
# Make sure that each tuple has values that are increasing
if v1 < v0:
raise ValueError('Values {} and {} appear to be out of order'
.format(v0, v1))
for pair0, pair1 in zip(bins[:-1], bins[1:]):
# Successive pairs should be ordered
if pair1[1] < pair0[1]:
raise ValueError('Values {} and {} appear to be out of order'
.format(pair1[1], pair0[1]))
def can_merge(self, other):
if type(self) is not type(other):
return False
if self.bins[0, 0] == other.bins[-1][1]:
# This low edge coincides with other's high edge
return True
elif self.bins[-1][1] == other.bins[0, 0]:
# This high edge coincides with other's low edge
return True
else:
return False
def merge(self, other):
if not self.can_merge(other):
msg = 'Unable to merge "{0}" with "{1}" ' \
'filters'.format(type(self), type(other))
raise ValueError(msg)
# Merge unique filter bins
merged_values = np.concatenate((self.values, other.values))
merged_values = np.unique(merged_values)
# Create a new filter with these bins and a new auto-generated ID
return type(self)(sorted(merged_values))
def is_subset(self, other):
"""Determine if another filter is a subset of this filter.
If all of the bins in the other filter are included as bins in this
filter, then it is a subset of this filter.
Parameters
----------
other : openmc.Filter
The filter to query as a subset of this filter
Returns
-------
bool
Whether or not the other filter is a subset of this filter
"""
if type(self) is not type(other):
return False
elif self.num_bins != other.num_bins:
return False
else:
return np.allclose(self.values, other.values)
def get_bin_index(self, filter_bin):
i = np.where(self.bins[:, 1] == filter_bin[1])[0]
if len(i) == 0:
msg = 'Unable to get the bin index for Filter since "{0}" ' \
'is not one of the bins'.format(filter_bin)
raise ValueError(msg)
else:
return i[0]
def get_pandas_dataframe(self, data_size, stride, **kwargs):
"""Builds a Pandas DataFrame for the Filter's bins.
This method constructs a Pandas DataFrame object for the filter with
columns annotated by filter bin information. This is a helper method for
:meth:`Tally.get_pandas_dataframe`.
Parameters
----------
data_size : int
The total number of bins in the tally corresponding to this filter
stride : int
Stride in memory for the filter
Returns
-------
pandas.DataFrame
A Pandas DataFrame with one column of the lower energy bound and one
column of upper energy bound for each filter bin. The number of
rows in the DataFrame is the same as the total number of bins in the
corresponding tally, with the filter bin appropriately tiled to map
to the corresponding tally bins.
See also
--------
Tally.get_pandas_dataframe(), CrossFilter.get_pandas_dataframe()
"""
# Initialize Pandas DataFrame
df = pd.DataFrame()
# Extract the lower and upper energy bounds, then repeat and tile
# them as necessary to account for other filters.
lo_bins = np.repeat(self.bins[:, 0], stride)
hi_bins = np.repeat(self.bins[:, 1], stride)
tile_factor = data_size // len(lo_bins)
lo_bins = np.tile(lo_bins, tile_factor)
hi_bins = np.tile(hi_bins, tile_factor)
# Add the new energy columns to the DataFrame.
if hasattr(self, 'units'):
units = ' [{}]'.format(self.units)
else:
units = ''
df.loc[:, self.short_name.lower() + ' low' + units] = lo_bins
df.loc[:, self.short_name.lower() + ' high' + units] = hi_bins
return df
def to_xml_element(self):
"""Return XML Element representing the Filter.
Returns
-------
element : xml.etree.ElementTree.Element
XML element containing filter data
"""
element = super().to_xml_element()
element[0].text = ' '.join(str(x) for x in self.values)
return element
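# Illustrative note: RealFilter([0.0, 1.0, 20.0e6]) stores
# values == [0.0, 1.0, 20.0e6] and bins == [[0.0, 1.0], [1.0, 20.0e6]]. It
# can be merged with RealFilter([20.0e6, 30.0e6]) because the edge 20.0e6 is
# shared, and the merge yields RealFilter([0.0, 1.0, 20.0e6, 30.0e6]).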
class EnergyFilter(RealFilter):
"""Bins tally events based on incident particle energy.
Parameters
----------
values : Iterable of Real
A list of values for which each successive pair constitutes a range of
energies in [eV] for a single bin
filter_id : int
Unique identifier for the filter
Attributes
----------
values : numpy.ndarray
An array of values for which each successive pair constitutes a range of
energies in [eV] for a single bin
id : int
Unique identifier for the filter
bins : numpy.ndarray
An array of shape (N, 2) where each row is a pair of energies in [eV]
for a single filter bin
num_bins : int
The number of filter bins
"""
units = 'eV'
def get_bin_index(self, filter_bin):
        # Use the upper energy bound to find the index for RealFilters
deltas = np.abs(self.bins[:, 1] - filter_bin[1]) / filter_bin[1]
min_delta = np.min(deltas)
if min_delta < 1E-3:
return deltas.argmin()
else:
msg = 'Unable to get the bin index for Filter since "{0}" ' \
'is not one of the bins'.format(filter_bin)
raise ValueError(msg)
def check_bins(self, bins):
super().check_bins(bins)
for v0, v1 in bins:
cv.check_greater_than('filter value', v0, 0., equality=True)
cv.check_greater_than('filter value', v1, 0., equality=True)
class EnergyoutFilter(EnergyFilter):
"""Bins tally events based on outgoing particle energy.
Parameters
----------
values : Iterable of Real
A list of values for which each successive pair constitutes a range of
energies in [eV] for a single bin
filter_id : int
Unique identifier for the filter
Attributes
----------
values : numpy.ndarray
An array of values for which each successive pair constitutes a range of
energies in [eV] for a single bin
id : int
Unique identifier for the filter
bins : numpy.ndarray
An array of shape (N, 2) where each row is a pair of energies in [eV]
for a single filter bin
num_bins : int
The number of filter bins
"""
def _path_to_levels(path):
"""Convert distribcell path to list of levels
Parameters
----------
path : str
Distribcell path
Returns
-------
list
List of levels in path
"""
# Split path into universes/cells/lattices
path_items = path.split('->')
# Pair together universe and cell information from the same level
idx = [i for i, item in enumerate(path_items) if item.startswith('u')]
for i in reversed(idx):
univ_id = int(path_items.pop(i)[1:])
cell_id = int(path_items.pop(i)[1:])
path_items.insert(i, ('universe', univ_id, cell_id))
# Reformat lattice into tuple
idx = [i for i, item in enumerate(path_items) if isinstance(item, str)]
for i in idx:
item = path_items.pop(i)[1:-1]
lat_id, lat_xyz = item.split('(')
lat_id = int(lat_id)
lat_xyz = tuple(int(x) for x in lat_xyz.split(','))
path_items.insert(i, ('lattice', lat_id, lat_xyz))
return path_items
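# Worked example (illustrative): a distribcell path of the form
# 'u0->c1->l2(1,0,1)->u3->c4' is converted by _path_to_levels into
# [('universe', 0, 1), ('lattice', 2, (1, 0, 1)), ('universe', 3, 4)].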
class DistribcellFilter(Filter):
"""Bins tally event locations on instances of repeated cells.
Parameters
----------
cell : openmc.Cell or Integral
The distributed cell to tally. Either an openmc.Cell or an Integral
cell ID number can be used.
filter_id : int
Unique identifier for the filter
Attributes
----------
bins : Iterable of Integral
An iterable with one element---the ID of the distributed Cell.
id : int
Unique identifier for the filter
num_bins : int
The number of filter bins
paths : list of str
The paths traversed through the CSG tree to reach each distribcell
instance (for 'distribcell' filters only)
"""
def __init__(self, cell, filter_id=None):
self._paths = None
super().__init__(cell, filter_id)
@classmethod
def from_hdf5(cls, group, **kwargs):
if group['type'][()].decode() != cls.short_name.lower():
raise ValueError("Expected HDF5 data for filter type '"
+ cls.short_name.lower() + "' but got '"
                             + group['type'][()].decode() + "' instead")
filter_id = int(group.name.split('/')[-1].lstrip('filter '))
out = cls(group['bins'][()], filter_id=filter_id)
out._num_bins = group['n_bins'][()]
return out
@property
def num_bins(self):
# Need to handle number of bins carefully -- for distribcell tallies, we
# need to know how many instances of the cell there are
return self._num_bins
@property
def paths(self):
return self._paths
@Filter.bins.setter
def bins(self, bins):
# Format the bins as a 1D numpy array.
bins = np.atleast_1d(bins)
# Make sure there is only 1 bin.
if not len(bins) == 1:
msg = 'Unable to add bins "{0}" to a DistribcellFilter since ' \
'only a single distribcell can be used per tally'.format(bins)
raise ValueError(msg)
# Check the type and extract the id, if necessary.
cv.check_type('distribcell bin', bins[0], (Integral, openmc.Cell))
if isinstance(bins[0], openmc.Cell):
bins = np.atleast_1d(bins[0].id)
self._bins = bins
@paths.setter
def paths(self, paths):
cv.check_iterable_type('paths', paths, str)
self._paths = paths
def can_merge(self, other):
# Distribcell filters cannot have more than one bin
return False
def get_bin_index(self, filter_bin):
# Filter bins for distribcells are indices of each unique placement of
# the Cell in the Geometry (consecutive integers starting at 0).
return filter_bin
def get_pandas_dataframe(self, data_size, stride, **kwargs):
"""Builds a Pandas DataFrame for the Filter's bins.
This method constructs a Pandas DataFrame object for the filter with
columns annotated by filter bin information. This is a helper method for
:meth:`Tally.get_pandas_dataframe`.
Parameters
----------
data_size : int
The total number of bins in the tally corresponding to this filter
stride : int
Stride in memory for the filter
Keyword arguments
-----------------
paths : bool
If True (default), expand distribcell indices into multi-index
columns describing the path to that distribcell through the CSG
tree. NOTE: This option assumes that all distribcell paths are of
the same length and do not have the same universes and cells but
different lattice cell indices.
Returns
-------
pandas.DataFrame
A Pandas DataFrame with columns describing distributed cells. The
            form will be either:
1. a single column with the cell instance IDs (without summary info)
2. separate columns for the cell IDs, universe IDs, and lattice IDs
and x,y,z cell indices corresponding to each (distribcell paths).
The number of rows in the DataFrame is the same as the total number
of bins in the corresponding tally, with the filter bin
appropriately tiled to map to the corresponding tally bins.
See also
--------
Tally.get_pandas_dataframe(), CrossFilter.get_pandas_dataframe()
"""
# Initialize Pandas DataFrame
df = pd.DataFrame()
level_df = None
paths = kwargs.setdefault('paths', True)
# Create Pandas Multi-index columns for each level in CSG tree
if paths:
# Distribcell paths require linked metadata from the Summary
if self.paths is None:
msg = 'Unable to construct distribcell paths since ' \
'the Summary is not linked to the StatePoint'
raise ValueError(msg)
# Make copy of array of distribcell paths to use in
# Pandas Multi-index column construction
num_offsets = len(self.paths)
paths = [_path_to_levels(p) for p in self.paths]
# Loop over CSG levels in the distribcell paths
num_levels = len(paths[0])
for i_level in range(num_levels):
# Use level key as first index in Pandas Multi-index column
level_key = 'level {}'.format(i_level + 1)
# Create a dictionary for this level for Pandas Multi-index
level_dict = OrderedDict()
# Use the first distribcell path to determine if level
# is a universe/cell or lattice level
path = paths[0]
if path[i_level][0] == 'lattice':
# Initialize prefix Multi-index keys
lat_id_key = (level_key, 'lat', 'id')
lat_x_key = (level_key, 'lat', 'x')
lat_y_key = (level_key, 'lat', 'y')
lat_z_key = (level_key, 'lat', 'z')
# Allocate NumPy arrays for each CSG level and
# each Multi-index column in the DataFrame
level_dict[lat_id_key] = np.empty(num_offsets)
level_dict[lat_x_key] = np.empty(num_offsets)
level_dict[lat_y_key] = np.empty(num_offsets)
if len(path[i_level][2]) == 3:
level_dict[lat_z_key] = np.empty(num_offsets)
else:
# Initialize prefix Multi-index keys
univ_key = (level_key, 'univ', 'id')
cell_key = (level_key, 'cell', 'id')
# Allocate NumPy arrays for each CSG level and
# each Multi-index column in the DataFrame
level_dict[univ_key] = np.empty(num_offsets)
level_dict[cell_key] = np.empty(num_offsets)
# Populate Multi-index arrays with all distribcell paths
for i, path in enumerate(paths):
level = path[i_level]
if level[0] == 'lattice':
# Assign entry to Lattice Multi-index column
level_dict[lat_id_key][i] = level[1]
level_dict[lat_x_key][i] = level[2][0]
level_dict[lat_y_key][i] = level[2][1]
if len(level[2]) == 3:
level_dict[lat_z_key][i] = level[2][2]
else:
# Assign entry to Universe, Cell Multi-index columns
level_dict[univ_key][i] = level[1]
level_dict[cell_key][i] = level[2]
# Tile the Multi-index columns
for level_key, level_bins in level_dict.items():
level_bins = np.repeat(level_bins, stride)
tile_factor = data_size // len(level_bins)
level_bins = np.tile(level_bins, tile_factor)
level_dict[level_key] = level_bins
# Initialize a Pandas DataFrame from the level dictionary
if level_df is None:
level_df = pd.DataFrame(level_dict)
else:
level_df = pd.concat([level_df, pd.DataFrame(level_dict)],
axis=1)
# Create DataFrame column for distribcell instance IDs
# NOTE: This is performed regardless of whether the user
# requests Summary geometric information
filter_bins = np.arange(self.num_bins)
filter_bins = np.repeat(filter_bins, stride)
tile_factor = data_size // len(filter_bins)
filter_bins = np.tile(filter_bins, tile_factor)
df = pd.DataFrame({self.short_name.lower() : filter_bins})
# Concatenate with DataFrame of distribcell instance IDs
if level_df is not None:
level_df = level_df.dropna(axis=1, how='all')
level_df = level_df.astype(np.int)
df = pd.concat([level_df, df], axis=1)
return df
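# Usage sketch (the cell ID 60 is made up for illustration): a
# DistribcellFilter stores a single distributed-cell ID; its `paths` attribute
# stays None until a Summary is linked to the StatePoint that holds the tally.
def _example_distribcell_filter():
    dc = DistribcellFilter(60)
    # dc.bins == array([60]); dc.paths is None at this point
    return dc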
class MuFilter(RealFilter):
"""Bins tally events based on particle scattering angle.
Parameters
----------
values : int or Iterable of Real
        A grid of scattering angles which events will be binned into. Values
represent the cosine of the scattering angle. If an iterable is given,
the values will be used explicitly as grid points. If a single int is
given, the range [-1, 1] will be divided up equally into that number of
bins.
filter_id : int
Unique identifier for the filter
Attributes
----------
values : numpy.ndarray
An array of values for which each successive pair constitutes a range of
scattering angle cosines for a single bin
id : int
Unique identifier for the filter
bins : numpy.ndarray
An array of shape (N, 2) where each row is a pair of scattering angle
cosines for a single filter bin
num_bins : Integral
The number of filter bins
"""
def __init__(self, values, filter_id=None):
if isinstance(values, Integral):
values = np.linspace(-1., 1., values + 1)
super().__init__(values, filter_id)
def check_bins(self, bins):
super().check_bins(bins)
for x in np.ravel(bins):
if not np.isclose(x, -1.):
cv.check_greater_than('filter value', x, -1., equality=True)
if not np.isclose(x, 1.):
cv.check_less_than('filter value', x, 1., equality=True)
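# Usage sketch: an integer argument asks MuFilter to split [-1, 1] into that
# many equal-width cosine bins (the values follow directly from np.linspace).
def _example_mu_filter():
    mu = MuFilter(4)
    # mu.values == array([-1. , -0.5,  0. ,  0.5,  1. ])
    # mu.bins pairs successive values: [-1, -0.5], [-0.5, 0], [0, 0.5], [0.5, 1]
    return mu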
class PolarFilter(RealFilter):
"""Bins tally events based on the incident particle's direction.
Parameters
----------
values : int or Iterable of Real
        A grid of polar angles which events will be binned into. Values represent
an angle in radians relative to the z-axis. If an iterable is given, the
values will be used explicitly as grid points. If a single int is given,
the range [0, pi] will be divided up equally into that number of bins.
filter_id : int
Unique identifier for the filter
Attributes
----------
values : numpy.ndarray
An array of values for which each successive pair constitutes a range of
polar angles in [rad] for a single bin
id : int
Unique identifier for the filter
bins : numpy.ndarray
An array of shape (N, 2) where each row is a pair of polar angles for a
single filter bin
num_bins : Integral
The number of filter bins
"""
units = 'rad'
def __init__(self, values, filter_id=None):
if isinstance(values, Integral):
values = np.linspace(0., np.pi, values + 1)
super().__init__(values, filter_id)
def check_bins(self, bins):
super().check_bins(bins)
for x in np.ravel(bins):
if not np.isclose(x, 0.):
cv.check_greater_than('filter value', x, 0., equality=True)
if not np.isclose(x, np.pi):
cv.check_less_than('filter value', x, np.pi, equality=True)
class AzimuthalFilter(RealFilter):
"""Bins tally events based on the incident particle's direction.
Parameters
----------
values : int or Iterable of Real
        A grid of azimuthal angles which events will be binned into. Values
represent an angle in radians relative to the x-axis and perpendicular
to the z-axis. If an iterable is given, the values will be used
explicitly as grid points. If a single int is given, the range
[-pi, pi) will be divided up equally into that number of bins.
filter_id : int
Unique identifier for the filter
Attributes
----------
values : numpy.ndarray
An array of values for which each successive pair constitutes a range of
azimuthal angles in [rad] for a single bin
id : int
Unique identifier for the filter
bins : numpy.ndarray
An array of shape (N, 2) where each row is a pair of azimuthal angles
for a single filter bin
num_bins : Integral
The number of filter bins
"""
units = 'rad'
def __init__(self, values, filter_id=None):
if isinstance(values, Integral):
values = np.linspace(-np.pi, np.pi, values + 1)
super().__init__(values, filter_id)
def check_bins(self, bins):
super().check_bins(bins)
for x in np.ravel(bins):
if not np.isclose(x, -np.pi):
cv.check_greater_than('filter value', x, -np.pi, equality=True)
if not np.isclose(x, np.pi):
cv.check_less_than('filter value', x, np.pi, equality=True)
class DelayedGroupFilter(Filter):
"""Bins fission events based on the produced neutron precursor groups.
Parameters
----------
bins : iterable of int
The delayed neutron precursor groups. For example, ENDF/B-VII.1 uses
6 precursor groups so a tally with all groups will have bins =
[1, 2, 3, 4, 5, 6].
filter_id : int
Unique identifier for the filter
Attributes
----------
bins : iterable of int
The delayed neutron precursor groups. For example, ENDF/B-VII.1 uses
6 precursor groups so a tally with all groups will have bins =
[1, 2, 3, 4, 5, 6].
id : int
Unique identifier for the filter
num_bins : Integral
The number of filter bins
"""
def check_bins(self, bins):
# Check the bin values.
for g in bins:
cv.check_greater_than('delayed group', g, 0)
class EnergyFunctionFilter(Filter):
"""Multiplies tally scores by an arbitrary function of incident energy.
The arbitrary function is described by a piecewise linear-linear
interpolation of energy and y values. Values outside of the given energy
range will be evaluated as zero.
Parameters
----------
energy : Iterable of Real
A grid of energy values in [eV]
y : iterable of Real
        A grid of interpolant values (the multipliers applied to tally scores)
filter_id : int
Unique identifier for the filter
Attributes
----------
energy : Iterable of Real
A grid of energy values in [eV]
y : iterable of Real
        A grid of interpolant values (the multipliers applied to tally scores)
id : int
Unique identifier for the filter
num_bins : Integral
The number of filter bins (always 1 for this filter)
"""
def __init__(self, energy, y, filter_id=None):
self.energy = energy
self.y = y
self.id = filter_id
def __eq__(self, other):
if type(self) is not type(other):
return False
elif not all(self.energy == other.energy):
return False
elif not all(self.y == other.y):
return False
else:
return True
def __gt__(self, other):
if type(self) is not type(other):
if self.short_name in _FILTER_TYPES and \
other.short_name in _FILTER_TYPES:
delta = _FILTER_TYPES.index(self.short_name) - \
_FILTER_TYPES.index(other.short_name)
return delta > 0
else:
return False
else:
return False
def __lt__(self, other):
if type(self) is not type(other):
if self.short_name in _FILTER_TYPES and \
other.short_name in _FILTER_TYPES:
delta = _FILTER_TYPES.index(self.short_name) - \
_FILTER_TYPES.index(other.short_name)
return delta < 0
else:
return False
else:
return False
def __hash__(self):
string = type(self).__name__ + '\n'
string += '{: <16}=\t{}\n'.format('\tEnergy', self.energy)
string += '{: <16}=\t{}\n'.format('\tInterpolant', self.y)
return hash(string)
def __repr__(self):
string = type(self).__name__ + '\n'
string += '{: <16}=\t{}\n'.format('\tEnergy', self.energy)
string += '{: <16}=\t{}\n'.format('\tInterpolant', self.y)
string += '{: <16}=\t{}\n'.format('\tID', self.id)
return string
@classmethod
def from_hdf5(cls, group, **kwargs):
if group['type'][()].decode() != cls.short_name.lower():
raise ValueError("Expected HDF5 data for filter type '"
+ cls.short_name.lower() + "' but got '"
                             + group['type'][()].decode() + "' instead")
energy = group['energy'][()]
y = group['y'][()]
filter_id = int(group.name.split('/')[-1].lstrip('filter '))
return cls(energy, y, filter_id=filter_id)
@classmethod
def from_tabulated1d(cls, tab1d):
"""Construct a filter from a Tabulated1D object.
Parameters
----------
tab1d : openmc.data.Tabulated1D
A linear-linear Tabulated1D object with only a single interpolation
region.
Returns
-------
EnergyFunctionFilter
"""
cv.check_type('EnergyFunctionFilter tab1d', tab1d,
openmc.data.Tabulated1D)
if tab1d.n_regions > 1:
raise ValueError('Only Tabulated1Ds with a single interpolation '
'region are supported')
if tab1d.interpolation[0] != 2:
            raise ValueError('Only linear-linear Tabulated1Ds are supported')
return cls(tab1d.x, tab1d.y)
@property
def energy(self):
return self._energy
@property
def y(self):
return self._y
@property
def bins(self):
raise AttributeError('EnergyFunctionFilters have no bins.')
@property
def num_bins(self):
return 1
@energy.setter
def energy(self, energy):
# Format the bins as a 1D numpy array.
energy = np.atleast_1d(energy)
# Make sure the values are Real and positive.
cv.check_type('filter energy grid', energy, Iterable, Real)
for E in energy:
cv.check_greater_than('filter energy grid', E, 0, equality=True)
self._energy = energy
@y.setter
def y(self, y):
# Format the bins as a 1D numpy array.
y = np.atleast_1d(y)
# Make sure the values are Real.
cv.check_type('filter interpolant values', y, Iterable, Real)
self._y = y
@bins.setter
def bins(self, bins):
raise RuntimeError('EnergyFunctionFilters have no bins.')
def to_xml_element(self):
"""Return XML Element representing the Filter.
Returns
-------
element : xml.etree.ElementTree.Element
XML element containing filter data
"""
element = ET.Element('filter')
element.set('id', str(self.id))
element.set('type', self.short_name.lower())
subelement = ET.SubElement(element, 'energy')
subelement.text = ' '.join(str(e) for e in self.energy)
subelement = ET.SubElement(element, 'y')
subelement.text = ' '.join(str(y) for y in self.y)
return element
def can_merge(self, other):
return False
def is_subset(self, other):
return self == other
def get_bin_index(self, filter_bin):
# This filter only has one bin. Always return 0.
return 0
def get_pandas_dataframe(self, data_size, stride, **kwargs):
"""Builds a Pandas DataFrame for the Filter's bins.
This method constructs a Pandas DataFrame object for the filter with
columns annotated by filter bin information. This is a helper method for
:meth:`Tally.get_pandas_dataframe`.
Parameters
----------
data_size : int
The total number of bins in the tally corresponding to this filter
stride : int
Stride in memory for the filter
Returns
-------
pandas.DataFrame
A Pandas DataFrame with a column that is filled with a hash of this
filter. EnergyFunctionFilters have only 1 bin so the purpose of this
DataFrame column is to differentiate the filter from other
EnergyFunctionFilters. The number of rows in the DataFrame is the
same as the total number of bins in the corresponding tally.
See also
--------
Tally.get_pandas_dataframe(), CrossFilter.get_pandas_dataframe()
"""
df = pd.DataFrame()
# There is no clean way of sticking all the energy, y data into a
# DataFrame so instead we'll just make a column with the filter name
# and fill it with a hash of the __repr__. We want a hash that is
# reproducible after restarting the interpreter so we'll use hashlib.md5
# rather than the intrinsic hash().
hash_fun = hashlib.md5()
hash_fun.update(repr(self).encode('utf-8'))
out = hash_fun.hexdigest()
# The full 16 bytes make for a really wide column. Just 7 bytes (14
# hex characters) of the digest are probably sufficient.
out = out[:14]
filter_bins = np.repeat(out, stride)
tile_factor = data_size // len(filter_bins)
filter_bins = np.tile(filter_bins, tile_factor)
df = pd.concat([df, pd.DataFrame(
{self.short_name.lower(): filter_bins})])
return df
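# Usage sketch for EnergyFunctionFilter (the energy grid and multipliers below
# are illustrative, not taken from any evaluation). The from_tabulated1d path
# assumes openmc.data.Tabulated1D defaults to a single linear-linear region.
def _example_energy_function_filter():
    flt = EnergyFunctionFilter([0.0, 1.0e6, 2.0e7], [1.0, 2.0, 3.0])
    tab = openmc.data.Tabulated1D([0.0, 1.0e6, 2.0e7], [1.0, 2.0, 3.0])
    flt_from_tab = EnergyFunctionFilter.from_tabulated1d(tab)
    return flt, flt_from_tab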
|
mit
|
piskvorky/gensim
|
gensim/sklearn_api/phrases.py
|
2
|
8727
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2011 Radim Rehurek <[email protected]>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""Scikit learn interface for `gensim.models.phrases.Phrases`.
Follows scikit-learn API conventions to facilitate using gensim along with scikit-learn.
Examples
--------
.. sourcecode:: pycon
>>> from gensim.sklearn_api.phrases import PhrasesTransformer
>>>
>>> # Create the model. Make sure no term is ignored and combinations seen 3+ times are captured.
>>> m = PhrasesTransformer(min_count=1, threshold=3)
>>> texts = [
... ['I', 'love', 'computer', 'science'],
... ['computer', 'science', 'is', 'my', 'passion'],
... ['I', 'studied', 'computer', 'science']
... ]
>>>
>>> # Use sklearn fit_transform to see the transformation.
>>> # Since computer and science were seen together 3+ times they are considered a phrase.
>>> assert ['I', 'love', 'computer_science'] == m.fit_transform(texts)[0]
"""
from sklearn.base import TransformerMixin, BaseEstimator
from sklearn.exceptions import NotFittedError
from gensim import models
from gensim.models.phrases import FrozenPhrases, ENGLISH_CONNECTOR_WORDS # noqa:F401
class PhrasesTransformer(TransformerMixin, BaseEstimator):
"""Base Phrases module, wraps :class:`~gensim.models.phrases.Phrases`.
    For more information, please have a look at `Mikolov, et. al: "Distributed Representations
of Words and Phrases and their Compositionality" <https://arxiv.org/abs/1310.4546>`_ and
`Gerlof Bouma: "Normalized (Pointwise) Mutual Information in Collocation Extraction"
<https://svn.spraakdata.gu.se/repos/gerlof/pub/www/Docs/npmi-pfd.pdf>`_.
"""
def __init__(
self, min_count=5, threshold=10.0, max_vocab_size=40000000,
delimiter='_', progress_per=10000, scoring='default', connector_words=frozenset(),
):
"""
Parameters
----------
min_count : int, optional
Terms with a count lower than this will be ignored
threshold : float, optional
Only phrases scoring above this will be accepted, see `scoring` below.
max_vocab_size : int, optional
Maximum size of the vocabulary. Used to control pruning of less common words, to keep memory under control.
The default of 40M needs about 3.6GB of RAM.
delimiter : str, optional
Character used to join collocation tokens (e.g. '_').
progress_per : int, optional
            Training will report progress to the logger once every `progress_per` phrases have been learned.
scoring : str or function, optional
Specifies how potential phrases are scored for comparison to the `threshold`
setting. `scoring` can be set with either a string that refers to a built-in scoring function,
or with a function with the expected parameter names. Two built-in scoring functions are available
by setting `scoring` to a string:
* 'default': `Mikolov, et. al: "Distributed Representations of Words and Phrases
and their Compositionality" <https://arxiv.org/abs/1310.4546>`_.
* 'npmi': Explained in `Gerlof Bouma: "Normalized (Pointwise) Mutual Information in Collocation
Extraction" <https://svn.spraakdata.gu.se/repos/gerlof/pub/www/Docs/npmi-pfd.pdf>`_.
'npmi' is more robust when dealing with common words that form part of common bigrams, and
ranges from -1 to 1, but is slower to calculate than the default.
To use a custom scoring function, create a function with the following parameters and set the `scoring`
parameter to the custom function, see :func:`~gensim.models.phrases.original_scorer` as example.
            You must define all of these parameters (even if not all of them are used):
* worda_count: number of occurrences in `sentences` of the first token in the phrase being scored
* wordb_count: number of occurrences in `sentences` of the second token in the phrase being scored
* bigram_count: number of occurrences in `sentences` of the phrase being scored
* len_vocab: the number of unique tokens in `sentences`
* min_count: the `min_count` setting of the Phrases class
* corpus_word_count: the total number of (non-unique) tokens in `sentences`
            A scoring function that does not accept all of these parameters (even if some of them
            go unused) will raise a ValueError on initialization of the Phrases class.
            The scoring function must be pickleable.
connector_words : set of str, optional
Set of words that may be included within a phrase, without affecting its scoring.
No phrase can start nor end with a connector word; a phrase may contain any number of
connector words in the middle.
**If your texts are in English, set ``connector_words=phrases.ENGLISH_CONNECTOR_WORDS``.**
This will cause phrases to include common English articles, prepositions and
            conjunctions, such as `bank_of_america` or `eye_of_the_beholder`.
For other languages or specific applications domains, use custom ``connector_words``
that make sense there: ``connector_words=frozenset("der die das".split())`` etc.
"""
self.gensim_model = None
self.phraser = None
self.min_count = min_count
self.threshold = threshold
self.max_vocab_size = max_vocab_size
self.delimiter = delimiter
self.progress_per = progress_per
self.scoring = scoring
self.connector_words = connector_words
def __setstate__(self, state):
self.__dict__ = state
self.connector_words = frozenset()
self.phraser = None
def fit(self, X, y=None):
"""Fit the model according to the given training data.
Parameters
----------
X : iterable of list of str
Sequence of sentences to be used for training the model.
Returns
-------
:class:`~gensim.sklearn_api.phrases.PhrasesTransformer`
The trained model.
"""
self.gensim_model = models.Phrases(
sentences=X, min_count=self.min_count, threshold=self.threshold,
max_vocab_size=self.max_vocab_size, delimiter=self.delimiter,
progress_per=self.progress_per, scoring=self.scoring, connector_words=self.connector_words,
)
self.phraser = FrozenPhrases(self.gensim_model)
return self
def transform(self, docs):
"""Transform the input documents into phrase tokens.
Words in the sentence will be joined by `self.delimiter`.
Parameters
----------
docs : {iterable of list of str, list of str}
            Sequence of documents to be transformed.
Returns
-------
iterable of str
Phrase representation for each of the input sentences.
"""
if self.gensim_model is None:
raise NotFittedError(
"This model has not been fitted yet. Call 'fit' with appropriate arguments before using this method."
)
if self.phraser is None:
self.phraser = FrozenPhrases(self.gensim_model)
# input as python lists
if isinstance(docs[0], str):
docs = [docs]
return [self.phraser[doc] for doc in docs]
def partial_fit(self, X):
"""Train model over a potentially incomplete set of sentences.
This method can be used in two ways:
1. On an unfitted model in which case the model is initialized and trained on `X`.
2. On an already fitted model in which case the X sentences are **added** to the vocabulary.
Parameters
----------
X : iterable of list of str
Sequence of sentences to be used for training the model.
Returns
-------
:class:`~gensim.sklearn_api.phrases.PhrasesTransformer`
The trained model.
"""
if self.gensim_model is None:
self.gensim_model = models.Phrases(
sentences=X, min_count=self.min_count, threshold=self.threshold,
max_vocab_size=self.max_vocab_size, delimiter=self.delimiter,
progress_per=self.progress_per, scoring=self.scoring, connector_words=self.connector_words,
)
self.gensim_model.add_vocab(X)
self.phraser = FrozenPhrases(self.gensim_model)
return self
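# Illustrative sketch (the sentences are made up): partial_fit can be called
# repeatedly; each call adds the batch to the vocabulary and refreshes the
# FrozenPhrases object used by transform().
def _example_partial_fit():
    m = PhrasesTransformer(min_count=1, threshold=3)
    m.partial_fit([['data', 'science', 'is', 'fun']])
    m.partial_fit([['data', 'science', 'rocks'], ['I', 'like', 'data', 'science']])
    return m.transform([['data', 'science', 'here']])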
|
lgpl-2.1
|
stylianos-kampakis/scikit-learn
|
examples/calibration/plot_calibration_multiclass.py
|
272
|
6972
|
"""
==================================================
Probability Calibration for 3-class classification
==================================================
This example illustrates how sigmoid calibration changes predicted
probabilities for a 3-class classification problem. Illustrated is the
standard 2-simplex, where the three corners correspond to the three classes.
Arrows point from the probability vectors predicted by an uncalibrated
classifier to the probability vectors predicted by the same classifier after
sigmoid calibration on a hold-out validation set. Colors indicate the true
class of an instance (red: class 1, green: class 2, blue: class 3).
The base classifier is a random forest classifier with 25 base estimators
(trees). If this classifier is trained on all 800 training datapoints, it is
overly confident in its predictions and thus incurs a large log-loss.
Calibrating an identical classifier, which was trained on 600 datapoints, with
method='sigmoid' on the remaining 200 datapoints reduces the confidence of the
predictions, i.e., moves the probability vectors from the edges of the simplex
towards the center. This calibration results in a lower log-loss. Note that an
alternative would have been to increase the number of base estimators which
would have resulted in a similar decrease in log-loss.
"""
print(__doc__)
# Author: Jan Hendrik Metzen <[email protected]>
# License: BSD Style.
import matplotlib.pyplot as plt
import numpy as np
from sklearn.datasets import make_blobs
from sklearn.ensemble import RandomForestClassifier
from sklearn.calibration import CalibratedClassifierCV
from sklearn.metrics import log_loss
np.random.seed(0)
# Generate data
X, y = make_blobs(n_samples=1000, n_features=2, random_state=42,
cluster_std=5.0)
X_train, y_train = X[:600], y[:600]
X_valid, y_valid = X[600:800], y[600:800]
X_train_valid, y_train_valid = X[:800], y[:800]
X_test, y_test = X[800:], y[800:]
# Train uncalibrated random forest classifier on whole train and validation
# data and evaluate on test data
clf = RandomForestClassifier(n_estimators=25)
clf.fit(X_train_valid, y_train_valid)
clf_probs = clf.predict_proba(X_test)
score = log_loss(y_test, clf_probs)
# Train random forest classifier, calibrate on validation data and evaluate
# on test data
clf = RandomForestClassifier(n_estimators=25)
clf.fit(X_train, y_train)
clf_probs = clf.predict_proba(X_test)
sig_clf = CalibratedClassifierCV(clf, method="sigmoid", cv="prefit")
sig_clf.fit(X_valid, y_valid)
sig_clf_probs = sig_clf.predict_proba(X_test)
sig_score = log_loss(y_test, sig_clf_probs)
# Plot changes in predicted probabilities via arrows
plt.figure(0)
colors = ["r", "g", "b"]
for i in range(clf_probs.shape[0]):
plt.arrow(clf_probs[i, 0], clf_probs[i, 1],
sig_clf_probs[i, 0] - clf_probs[i, 0],
sig_clf_probs[i, 1] - clf_probs[i, 1],
color=colors[y_test[i]], head_width=1e-2)
# Plot perfect predictions
plt.plot([1.0], [0.0], 'ro', ms=20, label="Class 1")
plt.plot([0.0], [1.0], 'go', ms=20, label="Class 2")
plt.plot([0.0], [0.0], 'bo', ms=20, label="Class 3")
# Plot boundaries of unit simplex
plt.plot([0.0, 1.0, 0.0, 0.0], [0.0, 0.0, 1.0, 0.0], 'k', label="Simplex")
# Annotate points on the simplex
plt.annotate(r'($\frac{1}{3}$, $\frac{1}{3}$, $\frac{1}{3}$)',
xy=(1.0/3, 1.0/3), xytext=(1.0/3, .23), xycoords='data',
arrowprops=dict(facecolor='black', shrink=0.05),
horizontalalignment='center', verticalalignment='center')
plt.plot([1.0/3], [1.0/3], 'ko', ms=5)
plt.annotate(r'($\frac{1}{2}$, $0$, $\frac{1}{2}$)',
xy=(.5, .0), xytext=(.5, .1), xycoords='data',
arrowprops=dict(facecolor='black', shrink=0.05),
horizontalalignment='center', verticalalignment='center')
plt.annotate(r'($0$, $\frac{1}{2}$, $\frac{1}{2}$)',
xy=(.0, .5), xytext=(.1, .5), xycoords='data',
arrowprops=dict(facecolor='black', shrink=0.05),
horizontalalignment='center', verticalalignment='center')
plt.annotate(r'($\frac{1}{2}$, $\frac{1}{2}$, $0$)',
xy=(.5, .5), xytext=(.6, .6), xycoords='data',
arrowprops=dict(facecolor='black', shrink=0.05),
horizontalalignment='center', verticalalignment='center')
plt.annotate(r'($0$, $0$, $1$)',
xy=(0, 0), xytext=(.1, .1), xycoords='data',
arrowprops=dict(facecolor='black', shrink=0.05),
horizontalalignment='center', verticalalignment='center')
plt.annotate(r'($1$, $0$, $0$)',
xy=(1, 0), xytext=(1, .1), xycoords='data',
arrowprops=dict(facecolor='black', shrink=0.05),
horizontalalignment='center', verticalalignment='center')
plt.annotate(r'($0$, $1$, $0$)',
xy=(0, 1), xytext=(.1, 1), xycoords='data',
arrowprops=dict(facecolor='black', shrink=0.05),
horizontalalignment='center', verticalalignment='center')
# Add grid
plt.grid(False)
for x in [0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]:
plt.plot([0, x], [x, 0], 'k', alpha=0.2)
plt.plot([0, 0 + (1-x)/2], [x, x + (1-x)/2], 'k', alpha=0.2)
plt.plot([x, x + (1-x)/2], [0, 0 + (1-x)/2], 'k', alpha=0.2)
plt.title("Change of predicted probabilities after sigmoid calibration")
plt.xlabel("Probability class 1")
plt.ylabel("Probability class 2")
plt.xlim(-0.05, 1.05)
plt.ylim(-0.05, 1.05)
plt.legend(loc="best")
print("Log-loss of")
print(" * uncalibrated classifier trained on 800 datapoints: %.3f "
% score)
print(" * classifier trained on 600 datapoints and calibrated on "
"200 datapoint: %.3f" % sig_score)
# Illustrate calibrator
plt.figure(1)
# generate grid over 2-simplex
p1d = np.linspace(0, 1, 20)
p0, p1 = np.meshgrid(p1d, p1d)
p2 = 1 - p0 - p1
p = np.c_[p0.ravel(), p1.ravel(), p2.ravel()]
p = p[p[:, 2] >= 0]
calibrated_classifier = sig_clf.calibrated_classifiers_[0]
prediction = np.vstack([calibrator.predict(this_p)
for calibrator, this_p in
zip(calibrated_classifier.calibrators_, p.T)]).T
prediction /= prediction.sum(axis=1)[:, None]
# Plot modifications of the calibrator
for i in range(prediction.shape[0]):
plt.arrow(p[i, 0], p[i, 1],
prediction[i, 0] - p[i, 0], prediction[i, 1] - p[i, 1],
head_width=1e-2, color=colors[np.argmax(p[i])])
# Plot boundaries of unit simplex
plt.plot([0.0, 1.0, 0.0, 0.0], [0.0, 0.0, 1.0, 0.0], 'k', label="Simplex")
plt.grid(False)
for x in [0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]:
plt.plot([0, x], [x, 0], 'k', alpha=0.2)
plt.plot([0, 0 + (1-x)/2], [x, x + (1-x)/2], 'k', alpha=0.2)
plt.plot([x, x + (1-x)/2], [0, 0 + (1-x)/2], 'k', alpha=0.2)
plt.title("Illustration of sigmoid calibrator")
plt.xlabel("Probability class 1")
plt.ylabel("Probability class 2")
plt.xlim(-0.05, 1.05)
plt.ylim(-0.05, 1.05)
plt.show()
|
bsd-3-clause
|
sabi0/intellij-community
|
python/helpers/pydev/pydev_ipython/qt_for_kernel.py
|
22
|
3574
|
""" Import Qt in a manner suitable for an IPython kernel.
This is the import used for the `gui=qt` or `matplotlib=qt` initialization.
Import Priority:
if Qt4 has been imported anywhere else:
use that
if matplotlib has been imported and doesn't support v2 (<= 1.0.1):
use PyQt4 @v1
Next, ask ETS' QT_API env variable
if QT_API not set:
ask matplotlib via rcParams['backend.qt4']
if it said PyQt:
use PyQt4 @v1
elif it said PySide:
use PySide
else: (matplotlib said nothing)
# this is the default path - nobody told us anything
try:
PyQt @v1
except:
fallback on PySide
else:
use PyQt @v2 or PySide, depending on QT_API
because ETS doesn't work with PyQt @v1.
"""
import os
import sys
from pydev_ipython.version import check_version
from pydev_ipython.qt_loaders import (load_qt, QT_API_PYSIDE,
QT_API_PYQT, QT_API_PYQT_DEFAULT,
loaded_api, QT_API_PYQT5)
#Constraints placed on an imported matplotlib
def matplotlib_options(mpl):
if mpl is None:
return
# #PyDev-779: In pysrc/pydev_ipython/qt_for_kernel.py, matplotlib_options should be replaced with latest from ipython
# (i.e.: properly check backend to decide upon qt4/qt5).
backend = mpl.rcParams.get('backend', None)
if backend == 'Qt4Agg':
mpqt = mpl.rcParams.get('backend.qt4', None)
if mpqt is None:
return None
if mpqt.lower() == 'pyside':
return [QT_API_PYSIDE]
elif mpqt.lower() == 'pyqt4':
return [QT_API_PYQT_DEFAULT]
elif mpqt.lower() == 'pyqt4v2':
return [QT_API_PYQT]
raise ImportError("unhandled value for backend.qt4 from matplotlib: %r" %
mpqt)
elif backend == 'Qt5Agg':
mpqt = mpl.rcParams.get('backend.qt5', None)
if mpqt is None:
return None
if mpqt.lower() == 'pyqt5':
return [QT_API_PYQT5]
raise ImportError("unhandled value for backend.qt5 from matplotlib: %r" %
mpqt)
# Fallback without checking backend (previous code)
mpqt = mpl.rcParams.get('backend.qt4', None)
if mpqt is None:
mpqt = mpl.rcParams.get('backend.qt5', None)
if mpqt is None:
return None
if mpqt.lower() == 'pyside':
return [QT_API_PYSIDE]
elif mpqt.lower() == 'pyqt4':
return [QT_API_PYQT_DEFAULT]
elif mpqt.lower() == 'pyqt5':
return [QT_API_PYQT5]
raise ImportError("unhandled value for qt backend from matplotlib: %r" %
mpqt)
def get_options():
"""Return a list of acceptable QT APIs, in decreasing order of
preference
"""
#already imported Qt somewhere. Use that
loaded = loaded_api()
if loaded is not None:
return [loaded]
mpl = sys.modules.get('matplotlib', None)
if mpl is not None and not check_version(mpl.__version__, '1.0.2'):
#1.0.1 only supports PyQt4 v1
return [QT_API_PYQT_DEFAULT]
if os.environ.get('QT_API', None) is None:
#no ETS variable. Ask mpl, then use either
return matplotlib_options(mpl) or [QT_API_PYQT_DEFAULT, QT_API_PYSIDE, QT_API_PYQT5]
#ETS variable present. Will fallback to external.qt
return None
api_opts = get_options()
if api_opts is not None:
QtCore, QtGui, QtSvg, QT_API = load_qt(api_opts)
else: # use ETS variable
from pydev_ipython.qt import QtCore, QtGui, QtSvg, QT_API
|
apache-2.0
|
frrp/trading-with-python
|
lib/functions.py
|
76
|
11627
|
# -*- coding: utf-8 -*-
"""
twp support functions
@author: Jev Kuznetsov
Licence: GPL v2
"""
from scipy import polyfit, polyval
import datetime as dt
#from datetime import datetime, date
from pandas import DataFrame, Index, Series
import csv
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
def nans(shape, dtype=float):
''' create a nan numpy array '''
a = np.empty(shape, dtype)
a.fill(np.nan)
return a
def plotCorrelationMatrix(price, thresh = None):
''' plot a correlation matrix as a heatmap image
inputs:
price: prices DataFrame
thresh: correlation threshold to use for checking, default None
'''
symbols = price.columns.tolist()
R = price.pct_change()
correlationMatrix = R.corr()
if thresh is not None:
correlationMatrix = correlationMatrix > thresh
plt.imshow(abs(correlationMatrix.values),interpolation='none')
plt.xticks(range(len(symbols)),symbols)
plt.yticks(range(len(symbols)),symbols)
plt.colorbar()
plt.title('Correlation matrix')
return correlationMatrix
def pca(A):
""" performs principal components analysis
(PCA) on the n-by-p DataFrame A
Rows of A correspond to observations, columns to variables.
Returns :
coeff : principal components, column-wise
transform: A in principal component space
latent : eigenvalues
"""
# computing eigenvalues and eigenvectors of covariance matrix
M = (A - A.mean()).T # subtract the mean (along columns)
[latent,coeff] = np.linalg.eig(np.cov(M)) # attention:not always sorted
idx = np.argsort(latent) # sort eigenvalues
    idx = idx[::-1] # reverse to descending order (largest eigenvalue first)
coeff = coeff[:,idx]
latent = latent[idx]
score = np.dot(coeff.T,A.T) # projection of the data in the new space
transform = DataFrame(index = A.index, data = score.T)
return coeff,transform,latent
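def _examplePca():
    ''' illustrative sketch of pca() on a tiny synthetic DataFrame
        (column names and values are arbitrary) '''
    data = DataFrame({'x': [1.0, 2.0, 3.0, 4.0], 'y': [2.1, 3.9, 6.2, 8.1]})
    coeff, transform, latent = pca(data)
    # coeff: 2x2 eigenvectors, latent: eigenvalues (largest first),
    # transform: the 4 observations projected onto the principal components
    return coeff, transform, latent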
def pos2pnl(price,position , ibTransactionCost=False ):
"""
calculate pnl based on price and position
Inputs:
---------
price: series or dataframe of price
position: number of shares at each time. Column names must be same as in price
ibTransactionCost: use bundled Interactive Brokers transaction cost of 0.005$/share
Returns a portfolio DataFrame
"""
delta=position.diff()
port = DataFrame(index=price.index)
if isinstance(price,Series): # no need to sum along 1 for series
port['cash'] = (-delta*price).cumsum()
port['stock'] = (position*price)
else: # dealing with DataFrame here
port['cash'] = (-delta*price).sum(axis=1).cumsum()
port['stock'] = (position*price).sum(axis=1)
if ibTransactionCost:
tc = -0.005*position.diff().abs() # basic transaction cost
tc[(tc>-1) & (tc<0)] = -1 # everything under 1$ will be ceil'd to 1$
if isinstance(price,DataFrame):
tc = tc.sum(axis=1)
port['tc'] = tc.cumsum()
else:
port['tc'] = 0.
port['total'] = port['stock']+port['cash']+port['tc']
return port
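def _examplePos2pnl():
    ''' illustrative sketch: pnl of buying 100 shares and selling them later,
        on a made-up price series (first row is NaN because of diff()) '''
    price = Series([10.0, 10.5, 10.2, 11.0])
    position = Series([0, 100, 100, 0])
    return pos2pnl(price, position)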
def tradeBracket(price,entryBar,maxTradeLength,bracket):
'''
trade a symmetrical bracket on price series, return price delta and exit bar #
Input
------
price : series of price values
entryBar: entry bar number
maxTradeLength : max trade duration in bars
bracket : allowed price deviation
'''
lastBar = min(entryBar+maxTradeLength,len(price)-1)
p = price[entryBar:lastBar]-price[entryBar]
idxOutOfBound = np.nonzero(abs(p)>bracket) # find indices where price comes out of bracket
if idxOutOfBound[0].any(): # found match
priceDelta = p[idxOutOfBound[0][0]]
exitBar = idxOutOfBound[0][0]+entryBar
else: # all in bracket, exiting based on time
priceDelta = p[-1]
exitBar = lastBar
return priceDelta, exitBar
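def _exampleTradeBracket():
    ''' illustrative sketch: enter at bar 1 of a made-up price series and
        exit once price moves more than 1.5 away from the entry price '''
    price = np.array([10.0, 10.2, 10.5, 11.0, 12.0, 12.5])
    priceDelta, exitBar = tradeBracket(price, entryBar=1, maxTradeLength=10, bracket=1.5)
    # exits at bar 4, where 12.0 - 10.2 = 1.8 > 1.5
    return priceDelta, exitBar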
def estimateBeta(priceY,priceX,algo = 'standard'):
'''
estimate stock Y vs stock X beta using iterative linear
regression. Outliers outside 3 sigma boundary are filtered out
Parameters
--------
priceX : price series of x (usually market)
priceY : price series of y (estimate beta of this price)
Returns
--------
beta : stockY beta relative to stock X
'''
X = DataFrame({'x':priceX,'y':priceY})
if algo=='returns':
ret = (X/X.shift(1)-1).dropna().values
#print len(ret)
x = ret[:,0]
y = ret[:,1]
# filter high values
low = np.percentile(x,20)
high = np.percentile(x,80)
iValid = (x>low) & (x<high)
x = x[iValid]
y = y[iValid]
iteration = 1
nrOutliers = 1
while iteration < 10 and nrOutliers > 0 :
(a,b) = polyfit(x,y,1)
yf = polyval([a,b],x)
#plot(x,y,'x',x,yf,'r-')
err = yf-y
idxOutlier = abs(err) > 3*np.std(err)
nrOutliers =sum(idxOutlier)
beta = a
#print 'Iteration: %i beta: %.2f outliers: %i' % (iteration,beta, nrOutliers)
x = x[~idxOutlier]
y = y[~idxOutlier]
iteration += 1
elif algo=='log':
x = np.log(X['x'])
y = np.log(X['y'])
(a,b) = polyfit(x,y,1)
beta = a
elif algo=='standard':
ret =np.log(X).diff().dropna()
beta = ret['x'].cov(ret['y'])/ret['x'].var()
else:
raise TypeError("unknown algorithm type, use 'standard', 'log' or 'returns'")
return beta
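def _exampleEstimateBeta():
    ''' illustrative sketch: y is constructed as x**2 (rescaled), so its
        log-return beta versus x should come out very close to 2 '''
    np.random.seed(0)
    priceX = Series(100 * np.exp(np.cumsum(0.01 * np.random.randn(250))))
    priceY = priceX ** 2 / 100.0
    return estimateBeta(priceY, priceX, algo='standard')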
def estimateVolatility(ohlc, N=10, algo='YangZhang'):
"""
Volatility estimation
Possible algorithms: ['YangZhang', 'CC']
"""
cc = np.log(ohlc.close/ohlc.close.shift(1))
if algo == 'YangZhang': # Yang-zhang volatility
ho = np.log(ohlc.high/ohlc.open)
lo = np.log(ohlc.low/ohlc.open)
co = np.log(ohlc.close/ohlc.open)
oc = np.log(ohlc.open/ohlc.close.shift(1))
oc_sq = oc**2
cc_sq = cc**2
rs = ho*(ho-co)+lo*(lo-co)
close_vol = pd.rolling_sum(cc_sq, window=N) * (1.0 / (N - 1.0))
open_vol = pd.rolling_sum(oc_sq, window=N) * (1.0 / (N - 1.0))
window_rs = pd.rolling_sum(rs, window=N) * (1.0 / (N - 1.0))
result = (open_vol + 0.164333 * close_vol + 0.835667 * window_rs).apply(np.sqrt) * np.sqrt(252)
result[:N-1] = np.nan
elif algo == 'CC': # standard close-close estimator
result = np.sqrt(252)*np.sqrt(((pd.rolling_sum(cc**2,N))/N))
else:
raise ValueError('Unknown algo type.')
return result*100
def rank(current,past):
''' calculate a relative rank 0..1 for a value against series '''
return (current>past).sum()/float(past.count())
def returns(df):
return (df/df.shift(1)-1)
def logReturns(df):
t = np.log(df)
return t-t.shift(1)
def dateTimeToDate(idx):
''' convert datetime index to date '''
dates = []
for dtm in idx:
dates.append(dtm.date())
return dates
def readBiggerScreener(fName):
''' import data from Bigger Capital screener '''
with open(fName,'rb') as f:
reader = csv.reader(f)
rows = [row for row in reader]
header = rows[0]
data = [[] for i in range(len(header))]
for row in rows[1:]:
for i,elm in enumerate(row):
try:
data[i].append(float(elm))
except Exception:
data[i].append(str(elm))
return DataFrame(dict(zip(header,data)),index=Index(range(len(data[0]))))[header]
def sharpe(pnl):
return np.sqrt(250)*pnl.mean()/pnl.std()
def drawdown(s):
"""
calculate max drawdown and duration
Input:
s, price or cumulative pnl curve $
Returns:
drawdown : vector of drawdwon values
duration : vector of drawdown duration
"""
# convert to array if got pandas series, 10x speedup
if isinstance(s,pd.Series):
idx = s.index
s = s.values
returnSeries = True
else:
returnSeries = False
if s.min() < 0: # offset if signal minimum is less than zero
s = s-s.min()
highwatermark = np.zeros(len(s))
drawdown = np.zeros(len(s))
drawdowndur = np.zeros(len(s))
for t in range(1,len(s)):
highwatermark[t] = max(highwatermark[t-1], s[t])
drawdown[t] = (highwatermark[t]-s[t])
drawdowndur[t]= (0 if drawdown[t] == 0 else drawdowndur[t-1]+1)
if returnSeries:
return pd.Series(index=idx,data=drawdown), pd.Series(index=idx,data=drawdowndur)
else:
return drawdown , drawdowndur
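def _exampleDrawdown():
    ''' illustrative sketch on a small made-up equity curve: the largest
        peak-to-trough drop is 6 (from 15 down to 9) and lasts 3 bars '''
    equity = pd.Series([10.0, 12.0, 15.0, 11.0, 9.0, 14.0, 16.0])
    dd, duration = drawdown(equity)
    # dd.max() == 6.0, duration.max() == 3.0
    return dd, duration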
def profitRatio(pnl):
'''
calculate profit ratio as sum(pnl)/drawdown
Input: pnl - daily pnl, Series or DataFrame
'''
def processVector(pnl): # process a single column
s = pnl.fillna(0)
dd = drawdown(s)[0]
p = s.sum()/dd.max()
return p
if isinstance(pnl,Series):
return processVector(pnl)
elif isinstance(pnl,DataFrame):
p = Series(index = pnl.columns)
for col in pnl.columns:
p[col] = processVector(pnl[col])
return p
else:
raise TypeError("Input must be DataFrame or Series, not "+str(type(pnl)))
def candlestick(df,width=0.5, colorup='b', colordown='r'):
''' plot a candlestick chart of a dataframe '''
O = df['open'].values
H = df['high'].values
L = df['low'].values
C = df['close'].values
fig = plt.gcf()
ax = plt.axes()
#ax.hold(True)
X = df.index
#plot high and low
ax.bar(X,height=H-L,bottom=L,width=0.1,color='k')
idxUp = C>O
ax.bar(X[idxUp],height=(C-O)[idxUp],bottom=O[idxUp],width=width,color=colorup)
idxDown = C<=O
ax.bar(X[idxDown],height=(O-C)[idxDown],bottom=C[idxDown],width=width,color=colordown)
try:
fig.autofmt_xdate()
except Exception: # pragma: no cover
pass
ax.grid(True)
#ax.bar(x,height=H-L,bottom=L,width=0.01,color='k')
def datetime2matlab(t):
''' convert datetime timestamp to matlab numeric timestamp '''
mdn = t + dt.timedelta(days = 366)
frac = (t-dt.datetime(t.year,t.month,t.day,0,0,0)).seconds / (24.0 * 60.0 * 60.0)
return mdn.toordinal() + frac
def getDataSources(fName = None):
''' return data sources directories for this machine.
directories are defined in datasources.ini or provided filepath'''
import socket
from ConfigParser import ConfigParser
pcName = socket.gethostname()
p = ConfigParser()
p.optionxform = str
if fName is None:
fName = 'datasources.ini'
p.read(fName)
if pcName not in p.sections():
raise NameError('Host name section %s not found in file %s' %(pcName,fName))
dataSources = {}
for option in p.options(pcName):
dataSources[option] = p.get(pcName,option)
return dataSources
if __name__ == '__main__':
df = DataFrame({'open':[1,2,3],'high':[5,6,7],'low':[-2,-1,0],'close':[2,1,4]})
plt.clf()
candlestick(df)
|
bsd-3-clause
|
weaver-viii/h2o-3
|
h2o-py/tests/testdir_golden/pyunit_svd_1_golden.py
|
3
|
2183
|
import sys
sys.path.insert(1, "../../")
import h2o
def svd_1_golden(ip, port):
print "Importing USArrests.csv data..."
arrestsH2O = h2o.upload_file(h2o.locate("smalldata/pca_test/USArrests.csv"))
print "Compare with SVD"
fitH2O = h2o.svd(x=arrestsH2O[0:4], nv=4, transform="NONE", max_iterations=2000)
print "Compare singular values (D)"
h2o_d = fitH2O._model_json['output']['d']
r_d = [1419.06139509772, 194.825846110138, 45.6613376308754, 18.0695566224677]
print "R Singular Values: {0}".format(r_d)
print "H2O Singular Values: {0}".format(h2o_d)
for r, h in zip(r_d, h2o_d): assert abs(r - h) < 1e-6, "H2O got {0}, but R got {1}".format(h, r)
print "Compare right singular vectors (V)"
h2o_v = fitH2O._model_json['output']['v']
r_v = [[-0.04239181, 0.01616262, -0.06588426, 0.99679535],
[-0.94395706, 0.32068580, 0.06655170, -0.04094568],
[-0.30842767, -0.93845891, 0.15496743, 0.01234261],
[-0.10963744, -0.12725666, -0.98347101, -0.06760284]]
print "R Right Singular Vectors: {0}".format(r_v)
print "H2O Right Singular Vectors: {0}".format(h2o_v)
for rl, hl in zip(r_v, h2o_v):
for r, h in zip(rl, hl): assert abs(abs(r) - abs(h)) < 1e-5, "H2O got {0}, but R got {1}".format(h, r)
print "Compare left singular vectors (U)"
h2o_u = h2o.as_list(h2o.get_frame(fitH2O._model_json['output']['u_key']['name']), use_pandas=False)
h2o_u.pop(0)
r_u = [[-0.1716251, 0.096325710, 0.06515480, 0.15369551],
[-0.1891166, 0.173452566, -0.42665785, -0.17801438],
[-0.2155930, 0.078998111, 0.02063740, -0.28070784],
[-0.1390244, 0.059889811, 0.01392269, 0.01610418],
[-0.2067788, -0.009812026, -0.17633244, -0.21867425],
[-0.1558794, -0.064555293, -0.28288280, -0.11797419]]
print "R Left Singular Vectors: {0}".format(r_u)
print "H2O Left Singular Vectors: {0}".format(h2o_u)
for rl, hl in zip(r_u, h2o_u):
for r, h in zip(rl, hl): assert abs(abs(r) - abs(float(h))) < 1e-5, "H2O got {0}, but R got {1}".format(h, r)
if __name__ == "__main__":
h2o.run_test(sys.argv, svd_1_golden)
|
apache-2.0
|