repo_name (stringlengths 6-112) | path (stringlengths 4-204) | copies (stringlengths 1-3) | size (stringlengths 4-6) | content (stringlengths 714-810k) | license (stringclasses, 15 values)
---|---|---|---|---|---|
mlyundin/scikit-learn | examples/linear_model/plot_sgd_penalties.py | 249 | 1563 | """
==============
SGD: Penalties
==============
Plot the contours of the three penalties: L1, L2 and Elastic Net.
All three are supported by
:class:`sklearn.linear_model.stochastic_gradient`.
"""
from __future__ import division
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
def l1(xs):
return np.array([np.sqrt((1 - np.sqrt(x ** 2.0)) ** 2.0) for x in xs])
def l2(xs):
return np.array([np.sqrt(1.0 - x ** 2.0) for x in xs])
def el(xs, z):
return np.array([(2 - 2 * x - 2 * z + 4 * x * z -
(4 * z ** 2
- 8 * x * z ** 2
+ 8 * x ** 2 * z ** 2
- 16 * x ** 2 * z ** 3
+ 8 * x * z ** 3 + 4 * x ** 2 * z ** 4) ** (1. / 2)
- 2 * x * z ** 2) / (2 - 4 * z) for x in xs])
def cross(ext):
plt.plot([-ext, ext], [0, 0], "k-")
plt.plot([0, 0], [-ext, ext], "k-")
xs = np.linspace(0, 1, 100)
alpha = 0.501  # exactly 0.5 would cause a division by zero in el()
cross(1.2)
plt.plot(xs, l1(xs), "r-", label="L1")
plt.plot(xs, -1.0 * l1(xs), "r-")
plt.plot(-1 * xs, l1(xs), "r-")
plt.plot(-1 * xs, -1.0 * l1(xs), "r-")
plt.plot(xs, l2(xs), "b-", label="L2")
plt.plot(xs, -1.0 * l2(xs), "b-")
plt.plot(-1 * xs, l2(xs), "b-")
plt.plot(-1 * xs, -1.0 * l2(xs), "b-")
plt.plot(xs, el(xs, alpha), "y-", label="Elastic Net")
plt.plot(xs, -1.0 * el(xs, alpha), "y-")
plt.plot(-1 * xs, el(xs, alpha), "y-")
plt.plot(-1 * xs, -1.0 * el(xs, alpha), "y-")
plt.xlabel(r"$w_0$")
plt.ylabel(r"$w_1$")
plt.legend()
plt.axis("equal")
plt.show()
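
# A minimal usage sketch of these penalties in practice, assuming some
# training data ``X``, ``y`` are available (SGDClassifier is only one of the
# estimators in sklearn.linear_model that accepts these penalty options):
#
#     from sklearn.linear_model import SGDClassifier
#     clf = SGDClassifier(penalty="elasticnet", alpha=1e-4, l1_ratio=0.5)
#     clf.fit(X, y)
#
# ``penalty`` may be "l1", "l2" or "elasticnet"; ``l1_ratio`` controls the
# L1/L2 mix whose contour is plotted above.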
| bsd-3-clause |
DiracInstitute/kbmod | analysis/filter_utils.py | 1 | 11539 | import os
import sys
import shutil
import numpy as np
import pandas as pd
import multiprocessing as mp
from kbmodpy import kbmod as kb
from astropy.io import fits
from astropy.wcs import WCS
from skimage import measure
from sklearn.cluster import DBSCAN
def file_and_header_length(filename):
i = 0
header_length = 0
with open(filename, 'r') as f:
for line in f:
i+=1
if line.startswith('#'):
header_length += 1
return i, header_length
def load_chunk(filename, chunk_start, chunk_size):
results_arr = np.genfromtxt(filename, names=['t0_x', 't0_y',
'v_x', 'v_y',
'likelihood', 'est_flux'],
skip_header=chunk_start,
max_rows=chunk_size)
return results_arr
def calcCenters(startLocArr, velArr, timeArr):
startLocArr = np.array([startLocArr['t0_x'], startLocArr['t0_y']])
velArr = np.array([velArr['v_x'], velArr['v_y']])
centerArr = []
for time in timeArr:
centerArr.append(startLocArr + (velArr*time))
return np.array(centerArr)
def createPostageStamp(imageArray, objectStartArr, velArr,
timeArr, stamp_width):
"""
Create postage stamp image coadds of potential objects traveling along
a trajectory.
Parameters
----------
imageArray: numpy array, required
The masked input images.
objectStartArr: numpy array, required
An array with the starting location of the object in pixels.
velArr: numpy array, required
The x,y velocity in pixels/hr. of the object trajectory.
timeArr: numpy array, required
The time in hours of each image starting from 0 at the first image.
stamp_width: numpy array or list, [2], required
The row, column dimensions of the desired output image.
Returns
-------
stampImage: numpy array
The coadded postage stamp.
singleImagesArray: numpy array
The postage stamps that were added together to create the coadd.
"""
singleImagesArray = []
stampWidth = np.array(stamp_width, dtype=int)
#print stampWidth
stampImage = np.zeros(stampWidth)
#if len(np.shape(imageArray)) < 3:
# imageArray = [imageArray]
measureCoords = calcCenters(np.array(objectStartArr), np.array(velArr), timeArr)
if len(np.shape(measureCoords)) < 2:
measureCoords = [measureCoords]
off_edge = []
for centerCoords in measureCoords:
if (centerCoords[0] + stampWidth[0]/2 + 1) > np.shape(imageArray[0].science())[1]:
#raise ValueError('The boundaries of your postage stamp for one of the images go off the edge')
off_edge.append(True)
elif (centerCoords[0] - stampWidth[0]/2) < 0:
#raise ValueError('The boundaries of your postage stamp for one of the images go off the edge')
off_edge.append(True)
elif (centerCoords[1] + stampWidth[1]/2 + 1) > np.shape(imageArray[0].science())[0]:
#raise ValueError('The boundaries of your postage stamp for one of the images go off the edge')
off_edge.append(True)
elif (centerCoords[1] - stampWidth[1]/2) < 0:
#raise ValueError('The boundaries of your postage stamp for one of the images go off the edge')
off_edge.append(True)
else:
off_edge.append(False)
i=0
for image in imageArray:
if off_edge[i] is False:
xmin = int(np.rint(measureCoords[i,1]-stampWidth[0]/2))
xmax = int(xmin + stampWidth[0])
ymin = int(np.rint(measureCoords[i,0]-stampWidth[1]/2))
ymax = int(ymin + stampWidth[1])
#print xmin, xmax, ymin, ymax
single_stamp = image.science()[xmin:xmax, ymin:ymax]
single_stamp[np.isnan(single_stamp)] = 0.
single_stamp[np.isinf(single_stamp)] = 0.
stampImage += single_stamp
singleImagesArray.append(single_stamp)
else:
single_stamp = np.zeros((stampWidth))
singleImagesArray.append(single_stamp)
i+=1
return stampImage, singleImagesArray
def clusterResults(results, dbscan_args=None):
"""
Use scikit-learn algorithm of density-based spatial clustering of
applications with noise (DBSCAN)
(http://scikit-learn.org/stable/modules/generated/
sklearn.cluster.DBSCAN.html)
to cluster the results of the likelihood image search using starting
location, total velocity and slope of trajectory.
Parameters
----------
results: numpy recarray, required
The results output from findObjects in searchImage.
dbscan_args: dict, optional
Additional arguments for the DBSCAN instance. See options in link
above.
Returns
-------
db_cluster: DBSCAN instance
DBSCAN instance with clustering completed. To get cluster labels use
db_cluster.labels_
top_vals: list of integers
The indices in the results array where the most likely object in each
cluster is located.
"""
default_dbscan_args = dict(eps=0.03, min_samples=1, n_jobs=-1)
if dbscan_args is not None:
default_dbscan_args.update(dbscan_args)
dbscan_args = default_dbscan_args
slope_arr = []
intercept_arr = []
t0x_arr = []
t0y_arr = []
vel_total_arr = []
vx_arr = []
vel_x_arr = []
vel_y_arr = []
for target_num in range(len(results)):
t0x = results['t0_x'][target_num]
t0x_arr.append(t0x)
t0y = results['t0_y'][target_num]
t0y_arr.append(t0y)
v0x = results['v_x'][target_num]
vel_x_arr.append(v0x)
v0y = results['v_y'][target_num]
vel_y_arr.append(v0y)
db_cluster = DBSCAN(**dbscan_args)
scaled_t0x = np.array(t0x_arr) #- np.min(t0x_arr)
if np.max(scaled_t0x) > 0.:
scaled_t0x = scaled_t0x/4200.#np.max(scaled_t0x)
scaled_t0y = np.array(t0y_arr) #- np.min(t0y_arr)
if np.max(scaled_t0y) > 0.:
scaled_t0y = scaled_t0y/4200.#np.max(scaled_t0y)
scaled_vx = np.array(vel_x_arr)# - np.min(vel_x_arr)
if np.max(scaled_vx) > 0.:
scaled_vx /= np.max(scaled_vx)
scaled_vy = np.array(vel_y_arr)# - np.min(vel_y_arr)
if np.max(scaled_vy) > 0.:
scaled_vy /= np.max(scaled_vy)
db_cluster.fit(np.array([scaled_t0x, scaled_t0y,
scaled_vx, scaled_vy], dtype=np.float).T)
top_vals = []
for cluster_num in np.unique(db_cluster.labels_):
cluster_vals = np.where(db_cluster.labels_ == cluster_num)[0]
top_vals.append(cluster_vals[0])
return db_cluster, top_vals
#http://scipy-cookbook.readthedocs.io/items/KalmanFiltering.html
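# A one-dimensional Kalman filter assuming a constant underlying signal
# (process noise Q = 0). ``obs`` holds the measurements and ``var`` their
# per-point variances; zero variances are replaced by a large value (100.)
# so those points carry essentially no weight. Returns the filtered
# estimates ``xhat`` and their variances ``P``; callers flag outliers by
# comparing |obs - xhat| / sqrt(P) against a threshold.
#
# Hypothetical usage sketch (values are illustrative only):
#   obs = np.array([10.2, 9.8, 30.0, 10.1])        # one outlying flux value
#   var = np.full(len(obs), np.var(obs))
#   xhat, P = kalman_filter(obs, var)
#   deviations = np.abs(obs - xhat) / np.sqrt(P)   # large at the outlier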
def kalman_filter(obs, var):
xhat = np.zeros(len(obs))
P = np.zeros(len(obs))
xhatminus = np.zeros(len(obs))
Pminus = np.zeros(len(obs))
K = np.zeros(len(obs))
Q = 0
R = np.copy(var)
R[R==0.] = 100.
xhat[0] = obs[0]
P[0] = R[0]
for k in range(1,len(obs)):
xhatminus[k] = xhat[k-1]
Pminus[k] = P[k-1] + Q
K[k] = Pminus[k] / (Pminus[k] + R[k])
xhat[k] = xhatminus[k] + K[k]*(obs[k]-xhatminus[k])
P[k] = (1-K[k])*Pminus[k]
return xhat, P
def calc_nu(psi_vals, phi_vals):
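    # Trajectory likelihood nu = sum(psi) / sqrt(sum(phi)), computed over the
    # points whose psi value is valid (masked values are stored below -9000
    # and are skipped); returns 0 when no valid points remain.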
not_0 = np.where(psi_vals > -9000.)[0]
if len(not_0) > 0:
return np.sum(psi_vals[not_0])/np.sqrt(np.sum(phi_vals[not_0]))
else:
return 0.
def create_filter_curve(ps_lc, ph_lc, lc_on):
num_stamps = len(ps_lc[0])
lc_list = []
for ps_lc_line, ph_lc_line in zip(ps_lc, ph_lc):
filter_sums = []
#for ps_lc_line, ph_lc_line in zip(ps_lc_list, ph_lc_list):
fluxes = ps_lc_line/ph_lc_line
#use_f = np.where(ps_lc_line != 0.)
fluxes = fluxes#[use_f]
f_var = np.var(fluxes)*np.ones(len(fluxes))
kalman_flux, kalman_error = kalman_filter(fluxes, f_var)
deviations = np.abs(kalman_flux - fluxes) / kalman_error**.5
#print deviations
keep_idx = np.where(deviations < 1.)[0]
kalman_flux, kalman_error = kalman_filter(fluxes[::-1], f_var[::-1])
deviations = np.abs(kalman_flux - fluxes[::-1]) / kalman_error**.5
#print deviations
keep_idx_back = np.where(deviations < 1.)[0]
if len(keep_idx) >= len(keep_idx_back):
single_stamps = fluxes[keep_idx]
#print keep_idx
else:
keep_idx = num_stamps-1 - keep_idx_back
single_stamps = fluxes[keep_idx]
#print keep_idx
filter_sums = calc_nu(ps_lc_line[keep_idx], ph_lc_line[keep_idx])
lc_list.append(filter_sums)
#return filter_stamps
return (lc_on, lc_list)
def return_filter_curve(ps_lc_line, ph_lc_line):
num_stamps = len(ps_lc_line)
#filter_sums = []
filter_stamps = []
#for ps_lc_line, ph_lc_line in zip(ps_lc_list, ph_lc_list):
fluxes = ps_lc_line/ph_lc_line
#use_f = np.where(ps_lc_line != 0.)
fluxes = fluxes#[use_f]
f_var = np.var(fluxes)*np.ones(len(fluxes))
kalman_flux, kalman_error = kalman_filter(fluxes, f_var)
deviations = np.abs(kalman_flux - fluxes) / kalman_error**.5
#print deviations
keep_idx = np.where(deviations < 1.)[0]
kalman_flux, kalman_error = kalman_filter(fluxes[::-1], f_var[::-1])
deviations = np.abs(kalman_flux - fluxes[::-1]) / kalman_error**.5
#print deviations
keep_idx_back = np.where(deviations < 1.)[0]
if len(keep_idx) >= len(keep_idx_back):
single_stamps = fluxes[keep_idx]
filter_stamps.append(single_stamps)
kalman_flux, kalman_error = kalman_filter(fluxes, f_var)
kf_b, kf_e = kalman_filter(fluxes[::-1], f_var[::-1])
#print keep_idx
else:
keep_idx = num_stamps-1 - keep_idx_back
single_stamps = fluxes[keep_idx]
filter_stamps.append(single_stamps)
kalman_flux, kalman_error = kalman_filter(fluxes[::-1], f_var[::-1])
kf_b, kf_e = kalman_filter(fluxes, f_var)
#print keep_idx
#filter_sums = calc_nu(ps_lc_line[keep_idx], ph_lc_line[keep_idx])
return filter_stamps, keep_idx#, kalman_flux, kalman_error, kf_b, kf_e, fluxes
def get_likelihood_lcs(results_arr, psi, phi, image_times):
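    # Build per-trajectory psi and phi lightcurves: at every image time,
    # propagate each candidate trajectory to its predicted pixel position and
    # sample the psi/phi images there. Coordinates outside [0, 4199] are
    # clamped to the sentinel pixel (4199, 4199), whose psi value is zeroed;
    # invalid or zero phi values are replaced with a large number so they
    # contribute negligibly. Returns arrays of shape (n_results, n_times).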
ps_lc = np.zeros((len(image_times), len(results_arr)))
ph_lc = np.zeros((len(image_times), len(results_arr)))
print('Building Lightcurves')
for idx, t_current in list(enumerate(image_times)):
#print(idx)
x0 = results_arr['t0_x'] + results_arr['v_x']*t_current
y0 = results_arr['t0_y'] + results_arr['v_y']*t_current
x0_0 = x0
y0_0 = y0
x0_0 = np.array(x0_0, dtype=np.int)
y0_0 = np.array(y0_0, dtype=np.int)
x0_0[np.where(((x0_0 > 4199) | (x0_0 < 0)))] = 4199
y0_0[np.where(((y0_0 > 4199) | (y0_0 < 0)))] = 4199
psi_on = psi[idx]
phi_on = phi[idx]
psi_on[4199, 4199] = 0.
psi_on[np.isnan(psi_on)] = 0.
psi_on[np.where(psi_on < -9000)] = 0.
phi_on[np.where(phi_on < -9000)] = 999999.
phi_on[np.where(phi_on == 0.)] = 999999.
ps_lc[idx] = psi_on[y0_0, x0_0]
#print(p_o)
ph_lc[idx] = phi_on[y0_0, x0_0]
return ps_lc.T, ph_lc.T
| bsd-2-clause |
Achuth17/scikit-learn | examples/feature_selection/plot_rfe_with_cross_validation.py | 226 | 1384 | """
===================================================
Recursive feature elimination with cross-validation
===================================================
A recursive feature elimination example with automatic tuning of the
number of features selected with cross-validation.
"""
print(__doc__)
import matplotlib.pyplot as plt
from sklearn.svm import SVC
from sklearn.cross_validation import StratifiedKFold
from sklearn.feature_selection import RFECV
from sklearn.datasets import make_classification
# Build a classification task using 3 informative features
X, y = make_classification(n_samples=1000, n_features=25, n_informative=3,
n_redundant=2, n_repeated=0, n_classes=8,
n_clusters_per_class=1, random_state=0)
# Create the RFE object and compute a cross-validated score.
svc = SVC(kernel="linear")
# The "accuracy" scoring is proportional to the number of correct
# classifications
rfecv = RFECV(estimator=svc, step=1, cv=StratifiedKFold(y, 2),
scoring='accuracy')
rfecv.fit(X, y)
print("Optimal number of features : %d" % rfecv.n_features_)
# Plot number of features VS. cross-validation scores
plt.figure()
plt.xlabel("Number of features selected")
plt.ylabel("Cross validation score (nb of correct classifications)")
plt.plot(range(1, len(rfecv.grid_scores_) + 1), rfecv.grid_scores_)
plt.show()
| bsd-3-clause |
davidgbe/scikit-learn | sklearn/neighbors/tests/test_dist_metrics.py | 230 | 5234 | import itertools
import pickle
import numpy as np
from numpy.testing import assert_array_almost_equal
import scipy
from scipy.spatial.distance import cdist
from sklearn.neighbors.dist_metrics import DistanceMetric
from nose import SkipTest
def dist_func(x1, x2, p):
return np.sum((x1 - x2) ** p) ** (1. / p)
def cmp_version(version1, version2):
version1 = tuple(map(int, version1.split('.')[:2]))
version2 = tuple(map(int, version2.split('.')[:2]))
if version1 < version2:
return -1
elif version1 > version2:
return 1
else:
return 0
class TestMetrics:
def __init__(self, n1=20, n2=25, d=4, zero_frac=0.5,
rseed=0, dtype=np.float64):
np.random.seed(rseed)
self.X1 = np.random.random((n1, d)).astype(dtype)
self.X2 = np.random.random((n2, d)).astype(dtype)
# make boolean arrays: ones and zeros
self.X1_bool = self.X1.round(0)
self.X2_bool = self.X2.round(0)
V = np.random.random((d, d))
VI = np.dot(V, V.T)
self.metrics = {'euclidean': {},
'cityblock': {},
'minkowski': dict(p=(1, 1.5, 2, 3)),
'chebyshev': {},
'seuclidean': dict(V=(np.random.random(d),)),
'wminkowski': dict(p=(1, 1.5, 3),
w=(np.random.random(d),)),
'mahalanobis': dict(VI=(VI,)),
'hamming': {},
'canberra': {},
'braycurtis': {}}
self.bool_metrics = ['matching', 'jaccard', 'dice',
'kulsinski', 'rogerstanimoto', 'russellrao',
'sokalmichener', 'sokalsneath']
def test_cdist(self):
for metric, argdict in self.metrics.items():
keys = argdict.keys()
for vals in itertools.product(*argdict.values()):
kwargs = dict(zip(keys, vals))
D_true = cdist(self.X1, self.X2, metric, **kwargs)
yield self.check_cdist, metric, kwargs, D_true
for metric in self.bool_metrics:
D_true = cdist(self.X1_bool, self.X2_bool, metric)
yield self.check_cdist_bool, metric, D_true
def check_cdist(self, metric, kwargs, D_true):
if metric == 'canberra' and cmp_version(scipy.__version__, '0.9') <= 0:
raise SkipTest("Canberra distance incorrect in scipy < 0.9")
dm = DistanceMetric.get_metric(metric, **kwargs)
D12 = dm.pairwise(self.X1, self.X2)
assert_array_almost_equal(D12, D_true)
def check_cdist_bool(self, metric, D_true):
dm = DistanceMetric.get_metric(metric)
D12 = dm.pairwise(self.X1_bool, self.X2_bool)
assert_array_almost_equal(D12, D_true)
def test_pdist(self):
for metric, argdict in self.metrics.items():
keys = argdict.keys()
for vals in itertools.product(*argdict.values()):
kwargs = dict(zip(keys, vals))
D_true = cdist(self.X1, self.X1, metric, **kwargs)
yield self.check_pdist, metric, kwargs, D_true
for metric in self.bool_metrics:
D_true = cdist(self.X1_bool, self.X1_bool, metric)
yield self.check_pdist_bool, metric, D_true
def check_pdist(self, metric, kwargs, D_true):
if metric == 'canberra' and cmp_version(scipy.__version__, '0.9') <= 0:
raise SkipTest("Canberra distance incorrect in scipy < 0.9")
dm = DistanceMetric.get_metric(metric, **kwargs)
D12 = dm.pairwise(self.X1)
assert_array_almost_equal(D12, D_true)
def check_pdist_bool(self, metric, D_true):
dm = DistanceMetric.get_metric(metric)
D12 = dm.pairwise(self.X1_bool)
assert_array_almost_equal(D12, D_true)
def test_haversine_metric():
def haversine_slow(x1, x2):
return 2 * np.arcsin(np.sqrt(np.sin(0.5 * (x1[0] - x2[0])) ** 2
+ np.cos(x1[0]) * np.cos(x2[0]) *
np.sin(0.5 * (x1[1] - x2[1])) ** 2))
X = np.random.random((10, 2))
haversine = DistanceMetric.get_metric("haversine")
D1 = haversine.pairwise(X)
D2 = np.zeros_like(D1)
for i, x1 in enumerate(X):
for j, x2 in enumerate(X):
D2[i, j] = haversine_slow(x1, x2)
assert_array_almost_equal(D1, D2)
assert_array_almost_equal(haversine.dist_to_rdist(D1),
np.sin(0.5 * D2) ** 2)
def test_pyfunc_metric():
X = np.random.random((10, 3))
euclidean = DistanceMetric.get_metric("euclidean")
pyfunc = DistanceMetric.get_metric("pyfunc", func=dist_func, p=2)
# Check if both callable metric and predefined metric initialized
# DistanceMetric object is picklable
euclidean_pkl = pickle.loads(pickle.dumps(euclidean))
pyfunc_pkl = pickle.loads(pickle.dumps(pyfunc))
D1 = euclidean.pairwise(X)
D2 = pyfunc.pairwise(X)
D1_pkl = euclidean_pkl.pairwise(X)
D2_pkl = pyfunc_pkl.pairwise(X)
assert_array_almost_equal(D1, D2)
assert_array_almost_equal(D1_pkl, D2_pkl)
| bsd-3-clause |
shangwuhencc/scikit-learn | examples/ensemble/plot_feature_transformation.py | 67 | 4285 | """
===============================================
Feature transformations with ensembles of trees
===============================================
Transform your features into a higher dimensional, sparse space. Then
train a linear model on these features.
First fit an ensemble of trees (totally random trees, a random
forest, or gradient boosted trees) on the training set. Then each leaf
of each tree in the ensemble is assigned a fixed arbitrary feature
index in a new feature space. These leaf indices are then encoded in a
one-hot fashion.
Each sample goes through the decisions of each tree of the ensemble
and ends up in one leaf per tree. The sample is encoded by setting
feature values for these leaves to 1 and the other feature values to 0.
The resulting transformer has then learned a supervised, sparse,
high-dimensional categorical embedding of the data.
"""
# Author: Tim Head <[email protected]>
#
# License: BSD 3 clause
import numpy as np
np.random.seed(10)
import matplotlib.pyplot as plt
from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import (RandomTreesEmbedding, RandomForestClassifier,
GradientBoostingClassifier)
from sklearn.preprocessing import OneHotEncoder
from sklearn.cross_validation import train_test_split
from sklearn.metrics import roc_curve
n_estimator = 10
X, y = make_classification(n_samples=80000)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.5)
# It is important to train the ensemble of trees on a different subset
# of the training data than the linear model (here logistic regression) to avoid
# overfitting, in particular if the total number of leaves is
# similar to the number of training samples
X_train, X_train_lr, y_train, y_train_lr = train_test_split(X_train,
y_train,
test_size=0.5)
# Unsupervised transformation based on totally random trees
rt = RandomTreesEmbedding(max_depth=3, n_estimators=n_estimator)
rt_lm = LogisticRegression()
rt.fit(X_train, y_train)
rt_lm.fit(rt.transform(X_train_lr), y_train_lr)
y_pred_rt = rt_lm.predict_proba(rt.transform(X_test))[:, 1]
fpr_rt_lm, tpr_rt_lm, _ = roc_curve(y_test, y_pred_rt)
# Supervised transformation based on random forests
rf = RandomForestClassifier(max_depth=3, n_estimators=n_estimator)
rf_enc = OneHotEncoder()
rf_lm = LogisticRegression()
rf.fit(X_train, y_train)
rf_enc.fit(rf.apply(X_train))
rf_lm.fit(rf_enc.transform(rf.apply(X_train_lr)), y_train_lr)
y_pred_rf_lm = rf_lm.predict_proba(rf_enc.transform(rf.apply(X_test)))[:, 1]
fpr_rf_lm, tpr_rf_lm, _ = roc_curve(y_test, y_pred_rf_lm)
grd = GradientBoostingClassifier(n_estimators=n_estimator)
grd_enc = OneHotEncoder()
grd_lm = LogisticRegression()
grd.fit(X_train, y_train)
grd_enc.fit(grd.apply(X_train)[:, :, 0])
grd_lm.fit(grd_enc.transform(grd.apply(X_train_lr)[:, :, 0]), y_train_lr)
y_pred_grd_lm = grd_lm.predict_proba(
grd_enc.transform(grd.apply(X_test)[:, :, 0]))[:, 1]
fpr_grd_lm, tpr_grd_lm, _ = roc_curve(y_test, y_pred_grd_lm)
# The gradient boosted model by itself
y_pred_grd = grd.predict_proba(X_test)[:, 1]
fpr_grd, tpr_grd, _ = roc_curve(y_test, y_pred_grd)
# The random forest model by itself
y_pred_rf = rf.predict_proba(X_test)[:, 1]
fpr_rf, tpr_rf, _ = roc_curve(y_test, y_pred_rf)
plt.figure(1)
plt.plot([0, 1], [0, 1], 'k--')
plt.plot(fpr_rt_lm, tpr_rt_lm, label='RT + LR')
plt.plot(fpr_rf, tpr_rf, label='RF')
plt.plot(fpr_rf_lm, tpr_rf_lm, label='RF + LR')
plt.plot(fpr_grd, tpr_grd, label='GBT')
plt.plot(fpr_grd_lm, tpr_grd_lm, label='GBT + LR')
plt.xlabel('False positive rate')
plt.ylabel('True positive rate')
plt.title('ROC curve')
plt.legend(loc='best')
plt.show()
plt.figure(2)
plt.xlim(0, 0.2)
plt.ylim(0.8, 1)
plt.plot([0, 1], [0, 1], 'k--')
plt.plot(fpr_rt_lm, tpr_rt_lm, label='RT + LR')
plt.plot(fpr_rf, tpr_rf, label='RF')
plt.plot(fpr_rf_lm, tpr_rf_lm, label='RF + LR')
plt.plot(fpr_grd, tpr_grd, label='GBT')
plt.plot(fpr_grd_lm, tpr_grd_lm, label='GBT + LR')
plt.xlabel('False positive rate')
plt.ylabel('True positive rate')
plt.title('ROC curve (zoomed in at top left)')
plt.legend(loc='best')
plt.show() | bsd-3-clause |
kball/ambry | test/old_tests/test_geo.py | 1 | 5525 | """
Created on Jan 17, 2013
@author: eric
"""
import unittest
from testbundle.bundle import Bundle
from ambry.identity import * #@UnusedWildImport
from test_base import TestBase
from osgeo.gdalconst import GDT_Float32
import ogr
class Test(TestBase):
def setUp(self):
self.copy_or_build_bundle()
self.bundle = Bundle()
self.bundle_dir = self.bundle.bundle_dir
def tearDown(self):
pass
def test_geo_schema(self):
from osgeo import ogr
import support
import os.path
from ambry.geo.sfschema import copy_schema
def geo_file(p):
return os.path.join(os.path.dirname(support.__file__),'neighborhoods',p)
url = "http://rdw.sandag.org/file_store/Business/Business_Sites.zip"
path = geo_file("Neighborhoods_SD.shp")
with self.bundle.session:
copy_schema(self.bundle.schema, url)
print self.bundle.schema.as_csv()
def x_test_basic(self):
from ambry.geo.analysisarea import get_analysis_area, draw_edges
from ambry.geo import Point
from ambry.geo.kernel import GaussianKernel
aa = get_analysis_area(self.bundle.library, geoid = 'CG0666000')
a = aa.new_array()
#draw_edges(a)
print a.shape, a.size
gaussian = GaussianKernel(11,6)
for i in range(0,400, 20):
p = Point(100+i,100+i)
gaussian.apply_add(a,p)
aa.write_geotiff('/tmp/box.tiff', a, data_type=GDT_Float32)
def test_sfschema(self):
from ambry.geo.sfschema import TableShapefile
from ambry.geo.analysisarea import get_analysis_area
_, communities = self.bundle.library.dep('communities')
csrs = communities.get_srs()
gp = self.bundle.partitions.new_geo_partition(table='geot2')
with gp.database.inserter(source_srs=csrs) as ins:
for row in communities.query("""
SELECT *,
X(Transform(Centroid(geometry), 4326)) AS lon,
Y(Transform(Centroid(geometry), 4326)) as lat,
AsText(geometry) as wkt,
AsBinary(geometry) as wkb
FROM communities"""):
r = {'name':row['cpname'], 'lat': row['lat'], 'lon': row['lon'], 'wkt': row['wkt']}
ins.insert(r)
return
aa = get_analysis_area(self.bundle.library, geoid = 'CG0666000')
path1 = '/tmp/geot1.kml'
if os.path.exists(path1): os.remove(path1)
sfs1 = TableShapefile(self.bundle, path1, 'geot1' )
path2 = '/tmp/geot2.kml'
if os.path.exists(path2): os.remove(path2)
sfs2 = TableShapefile(self.bundle, path2, 'geot2', source_srs=communities.get_srs())
print sfs1.type, sfs2.type
for row in communities.query("""
SELECT *,
X(Transform(Centroid(geometry), 4326)) AS lon,
Y(Transform(Centroid(geometry), 4326)) as lat,
AsText(geometry) as wkt,
AsBinary(geometry) as wkb
FROM communities"""):
sfs1.add_feature( {'name':row['cpname'], 'lat': row['lat'], 'lon': row['lon'], 'wkt': row['wkt']})
sfs2.add_feature( {'name':row['cpname'], 'lat': row['lat'], 'lon': row['lon'], 'wkt': row['wkt']})
sfs1.close()
sfs2.close()
def demo2(self):
import ambry
import ambry.library as dl
import ambry.geo as dg
import numpy as np
from matplotlib import pyplot as plt
k = dg.ConstantKernel(11)
b = np.zeros((50,50))
k.apply_add(b,dg.Point(0,0))
k.apply_add(b,dg.Point(0,b.shape[1]))
k.apply_add(b,dg.Point(b.shape[0],b.shape[1]))
k.apply_add(b,dg.Point(b.shape[0],0))
k.apply_add(b,dg.Point(45,0)) # for a specific bug
for i in range(-5,55):
for j in range(-5,55):
k.apply_add(b,dg.Point(i,j))
b /= np.max(b)
print "Done, Rendering"
img = plt.imshow(b, interpolation='nearest')
img.set_cmap('gist_heat')
plt.colorbar()
plt.show()
def demo3(self):
import ambry
import ambry.library as dl
import ambry.geo as dg
from matplotlib import pyplot as plt
import numpy as np
l = dl.get_library()
aa = dg.get_analysis_area(l, geoid='CG0666000')
r = l.find(dl.QueryCommand().identity(id='a2z2HM').partition(table='incidents',space=aa.geoid)).pop()
p = l.get(r.partition).partition
a = aa.new_array()
k = dg.ConstantKernel(9)
print aa
k.apply_add(a, dg.Point(400,1919))
k.apply_add(a, dg.Point(400,1920))
k.apply_add(a, dg.Point(400,1921))
k.apply_add(a, dg.Point(400,1922))
k.apply_add(a, dg.Point(400,1923))
for row in p.query("select date, time, cellx, celly from incidents"):
p = dg.Point(row['cellx'],row['celly'])
k.apply_add(a, p)
a /= np.max(a)
print np.sum(a)
img = plt.imshow(a, interpolation='nearest')
img.set_cmap('spectral_r')
plt.colorbar()
plt.show()
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
unittest.main() | bsd-2-clause |
UP-RS-ESP/bpl | example.py | 1 | 3471 | """
Examples to help illustrate the module BPL.PY
=============================================
Created: Mon May 23, 2016 02:56PM
Last modified: Tue May 24, 2016 06:36PM
Copyright: Bedartha Goswami <[email protected]>
"""
import numpy as np
import matplotlib.pyplot as pl
import bpl
def show_examples():
"""
Draws bounded power-law random samples and plots time-series snapshots
together with their histograms and theoretical PDFs.
"""
# set power-law parameters
alpha = 2.5
xmin = 1E1
xmax = 1E5
size = 1E5
# get random samples from the bounded power-laws
s1 = bpl.sample(alpha=alpha, size=size, xmin=xmin, xmax=None)
s2 = bpl.sample(alpha=alpha, size=size, xmin=xmin, xmax=xmax)
t = np.arange(size)
# choose a random window of 1000 points
start = np.random.randint(low=0, high=size, size=1)
stop = start + 1000
# plot time series with different markers for extreme & anomalous events
fig = pl.figure(figsize=[14.5, 6.5])
# power-law time series bounded only from below
ax1 = fig.add_axes([0.07, 0.57, 0.35, 0.35])
ax1.fill_between(t[start:stop], s1[start:stop],
facecolor="RoyalBlue", edgecolor="none")
ax1.set_title("Power-law random variate snapshot\n(bounded only below)",
fontsize=12)
# power-law time series bounded from above and below
ax2 = fig.add_axes([0.07, 0.07, 0.35, 0.35])
ax2.fill_between(t[start:stop], s2[start:stop],
facecolor="IndianRed", edgecolor="none")
ax2.set_title("Power-law random variate snapshot\n(bounded above & below)",
fontsize=12)
ax2.set_xlabel("Time (units)", fontsize=12)
for ax in fig.axes:
ax.set_ylabel("Observable (units)", fontsize=12)
ax.set_xlim(start, stop)
# plot histograms and PDFs
ax3 = fig.add_axes([0.51, 0.09, 0.35, 0.85])
ax3.set_xscale("log")
ax3.set_yscale("log")
# power-law time series bounded only from below
h1, be1 = bpl.histogram(s1, bins=None, density=True,
ax=None, plot=False)
mp1 = 0.5 * (be1[:-1] + be1[1:])
ax3.plot(mp1, h1, "o", mec="RoyalBlue", mfc="none", ms=8, mew=1.1)
ylo, yhi = ax3.get_ylim()
xlo, xhi = ax3.get_xlim()
x1 = np.logspace(np.log10(xlo), np.log10(xhi),
num=100, base=10)
pl1 = bpl.pdf(x1, alpha, xmin, xmax=None)
ax3.plot(x1, pl1, "-", c="RoyalBlue", lw=1.5, zorder=-1)
# power-law time series bounded from above and below
h2, be2 = bpl.histogram(s2, bins=None, density=True,
ax=None, plot=False)
mp2 = 0.5 * (be2[:-1] + be2[1:])
ax3.plot(mp2, h2, "s", mec="IndianRed", mfc="none", ms=8, mew=1.1)
x2 = np.logspace(np.log10(xmin), np.log10(xmax),
num=100, base=10)
pl2 = bpl.pdf(x2, alpha, xmin, xmax=xmax)
ax3.plot(x2, pl2, "-", c="IndianRed", lw=1.5, zorder=-1)
# a few prettification adjustments
ax3.set_ylim(ylo, yhi)
ax3.grid(which="both", axis="both")
ax3.legend(["Histogram (bounded below)",
"Theoretical (bounded below)",
"Histogram (bounded both sides)",
"Theoretical (bounded both sides)"
])
ax3.set_xlabel("Observable (units)", fontsize=12)
ax3.set_ylabel("Probability", fontsize=12)
# save/show plot
pl.show(fig)
return None
if __name__ == "__main__":
print("running example...")
show_examples()
print("done.")
| gpl-3.0 |
ycasg/PyNLO | src/pynlo/media/fibers/calculators.py | 2 | 2855 | # -*- coding: utf-8 -*-
"""
Created on Tue Jan 28 13:56:17 2014
This file is part of pyNLO.
pyNLO is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
pyNLO is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with pyNLO. If not, see <http://www.gnu.org/licenses/>.
@author: dim1
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from scipy.special import factorial
from scipy import constants
import matplotlib.pyplot as plt
def DTabulationToBetas(lambda0, DData, polyOrder, DDataIsFile = True, return_diagnostics = False):
""" Read in a tabulation of D vs Lambda. Returns betas in array
[beta2, beta3, ...]. If return_diagnostics is True, then return
(betas, fit_x_axis (omega in THz), data (ps^2), fit (ps^2) ) """
#
# Expand about lambda0
makePlots = 0
if DDataIsFile:
DTab = np.genfromtxt(DData, delimiter=',', skip_header=1)
else:
DTab = DData[:]
# Units of D are ps/nm/km
# Convert to s/m/m
DTab[:,1] = DTab[:,1] * 1e-12 * 1e9 * 1e-3
c = constants.speed_of_light
omegaAxis = 2*np.pi*c / (DTab[:,0]*1e-9) - 2*np.pi*c /(lambda0 * 1e-9)
# Convert from D to beta via beta2 = -D * lambda^2 / (2*pi*c)
betaTwo = -DTab[:,1] * (DTab[:,0]*1e-9)**2 / (2*np.pi*c)
# The units of beta2 for the GNLSE solver are ps^2/m; convert
betaTwo = betaTwo * 1e24
# Also convert angular frequency to rad/ps
omegaAxis = omegaAxis * 1e-12 # s/ps
# How betas are interpreted in gnlse.m:
#B=0;
#for i=1:length(betas)
# B = B + betas(i)/factorial(i+1).*V.^(i+1);
#end
# Fit beta2 with high-order polynomial
polyFitCo = np.polyfit(omegaAxis, betaTwo, polyOrder)
Betas = polyFitCo[::-1]
polyFit = np.zeros((len(omegaAxis),))
for i in range(len(Betas)):
Betas[i] = Betas[i] * factorial(i)
polyFit = polyFit + Betas[i] / factorial(i)*omegaAxis**i
if makePlots == 1:
# try:
# set(0,'CurrentFigure',dispfig);
# catch ME
# dispfig = figure('WindowStyle', 'docked');
# end
plt.plot(omegaAxis, betaTwo,'o')
plt.plot(omegaAxis, polyFit)
plt.show()
if return_diagnostics:
return Betas, omegaAxis, betaTwo, polyFit
else:
return Betas
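
# A hypothetical usage sketch (values below are illustrative only): a small
# tabulation of D (ps/nm/km) versus wavelength (nm) can be passed directly as
# an array with DDataIsFile=False and expanded about a chosen center
# wavelength.
#
#     lambda0 = 1550.0                              # nm
#     DData = np.array([[1500.0, 15.0],
#                       [1550.0, 17.0],
#                       [1600.0, 19.0]])            # [wavelength nm, D ps/nm/km]
#     betas = DTabulationToBetas(lambda0, DData, polyOrder=2,
#                                DDataIsFile=False)
#     # betas = [beta2, beta3, ...] as described in the docstring above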
| gpl-3.0 |
polyanskiy/refractiveindex.info-scripts | scripts/Ozaki 1993 - ZnS.py | 1 | 3191 | # -*- coding: utf-8 -*-
# Author: Mikhail Polyanskiy
# Last modified: 2017-04-12
# Original data: Ozaki and Adachi 1993, https://doi.org/10.1143/JJAP.32.5008
import numpy as np
import matplotlib.pyplot as plt
π = np.pi
# model parameters from table I
E0 = 3.75 #eV
Δ0 = 3.82-E0 #eV
A = 32.0 #eV**1.5
A0x = 0.07 #eV**-1
G0 = 0.040 #eV
Γ0 = 0.07 #eV
E1minusG1=5.74 #eV
B1x = 4.3 #eV
Γ1 = 0.48 #eV
E2 = 7.0 #eV
C = 0.25
γ = 0.06
εinf = 2.35
def Epsilon_A(ħω):
χ0 = (ħω + 1j*Γ0) / E0
χs0 = (ħω + 1j*Γ0) / (E0+Δ0)
fχ0 = χ0**-2 * ( 2 -(1+χ0)**0.5 - (1-χ0)**0.5 )
fχs0 = χs0**-2 * ( 2 - (1+χs0)**0.5 - (1-χs0)**0.5 )
return A*E0**-1.5 * (fχ0+0.5*(E0/(E0+Δ0))**1.5*fχs0)
def Epsilon_Ax(ħω):
y=0
for n in range(1,1000):
y += A0x/n**3 * ( 1/(E0-G0/n**2-ħω-1j*Γ0) + 0.5/(E0+Δ0-G0/n**2-ħω-1j*Γ0) )
return y
def Epsilon_Bx(ħω):
return B1x/(E1minusG1-ħω-1j*Γ1)
def Epsilon_C(ħω):
χ2 = ħω/E2
return C/((1-χ2**2)-1j*χ2*γ)
ev_min = 1.2
ev_max = 5.6
npoints = 500
eV = np.linspace(ev_min, ev_max, npoints)
μm = 4.13566733e-1*2.99792458/eV
εA = Epsilon_A(eV)
εAx = Epsilon_Ax(eV)
εBx = Epsilon_Bx(eV)
εC = Epsilon_C(eV)
ε = εA + εAx + εBx + εC + εinf
n = (ε**.5).real
k = (ε**.5).imag
α = 4*π*k/μm*1e4 #1/cm
#============================ DATA OUTPUT =================================
file = open('out.txt', 'w')
for i in range(npoints-1, -1, -1):
file.write('\n {:.4e} {:.4e} {:.4e}'.format(μm[i],n[i],k[i]))
file.close()
#=============================== PLOT =====================================
plt.rc('font', family='Arial', size='14')
#plot ε vs eV
plt.figure(1)
plt.plot(eV, ε.real, label="ε1")
plt.plot(eV, ε.imag, label="ε2")
plt.xlabel('Photon energy (eV)')
plt.ylabel('ε')
plt.legend(bbox_to_anchor=(0,1.02,1,0),loc=3,ncol=2,borderaxespad=0)
#plot intermediate data (for debugging)
plt.figure(2)
plt.plot(eV, εA.real, label="Re(εA)")
plt.plot(eV, εAx.real, label="Re(εAx)")
plt.plot(eV, εBx.real, label="Re(εBx)")
plt.plot(eV, εC.real, label="Re(εC)")
plt.xlabel('Photon energy (eV)')
plt.ylabel('ε')
plt.legend(bbox_to_anchor=(0,1.02,1,0),loc=3,ncol=2,borderaxespad=0)
plt.figure(3)
plt.plot(eV, εA.imag, label="Im(εA)")
plt.plot(eV, εAx.imag, label="Im(εAx)")
plt.plot(eV, εBx.imag, label="Im(εBx)")
plt.plot(eV, εC.imag, label="Re(εC)")
plt.xlabel('Photon energy (eV)')
plt.ylabel('ε')
plt.legend(bbox_to_anchor=(0,1.02,1,0),loc=3,ncol=2,borderaxespad=0)
#plot n,k vs eV
plt.figure(4)
plt.plot(eV, n, label="n")
plt.plot(eV, k, label="k")
plt.xlabel('Photon energy (eV)')
plt.ylabel('n, k')
plt.legend(bbox_to_anchor=(0,1.02,1,0),loc=3,ncol=2,borderaxespad=0)
#plot n,k vs μm
plt.figure(5)
plt.plot(μm, n, label="n")
plt.plot(μm, k, label="k")
plt.xlabel('Wavelength (μm)')
plt.ylabel('n, k')
plt.xscale('log')
plt.yscale('log')
plt.legend(bbox_to_anchor=(0,1.02,1,0),loc=3,ncol=2,borderaxespad=0)
#plot α vs eV
plt.figure(6)
plt.plot(eV,α)
plt.yscale('log')
plt.ylim([1e3,1e7])
plt.xlabel('Photon energy (eV)')
plt.ylabel('α (1/cm)') | gpl-3.0 |
dsullivan7/scikit-learn | benchmarks/bench_tree.py | 297 | 3617 | """
To run this, you'll need to have installed:
* scikit-learn
This script runs two benchmarks.
First, we fix a training set, increase the number of
samples to classify and plot number of classified samples as a
function of time.
In the second benchmark, we increase the number of dimensions of the
training set, classify a sample and plot the time taken as a function
of the number of dimensions.
"""
import numpy as np
import pylab as pl
import gc
from datetime import datetime
# to store the results
scikit_classifier_results = []
scikit_regressor_results = []
mu_second = 0.0 + 10 ** 6 # number of microseconds in a second
def bench_scikit_tree_classifier(X, Y):
"""Benchmark with scikit-learn decision tree classifier"""
from sklearn.tree import DecisionTreeClassifier
gc.collect()
# start time
tstart = datetime.now()
clf = DecisionTreeClassifier()
clf.fit(X, Y).predict(X)
delta = (datetime.now() - tstart)
# stop time
scikit_classifier_results.append(
delta.seconds + delta.microseconds / mu_second)
def bench_scikit_tree_regressor(X, Y):
"""Benchmark with scikit-learn decision tree regressor"""
from sklearn.tree import DecisionTreeRegressor
gc.collect()
# start time
tstart = datetime.now()
clf = DecisionTreeRegressor()
clf.fit(X, Y).predict(X)
delta = (datetime.now() - tstart)
# stop time
scikit_regressor_results.append(
delta.seconds + delta.microseconds / mu_second)
if __name__ == '__main__':
print('============================================')
print('Warning: this is going to take a looong time')
print('============================================')
n = 10
step = 10000
n_samples = 10000
dim = 10
n_classes = 10
for i in range(n):
print('============================================')
print('Entering iteration %s of %s' % (i, n))
print('============================================')
n_samples += step
X = np.random.randn(n_samples, dim)
Y = np.random.randint(0, n_classes, (n_samples,))
bench_scikit_tree_classifier(X, Y)
Y = np.random.randn(n_samples)
bench_scikit_tree_regressor(X, Y)
xx = range(0, n * step, step)
pl.figure('scikit-learn tree benchmark results')
pl.subplot(211)
pl.title('Learning with varying number of samples')
pl.plot(xx, scikit_classifier_results, 'g-', label='classification')
pl.plot(xx, scikit_regressor_results, 'r-', label='regression')
pl.legend(loc='upper left')
pl.xlabel('number of samples')
pl.ylabel('Time (s)')
scikit_classifier_results = []
scikit_regressor_results = []
n = 10
step = 500
start_dim = 500
n_classes = 10
dim = start_dim
for i in range(0, n):
print('============================================')
print('Entering iteration %s of %s' % (i, n))
print('============================================')
dim += step
X = np.random.randn(100, dim)
Y = np.random.randint(0, n_classes, (100,))
bench_scikit_tree_classifier(X, Y)
Y = np.random.randn(100)
bench_scikit_tree_regressor(X, Y)
xx = np.arange(start_dim, start_dim + n * step, step)
pl.subplot(212)
pl.title('Learning in high dimensional spaces')
pl.plot(xx, scikit_classifier_results, 'g-', label='classification')
pl.plot(xx, scikit_regressor_results, 'r-', label='regression')
pl.legend(loc='upper left')
pl.xlabel('number of dimensions')
pl.ylabel('Time (s)')
pl.axis('tight')
pl.show()
| bsd-3-clause |
saiwing-yeung/scikit-learn | sklearn/ensemble/tests/test_iforest.py | 9 | 6928 | """
Testing for Isolation Forest algorithm (sklearn.ensemble.iforest).
"""
# Authors: Nicolas Goix <[email protected]>
# Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
import numpy as np
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_no_warnings
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import ignore_warnings
from sklearn.grid_search import ParameterGrid
from sklearn.ensemble import IsolationForest
from sklearn.model_selection import train_test_split
from sklearn.datasets import load_boston, load_iris
from sklearn.utils import check_random_state
from sklearn.metrics import roc_auc_score
from scipy.sparse import csc_matrix, csr_matrix
rng = check_random_state(0)
# load the iris dataset
# and randomly permute it
iris = load_iris()
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
# also load the boston dataset
# and randomly permute it
boston = load_boston()
perm = rng.permutation(boston.target.size)
boston.data = boston.data[perm]
boston.target = boston.target[perm]
def test_iforest():
"""Check Isolation Forest for various parameter settings."""
X_train = np.array([[0, 1], [1, 2]])
X_test = np.array([[2, 1], [1, 1]])
grid = ParameterGrid({"n_estimators": [3],
"max_samples": [0.5, 1.0, 3],
"bootstrap": [True, False]})
with ignore_warnings():
for params in grid:
IsolationForest(random_state=rng,
**params).fit(X_train).predict(X_test)
def test_iforest_sparse():
"""Check IForest for various parameter settings on sparse input."""
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(boston.data[:50],
boston.target[:50],
random_state=rng)
grid = ParameterGrid({"max_samples": [0.5, 1.0],
"bootstrap": [True, False]})
for sparse_format in [csc_matrix, csr_matrix]:
X_train_sparse = sparse_format(X_train)
X_test_sparse = sparse_format(X_test)
for params in grid:
# Trained on sparse format
sparse_classifier = IsolationForest(
n_estimators=10, random_state=1, **params).fit(X_train_sparse)
sparse_results = sparse_classifier.predict(X_test_sparse)
# Trained on dense format
dense_classifier = IsolationForest(
n_estimators=10, random_state=1, **params).fit(X_train)
dense_results = dense_classifier.predict(X_test)
assert_array_equal(sparse_results, dense_results)
assert_array_equal(sparse_results, dense_results)
def test_iforest_error():
"""Test that it gives proper exception on deficient input."""
X = iris.data
# Test max_samples
assert_raises(ValueError,
IsolationForest(max_samples=-1).fit, X)
assert_raises(ValueError,
IsolationForest(max_samples=0.0).fit, X)
assert_raises(ValueError,
IsolationForest(max_samples=2.0).fit, X)
# The dataset has less than 256 samples, explicitly setting
# max_samples > n_samples should result in a warning. If not set
# explicitly there should be no warning
assert_warns_message(UserWarning,
"max_samples will be set to n_samples for estimation",
IsolationForest(max_samples=1000).fit, X)
assert_no_warnings(IsolationForest(max_samples='auto').fit, X)
assert_no_warnings(IsolationForest(max_samples=np.int64(2)).fit, X)
assert_raises(ValueError, IsolationForest(max_samples='foobar').fit, X)
assert_raises(ValueError, IsolationForest(max_samples=1.5).fit, X)
def test_recalculate_max_depth():
"""Check max_depth recalculation when max_samples is reset to n_samples"""
X = iris.data
clf = IsolationForest().fit(X)
for est in clf.estimators_:
assert_equal(est.max_depth, int(np.ceil(np.log2(X.shape[0]))))
def test_max_samples_attribute():
X = iris.data
clf = IsolationForest().fit(X)
assert_equal(clf.max_samples_, X.shape[0])
clf = IsolationForest(max_samples=500)
assert_warns_message(UserWarning,
"max_samples will be set to n_samples for estimation",
clf.fit, X)
assert_equal(clf.max_samples_, X.shape[0])
clf = IsolationForest(max_samples=0.4).fit(X)
assert_equal(clf.max_samples_, 0.4*X.shape[0])
def test_iforest_parallel_regression():
"""Check parallel regression."""
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(boston.data,
boston.target,
random_state=rng)
ensemble = IsolationForest(n_jobs=3,
random_state=0).fit(X_train)
ensemble.set_params(n_jobs=1)
y1 = ensemble.predict(X_test)
ensemble.set_params(n_jobs=2)
y2 = ensemble.predict(X_test)
assert_array_almost_equal(y1, y2)
ensemble = IsolationForest(n_jobs=1,
random_state=0).fit(X_train)
y3 = ensemble.predict(X_test)
assert_array_almost_equal(y1, y3)
def test_iforest_performance():
"""Test Isolation Forest performs well"""
# Generate train/test data
rng = check_random_state(2)
X = 0.3 * rng.randn(120, 2)
X_train = np.r_[X + 2, X - 2]
X_train = X[:100]
# Generate some abnormal novel observations
X_outliers = rng.uniform(low=-4, high=4, size=(20, 2))
X_test = np.r_[X[100:], X_outliers]
y_test = np.array([0] * 20 + [1] * 20)
# fit the model
clf = IsolationForest(max_samples=100, random_state=rng).fit(X_train)
# predict scores (the lower, the more normal)
y_pred = - clf.decision_function(X_test)
# check that there is at most 6 errors (false positive or false negative)
assert_greater(roc_auc_score(y_test, y_pred), 0.98)
def test_iforest_works():
# toy sample (the last two samples are outliers)
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1], [6, 3], [-4, 7]]
# Test LOF
clf = IsolationForest(random_state=rng, contamination=0.25)
clf.fit(X)
decision_func = - clf.decision_function(X)
pred = clf.predict(X)
# assert detect outliers:
assert_greater(np.min(decision_func[-2:]), np.max(decision_func[:-2]))
assert_array_equal(pred, 6 * [1] + 2 * [-1])
| bsd-3-clause |
potash/scikit-learn | examples/cluster/plot_kmeans_assumptions.py | 270 | 2040 | """
====================================
Demonstration of k-means assumptions
====================================
This example is meant to illustrate situations where k-means will produce
unintuitive and possibly unexpected clusters. In the first three plots, the
input data does not conform to some implicit assumption that k-means makes and
undesirable clusters are produced as a result. In the last plot, k-means
returns intuitive clusters despite unevenly sized blobs.
"""
print(__doc__)
# Author: Phil Roth <[email protected]>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
from sklearn.datasets import make_blobs
plt.figure(figsize=(12, 12))
n_samples = 1500
random_state = 170
X, y = make_blobs(n_samples=n_samples, random_state=random_state)
# Incorrect number of clusters
y_pred = KMeans(n_clusters=2, random_state=random_state).fit_predict(X)
plt.subplot(221)
plt.scatter(X[:, 0], X[:, 1], c=y_pred)
plt.title("Incorrect Number of Blobs")
# Anisotropically distributed data
transformation = [[ 0.60834549, -0.63667341], [-0.40887718, 0.85253229]]
X_aniso = np.dot(X, transformation)
y_pred = KMeans(n_clusters=3, random_state=random_state).fit_predict(X_aniso)
plt.subplot(222)
plt.scatter(X_aniso[:, 0], X_aniso[:, 1], c=y_pred)
plt.title("Anisotropicly Distributed Blobs")
# Different variance
X_varied, y_varied = make_blobs(n_samples=n_samples,
cluster_std=[1.0, 2.5, 0.5],
random_state=random_state)
y_pred = KMeans(n_clusters=3, random_state=random_state).fit_predict(X_varied)
plt.subplot(223)
plt.scatter(X_varied[:, 0], X_varied[:, 1], c=y_pred)
plt.title("Unequal Variance")
# Unevenly sized blobs
X_filtered = np.vstack((X[y == 0][:500], X[y == 1][:100], X[y == 2][:10]))
y_pred = KMeans(n_clusters=3, random_state=random_state).fit_predict(X_filtered)
plt.subplot(224)
plt.scatter(X_filtered[:, 0], X_filtered[:, 1], c=y_pred)
plt.title("Unevenly Sized Blobs")
plt.show()
| bsd-3-clause |
SaikWolf/gnuradio | gr-digital/examples/ofdm/gr_plot_ofdm.py | 77 | 10957 | #!/usr/bin/env python
#
# Copyright 2007 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
import math, struct, sys
from optparse import OptionParser
from math import log10
try:
import scipy
from scipy import fftpack
except ImportError:
print "Error: Program requires scipy (see: www.scipy.org)."
sys.exit(1)
try:
from pylab import *
from matplotlib.font_manager import fontManager, FontProperties
except ImportError:
print "Error: Program requires matplotlib (see: matplotlib.sourceforge.net)."
sys.exit(1)
matplotlib.interactive(True)
matplotlib.use('TkAgg')
class draw_constellation:
def __init__(self, options):
derot_file = "ofdm_frame_sink_c.dat"
acq_file = "ofdm_frame_acq_c.dat"
fft_file = "ofdm_receiver-fft_out_c.dat"
self.h_derot_file = open(derot_file, "r")
self.h_acq_file = open(acq_file, "r")
self.h_fft_file = open(fft_file, "r")
self.occ_tones = options.occ_tones
self.fft_size = options.fft_size
self.symbol = options.start
self.sample_rate = options.sample_rate
self.axis_font_size = 16
self.label_font_size = 18
self.title_font_size = 20
self.text_size = 22
# Setup PLOT
self.fig = figure(1, figsize=(14, 9), facecolor='w')
rcParams['xtick.labelsize'] = self.axis_font_size
rcParams['ytick.labelsize'] = self.axis_font_size
self.text_sym = figtext(0.05, 0.95, ("Symbol: %s" % self.symbol), weight="heavy", size=self.text_size)
self.make_plots()
self.button_left_axes = self.fig.add_axes([0.45, 0.01, 0.05, 0.05], frameon=True)
self.button_left = Button(self.button_left_axes, "<")
self.button_left_callback = self.button_left.on_clicked(self.button_left_click)
self.button_right_axes = self.fig.add_axes([0.50, 0.01, 0.05, 0.05], frameon=True)
self.button_right = Button(self.button_right_axes, ">")
self.button_right_callback = self.button_right.on_clicked(self.button_right_click)
self.xlim = self.sp_eq.get_xlim()
self.manager = get_current_fig_manager()
#connect('draw_event', self.zoom)
connect('key_press_event', self.click)
show()
def get_data(self):
self.text_sym.set_text("Symbol: %d" % (self.symbol))
derot_data = scipy.fromfile(self.h_derot_file, dtype=scipy.complex64, count=self.occ_tones)
acq_data = scipy.fromfile(self.h_acq_file, dtype=scipy.complex64, count=self.occ_tones)
fft_data = scipy.fromfile(self.h_fft_file, dtype=scipy.complex64, count=self.fft_size)
if(len(acq_data) == 0):
print "End of File"
else:
self.acq_data_reals = [r.real for r in acq_data]
self.acq_data_imags = [i.imag for i in acq_data]
self.derot_data_reals = [r.real for r in derot_data]
self.derot_data_imags = [i.imag for i in derot_data]
self.unequalized_angle = [math.atan2(x.imag, x.real) for x in fft_data]
self.equalized_angle = [math.atan2(x.imag, x.real) for x in acq_data]
self.derot_equalized_angle = [math.atan2(x.imag, x.real) for x in derot_data]
self.time = [i*(1/self.sample_rate) for i in range(len(acq_data))]
ffttime = [i*(1/self.sample_rate) for i in range(len(fft_data))]
self.freq = self.get_freq(ffttime, self.sample_rate)
for i in range(len(fft_data)):
if(abs(fft_data[i]) == 0.0):
fft_data[i] = complex(1e-6,1e-6)
self.fft_data = [20*log10(abs(f)) for f in fft_data]
def get_freq(self, time, sample_rate, T=1):
N = len(time)
Fs = 1.0 / (max(time) - min(time))
Fn = 0.5 * sample_rate
freq = [-Fn + i*Fs for i in range(N)]
return freq
def make_plots(self):
self.h_acq_file.seek(8*self.symbol*self.occ_tones, 0)
self.h_fft_file.seek(8*self.symbol*self.fft_size, 0)
self.h_derot_file.seek(8*self.symbol*self.occ_tones, 0)
self.get_data()
# Subplot: constellation of rotated symbols
self.sp_const = self.fig.add_subplot(4,1,1, position=[0.15, 0.55, 0.3, 0.35])
self.sp_const.set_title(("Constellation"), fontsize=self.title_font_size, fontweight="bold")
self.sp_const.set_xlabel("Inphase", fontsize=self.label_font_size, fontweight="bold")
self.sp_const.set_ylabel("Qaudrature", fontsize=self.label_font_size, fontweight="bold")
self.plot_const = plot(self.acq_data_reals, self.acq_data_imags, 'bo')
self.plot_const += plot(self.derot_data_reals, self.derot_data_imags, 'ro')
self.sp_const.axis([-2, 2, -2, 2])
# Subplot: unequalized angle
self.sp_uneq = self.fig.add_subplot(4,2,1, position=[0.575, 0.55, 0.3, 0.35])
self.sp_uneq.set_title(("Unequalized Angle"), fontsize=self.title_font_size, fontweight="bold")
self.sp_uneq.set_xlabel("Time (s)", fontsize=self.label_font_size, fontweight="bold")
self.sp_uneq.set_ylabel("Angle", fontsize=self.label_font_size, fontweight="bold")
uneqscale = range(len(self.unequalized_angle))
self.plot_uneq = plot(uneqscale, self.unequalized_angle, 'bo')
# Subplot: equalized angle
self.sp_eq = self.fig.add_subplot(4,1,2, position=[0.15, 0.1, 0.3, 0.35])
self.sp_eq.set_title(("Equalized Angle"), fontsize=self.title_font_size, fontweight="bold")
self.sp_eq.set_xlabel("Time (s)", fontsize=self.label_font_size, fontweight="bold")
self.sp_eq.set_ylabel("Angle", fontsize=self.label_font_size, fontweight="bold")
eqscale = range(len(self.equalized_angle))
self.plot_eq = plot(eqscale, self.equalized_angle, 'bo')
self.plot_eq += plot(eqscale, self.derot_equalized_angle, 'ro', markersize=4)
# Subplot: FFT
self.sp_fft = self.fig.add_subplot(4,2,2, position=[0.575, 0.1, 0.3, 0.35])
self.sp_fft.set_title(("FFT"), fontsize=self.title_font_size, fontweight="bold")
self.sp_fft.set_xlabel("Frequency (MHz)", fontsize=self.label_font_size, fontweight="bold")
self.sp_fft.set_ylabel("Power (dBm)", fontsize=self.label_font_size, fontweight="bold")
self.plot_fft = plot(self.freq, self.fft_data, '-bo')
draw()
def update_plots(self):
eqscale = range(len(self.equalized_angle))
uneqscale = range(len(self.unequalized_angle))
self.plot_eq[0].set_data([eqscale, self.equalized_angle])
self.plot_eq[1].set_data([eqscale, self.derot_equalized_angle])
self.plot_uneq[0].set_data([uneqscale, self.unequalized_angle])
self.sp_eq.set_ylim([-4, 4])
self.sp_uneq.set_ylim([-4, 4])
#self.sp_iq.axis([min(self.time), max(self.time),
# 1.5*min([min(self.acq_data_reals), min(self.acq_data_imags)]),
# 1.5*max([max(self.acq_data_reals), max(self.acq_data_imags)])])
self.plot_const[0].set_data([self.acq_data_reals, self.acq_data_imags])
self.plot_const[1].set_data([self.derot_data_reals, self.derot_data_imags])
self.sp_const.axis([-2, 2, -2, 2])
self.plot_fft[0].set_data([self.freq, self.fft_data])
draw()
def zoom(self, event):
newxlim = self.sp_eq.get_xlim()
if(newxlim != self.xlim):
self.xlim = newxlim
r = self.reals[int(ceil(self.xlim[0])) : int(ceil(self.xlim[1]))]
i = self.imags[int(ceil(self.xlim[0])) : int(ceil(self.xlim[1]))]
self.plot_const[0].set_data(r, i)
self.sp_const.axis([-2, 2, -2, 2])
self.manager.canvas.draw()
draw()
def click(self, event):
forward_valid_keys = [" ", "down", "right"]
backward_valid_keys = ["up", "left"]
if(find(event.key, forward_valid_keys)):
self.step_forward()
elif(find(event.key, backward_valid_keys)):
self.step_backward()
def button_left_click(self, event):
self.step_backward()
def button_right_click(self, event):
self.step_forward()
def step_forward(self):
self.symbol += 1
self.get_data()
self.update_plots()
def step_backward(self):
# Step back in file position
self.symbol -= 1
if(self.h_acq_file.tell() >= 16*self.occ_tones):
self.h_acq_file.seek(-16*self.occ_tones, 1)
else:
self.symbol = 0
self.h_acq_file.seek(-self.h_acq_file.tell(),1)
if(self.h_derot_file.tell() >= 16*self.occ_tones):
self.h_derot_file.seek(-16*self.occ_tones, 1)
else:
self.symbol = 0
self.h_derot_file.seek(-self.h_derot_file.tell(),1)
if(self.h_fft_file.tell() >= 16*self.fft_size):
self.h_fft_file.seek(-16*self.fft_size, 1)
else:
self.symbol = 0
self.h_fft_file.seek(-self.h_fft_file.tell(),1)
self.get_data()
self.update_plots()
#FIXME: there must be a way to do this with a Python builtin
def find(item_in, list_search):
for l in list_search:
if item_in == l:
return True
return False
def main():
usage="%prog: [options]"
parser = OptionParser(conflict_handler="resolve", usage=usage)
parser.add_option("", "--fft-size", type="int", default=512,
help="Specify the size of the FFT [default=%default]")
parser.add_option("", "--occ-tones", type="int", default=200,
help="Specify the number of occupied tones [default=%default]")
parser.add_option("-s", "--start", type="int", default=0,
help="Specify the starting symbol to plot [default=%default]")
parser.add_option("-R", "--sample-rate", type="float", default=1.0,
help="Set the sampler rate of the data [default=%default]")
(options, args) = parser.parse_args ()
dc = draw_constellation(options)
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
pass
| gpl-3.0 |
LamaHamadeh/Harvard-PH526x | Week4-Case-Studies-Part2/Classifying-Whiskies/Homework_Week4_CaseStudy1.py | 1 | 9921 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Apr 2 19:04:02 2017
@author: lamahamadeh
"""
'''
===================================================
Case Study 1 - Scotch Whiskies Analysis Using Bokeh
===================================================
'''
#In this case study, we have prepared step-by-step instructions for you on how
#to prepare plots in Bokeh, a library designed for simple interactive plotting.
#We will demonstrate Bokeh by continuing the analysis of Scotch whiskies.
#----------------------------------------------------------------------------------------------------------------
# Exercise 1
#-----------
#Here we provide a basic demonstration of an interactive grid plot using Bokeh.
#Execute the following code and follow along with the comments. We will later
#adapt this code to plot the correlations among distillery flavor profiles as
#well as plot a geographical map of distilleries colored by region and flavor
#profile.
#Make sure to study this code now, as we will edit similar code in the
#exercises that follow.
#Once you have plotted the code, hover, click, and drag your cursor on the plot
#to interact with it. Additionally, explore the icons in the top-right corner
#of the plot for more interactive options!
# First, we import a tool to allow text to pop up on a plot when the cursor
# hovers over it. Also, we import a data structure used to store arguments
# of what to plot in Bokeh. Finally, we will use numpy for this section as well!
from bokeh.models import HoverTool, ColumnDataSource
from bokeh.plotting import figure, output_file, show  # used to build and display the plots below
import numpy as np
# Let's plot a simple 5x5 grid of squares, alternating in color as red and blue.
plot_values = [1,2,3,4,5]
plot_colors = ["red", "blue"]
# How do we tell Bokeh to plot each point in a grid? Let's use a function that
# finds each combination of values from 1-5.
from itertools import product
grid = list(product(plot_values, plot_values))
print(grid)
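
# The exercise goes on to draw this grid interactively. A minimal sketch of
# that plot, assuming figure, output_file and show are imported from
# bokeh.plotting (see the imports at the top of this script); the output
# file name below is arbitrary. Colors alternate red/blue and the
# transparency (alpha) increases across the grid:
xs, ys = zip(*grid)                                    # unpack the (x, y) pairs
colors = [plot_colors[i % 2] for i in range(len(grid))]
alphas = np.linspace(0, 1, len(grid))
source = ColumnDataSource(
    data={"x": list(xs), "y": list(ys), "colors": colors, "alphas": alphas}
)
output_file("Basic_Example.html", title="Basic Example")
fig = figure(tools="hover")
fig.rect("x", "y", 0.9, 0.9, source=source, color="colors", alpha="alphas")
hover = fig.select(dict(type=HoverTool))
hover.tooltips = {"Value": "@x, @y"}
show(fig)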
#------------------------------------------------------------------------------
# Exercise 2
#-----------
#Let's create the names and colors we will use to plot the correlation matrix
#of whisky flavors. Later, we will also use these colors to plot each distillery
#geographically. Create a dictionary region_colors with regions as keys
#and cluster_colors as values.
cluster_colors = ["red", "orange", "green", "blue", "purple", "gray"]
regions = ["Speyside", "Highlands", "Lowlands", "Islands", "Campbelltown", "Islay"]
region_colors = dict(zip(regions,cluster_colors))
#Print region_colors.
print(region_colors)
#------------------------------------------------------------------------------
# Exercise 3
#-----------
#correlations is a two-dimensional np.array with both rows and columns
#corresponding to distilleries and elements corresponding to the flavor
#correlation of each row/column pair. Let's define a list correlation_colors,
#with string values corresponding to colors to be used to plot each distillery
#pair. Low correlations among distillery pairs will be white, high correlations
#will be a distinct group color if the distilleries are from the same group, and
#gray otherwise. Edit the code to define correlation_colors for each distillery
#pair to have input 'white' if their correlation is less than 0.7.
#whisky.Group is a pandas dataframe column consisting of distillery group
#memberships. For distillery pairs with correlation greater than 0.7, if they
#share the same whisky group, use the corresponding color from cluster_colors.
#Otherwise, the correlation_colors value for that distillery pair will be
#defined as 'lightgray'.
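# The rest of this script assumes two objects built earlier in the course:
# `whisky`, a DataFrame with Distillery, Region and a coclustering label column
# Group, and `correlations`, the distillery-by-distillery flavor-correlation
# matrix. A hedged reconstruction is sketched below; the file names, the flavor
# column range (columns 2:14) and n_clusters=6 are assumptions about the course
# dataset rather than part of the original exercise.
import pandas as pd
from sklearn.cluster.bicluster import SpectralCoclustering
whisky = pd.read_csv("whiskies.txt")
whisky["Region"] = pd.read_csv("regions.txt")["Region"]
flavors = whisky.iloc[:, 2:14]  # assumed flavor attribute columns
correlations = np.array(pd.DataFrame.corr(flavors.transpose()))
model = SpectralCoclustering(n_clusters=6, random_state=0)
model.fit(correlations)
whisky["Group"] = pd.Series(model.row_labels_, index=whisky.index)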
distilleries = list(whisky.Distillery)
correlation_colors = []
for i in range(len(distilleries)):
for j in range(len(distilleries)):
if correlations[i,j] < .70: # if low correlation,
correlation_colors.append('white') # just use white.
else: # otherwise,
if whisky.Group[i] == whisky.Group[j]: # if the groups match,
correlation_colors.append(cluster_colors[whisky.Group[i]]) # color them by their mutual group.
else: # otherwise
correlation_colors.append('lightgray') # color them lightgray.
#------------------------------------------------------------------------------
# Exercise 4
#-----------
#We will edit the following code to make an interactive grid of the correlations
#among distillery pairs using correlation_colors and correlations.
#correlation_colors is a list of each distillery pair. To convert correlations
#from a np.array to a list, we will use the flatten method. Define the color
#of each rectangle in the grid using correlation_colors.
#Define the alpha (transparency) values using correlations.flatten().
#Define correlations using correlations.flatten(). When the cursor hovers
#over a rectangle, this will show both distilleries in the pair
#as well as their correlation coefficient.
source = ColumnDataSource(
data = {
"x": np.repeat(distilleries,len(distilleries)),
"y": list(distilleries)*len(distilleries),
"colors": correlation_colors,
"alphas": correlations.flatten(),
"correlations": correlations.flatten(),
}
)
output_file("Whisky Correlations.html", title="Whisky Correlations")
fig = figure(title="Whisky Correlations",
x_axis_location="above", tools="resize,hover,save",
x_range=list(reversed(distilleries)), y_range=distilleries)
fig.grid.grid_line_color = None
fig.axis.axis_line_color = None
fig.axis.major_tick_line_color = None
fig.axis.major_label_text_font_size = "5pt"
fig.xaxis.major_label_orientation = np.pi / 3
fig.rect('x', 'y', .9, .9, source=source,
color='colors', alpha='alphas')
hover = fig.select(dict(type=HoverTool))
hover.tooltips = {
"Whiskies": "@x, @y",
"Correlation": "@correlations",
}
show(fig)
#------------------------------------------------------------------------------
# Exercise 5
#-----------
#Next, we provide an example of plotting points geographically.
#Run the following code, to be adapted in the next section.
#Compare this code to that used in plotting the distillery correlations.
points = [(0,0), (1,2), (3,1)]
xs, ys = zip(*points)
colors = ["red", "blue", "green"]
output_file("Spatial_Example.html", title="Regional Example")
location_source = ColumnDataSource(
data={
"x": xs,
"y": ys,
"colors": colors,
}
)
fig = figure(title = "Title",
x_axis_location = "above", tools="resize, hover, save")
fig.plot_width = 300
fig.plot_height = 380
fig.circle("x", "y", 10, 10, size=10, source=location_source,
color='colors', line_color = None)
hover = fig.select(dict(type = HoverTool))
hover.tooltips = {
"Location": "(@x, @y)"
}
show(fig)
#------------------------------------------------------------------------------
# Exercise 6
#-----------
#Adapt the given code from the beginning to show(fig) in order to define a
#function location_plot(title, colors). This function takes a string title
# and a list of colors corresponding to each distillery and outputs a Bokeh
#plot of each distillery by latitude and longitude. As the cursor hovers over
#each point, it displays the distillery name, latitude, and longitude.
def location_plot(title, colors):
output_file(title+".html")
location_source = ColumnDataSource(
data={
"x": whisky[" Latitude"],
"y": whisky[" Longitude"],
"colors": colors,
"regions": whisky.Region,
"distilleries": whisky.Distillery
}
)
fig = figure(title = title,
x_axis_location = "above", tools="resize, hover, save")
fig.plot_width = 400
fig.plot_height = 500
fig.circle("x", "y", 10, 10, size=9, source=location_source,
color='colors', line_color = None)
fig.xaxis.major_label_orientation = np.pi / 3
hover = fig.select(dict(type = HoverTool))
hover.tooltips = {
"Distillery": "@distilleries",
"Location": "(@x, @y)"
}
show(fig)
#whisky.Region is a pandas column containing the regional group membership for
#each distillery. Make a list consisting of the value of region_colors for
#each distillery, and store this list as region_cols.
region_cols = [region_colors[i] for i in list(whisky["Region"])]
#Use location_plot to plot each distillery, colored by its regional grouping.
location_plot("Whisky Locations and Regions", region_cols)
#------------------------------------------------------------------------------
# Exercise 7
#-----------
#Use list comprehensions to create the list region_cols consisting of the color
# in region_colors that corresponds to each whisky in whisky.Region.
region_cols = [region_colors[i] for i in whisky['Region']]
#Similarly, create a list classification_cols consisting of the color in
#cluster_colors that corresponds to each cluster membership in whisky.Group.
classification_cols = [cluster_colors[j] for j in whisky['Group']]
#location_plot remains stored from the previous exercise. Use it to create two
#interactive plots of distilleries, one colored by defined region called
#region_cols and the other with colors defined by coclustering designation
#called classification_cols. How well do the coclustering groupings match the
#regional groupings?
location_plot("Whisky Locations and Regions", region_cols)
location_plot("Whisky Locations and Groups", classification_cols)
'''
We see that there is not very much overlap between the regional classifications
and the coclustering classifications. This means that regional classifications
are not a very good guide to Scotch whisky flavor profiles.
'''
#------------------------------------------------------------------------------
| mit |
wwf5067/statsmodels | statsmodels/graphics/tests/test_functional.py | 30 | 2816 | from statsmodels.compat.python import range
import numpy as np
from numpy.testing import dec, assert_equal, assert_almost_equal
from statsmodels.graphics.functional import \
banddepth, fboxplot, rainbowplot
try:
import matplotlib.pyplot as plt
import matplotlib
have_matplotlib = True
except ImportError:
have_matplotlib = False
def test_banddepth_BD2():
xx = np.arange(500) / 150.
y1 = 1 + 0.5 * np.sin(xx)
y2 = 0.3 + np.sin(xx + np.pi/6)
y3 = -0.5 + np.sin(xx + np.pi/6)
y4 = -1 + 0.3 * np.cos(xx + np.pi/6)
data = np.asarray([y1, y2, y3, y4])
depth = banddepth(data, method='BD2')
expected_depth = [0.5, 5./6, 5./6, 0.5]
assert_almost_equal(depth, expected_depth)
## Plot to visualize why we expect this output
#fig = plt.figure()
#ax = fig.add_subplot(111)
#for ii, yy in enumerate([y1, y2, y3, y4]):
# ax.plot(xx, yy, label="y%s" % ii)
#ax.legend()
#plt.show()
def test_banddepth_MBD():
xx = np.arange(5001) / 5000.
y1 = np.zeros(xx.shape)
y2 = 2 * xx - 1
y3 = np.ones(xx.shape) * 0.5
y4 = np.ones(xx.shape) * -0.25
data = np.asarray([y1, y2, y3, y4])
depth = banddepth(data, method='MBD')
expected_depth = [5./6, (2*(0.75-3./8)+3)/6, 3.5/6, (2*3./8+3)/6]
assert_almost_equal(depth, expected_depth, decimal=4)
@dec.skipif(not have_matplotlib)
def test_fboxplot_rainbowplot():
# Test fboxplot and rainbowplot together, is much faster.
def harmfunc(t):
"""Test function, combination of a few harmonic terms."""
# Constant, 0 with p=0.9, 1 with p=1 - for creating outliers
ci = int(np.random.random() > 0.9)
a1i = np.random.random() * 0.05
a2i = np.random.random() * 0.05
b1i = (0.15 - 0.1) * np.random.random() + 0.1
b2i = (0.15 - 0.1) * np.random.random() + 0.1
func = (1 - ci) * (a1i * np.sin(t) + a2i * np.cos(t)) + \
ci * (b1i * np.sin(t) + b2i * np.cos(t))
return func
np.random.seed(1234567)
# Some basic test data, Model 6 from Sun and Genton.
t = np.linspace(0, 2 * np.pi, 250)
data = []
for ii in range(20):
data.append(harmfunc(t))
# fboxplot test
fig = plt.figure()
ax = fig.add_subplot(111)
_, depth, ix_depth, ix_outliers = fboxplot(data, wfactor=2, ax=ax)
ix_expected = np.array([13, 4, 15, 19, 8, 6, 3, 16, 9, 7, 1, 5, 2,
12, 17, 11, 14, 10, 0, 18])
assert_equal(ix_depth, ix_expected)
ix_expected2 = np.array([2, 11, 17, 18])
assert_equal(ix_outliers, ix_expected2)
plt.close(fig)
# rainbowplot test (re-uses depth variable)
xdata = np.arange(data[0].size)
fig = rainbowplot(data, xdata=xdata, depth=depth, cmap=plt.cm.rainbow)
plt.close(fig)
| bsd-3-clause |
dgwakeman/mne-python | examples/decoding/plot_decoding_sensors.py | 6 | 3384 | """
==========================
Decoding sensor space data
==========================
Decoding, a.k.a MVPA or supervised machine learning applied to MEG
data in sensor space. Here the classifier is applied to every time
point.
"""
# Authors: Alexandre Gramfort <[email protected]>
#
# License: BSD (3-clause)
import matplotlib.pyplot as plt
import numpy as np
import mne
from mne import io
from mne.datasets import sample
print(__doc__)
data_path = sample.data_path()
plt.close('all')
###############################################################################
# Set parameters
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
event_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'
tmin, tmax = -0.2, 0.5
event_id = dict(aud_l=1, vis_l=3)
# Setup for reading the raw data
raw = io.Raw(raw_fname, preload=True)
raw.filter(2, None, method='iir') # replace baselining with high-pass
events = mne.read_events(event_fname)
# Set up pick list: EEG + MEG - bad channels (modify to your needs)
raw.info['bads'] += ['MEG 2443', 'EEG 053'] # bads + 2 more
picks = mne.pick_types(raw.info, meg='grad', eeg=False, stim=True, eog=True,
exclude='bads')
# Read epochs
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, proj=True,
picks=picks, baseline=None, preload=True,
reject=dict(grad=4000e-13, eog=150e-6))
epochs_list = [epochs[k] for k in event_id]
mne.epochs.equalize_epoch_counts(epochs_list)
###############################################################################
# Decoding in sensor space using a linear SVM
n_times = len(epochs.times)
# Take only the data channels (here the gradiometers)
data_picks = mne.pick_types(epochs.info, meg=True, exclude='bads')
# Make arrays X and y such that :
# X is 3d with X.shape[0] is the total number of epochs to classify
# y is filled with integers coding for the class to predict
# We must have X.shape[0] equal to y.shape[0]
X = [e.get_data()[:, data_picks, :] for e in epochs_list]
y = [k * np.ones(len(this_X)) for k, this_X in enumerate(X)]
X = np.concatenate(X)
y = np.concatenate(y)
from sklearn.svm import SVC # noqa
from sklearn.cross_validation import cross_val_score, ShuffleSplit # noqa
clf = SVC(C=1, kernel='linear')
# Define a monte-carlo cross-validation generator (reduce variance):
cv = ShuffleSplit(len(X), 10, test_size=0.2)
scores = np.empty(n_times)
std_scores = np.empty(n_times)
for t in range(n_times):
Xt = X[:, :, t]
# Standardize features
Xt -= Xt.mean(axis=0)
Xt /= Xt.std(axis=0)
# Run cross-validation
# Note : for sklearn the Xt matrix should be 2d (n_samples x n_features)
scores_t = cross_val_score(clf, Xt, y, cv=cv, n_jobs=1)
scores[t] = scores_t.mean()
std_scores[t] = scores_t.std()
times = 1e3 * epochs.times
scores *= 100 # make it percentage
std_scores *= 100
plt.plot(times, scores, label="Classif. score")
plt.axhline(50, color='k', linestyle='--', label="Chance level")
plt.axvline(0, color='r', label='stim onset')
plt.legend()
hyp_limits = (scores - std_scores, scores + std_scores)
plt.fill_between(times, hyp_limits[0], y2=hyp_limits[1], color='b', alpha=0.5)
plt.xlabel('Times (ms)')
plt.ylabel('CV classification score (% correct)')
plt.ylim([30, 100])
plt.title('Sensor space decoding')
plt.show()
| bsd-3-clause |
kenshay/ImageScripter | ProgramData/SystemFiles/Python/Lib/site-packages/pandas/io/tests/sas/test_sas7bdat.py | 7 | 4973 | import pandas as pd
from pandas.compat import PY2
import pandas.util.testing as tm
import os
import io
import numpy as np
class TestSAS7BDAT(tm.TestCase):
def setUp(self):
self.dirpath = tm.get_data_path()
self.data = []
self.test_ix = [list(range(1, 16)), [16]]
for j in 1, 2:
fname = os.path.join(self.dirpath, "test_sas7bdat_%d.csv" % j)
df = pd.read_csv(fname)
epoch = pd.datetime(1960, 1, 1)
t1 = pd.to_timedelta(df["Column4"], unit='d')
df["Column4"] = epoch + t1
t2 = pd.to_timedelta(df["Column12"], unit='d')
df["Column12"] = epoch + t2
for k in range(df.shape[1]):
col = df.iloc[:, k]
if col.dtype == np.int64:
df.iloc[:, k] = df.iloc[:, k].astype(np.float64)
elif col.dtype == np.dtype('O'):
if PY2:
f = lambda x: (x.decode('utf-8') if
isinstance(x, str) else x)
df.iloc[:, k] = df.iloc[:, k].apply(f)
self.data.append(df)
def test_from_file(self):
for j in 0, 1:
df0 = self.data[j]
for k in self.test_ix[j]:
fname = os.path.join(self.dirpath, "test%d.sas7bdat" % k)
df = pd.read_sas(fname, encoding='utf-8')
tm.assert_frame_equal(df, df0)
def test_from_buffer(self):
for j in 0, 1:
df0 = self.data[j]
for k in self.test_ix[j]:
fname = os.path.join(self.dirpath, "test%d.sas7bdat" % k)
with open(fname, 'rb') as f:
byts = f.read()
buf = io.BytesIO(byts)
rdr = pd.read_sas(buf, format="sas7bdat",
iterator=True, encoding='utf-8')
df = rdr.read()
tm.assert_frame_equal(df, df0, check_exact=False)
def test_from_iterator(self):
for j in 0, 1:
df0 = self.data[j]
for k in self.test_ix[j]:
fname = os.path.join(self.dirpath, "test%d.sas7bdat" % k)
rdr = pd.read_sas(fname, iterator=True, encoding='utf-8')
df = rdr.read(2)
tm.assert_frame_equal(df, df0.iloc[0:2, :])
df = rdr.read(3)
tm.assert_frame_equal(df, df0.iloc[2:5, :])
def test_iterator_loop(self):
# github #13654
for j in 0, 1:
for k in self.test_ix[j]:
for chunksize in 3, 5, 10, 11:
fname = os.path.join(self.dirpath, "test%d.sas7bdat" % k)
rdr = pd.read_sas(fname, chunksize=10, encoding='utf-8')
y = 0
for x in rdr:
y += x.shape[0]
self.assertTrue(y == rdr.row_count)
def test_iterator_read_too_much(self):
# github #14734
k = self.test_ix[0][0]
fname = os.path.join(self.dirpath, "test%d.sas7bdat" % k)
rdr = pd.read_sas(fname, format="sas7bdat",
iterator=True, encoding='utf-8')
d1 = rdr.read(rdr.row_count + 20)
rdr = pd.read_sas(fname, iterator=True, encoding="utf-8")
d2 = rdr.read(rdr.row_count + 20)
tm.assert_frame_equal(d1, d2)
def test_encoding_options():
dirpath = tm.get_data_path()
fname = os.path.join(dirpath, "test1.sas7bdat")
df1 = pd.read_sas(fname)
df2 = pd.read_sas(fname, encoding='utf-8')
for col in df1.columns:
try:
df1[col] = df1[col].str.decode('utf-8')
except AttributeError:
pass
tm.assert_frame_equal(df1, df2)
from pandas.io.sas.sas7bdat import SAS7BDATReader
rdr = SAS7BDATReader(fname, convert_header_text=False)
df3 = rdr.read()
rdr.close()
for x, y in zip(df1.columns, df3.columns):
assert(x == y.decode())
def test_productsales():
dirpath = tm.get_data_path()
fname = os.path.join(dirpath, "productsales.sas7bdat")
df = pd.read_sas(fname, encoding='utf-8')
fname = os.path.join(dirpath, "productsales.csv")
df0 = pd.read_csv(fname)
vn = ["ACTUAL", "PREDICT", "QUARTER", "YEAR", "MONTH"]
df0[vn] = df0[vn].astype(np.float64)
tm.assert_frame_equal(df, df0)
def test_12659():
dirpath = tm.get_data_path()
fname = os.path.join(dirpath, "test_12659.sas7bdat")
df = pd.read_sas(fname)
fname = os.path.join(dirpath, "test_12659.csv")
df0 = pd.read_csv(fname)
df0 = df0.astype(np.float64)
tm.assert_frame_equal(df, df0)
def test_airline():
dirpath = tm.get_data_path()
fname = os.path.join(dirpath, "airline.sas7bdat")
df = pd.read_sas(fname)
fname = os.path.join(dirpath, "airline.csv")
df0 = pd.read_csv(fname)
df0 = df0.astype(np.float64)
tm.assert_frame_equal(df, df0, check_exact=False)
| gpl-3.0 |
jblackburne/scikit-learn | examples/linear_model/plot_lasso_lars.py | 363 | 1080 | #!/usr/bin/env python
"""
=====================
Lasso path using LARS
=====================
Computes Lasso Path along the regularization parameter using the LARS
algorithm on the diabetes dataset. Each color represents a different
feature of the coefficient vector, and this is displayed as a function
of the regularization parameter.
"""
print(__doc__)
# Author: Fabian Pedregosa <[email protected]>
# Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
from sklearn import datasets
diabetes = datasets.load_diabetes()
X = diabetes.data
y = diabetes.target
print("Computing regularization path using the LARS ...")
alphas, _, coefs = linear_model.lars_path(X, y, method='lasso', verbose=True)
xx = np.sum(np.abs(coefs.T), axis=1)
xx /= xx[-1]
plt.plot(xx, coefs.T)
ymin, ymax = plt.ylim()
plt.vlines(xx, ymin, ymax, linestyle='dashed')
plt.xlabel('|coef| / max|coef|')
plt.ylabel('Coefficients')
plt.title('LASSO Path')
plt.axis('tight')
plt.show()
| bsd-3-clause |
borismarin/genesis2.4gamma | Scripts/gpython-tools/weight_hist.py | 2 | 3321 | #!/usr/bin/env python
# weight_hist.py ver 1.0 - a command line utility to plot a wildcarded argument
# list of files containing a line of connection weight values as a histogram.
import sys, os
import matplotlib.pyplot as plt
import numpy as np
def print_help():
msg = """
weight_hist.py ver 1.0 - a utility to plot synaptic weight histograms
Usage: weight_hist.py filenames [filenames2] [filenames3] ...
weight_hist.py plots a histogram of synaptic weight values for the
connections to a synaptically activated channel, such as a GENESIS
synchan element. Here 'filenames' is a wildcarded list of filenames, e.g.
'weight_hist W128ex32inh-gmax0.2*.txt', or a single filename. Each
file should consist of a single line of synaptic weight values.
If there is more than one file, the data are plotted as separately
colored and labeled bars.
"""
print msg
def do_plot_files(filenames):
formats = ['r', 'g', 'b', 'k', 'm', 'c']
plotnum = 0
datasets = []
colors = []
for file in filenames:
# print file
format = formats[plotnum % len(formats)]
if os.path.exists(file):
fp = open(file, 'r')
values = np.loadtxt(fp, dtype='float')
print format, plotnum
nvalues = len(values)
print 'Number of values = %d' % nvalues
datasets.append(values)
colors.append(format)
ndatasets = len(datasets)
print 'Number of datasets = %d' % ndatasets
print 'Plotting %s' % file
plotnum = plotnum + 1
else:
print '*** Error: Incorrect file name or path specified ***'
sys.exit()
# I need to do better error handling!
# Now make the histogram with the data in values
n, bins, patches = axes.hist(datasets, bins=20, label=filenames, color=colors)
# print n, bins, patches
if __name__ == "__main__":
# Get the arguments (possibly wildcarded) into a list of filenames
filenames = sys.argv[1:]
if len(filenames) == 0:
print_help()
sys.exit()
print filenames
# Generate a RUNID from a string like "W128ex32inh_0.20.txt"
fn1 = sys.argv[1]
fnbase,ext = os.path.splitext(fn1)
if len(filenames) == 1:
runid = fnbase
else:
# get string following final '_' and remove 1 char suffix
# runid = 'series ' + fnbase.split('_')[-1][:-1]
runid = 'series ' + fnbase[:-1]
# print filenames, runid
# create the plot
fig = plt.figure()
fig.canvas.set_window_title('GENESIS Weight Histogram')
fig.suptitle('Histogram of connection weights')
# A pleasant light blue background
# figbg = (191, 209, 212)
figbg = (160, 216, 248)
fig.set_facecolor('#%02x%02x%02x' % figbg)
axes = fig.add_subplot(111)
do_plot_files(filenames)
# n, bins, patches = axes.hist([values[0], values[1], values[2]],
# bins=20, histtype='bar',color=format)
axes.set_title('Weights for ' + runid)
axes.set_xlabel('Synaptic Weights')
axes.set_ylabel('Number')
axes.legend(loc='upper center')
plt.show()
| gpl-2.0 |
liangz0707/scikit-learn | sklearn/utils/__init__.py | 79 | 14202 | """
The :mod:`sklearn.utils` module includes various utilities.
"""
from collections import Sequence
import numpy as np
from scipy.sparse import issparse
import warnings
from .murmurhash import murmurhash3_32
from .validation import (as_float_array,
assert_all_finite,
check_random_state, column_or_1d, check_array,
check_consistent_length, check_X_y, indexable,
check_symmetric, DataConversionWarning)
from .class_weight import compute_class_weight, compute_sample_weight
from ..externals.joblib import cpu_count
__all__ = ["murmurhash3_32", "as_float_array",
"assert_all_finite", "check_array",
"check_random_state",
"compute_class_weight", "compute_sample_weight",
"column_or_1d", "safe_indexing",
"check_consistent_length", "check_X_y", 'indexable',
"check_symmetric"]
class deprecated(object):
"""Decorator to mark a function or class as deprecated.
Issue a warning when the function is called/the class is instantiated and
adds a warning to the docstring.
The optional extra argument will be appended to the deprecation message
and the docstring. Note: to use this with the default value for extra, put
    in an empty set of parentheses:
>>> from sklearn.utils import deprecated
>>> deprecated() # doctest: +ELLIPSIS
<sklearn.utils.deprecated object at ...>
>>> @deprecated()
... def some_function(): pass
"""
# Adapted from http://wiki.python.org/moin/PythonDecoratorLibrary,
# but with many changes.
def __init__(self, extra=''):
"""
Parameters
----------
extra: string
to be added to the deprecation messages
"""
self.extra = extra
def __call__(self, obj):
if isinstance(obj, type):
return self._decorate_class(obj)
else:
return self._decorate_fun(obj)
def _decorate_class(self, cls):
msg = "Class %s is deprecated" % cls.__name__
if self.extra:
msg += "; %s" % self.extra
# FIXME: we should probably reset __new__ for full generality
init = cls.__init__
def wrapped(*args, **kwargs):
warnings.warn(msg, category=DeprecationWarning)
return init(*args, **kwargs)
cls.__init__ = wrapped
wrapped.__name__ = '__init__'
wrapped.__doc__ = self._update_doc(init.__doc__)
wrapped.deprecated_original = init
return cls
def _decorate_fun(self, fun):
"""Decorate function fun"""
msg = "Function %s is deprecated" % fun.__name__
if self.extra:
msg += "; %s" % self.extra
def wrapped(*args, **kwargs):
warnings.warn(msg, category=DeprecationWarning)
return fun(*args, **kwargs)
wrapped.__name__ = fun.__name__
wrapped.__dict__ = fun.__dict__
wrapped.__doc__ = self._update_doc(fun.__doc__)
return wrapped
def _update_doc(self, olddoc):
newdoc = "DEPRECATED"
if self.extra:
newdoc = "%s: %s" % (newdoc, self.extra)
if olddoc:
newdoc = "%s\n\n%s" % (newdoc, olddoc)
return newdoc
def safe_mask(X, mask):
"""Return a mask which is safe to use on X.
Parameters
----------
X : {array-like, sparse matrix}
Data on which to apply mask.
mask: array
Mask to be used on X.
Returns
-------
mask
"""
mask = np.asarray(mask)
if np.issubdtype(mask.dtype, np.int):
return mask
if hasattr(X, "toarray"):
ind = np.arange(mask.shape[0])
mask = ind[mask]
return mask
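# Illustrative usage sketch (an assumption, not part of the documented API): for
# a sparse ``X`` the boolean mask is converted to integer indices first, so that
#
#     mask = safe_mask(X, y == 1)
#     X_subset = X[mask]
#
# works for both dense arrays and sparse matrices.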
def safe_indexing(X, indices):
"""Return items or rows from X using indices.
Allows simple indexing of lists or arrays.
Parameters
----------
X : array-like, sparse-matrix, list.
Data from which to sample rows or items.
indices : array-like, list
Indices according to which X will be subsampled.
"""
if hasattr(X, "iloc"):
# Pandas Dataframes and Series
try:
return X.iloc[indices]
except ValueError:
# Cython typed memoryviews internally used in pandas do not support
# readonly buffers.
warnings.warn("Copying input dataframe for slicing.",
DataConversionWarning)
return X.copy().iloc[indices]
elif hasattr(X, "shape"):
if hasattr(X, 'take') and (hasattr(indices, 'dtype') and
indices.dtype.kind == 'i'):
# This is often substantially faster than X[indices]
return X.take(indices, axis=0)
else:
return X[indices]
else:
return [X[idx] for idx in indices]
def resample(*arrays, **options):
"""Resample arrays or sparse matrices in a consistent way
The default strategy implements one step of the bootstrapping
procedure.
Parameters
----------
*arrays : sequence of indexable data-structures
Indexable data-structures can be arrays, lists, dataframes or scipy
sparse matrices with consistent first dimension.
replace : boolean, True by default
Implements resampling with replacement. If False, this will implement
(sliced) random permutations.
n_samples : int, None by default
Number of samples to generate. If left to None this is
automatically set to the first dimension of the arrays.
random_state : int or RandomState instance
Control the shuffling for reproducible behavior.
Returns
-------
resampled_arrays : sequence of indexable data-structures
Sequence of resampled views of the collections. The original arrays are
not impacted.
Examples
--------
It is possible to mix sparse and dense arrays in the same run::
>>> X = np.array([[1., 0.], [2., 1.], [0., 0.]])
>>> y = np.array([0, 1, 2])
>>> from scipy.sparse import coo_matrix
>>> X_sparse = coo_matrix(X)
>>> from sklearn.utils import resample
>>> X, X_sparse, y = resample(X, X_sparse, y, random_state=0)
>>> X
array([[ 1., 0.],
[ 2., 1.],
[ 1., 0.]])
>>> X_sparse # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
<3x2 sparse matrix of type '<... 'numpy.float64'>'
with 4 stored elements in Compressed Sparse Row format>
>>> X_sparse.toarray()
array([[ 1., 0.],
[ 2., 1.],
[ 1., 0.]])
>>> y
array([0, 1, 0])
>>> resample(y, n_samples=2, random_state=0)
array([0, 1])
See also
--------
:func:`sklearn.utils.shuffle`
"""
random_state = check_random_state(options.pop('random_state', None))
replace = options.pop('replace', True)
max_n_samples = options.pop('n_samples', None)
if options:
raise ValueError("Unexpected kw arguments: %r" % options.keys())
if len(arrays) == 0:
return None
first = arrays[0]
n_samples = first.shape[0] if hasattr(first, 'shape') else len(first)
if max_n_samples is None:
max_n_samples = n_samples
if max_n_samples > n_samples:
raise ValueError("Cannot sample %d out of arrays with dim %d" % (
max_n_samples, n_samples))
check_consistent_length(*arrays)
if replace:
indices = random_state.randint(0, n_samples, size=(max_n_samples,))
else:
indices = np.arange(n_samples)
random_state.shuffle(indices)
indices = indices[:max_n_samples]
# convert sparse matrices to CSR for row-based indexing
arrays = [a.tocsr() if issparse(a) else a for a in arrays]
resampled_arrays = [safe_indexing(a, indices) for a in arrays]
if len(resampled_arrays) == 1:
# syntactic sugar for the unit argument case
return resampled_arrays[0]
else:
return resampled_arrays
def shuffle(*arrays, **options):
"""Shuffle arrays or sparse matrices in a consistent way
This is a convenience alias to ``resample(*arrays, replace=False)`` to do
random permutations of the collections.
Parameters
----------
*arrays : sequence of indexable data-structures
Indexable data-structures can be arrays, lists, dataframes or scipy
sparse matrices with consistent first dimension.
random_state : int or RandomState instance
Control the shuffling for reproducible behavior.
n_samples : int, None by default
Number of samples to generate. If left to None this is
automatically set to the first dimension of the arrays.
Returns
-------
shuffled_arrays : sequence of indexable data-structures
Sequence of shuffled views of the collections. The original arrays are
not impacted.
Examples
--------
It is possible to mix sparse and dense arrays in the same run::
>>> X = np.array([[1., 0.], [2., 1.], [0., 0.]])
>>> y = np.array([0, 1, 2])
>>> from scipy.sparse import coo_matrix
>>> X_sparse = coo_matrix(X)
>>> from sklearn.utils import shuffle
>>> X, X_sparse, y = shuffle(X, X_sparse, y, random_state=0)
>>> X
array([[ 0., 0.],
[ 2., 1.],
[ 1., 0.]])
>>> X_sparse # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
<3x2 sparse matrix of type '<... 'numpy.float64'>'
with 3 stored elements in Compressed Sparse Row format>
>>> X_sparse.toarray()
array([[ 0., 0.],
[ 2., 1.],
[ 1., 0.]])
>>> y
array([2, 1, 0])
>>> shuffle(y, n_samples=2, random_state=0)
array([0, 1])
See also
--------
:func:`sklearn.utils.resample`
"""
options['replace'] = False
return resample(*arrays, **options)
def safe_sqr(X, copy=True):
"""Element wise squaring of array-likes and sparse matrices.
Parameters
----------
X : array like, matrix, sparse matrix
copy : boolean, optional, default True
Whether to create a copy of X and operate on it or to perform
inplace computation (default behaviour).
Returns
-------
X ** 2 : element wise square
"""
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'], ensure_2d=False)
if issparse(X):
if copy:
X = X.copy()
X.data **= 2
else:
if copy:
X = X ** 2
else:
X **= 2
return X
def gen_batches(n, batch_size):
"""Generator to create slices containing batch_size elements, from 0 to n.
The last slice may contain less than batch_size elements, when batch_size
does not divide n.
Examples
--------
>>> from sklearn.utils import gen_batches
>>> list(gen_batches(7, 3))
[slice(0, 3, None), slice(3, 6, None), slice(6, 7, None)]
>>> list(gen_batches(6, 3))
[slice(0, 3, None), slice(3, 6, None)]
>>> list(gen_batches(2, 3))
[slice(0, 2, None)]
"""
start = 0
for _ in range(int(n // batch_size)):
end = start + batch_size
yield slice(start, end)
start = end
if start < n:
yield slice(start, n)
def gen_even_slices(n, n_packs, n_samples=None):
"""Generator to create n_packs slices going up to n.
Pass n_samples when the slices are to be used for sparse matrix indexing;
slicing off-the-end raises an exception, while it works for NumPy arrays.
Examples
--------
>>> from sklearn.utils import gen_even_slices
>>> list(gen_even_slices(10, 1))
[slice(0, 10, None)]
>>> list(gen_even_slices(10, 10)) #doctest: +ELLIPSIS
[slice(0, 1, None), slice(1, 2, None), ..., slice(9, 10, None)]
>>> list(gen_even_slices(10, 5)) #doctest: +ELLIPSIS
[slice(0, 2, None), slice(2, 4, None), ..., slice(8, 10, None)]
>>> list(gen_even_slices(10, 3))
[slice(0, 4, None), slice(4, 7, None), slice(7, 10, None)]
"""
start = 0
if n_packs < 1:
raise ValueError("gen_even_slices got n_packs=%s, must be >=1" % n_packs)
for pack_num in range(n_packs):
this_n = n // n_packs
if pack_num < n % n_packs:
this_n += 1
if this_n > 0:
end = start + this_n
if n_samples is not None:
end = min(n_samples, end)
yield slice(start, end, None)
start = end
def _get_n_jobs(n_jobs):
"""Get number of jobs for the computation.
This function reimplements the logic of joblib to determine the actual
number of jobs depending on the cpu count. If -1 all CPUs are used.
If 1 is given, no parallel computing code is used at all, which is useful
for debugging. For n_jobs below -1, (n_cpus + 1 + n_jobs) are used.
Thus for n_jobs = -2, all CPUs but one are used.
Parameters
----------
n_jobs : int
Number of jobs stated in joblib convention.
Returns
-------
n_jobs : int
The actual number of jobs as positive integer.
Examples
--------
>>> from sklearn.utils import _get_n_jobs
>>> _get_n_jobs(4)
4
>>> jobs = _get_n_jobs(-2)
>>> assert jobs == max(cpu_count() - 1, 1)
>>> _get_n_jobs(0)
Traceback (most recent call last):
...
ValueError: Parameter n_jobs == 0 has no meaning.
"""
if n_jobs < 0:
return max(cpu_count() + 1 + n_jobs, 1)
elif n_jobs == 0:
raise ValueError('Parameter n_jobs == 0 has no meaning.')
else:
return n_jobs
def tosequence(x):
"""Cast iterable x to a Sequence, avoiding a copy if possible."""
if isinstance(x, np.ndarray):
return np.asarray(x)
elif isinstance(x, Sequence):
return x
else:
return list(x)
class ConvergenceWarning(UserWarning):
"""Custom warning to capture convergence problems"""
class DataDimensionalityWarning(UserWarning):
"""Custom warning to notify potential issues with data dimensionality"""
| bsd-3-clause |
MostafaGazar/tensorflow | tensorflow/examples/skflow/text_classification_builtin_rnn_model.py | 11 | 2984 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from sklearn import metrics
import pandas
import tensorflow as tf
from tensorflow.contrib import learn
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_bool('test_with_fake_data', False,
'Test the example code with fake data.')
MAX_DOCUMENT_LENGTH = 10
EMBEDDING_SIZE = 50
n_words = 0
def input_op_fn(x):
"""Customized function to transform batched x into embeddings."""
# Convert indexes of words into embeddings.
# This creates embeddings matrix of [n_words, EMBEDDING_SIZE] and then
# maps word indexes of the sequence into [batch_size, sequence_length,
# EMBEDDING_SIZE].
word_vectors = learn.ops.categorical_variable(x, n_classes=n_words,
embedding_size=EMBEDDING_SIZE, name='words')
# Split into list of embedding per word, while removing doc length dim.
# word_list results to be a list of tensors [batch_size, EMBEDDING_SIZE].
word_list = tf.unpack(word_vectors, axis=1)
return word_list
def main(unused_argv):
global n_words
# Prepare training and testing data
dbpedia = learn.datasets.load_dataset(
'dbpedia', test_with_fake_data=FLAGS.test_with_fake_data)
x_train = pandas.DataFrame(dbpedia.train.data)[1]
y_train = pandas.Series(dbpedia.train.target)
x_test = pandas.DataFrame(dbpedia.test.data)[1]
y_test = pandas.Series(dbpedia.test.target)
# Process vocabulary
vocab_processor = learn.preprocessing.VocabularyProcessor(MAX_DOCUMENT_LENGTH)
x_train = np.array(list(vocab_processor.fit_transform(x_train)))
x_test = np.array(list(vocab_processor.transform(x_test)))
n_words = len(vocab_processor.vocabulary_)
print('Total words: %d' % n_words)
# Build model: a single direction GRU with a single layer
classifier = learn.TensorFlowRNNClassifier(
rnn_size=EMBEDDING_SIZE, n_classes=15, cell_type='gru',
input_op_fn=input_op_fn, num_layers=1, bidirectional=False,
sequence_length=None, steps=1000, optimizer='Adam',
learning_rate=0.01, continue_training=True)
# Train and predict
classifier.fit(x_train, y_train, steps=100)
y_predicted = classifier.predict(x_test)
score = metrics.accuracy_score(y_test, y_predicted)
print('Accuracy: {0:f}'.format(score))
if __name__ == '__main__':
tf.app.run()
| apache-2.0 |
ruffoa/Qhacks2016 | Sigma-Securities/src/analysis/Analyser.py | 1 | 1691 | import csv
import sys
import numpy
from numpy import array
import decimal
import string
from sklearn import linear_model
from operator import itemgetter
from sklearn import tree
import random
from sklearn import cross_validation
rawList= []
positiveRawList = []
negativeRawList = []
element = []
positiveResponses=0
negativeResponses=0
f = open("alchemy.txt", 'rt')
try:
reader = csv.reader(f)
for row in reader:
element = []
element.append(float(row[0]))
element.append(float(row[1]))
element.append(float(row[2]))
element.append(float(row[3]))
element.append(float(row[4]))
rawList.append(element)
finally:
f.close()
a = array( rawList)
X = a[:, 0:2]
Y= a[:,4]
print X
print Y
lr = linear_model.LogisticRegression()
lr.fit(X,Y)
proba1 = lr.predict(X)
proba = lr.predict_proba(X)
print("lr output scores %s",proba1)
#lr_scores = cross_validation.cross_val_score(lr, X, Y, cv=10)
#print "lr accuracy: %0.2f (+/- %0.2f)" % (lr_scores.mean(), lr_scores.std() / 2)
dt = tree.DecisionTreeClassifier()
dt.fit(X,Y)
proba2 = dt.predict(X)
proba = dt.predict_proba(X)
print("dt output scores %s" % proba2)
#dt_scores = cross_validation.cross_val_score(dt, X, Y, cv=10)
#print "dt accuracy: %0.2f (+/- %0.2f)" % (dt_scores.mean(), dt_scores.std() / 2)
'''
from sklearn.linear_model import SGDClassifier
clf =SGDClassifier(loss="modified_huber", penalty="l2")
clf.fit(X,Y)
proba = clf.predict_proba(X)
clf_scores = cross_validation.cross_val_score(clf, X, Y, cv=10)
print "SGDClassifier accuracy: %0.2f (+/- %0.2f)" % (clf_scores.mean(), clf_scores.std() / 2)
'''
| mit |
comp-imaging/ProxImaL | proximal/examples/test_conv.py | 1 | 2524 | # Proximal
import sys
sys.path.append('../../')
from proximal.utils.utils import *
from proximal.halide.halide import *
from proximal.lin_ops import *
import numpy as np
from scipy import signal
from scipy import ndimage
import matplotlib.pyplot as plt
############################################################
# Load image
np_img = get_test_image(2048)
print('Type ', np_img.dtype, 'Shape', np_img.shape)
imgplot = plt.imshow(np_img, interpolation='nearest', clim=(0.0, 1.0))
imgplot.set_cmap('gray')
plt.title('Numpy')
# Force recompile in local dir
tic()
Halide('A_conv', recompile=True)
Halide('At_conv', recompile=True) # Force recompile in local dir
print('Compilation took: {0:.1f}ms'.format(toc()))
# Test the runner
output = np.zeros_like(np_img)
K = get_kernel(15, len(np_img.shape))
tic()
Halide('A_conv').A_conv(np_img, K, output) # Call
print('Running took: {0:.1f}ms'.format(toc()))
plt.figure()
imgplot = plt.imshow(output, interpolation='nearest', clim=(0.0, 1.0))
imgplot.set_cmap('gray')
plt.title('Output from Halide')
tic()
output_scipy = signal.convolve2d(np_img, K, mode='same', boundary='wrap')
print('Running Scipy.convolve2d took: {0:.1f}ms'.format(toc()))
fn = conv(K, Variable(np_img.shape), implem='halide')
output_ref = np.zeros(np_img.shape, dtype=np.float32, order='F')
tic()
fn.forward([np_img], [output_ref])
print('Running conv fft convolution took: {0:.1f}ms'.format(toc()))
# Error
print('Maximum error {0}'.format(np.amax(np.abs(output_ref - output))))
plt.figure()
imgplot = plt.imshow(output_ref * 255,
interpolation='nearest',
clim=(0.0, 255.0))
imgplot.set_cmap('gray')
plt.title('Output from Scipy')
############################################################################
# Check correlation
############################################################################
output_corr = np.zeros_like(np_img)
tic()
Halide('At_conv').At_conv(np_img, K, output_corr) # Call
print('Running correlation took: {0:.1f}ms'.format(toc()))
#output_corr_ref = signal.convolve2d(np_img, np.flipud(np.fliplr(K)), mode='same', boundary='wrap')
output_corr_ref = ndimage.correlate(np_img, K, mode='wrap')
# Adjoint.
output_corr_ref = np.zeros(np_img.shape, dtype=np.float32, order='F')
tic()
fn.adjoint([np_img], [output_corr_ref])
print('Running transpose conv fft convolution took: {0:.1f}ms'.format(toc()))
# Error
print('Maximum error correlation {0}'.format(
np.amax(np.abs(output_corr_ref - output_corr))))
plt.show()
| mit |
Mushirahmed/gnuradio | gr-utils/src/python/plot_data.py | 17 | 5768 | #
# Copyright 2007,2008,2011 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
try:
import scipy
except ImportError:
print "Please install SciPy to run this script (http://www.scipy.org/)"
raise SystemExit, 1
try:
from pylab import *
except ImportError:
print "Please install Matplotlib to run this script (http://matplotlib.sourceforge.net/)"
raise SystemExit, 1
from optparse import OptionParser
class plot_data:
def __init__(self, datatype, filenames, options):
self.hfile = list()
self.legend_text = list()
for f in filenames:
self.hfile.append(open(f, "r"))
self.legend_text.append(f)
self.block_length = options.block
self.start = options.start
self.sample_rate = options.sample_rate
self.datatype = datatype
self.sizeof_data = datatype().nbytes # number of bytes per sample in file
self.axis_font_size = 16
self.label_font_size = 18
self.title_font_size = 20
self.text_size = 22
# Setup PLOT
self.fig = figure(1, figsize=(16, 9), facecolor='w')
rcParams['xtick.labelsize'] = self.axis_font_size
rcParams['ytick.labelsize'] = self.axis_font_size
self.text_file_pos = figtext(0.10, 0.88, "File Position: ", weight="heavy", size=self.text_size)
self.text_block = figtext(0.40, 0.88, ("Block Size: %d" % self.block_length),
weight="heavy", size=self.text_size)
self.text_sr = figtext(0.60, 0.88, ("Sample Rate: %.2f" % self.sample_rate),
weight="heavy", size=self.text_size)
self.make_plots()
self.button_left_axes = self.fig.add_axes([0.45, 0.01, 0.05, 0.05], frameon=True)
self.button_left = Button(self.button_left_axes, "<")
self.button_left_callback = self.button_left.on_clicked(self.button_left_click)
self.button_right_axes = self.fig.add_axes([0.50, 0.01, 0.05, 0.05], frameon=True)
self.button_right = Button(self.button_right_axes, ">")
self.button_right_callback = self.button_right.on_clicked(self.button_right_click)
self.xlim = self.sp_f.get_xlim()
self.manager = get_current_fig_manager()
connect('key_press_event', self.click)
show()
def get_data(self, hfile):
self.text_file_pos.set_text("File Position: %d" % (hfile.tell()//self.sizeof_data))
try:
f = scipy.fromfile(hfile, dtype=self.datatype, count=self.block_length)
except MemoryError:
print "End of File"
else:
self.f = scipy.array(f)
self.time = scipy.array([i*(1/self.sample_rate) for i in range(len(self.f))])
def make_plots(self):
self.sp_f = self.fig.add_subplot(2,1,1, position=[0.075, 0.2, 0.875, 0.6])
self.sp_f.set_title(("Amplitude"), fontsize=self.title_font_size, fontweight="bold")
self.sp_f.set_xlabel("Time (s)", fontsize=self.label_font_size, fontweight="bold")
self.sp_f.set_ylabel("Amplitude (V)", fontsize=self.label_font_size, fontweight="bold")
self.plot_f = list()
maxval = -1e12
minval = 1e12
for hf in self.hfile:
# if specified on the command-line, set file pointer
hf.seek(self.sizeof_data*self.start, 1)
self.get_data(hf)
# Subplot for real and imaginary parts of signal
self.plot_f += plot(self.time, self.f, 'o-')
maxval = max(maxval, self.f.max())
minval = min(minval, self.f.min())
self.sp_f.set_ylim([1.5*minval, 1.5*maxval])
self.leg = self.sp_f.legend(self.plot_f, self.legend_text)
draw()
def update_plots(self):
maxval = -1e12
minval = 1e12
for hf,p in zip(self.hfile,self.plot_f):
self.get_data(hf)
p.set_data([self.time, self.f])
maxval = max(maxval, self.f.max())
minval = min(minval, self.f.min())
self.sp_f.set_ylim([1.5*minval, 1.5*maxval])
draw()
def click(self, event):
forward_valid_keys = [" ", "down", "right"]
backward_valid_keys = ["up", "left"]
if(find(event.key, forward_valid_keys)):
self.step_forward()
elif(find(event.key, backward_valid_keys)):
self.step_backward()
def button_left_click(self, event):
self.step_backward()
def button_right_click(self, event):
self.step_forward()
def step_forward(self):
self.update_plots()
def step_backward(self):
for hf in self.hfile:
# Step back in file position
if(hf.tell() >= 2*self.sizeof_data*self.block_length ):
hf.seek(-2*self.sizeof_data*self.block_length, 1)
else:
hf.seek(-hf.tell(),1)
self.update_plots()
def find(item_in, list_search):
try:
return list_search.index(item_in) != None
except ValueError:
return False
| gpl-3.0 |
rvraghav93/scikit-learn | examples/cluster/plot_ward_structured_vs_unstructured.py | 52 | 3435 | """
===========================================================
Hierarchical clustering: structured vs unstructured ward
===========================================================
Example builds a swiss roll dataset and runs
hierarchical clustering on their position.
For more information, see :ref:`hierarchical_clustering`.
In a first step, the hierarchical clustering is performed without connectivity
constraints on the structure and is solely based on distance, whereas in
a second step the clustering is restricted to the k-Nearest Neighbors
graph: it's a hierarchical clustering with structure prior.
Some of the clusters learned without connectivity constraints do not
respect the structure of the swiss roll and extend across different folds of
the manifolds. In contrast, when connectivity constraints are imposed,
the clusters form a nice parcellation of the swiss roll.
"""
# Authors : Vincent Michel, 2010
# Alexandre Gramfort, 2010
# Gael Varoquaux, 2010
# License: BSD 3 clause
print(__doc__)
import time as time
import numpy as np
import matplotlib.pyplot as plt
import mpl_toolkits.mplot3d.axes3d as p3
from sklearn.cluster import AgglomerativeClustering
from sklearn.datasets.samples_generator import make_swiss_roll
# #############################################################################
# Generate data (swiss roll dataset)
n_samples = 1500
noise = 0.05
X, _ = make_swiss_roll(n_samples, noise)
# Make it thinner
X[:, 1] *= .5
# #############################################################################
# Compute clustering
print("Compute unstructured hierarchical clustering...")
st = time.time()
ward = AgglomerativeClustering(n_clusters=6, linkage='ward').fit(X)
elapsed_time = time.time() - st
label = ward.labels_
print("Elapsed time: %.2fs" % elapsed_time)
print("Number of points: %i" % label.size)
# #############################################################################
# Plot result
fig = plt.figure()
ax = p3.Axes3D(fig)
ax.view_init(7, -80)
for l in np.unique(label):
ax.scatter(X[label == l, 0], X[label == l, 1], X[label == l, 2],
color=plt.cm.jet(np.float(l) / np.max(label + 1)),
s=20, edgecolor='k')
plt.title('Without connectivity constraints (time %.2fs)' % elapsed_time)
# #############################################################################
# Define the structure A of the data. Here a 10 nearest neighbors
from sklearn.neighbors import kneighbors_graph
connectivity = kneighbors_graph(X, n_neighbors=10, include_self=False)
# #############################################################################
# Compute clustering
print("Compute structured hierarchical clustering...")
st = time.time()
ward = AgglomerativeClustering(n_clusters=6, connectivity=connectivity,
linkage='ward').fit(X)
elapsed_time = time.time() - st
label = ward.labels_
print("Elapsed time: %.2fs" % elapsed_time)
print("Number of points: %i" % label.size)
# #############################################################################
# Plot result
fig = plt.figure()
ax = p3.Axes3D(fig)
ax.view_init(7, -80)
for l in np.unique(label):
ax.scatter(X[label == l, 0], X[label == l, 1], X[label == l, 2],
color=plt.cm.jet(float(l) / np.max(label + 1)),
s=20, edgecolor='k')
plt.title('With connectivity constraints (time %.2fs)' % elapsed_time)
plt.show()
| bsd-3-clause |
daaugusto/ppi | script/plot.py | 1 | 6876 | #!/usr/bin/python
import numpy as np
import matplotlib.pyplot as plt
import subprocess
import sys
import argparse
################################################################################
# usage: plot.py [-h] -e EXE -f FRONT_FILE -d TEST_DATASET [-t TITLE]
# [-x XLABEL] [-y YLABEL] [-fbl FIRST_BAR_LABEL]
# [-sbl SECOND_BAR_LABEL] [-out OUT_FILE] [-dup] [-min MIN]
# [-max MAX]
#
# optional arguments:
# -h, --help show this help message and exit
# -e EXE, --exe EXE Executable filename
# -f FRONT_FILE, --front-file FRONT_FILE
# Pareto-front file
# -d TEST_DATASET, --test-dataset TEST_DATASET
# Test dataset
# -t TITLE, --title TITLE
# Title of the figure [default=none]
# -x XLABEL, --xlabel XLABEL
# Label of the x-axis [default='Complexity (nodes)']
# -y YLABEL, --ylabel YLABEL
# Label of the y-axis [default='Error']
# -fbl FIRST_BAR_LABEL, --first-bar-label FIRST_BAR_LABEL
# Label of the first bar [default='Training set']
# -sbl SECOND_BAR_LABEL, --second-bar-label SECOND_BAR_LABEL
# Label of the second bar [default='Test set']
# -out OUT_FILE, --out-file OUT_FILE
# Figure output filename [default=plot.pdf]
# -dup, --plot-duplicate
# Plot duplicate entries instead of taking their mean
# [default=false]
# -min MIN, --min MIN Ignore sizes less than the given minimum size
# [default=0]
# -max MAX, --max MAX Ignore sizes greater than the given maximum size
# [default=inf]
###
# Example:
#
# python script/plot.py -e build/speed -f build/speed.front -d build/test.csv
################################################################################
parser = argparse.ArgumentParser()
parser.add_argument("-e", "--exe", required=True, help="Executable filename")
parser.add_argument("-f", "--front-file", required=True, help="Pareto-front file")
parser.add_argument("-d", "--test-dataset", required=True, help="Test dataset")
parser.add_argument("-t", "--title", default="", help="Title of the figure [default=none]")
parser.add_argument("-x", "--xlabel", default="Complexity (nodes)", help="Label of the x-axis [default='Complexity (nodes)']")
parser.add_argument("-y", "--ylabel", default="Error", help="Label of the y-axis [default='Error']")
parser.add_argument("-fbl", "--first-bar-label", default="Training set", help="Label of the first bar [default='Training set']")
parser.add_argument("-sbl", "--second-bar-label", default="Test set", help="Label of the second bar [default='Test set']")
parser.add_argument("-out", "--out-file", default="plot.pdf", help="Figure output filename [default=plot.pdf]")
parser.add_argument("-dup", "--plot-duplicate", action='store_true', default=False, help="Plot duplicate entries instead of taking their mean [default=false]")
parser.add_argument("-min", "--min", type=int, default=0, help="Ignore sizes less than the given minimum size [default=0]")
parser.add_argument("-max", "--max", type=int, default=sys.maxint, help="Ignore sizes greater than the given maximum size [default=inf]")
args = parser.parse_args()
try:
f = open(args.front_file,"r")
except IOError:
print >> sys.stderr, "Could not open file '" + args.front_file + "'"
sys.exit(1)
lines = f.readlines()
f.close()
size = []
tra_error = []
solution = []
for i in range(len(lines)):
sz = int(lines[i].split(';')[1])
if sz >= args.min and sz <= args.max:
size.append(sz)
tra_error.append(lines[i].split(';')[2])
solution.append(lines[i].split(';')[4])
size = np.array(size).astype(int)
tra_error = np.array(tra_error).astype(float)
try:
f = open(args.test_dataset,"r")
except IOError:
print >> sys.stderr, "Could not open file '" + args.test_dataset + "'"
sys.exit(1)
lines = f.readlines()
f.close()
tes_error = np.empty(len(solution));
print "# training error -> '%s'" % (args.first_bar_label)
print "# test error -> '%s'" % (args.second_bar_label)
print "ID size :: training error :: test error :: solution (encoded terminals)"
print "-- ---- -------------- -------------- ----------------------------"
for i in range(len(solution)):
tes_error[i] = subprocess.check_output(["./" + args.exe, "-d", args.test_dataset, "-sol", str(size[i]) + " " + str(solution[i].rstrip())])
print "[%-3d] %-4s :: %-14s :: %-14s :: %s" % (i+1, size[i], tra_error[i], tes_error[i], str(solution[i].rstrip()))
min_value = sys.float_info.max; max_value = 0.
fig = plt.figure(figsize=(18,4))
ax = fig.add_subplot(111)
if args.title:
ax.set_title(args.title, fontsize=14, fontweight='bold')
if args.xlabel:
ax.set_xlabel(args.xlabel, fontweight='bold', fontsize=12)
if args.ylabel:
ax.set_ylabel(args.ylabel, fontweight='bold', fontsize=12)
width = 0.4
vector = []; marks = []
for i in range(1,max(size[tes_error<1.e+30])+1):
if args.plot_duplicate:
if len(tra_error[(tes_error<1.e+30)]) > 0:
for complexity in tra_error[(tes_error<1.e+30) & (size==i)]:
marks.append(i)
vector.append(complexity)
else:
if len(tra_error[(tes_error<1.e+30) & (size==i)]) > 0:
marks.append(i)
vector.append(np.mean(tra_error[(tes_error<1.e+30) & (size==i)]))
ax.bar(range(1,len(marks)+1), vector, width, color='b', label=args.first_bar_label)
vector = []
for i in range(1,max(size[tes_error<1.e+30])+1):
if args.plot_duplicate:
if len(tes_error[(tes_error<1.e+30)]) > 0:
for complexity in tes_error[(tes_error<1.e+30) & (size==i)]:
vector.append(complexity)
else:
if len(tes_error[(tes_error<1.e+30) & (size==i)]) > 0:
vector.append(np.mean(tes_error[(tes_error<1.e+30) & (size==i)]))
ax.bar([x + width for x in range(1,len(marks)+1)], vector, width, color='r', label=args.second_bar_label)
if min(tra_error) < min_value: min_value = min(tra_error)
if max(tra_error) > max_value: max_value = max(tra_error)
if min(tes_error) < min_value: min_value = min(tes_error)
if max(tes_error[tes_error<1.e+30]) > max_value: max_value = max(tes_error[tes_error<1.e+30])
ax.set_ylim(min_value-0.03*min_value,max_value+0.03*max_value)
ax.set_xlim(width,len(marks)+4*width)
ax.set_xticks([x + width for x in range(1,len(marks)+1)])
xtickNames = ax.set_xticklabels(marks)
plt.setp(xtickNames, rotation=45, fontsize=8)
ax.legend(loc='upper right', scatterpoints=1, ncol=1, fontsize=12)
fig.savefig(args.out_file, dpi=300, facecolor='w', edgecolor='w', orientation='portrait', papertype='letter', bbox_inches='tight')
print "\nPlotted to file '%s'" % (args.out_file)
#plt.show()
| gpl-3.0 |
diana-hep/carl | tests/learning/test_parameterize.py | 1 | 2839 | # Carl is free software; you can redistribute it and/or modify it
# under the terms of the Revised BSD License; see LICENSE file for
# more details.
import numpy as np
from numpy.testing import assert_array_equal
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_raises
from sklearn.tree import DecisionTreeClassifier
from sklearn.tree import DecisionTreeRegressor
import theano
from carl.distributions import Normal
from carl.learning import ParameterStacker
from carl.learning import ParameterizedClassifier
from carl.learning import ParameterizedRegressor
from carl.learning import make_parameterized_classification
def test_parameter_stacker():
mu = theano.shared(0)
sigma = theano.shared(1)
p = Normal(mu=mu, sigma=sigma)
X = p.rvs(10)
tf = ParameterStacker(params=[mu, sigma])
Xt = tf.transform(X)
assert Xt.shape == (10, 1+2)
assert_array_almost_equal(Xt[:, 1], np.zeros(10))
assert_array_almost_equal(Xt[:, 2], np.ones(10))
mu.set_value(1)
Xt = tf.transform(X)
assert_array_almost_equal(Xt[:, 1], np.ones(10))
def test_parameterized_classifier():
mu0 = theano.shared(0)
mu1 = theano.shared(1)
p0 = Normal(mu=mu0)
p1 = Normal(mu=mu1)
X, y = make_parameterized_classification(p0, p1, 100, [mu0, mu1])
clf = ParameterizedClassifier(DecisionTreeClassifier(), params=[mu0, mu1])
clf.fit(X, y)
assert clf.n_features_ == 1
assert_array_almost_equal(y, clf.predict(X))
def test_parameterized_regressor():
mu = theano.shared(0)
p = Normal(mu=mu)
X = p.rvs(100)
y = p.pdf(X).astype(np.float32)
tf = ParameterStacker(params=[mu])
clf = ParameterizedRegressor(DecisionTreeRegressor(), params=[mu])
clf.fit(tf.transform(X), y)
assert clf.n_features_ == 1
assert_array_almost_equal(y, clf.predict(tf.transform(X)), decimal=3)
def test_make_parameterized_classification():
# Simple case
mu0 = theano.shared(0.)
mu1 = theano.shared(1.)
p0 = Normal(mu=mu0)
p1 = Normal(mu=mu1)
X, y = make_parameterized_classification(p0, p1, 100, [mu0, mu1])
assert X.shape == (100, 1+2)
assert_array_almost_equal(X[:, 1], np.zeros(100))
assert_array_almost_equal(X[:, 2], np.ones(100))
# Grid of parameter values
X, y = make_parameterized_classification(p0, p1, 100,
[(mu0, [0, 0.5]),
(mu1, [0.5, 1.5])])
assert X.shape == (100, 1+2)
assert_array_equal(np.unique(X[:, 1]), [0, 0.5])
assert_array_equal(np.unique(X[:, 2]), [0.5, 1.5])
d = set()
for row in X[:, 1:]:
d.add(tuple(row))
assert_array_equal(np.array(sorted(d)), [[0., 0.5], [0., 1.5],
[0.5, 0.5], [0.5, 1.5]])
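# Illustrative end-to-end sketch (not part of the original test suite): the
# pieces exercised above combine roughly as follows; sample size and parameter
# values are arbitrary.
#
#   mu0, mu1 = theano.shared(0.), theano.shared(1.)
#   p0, p1 = Normal(mu=mu0), Normal(mu=mu1)
#   X, y = make_parameterized_classification(p0, p1, 1000, [mu0, mu1])
#   clf = ParameterizedClassifier(DecisionTreeClassifier(), params=[mu0, mu1])
#   clf.fit(X, y)          # X carries the theta columns stacked after the data
#   labels = clf.predict(X)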
| bsd-3-clause |
OSSHealth/ghdata | augur/metrics/contributor.py | 1 | 23901 | #SPDX-License-Identifier: MIT
"""
Metrics that provide data about contributors & their associated activity
"""
import datetime
import sqlalchemy as s
import pandas as pd
from augur.util import register_metric
@register_metric()
def contributors(self, repo_group_id, repo_id=None, period='day', begin_date=None, end_date=None):
"""
    Returns total contribution counts per contributor for a project over the requested date range.
    DataFrame has these columns:
    user_id
    commits
    issues
    commit_comments
    issue_comments
    pull_requests
    pull_request_comments
    total
    repo_id
    repo_name
:param repo_id: The repository's id
:param repo_group_id: The repository's group id
:param period: To set the periodicity to 'day', 'week', 'month' or 'year', defaults to 'day'
:param begin_date: Specifies the begin date, defaults to '1970-1-1 00:00:00'
:param end_date: Specifies the end date, defaults to datetime.now()
:return: DataFrame of persons/period
"""
    # In this version, pull requests, pull request comments and issue comments haven't been calculated
if not begin_date:
begin_date = '1970-1-1 00:00:01'
if not end_date:
end_date = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
if repo_id:
contributorsSQL = s.sql.text("""
SELECT id AS user_id,
SUM(commits) AS commits,
SUM(issues) AS issues,
SUM(commit_comments) AS commit_comments,
SUM(issue_comments) AS issue_comments,
SUM(pull_requests) AS pull_requests,
SUM(pull_request_comments) AS pull_request_comments,
SUM(a.commits + a.issues + a.commit_comments + a.issue_comments + a.pull_requests +
a.pull_request_comments) AS total,
a.repo_id, repo.repo_name
FROM (
(SELECT gh_user_id AS id,
0 AS commits,
COUNT(*) AS issues,
0 AS commit_comments,
0 AS issue_comments,
0 AS pull_requests,
0 AS pull_request_comments,
repo_id
FROM issues
WHERE repo_id = :repo_id
AND created_at BETWEEN :begin_date AND :end_date
AND gh_user_id IS NOT NULL
AND pull_request IS NULL
GROUP BY gh_user_id, repo_id)
UNION ALL
(SELECT cmt_ght_author_id AS id,
COUNT(*) AS commits,
0 AS issues,
0 AS commit_comments,
0 AS issue_comments,
0 AS pull_requests,
0 AS pull_request_comments,
repo_id
FROM commits
WHERE repo_id = :repo_id
AND cmt_ght_author_id IS NOT NULL
AND cmt_committer_date BETWEEN :begin_date AND :end_date
GROUP BY cmt_ght_author_id, repo_id)
UNION ALL
(SELECT cntrb_id AS id,
0 AS commits,
0 AS issues,
COUNT(*) AS commit_comments,
0 AS issue_comments,
0 AS pull_requests,
0 AS pull_request_comments,
repo_id
FROM commit_comment_ref,
commits,
message
            WHERE commits.cmt_id = commit_comment_ref.cmt_id
AND message.msg_id = commit_comment_ref.msg_id
AND repo_id = :repo_id
AND created_at BETWEEN :begin_date AND :end_date
GROUP BY id, repo_id)
UNION ALL
(
SELECT message.cntrb_id AS id,
0 AS commits,
0 AS issues,
0 AS commit_comments,
count(*) AS issue_comments,
0 AS pull_requests,
0 AS pull_request_comments,
repo_id
FROM issues,
issue_message_ref,
message
WHERE repo_id = :repo_id
AND gh_user_id IS NOT NULL
AND issues.issue_id = issue_message_ref.issue_id
AND issue_message_ref.msg_id = message.msg_id
AND issues.pull_request IS NULL
AND created_at BETWEEN :begin_date AND :end_date
GROUP BY id, repo_id
)
) a, repo
WHERE a.repo_id = repo.repo_id
GROUP BY a.id, a.repo_id, repo_name
ORDER BY total DESC
""")
results = pd.read_sql(contributorsSQL, self.database, params={'repo_id': repo_id, 'period': period,
'begin_date': begin_date, 'end_date': end_date})
else:
contributorsSQL = s.sql.text("""
SELECT id AS user_id,
SUM(commits) AS commits,
SUM(issues) AS issues,
SUM(commit_comments) AS commit_comments,
SUM(issue_comments) AS issue_comments,
SUM(pull_requests) AS pull_requests,
SUM(pull_request_comments) AS pull_request_comments,
SUM(a.commits + a.issues + a.commit_comments + a.issue_comments + a.pull_requests +
a.pull_request_comments) AS total, a.repo_id, repo_name
FROM (
(SELECT gh_user_id AS id,
repo_id,
0 AS commits,
COUNT(*) AS issues,
0 AS commit_comments,
0 AS issue_comments,
0 AS pull_requests,
0 AS pull_request_comments
FROM issues
WHERE repo_id in (SELECT repo_id FROM repo WHERE repo_group_id=:repo_group_id)
AND created_at BETWEEN :begin_date AND :end_date
AND gh_user_id IS NOT NULL
AND pull_request IS NULL
GROUP BY gh_user_id, repo_id)
UNION ALL
(SELECT cmt_ght_author_id AS id,
repo_id,
COUNT(*) AS commits,
0 AS issues,
0 AS commit_comments,
0 AS issue_comments,
0 AS pull_requests,
0 AS pull_request_comments
FROM commits
WHERE repo_id in (SELECT repo_id FROM repo WHERE repo_group_id=:repo_group_id)
AND cmt_ght_author_id IS NOT NULL
AND cmt_committer_date BETWEEN :begin_date AND :end_date
GROUP BY cmt_ght_author_id, repo_id)
UNION ALL
(SELECT cntrb_id AS id,
repo_id,
0 AS commits,
0 AS issues,
COUNT(*) AS commit_comments,
0 AS issue_comments,
0 AS pull_requests,
0 AS pull_request_comments
FROM commit_comment_ref,
commits,
message
            WHERE commits.cmt_id = commit_comment_ref.cmt_id
AND message.msg_id = commit_comment_ref.msg_id
AND repo_id in (SELECT repo_id FROM repo WHERE repo_group_id=:repo_group_id)
AND created_at BETWEEN :begin_date AND :end_date
GROUP BY id, repo_id)
UNION ALL
(
SELECT message.cntrb_id AS id,
repo_id,
0 AS commits,
0 AS issues,
0 AS commit_comments,
count(*) AS issue_comments,
0 AS pull_requests,
0 AS pull_request_comments
FROM issues,
issue_message_ref,
message
WHERE repo_id in (SELECT repo_id FROM repo WHERE repo_group_id=:repo_group_id)
AND gh_user_id IS NOT NULL
AND issues.issue_id = issue_message_ref.issue_id
AND issue_message_ref.msg_id = message.msg_id
AND issues.pull_request IS NULL
AND created_at BETWEEN :begin_date AND :end_date
GROUP BY id, repo_id
)
) a, repo
WHERE a.repo_id = repo.repo_id
GROUP BY a.id, a.repo_id, repo_name
ORDER BY total DESC
""")
results = pd.read_sql(contributorsSQL, self.database, params={'repo_group_id': repo_group_id, 'period': period,
'begin_date': begin_date, 'end_date': end_date})
return results
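# Illustrative usage of `contributors` above (not part of the original module):
# assuming `metrics` is the object these metric functions are registered on and
# that it is connected to a populated Augur database; ids and dates below are
# hypothetical.
#
#   df = metrics.contributors(repo_group_id=20, repo_id=21000,
#                             begin_date='2019-01-01 00:00:00',
#                             end_date='2019-12-31 00:00:00')
#   top = df[['user_id', 'commits', 'issues', 'total']].head(10)  # already ordered by total DESC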
@register_metric()
def contributors_new(self, repo_group_id, repo_id=None, period='day', begin_date=None, end_date=None):
"""
    Returns a timeseries of new contributors to a project.
:param repo_id: The repository's id
:param repo_group_id: The repository's group id
:param period: To set the periodicity to 'day', 'week', 'month' or 'year', defaults to 'day'
:param begin_date: Specifies the begin date, defaults to '1970-1-1 00:00:00'
:param end_date: Specifies the end date, defaults to datetime.now()
:return: DataFrame of persons/period
"""
    # In this version, pull requests, pull request comments and issue comments haven't been calculated
if not begin_date:
begin_date = '1970-1-1 00:00:01'
if not end_date:
end_date = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
if repo_id:
contributorsNewSQL = s.sql.text("""
SELECT date_trunc(:period, b.created_at::DATE) AS date, COUNT(id) AS new_contributors, repo.repo_id, repo_name
FROM (
SELECT id as id, MIN(created_at) AS created_at, a.repo_id
FROM (
(SELECT gh_user_id AS id, MIN(created_at) AS created_at, repo_id
FROM issues
WHERE repo_id = :repo_id
AND created_at BETWEEN :begin_date AND :end_date
AND gh_user_id IS NOT NULL
AND pull_request IS NULL
GROUP BY gh_user_id, repo_id)
UNION ALL
(SELECT cmt_ght_author_id AS id,
MIN(TO_TIMESTAMP(cmt_author_date, 'YYYY-MM-DD')) AS created_at,
repo_id
FROM commits
WHERE repo_id = :repo_id
AND cmt_ght_author_id IS NOT NULL
AND TO_TIMESTAMP(cmt_author_date, 'YYYY-MM-DD') BETWEEN :begin_date AND :end_date
GROUP BY cmt_ght_author_id, repo_id)
UNION ALL
(SELECT cntrb_id as id, MIN(created_at) AS created_at, commits.repo_id
FROM commit_comment_ref,
commits,
message
where commits.cmt_id = commit_comment_ref.cmt_id
and commits.repo_id = :repo_id
and commit_comment_ref.msg_id = message.msg_id
group by id, commits.repo_id)
UNION ALL
(SELECT issue_events.cntrb_id AS id, MIN(issue_events.created_at) AS created_at, repo_id
FROM issue_events, issues
WHERE issues.repo_id = :repo_id
AND issues.issue_id = issue_events.issue_id
AND issues.pull_request IS NULL
AND issue_events.created_at BETWEEN :begin_date AND :end_date
AND issue_events.cntrb_id IS NOT NULL
AND action = 'closed'
GROUP BY issue_events.cntrb_id, repo_id)
) a
GROUP BY a.id, a.repo_id) b, repo
WHERE repo.repo_id = b.repo_id
GROUP BY date, repo.repo_id, repo_name
""")
results = pd.read_sql(contributorsNewSQL, self.database, params={'repo_id': repo_id, 'period': period,
'begin_date': begin_date, 'end_date': end_date})
else:
contributorsNewSQL = s.sql.text("""
SELECT date_trunc(:period, b.created_at::DATE) AS date, COUNT(id) AS new_contributors, repo.repo_id, repo_name
FROM (
SELECT id as id, MIN(created_at) AS created_at, a.repo_id
FROM (
(SELECT gh_user_id AS id, MIN(created_at) AS created_at, repo_id
FROM issues
WHERE repo_id in (SELECT repo_id FROM repo WHERE repo_group_id=:repo_group_id)
AND created_at BETWEEN :begin_date AND :end_date
AND gh_user_id IS NOT NULL
AND pull_request IS NULL
GROUP BY gh_user_id, repo_id)
UNION ALL
(SELECT cmt_ght_author_id AS id,
MIN(TO_TIMESTAMP(cmt_author_date, 'YYYY-MM-DD')) AS created_at,
repo_id
FROM commits
WHERE repo_id in (SELECT repo_id FROM repo WHERE repo_group_id=:repo_group_id)
AND cmt_ght_author_id IS NOT NULL
AND TO_TIMESTAMP(cmt_author_date, 'YYYY-MM-DD') BETWEEN :begin_date AND :end_date
GROUP BY cmt_ght_author_id, repo_id)
UNION ALL
(SELECT cntrb_id as id, MIN(created_at) AS created_at, commits.repo_id
FROM commit_comment_ref,
commits,
message
where commits.cmt_id = commit_comment_ref.cmt_id
and commits.repo_id in (SELECT repo_id FROM repo WHERE repo_group_id=:repo_group_id)
and commit_comment_ref.msg_id = message.msg_id
group by id, commits.repo_id)
UNION ALL
(SELECT issue_events.cntrb_id AS id, MIN(issue_events.created_at) AS created_at, repo_id
FROM issue_events, issues
WHERE issues.repo_id in (SELECT repo_id FROM repo WHERE repo_group_id=:repo_group_id)
AND issues.issue_id = issue_events.issue_id
AND issues.pull_request IS NULL
AND issue_events.created_at BETWEEN :begin_date AND :end_date
AND issue_events.cntrb_id IS NOT NULL
AND action = 'closed'
GROUP BY issue_events.cntrb_id, repo_id)
) a
GROUP BY a.id, a.repo_id) b, repo
WHERE repo.repo_id = b.repo_id
GROUP BY date, repo.repo_id, repo_name
""")
results = pd.read_sql(contributorsNewSQL, self.database, params={'repo_group_id': repo_group_id, 'period': period,
'begin_date': begin_date, 'end_date': end_date})
return results
@register_metric()
def lines_changed_by_author(self, repo_group_id, repo_id=None):
"""
    Returns the number of lines changed per author per week
    :param repo_group_id: The repository's group id
    :param repo_id: The repository's id
"""
if repo_id:
linesChangedByAuthorSQL = s.sql.text("""
SELECT cmt_author_email, date_trunc('week', cmt_author_date::date) as cmt_author_date, cmt_author_affiliation as affiliation,
SUM(cmt_added) as additions, SUM(cmt_removed) as deletions, SUM(cmt_whitespace) as whitespace, repo_name
FROM commits JOIN repo ON commits.repo_id = repo.repo_id
WHERE commits.repo_id = :repo_id
GROUP BY commits.repo_id, date_trunc('week', cmt_author_date::date), cmt_author_affiliation, cmt_author_email, repo_name
ORDER BY date_trunc('week', cmt_author_date::date) ASC;
""")
results = pd.read_sql(linesChangedByAuthorSQL, self.database, params={"repo_id": repo_id})
return results
else:
linesChangedByAuthorSQL = s.sql.text("""
SELECT cmt_author_email, date_trunc('week', cmt_author_date::date) as cmt_author_date, cmt_author_affiliation as affiliation,
SUM(cmt_added) as additions, SUM(cmt_removed) as deletions, SUM(cmt_whitespace) as whitespace
FROM commits
WHERE repo_id in (SELECT repo_id FROM repo WHERE repo_group_id=:repo_group_id)
GROUP BY repo_id, date_trunc('week', cmt_author_date::date), cmt_author_affiliation, cmt_author_email
ORDER BY date_trunc('week', cmt_author_date::date) ASC;
""")
results = pd.read_sql(linesChangedByAuthorSQL, self.database, params={"repo_group_id": repo_group_id})
return results
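# Illustrative post-processing of `lines_changed_by_author` above (not part of
# the original module): the frame is weekly per author, so per-author totals
# can be derived with a plain pandas aggregation; ids below are hypothetical.
#
#   weekly = metrics.lines_changed_by_author(repo_group_id=20, repo_id=21000)
#   totals = (weekly.groupby('cmt_author_email')[['additions', 'deletions']]
#                   .sum()
#                   .sort_values('additions', ascending=False))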
@register_metric()
def contributors_code_development(self, repo_group_id, repo_id=None, period='all', begin_date=None, end_date=None):
"""
    Returns per-contributor code development totals for a project.
    DataFrame has these columns:
    email
    commits
    lines_added
    repo_id
    repo_name
:param repo_id: The repository's id
:param repo_group_id: The repository's group id
    :param period: To set the periodicity to 'all', 'day', 'week', 'month' or 'year', defaults to 'all'
    :param begin_date: Specifies the begin date, defaults to '1970-1-1 00:00:00'
    :param end_date: Specifies the end date, defaults to datetime.now()
:return: DataFrame of persons/period
"""
    # In this version, pull requests, pull request comments and issue comments haven't been calculated
if not begin_date:
begin_date = '1970-1-1 00:00:01'
if not end_date:
end_date = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
if repo_id:
contributorsSQL = s.sql.text("""
SELECT
email AS email,
SUM(commits) AS commits,
SUM(lines_added) AS lines_added,
a.repo_id, repo.repo_name
FROM (
(
SELECT repo_id, email, SUM(patches)::int as commits, 0 as lines_added
FROM
(SELECT repo_id, email, patches
FROM dm_repo_annual
WHERE repo_id = :repo_id
ORDER BY patches DESC) a
GROUP BY email, a.repo_id
)
UNION ALL
(
SELECT repo_id, cmt_author_email as email, 0 as commits, SUM(cmt_added) as lines_added
-- cmt_author_affiliation as affiliation,
-- SUM(cmt_added) as additions, SUM(cmt_removed) as deletions, SUM(cmt_whitespace) as whitespace,
FROM commits
WHERE commits.repo_id = :repo_id
GROUP BY commits.repo_id, cmt_author_date, cmt_author_affiliation, cmt_author_email
ORDER BY cmt_author_date ASC
)
) a, repo
WHERE a.repo_id = repo.repo_id
GROUP BY a.email, a.repo_id, repo_name
""")
results = pd.read_sql(contributorsSQL, self.database, params={'repo_id': repo_id, 'period': period,
'begin_date': begin_date, 'end_date': end_date})
else:
contributorsSQL = s.sql.text("""
SELECT
email AS email,
SUM(commits) AS commits,
SUM(lines_added) AS lines_added,
a.repo_id, repo.repo_name
FROM (
(
SELECT repo_id, email, SUM(patches)::INT AS commits, 0 AS lines_added
FROM
(SELECT dm_repo_annual.repo_id, email, patches
FROM dm_repo_annual JOIN repo ON repo.repo_id = dm_repo_annual.repo_id
WHERE repo_group_id = :repo_group_id
ORDER BY patches DESC) a
GROUP BY email, a.repo_id
)
UNION ALL
(
SELECT commits.repo_id, cmt_author_email AS email, 0 AS commits, SUM(cmt_added) AS lines_added
-- cmt_author_affiliation as affiliation,
-- SUM(cmt_added) as additions, SUM(cmt_removed) as deletions, SUM(cmt_whitespace) as whitespace,
FROM commits JOIN repo ON repo.repo_id = commits.repo_id
WHERE repo_group_id = :repo_group_id
GROUP BY commits.repo_id, cmt_author_date, cmt_author_affiliation, cmt_author_email
ORDER BY cmt_author_date ASC
)
) a, repo
WHERE a.repo_id = repo.repo_id
GROUP BY a.email, a.repo_id, repo_name
ORDER BY commits desc, email
""")
results = pd.read_sql(contributorsSQL, self.database, params={'repo_group_id': repo_group_id, 'period': period,
'begin_date': begin_date, 'end_date': end_date})
return results
| mit |
tomsilver/nupic | examples/opf/clients/hotgym/anomaly/one_gym/run.py | 15 | 4940 | #!/usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Groups together code used for creating a NuPIC model and dealing with IO.
(This is a component of the One Hot Gym Anomaly Tutorial.)
"""
import importlib
import sys
import csv
import datetime
from nupic.data.inference_shifter import InferenceShifter
from nupic.frameworks.opf.modelfactory import ModelFactory
import nupic_anomaly_output
DESCRIPTION = (
"Starts a NuPIC model from the model params returned by the swarm\n"
"and pushes each line of input from the gym into the model. Results\n"
"are written to an output file (default) or plotted dynamically if\n"
"the --plot option is specified.\n"
)
GYM_NAME = "rec-center-hourly"
DATA_DIR = "."
MODEL_PARAMS_DIR = "./model_params"
# '7/2/10 0:00'
DATE_FORMAT = "%m/%d/%y %H:%M"
def createModel(modelParams):
"""
Given a model params dictionary, create a CLA Model. Automatically enables
inference for kw_energy_consumption.
:param modelParams: Model params dict
:return: OPF Model object
"""
model = ModelFactory.create(modelParams)
model.enableInference({"predictedField": "kw_energy_consumption"})
return model
def getModelParamsFromName(gymName):
"""
Given a gym name, assumes a matching model params python module exists within
the model_params directory and attempts to import it.
:param gymName: Gym name, used to guess the model params module name.
:return: OPF Model params dictionary
"""
importName = "model_params.%s_model_params" % (
gymName.replace(" ", "_").replace("-", "_")
)
print "Importing model params from %s" % importName
try:
importedModelParams = importlib.import_module(importName).MODEL_PARAMS
except ImportError:
raise Exception("No model params exist for '%s'. Run swarm first!"
% gymName)
return importedModelParams
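# For example (illustrative): with GYM_NAME = "rec-center-hourly", the import
# above resolves to "model_params.rec_center_hourly_model_params", i.e. the
# swarm output saved as model_params/rec_center_hourly_model_params.py.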
def runIoThroughNupic(inputData, model, gymName, plot):
"""
Handles looping over the input data and passing each row into the given model
object, as well as extracting the result object and passing it into an output
handler.
:param inputData: file path to input data CSV
:param model: OPF Model object
:param gymName: Gym name, used for output handler naming
:param plot: Whether to use matplotlib or not. If false, uses file output.
"""
inputFile = open(inputData, "rb")
csvReader = csv.reader(inputFile)
# skip header rows
csvReader.next()
csvReader.next()
csvReader.next()
shifter = InferenceShifter()
if plot:
output = nupic_anomaly_output.NuPICPlotOutput(gymName)
else:
output = nupic_anomaly_output.NuPICFileOutput(gymName)
counter = 0
for row in csvReader:
counter += 1
if (counter % 100 == 0):
print "Read %i lines..." % counter
timestamp = datetime.datetime.strptime(row[0], DATE_FORMAT)
consumption = float(row[1])
result = model.run({
"timestamp": timestamp,
"kw_energy_consumption": consumption
})
if plot:
result = shifter.shift(result)
prediction = result.inferences["multiStepBestPredictions"][1]
anomalyScore = result.inferences["anomalyScore"]
output.write(timestamp, consumption, prediction, anomalyScore)
inputFile.close()
output.close()
def runModel(gymName, plot=False):
"""
  Assumes the gymName corresponds to both a like-named model_params file in the
model_params directory, and that the data exists in a like-named CSV file in
the current directory.
:param gymName: Important for finding model params and input CSV file
:param plot: Plot in matplotlib? Don't use this unless matplotlib is
installed.
"""
print "Creating model from %s..." % gymName
model = createModel(getModelParamsFromName(gymName))
inputData = "%s/%s.csv" % (DATA_DIR, gymName.replace(" ", "_"))
runIoThroughNupic(inputData, model, gymName, plot)
if __name__ == "__main__":
print DESCRIPTION
plot = False
args = sys.argv[1:]
if "--plot" in args:
plot = True
runModel(GYM_NAME, plot=plot) | gpl-3.0 |
laurent-george/bokeh | examples/plotting/server/glucose.py | 18 | 1612 | # The plot server must be running
# Go to http://localhost:5006/bokeh to view this plot
import pandas as pd
from bokeh.sampledata.glucose import data
from bokeh.plotting import figure, show, output_server, vplot
output_server("glucose")
TOOLS = "pan,wheel_zoom,box_zoom,reset,save"
p1 = figure(x_axis_type="datetime", tools=TOOLS)
p1.line(data.index, data['glucose'], color='red', legend='glucose')
p1.line(data.index, data['isig'], color='blue', legend='isig')
p1.title = "Glucose Measurements"
p1.xaxis.axis_label = 'Date'
p1.yaxis.axis_label = 'Value'
day = data.ix['2010-10-06']
highs = day[day['glucose'] > 180]
lows = day[day['glucose'] < 80]
p2 = figure(x_axis_type="datetime", tools=TOOLS)
p2.line(day.index.to_series(), day['glucose'],
line_color="gray", line_dash="4 4", line_width=1, legend="glucose")
p2.circle(highs.index, highs['glucose'], size=6, color='tomato', legend="high")
p2.circle(lows.index, lows['glucose'], size=6, color='navy', legend="low")
p2.title = "Glucose Range"
p2.xgrid[0].grid_line_color=None
p2.ygrid[0].grid_line_alpha=0.5
p2.xaxis.axis_label = 'Time'
p2.yaxis.axis_label = 'Value'
data['inrange'] = (data['glucose'] < 180) & (data['glucose'] > 80)
window = 30.5*288  # 288 samples per day (5-minute intervals) * 30.5 days ~ one month
inrange = pd.rolling_sum(data.inrange, window)
inrange = inrange.dropna()
inrange = inrange/float(window)
p3 = figure(x_axis_type="datetime", tools=TOOLS)
p3.line(inrange.index, inrange, line_color="navy")
p3.title = "Glucose In-Range Rolling Sum"
p3.xaxis.axis_label = 'Date'
p3.yaxis.axis_label = 'Proportion In-Range'
show(vplot(p1,p2,p3))
| bsd-3-clause |
ElDeveloper/scikit-learn | benchmarks/bench_plot_omp_lars.py | 266 | 4447 | """Benchmarks of orthogonal matching pursuit (:ref:`OMP`) versus least angle
regression (:ref:`least_angle_regression`)
The input data is mostly low rank but has a fat infinite tail.
"""
from __future__ import print_function
import gc
import sys
from time import time
import numpy as np
from sklearn.linear_model import lars_path, orthogonal_mp
from sklearn.datasets.samples_generator import make_sparse_coded_signal
def compute_bench(samples_range, features_range):
it = 0
results = dict()
lars = np.empty((len(features_range), len(samples_range)))
lars_gram = lars.copy()
omp = lars.copy()
omp_gram = lars.copy()
max_it = len(samples_range) * len(features_range)
for i_s, n_samples in enumerate(samples_range):
for i_f, n_features in enumerate(features_range):
it += 1
n_informative = n_features / 10
print('====================')
print('Iteration %03d of %03d' % (it, max_it))
print('====================')
# dataset_kwargs = {
# 'n_train_samples': n_samples,
# 'n_test_samples': 2,
# 'n_features': n_features,
# 'n_informative': n_informative,
# 'effective_rank': min(n_samples, n_features) / 10,
# #'effective_rank': None,
# 'bias': 0.0,
# }
dataset_kwargs = {
'n_samples': 1,
'n_components': n_features,
'n_features': n_samples,
'n_nonzero_coefs': n_informative,
'random_state': 0
}
print("n_samples: %d" % n_samples)
print("n_features: %d" % n_features)
y, X, _ = make_sparse_coded_signal(**dataset_kwargs)
X = np.asfortranarray(X)
gc.collect()
print("benchmarking lars_path (with Gram):", end='')
sys.stdout.flush()
tstart = time()
G = np.dot(X.T, X) # precomputed Gram matrix
Xy = np.dot(X.T, y)
lars_path(X, y, Xy=Xy, Gram=G, max_iter=n_informative)
delta = time() - tstart
print("%0.3fs" % delta)
lars_gram[i_f, i_s] = delta
gc.collect()
print("benchmarking lars_path (without Gram):", end='')
sys.stdout.flush()
tstart = time()
lars_path(X, y, Gram=None, max_iter=n_informative)
delta = time() - tstart
print("%0.3fs" % delta)
lars[i_f, i_s] = delta
gc.collect()
print("benchmarking orthogonal_mp (with Gram):", end='')
sys.stdout.flush()
tstart = time()
orthogonal_mp(X, y, precompute=True,
n_nonzero_coefs=n_informative)
delta = time() - tstart
print("%0.3fs" % delta)
omp_gram[i_f, i_s] = delta
gc.collect()
print("benchmarking orthogonal_mp (without Gram):", end='')
sys.stdout.flush()
tstart = time()
orthogonal_mp(X, y, precompute=False,
n_nonzero_coefs=n_informative)
delta = time() - tstart
print("%0.3fs" % delta)
omp[i_f, i_s] = delta
results['time(LARS) / time(OMP)\n (w/ Gram)'] = (lars_gram / omp_gram)
results['time(LARS) / time(OMP)\n (w/o Gram)'] = (lars / omp)
return results
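# Illustrative quick run (not part of the original benchmark): a small grid can
# be used to sanity-check the setup before the full benchmark below, e.g.
#
#   res = compute_bench(np.array([500, 1000]), np.array([500, 1000]))
#   # Each value is time(LARS) / time(OMP) for one (n_features, n_samples)
#   # cell, so entries above 1 mean lars_path was slower than orthogonal_mp.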
if __name__ == '__main__':
samples_range = np.linspace(1000, 5000, 5).astype(np.int)
features_range = np.linspace(1000, 5000, 5).astype(np.int)
results = compute_bench(samples_range, features_range)
max_time = max(np.max(t) for t in results.values())
import pylab as pl
fig = pl.figure('scikit-learn OMP vs. LARS benchmark results')
for i, (label, timings) in enumerate(sorted(results.iteritems())):
ax = fig.add_subplot(1, 2, i)
vmax = max(1 - timings.min(), -1 + timings.max())
pl.matshow(timings, fignum=False, vmin=1 - vmax, vmax=1 + vmax)
ax.set_xticklabels([''] + map(str, samples_range))
ax.set_yticklabels([''] + map(str, features_range))
pl.xlabel('n_samples')
pl.ylabel('n_features')
pl.title(label)
pl.subplots_adjust(0.1, 0.08, 0.96, 0.98, 0.4, 0.63)
ax = pl.axes([0.1, 0.08, 0.8, 0.06])
pl.colorbar(cax=ax, orientation='horizontal')
pl.show()
| bsd-3-clause |
gfyoung/pandas | pandas/tests/io/parser/test_quoting.py | 6 | 5084 | """
Tests that quoting specifications are properly handled
during parsing for all of the parsers defined in parsers.py
"""
import csv
from io import StringIO
import pytest
from pandas.errors import ParserError
from pandas import DataFrame
import pandas._testing as tm
@pytest.mark.parametrize(
"kwargs,msg",
[
({"quotechar": "foo"}, '"quotechar" must be a(n)? 1-character string'),
(
{"quotechar": None, "quoting": csv.QUOTE_MINIMAL},
"quotechar must be set if quoting enabled",
),
({"quotechar": 2}, '"quotechar" must be string, not int'),
],
)
def test_bad_quote_char(all_parsers, kwargs, msg):
data = "1,2,3"
parser = all_parsers
with pytest.raises(TypeError, match=msg):
parser.read_csv(StringIO(data), **kwargs)
@pytest.mark.parametrize(
"quoting,msg",
[
("foo", '"quoting" must be an integer'),
(5, 'bad "quoting" value'), # quoting must be in the range [0, 3]
],
)
def test_bad_quoting(all_parsers, quoting, msg):
data = "1,2,3"
parser = all_parsers
with pytest.raises(TypeError, match=msg):
parser.read_csv(StringIO(data), quoting=quoting)
def test_quote_char_basic(all_parsers):
parser = all_parsers
data = 'a,b,c\n1,2,"cat"'
expected = DataFrame([[1, 2, "cat"]], columns=["a", "b", "c"])
result = parser.read_csv(StringIO(data), quotechar='"')
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("quote_char", ["~", "*", "%", "$", "@", "P"])
def test_quote_char_various(all_parsers, quote_char):
parser = all_parsers
expected = DataFrame([[1, 2, "cat"]], columns=["a", "b", "c"])
data = 'a,b,c\n1,2,"cat"'
new_data = data.replace('"', quote_char)
result = parser.read_csv(StringIO(new_data), quotechar=quote_char)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("quoting", [csv.QUOTE_MINIMAL, csv.QUOTE_NONE])
@pytest.mark.parametrize("quote_char", ["", None])
def test_null_quote_char(all_parsers, quoting, quote_char):
kwargs = {"quotechar": quote_char, "quoting": quoting}
data = "a,b,c\n1,2,3"
parser = all_parsers
if quoting != csv.QUOTE_NONE:
# Sanity checking.
msg = "quotechar must be set if quoting enabled"
with pytest.raises(TypeError, match=msg):
parser.read_csv(StringIO(data), **kwargs)
else:
expected = DataFrame([[1, 2, 3]], columns=["a", "b", "c"])
result = parser.read_csv(StringIO(data), **kwargs)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"kwargs,exp_data",
[
({}, [[1, 2, "foo"]]), # Test default.
# QUOTE_MINIMAL only applies to CSV writing, so no effect on reading.
({"quotechar": '"', "quoting": csv.QUOTE_MINIMAL}, [[1, 2, "foo"]]),
# QUOTE_MINIMAL only applies to CSV writing, so no effect on reading.
({"quotechar": '"', "quoting": csv.QUOTE_ALL}, [[1, 2, "foo"]]),
# QUOTE_NONE tells the reader to do no special handling
# of quote characters and leave them alone.
({"quotechar": '"', "quoting": csv.QUOTE_NONE}, [[1, 2, '"foo"']]),
# QUOTE_NONNUMERIC tells the reader to cast
# all non-quoted fields to float
({"quotechar": '"', "quoting": csv.QUOTE_NONNUMERIC}, [[1.0, 2.0, "foo"]]),
],
)
def test_quoting_various(all_parsers, kwargs, exp_data):
data = '1,2,"foo"'
parser = all_parsers
columns = ["a", "b", "c"]
result = parser.read_csv(StringIO(data), names=columns, **kwargs)
expected = DataFrame(exp_data, columns=columns)
tm.assert_frame_equal(result, expected)
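# For reference (not part of the original tests): the csv module constants used
# above are QUOTE_MINIMAL == 0, QUOTE_ALL == 1, QUOTE_NONNUMERIC == 2 and
# QUOTE_NONE == 3, which is why the "bad quoting value" case above uses 5 as an
# out-of-range value.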
@pytest.mark.parametrize(
"doublequote,exp_data", [(True, [[3, '4 " 5']]), (False, [[3, '4 " 5"']])]
)
def test_double_quote(all_parsers, doublequote, exp_data):
parser = all_parsers
data = 'a,b\n3,"4 "" 5"'
result = parser.read_csv(StringIO(data), quotechar='"', doublequote=doublequote)
expected = DataFrame(exp_data, columns=["a", "b"])
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("quotechar", ['"', "\u0001"])
def test_quotechar_unicode(all_parsers, quotechar):
# see gh-14477
data = "a\n1"
parser = all_parsers
expected = DataFrame({"a": [1]})
result = parser.read_csv(StringIO(data), quotechar=quotechar)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("balanced", [True, False])
def test_unbalanced_quoting(all_parsers, balanced):
# see gh-22789.
parser = all_parsers
data = 'a,b,c\n1,2,"3'
if balanced:
# Re-balance the quoting and read in without errors.
expected = DataFrame([[1, 2, 3]], columns=["a", "b", "c"])
result = parser.read_csv(StringIO(data + '"'))
tm.assert_frame_equal(result, expected)
else:
msg = (
"EOF inside string starting at row 1"
if parser.engine == "c"
else "unexpected end of data"
)
with pytest.raises(ParserError, match=msg):
parser.read_csv(StringIO(data))
| bsd-3-clause |
deeplook/bokeh | bokeh/charts/builder/horizon_builder.py | 43 | 12508 | """This is the Bokeh charts interface. It gives you a high level API to build
complex plots in a simple way.
This is the Horizon class, which lets you build your Horizon charts by just
passing the arguments to the Chart class and calling the proper functions.
"""
from __future__ import absolute_import, division
import math
from six import string_types
from .._builder import Builder, create_and_build
from ...models import ColumnDataSource, Range1d, DataRange1d, FactorRange, GlyphRenderer, CategoricalAxis
from ...models.glyphs import Patches
from ...properties import Any, Color, Int
#-----------------------------------------------------------------------------
# Classes and functions
#-----------------------------------------------------------------------------
def Horizon(values, index=None, num_folds=3, pos_color='#006400',
neg_color='#6495ed', xscale='datetime', xgrid=False, ygrid=False,
**kws):
""" Create a Horizon chart using :class:`HorizonBuilder <bokeh.charts.builder.horizon_builder.HorizonBuilder>`
render the geometry from values, index and num_folds.
Args:
values (iterable): iterable 2d representing the data series
values matrix.
index (str|1d iterable, optional): can be used to specify a common custom
index for all data series as an **1d iterable** of any sort that will be used as
series common index or a **string** that corresponds to the key of the
mapping to be used as index (and not as data series) if
area.values is a mapping (like a dict, an OrderedDict
or a pandas DataFrame)
num_folds (int, optional): The number of folds stacked on top
of each other. (default: 3)
pos_color (color, optional): The color of the positive folds.
(default: "#006400")
neg_color (color, optional): The color of the negative folds.
(default: "#6495ed")
    In addition to the parameters specific to this chart,
:ref:`userguide_charts_generic_arguments` are also accepted as keyword parameters.
Returns:
a new :class:`Chart <bokeh.charts.Chart>`
Examples:
.. bokeh-plot::
:source-position: above
import datetime
from collections import OrderedDict
from bokeh.charts import Horizon, output_file, show
now = datetime.datetime.now()
dts = [now+datetime.timedelta(seconds=i) for i in range(10)]
xyvalues = OrderedDict({'Date': dts})
y_python = xyvalues['python'] = [2, 3, 7, 5, 26, 27, 27, 28, 26, 20]
y_pypy = xyvalues['pypy'] = [12, 33, 47, 15, 126, 122, 95, 90, 110, 112]
y_jython = xyvalues['jython'] = [22, 43, 10, 25, 26, 25, 26, 45, 26, 30]
hz = Horizon(xyvalues, index='Date', title="Horizon Example", ylabel='Sample Data', xlabel='')
output_file('horizon.html')
show(hz)
"""
tools = kws.get('tools', True)
if tools == True:
tools = "save,resize,reset"
elif isinstance(tools, string_types):
tools = tools.replace('pan', '')
tools = tools.replace('wheel_zoom', '')
tools = tools.replace('box_zoom', '')
tools = tools.replace(',,', ',')
kws['tools'] = tools
chart = create_and_build(
HorizonBuilder, values, index=index, num_folds=num_folds, pos_color=pos_color,
neg_color=neg_color, xscale=xscale, xgrid=xgrid, ygrid=ygrid, **kws
)
# Hide numerical axis
chart.left[0].visible = False
# Add the series names to the y axis
chart.extra_y_ranges = {"series": FactorRange(factors=chart._builders[0]._series)}
chart.add_layout(CategoricalAxis(y_range_name="series"), 'left')
return chart
class HorizonBuilder(Builder):
"""This is the Horizon class and it is in charge of plotting
Horizon charts in an easy and intuitive way.
Essentially, we provide a way to ingest the data, separate the data into
    a number of folds which stack on top of each other.
We additionally make calculations for the ranges.
And finally add the needed lines taking the references from the source.
"""
index = Any(help="""
An index to be used for all data series as follows:
- A 1d iterable of any sort that will be used as
series common index
- As a string that corresponds to the key of the
mapping to be used as index (and not as data
series) if area.values is a mapping (like a dict,
an OrderedDict or a pandas DataFrame)
""")
neg_color = Color("#6495ed", help="""
The color of the negative folds. (default: "#6495ed")
""")
num_folds = Int(3, help="""
The number of folds stacked on top of each other. (default: 3)
""")
pos_color = Color("#006400", help="""
The color of the positive folds. (default: "#006400")
""")
def __init__(self, values, **kws):
"""
Args:
values (iterable): iterable 2d representing the data series
values matrix.
index (str|1d iterable, optional): can be used to specify a
common custom index for all data series as follows:
- As a 1d iterable of any sort (of datetime values)
that will be used as series common index
- As a string that corresponds to the key of the
mapping to be used as index (and not as data
series) if area.values is a mapping (like a dict,
an OrderedDict or a pandas DataFrame). The values
must be datetime values.
legend (str, optional): the legend of your chart. The legend
content is inferred from incoming input.It can be
``top_left``, ``top_right``, ``bottom_left``,
``bottom_right``. ``top_right`` is set if you set it
as True. Defaults to None.
palette(list, optional): a list containing the colormap as
hex values.
            num_folds (int, optional): the number of folds stacked on
                top of each other (default: 3)
            pos_color (hex color string, optional): the color of
                the positive folds (default: #006400)
            neg_color (hex color string, optional): the color of
                the negative folds (default: #6495ed)
Attributes:
source (obj): datasource object for your plot,
initialized as a dummy None.
x_range (obj): x-associated datarange object for you plot,
initialized as a dummy None.
y_range (obj): y-associated datarange object for you plot,
initialized as a dummy None.
groups (list): to be filled with the incoming groups of data.
Useful for legend construction.
data (dict): to be filled with the incoming data and be passed
to the ColumnDataSource in each chart inherited class.
Needed for _set_And_get method.
attr (list): to be filled with the new attributes created after
loading the data dict.
Needed for _set_And_get method.
"""
super(HorizonBuilder, self).__init__(values, **kws)
self._fold_names = []
self._source = None
self._series = []
self._fold_height = {}
self._max_y = 0
def fold_coordinates(self, y, fold_no, fold_height, y_origin=0, graph_ratio=1):
""" Function that calculate the coordinates for a value given a fold
"""
height = fold_no * fold_height
quotient, remainder = divmod(abs(y), float(height))
v = fold_height
# quotient would be 0 if the coordinate is represented in this fold
# layer
if math.floor(quotient) == 0:
v = 0
if remainder >= height - fold_height:
v = remainder - height + fold_height
v = v * graph_ratio
# Return tuple of the positive and negative relevant position of
# the coordinate against the provided fold layer
if y > 0:
return (v + y_origin, fold_height * graph_ratio + y_origin)
else:
return (y_origin, fold_height * graph_ratio - v + y_origin)
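    # Worked example (illustrative, not part of the original source): with
    # fold_height=10, y_origin=0 and graph_ratio=1, a value y=25 maps to
    #   fold_no=1 -> (10, 10)  (this fold is fully saturated)
    #   fold_no=2 -> (10, 10)  (still saturated)
    #   fold_no=3 -> (5, 10)   (25 reaches 5 units into the 20-30 band)
    # so stacking the folds (drawn with increasing alpha) encodes the magnitude.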
def pad_list(self, l, padded_value=None):
""" Function that insert padded values at the start and end of
the list (l). If padded_value not provided, then duplicate the
values next to each end of the list
"""
if len(l) > 0:
l.insert(0, l[0] if padded_value is None else padded_value)
l.append(l[-1] if padded_value is None else padded_value)
return l
def _process_data(self):
"""Use x/y data from the horizon values.
It calculates the chart properties accordingly. Then build a dict
containing references to all the points to be used by
        the multiple area glyphs inside the ``_yield_renderers`` method.
"""
for col in self._values.keys():
if isinstance(self.index, string_types) and col == self.index:
continue
self._series.append(col)
self._max_y = max(max(self._values[col]), self._max_y)
v_index = [x for x in self._values_index]
self.set_and_get("x_", col, self.pad_list(v_index))
self._fold_height = self._max_y / self.num_folds
self._graph_ratio = self.num_folds / len(self._series)
fill_alpha = []
fill_color = []
for serie_no, serie in enumerate(self._series):
self.set_and_get('y_', serie, self._values[serie])
y_origin = serie_no * self._max_y / len(self._series)
for fold_itr in range(1, self.num_folds + 1):
layers_datapoints = [self.fold_coordinates(
x, fold_itr, self._fold_height, y_origin, self._graph_ratio) for x in self._values[serie]]
pos_points, neg_points = map(list, zip(*(layers_datapoints)))
alpha = 1.0 * (abs(fold_itr)) / self.num_folds
# Y coordinates above 0
pos_points = self.pad_list(pos_points, y_origin)
self.set_and_get("y_fold%s_" % fold_itr, serie, pos_points)
self._fold_names.append("y_fold%s_%s" % (fold_itr, serie))
fill_color.append(self.pos_color)
fill_alpha.append(alpha)
# Y coordinates below 0
neg_points = self.pad_list(
neg_points, self._fold_height * self._graph_ratio + y_origin)
self.set_and_get("y_fold-%s_" % fold_itr, serie, neg_points)
self._fold_names.append("y_fold-%s_%s" % (fold_itr, serie))
fill_color.append(self.neg_color)
fill_alpha.append(alpha)
# Groups shown in the legend will only appear once
if serie_no == 0:
self._groups.append(str(self._fold_height * fold_itr))
self._groups.append(str(self._fold_height * -fold_itr))
self.set_and_get('fill_', 'alpha', fill_alpha)
self.set_and_get('fill_', 'color', fill_color)
self.set_and_get('x_', 'all', [self._data[
'x_%s' % serie] for serie in self._series for y in range(self.num_folds * 2)])
self.set_and_get(
'y_', 'all', [self._data[f_name] for f_name in self._fold_names])
def _set_sources(self):
"""Push the Horizon data into the ColumnDataSource and
calculate the proper ranges.
"""
self._source = ColumnDataSource(self._data)
self.x_range = DataRange1d(range_padding=0)
self.y_range = Range1d(start=0, end=self._max_y)
def _yield_renderers(self):
"""Use the patch glyphs to connect the xy points in the time series.
It requires the positive and negative layers
Takes reference points from the data loaded at the ColumnDataSource.
"""
patches = Patches(
fill_color='fill_color', fill_alpha='fill_alpha', xs='x_all', ys='y_all')
renderer = GlyphRenderer(data_source=self._source, glyph=patches)
# self._legends.append((self._groups[i-1], [renderer]))
yield renderer
# TODO: Add the tooltips to display the dates and all absolute y values for each series
# at any vertical places on the plot
# TODO: Add the legend to display the fold ranges based on the color of
# the fold
| bsd-3-clause |
markovmodel/PyEMMA | pyemma/plots/tests/test_plots2d.py | 2 | 4165 | # This file is part of PyEMMA.
#
# Copyright (c) 2017, 2018 Computational Molecular Biology Group, Freie Universitaet Berlin (GER)
#
# PyEMMA is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import unittest
import numpy as np
import matplotlib.pyplot as plt
from pyemma.plots.plots2d import contour, scatter_contour
from pyemma.plots.plots2d import plot_density
from pyemma.plots.plots2d import plot_free_energy
from pyemma.plots.plots2d import plot_contour
from pyemma.plots.plots2d import plot_state_map
class TestPlots2d(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.data = np.random.binomial(10, 0.4, (100, 2))
def test_free_energy(self):
fig, ax = plot_free_energy(
self.data[:, 0], self.data[:, 1])
plt.close(fig)
def test_contour(self):
ax = contour(self.data[:,0], self.data[:,1], self.data[:,0])
plt.close(ax.get_figure())
ax = contour(
self.data[:,0], self.data[:,1], self.data[:,0],
zlim=(self.data[:, 0].min(), self.data[:, 0].max()))
plt.close(ax.get_figure())
def test_scatter_contour(self):
ax = scatter_contour(
self.data[:,0], self.data[:,1], self.data[:,0])
plt.close(ax.get_figure())
def test_plot_density(self):
fig, ax, misc = plot_density(
self.data[:, 0], self.data[:, 1], logscale=True)
plt.close(fig)
fig, ax, misc = plot_density(
self.data[:, 0], self.data[:, 1], logscale=False)
plt.close(fig)
fig, ax, misc = plot_density(
self.data[:, 0], self.data[:, 1], alpha=True)
plt.close(fig)
fig, ax, misc = plot_density(
self.data[:, 0], self.data[:, 1], zorder=-1)
plt.close(fig)
fig, ax, misc = plot_density(
self.data[:, 0], self.data[:, 1],
this_should_raise_a_UserWarning=True)
plt.close(fig)
def test_plot_free_energy(self):
fig, ax, misc = plot_free_energy(
self.data[:, 0], self.data[:, 1], legacy=False)
plt.close(fig)
with self.assertRaises(ValueError):
plot_free_energy(
self.data[:, 0], self.data[:, 1],
legacy=False, offset=42)
with self.assertRaises(ValueError):
plot_free_energy(
self.data[:, 0], self.data[:, 1],
legacy=False, ncountours=42)
def test_plot_contour(self):
fig, ax, misc = plot_contour(
self.data[:, 0], self.data[:, 1], self.data[:,0])
plt.close(fig)
fig, ax, misc = plot_contour(
self.data[:, 0], self.data[:, 1], self.data[:,0],
levels='legacy')
plt.close(fig)
fig, ax, misc = plot_contour(
self.data[:, 0], self.data[:, 1], self.data[:,0],
mask=True)
plt.close(fig)
def test_plot_state_map(self):
fig, ax, misc = plot_state_map(
self.data[:, 0], self.data[:, 1], self.data[:,0])
plt.close(fig)
fig, ax, misc = plot_state_map(
self.data[:, 0], self.data[:, 1], self.data[:,0],
zorder=0.5)
plt.close(fig)
fig, ax, misc = plot_state_map(
self.data[:, 0], self.data[:, 1], self.data[:,0],
cbar_orientation='horizontal', cbar_label=None)
plt.close(fig)
with self.assertRaises(ValueError):
fig, ax, misc = plot_state_map(
self.data[:, 0], self.data[:, 1], self.data[:,0],
cbar_orientation='INVALID')
| lgpl-3.0 |
jzt5132/scikit-learn | examples/cluster/plot_kmeans_digits.py | 230 | 4524 | """
===========================================================
A demo of K-Means clustering on the handwritten digits data
===========================================================
In this example we compare the various initialization strategies for
K-means in terms of runtime and quality of the results.
As the ground truth is known here, we also apply different cluster
quality metrics to judge the goodness of fit of the cluster labels to the
ground truth.
Cluster quality metrics evaluated (see :ref:`clustering_evaluation` for
definitions and discussions of the metrics):
=========== ========================================================
Shorthand full name
=========== ========================================================
homo homogeneity score
compl completeness score
v-meas V measure
ARI adjusted Rand index
AMI adjusted mutual information
silhouette silhouette coefficient
=========== ========================================================
"""
print(__doc__)
from time import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn import metrics
from sklearn.cluster import KMeans
from sklearn.datasets import load_digits
from sklearn.decomposition import PCA
from sklearn.preprocessing import scale
np.random.seed(42)
digits = load_digits()
data = scale(digits.data)
n_samples, n_features = data.shape
n_digits = len(np.unique(digits.target))
labels = digits.target
sample_size = 300
print("n_digits: %d, \t n_samples %d, \t n_features %d"
% (n_digits, n_samples, n_features))
print(79 * '_')
print('% 9s' % 'init'
' time inertia homo compl v-meas ARI AMI silhouette')
def bench_k_means(estimator, name, data):
t0 = time()
estimator.fit(data)
print('% 9s %.2fs %i %.3f %.3f %.3f %.3f %.3f %.3f'
% (name, (time() - t0), estimator.inertia_,
metrics.homogeneity_score(labels, estimator.labels_),
metrics.completeness_score(labels, estimator.labels_),
metrics.v_measure_score(labels, estimator.labels_),
metrics.adjusted_rand_score(labels, estimator.labels_),
metrics.adjusted_mutual_info_score(labels, estimator.labels_),
metrics.silhouette_score(data, estimator.labels_,
metric='euclidean',
sample_size=sample_size)))
bench_k_means(KMeans(init='k-means++', n_clusters=n_digits, n_init=10),
name="k-means++", data=data)
bench_k_means(KMeans(init='random', n_clusters=n_digits, n_init=10),
name="random", data=data)
# in this case the seeding of the centers is deterministic, hence we run the
# kmeans algorithm only once with n_init=1
pca = PCA(n_components=n_digits).fit(data)
bench_k_means(KMeans(init=pca.components_, n_clusters=n_digits, n_init=1),
name="PCA-based",
data=data)
print(79 * '_')
###############################################################################
# Visualize the results on PCA-reduced data
reduced_data = PCA(n_components=2).fit_transform(data)
kmeans = KMeans(init='k-means++', n_clusters=n_digits, n_init=10)
kmeans.fit(reduced_data)
# Step size of the mesh. Decrease to increase the quality of the VQ.
h = .02     # point in the mesh [x_min, x_max]x[y_min, y_max].
# Plot the decision boundary. For that, we will assign a color to each point in the mesh.
x_min, x_max = reduced_data[:, 0].min() - 1, reduced_data[:, 0].max() + 1
y_min, y_max = reduced_data[:, 1].min() - 1, reduced_data[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
# Obtain labels for each point in mesh. Use last trained model.
Z = kmeans.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.figure(1)
plt.clf()
plt.imshow(Z, interpolation='nearest',
extent=(xx.min(), xx.max(), yy.min(), yy.max()),
cmap=plt.cm.Paired,
aspect='auto', origin='lower')
plt.plot(reduced_data[:, 0], reduced_data[:, 1], 'k.', markersize=2)
# Plot the centroids as a white X
centroids = kmeans.cluster_centers_
plt.scatter(centroids[:, 0], centroids[:, 1],
marker='x', s=169, linewidths=3,
color='w', zorder=10)
plt.title('K-means clustering on the digits dataset (PCA-reduced data)\n'
'Centroids are marked with white cross')
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.xticks(())
plt.yticks(())
plt.show()
| bsd-3-clause |
meduz/scikit-learn | examples/gaussian_process/plot_gpc_xor.py | 104 | 2132 | """
========================================================================
Illustration of Gaussian process classification (GPC) on the XOR dataset
========================================================================
This example illustrates GPC on XOR data. Compared are a stationary, isotropic
kernel (RBF) and a non-stationary kernel (DotProduct). On this particular
dataset, the DotProduct kernel obtains considerably better results because the
class-boundaries are linear and coincide with the coordinate axes. In general,
stationary kernels often obtain better results.
"""
print(__doc__)
# Authors: Jan Hendrik Metzen <[email protected]>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import RBF, DotProduct
xx, yy = np.meshgrid(np.linspace(-3, 3, 50),
np.linspace(-3, 3, 50))
rng = np.random.RandomState(0)
X = rng.randn(200, 2)
Y = np.logical_xor(X[:, 0] > 0, X[:, 1] > 0)
# fit the model
plt.figure(figsize=(10, 5))
kernels = [1.0 * RBF(length_scale=1.0), 1.0 * DotProduct(sigma_0=1.0)**2]
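# For reference (illustrative note, not part of the original example): these are
# k_RBF(x, x') = exp(-||x - x'||^2 / (2 * length_scale^2)) and
# k_Dot(x, x') = (sigma_0^2 + x . x')^2. The squared dot-product kernel spans
# degree-2 monomials such as x1 * x2, whose sign separates the two XOR classes
# generated above, hence its advantage on this dataset.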
for i, kernel in enumerate(kernels):
clf = GaussianProcessClassifier(kernel=kernel, warm_start=True).fit(X, Y)
# plot the decision function for each datapoint on the grid
Z = clf.predict_proba(np.vstack((xx.ravel(), yy.ravel())).T)[:, 1]
Z = Z.reshape(xx.shape)
plt.subplot(1, 2, i + 1)
image = plt.imshow(Z, interpolation='nearest',
extent=(xx.min(), xx.max(), yy.min(), yy.max()),
aspect='auto', origin='lower', cmap=plt.cm.PuOr_r)
contours = plt.contour(xx, yy, Z, levels=[0], linewidths=2,
                           linestyles='--')
plt.scatter(X[:, 0], X[:, 1], s=30, c=Y, cmap=plt.cm.Paired)
plt.xticks(())
plt.yticks(())
plt.axis([-3, 3, -3, 3])
plt.colorbar(image)
plt.title("%s\n Log-Marginal-Likelihood:%.3f"
% (clf.kernel_, clf.log_marginal_likelihood(clf.kernel_.theta)),
fontsize=12)
plt.tight_layout()
plt.show()
| bsd-3-clause |
2uller/LotF | App/Lib/site-packages/numpy/lib/npyio.py | 9 | 65323 | __all__ = ['savetxt', 'loadtxt', 'genfromtxt', 'ndfromtxt', 'mafromtxt',
'recfromtxt', 'recfromcsv', 'load', 'loads', 'save', 'savez',
'savez_compressed', 'packbits', 'unpackbits', 'fromregex', 'DataSource']
import numpy as np
import format
import sys
import os
import re
import sys
import itertools
import warnings
import weakref
from operator import itemgetter
from cPickle import load as _cload, loads
from _datasource import DataSource
from _compiled_base import packbits, unpackbits
from _iotools import LineSplitter, NameValidator, StringConverter, \
ConverterError, ConverterLockError, ConversionWarning, \
_is_string_like, has_nested_fields, flatten_dtype, \
easy_dtype, _bytes_to_name
from numpy.compat import asbytes, asstr, asbytes_nested, bytes
if sys.version_info[0] >= 3:
from io import BytesIO
else:
from cStringIO import StringIO as BytesIO
_string_like = _is_string_like
def seek_gzip_factory(f):
"""Use this factory to produce the class so that we can do a lazy
import on gzip.
"""
import gzip
class GzipFile(gzip.GzipFile):
def seek(self, offset, whence=0):
# figure out new position (we can only seek forwards)
if whence == 1:
offset = self.offset + offset
if whence not in [0, 1]:
raise IOError("Illegal argument")
if offset < self.offset:
# for negative seek, rewind and do positive seek
self.rewind()
count = offset - self.offset
for i in range(count // 1024):
self.read(1024)
self.read(count % 1024)
def tell(self):
return self.offset
if isinstance(f, str):
f = GzipFile(f)
elif isinstance(f, gzip.GzipFile):
# cast to our GzipFile if its already a gzip.GzipFile
try:
name = f.name
except AttributeError:
# Backward compatibility for <= 2.5
name = f.filename
mode = f.mode
f = GzipFile(fileobj=f.fileobj, filename=name)
f.mode = mode
return f
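# Illustrative note on the factory above (not part of the original module):
# `load` further down sniffs magic bytes and then does a relative back-up seek
# (`fid.seek(-N, 1)`); this wrapper emulates that on gzip streams by rewinding
# and re-reading up to the requested offset, e.g.:
#
#   fh = seek_gzip_factory('archive.npy.gz')   # hypothetical file name
#   magic = fh.read(6)
#   fh.seek(-6, 1)                             # handled via rewind + re-read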
class BagObj(object):
"""
BagObj(obj)
Convert attribute look-ups to getitems on the object passed in.
Parameters
----------
obj : class instance
Object on which attribute look-up is performed.
Examples
--------
>>> from numpy.lib.npyio import BagObj as BO
>>> class BagDemo(object):
... def __getitem__(self, key): # An instance of BagObj(BagDemo)
... # will call this method when any
... # attribute look-up is required
... result = "Doesn't matter what you want, "
... return result + "you're gonna get this"
...
>>> demo_obj = BagDemo()
>>> bagobj = BO(demo_obj)
>>> bagobj.hello_there
"Doesn't matter what you want, you're gonna get this"
>>> bagobj.I_can_be_anything
"Doesn't matter what you want, you're gonna get this"
"""
def __init__(self, obj):
# Use weakref to make NpzFile objects collectable by refcount
self._obj = weakref.proxy(obj)
def __getattribute__(self, key):
try:
return object.__getattribute__(self, '_obj')[key]
except KeyError:
raise AttributeError(key)
def zipfile_factory(*args, **kwargs):
import zipfile
if sys.version_info >= (2, 5):
kwargs['allowZip64'] = True
return zipfile.ZipFile(*args, **kwargs)
class NpzFile(object):
"""
NpzFile(fid)
A dictionary-like object with lazy-loading of files in the zipped
archive provided on construction.
`NpzFile` is used to load files in the NumPy ``.npz`` data archive
format. It assumes that files in the archive have a ".npy" extension,
other files are ignored.
The arrays and file strings are lazily loaded on either
getitem access using ``obj['key']`` or attribute lookup using
``obj.f.key``. A list of all files (without ".npy" extensions) can
be obtained with ``obj.files`` and the ZipFile object itself using
``obj.zip``.
Attributes
----------
files : list of str
List of all files in the archive with a ".npy" extension.
zip : ZipFile instance
The ZipFile object initialized with the zipped archive.
f : BagObj instance
An object on which attribute can be performed as an alternative
to getitem access on the `NpzFile` instance itself.
Parameters
----------
fid : file or str
The zipped archive to open. This is either a file-like object
or a string containing the path to the archive.
own_fid : bool, optional
Whether NpzFile should close the file handle.
Requires that `fid` is a file-like object.
Examples
--------
>>> from tempfile import TemporaryFile
>>> outfile = TemporaryFile()
>>> x = np.arange(10)
>>> y = np.sin(x)
>>> np.savez(outfile, x=x, y=y)
>>> outfile.seek(0)
>>> npz = np.load(outfile)
>>> isinstance(npz, np.lib.io.NpzFile)
True
>>> npz.files
['y', 'x']
>>> npz['x'] # getitem access
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
>>> npz.f.x # attribute lookup
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
"""
def __init__(self, fid, own_fid=False):
# Import is postponed to here since zipfile depends on gzip, an optional
# component of the so-called standard library.
_zip = zipfile_factory(fid)
self._files = _zip.namelist()
self.files = []
for x in self._files:
if x.endswith('.npy'):
self.files.append(x[:-4])
else:
self.files.append(x)
self.zip = _zip
self.f = BagObj(self)
if own_fid:
self.fid = fid
else:
self.fid = None
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
def close(self):
"""
Close the file.
"""
if self.zip is not None:
self.zip.close()
self.zip = None
if self.fid is not None:
self.fid.close()
self.fid = None
self.f = None # break reference cycle
def __del__(self):
self.close()
def __getitem__(self, key):
# FIXME: This seems like it will copy strings around
# more than is strictly necessary. The zipfile
# will read the string and then
# the format.read_array will copy the string
# to another place in memory.
# It would be better if the zipfile could read
# (or at least uncompress) the data
# directly into the array memory.
member = 0
if key in self._files:
member = 1
elif key in self.files:
member = 1
key += '.npy'
if member:
bytes = self.zip.read(key)
if bytes.startswith(format.MAGIC_PREFIX):
value = BytesIO(bytes)
return format.read_array(value)
else:
return bytes
else:
raise KeyError("%s is not a file in the archive" % key)
def __iter__(self):
return iter(self.files)
def items(self):
"""
Return a list of tuples, with each tuple (filename, array in file).
"""
return [(f, self[f]) for f in self.files]
def iteritems(self):
"""Generator that returns tuples (filename, array in file)."""
for f in self.files:
yield (f, self[f])
def keys(self):
"""Return files in the archive with a ".npy" extension."""
return self.files
def iterkeys(self):
"""Return an iterator over the files in the archive."""
return self.__iter__()
def __contains__(self, key):
return self.files.__contains__(key)
def load(file, mmap_mode=None):
"""
Load an array(s) or pickled objects from .npy, .npz, or pickled files.
Parameters
----------
file : file-like object or string
The file to read. It must support ``seek()`` and ``read()`` methods.
If the filename extension is ``.gz``, the file is first decompressed.
mmap_mode: {None, 'r+', 'r', 'w+', 'c'}, optional
If not None, then memory-map the file, using the given mode
(see `numpy.memmap` for a detailed description of the modes).
A memory-mapped array is kept on disk. However, it can be accessed
and sliced like any ndarray. Memory mapping is especially useful for
accessing small fragments of large files without reading the entire
file into memory.
Returns
-------
result : array, tuple, dict, etc.
Data stored in the file. For '.npz' files, the returned instance of
NpzFile class must be closed to avoid leaking file descriptors.
Raises
------
IOError
If the input file does not exist or cannot be read.
See Also
--------
save, savez, loadtxt
memmap : Create a memory-map to an array stored in a file on disk.
Notes
-----
- If the file contains pickle data, then whatever object is stored
in the pickle is returned.
- If the file is a ``.npy`` file, then a single array is returned.
- If the file is a ``.npz`` file, then a dictionary-like object is
returned, containing ``{filename: array}`` key-value pairs, one for
each file in the archive.
- If the file is a ``.npz`` file, the returned value supports the context
manager protocol in a similar fashion to the open function::
with load('foo.npz') as data:
a = data['a']
      The underlying file descriptor is closed when exiting the 'with' block.
Examples
--------
Store data to disk, and load it again:
>>> np.save('/tmp/123', np.array([[1, 2, 3], [4, 5, 6]]))
>>> np.load('/tmp/123.npy')
array([[1, 2, 3],
[4, 5, 6]])
Store compressed data to disk, and load it again:
>>> a=np.array([[1, 2, 3], [4, 5, 6]])
>>> b=np.array([1, 2])
>>> np.savez('/tmp/123.npz', a=a, b=b)
>>> data = np.load('/tmp/123.npz')
>>> data['a']
array([[1, 2, 3],
[4, 5, 6]])
>>> data['b']
array([1, 2])
>>> data.close()
Mem-map the stored array, and then access the second row
directly from disk:
>>> X = np.load('/tmp/123.npy', mmap_mode='r')
>>> X[1, :]
memmap([4, 5, 6])
"""
import gzip
own_fid = False
if isinstance(file, basestring):
fid = open(file, "rb")
own_fid = True
elif isinstance(file, gzip.GzipFile):
fid = seek_gzip_factory(file)
else:
fid = file
try:
# Code to distinguish from NumPy binary files and pickles.
_ZIP_PREFIX = asbytes('PK\x03\x04')
N = len(format.MAGIC_PREFIX)
magic = fid.read(N)
fid.seek(-N, 1) # back-up
        if magic.startswith(_ZIP_PREFIX):  # zip-file (assume .npz)
            # Hand ownership of the file handle over to NpzFile so it is
            # closed together with the archive, while keeping the finally
            # clause below from closing it prematurely.
            tmp = own_fid
            own_fid = False
            return NpzFile(fid, own_fid=tmp)
elif magic == format.MAGIC_PREFIX: # .npy file
if mmap_mode:
return format.open_memmap(file, mode=mmap_mode)
else:
return format.read_array(fid)
else: # Try a pickle
try:
return _cload(fid)
except:
raise IOError(
"Failed to interpret file %s as a pickle" % repr(file))
finally:
if own_fid:
fid.close()
def save(file, arr):
"""
Save an array to a binary file in NumPy ``.npy`` format.
Parameters
----------
file : file or str
File or filename to which the data is saved. If file is a file-object,
then the filename is unchanged. If file is a string, a ``.npy``
extension will be appended to the file name if it does not already
have one.
arr : array_like
Array data to be saved.
See Also
--------
savez : Save several arrays into a ``.npz`` archive
savetxt, load
Notes
-----
For a description of the ``.npy`` format, see `format`.
Examples
--------
>>> from tempfile import TemporaryFile
>>> outfile = TemporaryFile()
>>> x = np.arange(10)
>>> np.save(outfile, x)
>>> outfile.seek(0) # Only needed here to simulate closing & reopening file
>>> np.load(outfile)
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
"""
own_fid = False
if isinstance(file, basestring):
if not file.endswith('.npy'):
file = file + '.npy'
fid = open(file, "wb")
own_fid = True
else:
fid = file
try:
arr = np.asanyarray(arr)
format.write_array(fid, arr)
finally:
if own_fid:
fid.close()
def savez(file, *args, **kwds):
"""
Save several arrays into a single file in uncompressed ``.npz`` format.
If arguments are passed in with no keywords, the corresponding variable
names, in the .npz file, are 'arr_0', 'arr_1', etc. If keyword arguments
are given, the corresponding variable names, in the ``.npz`` file will
match the keyword names.
Parameters
----------
file : str or file
Either the file name (string) or an open file (file-like object)
where the data will be saved. If file is a string, the ``.npz``
extension will be appended to the file name if it is not already there.
args : Arguments, optional
Arrays to save to the file. Since it is not possible for Python to
know the names of the arrays outside `savez`, the arrays will be saved
with names "arr_0", "arr_1", and so on. These arguments can be any
expression.
kwds : Keyword arguments, optional
Arrays to save to the file. Arrays will be saved in the file with the
keyword names.
Returns
-------
None
See Also
--------
save : Save a single array to a binary file in NumPy format.
savetxt : Save an array to a file as plain text.
savez_compressed : Save several arrays into a compressed .npz file format
Notes
-----
The ``.npz`` file format is a zipped archive of files named after the
variables they contain. The archive is not compressed and each file
in the archive contains one variable in ``.npy`` format. For a
description of the ``.npy`` format, see `format`.
When opening the saved ``.npz`` file with `load` a `NpzFile` object is
returned. This is a dictionary-like object which can be queried for
its list of arrays (with the ``.files`` attribute), and for the arrays
themselves.
Examples
--------
>>> from tempfile import TemporaryFile
>>> outfile = TemporaryFile()
>>> x = np.arange(10)
>>> y = np.sin(x)
Using `savez` with \\*args, the arrays are saved with default names.
>>> np.savez(outfile, x, y)
>>> outfile.seek(0) # Only needed here to simulate closing & reopening file
>>> npzfile = np.load(outfile)
>>> npzfile.files
['arr_1', 'arr_0']
>>> npzfile['arr_0']
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
Using `savez` with \\**kwds, the arrays are saved with the keyword names.
>>> outfile = TemporaryFile()
>>> np.savez(outfile, x=x, y=y)
>>> outfile.seek(0)
>>> npzfile = np.load(outfile)
>>> npzfile.files
['y', 'x']
>>> npzfile['x']
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
"""
_savez(file, args, kwds, False)
def savez_compressed(file, *args, **kwds):
"""
Save several arrays into a single file in compressed ``.npz`` format.
If keyword arguments are given, then filenames are taken from the keywords.
If arguments are passed in with no keywords, then stored file names are
arr_0, arr_1, etc.
Parameters
----------
file : str
File name of .npz file.
args : Arguments
Function arguments.
kwds : Keyword arguments
Keywords.
See Also
--------
numpy.savez : Save several arrays into an uncompressed .npz file format
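    Examples
    --------
    A minimal usage sketch (the file path is illustrative):
    >>> test_array = np.random.rand(3, 2)
    >>> test_vector = np.random.rand(4)
    >>> np.savez_compressed('/tmp/123', a=test_array, b=test_vector)
    >>> loaded = np.load('/tmp/123.npz')
    >>> print(np.array_equal(test_array, loaded['a']))
    True
    >>> print(np.array_equal(test_vector, loaded['b']))
    True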
"""
_savez(file, args, kwds, True)
def _savez(file, args, kwds, compress):
# Import is postponed to here since zipfile depends on gzip, an optional
# component of the so-called standard library.
import zipfile
# Import deferred for startup time improvement
import tempfile
if isinstance(file, basestring):
if not file.endswith('.npz'):
file = file + '.npz'
namedict = kwds
for i, val in enumerate(args):
key = 'arr_%d' % i
if key in namedict.keys():
raise ValueError("Cannot use un-named variables and keyword %s" % key)
namedict[key] = val
if compress:
compression = zipfile.ZIP_DEFLATED
else:
compression = zipfile.ZIP_STORED
zip = zipfile_factory(file, mode="w", compression=compression)
# Stage arrays in a temporary file on disk, before writing to zip.
fd, tmpfile = tempfile.mkstemp(suffix='-numpy.npy')
os.close(fd)
try:
for key, val in namedict.iteritems():
fname = key + '.npy'
fid = open(tmpfile, 'wb')
try:
format.write_array(fid, np.asanyarray(val))
fid.close()
fid = None
zip.write(tmpfile, arcname=fname)
finally:
if fid:
fid.close()
finally:
os.remove(tmpfile)
zip.close()
# Adapted from matplotlib
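# Return a converter callable that turns a single text token into a value of
# the given dtype; loadtxt builds one such converter per column.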
def _getconv(dtype):
typ = dtype.type
if issubclass(typ, np.bool_):
return lambda x: bool(int(x))
if issubclass(typ, np.uint64):
return np.uint64
if issubclass(typ, np.int64):
return np.int64
if issubclass(typ, np.integer):
return lambda x: int(float(x))
elif issubclass(typ, np.floating):
return float
elif issubclass(typ, np.complex):
return complex
elif issubclass(typ, np.bytes_):
return bytes
else:
return str
def loadtxt(fname, dtype=float, comments='#', delimiter=None,
converters=None, skiprows=0, usecols=None, unpack=False,
ndmin=0):
"""
Load data from a text file.
Each row in the text file must have the same number of values.
Parameters
----------
fname : file or str
File, filename, or generator to read. If the filename extension is
``.gz`` or ``.bz2``, the file is first decompressed. Note that
generators should return byte strings for Python 3k.
dtype : data-type, optional
Data-type of the resulting array; default: float. If this is a
record data-type, the resulting array will be 1-dimensional, and
each row will be interpreted as an element of the array. In this
case, the number of columns used must match the number of fields in
the data-type.
comments : str, optional
The character used to indicate the start of a comment;
default: '#'.
delimiter : str, optional
The string used to separate values. By default, this is any
whitespace.
converters : dict, optional
A dictionary mapping column number to a function that will convert
that column to a float. E.g., if column 0 is a date string:
``converters = {0: datestr2num}``. Converters can also be used to
provide a default value for missing data (but see also `genfromtxt`):
``converters = {3: lambda s: float(s.strip() or 0)}``. Default: None.
skiprows : int, optional
Skip the first `skiprows` lines; default: 0.
usecols : sequence, optional
Which columns to read, with 0 being the first. For example,
``usecols = (1,4,5)`` will extract the 2nd, 5th and 6th columns.
The default, None, results in all columns being read.
unpack : bool, optional
If True, the returned array is transposed, so that arguments may be
unpacked using ``x, y, z = loadtxt(...)``. When used with a record
data-type, arrays are returned for each field. Default is False.
ndmin : int, optional
The returned array will have at least `ndmin` dimensions.
Otherwise mono-dimensional axes will be squeezed.
Legal values: 0 (default), 1 or 2.
.. versionadded:: 1.6.0
Returns
-------
out : ndarray
Data read from the text file.
See Also
--------
load, fromstring, fromregex
genfromtxt : Load data with missing values handled as specified.
scipy.io.loadmat : reads MATLAB data files
Notes
-----
This function aims to be a fast reader for simply formatted files. The
`genfromtxt` function provides more sophisticated handling of, e.g.,
lines with missing values.
Examples
--------
>>> from StringIO import StringIO # StringIO behaves like a file object
>>> c = StringIO("0 1\\n2 3")
>>> np.loadtxt(c)
array([[ 0., 1.],
[ 2., 3.]])
>>> d = StringIO("M 21 72\\nF 35 58")
>>> np.loadtxt(d, dtype={'names': ('gender', 'age', 'weight'),
... 'formats': ('S1', 'i4', 'f4')})
array([('M', 21, 72.0), ('F', 35, 58.0)],
dtype=[('gender', '|S1'), ('age', '<i4'), ('weight', '<f4')])
>>> c = StringIO("1,0,2\\n3,0,4")
>>> x, y = np.loadtxt(c, delimiter=',', usecols=(0, 2), unpack=True)
>>> x
array([ 1., 3.])
>>> y
array([ 2., 4.])
"""
# Type conversions for Py3 convenience
comments = asbytes(comments)
user_converters = converters
if delimiter is not None:
delimiter = asbytes(delimiter)
if usecols is not None:
usecols = list(usecols)
fown = False
try:
if _is_string_like(fname):
fown = True
if fname.endswith('.gz'):
fh = iter(seek_gzip_factory(fname))
elif fname.endswith('.bz2'):
import bz2
fh = iter(bz2.BZ2File(fname))
else:
fh = iter(open(fname, 'U'))
else:
fh = iter(fname)
except TypeError:
raise ValueError('fname must be a string, file handle, or generator')
X = []
def flatten_dtype(dt):
"""Unpack a structured data-type, and produce re-packing info."""
if dt.names is None:
# If the dtype is flattened, return.
# If the dtype has a shape, the dtype occurs
# in the list more than once.
shape = dt.shape
if len(shape) == 0:
return ([dt.base], None)
else:
packing = [(shape[-1], list)]
if len(shape) > 1:
for dim in dt.shape[-2::-1]:
packing = [(dim*packing[0][0], packing*dim)]
return ([dt.base] * int(np.prod(dt.shape)), packing)
else:
types = []
packing = []
for field in dt.names:
tp, bytes = dt.fields[field]
flat_dt, flat_packing = flatten_dtype(tp)
types.extend(flat_dt)
# Avoid extra nesting for subarrays
if len(tp.shape) > 0:
packing.extend(flat_packing)
else:
packing.append((len(flat_dt), flat_packing))
return (types, packing)
def pack_items(items, packing):
"""Pack items into nested lists based on re-packing info."""
if packing == None:
return items[0]
elif packing is tuple:
return tuple(items)
elif packing is list:
return list(items)
else:
start = 0
ret = []
for length, subpacking in packing:
ret.append(pack_items(items[start:start+length], subpacking))
start += length
return tuple(ret)
def split_line(line):
"""Chop off comments, strip, and split at delimiter."""
line = asbytes(line).split(comments)[0].strip(asbytes('\r\n'))
if line:
return line.split(delimiter)
else:
return []
try:
# Make sure we're dealing with a proper dtype
dtype = np.dtype(dtype)
defconv = _getconv(dtype)
# Skip the first `skiprows` lines
for i in xrange(skiprows):
fh.next()
# Read until we find a line with some values, and use
# it to estimate the number of columns, N.
first_vals = None
try:
while not first_vals:
first_line = fh.next()
first_vals = split_line(first_line)
except StopIteration:
# End of lines reached
first_line = ''
first_vals = []
warnings.warn('loadtxt: Empty input file: "%s"' % fname)
N = len(usecols or first_vals)
dtype_types, packing = flatten_dtype(dtype)
if len(dtype_types) > 1:
# We're dealing with a structured array, each field of
# the dtype matches a column
converters = [_getconv(dt) for dt in dtype_types]
else:
# All fields have the same dtype
converters = [defconv for i in xrange(N)]
if N > 1:
packing = [(N, tuple)]
# By preference, use the converters specified by the user
for i, conv in (user_converters or {}).iteritems():
if usecols:
try:
i = usecols.index(i)
except ValueError:
# Unused converter specified
continue
converters[i] = conv
# Parse each line, including the first
for i, line in enumerate(itertools.chain([first_line], fh)):
vals = split_line(line)
if len(vals) == 0:
continue
if usecols:
vals = [vals[i] for i in usecols]
# Convert each value according to its column and store
items = [conv(val) for (conv, val) in zip(converters, vals)]
# Then pack it according to the dtype's nesting
items = pack_items(items, packing)
X.append(items)
finally:
if fown:
fh.close()
X = np.array(X, dtype)
# Multicolumn data are returned with shape (1, N, M), i.e.
# (1, 1, M) for a single row - remove the singleton dimension there
if X.ndim == 3 and X.shape[:2] == (1, 1):
X.shape = (1, -1)
# Verify that the array has at least dimensions `ndmin`.
# Check correctness of the values of `ndmin`
if not ndmin in [0, 1, 2]:
raise ValueError('Illegal value of ndmin keyword: %s' % ndmin)
# Tweak the size and shape of the arrays - remove extraneous dimensions
if X.ndim > ndmin:
X = np.squeeze(X)
# and ensure we have the minimum number of dimensions asked for
# - has to be in this order for the odd case ndmin=1, X.squeeze().ndim=0
if X.ndim < ndmin:
if ndmin == 1:
X = np.atleast_1d(X)
elif ndmin == 2:
X = np.atleast_2d(X).T
if unpack:
if len(dtype_types) > 1:
# For structured arrays, return an array for each field.
return [X[field] for field in dtype.names]
else:
return X.T
else:
return X
def savetxt(fname, X, fmt='%.18e', delimiter=' ', newline='\n', header='',
footer='', comments='# '):
"""
Save an array to a text file.
Parameters
----------
fname : filename or file handle
If the filename ends in ``.gz``, the file is automatically saved in
compressed gzip format. `loadtxt` understands gzipped files
transparently.
X : array_like
Data to be saved to a text file.
fmt : str or sequence of strs, optional
A single format (%10.5f), a sequence of formats, or a
multi-format string, e.g. 'Iteration %d -- %10.5f', in which
case `delimiter` is ignored. For complex `X`, the legal options
for `fmt` are:
a) a single specifier, `fmt='%.4e'`, resulting in numbers formatted
like `' (%s+%sj)' % (fmt, fmt)`
b) a full string specifying every real and imaginary part, e.g.
`' %.4e %+.4j %.4e %+.4j %.4e %+.4j'` for 3 columns
c) a list of specifiers, one per column - in this case, the real
and imaginary part must have separate specifiers,
e.g. `['%.3e + %.3ej', '(%.15e%+.15ej)']` for 2 columns
delimiter : str, optional
Character separating columns.
    newline : str, optional
        Character separating lines.
        .. versionadded:: 1.5.0
header : str, optional
String that will be written at the beginning of the file.
.. versionadded:: 1.7.0
footer : str, optional
String that will be written at the end of the file.
.. versionadded:: 1.7.0
comments : str, optional
String that will be prepended to the ``header`` and ``footer`` strings,
to mark them as comments. Default: '# ', as expected by e.g.
``numpy.loadtxt``.
.. versionadded:: 1.7.0
See Also
--------
save : Save an array to a binary file in NumPy ``.npy`` format
savez : Save several arrays into a ``.npz`` compressed archive
Notes
-----
Further explanation of the `fmt` parameter
(``%[flag]width[.precision]specifier``):
flags:
``-`` : left justify
        ``+`` : Forces to precede result with + or -.
``0`` : Left pad the number with zeros instead of space (see width).
width:
Minimum number of characters to be printed. The value is not truncated
if it has more characters.
precision:
- For integer specifiers (eg. ``d,i,o,x``), the minimum number of
digits.
- For ``e, E`` and ``f`` specifiers, the number of digits to print
after the decimal point.
- For ``g`` and ``G``, the maximum number of significant digits.
- For ``s``, the maximum number of characters.
specifiers:
``c`` : character
``d`` or ``i`` : signed decimal integer
``e`` or ``E`` : scientific notation with ``e`` or ``E``.
``f`` : decimal floating point
``g,G`` : use the shorter of ``e,E`` or ``f``
``o`` : signed octal
``s`` : string of characters
``u`` : unsigned decimal integer
``x,X`` : unsigned hexadecimal integer
This explanation of ``fmt`` is not complete, for an exhaustive
specification see [1]_.
References
----------
.. [1] `Format Specification Mini-Language
<http://docs.python.org/library/string.html#
format-specification-mini-language>`_, Python Documentation.
Examples
--------
>>> x = y = z = np.arange(0.0,5.0,1.0)
>>> np.savetxt('test.out', x, delimiter=',') # X is an array
>>> np.savetxt('test.out', (x,y,z)) # x,y,z equal sized 1D arrays
>>> np.savetxt('test.out', x, fmt='%1.4e') # use exponential notation
"""
# Py3 conversions first
if isinstance(fmt, bytes):
fmt = asstr(fmt)
delimiter = asstr(delimiter)
own_fh = False
if _is_string_like(fname):
own_fh = True
if fname.endswith('.gz'):
import gzip
fh = gzip.open(fname, 'wb')
else:
if sys.version_info[0] >= 3:
fh = open(fname, 'wb')
else:
fh = open(fname, 'w')
elif hasattr(fname, 'seek'):
fh = fname
else:
raise ValueError('fname must be a string or file handle')
try:
X = np.asarray(X)
# Handle 1-dimensional arrays
if X.ndim == 1:
# Common case -- 1d array of numbers
if X.dtype.names is None:
X = np.atleast_2d(X).T
ncol = 1
# Complex dtype -- each field indicates a separate column
else:
ncol = len(X.dtype.descr)
else:
ncol = X.shape[1]
iscomplex_X = np.iscomplexobj(X)
# `fmt` can be a string with multiple insertion points or a
    # list of formats. E.g. '%10.5f\t%10d' or ('%10.5f', '%10d')
if type(fmt) in (list, tuple):
if len(fmt) != ncol:
raise AttributeError('fmt has wrong shape. %s' % str(fmt))
format = asstr(delimiter).join(map(asstr, fmt))
elif type(fmt) is str:
n_fmt_chars = fmt.count('%')
error = ValueError('fmt has wrong number of %% formats: %s' % fmt)
if n_fmt_chars == 1:
if iscomplex_X:
fmt = [' (%s+%sj)' % (fmt, fmt),] * ncol
else:
fmt = [fmt, ] * ncol
format = delimiter.join(fmt)
elif iscomplex_X and n_fmt_chars != (2 * ncol):
raise error
elif ((not iscomplex_X) and n_fmt_chars != ncol):
raise error
else:
format = fmt
if len(header) > 0:
header = header.replace('\n', '\n' + comments)
fh.write(asbytes(comments + header + newline))
if iscomplex_X:
for row in X:
row2 = []
for number in row:
row2.append(number.real)
row2.append(number.imag)
fh.write(asbytes(format % tuple(row2) + newline))
else:
for row in X:
fh.write(asbytes(format % tuple(row) + newline))
if len(footer) > 0:
footer = footer.replace('\n', '\n' + comments)
fh.write(asbytes(comments + footer + newline))
finally:
if own_fh:
fh.close()
def fromregex(file, regexp, dtype):
"""
Construct an array from a text file, using regular expression parsing.
The returned array is always a structured array, and is constructed from
all matches of the regular expression in the file. Groups in the regular
expression are converted to fields of the structured array.
Parameters
----------
file : str or file
File name or file object to read.
regexp : str or regexp
Regular expression used to parse the file.
Groups in the regular expression correspond to fields in the dtype.
dtype : dtype or list of dtypes
Dtype for the structured array.
Returns
-------
output : ndarray
The output array, containing the part of the content of `file` that
was matched by `regexp`. `output` is always a structured array.
Raises
------
TypeError
When `dtype` is not a valid dtype for a structured array.
See Also
--------
fromstring, loadtxt
Notes
-----
Dtypes for structured arrays can be specified in several forms, but all
forms specify at least the data type and field name. For details see
`doc.structured_arrays`.
Examples
--------
>>> f = open('test.dat', 'w')
>>> f.write("1312 foo\\n1534 bar\\n444 qux")
>>> f.close()
>>> regexp = r"(\\d+)\\s+(...)" # match [digits, whitespace, anything]
>>> output = np.fromregex('test.dat', regexp,
... [('num', np.int64), ('key', 'S3')])
>>> output
array([(1312L, 'foo'), (1534L, 'bar'), (444L, 'qux')],
dtype=[('num', '<i8'), ('key', '|S3')])
>>> output['num']
array([1312, 1534, 444], dtype=int64)
"""
own_fh = False
if not hasattr(file, "read"):
file = open(file, 'rb')
own_fh = True
try:
if not hasattr(regexp, 'match'):
regexp = re.compile(asbytes(regexp))
if not isinstance(dtype, np.dtype):
dtype = np.dtype(dtype)
seq = regexp.findall(file.read())
if seq and not isinstance(seq[0], tuple):
# Only one group is in the regexp.
# Create the new array as a single data-type and then
# re-interpret as a single-field structured array.
newdtype = np.dtype(dtype[dtype.names[0]])
output = np.array(seq, dtype=newdtype)
output.dtype = dtype
else:
output = np.array(seq, dtype=dtype)
return output
finally:
if own_fh:
file.close()
#####--------------------------------------------------------------------------
#---- --- ASCII functions ---
#####--------------------------------------------------------------------------
def genfromtxt(fname, dtype=float, comments='#', delimiter=None,
skiprows=0, skip_header=0, skip_footer=0, converters=None,
missing='', missing_values=None, filling_values=None,
usecols=None, names=None,
excludelist=None, deletechars=None, replace_space='_',
autostrip=False, case_sensitive=True, defaultfmt="f%i",
unpack=None, usemask=False, loose=True, invalid_raise=True):
"""
Load data from a text file, with missing values handled as specified.
Each line past the first `skip_header` lines is split at the `delimiter`
character, and characters following the `comments` character are discarded.
Parameters
----------
fname : file or str
File, filename, or generator to read. If the filename extension is
`.gz` or `.bz2`, the file is first decompressed. Note that
generators must return byte strings in Python 3k.
dtype : dtype, optional
Data type of the resulting array.
If None, the dtypes will be determined by the contents of each
column, individually.
comments : str, optional
The character used to indicate the start of a comment.
All the characters occurring on a line after a comment are discarded
delimiter : str, int, or sequence, optional
The string used to separate values. By default, any consecutive
whitespaces act as delimiter. An integer or sequence of integers
can also be provided as width(s) of each field.
skip_header : int, optional
The numbers of lines to skip at the beginning of the file.
skip_footer : int, optional
The numbers of lines to skip at the end of the file
converters : variable, optional
The set of functions that convert the data of a column to a value.
The converters can also be used to provide a default value
for missing data: ``converters = {3: lambda s: float(s or 0)}``.
missing_values : variable, optional
The set of strings corresponding to missing data.
filling_values : variable, optional
The set of values to be used as default when the data are missing.
usecols : sequence, optional
Which columns to read, with 0 being the first. For example,
``usecols = (1, 4, 5)`` will extract the 2nd, 5th and 6th columns.
names : {None, True, str, sequence}, optional
If `names` is True, the field names are read from the first valid line
after the first `skip_header` lines.
If `names` is a sequence or a single-string of comma-separated names,
the names will be used to define the field names in a structured dtype.
If `names` is None, the names of the dtype fields will be used, if any.
excludelist : sequence, optional
A list of names to exclude. This list is appended to the default list
['return','file','print']. Excluded names are appended an underscore:
for example, `file` would become `file_`.
deletechars : str, optional
A string combining invalid characters that must be deleted from the
names.
defaultfmt : str, optional
A format used to define default field names, such as "f%i" or "f_%02i".
autostrip : bool, optional
Whether to automatically strip white spaces from the variables.
replace_space : char, optional
Character(s) used in replacement of white spaces in the variables
names. By default, use a '_'.
case_sensitive : {True, False, 'upper', 'lower'}, optional
If True, field names are case sensitive.
If False or 'upper', field names are converted to upper case.
If 'lower', field names are converted to lower case.
unpack : bool, optional
If True, the returned array is transposed, so that arguments may be
unpacked using ``x, y, z = loadtxt(...)``
usemask : bool, optional
If True, return a masked array.
If False, return a regular array.
invalid_raise : bool, optional
If True, an exception is raised if an inconsistency is detected in the
number of columns.
If False, a warning is emitted and the offending lines are skipped.
Returns
-------
out : ndarray
Data read from the text file. If `usemask` is True, this is a
masked array.
See Also
--------
numpy.loadtxt : equivalent function when no data is missing.
Notes
-----
* When spaces are used as delimiters, or when no delimiter has been given
as input, there should not be any missing data between two fields.
    * When the variables are named (either by a flexible dtype or with `names`),
there must not be any header in the file (else a ValueError
exception is raised).
* Individual values are not stripped of spaces by default.
When using a custom converter, make sure the function does remove spaces.
References
----------
.. [1] Numpy User Guide, section `I/O with Numpy
<http://docs.scipy.org/doc/numpy/user/basics.io.genfromtxt.html>`_.
Examples
    --------
>>> from StringIO import StringIO
>>> import numpy as np
Comma delimited file with mixed dtype
>>> s = StringIO("1,1.3,abcde")
>>> data = np.genfromtxt(s, dtype=[('myint','i8'),('myfloat','f8'),
... ('mystring','S5')], delimiter=",")
>>> data
array((1, 1.3, 'abcde'),
dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', '|S5')])
Using dtype = None
>>> s.seek(0) # needed for StringIO example only
>>> data = np.genfromtxt(s, dtype=None,
... names = ['myint','myfloat','mystring'], delimiter=",")
>>> data
array((1, 1.3, 'abcde'),
dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', '|S5')])
Specifying dtype and names
>>> s.seek(0)
>>> data = np.genfromtxt(s, dtype="i8,f8,S5",
... names=['myint','myfloat','mystring'], delimiter=",")
>>> data
array((1, 1.3, 'abcde'),
dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', '|S5')])
An example with fixed-width columns
>>> s = StringIO("11.3abcde")
>>> data = np.genfromtxt(s, dtype=None, names=['intvar','fltvar','strvar'],
... delimiter=[1,3,5])
>>> data
array((1, 1.3, 'abcde'),
dtype=[('intvar', '<i8'), ('fltvar', '<f8'), ('strvar', '|S5')])
"""
# Py3 data conversions to bytes, for convenience
if comments is not None:
comments = asbytes(comments)
if isinstance(delimiter, unicode):
delimiter = asbytes(delimiter)
if isinstance(missing, unicode):
missing = asbytes(missing)
if isinstance(missing_values, (unicode, list, tuple)):
missing_values = asbytes_nested(missing_values)
#
if usemask:
from numpy.ma import MaskedArray, make_mask_descr
# Check the input dictionary of converters
user_converters = converters or {}
if not isinstance(user_converters, dict):
errmsg = "The input argument 'converter' should be a valid dictionary "\
"(got '%s' instead)"
raise TypeError(errmsg % type(user_converters))
# Initialize the filehandle, the LineSplitter and the NameValidator
own_fhd = False
try:
if isinstance(fname, basestring):
fhd = iter(np.lib._datasource.open(fname, 'rbU'))
own_fhd = True
else:
fhd = iter(fname)
except TypeError:
raise TypeError("fname mustbe a string, filehandle, or generator. "\
"(got %s instead)" % type(fname))
split_line = LineSplitter(delimiter=delimiter, comments=comments,
autostrip=autostrip)._handyman
validate_names = NameValidator(excludelist=excludelist,
deletechars=deletechars,
case_sensitive=case_sensitive,
replace_space=replace_space)
# Get the first valid lines after the first skiprows ones ..
if skiprows:
warnings.warn(\
"The use of `skiprows` is deprecated, it will be removed in numpy 2.0.\n" \
"Please use `skip_header` instead.",
DeprecationWarning)
skip_header = skiprows
# Skip the first `skip_header` rows
for i in xrange(skip_header):
fhd.next()
# Keep on until we find the first valid values
first_values = None
try:
while not first_values:
first_line = fhd.next()
if names is True:
if comments in first_line:
first_line = asbytes('').join(first_line.split(comments)[1:])
first_values = split_line(first_line)
except StopIteration:
# return an empty array if the datafile is empty
first_line = asbytes('')
first_values = []
warnings.warn('genfromtxt: Empty input file: "%s"' % fname)
# Should we take the first values as names ?
if names is True:
fval = first_values[0].strip()
if fval in comments:
del first_values[0]
# Check the columns to use: make sure `usecols` is a list
if usecols is not None:
try:
usecols = [_.strip() for _ in usecols.split(",")]
except AttributeError:
try:
usecols = list(usecols)
except TypeError:
usecols = [usecols, ]
nbcols = len(usecols or first_values)
# Check the names and overwrite the dtype.names if needed
if names is True:
names = validate_names([_bytes_to_name(_.strip())
for _ in first_values])
first_line = asbytes('')
elif _is_string_like(names):
names = validate_names([_.strip() for _ in names.split(',')])
elif names:
names = validate_names(names)
# Get the dtype
if dtype is not None:
dtype = easy_dtype(dtype, defaultfmt=defaultfmt, names=names)
# Make sure the names is a list (for 2.5)
if names is not None:
names = list(names)
if usecols:
for (i, current) in enumerate(usecols):
# if usecols is a list of names, convert to a list of indices
if _is_string_like(current):
usecols[i] = names.index(current)
elif current < 0:
usecols[i] = current + len(first_values)
# If the dtype is not None, make sure we update it
if (dtype is not None) and (len(dtype) > nbcols):
descr = dtype.descr
dtype = np.dtype([descr[_] for _ in usecols])
names = list(dtype.names)
# If `names` is not None, update the names
elif (names is not None) and (len(names) > nbcols):
names = [names[_] for _ in usecols]
elif (names is not None) and (dtype is not None):
names = list(dtype.names)
# Process the missing values ...............................
# Rename missing_values for convenience
user_missing_values = missing_values or ()
# Define the list of missing_values (one column: one list)
missing_values = [list([asbytes('')]) for _ in range(nbcols)]
# We have a dictionary: process it field by field
if isinstance(user_missing_values, dict):
# Loop on the items
for (key, val) in user_missing_values.items():
# Is the key a string ?
if _is_string_like(key):
try:
# Transform it into an integer
key = names.index(key)
except ValueError:
# We couldn't find it: the name must have been dropped, then
continue
# Redefine the key as needed if it's a column number
if usecols:
try:
key = usecols.index(key)
except ValueError:
pass
# Transform the value as a list of string
if isinstance(val, (list, tuple)):
val = [str(_) for _ in val]
else:
val = [str(val), ]
# Add the value(s) to the current list of missing
if key is None:
# None acts as default
for miss in missing_values:
miss.extend(val)
else:
missing_values[key].extend(val)
# We have a sequence : each item matches a column
elif isinstance(user_missing_values, (list, tuple)):
for (value, entry) in zip(user_missing_values, missing_values):
value = str(value)
if value not in entry:
entry.append(value)
# We have a string : apply it to all entries
elif isinstance(user_missing_values, bytes):
user_value = user_missing_values.split(asbytes(","))
for entry in missing_values:
entry.extend(user_value)
# We have something else: apply it to all entries
else:
for entry in missing_values:
entry.extend([str(user_missing_values)])
# Process the deprecated `missing`
if missing != asbytes(''):
warnings.warn(\
"The use of `missing` is deprecated, it will be removed in Numpy 2.0.\n" \
"Please use `missing_values` instead.",
DeprecationWarning)
values = [str(_) for _ in missing.split(asbytes(","))]
for entry in missing_values:
entry.extend(values)
# Process the filling_values ...............................
# Rename the input for convenience
user_filling_values = filling_values or []
# Define the default
filling_values = [None] * nbcols
# We have a dictionary : update each entry individually
if isinstance(user_filling_values, dict):
for (key, val) in user_filling_values.items():
if _is_string_like(key):
try:
# Transform it into an integer
key = names.index(key)
except ValueError:
# We couldn't find it: the name must have been dropped, then
continue
# Redefine the key if it's a column number and usecols is defined
if usecols:
try:
key = usecols.index(key)
except ValueError:
pass
# Add the value to the list
filling_values[key] = val
# We have a sequence : update on a one-to-one basis
elif isinstance(user_filling_values, (list, tuple)):
n = len(user_filling_values)
if (n <= nbcols):
filling_values[:n] = user_filling_values
else:
filling_values = user_filling_values[:nbcols]
# We have something else : use it for all entries
else:
filling_values = [user_filling_values] * nbcols
# Initialize the converters ................................
if dtype is None:
# Note: we can't use a [...]*nbcols, as we would have 3 times the same
# ... converter, instead of 3 different converters.
converters = [StringConverter(None, missing_values=miss, default=fill)
for (miss, fill) in zip(missing_values, filling_values)]
else:
dtype_flat = flatten_dtype(dtype, flatten_base=True)
# Initialize the converters
if len(dtype_flat) > 1:
# Flexible type : get a converter from each dtype
zipit = zip(dtype_flat, missing_values, filling_values)
converters = [StringConverter(dt, locked=True,
missing_values=miss, default=fill)
for (dt, miss, fill) in zipit]
else:
# Set to a default converter (but w/ different missing values)
zipit = zip(missing_values, filling_values)
converters = [StringConverter(dtype, locked=True,
missing_values=miss, default=fill)
for (miss, fill) in zipit]
# Update the converters to use the user-defined ones
uc_update = []
for (i, conv) in user_converters.items():
# If the converter is specified by column names, use the index instead
if _is_string_like(i):
try:
i = names.index(i)
except ValueError:
continue
elif usecols:
try:
i = usecols.index(i)
except ValueError:
# Unused converter specified
continue
# Find the value to test:
if len(first_line):
testing_value = first_values[i]
else:
testing_value = None
converters[i].update(conv, locked=True,
testing_value=testing_value,
default=filling_values[i],
missing_values=missing_values[i],)
uc_update.append((i, conv))
# Make sure we have the corrected keys in user_converters...
user_converters.update(uc_update)
miss_chars = [_.missing_values for _ in converters]
# Initialize the output lists ...
# ... rows
rows = []
append_to_rows = rows.append
# ... masks
if usemask:
masks = []
append_to_masks = masks.append
# ... invalid
invalid = []
append_to_invalid = invalid.append
# Parse each line
for (i, line) in enumerate(itertools.chain([first_line, ], fhd)):
values = split_line(line)
nbvalues = len(values)
# Skip an empty line
if nbvalues == 0:
continue
# Select only the columns we need
if usecols:
try:
values = [values[_] for _ in usecols]
except IndexError:
append_to_invalid((i + skip_header + 1, nbvalues))
continue
elif nbvalues != nbcols:
append_to_invalid((i + skip_header + 1, nbvalues))
continue
# Store the values
append_to_rows(tuple(values))
if usemask:
append_to_masks(tuple([v.strip() in m
for (v, m) in zip(values, missing_values)]))
if own_fhd:
fhd.close()
# Upgrade the converters (if needed)
if dtype is None:
for (i, converter) in enumerate(converters):
current_column = map(itemgetter(i), rows)
try:
converter.iterupgrade(current_column)
except ConverterLockError:
errmsg = "Converter #%i is locked and cannot be upgraded: " % i
current_column = itertools.imap(itemgetter(i), rows)
for (j, value) in enumerate(current_column):
try:
converter.upgrade(value)
except (ConverterError, ValueError):
errmsg += "(occurred line #%i for value '%s')"
errmsg %= (j + 1 + skip_header, value)
raise ConverterError(errmsg)
# Check that we don't have invalid values
nbinvalid = len(invalid)
if nbinvalid > 0:
nbrows = len(rows) + nbinvalid - skip_footer
# Construct the error message
template = " Line #%%i (got %%i columns instead of %i)" % nbcols
if skip_footer > 0:
nbinvalid_skipped = len([_ for _ in invalid
if _[0] > nbrows + skip_header])
invalid = invalid[:nbinvalid - nbinvalid_skipped]
skip_footer -= nbinvalid_skipped
#
# nbrows -= skip_footer
# errmsg = [template % (i, nb)
# for (i, nb) in invalid if i < nbrows]
# else:
errmsg = [template % (i, nb)
for (i, nb) in invalid]
if len(errmsg):
errmsg.insert(0, "Some errors were detected !")
errmsg = "\n".join(errmsg)
# Raise an exception ?
if invalid_raise:
raise ValueError(errmsg)
# Issue a warning ?
else:
warnings.warn(errmsg, ConversionWarning)
# Strip the last skip_footer data
if skip_footer > 0:
rows = rows[:-skip_footer]
if usemask:
masks = masks[:-skip_footer]
# Convert each value according to the converter:
# We want to modify the list in place to avoid creating a new one...
# if loose:
# conversionfuncs = [conv._loose_call for conv in converters]
# else:
# conversionfuncs = [conv._strict_call for conv in converters]
# for (i, vals) in enumerate(rows):
# rows[i] = tuple([convert(val)
# for (convert, val) in zip(conversionfuncs, vals)])
if loose:
rows = zip(*[map(converter._loose_call, map(itemgetter(i), rows))
for (i, converter) in enumerate(converters)])
else:
rows = zip(*[map(converter._strict_call, map(itemgetter(i), rows))
for (i, converter) in enumerate(converters)])
# Reset the dtype
data = rows
if dtype is None:
# Get the dtypes from the types of the converters
column_types = [conv.type for conv in converters]
# Find the columns with strings...
strcolidx = [i for (i, v) in enumerate(column_types)
if v in (type('S'), np.string_)]
# ... and take the largest number of chars.
for i in strcolidx:
column_types[i] = "|S%i" % max(len(row[i]) for row in data)
#
if names is None:
# If the dtype is uniform, don't define names, else use ''
base = set([c.type for c in converters if c._checked])
if len(base) == 1:
(ddtype, mdtype) = (list(base)[0], np.bool)
else:
ddtype = [(defaultfmt % i, dt)
for (i, dt) in enumerate(column_types)]
if usemask:
mdtype = [(defaultfmt % i, np.bool)
for (i, dt) in enumerate(column_types)]
else:
ddtype = zip(names, column_types)
mdtype = zip(names, [np.bool] * len(column_types))
output = np.array(data, dtype=ddtype)
if usemask:
outputmask = np.array(masks, dtype=mdtype)
else:
# Overwrite the initial dtype names if needed
if names and dtype.names:
dtype.names = names
# Case 1. We have a structured type
if len(dtype_flat) > 1:
# Nested dtype, eg [('a', int), ('b', [('b0', int), ('b1', 'f4')])]
# First, create the array using a flattened dtype:
# [('a', int), ('b1', int), ('b2', float)]
# Then, view the array using the specified dtype.
if 'O' in (_.char for _ in dtype_flat):
if has_nested_fields(dtype):
errmsg = "Nested fields involving objects "\
"are not supported..."
raise NotImplementedError(errmsg)
else:
output = np.array(data, dtype=dtype)
else:
rows = np.array(data, dtype=[('', _) for _ in dtype_flat])
output = rows.view(dtype)
# Now, process the rowmasks the same way
if usemask:
rowmasks = np.array(masks,
dtype=np.dtype([('', np.bool)
for t in dtype_flat]))
# Construct the new dtype
mdtype = make_mask_descr(dtype)
outputmask = rowmasks.view(mdtype)
# Case #2. We have a basic dtype
else:
# We used some user-defined converters
if user_converters:
ishomogeneous = True
descr = []
for (i, ttype) in enumerate([conv.type for conv in converters]):
# Keep the dtype of the current converter
if i in user_converters:
ishomogeneous &= (ttype == dtype.type)
if ttype == np.string_:
ttype = "|S%i" % max(len(row[i]) for row in data)
descr.append(('', ttype))
else:
descr.append(('', dtype))
# So we changed the dtype ?
if not ishomogeneous:
# We have more than one field
if len(descr) > 1:
dtype = np.dtype(descr)
# We have only one field: drop the name if not needed.
else:
dtype = np.dtype(ttype)
#
output = np.array(data, dtype)
if usemask:
if dtype.names:
mdtype = [(_, np.bool) for _ in dtype.names]
else:
mdtype = np.bool
outputmask = np.array(masks, dtype=mdtype)
# Try to take care of the missing data we missed
names = output.dtype.names
if usemask and names:
for (name, conv) in zip(names or (), converters):
missing_values = [conv(_) for _ in conv.missing_values
if _ != asbytes('')]
for mval in missing_values:
outputmask[name] |= (output[name] == mval)
# Construct the final array
if usemask:
output = output.view(MaskedArray)
output._mask = outputmask
if unpack:
return output.squeeze().T
return output.squeeze()
def ndfromtxt(fname, **kwargs):
"""
Load ASCII data stored in a file and return it as a single array.
Parameters
----------
fname, kwargs : For a description of input parameters, see `genfromtxt`.
See Also
--------
numpy.genfromtxt : generic function.
"""
kwargs['usemask'] = False
return genfromtxt(fname, **kwargs)
def mafromtxt(fname, **kwargs):
"""
Load ASCII data stored in a text file and return a masked array.
Parameters
----------
fname, kwargs : For a description of input parameters, see `genfromtxt`.
See Also
--------
numpy.genfromtxt : generic function to load ASCII data.
"""
kwargs['usemask'] = True
return genfromtxt(fname, **kwargs)
def recfromtxt(fname, **kwargs):
"""
Load ASCII data from a file and return it in a record array.
If ``usemask=False`` a standard `recarray` is returned,
if ``usemask=True`` a MaskedRecords array is returned.
Parameters
----------
fname, kwargs : For a description of input parameters, see `genfromtxt`.
See Also
--------
numpy.genfromtxt : generic function
Notes
-----
By default, `dtype` is None, which means that the data-type of the output
array will be determined from the data.
"""
kwargs.update(dtype=kwargs.get('dtype', None))
usemask = kwargs.get('usemask', False)
output = genfromtxt(fname, **kwargs)
if usemask:
from numpy.ma.mrecords import MaskedRecords
output = output.view(MaskedRecords)
else:
output = output.view(np.recarray)
return output
def recfromcsv(fname, **kwargs):
"""
Load ASCII data stored in a comma-separated file.
The returned array is a record array (if ``usemask=False``, see
`recarray`) or a masked record array (if ``usemask=True``,
see `ma.mrecords.MaskedRecords`).
Parameters
----------
fname, kwargs : For a description of input parameters, see `genfromtxt`.
See Also
--------
numpy.genfromtxt : generic function to load ASCII data.
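    Examples
    --------
    A minimal sketch; the column names are read from the header row and
    lower-cased by default:
    >>> from StringIO import StringIO
    >>> s = StringIO("a,b\\n0,1\\n2,3")
    >>> rec = np.recfromcsv(s)
    >>> rec.a
    array([0, 2])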
"""
case_sensitive = kwargs.get('case_sensitive', "lower") or "lower"
names = kwargs.get('names', True)
if names is None:
names = True
    kwargs.update(dtype=kwargs.get('dtype', None),
delimiter=kwargs.get('delimiter', ",") or ",",
names=names,
case_sensitive=case_sensitive)
usemask = kwargs.get("usemask", False)
output = genfromtxt(fname, **kwargs)
if usemask:
from numpy.ma.mrecords import MaskedRecords
output = output.view(MaskedRecords)
else:
output = output.view(np.recarray)
return output
| gpl-2.0 |
mjsauvinen/P4UL | pyFootprint/footprintMaskOutput.py | 1 | 4056 | #!/usr/bin/env python3
from utilities import filesFromList
from utilities import writeLog
from footprintTools import *
from mapTools import readNumpyZTile, filterAndScale
import sys
import argparse
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
'''
Author: Mikko Auvinen
[email protected]
University of Helsinki &
Finnish Meteorological Institute
'''
# = # = # = # Function definitions # = # = # = # = # = # = #
# = # = # = # = # = # = # = # = # = # = # = # = # = # = #
# = # = # = # End Function definitions # = # = # = # = # = #
#========================================================== #
parser = argparse.ArgumentParser(prog='footprintMaskOutput.py')
parser.add_argument("-f", "--filename", type=str,help="Footprint file. (npz format)")
parser.add_argument("-fm", "--filemask", type=str, help="Mask file. (npz format)")
parser.add_argument("-pp", "--printOnly", help="Only print the contour. Don't save.",\
action="store_true", default=False)
parser.add_argument("--save", help="Save the figure right away.", action="store_true",\
default=False)
args = parser.parse_args()
writeLog( parser, args, args.printOnly )
#========================================================== #
# Rename ... that's all.
filename = args.filename
filemask = args.filemask
printOnly = args.printOnly
saveOn = args.save # save the fig
# = = = = = = = = = = = = = = = = = = = = = = = = = = = = #
# xO := origin coords. # xt := target coords. # ut := target speed
try:
Fp, X, Y, Z, C, IDict = readNumpyZFootprint( filename, True ) # IdsOn=True
except:
sys.exit(' Could not read the footprint file: {}'.format(filename))
try:
Rdict = readNumpyZTile( filemask )
Rm = Rdict['R']
  Rmdims = np.array(np.shape(Rm))
RmOrig = Rdict['GlobOrig']
dPx = Rdict['dPx']
Rdict = None
except:
sys.exit(' Could not read the mask file: {}'.format(filemask))
# To unify the treatment, let's add 100% to the IDict.
if(not IDict): IDict = {} # In case IDict comes in as <None>
IDict[100] = np.ones( np.shape(Fp) , bool ) # All true
Nm = int(np.max(Rm))+1 # Number of different mask ID's (assuming first is zero)
dA = np.prod(dPx)
# Read the user specified source strengths
Qd = np.ones(Nm, float) # Default
try:
  Qstr = input("Enter {} source strengths (Q) separated by commas: ".format(Nm))
  Q = np.array([ float(q) for q in Qstr.split(',') ])
except:
print(" All source strengths are set to unity. Q[:] = 1.")
Q = Qd
if( len(Q) != Nm ):
sys.exit(" Error! len(Q) = {}. It should be {}. Exiting ...".format(len(Qe),Nm))
for key in IDict.keys():
# CSV header
idx = IDict[key]
print('{}%:\n \"Mask ID\",\"[%]\",\"SUM( Fp*M*dA )\",\"SUM( FP*dA )\" ,\" Q \"'.format(key))
#Fptot = np.sum(Fp[idx]*dA)
Fptot = 0.
FpM = np.zeros( Nm )
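  # Accumulate the source-strength weighted footprint contribution of each
  # mask class within the current coverage level; Fptot is their total and
  # is used below to express each contribution as a percentage.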
for im in range(Nm):
M = (Rm == im).astype(int)
#print(' sum(Fp*idx) = {}, min(M) = {}'.format(np.min(Fp*idx), np.min(M)))
FpM[im] = Q[im] * np.sum(Fp*idx*M*dA)
Fptot += FpM[im]
for im in range(Nm):
pStr = '{}, {}, {}, {}, {}'.format(im,FpM[im]/Fptot*100.,FpM[im],Fptot,Q[im])
print(pStr)
print('----------')
mpl.rcParams['font.size'] = 18.0
plt.figure(num=1, figsize=(9.,6.));
lbl = np.array(['Buildings','Impervious','Grass',\
'Low Vegetation','High Vegetation', 'Water', '','Road'])
dat = FpM/Fptot*100.
ix = (dat > 0.) # Valid points. In order to isolate meaningless entries.
#colors = ['yellowgreen', 'gold', 'lightskyblue', 'lightcoral']
cs=np.array(['lightskyblue', 'g', 'r', 'c', 'm', 'y', 'k', 'w'])
expl = np.zeros(Nm)
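# Slightly offset (explode) every wedge except the first two for readability.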
expl[2:] = 0.1
plt.pie(dat[ix],explode=expl[ix],labels=lbl[ix],colors=cs[ix],\
autopct='%1.1f%%',shadow=True, startangle=90)
#plt.pie(sizes, explode=explode, labels=labels, autopct='%1.1f%%', shadow=True, startangle=90)
# Set aspect ratio to be equal so that pie is drawn as a circle.
plt.axis('equal')
if(saveOn):
plt.savefig( filename.strip('.npz')+'.jpg' )
plt.show()
#print(' Footprint: {} '.format(np.shape(Fp)))
#print(' Mask : {} '.format(np.shape(Rm)))
#print(' FpM : {} '.format(FpM))
| mit |
DaveBerkeley/arduino | sketchbook/current_meter/graph2.py | 1 | 1242 |
import time
import sys
import serial
target_cycle = 0
if len(sys.argv) > 1:
target_cycle = int(sys.argv[1])
s = serial.Serial("/dev/ttyUSB0", 9600, timeout=1)
for i in range(10):
time.sleep(0.1)
s.write("W\n") # Set Wave mode
def get_data_():
ii = []
vv = []
p_total = 0.0
while True:
line = s.readline()
#print `line`
if not line:
continue
if line.startswith("***"):
break
try:
parts = line.split(" ")
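            # Scale the raw serial readings to engineering units; the
            # 378/512 and 128.2/512 factors are presumably calibration
            # constants matching the Arduino sketch.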
v = int(parts[0], 10) * (378 / 512.0)
i = float(parts[1]) * (128.2 / 512.0)
except:
continue
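        # The current samples are scaled up (x20 here), presumably so the
        # trace stays visible on the same axis as the mains voltage.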
ii.append(i * 20)
vv.append(v)
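        # Accumulate instantaneous power (v*i); its mean over the captured
        # samples is printed once the cycle is complete.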
p_total += i * v
if vv:
print p_total / len(vv)
sys.stdout.flush()
return ii, vv
def get_data():
while True:
ii, vv = get_data_()
if len(ii) >= 128:
return ii, vv
#
#
fout = open("/tmp/mains.csv", "w")
i, v = get_data()
i, v = get_data()
print i, v
from matplotlib import pyplot
pyplot.ion()
axes = pyplot.axes()
v_line, = pyplot.plot(v)
i_line, = pyplot.plot(i)
while True:
i, v = get_data()
v_line.set_ydata(v)
i_line.set_ydata(i)
pyplot.draw()
# FIN
| gpl-2.0 |
raghavrv/scikit-learn | sklearn/model_selection/_split.py | 5 | 74919 | """
The :mod:`sklearn.model_selection._split` module includes classes and
functions to split the data based on a preset strategy.
"""
# Author: Alexandre Gramfort <[email protected]>,
# Gael Varoquaux <[email protected]>,
# Olivier Grisel <[email protected]>
# Raghav RV <[email protected]>
# License: BSD 3 clause
from __future__ import print_function
from __future__ import division
import warnings
from itertools import chain, combinations
from collections import Iterable
from math import ceil, floor
import numbers
from abc import ABCMeta, abstractmethod
import numpy as np
from ..utils import indexable, check_random_state, safe_indexing
from ..utils.validation import _num_samples, column_or_1d
from ..utils.validation import check_array
from ..utils.multiclass import type_of_target
from ..externals.six import with_metaclass
from ..externals.six.moves import zip
from ..utils.fixes import signature, comb
from ..base import _pprint
__all__ = ['BaseCrossValidator',
'KFold',
'GroupKFold',
'LeaveOneGroupOut',
'LeaveOneOut',
'LeavePGroupsOut',
'LeavePOut',
'RepeatedStratifiedKFold',
'RepeatedKFold',
'ShuffleSplit',
'GroupShuffleSplit',
'StratifiedKFold',
'StratifiedShuffleSplit',
'PredefinedSplit',
'train_test_split',
'check_cv']
class BaseCrossValidator(with_metaclass(ABCMeta)):
"""Base class for all cross-validators
Implementations must define `_iter_test_masks` or `_iter_test_indices`.
"""
def __init__(self):
# We need this for the build_repr to work properly in py2.7
# see #6304
pass
def split(self, X, y=None, groups=None):
"""Generate indices to split data into training and test set.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, of length n_samples
The target variable for supervised learning problems.
groups : array-like, with shape (n_samples,), optional
Group labels for the samples used while splitting the dataset into
train/test set.
Returns
-------
train : ndarray
The training set indices for that split.
test : ndarray
The testing set indices for that split.
"""
X, y, groups = indexable(X, y, groups)
indices = np.arange(_num_samples(X))
for test_index in self._iter_test_masks(X, y, groups):
train_index = indices[np.logical_not(test_index)]
test_index = indices[test_index]
yield train_index, test_index
# Since subclasses must implement either _iter_test_masks or
# _iter_test_indices, neither can be abstract.
def _iter_test_masks(self, X=None, y=None, groups=None):
"""Generates boolean masks corresponding to test sets.
By default, delegates to _iter_test_indices(X, y, groups)
"""
for test_index in self._iter_test_indices(X, y, groups):
test_mask = np.zeros(_num_samples(X), dtype=np.bool)
test_mask[test_index] = True
yield test_mask
def _iter_test_indices(self, X=None, y=None, groups=None):
"""Generates integer indices corresponding to test sets."""
raise NotImplementedError
@abstractmethod
def get_n_splits(self, X=None, y=None, groups=None):
"""Returns the number of splitting iterations in the cross-validator"""
def __repr__(self):
return _build_repr(self)
class LeaveOneOut(BaseCrossValidator):
"""Leave-One-Out cross-validator
Provides train/test indices to split data in train/test sets. Each
sample is used once as a test set (singleton) while the remaining
samples form the training set.
Note: ``LeaveOneOut()`` is equivalent to ``KFold(n_splits=n)`` and
``LeavePOut(p=1)`` where ``n`` is the number of samples.
Due to the high number of test sets (which is the same as the
number of samples) this cross-validation method can be very costly.
For large datasets one should favor :class:`KFold`, :class:`ShuffleSplit`
or :class:`StratifiedKFold`.
Read more in the :ref:`User Guide <cross_validation>`.
Examples
--------
>>> from sklearn.model_selection import LeaveOneOut
>>> X = np.array([[1, 2], [3, 4]])
>>> y = np.array([1, 2])
>>> loo = LeaveOneOut()
>>> loo.get_n_splits(X)
2
>>> print(loo)
LeaveOneOut()
>>> for train_index, test_index in loo.split(X):
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
... print(X_train, X_test, y_train, y_test)
TRAIN: [1] TEST: [0]
[[3 4]] [[1 2]] [2] [1]
TRAIN: [0] TEST: [1]
[[1 2]] [[3 4]] [1] [2]
See also
--------
LeaveOneGroupOut
For splitting the data according to explicit, domain-specific
stratification of the dataset.
GroupKFold: K-fold iterator variant with non-overlapping groups.
"""
def _iter_test_indices(self, X, y=None, groups=None):
return range(_num_samples(X))
def get_n_splits(self, X, y=None, groups=None):
"""Returns the number of splitting iterations in the cross-validator
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
y : object
Always ignored, exists for compatibility.
groups : object
Always ignored, exists for compatibility.
Returns
-------
n_splits : int
Returns the number of splitting iterations in the cross-validator.
"""
if X is None:
raise ValueError("The 'X' parameter should not be None.")
return _num_samples(X)
class LeavePOut(BaseCrossValidator):
"""Leave-P-Out cross-validator
Provides train/test indices to split data in train/test sets. This results
in testing on all distinct samples of size p, while the remaining n - p
samples form the training set in each iteration.
Note: ``LeavePOut(p)`` is NOT equivalent to
``KFold(n_splits=n_samples // p)`` which creates non-overlapping test sets.
Due to the high number of iterations which grows combinatorically with the
number of samples this cross-validation method can be very costly. For
large datasets one should favor :class:`KFold`, :class:`StratifiedKFold`
or :class:`ShuffleSplit`.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
p : int
Size of the test sets.
Examples
--------
>>> from sklearn.model_selection import LeavePOut
>>> X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
>>> y = np.array([1, 2, 3, 4])
>>> lpo = LeavePOut(2)
>>> lpo.get_n_splits(X)
6
>>> print(lpo)
LeavePOut(p=2)
>>> for train_index, test_index in lpo.split(X):
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [2 3] TEST: [0 1]
TRAIN: [1 3] TEST: [0 2]
TRAIN: [1 2] TEST: [0 3]
TRAIN: [0 3] TEST: [1 2]
TRAIN: [0 2] TEST: [1 3]
TRAIN: [0 1] TEST: [2 3]
"""
def __init__(self, p):
self.p = p
def _iter_test_indices(self, X, y=None, groups=None):
for combination in combinations(range(_num_samples(X)), self.p):
yield np.array(combination)
def get_n_splits(self, X, y=None, groups=None):
"""Returns the number of splitting iterations in the cross-validator
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
y : object
Always ignored, exists for compatibility.
groups : object
Always ignored, exists for compatibility.
"""
if X is None:
raise ValueError("The 'X' parameter should not be None.")
return int(comb(_num_samples(X), self.p, exact=True))
class _BaseKFold(with_metaclass(ABCMeta, BaseCrossValidator)):
"""Base class for KFold, GroupKFold, and StratifiedKFold"""
@abstractmethod
def __init__(self, n_splits, shuffle, random_state):
if not isinstance(n_splits, numbers.Integral):
raise ValueError('The number of folds must be of Integral type. '
'%s of type %s was passed.'
% (n_splits, type(n_splits)))
n_splits = int(n_splits)
if n_splits <= 1:
raise ValueError(
"k-fold cross-validation requires at least one"
" train/test split by setting n_splits=2 or more,"
" got n_splits={0}.".format(n_splits))
if not isinstance(shuffle, bool):
raise TypeError("shuffle must be True or False;"
" got {0}".format(shuffle))
self.n_splits = n_splits
self.shuffle = shuffle
self.random_state = random_state
def split(self, X, y=None, groups=None):
"""Generate indices to split data into training and test set.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, shape (n_samples,)
The target variable for supervised learning problems.
groups : array-like, with shape (n_samples,), optional
Group labels for the samples used while splitting the dataset into
train/test set.
Returns
-------
train : ndarray
The training set indices for that split.
test : ndarray
The testing set indices for that split.
"""
X, y, groups = indexable(X, y, groups)
n_samples = _num_samples(X)
if self.n_splits > n_samples:
raise ValueError(
("Cannot have number of splits n_splits={0} greater"
" than the number of samples: {1}.").format(self.n_splits,
n_samples))
for train, test in super(_BaseKFold, self).split(X, y, groups):
yield train, test
def get_n_splits(self, X=None, y=None, groups=None):
"""Returns the number of splitting iterations in the cross-validator
Parameters
----------
X : object
Always ignored, exists for compatibility.
y : object
Always ignored, exists for compatibility.
groups : object
Always ignored, exists for compatibility.
Returns
-------
n_splits : int
Returns the number of splitting iterations in the cross-validator.
"""
return self.n_splits
class KFold(_BaseKFold):
"""K-Folds cross-validator
Provides train/test indices to split data in train/test sets. Split
dataset into k consecutive folds (without shuffling by default).
Each fold is then used once as a validation while the k - 1 remaining
folds form the training set.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n_splits : int, default=3
Number of folds. Must be at least 2.
shuffle : boolean, optional
Whether to shuffle the data before splitting into batches.
random_state : int, RandomState instance or None, optional, default=None
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`. Used when ``shuffle`` == True.
Examples
--------
>>> from sklearn.model_selection import KFold
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([1, 2, 3, 4])
>>> kf = KFold(n_splits=2)
>>> kf.get_n_splits(X)
2
>>> print(kf) # doctest: +NORMALIZE_WHITESPACE
KFold(n_splits=2, random_state=None, shuffle=False)
>>> for train_index, test_index in kf.split(X):
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [2 3] TEST: [0 1]
TRAIN: [0 1] TEST: [2 3]
Notes
-----
The first ``n_samples % n_splits`` folds have size
``n_samples // n_splits + 1``, other folds have size
``n_samples // n_splits``, where ``n_samples`` is the number of samples.
See also
--------
StratifiedKFold
        Takes class information into account to avoid building folds with
imbalanced class distributions (for binary or multiclass
classification tasks).
GroupKFold: K-fold iterator variant with non-overlapping groups.
RepeatedKFold: Repeats K-Fold n times.
"""
def __init__(self, n_splits=3, shuffle=False,
random_state=None):
super(KFold, self).__init__(n_splits, shuffle, random_state)
def _iter_test_indices(self, X, y=None, groups=None):
n_samples = _num_samples(X)
indices = np.arange(n_samples)
if self.shuffle:
check_random_state(self.random_state).shuffle(indices)
n_splits = self.n_splits
fold_sizes = (n_samples // n_splits) * np.ones(n_splits, dtype=np.int)
fold_sizes[:n_samples % n_splits] += 1
current = 0
for fold_size in fold_sizes:
start, stop = current, current + fold_size
yield indices[start:stop]
current = stop
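# A small sketch (a hypothetical helper, not part of scikit-learn's API)
# illustrating the fold sizes described in the KFold Notes above: with
# 10 samples and 3 splits the test folds have sizes 4, 3 and 3.
def _demo_kfold_fold_sizes():
    import numpy as np
    X = np.zeros((10, 1))
    sizes = [len(test) for _, test in KFold(n_splits=3).split(X)]
    # sizes == [4, 3, 3]: the first n_samples % n_splits folds get one extra
    return sizes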
class GroupKFold(_BaseKFold):
"""K-fold iterator variant with non-overlapping groups.
The same group will not appear in two different folds (the number of
distinct groups has to be at least equal to the number of folds).
The folds are approximately balanced in the sense that the number of
distinct groups is approximately the same in each fold.
Parameters
----------
n_splits : int, default=3
Number of folds. Must be at least 2.
Examples
--------
>>> from sklearn.model_selection import GroupKFold
>>> X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
>>> y = np.array([1, 2, 3, 4])
>>> groups = np.array([0, 0, 2, 2])
>>> group_kfold = GroupKFold(n_splits=2)
>>> group_kfold.get_n_splits(X, y, groups)
2
>>> print(group_kfold)
GroupKFold(n_splits=2)
>>> for train_index, test_index in group_kfold.split(X, y, groups):
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
... print(X_train, X_test, y_train, y_test)
...
TRAIN: [0 1] TEST: [2 3]
[[1 2]
[3 4]] [[5 6]
[7 8]] [1 2] [3 4]
TRAIN: [2 3] TEST: [0 1]
[[5 6]
[7 8]] [[1 2]
[3 4]] [3 4] [1 2]
See also
--------
LeaveOneGroupOut
For splitting the data according to explicit domain-specific
stratification of the dataset.
"""
def __init__(self, n_splits=3):
super(GroupKFold, self).__init__(n_splits, shuffle=False,
random_state=None)
def _iter_test_indices(self, X, y, groups):
if groups is None:
raise ValueError("The 'groups' parameter should not be None.")
groups = check_array(groups, ensure_2d=False, dtype=None)
unique_groups, groups = np.unique(groups, return_inverse=True)
n_groups = len(unique_groups)
if self.n_splits > n_groups:
raise ValueError("Cannot have number of splits n_splits=%d greater"
" than the number of groups: %d."
% (self.n_splits, n_groups))
# Weight groups by their number of occurrences
n_samples_per_group = np.bincount(groups)
# Distribute the most frequent groups first
indices = np.argsort(n_samples_per_group)[::-1]
n_samples_per_group = n_samples_per_group[indices]
# Total weight of each fold
n_samples_per_fold = np.zeros(self.n_splits)
# Mapping from group index to fold index
group_to_fold = np.zeros(len(unique_groups))
# Distribute samples by adding the largest weight to the lightest fold
for group_index, weight in enumerate(n_samples_per_group):
lightest_fold = np.argmin(n_samples_per_fold)
n_samples_per_fold[lightest_fold] += weight
group_to_fold[indices[group_index]] = lightest_fold
indices = group_to_fold[groups]
for f in range(self.n_splits):
yield np.where(indices == f)[0]
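# A small sketch (a hypothetical helper with illustrative group sizes, not
# part of scikit-learn's API) of the greedy assignment above: groups are
# sorted by size and each is placed into the currently lightest fold, so the
# folds end up with roughly the same number of samples.
def _demo_groupkfold_balancing():
    import numpy as np
    # group sizes: 'a' has 4 samples, 'b' 3, 'c' 2 and 'd' 1
    groups = np.array(['a'] * 4 + ['b'] * 3 + ['c'] * 2 + ['d'])
    X = np.zeros((len(groups), 1))
    fold_sizes = [len(test) for _, test in
                  GroupKFold(n_splits=2).split(X, groups=groups)]
    # 'a' -> fold 0, 'b' -> fold 1, 'c' -> fold 1, 'd' -> fold 0,
    # so both test folds contain 5 samples
    return fold_sizes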
class StratifiedKFold(_BaseKFold):
"""Stratified K-Folds cross-validator
Provides train/test indices to split data in train/test sets.
This cross-validation object is a variation of KFold that returns
stratified folds. The folds are made by preserving the percentage of
samples for each class.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n_splits : int, default=3
Number of folds. Must be at least 2.
shuffle : boolean, optional
Whether to shuffle each stratification of the data before splitting
into batches.
random_state : int, RandomState instance or None, optional, default=None
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`. Used when ``shuffle`` == True.
Examples
--------
>>> from sklearn.model_selection import StratifiedKFold
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([0, 0, 1, 1])
>>> skf = StratifiedKFold(n_splits=2)
>>> skf.get_n_splits(X, y)
2
>>> print(skf) # doctest: +NORMALIZE_WHITESPACE
StratifiedKFold(n_splits=2, random_state=None, shuffle=False)
>>> for train_index, test_index in skf.split(X, y):
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [1 3] TEST: [0 2]
TRAIN: [0 2] TEST: [1 3]
Notes
-----
    All the folds have size ``trunc(n_samples / n_splits)``; the last fold has
    the complementary size.
See also
--------
RepeatedStratifiedKFold: Repeats Stratified K-Fold n times.
"""
def __init__(self, n_splits=3, shuffle=False, random_state=None):
super(StratifiedKFold, self).__init__(n_splits, shuffle, random_state)
def _make_test_folds(self, X, y=None, groups=None):
if self.shuffle:
rng = check_random_state(self.random_state)
else:
rng = self.random_state
y = np.asarray(y)
n_samples = y.shape[0]
unique_y, y_inversed = np.unique(y, return_inverse=True)
y_counts = np.bincount(y_inversed)
min_groups = np.min(y_counts)
if np.all(self.n_splits > y_counts):
raise ValueError("All the n_groups for individual classes"
" are less than n_splits=%d."
% (self.n_splits))
if self.n_splits > min_groups:
warnings.warn(("The least populated class in y has only %d"
" members, which is too few. The minimum"
" number of groups for any class cannot"
" be less than n_splits=%d."
% (min_groups, self.n_splits)), Warning)
# pre-assign each sample to a test fold index using individual KFold
# splitting strategies for each class so as to respect the balance of
# classes
# NOTE: Passing the data corresponding to ith class say X[y==class_i]
# will break when the data is not 100% stratifiable for all classes.
        # So we pass np.zeros(max(c, n_splits)) as data to the KFold
per_cls_cvs = [
KFold(self.n_splits, shuffle=self.shuffle,
random_state=rng).split(np.zeros(max(count, self.n_splits)))
for count in y_counts]
test_folds = np.zeros(n_samples, dtype=np.int)
for test_fold_indices, per_cls_splits in enumerate(zip(*per_cls_cvs)):
for cls, (_, test_split) in zip(unique_y, per_cls_splits):
cls_test_folds = test_folds[y == cls]
# the test split can be too big because we used
# KFold(...).split(X[:max(c, n_splits)]) when data is not 100%
# stratifiable for all the classes
# (we use a warning instead of raising an exception)
# If this is the case, let's trim it:
test_split = test_split[test_split < len(cls_test_folds)]
cls_test_folds[test_split] = test_fold_indices
test_folds[y == cls] = cls_test_folds
return test_folds
def _iter_test_masks(self, X, y=None, groups=None):
test_folds = self._make_test_folds(X, y)
for i in range(self.n_splits):
yield test_folds == i
def split(self, X, y, groups=None):
"""Generate indices to split data into training and test set.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
Note that providing ``y`` is sufficient to generate the splits and
hence ``np.zeros(n_samples)`` may be used as a placeholder for
``X`` instead of actual training data.
y : array-like, shape (n_samples,)
The target variable for supervised learning problems.
Stratification is done based on the y labels.
groups : object
Always ignored, exists for compatibility.
Returns
-------
train : ndarray
The training set indices for that split.
test : ndarray
The testing set indices for that split.
"""
y = check_array(y, ensure_2d=False, dtype=None)
return super(StratifiedKFold, self).split(X, y, groups)
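# A minimal sketch (a hypothetical helper, not part of scikit-learn's API):
# with 6 samples of class 0 and 3 of class 1, every StratifiedKFold test
# fold keeps the 2:1 class ratio.
def _demo_stratifiedkfold_proportions():
    import numpy as np
    y = np.array([0] * 6 + [1] * 3)
    X = np.zeros((len(y), 1))
    ratios = [np.bincount(y[test]).tolist()
              for _, test in StratifiedKFold(n_splits=3).split(X, y)]
    # ratios == [[2, 1], [2, 1], [2, 1]]
    return ratios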
class TimeSeriesSplit(_BaseKFold):
"""Time Series cross-validator
Provides train/test indices to split time series data samples
that are observed at fixed time intervals, in train/test sets.
    In each split, the test indices must be higher than in the previous split,
    so shuffling within the cross-validator is inappropriate.
This cross-validation object is a variation of :class:`KFold`.
In the kth split, it returns first k folds as train set and the
(k+1)th fold as test set.
Note that unlike standard cross-validation methods, successive
training sets are supersets of those that come before them.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n_splits : int, default=3
Number of splits. Must be at least 1.
max_train_size : int, optional
Maximum size for a single training set.
Examples
--------
>>> from sklearn.model_selection import TimeSeriesSplit
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([1, 2, 3, 4])
>>> tscv = TimeSeriesSplit(n_splits=3)
>>> print(tscv) # doctest: +NORMALIZE_WHITESPACE
TimeSeriesSplit(max_train_size=None, n_splits=3)
>>> for train_index, test_index in tscv.split(X):
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [0] TEST: [1]
TRAIN: [0 1] TEST: [2]
TRAIN: [0 1 2] TEST: [3]
Notes
-----
The training set has size ``i * n_samples // (n_splits + 1)
+ n_samples % (n_splits + 1)`` in the ``i``th split,
with a test set of size ``n_samples//(n_splits + 1)``,
where ``n_samples`` is the number of samples.
"""
def __init__(self, n_splits=3, max_train_size=None):
super(TimeSeriesSplit, self).__init__(n_splits,
shuffle=False,
random_state=None)
self.max_train_size = max_train_size
def split(self, X, y=None, groups=None):
"""Generate indices to split data into training and test set.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, shape (n_samples,)
Always ignored, exists for compatibility.
groups : array-like, with shape (n_samples,), optional
Always ignored, exists for compatibility.
Returns
-------
train : ndarray
The training set indices for that split.
test : ndarray
The testing set indices for that split.
"""
X, y, groups = indexable(X, y, groups)
n_samples = _num_samples(X)
n_splits = self.n_splits
n_folds = n_splits + 1
if n_folds > n_samples:
raise ValueError(
("Cannot have number of folds ={0} greater"
" than the number of samples: {1}.").format(n_folds,
n_samples))
indices = np.arange(n_samples)
test_size = (n_samples // n_folds)
test_starts = range(test_size + n_samples % n_folds,
n_samples, test_size)
for test_start in test_starts:
if self.max_train_size and self.max_train_size < test_start:
yield (indices[test_start - self.max_train_size:test_start],
indices[test_start:test_start + test_size])
else:
yield (indices[:test_start],
indices[test_start:test_start + test_size])
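# A minimal sketch (a hypothetical helper, not part of scikit-learn's API)
# of the ``max_train_size`` option documented above: the training window is
# truncated to the most recent ``max_train_size`` samples.
def _demo_timeseries_max_train_size():
    import numpy as np
    X = np.zeros((6, 1))
    splits = [(train.tolist(), test.tolist()) for train, test in
              TimeSeriesSplit(n_splits=3, max_train_size=2).split(X)]
    # splits == [([1, 2], [3]), ([2, 3], [4]), ([3, 4], [5])]
    return splits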
class LeaveOneGroupOut(BaseCrossValidator):
"""Leave One Group Out cross-validator
Provides train/test indices to split data according to a third-party
provided group. This group information can be used to encode arbitrary
domain specific stratifications of the samples as integers.
For instance the groups could be the year of collection of the samples
and thus allow for cross-validation against time-based splits.
Read more in the :ref:`User Guide <cross_validation>`.
Examples
--------
>>> from sklearn.model_selection import LeaveOneGroupOut
>>> X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
>>> y = np.array([1, 2, 1, 2])
>>> groups = np.array([1, 1, 2, 2])
>>> logo = LeaveOneGroupOut()
>>> logo.get_n_splits(X, y, groups)
2
>>> logo.get_n_splits(groups=groups) # 'groups' is always required
2
>>> print(logo)
LeaveOneGroupOut()
>>> for train_index, test_index in logo.split(X, y, groups):
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
... print(X_train, X_test, y_train, y_test)
TRAIN: [2 3] TEST: [0 1]
[[5 6]
[7 8]] [[1 2]
[3 4]] [1 2] [1 2]
TRAIN: [0 1] TEST: [2 3]
[[1 2]
[3 4]] [[5 6]
[7 8]] [1 2] [1 2]
"""
def _iter_test_masks(self, X, y, groups):
if groups is None:
raise ValueError("The 'groups' parameter should not be None.")
# We make a copy of groups to avoid side-effects during iteration
groups = check_array(groups, copy=True, ensure_2d=False, dtype=None)
unique_groups = np.unique(groups)
if len(unique_groups) <= 1:
raise ValueError(
"The groups parameter contains fewer than 2 unique groups "
"(%s). LeaveOneGroupOut expects at least 2." % unique_groups)
for i in unique_groups:
yield groups == i
def get_n_splits(self, X=None, y=None, groups=None):
"""Returns the number of splitting iterations in the cross-validator
Parameters
----------
X : object, optional
Always ignored, exists for compatibility.
y : object, optional
Always ignored, exists for compatibility.
groups : array-like, with shape (n_samples,), optional
Group labels for the samples used while splitting the dataset into
train/test set. This 'groups' parameter must always be specified to
calculate the number of splits, though the other parameters can be
omitted.
Returns
-------
n_splits : int
Returns the number of splitting iterations in the cross-validator.
"""
if groups is None:
raise ValueError("The 'groups' parameter should not be None.")
groups = check_array(groups, ensure_2d=False, dtype=None)
return len(np.unique(groups))
class LeavePGroupsOut(BaseCrossValidator):
"""Leave P Group(s) Out cross-validator
Provides train/test indices to split data according to a third-party
provided group. This group information can be used to encode arbitrary
domain specific stratifications of the samples as integers.
For instance the groups could be the year of collection of the samples
and thus allow for cross-validation against time-based splits.
    The difference between LeavePGroupsOut and LeaveOneGroupOut is that
    the former builds the test sets from all the samples assigned to
    ``p`` different values of the groups, while the latter uses all the
    samples assigned to a single group.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n_groups : int
Number of groups (``p``) to leave out in the test split.
Examples
--------
>>> from sklearn.model_selection import LeavePGroupsOut
>>> X = np.array([[1, 2], [3, 4], [5, 6]])
>>> y = np.array([1, 2, 1])
>>> groups = np.array([1, 2, 3])
>>> lpgo = LeavePGroupsOut(n_groups=2)
>>> lpgo.get_n_splits(X, y, groups)
3
>>> lpgo.get_n_splits(groups=groups) # 'groups' is always required
3
>>> print(lpgo)
LeavePGroupsOut(n_groups=2)
>>> for train_index, test_index in lpgo.split(X, y, groups):
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
... print(X_train, X_test, y_train, y_test)
TRAIN: [2] TEST: [0 1]
[[5 6]] [[1 2]
[3 4]] [1] [1 2]
TRAIN: [1] TEST: [0 2]
[[3 4]] [[1 2]
[5 6]] [2] [1 1]
TRAIN: [0] TEST: [1 2]
[[1 2]] [[3 4]
[5 6]] [1] [2 1]
See also
--------
GroupKFold: K-fold iterator variant with non-overlapping groups.
"""
def __init__(self, n_groups):
self.n_groups = n_groups
def _iter_test_masks(self, X, y, groups):
if groups is None:
raise ValueError("The 'groups' parameter should not be None.")
groups = check_array(groups, copy=True, ensure_2d=False, dtype=None)
unique_groups = np.unique(groups)
if self.n_groups >= len(unique_groups):
raise ValueError(
"The groups parameter contains fewer than (or equal to) "
"n_groups (%d) numbers of unique groups (%s). LeavePGroupsOut "
"expects that at least n_groups + 1 (%d) unique groups be "
"present" % (self.n_groups, unique_groups, self.n_groups + 1))
combi = combinations(range(len(unique_groups)), self.n_groups)
for indices in combi:
test_index = np.zeros(_num_samples(X), dtype=np.bool)
for l in unique_groups[np.array(indices)]:
test_index[groups == l] = True
yield test_index
def get_n_splits(self, X=None, y=None, groups=None):
"""Returns the number of splitting iterations in the cross-validator
Parameters
----------
X : object, optional
Always ignored, exists for compatibility.
y : object, optional
Always ignored, exists for compatibility.
groups : array-like, with shape (n_samples,), optional
Group labels for the samples used while splitting the dataset into
train/test set. This 'groups' parameter must always be specified to
calculate the number of splits, though the other parameters can be
omitted.
Returns
-------
n_splits : int
Returns the number of splitting iterations in the cross-validator.
"""
if groups is None:
raise ValueError("The 'groups' parameter should not be None.")
groups = check_array(groups, ensure_2d=False, dtype=None)
return int(comb(len(np.unique(groups)), self.n_groups, exact=True))
class _RepeatedSplits(with_metaclass(ABCMeta)):
"""Repeated splits for an arbitrary randomized CV splitter.
Repeats splits for cross-validators n times with different randomization
in each repetition.
Parameters
----------
cv : callable
Cross-validator class.
n_repeats : int, default=10
Number of times cross-validator needs to be repeated.
random_state : int, RandomState instance or None, optional, default=None
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
**cvargs : additional params
Constructor parameters for cv. Must not contain random_state
and shuffle.
"""
def __init__(self, cv, n_repeats=10, random_state=None, **cvargs):
if not isinstance(n_repeats, (np.integer, numbers.Integral)):
raise ValueError("Number of repetitions must be of Integral type.")
if n_repeats <= 0:
raise ValueError("Number of repetitions must be greater than 0.")
if any(key in cvargs for key in ('random_state', 'shuffle')):
raise ValueError(
"cvargs must not contain random_state or shuffle.")
self.cv = cv
self.n_repeats = n_repeats
self.random_state = random_state
self.cvargs = cvargs
def split(self, X, y=None, groups=None):
"""Generates indices to split data into training and test set.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, of length n_samples
The target variable for supervised learning problems.
groups : array-like, with shape (n_samples,), optional
Group labels for the samples used while splitting the dataset into
train/test set.
Returns
-------
train : ndarray
The training set indices for that split.
test : ndarray
The testing set indices for that split.
"""
n_repeats = self.n_repeats
rng = check_random_state(self.random_state)
for idx in range(n_repeats):
cv = self.cv(random_state=rng, shuffle=True,
**self.cvargs)
for train_index, test_index in cv.split(X, y, groups):
yield train_index, test_index
def get_n_splits(self, X=None, y=None, groups=None):
"""Returns the number of splitting iterations in the cross-validator
Parameters
----------
X : object
Always ignored, exists for compatibility.
``np.zeros(n_samples)`` may be used as a placeholder.
y : object
Always ignored, exists for compatibility.
``np.zeros(n_samples)`` may be used as a placeholder.
groups : array-like, with shape (n_samples,), optional
Group labels for the samples used while splitting the dataset into
train/test set.
Returns
-------
n_splits : int
Returns the number of splitting iterations in the cross-validator.
"""
rng = check_random_state(self.random_state)
cv = self.cv(random_state=rng, shuffle=True,
**self.cvargs)
return cv.get_n_splits(X, y, groups) * self.n_repeats
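# A minimal sketch (a hypothetical helper, not part of scikit-learn's API)
# of how the wrapper above is meant to be used: it re-runs a shuffling
# cross-validator class ``n_repeats`` times with fresh randomization.
# RepeatedKFold below is the real public specialization of this pattern.
def _demo_repeated_splits():
    import numpy as np
    X = np.zeros((4, 1))
    rcv = _RepeatedSplits(KFold, n_repeats=2, random_state=0, n_splits=2)
    # 2 repeats x 2 folds = 4 (train, test) pairs in total
    return rcv.get_n_splits(X), sum(1 for _ in rcv.split(X))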
class RepeatedKFold(_RepeatedSplits):
"""Repeated K-Fold cross validator.
Repeats K-Fold n times with different randomization in each repetition.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n_splits : int, default=5
Number of folds. Must be at least 2.
n_repeats : int, default=10
Number of times cross-validator needs to be repeated.
random_state : int, RandomState instance or None, optional, default=None
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Examples
--------
>>> from sklearn.model_selection import RepeatedKFold
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([0, 0, 1, 1])
>>> rkf = RepeatedKFold(n_splits=2, n_repeats=2, random_state=2652124)
>>> for train_index, test_index in rkf.split(X):
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
...
TRAIN: [0 1] TEST: [2 3]
TRAIN: [2 3] TEST: [0 1]
TRAIN: [1 2] TEST: [0 3]
TRAIN: [0 3] TEST: [1 2]
See also
--------
    RepeatedStratifiedKFold: Repeats Stratified K-Fold n times.
"""
def __init__(self, n_splits=5, n_repeats=10, random_state=None):
super(RepeatedKFold, self).__init__(
KFold, n_repeats, random_state, n_splits=n_splits)
class RepeatedStratifiedKFold(_RepeatedSplits):
"""Repeated Stratified K-Fold cross validator.
Repeats Stratified K-Fold n times with different randomization in each
repetition.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n_splits : int, default=5
Number of folds. Must be at least 2.
n_repeats : int, default=10
Number of times cross-validator needs to be repeated.
random_state : None, int or RandomState, default=None
Random state to be used to generate random state for each
repetition.
Examples
--------
>>> from sklearn.model_selection import RepeatedStratifiedKFold
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([0, 0, 1, 1])
>>> rskf = RepeatedStratifiedKFold(n_splits=2, n_repeats=2,
... random_state=36851234)
>>> for train_index, test_index in rskf.split(X, y):
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
...
TRAIN: [1 2] TEST: [0 3]
TRAIN: [0 3] TEST: [1 2]
TRAIN: [1 3] TEST: [0 2]
TRAIN: [0 2] TEST: [1 3]
See also
--------
RepeatedKFold: Repeats K-Fold n times.
"""
def __init__(self, n_splits=5, n_repeats=10, random_state=None):
super(RepeatedStratifiedKFold, self).__init__(
StratifiedKFold, n_repeats, random_state, n_splits=n_splits)
class BaseShuffleSplit(with_metaclass(ABCMeta)):
"""Base class for ShuffleSplit and StratifiedShuffleSplit"""
def __init__(self, n_splits=10, test_size="default", train_size=None,
random_state=None):
_validate_shuffle_split_init(test_size, train_size)
self.n_splits = n_splits
self.test_size = test_size
self.train_size = train_size
self.random_state = random_state
def split(self, X, y=None, groups=None):
"""Generate indices to split data into training and test set.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, shape (n_samples,)
The target variable for supervised learning problems.
groups : array-like, with shape (n_samples,), optional
Group labels for the samples used while splitting the dataset into
train/test set.
Returns
-------
train : ndarray
The training set indices for that split.
test : ndarray
The testing set indices for that split.
"""
X, y, groups = indexable(X, y, groups)
for train, test in self._iter_indices(X, y, groups):
yield train, test
@abstractmethod
def _iter_indices(self, X, y=None, groups=None):
"""Generate (train, test) indices"""
def get_n_splits(self, X=None, y=None, groups=None):
"""Returns the number of splitting iterations in the cross-validator
Parameters
----------
X : object
Always ignored, exists for compatibility.
y : object
Always ignored, exists for compatibility.
groups : object
Always ignored, exists for compatibility.
Returns
-------
n_splits : int
Returns the number of splitting iterations in the cross-validator.
"""
return self.n_splits
def __repr__(self):
return _build_repr(self)
class ShuffleSplit(BaseShuffleSplit):
"""Random permutation cross-validator
Yields indices to split data into training and test sets.
Note: contrary to other cross-validation strategies, random splits
do not guarantee that all folds will be different, although this is
still very likely for sizeable datasets.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n_splits : int, default 10
Number of re-shuffling & splitting iterations.
test_size : float, int, None, default=0.1
If float, should be between 0.0 and 1.0 and represent the proportion
of the dataset to include in the test split. If int, represents the
absolute number of test samples. If None, the value is set to the
        complement of the train size. By default (if the parameter is
        unspecified), the value is set to 0.1.
The default will change in version 0.21. It will remain 0.1 only
if ``train_size`` is unspecified, otherwise it will complement
the specified ``train_size``.
train_size : float, int, or None, default=None
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the train split. If
int, represents the absolute number of train samples. If None,
the value is automatically set to the complement of the test size.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Examples
--------
>>> from sklearn.model_selection import ShuffleSplit
>>> X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
>>> y = np.array([1, 2, 1, 2])
>>> rs = ShuffleSplit(n_splits=3, test_size=.25, random_state=0)
>>> rs.get_n_splits(X)
3
>>> print(rs)
ShuffleSplit(n_splits=3, random_state=0, test_size=0.25, train_size=None)
>>> for train_index, test_index in rs.split(X):
... print("TRAIN:", train_index, "TEST:", test_index)
... # doctest: +ELLIPSIS
TRAIN: [3 1 0] TEST: [2]
TRAIN: [2 1 3] TEST: [0]
TRAIN: [0 2 1] TEST: [3]
>>> rs = ShuffleSplit(n_splits=3, train_size=0.5, test_size=.25,
... random_state=0)
>>> for train_index, test_index in rs.split(X):
... print("TRAIN:", train_index, "TEST:", test_index)
... # doctest: +ELLIPSIS
TRAIN: [3 1] TEST: [2]
TRAIN: [2 1] TEST: [0]
TRAIN: [0 2] TEST: [3]
"""
def _iter_indices(self, X, y=None, groups=None):
n_samples = _num_samples(X)
n_train, n_test = _validate_shuffle_split(n_samples,
self.test_size,
self.train_size)
rng = check_random_state(self.random_state)
for i in range(self.n_splits):
# random partition
permutation = rng.permutation(n_samples)
ind_test = permutation[:n_test]
ind_train = permutation[n_test:(n_test + n_train)]
yield ind_train, ind_test
class GroupShuffleSplit(ShuffleSplit):
    """Shuffle-Group(s)-Out cross-validation iterator
Provides randomized train/test indices to split data according to a
third-party provided group. This group information can be used to encode
arbitrary domain specific stratifications of the samples as integers.
For instance the groups could be the year of collection of the samples
and thus allow for cross-validation against time-based splits.
The difference between LeavePGroupsOut and GroupShuffleSplit is that
the former generates splits using all subsets of size ``p`` unique groups,
whereas GroupShuffleSplit generates a user-determined number of random
test splits, each with a user-determined fraction of unique groups.
For example, a less computationally intensive alternative to
``LeavePGroupsOut(p=10)`` would be
``GroupShuffleSplit(test_size=10, n_splits=100)``.
Note: The parameters ``test_size`` and ``train_size`` refer to groups, and
not to samples, as in ShuffleSplit.
Parameters
----------
n_splits : int (default 5)
Number of re-shuffling & splitting iterations.
test_size : float, int, None, optional
If float, should be between 0.0 and 1.0 and represent the proportion
of the dataset to include in the test split. If int, represents the
absolute number of test samples. If None, the value is set to the
complement of the train size. By default, the value is set to 0.2.
The default will change in version 0.21. It will remain 0.2 only
if ``train_size`` is unspecified, otherwise it will complement
the specified ``train_size``.
train_size : float, int, or None, default is None
If float, should be between 0.0 and 1.0 and represent the
proportion of the groups to include in the train split. If
int, represents the absolute number of train groups. If None,
the value is automatically set to the complement of the test size.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
    """
def __init__(self, n_splits=5, test_size="default", train_size=None,
random_state=None):
if test_size == "default":
if train_size is not None:
warnings.warn("From version 0.21, test_size will always "
"complement train_size unless both "
"are specified.",
FutureWarning)
test_size = 0.2
super(GroupShuffleSplit, self).__init__(
n_splits=n_splits,
test_size=test_size,
train_size=train_size,
random_state=random_state)
def _iter_indices(self, X, y, groups):
if groups is None:
raise ValueError("The 'groups' parameter should not be None.")
groups = check_array(groups, ensure_2d=False, dtype=None)
classes, group_indices = np.unique(groups, return_inverse=True)
for group_train, group_test in super(
GroupShuffleSplit, self)._iter_indices(X=classes):
# these are the indices of classes in the partition
# invert them into data indices
train = np.flatnonzero(np.in1d(group_indices, group_train))
test = np.flatnonzero(np.in1d(group_indices, group_test))
yield train, test
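# GroupShuffleSplit has no doctest above, so here is a minimal usage sketch
# (a hypothetical helper with toy data, not part of scikit-learn's API):
# whole groups are held out together, so no group ever spans train and test.
def _demo_group_shuffle_split():
    import numpy as np
    X = np.zeros((6, 1))
    groups = np.array([1, 1, 2, 2, 3, 3])
    gss = GroupShuffleSplit(n_splits=4, test_size=1, random_state=0)
    for train, test in gss.split(X, groups=groups):
        # each test set holds all samples of exactly one held-out group
        assert len(np.intersect1d(groups[train], groups[test])) == 0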
def _approximate_mode(class_counts, n_draws, rng):
"""Computes approximate mode of multivariate hypergeometric.
This is an approximation to the mode of the multivariate
hypergeometric given by class_counts and n_draws.
It shouldn't be off by more than one.
    It is the most likely outcome of drawing n_draws
    samples from the population given by class_counts.
Parameters
----------
class_counts : ndarray of int
Population per class.
n_draws : int
Number of draws (samples to draw) from the overall population.
rng : random state
Used to break ties.
Returns
-------
sampled_classes : ndarray of int
Number of samples drawn from each class.
np.sum(sampled_classes) == n_draws
Examples
--------
>>> from sklearn.model_selection._split import _approximate_mode
>>> _approximate_mode(class_counts=np.array([4, 2]), n_draws=3, rng=0)
array([2, 1])
>>> _approximate_mode(class_counts=np.array([5, 2]), n_draws=4, rng=0)
array([3, 1])
>>> _approximate_mode(class_counts=np.array([2, 2, 2, 1]),
... n_draws=2, rng=0)
array([0, 1, 1, 0])
>>> _approximate_mode(class_counts=np.array([2, 2, 2, 1]),
... n_draws=2, rng=42)
array([1, 1, 0, 0])
"""
rng = check_random_state(rng)
# this computes a bad approximation to the mode of the
# multivariate hypergeometric given by class_counts and n_draws
continuous = n_draws * class_counts / class_counts.sum()
# floored means we don't overshoot n_samples, but probably undershoot
floored = np.floor(continuous)
# we add samples according to how much "left over" probability
# they had, until we arrive at n_samples
need_to_add = int(n_draws - floored.sum())
if need_to_add > 0:
remainder = continuous - floored
values = np.sort(np.unique(remainder))[::-1]
# add according to remainder, but break ties
# randomly to avoid biases
for value in values:
inds, = np.where(remainder == value)
# if we need_to_add less than what's in inds
# we draw randomly from them.
# if we need to add more, we add them all and
# go to the next value
add_now = min(len(inds), need_to_add)
inds = rng.choice(inds, size=add_now, replace=False)
floored[inds] += 1
need_to_add -= add_now
if need_to_add == 0:
break
return floored.astype(np.int)
class StratifiedShuffleSplit(BaseShuffleSplit):
"""Stratified ShuffleSplit cross-validator
Provides train/test indices to split data in train/test sets.
This cross-validation object is a merge of StratifiedKFold and
ShuffleSplit, which returns stratified randomized folds. The folds
are made by preserving the percentage of samples for each class.
Note: like the ShuffleSplit strategy, stratified random splits
do not guarantee that all folds will be different, although this is
still very likely for sizeable datasets.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n_splits : int, default 10
Number of re-shuffling & splitting iterations.
test_size : float, int, None, optional
If float, should be between 0.0 and 1.0 and represent the proportion
of the dataset to include in the test split. If int, represents the
absolute number of test samples. If None, the value is set to the
complement of the train size. By default, the value is set to 0.1.
The default will change in version 0.21. It will remain 0.1 only
if ``train_size`` is unspecified, otherwise it will complement
the specified ``train_size``.
train_size : float, int, or None, default is None
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the train split. If
int, represents the absolute number of train samples. If None,
the value is automatically set to the complement of the test size.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Examples
--------
>>> from sklearn.model_selection import StratifiedShuffleSplit
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([0, 0, 1, 1])
>>> sss = StratifiedShuffleSplit(n_splits=3, test_size=0.5, random_state=0)
>>> sss.get_n_splits(X, y)
3
>>> print(sss) # doctest: +ELLIPSIS
StratifiedShuffleSplit(n_splits=3, random_state=0, ...)
>>> for train_index, test_index in sss.split(X, y):
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [1 2] TEST: [3 0]
TRAIN: [0 2] TEST: [1 3]
TRAIN: [0 2] TEST: [3 1]
"""
def __init__(self, n_splits=10, test_size="default", train_size=None,
random_state=None):
super(StratifiedShuffleSplit, self).__init__(
n_splits, test_size, train_size, random_state)
def _iter_indices(self, X, y, groups=None):
n_samples = _num_samples(X)
y = check_array(y, ensure_2d=False, dtype=None)
n_train, n_test = _validate_shuffle_split(n_samples, self.test_size,
self.train_size)
if y.ndim == 2:
# for multi-label y, map each distinct row to its string repr:
y = np.array([str(row) for row in y])
classes, y_indices = np.unique(y, return_inverse=True)
n_classes = classes.shape[0]
class_counts = np.bincount(y_indices)
if np.min(class_counts) < 2:
raise ValueError("The least populated class in y has only 1"
" member, which is too few. The minimum"
" number of groups for any class cannot"
" be less than 2.")
if n_train < n_classes:
raise ValueError('The train_size = %d should be greater or '
'equal to the number of classes = %d' %
(n_train, n_classes))
if n_test < n_classes:
raise ValueError('The test_size = %d should be greater or '
'equal to the number of classes = %d' %
(n_test, n_classes))
rng = check_random_state(self.random_state)
for _ in range(self.n_splits):
# if there are ties in the class-counts, we want
# to make sure to break them anew in each iteration
n_i = _approximate_mode(class_counts, n_train, rng)
class_counts_remaining = class_counts - n_i
t_i = _approximate_mode(class_counts_remaining, n_test, rng)
train = []
test = []
for i, class_i in enumerate(classes):
permutation = rng.permutation(class_counts[i])
perm_indices_class_i = np.where((y == class_i))[0][permutation]
train.extend(perm_indices_class_i[:n_i[i]])
test.extend(perm_indices_class_i[n_i[i]:n_i[i] + t_i[i]])
train = rng.permutation(train)
test = rng.permutation(test)
yield train, test
def split(self, X, y, groups=None):
"""Generate indices to split data into training and test set.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
Note that providing ``y`` is sufficient to generate the splits and
hence ``np.zeros(n_samples)`` may be used as a placeholder for
``X`` instead of actual training data.
y : array-like, shape (n_samples,)
The target variable for supervised learning problems.
Stratification is done based on the y labels.
groups : object
Always ignored, exists for compatibility.
Returns
-------
train : ndarray
The training set indices for that split.
test : ndarray
The testing set indices for that split.
"""
y = check_array(y, ensure_2d=False, dtype=None)
return super(StratifiedShuffleSplit, self).split(X, y, groups)
def _validate_shuffle_split_init(test_size, train_size):
"""Validation helper to check the test_size and train_size at init
NOTE This does not take into account the number of samples which is known
only at split
"""
if test_size == "default":
if train_size is not None:
warnings.warn("From version 0.21, test_size will always "
"complement train_size unless both "
"are specified.",
FutureWarning)
test_size = 0.1
if test_size is None and train_size is None:
raise ValueError('test_size and train_size can not both be None')
if test_size is not None:
if np.asarray(test_size).dtype.kind == 'f':
if test_size >= 1.:
raise ValueError(
'test_size=%f should be smaller '
'than 1.0 or be an integer' % test_size)
elif np.asarray(test_size).dtype.kind != 'i':
# int values are checked during split based on the input
raise ValueError("Invalid value for test_size: %r" % test_size)
if train_size is not None:
if np.asarray(train_size).dtype.kind == 'f':
if train_size >= 1.:
raise ValueError("train_size=%f should be smaller "
"than 1.0 or be an integer" % train_size)
elif (np.asarray(test_size).dtype.kind == 'f' and
(train_size + test_size) > 1.):
raise ValueError('The sum of test_size and train_size = %f, '
'should be smaller than 1.0. Reduce '
'test_size and/or train_size.' %
(train_size + test_size))
elif np.asarray(train_size).dtype.kind != 'i':
# int values are checked during split based on the input
raise ValueError("Invalid value for train_size: %r" % train_size)
def _validate_shuffle_split(n_samples, test_size, train_size):
"""
    Validation helper to check whether the train/test sizes are meaningful with
    respect to the size of the data (n_samples).
"""
if (test_size is not None and
np.asarray(test_size).dtype.kind == 'i' and
test_size >= n_samples):
raise ValueError('test_size=%d should be smaller than the number of '
'samples %d' % (test_size, n_samples))
if (train_size is not None and
np.asarray(train_size).dtype.kind == 'i' and
train_size >= n_samples):
raise ValueError("train_size=%d should be smaller than the number of"
" samples %d" % (train_size, n_samples))
if test_size == "default":
test_size = 0.1
if np.asarray(test_size).dtype.kind == 'f':
n_test = ceil(test_size * n_samples)
elif np.asarray(test_size).dtype.kind == 'i':
n_test = float(test_size)
if train_size is None:
n_train = n_samples - n_test
elif np.asarray(train_size).dtype.kind == 'f':
n_train = floor(train_size * n_samples)
else:
n_train = float(train_size)
if test_size is None:
n_test = n_samples - n_train
if n_train + n_test > n_samples:
raise ValueError('The sum of train_size and test_size = %d, '
'should be smaller than the number of '
'samples %d. Reduce test_size and/or '
'train_size.' % (n_train + n_test, n_samples))
return int(n_train), int(n_test)
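# A small sketch (a hypothetical helper, not part of scikit-learn's API) of
# how the helper above resolves float and int sizes into sample counts.
def _demo_validate_shuffle_split():
    # float test_size: n_test = ceil(0.25 * 10) = 3, n_train = 10 - 3 = 7
    # int train_size:  n_train = 6 and n_test stays ceil(0.25 * 10) = 3
    return (_validate_shuffle_split(10, 0.25, None),
            _validate_shuffle_split(10, 0.25, 6))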
class PredefinedSplit(BaseCrossValidator):
"""Predefined split cross-validator
Splits the data into training/test set folds according to a predefined
scheme. Each sample can be assigned to at most one test set fold, as
specified by the user through the ``test_fold`` parameter.
Read more in the :ref:`User Guide <cross_validation>`.
Examples
--------
>>> from sklearn.model_selection import PredefinedSplit
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([0, 0, 1, 1])
>>> test_fold = [0, 1, -1, 1]
>>> ps = PredefinedSplit(test_fold)
>>> ps.get_n_splits()
2
>>> print(ps) # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
PredefinedSplit(test_fold=array([ 0, 1, -1, 1]))
>>> for train_index, test_index in ps.split():
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [1 2 3] TEST: [0]
TRAIN: [0 2] TEST: [1 3]
"""
def __init__(self, test_fold):
self.test_fold = np.array(test_fold, dtype=np.int)
self.test_fold = column_or_1d(self.test_fold)
self.unique_folds = np.unique(self.test_fold)
self.unique_folds = self.unique_folds[self.unique_folds != -1]
def split(self, X=None, y=None, groups=None):
"""Generate indices to split data into training and test set.
Parameters
----------
X : object
Always ignored, exists for compatibility.
y : object
Always ignored, exists for compatibility.
groups : object
Always ignored, exists for compatibility.
Returns
-------
train : ndarray
The training set indices for that split.
test : ndarray
The testing set indices for that split.
"""
ind = np.arange(len(self.test_fold))
for test_index in self._iter_test_masks():
train_index = ind[np.logical_not(test_index)]
test_index = ind[test_index]
yield train_index, test_index
def _iter_test_masks(self):
"""Generates boolean masks corresponding to test sets."""
for f in self.unique_folds:
test_index = np.where(self.test_fold == f)[0]
test_mask = np.zeros(len(self.test_fold), dtype=np.bool)
test_mask[test_index] = True
yield test_mask
def get_n_splits(self, X=None, y=None, groups=None):
"""Returns the number of splitting iterations in the cross-validator
Parameters
----------
X : object
Always ignored, exists for compatibility.
y : object
Always ignored, exists for compatibility.
groups : object
Always ignored, exists for compatibility.
Returns
-------
n_splits : int
Returns the number of splitting iterations in the cross-validator.
"""
return len(self.unique_folds)
class _CVIterableWrapper(BaseCrossValidator):
"""Wrapper class for old style cv objects and iterables."""
def __init__(self, cv):
self.cv = list(cv)
def get_n_splits(self, X=None, y=None, groups=None):
"""Returns the number of splitting iterations in the cross-validator
Parameters
----------
X : object
Always ignored, exists for compatibility.
y : object
Always ignored, exists for compatibility.
groups : object
Always ignored, exists for compatibility.
Returns
-------
n_splits : int
Returns the number of splitting iterations in the cross-validator.
"""
return len(self.cv)
def split(self, X=None, y=None, groups=None):
"""Generate indices to split data into training and test set.
Parameters
----------
X : object
Always ignored, exists for compatibility.
y : object
Always ignored, exists for compatibility.
groups : object
Always ignored, exists for compatibility.
Returns
-------
train : ndarray
The training set indices for that split.
test : ndarray
The testing set indices for that split.
"""
for train, test in self.cv:
yield train, test
def check_cv(cv=3, y=None, classifier=False):
"""Input checker utility for building a cross-validator
Parameters
----------
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, if classifier is True and ``y`` is either
binary or multiclass, :class:`StratifiedKFold` is used. In all other
cases, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
y : array-like, optional
The target variable for supervised learning problems.
classifier : boolean, optional, default False
Whether the task is a classification task, in which case
stratified KFold will be used.
Returns
-------
checked_cv : a cross-validator instance.
The return value is a cross-validator which generates the train/test
splits via the ``split`` method.
"""
if cv is None:
cv = 3
if isinstance(cv, numbers.Integral):
if (classifier and (y is not None) and
(type_of_target(y) in ('binary', 'multiclass'))):
return StratifiedKFold(cv)
else:
return KFold(cv)
if not hasattr(cv, 'split') or isinstance(cv, str):
if not isinstance(cv, Iterable) or isinstance(cv, str):
raise ValueError("Expected cv as an integer, cross-validation "
"object (from sklearn.model_selection) "
"or an iterable. Got %s." % cv)
return _CVIterableWrapper(cv)
return cv # New style cv objects are passed without any modification
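# A minimal sketch (a hypothetical helper, not part of scikit-learn's API):
# for integer inputs check_cv picks StratifiedKFold for classifiers with
# binary/multiclass targets and plain KFold otherwise.
def _demo_check_cv():
    import numpy as np
    y_class = np.array([0, 1, 0, 1, 0, 1])
    y_reg = np.array([0.1, 1.2, 2.3, 3.4, 4.5, 5.6])
    cv_class = check_cv(3, y_class, classifier=True)   # StratifiedKFold(3)
    cv_reg = check_cv(3, y_reg, classifier=False)      # KFold(3)
    return type(cv_class).__name__, type(cv_reg).__name__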
def train_test_split(*arrays, **options):
"""Split arrays or matrices into random train and test subsets
Quick utility that wraps input validation and
``next(ShuffleSplit().split(X, y))`` and application to input data
into a single call for splitting (and optionally subsampling) data in a
    one-liner.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
*arrays : sequence of indexables with same length / shape[0]
Allowed inputs are lists, numpy arrays, scipy-sparse
matrices or pandas dataframes.
test_size : float, int, None, optional
If float, should be between 0.0 and 1.0 and represent the proportion
of the dataset to include in the test split. If int, represents the
absolute number of test samples. If None, the value is set to the
complement of the train size. By default, the value is set to 0.25.
The default will change in version 0.21. It will remain 0.25 only
if ``train_size`` is unspecified, otherwise it will complement
the specified ``train_size``.
train_size : float, int, or None, default None
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the train split. If
int, represents the absolute number of train samples. If None,
the value is automatically set to the complement of the test size.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
shuffle : boolean, optional (default=True)
Whether or not to shuffle the data before splitting. If shuffle=False
then stratify must be None.
stratify : array-like or None (default is None)
If not None, data is split in a stratified fashion, using this as
the class labels.
Returns
-------
splitting : list, length=2 * len(arrays)
List containing train-test split of inputs.
.. versionadded:: 0.16
If the input is sparse, the output will be a
``scipy.sparse.csr_matrix``. Else, output type is the same as the
input type.
Examples
--------
>>> import numpy as np
>>> from sklearn.model_selection import train_test_split
>>> X, y = np.arange(10).reshape((5, 2)), range(5)
>>> X
array([[0, 1],
[2, 3],
[4, 5],
[6, 7],
[8, 9]])
>>> list(y)
[0, 1, 2, 3, 4]
>>> X_train, X_test, y_train, y_test = train_test_split(
... X, y, test_size=0.33, random_state=42)
...
>>> X_train
array([[4, 5],
[0, 1],
[6, 7]])
>>> y_train
[2, 0, 3]
>>> X_test
array([[2, 3],
[8, 9]])
>>> y_test
[1, 4]
>>> train_test_split(y, shuffle=False)
[[0, 1, 2], [3, 4]]
"""
n_arrays = len(arrays)
if n_arrays == 0:
raise ValueError("At least one array required as input")
test_size = options.pop('test_size', 'default')
train_size = options.pop('train_size', None)
random_state = options.pop('random_state', None)
stratify = options.pop('stratify', None)
shuffle = options.pop('shuffle', True)
if options:
raise TypeError("Invalid parameters passed: %s" % str(options))
if test_size == 'default':
test_size = None
if train_size is not None:
warnings.warn("From version 0.21, test_size will always "
"complement train_size unless both "
"are specified.",
FutureWarning)
if test_size is None and train_size is None:
test_size = 0.25
arrays = indexable(*arrays)
if shuffle is False:
if stratify is not None:
raise ValueError(
"Stratified train/test split is not implemented for "
"shuffle=False")
n_samples = _num_samples(arrays[0])
n_train, n_test = _validate_shuffle_split(n_samples, test_size,
train_size)
train = np.arange(n_train)
test = np.arange(n_train, n_train + n_test)
else:
if stratify is not None:
CVClass = StratifiedShuffleSplit
else:
CVClass = ShuffleSplit
cv = CVClass(test_size=test_size,
train_size=train_size,
random_state=random_state)
train, test = next(cv.split(X=arrays[0], y=stratify))
return list(chain.from_iterable((safe_indexing(a, train),
safe_indexing(a, test)) for a in arrays))
train_test_split.__test__ = False # to avoid a pb with nosetests
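# A minimal sketch (a hypothetical helper with toy data, not part of
# scikit-learn's API) of the ``stratify`` option documented above: class
# proportions in ``y`` are preserved in both halves of the split.
def _demo_train_test_split_stratify():
    import numpy as np
    X = np.arange(16).reshape((8, 2))
    y = np.array([0, 0, 0, 0, 1, 1, 1, 1])
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, test_size=0.5, stratify=y, random_state=0)
    # both halves contain two samples of each class
    return np.bincount(y_train).tolist(), np.bincount(y_test).tolist()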
def _build_repr(self):
# XXX This is copied from BaseEstimator's get_params
cls = self.__class__
init = getattr(cls.__init__, 'deprecated_original', cls.__init__)
# Ignore varargs, kw and default values and pop self
init_signature = signature(init)
# Consider the constructor parameters excluding 'self'
if init is object.__init__:
args = []
else:
args = sorted([p.name for p in init_signature.parameters.values()
if p.name != 'self' and p.kind != p.VAR_KEYWORD])
class_name = self.__class__.__name__
params = dict()
for key in args:
# We need deprecation warnings to always be on in order to
# catch deprecated param values.
# This is set in utils/__init__.py but it gets overwritten
# when running under python3 somehow.
warnings.simplefilter("always", DeprecationWarning)
try:
with warnings.catch_warnings(record=True) as w:
value = getattr(self, key, None)
if len(w) and w[0].category == DeprecationWarning:
# if the parameter is deprecated, don't show it
continue
finally:
warnings.filters.pop(0)
params[key] = value
return '%s(%s)' % (class_name, _pprint(params, offset=len(class_name)))
| bsd-3-clause |
WagnerLabPapers/Waskom_JNeurosci_2014 | process_searchlight.py | 1 | 4248 | import os
import sys
import os.path as op
import argparse
import numpy as np
import pandas as pd
from scipy import ndimage
from scipy.stats import zscore
import nibabel as nib
import subprocess as sp
from sklearn.linear_model import LogisticRegression
from sklearn.cross_validation import LeaveOneLabelOut
from nilearn.decoding import SearchLight
import lyman
project = lyman.gather_project_info()
data_dir = project["data_dir"]
analysis_dir = project["analysis_dir"]
def main(arglist):
args = parse_args(arglist)
if args.subjects is None:
args.subjects = lyman.determine_subjects()
for subj in args.subjects:
print "Running subject", subj
searchlight_dir = op.join(analysis_dir, "dksort", subj,
"mvpa/searchlight")
if not op.exists(searchlight_dir):
os.mkdir(searchlight_dir)
vol_fname = op.join(searchlight_dir, "dimension_dksort_pfc.nii.gz")
if "fit" in args.do and (not op.exists(vol_fname) or args.overwrite):
print " Doing searchlight"
mask_img, X, y, runs = load_data(subj)
s = SearchLight(mask_img, radius=10, n_jobs=10,
estimator=LogisticRegression(),
cv=LeaveOneLabelOut(runs))
s.fit(X, y)
out_img = nib.Nifti1Image(s.scores_, s.mask_img.get_affine())
out_img.to_filename(vol_fname)
surf_fnames = [op.join(searchlight_dir, "lh.dimension_dksort_pfc.mgz"),
op.join(searchlight_dir, "rh.dimension_dksort_pfc.mgz")]
if "surf" in args.do and (not all(map(op.exists, surf_fnames))
or args.overwrite):
print " Doing surfreg"
reg_fname = op.join(analysis_dir, "dksort", subj,
"preproc/run_1/func2anat_tkreg.dat")
for i, hemi in enumerate(["lh", "rh"]):
cmdline = ["mri_vol2surf",
"--mov", vol_fname,
"--reg", reg_fname,
"--trgsubject", "fsaverage",
"--projfrac-avg", "0", "1", ".1",
"--surf-fwhm", "5",
"--hemi", hemi,
"--o", surf_fnames[i]]
sp.check_output(" ".join(cmdline), shell=True)
def load_data(subj):
design = pd.read_csv(op.join(data_dir, subj, "design/dimension.csv"))
mask_img = nib.load(op.join(data_dir, subj, "masks/dksort_all_pfc.nii.gz"))
mean_img = nib.load(op.join(analysis_dir, "dksort", subj,
"preproc/run_1/mean_func.nii.gz"))
orig_mask_data = mask_img.get_data()
mask_data = ndimage.binary_dilation(orig_mask_data, iterations=2)
mask_img = nib.Nifti1Image(mask_data.astype(int),
mask_img.get_affine(),
mask_img.get_header())
mask_img.to_filename(op.join(analysis_dir, "dksort", subj,
"mvpa/searchlight/dksort_pfc_mask.nii.gz"))
X_data = []
for run in range(1, 5):
ts_img = nib.load(op.join(analysis_dir, "dksort", subj, "reg/epi",
"unsmoothed/run_%d" % run, "timeseries_xfm.nii.gz"))
ts_data = ts_img.get_data()
onsets = design.loc[design.run == run, "onset"].values
indices = np.round(onsets / 2).astype(int)
frame1 = ts_data[..., indices + 2]
frame2 = ts_data[..., indices + 3]
X_run = np.mean([frame1, frame2], axis=0)
X_run = zscore(X_run, axis=-1)
X_data.append(X_run)
X = nib.Nifti1Image(np.concatenate(X_data, axis=-1),
mean_img.get_affine(), mean_img.get_header())
y = design["condition"].values
runs = design["run"].values
return mask_img, X, y, runs
def parse_args(arglist):
parser = argparse.ArgumentParser()
parser.add_argument("-subjects", nargs="*")
parser.add_argument("-do", nargs="*", default=[])
parser.add_argument("-overwrite", action="store_true")
args = parser.parse_args(arglist)
return args
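# Usage sketch (added; subject IDs below are placeholders, flags follow
# parse_args above):
#   python process_searchlight.py -subjects subj01 subj02 -do fit surf -overwrite
# Omitting -subjects falls back to lyman.determine_subjects().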
if __name__ == "__main__":
main(sys.argv[1:])
| bsd-3-clause |
DailyActie/Surrogate-Model | 01-codes/scikit-learn-master/sklearn/cross_validation.py | 1 | 67355 | """
The :mod:`sklearn.cross_validation` module includes utilities for cross-
validation and performance evaluation.
"""
# Author: Alexandre Gramfort <[email protected]>,
# Gael Varoquaux <[email protected]>,
# Olivier Grisel <[email protected]>
# License: BSD 3 clause
from __future__ import division
from __future__ import print_function
import numbers
import time
import warnings
from abc import ABCMeta, abstractmethod
from itertools import chain, combinations
from math import ceil, floor, factorial
import numpy as np
import scipy.sparse as sp
from .externals.six.moves import zip
from .base import is_classifier, clone
from .exceptions import FitFailedWarning
from .externals.joblib import Parallel, delayed, logger
from .externals.six import with_metaclass
from .gaussian_process.kernels import Kernel as GPKernel
from .metrics.scorer import check_scoring
from .utils import indexable, check_random_state, safe_indexing
from .utils.fixes import bincount
from .utils.multiclass import type_of_target
from .utils.validation import (_is_arraylike, _num_samples,
column_or_1d)
warnings.warn("This module has been deprecated in favor of the "
"model_selection module into which all the refactored classes "
"and functions are moved. Also note that the interface of the "
"new CV iterators are different from that of this module. "
"This module will be removed in 0.20.", DeprecationWarning)
__all__ = ['KFold',
'LabelKFold',
'LeaveOneLabelOut',
'LeaveOneOut',
'LeavePLabelOut',
'LeavePOut',
'ShuffleSplit',
'StratifiedKFold',
'StratifiedShuffleSplit',
'PredefinedSplit',
'LabelShuffleSplit',
'check_cv',
'cross_val_score',
'cross_val_predict',
'permutation_test_score',
'train_test_split']
class _PartitionIterator(with_metaclass(ABCMeta)):
"""Base class for CV iterators where train_mask = ~test_mask
Implementations must define `_iter_test_masks` or `_iter_test_indices`.
Parameters
----------
n : int
Total number of elements in dataset.
"""
def __init__(self, n):
if abs(n - int(n)) >= np.finfo('f').eps:
raise ValueError("n must be an integer")
self.n = int(n)
def __iter__(self):
ind = np.arange(self.n)
for test_index in self._iter_test_masks():
train_index = np.logical_not(test_index)
train_index = ind[train_index]
test_index = ind[test_index]
yield train_index, test_index
# Since subclasses must implement either _iter_test_masks or
# _iter_test_indices, neither can be abstract.
def _iter_test_masks(self):
"""Generates boolean masks corresponding to test sets.
By default, delegates to _iter_test_indices()
"""
for test_index in self._iter_test_indices():
test_mask = self._empty_mask()
test_mask[test_index] = True
yield test_mask
def _iter_test_indices(self):
"""Generates integer indices corresponding to test sets."""
raise NotImplementedError
def _empty_mask(self):
return np.zeros(self.n, dtype=np.bool)
class LeaveOneOut(_PartitionIterator):
"""Leave-One-Out cross validation iterator.
Provides train/test indices to split data in train test sets. Each
sample is used once as a test set (singleton) while the remaining
samples form the training set.
Note: ``LeaveOneOut(n)`` is equivalent to ``KFold(n, n_folds=n)`` and
``LeavePOut(n, p=1)``.
Due to the high number of test sets (which is the same as the
number of samples) this cross validation method can be very costly.
For large datasets one should favor KFold, StratifiedKFold or
ShuffleSplit.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n : int
Total number of elements in dataset.
Examples
--------
>>> from sklearn import cross_validation
>>> X = np.array([[1, 2], [3, 4]])
>>> y = np.array([1, 2])
>>> loo = cross_validation.LeaveOneOut(2)
>>> len(loo)
2
>>> print(loo)
sklearn.cross_validation.LeaveOneOut(n=2)
>>> for train_index, test_index in loo:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
... print(X_train, X_test, y_train, y_test)
TRAIN: [1] TEST: [0]
[[3 4]] [[1 2]] [2] [1]
TRAIN: [0] TEST: [1]
[[1 2]] [[3 4]] [1] [2]
See also
--------
LeaveOneLabelOut for splitting the data according to explicit,
domain-specific stratification of the dataset.
"""
def _iter_test_indices(self):
return range(self.n)
def __repr__(self):
return '%s.%s(n=%i)' % (
self.__class__.__module__,
self.__class__.__name__,
self.n,
)
def __len__(self):
return self.n
class LeavePOut(_PartitionIterator):
"""Leave-P-Out cross validation iterator
Provides train/test indices to split data in train test sets. This results
in testing on all distinct samples of size p, while the remaining n - p
samples form the training set in each iteration.
Note: ``LeavePOut(n, p)`` is NOT equivalent to ``KFold(n, n_folds=n // p)``
which creates non-overlapping test sets.
    Due to the high number of iterations, which grows combinatorially with the
number of samples this cross validation method can be very costly. For
large datasets one should favor KFold, StratifiedKFold or ShuffleSplit.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n : int
Total number of elements in dataset.
p : int
Size of the test sets.
Examples
--------
>>> from sklearn import cross_validation
>>> X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
>>> y = np.array([1, 2, 3, 4])
>>> lpo = cross_validation.LeavePOut(4, 2)
>>> len(lpo)
6
>>> print(lpo)
sklearn.cross_validation.LeavePOut(n=4, p=2)
>>> for train_index, test_index in lpo:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [2 3] TEST: [0 1]
TRAIN: [1 3] TEST: [0 2]
TRAIN: [1 2] TEST: [0 3]
TRAIN: [0 3] TEST: [1 2]
TRAIN: [0 2] TEST: [1 3]
TRAIN: [0 1] TEST: [2 3]
"""
def __init__(self, n, p):
super(LeavePOut, self).__init__(n)
self.p = p
def _iter_test_indices(self):
for comb in combinations(range(self.n), self.p):
yield np.array(comb)
def __repr__(self):
return '%s.%s(n=%i, p=%i)' % (
self.__class__.__module__,
self.__class__.__name__,
self.n,
self.p,
)
def __len__(self):
return int(factorial(self.n) / factorial(self.n - self.p)
/ factorial(self.p))
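# Worked check of the count above (added for clarity, not in the original
# module): LeavePOut(4, 2) yields 4! / (2! * 2!) = 6 train/test splits,
# matching ``len(lpo)`` in the docstring example.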
class _BaseKFold(with_metaclass(ABCMeta, _PartitionIterator)):
"""Base class to validate KFold approaches"""
@abstractmethod
def __init__(self, n, n_folds, shuffle, random_state):
super(_BaseKFold, self).__init__(n)
if abs(n_folds - int(n_folds)) >= np.finfo('f').eps:
raise ValueError("n_folds must be an integer")
self.n_folds = n_folds = int(n_folds)
if n_folds <= 1:
raise ValueError(
"k-fold cross validation requires at least one"
" train / test split by setting n_folds=2 or more,"
" got n_folds={0}.".format(n_folds))
if n_folds > self.n:
raise ValueError(
("Cannot have number of folds n_folds={0} greater"
" than the number of samples: {1}.").format(n_folds, n))
if not isinstance(shuffle, bool):
raise TypeError("shuffle must be True or False;"
" got {0}".format(shuffle))
self.shuffle = shuffle
self.random_state = random_state
class KFold(_BaseKFold):
"""K-Folds cross validation iterator.
Provides train/test indices to split data in train test sets. Split
dataset into k consecutive folds (without shuffling by default).
    Each fold is then used as a validation set once while the k - 1 remaining
    folds form the training set.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n : int
Total number of elements.
n_folds : int, default=3
Number of folds. Must be at least 2.
shuffle : boolean, optional
Whether to shuffle the data before splitting into batches.
random_state : None, int or RandomState
When shuffle=True, pseudo-random number generator state used for
shuffling. If None, use default numpy RNG for shuffling.
Examples
--------
>>> from sklearn.cross_validation import KFold
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([1, 2, 3, 4])
>>> kf = KFold(4, n_folds=2)
>>> len(kf)
2
>>> print(kf) # doctest: +NORMALIZE_WHITESPACE
sklearn.cross_validation.KFold(n=4, n_folds=2, shuffle=False,
random_state=None)
>>> for train_index, test_index in kf:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [2 3] TEST: [0 1]
TRAIN: [0 1] TEST: [2 3]
Notes
-----
    The first n % n_folds folds have size n // n_folds + 1; the other folds
    have size n // n_folds.
See also
--------
StratifiedKFold take label information into account to avoid building
folds with imbalanced class distributions (for binary or multiclass
classification tasks).
LabelKFold: K-fold iterator variant with non-overlapping labels.
"""
def __init__(self, n, n_folds=3, shuffle=False,
random_state=None):
super(KFold, self).__init__(n, n_folds, shuffle, random_state)
self.idxs = np.arange(n)
if shuffle:
rng = check_random_state(self.random_state)
rng.shuffle(self.idxs)
def _iter_test_indices(self):
n = self.n
n_folds = self.n_folds
fold_sizes = (n // n_folds) * np.ones(n_folds, dtype=np.int)
fold_sizes[:n % n_folds] += 1
current = 0
for fold_size in fold_sizes:
start, stop = current, current + fold_size
yield self.idxs[start:stop]
current = stop
def __repr__(self):
return '%s.%s(n=%i, n_folds=%i, shuffle=%s, random_state=%s)' % (
self.__class__.__module__,
self.__class__.__name__,
self.n,
self.n_folds,
self.shuffle,
self.random_state,
)
def __len__(self):
return self.n_folds
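# --- Illustrative sketch (added for clarity; not part of the original module).
# It checks the fold-size rule stated in the Notes above using only the KFold
# class defined here.
def _example_kfold_fold_sizes(n=10, n_folds=3):
    """Return the test-fold sizes produced by ``KFold(n, n_folds)``."""
    return [len(test) for _, test in KFold(n, n_folds=n_folds)]
# e.g. _example_kfold_fold_sizes(10, 3) == [4, 3, 3]: the first 10 % 3 = 1 fold
# gets one extra sample.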
class LabelKFold(_BaseKFold):
"""K-fold iterator variant with non-overlapping labels.
The same label will not appear in two different folds (the number of
distinct labels has to be at least equal to the number of folds).
The folds are approximately balanced in the sense that the number of
distinct labels is approximately the same in each fold.
.. versionadded:: 0.17
Parameters
----------
labels : array-like with shape (n_samples, )
Contains a label for each sample.
The folds are built so that the same label does not appear in two
different folds.
n_folds : int, default=3
Number of folds. Must be at least 2.
Examples
--------
>>> from sklearn.cross_validation import LabelKFold
>>> X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
>>> y = np.array([1, 2, 3, 4])
>>> labels = np.array([0, 0, 2, 2])
>>> label_kfold = LabelKFold(labels, n_folds=2)
>>> len(label_kfold)
2
>>> print(label_kfold)
sklearn.cross_validation.LabelKFold(n_labels=4, n_folds=2)
>>> for train_index, test_index in label_kfold:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
... print(X_train, X_test, y_train, y_test)
...
TRAIN: [0 1] TEST: [2 3]
[[1 2]
[3 4]] [[5 6]
[7 8]] [1 2] [3 4]
TRAIN: [2 3] TEST: [0 1]
[[5 6]
[7 8]] [[1 2]
[3 4]] [3 4] [1 2]
See also
--------
LeaveOneLabelOut for splitting the data according to explicit,
domain-specific stratification of the dataset.
"""
def __init__(self, labels, n_folds=3):
super(LabelKFold, self).__init__(len(labels), n_folds,
shuffle=False, random_state=None)
unique_labels, labels = np.unique(labels, return_inverse=True)
n_labels = len(unique_labels)
if n_folds > n_labels:
raise ValueError(
("Cannot have number of folds n_folds={0} greater"
" than the number of labels: {1}.").format(n_folds,
n_labels))
# Weight labels by their number of occurrences
n_samples_per_label = np.bincount(labels)
# Distribute the most frequent labels first
indices = np.argsort(n_samples_per_label)[::-1]
n_samples_per_label = n_samples_per_label[indices]
# Total weight of each fold
n_samples_per_fold = np.zeros(n_folds)
# Mapping from label index to fold index
label_to_fold = np.zeros(len(unique_labels))
# Distribute samples by adding the largest weight to the lightest fold
for label_index, weight in enumerate(n_samples_per_label):
lightest_fold = np.argmin(n_samples_per_fold)
n_samples_per_fold[lightest_fold] += weight
label_to_fold[indices[label_index]] = lightest_fold
self.idxs = label_to_fold[labels]
def _iter_test_indices(self):
for f in range(self.n_folds):
yield np.where(self.idxs == f)[0]
def __repr__(self):
return '{0}.{1}(n_labels={2}, n_folds={3})'.format(
self.__class__.__module__,
self.__class__.__name__,
self.n,
self.n_folds,
)
def __len__(self):
return self.n_folds
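# --- Illustrative sketch (added; hypothetical helper, not in the original
# module). It demonstrates the guarantee documented above: no label ever
# appears on both sides of the same split.
def _example_labelkfold_no_overlap():
    labels = np.array([0, 0, 1, 1, 2, 2, 3, 3])
    for train, test in LabelKFold(labels, n_folds=2):
        # the sets of labels seen in train and test must be disjoint
        assert len(np.intersect1d(labels[train], labels[test])) == 0
    return True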
class StratifiedKFold(_BaseKFold):
"""Stratified K-Folds cross validation iterator
Provides train/test indices to split data in train test sets.
This cross-validation object is a variation of KFold that
returns stratified folds. The folds are made by preserving
the percentage of samples for each class.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
y : array-like, [n_samples]
Samples to split in K folds.
n_folds : int, default=3
Number of folds. Must be at least 2.
shuffle : boolean, optional
Whether to shuffle each stratification of the data before splitting
into batches.
random_state : None, int or RandomState
When shuffle=True, pseudo-random number generator state used for
shuffling. If None, use default numpy RNG for shuffling.
Examples
--------
>>> from sklearn.cross_validation import StratifiedKFold
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([0, 0, 1, 1])
>>> skf = StratifiedKFold(y, n_folds=2)
>>> len(skf)
2
>>> print(skf) # doctest: +NORMALIZE_WHITESPACE
sklearn.cross_validation.StratifiedKFold(labels=[0 0 1 1], n_folds=2,
shuffle=False, random_state=None)
>>> for train_index, test_index in skf:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [1 3] TEST: [0 2]
TRAIN: [0 2] TEST: [1 3]
Notes
-----
    All the folds have size trunc(n_samples / n_folds); the last one has the
    complement.
See also
--------
LabelKFold: K-fold iterator variant with non-overlapping labels.
"""
def __init__(self, y, n_folds=3, shuffle=False,
random_state=None):
super(StratifiedKFold, self).__init__(
len(y), n_folds, shuffle, random_state)
y = np.asarray(y)
n_samples = y.shape[0]
unique_labels, y_inversed = np.unique(y, return_inverse=True)
label_counts = bincount(y_inversed)
min_labels = np.min(label_counts)
if np.all(self.n_folds > label_counts):
raise ValueError("All the n_labels for individual classes"
" are less than %d folds."
% (self.n_folds))
if self.n_folds > min_labels:
warnings.warn(("The least populated class in y has only %d"
" members, which is too few. The minimum"
" number of labels for any class cannot"
" be less than n_folds=%d."
% (min_labels, self.n_folds)), Warning)
# don't want to use the same seed in each label's shuffle
if self.shuffle:
rng = check_random_state(self.random_state)
else:
rng = self.random_state
# pre-assign each sample to a test fold index using individual KFold
# splitting strategies for each label so as to respect the
# balance of labels
per_label_cvs = [
KFold(max(c, self.n_folds), self.n_folds, shuffle=self.shuffle,
random_state=rng) for c in label_counts]
test_folds = np.zeros(n_samples, dtype=np.int)
for test_fold_idx, per_label_splits in enumerate(zip(*per_label_cvs)):
for label, (_, test_split) in zip(unique_labels, per_label_splits):
label_test_folds = test_folds[y == label]
# the test split can be too big because we used
# KFold(max(c, self.n_folds), self.n_folds) instead of
# KFold(c, self.n_folds) to make it possible to not crash even
# if the data is not 100% stratifiable for all the labels
# (we use a warning instead of raising an exception)
# If this is the case, let's trim it:
test_split = test_split[test_split < len(label_test_folds)]
label_test_folds[test_split] = test_fold_idx
test_folds[y == label] = label_test_folds
self.test_folds = test_folds
self.y = y
def _iter_test_masks(self):
for i in range(self.n_folds):
yield self.test_folds == i
def __repr__(self):
return '%s.%s(labels=%s, n_folds=%i, shuffle=%s, random_state=%s)' % (
self.__class__.__module__,
self.__class__.__name__,
self.y,
self.n_folds,
self.shuffle,
self.random_state,
)
def __len__(self):
return self.n_folds
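# --- Illustrative sketch (added; not part of the original module). Each test
# fold should reproduce the 2:1 class ratio of ``y``.
def _example_stratifiedkfold_balance():
    y = np.array([0] * 6 + [1] * 3)
    return [bincount(y[test], minlength=2).tolist()
            for _, test in StratifiedKFold(y, n_folds=3)]
# expected: [[2, 1], [2, 1], [2, 1]]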
class LeaveOneLabelOut(_PartitionIterator):
"""Leave-One-Label_Out cross-validation iterator
Provides train/test indices to split data according to a third-party
provided label. This label information can be used to encode arbitrary
domain specific stratifications of the samples as integers.
For instance the labels could be the year of collection of the samples
and thus allow for cross-validation against time-based splits.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
labels : array-like of int with shape (n_samples,)
Arbitrary domain-specific stratification of the data to be used
to draw the splits.
Examples
--------
>>> from sklearn import cross_validation
>>> X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
>>> y = np.array([1, 2, 1, 2])
>>> labels = np.array([1, 1, 2, 2])
>>> lol = cross_validation.LeaveOneLabelOut(labels)
>>> len(lol)
2
>>> print(lol)
sklearn.cross_validation.LeaveOneLabelOut(labels=[1 1 2 2])
>>> for train_index, test_index in lol:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
... print(X_train, X_test, y_train, y_test)
TRAIN: [2 3] TEST: [0 1]
[[5 6]
[7 8]] [[1 2]
[3 4]] [1 2] [1 2]
TRAIN: [0 1] TEST: [2 3]
[[1 2]
[3 4]] [[5 6]
[7 8]] [1 2] [1 2]
See also
--------
LabelKFold: K-fold iterator variant with non-overlapping labels.
"""
def __init__(self, labels):
super(LeaveOneLabelOut, self).__init__(len(labels))
# We make a copy of labels to avoid side-effects during iteration
self.labels = np.array(labels, copy=True)
self.unique_labels = np.unique(labels)
self.n_unique_labels = len(self.unique_labels)
def _iter_test_masks(self):
for i in self.unique_labels:
yield self.labels == i
def __repr__(self):
return '%s.%s(labels=%s)' % (
self.__class__.__module__,
self.__class__.__name__,
self.labels,
)
def __len__(self):
return self.n_unique_labels
class LeavePLabelOut(_PartitionIterator):
"""Leave-P-Label_Out cross-validation iterator
Provides train/test indices to split data according to a third-party
provided label. This label information can be used to encode arbitrary
domain specific stratifications of the samples as integers.
For instance the labels could be the year of collection of the samples
and thus allow for cross-validation against time-based splits.
The difference between LeavePLabelOut and LeaveOneLabelOut is that
the former builds the test sets with all the samples assigned to
``p`` different values of the labels while the latter uses samples
all assigned the same labels.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
labels : array-like of int with shape (n_samples,)
Arbitrary domain-specific stratification of the data to be used
to draw the splits.
p : int
Number of samples to leave out in the test split.
Examples
--------
>>> from sklearn import cross_validation
>>> X = np.array([[1, 2], [3, 4], [5, 6]])
>>> y = np.array([1, 2, 1])
>>> labels = np.array([1, 2, 3])
>>> lpl = cross_validation.LeavePLabelOut(labels, p=2)
>>> len(lpl)
3
>>> print(lpl)
sklearn.cross_validation.LeavePLabelOut(labels=[1 2 3], p=2)
>>> for train_index, test_index in lpl:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
... print(X_train, X_test, y_train, y_test)
TRAIN: [2] TEST: [0 1]
[[5 6]] [[1 2]
[3 4]] [1] [1 2]
TRAIN: [1] TEST: [0 2]
[[3 4]] [[1 2]
[5 6]] [2] [1 1]
TRAIN: [0] TEST: [1 2]
[[1 2]] [[3 4]
[5 6]] [1] [2 1]
See also
--------
LabelKFold: K-fold iterator variant with non-overlapping labels.
"""
def __init__(self, labels, p):
# We make a copy of labels to avoid side-effects during iteration
super(LeavePLabelOut, self).__init__(len(labels))
self.labels = np.array(labels, copy=True)
self.unique_labels = np.unique(labels)
self.n_unique_labels = len(self.unique_labels)
self.p = p
def _iter_test_masks(self):
comb = combinations(range(self.n_unique_labels), self.p)
for idx in comb:
test_index = self._empty_mask()
idx = np.array(idx)
for l in self.unique_labels[idx]:
test_index[self.labels == l] = True
yield test_index
def __repr__(self):
return '%s.%s(labels=%s, p=%s)' % (
self.__class__.__module__,
self.__class__.__name__,
self.labels,
self.p,
)
def __len__(self):
return int(factorial(self.n_unique_labels) /
factorial(self.n_unique_labels - self.p) /
factorial(self.p))
class BaseShuffleSplit(with_metaclass(ABCMeta)):
"""Base class for ShuffleSplit and StratifiedShuffleSplit"""
def __init__(self, n, n_iter=10, test_size=0.1, train_size=None,
random_state=None):
self.n = n
self.n_iter = n_iter
self.test_size = test_size
self.train_size = train_size
self.random_state = random_state
self.n_train, self.n_test = _validate_shuffle_split(n, test_size,
train_size)
def __iter__(self):
for train, test in self._iter_indices():
yield train, test
return
@abstractmethod
def _iter_indices(self):
"""Generate (train, test) indices"""
class ShuffleSplit(BaseShuffleSplit):
"""Random permutation cross-validation iterator.
Yields indices to split data into training and test sets.
Note: contrary to other cross-validation strategies, random splits
do not guarantee that all folds will be different, although this is
still very likely for sizeable datasets.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n : int
Total number of elements in the dataset.
n_iter : int (default 10)
Number of re-shuffling & splitting iterations.
test_size : float (default 0.1), int, or None
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the test split. If
int, represents the absolute number of test samples. If None,
the value is automatically set to the complement of the train size.
train_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the train split. If
int, represents the absolute number of train samples. If None,
the value is automatically set to the complement of the test size.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
Examples
--------
>>> from sklearn import cross_validation
>>> rs = cross_validation.ShuffleSplit(4, n_iter=3,
... test_size=.25, random_state=0)
>>> len(rs)
3
>>> print(rs)
... # doctest: +ELLIPSIS
ShuffleSplit(4, n_iter=3, test_size=0.25, ...)
>>> for train_index, test_index in rs:
... print("TRAIN:", train_index, "TEST:", test_index)
...
TRAIN: [3 1 0] TEST: [2]
TRAIN: [2 1 3] TEST: [0]
TRAIN: [0 2 1] TEST: [3]
>>> rs = cross_validation.ShuffleSplit(4, n_iter=3,
... train_size=0.5, test_size=.25, random_state=0)
>>> for train_index, test_index in rs:
... print("TRAIN:", train_index, "TEST:", test_index)
...
TRAIN: [3 1] TEST: [2]
TRAIN: [2 1] TEST: [0]
TRAIN: [0 2] TEST: [3]
"""
def _iter_indices(self):
rng = check_random_state(self.random_state)
for i in range(self.n_iter):
# random partition
permutation = rng.permutation(self.n)
ind_test = permutation[:self.n_test]
ind_train = permutation[self.n_test:self.n_test + self.n_train]
yield ind_train, ind_test
def __repr__(self):
return ('%s(%d, n_iter=%d, test_size=%s, '
'random_state=%s)' % (
self.__class__.__name__,
self.n,
self.n_iter,
str(self.test_size),
self.random_state,
))
def __len__(self):
return self.n_iter
def _validate_shuffle_split(n, test_size, train_size):
if test_size is None and train_size is None:
raise ValueError(
'test_size and train_size can not both be None')
if test_size is not None:
if np.asarray(test_size).dtype.kind == 'f':
if test_size >= 1.:
raise ValueError(
'test_size=%f should be smaller '
'than 1.0 or be an integer' % test_size)
elif np.asarray(test_size).dtype.kind == 'i':
if test_size >= n:
raise ValueError(
'test_size=%d should be smaller '
'than the number of samples %d' % (test_size, n))
else:
raise ValueError("Invalid value for test_size: %r" % test_size)
if train_size is not None:
if np.asarray(train_size).dtype.kind == 'f':
if train_size >= 1.:
raise ValueError("train_size=%f should be smaller "
"than 1.0 or be an integer" % train_size)
elif np.asarray(test_size).dtype.kind == 'f' and \
train_size + test_size > 1.:
raise ValueError('The sum of test_size and train_size = %f, '
'should be smaller than 1.0. Reduce '
'test_size and/or train_size.' %
(train_size + test_size))
elif np.asarray(train_size).dtype.kind == 'i':
if train_size >= n:
raise ValueError("train_size=%d should be smaller "
"than the number of samples %d" %
(train_size, n))
else:
raise ValueError("Invalid value for train_size: %r" % train_size)
if np.asarray(test_size).dtype.kind == 'f':
n_test = ceil(test_size * n)
elif np.asarray(test_size).dtype.kind == 'i':
n_test = float(test_size)
if train_size is None:
n_train = n - n_test
else:
if np.asarray(train_size).dtype.kind == 'f':
n_train = floor(train_size * n)
else:
n_train = float(train_size)
if test_size is None:
n_test = n - n_train
if n_train + n_test > n:
raise ValueError('The sum of train_size and test_size = %d, '
'should be smaller than the number of '
'samples %d. Reduce test_size and/or '
'train_size.' % (n_train + n_test, n))
return int(n_train), int(n_test)
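# Worked examples of the arithmetic above (added for clarity, not in the
# original module):
#   _validate_shuffle_split(10, test_size=0.25, train_size=None) -> (7, 3)
#       n_test = ceil(0.25 * 10) = 3 and n_train = 10 - 3 = 7
#   _validate_shuffle_split(10, test_size=0.25, train_size=0.5) -> (5, 3)
#       n_train = floor(0.5 * 10) = 5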
class StratifiedShuffleSplit(BaseShuffleSplit):
"""Stratified ShuffleSplit cross validation iterator
Provides train/test indices to split data in train test sets.
This cross-validation object is a merge of StratifiedKFold and
ShuffleSplit, which returns stratified randomized folds. The folds
are made by preserving the percentage of samples for each class.
Note: like the ShuffleSplit strategy, stratified random splits
do not guarantee that all folds will be different, although this is
still very likely for sizeable datasets.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
y : array, [n_samples]
Labels of samples.
n_iter : int (default 10)
Number of re-shuffling & splitting iterations.
test_size : float (default 0.1), int, or None
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the test split. If
int, represents the absolute number of test samples. If None,
the value is automatically set to the complement of the train size.
train_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the train split. If
int, represents the absolute number of train samples. If None,
the value is automatically set to the complement of the test size.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
Examples
--------
>>> from sklearn.cross_validation import StratifiedShuffleSplit
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([0, 0, 1, 1])
>>> sss = StratifiedShuffleSplit(y, 3, test_size=0.5, random_state=0)
>>> len(sss)
3
>>> print(sss) # doctest: +ELLIPSIS
StratifiedShuffleSplit(labels=[0 0 1 1], n_iter=3, ...)
>>> for train_index, test_index in sss:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [1 2] TEST: [3 0]
TRAIN: [0 2] TEST: [1 3]
TRAIN: [0 2] TEST: [3 1]
"""
def __init__(self, y, n_iter=10, test_size=0.1, train_size=None,
random_state=None):
super(StratifiedShuffleSplit, self).__init__(
len(y), n_iter, test_size, train_size, random_state)
self.y = np.array(y)
self.classes, self.y_indices = np.unique(y, return_inverse=True)
n_cls = self.classes.shape[0]
if np.min(bincount(self.y_indices)) < 2:
raise ValueError("The least populated class in y has only 1"
" member, which is too few. The minimum"
" number of labels for any class cannot"
" be less than 2.")
if self.n_train < n_cls:
raise ValueError('The train_size = %d should be greater or '
'equal to the number of classes = %d' %
(self.n_train, n_cls))
if self.n_test < n_cls:
raise ValueError('The test_size = %d should be greater or '
'equal to the number of classes = %d' %
(self.n_test, n_cls))
def _iter_indices(self):
rng = check_random_state(self.random_state)
cls_count = bincount(self.y_indices)
p_i = cls_count / float(self.n)
n_i = np.round(self.n_train * p_i).astype(int)
t_i = np.minimum(cls_count - n_i,
np.round(self.n_test * p_i).astype(int))
for n in range(self.n_iter):
train = []
test = []
for i, cls in enumerate(self.classes):
permutation = rng.permutation(cls_count[i])
cls_i = np.where((self.y == cls))[0][permutation]
train.extend(cls_i[:n_i[i]])
test.extend(cls_i[n_i[i]:n_i[i] + t_i[i]])
# Because of rounding issues (as n_train and n_test are not
# dividers of the number of elements per class), we may end
# up here with less samples in train and test than asked for.
if len(train) < self.n_train or len(test) < self.n_test:
# We complete by affecting randomly the missing indexes
missing_idx = np.where(bincount(train + test,
minlength=len(self.y)) == 0,
)[0]
missing_idx = rng.permutation(missing_idx)
train.extend(missing_idx[:(self.n_train - len(train))])
test.extend(missing_idx[-(self.n_test - len(test)):])
train = rng.permutation(train)
test = rng.permutation(test)
yield train, test
def __repr__(self):
return ('%s(labels=%s, n_iter=%d, test_size=%s, '
'random_state=%s)' % (
self.__class__.__name__,
self.y,
self.n_iter,
str(self.test_size),
self.random_state,
))
def __len__(self):
return self.n_iter
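# --- Illustrative sketch (added; hypothetical helper, not in the original
# module). With an 8:4 class split and test_size=0.25, the train/test class
# counts should come out close to [6, 3] and [2, 1] respectively.
def _example_stratified_shuffle_counts():
    y = np.array([0] * 8 + [1] * 4)
    sss = StratifiedShuffleSplit(y, n_iter=1, test_size=0.25, random_state=0)
    train, test = next(iter(sss))
    return (bincount(y[train], minlength=2).tolist(),
            bincount(y[test], minlength=2).tolist())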
class PredefinedSplit(_PartitionIterator):
"""Predefined split cross validation iterator
Splits the data into training/test set folds according to a predefined
scheme. Each sample can be assigned to at most one test set fold, as
specified by the user through the ``test_fold`` parameter.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
test_fold : "array-like, shape (n_samples,)
test_fold[i] gives the test set fold of sample i. A value of -1
indicates that the corresponding sample is not part of any test set
folds, but will instead always be put into the training fold.
Examples
--------
>>> from sklearn.cross_validation import PredefinedSplit
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([0, 0, 1, 1])
>>> ps = PredefinedSplit(test_fold=[0, 1, -1, 1])
>>> len(ps)
2
>>> print(ps) # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
sklearn.cross_validation.PredefinedSplit(test_fold=[ 0 1 -1 1])
>>> for train_index, test_index in ps:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [1 2 3] TEST: [0]
TRAIN: [0 2] TEST: [1 3]
"""
def __init__(self, test_fold):
super(PredefinedSplit, self).__init__(len(test_fold))
self.test_fold = np.array(test_fold, dtype=np.int)
self.test_fold = column_or_1d(self.test_fold)
self.unique_folds = np.unique(self.test_fold)
self.unique_folds = self.unique_folds[self.unique_folds != -1]
def _iter_test_indices(self):
for f in self.unique_folds:
yield np.where(self.test_fold == f)[0]
def __repr__(self):
return '%s.%s(test_fold=%s)' % (
self.__class__.__module__,
self.__class__.__name__,
self.test_fold)
def __len__(self):
return len(self.unique_folds)
class LabelShuffleSplit(ShuffleSplit):
"""Shuffle-Labels-Out cross-validation iterator
Provides randomized train/test indices to split data according to a
third-party provided label. This label information can be used to encode
arbitrary domain specific stratifications of the samples as integers.
For instance the labels could be the year of collection of the samples
and thus allow for cross-validation against time-based splits.
The difference between LeavePLabelOut and LabelShuffleSplit is that
the former generates splits using all subsets of size ``p`` unique labels,
whereas LabelShuffleSplit generates a user-determined number of random
test splits, each with a user-determined fraction of unique labels.
For example, a less computationally intensive alternative to
``LeavePLabelOut(labels, p=10)`` would be
``LabelShuffleSplit(labels, test_size=10, n_iter=100)``.
Note: The parameters ``test_size`` and ``train_size`` refer to labels, and
not to samples, as in ShuffleSplit.
.. versionadded:: 0.17
Parameters
----------
labels : array, [n_samples]
Labels of samples
n_iter : int (default 5)
Number of re-shuffling and splitting iterations.
test_size : float (default 0.2), int, or None
If float, should be between 0.0 and 1.0 and represent the
proportion of the labels to include in the test split. If
int, represents the absolute number of test labels. If None,
the value is automatically set to the complement of the train size.
train_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the labels to include in the train split. If
int, represents the absolute number of train labels. If None,
the value is automatically set to the complement of the test size.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
"""
def __init__(self, labels, n_iter=5, test_size=0.2, train_size=None,
random_state=None):
classes, label_indices = np.unique(labels, return_inverse=True)
super(LabelShuffleSplit, self).__init__(
len(classes),
n_iter=n_iter,
test_size=test_size,
train_size=train_size,
random_state=random_state)
self.labels = labels
self.classes = classes
self.label_indices = label_indices
def __repr__(self):
return ('%s(labels=%s, n_iter=%d, test_size=%s, '
'random_state=%s)' % (
self.__class__.__name__,
self.labels,
self.n_iter,
str(self.test_size),
self.random_state,
))
def __len__(self):
return self.n_iter
def _iter_indices(self):
for label_train, label_test in super(LabelShuffleSplit,
self)._iter_indices():
# these are the indices of classes in the partition
# invert them into data indices
train = np.flatnonzero(np.in1d(self.label_indices, label_train))
test = np.flatnonzero(np.in1d(self.label_indices, label_test))
yield train, test
##############################################################################
def _index_param_value(X, v, indices):
"""Private helper function for parameter value indexing."""
if not _is_arraylike(v) or _num_samples(v) != _num_samples(X):
# pass through: skip indexing
return v
if sp.issparse(v):
v = v.tocsr()
return safe_indexing(v, indices)
def cross_val_predict(estimator, X, y=None, cv=None, n_jobs=1,
verbose=0, fit_params=None, pre_dispatch='2*n_jobs'):
"""Generate cross-validated estimates for each input data point
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
estimator : estimator object implementing 'fit' and 'predict'
The object to use to fit the data.
X : array-like
The data to fit. Can be, for example a list, or an array at least 2d.
y : array-like, optional, default: None
The target variable to try to predict in the case of
supervised learning.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, if the estimator is a classifier and ``y`` is
        either binary or multiclass, :class:`StratifiedKFold` is used. In all
other cases, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
n_jobs : integer, optional
The number of CPUs to use to do the computation. -1 means
'all CPUs'.
verbose : integer, optional
The verbosity level.
fit_params : dict, optional
Parameters to pass to the fit method of the estimator.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
Returns
-------
preds : ndarray
This is the result of calling 'predict'
"""
X, y = indexable(X, y)
cv = check_cv(cv, X, y, classifier=is_classifier(estimator))
# We clone the estimator to make sure that all the folds are
# independent, and that it is pickle-able.
parallel = Parallel(n_jobs=n_jobs, verbose=verbose,
pre_dispatch=pre_dispatch)
preds_blocks = parallel(delayed(_fit_and_predict)(clone(estimator), X, y,
train, test, verbose,
fit_params)
for train, test in cv)
preds = [p for p, _ in preds_blocks]
locs = np.concatenate([loc for _, loc in preds_blocks])
if not _check_is_partition(locs, _num_samples(X)):
raise ValueError('cross_val_predict only works for partitions')
inv_locs = np.empty(len(locs), dtype=int)
inv_locs[locs] = np.arange(len(locs))
# Check for sparse predictions
if sp.issparse(preds[0]):
preds = sp.vstack(preds, format=preds[0].format)
else:
preds = np.concatenate(preds)
return preds[inv_locs]
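# Usage sketch (added for clarity, not in the original module). The estimator
# and data names below are placeholders; any scikit-learn estimator with a
# ``predict`` method works:
#
#     from sklearn.linear_model import LinearRegression
#     X = np.arange(20).reshape(10, 2)
#     y = np.arange(10)
#     y_hat = cross_val_predict(LinearRegression(), X, y, cv=5)
#     assert y_hat.shape == y.shape   # one prediction per input sample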
def _fit_and_predict(estimator, X, y, train, test, verbose, fit_params):
"""Fit estimator and predict values for a given dataset split.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
estimator : estimator object implementing 'fit' and 'predict'
The object to use to fit the data.
X : array-like of shape at least 2D
The data to fit.
y : array-like, optional, default: None
The target variable to try to predict in the case of
supervised learning.
train : array-like, shape (n_train_samples,)
Indices of training samples.
test : array-like, shape (n_test_samples,)
Indices of test samples.
verbose : integer
The verbosity level.
fit_params : dict or None
Parameters that will be passed to ``estimator.fit``.
Returns
-------
preds : sequence
Result of calling 'estimator.predict'
test : array-like
This is the value of the test parameter
"""
# Adjust length of sample weights
fit_params = fit_params if fit_params is not None else {}
fit_params = dict([(k, _index_param_value(X, v, train))
for k, v in fit_params.items()])
X_train, y_train = _safe_split(estimator, X, y, train)
X_test, _ = _safe_split(estimator, X, y, test, train)
if y_train is None:
estimator.fit(X_train, **fit_params)
else:
estimator.fit(X_train, y_train, **fit_params)
preds = estimator.predict(X_test)
return preds, test
def _check_is_partition(locs, n):
"""Check whether locs is a reordering of the array np.arange(n)
Parameters
----------
locs : ndarray
integer array to test
n : int
number of expected elements
Returns
-------
is_partition : bool
True iff sorted(locs) is range(n)
"""
if len(locs) != n:
return False
hit = np.zeros(n, bool)
hit[locs] = True
if not np.all(hit):
return False
return True
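# e.g. (added for clarity): _check_is_partition(np.array([2, 0, 1]), 3) is True,
# while _check_is_partition(np.array([0, 0, 1]), 3) is False because index 2 is
# never visited.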
def cross_val_score(estimator, X, y=None, scoring=None, cv=None, n_jobs=1,
verbose=0, fit_params=None, pre_dispatch='2*n_jobs'):
"""Evaluate a score by cross-validation
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
X : array-like
The data to fit. Can be, for example a list, or an array at least 2d.
y : array-like, optional, default: None
The target variable to try to predict in the case of
supervised learning.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, if the estimator is a classifier and ``y`` is
        either binary or multiclass, :class:`StratifiedKFold` is used. In all
other cases, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
n_jobs : integer, optional
The number of CPUs to use to do the computation. -1 means
'all CPUs'.
verbose : integer, optional
The verbosity level.
fit_params : dict, optional
Parameters to pass to the fit method of the estimator.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
Returns
-------
scores : array of float, shape=(len(list(cv)),)
Array of scores of the estimator for each run of the cross validation.
"""
X, y = indexable(X, y)
cv = check_cv(cv, X, y, classifier=is_classifier(estimator))
scorer = check_scoring(estimator, scoring=scoring)
# We clone the estimator to make sure that all the folds are
# independent, and that it is pickle-able.
parallel = Parallel(n_jobs=n_jobs, verbose=verbose,
pre_dispatch=pre_dispatch)
scores = parallel(delayed(_fit_and_score)(clone(estimator), X, y, scorer,
train, test, verbose, None,
fit_params)
for train, test in cv)
return np.array(scores)[:, 0]
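# Usage sketch (added; estimator and data names are placeholders):
#
#     from sklearn.svm import SVC
#     scores = cross_val_score(SVC(), X, y, cv=5)
#     print(scores.mean(), scores.std())   # one score per fold, here 5 of them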
def _fit_and_score(estimator, X, y, scorer, train, test, verbose,
parameters, fit_params, return_train_score=False,
return_parameters=False, error_score='raise'):
"""Fit estimator and compute scores for a given dataset split.
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
X : array-like of shape at least 2D
The data to fit.
y : array-like, optional, default: None
The target variable to try to predict in the case of
supervised learning.
scorer : callable
A scorer callable object / function with signature
``scorer(estimator, X, y)``.
train : array-like, shape (n_train_samples,)
Indices of training samples.
test : array-like, shape (n_test_samples,)
Indices of test samples.
verbose : integer
The verbosity level.
error_score : 'raise' (default) or numeric
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised. If a numeric value is given,
FitFailedWarning is raised. This parameter does not affect the refit
step, which will always raise the error.
parameters : dict or None
Parameters to be set on the estimator.
fit_params : dict or None
Parameters that will be passed to ``estimator.fit``.
return_train_score : boolean, optional, default: False
Compute and return score on training set.
return_parameters : boolean, optional, default: False
        Return the parameters that have been used for the estimator.
Returns
-------
train_score : float, optional
Score on training set, returned only if `return_train_score` is `True`.
test_score : float
Score on test set.
n_test_samples : int
Number of test samples.
scoring_time : float
Time spent for fitting and scoring in seconds.
parameters : dict or None, optional
The parameters that have been evaluated.
"""
if verbose > 1:
if parameters is None:
msg = "no parameters to be set"
else:
msg = '%s' % (', '.join('%s=%s' % (k, v)
for k, v in parameters.items()))
print("[CV] %s %s" % (msg, (64 - len(msg)) * '.'))
# Adjust length of sample weights
fit_params = fit_params if fit_params is not None else {}
fit_params = dict([(k, _index_param_value(X, v, train))
for k, v in fit_params.items()])
if parameters is not None:
estimator.set_params(**parameters)
start_time = time.time()
X_train, y_train = _safe_split(estimator, X, y, train)
X_test, y_test = _safe_split(estimator, X, y, test, train)
try:
if y_train is None:
estimator.fit(X_train, **fit_params)
else:
estimator.fit(X_train, y_train, **fit_params)
except Exception as e:
if error_score == 'raise':
raise
elif isinstance(error_score, numbers.Number):
test_score = error_score
if return_train_score:
train_score = error_score
warnings.warn("Classifier fit failed. The score on this train-test"
" partition for these parameters will be set to %f. "
"Details: \n%r" % (error_score, e), FitFailedWarning)
else:
raise ValueError("error_score must be the string 'raise' or a"
" numeric value. (Hint: if using 'raise', please"
" make sure that it has been spelled correctly.)"
)
else:
test_score = _score(estimator, X_test, y_test, scorer)
if return_train_score:
train_score = _score(estimator, X_train, y_train, scorer)
scoring_time = time.time() - start_time
if verbose > 2:
msg += ", score=%f" % test_score
if verbose > 1:
end_msg = "%s -%s" % (msg, logger.short_format_time(scoring_time))
print("[CV] %s %s" % ((64 - len(end_msg)) * '.', end_msg))
ret = [train_score] if return_train_score else []
ret.extend([test_score, _num_samples(X_test), scoring_time])
if return_parameters:
ret.append(parameters)
return ret
def _safe_split(estimator, X, y, indices, train_indices=None):
"""Create subset of dataset and properly handle kernels."""
if hasattr(estimator, 'kernel') and callable(estimator.kernel) \
and not isinstance(estimator.kernel, GPKernel):
# cannot compute the kernel values with custom function
raise ValueError("Cannot use a custom kernel function. "
"Precompute the kernel matrix instead.")
if not hasattr(X, "shape"):
if getattr(estimator, "_pairwise", False):
raise ValueError("Precomputed kernels or affinity matrices have "
"to be passed as arrays or sparse matrices.")
X_subset = [X[idx] for idx in indices]
else:
if getattr(estimator, "_pairwise", False):
# X is a precomputed square kernel matrix
if X.shape[0] != X.shape[1]:
raise ValueError("X should be a square kernel matrix")
if train_indices is None:
X_subset = X[np.ix_(indices, indices)]
else:
X_subset = X[np.ix_(indices, train_indices)]
else:
X_subset = safe_indexing(X, indices)
if y is not None:
y_subset = safe_indexing(y, indices)
else:
y_subset = None
return X_subset, y_subset
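# --- Illustrative sketch (added; hypothetical helper, not in the original
# module). For a pairwise estimator the test block of a precomputed kernel is
# indexed as K[test, train]; np.ix_ builds exactly that sub-matrix.
def _example_safe_split_kernel_block():
    K = np.arange(16).reshape(4, 4)          # a 4x4 "kernel" matrix
    test, train = np.array([0, 1]), np.array([2, 3])
    return K[np.ix_(test, train)]            # -> [[2, 3], [6, 7]]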
def _score(estimator, X_test, y_test, scorer):
"""Compute the score of an estimator on a given test set."""
if y_test is None:
score = scorer(estimator, X_test)
else:
score = scorer(estimator, X_test, y_test)
if not isinstance(score, numbers.Number):
raise ValueError("scoring must return a number, got %s (%s) instead."
% (str(score), type(score)))
return score
def _permutation_test_score(estimator, X, y, cv, scorer):
"""Auxiliary function for permutation_test_score"""
avg_score = []
for train, test in cv:
estimator.fit(X[train], y[train])
avg_score.append(scorer(estimator, X[test], y[test]))
return np.mean(avg_score)
def _shuffle(y, labels, random_state):
"""Return a shuffled copy of y eventually shuffle among same labels."""
if labels is None:
ind = random_state.permutation(len(y))
else:
ind = np.arange(len(labels))
for label in np.unique(labels):
this_mask = (labels == label)
ind[this_mask] = random_state.permutation(ind[this_mask])
return y[ind]
def check_cv(cv, X=None, y=None, classifier=False):
"""Input checker utility for building a CV in a user friendly way.
Parameters
----------
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, if classifier is True and ``y`` is binary or
        multiclass, :class:`StratifiedKFold` is used. In all other cases,
:class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
X : array-like
The data the cross-val object will be applied on.
y : array-like
The target variable for a supervised learning problem.
classifier : boolean optional
Whether the task is a classification task, in which case
stratified KFold will be used.
Returns
-------
checked_cv: a cross-validation generator instance.
The return value is guaranteed to be a cv generator instance, whatever
the input type.
"""
is_sparse = sp.issparse(X)
if cv is None:
cv = 3
if isinstance(cv, numbers.Integral):
if classifier:
if type_of_target(y) in ['binary', 'multiclass']:
cv = StratifiedKFold(y, cv)
else:
cv = KFold(_num_samples(y), cv)
else:
if not is_sparse:
n_samples = len(X)
else:
n_samples = X.shape[0]
cv = KFold(n_samples, cv)
return cv
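# Dispatch sketch for check_cv (added for clarity, not in the original module):
#   check_cv(5, X, y, classifier=True)      -> StratifiedKFold(y, 5) for
#                                              binary/multiclass y
#   check_cv(None, X, y, classifier=False)  -> KFold(n_samples, 3)
#   check_cv(some_cv_generator, ...)        -> returned unchanged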
def permutation_test_score(estimator, X, y, cv=None,
n_permutations=100, n_jobs=1, labels=None,
random_state=0, verbose=0, scoring=None):
"""Evaluate the significance of a cross-validated score with permutations
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
X : array-like of shape at least 2D
The data to fit.
y : array-like
The target variable to try to predict in the case of
supervised learning.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, if the estimator is a classifier and ``y`` is
        either binary or multiclass, :class:`StratifiedKFold` is used. In all
other cases, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
n_permutations : integer, optional
Number of times to permute ``y``.
n_jobs : integer, optional
The number of CPUs to use to do the computation. -1 means
'all CPUs'.
labels : array-like of shape [n_samples] (optional)
Labels constrain the permutation among groups of samples with
a same label.
random_state : RandomState or an int seed (0 by default)
A random number generator instance to define the state of the
random permutations generator.
verbose : integer, optional
The verbosity level.
Returns
-------
score : float
The true score without permuting targets.
permutation_scores : array, shape (n_permutations,)
The scores obtained for each permutations.
pvalue : float
The returned value equals p-value if `scoring` returns bigger
numbers for better scores (e.g., accuracy_score). If `scoring` is
rather a loss function (i.e. when lower is better such as with
`mean_squared_error`) then this is actually the complement of the
p-value: 1 - p-value.
Notes
-----
This function implements Test 1 in:
Ojala and Garriga. Permutation Tests for Studying Classifier
Performance. The Journal of Machine Learning Research (2010)
vol. 11
"""
X, y = indexable(X, y)
cv = check_cv(cv, X, y, classifier=is_classifier(estimator))
scorer = check_scoring(estimator, scoring=scoring)
random_state = check_random_state(random_state)
# We clone the estimator to make sure that all the folds are
# independent, and that it is pickle-able.
score = _permutation_test_score(clone(estimator), X, y, cv, scorer)
permutation_scores = Parallel(n_jobs=n_jobs, verbose=verbose)(
delayed(_permutation_test_score)(
clone(estimator), X, _shuffle(y, labels, random_state), cv,
scorer)
for _ in range(n_permutations))
permutation_scores = np.array(permutation_scores)
pvalue = (np.sum(permutation_scores >= score) + 1.0) / (n_permutations + 1)
return score, permutation_scores, pvalue
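# Worked example of the p-value above (added for clarity): if 4 of 100
# permutation scores reach or beat the true score, then
# pvalue = (4 + 1) / (100 + 1), about 0.0495 -- by construction it can never
# be exactly zero.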
permutation_test_score.__test__ = False  # to avoid a problem with nosetests
def train_test_split(*arrays, **options):
"""Split arrays or matrices into random train and test subsets
Quick utility that wraps input validation and
``next(iter(ShuffleSplit(n_samples)))`` and application to input
data into a single call for splitting (and optionally subsampling)
    data in a one-liner.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
*arrays : sequence of indexables with same length / shape[0]
allowed inputs are lists, numpy arrays, scipy-sparse
matrices or pandas dataframes.
.. versionadded:: 0.16
preserves input type instead of always casting to numpy array.
test_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the test split. If
int, represents the absolute number of test samples. If None,
the value is automatically set to the complement of the train size.
If train size is also None, test size is set to 0.25.
train_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the train split. If
int, represents the absolute number of train samples. If None,
the value is automatically set to the complement of the test size.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
stratify : array-like or None (default is None)
If not None, data is split in a stratified fashion, using this as
the labels array.
.. versionadded:: 0.17
*stratify* splitting
Returns
-------
splitting : list, length = 2 * len(arrays),
List containing train-test split of inputs.
.. versionadded:: 0.16
Output type is the same as the input type.
Examples
--------
>>> import numpy as np
>>> from sklearn.cross_validation import train_test_split
>>> X, y = np.arange(10).reshape((5, 2)), range(5)
>>> X
array([[0, 1],
[2, 3],
[4, 5],
[6, 7],
[8, 9]])
>>> list(y)
[0, 1, 2, 3, 4]
>>> X_train, X_test, y_train, y_test = train_test_split(
... X, y, test_size=0.33, random_state=42)
...
>>> X_train
array([[4, 5],
[0, 1],
[6, 7]])
>>> y_train
[2, 0, 3]
>>> X_test
array([[2, 3],
[8, 9]])
>>> y_test
[1, 4]
"""
n_arrays = len(arrays)
if n_arrays == 0:
raise ValueError("At least one array required as input")
test_size = options.pop('test_size', None)
train_size = options.pop('train_size', None)
random_state = options.pop('random_state', None)
stratify = options.pop('stratify', None)
if options:
raise TypeError("Invalid parameters passed: %s" % str(options))
if test_size is None and train_size is None:
test_size = 0.25
arrays = indexable(*arrays)
if stratify is not None:
cv = StratifiedShuffleSplit(stratify, test_size=test_size,
train_size=train_size,
random_state=random_state)
else:
n_samples = _num_samples(arrays[0])
cv = ShuffleSplit(n_samples, test_size=test_size,
train_size=train_size,
random_state=random_state)
train, test = next(iter(cv))
return list(chain.from_iterable((safe_indexing(a, train),
safe_indexing(a, test)) for a in arrays))
train_test_split.__test__ = False  # to avoid a problem with nosetests
| mit |
baklanovp/pystella | tests/test_flx.py | 1 | 1102 | import unittest
from os.path import dirname, abspath, join
import os
import matplotlib.pyplot as plt
import pystella.model.sn_flx as flx
from pystella.model.stella import Stella
__author__ = 'bakl'
class TestStellaFlx(unittest.TestCase):
def setUp(self):
pass
# name = 'cat_R1000_M15_Ni007_E15'
# path = join(dirname(abspath(__file__)), 'data', 'stella')
# stella = Stella(name, path=path)
# self.flx = stella.get_flx()
def test_flx_reader(self):
name = 'cat_R1000_M15_Ni007_E15'
path = join(dirname(abspath(__file__)), 'data', 'stella')
fname = os.path.join(path, name + '.flx')
flx_data = flx.flx_reader(fname)
flx_data.show_emergent_Fl(logy=False)
plt.show()
def test_stella_get_flx(self):
name = 'cat_R1000_M15_Ni007_E15'
path = join(dirname(abspath(__file__)), 'data', 'stella')
flx_data = Stella(name, path=path).get_flx()
flx_data.show_emergent_Fl(logy=False)
plt.show()
def main():
unittest.main()
if __name__ == '__main__':
main()
| mit |
TheSriram/MLT4Trading | Project 3/testlearner.py | 1 | 6856 | from __future__ import division
from Randomforestlearner import Randomforestlearner
import numpy
import datetime
import os
import matplotlib.pyplot as plt
import numpy as np
import math
def get_graph_two_plots(x_series,y_series,y1_series,xlabel,ylabel,name_file):
plt.clf()
plt.xlabel(xlabel)
plt.ylabel(ylabel)
first = plt.plot(x_series,y_series,color='r')
second = plt.plot(x_series,y1_series,color='b')
plt.legend(["Ypredict","Yactual"])
plt.savefig(name_file)
def get_correlation(Y_return,Y_test):
    correlation_matrix = np.corrcoef(Y_return, np.squeeze(np.asarray(Y_test)))
    return correlation_matrix[0, 1]
def get_rmse(Y_return,Y_Test):
return math.sqrt(np.mean(np.square(Y_return-Y_Test)))
def scatter(Y_return,Y_test,name_file):
plt.clf()
fig = plt.figure(figsize=(6, 5))
graph = fig.add_subplot(1,1,1)
graph.scatter(Y_return,Y_test)
graph.set_title("predicted Y vs actual Y")
    graph.set_xlabel("Days")
    graph.set_ylabel("Y")
fig.savefig(name_file)
def all_feature_graph(y1,y2,y3,y4,y5,x,xlabel,ylabel,name_file):
plt.clf()
plt.xlabel(xlabel)
plt.ylabel(ylabel)
first = plt.plot(x,y1)
second = plt.plot(x,y2)
third = plt.plot(x,y3)
fourth = plt.plot(x,y4)
fifth = plt.plot(x,y5)
plt.legend(["mean","stddev","rsi","roc","slope"])
plt.savefig(name_file)
def read_file(filename):
required_all=list()
inf = open(filename)
for s in reversed(inf.readlines()):
all_needed = s.split(',')
required = all_needed[:2]
if(all_needed[0]!='Date'):
required_all.append(float(required[1]))
return required_all
def get_mean(dataset_21):
return numpy.mean(dataset_21)
def get_stddev(dataset_21):
return numpy.std(dataset_21)
def get_relative_strength_idx(dataset_21):
comparer = dataset_21[0]
gain =0.0
loss =0.0
new_dataset= dataset_21[0:]
for each in new_dataset:
if each > comparer:
gain = gain + (each-comparer)
elif each < comparer:
loss = loss + (comparer-each)
if loss ==0.0:
return 100.0
elif gain ==0.0:
return 0.0
else:
rs = (gain/len(new_dataset))/(loss/len(new_dataset))
rsi = float(100 - (100/(1+rs)))
return rsi
def roc(dataset_21):
latest = dataset_21[len(dataset_21)-1]
oldest = dataset_21[0]
ROC = (latest - oldest) / (oldest) * 100
return ROC
def slope(dataset_21):
x = numpy.array([i for i in range(1,22)])
y = numpy.array([data for data in dataset_21])
A = numpy.vstack([x, numpy.ones(len(x))]).T
m, c = numpy.linalg.lstsq(A, y)[0]
return m
def createX(datesandY):
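    # Slide a 21-day window over the price series: each feature row is
    # [mean, stddev, RSI, ROC, slope] of the window, and the appended label is the
    # price change five days ahead (datesandY[index+4] - datesandY[index]).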
totalX =[]
index = 21
start = 0
while index <=len(datesandY)-5:
dataset_21 = datesandY[start:index]
totalX.append([get_mean(dataset_21),get_stddev(dataset_21),get_relative_strength_idx(dataset_21),roc(dataset_21),slope(dataset_21),datesandY[index+4]-datesandY[index]])
start = start+1
index= index+1
return numpy.array(totalX)
def main():
curr_dirr = os.getcwd()
os.chdir('proj3-data-fixed')
datesandY = read_file('ML4T-000.csv')
stack = createX(datesandY)
# print len(stack)
for i in range(1,10):
datesandY = read_file('ML4T-00'+str(i)+'.csv')
learnedvalues = createX(datesandY)
stack = numpy.vstack((stack,learnedvalues))
for i in range(11,100):
datesandY = read_file('ML4T-0'+str(i)+'.csv')
learnedvalues = createX(datesandY)
stack = numpy.vstack((stack,learnedvalues))
print len(stack)
testdatesandY = read_file('ML4T-292.csv')
test = createX(testdatesandY)
(XTrain,YTrain) = numpy.split(stack,[5],axis=1)
(XTest,YTest) = numpy.split(test,[5],axis=1)
# print XTest
randomforestlearner = Randomforestlearner(k=50)
randomforestlearner.addEvidence(XTrain,YTrain)
Y_Return = numpy.multiply(numpy.array(randomforestlearner.query(XTest)),-1)
Y_Test = np.squeeze(np.asarray(YTest))
start=0
index=5
print len(Y_Return)
print len(Y_Test)
print len(testdatesandY)
while index<len(testdatesandY)-26:
Y_Return[start]=Y_Return[start]+testdatesandY[index]
Y_Test[start]=Y_Test[start]+testdatesandY[index]
start = start+1
index=index+1
os.chdir(curr_dirr)
get_graph_two_plots(numpy.arange(1,101),Y_Return[:100],Y_Test[:100],"Days","Y","YpredictvsYactual_292_first100.jpg")
last126_test = Y_Test[-126:]
last126_return = Y_Return[-126:]
get_graph_two_plots(numpy.arange(1,101),last126_return[:100],last126_test[:100],"Days","Y","YpredictvsYactual_292_last100.jpg")
scatter(Y_Return,Y_Test,"scatterplot_292.jpg")
mean_series = XTest[:,0]
std_series =XTest[:,1]
rsi_series = XTest[:,2]
roc_series = XTest[:,3]
slope_series = XTest[:,4]
all_feature_graph(mean_series[:100],std_series[:100],rsi_series[:100],roc_series[:100],slope_series[:100],numpy.arange(1,101),"Days","Features","Allfeature_292.jpg")
print "Correlation 292 is {0}".format(get_correlation(Y_Test,Y_Return))
print "RMSE 292 is {0}".format(get_rmse(Y_Test,Y_Return))
os.chdir('proj3-data-fixed')
testdatesandY = read_file('ML4T-132.csv')
test = createX(testdatesandY)
(XTrain,YTrain) = numpy.split(stack,[5],axis=1)
(XTest,YTest) = numpy.split(test,[5],axis=1)
# print XTest
randomforestlearner = Randomforestlearner(k=50)
randomforestlearner.addEvidence(XTrain,YTrain)
Y_Return = numpy.multiply(numpy.array(randomforestlearner.query(XTest)),-1)
Y_Test = np.squeeze(np.asarray(YTest))
start=0
index=5
print len(Y_Return)
print len(Y_Test)
print len(testdatesandY)
while index<len(testdatesandY)-26:
Y_Return[start]=Y_Return[start]+testdatesandY[index]
Y_Test[start]=Y_Test[start]+testdatesandY[index]
start = start+1
index=index+1
os.chdir(curr_dirr)
get_graph_two_plots(numpy.arange(1,101),Y_Return[:100],Y_Test[:100],"Days","Y","YpredictvsYactual_132_first100.jpg")
last126_test = Y_Test[-126:]
last126_return = Y_Return[-126:]
get_graph_two_plots(numpy.arange(1,101),last126_return[:100],last126_test[:100],"Days","Y","YpredictvsYactual_132_last100.jpg")
scatter(Y_Return,Y_Test,"scatterplot_132.jpg")
mean_series = XTest[:,0]
std_series =XTest[:,1]
rsi_series = XTest[:,2]
roc_series = XTest[:,3]
slope_series = XTest[:,4]
all_feature_graph(mean_series[:100],std_series[:100],rsi_series[:100],roc_series[:100],slope_series[:100],numpy.arange(1,101),"Days","Features","Allfeature_132.jpg")
print "Correlation 132 is {0}".format(get_correlation(Y_Test,Y_Return))
print "RMSE 132 is {0}".format(get_rmse(Y_Test,Y_Return))
if __name__ == '__main__':
main()
| apache-2.0 |
rebeccabilbro/machine-learning | code/polyregviz.py | 3 | 2466 | import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from utils import load_energy, DATA_DIR
from sklearn.linear_model import Ridge
from sklearn.preprocessing import PolynomialFeatures
from sklearn.pipeline import make_pipeline
def interpolation_viz(degree_max=6,):
"""
Shows interpolation of polynomial degree with Ridge.
"""
def f(x):
""" function to approximate by polynomial interpolation"""
return x * np.sin(x)
# generate points used to plot
x_plot = np.linspace(0, 10, 100)
# generate points and keep a subset of them
x = np.linspace(0, 10, 100)
rng = np.random.RandomState(0)
rng.shuffle(x)
x = np.sort(x[:20])
y = f(x)
# create matrix versions of these arrays
X = x[:, np.newaxis]
X_plot = x_plot[:, np.newaxis]
plt.plot(x_plot, f(x_plot), label="ground truth")
plt.scatter(x, y, label="training points")
for degree in xrange(degree_max):
model = make_pipeline(PolynomialFeatures(degree), Ridge())
model.fit(X, y)
y_plot = model.predict(X_plot)
plt.plot(x_plot, y_plot, label="degree %d" % degree)
plt.legend(loc='lower left')
plt.show()
def nba_viz(degree=None):
"""
Regression of NBA Data set with Ridge
"""
df = pd.read_csv(os.path.join(DATA_DIR, 'nba_players.csv'))
    plt.scatter(df['PER'], df['SALARY'], label="training points",
alpha=.6, color='#70B7BA')
score = None
if degree is not None:
x = np.linspace(0, 35, 100)
model = make_pipeline(PolynomialFeatures(degree), Ridge())
model.fit(df['PER'].reshape((len(df['PER']), 1)), df['SALARY'])
y_plot = model.predict(x.reshape((len(x), 1)))
plt.plot(x, y_plot, label="ridge degree %d" % degree,
color="#F1433F", linewidth=2, alpha=.7)
score = model.score(df['PER'].reshape((len(df['PER']), 1)),
df['SALARY'])
plt.ylim(0, df['SALARY'].max() + 100000)
plt.xlim(0, df['PER'].max() + 5)
plt.ylabel('salary')
plt.xlabel('player efficiency rating')
if score is not None:
plt.title('NBA 2013 PER to Salary; Score: %0.3f' % score)
else:
plt.title('NBA 2013 PER to Salary Correlation')
plt.legend(loc='lower right')
# plt.show()
plt.savefig("/Users/benjamin/Desktop/nba_regression_degree_%i.png" % degree)
if __name__ == '__main__':
nba_viz(3)
| mit |
andreashorn/lead_dbs | ext_libs/OSS-DBS/OSS_platform/Electrode_files/Boston_Scientific_Vercise_Cartesia_profile.py | 1 | 34041 | # -*- coding: utf-8 -*-
###
### This file is generated automatically by SALOME v8.3.0 with dump python functionality
###### Run with DPS_lead_position_V9.py
import sys
import salome
salome.salome_init()
theStudy = salome.myStudy
import salome_notebook
notebook = salome_notebook.NoteBook(theStudy)
###
### GEOM component
###
########################################### extra code 1 V10 15/12/18#############################################
###### This file runs with DBS_lead_position_V10.py
import os
sys.path.insert( 0, r'{}'.format(os.getcwd()))
sys.path.append('/usr/local/lib/python2.7/dist-packages')
#from pandas import read_csv
##### DEFAULT LIST #####
#Lead2nd_Enable = True
#Xt = 0
#Yt = 5
#Zt = 0
#X_2nd = 0
#Y_2nd = 5
#Z_2nd = 0
#OZ_angle = 0
#Xm = 0
#Ym = 0
#Zm = 0
#encap_thickness = 0.1
#ROI_radial = 13
#Vertice_enable = False
#Brain_map = '/home/trieu/electrode_dir/brain_elipse.brep'
#stretch=1.0
#if(Lead2nd_Enable):
# Xt2 = 0
# Yt2 = -5
# Zt2 = 0
# OX_angle2 = 0
# OY_angle2 = 0
# OZ_angle2 = 0
#stretch=1.0
##### VARIABLE LIST #####
########## End of variable list#############
#specific parameters of the lead
if Z_2nd == Zt:
Z_2nd_artif = Zt+1.0 # just to ensure the rotation is possible
else:
Z_2nd_artif=Z_2nd
#for Lead-DBS, the tip point should be shifted down (they use the middle of the lowest contact as the reference point)
Zt_tip=Zt-0.75 #for Boston Scientific Vercise Cartesia (directional)
# hardwired turn because of the Lead-DBS definition (marker points against X-axis)
Vert_array =[0];
number_vertex = len(Vert_array)
Vert = []
VolumeObject1 = []
ContactObject1 = []
VolumeObject2 = []
ContactObject2 = []
print " DBS_lead's Geometry buid\n"
######################################### end of extra code 1 ########################################
######################################################################################################
from salome.geom import geomBuilder
import math
import SALOMEDS
contact_angle=90*math.pi/180.0
ang_betw_contacts=120*math.pi/180.0
contact_thickness=0.1
geompy = geomBuilder.New(theStudy)
O = geompy.MakeVertex(0, 0, 0)
OX = geompy.MakeVectorDXDYDZ(1, 0, 0)
OY = geompy.MakeVectorDXDYDZ(0, 1, 0)
OZ = geompy.MakeVectorDXDYDZ(0, 0, 1)
#Circle_1 = geompy.MakeCircle(O, OZ, 0.635)
#Contact_1 = geompy.MakePrismVecH(Circle_1, OZ, 1.5)
Circle_1 = geompy.MakeCircle(O, OZ, 0.65)
Contact_1_prism = geompy.MakePrismVecH(Circle_1, OZ, 0.75*stretch+0.1)
Vertex_1 = geompy.MakeVertex(0, 0.65, 0)
Vertex_2 = geompy.MakeVertex(0, -0.65, 0)
Vertex_3 = geompy.MakeVertex(0.65, 0, 0)
Arc_1 = geompy.MakeArc(Vertex_1, Vertex_3, Vertex_2)
Revolution_1 = geompy.MakeRevolution(Arc_1, OY, 180*math.pi/180.0)
Contact_1 = geompy.MakeFuseList([Contact_1_prism, Revolution_1], True, True)
Contact_1_fake = geompy.MakePrismVecH(Circle_1, OZ, 0.75*stretch+0.75)
geompy.TranslateDXDYDZ(Contact_1_fake, 0, 0, 0.75*stretch+0.1)
#geompy.TranslateDXDYDZ(Contact_1, 0, 0, 0.85)
#Contact_2 = geompy.MakeTranslation(Contact_1, 0, 0, 2)
#Contact_3 = geompy.MakeTranslation(Contact_1, 0, 0, 4)
Contact_8 = geompy.MakeTranslation(Contact_1_fake, 0, 0, (6-1.5)*stretch)
#first, we will create contact surfaces
Cylinder_1 = geompy.MakeCylinderRH(0.65, 149.365)
Sphere_1 = geompy.MakeSphereR(0.65)
Fuse_1 = geompy.MakeFuseList([Cylinder_1, Sphere_1], True, True)
Cylinder_2 = geompy.MakeCylinderRH(encap_thickness+0.65, 149.365)
Sphere_2 = geompy.MakeSphereR(encap_thickness+0.65)
Fuse_2 = geompy.MakeFuseList([Cylinder_2, Sphere_2], True, True)
encap_layer = geompy.MakeCutList(Fuse_2, [Fuse_1], True)
encap_layer2 = geompy.MakeCutList(Fuse_2, [Fuse_1], True)
Circle_2 = geompy.MakeCircle(None, None, 0.65)
Common_1 = geompy.MakeCommonList([encap_layer, Circle_2], True)
Vertex_1 = geompy.MakeVertex(0.65, -0, 0)
Rotation_1 = geompy.MakeRotation(Vertex_1, OZ, contact_angle)
Arc_1 = geompy.MakeArcCenter(O, Vertex_1, Rotation_1,False)
#Vertex_1 = geompy.MakeVertex(0, 0, 0)
#Vertex_2 = geompy.MakeVertex(0.635, 0, 0)
#Rotation_1 = geompy.MakeRotation(Vertex_2, OZ, contact_angle)
#Arc_1 = geompy.MakeArcOfEllipse(O, Vertex_2, Rotation_1)
Contact_2_fake = geompy.MakePrismVecH(Arc_1, OZ, 1.5*stretch)
geompy.TranslateDXDYDZ(Contact_2_fake, 0, 0, (0.75+0.5)*stretch+0.1)
Contact_2=Contact_2_fake
Contact_3 = geompy.MakeRotation(Contact_2, OZ, ang_betw_contacts) # Boston has another notation (counter-clockwise from the top)
Contact_4 = geompy.MakeRotation(Contact_2, OZ, 2*ang_betw_contacts)
Contact_5 = geompy.MakeTranslation(Contact_2, 0, 0, 2*stretch)
Contact_6 = geompy.MakeRotation(Contact_5, OZ, ang_betw_contacts)
Contact_7 = geompy.MakeRotation(Contact_5, OZ, 2*ang_betw_contacts)
Rotation_2 = geompy.MakeRotation(Vertex_1, OZ, contact_angle/2.0)
Vector_1 = geompy.MakeVector(Rotation_2, O)
CV2 = geompy.MakePrismVecH(Contact_2_fake, Vector_1, contact_thickness)
#geompy.TranslateDXDYDZ(CV2, 0, 0, 1.5)
CV3 = geompy.MakeRotation(CV2, OZ, ang_betw_contacts) # Boston has another notation (counter-clockwise from the top)
CV4 = geompy.MakeRotation(CV2, OZ, 2*ang_betw_contacts)
CV5 = geompy.MakeTranslation(CV2, 0, 0, 2*stretch)
CV6 = geompy.MakeRotation(CV5, OZ, ang_betw_contacts)
CV7 = geompy.MakeRotation(CV5, OZ, 2*ang_betw_contacts)
#CV1 = geompy.MakeCylinderRH(0.65, 1.5)
#geompy.TranslateDXDYDZ(CV1, 0, 0, 1.5)
Sphere_cyl = geompy.MakeSphereR(0.65)
CV1_cyl= geompy.MakeCylinderRH(0.65, 0.75*stretch+0.1)
CV1 = geompy.MakeFuseList([CV1_cyl, Sphere_cyl], True, True)
geompy.TranslateDXDYDZ(CV1, 0, 0, 0.65)
CV1_fake = geompy.MakeCylinderRH(0.65, 0.75*stretch+0.75)
CV8 = geompy.MakeTranslation(CV1_fake, 0, 0, (5.25*stretch+0.65)+0.1)
geompy.TranslateDXDYDZ(Circle_1, 0, 0, 0.65)
geompy.TranslateDXDYDZ(Contact_1, 0, 0, 0.65)
geompy.TranslateDXDYDZ(Contact_2, 0, 0, 0.65)
geompy.TranslateDXDYDZ(Contact_3, 0, 0, 0.65)
geompy.TranslateDXDYDZ(Contact_4, 0, 0, 0.65)
geompy.TranslateDXDYDZ(Contact_5, 0, 0, 0.65)
geompy.TranslateDXDYDZ(Contact_6, 0, 0, 0.65)
geompy.TranslateDXDYDZ(Contact_7, 0, 0, 0.65)
geompy.TranslateDXDYDZ(Contact_8, 0, 0, 0.65)
geompy.TranslateDXDYDZ(CV2, 0, 0, 0.65)
geompy.TranslateDXDYDZ(CV3, 0, 0, 0.65)
geompy.TranslateDXDYDZ(CV4, 0, 0, 0.65)
geompy.TranslateDXDYDZ(CV5, 0, 0, 0.65)
geompy.TranslateDXDYDZ(CV6, 0, 0, 0.65)
geompy.TranslateDXDYDZ(CV7, 0, 0, 0.65)
geompy.TranslateDXDYDZ(Cylinder_1, 0, 0, 0.65)
geompy.TranslateDXDYDZ(Sphere_1, 0, 0, 0.65)
geompy.TranslateDXDYDZ(Fuse_1, 0, 0, 0.65)
geompy.TranslateDXDYDZ(Cylinder_2, 0, 0, 0.65)
geompy.TranslateDXDYDZ(Sphere_2, 0, 0, 0.65)
geompy.TranslateDXDYDZ(Fuse_2, 0, 0, 0.65)
geompy.TranslateDXDYDZ(encap_layer, 0, 0, 0.65)
Sphere_ROI = geompy.MakeSphereR(ROI_radial)
encap_outer_ROI = geompy.MakeCutList(encap_layer, [Sphere_ROI], True)
encap_inner_ROI = geompy.MakeCutList(encap_layer, [encap_outer_ROI], True)
Fuse_all_lead_encap_ROI = geompy.MakeFuseList([Sphere_ROI, Fuse_2], True, True)
ROI = geompy.MakeCutList(Sphere_ROI, [Fuse_2], True)
##################################################################################################################
########################################### extra code 2 V10 15/12/18#############################################
print " Load brain image \n"
if (Brain_map[-4:] == 'brep'):
brain_solid = geompy.ImportBREP( Brain_map )
elif (Brain_map[-4:] == 'step'):
brain_solid = geompy.ImportSTEP( Brain_map )
elif (Brain_map[-4:] == 'iges'):
brain_solid = geompy.ImportIGES( Brain_map )
elif (Brain_map[-4:] == '.stl'):
brain_solid = geompy.ImportSTL( Brain_map )
else:
print " unknow imported file format"
Fuse_all_lead_encap_ROI_no_internal_face = geompy.RemoveInternalFaces(Fuse_all_lead_encap_ROI)
#################################################### Geometry and extra code interface ##############################################################
VolumeObject1 = [ encap_outer_ROI,ROI,encap_inner_ROI,CV1,CV2,CV3,CV4,CV5,CV6,CV7,CV8] # Declare objects included to partition, encap_outer_ROI always @1st position
Volume_name1 = ['encap_outer_ROI1','ROI1','encap_inner_ROI1','CV1_1','CV1_2','CV1_3','CV1_4','CV1_5','CV1_6','CV1_7','CV1_8'] # Declare name of the group in the partition for volume
ContactObject1 = [Contact_1,Contact_2,Contact_3,Contact_4,Contact_5,Contact_6,Contact_7,Contact_8]
Contact_name1 = ['Contact1_1','Contact1_2','Contact1_3','Contact1_4','Contact1_5','Contact1_6','Contact1_7','Contact1_8']
if(Lead2nd_Enable): ################## 2nd LEAD ###############################################
VolumeObject2 = [ROI]*len(VolumeObject1)
ContactObject2 = [Contact_1]*len(ContactObject1)
Volume_name2 = [ 'encap_outer_ROI2','ROI2','encap_inner_ROI2','CV2_1','CV2_2','CV2_3','CV2_4']
Contact_name2 = ['Contact2_1','Contact2_2','Contact2_3','Contact2_4']
##############################################################################################################################################
print "Position 2nd Fuse all object at [{},{},{}], [{}',{}',{}']\n".format(Xt2,Yt2,Zt2,OX_angle2,OY_angle2,OZ_angle2)
Fuse_all_lead_encap_ROI_no_internal_face2 = geompy.MakeTranslation(Fuse_all_lead_encap_ROI_no_internal_face,Xt2,Yt2,Zt2)
OX2 = geompy.MakeTranslation(OX,Xt2,Yt2,Zt2)
OY2 = geompy.MakeTranslation(OY,Xt2,Yt2,Zt2)
OZ2 = geompy.MakeTranslation(OZ,Xt2,Yt2,Zt2)
geompy.Rotate(Fuse_all_lead_encap_ROI_no_internal_face2, OX2,OX_angle2*math.pi/180.0)
geompy.Rotate(Fuse_all_lead_encap_ROI_no_internal_face2, OY2,OY_angle2*math.pi/180.0)
geompy.Rotate(Fuse_all_lead_encap_ROI_no_internal_face2, OZ2,OZ_angle2*math.pi/180.0)
print "Position 2nd Lead at [{},{},{}], [{}',{}',{}']\n".format(Xt2,Yt2,Zt2,OX_angle2,OY_angle2,OZ_angle2)
for i in range(0,len(VolumeObject1)):
VolumeObject2[i] = geompy.MakeTranslation(VolumeObject1[i],Xt2,Yt2,Zt2)
geompy.Rotate(VolumeObject2[i], OX2,OX_angle2*math.pi/180.0)
geompy.Rotate(VolumeObject2[i], OY2,OY_angle2*math.pi/180.0)
geompy.Rotate(VolumeObject2[i], OZ2,OZ_angle2*math.pi/180.0)
for i in range(0,len(ContactObject1)):
ContactObject2[i] = geompy.MakeTranslation(ContactObject1[i],Xt2,Yt2,Zt2)
geompy.Rotate(ContactObject2[i], OX2,OX_angle2*math.pi/180.0)
geompy.Rotate(ContactObject2[i], OY2,OY_angle2*math.pi/180.0)
geompy.Rotate(ContactObject2[i], OZ2,OZ_angle2*math.pi/180.0)
print "Cut outer ROI2 with brain\n"
cut_outer_ROI = geompy.MakeCutList(VolumeObject2[0], [brain_solid], True)
VolumeObject2[0] = geompy.MakeCutList(VolumeObject2[0], [cut_outer_ROI], True)
print "Cut ROI2 with brain\n"
VolumeObject2[1] = geompy.MakeCommonList([VolumeObject2[1], brain_solid], True)
print "Group 2nd:volume and area extraction for group ID identification process\n"
Volume2_Pro = [geompy.BasicProperties( VolumeObject2[0])]*len(VolumeObject2)
Contact2_Pro = [geompy.BasicProperties( ContactObject2[0])]*len(ContactObject2)
for i in range(0,len(VolumeObject2)):
Volume2_Pro[i] = geompy.BasicProperties( VolumeObject2[i])
for i in range(0,len(ContactObject2)):
Contact2_Pro[i] = geompy.BasicProperties( ContactObject2[i])
################## LEAD 1st #############################################################
#print "Position 1st Fuse all object at [{},{},{}], [{}',{}',{}']\n".format(Xt,Yt,Zt,OX_angle,OY_angle,OZ_angle)
geompy.TranslateDXDYDZ(Fuse_all_lead_encap_ROI_no_internal_face,Xt,Yt,Zt_tip)
OX1 = geompy.MakeTranslation(OX,Xt,Yt,Zt_tip)
OY1 = geompy.MakeTranslation(OY,Xt,Yt,Zt_tip)
OZ1 = geompy.MakeTranslation(OZ,Xt,Yt,Zt_tip)
geompy.Rotate(Fuse_all_lead_encap_ROI_no_internal_face, OZ1,45.0*math.pi/180.0) # hardwired turn because of the Lead-DBS definition (1st contact on positive Y)
geompy.Rotate(Fuse_all_lead_encap_ROI_no_internal_face, OZ1,OZ_angle*math.pi/180.0)
Vertex_1 = geompy.MakeVertex(X_2nd,Y_2nd,Z_2nd)
Vertex_O = geompy.MakeVertex(Xt,Yt,Zt)
Vertex_3 = geompy.MakeVertex(Xt,Yt,Z_2nd_artif)
if X_2nd!=Xt or Y_2nd!=Yt:
Fuse_all_lead_encap_ROI_no_internal_face=geompy.MakeRotationThreePoints(Fuse_all_lead_encap_ROI_no_internal_face, Vertex_O, Vertex_3, Vertex_1)
#print "Position 1st Lead at [{},{},{}], [{}',{}',{}']\n".format(Xt,Yt,Zt,OX_angle,OY_angle,OZ_angle)
for i in range(0,len(VolumeObject1)):
geompy.TranslateDXDYDZ(VolumeObject1[i],Xt,Yt,Zt_tip)
    geompy.Rotate(VolumeObject1[i], OZ1,45.0*math.pi/180.0) # hardwired turn because of the Lead-DBS definition (1st contact on positive Y)
geompy.Rotate(VolumeObject1[i], OZ1,OZ_angle*math.pi/180.0)
if X_2nd!=Xt or Y_2nd!=Yt:
VolumeObject1[i]=geompy.MakeRotationThreePoints(VolumeObject1[i], Vertex_O, Vertex_3, Vertex_1)
for i in range(0,len(ContactObject1)):
geompy.TranslateDXDYDZ(ContactObject1[i],Xt,Yt,Zt_tip)
    geompy.Rotate(ContactObject1[i], OZ1,45.0*math.pi/180.0) # hardwired turn because of the Lead-DBS definition (1st contact on positive Y)
geompy.Rotate(ContactObject1[i], OZ1,OZ_angle*math.pi/180.0)
if X_2nd!=Xt or Y_2nd!=Yt:
ContactObject1[i]=geompy.MakeRotationThreePoints(ContactObject1[i], Vertex_O, Vertex_3, Vertex_1)
print "Cut outer ROI1 with brain\n"
cut_outer_ROI = geompy.MakeCutList(VolumeObject1[0], [brain_solid], True)
VolumeObject1[0] = geompy.MakeCutList(VolumeObject1[0], [cut_outer_ROI], True)
print "Cut ROI1 with brain\n"
VolumeObject1[1] = geompy.MakeCommonList([VolumeObject1[1], brain_solid], True)
print "Group 1st:volume and area extraction for group ID identification process\n"
Volume1_Pro = [geompy.BasicProperties( VolumeObject1[0])]*len(VolumeObject1)
Contact1_Pro = [geompy.BasicProperties( ContactObject1[0])]*len(ContactObject1)
for i in range(0,len(VolumeObject1)):
Volume1_Pro[i] = geompy.BasicProperties( VolumeObject1[i])
for i in range(0,len(ContactObject1)):
Contact1_Pro[i] = geompy.BasicProperties( ContactObject1[i])
print "Create reference groups for ID identification process\n"
if(Lead2nd_Enable):
Rest = geompy.MakeCutList(brain_solid, [Fuse_all_lead_encap_ROI_no_internal_face,Fuse_all_lead_encap_ROI_no_internal_face2], True)
Partition_profile = geompy.MakePartition(VolumeObject1+VolumeObject2+ContactObject1+ContactObject2, [], [], [], geompy.ShapeType["SOLID"], 0, [], 0)
###reference_volume
reference_volume = VolumeObject1 + VolumeObject2
reference_volume_Pro = Volume1_Pro + Volume2_Pro
Volume_name = Volume_name1+Volume_name2
### reference_area
reference_surface = ContactObject1 + ContactObject2
reference_surface_Pro = Contact1_Pro + Contact2_Pro
Contact_name = Contact_name1+Contact_name2
Group_volume = [geompy.CreateGroup(Partition_profile, geompy.ShapeType["SOLID"])] * (len(VolumeObject1)+len(VolumeObject2)+1) # +1 is Rest Group
Group_surface = [geompy.CreateGroup(Partition_profile, geompy.ShapeType["FACE"])] * (len(ContactObject1)+len(ContactObject2))
else:
Rest = geompy.MakeCutList(brain_solid, [Fuse_all_lead_encap_ROI_no_internal_face], True)
Partition_profile = geompy.MakePartition(VolumeObject1+ContactObject1, [], [], [], geompy.ShapeType["SOLID"], 0, [], 0)
###reference_volume
reference_volume = VolumeObject1
reference_volume_Pro = Volume1_Pro
Volume_name = Volume_name1
### reference_area
reference_surface = ContactObject1
reference_surface_Pro = Contact1_Pro
Contact_name = Contact_name1
Group_volume = [geompy.CreateGroup(Partition_profile, geompy.ShapeType["SOLID"])] * (len(VolumeObject1)+1) # +1 is Rest Group
Group_surface = [geompy.CreateGroup(Partition_profile, geompy.ShapeType["FACE"])] * len(ContactObject1)
### find out subshape and subshape ID
Group_surface_ListIDs =[]
Group_volume_ListIDs =[]
Group_partition_volume = []
Group_partition_surface = []
### find group volume ID ######################################################################
Partition_volume_IDsList = geompy.SubShapeAllIDs(Partition_profile, geompy.ShapeType["SOLID"]) # list all sub shape volume in Partition
print "Partition_volume_IDsList",Partition_volume_IDsList, '\n'
for ref_ind in range (0, len(reference_volume)):
temp_volume = []
for sub_ind in range (0, len (Partition_volume_IDsList)):
subshape = geompy.GetSubShape(Partition_profile, [Partition_volume_IDsList[sub_ind]]) # get subshape
subshape_Pro = geompy.BasicProperties(subshape) # extract volume of subshape
Common_volume = geompy.MakeCommonList([subshape, reference_volume[ref_ind]], True) # check common intersection
Common_volume_Pro = geompy.BasicProperties(Common_volume)
print "volume difference",abs(Common_volume_Pro[2]-subshape_Pro[2]),"/",abs(Common_volume_Pro[2]-reference_volume_Pro[ref_ind][2])
# if ( common volume = subshape) and (common volume = ref volume) => ref volume = sub shape
if (abs(Common_volume_Pro[2]-subshape_Pro[2])< 0.0003) and (abs(Common_volume_Pro[2]-reference_volume_Pro[ref_ind][2])<0.0003):
Group_partition_volume.append([Volume_name[ref_ind],Partition_volume_IDsList[sub_ind]])
# if ( common volume = subshape) and (common volume < ref volume) => sub shape belong to ref volume
elif (abs(Common_volume_Pro[2]-subshape_Pro[2])< 0.0003) and ((Common_volume_Pro[2] - reference_volume_Pro[ref_ind][2])<-0.0003):
temp_volume.append( Partition_volume_IDsList[sub_ind] )
    if len(temp_volume) >1 : # the volume is divided
Group_partition_volume.append([Volume_name[ref_ind],temp_volume ])
        print Volume_name[ref_ind]," is divided and has sub IDs:{}\n".format(temp_volume)
if len(reference_volume) != len(Group_partition_volume):
print "Geometry-volume error please check ROI diameter and DBS lead Position ",len(reference_volume),len(Group_partition_volume)
print 'Group_partition_volume',Group_partition_volume,'\n'
### find group surface ID ######################################################################
Partition_surface_IDsList = geompy.SubShapeAllIDs(Partition_profile, geompy.ShapeType["FACE"]) # list all sub shape face in Partition
print 'Partition_surface_IDsList',Partition_surface_IDsList,'\n'
sub_face = [] ## store devided faces
for reff_ind in range (0, len (reference_surface)):
temp_surface = []
for subf_ind in range (0, len(Partition_surface_IDsList)):
subshapef = geompy.GetSubShape(Partition_profile, [Partition_surface_IDsList[subf_ind]]) # get subshape
Common_face = geompy.MakeCommonList([subshapef, reference_surface[reff_ind]], True) # check common intersection
Common_face_Pro = geompy.BasicProperties(Common_face)
subshapef_Pro = geompy.BasicProperties(subshapef) # extract volume of subshape
print "area difference",abs(Common_face_Pro[1]-subshapef_Pro[1]),"/",abs(Common_face_Pro[1]-reference_surface_Pro[reff_ind][1])
# if ( common face = subface) and (common face = ref face) => ref face = sub face
if (abs(Common_face_Pro[1]-subshapef_Pro[1])<0.000001 )and (abs(Common_face_Pro[1]-reference_surface_Pro[reff_ind][1])<0.000001):
Group_partition_surface.append([ Contact_name[reff_ind],Partition_surface_IDsList[subf_ind] ])
# if ( common face = subface) and (common face < ref face) => sub face belong to ref face
elif (abs(Common_face_Pro[1]-subshapef_Pro[1])<0.000001 ) and ((Common_face_Pro[1] - reference_surface_Pro[reff_ind][1])<-0.000001):
temp_surface.append(Partition_surface_IDsList[subf_ind])
    if len(temp_surface) >1 : # the face is divided
Group_partition_surface.append( [Contact_name[reff_ind],temp_surface ])
        print Contact_name[reff_ind]," is divided and has sub IDs:{}\n".format(temp_surface)
if len(reference_surface) != len(Group_partition_surface): #+len(Group_partition_Multi_surface):
print "Geometry-Surface error please check ROI diameter and DBS lead Position ",len(reference_surface),len(Group_partition_surface),'\n'
print 'Group_partition_surface',Group_partition_surface,'\n'
if(Lead2nd_Enable):
Partition_profile = geompy.MakePartition(VolumeObject1+VolumeObject2+ContactObject1+ContactObject2+[Rest], [], [], [], geompy.ShapeType["SOLID"], 0, [], 0)
else:
Partition_profile = geompy.MakePartition(VolumeObject1+ContactObject1+[Rest], [], [], [], geompy.ShapeType["SOLID"], 0, [], 0)
new_volume_ID= geompy.SubShapeAllIDs(Partition_profile, geompy.ShapeType["SOLID"])
ID= list(set(Partition_volume_IDsList) ^ set (new_volume_ID))
Group_partition_volume.append(['Rest_1',ID[0]])
print "REST ID:",ID
print 'Group_partition_volume',Group_partition_volume,'\n'
print"Create volume and surface group under partition_profile\n"
for i_solid in range (0,len (Group_partition_volume)):
Group_volume[i_solid] = geompy.CreateGroup(Partition_profile, geompy.ShapeType["SOLID"])
if (isinstance (Group_partition_volume[i_solid][1],list) == False):
geompy.UnionIDs(Group_volume[i_solid], [Group_partition_volume[i_solid][1]])
if (isinstance (Group_partition_volume[i_solid][1],list) == True):
geompy.UnionIDs(Group_volume[i_solid], Group_partition_volume[i_solid][1])
#############################################
for i_surface in range (0,len (Group_partition_surface)):
Group_surface[i_surface] = geompy.CreateGroup(Partition_profile, geompy.ShapeType["FACE"])
if (isinstance (Group_partition_surface[i_surface][1],list) == False): # not a list
geompy.UnionIDs(Group_surface[i_surface], [Group_partition_surface[i_surface][1]])
if (isinstance (Group_partition_surface[i_surface][1],list) == True): # it is a list
geompy.UnionIDs(Group_surface[i_surface], Group_partition_surface[i_surface][1])
print "Translate whole partition to Xm,Ym,Zm\n"
geompy.TranslateDXDYDZ(Partition_profile, Xm, Ym, Zm)
### add Vertices to geometry
if(Vertice_enable):
for ver_ind in range (0,number_vertex):
print"Add vertices to model\n"
Vert.append(geompy.MakeVertex(Vert_array[ver_ind][0],Vert_array[ver_ind][1],Vert_array[ver_ind][2]))
geompy.TranslateDXDYDZ(Vert[ver_ind], Xm, Ym, Zm) ###Translate vertices to Xm,Ym,Zm
geompy.addToStudy( Vert[ver_ind], 'Vert_{}'.format(ver_ind))
print"add to study\n"
############################################ end of extra code 2 ############################################
#############################################################################################################
geompy.addToStudy( O, 'O' )
geompy.addToStudy( OX, 'OX' )
geompy.addToStudy( OY, 'OY' )
geompy.addToStudy( OZ, 'OZ' )
geompy.addToStudy( Circle_1, 'Circle_1' )
geompy.addToStudy( Contact_1, 'Contact_1' )
geompy.addToStudy( Contact_2, 'Contact_2' )
geompy.addToStudy( Contact_3, 'Contact_3' )
geompy.addToStudy( Contact_4, 'Contact_4' )
geompy.addToStudy( Contact_5, 'Contact_5' )
geompy.addToStudy( Contact_6, 'Contact_6' )
geompy.addToStudy( Contact_7, 'Contact_7' )
geompy.addToStudy( Contact_8, 'Contact_8' )
geompy.addToStudy( CV1, 'CV1' )
geompy.addToStudy( CV2, 'CV2' )
geompy.addToStudy( CV3, 'CV3' )
geompy.addToStudy( CV4, 'CV4' )
geompy.addToStudy( CV5, 'CV5' )
geompy.addToStudy( CV6, 'CV6' )
geompy.addToStudy( CV7, 'CV7' )
geompy.addToStudy( CV8, 'CV8' )
geompy.addToStudy( Cylinder_1, 'Cylinder_1' )
geompy.addToStudy( Sphere_1, 'Sphere_1' )
geompy.addToStudy( Fuse_1, 'Fuse_1' )
geompy.addToStudy( ROI, 'ROI' )
geompy.addToStudy( encap_outer_ROI, 'encap_outer_ROI' )
geompy.addToStudy( encap_inner_ROI, 'encap_inner_ROI' )
geompy.addToStudy( Fuse_all_lead_encap_ROI, 'Fuse_all_lead_encap_ROI' )
################################################################################################################
####################################### extra code 3 V10 15/12/18##############################################/
#for i in range(0,len(VolumeObject2)):/
# geompy.addToStudy( VolumeObject2[i], 'VolumeObject2_{}'.format(i) )
#for i in range(0,len(ContactObject2)):
# geompy.addToStudy( ContactObject2[i], 'ContactObject2_{}'.format(i) )
#for i in range(0,len(VolumeObject1)):
# geompy.addToStudy( VolumeObject1[i], 'VolumeObject1_{}'.format(i) )
#for i in range(0,len(ContactObject1)):
# geompy.addToStudy( ContactObject1[i], 'ContactObject1_{}'.format(i) )
geompy.addToStudy( Partition_profile, 'Partition_profile' )
for i_solid1 in range (0,len (Group_partition_volume)):
geompy.addToStudyInFather( Partition_profile, Group_volume [i_solid1], Group_partition_volume[i_solid1][0])
for i_surface1 in range (0,len (Group_partition_surface)):
geompy.addToStudyInFather( Partition_profile, Group_surface [i_surface1], Group_partition_surface[i_surface1][0])
##################################### end of extra code 3##########################################
###################################################################################################
Contact1_1=Group_surface[0]
Contact1_2=Group_surface[1]
Contact1_3=Group_surface[2]
Contact1_4=Group_surface[3]
Contact1_5=Group_surface[4]
Contact1_6=Group_surface[5]
Contact1_7=Group_surface[6]
Contact1_8=Group_surface[7]
encap_inner_ROI1=Group_volume[2]
encap_outer_ROI1=Group_volume[0]
ROI1=Group_volume[1]
Rest_1=Group_volume[11]
Floating_contacts=[]
float_indices=[]
for i in xrange(len(Phi_vector)):
if Phi_vector[i]==None:
Floating_contacts.append(Group_volume[i+3]) #because the first contact is Group_volume[3]
float_indices.append(i+3)
Auto_group_for_floating = geompy.CreateGroup(Partition_profile, geompy.ShapeType["SOLID"])
geompy.UnionList(Auto_group_for_floating, Floating_contacts[:])
geompy.addToStudyInFather( Partition_profile, Auto_group_for_floating, 'Auto_group_for_floating' )
###
### SMESH component
###
import SMESH, SALOMEDS
from salome.smesh import smeshBuilder
smesh = smeshBuilder.New(theStudy)
Mesh_1 = smesh.Mesh(Partition_profile)
NETGEN_1D_2D_3D = Mesh_1.Tetrahedron(algo=smeshBuilder.NETGEN_1D2D3D)
NETGEN_3D_Parameters_1 = NETGEN_1D_2D_3D.Parameters()
NETGEN_3D_Parameters_1.SetMaxSize( 25.4615 )
NETGEN_3D_Parameters_1.SetSecondOrder( 0 )
NETGEN_3D_Parameters_1.SetOptimize( 1 )
NETGEN_3D_Parameters_1.SetFineness( 0 )
NETGEN_3D_Parameters_1.SetMinSize( 0.000374134 )
NETGEN_3D_Parameters_1.SetUseSurfaceCurvature( 1 )
NETGEN_3D_Parameters_1.SetFuseEdges( 1 )
NETGEN_3D_Parameters_1.SetQuadAllowed( 0 )
NETGEN_1D_2D = Mesh_1.Triangle(algo=smeshBuilder.NETGEN_1D2D,geom=Contact1_1)
Sub_mesh_1 = NETGEN_1D_2D.GetSubMesh()
NETGEN_2D_Parameters_1 = NETGEN_1D_2D.Parameters()
NETGEN_2D_Parameters_1.SetMaxSize( 0.05 )
NETGEN_2D_Parameters_1.SetSecondOrder( 0 )
NETGEN_2D_Parameters_1.SetOptimize( 1 )
NETGEN_2D_Parameters_1.SetFineness( 4 )
NETGEN_2D_Parameters_1.SetMinSize( 0.0001 )
NETGEN_2D_Parameters_1.SetUseSurfaceCurvature( 1 )
NETGEN_2D_Parameters_1.SetFuseEdges( 1 )
NETGEN_2D_Parameters_1.SetQuadAllowed( 0 )
NETGEN_1D_2D_1 = Mesh_1.Triangle(algo=smeshBuilder.NETGEN_1D2D,geom=Contact1_2)
Sub_mesh_2 = NETGEN_1D_2D_1.GetSubMesh()
status = Mesh_1.AddHypothesis(NETGEN_2D_Parameters_1,Contact1_2)
NETGEN_1D_2D_2 = Mesh_1.Triangle(algo=smeshBuilder.NETGEN_1D2D,geom=Contact1_3)
Sub_mesh_3 = NETGEN_1D_2D_2.GetSubMesh()
status = Mesh_1.AddHypothesis(NETGEN_2D_Parameters_1,Contact1_3)
NETGEN_1D_2D_3 = Mesh_1.Triangle(algo=smeshBuilder.NETGEN_1D2D,geom=Contact1_4)
Sub_mesh_4 = NETGEN_1D_2D_3.GetSubMesh()
status = Mesh_1.AddHypothesis(NETGEN_2D_Parameters_1,Contact1_4)
NETGEN_1D_2D_4 = Mesh_1.Triangle(algo=smeshBuilder.NETGEN_1D2D,geom=Contact1_5)
Sub_mesh_41 = NETGEN_1D_2D_4.GetSubMesh()
status = Mesh_1.AddHypothesis(NETGEN_2D_Parameters_1,Contact1_5)
NETGEN_1D_2D_5 = Mesh_1.Triangle(algo=smeshBuilder.NETGEN_1D2D,geom=Contact1_6)
Sub_mesh_42 = NETGEN_1D_2D_5.GetSubMesh()
status = Mesh_1.AddHypothesis(NETGEN_2D_Parameters_1,Contact1_6)
NETGEN_1D_2D_6 = Mesh_1.Triangle(algo=smeshBuilder.NETGEN_1D2D,geom=Contact1_7)
Sub_mesh_43 = NETGEN_1D_2D_6.GetSubMesh()
status = Mesh_1.AddHypothesis(NETGEN_2D_Parameters_1,Contact1_7)
NETGEN_1D_2D_7 = Mesh_1.Triangle(algo=smeshBuilder.NETGEN_1D2D,geom=Contact1_8)
Sub_mesh_44 = NETGEN_1D_2D_7.GetSubMesh()
status = Mesh_1.AddHypothesis(NETGEN_2D_Parameters_1,Contact1_8)
NETGEN_1D_2D_3D_1 = Mesh_1.Tetrahedron(algo=smeshBuilder.NETGEN_1D2D3D,geom=encap_inner_ROI1)
Sub_mesh_5 = NETGEN_1D_2D_3D_1.GetSubMesh()
NETGEN_3D_Parameters_2 = NETGEN_1D_2D_3D_1.Parameters()
NETGEN_3D_Parameters_2.SetMaxSize( encap_thickness )
NETGEN_3D_Parameters_2.SetSecondOrder( 0 )
NETGEN_3D_Parameters_2.SetOptimize( 1 )
NETGEN_3D_Parameters_2.SetFineness( 2 )
NETGEN_3D_Parameters_2.SetMinSize( 0.00283583 )
NETGEN_3D_Parameters_2.SetUseSurfaceCurvature( 1 )
NETGEN_3D_Parameters_2.SetFuseEdges( 1 )
NETGEN_3D_Parameters_2.SetQuadAllowed( 0 )
#isDone = Mesh_1.SetMeshOrder( [ [ Sub_mesh_4, Sub_mesh_3, Sub_mesh_2, Sub_mesh_1, Sub_mesh_5 ] ])
NETGEN_1D_2D_3D_2 = Mesh_1.Tetrahedron(algo=smeshBuilder.NETGEN_1D2D3D,geom=encap_outer_ROI1)
Sub_mesh_6 = NETGEN_1D_2D_3D_2.GetSubMesh()
NETGEN_3D_Parameters_3 = NETGEN_1D_2D_3D_2.Parameters()
NETGEN_3D_Parameters_3.SetMaxSize( encap_thickness )
NETGEN_3D_Parameters_3.SetSecondOrder( 0 )
NETGEN_3D_Parameters_3.SetOptimize( 1 )
NETGEN_3D_Parameters_3.SetFineness( 2 )
NETGEN_3D_Parameters_3.SetMinSize( 0.0333798 )
NETGEN_3D_Parameters_3.SetUseSurfaceCurvature( 1 )
NETGEN_3D_Parameters_3.SetFuseEdges( 1 )
NETGEN_3D_Parameters_3.SetQuadAllowed( 0 )
#isDone = Mesh_1.SetMeshOrder( [ [ Sub_mesh_4, Sub_mesh_3, Sub_mesh_2, Sub_mesh_1, Sub_mesh_5, Sub_mesh_6 ] ])
NETGEN_1D_2D_3D_3 = Mesh_1.Tetrahedron(algo=smeshBuilder.NETGEN_1D2D3D,geom=ROI1)
Sub_mesh_7 = NETGEN_1D_2D_3D_3.GetSubMesh()
NETGEN_3D_Parameters_4 = NETGEN_1D_2D_3D_3.Parameters()
NETGEN_3D_Parameters_4.SetMaxSize( 25.4615 )
NETGEN_3D_Parameters_4.SetSecondOrder( 0 )
NETGEN_3D_Parameters_4.SetOptimize( 1 )
NETGEN_3D_Parameters_4.SetFineness( 2 )
NETGEN_3D_Parameters_4.SetMinSize( 0.00328242 )
NETGEN_3D_Parameters_4.SetUseSurfaceCurvature( 1 )
NETGEN_3D_Parameters_4.SetFuseEdges( 1 )
NETGEN_3D_Parameters_4.SetQuadAllowed( 0 )
#isDone = Mesh_1.SetMeshOrder( [ [ Sub_mesh_4, Sub_mesh_3, Sub_mesh_2, Sub_mesh_1, Sub_mesh_5, Sub_mesh_6, Sub_mesh_7 ] ])
NETGEN_1D_2D_3D_4 = Mesh_1.Tetrahedron(algo=smeshBuilder.NETGEN_1D2D3D,geom=Rest_1)
Sub_mesh_8 = NETGEN_1D_2D_3D_4.GetSubMesh()
NETGEN_3D_Parameters_5 = NETGEN_1D_2D_3D_4.Parameters()
NETGEN_3D_Parameters_5.SetMaxSize( 2.5 )
NETGEN_3D_Parameters_5.SetSecondOrder( 0 )
NETGEN_3D_Parameters_5.SetOptimize( 1 )
NETGEN_3D_Parameters_5.SetFineness( 2 )
NETGEN_3D_Parameters_5.SetMinSize( 0.000374134 )
NETGEN_3D_Parameters_5.SetUseSurfaceCurvature( 1 )
NETGEN_3D_Parameters_5.SetFuseEdges( 1 )
NETGEN_3D_Parameters_5.SetQuadAllowed( 0 )
NETGEN_1D_2D_3D_5 = Mesh_1.Tetrahedron(algo=smeshBuilder.NETGEN_1D2D3D,geom=Auto_group_for_floating)
Sub_mesh_9 = NETGEN_1D_2D_3D_5.GetSubMesh()
NETGEN_3D_Parameters_6 = NETGEN_1D_2D_3D_5.Parameters()
NETGEN_3D_Parameters_6.SetMaxSize( 25.4615 )
NETGEN_3D_Parameters_6.SetSecondOrder( 0 )
NETGEN_3D_Parameters_6.SetOptimize( 1 )
NETGEN_3D_Parameters_6.SetFineness( 2 )
NETGEN_3D_Parameters_6.SetMinSize( 0.000374134 )
NETGEN_3D_Parameters_6.SetUseSurfaceCurvature( 1 )
NETGEN_3D_Parameters_6.SetFuseEdges( 1 )
NETGEN_3D_Parameters_6.SetQuadAllowed( 0 )
isDone = Mesh_1.SetMeshOrder( [ [ Sub_mesh_4, Sub_mesh_3, Sub_mesh_2, Sub_mesh_1,Sub_mesh_41, Sub_mesh_42, Sub_mesh_43, Sub_mesh_44, Sub_mesh_5,Sub_mesh_9,Sub_mesh_6, Sub_mesh_7, Sub_mesh_8 ] ])
#if Phi_vector[0]==None:
# Mesh_1.GetMesh().RemoveSubMesh( Sub_mesh_1 )
#if Phi_vector[1]==None:
# Mesh_1.GetMesh().RemoveSubMesh( Sub_mesh_2 )
#if Phi_vector[2]==None:
# Mesh_1.GetMesh().RemoveSubMesh( Sub_mesh_3 )
#if Phi_vector[3]==None:
# Mesh_1.GetMesh().RemoveSubMesh( Sub_mesh_4 )
#if Phi_vector[4]==None:
# Mesh_1.GetMesh().RemoveSubMesh( Sub_mesh_41 )
#if Phi_vector[5]==None:
# Mesh_1.GetMesh().RemoveSubMesh( Sub_mesh_42 )
#if Phi_vector[6]==None:
# Mesh_1.GetMesh().RemoveSubMesh( Sub_mesh_43 )
#if Phi_vector[7]==None:
# Mesh_1.GetMesh().RemoveSubMesh( Sub_mesh_44 )
isDone = Mesh_1.Compute()
if Phi_vector[0]!=None:
Mesh_1.GroupOnGeom(Contact1_1,'C1_1',SMESH.FACE)
if Phi_vector[1]!=None:
Mesh_1.GroupOnGeom(Contact1_2,'C1_2',SMESH.FACE)
if Phi_vector[2]!=None:
Mesh_1.GroupOnGeom(Contact1_3,'C1_3',SMESH.FACE)
if Phi_vector[3]!=None:
Mesh_1.GroupOnGeom(Contact1_4,'C1_4',SMESH.FACE)
if Phi_vector[4]!=None:
Mesh_1.GroupOnGeom(Contact1_5,'C1_5',SMESH.FACE)
if Phi_vector[5]!=None:
Mesh_1.GroupOnGeom(Contact1_6,'C1_6',SMESH.FACE)
if Phi_vector[6]!=None:
Mesh_1.GroupOnGeom(Contact1_7,'C1_7',SMESH.FACE)
if Phi_vector[7]!=None:
Mesh_1.GroupOnGeom(Contact1_8,'C1_8',SMESH.FACE)
Encap_contact = Mesh_1.GroupOnGeom(encap_inner_ROI1,'Encap_contact',SMESH.VOLUME)
Encap_rest = Mesh_1.GroupOnGeom(encap_outer_ROI1,'Encap_rest',SMESH.VOLUME)
RegOfInt = Mesh_1.GroupOnGeom(ROI1,'RegOfInt',SMESH.VOLUME)
Rst = Mesh_1.GroupOnGeom(Rest_1,'Rst',SMESH.VOLUME)
Flt_cnt=Mesh_1.GroupOnGeom(Auto_group_for_floating,'Flt_cnt',SMESH.VOLUME)
## Set names of Mesh objects
smesh.SetName(NETGEN_1D_2D_3D.GetAlgorithm(), 'NETGEN 1D-2D-3D')
smesh.SetName(NETGEN_1D_2D.GetAlgorithm(), 'NETGEN 1D-2D')
smesh.SetName(NETGEN_2D_Parameters_1, 'NETGEN 2D Parameters_1')
smesh.SetName(NETGEN_3D_Parameters_2, 'NETGEN 3D Parameters_2')
smesh.SetName(NETGEN_3D_Parameters_1, 'NETGEN 3D Parameters_1')
smesh.SetName(NETGEN_3D_Parameters_5, 'NETGEN 3D Parameters_5')
smesh.SetName(NETGEN_3D_Parameters_6, 'NETGEN 3D Parameters_6')
smesh.SetName(NETGEN_3D_Parameters_3, 'NETGEN 3D Parameters_3')
smesh.SetName(NETGEN_3D_Parameters_4, 'NETGEN 3D Parameters_4')
smesh.SetName(Sub_mesh_4, 'Sub-mesh_4')
smesh.SetName(Sub_mesh_41, 'Sub-mesh_41')
smesh.SetName(Sub_mesh_42, 'Sub-mesh_42')
smesh.SetName(Sub_mesh_43, 'Sub-mesh_43')
smesh.SetName(Sub_mesh_44, 'Sub-mesh_44')
smesh.SetName(Sub_mesh_1, 'Sub-mesh_1')
smesh.SetName(Sub_mesh_3, 'Sub-mesh_3')
smesh.SetName(Sub_mesh_2, 'Sub-mesh_2')
smesh.SetName(Mesh_1.GetMesh(), 'Mesh_1')
smesh.SetName(Rst, 'Rst')
smesh.SetName(Flt_cnt, 'Flt_cnt')
smesh.SetName(RegOfInt, 'RegOfInt')
smesh.SetName(Encap_rest, 'Encap_rest')
smesh.SetName(Encap_contact, 'Encap_contact')
smesh.SetName(Sub_mesh_7, 'Sub-mesh_7')
smesh.SetName(Sub_mesh_6, 'Sub-mesh_6')
smesh.SetName(Sub_mesh_5, 'Sub-mesh_5')
smesh.SetName(Sub_mesh_8, 'Sub-mesh_8')
smesh.SetName(Sub_mesh_9, 'Sub-mesh_9')
Mesh_1.ExportMED(os.environ['PATIENTDIR']+'/Meshes/Mesh_unref.med')
#if salome.sg.hasDesktop():
# salome.sg.updateObjBrowser(True)
import killSalome
killSalome.killAllPorts()
| gpl-3.0 |
SimonGreenhill/pyvolve | helper_scripts/count_simulated_dnds.py | 1 | 16767 | '''
SJS.
This script implements a module counting site-specific dN/dS values, as simulated by pyvolve. The counting method implemented is similar to the SLAC method [Kosakovsky Pond & Frost (2005) MBE], although as the actual ancestral sequences are known, no reconstruction is performed.
NOTE: this module is meant to be used ONLY with sequences simulated with pyvolve and thus can't handle gaps or ambiguities.
Usage:
from count_simulated_dnds import *
c = dNdS_Counter(<alnfile>, <treefile>, <mutation_dictionary>)
# alnfile: a FASTA-formatted sequence alignment file produced by a pyvolve simulation. This file **MUST** contain ancestral sequences (get this alignment by including the argument `write_anc=True` when calling a pyvolve Evolver class).
# treefile: a file containing the *exact* newick tree used in the pyvolve simulation which produced the alnfile.
# mutation_dictionary: **optional** argument indicating the mutation rates between nucleotides. This argument is analogous to the "mu" dictionary provided to pyvolve when simulating with custom mutation rates. If this argument is not provided, this module assumes equal mutation rates (e.g. a JC69 situation).
c.calculate_dnds()
# This method will compute site-specific dN/dS values and output a tab-delimited file of this format:
# site ns_changes s_changes ns_sites s_sites
# ... ... ... ... ...
# You can then calculate dN/dS from this file with this calculation: (ns_changes/ns_sites) / (s_changes/s_sites) . This is straight-forward in something like R or the python pandas package. Beware your parentheses placement, and also keep a look-out for NA or Inf values (these can happen when no synonymous changes occurred).
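# For instance, a minimal pandas sketch (assuming the default output name and the column names listed above):
#    import pandas as pd
#    d = pd.read_csv("counted_dnds.txt", sep="\t")
#    d["dnds"] = (d["ns_changes"] / d["ns_sites"]) / (d["s_changes"] / d["s_sites"])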
# By default, the output file will be called "counted_dnds.txt". To change this name, include the argument savefile, e.g. c.calculate_dnds(savefile = "my_preferred_filename.txt")
Please post all questions, bugs, etc. to https://github.com/sjspielman/pyvolve/Issues
'''
from copy import deepcopy
from pyvolve import newick
from Bio import AlignIO
import numpy as np
class CalcOverBranch(object):
'''
Compute quantities for dN/dS over a branch on a phylogeny, using a SLAC approach.
Takes 4 ordered input arguments:
source = the source codon (string)
target = the target codon (string
branch_length = the branch length (float)
nuc_sub_probs = a dictionary of normalized (sum to 1) nucleotide substitution probabilities
'''
def __init__(self, source, target, branch_length, nuc_sub_probs):
# Genetics variables
self.nucleotides = ["A", "C", "G", "T"]
self.stop_codons = ["TAA", "TAG", "TGA"]
self.codons = ["AAA", "AAC", "AAG", "AAT", "ACA", "ACC", "ACG", "ACT", "AGA", "AGC", "AGG", "AGT", "ATA", "ATC", "ATG", "ATT", "CAA", "CAC", "CAG", "CAT", "CCA", "CCC", "CCG", "CCT", "CGA", "CGC", "CGG", "CGT", "CTA", "CTC", "CTG", "CTT", "GAA", "GAC", "GAG", "GAT", "GCA", "GCC", "GCG", "GCT", "GGA", "GGC", "GGG", "GGT", "GTA", "GTC", "GTG", "GTT", "TAC", "TAT", "TCA", "TCC", "TCG", "TCT", "TGC", "TGG", "TGT", "TTA", "TTC", "TTG", "TTT"]
self.translation_dict = {"AAA":"K", "AAC":"N", "AAG":"K", "AAT":"N", "ACA":"T", "ACC":"T", "ACG":"T", "ACT":"T", "AGA":"R", "AGC":"S", "AGG":"R", "AGT":"S", "ATA":"I", "ATC":"I", "ATG":"M", "ATT":"I", "CAA":"Q", "CAC":"H", "CAG":"Q", "CAT":"H", "CCA":"P", "CCC":"P", "CCG":"P", "CCT":"P", "CGA":"R", "CGC":"R", "CGG":"R", "CGT":"R", "CTA":"L", "CTC":"L", "CTG":"L", "CTT":"L", "GAA":"E", "GAC":"D", "GAG":"E", "GAT":"D", "GCA":"A", "GCC":"A", "GCG":"A", "GCT":"A", "GGA":"G", "GGC":"G", "GGG":"G", "GGT":"G", "GTA":"V", "GTC":"V", "GTG":"V", "GTT":"V", "TAC":"Y", "TAT":"Y", "TCA":"S", "TCC":"S", "TCG":"S", "TCT":"S", "TGC":"C", "TGG":"W", "TGT":"C", "TTA":"L", "TTC":"F", "TTG":"L", "TTT":"F"}
# Input arguments
self.source_codon = source # originating codon
self.target_codon = target # final codon
self.bl = branch_length # branch length
self.B = nuc_sub_probs # dictionary of nucleotide substitution probabilities
assert(self.source_codon in self.codons and self.target_codon in self.codons), "\n\nImproper codons provided for dN/dS calculation. They are either stop codons, or straight-up not DNA."
self.path_codon_counts = {} # dictionary to contain the counts for all codons appearing in the paths between source, target
self.n_changes = 0.
self.s_changes = 0.
self.n_sites = 0.
self.s_sites = 0.
def compute_branch_dnds(self):
'''
*This* is the function users should call to compute nonsyn, syn changes and sites over a branch.
'''
self.compute_paths_and_changes()
self.count_sites_over_branch()
assert(self.n_sites + self.s_sites > 0.), "\n\nNo sites were counted for this branch. Bad news."
return self.n_changes, self.s_changes, self.n_sites, self.s_sites
def num_nuc_diff(self, s, t):
'''
Count the number of nucleotide differences between two codons: s,t.
'''
return sum([1 for i in range(3) if s[i] != t[i]])
def eval_path(self, l, n_count, s_count):
'''
Enumerate and evaluate the changes within a path.
'''
stop = False
temp_s_count = 0.
temp_n_count = 0.
path = [self.source_codon]
source = deepcopy(self.source_codon)
for p in l:
new_source = source[0:p] + self.target_codon[p] + source[p+1:3]
# If our path takes us through a stop, we will disregard the whole path
if new_source in self.stop_codons:
stop = True
break
# Evaluate the change as syn, nonsyn
if self.translation_dict[new_source] == self.translation_dict[source]:
temp_s_count += 1.
else:
temp_n_count += 1.
path.append(new_source)
source = new_source
# Save the path to count dictionary if accessible
if not stop:
path.append(self.target_codon) # Append the final target codon to the list
for entry in path:
if entry in self.path_codon_counts:
self.path_codon_counts[entry] += 1
else:
self.path_codon_counts[entry] = 1
# Tally and return
s_count.append(temp_s_count)
n_count.append(temp_n_count)
return n_count, s_count
def compute_paths_and_changes(self):
'''
        Assess the shortest paths between self.source and self.target which don't pass through stop codons.
Builds self.path_codon_counts and computes self.n_changes, self.s_changes
'''
s_count = [] # Total number of synonymous changes. Use list for each averaging.
n_count = [] # Total number of nonsynonymous changes. Use list for each averaging.
# Which sites had a mutation?
diff_sites = [i for i in range(3) if self.source_codon[i] != self.target_codon[i]]
n = len(diff_sites)
# No change?
if n == 0:
self.path_codon_counts[self.source_codon] = 1.
# Single change?
if n == 1:
if self.translation_dict[self.source_codon] == self.translation_dict[self.target_codon]:
self.s_changes = 1.
else:
self.n_changes = 1.
self.path_codon_counts[self.source_codon] = 1.
self.path_codon_counts[self.target_codon] = 1.
# Multiple changes?
elif n > 1:
for i in range(n):
m = diff_sites[i]
new_diff_sites = diff_sites[0:i] + diff_sites[i+1:n]
n_count, s_count = self.eval_path(new_diff_sites, n_count, s_count)
# Evaluate reverse substitution order if needed
if len(new_diff_sites) == 2:
new_diff_sites.reverse()
n_count, s_count = self.eval_path(new_diff_sites, n_count, s_count)
# Average the changes
self.n_changes = sum(n_count) / float(len(n_count))
self.s_changes = sum(s_count) / float(len(s_count))
def count_codon_sites(self,codon):
'''
Count number of synonymous, nonsynonymous sites in a given codon.
Argument "codon" is the sense codon of interest.
'''
codon = codon.upper()
source_aa = self.translation_dict[codon]
# Determine which sense codons are a single nucleotide change from input argument, codon
# One list for nonsyn and one list for syn
n_sites = 0.
s_sites = 0.
for i in range(3):
s_numer = 0.
n_numer = 0.
denom = 0.
for n in self.nucleotides:
if codon[i] != n:
target = codon[0:i] + n + codon[i+1:3]
if target not in self.stop_codons:
target_aa = self.translation_dict[target]
# Evaluate syn, nonsyn
if source_aa == target_aa:
s_numer += self.B[codon[i] + n]
else:
n_numer += self.B[codon[i] + n]
denom += self.B[codon[i] + n]
# Tally if there were changes
if s_numer > 0.:
s_sites += s_numer/denom
if n_numer > 0:
n_sites += n_numer/denom
return n_sites, s_sites
def count_sites_over_branch(self):
'''
Count number of expected number of nonsyn, syn sites for a given branch.
'''
assert(len(self.path_codon_counts) > 0), "\n\nPath is empty."
x = float(sum(self.path_codon_counts.values()))
s_sites_raw = 0.
n_sites_raw = 0.
for key in self.path_codon_counts:
ntemp, stemp = self.count_codon_sites(key)
s_sites_raw += (self.path_codon_counts[key] * stemp)
n_sites_raw += (self.path_codon_counts[key] * ntemp)
self.s_sites = (s_sites_raw / x) * self.bl
self.n_sites = (n_sites_raw / x) * self.bl
class dNdS_Counter(object):
'''
Class to count simulated, site-specific dN/dS.
Required positional arguments:
alnfile: alignment file (fasta) produced by pyvolve *with ancestral sequences*
treefile: file with newick tree
Optional positional arguments:
mu_dict: dictionary of nucleotide mutation rates. If missing, assumes equal mutation rates.
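                 e.g. mu_dict = {"AC":1., "AG":5., "AT":1., "CG":1., "CT":5., "GT":1.} (illustrative rates only);
                 any missing reverse rate such as "CA" is filled in symmetrically.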
'''
def __init__(self, alnfile, treefile, mu_dict = None):
# Parse initial tree
self.tree = newick.read_tree(file = treefile)
# Read alignment, convert to dictionary, and determine nucleotide pi dictionary
length = 0.
nuc_counts = np.zeros(4)
with open(alnfile, "rU") as handle:
aln = AlignIO.read(handle, "fasta")
self.alnlen = len(aln[0])/3
self.alndict = {}
for rec in aln:
s = str(rec.seq).upper()
nuc_counts[0] += s.count("A")
nuc_counts[1] += s.count("C")
nuc_counts[2] += s.count("G")
nuc_counts[3] += s.count("T")
self.alndict[str(rec.id)] = s
# Build B.
self.compute_B(mu_dict, nuc_counts)
# How many branch lengths?
self.num_edges = 0
self.sum_bl = 0.
self.tally_bl(self.tree)
# Set up storage for raw site-wise dn, ds quantities.
self.n_sites = np.zeros( [self.alnlen, self.num_edges] )
self.s_sites = np.zeros( [self.alnlen, self.num_edges] )
self.n_changes = np.zeros( [self.alnlen, self.num_edges] )
self.s_changes = np.zeros( [self.alnlen, self.num_edges] )
def compute_B(self, mu_dict, nuc_probs):
'''
Given nucleotide mutational information, return a dictionary
giving the relative probability of subbing nucleotide pairs.
'''
nuc_probs /= np.sum(nuc_probs)
pi_dict = {"A": nuc_probs[0], "C": nuc_probs[1], "G": nuc_probs[2], "T": nuc_probs[3]}
# Default.
if mu_dict is None:
mu_dict = {"AC":1., "AG":1., "AT":1., "CG":1., "CT":1., "GT":1.}
# Fill in missing mutation rates symmetrically
temp_mu_dict = {}
for key in mu_dict:
if key[1] + key[0] not in mu_dict:
temp_mu_dict[key[1] + key[0]] = mu_dict[key]
# Build unnormalized B
new = {}
for key in mu_dict:
new[key] = mu_dict[key] * pi_dict[key[1]]
for key in temp_mu_dict:
new[key] = temp_mu_dict[key] * pi_dict[key[1]]
# Build normalized B
k = new.keys()
v = np.array(new.values())
v /= np.sum(v)
self.B = dict(zip(k,v))
def tally_bl(self, t):
'''
Recursive function to retrieve the total number of edges and the sum of all branch lengths.
'''
if t.branch_length is not None:
self.sum_bl += t.branch_length
self.num_edges += 1
        if len(t.children) > 0:
for child in t.children:
self.tally_bl(child)
def traverse_tree_dnds(self, source_node, target_node, storage_index):
'''
Traverse the tree to compute and store dN, dS quantities (sites and changes) at each edge.
'''
# Compute site-wise dN/dS along this branch, where bl = target_node.branch_length
full_target_seq = self.alndict[target_node.name]
full_source_seq = self.alndict[source_node.name]
bl = target_node.branch_length
for s in range(0, self.alnlen*3, 3):
source_seq = full_source_seq[s:s+3]
target_seq = full_target_seq[s:s+3]
calcer = CalcOverBranch(source_seq, target_seq, bl, self.B)
n_changes, s_changes, n_sites, s_sites= calcer.compute_branch_dnds()
# Save quantities
self.n_changes[s/3][storage_index] = n_changes
self.s_changes[s/3][storage_index] = s_changes
self.n_sites[s/3][storage_index] = n_sites
self.s_sites[s/3][storage_index] = s_sites
storage_index += 1
# Proceed down the tree only if there are no more children
#print "children:", target_node.children[0].name, target_node.children[1].name
if len(target_node.children) > 0:
for child in target_node.children:
storage_index = self.traverse_tree_dnds(target_node, child, storage_index)
return storage_index
def calculate_dnds(self, savefile = "counted_dnds.txt"):
'''
Function *for users to call* in order to compute, save site-wise dnds quantities over a tree.
'''
# Obtain all quantities. We have to start with root's children, not root.
storage_index = 0
for subtree in self.tree.children:
storage_index = self.traverse_tree_dnds(self.tree, subtree, storage_index)
# Normalize sites by total branch length
self.n_sites /= self.sum_bl
self.s_sites /= self.sum_bl
# Obtain per-site average of all quantities
final_nsites = np.mean(self.n_sites, axis=1)
final_ssites = np.mean(self.s_sites, axis=1)
final_nchanges = np.mean(self.n_changes, axis=1)
final_schanges = np.mean(self.s_changes, axis=1)
# Finally, save this csv: site_index, nonsyn_changes, syn_changes, nonsyn_sites, syn_sites
with open(savefile, "w") as f:
site = 1
f.write("site\tns_changes\ts_changes\tns_sites\ts_sites")
for i in range(len(final_nsites)):
f.write("\n" + str(site) + "\t" + str(final_nchanges[i]) + "\t" + str(final_schanges[i]) + "\t" + str(final_nsites[i]) + "\t" + str(final_ssites[i]))
site += 1
| bsd-2-clause |
coreymason/LAHacks2017 | web/linear_regression_engine.py | 1 | 2084 | import warnings
warnings.filterwarnings(action="ignore", module="scipy", message="^internal gelsd")
import pandas as pd
import numpy as np
import sklearn.linear_model as skl
import matplotlib.pyplot as plt
reg = skl.LinearRegression()
data = pd.read_csv('sleep_quality_data.csv', index_col=0)
x_train = data.as_matrix(['temperature', 'humidity', 'brightness'])[:13]
y_train = data.as_matrix(['sleep quality'])[:13]
reg.fit (x_train, y_train)
# if there is a higher correlation coefficient
# then you want to maximise that variable, and vice versa
fields = ["Temperature", "Humidity", "Room brightness"]
index = 0
for cof in reg.coef_[0]:
suggestion = ""
if cof > 0.5:
suggestion += "increase " + fields[index] + ", "
print suggestion
index += 1
elif cof > 0:
suggestion += "slightly increase " + fields[index] + ", "
print suggestion
index += 1
elif cof < -0.5:
suggestion += "decrease " + fields[index] + ", "
print suggestion
index += 1
elif cof < 0:
suggestion += "slightly decrease " + fields[index] + ", "
print suggestion
index+=1
else:
suggestion += "it's fine " + ", "
print suggestion
index+=1
#print suggestion
x_test = data.as_matrix(['temperature', 'humidity', 'brightness'])[-1:]
#print x_test
predicted_value = reg.predict(x_test)
print predicted_value
# if predicted_value < 3:
# for cof in reg.coef_[0]:
# suggestion = ""
# if cof > 0.5:
# suggestion += "increase " + fields[index] + ", "
# print suggestion
# index += 1
# elif cof > 0:
# suggestion += "slightly increase " + fields[index] + ", "
# print suggestion
# index += 1
# elif cof < -0.5:
# suggestion += "decrease " + fields[index] + ", "
# print suggestion
# index += 1
# elif cof < 0:
# suggestion += "slightly decrease " + fields[index] + ", "
# print suggestion
# index+=1
# else:
# suggestion += "it's fine " + ", "
# print suggestion
# index+=1
# plot data
data.plot(kind='scatter', x='temperature', y='sleep quality')
# plot the least squares line
plt.plot(x_test, predicted_value, c='red', linewidth=2)
#plt.show() | mit |
ARM-software/trappy | tests/test_trappy.py | 1 | 4903 | # Copyright 2015-2017 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import unicode_literals
from __future__ import division
from __future__ import print_function
from builtins import str
import os
import re
import matplotlib, tempfile
import trappy
from test_thermal import BaseTestThermal
class TestTrappy(BaseTestThermal):
def __init__(self, *args, **kwargs):
super(TestTrappy, self).__init__(*args, **kwargs)
self.map_label = {"00000000,00000039": "A53", "00000000,00000006": "A57"}
self.actor_order = ["GPU", "A57", "A53"]
def test_summary_plots(self):
"""Test summary_plots()
Can't check that the graphs are ok, so just see that the method doesn't blow up"""
trappy.summary_plots(self.actor_order, self.map_label)
matplotlib.pyplot.close('all')
trappy.summary_plots(self.actor_order, self.map_label, width=14,
title="Foo")
matplotlib.pyplot.close('all')
def test_summary_plots_bad_parameters(self):
"""When summary_plots() receives bad parameters, it offers an understandable error"""
self.assertRaises(TypeError, trappy.summary_plots,
(self.map_label, self.actor_order))
try:
trappy.summary_plots(self.map_label, self.actor_order)
except TypeError as exception:
self.assertTrue("actor_order" in str(exception))
else:
self.fail()
try:
trappy.summary_plots(self.actor_order, self.actor_order)
except TypeError as exception:
self.assertTrue("map_label" in str(exception))
else:
self.fail()
def test_summary_other_dir(self):
"""Test summary_plots() with another directory"""
other_random_dir = tempfile.mkdtemp()
os.chdir(other_random_dir)
trappy.summary_plots(self.actor_order, self.map_label, path=self.out_dir)
matplotlib.pyplot.close('all')
# Sanity check that the test actually ran from another directory
self.assertEqual(os.getcwd(), other_random_dir)
def test_summary_plots_only_power_allocator_trace(self):
"""Test that summary_plots() work if there is only power allocator
trace"""
# Strip out "thermal_temperature" from the trace
trace_out = ""
with open("trace.txt") as fin:
for line in fin:
if not re.search("thermal_temperature:", line):
trace_out += line
with open("trace.txt", "w") as fout:
fout.write(trace_out)
trappy.summary_plots(self.actor_order, self.map_label)
matplotlib.pyplot.close('all')
def test_summary_plots_no_gpu(self):
"""summary_plots() works if there is no GPU trace"""
# Strip out devfreq traces
trace_out = ""
with open("trace.txt") as fin:
for line in fin:
if ("thermal_power_devfreq_get_power:" not in line) and \
("thermal_power_devfreq_limit:" not in line):
trace_out += line
with open("trace.txt", "w") as fout:
fout.write(trace_out)
trappy.summary_plots(self.actor_order, self.map_label)
matplotlib.pyplot.close('all')
def test_summary_plots_one_actor(self):
"""summary_plots() works if there is only one actor"""
# Strip out devfreq and little traces
trace_out = ""
with open("trace.txt") as fin:
for line in fin:
if ("thermal_power_devfreq_get_power:" not in line) and \
("thermal_power_devfreq_limit:" not in line) and \
("thermal_power_cpu_get_power: cpus=00000000,00000039" not in line) and \
("thermal_power_cpu_limit: cpus=00000000,00000039" not in line):
trace_out += line
with open("trace.txt", "w") as fout:
fout.write(trace_out)
map_label = {"00000000,00000006": "A57"}
trappy.summary_plots(self.actor_order, map_label)
matplotlib.pyplot.close('all')
def test_compare_runs(self):
"""Basic compare_runs() functionality"""
trappy.compare_runs(self.actor_order, self.map_label,
runs=[("new", "."), ("old", self.out_dir)])
matplotlib.pyplot.close('all')
| apache-2.0 |
okadate/romspy | romspy/hview/pickup.py | 2 | 6168 | # coding: utf-8
# (c) 2015-10-21 Teruhisa Okada
"""changelog
2015-11-04 Add levels, fmt
"""
import netCDF4
import numpy as np
from itertools import product
import pandas as pd
import datetime
try:
from geopy.distance import vincenty
except:
pass
import matplotlib.pyplot as plt
import romspy
#import seaborn as sns
#sns.set_palette("Reds")
def pickup(nc, vname, lon, lat, method='idw', grdfile=None):
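    # Extract `vname` at (lon, lat): either the nearest rho-point ('near') or a
    # weighted blend of the four surrounding rho-points ('idw'), with masked
    # points given zero weight; an optional grid file supplies the lon/lat arrays.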
if grdfile is not None:
grd = netCDF4.Dataset(grdfile)
lon_rho = grd.variables['lon_rho'][0,:]
lat_rho = grd.variables['lat_rho'][:,0]
else:
lon_rho = nc.variables['lon_rho'][0,:]
lat_rho = nc.variables['lat_rho'][:,0]
d_lon = lon_rho - lon
d_lat = lat_rho - lat
x = abs(d_lon).argmin()
y = abs(d_lat).argmin()
if method == 'near':
var = nc.variables[vname]
if var.ndim == 4:
return var[:,:,y,x]
elif var.ndim == 3:
return var[:,y,x]
elif var.ndim == 2:
return var[y,x]
elif method == 'idw':
s_lon = np.sign(d_lon[x])
s_lat = np.sign(d_lat[y])
if np.sign(d_lon[x+1]) != s_lon:
x2 = [x, x+1]
else:
x2 = [x-1, x]
if np.sign(d_lat[y+1]) != s_lat:
y2 = [y, y+1]
else:
y2 = [y-1, y]
var = nc.variables[vname]
weight = np.zeros(4)
var1 = [[] for _ in range(4)]
for i, (x1, y1) in enumerate(product(x2, y2)):
if nc.variables['mask_rho'][y1,x1] == 1:
dx = x - x1
dy = y - y1
weight[i] = np.sqrt(abs(dx) ** 2 + abs(dy) ** 2)
if var.ndim == 4:
var1[i] = var[:,:,y1,x1]
if var.ndim == 3:
var1[i] = var[:,y1,x1]
if var.ndim == 2:
var1[i] = var[y1,x1]
else:
weight[i] = 0
var1[i] = 0
return sum([var1[i] * weight[i] / sum(weight) for i in range(4)])
else:
print 'ERROR: your method "{}" is wrong'.format(method)
def pickup_line(nc, vname, line, time, method='idw', unit='g', cff=None, levels=None, grdfile=None, fmt='%2.1f', cblabel=None, pmethod='contourf'):
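    # Plot a vertical section of `vname` along the (lon, lat) points in `line`
    # at one model time; depths are rebuilt from h, zeta and the Cs_r levels.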
if cff is None:
cff = romspy.unit2cff(vname, unit)
if type(time) == int:
t = time
time = netCDF4.num2date(nc.variables['ocean_time'][t], romspy.JST)
elif type(time) == datetime.datetime:
time2 = netCDF4.date2num(time, romspy.JST)
time3 = nc.variables['ocean_time'][:]
t = np.where(time3==time2)[0][0]
else:
print 'ERROR: your time type =',type(time)
if cblabel is None:
cblabel = vname
print '\n',time, t
cs_r = nc.variables['Cs_r'][:]
var = np.zeros([len(line),len(cs_r)])
depth = np.zeros([len(line),len(cs_r)])
dist = np.zeros([len(line),len(cs_r)])
for s, l in enumerate(line):
lon = l[0]
lat = l[1]
v = pickup(nc, vname, lon, lat, method, grdfile=grdfile)
var[s,:] = v[t,:]
h = pickup(nc, 'h', lon, lat, 'idw', grdfile=grdfile)
try:
zeta = pickup(nc, 'zeta', lon, lat, 'idw', grdfile=grdfile)
depth[s,:] = (h + zeta[t]) * cs_r[:]
except:
depth[s,:] = (h + 1.0) * cs_r[:]
if s == 0:
dist[s,:] = 0
else:
back = line[s-1]
fore = line[s]
dist[s,:] = dist[s-1,:] + vincenty(back, fore).meters
#fig, ax = plt.subplots(figsize=[6,3])
ax = plt.gca()
origin = 'upper'
#origin = 'lower'
if levels is None:
levels = romspy.levels(vname, unit=unit)
if pmethod == 'contourf':
CF = ax.contourf(dist/1000, depth, var*cff, extend='both', origin=origin, levels=levels)
plt.clabel(CF, fmt=fmt, colors='k')
CB = plt.colorbar(CF)
CB.ax.set_ylabel(cblabel)
elif pmethod == 'contour':
C = ax.contour(dist/1000, depth, var*cff, colors='w', origin=origin, levels=levels)
plt.clabel(C, fmt=fmt, colors='k')
elif pmethod == 'pcolor':
CF = ax.pcolor(dist/1000, depth, var*cff, vmin=levels[0], vmax=levels[-1])
CB = plt.colorbar(CF)
CB.ax.set_ylabel(cblabel)
ax.plot(dist[:,0]/1000, depth[:,0], 'k-')
ax.set_xlabel('distance(km)')
ax.set_ylabel('depth(m)')
ax.set_title(datetime.datetime.strftime(time, '%Y-%m-%d %H:%M:%S'))
return ax
def line_parser(linefile):
df = pd.read_csv(linefile)
return [[df.x[i], df.y[i]] for i in df.index]
def test_pickup(ncfile):
nc = netCDF4.Dataset(ncfile)
var0 = pickup(nc, 'temp', 135.380822, 34.671375)
var1 = pickup(nc, 'temp', 135.380822, 34.671375, 'idw')
fig, ax = plt.subplots(2,1)
p0 = ax[0].pcolor(var0.T)
p1 = ax[1].pcolor(var1.T)
#plt.colorbar(p0, ax=ax[0])
plt.show()
def test_pickup_line(ncfile):
nc = netCDF4.Dataset(ncfile)
line = line_parser('/home/okada/Dropbox/Data/stations_yodo.csv')
fig, ax = plt.subplots(figsize=[6,3])
#pickup_line(nc, 'temp', line, time=0, method='near')
pickup_line(nc, 'salt', line, time=0)
plt.show()
def test_pickup_line2():
nc = netCDF4.Dataset('X:/2015_kaiko/group_4dvar210_2/his/osaka-bay_his_024.nc')
grdfile = 'X:/2015_kaiko/Data/osaka-bay_grdfile_001.nc'
line = romspy.line_parser('F:/okada/Dropbox/Data/stations_abcd_osaka-bay.csv')
romspy.JST = 'seconds since 2012-06-01 00:00:00'
time = 0
fig, ax = plt.subplots(figsize=[6,3])
pickup_line(nc, 'temp', line, time, fmt='%i', grdfile=grdfile, pmethod='pcolor')
plt.show()
if __name__ == '__main__':
testfile = '/home/okada/apps/OB500P/testDA/param6/output/ob500_ini_0.nc'
#test_pickup(testfile)
test_pickup_line(testfile)
#test_pickup_line2()
| mit |
dingocuster/scikit-learn | examples/applications/topics_extraction_with_nmf_lda.py | 133 | 3517 | """
========================================================================================
Topics extraction with Non-Negative Matrix Factorization And Latent Dirichlet Allocation
========================================================================================
This is an example of applying Non Negative Matrix Factorization
and Latent Dirichlet Allocation on a corpus of documents and
extract additive models of the topic structure of the corpus.
The output is a list of topics, each represented as a list of terms
(weights are not shown).
The default parameters (n_samples / n_features / n_topics) should make
the example runnable in a couple of tens of seconds. You can try to
increase the dimensions of the problem, but be aware that the time
complexity is polynomial in NMF. In LDA, the time complexity is
proportional to (n_samples * iterations).
"""
# Author: Olivier Grisel <[email protected]>
# Lars Buitinck <[email protected]>
# Chyi-Kwei Yau <[email protected]>
# License: BSD 3 clause
from __future__ import print_function
from time import time
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn.decomposition import NMF, LatentDirichletAllocation
from sklearn.datasets import fetch_20newsgroups
n_samples = 2000
n_features = 1000
n_topics = 10
n_top_words = 20
def print_top_words(model, feature_names, n_top_words):
for topic_idx, topic in enumerate(model.components_):
print("Topic #%d:" % topic_idx)
print(" ".join([feature_names[i] for i in topic.argsort()[:-n_top_words - 1:-1]]))
print()
# Load the 20 newsgroups dataset and vectorize it. We use a few heuristics
# to filter out useless terms early on: the posts are stripped of headers,
# footers and quoted replies, and common English words, words occurring in
# only one document or in at least 95% of the documents are removed.
t0 = time()
print("Loading dataset and extracting features...")
dataset = fetch_20newsgroups(shuffle=True, random_state=1,
remove=('headers', 'footers', 'quotes'))
data_samples = dataset.data[:n_samples]
# use tf-idf feature for NMF model
tfidf_vectorizer = TfidfVectorizer(max_df=0.95, min_df=2, max_features=n_features,
stop_words='english')
tfidf = tfidf_vectorizer.fit_transform(data_samples)
# use tf feature for LDA model
tf_vectorizer = CountVectorizer(max_df=0.95, min_df=2, max_features=n_features,
stop_words='english')
tf = tf_vectorizer.fit_transform(data_samples)
print("done in %0.3fs." % (time() - t0))
# Fit the NMF model
print("Fitting the NMF model with tf-idf feature, n_samples=%d and n_features=%d..."
% (n_samples, n_features))
t0 = time()  # reset the timer so the reported time covers only the NMF fit
nmf = NMF(n_components=n_topics, random_state=1).fit(tfidf)
print("done in %0.3fs." % (time() - t0))
print("\nTopics in NMF model:")
tfidf_feature_names = tfidf_vectorizer.get_feature_names()
print_top_words(nmf, tfidf_feature_names, n_top_words)
print("\nFitting LDA models with tf feature, n_samples=%d and n_features=%d..."
% (n_samples, n_features))
lda = LatentDirichletAllocation(n_topics=n_topics, max_iter=5,
learning_method='online', learning_offset=50.,
random_state=0)
t0 = time()  # reset the timer so the reported time covers only the LDA fit
lda.fit(tf)
print("done in %0.3fs." % (time() - t0))
print("\nTopics in LDA model:")
tf_feature_names = tf_vectorizer.get_feature_names()
print_top_words(lda, tf_feature_names, n_top_words)
| bsd-3-clause |
jreeder/avoplot | src/avoplot/gui/toolbar.py | 3 | 10052 | #Copyright (C) Nial Peters 2013
#
#This file is part of AvoPlot.
#
#AvoPlot is free software: you can redistribute it and/or modify
#it under the terms of the GNU General Public License as published by
#the Free Software Foundation, either version 3 of the License, or
#(at your option) any later version.
#
#AvoPlot is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#
#You should have received a copy of the GNU General Public License
#along with AvoPlot. If not, see <http://www.gnu.org/licenses/>.
import wx
from matplotlib.backends.backend_wx import _load_bitmap as load_matplotlib_bitmap
from avoplot import core
from avoplot import figure
from avoplot.gui import menu
class MainToolbar(wx.ToolBar):
"""
Main program toolbar
"""
def __init__(self,parent):
self.parent = parent
self.__active_figure = None
self.__all_figures = set()
wx.ToolBar.__init__(self,parent, wx.ID_ANY)
#file tools
self.new_tool = self.AddTool(-1, wx.ArtProvider.GetBitmap(wx.ART_NEW, wx.ART_TOOLBAR), shortHelpString="New plot")
self.save_tool = self.AddTool(-1, wx.ArtProvider.GetBitmap(wx.ART_FILE_SAVE, wx.ART_TOOLBAR), shortHelpString="Save plot")
self.AddSeparator()
#plot navigation tools
self.home_tool = self.AddTool(-1, wx.ArtProvider.GetBitmap(wx.ART_GO_HOME, wx.ART_TOOLBAR),shortHelpString="Return to initial zoom setting")
self.back_tool = self.AddTool(-1, load_matplotlib_bitmap('back.png'), shortHelpString="Previous zoom setting")
self.forward_tool = self.AddTool(-1, load_matplotlib_bitmap('forward.png'), shortHelpString="Next zoom setting")
self.zoom_tool = self.AddCheckTool(-1, load_matplotlib_bitmap('zoom_to_rect.png'), shortHelp="Zoom selection")
self.pan_tool = self.AddCheckTool(-1, load_matplotlib_bitmap('move.png'),shortHelp='Pan',longHelp='Pan with left, zoom with right')
self.AddSeparator()
self.Realize()
self.enable_plot_tools(False)
#register avoplot event handlers
core.EVT_AVOPLOT_ELEM_ADD(self, self.on_element_add)
core.EVT_AVOPLOT_ELEM_SELECT(self, self.on_element_select)
core.EVT_AVOPLOT_ELEM_DELETE(self, self.on_element_delete)
#register events
wx.EVT_TOOL(self.parent, self.new_tool.GetId(), self.on_new)
wx.EVT_TOOL(self.parent, self.save_tool.GetId(), self.on_save_plot)
wx.EVT_TOOL(self.parent, self.home_tool.GetId(), self.on_home)
wx.EVT_TOOL(self.parent, self.back_tool.GetId(), self.on_back)
wx.EVT_TOOL(self.parent, self.forward_tool.GetId(), self.on_forward)
wx.EVT_TOOL(self.parent, self.zoom_tool.GetId(), self.on_zoom)
wx.EVT_TOOL(self.parent, self.pan_tool.GetId(), self.on_pan)
def on_element_add(self, evnt):
"""
Event handler for new element events. If the element is not a figure
then nothing gets done. For figures, their zoom and pan settings are
updated depending on the toggle state of the zoom/pan tools.
This method also enables the plot navigation tools if they were
previously disabled.
"""
el = evnt.element
if isinstance(el, figure.AvoPlotFigure):
if not self.__all_figures:
self.enable_plot_tools(True)
#enable the zoom/pan tools for this figure (if they are currently
#selected in the toolbar)
if self.GetToolState(self.pan_tool.GetId()):
el.pan()
elif self.GetToolState(self.zoom_tool.GetId()):
el.zoom()
self.__all_figures.add(el)
#initialise the pan and zoom tools to "off" each time a new figure
#is created
self.set_zoom_state(False)
self.set_pan_state(False)
def on_element_delete(self, evnt):
"""
Event handler for element delete events.If the element is not a figure
then nothing gets done. If the element being deleted was the last figure
in the session, then this disables the plot navigation tools.
"""
el = evnt.element
if isinstance(el, figure.AvoPlotFigure):
self.__all_figures.remove(el)
if not self.__all_figures:
self.__active_figure = None
self.enable_plot_tools(False)
def on_element_select(self, evnt):
"""
Event handler for element select events. Keeps track of what the
currently selected element is and updates the state of the history
buttons.
"""
el = evnt.element
if isinstance(el, figure.AvoPlotFigure):
self.__active_figure = el
#set the history button update handler so that the history buttons
#get enabled/disabled at the correct times
self.__active_figure.tb.set_history_buttons = self.update_history_buttons
self.update_history_buttons()
def enable_plot_tools(self, state):
"""
Enables the plot tools if state=True or disables them if state=False
"""
self.EnableTool(self.save_tool.GetId(),state)
self.EnableTool(self.home_tool.GetId(),state)
#self.EnableTool(self.back_tool.GetId(),state)
#self.EnableTool(self.forward_tool.GetId(),state)
self.EnableTool(self.pan_tool.GetId(),state)
self.EnableTool(self.zoom_tool.GetId(),state)
self.update_history_buttons()
def on_new(self,evnt):
"""Handle 'new' button pressed.
Creates a popup menu over the tool button containing the same entries as
the File->New menu.
"""
#Get the position of the toolbar relative to
#the frame. This will be the upper left corner of the first tool
bar_pos = self.GetScreenPosition()-self.parent.GetScreenPosition()
# This is the position of the tool along the tool bar (1st, 2nd, 3rd, etc...)
tool_index = self.GetToolPos(evnt.GetId())
# Get the size of the tool
tool_size = self.GetToolSize()
# This is the lower left corner of the clicked tool
lower_left_pos = (bar_pos[0]+self.GetToolSeparation()*(tool_index+1)+tool_size[0]*tool_index, bar_pos[1]+tool_size[1]+self.GetToolSeparation())#-tool_size[1])
menu_pos = (lower_left_pos[0]-bar_pos[0],lower_left_pos[1]-bar_pos[1])
self.PopupMenu(menu.create_the_New_menu(self.parent), menu_pos)
def on_home(self, evnt):
"""
Event handler for "home zoom level" events. Resets all subplots in the
current figure to their default zoom levels.
"""
if self.__active_figure is not None:
self.__active_figure.go_home()
def on_back(self, evnt):
"""
Event handler for 'back' tool events. Returns the figure to its previous
view.
"""
if self.__active_figure is not None:
self.__active_figure.back()
def on_forward(self, evnt):
"""
Event handler for 'forward' tool events. Returns the figure to its next
view.
"""
if self.__active_figure is not None:
self.__active_figure.forward()
def on_zoom(self,evnt):
"""
Event handler for zoom tool toggle events. Enables or disables zooming
in all figures accordingly.
"""
self.set_zoom_state(self.GetToolState(self.zoom_tool.GetId()))
def set_zoom_state(self, state):
"""
Enables (state = True) or disables (state = False) the zoom tool for all
figures. The pan tool will be disabled if needed.
"""
self.ToggleTool(self.zoom_tool.GetId(),state)
if state:
self.ToggleTool(self.pan_tool.GetId(),False)
for p in self.__all_figures:
if p.is_zoomed() != state:
p.zoom()
def set_pan_state(self, state):
"""
Enables (state = True) or disables (state = False) the pan tool for all
figures. The zoom tool will be disabled if needed.
"""
self.ToggleTool(self.pan_tool.GetId(),state)
if state:
self.ToggleTool(self.zoom_tool.GetId(),False)
for p in self.__all_figures:
if p.is_panned() != state:
p.pan()
def on_pan(self,evnt):
"""
Event handler for pan tool toggle events. Enables or disables panning
in all figures accordingly.
"""
self.set_pan_state(self.GetToolState(self.pan_tool.GetId()))
def on_save_plot(self, *args):
"""
Event handler for save tool events. Opens a file save dialog for saving
the currently selected figure as an image file.
"""
if self.__active_figure is not None:
self.__active_figure.save_figure_as_image()
def update_history_buttons(self):
"""
Enables/disables the next- and prev-view buttons depending on whether
there are views to go forward or back to.
"""
if self.__active_figure is not None:
current_mpl_toolbar = self.__active_figure.tb
can_backward = (current_mpl_toolbar._views._pos > 0)
can_forward = (current_mpl_toolbar._views._pos < len(current_mpl_toolbar._views._elements) - 1)
else:
can_backward = False
can_forward = False
self.EnableTool(self.back_tool.GetId(),can_backward)
self.EnableTool(self.forward_tool.GetId(),can_forward)
| gpl-3.0 |
kiyoto/statsmodels | statsmodels/datasets/sunspots/data.py | 8 | 2040 | """Yearly sunspots data 1700-2008"""
__docformat__ = 'restructuredtext'
COPYRIGHT = """This data is public domain."""
TITLE = __doc__
SOURCE = """
http://www.ngdc.noaa.gov/stp/solar/solarda3.html
The original dataset contains monthly data on sunspot activity in the file
./src/sunspots_yearly.dat. There is also sunspots_monthly.dat.
"""
DESCRSHORT = """Yearly (1700-2008) data on sunspots from the National
Geophysical Data Center."""
DESCRLONG = DESCRSHORT
NOTE = """::
Number of Observations - 309 (Annual 1700 - 2008)
Number of Variables - 1
Variable name definitions::
SUNACTIVITY - Number of sunspots for each year
The data file contains a 'YEAR' variable that is not returned by load.
"""
from numpy import recfromtxt, array
from pandas import Series, DataFrame
from statsmodels.datasets.utils import Dataset
from os.path import dirname, abspath
def load():
"""
Load the yearly sunspot data and returns a data class.
Returns
--------
Dataset instance:
See DATASET_PROPOSAL.txt for more information.
Notes
-----
This dataset only contains data for one variable, so the attributes
data, raw_data, and endog are all the same variable. There is no exog
attribute defined.
"""
data = _get_data()
endog_name = 'SUNACTIVITY'
endog = array(data[endog_name], dtype=float)
dataset = Dataset(data=data, names=[endog_name], endog=endog,
endog_name=endog_name)
return dataset
def load_pandas():
data = DataFrame(_get_data())
# TODO: time series
endog = Series(data['SUNACTIVITY'], index=data['YEAR'].astype(int))
dataset = Dataset(data=data, names=list(data.columns),
                      endog=endog, endog_name='SUNACTIVITY')
return dataset
def _get_data():
filepath = dirname(abspath(__file__))
with open(filepath + '/sunspots.csv', 'rb') as f:
data = recfromtxt(f, delimiter=",",
names=True, dtype=float)
return data
| bsd-3-clause |
flychen50/thinkstat | estimate.py | 2 | 3280 | """This file contains code used in "Think Stats",
by Allen B. Downey, available from greenteapress.com
Copyright 2010 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
import math
import matplotlib.pyplot as pyplot
import myplot
import Pmf
import random
import thinkstats
def MakeUniformSuite(low, high, steps):
"""Makes a PMF that represents a suite of hypotheses with equal p.
Args:
low: low end of range
high: high end of range
steps: number of values
Returns:
Pmf object
"""
hypos = [low + (high-low) * i / (steps-1.0) for i in range(steps)]
pmf = Pmf.MakePmfFromList(hypos)
return pmf
def Update(suite, evidence):
"""Updates a suite of hypotheses based on new evidence.
Modifies the suite directly; if you want to keep the original, make
a copy.
Args:
suite: Pmf object
evidence: whatever kind of object Likelihood expects
"""
for hypo in suite.Values():
likelihood = Likelihood(evidence, hypo)
suite.Mult(hypo, likelihood)
suite.Normalize()
def Likelihood(evidence, hypo):
"""Computes the likelihood of the evidence assuming the hypothesis is true.
Args:
evidence: sequence of measurements
hypo: parameter of the expo distribution
Returns:
probability of the evidence under the hypothesis
"""
param = hypo
likelihood = 1
for x in evidence:
likelihood *= ExpoPdf(x, param)
return likelihood
def ExpoPdf(x, param):
"""Evaluates the exponential PDF.
Returns the probability density of x in the exponential PDF
with the given parameter.
Args:
x: float observed value
param: float parameter of the exponential distribution
"""
p = param * math.exp(-param * x)
return p
def EstimateParameter(prior, sample, name='posterior'):
"""Computes the posterior distribution for the parameter of an expo dist.
Args:
prior: Pmf that maps values of lambda to their prior prob
sample: sequence of values drawn from expo dist
name: string name for the posterior
Returns:
new Pmf object with the posterior probabilities
"""
posterior = prior.Copy()
posterior.name = name
Update(posterior, sample)
return posterior
def main():
# make a uniform prior
param = 1.2
prior = MakeUniformSuite(0.5, 1.5, 1000)
# try out the sample in the book
t = []
sample = [2.675, 0.198, 1.152, 0.787, 2.717, 4.269]
name = 'post%d' % len(sample)
posterior = EstimateParameter(prior, sample, name)
t.append(posterior)
# try out a range of sample sizes
for n in [10, 20, 40]:
# generate a sample
sample = [random.expovariate(param) for _ in range(n)]
name = 'post%d' % n
# compute the posterior
posterior = EstimateParameter(prior, sample, name)
t.append(posterior)
# plot the posterior distributions
for i, posterior in enumerate(t):
pyplot.subplot(2, 2, i+1)
myplot.Pmf(posterior)
pyplot.xlabel('lambda')
pyplot.ylabel('Posterior probability')
pyplot.legend()
myplot.Save(root='posteriors')
if __name__ == '__main__':
main()
| mit |
aureooms/dotfiles | .config/repl/modules/plot3D.py | 1 | 3559 | from sympy import Symbol
from sympy import lambdify
from sympy import log
from sympy import E
from sympy import factorial
from sympy import latex
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
from matplotlib.ticker import LinearLocator, FormatStrFormatter
from matplotlib.colors import ListedColormap, BoundaryNorm
from operator import __gt__
from operator import __lt__
def lam3D(f, x, y):
tmp1 = Symbol('tmp1')
tmp2 = Symbol('tmp2')
return lambdify((tmp1, tmp2), f.subs({x: tmp1, y: tmp2}), modules=['numpy'])
def plot3D(fns, rangex, rangey, points=100, colors=None, layout=None, block=False, **kwargs):
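    # Render each sympy expression in `fns` as a surface over
    # rangex=(symbol, min, max) and rangey=(symbol, min, max); `colors`, if
    # given, is a callable (zmin, zmax, points) -> (cmap, norm), e.g. gt(t)/lt(t).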
if not isinstance(fns, (list, tuple)):
fns = (fns,)
if layout is None:
layout = (1, len(fns))
x, xmin, xmax = rangex
y, ymin, ymax = rangey
xmin, xmax, ymin, ymax = map(float, (xmin, xmax, ymin, ymax))
X = np.arange(xmin, xmax, (xmax-xmin)/points)
Y = np.arange(ymin, ymax, (ymax-ymin)/points)
domain = np.meshgrid(X, Y)
fig = plt.figure()
for j, f in enumerate(fns, 1):
ax = fig.add_subplot(*layout, j, projection='3d')
ax.set_xlim(xmin, xmax)
ax.set_ylim(ymin, ymax)
ax.set_xlabel(latex(x, mode='inline'))
ax.set_ylabel(latex(y, mode='inline'))
l = lam3D(f, x, y)
z = l(*domain)
z = np.ma.array(z, mask=np.isnan(z))
zmin = np.amin(z)
zmax = np.amax(z)
ax.set_zlim(zmin, zmax)
ax.set_zlabel(latex(f, mode='inline'))
if colors is None:
cmap, norm = None, None
else:
cmap, norm = colors(zmin, zmax, points)
surf = ax.plot_surface(*domain, z, cmap=cmap,
norm=norm,
linewidth=0, antialiased=False, label=latex(f, mode='inline'))
if cmap is not None:
fig.colorbar(surf, shrink=0.5, aspect=5)
plt.show(block=block, **kwargs)
gt = lambda t: threshold(__gt__, t)
lt = lambda t: threshold(__lt__, t)
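# Illustrative call (assumes x, y = Symbol('x'), Symbol('y'); not in the original):
#   plot3D(x**2 + y**2, (x, -1, 1), (y, -1, 1), colors=gt(1.0))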
def threshold(op, t):
assert op is __gt__ or op is __lt__
def colors(zmin, zmax, points):
bounds = np.linspace(zmin, zmax, points)
_bounds = tuple(filter(lambda x: not op(x,t), bounds.tolist()))
if zmin <= t <= zmax:
if op is __gt__:
if _bounds[-1] != t and t != zmax:
_bounds = _bounds + (t,)
else:
if _bounds[0] != t and t != zmin:
_bounds = (t,) + _bounds
if op(zmin,t): _bounds = (zmin,) + _bounds
if op(zmax,t): _bounds = _bounds + (zmax,)
elif t < zmin <= zmax:
if op is __gt__:
_bounds = (t,zmin,zmax)
else:
_bounds = (zmin,) + _bounds + (zmax,)
elif zmin <= zmax < t:
if op is __lt__:
_bounds = (zmin,zmax,t)
else:
_bounds = (zmin,) + _bounds + (zmax,)
ncolors = len(_bounds)
assert(ncolors >= 2)
bounds = np.array(_bounds)
viridis = cm.get_cmap('viridis', ncolors-1)
newcolors = viridis(np.linspace(0, 1, ncolors-1))
pink = np.array([248/256, 24/256, 148/256, 1])
if op is __gt__:
if t < zmax:
newcolors[-1] = pink
else:
if t > zmin:
newcolors[0] = pink
cmap = ListedColormap(newcolors)
norm = BoundaryNorm(bounds, ncolors-1)
return cmap, norm
return colors
| agpl-3.0 |
kshedstrom/pyroms | examples/Arctic_HYCOM/get_hycom_GLBa0.08_grid.py | 1 | 2227 | import matplotlib
matplotlib.use('Agg')
import numpy as np
import netCDF4
from datetime import datetime
import pyroms
import pyroms_toolbox
import sys
# extract the grid (lon, lat, depth) and one temperature snapshot from the
# global HYCOM + NCODA GLBa0.08 analysis for the northern subdomain
invarname = 'temperature'
outvarname = 'temp'
#read grid and variable attributes from the first file
url='http://tds.hycom.org/thredds/dodsC/datasets/global/GLBa0.08_analysis/data/temp/archv.2010_342_00_3zt.nc'
dataset = netCDF4.Dataset(url)
lon = dataset.variables['Longitude'][2100-1:,550-1:4040+1]
lat = dataset.variables['Latitude'][2100-1:,550-1:4040+1]
z = dataset.variables['Depth'][:]
#spval = dataset.variables[invarname]._FillValue
var = dataset.variables[invarname][0,:,2100-1:,550-1:4040+1]
spval = var.get_fill_value()
units = dataset.variables[invarname].units
long_name = dataset.variables[invarname].long_name
dataset.close()
year = 2011
day = 1
#create netCDF file
outfile = 'HYCOM_GLBa0.08_North_grid.nc'
nc = netCDF4.Dataset(outfile, 'w', format='NETCDF3_64BIT')
nc.Created = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
nc.title = 'HYCOM + NCODA Global 1/12 Analysis (GLBa0.08)'
#create dimensions
Mp, Lp = lon.shape
N = len(z)
nc.createDimension('lon', Lp)
nc.createDimension('lat', Mp)
nc.createDimension('z', N)
nc.createDimension('ocean_time', None)
#create variables
nc.createVariable('lon', 'f', ('lat', 'lon'))
nc.variables['lon'].long_name = 'longitude'
nc.variables['lon'].units = 'degrees_east'
nc.variables['lon'][:] = lon
nc.createVariable('lat', 'f', ('lat', 'lon'))
nc.variables['lat'].long_name = 'latitude'
nc.variables['lat'].units = 'degrees_north'
nc.variables['lat'][:] = lat
nc.createVariable('z', 'f', ('z'))
nc.variables['z'].long_name = 'depth'
nc.variables['z'].units = 'meter'
nc.variables['z'][:] = z
nc.createVariable('ocean_time', 'f', ('ocean_time'))
nc.variables['ocean_time'].units = 'days since 1900-01-01 00:00:00'
jday = pyroms_toolbox.date2jday(datetime(year, 1, 1)) + day - 1
nc.variables['ocean_time'][0] = jday
nc.createVariable(outvarname, 'f', ('ocean_time', 'z', 'lat', 'lon'), fill_value=spval)
nc.variables[outvarname].long_name = long_name
nc.variables[outvarname].units = units
nc.variables[outvarname][0] = var
nc.close()
| bsd-3-clause |
zrhans/python | exemplos/Examples.lnk/bokeh/compat/mpl/subplots.py | 13 | 1798 | """
Edward Tufte uses this example from Anscombe to show 4 datasets of x
and y that have the same mean, standard deviation, and regression
line, but which are qualitatively different.
matplotlib fun for a rainy day
"""
import matplotlib.pyplot as plt
import numpy as np
from bokeh import mpl
from bokeh.plotting import show
x = np.array([10, 8, 13, 9, 11, 14, 6, 4, 12, 7, 5])
y1 = np.array([8.04, 6.95, 7.58, 8.81, 8.33, 9.96, 7.24, 4.26, 10.84, 4.82, 5.68])
y2 = np.array([9.14, 8.14, 8.74, 8.77, 9.26, 8.10, 6.13, 3.10, 9.13, 7.26, 4.74])
y3 = np.array([7.46, 6.77, 12.74, 7.11, 7.81, 8.84, 6.08, 5.39, 8.15, 6.42, 5.73])
x4 = np.array([8, 8, 8, 8, 8, 8, 8, 19, 8, 8, 8])
y4 = np.array([6.58, 5.76, 7.71, 8.84, 8.47, 7.04, 5.25, 12.50, 5.56, 7.91, 6.89])
def fit(x):
return 3 + 0.5 * x
xfit = np.linspace(np.amin(x), np.amax(x), len(x))
plt.subplot(221)
plt.plot(x, y1, 'ks', xfit, fit(xfit), 'r-', lw=2)
plt.axis([2, 20, 2, 14])
plt.setp(plt.gca(), xticklabels=[], yticks=(4, 8, 12), xticks=(0, 10, 20))
plt.ylabel('I', fontsize=20)
plt.subplot(222)
plt.plot(x, y2, 'ks', xfit, fit(xfit), 'r-', lw=2)
plt.axis([2, 20, 2, 14])
plt.setp(plt.gca(), xticklabels=[], yticks=(4, 8, 12), yticklabels=[], xticks=(0, 10, 20))
plt.ylabel('II', fontsize=20)
plt.subplot(223)
plt.plot(x, y3, 'ks', xfit, fit(xfit), 'r-', lw=2)
plt.axis([2, 20, 2, 14])
plt.ylabel('III', fontsize=20)
plt.setp(plt.gca(), yticks=(4, 8, 12), xticks=(0, 10, 20))
plt.subplot(224)
xfit = np.array([np.amin(x4), np.amax(x4)])
plt.plot(x4, y4, 'ks', xfit, fit(xfit), 'r-', lw=2)
plt.axis([2, 20, 2, 14])
plt.setp(plt.gca(), yticklabels=[], yticks=(4, 8, 12), xticks=(0, 10, 20))
plt.ylabel('IV', fontsize=20)
# We create the figure in matplotlib and then we "pass it" to Bokeh
show(mpl.to_bokeh(name="subplots"))
| gpl-2.0 |
JensMunkHansen/sofus | python/MultiElement.py | 1 | 1541 | import numpy as np
from fnm import (rect,linear_array)
from timeit import default_timer as timer
import matplotlib.pyplot as plt
plt.ion()
def create_time_string(seconds):
m, s = divmod(seconds, 60)
h, m = divmod(m, 60)
ms = np.round(s * 1000) % 1000
timeString = "%d:%02d:%02d,%03d" % (h, m, s, (1000*ms))
return timeString;
print('This script uses the Fast Nearfield Method to calculate the CW pressure field of');
print('an array of 10 rectangular elements focused at a single point. The script');
print('outputs the pressure field.\n');
f0 = 1e6 # excitation frequency,Hz
soundspeed = 1500 # m/s
lamda = soundspeed / f0 # wavelength, m
#define a transducer structure/array
nelex = 10
neley = 1
kerf = 5.0e-4
width = 3e-3 # transducer width, m
height = 50e-3 # transducer height, m
d = nelex * (width+kerf)
xmin = -1.5 * d/2
xmax = 1.5 * d/2
ymin = 0
ymax = 0
zmin = 0.0
zmax = 2*d
nx = 100
nz = 100
dx = (xmax - xmin) / max(nx-1.0,1.0)
dz = (zmax - zmin) / max(nz-1.0,1.0)
xs = (np.r_[0:nx] - (nx-1.0)/2.0) * dx
zs = (np.r_[0:nz]) * dz
k = (2*np.pi)/lamda
factor = int(height / width)
ndiv = 32
#factor = 1
xs,zs = np.meshgrid(xs,zs,indexing='ij')
a = linear_array(nElements=nelex,pitch=width+kerf,kerf=kerf,height=height,c=soundspeed, nAbcissa=[ndiv,ndiv*factor])
a.focus([0,0,d],f0)
hmm = a.cw_pressure(xs,np.zeros(xs.shape),zs,k)
result = np.real(np.abs(hmm))
plt.figure()
plt.imshow(result,extent=np.round(100*np.r_[0,2*d,-d/2,d/2]),interpolation='none')
plt.xlabel('Depth [cm]')
plt.ylabel('Width [cm]')
| gpl-3.0 |
fredhusser/scikit-learn | examples/linear_model/plot_multi_task_lasso_support.py | 249 | 2211 | #!/usr/bin/env python
"""
=============================================
Joint feature selection with multi-task Lasso
=============================================
The multi-task lasso allows to fit multiple regression problems
jointly enforcing the selected features to be the same across
tasks. This example simulates sequential measurements, each task
is a time instant, and the relevant features vary in amplitude
over time while being the same. The multi-task lasso imposes that
features that are selected at one time point are select for all time
point. This makes feature selection by the Lasso more stable.
"""
print(__doc__)
# Author: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
from sklearn.linear_model import MultiTaskLasso, Lasso
rng = np.random.RandomState(42)
# Generate some 2D coefficients with sine waves with random frequency and phase
n_samples, n_features, n_tasks = 100, 30, 40
n_relevant_features = 5
coef = np.zeros((n_tasks, n_features))
times = np.linspace(0, 2 * np.pi, n_tasks)
for k in range(n_relevant_features):
coef[:, k] = np.sin((1. + rng.randn(1)) * times + 3 * rng.randn(1))
X = rng.randn(n_samples, n_features)
Y = np.dot(X, coef.T) + rng.randn(n_samples, n_tasks)
coef_lasso_ = np.array([Lasso(alpha=0.5).fit(X, y).coef_ for y in Y.T])
coef_multi_task_lasso_ = MultiTaskLasso(alpha=1.).fit(X, Y).coef_
###############################################################################
# Plot support and time series
fig = plt.figure(figsize=(8, 5))
plt.subplot(1, 2, 1)
plt.spy(coef_lasso_)
plt.xlabel('Feature')
plt.ylabel('Time (or Task)')
plt.text(10, 5, 'Lasso')
plt.subplot(1, 2, 2)
plt.spy(coef_multi_task_lasso_)
plt.xlabel('Feature')
plt.ylabel('Time (or Task)')
plt.text(10, 5, 'MultiTaskLasso')
fig.suptitle('Coefficient non-zero location')
feature_to_plot = 0
plt.figure()
plt.plot(coef[:, feature_to_plot], 'k', label='Ground truth')
plt.plot(coef_lasso_[:, feature_to_plot], 'g', label='Lasso')
plt.plot(coef_multi_task_lasso_[:, feature_to_plot],
'r', label='MultiTaskLasso')
plt.legend(loc='upper center')
plt.axis('tight')
plt.ylim([-1.1, 1.1])
plt.show()
| bsd-3-clause |
hainm/statsmodels | statsmodels/sandbox/tsa/diffusion.py | 31 | 18732 | '''getting started with diffusions, continuous time stochastic processes
Author: josef-pktd
License: BSD
References
----------
An Algorithmic Introduction to Numerical Simulation of Stochastic Differential
Equations
Author(s): Desmond J. Higham
Source: SIAM Review, Vol. 43, No. 3 (Sep., 2001), pp. 525-546
Published by: Society for Industrial and Applied Mathematics
Stable URL: http://www.jstor.org/stable/3649798
http://www.sitmo.com/ especially the formula collection
Notes
-----
OU process: use same trick for ARMA with constant (non-zero mean) and drift
some of the processes have easy multivariate extensions
*Open Issues*
include xzero in returned sample or not? currently not
*TODOS*
* Milstein from Higham paper, for which processes does it apply
* Maximum Likelihood estimation
* more statistical properties (useful for tests)
* helper functions for display and MonteCarlo summaries (also for testing/checking)
* more processes for the menagerie (e.g. from empirical papers)
* characteristic functions
* transformations, non-linear e.g. log
* special estimators, e.g. Ait Sahalia, empirical characteristic functions
* fft examples
* check naming of methods, "simulate", "sample", "simexact", ... ?
stochastic volatility models: estimation unclear
finance applications ? option pricing, interest rate models
'''
from __future__ import print_function
import numpy as np
from scipy import stats, signal
import matplotlib.pyplot as plt
#np.random.seed(987656789)
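# Illustrative usage sketch (mirrors the examples in __main__ below, not part
# of the original header):
#   w = Diffusion()
#   W, t = w.simulateW(nobs=1000, nrepl=100)             # 100 Wiener paths
#   ou = OUprocess(xzero=2, mu=1, lambd=0.5, sigma=0.1)
#   paths = ou.exactprocess(0, 100, ddt=0.1, nrepl=100)  # mean-reverting paths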
class Diffusion(object):
'''Wiener Process, Brownian Motion with mu=0 and sigma=1
'''
def __init__(self):
pass
def simulateW(self, nobs=100, T=1, dt=None, nrepl=1):
'''generate sample of Wiener Process
'''
dt = T*1.0/nobs
t = np.linspace(dt, 1, nobs)
dW = np.sqrt(dt)*np.random.normal(size=(nrepl, nobs))
W = np.cumsum(dW,1)
self.dW = dW
return W, t
def expectedsim(self, func, nobs=100, T=1, dt=None, nrepl=1):
'''get expectation of a function of a Wiener Process by simulation
        initially used for the test example from Higham (2001)
'''
W, t = self.simulateW(nobs=nobs, T=T, dt=dt, nrepl=nrepl)
U = func(t, W)
Umean = U.mean(0)
return U, Umean, t
class AffineDiffusion(Diffusion):
'''
differential equation:
:math::
dx_t = f(t,x)dt + \sigma(t,x)dW_t
integral:
:math::
x_T = x_0 + \\int_{0}^{T}f(t,S)dt + \\int_0^T \\sigma(t,S)dW_t
TODO: check definition, affine, what about jump diffusion?
'''
def __init__(self):
pass
def sim(self, nobs=100, T=1, dt=None, nrepl=1):
# this doesn't look correct if drift or sig depend on x
# see arithmetic BM
W, t = self.simulateW(nobs=nobs, T=T, dt=dt, nrepl=nrepl)
dx = self._drift() + self._sig() * W
x = np.cumsum(dx,1)
xmean = x.mean(0)
return x, xmean, t
def simEM(self, xzero=None, nobs=100, T=1, dt=None, nrepl=1, Tratio=4):
'''
from Higham 2001
TODO: reverse parameterization to start with final nobs and DT
TODO: check if I can skip the loop using my way from exactprocess
problem might be Winc (reshape into 3d and sum)
TODO: (later) check memory efficiency for large simulations
'''
#TODO: reverse parameterization to start with final nobs and DT
nobs = nobs * Tratio # simple way to change parameter
# maybe wrong parameterization,
# drift too large, variance too small ? which dt/Dt
# _drift, _sig independent of dt is wrong
if xzero is None:
xzero = self.xzero
if dt is None:
dt = T*1.0/nobs
W, t = self.simulateW(nobs=nobs, T=T, dt=dt, nrepl=nrepl)
dW = self.dW
t = np.linspace(dt, 1, nobs)
Dt = Tratio*dt;
L = nobs/Tratio; # L EM steps of size Dt = R*dt
Xem = np.zeros((nrepl,L)); # preallocate for efficiency
Xtemp = xzero
Xem[:,0] = xzero
for j in np.arange(1,L):
#Winc = np.sum(dW[:,Tratio*(j-1)+1:Tratio*j],1)
Winc = np.sum(dW[:,np.arange(Tratio*(j-1)+1,Tratio*j)],1)
#Xtemp = Xtemp + Dt*lamda*Xtemp + mu*Xtemp*Winc;
Xtemp = Xtemp + self._drift(x=Xtemp) + self._sig(x=Xtemp) * Winc
#Dt*lamda*Xtemp + mu*Xtemp*Winc;
Xem[:,j] = Xtemp
return Xem
'''
R = 4; Dt = R*dt; L = N/R; % L EM steps of size Dt = R*dt
Xem = zeros(1,L); % preallocate for efficiency
Xtemp = Xzero;
for j = 1:L
Winc = sum(dW(R*(j-1)+1:R*j));
Xtemp = Xtemp + Dt*lambda*Xtemp + mu*Xtemp*Winc;
Xem(j) = Xtemp;
end
'''
class ExactDiffusion(AffineDiffusion):
'''Diffusion that has an exact integral representation
this is currently mainly for geometric, log processes
'''
def __init__(self):
pass
def exactprocess(self, xzero, nobs, ddt=1., nrepl=2):
'''ddt : discrete delta t
should be the same as an AR(1)
not tested yet
'''
t = np.linspace(ddt, nobs*ddt, nobs)
#expnt = np.exp(-self.lambd * t)
expddt = np.exp(-self.lambd * ddt)
normrvs = np.random.normal(size=(nrepl,nobs))
#do I need lfilter here AR(1) ? if mean reverting lag-coeff<1
#lfilter doesn't handle 2d arrays, it does?
inc = self._exactconst(expddt) + self._exactstd(expddt) * normrvs
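        # lfilter with denominator [1, -expddt] applies the AR(1) recursion
        # x_t = expddt * x_{t-1} + inc_t along the last axis of inc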
return signal.lfilter([1.], [1.,-expddt], inc)
def exactdist(self, xzero, t):
expnt = np.exp(-self.lambd * t)
meant = xzero * expnt + self._exactconst(expnt)
stdt = self._exactstd(expnt)
return stats.norm(loc=meant, scale=stdt)
class ArithmeticBrownian(AffineDiffusion):
'''
:math::
dx_t &= \\mu dt + \\sigma dW_t
'''
def __init__(self, xzero, mu, sigma):
self.xzero = xzero
self.mu = mu
self.sigma = sigma
def _drift(self, *args, **kwds):
return self.mu
def _sig(self, *args, **kwds):
return self.sigma
def exactprocess(self, nobs, xzero=None, ddt=1., nrepl=2):
'''ddt : discrete delta t
not tested yet
'''
if xzero is None:
xzero = self.xzero
t = np.linspace(ddt, nobs*ddt, nobs)
normrvs = np.random.normal(size=(nrepl,nobs))
        # exact Gaussian increments over a step of length ddt
        inc = self.mu * ddt + self.sigma * np.sqrt(ddt) * normrvs
#return signal.lfilter([1.], [1.,-1], inc)
return xzero + np.cumsum(inc,1)
def exactdist(self, xzero, t):
        # exact marginal: X_t = xzero + mu*t + sigma*W_t
        meant = xzero + self.mu * t
        stdt = self.sigma * np.sqrt(t)
return stats.norm(loc=meant, scale=stdt)
class GeometricBrownian(AffineDiffusion):
'''Geometric Brownian Motion
:math::
dx_t &= \\mu x_t dt + \\sigma x_t dW_t
$x_t $ stochastic process of Geometric Brownian motion,
$\mu $ is the drift,
$\sigma $ is the Volatility,
$W$ is the Wiener process (Brownian motion).
'''
def __init__(self, xzero, mu, sigma):
self.xzero = xzero
self.mu = mu
self.sigma = sigma
def _drift(self, *args, **kwds):
x = kwds['x']
return self.mu * x
def _sig(self, *args, **kwds):
x = kwds['x']
return self.sigma * x
class OUprocess(AffineDiffusion):
'''Ornstein-Uhlenbeck
:math::
dx_t&=\\lambda(\\mu - x_t)dt+\\sigma dW_t
mean reverting process
TODO: move exact higher up in class hierarchy
'''
def __init__(self, xzero, mu, lambd, sigma):
self.xzero = xzero
self.lambd = lambd
self.mu = mu
self.sigma = sigma
def _drift(self, *args, **kwds):
x = kwds['x']
return self.lambd * (self.mu - x)
    def _sig(self, *args, **kwds):
        # OU noise is additive: sigma does not depend on the state x
        return self.sigma
def exact(self, xzero, t, normrvs):
#TODO: aggregate over time for process with observations for all t
# i.e. exact conditional distribution for discrete time increment
# -> exactprocess
#TODO: for single t, return stats.norm -> exactdist
expnt = np.exp(-self.lambd * t)
return (xzero * expnt + self.mu * (1-expnt) +
self.sigma * np.sqrt((1-expnt*expnt)/2./self.lambd) * normrvs)
def exactprocess(self, xzero, nobs, ddt=1., nrepl=2):
'''ddt : discrete delta t
should be the same as an AR(1)
not tested yet
# after writing this I saw the same use of lfilter in sitmo
'''
t = np.linspace(ddt, nobs*ddt, nobs)
expnt = np.exp(-self.lambd * t)
expddt = np.exp(-self.lambd * ddt)
normrvs = np.random.normal(size=(nrepl,nobs))
#do I need lfilter here AR(1) ? lfilter doesn't handle 2d arrays, it does?
from scipy import signal
#xzero * expnt
inc = ( self.mu * (1-expddt) +
self.sigma * np.sqrt((1-expddt*expddt)/2./self.lambd) * normrvs )
return signal.lfilter([1.], [1.,-expddt], inc)
def exactdist(self, xzero, t):
#TODO: aggregate over time for process with observations for all t
#TODO: for single t, return stats.norm
expnt = np.exp(-self.lambd * t)
meant = xzero * expnt + self.mu * (1-expnt)
stdt = self.sigma * np.sqrt((1-expnt*expnt)/2./self.lambd)
from scipy import stats
return stats.norm(loc=meant, scale=stdt)
def fitls(self, data, dt):
'''assumes data is 1d, univariate time series
formula from sitmo
'''
# brute force, no parameter estimation errors
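        # OLS of x_{t+dt} on (1, x_t) gives the discrete AR(1) form
        # x_{t+dt} = const + slope * x_t + e, so lambda = -log(slope)/dt,
        # mu = const/(1 - slope), and sigma follows from the innovation variance.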
nobs = len(data)-1
exog = np.column_stack((np.ones(nobs), data[:-1]))
parest, res, rank, sing = np.linalg.lstsq(exog, data[1:])
const, slope = parest
errvar = res/(nobs-2.)
lambd = -np.log(slope)/dt
sigma = np.sqrt(-errvar * 2.*np.log(slope)/ (1-slope**2)/dt)
mu = const / (1-slope)
return mu, lambd, sigma
class SchwartzOne(ExactDiffusion):
'''the Schwartz type 1 stochastic process
:math::
dx_t = \\kappa (\\mu - \\ln x_t) x_t dt + \\sigma x_tdW \\
The Schwartz type 1 process is a log of the Ornstein-Uhlenbeck stochastic
process.
'''
def __init__(self, xzero, mu, kappa, sigma):
self.xzero = xzero
self.mu = mu
self.kappa = kappa
self.lambd = kappa #alias until I fix exact
self.sigma = sigma
def _exactconst(self, expnt):
return (1-expnt) * (self.mu - self.sigma**2 / 2. /self.kappa)
def _exactstd(self, expnt):
return self.sigma * np.sqrt((1-expnt*expnt)/2./self.kappa)
def exactprocess(self, xzero, nobs, ddt=1., nrepl=2):
'''uses exact solution for log of process
'''
lnxzero = np.log(xzero)
        lnx = super(self.__class__, self).exactprocess(lnxzero, nobs, ddt=ddt, nrepl=nrepl)
return np.exp(lnx)
def exactdist(self, xzero, t):
expnt = np.exp(-self.lambd * t)
#TODO: check this is still wrong, just guessing
meant = np.log(xzero) * expnt + self._exactconst(expnt)
stdt = self._exactstd(expnt)
return stats.lognorm(loc=meant, scale=stdt)
def fitls(self, data, dt):
'''assumes data is 1d, univariate time series
formula from sitmo
'''
# brute force, no parameter estimation errors
nobs = len(data)-1
exog = np.column_stack((np.ones(nobs),np.log(data[:-1])))
parest, res, rank, sing = np.linalg.lstsq(exog, np.log(data[1:]))
const, slope = parest
errvar = res/(nobs-2.) #check denominator estimate, of sigma too low
kappa = -np.log(slope)/dt
sigma = np.sqrt(errvar * kappa / (1-np.exp(-2*kappa*dt)))
mu = const / (1-np.exp(-kappa*dt)) + sigma**2/2./kappa
if np.shape(mu)== (1,): mu = mu[0] # how to remove scalar array ?
if np.shape(sigma)== (1,): sigma = sigma[0]
#mu, kappa are good, sigma too small
return mu, kappa, sigma
class BrownianBridge(object):
def __init__(self):
pass
def simulate(self, x0, x1, nobs, nrepl=1, ddt=1., sigma=1.):
nobs=nobs+1
dt = ddt*1./nobs
t = np.linspace(dt, ddt-dt, nobs)
t = np.linspace(dt, ddt, nobs)
wm = [t/ddt, 1-t/ddt]
#wmi = wm[1]
#wm1 = x1*wm[0]
wmi = 1-dt/(ddt-t)
wm1 = x1*(dt/(ddt-t))
su = sigma* np.sqrt(t*(1-t)/ddt)
s = sigma* np.sqrt(dt*(ddt-t-dt)/(ddt-t))
x = np.zeros((nrepl, nobs))
x[:,0] = x0
rvs = s*np.random.normal(size=(nrepl,nobs))
for i in range(1,nobs):
x[:,i] = x[:,i-1]*wmi[i] + wm1[i] + rvs[:,i]
return x, t, su
class CompoundPoisson(object):
'''nobs iid compound poisson distributions, not a process in time
'''
def __init__(self, lambd, randfn=np.random.normal):
if len(lambd) != len(randfn):
raise ValueError('lambd and randfn need to have the same number of elements')
self.nobj = len(lambd)
self.randfn = randfn
self.lambd = np.asarray(lambd)
def simulate(self, nobs, nrepl=1):
nobj = self.nobj
x = np.zeros((nrepl, nobs, nobj))
N = np.random.poisson(self.lambd[None,None,:], size=(nrepl,nobs,nobj))
for io in range(nobj):
randfnc = self.randfn[io]
nc = N[:,:,io]
#print nrepl,nobs,nc
#xio = randfnc(size=(nrepl,nobs,np.max(nc))).cumsum(-1)[np.arange(nrepl)[:,None],np.arange(nobs),nc-1]
rvs = randfnc(size=(nrepl,nobs,np.max(nc)))
print('rvs.sum()', rvs.sum(), rvs.shape)
xio = rvs.cumsum(-1)[np.arange(nrepl)[:,None],np.arange(nobs),nc-1]
#print xio.shape
x[:,:,io] = xio
x[N==0] = 0
return x, N
'''
randn('state',100) % set the state of randn
T = 1; N = 500; dt = T/N; t = [dt:dt:1];
M = 1000; % M paths simultaneously
dW = sqrt(dt)*randn(M,N); % increments
W = cumsum(dW,2); % cumulative sum
U = exp(repmat(t,[M 1]) + 0.5*W);
Umean = mean(U);
plot([0,t],[1,Umean],'b-'), hold on % plot mean over M paths
plot([0,t],[ones(5,1),U(1:5,:)],'r--'), hold off % plot 5 individual paths
xlabel('t','FontSize',16)
ylabel('U(t)','FontSize',16,'Rotation',0,'HorizontalAlignment','right')
legend('mean of 1000 paths','5 individual paths',2)
averr = norm((Umean - exp(9*t/8)),'inf') % sample error
'''
if __name__ == '__main__':
doplot = 1
nrepl = 1000
examples = []#['all']
if 'all' in examples:
w = Diffusion()
# Wiener Process
# ^^^^^^^^^^^^^^
ws = w.simulateW(1000, nrepl=nrepl)
if doplot:
plt.figure()
tmp = plt.plot(ws[0].T)
tmp = plt.plot(ws[0].mean(0), linewidth=2)
plt.title('Standard Brownian Motion (Wiener Process)')
func = lambda t, W: np.exp(t + 0.5*W)
us = w.expectedsim(func, nobs=500, nrepl=nrepl)
if doplot:
plt.figure()
tmp = plt.plot(us[0].T)
tmp = plt.plot(us[1], linewidth=2)
plt.title('Brownian Motion - exp')
#plt.show()
averr = np.linalg.norm(us[1] - np.exp(9*us[2]/8.), np.inf)
print(averr)
#print us[1][:10]
#print np.exp(9.*us[2][:10]/8.)
# Geometric Brownian
# ^^^^^^^^^^^^^^^^^^
gb = GeometricBrownian(xzero=1., mu=0.01, sigma=0.5)
gbs = gb.simEM(nobs=100, nrepl=100)
if doplot:
plt.figure()
tmp = plt.plot(gbs.T)
tmp = plt.plot(gbs.mean(0), linewidth=2)
plt.title('Geometric Brownian')
plt.figure()
tmp = plt.plot(np.log(gbs).T)
tmp = plt.plot(np.log(gbs.mean(0)), linewidth=2)
plt.title('Geometric Brownian - log-transformed')
ab = ArithmeticBrownian(xzero=1, mu=0.05, sigma=1)
abs = ab.simEM(nobs=100, nrepl=100)
if doplot:
plt.figure()
tmp = plt.plot(abs.T)
tmp = plt.plot(abs.mean(0), linewidth=2)
plt.title('Arithmetic Brownian')
# Ornstein-Uhlenbeck
# ^^^^^^^^^^^^^^^^^^
ou = OUprocess(xzero=2, mu=1, lambd=0.5, sigma=0.1)
ous = ou.simEM()
oue = ou.exact(1, 1, np.random.normal(size=(5,10)))
ou.exact(0, np.linspace(0,10,10/0.1), 0)
ou.exactprocess(0,10)
print(ou.exactprocess(0,10, ddt=0.1,nrepl=10).mean(0))
#the following looks good, approaches mu
oues = ou.exactprocess(0,100, ddt=0.1,nrepl=100)
if doplot:
plt.figure()
tmp = plt.plot(oues.T)
tmp = plt.plot(oues.mean(0), linewidth=2)
plt.title('Ornstein-Uhlenbeck')
# SchwartsOne
# ^^^^^^^^^^^
so = SchwartzOne(xzero=0, mu=1, kappa=0.5, sigma=0.1)
sos = so.exactprocess(0,50, ddt=0.1,nrepl=100)
print(sos.mean(0))
print(np.log(sos.mean(0)))
doplot = 1
if doplot:
plt.figure()
tmp = plt.plot(sos.T)
tmp = plt.plot(sos.mean(0), linewidth=2)
plt.title('Schwartz One')
print(so.fitls(sos[0,:],dt=0.1))
sos2 = so.exactprocess(0,500, ddt=0.1,nrepl=5)
print('true: mu=1, kappa=0.5, sigma=0.1')
for i in range(5):
print(so.fitls(sos2[i],dt=0.1))
# Brownian Bridge
# ^^^^^^^^^^^^^^^
bb = BrownianBridge()
#bbs = bb.sample(x0, x1, nobs, nrepl=1, ddt=1., sigma=1.)
bbs, t, wm = bb.simulate(0, 0.5, 99, nrepl=500, ddt=1., sigma=0.1)
if doplot:
plt.figure()
tmp = plt.plot(bbs.T)
tmp = plt.plot(bbs.mean(0), linewidth=2)
plt.title('Brownian Bridge')
plt.figure()
plt.plot(wm,'r', label='theoretical')
plt.plot(bbs.std(0), label='simulated')
plt.title('Brownian Bridge - Variance')
plt.legend()
# Compound Poisson
# ^^^^^^^^^^^^^^^^
cp = CompoundPoisson([1,1], [np.random.normal,np.random.normal])
cps = cp.simulate(nobs=20000,nrepl=3)
print(cps[0].sum(-1).sum(-1))
print(cps[0].sum())
print(cps[0].mean(-1).mean(-1))
print(cps[0].mean())
print(cps[1].size)
print(cps[1].sum())
#Note Y = sum^{N} X is compound poisson of iid x, then
#E(Y) = E(N)*E(X) eg. eq. (6.37) page 385 in http://ee.stanford.edu/~gray/sp.html
#plt.show()
| bsd-3-clause |
Featuretools/featuretools | featuretools/utils/entity_utils.py | 1 | 7387 | from datetime import datetime
import numpy as np
import pandas as pd
import pandas.api.types as pdtypes
from featuretools import variable_types as vtypes
def infer_variable_types(df, link_vars, variable_types, time_index, secondary_time_index):
'''Infer variable types from dataframe
Args:
df (DataFrame): Input DataFrame
link_vars (list[]): Linked variables
variable_types (dict[str -> dict[str -> type]]) : An entity's
variable_types dict maps string variable ids to types (:class:`.Variable`)
or (type, kwargs) to pass keyword arguments to the Variable.
time_index (str or None): Name of time_index column
secondary_time_index (dict[str: [str]]): Dictionary of secondary time columns
that each map to a list of columns that depend on that secondary time
'''
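    # Illustrative call (hypothetical arguments, not from the original source):
    #   infer_variable_types(df, link_vars=[], variable_types={},
    #                        time_index=None, secondary_time_index={})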
# TODO: set pk and pk types here
inferred_types = {}
vids_to_assume_datetime = [time_index]
if len(list(secondary_time_index.keys())):
vids_to_assume_datetime.append(list(secondary_time_index.keys())[0])
inferred_type = vtypes.Unknown
for variable in df.columns:
if variable in variable_types:
continue
elif variable in vids_to_assume_datetime:
if col_is_datetime(df[variable]):
inferred_type = vtypes.Datetime
else:
inferred_type = vtypes.Numeric
elif variable in link_vars:
inferred_type = vtypes.Categorical
elif df[variable].dtype == "object":
if not len(df[variable]):
inferred_type = vtypes.Categorical
elif col_is_datetime(df[variable]):
inferred_type = vtypes.Datetime
else:
inferred_type = vtypes.Categorical
# heuristics to predict this some other than categorical
sample = df[variable].sample(min(10000, len(df[variable])))
# catch cases where object dtype cannot be interpreted as a string
try:
avg_length = sample.str.len().mean()
if avg_length > 50:
inferred_type = vtypes.Text
except AttributeError:
pass
elif df[variable].dtype == "bool":
inferred_type = vtypes.Boolean
elif pdtypes.is_categorical_dtype(df[variable].dtype):
inferred_type = vtypes.Categorical
elif pdtypes.is_numeric_dtype(df[variable].dtype):
inferred_type = vtypes.Numeric
elif col_is_datetime(df[variable]):
inferred_type = vtypes.Datetime
elif len(df[variable]):
sample = df[variable] \
.sample(min(10000, df[variable].nunique(dropna=False)))
unique = sample.unique()
            # fraction of distinct values in the sample; mostly-repeated values
            # suggest a categorical column
            percent_unique = len(unique) / sample.size
if percent_unique < .05:
inferred_type = vtypes.Categorical
else:
inferred_type = vtypes.Numeric
inferred_types[variable] = inferred_type
return inferred_types
def convert_all_variable_data(df, variable_types):
"""Convert all dataframes' variables to different types.
"""
for var_id, desired_type in variable_types.items():
type_args = {}
if isinstance(desired_type, tuple):
# grab args before assigning type
type_args = desired_type[1]
desired_type = desired_type[0]
if var_id not in df.columns:
raise LookupError("Variable ID %s not in DataFrame" % (var_id))
current_type = df[var_id].dtype.name
if issubclass(desired_type, vtypes.Numeric) and \
current_type not in vtypes.PandasTypes._pandas_numerics:
df = convert_variable_data(df=df,
column_id=var_id,
new_type=desired_type,
**type_args)
if issubclass(desired_type, vtypes.Discrete) and \
current_type not in [vtypes.PandasTypes._categorical]:
df = convert_variable_data(df=df,
column_id=var_id,
new_type=desired_type,
**type_args)
if issubclass(desired_type, vtypes.Datetime) and \
current_type not in vtypes.PandasTypes._pandas_datetimes:
df = convert_variable_data(df=df,
column_id=var_id,
new_type=desired_type,
**type_args)
return df
def convert_variable_data(df, column_id, new_type, **kwargs):
"""Convert dataframe's variable to different type.
"""
if df[column_id].empty:
return df
if new_type == vtypes.Numeric:
orig_nonnull = df[column_id].dropna().shape[0]
df[column_id] = pd.to_numeric(df[column_id], errors='coerce')
# This will convert strings to nans
# If column contained all strings, then we should
# just raise an error, because that shouldn't have
# been converted to numeric
nonnull = df[column_id].dropna().shape[0]
if nonnull == 0 and orig_nonnull != 0:
raise TypeError("Attempted to convert all string column {} to numeric".format(column_id))
elif issubclass(new_type, vtypes.Datetime):
format = kwargs.get("format", None)
# TODO: if float convert to int?
df[column_id] = pd.to_datetime(df[column_id], format=format,
infer_datetime_format=True)
elif new_type == vtypes.Boolean:
map_dict = {kwargs.get("true_val", True): True,
kwargs.get("false_val", False): False,
True: True,
False: False}
# TODO: what happens to nans?
df[column_id] = df[column_id].map(map_dict).astype(np.bool)
elif not issubclass(new_type, vtypes.Discrete):
raise Exception("Cannot convert column %s to %s" %
(column_id, new_type))
return df
def get_linked_vars(entity):
"""Return a list with the entity linked variables.
"""
link_relationships = [r for r in entity.entityset.relationships
if r.parent_entity.id == entity.id or
r.child_entity.id == entity.id]
link_vars = [v.id for rel in link_relationships
for v in [rel.parent_variable, rel.child_variable]
if v.entity.id == entity.id]
return link_vars
def col_is_datetime(col):
# check if dtype is datetime
if (col.dtype.name.find('datetime') > -1 or
(len(col) and isinstance(col.iloc[0], datetime))):
return True
# if it can be casted to numeric, it's not a datetime
dropped_na = col.dropna()
try:
pd.to_numeric(dropped_na, errors='raise')
except (ValueError, TypeError):
# finally, try to cast to datetime
if col.dtype.name.find('str') > -1 or col.dtype.name.find('object') > -1:
try:
pd.to_datetime(dropped_na, errors='raise')
except Exception:
return False
else:
return True
return False
| bsd-3-clause |
HyperloopTeam/FullOpenMDAO | lib/python2.7/site-packages/matplotlib/tests/test_rcparams.py | 9 | 10258 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import os
import sys
import warnings
import matplotlib as mpl
from matplotlib.tests import assert_str_equal
from matplotlib.testing.decorators import cleanup, knownfailureif
from nose.tools import assert_true, assert_raises, assert_equal
from nose.plugins.skip import SkipTest
import nose
from itertools import chain
import numpy as np
from matplotlib.rcsetup import (validate_bool_maybe_none,
validate_stringlist,
validate_bool,
validate_nseq_int,
validate_nseq_float)
mpl.rc('text', usetex=False)
mpl.rc('lines', linewidth=22)
fname = os.path.join(os.path.dirname(__file__), 'test_rcparams.rc')
def test_rcparams():
usetex = mpl.rcParams['text.usetex']
linewidth = mpl.rcParams['lines.linewidth']
# test context given dictionary
with mpl.rc_context(rc={'text.usetex': not usetex}):
assert mpl.rcParams['text.usetex'] == (not usetex)
assert mpl.rcParams['text.usetex'] == usetex
    # test context given filename (mpl.rc sets linewidth to 33)
with mpl.rc_context(fname=fname):
assert mpl.rcParams['lines.linewidth'] == 33
assert mpl.rcParams['lines.linewidth'] == linewidth
# test context given filename and dictionary
with mpl.rc_context(fname=fname, rc={'lines.linewidth': 44}):
assert mpl.rcParams['lines.linewidth'] == 44
assert mpl.rcParams['lines.linewidth'] == linewidth
# test rc_file
try:
mpl.rc_file(fname)
assert mpl.rcParams['lines.linewidth'] == 33
finally:
mpl.rcParams['lines.linewidth'] = linewidth
def test_RcParams_class():
rc = mpl.RcParams({'font.cursive': ['Apple Chancery',
'Textile',
'Zapf Chancery',
'cursive'],
'font.family': 'sans-serif',
'font.weight': 'normal',
'font.size': 12})
if six.PY3:
expected_repr = """
RcParams({'font.cursive': ['Apple Chancery',
'Textile',
'Zapf Chancery',
'cursive'],
'font.family': ['sans-serif'],
'font.size': 12.0,
'font.weight': 'normal'})""".lstrip()
else:
expected_repr = """
RcParams({u'font.cursive': [u'Apple Chancery',
u'Textile',
u'Zapf Chancery',
u'cursive'],
u'font.family': [u'sans-serif'],
u'font.size': 12.0,
u'font.weight': u'normal'})""".lstrip()
assert_str_equal(expected_repr, repr(rc))
if six.PY3:
expected_str = """
font.cursive: ['Apple Chancery', 'Textile', 'Zapf Chancery', 'cursive']
font.family: ['sans-serif']
font.size: 12.0
font.weight: normal""".lstrip()
else:
expected_str = """
font.cursive: [u'Apple Chancery', u'Textile', u'Zapf Chancery', u'cursive']
font.family: [u'sans-serif']
font.size: 12.0
font.weight: normal""".lstrip()
assert_str_equal(expected_str, str(rc))
# test the find_all functionality
assert ['font.cursive', 'font.size'] == sorted(rc.find_all('i[vz]').keys())
assert ['font.family'] == list(six.iterkeys(rc.find_all('family')))
# remove known failure + warnings after merging to master
@knownfailureif(not (sys.version_info[:2] < (2, 7)))
def test_rcparams_update():
if sys.version_info[:2] < (2, 7):
raise nose.SkipTest("assert_raises as context manager "
"not supported with Python < 2.7")
rc = mpl.RcParams({'figure.figsize': (3.5, 42)})
bad_dict = {'figure.figsize': (3.5, 42, 1)}
# make sure validation happens on input
with assert_raises(ValueError):
with warnings.catch_warnings():
warnings.filterwarnings('ignore',
message='.*(validate)',
category=UserWarning)
rc.update(bad_dict)
# remove known failure + warnings after merging to master
@knownfailureif(not (sys.version_info[:2] < (2, 7)))
def test_rcparams_init():
if sys.version_info[:2] < (2, 7):
raise nose.SkipTest("assert_raises as context manager "
"not supported with Python < 2.7")
with assert_raises(ValueError):
with warnings.catch_warnings():
warnings.filterwarnings('ignore',
message='.*(validate)',
category=UserWarning)
mpl.RcParams({'figure.figsize': (3.5, 42, 1)})
@cleanup
def test_Bug_2543():
    # Test that it is possible to add all values to itself / deepcopy
# This was not possible because validate_bool_maybe_none did not
# accept None as an argument.
# https://github.com/matplotlib/matplotlib/issues/2543
# We filter warnings at this stage since a number of them are raised
    # for deprecated rcparams, as they should be. We don't want these
    # printed in the test suite.
with warnings.catch_warnings():
warnings.filterwarnings('ignore',
message='.*(deprecated|obsolete)',
category=UserWarning)
with mpl.rc_context():
_copy = mpl.rcParams.copy()
for key in six.iterkeys(_copy):
mpl.rcParams[key] = _copy[key]
mpl.rcParams['text.dvipnghack'] = None
with mpl.rc_context():
from copy import deepcopy
_deep_copy = deepcopy(mpl.rcParams)
# real test is that this does not raise
assert_true(validate_bool_maybe_none(None) is None)
assert_true(validate_bool_maybe_none("none") is None)
_fonttype = mpl.rcParams['svg.fonttype']
assert_true(_fonttype == mpl.rcParams['svg.embed_char_paths'])
with mpl.rc_context():
mpl.rcParams['svg.embed_char_paths'] = False
assert_true(mpl.rcParams['svg.fonttype'] == "none")
@cleanup
def test_Bug_2543_newer_python():
# only split from above because of the usage of assert_raises
# as a context manager, which only works in 2.7 and above
if sys.version_info[:2] < (2, 7):
raise nose.SkipTest("assert_raises as context manager not supported with Python < 2.7")
from matplotlib.rcsetup import validate_bool_maybe_none, validate_bool
with assert_raises(ValueError):
validate_bool_maybe_none("blah")
with assert_raises(ValueError):
validate_bool(None)
with assert_raises(ValueError):
with mpl.rc_context():
mpl.rcParams['svg.fonttype'] = True
if __name__ == '__main__':
nose.runmodule(argv=['-s', '--with-doctest'], exit=False)
def _validation_test_helper(validator, arg, target):
res = validator(arg)
assert_equal(res, target)
def _validation_fail_helper(validator, arg, exception_type):
if sys.version_info[:2] < (2, 7):
raise nose.SkipTest("assert_raises as context manager not "
"supported with Python < 2.7")
with assert_raises(exception_type):
validator(arg)
def test_validators():
validation_tests = (
{'validator': validate_bool,
'success': chain(((_, True) for _ in
('t', 'y', 'yes', 'on', 'true', '1', 1, True)),
((_, False) for _ in
('f', 'n', 'no', 'off', 'false', '0', 0, False))),
'fail': ((_, ValueError)
for _ in ('aardvark', 2, -1, [], ))},
{'validator': validate_stringlist,
'success': (('', []),
('a,b', ['a', 'b']),
('aardvark', ['aardvark']),
('aardvark, ', ['aardvark']),
('aardvark, ,', ['aardvark']),
(['a', 'b'], ['a', 'b']),
(('a', 'b'), ['a', 'b']),
((1, 2), ['1', '2'])),
'fail': ((dict(), AssertionError),
(1, AssertionError),)
},
{'validator': validate_nseq_int(2),
'success': ((_, [1, 2])
for _ in ('1, 2', [1.5, 2.5], [1, 2],
(1, 2), np.array((1, 2)))),
'fail': ((_, ValueError)
for _ in ('aardvark', ('a', 1),
(1, 2, 3)
))
},
{'validator': validate_nseq_float(2),
'success': ((_, [1.5, 2.5])
for _ in ('1.5, 2.5', [1.5, 2.5], [1.5, 2.5],
(1.5, 2.5), np.array((1.5, 2.5)))),
'fail': ((_, ValueError)
for _ in ('aardvark', ('a', 1),
(1, 2, 3)
))
}
)
for validator_dict in validation_tests:
validator = validator_dict['validator']
for arg, target in validator_dict['success']:
yield _validation_test_helper, validator, arg, target
for arg, error_type in validator_dict['fail']:
yield _validation_fail_helper, validator, arg, error_type
def test_keymaps():
key_list = [k for k in mpl.rcParams if 'keymap' in k]
for k in key_list:
assert(isinstance(mpl.rcParams[k], list))
def test_rcparams_reset_after_fail():
# There was previously a bug that meant that if rc_context failed and
# raised an exception due to issues in the supplied rc parameters, the
# global rc parameters were left in a modified state.
if sys.version_info[:2] >= (2, 7):
from collections import OrderedDict
else:
raise SkipTest("Test can only be run in Python >= 2.7 as it requires OrderedDict")
with mpl.rc_context(rc={'text.usetex': False}):
assert mpl.rcParams['text.usetex'] is False
with assert_raises(KeyError):
with mpl.rc_context(rc=OrderedDict([('text.usetex', True),('test.blah', True)])):
pass
assert mpl.rcParams['text.usetex'] is False
| gpl-2.0 |
shusenl/scikit-learn | sklearn/tests/test_naive_bayes.py | 70 | 17509 | import pickle
from io import BytesIO
import numpy as np
import scipy.sparse
from sklearn.datasets import load_digits, load_iris
from sklearn.cross_validation import cross_val_score, train_test_split
from sklearn.externals.six.moves import zip
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_greater
from sklearn.naive_bayes import GaussianNB, BernoulliNB, MultinomialNB
# Data is just 6 separable points in the plane
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]])
y = np.array([1, 1, 1, 2, 2, 2])
# A bit more random tests
rng = np.random.RandomState(0)
X1 = rng.normal(size=(10, 3))
y1 = (rng.normal(size=(10)) > 0).astype(np.int)
# Data is 6 random integer points in a 100 dimensional space classified to
# three classes.
X2 = rng.randint(5, size=(6, 100))
y2 = np.array([1, 1, 2, 2, 3, 3])
def test_gnb():
# Gaussian Naive Bayes classification.
# This checks that GaussianNB implements fit and predict and returns
# correct values for a simple toy dataset.
clf = GaussianNB()
y_pred = clf.fit(X, y).predict(X)
assert_array_equal(y_pred, y)
y_pred_proba = clf.predict_proba(X)
y_pred_log_proba = clf.predict_log_proba(X)
assert_array_almost_equal(np.log(y_pred_proba), y_pred_log_proba, 8)
# Test whether label mismatch between target y and classes raises
# an Error
# FIXME Remove this test once the more general partial_fit tests are merged
assert_raises(ValueError, GaussianNB().partial_fit, X, y, classes=[0, 1])
def test_gnb_prior():
# Test whether class priors are properly set.
clf = GaussianNB().fit(X, y)
assert_array_almost_equal(np.array([3, 3]) / 6.0,
clf.class_prior_, 8)
clf.fit(X1, y1)
# Check that the class priors sum to 1
assert_array_almost_equal(clf.class_prior_.sum(), 1)
def test_gnb_sample_weight():
"""Test whether sample weights are properly used in GNB. """
# Sample weights all being 1 should not change results
sw = np.ones(6)
clf = GaussianNB().fit(X, y)
clf_sw = GaussianNB().fit(X, y, sw)
assert_array_almost_equal(clf.theta_, clf_sw.theta_)
assert_array_almost_equal(clf.sigma_, clf_sw.sigma_)
# Fitting twice with half sample-weights should result
# in same result as fitting once with full weights
sw = rng.rand(y.shape[0])
clf1 = GaussianNB().fit(X, y, sample_weight=sw)
clf2 = GaussianNB().partial_fit(X, y, classes=[1, 2], sample_weight=sw / 2)
clf2.partial_fit(X, y, sample_weight=sw / 2)
assert_array_almost_equal(clf1.theta_, clf2.theta_)
assert_array_almost_equal(clf1.sigma_, clf2.sigma_)
# Check that duplicate entries and correspondingly increased sample
# weights yield the same result
ind = rng.randint(0, X.shape[0], 20)
sample_weight = np.bincount(ind, minlength=X.shape[0])
clf_dupl = GaussianNB().fit(X[ind], y[ind])
clf_sw = GaussianNB().fit(X, y, sample_weight)
assert_array_almost_equal(clf_dupl.theta_, clf_sw.theta_)
assert_array_almost_equal(clf_dupl.sigma_, clf_sw.sigma_)
def test_discrete_prior():
# Test whether class priors are properly set.
for cls in [BernoulliNB, MultinomialNB]:
clf = cls().fit(X2, y2)
assert_array_almost_equal(np.log(np.array([2, 2, 2]) / 6.0),
clf.class_log_prior_, 8)
def test_mnnb():
# Test Multinomial Naive Bayes classification.
# This checks that MultinomialNB implements fit and predict and returns
# correct values for a simple toy dataset.
for X in [X2, scipy.sparse.csr_matrix(X2)]:
# Check the ability to predict the learning set.
clf = MultinomialNB()
assert_raises(ValueError, clf.fit, -X, y2)
y_pred = clf.fit(X, y2).predict(X)
assert_array_equal(y_pred, y2)
# Verify that np.log(clf.predict_proba(X)) gives the same results as
# clf.predict_log_proba(X)
y_pred_proba = clf.predict_proba(X)
y_pred_log_proba = clf.predict_log_proba(X)
assert_array_almost_equal(np.log(y_pred_proba), y_pred_log_proba, 8)
# Check that incremental fitting yields the same results
clf2 = MultinomialNB()
clf2.partial_fit(X[:2], y2[:2], classes=np.unique(y2))
clf2.partial_fit(X[2:5], y2[2:5])
clf2.partial_fit(X[5:], y2[5:])
y_pred2 = clf2.predict(X)
assert_array_equal(y_pred2, y2)
y_pred_proba2 = clf2.predict_proba(X)
y_pred_log_proba2 = clf2.predict_log_proba(X)
assert_array_almost_equal(np.log(y_pred_proba2), y_pred_log_proba2, 8)
assert_array_almost_equal(y_pred_proba2, y_pred_proba)
assert_array_almost_equal(y_pred_log_proba2, y_pred_log_proba)
# Partial fit on the whole data at once should be the same as fit too
clf3 = MultinomialNB()
clf3.partial_fit(X, y2, classes=np.unique(y2))
y_pred3 = clf3.predict(X)
assert_array_equal(y_pred3, y2)
y_pred_proba3 = clf3.predict_proba(X)
y_pred_log_proba3 = clf3.predict_log_proba(X)
assert_array_almost_equal(np.log(y_pred_proba3), y_pred_log_proba3, 8)
assert_array_almost_equal(y_pred_proba3, y_pred_proba)
assert_array_almost_equal(y_pred_log_proba3, y_pred_log_proba)
def check_partial_fit(cls):
clf1 = cls()
clf1.fit([[0, 1], [1, 0]], [0, 1])
clf2 = cls()
clf2.partial_fit([[0, 1], [1, 0]], [0, 1], classes=[0, 1])
assert_array_equal(clf1.class_count_, clf2.class_count_)
assert_array_equal(clf1.feature_count_, clf2.feature_count_)
clf3 = cls()
clf3.partial_fit([[0, 1]], [0], classes=[0, 1])
clf3.partial_fit([[1, 0]], [1])
assert_array_equal(clf1.class_count_, clf3.class_count_)
assert_array_equal(clf1.feature_count_, clf3.feature_count_)
def test_discretenb_partial_fit():
for cls in [MultinomialNB, BernoulliNB]:
yield check_partial_fit, cls
def test_gnb_partial_fit():
clf = GaussianNB().fit(X, y)
clf_pf = GaussianNB().partial_fit(X, y, np.unique(y))
assert_array_almost_equal(clf.theta_, clf_pf.theta_)
assert_array_almost_equal(clf.sigma_, clf_pf.sigma_)
assert_array_almost_equal(clf.class_prior_, clf_pf.class_prior_)
clf_pf2 = GaussianNB().partial_fit(X[0::2, :], y[0::2], np.unique(y))
clf_pf2.partial_fit(X[1::2], y[1::2])
assert_array_almost_equal(clf.theta_, clf_pf2.theta_)
assert_array_almost_equal(clf.sigma_, clf_pf2.sigma_)
assert_array_almost_equal(clf.class_prior_, clf_pf2.class_prior_)
def test_discretenb_pickle():
# Test picklability of discrete naive Bayes classifiers
for cls in [BernoulliNB, MultinomialNB, GaussianNB]:
clf = cls().fit(X2, y2)
y_pred = clf.predict(X2)
store = BytesIO()
pickle.dump(clf, store)
clf = pickle.load(BytesIO(store.getvalue()))
assert_array_equal(y_pred, clf.predict(X2))
if cls is not GaussianNB:
# TODO re-enable me when partial_fit is implemented for GaussianNB
# Test pickling of estimator trained with partial_fit
clf2 = cls().partial_fit(X2[:3], y2[:3], classes=np.unique(y2))
clf2.partial_fit(X2[3:], y2[3:])
store = BytesIO()
pickle.dump(clf2, store)
clf2 = pickle.load(BytesIO(store.getvalue()))
assert_array_equal(y_pred, clf2.predict(X2))
def test_input_check_fit():
# Test input checks for the fit method
for cls in [BernoulliNB, MultinomialNB, GaussianNB]:
# check shape consistency for number of samples at fit time
assert_raises(ValueError, cls().fit, X2, y2[:-1])
# check shape consistency for number of input features at predict time
clf = cls().fit(X2, y2)
assert_raises(ValueError, clf.predict, X2[:, :-1])
def test_input_check_partial_fit():
for cls in [BernoulliNB, MultinomialNB]:
# check shape consistency
assert_raises(ValueError, cls().partial_fit, X2, y2[:-1],
classes=np.unique(y2))
# classes is required for first call to partial fit
assert_raises(ValueError, cls().partial_fit, X2, y2)
# check consistency of consecutive classes values
clf = cls()
clf.partial_fit(X2, y2, classes=np.unique(y2))
assert_raises(ValueError, clf.partial_fit, X2, y2,
classes=np.arange(42))
# check consistency of input shape for partial_fit
assert_raises(ValueError, clf.partial_fit, X2[:, :-1], y2)
# check consistency of input shape for predict
assert_raises(ValueError, clf.predict, X2[:, :-1])
def test_discretenb_predict_proba():
# Test discrete NB classes' probability scores
# The 100s below distinguish Bernoulli from multinomial.
# FIXME: write a test to show this.
X_bernoulli = [[1, 100, 0], [0, 1, 0], [0, 100, 1]]
X_multinomial = [[0, 1], [1, 3], [4, 0]]
# test binary case (1-d output)
y = [0, 0, 2] # 2 is regression test for binary case, 02e673
for cls, X in zip([BernoulliNB, MultinomialNB],
[X_bernoulli, X_multinomial]):
clf = cls().fit(X, y)
assert_equal(clf.predict(X[-1:]), 2)
assert_equal(clf.predict_proba([X[0]]).shape, (1, 2))
assert_array_almost_equal(clf.predict_proba(X[:2]).sum(axis=1),
np.array([1., 1.]), 6)
# test multiclass case (2-d output, must sum to one)
y = [0, 1, 2]
for cls, X in zip([BernoulliNB, MultinomialNB],
[X_bernoulli, X_multinomial]):
clf = cls().fit(X, y)
assert_equal(clf.predict_proba(X[0:1]).shape, (1, 3))
assert_equal(clf.predict_proba(X[:2]).shape, (2, 3))
assert_almost_equal(np.sum(clf.predict_proba([X[1]])), 1)
assert_almost_equal(np.sum(clf.predict_proba([X[-1]])), 1)
assert_almost_equal(np.sum(np.exp(clf.class_log_prior_)), 1)
assert_almost_equal(np.sum(np.exp(clf.intercept_)), 1)
def test_discretenb_uniform_prior():
# Test whether discrete NB classes fit a uniform prior
# when fit_prior=False and class_prior=None
for cls in [BernoulliNB, MultinomialNB]:
clf = cls()
clf.set_params(fit_prior=False)
clf.fit([[0], [0], [1]], [0, 0, 1])
prior = np.exp(clf.class_log_prior_)
assert_array_equal(prior, np.array([.5, .5]))
def test_discretenb_provide_prior():
# Test whether discrete NB classes use provided prior
for cls in [BernoulliNB, MultinomialNB]:
clf = cls(class_prior=[0.5, 0.5])
clf.fit([[0], [0], [1]], [0, 0, 1])
prior = np.exp(clf.class_log_prior_)
assert_array_equal(prior, np.array([.5, .5]))
# Inconsistent number of classes with prior
assert_raises(ValueError, clf.fit, [[0], [1], [2]], [0, 1, 2])
assert_raises(ValueError, clf.partial_fit, [[0], [1]], [0, 1],
classes=[0, 1, 1])
def test_discretenb_provide_prior_with_partial_fit():
# Test whether discrete NB classes use provided prior
# when using partial_fit
iris = load_iris()
iris_data1, iris_data2, iris_target1, iris_target2 = train_test_split(
iris.data, iris.target, test_size=0.4, random_state=415)
for cls in [BernoulliNB, MultinomialNB]:
for prior in [None, [0.3, 0.3, 0.4]]:
clf_full = cls(class_prior=prior)
clf_full.fit(iris.data, iris.target)
clf_partial = cls(class_prior=prior)
clf_partial.partial_fit(iris_data1, iris_target1,
classes=[0, 1, 2])
clf_partial.partial_fit(iris_data2, iris_target2)
assert_array_almost_equal(clf_full.class_log_prior_,
clf_partial.class_log_prior_)
def test_sample_weight_multiclass():
for cls in [BernoulliNB, MultinomialNB]:
# check shape consistency for number of samples at fit time
yield check_sample_weight_multiclass, cls
def check_sample_weight_multiclass(cls):
X = [
[0, 0, 1],
[0, 1, 1],
[0, 1, 1],
[1, 0, 0],
]
y = [0, 0, 1, 2]
sample_weight = np.array([1, 1, 2, 2], dtype=np.float)
sample_weight /= sample_weight.sum()
clf = cls().fit(X, y, sample_weight=sample_weight)
assert_array_equal(clf.predict(X), [0, 1, 1, 2])
# Check sample weight using the partial_fit method
clf = cls()
clf.partial_fit(X[:2], y[:2], classes=[0, 1, 2],
sample_weight=sample_weight[:2])
clf.partial_fit(X[2:3], y[2:3], sample_weight=sample_weight[2:3])
clf.partial_fit(X[3:], y[3:], sample_weight=sample_weight[3:])
assert_array_equal(clf.predict(X), [0, 1, 1, 2])
def test_sample_weight_mnb():
clf = MultinomialNB()
clf.fit([[1, 2], [1, 2], [1, 0]],
[0, 0, 1],
sample_weight=[1, 1, 4])
assert_array_equal(clf.predict([[1, 0]]), [1])
positive_prior = np.exp(clf.intercept_[0])
assert_array_almost_equal([1 - positive_prior, positive_prior],
[1 / 3., 2 / 3.])
def test_coef_intercept_shape():
# coef_ and intercept_ should have shapes as in other linear models.
# Non-regression test for issue #2127.
X = [[1, 0, 0], [1, 1, 1]]
y = [1, 2] # binary classification
for clf in [MultinomialNB(), BernoulliNB()]:
clf.fit(X, y)
assert_equal(clf.coef_.shape, (1, 3))
assert_equal(clf.intercept_.shape, (1,))
def test_check_accuracy_on_digits():
# Non regression test to make sure that any further refactoring / optim
# of the NB models do not harm the performance on a slightly non-linearly
# separable dataset
digits = load_digits()
X, y = digits.data, digits.target
binary_3v8 = np.logical_or(digits.target == 3, digits.target == 8)
X_3v8, y_3v8 = X[binary_3v8], y[binary_3v8]
# Multinomial NB
scores = cross_val_score(MultinomialNB(alpha=10), X, y, cv=10)
assert_greater(scores.mean(), 0.86)
scores = cross_val_score(MultinomialNB(alpha=10), X_3v8, y_3v8, cv=10)
assert_greater(scores.mean(), 0.94)
# Bernoulli NB
scores = cross_val_score(BernoulliNB(alpha=10), X > 4, y, cv=10)
assert_greater(scores.mean(), 0.83)
scores = cross_val_score(BernoulliNB(alpha=10), X_3v8 > 4, y_3v8, cv=10)
assert_greater(scores.mean(), 0.92)
# Gaussian NB
scores = cross_val_score(GaussianNB(), X, y, cv=10)
assert_greater(scores.mean(), 0.77)
scores = cross_val_score(GaussianNB(), X_3v8, y_3v8, cv=10)
assert_greater(scores.mean(), 0.86)
def test_feature_log_prob_bnb():
# Test for issue #4268.
# Tests that the feature log prob value computed by BernoulliNB when
# alpha=1.0 is equal to the expression given in Manning, Raghavan,
# and Schuetze's "Introduction to Information Retrieval" book:
# http://nlp.stanford.edu/IR-book/html/htmledition/the-bernoulli-model-1.html
X = np.array([[0, 0, 0], [1, 1, 0], [0, 1, 0], [1, 0, 1], [0, 1, 0]])
Y = np.array([0, 0, 1, 2, 2])
# Fit Bernoulli NB w/ alpha = 1.0
clf = BernoulliNB(alpha=1.0)
clf.fit(X, Y)
# Manually form the (log) numerator and denominator that
# constitute P(feature presence | class)
num = np.log(clf.feature_count_ + 1.0)
denom = np.tile(np.log(clf.class_count_ + 2.0), (X.shape[1], 1)).T
# Check manual estimate matches
assert_array_equal(clf.feature_log_prob_, (num - denom))
def test_bnb():
# Tests that BernoulliNB when alpha=1.0 gives the same values as
# those given for the toy example in Manning, Raghavan, and
# Schuetze's "Introduction to Information Retrieval" book:
# http://nlp.stanford.edu/IR-book/html/htmledition/the-bernoulli-model-1.html
# Training data points are:
# Chinese Beijing Chinese (class: China)
# Chinese Chinese Shanghai (class: China)
# Chinese Macao (class: China)
# Tokyo Japan Chinese (class: Japan)
# Features are Beijing, Chinese, Japan, Macao, Shanghai, and Tokyo
X = np.array([[1, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 1, 0],
[0, 1, 0, 1, 0, 0],
[0, 1, 1, 0, 0, 1]])
# Classes are China (0), Japan (1)
Y = np.array([0, 0, 0, 1])
    # Fit BernoulliNB w/ alpha = 1.0
clf = BernoulliNB(alpha=1.0)
clf.fit(X, Y)
# Check the class prior is correct
class_prior = np.array([0.75, 0.25])
assert_array_almost_equal(np.exp(clf.class_log_prior_), class_prior)
# Check the feature probabilities are correct
feature_prob = np.array([[0.4, 0.8, 0.2, 0.4, 0.4, 0.2],
[1/3.0, 2/3.0, 2/3.0, 1/3.0, 1/3.0, 2/3.0]])
assert_array_almost_equal(np.exp(clf.feature_log_prob_), feature_prob)
# Testing data point is:
# Chinese Chinese Chinese Tokyo Japan
X_test = np.array([[0, 1, 1, 0, 0, 1]])
# Check the predictive probabilities are correct
unnorm_predict_proba = np.array([[0.005183999999999999,
0.02194787379972565]])
predict_proba = unnorm_predict_proba / np.sum(unnorm_predict_proba)
assert_array_almost_equal(clf.predict_proba(X_test), predict_proba)
| bsd-3-clause |
liyi193328/seq2seq | seq2seq/contrib/learn/learn_io/pandas_io_test.py | 111 | 7865 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for pandas_io."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.learn.python.learn.learn_io import pandas_io
from tensorflow.python.framework import errors
from tensorflow.python.platform import test
from tensorflow.python.training import coordinator
from tensorflow.python.training import queue_runner_impl
# pylint: disable=g-import-not-at-top
try:
import pandas as pd
HAS_PANDAS = True
except ImportError:
HAS_PANDAS = False
class PandasIoTest(test.TestCase):
def makeTestDataFrame(self):
index = np.arange(100, 104)
a = np.arange(4)
b = np.arange(32, 36)
x = pd.DataFrame({'a': a, 'b': b}, index=index)
y = pd.Series(np.arange(-32, -28), index=index)
return x, y
def callInputFnOnce(self, input_fn, session):
results = input_fn()
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(session, coord=coord)
result_values = session.run(results)
coord.request_stop()
coord.join(threads)
return result_values
def testPandasInputFn_IndexMismatch(self):
if not HAS_PANDAS:
return
x, _ = self.makeTestDataFrame()
y_noindex = pd.Series(np.arange(-32, -28))
with self.assertRaises(ValueError):
pandas_io.pandas_input_fn(
x, y_noindex, batch_size=2, shuffle=False, num_epochs=1)
def testPandasInputFn_ProducesExpectedOutputs(self):
if not HAS_PANDAS:
return
with self.test_session() as session:
x, y = self.makeTestDataFrame()
input_fn = pandas_io.pandas_input_fn(
x, y, batch_size=2, shuffle=False, num_epochs=1)
features, target = self.callInputFnOnce(input_fn, session)
self.assertAllEqual(features['a'], [0, 1])
self.assertAllEqual(features['b'], [32, 33])
self.assertAllEqual(target, [-32, -31])
def testPandasInputFn_ProducesOutputsForLargeBatchAndMultipleEpochs(self):
if not HAS_PANDAS:
return
with self.test_session() as session:
index = np.arange(100, 102)
a = np.arange(2)
b = np.arange(32, 34)
x = pd.DataFrame({'a': a, 'b': b}, index=index)
y = pd.Series(np.arange(-32, -30), index=index)
input_fn = pandas_io.pandas_input_fn(
x, y, batch_size=128, shuffle=False, num_epochs=2)
results = input_fn()
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(session, coord=coord)
features, target = session.run(results)
self.assertAllEqual(features['a'], [0, 1, 0, 1])
self.assertAllEqual(features['b'], [32, 33, 32, 33])
self.assertAllEqual(target, [-32, -31, -32, -31])
with self.assertRaises(errors.OutOfRangeError):
session.run(results)
coord.request_stop()
coord.join(threads)
def testPandasInputFn_ProducesOutputsWhenDataSizeNotDividedByBatchSize(self):
if not HAS_PANDAS:
return
with self.test_session() as session:
index = np.arange(100, 105)
a = np.arange(5)
b = np.arange(32, 37)
x = pd.DataFrame({'a': a, 'b': b}, index=index)
y = pd.Series(np.arange(-32, -27), index=index)
input_fn = pandas_io.pandas_input_fn(
x, y, batch_size=2, shuffle=False, num_epochs=1)
results = input_fn()
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(session, coord=coord)
features, target = session.run(results)
self.assertAllEqual(features['a'], [0, 1])
self.assertAllEqual(features['b'], [32, 33])
self.assertAllEqual(target, [-32, -31])
features, target = session.run(results)
self.assertAllEqual(features['a'], [2, 3])
self.assertAllEqual(features['b'], [34, 35])
self.assertAllEqual(target, [-30, -29])
features, target = session.run(results)
self.assertAllEqual(features['a'], [4])
self.assertAllEqual(features['b'], [36])
self.assertAllEqual(target, [-28])
with self.assertRaises(errors.OutOfRangeError):
session.run(results)
coord.request_stop()
coord.join(threads)
def testPandasInputFn_OnlyX(self):
if not HAS_PANDAS:
return
with self.test_session() as session:
x, _ = self.makeTestDataFrame()
input_fn = pandas_io.pandas_input_fn(
x, y=None, batch_size=2, shuffle=False, num_epochs=1)
features = self.callInputFnOnce(input_fn, session)
self.assertAllEqual(features['a'], [0, 1])
self.assertAllEqual(features['b'], [32, 33])
def testPandasInputFn_ExcludesIndex(self):
if not HAS_PANDAS:
return
with self.test_session() as session:
x, y = self.makeTestDataFrame()
input_fn = pandas_io.pandas_input_fn(
x, y, batch_size=2, shuffle=False, num_epochs=1)
features, _ = self.callInputFnOnce(input_fn, session)
self.assertFalse('index' in features)
def assertInputsCallableNTimes(self, input_fn, session, n):
inputs = input_fn()
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(session, coord=coord)
for _ in range(n):
session.run(inputs)
with self.assertRaises(errors.OutOfRangeError):
session.run(inputs)
coord.request_stop()
coord.join(threads)
def testPandasInputFn_RespectsEpoch_NoShuffle(self):
if not HAS_PANDAS:
return
with self.test_session() as session:
x, y = self.makeTestDataFrame()
input_fn = pandas_io.pandas_input_fn(
x, y, batch_size=4, shuffle=False, num_epochs=1)
self.assertInputsCallableNTimes(input_fn, session, 1)
def testPandasInputFn_RespectsEpoch_WithShuffle(self):
if not HAS_PANDAS:
return
with self.test_session() as session:
x, y = self.makeTestDataFrame()
input_fn = pandas_io.pandas_input_fn(
x, y, batch_size=4, shuffle=True, num_epochs=1)
self.assertInputsCallableNTimes(input_fn, session, 1)
def testPandasInputFn_RespectsEpoch_WithShuffleAutosize(self):
if not HAS_PANDAS:
return
with self.test_session() as session:
x, y = self.makeTestDataFrame()
input_fn = pandas_io.pandas_input_fn(
x, y, batch_size=2, shuffle=True, queue_capacity=None, num_epochs=2)
self.assertInputsCallableNTimes(input_fn, session, 4)
def testPandasInputFn_RespectsEpochUnevenBatches(self):
if not HAS_PANDAS:
return
x, y = self.makeTestDataFrame()
with self.test_session() as session:
input_fn = pandas_io.pandas_input_fn(
x, y, batch_size=3, shuffle=False, num_epochs=1)
# Before the last batch, only one element of the epoch should remain.
self.assertInputsCallableNTimes(input_fn, session, 2)
def testPandasInputFn_Idempotent(self):
if not HAS_PANDAS:
return
x, y = self.makeTestDataFrame()
for _ in range(2):
pandas_io.pandas_input_fn(
x, y, batch_size=2, shuffle=False, num_epochs=1)()
for _ in range(2):
pandas_io.pandas_input_fn(
x, y, batch_size=2, shuffle=True, num_epochs=1)()
if __name__ == '__main__':
test.main()
| apache-2.0 |
mpi2/PhenotypeData | external_tools/src/main/python/images/qc_get_urls_from_download_file_path.py | 1 | 3400 | """Get urls from nfs file paths
This script uses the download_file_paths from Solr (the imagename column
in the csv file from x-ray QC) to obtain the download_urls in the csv
file provided by Federico.
The download_file_paths are nfs paths of the form:
impc_base_dir/site/pipeline_stable_id/
procedure_stable_id/parameter_stable_id/filename
These need to be mapped to the download_urls for DCC.
"""
from pathlib import Path
import argparse
import pandas as pd
from qc_mappings import PARAMETER_ID_TO_CLASS_MAP, CLASS_TO_PARAMETER_ID_MAP
# Local function to get parameters using class labels
def _parameter_from_class(classlabel, prefix="IMPC"):
try:
return prefix + "_XRY_" + \
CLASS_TO_PARAMETER_ID_MAP[int(classlabel)]+"_001"
except (ValueError, KeyError,):
return "UNKNOWN_PARAMETER_STABLE_ID"
except:
return "PARAMETER_MAP_ERROR"
parser = argparse.ArgumentParser(
description = "Get urls from nfs file paths"
)
parser.add_argument(
'-u', '--url-csv-path', dest='url_csv_path', required=True,
help='path to csv containing urls. This is normally provided by ' + \
'Federico at the start of the data-release'
)
parser.add_argument(
'-i', '--input-base-dir', dest='input_base_dir', required=True,
help='Base directory containing verified QC files'
)
parser.add_argument(
'-o', '--output-base-dir', dest='output_base_dir',
    help='Directory to store output containing mapped csvs. Defaults to ' + \
'input-base-dir if not supplied'
)
args = parser.parse_args()
# Read in file with urls and create key using pipeline, procedure, parameter and filename
df_urls = pd.read_csv(args.url_csv_path)
df_urls['key'] = df_urls['pipeline_stable_id'] + df_urls['procedure_stable_id'] + df_urls['parameter_stable_id'] + df_urls['download_file_path'].map(lambda x: x.split('/')[-1])
df_urls = df_urls.set_index('key')
input_base_dir = Path(args.input_base_dir)
if args.output_base_dir is None:
output_base_dir = Path(args.input_base_dir)
else:
output_base_dir = Path(args.output_base_dir)
to_process = [str(p) for p in input_base_dir.glob('**/*structures*processed.csv')]
for fpath in to_process:
fname = fpath.split('/')[-1]
# Get prefix for parameter_stable_id
prefix = fname.split('_')[1]
df = pd.read_csv(fpath)
if 'verified_classlabel' not in df.columns:
print(f"No 'verified classlabel' column in {fname} - not processing")
continue
# Filter out records with wrong parameter IDs
parameter_id = fname.split('_')[3]
expected_label = PARAMETER_ID_TO_CLASS_MAP[parameter_id]
df = df[df['verified_classlabel'] != expected_label]
if len(df) == 0:
print(f"No incorrectly annotated images for {fname}")
continue
df['key'] = df['imagename'].map(lambda s: "".join(s.split('/')[-4:]))
df.set_index('key', inplace=True)
df['download_file_path'] = df_urls.loc[df.index]['download_file_path']
df['correct_parameter_id'] = \
df['verified_classlabel'].map(lambda x: _parameter_from_class(x,prefix))
df.sort_values('correct_parameter_id', inplace=True)
out_fname = fname[:-4]+"_url.csv"
out_path = output_base_dir.joinpath(out_fname)
df.to_csv(out_path, index=False, columns=['download_file_path', 'correct_parameter_id'])
print(f"Written output to {out_path}")
| apache-2.0 |
J535D165/recordlinkage | tests/test_measures.py | 1 | 4180 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import division
import recordlinkage as rl
import numpy
import pandas
FULL_INDEX = pandas.MultiIndex.from_product(
[[1, 2, 3], [1, 2, 3]], # 3x3 matrix
names=['first', 'second'])
LINKS_TRUE = pandas.MultiIndex.from_tuples(
[(1, 1), (2, 2), (3, 3)], # the diagonal
names=['first', 'second'])
LINKS_PRED = pandas.MultiIndex.from_tuples(
[(1, 1), (2, 1), (3, 1), (1, 2)], # L shape
names=['first', 'second'])
class TestMeasures(object):
def test_confusion_matrix(self):
result_len = rl.confusion_matrix(LINKS_TRUE, LINKS_PRED, len(FULL_INDEX))
result_full_index = rl.confusion_matrix(LINKS_TRUE, LINKS_PRED, FULL_INDEX)
expected = numpy.array([[1, 2], [3, 3]])
numpy.testing.assert_array_equal(result_len, expected)
numpy.testing.assert_array_equal(result_full_index, expected)
def test_tp_fp_tn_fn(self):
tp = rl.true_positives(LINKS_TRUE, LINKS_PRED)
assert tp == 1
fp = rl.false_positives(LINKS_TRUE, LINKS_PRED)
assert fp == 3
tn = rl.true_negatives(LINKS_TRUE, LINKS_PRED, len(FULL_INDEX))
assert tn == 3
fn = rl.false_negatives(LINKS_TRUE, LINKS_PRED)
assert fn == 2
def test_recall(self):
# confusion matrix
cm = rl.confusion_matrix(LINKS_TRUE, LINKS_PRED)
assert rl.recall(LINKS_TRUE, LINKS_PRED) == 1 / 3
assert rl.recall(cm) == 1 / 3
def test_precision(self):
# confusion matrix
cm = rl.confusion_matrix(LINKS_TRUE, LINKS_PRED, len(FULL_INDEX))
assert rl.precision(LINKS_TRUE, LINKS_PRED) == 1 / 4
assert rl.precision(cm) == 1 / 4
def test_accuracy(self):
# confusion matrix
cm = rl.confusion_matrix(LINKS_TRUE, LINKS_PRED, len(FULL_INDEX))
assert rl.accuracy(LINKS_TRUE, LINKS_PRED, len(FULL_INDEX)) == 4 / 9
assert rl.accuracy(cm) == 4 / 9
assert rl.accuracy(LINKS_TRUE, LINKS_PRED, FULL_INDEX) == 4 / 9
def test_specificity(self):
# confusion matrix
cm = rl.confusion_matrix(LINKS_TRUE, LINKS_PRED, len(FULL_INDEX))
assert rl.specificity(LINKS_TRUE, LINKS_PRED, len(FULL_INDEX)) == 1 / 2
assert rl.specificity(cm) == 1 / 2
assert rl.specificity(LINKS_TRUE, LINKS_PRED, FULL_INDEX) == 1 / 2
def test_fscore(self):
# confusion matrix
cm = rl.confusion_matrix(LINKS_TRUE, LINKS_PRED, len(FULL_INDEX))
prec = rl.precision(LINKS_TRUE, LINKS_PRED)
rec = rl.recall(LINKS_TRUE, LINKS_PRED)
expected = float(2 * prec * rec / (prec + rec))
assert rl.fscore(LINKS_TRUE, LINKS_PRED) == expected
assert rl.fscore(cm) == expected
def test_full_index_size(self):
df_a = pandas.DataFrame(numpy.arange(10))
df_b = pandas.DataFrame(numpy.arange(10))
assert rl.full_index_size(df_a) == 45
assert rl.full_index_size(len(df_a)) == 45
assert rl.full_index_size((len(df_a))) == 45
assert rl.full_index_size([len(df_a)]) == 45
assert rl.full_index_size(df_a, df_b) == 100
assert rl.full_index_size(len(df_a), len(df_b)) == 100
assert rl.full_index_size((len(df_a), len(df_b))) == 100
assert rl.full_index_size([len(df_a), len(df_b)]) == 100
def test_reduction_ratio(self):
df_a = pandas.DataFrame(numpy.arange(10))
df_b = pandas.DataFrame(numpy.arange(10))
candidate_pairs_link = pandas.MultiIndex.from_product(
[[1, 2, 3, 4, 5], [1, 2, 3, 4, 5]])
candidate_pairs_dedup = pandas.MultiIndex.from_arrays(
[[1, 2, 3, 4, 5], [1, 2, 3, 4, 5]])
assert rl.reduction_ratio(candidate_pairs_dedup, df_a) == 8 / 9
assert rl.reduction_ratio(candidate_pairs_dedup, (df_a)) == 8 / 9
assert rl.reduction_ratio(candidate_pairs_dedup, (df_a, )) == 8 / 9
assert rl.reduction_ratio(candidate_pairs_link, df_a, df_b) == 3 / 4
assert rl.reduction_ratio(candidate_pairs_link, (df_a, df_b)) == 3 / 4
assert rl.reduction_ratio(candidate_pairs_link, [df_a, df_b]) == 3 / 4
| bsd-3-clause |
lenovor/scikit-learn | examples/svm/plot_oneclass.py | 249 | 2302 | """
==========================================
One-class SVM with non-linear kernel (RBF)
==========================================
An example using a one-class SVM for novelty detection.
:ref:`One-class SVM <svm_outlier_detection>` is an unsupervised
algorithm that learns a decision function for novelty detection:
classifying new data as similar or different to the training set.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.font_manager
from sklearn import svm
xx, yy = np.meshgrid(np.linspace(-5, 5, 500), np.linspace(-5, 5, 500))
# Generate train data
X = 0.3 * np.random.randn(100, 2)
X_train = np.r_[X + 2, X - 2]
# Generate some regular novel observations
X = 0.3 * np.random.randn(20, 2)
X_test = np.r_[X + 2, X - 2]
# Generate some abnormal novel observations
X_outliers = np.random.uniform(low=-4, high=4, size=(20, 2))
# fit the model
clf = svm.OneClassSVM(nu=0.1, kernel="rbf", gamma=0.1)
clf.fit(X_train)
y_pred_train = clf.predict(X_train)
y_pred_test = clf.predict(X_test)
y_pred_outliers = clf.predict(X_outliers)
n_error_train = y_pred_train[y_pred_train == -1].size
n_error_test = y_pred_test[y_pred_test == -1].size
n_error_outliers = y_pred_outliers[y_pred_outliers == 1].size
# plot the line, the points, and the nearest vectors to the plane
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
plt.title("Novelty Detection")
plt.contourf(xx, yy, Z, levels=np.linspace(Z.min(), 0, 7), cmap=plt.cm.Blues_r)
a = plt.contour(xx, yy, Z, levels=[0], linewidths=2, colors='red')
plt.contourf(xx, yy, Z, levels=[0, Z.max()], colors='orange')
b1 = plt.scatter(X_train[:, 0], X_train[:, 1], c='white')
b2 = plt.scatter(X_test[:, 0], X_test[:, 1], c='green')
c = plt.scatter(X_outliers[:, 0], X_outliers[:, 1], c='red')
plt.axis('tight')
plt.xlim((-5, 5))
plt.ylim((-5, 5))
plt.legend([a.collections[0], b1, b2, c],
["learned frontier", "training observations",
"new regular observations", "new abnormal observations"],
loc="upper left",
prop=matplotlib.font_manager.FontProperties(size=11))
plt.xlabel(
"error train: %d/200 ; errors novel regular: %d/40 ; "
"errors novel abnormal: %d/40"
% (n_error_train, n_error_test, n_error_outliers))
plt.show()
| bsd-3-clause |
whitews/dpconverge | dpconverge/data_collection.py | 1 | 6939 | from data_set import DataSet
from flowstats import cluster
import numpy as np
import pandas as pd
import multiprocessing
def bem_cluster(input_dict):
model = cluster.DPMixtureModel(
input_dict['component_count'],
input_dict['iteration_count'],
burn_in=0,
model='bem'
)
bem_results = model.fit(
input_dict['data'],
0,
seed=input_dict['random_seed'],
munkres_id=False,
verbose=True
)
dp_mixture_iter = bem_results.get_iteration(0)
log_like = dp_mixture_iter.log_likelihood(input_dict['data'])
print log_like
true_comp_count = np.sum(bem_results.pis > 0.0001)
return {
'comp': input_dict['component_count'],
'true_comp': true_comp_count,
'seed': input_dict['random_seed'],
'log_like': log_like
}
class DataCollection(object):
"""
A collection of DataSet objects
"""
def __init__(self):
self._parameter_count = None
self.data_sets = []
@property
def data_set_count(self):
return len(self.data_sets)
def add_data_set(self, data_set):
if not isinstance(data_set, DataSet):
raise TypeError("data_set must be of type DataSet")
if self._parameter_count is None:
self._parameter_count = data_set.parameter_count
if self._parameter_count != data_set.parameter_count:
raise ValueError(
"Data set parameter count must match the existing data sets"
)
else:
self.data_sets.append(data_set)
def reset_results(self):
for ds in self.data_sets:
ds.results = None
ds.raw_results = None
def estimate_initial_conditions(self, max_comp=128, max_iter=5000):
# now run bem on the combined data set to get initial conditions
max_log_like = None # the highest value for all runs
converged = False
component_count = max_comp
iteration_count = max_iter
results = [] # will be a list of dicts to convert to a DataFrame
cpu_count = multiprocessing.cpu_count()
bem_pool = multiprocessing.Pool(processes=cpu_count)
data = np.vstack(
[np.vstack(ds.blobs.values()) for ds in self.data_sets]
)
while not converged:
print component_count
new_comp_counts = []
# set of dictionaries for this comp run, one for each seed
input_dicts = [
{
'data': data,
'component_count': component_count,
'iteration_count': iteration_count,
'random_seed': seed
} for seed in range(1, 17)
]
tmp_results_list = bem_pool.map(bem_cluster, input_dicts)
for r in tmp_results_list:
if r['log_like'] > max_log_like:
max_log_like = r['log_like']
for r in tmp_results_list:
# if the new log_like is close to the max (within 1%),
# see if there are any empty components (pi < 0.0001)
if abs(max_log_like - r['log_like']) < abs(max_log_like * 0.01):
new_comp_counts.append(r['true_comp'])
# save good run to our results
results.append(r)
if len(new_comp_counts) > 0:
if int(np.mean(new_comp_counts)) < component_count:
component_count = int(np.min(new_comp_counts))
else:
converged = True
else:
converged = True
results_df = pd.DataFrame(
results,
columns=['comp', 'true_comp', 'seed', 'log_like']
)
min_comp = results_df.comp.min()
best_index = results_df[results_df.comp == min_comp].log_like.argmax()
best_run = results[best_index]
# create a data set that's the combination of all data sets
prelim_ds = DataSet(parameter_count=self._parameter_count)
for i, ds in enumerate(self.data_sets):
# start blob labels at 1 (i + 1)
prelim_ds.add_blob(i + 1, np.vstack(ds.blobs.values()))
prelim_ds.cluster(
component_count=best_run['comp'],
burn_in=0,
iteration_count=iteration_count,
random_seed=best_run['seed'],
model='bem'
)
log_like = prelim_ds.get_log_likelihood_trace()[0]
print log_like
# get classifications to calculate weights for each data set
pis = []
for label in sorted(prelim_ds.labels):
label_classes = prelim_ds.get_classifications(0, [label])
ds_pis = []
for c in range(best_run['comp']):
ds_pis.append(np.sum(label_classes == c) / float(len(label_classes)))
pis.append(ds_pis) # list of lists
# convert LoL pis to numpy array
pis = np.array(pis)
prelim_ds.plot_classifications(0)
# Re-run a chain using the initial conditions from the last iteration
last_iter = prelim_ds.raw_results.get_iteration(0)
initial_conditions = {
'pis': pis,
'mus': last_iter.mus,
'sigmas': last_iter.sigmas
}
return best_run['comp'], initial_conditions
def cluster(
self,
component_count,
burn_in,
iteration_count,
random_seed,
initial_conditions=None
):
# local 'data_sets' holds the raw data values for each DataSet
data_sets = list()
for ds in self.data_sets:
data = np.vstack(ds.blobs.values())
if data.size == 0:
raise ValueError("Found an empty data set")
data_sets.append(data)
if len(data_sets) < 2:
# nothing for us to do
raise ValueError("HDP needs at least 2 data sets")
model = cluster.HDPMixtureModel(
component_count,
iteration_count,
burn_in
)
if initial_conditions is not None:
            # should validate the keys of initial_conditions here; the
            # shapes & values should be taken care of in FlowStats
initial_weights = initial_conditions['pis']
model.load_mu(initial_conditions['mus'])
model.load_sigma(initial_conditions['sigmas'])
else:
initial_weights = None
fitted_results = model.fit(
data_sets,
0,
seed=random_seed,
munkres_id=False,
verbose=True,
initial_weights=initial_weights
)
# save results for each DataSet
for i, ds in enumerate(self.data_sets):
ds.add_results(fitted_results[i])
return fitted_results
| bsd-3-clause |
NunoEdgarGub1/scikit-learn | sklearn/datasets/tests/test_svmlight_format.py | 228 | 11221 | from bz2 import BZ2File
import gzip
from io import BytesIO
import numpy as np
import os
import shutil
from tempfile import NamedTemporaryFile
from sklearn.externals.six import b
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import raises
from sklearn.utils.testing import assert_in
import sklearn
from sklearn.datasets import (load_svmlight_file, load_svmlight_files,
dump_svmlight_file)
currdir = os.path.dirname(os.path.abspath(__file__))
datafile = os.path.join(currdir, "data", "svmlight_classification.txt")
multifile = os.path.join(currdir, "data", "svmlight_multilabel.txt")
invalidfile = os.path.join(currdir, "data", "svmlight_invalid.txt")
invalidfile2 = os.path.join(currdir, "data", "svmlight_invalid_order.txt")
def test_load_svmlight_file():
X, y = load_svmlight_file(datafile)
# test X's shape
assert_equal(X.indptr.shape[0], 7)
assert_equal(X.shape[0], 6)
assert_equal(X.shape[1], 21)
assert_equal(y.shape[0], 6)
# test X's non-zero values
for i, j, val in ((0, 2, 2.5), (0, 10, -5.2), (0, 15, 1.5),
(1, 5, 1.0), (1, 12, -3),
(2, 20, 27)):
assert_equal(X[i, j], val)
# tests X's zero values
assert_equal(X[0, 3], 0)
assert_equal(X[0, 5], 0)
assert_equal(X[1, 8], 0)
assert_equal(X[1, 16], 0)
assert_equal(X[2, 18], 0)
# test can change X's values
X[0, 2] *= 2
assert_equal(X[0, 2], 5)
# test y
assert_array_equal(y, [1, 2, 3, 4, 1, 2])
def test_load_svmlight_file_fd():
# test loading from file descriptor
X1, y1 = load_svmlight_file(datafile)
fd = os.open(datafile, os.O_RDONLY)
try:
X2, y2 = load_svmlight_file(fd)
assert_array_equal(X1.data, X2.data)
assert_array_equal(y1, y2)
finally:
os.close(fd)
def test_load_svmlight_file_multilabel():
X, y = load_svmlight_file(multifile, multilabel=True)
assert_equal(y, [(0, 1), (2,), (), (1, 2)])
def test_load_svmlight_files():
X_train, y_train, X_test, y_test = load_svmlight_files([datafile] * 2,
dtype=np.float32)
assert_array_equal(X_train.toarray(), X_test.toarray())
assert_array_equal(y_train, y_test)
assert_equal(X_train.dtype, np.float32)
assert_equal(X_test.dtype, np.float32)
X1, y1, X2, y2, X3, y3 = load_svmlight_files([datafile] * 3,
dtype=np.float64)
assert_equal(X1.dtype, X2.dtype)
assert_equal(X2.dtype, X3.dtype)
assert_equal(X3.dtype, np.float64)
def test_load_svmlight_file_n_features():
X, y = load_svmlight_file(datafile, n_features=22)
# test X'shape
assert_equal(X.indptr.shape[0], 7)
assert_equal(X.shape[0], 6)
assert_equal(X.shape[1], 22)
# test X's non-zero values
for i, j, val in ((0, 2, 2.5), (0, 10, -5.2),
(1, 5, 1.0), (1, 12, -3)):
assert_equal(X[i, j], val)
# 21 features in file
assert_raises(ValueError, load_svmlight_file, datafile, n_features=20)
def test_load_compressed():
X, y = load_svmlight_file(datafile)
with NamedTemporaryFile(prefix="sklearn-test", suffix=".gz") as tmp:
tmp.close() # necessary under windows
with open(datafile, "rb") as f:
shutil.copyfileobj(f, gzip.open(tmp.name, "wb"))
Xgz, ygz = load_svmlight_file(tmp.name)
# because we "close" it manually and write to it,
# we need to remove it manually.
os.remove(tmp.name)
assert_array_equal(X.toarray(), Xgz.toarray())
assert_array_equal(y, ygz)
with NamedTemporaryFile(prefix="sklearn-test", suffix=".bz2") as tmp:
tmp.close() # necessary under windows
with open(datafile, "rb") as f:
shutil.copyfileobj(f, BZ2File(tmp.name, "wb"))
Xbz, ybz = load_svmlight_file(tmp.name)
# because we "close" it manually and write to it,
# we need to remove it manually.
os.remove(tmp.name)
assert_array_equal(X.toarray(), Xbz.toarray())
assert_array_equal(y, ybz)
@raises(ValueError)
def test_load_invalid_file():
load_svmlight_file(invalidfile)
@raises(ValueError)
def test_load_invalid_order_file():
load_svmlight_file(invalidfile2)
@raises(ValueError)
def test_load_zero_based():
f = BytesIO(b("-1 4:1.\n1 0:1\n"))
load_svmlight_file(f, zero_based=False)
def test_load_zero_based_auto():
data1 = b("-1 1:1 2:2 3:3\n")
data2 = b("-1 0:0 1:1\n")
f1 = BytesIO(data1)
X, y = load_svmlight_file(f1, zero_based="auto")
assert_equal(X.shape, (1, 3))
f1 = BytesIO(data1)
f2 = BytesIO(data2)
X1, y1, X2, y2 = load_svmlight_files([f1, f2], zero_based="auto")
assert_equal(X1.shape, (1, 4))
assert_equal(X2.shape, (1, 4))
def test_load_with_qid():
# load svmfile with qid attribute
data = b("""
3 qid:1 1:0.53 2:0.12
2 qid:1 1:0.13 2:0.1
7 qid:2 1:0.87 2:0.12""")
X, y = load_svmlight_file(BytesIO(data), query_id=False)
assert_array_equal(y, [3, 2, 7])
assert_array_equal(X.toarray(), [[.53, .12], [.13, .1], [.87, .12]])
res1 = load_svmlight_files([BytesIO(data)], query_id=True)
res2 = load_svmlight_file(BytesIO(data), query_id=True)
for X, y, qid in (res1, res2):
assert_array_equal(y, [3, 2, 7])
assert_array_equal(qid, [1, 1, 2])
assert_array_equal(X.toarray(), [[.53, .12], [.13, .1], [.87, .12]])
@raises(ValueError)
def test_load_invalid_file2():
load_svmlight_files([datafile, invalidfile, datafile])
@raises(TypeError)
def test_not_a_filename():
# in python 3 integers are valid file opening arguments (taken as unix
# file descriptors)
load_svmlight_file(.42)
@raises(IOError)
def test_invalid_filename():
load_svmlight_file("trou pic nic douille")
def test_dump():
Xs, y = load_svmlight_file(datafile)
Xd = Xs.toarray()
# slicing a csr_matrix can unsort its .indices, so test that we sort
# those correctly
Xsliced = Xs[np.arange(Xs.shape[0])]
for X in (Xs, Xd, Xsliced):
for zero_based in (True, False):
for dtype in [np.float32, np.float64, np.int32]:
f = BytesIO()
# we need to pass a comment to get the version info in;
# LibSVM doesn't grok comments so they're not put in by
# default anymore.
dump_svmlight_file(X.astype(dtype), y, f, comment="test",
zero_based=zero_based)
f.seek(0)
comment = f.readline()
try:
comment = str(comment, "utf-8")
except TypeError: # fails in Python 2.x
pass
assert_in("scikit-learn %s" % sklearn.__version__, comment)
comment = f.readline()
try:
comment = str(comment, "utf-8")
except TypeError: # fails in Python 2.x
pass
assert_in(["one", "zero"][zero_based] + "-based", comment)
X2, y2 = load_svmlight_file(f, dtype=dtype,
zero_based=zero_based)
assert_equal(X2.dtype, dtype)
assert_array_equal(X2.sorted_indices().indices, X2.indices)
if dtype == np.float32:
assert_array_almost_equal(
# allow a rounding error at the last decimal place
Xd.astype(dtype), X2.toarray(), 4)
else:
assert_array_almost_equal(
# allow a rounding error at the last decimal place
Xd.astype(dtype), X2.toarray(), 15)
assert_array_equal(y, y2)
def test_dump_multilabel():
X = [[1, 0, 3, 0, 5],
[0, 0, 0, 0, 0],
[0, 5, 0, 1, 0]]
y = [[0, 1, 0], [1, 0, 1], [1, 1, 0]]
f = BytesIO()
dump_svmlight_file(X, y, f, multilabel=True)
f.seek(0)
# make sure it dumps multilabel correctly
assert_equal(f.readline(), b("1 0:1 2:3 4:5\n"))
assert_equal(f.readline(), b("0,2 \n"))
assert_equal(f.readline(), b("0,1 1:5 3:1\n"))
def test_dump_concise():
one = 1
two = 2.1
three = 3.01
exact = 1.000000000000001
# loses the last decimal place
almost = 1.0000000000000001
X = [[one, two, three, exact, almost],
[1e9, 2e18, 3e27, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0]]
y = [one, two, three, exact, almost]
f = BytesIO()
dump_svmlight_file(X, y, f)
f.seek(0)
# make sure it's using the most concise format possible
assert_equal(f.readline(),
b("1 0:1 1:2.1 2:3.01 3:1.000000000000001 4:1\n"))
assert_equal(f.readline(), b("2.1 0:1000000000 1:2e+18 2:3e+27\n"))
assert_equal(f.readline(), b("3.01 \n"))
assert_equal(f.readline(), b("1.000000000000001 \n"))
assert_equal(f.readline(), b("1 \n"))
f.seek(0)
# make sure it's correct too :)
X2, y2 = load_svmlight_file(f)
assert_array_almost_equal(X, X2.toarray())
assert_array_equal(y, y2)
def test_dump_comment():
X, y = load_svmlight_file(datafile)
X = X.toarray()
f = BytesIO()
ascii_comment = "This is a comment\nspanning multiple lines."
dump_svmlight_file(X, y, f, comment=ascii_comment, zero_based=False)
f.seek(0)
X2, y2 = load_svmlight_file(f, zero_based=False)
assert_array_almost_equal(X, X2.toarray())
assert_array_equal(y, y2)
# XXX we have to update this to support Python 3.x
utf8_comment = b("It is true that\n\xc2\xbd\xc2\xb2 = \xc2\xbc")
f = BytesIO()
assert_raises(UnicodeDecodeError,
dump_svmlight_file, X, y, f, comment=utf8_comment)
unicode_comment = utf8_comment.decode("utf-8")
f = BytesIO()
dump_svmlight_file(X, y, f, comment=unicode_comment, zero_based=False)
f.seek(0)
X2, y2 = load_svmlight_file(f, zero_based=False)
assert_array_almost_equal(X, X2.toarray())
assert_array_equal(y, y2)
f = BytesIO()
assert_raises(ValueError,
dump_svmlight_file, X, y, f, comment="I've got a \0.")
def test_dump_invalid():
X, y = load_svmlight_file(datafile)
f = BytesIO()
y2d = [y]
assert_raises(ValueError, dump_svmlight_file, X, y2d, f)
f = BytesIO()
assert_raises(ValueError, dump_svmlight_file, X, y[:-1], f)
def test_dump_query_id():
# test dumping a file with query_id
X, y = load_svmlight_file(datafile)
X = X.toarray()
query_id = np.arange(X.shape[0]) // 2
f = BytesIO()
dump_svmlight_file(X, y, f, query_id=query_id, zero_based=True)
f.seek(0)
X1, y1, query_id1 = load_svmlight_file(f, query_id=True, zero_based=True)
assert_array_almost_equal(X, X1.toarray())
assert_array_almost_equal(y, y1)
assert_array_almost_equal(query_id, query_id1)
| bsd-3-clause |
andrewnc/scikit-learn | sklearn/utils/tests/test_fixes.py | 281 | 1829 | # Authors: Gael Varoquaux <[email protected]>
# Justin Vincent
# Lars Buitinck
# License: BSD 3 clause
import numpy as np
from nose.tools import assert_equal
from nose.tools import assert_false
from nose.tools import assert_true
from numpy.testing import (assert_almost_equal,
assert_array_almost_equal)
from sklearn.utils.fixes import divide, expit
from sklearn.utils.fixes import astype
def test_expit():
# Check numerical stability of expit (logistic function).
# Simulate our previous Cython implementation, based on
    # http://fa.bianp.net/blog/2013/numerical-optimizers-for-logistic-regression
assert_almost_equal(expit(1000.), 1. / (1. + np.exp(-1000.)), decimal=16)
assert_almost_equal(expit(-1000.), np.exp(-1000.) / (1. + np.exp(-1000.)),
decimal=16)
x = np.arange(10)
out = np.zeros_like(x, dtype=np.float32)
assert_array_almost_equal(expit(x), expit(x, out=out))
def test_divide():
assert_equal(divide(.6, 1), .600000000000)
def test_astype_copy_memory():
a_int32 = np.ones(3, np.int32)
# Check that dtype conversion works
b_float32 = astype(a_int32, dtype=np.float32, copy=False)
assert_equal(b_float32.dtype, np.float32)
# Changing dtype forces a copy even if copy=False
assert_false(np.may_share_memory(b_float32, a_int32))
# Check that copy can be skipped if requested dtype match
c_int32 = astype(a_int32, dtype=np.int32, copy=False)
assert_true(c_int32 is a_int32)
# Check that copy can be forced, and is the case by default:
d_int32 = astype(a_int32, dtype=np.int32, copy=True)
assert_false(np.may_share_memory(d_int32, a_int32))
e_int32 = astype(a_int32, dtype=np.int32)
assert_false(np.may_share_memory(e_int32, a_int32))
| bsd-3-clause |
yhat/db.py | db/column.py | 1 | 7909 | from prettytable import PrettyTable
import pandas as pd
class Column(object):
"""
    A Column is an in-memory reference to a column in a particular table. You
can use it to do some basic DB exploration and you can also use it to
execute simple queries.
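    Example (sketch; DemoDB is the demo database used throughout the method
    docstrings below):
        >>> from db import DemoDB
        >>> db = DemoDB()
        >>> db.tables.Customer.City.head(2)
        0    Sao Jose dos Campos
        1              Stuttgart
        Name: City, dtype: object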
"""
def __init__(self, con, query_templates, schema, table, name, dtype, keys_per_column):
self._con = con
self._query_templates = query_templates
self.schema = schema
self.table = table
self.name = name
self.type = dtype
self.keys_per_column = keys_per_column
self.foreign_keys = []
self.ref_keys = []
def __repr__(self):
tbl = PrettyTable(["Table", "Name", "Type", "Foreign Keys",
"Reference Keys"])
tbl.add_row([self.table, self.name, self.type, self._str_foreign_keys(),
self._str_ref_keys()])
return str(tbl)
def __str__(self):
return "Column({0})<{1}>".format(self.name, self.__hash__())
def _repr_html_(self):
tbl = PrettyTable(["Table", "Name", "Type"])
tbl.add_row([self.table, self.name, self.type])
return tbl.get_html_string()
def _str_foreign_keys(self):
keys = []
for col in self.foreign_keys:
keys.append("%s.%s" % (col.table, col.name))
if self.keys_per_column is not None and len(keys) > self.keys_per_column:
keys = keys[0:self.keys_per_column] + ['(+ {0} more)'.format(len(keys) - self.keys_per_column)]
return ", ".join(keys)
def _str_ref_keys(self):
keys = []
for col in self.ref_keys:
keys.append("%s.%s" % (col.table, col.name))
if self.keys_per_column is not None and len(keys) > self.keys_per_column:
keys = keys[0:self.keys_per_column] + ['(+ {0} more)'.format(len(keys) - self.keys_per_column)]
return ", ".join(keys)
def head(self, n=6):
"""
Returns first n values of your column as a DataFrame. This is executing:
SELECT
<name_of_the_column>
FROM
<name_of_the_table>
LIMIT <n>
Parameters
----------
n: int
number of rows to return
Examples
--------
>>> from db import DemoDB
>>> db = DemoDB()
>>> db.tables.Customer.City.head()
0 Sao Jose dos Campos
1 Stuttgart
2 Montreal
3 Oslo
4 Prague
5 Prague
Name: City, dtype: object
>>> db.tables.Customer.City.head(2)
0 Sao Jose dos Campos
1 Stuttgart
Name: City, dtype: object
"""
q = self._query_templates['column']['head'].format(column=self.name, schema=self.schema,
table=self.table, n=n)
return pd.read_sql(q, self._con)[self.name]
def all(self):
"""
Returns entire column as a DataFrame. This is executing:
SELECT
<name_of_the_column>
FROM
<name_of_the_table>
Examples
--------
>>> from db import DemoDB
>>> db = DemoDB()
>>> db.tables.Customer.Email.all().head()
0 [email protected]
1 [email protected]
2 [email protected]
3 [email protected]
4 [email protected]
Name: Email, dtype: object
>>> df = db.tables.Customer.Email.all()
>>> len(df)
59
"""
q = self._query_templates['column']['all'].format(column=self.name, schema=self.schema,
table=self.table)
return pd.read_sql(q, self._con)[self.name]
def unique(self):
"""
Returns all unique values as a DataFrame. This is executing:
SELECT
DISTINCT
<name_of_the_column>
FROM
<name_of_the_table>
Examples
--------
>>> from db import DemoDB
>>> db = DemoDB()
>>> db.tables.Customer.FirstName.unique().head(10)
0 Luis
1 Leonie
2 Francois
3 Bjorn
4 Franti\u0161ek
5 Helena
6 Astrid
7 Daan
8 Kara
9 Eduardo
Name: FirstName, dtype: object
>>> len(db.tables.Customer.LastName.unique())
59
"""
q = self._query_templates['column']['unique'].format(column=self.name, schema=self.schema,
table=self.table)
return pd.read_sql(q, self._con)[self.name]
def sample(self, n=10):
"""
Returns random sample of n rows as a DataFrame. This is executing:
SELECT
<name_of_the_column>
FROM
<name_of_the_table>
ORDER BY
RANDOM()
LIMIT <n>
Parameters
----------
n: int
number of rows to sample
Examples (removed from doctest as we can't predict random names...)
--------
from db import DemoDB
db = DemoDB()
db.tables.Artist.Name.sample(10)
0 Pedro Luis & A Parede
1 Santana Feat. Eric Clapton
2 Os Mutantes
3 Banda Black Rio
4 Adrian Leaper & Doreen de Feis
5 Chicago Symphony Orchestra & Fritz Reiner
6 Smashing Pumpkins
7 Spyro Gyra
8 Aaron Copland & London Symphony Orchestra
9 Sir Georg Solti & Wiener Philharmoniker
Name: Name, dtype: object
>>> from db import DemoDB
>>> db = DemoDB()
>>> df = db.tables.Artist.Name.sample(10)
>>> len(df)
10
"""
q = self._query_templates['column']['sample'].format(column=self.name, schema=self.schema,
table=self.table, n=n)
return pd.read_sql(q, self._con)[self.name]
def to_dict(self):
"""
Serialize representation of the column for local caching.
"""
return {'schema': self.schema, 'table': self.table, 'name': self.name, 'type': self.type}
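# A minimal usage sketch (not part of the original file): it strings together a
# few of the accessors documented above, assuming the bundled DemoDB sample
# database referenced in the docstrings.  Purely illustrative.
def _example_column_usage():
    from db import DemoDB
    demo = DemoDB()
    city = demo.tables.Customer.City
    # head() issues a LIMIT query, unique() a SELECT DISTINCT, and sample()
    # an ORDER BY RANDOM() ... LIMIT query, as described above.
    return city.head(3), city.unique(), city.sample(5)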
class ColumnSet(object):
"""
Set of Columns. Used for displaying search results in terminal/ipython
notebook.
"""
def __init__(self, columns):
self.columns = columns
self.pretty_tbl_cols = ["Table", "Column Name", "Type"]
self.use_schema = False
for col in columns:
if col.schema and not self.use_schema:
self.use_schema = True
self.pretty_tbl_cols.insert(0, "Schema")
def __getitem__(self, i):
return self.columns[i]
def _tablify(self):
tbl = PrettyTable(self.pretty_tbl_cols)
for col in self.pretty_tbl_cols:
tbl.align[col] = "l"
for col in self.columns:
row_data = [col.table, col.name, col.type]
if self.use_schema:
row_data.insert(0, col.schema)
tbl.add_row(row_data)
return tbl
def __repr__(self):
tbl = str(self._tablify())
return tbl
def _repr_html_(self):
return self._tablify().get_html_string()
def to_dict(self):
"""Serialize representation of the tableset for local caching."""
return {'columns': [col.to_dict() for col in self.columns]}
| bsd-2-clause |
SCECcode/BBP | bbp/utils/batch/combine_map_gof_gen.py | 1 | 13475 | #!/usr/bin/env python
"""
Copyright 2010-2019 University Of Southern California
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
This program creates a map-based GOF, combining information from all
realizations into a single map plot where the color of each station is
the average bias from all realizations.
"""
# Import Python modules
import os
import glob
import optparse
import matplotlib
if (matplotlib.get_backend() != 'agg'):
matplotlib.use('Agg') # Disables use of Tk/X11
import matplotlib.colors as mcolors
import matplotlib.cm as cm
from matplotlib.ticker import FormatStrFormatter
import pylab
import numpy
# Import Broadband modules
from install_cfg import InstallCfg
import PlotMap
import fault_utils
import plot_config
import bband_utils
# Constants
# Use an extra buffer to plot the region around all stations (in degrees)
BUFFER_LATITUDE = 0.25
BUFFER_LONGITUDE = 0.25
DIST_PERIODS = [0.01, 0.05, 0.1, 0.2, 0.5, 1.0, 2.0, 5.0]
# --------------------------------------------------------------------------
# Functions
# --------------------------------------------------------------------------
def set_boundaries_from_lon_lat(all_sta_lon, all_sta_lat):
"""
This function sets the north, south, east, and west boundaries
of the region we should plot, using the stations' locations in
a lat/lon list
"""
# Start without anything
north = None
south = None
east = None
west = None
all_lon = []
all_lat = []
for lon_list, lat_list in zip(all_sta_lon, all_sta_lat):
for sta_lon, sta_lat in zip(lon_list, lat_list):
all_lon.append(sta_lon)
all_lat.append(sta_lat)
for lon, lat in zip(all_lon, all_lat):
# If this is the first station, use its location
if north is None:
north = lat
south = lat
east = lon
west = lon
# Next station
continue
if lat > north:
north = lat
elif lat < south:
south = lat
if lon > east:
east = lon
elif lon < west:
west = lon
# Try to make the plot more symmetric
lat_range = abs(north - south)
lon_range = abs(east - west)
if (lat_range > lon_range):
diff = lat_range - lon_range
diff = diff / 2.0
east = east + diff
west = west - diff
elif (lon_range > lat_range):
diff = lon_range - lat_range
diff = diff / 2.0
north = north + diff
south = south - diff
# Great, now we just add a buffer on each side
if north < (90 - BUFFER_LATITUDE):
north = north + BUFFER_LATITUDE
else:
north = 90
if south > (-90 + BUFFER_LATITUDE):
south = south - BUFFER_LATITUDE
else:
south = -90
if east < (180 - BUFFER_LONGITUDE):
east = east + BUFFER_LONGITUDE
else:
east = 180
if west > (-180 + BUFFER_LONGITUDE):
west = west - BUFFER_LONGITUDE
else:
west = -180
return north, south, east, west
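# Minimal sketch (not part of the original script): set_boundaries_from_lon_lat
# expects a list of longitude lists and a matching list of latitude lists (one
# pair per period plotted); the coordinates below are made up for illustration.
def _example_boundaries():
    all_sta_lon = [[-118.2, -117.9, -118.5]]
    all_sta_lat = [[34.0, 34.3, 33.8]]
    # Returns a (north, south, east, west) tuple padded by the buffer constants.
    return set_boundaries_from_lon_lat(all_sta_lon, all_sta_lat)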
def combine_realization_data(tmpdir, period):
"""
This function reads the resid-map files from all realizations and
returns the combined data set
"""
data = {}
realizations = sorted(os.listdir(tmpdir))
for realization in realizations:
basedir = os.path.join(tmpdir, realization)
resid_file = glob.glob("%s%s*-resid-map-%.3f-rotd50.txt" %
(basedir, os.sep, period))
if len(resid_file) != 1:
raise bband_utils.ProcessingError("Residuals file not found for "
"realization %s!" % (realization))
resid_file = resid_file[0]
input_file = open(resid_file, 'r')
for line in input_file:
line = line.strip()
# Skip comments and empty lines
if line.startswith("#") or line.startswith("%") or not line:
continue
pieces = line.split()
# Make sure we have enough tokens
if len(pieces) != 3:
continue
# Convert to floats
pieces = [float(piece) for piece in pieces]
lon = pieces[0]
lat = pieces[1]
val = pieces[2]
if (lon, lat) in data:
data[(lon, lat)].append(val)
else:
data[(lon, lat)] = [val]
input_file.close()
# Ok, processed all realizations, now combine the data
sta_x_data = []
sta_y_data = []
sta_resid_data = []
for item in data:
sta_x_data.append(item[0])
sta_y_data.append(item[1])
sta_resid_data.append(numpy.mean(data[item]))
# Return the data we found
return sta_x_data, sta_y_data, sta_resid_data
def plot_combined_map_gof(indir, tmpdir, outdir, codebase):
"""
This function reads data from the residuals files from multiple
realizations and plots a map gof plot with a number of periods.
"""
# Capture number of realizations and event label
num_realizations = len(os.listdir(tmpdir))
basedir = os.path.join(tmpdir, os.listdir(tmpdir)[0])
resid_file = glob.glob("%s%s*-resid-map*rotd50.txt" %
(basedir, os.sep))[0]
event_label = os.path.basename(resid_file).split("-")[0]
# Get one trace file
basedir = os.path.join(indir, os.listdir(indir)[0])
trace_file = glob.glob("%s%s*.trace" % (basedir, os.sep))[0]
# Now get the SRC (or SRF file) in order to get the hypocenter
# location. Note that this function will look for the hypocenter
# location from the first realization. If the simulation was
# created using randomized hypocenter locations, the plot will
# only display the location of the hypocenter from the first
# realization.
src_file = glob.glob("%s%s*.src" % (basedir, os.sep))
if not len(src_file):
srf_file = glob.glob("%s%s*.srf" % (basedir, os.sep))
if not len(srf_file):
raise bband_utils.ProcessingError("Cannot find SRC/SRF file!")
source_file = srf_file[0]
else:
source_file = src_file[0]
# Get hypo_lon, hypo_lat from src/srf file
hypo_lon, hypo_lat = fault_utils.calculate_epicenter(source_file)
# Collect all the data from the residuals file
all_sta_x_data = []
all_sta_y_data = []
all_sta_resid_data = []
for period in DIST_PERIODS:
(sta_x_data,
sta_y_data,
sta_resid_data) = combine_realization_data(tmpdir, period)
all_sta_x_data.append(sta_x_data)
all_sta_y_data.append(sta_y_data)
all_sta_resid_data.append(sta_resid_data)
# Get plot boundaries
(north, south,
east, west) = set_boundaries_from_lon_lat(all_sta_x_data,
all_sta_y_data)
# Get directory names
install = InstallCfg.getInstance()
# Prepare to plot map GOF
plotregion = [west, east, south, north]
topo = os.path.join(install.A_PLOT_DATA_DIR, 'calTopo18.bf')
coastal = os.path.join(install.A_PLOT_DATA_DIR, 'gshhs_h.txt')
border = os.path.join(install.A_PLOT_DATA_DIR, 'wdb_borders_h.txt')
# Now create the map GOF
outfile = os.path.join(outdir, "gof-map-combined-%s-%s-rotd50.png" %
(codebase, event_label))
create_combined_map_gof(all_sta_x_data, all_sta_y_data, all_sta_resid_data,
plotregion, topo, coastal, border, trace_file,
event_label, num_realizations, codebase, outfile,
hypo_lat=hypo_lat, hypo_lon=hypo_lon)
def create_combined_map_gof(all_sta_x_data, all_sta_y_data, all_sta_resid_data,
plotregion, topo, coastal, border, fault,
event_label, num_realizations, codebase, outfile,
hypo_lat=None, hypo_lon=None):
"""
Creates a combined gof map plot for all the data and distances
provided
"""
plottitle = ("GOF Comparison for %s\n%d Realizations\n%s Method" %
(event_label, num_realizations, codebase.upper()))
# Read in topo data
topo_points = PlotMap.read_topo(topo, plotregion)
# Read in fault data
fault_x, fault_y = PlotMap.read_fault(fault)
# Read coastlines
coast_x, coast_y = PlotMap.read_coastal(coastal, plotregion)
# Read borders
bord_x, bord_y = PlotMap.read_coastal(border, plotregion)
# Create figure
num_plots = len(DIST_PERIODS)
if len(DIST_PERIODS) % 2:
num_plots = num_plots + 1
num_columns = num_plots / 2
fig, axs = pylab.plt.subplots(2, num_columns)
fig.set_size_inches(12, 6.5)
fig.autofmt_xdate()
# Setup color scale
cmap = cm.gist_gray
norm = mcolors.Normalize(vmin=-2000.0, vmax=3000.0)
# Convert to list
subfigs = []
for y_subplot in range(0, 2):
for x_subplot in range(0, num_columns):
subfigs.append(axs[y_subplot, x_subplot])
# Fixed vmin and vmax for all plots
vmin = -1.5
vmax = 1.5
# Good, now walk through each subfig
for (subfig, sta_x_data, sta_y_data,
sta_resid_data, period) in zip(subfigs, all_sta_x_data, all_sta_y_data,
all_sta_resid_data, DIST_PERIODS):
# Plot basemap
subfig.imshow(topo_points, cmap=cmap, norm=norm,
extent=plotregion, interpolation='nearest')
# Freeze the axis extents
subfig.set_autoscale_on(False)
# Plot coast lines
for idx in xrange(0, len(coast_x)):
subfig.plot(coast_x[idx], coast_y[idx], linestyle='-', color='0.75')
# Plot borders
for idx in xrange(0, len(bord_x)):
subfig.plot(bord_x[idx], bord_y[idx], linestyle='-', color='0.75')
# Plot fault trace
subfig.plot(fault_x, fault_y, linestyle='-', color='k')
# Plot hypocenter
if hypo_lat is not None and hypo_lon is not None:
hypo_lat = [hypo_lat]
hypo_lon = [hypo_lon]
subfig.scatter(hypo_lon, hypo_lat, marker=(5,1,0),
color='y', s=50)
# Plot the stations
im = subfig.scatter(sta_x_data, sta_y_data, s=20, c=sta_resid_data,
cmap=cm.jet_r, vmin=vmin, vmax=vmax, marker='o')
# Set degree formatting of tick values
major_formatter = FormatStrFormatter(u'%.1f\u00b0')
subfig.xaxis.set_major_formatter(major_formatter)
subfig.yaxis.set_major_formatter(major_formatter)
#Disable colorbar on each plot
#subfig.figure.colorbar(im, ax=subfig)
# Set font size
for tick in subfig.get_xticklabels():
tick.set_fontsize(6)
for tick in subfig.get_yticklabels():
tick.set_fontsize(6)
subfig.set_title("Period = %.3f s" % (period), size=8)
# Slightly different values for top/bottom since the combined plot
# has 3 title lines
fig.subplots_adjust(left = 0.05, right = 0.91, hspace = 0.0,
top = 0.92, bottom = 0.02)
colorbar_ax = fig.add_axes([0.93, 0.17, 0.02, 0.6])
fig.colorbar(im, cax=colorbar_ax)
fig.suptitle('%s' % (plottitle), size=12)
print "Saving map GoF plot to %s" % (outfile)
fig.savefig(outfile, format="png", transparent=False, dpi=plot_config.dpi)
# --------------------------------------------------------------------------
# Main
# --------------------------------------------------------------------------
PARSER = optparse.OptionParser()
PARSER.add_option("-d", "--dir", dest="input_dir",
help="Input directory containing simulation results")
PARSER.add_option("-o", "--output_dir", dest="output_dir",
help="Directory where produced map plot will go")
PARSER.add_option("-c", "--codebase", dest="codebase",
help="Method used for the simulation")
(OPTIONS, ARGS) = PARSER.parse_args()
if OPTIONS.input_dir is None:
PARSER.error("Please specify the input directory!")
TOP_INPUT_DIR = OPTIONS.input_dir
if not os.path.isdir(TOP_INPUT_DIR):
PARSER.error("Invalid input directory!")
if not "Sims" in os.listdir(TOP_INPUT_DIR):
PARSER.error("Please provide the top-level simulation directory!\n"
"This is the directory given to the cluster script")
INPUT_OUTDIR = os.path.join(TOP_INPUT_DIR, "Sims" , "outdata")
INPUT_INDIR = os.path.join(TOP_INPUT_DIR, "Sims", "indata")
if OPTIONS.output_dir is None:
PARSER.error("error specify output directory!")
else:
OUTPUT_DIR = OPTIONS.output_dir
if not os.path.isdir(OUTPUT_DIR):
PARSER.error("Invalid output directory!")
if OPTIONS.codebase is None:
PARSER.error("Please specify codebase!")
# Create combined map gof plot
plot_combined_map_gof(INPUT_INDIR, INPUT_OUTDIR, OUTPUT_DIR, OPTIONS.codebase)
# All done!
print "All Done!"
| apache-2.0 |
anorfleet/turntable | test/lib/python2.7/site-packages/scipy/cluster/hierarchy.py | 7 | 93813 | """
========================================================
Hierarchical clustering (:mod:`scipy.cluster.hierarchy`)
========================================================
.. currentmodule:: scipy.cluster.hierarchy
These functions cut hierarchical clusterings into flat clusterings
or find the roots of the forest formed by a cut by providing the flat
cluster ids of each observation.
.. autosummary::
:toctree: generated/
fcluster
fclusterdata
leaders
These are routines for agglomerative clustering.
.. autosummary::
:toctree: generated/
linkage
single
complete
average
weighted
centroid
median
ward
These routines compute statistics on hierarchies.
.. autosummary::
:toctree: generated/
cophenet
from_mlab_linkage
inconsistent
maxinconsts
maxdists
maxRstat
to_mlab_linkage
Routines for visualizing flat clusters.
.. autosummary::
:toctree: generated/
dendrogram
These are data structures and routines for representing hierarchies as
tree objects.
.. autosummary::
:toctree: generated/
ClusterNode
leaves_list
to_tree
These are predicates for checking the validity of linkage and
inconsistency matrices as well as for checking isomorphism of two
flat cluster assignments.
.. autosummary::
:toctree: generated/
is_valid_im
is_valid_linkage
is_isomorphic
is_monotonic
correspond
num_obs_linkage
Utility routines for plotting:
.. autosummary::
:toctree: generated/
set_link_color_palette
References
----------
.. [1] "Statistics toolbox." API Reference Documentation. The MathWorks.
http://www.mathworks.com/access/helpdesk/help/toolbox/stats/.
Accessed October 1, 2007.
.. [2] "Hierarchical clustering." API Reference Documentation.
The Wolfram Research, Inc.
http://reference.wolfram.com/mathematica/HierarchicalClustering/tutorial/
HierarchicalClustering.html.
Accessed October 1, 2007.
.. [3] Gower, JC and Ross, GJS. "Minimum Spanning Trees and Single Linkage
Cluster Analysis." Applied Statistics. 18(1): pp. 54--64. 1969.
.. [4] Ward Jr, JH. "Hierarchical grouping to optimize an objective
function." Journal of the American Statistical Association. 58(301):
pp. 236--44. 1963.
.. [5] Johnson, SC. "Hierarchical clustering schemes." Psychometrika.
32(2): pp. 241--54. 1966.
.. [6] Sneath, PH and Sokal, RR. "Numerical taxonomy." Nature. 193: pp.
855--60. 1962.
.. [7] Batagelj, V. "Comparing resemblance measures." Journal of
Classification. 12: pp. 73--90. 1995.
.. [8] Sokal, RR and Michener, CD. "A statistical method for evaluating
systematic relationships." Scientific Bulletins. 38(22):
pp. 1409--38. 1958.
.. [9] Edelbrock, C. "Mixture model tests of hierarchical clustering
algorithms: the problem of classifying everybody." Multivariate
Behavioral Research. 14: pp. 367--84. 1979.
.. [10] Jain, A., and Dubes, R., "Algorithms for Clustering Data."
Prentice-Hall. Englewood Cliffs, NJ. 1988.
.. [11] Fisher, RA "The use of multiple measurements in taxonomic
problems." Annals of Eugenics, 7(2): 179-188. 1936
* MATLAB and MathWorks are registered trademarks of The MathWorks, Inc.
* Mathematica is a registered trademark of The Wolfram Research, Inc.
"""
from __future__ import division, print_function, absolute_import
# Copyright (C) Damian Eads, 2007-2008. New BSD License.
# hierarchy.py (derived from cluster.py, http://scipy-cluster.googlecode.com)
#
# Author: Damian Eads
# Date: September 22, 2007
#
# Copyright (c) 2007, 2008, Damian Eads
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# - Redistributions of source code must retain the above
# copyright notice, this list of conditions and the
# following disclaimer.
# - Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# - Neither the name of the author nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import warnings
import numpy as np
from . import _hierarchy
import scipy.spatial.distance as distance
from scipy.lib.six import string_types
from scipy.lib.six import xrange
_cpy_non_euclid_methods = {'single': 0, 'complete': 1, 'average': 2,
'weighted': 6}
_cpy_euclid_methods = {'centroid': 3, 'median': 4, 'ward': 5}
_cpy_linkage_methods = set(_cpy_non_euclid_methods.keys()).union(
set(_cpy_euclid_methods.keys()))
__all__ = ['ClusterNode', 'average', 'centroid', 'complete', 'cophenet',
'correspond', 'dendrogram', 'fcluster', 'fclusterdata',
'from_mlab_linkage', 'inconsistent', 'is_isomorphic',
'is_monotonic', 'is_valid_im', 'is_valid_linkage', 'leaders',
'leaves_list', 'linkage', 'maxRstat', 'maxdists', 'maxinconsts',
'median', 'num_obs_linkage', 'set_link_color_palette', 'single',
'to_mlab_linkage', 'to_tree', 'ward', 'weighted', 'distance']
def _warning(s):
warnings.warn('scipy.cluster: %s' % s, stacklevel=3)
def _copy_array_if_base_present(a):
"""
Copies the array if its base points to a parent array.
"""
if a.base is not None:
return a.copy()
elif np.issubsctype(a, np.float32):
return np.array(a, dtype=np.double)
else:
return a
def _copy_arrays_if_base_present(T):
"""
Accepts a tuple of arrays T. Copies the array T[i] if its base array
points to an actual array. Otherwise, the reference is just copied.
This is useful if the arrays are being passed to a C function that
does not do proper striding.
"""
l = [_copy_array_if_base_present(a) for a in T]
return l
def _randdm(pnts):
""" Generates a random distance matrix stored in condensed form. A
pnts * (pnts - 1) / 2 sized vector is returned.
"""
if pnts >= 2:
D = np.random.rand(pnts * (pnts - 1) / 2)
else:
raise ValueError("The number of points in the distance matrix "
"must be at least 2.")
return D
def single(y):
"""
Performs single/min/nearest linkage on the condensed distance matrix ``y``
Parameters
----------
y : ndarray
The upper triangular of the distance matrix. The result of
``pdist`` is returned in this form.
Returns
-------
Z : ndarray
The linkage matrix.
See Also
--------
linkage: for advanced creation of hierarchical clusterings.
"""
return linkage(y, method='single', metric='euclidean')
def complete(y):
"""
Performs complete/max/farthest point linkage on a condensed distance matrix
Parameters
----------
y : ndarray
The upper triangular of the distance matrix. The result of
``pdist`` is returned in this form.
Returns
-------
Z : ndarray
A linkage matrix containing the hierarchical clustering. See
the ``linkage`` function documentation for more information
on its structure.
See Also
--------
linkage
"""
return linkage(y, method='complete', metric='euclidean')
def average(y):
"""
Performs average/UPGMA linkage on a condensed distance matrix
Parameters
----------
y : ndarray
The upper triangular of the distance matrix. The result of
``pdist`` is returned in this form.
Returns
-------
Z : ndarray
A linkage matrix containing the hierarchical clustering. See
the ``linkage`` function documentation for more information
on its structure.
See Also
--------
linkage: for advanced creation of hierarchical clusterings.
"""
return linkage(y, method='average', metric='euclidean')
def weighted(y):
"""
Performs weighted/WPGMA linkage on the condensed distance matrix.
See ``linkage`` for more information on the return
structure and algorithm.
Parameters
----------
y : ndarray
The upper triangular of the distance matrix. The result of
``pdist`` is returned in this form.
Returns
-------
Z : ndarray
A linkage matrix containing the hierarchical clustering. See
the ``linkage`` function documentation for more information
on its structure.
See Also
--------
linkage : for advanced creation of hierarchical clusterings.
"""
return linkage(y, method='weighted', metric='euclidean')
def centroid(y):
"""
Performs centroid/UPGMC linkage.
See ``linkage`` for more information on the return structure
and algorithm.
The following are common calling conventions:
1. ``Z = centroid(y)``
Performs centroid/UPGMC linkage on the condensed distance
matrix ``y``. See ``linkage`` for more information on the return
structure and algorithm.
2. ``Z = centroid(X)``
Performs centroid/UPGMC linkage on the observation matrix ``X``
using Euclidean distance as the distance metric. See ``linkage``
for more information on the return structure and algorithm.
Parameters
----------
y : ndarray
A condensed or redundant distance matrix. A condensed
distance matrix is a flat array containing the upper
triangular of the distance matrix. This is the form that
``pdist`` returns. Alternatively, a collection of
m observation vectors in n dimensions may be passed as
a m by n array.
Returns
-------
Z : ndarray
A linkage matrix containing the hierarchical clustering. See
the ``linkage`` function documentation for more information
on its structure.
See Also
--------
linkage: for advanced creation of hierarchical clusterings.
"""
return linkage(y, method='centroid', metric='euclidean')
def median(y):
"""
Performs median/WPGMC linkage.
See ``linkage`` for more information on the return structure
and algorithm.
The following are common calling conventions:
1. ``Z = median(y)``
Performs median/WPGMC linkage on the condensed distance matrix
``y``. See ``linkage`` for more information on the return
structure and algorithm.
2. ``Z = median(X)``
Performs median/WPGMC linkage on the observation matrix ``X``
using Euclidean distance as the distance metric. See linkage
for more information on the return structure and algorithm.
Parameters
----------
y : ndarray
A condensed or redundant distance matrix. A condensed
distance matrix is a flat array containing the upper
triangular of the distance matrix. This is the form that
``pdist`` returns. Alternatively, a collection of
m observation vectors in n dimensions may be passed as
a m by n array.
Returns
-------
Z : ndarray
The hierarchical clustering encoded as a linkage matrix.
See Also
--------
linkage: for advanced creation of hierarchical clusterings.
"""
return linkage(y, method='median', metric='euclidean')
def ward(y):
"""
Performs Ward's linkage on a condensed or redundant distance matrix.
See linkage for more information on the return structure
and algorithm.
The following are common calling conventions:
1. ``Z = ward(y)``
Performs Ward's linkage on the condensed distance matrix ``y``. See
linkage for more information on the return structure and
algorithm.
2. ``Z = ward(X)``
Performs Ward's linkage on the observation matrix ``X`` using
Euclidean distance as the distance metric. See linkage for more
information on the return structure and algorithm.
Parameters
----------
y : ndarray
A condensed or redundant distance matrix. A condensed
distance matrix is a flat array containing the upper
triangular of the distance matrix. This is the form that
``pdist`` returns. Alternatively, a collection of
m observation vectors in n dimensions may be passed as
a m by n array.
Returns
-------
Z : ndarray
The hierarchical clustering encoded as a linkage matrix.
See Also
--------
linkage: for advanced creation of hierarchical clusterings.
"""
return linkage(y, method='ward', metric='euclidean')
def linkage(y, method='single', metric='euclidean'):
"""
Performs hierarchical/agglomerative clustering on the condensed
distance matrix y.
y must be a :math:`{n \\choose 2}` sized
vector where n is the number of original observations paired
in the distance matrix. The behavior of this function is very
similar to the MATLAB linkage function.
A 4 by :math:`(n-1)` matrix ``Z`` is returned. At the
:math:`i`-th iteration, clusters with indices ``Z[i, 0]`` and
``Z[i, 1]`` are combined to form cluster :math:`n + i`. A
cluster with an index less than :math:`n` corresponds to one of
the :math:`n` original observations. The distance between
clusters ``Z[i, 0]`` and ``Z[i, 1]`` is given by ``Z[i, 2]``. The
fourth value ``Z[i, 3]`` represents the number of original
observations in the newly formed cluster.
The following linkage methods are used to compute the distance
:math:`d(s, t)` between two clusters :math:`s` and
:math:`t`. The algorithm begins with a forest of clusters that
have yet to be used in the hierarchy being formed. When two
clusters :math:`s` and :math:`t` from this forest are combined
into a single cluster :math:`u`, :math:`s` and :math:`t` are
removed from the forest, and :math:`u` is added to the
forest. When only one cluster remains in the forest, the algorithm
stops, and this cluster becomes the root.
A distance matrix is maintained at each iteration. The ``d[i,j]``
entry corresponds to the distance between cluster :math:`i` and
:math:`j` in the original forest.
At each iteration, the algorithm must update the distance matrix
to reflect the distance of the newly formed cluster u with the
remaining clusters in the forest.
Suppose there are :math:`|u|` original observations
:math:`u[0], \\ldots, u[|u|-1]` in cluster :math:`u` and
:math:`|v|` original objects :math:`v[0], \\ldots, v[|v|-1]` in
cluster :math:`v`. Recall :math:`s` and :math:`t` are
combined to form cluster :math:`u`. Let :math:`v` be any
remaining cluster in the forest that is not :math:`u`.
The following are methods for calculating the distance between the
newly formed cluster :math:`u` and each :math:`v`.
* method='single' assigns
.. math::
d(u,v) = \\min(dist(u[i],v[j]))
for all points :math:`i` in cluster :math:`u` and
:math:`j` in cluster :math:`v`. This is also known as the
Nearest Point Algorithm.
* method='complete' assigns
.. math::
d(u, v) = \\max(dist(u[i],v[j]))
for all points :math:`i` in cluster u and :math:`j` in
cluster :math:`v`. This is also known as the Farthest Point
Algorithm or Voor Hees Algorithm.
* method='average' assigns
.. math::
d(u,v) = \\sum_{ij} \\frac{d(u[i], v[j])}
{(|u|*|v|)}
for all points :math:`i` and :math:`j` where :math:`|u|`
and :math:`|v|` are the cardinalities of clusters :math:`u`
and :math:`v`, respectively. This is also called the UPGMA
algorithm.
* method='weighted' assigns
.. math::
d(u,v) = (dist(s,v) + dist(t,v))/2
where cluster u was formed with cluster s and t and v
is a remaining cluster in the forest. (also called WPGMA)
* method='centroid' assigns
.. math::
dist(s,t) = ||c_s-c_t||_2
where :math:`c_s` and :math:`c_t` are the centroids of
clusters :math:`s` and :math:`t`, respectively. When two
clusters :math:`s` and :math:`t` are combined into a new
cluster :math:`u`, the new centroid is computed over all the
original objects in clusters :math:`s` and :math:`t`. The
distance then becomes the Euclidean distance between the
centroid of :math:`u` and the centroid of a remaining cluster
:math:`v` in the forest. This is also known as the UPGMC
algorithm.
* method='median' assigns :math:`d(s,t)` like the ``centroid``
method. When two clusters :math:`s` and :math:`t` are combined
into a new cluster :math:`u`, the average of centroids s and t
give the new centroid :math:`u`. This is also known as the
WPGMC algorithm.
* method='ward' uses the Ward variance minimization algorithm.
The new entry :math:`d(u,v)` is computed as follows,
.. math::
d(u,v) = \\sqrt{\\frac{|v|+|s|}
{T}d(v,s)^2
+ \\frac{|v|+|t|}
{T}d(v,t)^2
+ \\frac{|v|}
{T}d(s,t)^2}
where :math:`u` is the newly joined cluster consisting of
clusters :math:`s` and :math:`t`, :math:`v` is an unused
cluster in the forest, :math:`T=|v|+|s|+|t|`, and
:math:`|*|` is the cardinality of its argument. This is also
known as the incremental algorithm.
Warning: When the minimum distance pair in the forest is chosen, there
may be two or more pairs with the same minimum distance. This
implementation may choose a different minimum than the MATLAB
version.
Parameters
----------
y : ndarray
A condensed or redundant distance matrix. A condensed distance matrix
is a flat array containing the upper triangular of the distance matrix.
This is the form that ``pdist`` returns. Alternatively, a collection of
:math:`m` observation vectors in n dimensions may be passed as an
:math:`m` by :math:`n` array.
method : str, optional
The linkage algorithm to use. See the ``Linkage Methods`` section below
for full descriptions.
metric : str, optional
The distance metric to use. See the ``distance.pdist`` function for a
list of valid distance metrics.
Returns
-------
Z : ndarray
The hierarchical clustering encoded as a linkage matrix.
"""
if not isinstance(method, string_types):
raise TypeError("Argument 'method' must be a string.")
y = _convert_to_double(np.asarray(y, order='c'))
s = y.shape
if len(s) == 1:
distance.is_valid_y(y, throw=True, name='y')
d = distance.num_obs_y(y)
if method not in _cpy_non_euclid_methods:
raise ValueError("Valid methods when the raw observations are "
"omitted are 'single', 'complete', 'weighted', "
"and 'average'.")
# Since the C code does not support striding using strides.
[y] = _copy_arrays_if_base_present([y])
Z = np.zeros((d - 1, 4))
if method == 'single':
_hierarchy.slink(y, Z, int(d))
else:
_hierarchy.linkage(y, Z, int(d),
int(_cpy_non_euclid_methods[method]))
elif len(s) == 2:
X = y
n = s[0]
if method not in _cpy_linkage_methods:
raise ValueError('Invalid method: %s' % method)
if method in _cpy_non_euclid_methods:
dm = distance.pdist(X, metric)
Z = np.zeros((n - 1, 4))
if method == 'single':
_hierarchy.slink(dm, Z, n)
else:
_hierarchy.linkage(dm, Z, n,
int(_cpy_non_euclid_methods[method]))
elif method in _cpy_euclid_methods:
if metric != 'euclidean':
raise ValueError(("Method '%s' requires the distance metric "
"to be euclidean") % method)
dm = distance.pdist(X, metric)
Z = np.zeros((n - 1, 4))
_hierarchy.linkage(dm, Z, n,
int(_cpy_euclid_methods[method]))
return Z
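# Minimal usage sketch (not part of the original module): linkage() accepts
# either a condensed distance matrix from pdist() or the raw observation
# matrix itself; the random data below are purely illustrative.
def _example_linkage():
    import numpy as np
    X = np.random.rand(10, 3)               # 10 observations in 3 dimensions
    Z1 = linkage(X, method='ward')          # observation matrix input
    y = distance.pdist(X)                   # condensed distance matrix
    Z2 = linkage(y, method='average')       # condensed input
    return Z1, Z2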
class ClusterNode:
"""
A tree node class for representing a cluster.
Leaf nodes correspond to original observations, while non-leaf nodes
correspond to non-singleton clusters.
The to_tree function converts a matrix returned by the linkage
function into an easy-to-use tree representation.
See Also
--------
to_tree : for converting a linkage matrix ``Z`` into a tree object.
"""
def __init__(self, id, left=None, right=None, dist=0, count=1):
if id < 0:
raise ValueError('The id must be non-negative.')
if dist < 0:
raise ValueError('The distance must be non-negative.')
if (left is None and right is not None) or \
(left is not None and right is None):
raise ValueError('Only full or proper binary trees are permitted.'
' This node has one child.')
if count < 1:
raise ValueError('A cluster must contain at least one original '
'observation.')
self.id = id
self.left = left
self.right = right
self.dist = dist
if self.left is None:
self.count = count
else:
self.count = left.count + right.count
def get_id(self):
"""
The identifier of the target node.
For ``0 <= i < n``, `i` corresponds to original observation i.
For ``n <= i < 2n-1``, `i` corresponds to non-singleton cluster formed
at iteration ``i-n``.
Returns
-------
id : int
The identifier of the target node.
"""
return self.id
def get_count(self):
"""
The number of leaf nodes (original observations) belonging to
the cluster node nd. If the target node is a leaf, 1 is
returned.
Returns
-------
get_count : int
The number of leaf nodes below the target node.
"""
return self.count
def get_left(self):
"""
Return a reference to the left child tree object.
Returns
-------
left : ClusterNode
The left child of the target node. If the node is a leaf,
None is returned.
"""
return self.left
def get_right(self):
"""
Returns a reference to the right child tree object.
Returns
-------
right : ClusterNode
The left child of the target node. If the node is a leaf,
None is returned.
"""
return self.right
def is_leaf(self):
"""
Returns True if the target node is a leaf.
Returns
-------
leafness : bool
True if the target node is a leaf node.
"""
return self.left is None
def pre_order(self, func=(lambda x: x.id)):
"""
Performs pre-order traversal without recursive function calls.
When a leaf node is first encountered, ``func`` is called with
the leaf node as its argument, and its result is appended to
the list.
For example, the statement::
ids = root.pre_order(lambda x: x.id)
returns a list of the node ids corresponding to the leaf nodes
of the tree as they appear from left to right.
Parameters
----------
func : function
Applied to each leaf ClusterNode object in the pre-order traversal.
Given the i'th leaf node in the pre-order traversal ``n[i]``, the
result of func(n[i]) is stored in L[i]. If not provided, the index
of the original observation to which the node corresponds is used.
Returns
-------
L : list
The pre-order traversal.
"""
# Do a preorder traversal, caching the result. To avoid having to do
# recursion, we'll store the previous index we've visited in a vector.
n = self.count
curNode = [None] * (2 * n)
lvisited = set()
rvisited = set()
curNode[0] = self
k = 0
preorder = []
while k >= 0:
nd = curNode[k]
ndid = nd.id
if nd.is_leaf():
preorder.append(func(nd))
k = k - 1
else:
if ndid not in lvisited:
curNode[k + 1] = nd.left
lvisited.add(ndid)
k = k + 1
elif ndid not in rvisited:
curNode[k + 1] = nd.right
rvisited.add(ndid)
k = k + 1
# If we've visited the left and right of this non-leaf
# node already, go up in the tree.
else:
k = k - 1
return preorder
_cnode_bare = ClusterNode(0)
_cnode_type = type(ClusterNode)
def to_tree(Z, rd=False):
"""
Converts a hierarchical clustering encoded in the matrix ``Z`` (by
linkage) into an easy-to-use tree object.
The reference r to the root ClusterNode object is returned.
Each ClusterNode object has a left, right, dist, id, and count
attribute. The left and right attributes point to ClusterNode objects
that were combined to generate the cluster. If both are None then
the ClusterNode object is a leaf node, its count must be 1, and its
distance is meaningless but set to 0.
Note: This function is provided for the convenience of the library
user. ClusterNodes are not used as input to any of the functions in this
library.
Parameters
----------
Z : ndarray
The linkage matrix in proper form (see the ``linkage``
function documentation).
rd : bool, optional
When False, a reference to the root ClusterNode object is
returned. Otherwise, a tuple (r,d) is returned. ``r`` is a
reference to the root node while ``d`` is a dictionary
mapping cluster ids to ClusterNode references. If a cluster id is
less than n, then it corresponds to a singleton cluster
(leaf node). See ``linkage`` for more information on the
assignment of cluster ids to clusters.
Returns
-------
tree : ClusterNode
The root ClusterNode object, or the tuple ``(r, d)`` described above when ``rd`` is True.
"""
Z = np.asarray(Z, order='c')
is_valid_linkage(Z, throw=True, name='Z')
# The number of original objects is equal to the number of rows minus
# 1.
n = Z.shape[0] + 1
# Create a list full of None's to store the node objects
d = [None] * (n * 2 - 1)
# Create the nodes corresponding to the n original objects.
for i in xrange(0, n):
d[i] = ClusterNode(i)
nd = None
for i in xrange(0, n - 1):
fi = int(Z[i, 0])
fj = int(Z[i, 1])
if fi > i + n:
raise ValueError(('Corrupt matrix Z. Index to derivative cluster '
'is used before it is formed. See row %d, '
'column 0') % fi)
if fj > i + n:
raise ValueError(('Corrupt matrix Z. Index to derivative cluster '
'is used before it is formed. See row %d, '
'column 1') % fj)
nd = ClusterNode(i + n, d[fi], d[fj], Z[i, 2])
# ^ id ^ left ^ right ^ dist
if Z[i, 3] != nd.count:
raise ValueError(('Corrupt matrix Z. The count Z[%d,3] is '
'incorrect.') % i)
d[n + i] = nd
if rd:
return (nd, d)
else:
return nd
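# Minimal sketch (not part of the original module): to_tree() turns a linkage
# matrix into ClusterNode objects, and pre_order() walks the leaf ids from
# left to right.  Data are made up for illustration.
def _example_to_tree():
    import numpy as np
    Z = linkage(np.random.rand(6, 2), method='single')
    root = to_tree(Z)
    return root.pre_order(lambda nd: nd.id)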
def _convert_to_bool(X):
if X.dtype != np.bool:
X = X.astype(np.bool)
if not X.flags.contiguous:
X = X.copy()
return X
def _convert_to_double(X):
if X.dtype != np.double:
X = X.astype(np.double)
if not X.flags.contiguous:
X = X.copy()
return X
def cophenet(Z, Y=None):
"""
Calculates the cophenetic distances between each observation in
the hierarchical clustering defined by the linkage ``Z``.
Suppose ``p`` and ``q`` are original observations in
disjoint clusters ``s`` and ``t``, respectively and
``s`` and ``t`` are joined by a direct parent cluster
``u``. The cophenetic distance between observations
``i`` and ``j`` is simply the distance between
clusters ``s`` and ``t``.
Parameters
----------
Z : ndarray
The hierarchical clustering encoded as an array
(see ``linkage`` function).
Y : ndarray (optional)
Calculates the cophenetic correlation coefficient ``c`` of a
hierarchical clustering defined by the linkage matrix `Z`
of a set of :math:`n` observations in :math:`m`
dimensions. `Y` is the condensed distance matrix from which
`Z` was generated.
Returns
-------
c : ndarray
The cophenetic correlation coefficient (if ``Y`` is passed).
d : ndarray
The cophenetic distance matrix in condensed form. The
:math:`ij` th entry is the cophenetic distance between
original observations :math:`i` and :math:`j`.
"""
Z = np.asarray(Z, order='c')
is_valid_linkage(Z, throw=True, name='Z')
Zs = Z.shape
n = Zs[0] + 1
zz = np.zeros((n * (n - 1)) // 2, dtype=np.double)
# Since the C code does not support striding using strides.
# The dimensions are used instead.
Z = _convert_to_double(Z)
_hierarchy.cophenetic_distances(Z, zz, int(n))
if Y is None:
return zz
Y = np.asarray(Y, order='c')
distance.is_valid_y(Y, throw=True, name='Y')
z = zz.mean()
y = Y.mean()
Yy = Y - y
Zz = zz - z
numerator = (Yy * Zz)
denomA = Yy ** 2
denomB = Zz ** 2
c = numerator.sum() / np.sqrt((denomA.sum() * denomB.sum()))
return (c, zz)
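# Minimal sketch (not part of the original module): passing the original
# condensed distances Y returns the cophenetic correlation coefficient c;
# values near 1 mean the dendrogram preserves the pairwise distances well.
def _example_cophenet():
    import numpy as np
    X = np.random.rand(12, 4)
    y = distance.pdist(X)
    Z = linkage(y, method='average')
    c, coph_dists = cophenet(Z, y)
    return c, coph_dists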
def inconsistent(Z, d=2):
"""
Calculates inconsistency statistics on a linkage.
Note: This function behaves similarly to the MATLAB(TM)
inconsistent function.
Parameters
----------
Z : ndarray
The :math:`(n-1)` by 4 matrix encoding the linkage
(hierarchical clustering). See ``linkage`` documentation
for more information on its form.
d : int, optional
The number of links up to `d` levels below each
non-singleton cluster.
Returns
-------
R : ndarray
A :math:`(n-1)` by 4 matrix where the ``i``'th row
contains the link statistics for the non-singleton cluster
``i``. The link statistics are computed over the link
heights for links :math:`d` levels below the cluster
``i``. ``R[i,0]`` and ``R[i,1]`` are the mean and standard
deviation of the link heights, respectively; ``R[i,2]`` is
the number of links included in the calculation; and
``R[i,3]`` is the inconsistency coefficient,
.. math:: \\frac{\\mathtt{Z[i,2]}-\\mathtt{R[i,0]}} {R[i,1]}
"""
Z = np.asarray(Z, order='c')
Zs = Z.shape
is_valid_linkage(Z, throw=True, name='Z')
if (not d == np.floor(d)) or d < 0:
raise ValueError('The second argument d must be a nonnegative '
'integer value.')
# Since the C code does not support striding using strides.
# The dimensions are used instead.
[Z] = _copy_arrays_if_base_present([Z])
n = Zs[0] + 1
R = np.zeros((n - 1, 4), dtype=np.double)
_hierarchy.inconsistent(Z, R, int(n), int(d))
return R
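# Minimal sketch (not part of the original module): column 3 of R holds the
# inconsistency coefficient that fcluster()'s default criterion thresholds on.
def _example_inconsistent():
    import numpy as np
    Z = linkage(np.random.rand(8, 2), method='average')
    R = inconsistent(Z, d=2)
    return R[:, 3]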
def from_mlab_linkage(Z):
"""
Converts a linkage matrix generated by MATLAB(TM) to a new
linkage matrix compatible with this module.
The conversion does two things:
* the indices are converted from ``1..N`` to ``0..(N-1)`` form,
and
* a fourth column Z[:,3] is added where Z[i,3] represents the
number of original observations (leaves) in the non-singleton
cluster i.
This function is useful when loading in linkages from legacy data
files generated by MATLAB.
Parameters
----------
Z : ndarray
A linkage matrix generated by MATLAB(TM).
Returns
-------
ZS : ndarray
A linkage matrix compatible with this library.
"""
Z = np.asarray(Z, dtype=np.double, order='c')
Zs = Z.shape
# If it's empty, return it.
if len(Zs) == 0 or (len(Zs) == 1 and Zs[0] == 0):
return Z.copy()
if len(Zs) != 2:
raise ValueError("The linkage array must be rectangular.")
# If it contains no rows, return it.
if Zs[0] == 0:
return Z.copy()
Zpart = Z.copy()
if Zpart[:, 0:2].min() != 1.0 and Zpart[:, 0:2].max() != 2 * Zs[0]:
raise ValueError('The format of the indices is not 1..N')
Zpart[:, 0:2] -= 1.0
CS = np.zeros((Zs[0],), dtype=np.double)
_hierarchy.calculate_cluster_sizes(Zpart, CS, int(Zs[0]) + 1)
return np.hstack([Zpart, CS.reshape(Zs[0], 1)])
def to_mlab_linkage(Z):
"""
Converts a linkage matrix to a MATLAB(TM) compatible one.
Converts a linkage matrix ``Z`` generated by the linkage function
of this module to a MATLAB(TM) compatible one. The return linkage
matrix has the last column removed and the cluster indices are
converted to ``1..N`` indexing.
Parameters
----------
Z : ndarray
A linkage matrix generated by this library.
Returns
-------
to_mlab_linkage : ndarray
A linkage matrix compatible with MATLAB(TM)'s hierarchical
clustering functions.
The return linkage matrix has the last column removed
and the cluster indices are converted to ``1..N`` indexing.
"""
Z = np.asarray(Z, order='c', dtype=np.double)
Zs = Z.shape
if len(Zs) == 0 or (len(Zs) == 1 and Zs[0] == 0):
return Z.copy()
is_valid_linkage(Z, throw=True, name='Z')
ZP = Z[:, 0:3].copy()
ZP[:, 0:2] += 1.0
return ZP
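# Minimal sketch (not part of the original module): converting to the
# MATLAB(TM) convention and back should reproduce the original linkage matrix.
def _example_mlab_round_trip():
    import numpy as np
    Z = linkage(np.random.rand(5, 2), method='complete')
    Z_mlab = to_mlab_linkage(Z)      # 1-based indices, count column dropped
    return np.allclose(from_mlab_linkage(Z_mlab), Z)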
def is_monotonic(Z):
"""
Returns True if the linkage passed is monotonic.
The linkage is monotonic if for every cluster :math:`s` and :math:`t`
joined, the distance between them is no less than the distance
between any previously joined clusters.
Parameters
----------
Z : ndarray
The linkage matrix to check for monotonicity.
Returns
-------
b : bool
A boolean indicating whether the linkage is monotonic.
"""
Z = np.asarray(Z, order='c')
is_valid_linkage(Z, throw=True, name='Z')
# Monotonic means each merge distance is no less than the one that precedes it.
return (Z[1:, 2] >= Z[:-1, 2]).all()
def is_valid_im(R, warning=False, throw=False, name=None):
"""Returns True if the inconsistency matrix passed is valid.
It must be a :math:`n` by 4 numpy array of doubles. The standard
deviations ``R[:,1]`` must be nonnegative. The link counts
``R[:,2]`` must be positive and no greater than :math:`n-1`.
Parameters
----------
R : ndarray
The inconsistency matrix to check for validity.
warning : bool, optional
When True, issues a Python warning if the linkage
matrix passed is invalid.
throw : bool, optional
When True, throws a Python exception if the linkage
matrix passed is invalid.
name : str, optional
This string refers to the variable name of the invalid
linkage matrix.
Returns
-------
b : bool
True if the inconsistency matrix is valid.
"""
R = np.asarray(R, order='c')
valid = True
try:
if type(R) != np.ndarray:
if name:
raise TypeError(('Variable \'%s\' passed as inconsistency '
'matrix is not a numpy array.') % name)
else:
raise TypeError('Variable passed as inconsistency matrix '
'is not a numpy array.')
if R.dtype != np.double:
if name:
raise TypeError(('Inconsistency matrix \'%s\' must contain '
'doubles (double).') % name)
else:
raise TypeError('Inconsistency matrix must contain doubles '
'(double).')
if len(R.shape) != 2:
if name:
raise ValueError(('Inconsistency matrix \'%s\' must have '
'shape=2 (i.e. be two-dimensional).') % name)
else:
raise ValueError('Inconsistency matrix must have shape=2 '
'(i.e. be two-dimensional).')
if R.shape[1] != 4:
if name:
raise ValueError(('Inconsistency matrix \'%s\' must have 4 '
'columns.') % name)
else:
raise ValueError('Inconsistency matrix must have 4 columns.')
if R.shape[0] < 1:
if name:
raise ValueError(('Inconsistency matrix \'%s\' must have at '
'least one row.') % name)
else:
raise ValueError('Inconsistency matrix must have at least '
'one row.')
if (R[:, 0] < 0).any():
if name:
raise ValueError(('Inconsistency matrix \'%s\' contains '
'negative link height means.') % name)
else:
raise ValueError('Inconsistency matrix contains negative '
'link height means.')
if (R[:, 1] < 0).any():
if name:
raise ValueError(('Inconsistency matrix \'%s\' contains '
'negative link height standard '
'deviations.') % name)
else:
raise ValueError('Inconsistency matrix contains negative '
'link height standard deviations.')
if (R[:, 2] < 0).any():
if name:
raise ValueError(('Inconsistency matrix \'%s\' contains '
'negative link counts.') % name)
else:
raise ValueError('Inconsistency matrix contains negative '
'link counts.')
except Exception as e:
if throw:
raise
if warning:
_warning(str(e))
valid = False
return valid
def is_valid_linkage(Z, warning=False, throw=False, name=None):
"""
Checks the validity of a linkage matrix.
A linkage matrix is valid if it is a two dimensional
ndarray (type double) with :math:`n`
rows and 4 columns. The first two columns must contain indices
between 0 and :math:`2n-1`. For a given row ``i``,
:math:`0 \\leq \\mathtt{Z[i,0]} \\leq i+n-1`
and :math:`0 \\leq Z[i,1] \\leq i+n-1`
(i.e. a cluster cannot join another cluster unless the cluster
being joined has been generated.)
Parameters
----------
Z : array_like
Linkage matrix.
warning : bool, optional
When True, issues a Python warning if the linkage
matrix passed is invalid.
throw : bool, optional
When True, throws a Python exception if the linkage
matrix passed is invalid.
name : str, optional
This string refers to the variable name of the invalid
linkage matrix.
Returns
-------
b : bool
True if the linkage matrix is valid.
"""
Z = np.asarray(Z, order='c')
valid = True
try:
if type(Z) != np.ndarray:
if name:
raise TypeError(('\'%s\' passed as a linkage is not a valid '
'array.') % name)
else:
raise TypeError('Variable is not a valid array.')
if Z.dtype != np.double:
if name:
raise TypeError('Linkage matrix \'%s\' must contain doubles.'
% name)
else:
raise TypeError('Linkage matrix must contain doubles.')
if len(Z.shape) != 2:
if name:
raise ValueError(('Linkage matrix \'%s\' must have shape=2 '
'(i.e. be two-dimensional).') % name)
else:
raise ValueError('Linkage matrix must have shape=2 '
'(i.e. be two-dimensional).')
if Z.shape[1] != 4:
if name:
raise ValueError('Linkage matrix \'%s\' must have 4 columns.'
% name)
else:
raise ValueError('Linkage matrix must have 4 columns.')
if Z.shape[0] == 0:
raise ValueError('Linkage must be computed on at least two '
'observations.')
n = Z.shape[0]
if n > 1:
if ((Z[:, 0] < 0).any() or (Z[:, 1] < 0).any()):
if name:
raise ValueError(('Linkage \'%s\' contains negative '
'indices.') % name)
else:
raise ValueError('Linkage contains negative indices.')
if (Z[:, 2] < 0).any():
if name:
raise ValueError(('Linkage \'%s\' contains negative '
'distances.') % name)
else:
raise ValueError('Linkage contains negative distances.')
if (Z[:, 3] < 0).any():
if name:
raise ValueError('Linkage \'%s\' contains negative counts.'
% name)
else:
raise ValueError('Linkage contains negative counts.')
if _check_hierarchy_uses_cluster_before_formed(Z):
if name:
raise ValueError(('Linkage \'%s\' uses non-singleton cluster '
'before its formed.') % name)
else:
raise ValueError("Linkage uses non-singleton cluster before "
"it's formed.")
if _check_hierarchy_uses_cluster_more_than_once(Z):
if name:
raise ValueError(('Linkage \'%s\' uses the same cluster more '
'than once.') % name)
else:
raise ValueError('Linkage uses the same cluster more than '
'once.')
except Exception as e:
if throw:
raise
if warning:
_warning(str(e))
valid = False
return valid
def _check_hierarchy_uses_cluster_before_formed(Z):
n = Z.shape[0] + 1
for i in xrange(0, n - 1):
if Z[i, 0] >= n + i or Z[i, 1] >= n + i:
return True
return False
def _check_hierarchy_uses_cluster_more_than_once(Z):
n = Z.shape[0] + 1
chosen = set([])
for i in xrange(0, n - 1):
if (Z[i, 0] in chosen) or (Z[i, 1] in chosen) or Z[i, 0] == Z[i, 1]:
return True
chosen.add(Z[i, 0])
chosen.add(Z[i, 1])
return False
def _check_hierarchy_not_all_clusters_used(Z):
n = Z.shape[0] + 1
chosen = set([])
for i in xrange(0, n - 1):
chosen.add(int(Z[i, 0]))
chosen.add(int(Z[i, 1]))
must_chosen = set(range(0, 2 * n - 2))
return len(must_chosen.difference(chosen)) > 0
def num_obs_linkage(Z):
"""
Returns the number of original observations of the linkage matrix
passed.
Parameters
----------
Z : ndarray
The linkage matrix on which to perform the operation.
Returns
-------
n : int
The number of original observations in the linkage.
"""
Z = np.asarray(Z, order='c')
is_valid_linkage(Z, throw=True, name='Z')
return (Z.shape[0] + 1)
def correspond(Z, Y):
"""
Checks for correspondence between linkage and condensed distance matrices
They must have the same number of original observations for
the check to succeed.
This function is useful as a sanity check in algorithms that make
extensive use of linkage and distance matrices that must
correspond to the same set of original observations.
Parameters
----------
Z : array_like
The linkage matrix to check for correspondence.
Y : array_like
The condensed distance matrix to check for correspondence.
Returns
-------
b : bool
A boolean indicating whether the linkage matrix and distance
matrix could possibly correspond to one another.
"""
is_valid_linkage(Z, throw=True)
distance.is_valid_y(Y, throw=True)
Z = np.asarray(Z, order='c')
Y = np.asarray(Y, order='c')
return distance.num_obs_y(Y) == num_obs_linkage(Z)
def fcluster(Z, t, criterion='inconsistent', depth=2, R=None, monocrit=None):
"""
Forms flat clusters from the hierarchical clustering defined by
the linkage matrix ``Z``.
Parameters
----------
Z : ndarray
The hierarchical clustering encoded with the matrix returned
by the `linkage` function.
t : float
The threshold to apply when forming flat clusters.
criterion : str, optional
The criterion to use in forming flat clusters. This can
be any of the following values:
``inconsistent`` : If a cluster node and all its
descendants have an inconsistent value less than or equal
to `t` then all its leaf descendants belong to the
same flat cluster. When no non-singleton cluster meets
this criterion, every node is assigned to its own
cluster. (Default)
``distance`` : Forms flat clusters so that the original
observations in each flat cluster have no greater a
cophenetic distance than `t`.
``maxclust`` : Finds a minimum threshold ``r`` so that
the cophenetic distance between any two original
observations in the same flat cluster is no more than
``r`` and no more than `t` flat clusters are formed.
``monocrit`` : Forms a flat cluster from a cluster node c
with index i when ``monocrit[j] <= t``.
For example, to threshold on the maximum mean distance
as computed in the inconsistency matrix R with a
threshold of 0.8 do:
MR = maxRstat(Z, R, 3)
fcluster(Z, t=0.8, criterion='monocrit', monocrit=MR)
``maxclust_monocrit`` : Forms a flat cluster from a
non-singleton cluster node ``c`` when ``monocrit[i] <=
r`` for all cluster indices ``i`` below and including
``c``. ``r`` is minimized such that no more than ``t``
flat clusters are formed. monocrit must be
monotonic. For example, to minimize the threshold t on
maximum inconsistency values so that no more than 3 flat
clusters are formed, do:
MI = maxinconsts(Z, R)
fcluster(Z, t=3, criterion='maxclust_monocrit', monocrit=MI)
depth : int, optional
The maximum depth to perform the inconsistency calculation.
It has no meaning for the other criteria. Default is 2.
R : ndarray, optional
The inconsistency matrix to use for the 'inconsistent'
criterion. This matrix is computed if not provided.
monocrit : ndarray, optional
An array of length n-1. `monocrit[i]` is the
statistic upon which non-singleton i is thresholded. The
monocrit vector must be monotonic, i.e. given a node c with
index i, for all node indices j corresponding to nodes
below c, `monocrit[i] >= monocrit[j]`.
Returns
-------
fcluster : ndarray
An array of length n. T[i] is the flat cluster number to
which original observation i belongs.
"""
Z = np.asarray(Z, order='c')
is_valid_linkage(Z, throw=True, name='Z')
n = Z.shape[0] + 1
T = np.zeros((n,), dtype='i')
# Since the C code does not support striding using strides.
# The dimensions are used instead.
[Z] = _copy_arrays_if_base_present([Z])
if criterion == 'inconsistent':
if R is None:
R = inconsistent(Z, depth)
else:
R = np.asarray(R, order='c')
is_valid_im(R, throw=True, name='R')
        # Since the C code does not support striding using strides,
        # the dimensions are used instead.
[R] = _copy_arrays_if_base_present([R])
_hierarchy.cluster_in(Z, R, T, float(t), int(n))
elif criterion == 'distance':
_hierarchy.cluster_dist(Z, T, float(t), int(n))
elif criterion == 'maxclust':
_hierarchy.cluster_maxclust_dist(Z, T, int(n), int(t))
elif criterion == 'monocrit':
[monocrit] = _copy_arrays_if_base_present([monocrit])
_hierarchy.cluster_monocrit(Z, monocrit, T, float(t), int(n))
elif criterion == 'maxclust_monocrit':
[monocrit] = _copy_arrays_if_base_present([monocrit])
_hierarchy.cluster_maxclust_monocrit(Z, monocrit, T, int(n), int(t))
else:
raise ValueError('Invalid cluster formation criterion: %s'
% str(criterion))
return T
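# Illustrative usage sketch (hypothetical helper, not part of the upstream
# module): demonstrates the 'distance' and 'maxclust' criteria documented
# above on a tiny, made-up set of observations.
def _example_fcluster_usage():
    obs = np.array([[0.0, 0.0], [0.1, 0.0], [5.0, 5.0], [5.1, 5.0]])
    y = distance.pdist(obs)           # condensed distance matrix
    Z = linkage(y, method='single')   # hierarchical clustering of the 4 points
    # Cut the tree at cophenetic distance 1.0; the two tight pairs separate.
    by_distance = fcluster(Z, t=1.0, criterion='distance')
    # Alternatively, request at most two flat clusters directly.
    by_maxclust = fcluster(Z, t=2, criterion='maxclust')
    return by_distance, by_maxclust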
def fclusterdata(X, t, criterion='inconsistent',
metric='euclidean', depth=2, method='single', R=None):
"""
Cluster observation data using a given metric.
Clusters the original observations in the n-by-m data
matrix X (n observations in m dimensions), using the euclidean
distance metric to calculate distances between original observations,
performs hierarchical clustering using the single linkage algorithm,
and forms flat clusters using the inconsistency method with `t` as the
cut-off threshold.
A one-dimensional array T of length n is returned. T[i] is the index
of the flat cluster to which the original observation i belongs.
Parameters
----------
X : (N, M) ndarray
N by M data matrix with N observations in M dimensions.
t : float
The threshold to apply when forming flat clusters.
criterion : str, optional
Specifies the criterion for forming flat clusters. Valid
values are 'inconsistent' (default), 'distance', or 'maxclust'
cluster formation algorithms. See `fcluster` for descriptions.
metric : str, optional
The distance metric for calculating pairwise distances. See
`distance.pdist` for descriptions and linkage to verify
compatibility with the linkage method.
depth : int, optional
The maximum depth for the inconsistency calculation. See
`inconsistent` for more information.
method : str, optional
        The linkage method to use (single, complete, average,
        weighted, median, centroid, ward). See `linkage` for more
information. Default is "single".
R : ndarray, optional
The inconsistency matrix. It will be computed if necessary
if it is not passed.
Returns
-------
fclusterdata : ndarray
A vector of length n. T[i] is the flat cluster number to
which original observation i belongs.
Notes
-----
This function is similar to the MATLAB function clusterdata.
"""
X = np.asarray(X, order='c', dtype=np.double)
if type(X) != np.ndarray or len(X.shape) != 2:
raise TypeError('The observation matrix X must be an n by m numpy '
'array.')
Y = distance.pdist(X, metric=metric)
Z = linkage(Y, method=method)
if R is None:
R = inconsistent(Z, d=depth)
else:
R = np.asarray(R, order='c')
T = fcluster(Z, criterion=criterion, depth=depth, R=R, t=t)
return T
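# Illustrative sketch (hypothetical helper, not library code): `fclusterdata`
# is the one-call convenience wrapper around pdist -> linkage -> fcluster
# shown above; the random data here is only for demonstration.
def _example_fclusterdata_usage():
    X = np.random.RandomState(3).rand(20, 4)
    return fclusterdata(X, t=1.0, criterion='distance',
                        metric='euclidean', method='average')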
def leaves_list(Z):
"""
Returns a list of leaf node ids
The return corresponds to the observation vector index as it appears
in the tree from left to right. Z is a linkage matrix.
Parameters
----------
Z : ndarray
The hierarchical clustering encoded as a matrix. `Z` is
a linkage matrix. See ``linkage`` for more information.
Returns
-------
leaves_list : ndarray
The list of leaf node ids.
"""
Z = np.asarray(Z, order='c')
is_valid_linkage(Z, throw=True, name='Z')
n = Z.shape[0] + 1
ML = np.zeros((n,), dtype='i')
[Z] = _copy_arrays_if_base_present([Z])
_hierarchy.prelist(Z, ML, int(n))
return ML
# Maps number of leaves to text size.
#
# p <= 20, size="12"
# 20 < p <= 30, size="10"
# 30 < p <= 50, size="8"
# 50 < p <= 85, size="6"
# 85 < p, size="5"
_dtextsizes = {20: 12, 30: 10, 50: 8, 85: 6, np.inf: 5}
_drotation = {20: 0, 40: 45, np.inf: 90}
_dtextsortedkeys = list(_dtextsizes.keys())
_dtextsortedkeys.sort()
_drotationsortedkeys = list(_drotation.keys())
_drotationsortedkeys.sort()
def _remove_dups(L):
"""
Removes duplicates AND preserves the original order of the elements.
The set class is not guaranteed to do this.
"""
seen_before = set([])
L2 = []
for i in L:
if i not in seen_before:
seen_before.add(i)
L2.append(i)
return L2
def _get_tick_text_size(p):
for k in _dtextsortedkeys:
if p <= k:
return _dtextsizes[k]
def _get_tick_rotation(p):
for k in _drotationsortedkeys:
if p <= k:
return _drotation[k]
def _plot_dendrogram(icoords, dcoords, ivl, p, n, mh, orientation,
no_labels, color_list, leaf_font_size=None,
leaf_rotation=None, contraction_marks=None,
ax=None):
# Import matplotlib here so that it's not imported unless dendrograms
# are plotted. Raise an informative error if importing fails.
try:
# if an axis is provided, don't use pylab at all
if ax is None:
import matplotlib.pylab
import matplotlib.patches
import matplotlib.collections
except ImportError:
raise ImportError("You must install the matplotlib library to plot the dendrogram. Use no_plot=True to calculate the dendrogram without plotting.")
if ax is None:
ax = matplotlib.pylab.gca()
# if we're using pylab, we want to trigger a draw at the end
trigger_redraw = True
else:
trigger_redraw = False
# Independent variable plot width
ivw = len(ivl) * 10
    # Dependent variable plot height
dvw = mh + mh * 0.05
ivticks = np.arange(5, len(ivl) * 10 + 5, 10)
if orientation == 'top':
ax.set_ylim([0, dvw])
ax.set_xlim([0, ivw])
xlines = icoords
ylines = dcoords
if no_labels:
ax.set_xticks([])
ax.set_xticklabels([])
else:
ax.set_xticks(ivticks)
ax.set_xticklabels(ivl)
ax.xaxis.set_ticks_position('bottom')
lbls = ax.get_xticklabels()
if leaf_rotation:
map(lambda lbl: lbl.set_rotation(leaf_rotation), lbls)
else:
leaf_rot = float(_get_tick_rotation(len(ivl)))
map(lambda lbl: lbl.set_rotation(leaf_rot), lbls)
if leaf_font_size:
map(lambda lbl: lbl.set_size(leaf_font_size), lbls)
else:
leaf_fs = float(_get_tick_text_size(len(ivl)))
map(lambda lbl: lbl.set_rotation(leaf_fs), lbls)
# Make the tick marks invisible because they cover up the links
for line in ax.get_xticklines():
line.set_visible(False)
elif orientation == 'bottom':
ax.set_ylim([dvw, 0])
ax.set_xlim([0, ivw])
xlines = icoords
ylines = dcoords
if no_labels:
ax.set_xticks([])
ax.set_xticklabels([])
else:
ax.set_xticks(ivticks)
ax.set_xticklabels(ivl)
lbls = ax.get_xticklabels()
if leaf_rotation:
map(lambda lbl: lbl.set_rotation(leaf_rotation), lbls)
else:
leaf_rot = float(_get_tick_rotation(p))
map(lambda lbl: lbl.set_rotation(leaf_rot), lbls)
if leaf_font_size:
map(lambda lbl: lbl.set_size(leaf_font_size), lbls)
else:
leaf_fs = float(_get_tick_text_size(p))
map(lambda lbl: lbl.set_rotation(leaf_fs), lbls)
ax.xaxis.set_ticks_position('top')
# Make the tick marks invisible because they cover up the links
for line in ax.get_xticklines():
line.set_visible(False)
elif orientation == 'left':
ax.set_xlim([0, dvw])
ax.set_ylim([0, ivw])
xlines = dcoords
ylines = icoords
if no_labels:
ax.set_yticks([])
ax.set_yticklabels([])
else:
ax.set_yticks(ivticks)
ax.set_yticklabels(ivl)
lbls = ax.get_yticklabels()
if leaf_rotation:
map(lambda lbl: lbl.set_rotation(leaf_rotation), lbls)
if leaf_font_size:
map(lambda lbl: lbl.set_size(leaf_font_size), lbls)
ax.yaxis.set_ticks_position('left')
# Make the tick marks invisible because they cover up the
# links
for line in ax.get_yticklines():
line.set_visible(False)
elif orientation == 'right':
ax.set_xlim([dvw, 0])
ax.set_ylim([0, ivw])
xlines = dcoords
ylines = icoords
if no_labels:
ax.set_yticks([])
ax.set_yticklabels([])
else:
ax.set_yticks(ivticks)
ax.set_yticklabels(ivl)
lbls = ax.get_yticklabels()
if leaf_rotation:
map(lambda lbl: lbl.set_rotation(leaf_rotation), lbls)
if leaf_font_size:
map(lambda lbl: lbl.set_size(leaf_font_size), lbls)
ax.yaxis.set_ticks_position('right')
# Make the tick marks invisible because they cover up the links
for line in ax.get_yticklines():
line.set_visible(False)
# Let's use collections instead. This way there is a separate legend
# item for each tree grouping, rather than stupidly one for each line
# segment.
colors_used = _remove_dups(color_list)
color_to_lines = {}
for color in colors_used:
color_to_lines[color] = []
for (xline, yline, color) in zip(xlines, ylines, color_list):
color_to_lines[color].append(list(zip(xline, yline)))
colors_to_collections = {}
# Construct the collections.
for color in colors_used:
coll = matplotlib.collections.LineCollection(color_to_lines[color],
colors=(color,))
colors_to_collections[color] = coll
# Add all the non-blue link groupings, i.e. those groupings below the
# color threshold.
for color in colors_used:
if color != 'b':
ax.add_collection(colors_to_collections[color])
# If there is a blue grouping (i.e., links above the color threshold),
# it should go last.
if 'b' in colors_to_collections:
ax.add_collection(colors_to_collections['b'])
if contraction_marks is not None:
if orientation in ('left', 'right'):
for (x, y) in contraction_marks:
e = matplotlib.patches.Ellipse((y, x),
width=dvw / 100, height=1.0)
ax.add_artist(e)
e.set_clip_box(ax.bbox)
e.set_alpha(0.5)
e.set_facecolor('k')
if orientation in ('top', 'bottom'):
for (x, y) in contraction_marks:
e = matplotlib.patches.Ellipse((x, y),
width=1.0, height=dvw / 100)
ax.add_artist(e)
e.set_clip_box(ax.bbox)
e.set_alpha(0.5)
e.set_facecolor('k')
if trigger_redraw:
matplotlib.pylab.draw_if_interactive()
_link_line_colors = ['g', 'r', 'c', 'm', 'y', 'k']
def set_link_color_palette(palette):
"""
Set list of matplotlib color codes for dendrogram color_threshold.
Parameters
----------
palette : list
A list of matplotlib color codes. The order of
the color codes is the order in which the colors are cycled
through when color thresholding in the dendrogram.
"""
if type(palette) not in (list, tuple):
raise TypeError("palette must be a list or tuple")
_ptypes = [isinstance(p, string_types) for p in palette]
if False in _ptypes:
raise TypeError("all palette list elements must be color strings")
for i in list(_link_line_colors):
_link_line_colors.remove(i)
_link_line_colors.extend(list(palette))
def dendrogram(Z, p=30, truncate_mode=None, color_threshold=None,
get_leaves=True, orientation='top', labels=None,
count_sort=False, distance_sort=False, show_leaf_counts=True,
no_plot=False, no_labels=False, color_list=None,
leaf_font_size=None, leaf_rotation=None, leaf_label_func=None,
no_leaves=False, show_contracted=False,
link_color_func=None, ax=None):
"""
Plots the hierarchical clustering as a dendrogram.
The dendrogram illustrates how each cluster is
composed by drawing a U-shaped link between a non-singleton
cluster and its children. The height of the top of the U-link is
the distance between its children clusters. It is also the
cophenetic distance between original observations in the two
children clusters. It is expected that the distances in Z[:,2] be
monotonic, otherwise crossings appear in the dendrogram.
Parameters
----------
Z : ndarray
The linkage matrix encoding the hierarchical clustering to
render as a dendrogram. See the ``linkage`` function for more
information on the format of ``Z``.
p : int, optional
The ``p`` parameter for ``truncate_mode``.
truncate_mode : str, optional
The dendrogram can be hard to read when the original
observation matrix from which the linkage is derived is
large. Truncation is used to condense the dendrogram. There
are several modes:
``None/'none'``
No truncation is performed (Default).
``'lastp'``
          The last ``p`` non-singleton clusters formed in the linkage are
          the only non-leaf nodes in the linkage; they correspond to rows
``Z[n-p-2:end]`` in ``Z``. All other non-singleton clusters are
contracted into leaf nodes.
``'mlab'``
This corresponds to MATLAB(TM) behavior. (not implemented yet)
``'level'/'mtica'``
No more than ``p`` levels of the dendrogram tree are displayed.
This corresponds to Mathematica(TM) behavior.
color_threshold : double, optional
For brevity, let :math:`t` be the ``color_threshold``.
Colors all the descendent links below a cluster node
:math:`k` the same color if :math:`k` is the first node below
the cut threshold :math:`t`. All links connecting nodes with
distances greater than or equal to the threshold are colored
blue. If :math:`t` is less than or equal to zero, all nodes
are colored blue. If ``color_threshold`` is None or
'default', corresponding with MATLAB(TM) behavior, the
threshold is set to ``0.7*max(Z[:,2])``.
get_leaves : bool, optional
Includes a list ``R['leaves']=H`` in the result
dictionary. For each :math:`i`, ``H[i] == j``, cluster node
``j`` appears in position ``i`` in the left-to-right traversal
of the leaves, where :math:`j < 2n-1` and :math:`i < n`.
orientation : str, optional
The direction to plot the dendrogram, which can be any
of the following strings:
        ``'top'``
          Plots the root at the top, and plots descendent links going downwards
          (default).
        ``'bottom'``
          Plots the root at the bottom, and plots descendent links going
          upwards.
        ``'left'``
          Plots the root at the left, and plots descendent links going right.
        ``'right'``
          Plots the root at the right, and plots descendent links going left.
labels : ndarray, optional
By default ``labels`` is None so the index of the original observation
is used to label the leaf nodes. Otherwise, this is an :math:`n`
-sized list (or tuple). The ``labels[i]`` value is the text to put
under the :math:`i` th leaf node only if it corresponds to an original
observation and not a non-singleton cluster.
count_sort : str or bool, optional
        For each node n, the order (visually, from left-to-right) in which n's
        two descendent links are plotted is determined by this
parameter, which can be any of the following values:
``False``
Nothing is done.
``'ascending'`` or ``True``
The child with the minimum number of original objects in its cluster
is plotted first.
        ``'descending'``
The child with the maximum number of original objects in its cluster
is plotted first.
Note ``distance_sort`` and ``count_sort`` cannot both be True.
distance_sort : str or bool, optional
        For each node n, the order (visually, from left-to-right) in which n's
        two descendent links are plotted is determined by this
parameter, which can be any of the following values:
``False``
Nothing is done.
``'ascending'`` or ``True``
The child with the minimum distance between its direct descendents is
plotted first.
``'descending'``
The child with the maximum distance between its direct descendents is
plotted first.
Note ``distance_sort`` and ``count_sort`` cannot both be True.
show_leaf_counts : bool, optional
When True, leaf nodes representing :math:`k>1` original
        observations are labeled with the number of observations they
contain in parentheses.
no_plot : bool, optional
When True, the final rendering is not performed. This is
useful if only the data structures computed for the rendering
are needed or if matplotlib is not available.
no_labels : bool, optional
When True, no labels appear next to the leaf nodes in the
rendering of the dendrogram.
leaf_rotation : double, optional
Specifies the angle (in degrees) to rotate the leaf
labels. When unspecified, the rotation is based on the number of
nodes in the dendrogram (default is 0).
leaf_font_size : int, optional
Specifies the font size (in points) of the leaf labels. When
        unspecified, the size is based on the number of nodes in the
dendrogram.
leaf_label_func : lambda or function, optional
        When leaf_label_func is a callable function, it is called for each
        leaf with cluster index :math:`k < 2n-1`, and is expected to return
        a string with the label for that leaf.
Indices :math:`k < n` correspond to original observations
while indices :math:`k \\geq n` correspond to non-singleton
clusters.
For example, to label singletons with their node id and
non-singletons with their id, count, and inconsistency
coefficient, simply do:
>>> # First define the leaf label function.
>>> def llf(id):
... if id < n:
... return str(id)
... else:
        ...         return '[%d %d %1.2f]' % (id, count, R[n-id,3])
>>>
>>> # The text for the leaf nodes is going to be big so force
>>> # a rotation of 90 degrees.
>>> dendrogram(Z, leaf_label_func=llf, leaf_rotation=90)
show_contracted : bool, optional
When True the heights of non-singleton nodes contracted
into a leaf node are plotted as crosses along the link
connecting that leaf node. This really is only useful when
truncation is used (see ``truncate_mode`` parameter).
link_color_func : callable, optional
        If given, `link_color_func` is called with each non-singleton id
corresponding to each U-shaped link it will paint. The function is
expected to return the color to paint the link, encoded as a matplotlib
color string code. For example:
>>> dendrogram(Z, link_color_func=lambda k: colors[k])
colors the direct links below each untruncated non-singleton node
``k`` using ``colors[k]``.
ax : matplotlib Axes instance, optional
If None and `no_plot` is not True, the dendrogram will be plotted
on the current axes. Otherwise if `no_plot` is not True the
dendrogram will be plotted on the given ``Axes`` instance. This can be
useful if the dendrogram is part of a more complex figure.
Returns
-------
R : dict
A dictionary of data structures computed to render the
        dendrogram. It has the following keys:
``'color_list'``
A list of color names. The k'th element represents the color of the
k'th link.
``'icoord'`` and ``'dcoord'``
Each of them is a list of lists. Let ``icoord = [I1, I2, ..., Ip]``
where ``Ik = [xk1, xk2, xk3, xk4]`` and ``dcoord = [D1, D2, ..., Dp]``
where ``Dk = [yk1, yk2, yk3, yk4]``, then the k'th link painted is
``(xk1, yk1)`` - ``(xk2, yk2)`` - ``(xk3, yk3)`` - ``(xk4, yk4)``.
``'ivl'``
A list of labels corresponding to the leaf nodes.
``'leaves'``
For each i, ``H[i] == j``, cluster node ``j`` appears in position
``i`` in the left-to-right traversal of the leaves, where
:math:`j < 2n-1` and :math:`i < n`. If ``j`` is less than ``n``, the
``i``-th leaf node corresponds to an original observation.
Otherwise, it corresponds to a non-singleton cluster.
"""
# Features under consideration.
#
# ... = dendrogram(..., leaves_order=None)
#
# Plots the leaves in the order specified by a vector of
# original observation indices. If the vector contains duplicates
# or results in a crossing, an exception will be thrown. Passing
# None orders leaf nodes based on the order they appear in the
# pre-order traversal.
Z = np.asarray(Z, order='c')
if orientation not in ["top", "left", "bottom", "right"]:
raise ValueError("orientation must be one of 'top', 'left', "
"'bottom', or 'right'")
is_valid_linkage(Z, throw=True, name='Z')
Zs = Z.shape
n = Zs[0] + 1
if type(p) in (int, float):
p = int(p)
else:
raise TypeError('The second argument must be a number')
if truncate_mode not in ('lastp', 'mlab', 'mtica', 'level', 'none', None):
raise ValueError('Invalid truncation mode.')
if truncate_mode == 'lastp' or truncate_mode == 'mlab':
if p > n or p == 0:
p = n
if truncate_mode == 'mtica' or truncate_mode == 'level':
if p <= 0:
p = np.inf
if get_leaves:
lvs = []
else:
lvs = None
icoord_list = []
dcoord_list = []
color_list = []
current_color = [0]
currently_below_threshold = [False]
if no_leaves:
ivl = None
else:
ivl = []
if color_threshold is None or \
(isinstance(color_threshold, string_types) and
color_threshold == 'default'):
color_threshold = max(Z[:, 2]) * 0.7
R = {'icoord': icoord_list, 'dcoord': dcoord_list, 'ivl': ivl,
'leaves': lvs, 'color_list': color_list}
if show_contracted:
contraction_marks = []
else:
contraction_marks = None
_dendrogram_calculate_info(
Z=Z, p=p,
truncate_mode=truncate_mode,
color_threshold=color_threshold,
get_leaves=get_leaves,
orientation=orientation,
labels=labels,
count_sort=count_sort,
distance_sort=distance_sort,
show_leaf_counts=show_leaf_counts,
i=2 * n - 2, iv=0.0, ivl=ivl, n=n,
icoord_list=icoord_list,
dcoord_list=dcoord_list, lvs=lvs,
current_color=current_color,
color_list=color_list,
currently_below_threshold=currently_below_threshold,
leaf_label_func=leaf_label_func,
contraction_marks=contraction_marks,
link_color_func=link_color_func)
if not no_plot:
mh = max(Z[:, 2])
_plot_dendrogram(icoord_list, dcoord_list, ivl, p, n, mh, orientation,
no_labels, color_list, leaf_font_size=leaf_font_size,
leaf_rotation=leaf_rotation,
contraction_marks=contraction_marks,
ax=ax)
return R
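# Illustrative sketch (hypothetical helper, not library code): computes the
# dendrogram data structures for a truncated tree without plotting, which is
# useful when matplotlib is unavailable; assumes only the API defined above.
def _example_dendrogram_usage():
    obs = np.random.RandomState(0).rand(12, 2)
    Z = linkage(distance.pdist(obs), method='average')
    # Keep only the last 4 merges as leaves; no_plot=True returns the computed
    # 'icoord', 'dcoord', 'ivl', 'leaves' and 'color_list' structures without
    # importing matplotlib or drawing anything.
    R = dendrogram(Z, truncate_mode='lastp', p=4, show_contracted=True,
                   no_plot=True)
    return R['ivl'], R['leaves']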
def _append_singleton_leaf_node(Z, p, n, level, lvs, ivl, leaf_label_func,
i, labels):
# If the leaf id structure is not None and is a list then the caller
# to dendrogram has indicated that cluster id's corresponding to the
# leaf nodes should be recorded.
if lvs is not None:
lvs.append(int(i))
# If leaf node labels are to be displayed...
if ivl is not None:
# If a leaf_label_func has been provided, the label comes from the
# string returned from the leaf_label_func, which is a function
# passed to dendrogram.
if leaf_label_func:
ivl.append(leaf_label_func(int(i)))
else:
# Otherwise, if the dendrogram caller has passed a labels list
# for the leaf nodes, use it.
if labels is not None:
ivl.append(labels[int(i - n)])
else:
                # Otherwise, use the id as the label for the leaf.
ivl.append(str(int(i)))
def _append_nonsingleton_leaf_node(Z, p, n, level, lvs, ivl, leaf_label_func,
i, labels, show_leaf_counts):
# If the leaf id structure is not None and is a list then the caller
# to dendrogram has indicated that cluster id's corresponding to the
# leaf nodes should be recorded.
if lvs is not None:
lvs.append(int(i))
if ivl is not None:
if leaf_label_func:
ivl.append(leaf_label_func(int(i)))
else:
if show_leaf_counts:
ivl.append("(" + str(int(Z[i - n, 3])) + ")")
else:
ivl.append("")
def _append_contraction_marks(Z, iv, i, n, contraction_marks):
_append_contraction_marks_sub(Z, iv, int(Z[i - n, 0]), n, contraction_marks)
_append_contraction_marks_sub(Z, iv, int(Z[i - n, 1]), n, contraction_marks)
def _append_contraction_marks_sub(Z, iv, i, n, contraction_marks):
if i >= n:
contraction_marks.append((iv, Z[i - n, 2]))
_append_contraction_marks_sub(Z, iv, int(Z[i - n, 0]), n, contraction_marks)
_append_contraction_marks_sub(Z, iv, int(Z[i - n, 1]), n, contraction_marks)
def _dendrogram_calculate_info(Z, p, truncate_mode,
color_threshold=np.inf, get_leaves=True,
orientation='top', labels=None,
count_sort=False, distance_sort=False,
show_leaf_counts=False, i=-1, iv=0.0,
ivl=[], n=0, icoord_list=[], dcoord_list=[],
lvs=None, mhr=False,
current_color=[], color_list=[],
currently_below_threshold=[],
leaf_label_func=None, level=0,
contraction_marks=None,
link_color_func=None):
"""
    Calculates the endpoints of the links as well as the labels for
the dendrogram rooted at the node with index i. iv is the independent
variable value to plot the left-most leaf node below the root node i
(if orientation='top', this would be the left-most x value where the
plotting of this root node i and its descendents should begin).
ivl is a list to store the labels of the leaf nodes. The leaf_label_func
is called whenever ivl != None, labels == None, and
leaf_label_func != None. When ivl != None and labels != None, the
labels list is used only for labeling the leaf nodes. When
ivl == None, no labels are generated for leaf nodes.
When get_leaves==True, a list of leaves is built as they are visited
in the dendrogram.
Returns a tuple with l being the independent variable coordinate that
corresponds to the midpoint of cluster to the left of cluster i if
i is non-singleton, otherwise the independent coordinate of the leaf
node if i is a leaf node.
Returns
-------
A tuple (left, w, h, md), where:
      * left is the independent variable coordinate of the center of
        the U of the subtree
* w is the amount of space used for the subtree (in independent
variable units)
* h is the height of the subtree in dependent variable units
* md is the max(Z[*,2]) for all nodes * below and including
the target node.
"""
if n == 0:
raise ValueError("Invalid singleton cluster count n.")
if i == -1:
raise ValueError("Invalid root cluster index i.")
if truncate_mode == 'lastp':
        # If the node is a leaf node but corresponds to a non-singleton
        # cluster, its label is either the empty string or the number of original
# observations belonging to cluster i.
if i < 2 * n - p and i >= n:
d = Z[i - n, 2]
_append_nonsingleton_leaf_node(Z, p, n, level, lvs, ivl,
leaf_label_func, i, labels,
show_leaf_counts)
if contraction_marks is not None:
_append_contraction_marks(Z, iv + 5.0, i, n, contraction_marks)
return (iv + 5.0, 10.0, 0.0, d)
elif i < n:
_append_singleton_leaf_node(Z, p, n, level, lvs, ivl,
leaf_label_func, i, labels)
return (iv + 5.0, 10.0, 0.0, 0.0)
elif truncate_mode in ('mtica', 'level'):
if i > n and level > p:
d = Z[i - n, 2]
_append_nonsingleton_leaf_node(Z, p, n, level, lvs, ivl,
leaf_label_func, i, labels,
show_leaf_counts)
if contraction_marks is not None:
_append_contraction_marks(Z, iv + 5.0, i, n, contraction_marks)
return (iv + 5.0, 10.0, 0.0, d)
elif i < n:
_append_singleton_leaf_node(Z, p, n, level, lvs, ivl,
leaf_label_func, i, labels)
return (iv + 5.0, 10.0, 0.0, 0.0)
elif truncate_mode in ('mlab',):
pass
# Otherwise, only truncate if we have a leaf node.
#
# If the truncate_mode is mlab, the linkage has been modified
# with the truncated tree.
#
# Only place leaves if they correspond to original observations.
if i < n:
_append_singleton_leaf_node(Z, p, n, level, lvs, ivl,
leaf_label_func, i, labels)
return (iv + 5.0, 10.0, 0.0, 0.0)
# !!! Otherwise, we don't have a leaf node, so work on plotting a
# non-leaf node.
# Actual indices of a and b
aa = int(Z[i - n, 0])
ab = int(Z[i - n, 1])
if aa > n:
# The number of singletons below cluster a
na = Z[aa - n, 3]
# The distance between a's two direct children.
da = Z[aa - n, 2]
else:
na = 1
da = 0.0
if ab > n:
nb = Z[ab - n, 3]
db = Z[ab - n, 2]
else:
nb = 1
db = 0.0
if count_sort == 'ascending' or count_sort == True:
# If a has a count greater than b, it and its descendents should
# be drawn to the right. Otherwise, to the left.
if na > nb:
# The cluster index to draw to the left (ua) will be ab
# and the one to draw to the right (ub) will be aa
ua = ab
ub = aa
else:
ua = aa
ub = ab
elif count_sort == 'descending':
# If a has a count less than or equal to b, it and its
# descendents should be drawn to the left. Otherwise, to
# the right.
if na > nb:
ua = aa
ub = ab
else:
ua = ab
ub = aa
elif distance_sort == 'ascending' or distance_sort == True:
# If a has a distance greater than b, it and its descendents should
# be drawn to the right. Otherwise, to the left.
if da > db:
ua = ab
ub = aa
else:
ua = aa
ub = ab
elif distance_sort == 'descending':
# If a has a distance less than or equal to b, it and its
# descendents should be drawn to the left. Otherwise, to
# the right.
if da > db:
ua = aa
ub = ab
else:
ua = ab
ub = aa
else:
ua = aa
ub = ab
# Updated iv variable and the amount of space used.
(uiva, uwa, uah, uamd) = \
_dendrogram_calculate_info(
Z=Z, p=p,
truncate_mode=truncate_mode,
color_threshold=color_threshold,
get_leaves=get_leaves,
orientation=orientation,
labels=labels,
count_sort=count_sort,
distance_sort=distance_sort,
show_leaf_counts=show_leaf_counts,
i=ua, iv=iv, ivl=ivl, n=n,
icoord_list=icoord_list,
dcoord_list=dcoord_list, lvs=lvs,
current_color=current_color,
color_list=color_list,
currently_below_threshold=currently_below_threshold,
leaf_label_func=leaf_label_func,
level=level + 1, contraction_marks=contraction_marks,
link_color_func=link_color_func)
h = Z[i - n, 2]
if h >= color_threshold or color_threshold <= 0:
c = 'b'
if currently_below_threshold[0]:
current_color[0] = (current_color[0] + 1) % len(_link_line_colors)
currently_below_threshold[0] = False
else:
currently_below_threshold[0] = True
c = _link_line_colors[current_color[0]]
(uivb, uwb, ubh, ubmd) = \
_dendrogram_calculate_info(
Z=Z, p=p,
truncate_mode=truncate_mode,
color_threshold=color_threshold,
get_leaves=get_leaves,
orientation=orientation,
labels=labels,
count_sort=count_sort,
distance_sort=distance_sort,
show_leaf_counts=show_leaf_counts,
i=ub, iv=iv + uwa, ivl=ivl, n=n,
icoord_list=icoord_list,
dcoord_list=dcoord_list, lvs=lvs,
current_color=current_color,
color_list=color_list,
currently_below_threshold=currently_below_threshold,
leaf_label_func=leaf_label_func,
level=level + 1, contraction_marks=contraction_marks,
link_color_func=link_color_func)
max_dist = max(uamd, ubmd, h)
icoord_list.append([uiva, uiva, uivb, uivb])
dcoord_list.append([uah, h, h, ubh])
if link_color_func is not None:
v = link_color_func(int(i))
if not isinstance(v, string_types):
raise TypeError("link_color_func must return a matplotlib "
"color string!")
color_list.append(v)
else:
color_list.append(c)
return (((uiva + uivb) / 2), uwa + uwb, h, max_dist)
def is_isomorphic(T1, T2):
"""
Determines if two different cluster assignments are equivalent.
Parameters
----------
T1 : array_like
An assignment of singleton cluster ids to flat cluster ids.
T2 : array_like
An assignment of singleton cluster ids to flat cluster ids.
Returns
-------
b : bool
Whether the flat cluster assignments `T1` and `T2` are
equivalent.
"""
T1 = np.asarray(T1, order='c')
T2 = np.asarray(T2, order='c')
if type(T1) != np.ndarray:
raise TypeError('T1 must be a numpy array.')
if type(T2) != np.ndarray:
raise TypeError('T2 must be a numpy array.')
T1S = T1.shape
T2S = T2.shape
if len(T1S) != 1:
raise ValueError('T1 must be one-dimensional.')
if len(T2S) != 1:
raise ValueError('T2 must be one-dimensional.')
if T1S[0] != T2S[0]:
raise ValueError('T1 and T2 must have the same number of elements.')
n = T1S[0]
d = {}
for i in xrange(0, n):
if T1[i] in d:
if d[T1[i]] != T2[i]:
return False
else:
d[T1[i]] = T2[i]
return True
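# Illustrative sketch (hypothetical helper, not library code): two flat
# cluster assignments that differ only by a relabeling of the cluster ids
# are considered equivalent by `is_isomorphic`.
def _example_is_isomorphic_usage():
    T1 = [1, 1, 2, 2, 3]
    T2 = [7, 7, 5, 5, 9]           # same partition, different ids
    return is_isomorphic(T1, T2)   # expected: True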
def maxdists(Z):
"""
    Returns the maximum distance for each non-singleton cluster and its
    descendents.
Parameters
----------
Z : ndarray
The hierarchical clustering encoded as a matrix. See
``linkage`` for more information.
Returns
-------
maxdists : ndarray
A ``(n-1)`` sized numpy array of doubles; ``MD[i]`` represents
the maximum distance between any cluster (including
singletons) below and including the node with index i. More
specifically, ``MD[i] = Z[Q(i)-n, 2].max()`` where ``Q(i)`` is the
set of all node indices below and including node i.
"""
Z = np.asarray(Z, order='c', dtype=np.double)
is_valid_linkage(Z, throw=True, name='Z')
n = Z.shape[0] + 1
MD = np.zeros((n - 1,))
[Z] = _copy_arrays_if_base_present([Z])
_hierarchy.get_max_dist_for_each_cluster(Z, MD, int(n))
return MD
def maxinconsts(Z, R):
"""
Returns the maximum inconsistency coefficient for each
non-singleton cluster and its descendents.
Parameters
----------
Z : ndarray
The hierarchical clustering encoded as a matrix. See
``linkage`` for more information.
R : ndarray
The inconsistency matrix.
Returns
-------
MI : ndarray
A monotonic ``(n-1)``-sized numpy array of doubles.
"""
Z = np.asarray(Z, order='c')
R = np.asarray(R, order='c')
is_valid_linkage(Z, throw=True, name='Z')
is_valid_im(R, throw=True, name='R')
n = Z.shape[0] + 1
if Z.shape[0] != R.shape[0]:
raise ValueError("The inconsistency matrix and linkage matrix each "
"have a different number of rows.")
MI = np.zeros((n - 1,))
[Z, R] = _copy_arrays_if_base_present([Z, R])
_hierarchy.get_max_Rfield_for_each_cluster(Z, R, MI, int(n), 3)
return MI
def maxRstat(Z, R, i):
"""
Returns the maximum statistic for each non-singleton cluster and
its descendents.
Parameters
----------
Z : array_like
The hierarchical clustering encoded as a matrix. See
``linkage`` for more information.
R : array_like
The inconsistency matrix.
i : int
The column of `R` to use as the statistic.
Returns
-------
MR : ndarray
Calculates the maximum statistic for the i'th column of the
inconsistency matrix `R` for each non-singleton cluster
node. ``MR[j]`` is the maximum over ``R[Q(j)-n, i]`` where
``Q(j)`` the set of all node ids corresponding to nodes below
and including ``j``.
"""
Z = np.asarray(Z, order='c')
R = np.asarray(R, order='c')
is_valid_linkage(Z, throw=True, name='Z')
is_valid_im(R, throw=True, name='R')
if type(i) is not int:
raise TypeError('The third argument must be an integer.')
if i < 0 or i > 3:
raise ValueError('i must be an integer between 0 and 3 inclusive.')
if Z.shape[0] != R.shape[0]:
raise ValueError("The inconsistency matrix and linkage matrix each "
"have a different number of rows.")
n = Z.shape[0] + 1
MR = np.zeros((n - 1,))
[Z, R] = _copy_arrays_if_base_present([Z, R])
_hierarchy.get_max_Rfield_for_each_cluster(Z, R, MR, int(n), i)
return MR
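# Illustrative sketch (hypothetical helper, not library code): mirrors the
# 'monocrit' recipe from the `fcluster` docstring above, thresholding flat
# clusters on the per-subtree maximum of the inconsistency coefficient.
def _example_maxRstat_usage():
    obs = np.random.RandomState(1).rand(10, 3)
    Z = linkage(distance.pdist(obs), method='complete')
    R = inconsistent(Z)        # inconsistency matrix, default depth
    MR = maxRstat(Z, R, 3)     # column 3 is the inconsistency coefficient
    return fcluster(Z, t=0.8, criterion='monocrit', monocrit=MR)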
def leaders(Z, T):
"""
Returns the root nodes in a hierarchical clustering.
Returns the root nodes in a hierarchical clustering corresponding
to a cut defined by a flat cluster assignment vector ``T``. See
the ``fcluster`` function for more information on the format of ``T``.
For each flat cluster :math:`j` of the :math:`k` flat clusters
represented in the n-sized flat cluster assignment vector ``T``,
this function finds the lowest cluster node :math:`i` in the linkage
tree Z such that:
* leaf descendents belong only to flat cluster j
(i.e. ``T[p]==j`` for all :math:`p` in :math:`S(i)` where
:math:`S(i)` is the set of leaf ids of leaf nodes descendent
with cluster node :math:`i`)
* there does not exist a leaf that is not descendent with
:math:`i` that also belongs to cluster :math:`j`
(i.e. ``T[q]!=j`` for all :math:`q` not in :math:`S(i)`). If
this condition is violated, ``T`` is not a valid cluster
assignment vector, and an exception will be thrown.
Parameters
----------
Z : ndarray
The hierarchical clustering encoded as a matrix. See
``linkage`` for more information.
T : ndarray
The flat cluster assignment vector.
Returns
-------
L : ndarray
The leader linkage node id's stored as a k-element 1-D array
where ``k`` is the number of flat clusters found in ``T``.
``L[j]=i`` is the linkage cluster node id that is the
leader of flat cluster with id M[j]. If ``i < n``, ``i``
corresponds to an original observation, otherwise it
corresponds to a non-singleton cluster.
For example: if ``L[3]=2`` and ``M[3]=8``, the flat cluster with
id 8's leader is linkage node 2.
M : ndarray
The leader linkage node id's stored as a k-element 1-D array where
``k`` is the number of flat clusters found in ``T``. This allows the
set of flat cluster ids to be any arbitrary set of ``k`` integers.
"""
Z = np.asarray(Z, order='c')
T = np.asarray(T, order='c')
if type(T) != np.ndarray or T.dtype != 'i':
raise TypeError('T must be a one-dimensional numpy array of integers.')
is_valid_linkage(Z, throw=True, name='Z')
if len(T) != Z.shape[0] + 1:
raise ValueError('Mismatch: len(T)!=Z.shape[0] + 1.')
Cl = np.unique(T)
kk = len(Cl)
L = np.zeros((kk,), dtype='i')
M = np.zeros((kk,), dtype='i')
n = Z.shape[0] + 1
[Z, T] = _copy_arrays_if_base_present([Z, T])
s = _hierarchy.leaders(Z, T, L, M, int(kk), int(n))
if s >= 0:
raise ValueError(('T is not a valid assignment vector. Error found '
'when examining linkage node %d (< 2n-1).') % s)
return (L, M)
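# Illustrative sketch (hypothetical helper, not library code): recovers, for
# each flat cluster produced by `fcluster`, the linkage node that leads it
# as defined in the docstring above.
def _example_leaders_usage():
    obs = np.random.RandomState(2).rand(8, 2)
    Z = linkage(distance.pdist(obs), method='single')
    T = fcluster(Z, t=3, criterion='maxclust')
    L, M = leaders(Z, T)       # L[j] leads the flat cluster with id M[j]
    return L, M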
# These are test functions to help me test the leaders function.
def _leaders_test(Z, T):
tr = to_tree(Z)
_leaders_test_recurs_mark(tr, T)
return tr
def _leader_identify(tr, T):
if tr.is_leaf():
return T[tr.id]
else:
left = tr.get_left()
right = tr.get_right()
lfid = _leader_identify(left, T)
rfid = _leader_identify(right, T)
print('ndid: %d lid: %d lfid: %d rid: %d rfid: %d'
% (tr.get_id(), left.get_id(), lfid, right.get_id(), rfid))
if lfid != rfid:
if lfid != -1:
print('leader: %d with tag %d' % (left.id, lfid))
if rfid != -1:
print('leader: %d with tag %d' % (right.id, rfid))
return -1
else:
return lfid
def _leaders_test_recurs_mark(tr, T):
if tr.is_leaf():
tr.asgn = T[tr.id]
else:
tr.asgn = -1
_leaders_test_recurs_mark(tr.left, T)
_leaders_test_recurs_mark(tr.right, T)
| mit |
HKuz/Test_Code | pythonCheatsheet.py | 1 | 6673 | #!/usr/local/bin/python
# Python Numpy and Pandas Cheatsheet
# Source: https://elitedatascience.com/python-cheat-sheet
# Importing Data
'''
pd.read_csv(filename) # From a CSV file
pd.read_table(filename) # From a delimited text file (like TSV)
pd.read_excel(filename) # From an Excel file
pd.read_sql(query, connection_object) # Reads from a SQL table/database
pd.read_json(json_string) # Reads from a JSON formatted string, URL or file.
pd.read_html(url) # Parses an html URL, string or file and extracts tables to
# a list of dataframes
pd.read_clipboard() # Takes the contents of your clipboard and
# passes it to read_table()
pd.DataFrame(dict) # From a dict, keys for column names, values for
# data as lists
'''
# Exploring Data
'''
df.shape # Tuple of (number of rows, number of columns); an attribute, not a method
df.head(n) # Prints first n rows of the DataFrame
df.tail(n) # Prints last n rows of the DataFrame
df.info() # Index, Datatype and Memory information
df.describe() # Summary statistics for numerical columns
s.value_counts(dropna=False) # Views unique values and counts
df.apply(pd.Series.value_counts) # Unique values and counts for all columns
df.mean() # Returns the mean of all columns
df.corr() # Returns the correlation between columns in a DataFrame
df.count() # Returns the number of non-null values in each DataFrame column
df.max() # Returns the highest value in each column
df.min() # Returns the lowest value in each column
df.median() # Returns the median of each column
df.std() # Returns the standard deviation of each column
'''
# Selecting
'''
df[col] # Returns column with label col as Series
df[[col1, col2]] # Returns Columns as a new DataFrame
s.iloc[0] # Selection by position (selects first element)
s.loc[0] # Selection by index (selects element at index 0)
df.iloc[0,:] # First row
df.iloc[0,0] # First element of first column
'''
# Data Cleaning
'''
df.columns = ['a','b','c'] # Renames columns
pd.isnull() # Checks for null Values, Returns Boolean Array
pd.notnull() # Opposite of s.isnull()
df.dropna() # Drops all rows that contain null values
df.dropna(axis=1) # Drops all columns that contain null values
df.dropna(axis=1,thresh=n) # Drops all columns that have fewer than n non-null
# values
df.fillna(x) # Replaces all null values with x
s.fillna(s.mean()) # Replaces all null values with the mean (mean can be
# replaced with almost any function from the statistics section)
s.astype(float) # Converts the datatype of the series to float
s.replace(1,'one') # Replaces all values equal to 1 with 'one'
s.replace([1,3],['one','three']) # Replaces all 1 with 'one' and 3
# with 'three'
df.rename(columns=lambda x: x + 1) # Mass renaming of columns
df.rename(columns={'old_name': 'new_name'}) # Selective renaming
df.set_index('column_one') # Changes the index
df.rename(index=lambda x: x + 1) # Mass renaming of index
'''
# Filter, Sort, and Group By
'''
df[df[col] > 0.5] # Rows where the col column is greater than 0.5
df[(df[col] > 0.5) & (df[col] < 0.7)] # Rows where 0.5 < col < 0.7
df.sort_values(col1) # Sorts values by col1 in ascending order
df.sort_values(col2,ascending=False) # Sorts values by col2 in descending
# order
df.sort_values([col1,col2], ascending=[True,False]) # Sorts values by col1 in
# ascending order then col2 in descending order
df.groupby(col) # Returns a groupby object for values from one column
df.groupby([col1,col2]) # Returns a groupby object values from multiple
# columns
df.groupby(col1)[col2].mean() # Returns the mean of the values in col2,
# grouped by the values in col1 (mean can be replaced with almost any function
# from the statistics section)
df.pivot_table(index=col1, values=[col2,col3], aggfunc=mean) # Creates a
# pivot table that groups by col1 and calculates the mean of col2 and col3
df.groupby(col1).agg(np.mean) # Finds the average across all columns for
# every unique column 1 group
df.apply(np.mean) # Applies a function across each column
df.apply(np.max, axis=1) # Applies a function across each row
'''
# Joining and Combining
'''
df1.append(df2) # Adds the rows in df1 to the end of df2 (columns should be
#identical)
pd.concat([df1, df2],axis=1) # Adds the columns in df1 to the end of df2
# (rows should be identical)
df1.join(df2,on=col1,how='inner') # SQL-style joins the columns in df1 with
# the columns of df2 where the rows for col1 have identical values. how can be
# one of 'left', 'right', 'outer', 'inner'
'''
# Writing Data
'''
df.to_csv(filename) # Writes to a CSV file
df.to_excel(filename) # Writes to an Excel file
df.to_sql(table_name, connection_object) # Writes to a SQL table
df.to_json(filename) # Writes to a file in JSON format
df.to_html(filename) # Saves as an HTML table
df.to_clipboard() # Writes to the clipboard
'''
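# Worked mini-example (illustrative sketch; the 'city' and 'sales' columns are
# made up) chaining a few of the operations listed above:
'''
df = pd.DataFrame({'city': ['A', 'A', 'B', 'B'],
                   'sales': [10.0, 12.0, 7.0, None]})
df['sales'] = df['sales'].fillna(df['sales'].mean())  # impute the missing value
high_sales = df[df['sales'] > 8]                      # boolean filter
per_city = df.groupby('city')['sales'].mean()         # aggregate per group
df.to_csv('sales.csv', index=False)                   # persist the result
'''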
# Machine Learning
# Import libraries and modules
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn import preprocessing
from sklearn.ensemble import RandomForestRegressor
from sklearn.pipeline import make_pipeline
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import mean_squared_error, r2_score
from sklearn.externals import joblib
# Load red wine data.
dataset_url = ('http://mlr.cs.umass.edu/ml/machine-learning-databases/'
'wine-quality/winequality-red.csv')
data = pd.read_csv(dataset_url, sep=';')
# Split data into training and test sets
y = data.quality
X = data.drop('quality', axis=1)
X_train, X_test, y_train, y_test = train_test_split(X, y,
test_size=0.2,
random_state=123,
stratify=y)
# Declare data preprocessing steps
pipeline = make_pipeline(preprocessing.StandardScaler(),
RandomForestRegressor(n_estimators=100))
# Declare hyperparameters to tune
hyperparameters = {'randomforestregressor__max_features':
['auto', 'sqrt', 'log2'],
'randomforestregressor__max_depth': [None, 5, 3, 1]}
# Tune model using cross-validation pipeline
clf = GridSearchCV(pipeline, hyperparameters, cv=10)
clf.fit(X_train, y_train)
# Refit on the entire training set
# No additional code needed if clf.refit == True (default is True)
# Evaluate model pipeline on test data
pred = clf.predict(X_test)
print(r2_score(y_test, pred))
print(mean_squared_error(y_test, pred))
# Save model for future use
joblib.dump(clf, 'rf_regressor.pkl')
# To load: clf2 = joblib.load('rf_regressor.pkl')
| mit |
pjryan126/solid-start-careers | store/api/zillow/venv/lib/python2.7/site-packages/pandas/io/json.py | 1 | 25993 | # pylint: disable-msg=E1101,W0613,W0603
import os
import copy
from collections import defaultdict
import numpy as np
import pandas.json as _json
from pandas.tslib import iNaT
from pandas.compat import long, u
from pandas import compat, isnull
from pandas import Series, DataFrame, to_datetime
from pandas.io.common import get_filepath_or_buffer
from pandas.core.common import AbstractMethodError
import pandas.core.common as com
loads = _json.loads
dumps = _json.dumps
# interface to/from
def to_json(path_or_buf, obj, orient=None, date_format='epoch',
double_precision=10, force_ascii=True, date_unit='ms',
default_handler=None):
if isinstance(obj, Series):
s = SeriesWriter(
obj, orient=orient, date_format=date_format,
double_precision=double_precision, ensure_ascii=force_ascii,
date_unit=date_unit, default_handler=default_handler).write()
elif isinstance(obj, DataFrame):
s = FrameWriter(
obj, orient=orient, date_format=date_format,
double_precision=double_precision, ensure_ascii=force_ascii,
date_unit=date_unit, default_handler=default_handler).write()
else:
raise NotImplementedError("'obj' should be a Series or a DataFrame")
if isinstance(path_or_buf, compat.string_types):
with open(path_or_buf, 'w') as fh:
fh.write(s)
elif path_or_buf is None:
return s
else:
path_or_buf.write(s)
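# Illustrative round-trip sketch (hypothetical helper, not pandas source): the
# module-level `to_json` above serializes a DataFrame to a string when
# path_or_buf is None, and `read_json` defined below parses it back.
def _example_json_roundtrip():
    df = DataFrame({'a': [1, 2], 'b': ['x', 'y']})
    s = to_json(None, df, orient='split', date_format='iso')
    return read_json(s, orient='split')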
class Writer(object):
def __init__(self, obj, orient, date_format, double_precision,
ensure_ascii, date_unit, default_handler=None):
self.obj = obj
if orient is None:
orient = self._default_orient
self.orient = orient
self.date_format = date_format
self.double_precision = double_precision
self.ensure_ascii = ensure_ascii
self.date_unit = date_unit
self.default_handler = default_handler
self.is_copy = None
self._format_axes()
def _format_axes(self):
raise AbstractMethodError(self)
def write(self):
return dumps(
self.obj,
orient=self.orient,
double_precision=self.double_precision,
ensure_ascii=self.ensure_ascii,
date_unit=self.date_unit,
iso_dates=self.date_format == 'iso',
default_handler=self.default_handler)
class SeriesWriter(Writer):
_default_orient = 'index'
def _format_axes(self):
if not self.obj.index.is_unique and self.orient == 'index':
raise ValueError("Series index must be unique for orient="
"'%s'" % self.orient)
class FrameWriter(Writer):
_default_orient = 'columns'
def _format_axes(self):
""" try to axes if they are datelike """
if not self.obj.index.is_unique and self.orient in (
'index', 'columns'):
raise ValueError("DataFrame index must be unique for orient="
"'%s'." % self.orient)
if not self.obj.columns.is_unique and self.orient in (
'index', 'columns', 'records'):
raise ValueError("DataFrame columns must be unique for orient="
"'%s'." % self.orient)
def read_json(path_or_buf=None, orient=None, typ='frame', dtype=True,
convert_axes=True, convert_dates=True, keep_default_dates=True,
numpy=False, precise_float=False, date_unit=None):
"""
Convert a JSON string to pandas object
Parameters
----------
path_or_buf : a valid JSON string or file-like, default: None
The string could be a URL. Valid URL schemes include http, ftp, s3, and
file. For file URLs, a host is expected. For instance, a local file
could be ``file://localhost/path/to/table.json``
orient
* `Series`
- default is ``'index'``
- allowed values are: ``{'split','records','index'}``
- The Series index must be unique for orient ``'index'``.
* `DataFrame`
- default is ``'columns'``
- allowed values are: {'split','records','index','columns','values'}
- The DataFrame index must be unique for orients 'index' and
'columns'.
- The DataFrame columns must be unique for orients 'index',
'columns', and 'records'.
* The format of the JSON string
- split : dict like
``{index -> [index], columns -> [columns], data -> [values]}``
- records : list like
``[{column -> value}, ... , {column -> value}]``
- index : dict like ``{index -> {column -> value}}``
- columns : dict like ``{column -> {index -> value}}``
- values : just the values array
typ : type of object to recover (series or frame), default 'frame'
dtype : boolean or dict, default True
If True, infer dtypes, if a dict of column to dtype, then use those,
if False, then don't infer dtypes at all, applies only to the data.
convert_axes : boolean, default True
Try to convert the axes to the proper dtypes.
convert_dates : boolean, default True
List of columns to parse for dates; If True, then try to parse
datelike columns default is True; a column label is datelike if
* it ends with ``'_at'``,
* it ends with ``'_time'``,
* it begins with ``'timestamp'``,
* it is ``'modified'``, or
* it is ``'date'``
keep_default_dates : boolean, default True
If parsing dates, then parse the default datelike columns
numpy : boolean, default False
Direct decoding to numpy arrays. Supports numeric data only, but
non-numeric column and index labels are supported. Note also that the
JSON ordering MUST be the same for each term if numpy=True.
precise_float : boolean, default False
Set to enable usage of higher precision (strtod) function when
decoding string to double values. Default (False) is to use fast but
less precise builtin functionality
date_unit : string, default None
The timestamp unit to detect if converting dates. The default behaviour
is to try and detect the correct precision, but if this is not desired
then pass one of 's', 'ms', 'us' or 'ns' to force parsing only seconds,
milliseconds, microseconds or nanoseconds respectively.
Returns
-------
result : Series or DataFrame
"""
filepath_or_buffer, _, _ = get_filepath_or_buffer(path_or_buf)
if isinstance(filepath_or_buffer, compat.string_types):
try:
exists = os.path.exists(filepath_or_buffer)
# if the filepath is too long will raise here
# 5874
except (TypeError, ValueError):
exists = False
if exists:
with open(filepath_or_buffer, 'r') as fh:
json = fh.read()
else:
json = filepath_or_buffer
elif hasattr(filepath_or_buffer, 'read'):
json = filepath_or_buffer.read()
else:
json = filepath_or_buffer
obj = None
if typ == 'frame':
obj = FrameParser(json, orient, dtype, convert_axes, convert_dates,
keep_default_dates, numpy, precise_float,
date_unit).parse()
if typ == 'series' or obj is None:
if not isinstance(dtype, bool):
dtype = dict(data=dtype)
obj = SeriesParser(json, orient, dtype, convert_axes, convert_dates,
keep_default_dates, numpy, precise_float,
date_unit).parse()
return obj
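# Illustrative sketch (hypothetical helper, not pandas source): parses the two
# most common orients documented above from plain JSON strings.
def _example_read_json_orients():
    records = '[{"a": 1, "b": "x"}, {"a": 2, "b": "y"}]'
    split = '{"columns":["a","b"],"index":[0,1],"data":[[1,"x"],[2,"y"]]}'
    df_records = read_json(records, orient='records')
    df_split = read_json(split, orient='split')
    return df_records, df_split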
class Parser(object):
_STAMP_UNITS = ('s', 'ms', 'us', 'ns')
_MIN_STAMPS = {
's': long(31536000),
'ms': long(31536000000),
'us': long(31536000000000),
'ns': long(31536000000000000)}
def __init__(self, json, orient, dtype=True, convert_axes=True,
convert_dates=True, keep_default_dates=False, numpy=False,
precise_float=False, date_unit=None):
self.json = json
if orient is None:
orient = self._default_orient
self.orient = orient
self.dtype = dtype
if orient == "split":
numpy = False
if date_unit is not None:
date_unit = date_unit.lower()
if date_unit not in self._STAMP_UNITS:
raise ValueError('date_unit must be one of %s' %
(self._STAMP_UNITS,))
self.min_stamp = self._MIN_STAMPS[date_unit]
else:
self.min_stamp = self._MIN_STAMPS['s']
self.numpy = numpy
self.precise_float = precise_float
self.convert_axes = convert_axes
self.convert_dates = convert_dates
self.date_unit = date_unit
self.keep_default_dates = keep_default_dates
self.obj = None
def check_keys_split(self, decoded):
"checks that dict has only the appropriate keys for orient='split'"
bad_keys = set(decoded.keys()).difference(set(self._split_keys))
if bad_keys:
bad_keys = ", ".join(bad_keys)
raise ValueError(u("JSON data had unexpected key(s): %s") %
com.pprint_thing(bad_keys))
def parse(self):
# try numpy
numpy = self.numpy
if numpy:
self._parse_numpy()
else:
self._parse_no_numpy()
if self.obj is None:
return None
if self.convert_axes:
self._convert_axes()
self._try_convert_types()
return self.obj
def _convert_axes(self):
""" try to convert axes """
for axis in self.obj._AXIS_NUMBERS.keys():
new_axis, result = self._try_convert_data(
axis, self.obj._get_axis(axis), use_dtypes=False,
convert_dates=True)
if result:
setattr(self.obj, axis, new_axis)
def _try_convert_types(self):
raise AbstractMethodError(self)
def _try_convert_data(self, name, data, use_dtypes=True,
convert_dates=True):
""" try to parse a ndarray like into a column by inferring dtype """
# don't try to coerce, unless a force conversion
if use_dtypes:
if self.dtype is False:
return data, False
elif self.dtype is True:
pass
else:
# dtype to force
dtype = (self.dtype.get(name)
if isinstance(self.dtype, dict) else self.dtype)
if dtype is not None:
try:
dtype = np.dtype(dtype)
return data.astype(dtype), True
except:
return data, False
if convert_dates:
new_data, result = self._try_convert_to_date(data)
if result:
return new_data, True
result = False
if data.dtype == 'object':
# try float
try:
data = data.astype('float64')
result = True
except:
pass
if data.dtype.kind == 'f':
if data.dtype != 'float64':
# coerce floats to 64
try:
data = data.astype('float64')
result = True
except:
pass
        # don't coerce 0-len data
if len(data) and (data.dtype == 'float' or data.dtype == 'object'):
# coerce ints if we can
try:
new_data = data.astype('int64')
if (new_data == data).all():
data = new_data
result = True
except:
pass
# coerce ints to 64
if data.dtype == 'int':
            # coerce ints to 64
try:
data = data.astype('int64')
result = True
except:
pass
return data, result
def _try_convert_to_date(self, data):
""" try to parse a ndarray like into a date column
try to coerce object in epoch/iso formats and
        integer/float in epoch formats, return a boolean if parsing
was successful """
# no conversion on empty
if not len(data):
return data, False
new_data = data
if new_data.dtype == 'object':
try:
new_data = data.astype('int64')
except:
pass
# ignore numbers that are out of range
if issubclass(new_data.dtype.type, np.number):
in_range = (isnull(new_data.values) | (new_data > self.min_stamp) |
(new_data.values == iNaT))
if not in_range.all():
return data, False
date_units = (self.date_unit,) if self.date_unit else self._STAMP_UNITS
for date_unit in date_units:
try:
new_data = to_datetime(new_data, errors='raise',
unit=date_unit)
except OverflowError:
continue
except:
break
return new_data, True
return data, False
def _try_convert_dates(self):
raise AbstractMethodError(self)
class SeriesParser(Parser):
_default_orient = 'index'
_split_keys = ('name', 'index', 'data')
def _parse_no_numpy(self):
json = self.json
orient = self.orient
if orient == "split":
decoded = dict((str(k), v)
for k, v in compat.iteritems(loads(
json,
precise_float=self.precise_float)))
self.check_keys_split(decoded)
self.obj = Series(dtype=None, **decoded)
else:
self.obj = Series(
loads(json, precise_float=self.precise_float), dtype=None)
def _parse_numpy(self):
json = self.json
orient = self.orient
if orient == "split":
decoded = loads(json, dtype=None, numpy=True,
precise_float=self.precise_float)
decoded = dict((str(k), v) for k, v in compat.iteritems(decoded))
self.check_keys_split(decoded)
self.obj = Series(**decoded)
elif orient == "columns" or orient == "index":
self.obj = Series(*loads(json, dtype=None, numpy=True,
labelled=True,
precise_float=self.precise_float))
else:
self.obj = Series(loads(json, dtype=None, numpy=True,
precise_float=self.precise_float))
def _try_convert_types(self):
if self.obj is None:
return
obj, result = self._try_convert_data(
'data', self.obj, convert_dates=self.convert_dates)
if result:
self.obj = obj
class FrameParser(Parser):
_default_orient = 'columns'
_split_keys = ('columns', 'index', 'data')
def _parse_numpy(self):
json = self.json
orient = self.orient
if orient == "columns":
args = loads(json, dtype=None, numpy=True, labelled=True,
precise_float=self.precise_float)
if args:
args = (args[0].T, args[2], args[1])
self.obj = DataFrame(*args)
elif orient == "split":
decoded = loads(json, dtype=None, numpy=True,
precise_float=self.precise_float)
decoded = dict((str(k), v) for k, v in compat.iteritems(decoded))
self.check_keys_split(decoded)
self.obj = DataFrame(**decoded)
elif orient == "values":
self.obj = DataFrame(loads(json, dtype=None, numpy=True,
precise_float=self.precise_float))
else:
self.obj = DataFrame(*loads(json, dtype=None, numpy=True,
labelled=True,
precise_float=self.precise_float))
def _parse_no_numpy(self):
json = self.json
orient = self.orient
if orient == "columns":
self.obj = DataFrame(
loads(json, precise_float=self.precise_float), dtype=None)
elif orient == "split":
decoded = dict((str(k), v)
for k, v in compat.iteritems(loads(
json,
precise_float=self.precise_float)))
self.check_keys_split(decoded)
self.obj = DataFrame(dtype=None, **decoded)
elif orient == "index":
self.obj = DataFrame(
loads(json, precise_float=self.precise_float), dtype=None).T
else:
self.obj = DataFrame(
loads(json, precise_float=self.precise_float), dtype=None)
def _process_converter(self, f, filt=None):
""" take a conversion function and possibly recreate the frame """
if filt is None:
filt = lambda col, c: True
needs_new_obj = False
new_obj = dict()
for i, (col, c) in enumerate(self.obj.iteritems()):
if filt(col, c):
new_data, result = f(col, c)
if result:
c = new_data
needs_new_obj = True
new_obj[i] = c
if needs_new_obj:
# possibly handle dup columns
new_obj = DataFrame(new_obj, index=self.obj.index)
new_obj.columns = self.obj.columns
self.obj = new_obj
def _try_convert_types(self):
if self.obj is None:
return
if self.convert_dates:
self._try_convert_dates()
self._process_converter(
lambda col, c: self._try_convert_data(col, c, convert_dates=False))
def _try_convert_dates(self):
if self.obj is None:
return
# our columns to parse
convert_dates = self.convert_dates
if convert_dates is True:
convert_dates = []
convert_dates = set(convert_dates)
def is_ok(col):
""" return if this col is ok to try for a date parse """
if not isinstance(col, compat.string_types):
return False
col_lower = col.lower()
if (col_lower.endswith('_at') or
col_lower.endswith('_time') or
col_lower == 'modified' or
col_lower == 'date' or
col_lower == 'datetime' or
col_lower.startswith('timestamp')):
return True
return False
self._process_converter(
lambda col, c: self._try_convert_to_date(c),
lambda col, c: ((self.keep_default_dates and is_ok(col)) or
col in convert_dates))
# ---------------------------------------------------------------------
# JSON normalization routines
def nested_to_record(ds, prefix="", level=0):
"""a simplified json_normalize
converts a nested dict into a flat dict ("record"), unlike json_normalize,
it does not attempt to extract a subset of the data.
Parameters
----------
ds : dict or list of dicts
prefix: the prefix, optional, default: ""
    level: the number of levels in the json string, optional, default: 0
Returns
-------
d - dict or list of dicts, matching `ds`
Examples
--------
IN[52]: nested_to_record(dict(flat1=1,dict1=dict(c=1,d=2),
nested=dict(e=dict(c=1,d=2),d=2)))
Out[52]:
{'dict1.c': 1,
'dict1.d': 2,
'flat1': 1,
'nested.d': 2,
'nested.e.c': 1,
'nested.e.d': 2}
"""
singleton = False
if isinstance(ds, dict):
ds = [ds]
singleton = True
new_ds = []
for d in ds:
new_d = copy.deepcopy(d)
for k, v in d.items():
# each key gets renamed with prefix
if level == 0:
newkey = str(k)
else:
newkey = prefix + '.' + str(k)
            # only dicts get recursively flattened
# only at level>1 do we rename the rest of the keys
if not isinstance(v, dict):
if level != 0: # so we skip copying for top level, common case
v = new_d.pop(k)
new_d[newkey] = v
continue
else:
v = new_d.pop(k)
new_d.update(nested_to_record(v, newkey, level + 1))
new_ds.append(new_d)
if singleton:
return new_ds[0]
return new_ds
def json_normalize(data, record_path=None, meta=None,
meta_prefix=None,
record_prefix=None):
"""
"Normalize" semi-structured JSON data into a flat table
Parameters
----------
data : dict or list of dicts
Unserialized JSON objects
record_path : string or list of strings, default None
Path in each object to list of records. If not passed, data will be
assumed to be an array of records
meta : list of paths (string or list of strings), default None
Fields to use as metadata for each record in resulting table
    record_prefix : string, default None
        If not None, this string is prepended to the record column names,
        e.g. a prefix of 'foo.bar.' gives columns like 'foo.bar.field'
    meta_prefix : string, default None
        If not None, this string is prepended to the metadata column names
Returns
-------
frame : DataFrame
Examples
--------
>>> data = [{'state': 'Florida',
... 'shortname': 'FL',
... 'info': {
... 'governor': 'Rick Scott'
... },
... 'counties': [{'name': 'Dade', 'population': 12345},
... {'name': 'Broward', 'population': 40000},
... {'name': 'Palm Beach', 'population': 60000}]},
... {'state': 'Ohio',
... 'shortname': 'OH',
... 'info': {
... 'governor': 'John Kasich'
... },
... 'counties': [{'name': 'Summit', 'population': 1234},
... {'name': 'Cuyahoga', 'population': 1337}]}]
>>> from pandas.io.json import json_normalize
>>> result = json_normalize(data, 'counties', ['state', 'shortname',
... ['info', 'governor']])
>>> result
name population info.governor state shortname
0 Dade 12345 Rick Scott Florida FL
1 Broward 40000 Rick Scott Florida FL
2 Palm Beach 60000 Rick Scott Florida FL
3 Summit 1234 John Kasich Ohio OH
4 Cuyahoga 1337 John Kasich Ohio OH
"""
def _pull_field(js, spec):
result = js
if isinstance(spec, list):
for field in spec:
result = result[field]
else:
result = result[spec]
return result
# A bit of a hackjob
if isinstance(data, dict):
data = [data]
if record_path is None:
if any([isinstance(x, dict) for x in compat.itervalues(data[0])]):
# naive normalization, this is idempotent for flat records
# and potentially will inflate the data considerably for
# deeply nested structures:
            # {VeryLong: {b: 1, c: 2}} -> {VeryLong.b: 1, VeryLong.c: 2}
#
# TODO: handle record value which are lists, at least error
# reasonably
data = nested_to_record(data)
return DataFrame(data)
elif not isinstance(record_path, list):
record_path = [record_path]
if meta is None:
meta = []
elif not isinstance(meta, list):
meta = [meta]
for i, x in enumerate(meta):
if not isinstance(x, list):
meta[i] = [x]
# Disastrously inefficient for now
records = []
lengths = []
meta_vals = defaultdict(list)
meta_keys = ['.'.join(val) for val in meta]
def _recursive_extract(data, path, seen_meta, level=0):
if len(path) > 1:
for obj in data:
for val, key in zip(meta, meta_keys):
if level + 1 == len(val):
seen_meta[key] = _pull_field(obj, val[-1])
_recursive_extract(obj[path[0]], path[1:],
seen_meta, level=level + 1)
else:
for obj in data:
recs = _pull_field(obj, path[0])
# For repeating the metadata later
lengths.append(len(recs))
for val, key in zip(meta, meta_keys):
if level + 1 > len(val):
meta_val = seen_meta[key]
else:
meta_val = _pull_field(obj, val[level:])
meta_vals[key].append(meta_val)
records.extend(recs)
_recursive_extract(data, record_path, {}, level=0)
result = DataFrame(records)
if record_prefix is not None:
result.rename(columns=lambda x: record_prefix + x, inplace=True)
# Data types, a problem
for k, v in compat.iteritems(meta_vals):
if meta_prefix is not None:
k = meta_prefix + k
if k in result:
raise ValueError('Conflicting metadata name %s, '
'need distinguishing prefix ' % k)
result[k] = np.array(v).repeat(lengths)
return result
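# Hedged illustration (a sketch, not part of the pandas API surface): a small,
# self-contained example of how record_prefix and meta_prefix rename the output
# columns, mirroring the renaming logic directly above. The input dict below is
# invented purely for the example and the function is never called here.
def _prefix_example():
    data = [{'state': 'Ohio',
             'counties': [{'name': 'Summit', 'population': 1234}]}]
    # record columns become 'county_name' and 'county_population';
    # the metadata column becomes 'meta_state'
    return json_normalize(data, 'counties', ['state'],
                          record_prefix='county_', meta_prefix='meta_')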
| gpl-2.0 |
TechTheLunatic/balise-t3 | Benchmark/python/main.py | 1 | 3879 | from math import sqrt
import matplotlib.pyplot as plt
def main():
filename = "Test acquisition vanilla-chocolate"
file = open("../" + filename + ".txt")
data = readDataFile_orderedFormat(file)
file.close()
# data = adjustRawData(data, 6500)
L = 3000
l = 2000
speedOfSound = 0.34 # in mm/µs
positionsEq1_x = []
positionsEq1_y = []
positionsEq2_x = []
positionsEq2_y = []
k2_list = []
k3_list = []
for i in range(len(data[0])):
k2_t = data[2][i] - data[0][i]
k3_t = data[1][i] - data[0][i]
k2 = k2_t * speedOfSound
k3 = k3_t * speedOfSound
k2_list.append(k2)
k3_list.append(k3)
position = getXY(k2, k3, L, l)
aberrant = False
for value in position:
if value < -1500 or value > 2000:
aberrant = True
if not aberrant:
positionsEq1_x.append(position[0])
positionsEq1_y.append(position[1])
positionsEq2_x.append(position[2])
positionsEq2_y.append(position[3])
else:
print("Valeur aberrante")
plt.figure()
plt.plot(positionsEq1_x, positionsEq1_y, 'ro')
plt.plot(positionsEq2_x, positionsEq2_y, 'go')
plt.savefig("output_" + filename + ".png")
plt.close()
plt.figure()
plt.plot(k2_list)
plt.savefig("k2_" + filename + ".png")
plt.close()
plt.figure()
plt.plot(k3_list)
plt.savefig("k3_" + filename + ".png")
plt.close()
def readDataFile_rawFormat(file):
data = [[], [], []]
for line in file:
splitLine = line.split(";")
beaconIndex = int(splitLine[0])
timestamp = int(splitLine[1])
data[beaconIndex].append(timestamp)
return data
def readDataFile_orderedFormat(file):
data = [[], [], []]
lineCount = 0
for line in file:
try:
splitLine = line.split(";")
data[0].append(int(splitLine[0]))
data[1].append(int(splitLine[1]))
data[2].append(int(splitLine[2]))
except ValueError:
print("Skip line nb" + str(lineCount))
lineCount += 1
return data
def adjustRawData(data, maxTimeDifference):
newData = data
i = 0
while i < min(len(newData[0]), len(newData[1]), len(newData[2])):
toDelete = []
for beacon in range(3):
for otherBeacon in range(3):
if otherBeacon != beacon:
if newData[otherBeacon][i] - newData[beacon][i] > maxTimeDifference:
toDelete.append(beacon)
if len(toDelete) == 0:
i += 1
else:
for item in toDelete:
print("pop b" + str(item) + " #" + str(i))
newData[item].pop(i)
for beaconData in newData:
while len(beaconData) > i:
print("POP!")
beaconData.pop()
return newData
# POSITIONING CALCULATION
def getXY(k2, k3, L, l):
cte = -(-2*k2+k3)**2*l**2*(k3**2-l**2)*(4*k2**2-l**2-4*L**2)*(4*k2**2-8*k2*k3+4*k3**2-l**2-4*L**2)
if cte >= 0:
sq = sqrt(cte)
else:
print("cte= " + str(cte) + " k2= " + str(k2) + " k3= " + str(k3))
return [0, 0, 0, 0]
ax = -8*k2**2*k3**2*L+8*k2*k3**3*L-4*k3**2*l**2*L+2*l**4*L
bx = sq
cx = 4*(4*k2**2*l**2-4*k2*k3*l**2+k3**2*l**2+4*k3**2*L**2-4*l**2*L**2)
ay = 16*k2**4*k3*l**2-32*k2**3*k3**2*l**2+20*k2**2*k3**3*l**2-4*k2*k3**4*l**2+16*k2**3*l**4-20*k2**2*k3*l**4+8*k2*k3**2*l**4-k3**3*l**4-16*k2**2*k3*l**2*L**2+32*k2*k3**2*l**2*L**2-12*k3**3*l**2*L**2-16*k2*l**4*L**2+8*k3*l**4*L**2
by = 2*k3*L*sq
cy = 4*(2*k2-k3)*l*(4*k2**2*l**2-4*k2*k3*l**2+k3**2*l**2+4*k3**2*L**2-4*l**2*L**2)
X1 = (ax + bx) / cx
Y1 = (ay + by) / cy
X2 = (ax - bx) / cx
Y2 = (ay - by) / cy
return [X1, Y1, X2, Y2]
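# Hedged example (illustrative sketch only, never called): getXY returns the two
# candidate (X, Y) solutions of the hyperbolic positioning equations for a pair
# of path-length differences k2, k3 and the beacon geometry L, l (all in mm).
# The numeric values below are invented just to show the call.
def _example_getXY():
    k2_example = 150.0   # hypothetical path-length difference, beacon 2 vs 1
    k3_example = -80.0   # hypothetical path-length difference, beacon 3 vs 1
    x1, y1, x2, y2 = getXY(k2_example, k3_example, 3000, 2000)
    print("candidate 1: (%.1f, %.1f)   candidate 2: (%.1f, %.1f)" % (x1, y1, x2, y2))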
main()
| gpl-3.0 |
LighthouseHPC/lighthouse | sandbox/ml/scikit/naiveBayes_kfold.py | 1 | 1288 | from sklearn.naive_bayes import GaussianNB
import pandas
import numpy as np
import matplotlib.pylab as plt
from sklearn.cross_validation import train_test_split
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import classification_report
import sklearn.metrics
from sklearn import metrics
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.metrics import recall_score
from sklearn.metrics import accuracy_score
import time
from sklearn.model_selection import KFold
#Begin Code:
datafile = input("Enter your datafile: ")
print(datafile)
target_names = ['good', 'bad', 'fair']
data = pandas.read_csv(datafile)
a = len(data.T) - 1 #Again doing this to avoid as much hard coding as possible.
X = data.iloc[:,0:a]
Y = data.iloc[:, a] #Y
X = X.values
Y = Y.values
kf = KFold(n_splits = 10)
for train_index, test_index in kf.split(X):
X_train, X_test = X[train_index], X[test_index]
Y_train, Y_test = Y[train_index], Y[test_index]
gnb = GaussianNB()
gnb.fit(X_train,Y_train)
y_predict_test = gnb.predict(X_test)
print(y_predict_test)
result = accuracy_score(Y_test, y_predict_test)
print(result)
results = metrics.classification_report(Y_test, y_predict_test, target_names)
print(results)
print(time.clock())
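# Hedged cross-check (an optional sketch, not part of the original script): the
# manual K-fold loop above can be summarised with cross_val_score, which returns
# one accuracy value per fold; its mean and spread condense the ten per-fold
# reports printed above. This reuses the X and Y arrays built earlier.
from sklearn.model_selection import cross_val_score
fold_scores = cross_val_score(GaussianNB(), X, Y, cv=10, scoring='accuracy')
print("Mean 10-fold accuracy: %.3f (+/- %.3f)" % (fold_scores.mean(), fold_scores.std()))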
| mit |
ekadhanda/bin | python/geodeticPlotter.py | 1 | 7961 | #!/usr/bin/env python
# Vasaant Krishnan.
# This script reads in the .dat file names given on the command line and plots the multiband delays and fringe rates against universal time. The .dat files are the output of Mark Reid's fit_geoblocs.f fortran program.
# End the command-line input with the word: save - to automatically get a .eps file of the output
# End the command-line input with the word: show - to display the graph on screen
import re
from pylab import *
import sys
import matplotlib.gridspec as gridspec
# Integer to keep count of the number of rows of figures which will be plotted
figureNumber = 0
# Regex pattern matching any floating-point value
floats = '\s+([+-]?\d+.\d+)'
# Array to keep track of ALL the universal times from ALL the .dat files which are being processed by this script
timeKeeper = []
# The list of .dat files to be processed by this script
nameList = sys.argv[1:]
# To print "The mean residual delay of baseline...." in the first instance only
firstBaseline = True
# The script chooses whether to use the default figure size or not
if len(nameList) >= 4:
fig = figure(figsize=(1.25*len(nameList),1.25*len(nameList)))
else:
fig = figure()
if nameList[len(nameList)-1] == "save":
saveFigure = True
nameList = nameList[0:len(nameList)-1]
else:
saveFigure = False
if nameList[len(nameList)-1] == "show":
showFigure = True
nameList = nameList[0:len(nameList)-1]
else:
showFigure = False
# The script starts from here!
for item in nameList:
universalTime = []
delayData = []
delayModel = []
delayResidual = []
rateData = []
rateModel = []
rateResidual = []
delayDataABS = []
delayModelABS = []
delayResidualABS = []
rateDataABS = []
rateModelABS = []
rateResidualABS = []
rmsDelResArray = []
# The co-ordinates for the individual points for the graphs are harvested here
for line in open(item,'r'):
# requiredInformation = re.search("\s+(\d+.\d+)\s+(-?\d+.\d+)\s+(-?\d+.\d+)\s+(-?\d+.\d+)\s+(-?\d+.\d+)\s+(-?\d+.\d+)\s+(-?\d+.\d+)",line)
requiredInformation = re.search(7*floats,line)
titleInformation = re.search("(\d-\s\d)",line)
if requiredInformation:
universalTime.append(float(requiredInformation.group(1)))
delayData.append( float(requiredInformation.group(2)))
delayModel.append( float(requiredInformation.group(3)))
delayResidual.append(float(requiredInformation.group(4)))
rateData.append( float(requiredInformation.group(5)))
rateModel.append( float(requiredInformation.group(6)))
rateResidual.append( float(requiredInformation.group(7)))
if titleInformation:
graphTitle = titleInformation.group(1)
close(item)
# This next block of code is to harvest the data (as done in the previous block) but in this instance, to automatically determine the maximum and minimum values for the y-axes of both Multiband Delay and Fringe Rate plots.
for line in open(item,'r'):
# absRequiredInformation = re.search("\s+(\d+.\d+)\s+(-?\d+.\d+)\s+(-?\d+.\d+)\s+(-?\d+.\d+)\s+(-?\d+.\d+)\s+(-?\d+.\d+)\s+(-?\d+.\d+)",line)
absRequiredInformation = re.search(7*floats,line)
if absRequiredInformation:
delayDataABS.append( abs(float(absRequiredInformation.group(2))))
delayModelABS.append( abs(float(absRequiredInformation.group(3))))
delayResidualABS.append(abs(float(absRequiredInformation.group(4))))
rateDataABS.append( abs(float(absRequiredInformation.group(5))))
rateModelABS.append( abs(float(absRequiredInformation.group(6))))
rateResidualABS.append( abs(float(absRequiredInformation.group(7))))
close(item)
    # This block prints out the RMS of the delay residuals
for element in delayResidual:
rmsDelResArray.append(element**2)
rmsDelRes = sqrt(mean(rmsDelResArray))
if firstBaseline:
print ""
print "The mean residual delay of baseline ("+str(graphTitle)+") = %.3f nsec"%(rmsDelRes)
firstBaseline = False
else:
print " ("+str(graphTitle)+") = %.3f "%(rmsDelRes)
    # These blocks of code determine the greatest of the 3 values to use as the max and min of the y-axes, but only if there is data in the .dat file being read
if requiredInformation:
if max(delayDataABS) > max(delayModelABS):
if max(delayDataABS) > max(delayResidualABS):
delayPlotAxVal = ceil(max(delayDataABS))
else:
delayPlotAxVal = ceil(max(delayResidualABS))
else:
if max(delayModelABS) > max(delayResidualABS):
delayPlotAxVal = ceil(max(delayModelABS))
else:
delayPlotAxVal = ceil(max(delayResidualABS))
if max(rateDataABS) > max(rateModelABS):
if max(rateDataABS) > max(rateResidualABS):
ratePlotAxVal = ceil(max(rateDataABS))
else:
ratePlotAxVal = ceil(max(rateResidualABS))
else:
if max(rateModelABS) > max(rateResidualABS):
ratePlotAxVal = ceil(max(rateModelABS))
else:
ratePlotAxVal = ceil(max(rateResidualABS))
    # The next couple of lines of code keep track of all the universal times of all the data which have been harvested. This will be used in the plotting block of code to standardise the x-axis.
for time in universalTime:
timeKeeper.append(time)
# The plotting and figure characteristics are determined from here on.
subplot2grid((len(nameList),2),(figureNumber,0))
scatter(universalTime,delayData, c='g',edgecolors='none',label="Data")
scatter(universalTime,delayModel, c='b',edgecolors='none',label="Delay Model")
    scatter(universalTime,delayResidual,c='r', marker='4', label="Delay Residuals")
plot([floor(min(timeKeeper))-0.5,ceil(max(timeKeeper))+0.5],[0,0],'--',c='black')
xlim( floor(min(timeKeeper))-0.5,ceil(max(timeKeeper))+0.5)
ylim( -delayPlotAxVal, delayPlotAxVal)
yticks([-delayPlotAxVal,0,delayPlotAxVal])
title('('+graphTitle+')',fontsize=10)
# Only the bottom-most figure gets axis labels and full title
if figureNumber == len(nameList)-1:
xticks()
xlabel("Universal Time (Hours)",weight='bold')
title('Baseline ('+graphTitle+')',fontsize=10)
else:
xticks([])
subplot2grid((len(nameList),2),(figureNumber,1))
scatter(universalTime,rateData, c='g',edgecolors='none',label="Data")
    scatter(universalTime,rateModel, c='b',edgecolors='none',label="Rate Model")
    scatter(universalTime,rateResidual,c='r', marker='4', label="Rate Residuals")
plot([floor(min(timeKeeper))-0.5,ceil(max(timeKeeper))+0.5],[0,0],'--',c='black')
xlim( floor(min(timeKeeper))-0.5,ceil(max(timeKeeper))+0.5)
ylim( -ratePlotAxVal, ratePlotAxVal)
yticks([-ratePlotAxVal,0,ratePlotAxVal])
title('('+graphTitle+')',fontsize=10)
# Only the bottom-most figure gets axis labels and full title
if figureNumber == len(nameList)-1:
xticks()
xlabel("Universal Time (Hours)",weight='bold')
title('Baseline ('+graphTitle+')',fontsize=10)
else:
xticks([])
fig.subplots_adjust(left=0.125, right=0.9, bottom=0.05, top=0.95, wspace=0.5, hspace=0.25)
figureNumber += 1
fig.text(0.05,0.55,'Multi-band Delays (nsec)',weight='bold',horizontalalignment='center',verticalalignment='top',rotation='vertical')
fig.text(0.52,0.55,'Fringe Rates (MHz)', weight='bold',horizontalalignment='center',verticalalignment='top',rotation='vertical')
if saveFigure:
savefig("geodeticPlotter.eps", format='eps')
elif showFigure:
show()
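# Hedged aside (a sketch, not used above): the nested comparisons that choose
# delayPlotAxVal and ratePlotAxVal are equivalent to taking the ceiling of the
# largest absolute value across the three lists. A compact helper that could
# replace those blocks:
def axisLimit(dataABS, modelABS, residualABS):
    """Ceiling of the largest value found across the three lists."""
    return ceil(max(max(dataABS), max(modelABS), max(residualABS)))
# e.g. delayPlotAxVal = axisLimit(delayDataABS, delayModelABS, delayResidualABS)
#      ratePlotAxVal  = axisLimit(rateDataABS, rateModelABS, rateResidualABS)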
| mit |
mhdella/scikit-learn | examples/cluster/plot_kmeans_digits.py | 230 | 4524 | """
===========================================================
A demo of K-Means clustering on the handwritten digits data
===========================================================
In this example we compare the various initialization strategies for
K-means in terms of runtime and quality of the results.
As the ground truth is known here, we also apply different cluster
quality metrics to judge the goodness of fit of the cluster labels to the
ground truth.
Cluster quality metrics evaluated (see :ref:`clustering_evaluation` for
definitions and discussions of the metrics):
=========== ========================================================
Shorthand full name
=========== ========================================================
homo homogeneity score
compl completeness score
v-meas V measure
ARI adjusted Rand index
AMI adjusted mutual information
silhouette silhouette coefficient
=========== ========================================================
"""
print(__doc__)
from time import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn import metrics
from sklearn.cluster import KMeans
from sklearn.datasets import load_digits
from sklearn.decomposition import PCA
from sklearn.preprocessing import scale
np.random.seed(42)
digits = load_digits()
data = scale(digits.data)
n_samples, n_features = data.shape
n_digits = len(np.unique(digits.target))
labels = digits.target
sample_size = 300
print("n_digits: %d, \t n_samples %d, \t n_features %d"
% (n_digits, n_samples, n_features))
print(79 * '_')
print('% 9s' % 'init'
' time inertia homo compl v-meas ARI AMI silhouette')
def bench_k_means(estimator, name, data):
t0 = time()
estimator.fit(data)
print('% 9s %.2fs %i %.3f %.3f %.3f %.3f %.3f %.3f'
% (name, (time() - t0), estimator.inertia_,
metrics.homogeneity_score(labels, estimator.labels_),
metrics.completeness_score(labels, estimator.labels_),
metrics.v_measure_score(labels, estimator.labels_),
metrics.adjusted_rand_score(labels, estimator.labels_),
metrics.adjusted_mutual_info_score(labels, estimator.labels_),
metrics.silhouette_score(data, estimator.labels_,
metric='euclidean',
sample_size=sample_size)))
bench_k_means(KMeans(init='k-means++', n_clusters=n_digits, n_init=10),
name="k-means++", data=data)
bench_k_means(KMeans(init='random', n_clusters=n_digits, n_init=10),
name="random", data=data)
# in this case the seeding of the centers is deterministic, hence we run the
# kmeans algorithm only once with n_init=1
pca = PCA(n_components=n_digits).fit(data)
bench_k_means(KMeans(init=pca.components_, n_clusters=n_digits, n_init=1),
name="PCA-based",
data=data)
print(79 * '_')
###############################################################################
# Visualize the results on PCA-reduced data
reduced_data = PCA(n_components=2).fit_transform(data)
kmeans = KMeans(init='k-means++', n_clusters=n_digits, n_init=10)
kmeans.fit(reduced_data)
# Step size of the mesh. Decrease to increase the quality of the VQ.
h = .02     # point in the mesh [x_min, x_max] x [y_min, y_max].
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh.
x_min, x_max = reduced_data[:, 0].min() - 1, reduced_data[:, 0].max() + 1
y_min, y_max = reduced_data[:, 1].min() - 1, reduced_data[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
# Obtain labels for each point in mesh. Use last trained model.
Z = kmeans.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.figure(1)
plt.clf()
plt.imshow(Z, interpolation='nearest',
extent=(xx.min(), xx.max(), yy.min(), yy.max()),
cmap=plt.cm.Paired,
aspect='auto', origin='lower')
plt.plot(reduced_data[:, 0], reduced_data[:, 1], 'k.', markersize=2)
# Plot the centroids as a white X
centroids = kmeans.cluster_centers_
plt.scatter(centroids[:, 0], centroids[:, 1],
marker='x', s=169, linewidths=3,
color='w', zorder=10)
plt.title('K-means clustering on the digits dataset (PCA-reduced data)\n'
'Centroids are marked with white cross')
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.xticks(())
plt.yticks(())
plt.show()
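###############################################################################
# Hedged extension (an optional sketch, not part of the original example): the
# same silhouette criterion used in bench_k_means can be scanned over a few
# candidate cluster counts to sanity-check that n_digits is a reasonable choice.
# The candidate range below is an arbitrary assumption.
for k in range(8, 13):
    km = KMeans(init='k-means++', n_clusters=k, n_init=10).fit(data)
    sil = metrics.silhouette_score(data, km.labels_, metric='euclidean',
                                   sample_size=sample_size)
    print("n_clusters=%d   silhouette=%.3f" % (k, sil))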
| bsd-3-clause |
pmlrsg/arsf_tools | las13/las13.py | 1 | 9486 | #!/usr/bin/env python
#-*- coding:utf-8 -*-
###########################################################
# This file has been created by ARSF Data Analysis Node and
# is licensed under the GPL v3 Licence. A copy of this
# licence is available to download with this file.
###########################################################
###########################################################################
# This is the main Python interface library for the Las1.3Reader C++ library
# Use it for reading / plotting LAS 1.3 data
###########################################################################
import sys
import pylab
import las13reader
from matplotlib.backends.backend_pdf import PdfPages
# Python wrapper class for the C++ las13reader bindings
class las13():
"""
Class to wrap the las13reader class to be more user friendly
"""
def __init__(self,filename,quiet=True):
"""
Constructor: takes a LAS 1.3 file as input
"""
if isinstance(filename,str):
self.reader=las13reader.Las1_3_handler(filename)
self.reader.SetQuiet(quiet)
else:
raise Exception("Expected string argument for filename.")
def points_in_bounds(self,bounds):
"""
Function that searches the LAS file and returns all points within the given rectangular bounds.
Inputs:
bounds - a list of 4 floating point values describing north, south, west and east bounds.
Returns:
An object of type las13reader.PulseManager.
"""
if not isinstance(bounds,list):
raise Exception("Expected list argument for bounds (of length 4: north,south,west,east).")
if len(bounds)!=4:
raise Exception("Expected bounds list of length 4: north,south,west,east.")
pmanager=self.reader.GetPointsInBounds(bounds[0],bounds[1],bounds[2],bounds[3])
return pmanager
def points_with_classification(self,classification):
"""
Function that searches the LAS file and returns all points with the given classification value.
Inputs:
classification - an integer value of the classification to search for
Returns:
An object of type las13reader.PulseManager.
"""
if not isinstance(classification,int):
raise Exception("Expected int argument for classification.")
pmanager=self.reader.GetPointsWithClassification(classification)
return pmanager
def read_like_book(self,npoints=1,reset=False):
"""
Function that searches the LAS file and returns points in sequence up to npoints.
Inputs:
npoints - an integer value for the maximum number of points to read
reset - a boolean that when True resets the reader back to the start of the file
Returns:
An object of type las13reader.PulseManager.
"""
if not isinstance(npoints,int):
raise Exception("Expected int argument for npoints.")
if not isinstance(reset,bool):
raise Exception("Expected bool argument for reset.")
pmanager=self.reader.ReadLikeBook(npoints,reset)
return pmanager
def tidy(self):
"""
Function to destroy and free up memory used in any current pulse managers
"""
self.reader.DeletePulseManagers()
###############################################################################
# Static methods below here - functions do not depend on an instance of las13
###############################################################################
#function to return the waveform x,y,z and intensity values from a given pulse
@staticmethod
def waveform(pulse):
"""
Function to return the waveform of intensity values from a given pulse object
Input:
pulse - a las13reader.Pulse object (such that pulsemanagers contain)
Returns:
The waveform as a dictionary with keys 'x','y','z', and 'intensity'.
"""
if not isinstance(pulse,las13reader.Pulse):
print("las13.waveform expects a Pulse object to be passed, not: %s"%type(pulse))
return None
#number of samples
nsamples=pulse.nsamples()
#return a dict of lists
waveform={'x':[],'y':[],'z':[],'intensity':[]}
for s in range(0,nsamples):
samplePos=list(pulse.sampleXYZ(s))
waveform['x'].append(samplePos[0])
waveform['y'].append(samplePos[1])
waveform['z'].append(samplePos[2])
waveform['intensity'].append(pulse.sampleintensity(s))
return waveform
@staticmethod
def discrete(pulse):
"""
Function to return the discrete point information from the given pulse
"""
discrete=[]
for r in range(0,pulse.nreturns()):
discrete.append(discretepoint(pulse,r))
return discrete
#Function to return some (requested) info about the given pulse
@staticmethod
def get_pulse_info(pulse,keyword):
"""
Function to extract the requested information from the given pulse object. This
is really just a helper function to convert vectors into lists.
Inputs:
pulse - the pulse object
keyword - key to describe information requested
Returns:
the requested data
"""
keywords=['time','nreturns','nsamples','origin','offset','scanangle','classification','returnlocs','disint']
if keyword == 'time':
return pulse.time()
elif keyword == 'nreturns':
return pulse.nreturns()
elif keyword == 'nsamples':
return pulse.nsamples()
elif keyword == 'origin':
return list(pulse.originXYZ())
elif keyword == 'offset':
return list(pulse.offsetXYZ())
elif keyword == 'scanangle':
return pulse.scanangle()
elif keyword == 'classification':
return list(pulse.classification())
elif keyword == 'returnlocs':
return list(pulse.returnpointlocation())
elif keyword == 'disint':
return list(pulse.discreteintensities())
else:
print("Keyword should be one of: ",keywords)
raise Exception("Unrecognised keyword in get_pulse_info: %s."%(keyword))
#Function to plot the pulse
@staticmethod
def quick_plot_pulse(pulse,title=None,filename=None):
"""
Function to produce a plot of the pulse waveform data
Inputs:
pulse - the pulse object
title - a title to give the plot
filename - if given the plot is saved to the filename, else displayed on screen
"""
waveform=las13.waveform(pulse)
pylab.plot(waveform['intensity'],'b-',label='Waveform')
pylab.xlabel('Sample number')
pylab.ylabel('Intensity')
if title:
pylab.title(title)
pylab.ylim([0,pylab.ylim()[1]+5])
pylab.legend()
if filename:
pylab.savefig(filename)
else:
pylab.show()
@staticmethod
def plot_all_pulses(pulsemanager,filename):
"""
Function to plot every pulse within a pulsemanager and save to a PDF file
Inputs:
pulsemanager - the pulsemanager object to plot data from
filename - the PDF filename to save the plots to
"""
fileobj=PdfPages(filename)
for p in range(pulsemanager.getNumOfPulses()):
pulse=pulsemanager[p]
waveform=las13.waveform(pulse)
pylab.plot(waveform['intensity'],'b-',label='Waveform')
pylab.plot( [x / pulse.sampletime() for x in las13.get_pulse_info(pulse,'returnlocs')],las13.get_pulse_info(pulse,'disint'),'ro',label='Discrete')
pylab.xlabel('Sample number')
pylab.ylabel('Intensity')
pylab.title('Pulse with time: %f'%pulse.time())
pylab.ylim([0,pylab.ylim()[1]+5])
pylab.legend()
fileobj.savefig()
pylab.clf()
fileobj.close()
class dpoint():
"""
Simple helper class to describe a points position in X,Y,Z
"""
def __init__(self,dposition):
self.X=dposition[0]
self.Y=dposition[1]
self.Z=dposition[2]
class discretepoint():
"""
Class to hold information on discrete points in a user friendly interface
"""
def __init__(self,pulse,item):
if not isinstance(pulse,las13reader.Pulse):
raise Exception("Parameter 'pulse' should be of type las13reader.Pulse in discretepoint object")
if not isinstance(item,int):
raise Exception("Parameter 'item' should be of type integer in discretepoint object")
#get the number of returns
self.returns=pulse.nreturns()
if item >= self.returns:
raise Exception("Cannot create a discretepoint for return %d when only %d returns for given pulse"%(item,self.returns))
#get all the discrete points
points=list(pulse.discretepoints())
#now convert and store as 'dpoint' object
self.position=dpoint(points[item])
#intensity
self.intensity=list(pulse.discreteintensities())[item]
#classification
self.classification=list(pulse.classification())[item]
#return number
self.returnnumber=item
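###########################################################################
# Hedged usage sketch (not part of the library itself): one way the wrapper
# above might be driven from the command line. The LAS file name and the
# rectangular bounds are illustrative assumptions, not values from real data.
if __name__ == "__main__":
    example_file = "example_flightline.LAS"   # hypothetical input file
    handler = las13(example_file, quiet=True)
    # bounds are given as [north, south, west, east] in the file's projection
    pulses = handler.points_in_bounds([650500.0, 650000.0, 290000.0, 290500.0])
    print("Pulses found in bounds: %d" % pulses.getNumOfPulses())
    if pulses.getNumOfPulses() > 0:
        las13.quick_plot_pulse(pulses[0], title="First pulse in bounds")
        las13.plot_all_pulses(pulses, "pulses_in_bounds.pdf")
    handler.tidy()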
| gpl-3.0 |
flygeneticist/Thinkful-Data-Science-Examples | Unit1/Lesson3/import-into-sql-with-python.py | 1 | 2255 | # import the sql library into python
import sqlite3 as lite
# data to be inserted into tables stored as tuples with-in a tuple
cities = (('Boston', 'MA'),
('Chicago', 'IL'),
('Miami', 'FL'),
('Dallas', 'TX'),
('Seattle', 'WA'),
('Portland', 'OR'),
('San Francisco', 'CA'),
('Los Angeles', 'CA'))
weather = (('Boston', '2013', 7, 1, 59, 0),
('Chicago', '2013', 7, 1, 59, 0),
('Miami', '2013', 8, 1, 84, 0),
('Dallas', '2013', 7, 1, 77, 0),
('Seattle', '2013', 7, 1, 61, 0),
('Portland', '2013', 7, 12, 63, 0),
('San Francisco', '2013', 9, 12, 64, 0),
('Los Angeles', '2013', 9, 12, 75, 0))
# connect to your SQL database and store as a variable to call in future code
con = lite.connect('my_database.db')
with con:
# setup the cursor for accessing records in the tables
cur = con.cursor()
'''
INSERTING DATA INTO SQLITE DATABASE
'''
# sqlite3 versions 3.6 and up use something like the following code
cur.executemany("INSERT INTO cities VALUES(?,?)", cities)
cur.executemany("INSERT INTO weather VALUES(?,?,?,?,?,?)", weather)
# # sqlite3 versions 3.5 and below use something like the following code
# for rec in cities:
# cur.execute("INSERT INTO cities VALUES (\'"+rec[0]+"\',\'"+rec[1]+"\')")
# for rec in weather:
# cur.execute("INSERT INTO weather VALUES (\'"+rec[0]+"\',"+str(rec[1])+","+str(rec[2])+","+str(rec[3])+","+str(rec[4])+")")
'''
RETRIEVING RAW DATA FROM SQLITE DATABASE
'''
# execute your search command
cur.execute("SELECT * FROM cities")
# grab the results
rows = cur.fetchall()
# iterate over the results, printing out each to std output with print
for row in rows:
print row
'''
RETRIEVING DATA FROM DATABASE AND STORING IT IN A PANDAS DATAFRAME
'''
# import the pandas library
import pandas as pd
# Select all rows
cur.execute("SELECT * FROM cities")
rows = cur.fetchall()
    # set column labels from the cursor's description metadata for the query
cols = [desc[0] for desc in cur.description]
# set the pandas dataframe with data and column labels
df = pd.DataFrame(rows, columns=cols)
# display the first bit of data from the pandas DataFrame
print df.head()
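    '''
    ALTERNATIVE: LOADING A QUERY DIRECTLY INTO A PANDAS DATAFRAME
    '''
    # Hedged sketch (optional): pandas can build the DataFrame, including the
    # column labels, straight from a SQL query on the same connection, which
    # replaces the manual fetchall()/description steps shown above.
    df_cities = pd.read_sql_query("SELECT * FROM cities", con)
    print df_cities.head()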
| mit |
florian-f/sklearn | examples/linear_model/plot_sgd_loss_functions.py | 4 | 1602 | """
==========================
SGD: Convex Loss Functions
==========================
Plot the convex loss functions supported by
`sklearn.linear_model.stochastic_gradient`.
"""
print(__doc__)
import numpy as np
import pylab as pl
from sklearn.linear_model.sgd_fast import SquaredHinge
from sklearn.linear_model.sgd_fast import Hinge
from sklearn.linear_model.sgd_fast import ModifiedHuber
from sklearn.linear_model.sgd_fast import SquaredLoss
###############################################################################
# Define loss functions
xmin, xmax = -4, 4
hinge = Hinge(1)
squared_hinge = SquaredHinge()
perceptron = Hinge(0)
log_loss = lambda z, p: np.log2(1.0 + np.exp(-z))
modified_huber = ModifiedHuber()
squared_loss = SquaredLoss()
###############################################################################
# Plot loss functions
xx = np.linspace(xmin, xmax, 100)
pl.plot([xmin, 0, 0, xmax], [1, 1, 0, 0], 'k-',
label="Zero-one loss")
pl.plot(xx, [hinge.loss(x, 1) for x in xx], 'g-',
label="Hinge loss")
pl.plot(xx, [perceptron.loss(x, 1) for x in xx], 'm-',
label="Perceptron loss")
pl.plot(xx, [log_loss(x, 1) for x in xx], 'r-',
label="Log loss")
#pl.plot(xx, [2 * squared_loss.loss(x, 1) for x in xx], 'c-',
# label="Squared loss")
pl.plot(xx, [squared_hinge.loss(x, 1) for x in xx], 'b-',
label="Squared hinge loss")
pl.plot(xx, [modified_huber.loss(x, 1) for x in xx], 'y--',
label="Modified huber loss")
pl.ylim((0, 8))
pl.legend(loc="upper right")
pl.xlabel(r"$y \cdot f(x)$")
pl.ylabel("$L(y, f(x))$")
pl.show()
| bsd-3-clause |
rohanp/scikit-learn | sklearn/cluster/tests/test_mean_shift.py | 150 | 3651 | """
Testing for mean shift clustering methods
"""
import numpy as np
import warnings
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raise_message
from sklearn.cluster import MeanShift
from sklearn.cluster import mean_shift
from sklearn.cluster import estimate_bandwidth
from sklearn.cluster import get_bin_seeds
from sklearn.datasets.samples_generator import make_blobs
n_clusters = 3
centers = np.array([[1, 1], [-1, -1], [1, -1]]) + 10
X, _ = make_blobs(n_samples=300, n_features=2, centers=centers,
cluster_std=0.4, shuffle=True, random_state=11)
def test_estimate_bandwidth():
# Test estimate_bandwidth
bandwidth = estimate_bandwidth(X, n_samples=200)
assert_true(0.9 <= bandwidth <= 1.5)
def test_mean_shift():
# Test MeanShift algorithm
bandwidth = 1.2
ms = MeanShift(bandwidth=bandwidth)
labels = ms.fit(X).labels_
labels_unique = np.unique(labels)
n_clusters_ = len(labels_unique)
assert_equal(n_clusters_, n_clusters)
cluster_centers, labels = mean_shift(X, bandwidth=bandwidth)
labels_unique = np.unique(labels)
n_clusters_ = len(labels_unique)
assert_equal(n_clusters_, n_clusters)
def test_parallel():
ms1 = MeanShift(n_jobs=2)
ms1.fit(X)
ms2 = MeanShift()
ms2.fit(X)
    assert_array_equal(ms1.cluster_centers_, ms2.cluster_centers_)
    assert_array_equal(ms1.labels_, ms2.labels_)
def test_meanshift_predict():
# Test MeanShift.predict
ms = MeanShift(bandwidth=1.2)
labels = ms.fit_predict(X)
labels2 = ms.predict(X)
assert_array_equal(labels, labels2)
def test_meanshift_all_orphans():
# init away from the data, crash with a sensible warning
ms = MeanShift(bandwidth=0.1, seeds=[[-9, -9], [-10, -10]])
msg = "No point was within bandwidth=0.1"
assert_raise_message(ValueError, msg, ms.fit, X,)
def test_unfitted():
# Non-regression: before fit, there should be not fitted attributes.
ms = MeanShift()
assert_false(hasattr(ms, "cluster_centers_"))
assert_false(hasattr(ms, "labels_"))
def test_bin_seeds():
# Test the bin seeding technique which can be used in the mean shift
# algorithm
# Data is just 6 points in the plane
X = np.array([[1., 1.], [1.4, 1.4], [1.8, 1.2],
[2., 1.], [2.1, 1.1], [0., 0.]])
# With a bin coarseness of 1.0 and min_bin_freq of 1, 3 bins should be
# found
ground_truth = set([(1., 1.), (2., 1.), (0., 0.)])
test_bins = get_bin_seeds(X, 1, 1)
test_result = set([tuple(p) for p in test_bins])
assert_true(len(ground_truth.symmetric_difference(test_result)) == 0)
# With a bin coarseness of 1.0 and min_bin_freq of 2, 2 bins should be
# found
ground_truth = set([(1., 1.), (2., 1.)])
test_bins = get_bin_seeds(X, 1, 2)
test_result = set([tuple(p) for p in test_bins])
assert_true(len(ground_truth.symmetric_difference(test_result)) == 0)
# With a bin size of 0.01 and min_bin_freq of 1, 6 bins should be found
# we bail and use the whole data here.
with warnings.catch_warnings(record=True):
test_bins = get_bin_seeds(X, 0.01, 1)
assert_array_equal(test_bins, X)
# tight clusters around [0, 0] and [1, 1], only get two bins
X, _ = make_blobs(n_samples=100, n_features=2, centers=[[0, 0], [1, 1]],
cluster_std=0.1, random_state=0)
test_bins = get_bin_seeds(X, 1)
assert_array_equal(test_bins, [[0, 0], [1, 1]])
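def test_cluster_centers_shape():
    # Hedged addition (a sketch, not from the original test suite): after
    # fitting, cluster_centers_ should have one row per detected cluster and
    # one column per feature of X.
    ms = MeanShift(bandwidth=1.2).fit(X)
    n_found = len(np.unique(ms.labels_))
    assert_equal(ms.cluster_centers_.shape, (n_found, X.shape[1]))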
| bsd-3-clause |
Richert/BrainNetworks | BasalGanglia/gpe_2pop_pac_tmp.py | 1 | 2320 | import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import matplotlib as mpl
from pyrates.utility.visualization import create_cmap, plot_connectivity
from scipy.interpolate.interpolate import interp1d
import scipy.io as scio
linewidth = 1.2
fontsize1 = 10
fontsize2 = 10
markersize1 = 60
markersize2 = 60
dpi = 200
plt.style.reload_library()
plt.style.use('seaborn-whitegrid')
mpl.rcParams['text.latex.preamble'] = [r'\usepackage{sfmath} \boldmath']
#mpl.rc('text', usetex=True)
mpl.rcParams["font.sans-serif"] = ["Roboto"]
mpl.rcParams["font.size"] = fontsize1
mpl.rcParams["font.weight"] = "bold"
mpl.rcParams['lines.linewidth'] = linewidth
mpl.rcParams['axes.titlesize'] = fontsize2
mpl.rcParams['axes.titleweight'] = 'bold'
mpl.rcParams['axes.labelsize'] = 'large'
mpl.rcParams['axes.labelweight'] = 'bold'
mpl.rcParams['xtick.color'] = 'black'
mpl.rcParams['ytick.color'] = 'black'
mpl.rcParams['ytick.alignment'] = 'center'
mpl.rcParams['legend.fontsize'] = fontsize1
sns.set(style="whitegrid")
# load data
path = "/home/rgast/MatlabProjects/STN_GPe/PAC_bistable.mat"
data = scio.loadmat(path)
# extract from data
alpha = data['stim_amps_unique']
omega = data['stim_freqs_unique']
MI = data['PAC_max']
PAA_osc = data['PAA_osc']
PAA_env = data['PAA_env']
PAA = PAA_osc / PAA_env
# plot MI
cmap = create_cmap("pyrates_blue", n_colors=64, as_cmap=False, reverse=False)
ax = plot_connectivity(MI, cmap=cmap)
ax.set_xticks(ax.get_xticks()[0::2])
ax.set_yticks(ax.get_yticks()[0::2])
ax.set_xticklabels(np.round(omega.squeeze(), decimals=1)[0::2], rotation='horizontal')
ax.set_yticklabels(np.round(alpha.squeeze(), decimals=1)[0::2], rotation='horizontal')
ax.set_xlabel('omega')
ax.set_ylabel('alpha')
ax.invert_yaxis()
plt.tight_layout()
plt.savefig('MI_bs.svg')
plt.show()
# plot PAA
cmap = create_cmap("pyrates_blue", n_colors=64, as_cmap=False, reverse=False)
ax = plot_connectivity(PAA, cmap=cmap, vmin=0.0, vmax=1.0)
ax.set_xticks(ax.get_xticks()[0::2])
ax.set_yticks(ax.get_yticks()[0::2])
ax.set_xticklabels(np.round(omega.squeeze(), decimals=1)[0::2], rotation='horizontal')
ax.set_yticklabels(np.round(alpha.squeeze(), decimals=1)[0::2], rotation='horizontal')
ax.set_xlabel('omega')
ax.set_ylabel('alpha')
ax.invert_yaxis()
plt.tight_layout()
plt.savefig('PAA_bs.svg')
plt.show()
| apache-2.0 |
Barmaley-exe/scikit-learn | sklearn/feature_extraction/text.py | 6 | 49520 | # -*- coding: utf-8 -*-
# Authors: Olivier Grisel <[email protected]>
# Mathieu Blondel <[email protected]>
# Lars Buitinck <[email protected]>
# Robert Layton <[email protected]>
# Jochen Wersdörfer <[email protected]>
# Roman Sinayev <[email protected]>
#
# License: BSD 3 clause
"""
The :mod:`sklearn.feature_extraction.text` submodule gathers utilities to
build feature vectors from text documents.
"""
from __future__ import unicode_literals
import array
from collections import Mapping, defaultdict
import numbers
from operator import itemgetter
import re
import unicodedata
import numpy as np
import scipy.sparse as sp
from ..base import BaseEstimator, TransformerMixin
from ..externals import six
from ..externals.six.moves import xrange
from ..preprocessing import normalize
from .hashing import FeatureHasher
from .stop_words import ENGLISH_STOP_WORDS
from ..utils import deprecated
from ..utils.fixes import frombuffer_empty, bincount
from ..utils.validation import check_is_fitted
__all__ = ['CountVectorizer',
'ENGLISH_STOP_WORDS',
'TfidfTransformer',
'TfidfVectorizer',
'strip_accents_ascii',
'strip_accents_unicode',
'strip_tags']
def strip_accents_unicode(s):
"""Transform accentuated unicode symbols into their simple counterpart
Warning: the python-level loop and join operations make this
implementation 20 times slower than the strip_accents_ascii basic
normalization.
See also
--------
strip_accents_ascii
Remove accentuated char for any unicode symbol that has a direct
ASCII equivalent.
"""
return ''.join([c for c in unicodedata.normalize('NFKD', s)
if not unicodedata.combining(c)])
def strip_accents_ascii(s):
"""Transform accentuated unicode symbols into ascii or nothing
Warning: this solution is only suited for languages that have a direct
transliteration to ASCII symbols.
See also
--------
strip_accents_unicode
Remove accentuated char for any unicode symbol.
"""
nkfd_form = unicodedata.normalize('NFKD', s)
return nkfd_form.encode('ASCII', 'ignore').decode('ASCII')
def strip_tags(s):
"""Basic regexp based HTML / XML tag stripper function
For serious HTML/XML preprocessing you should rather use an external
library such as lxml or BeautifulSoup.
"""
return re.compile(r"<([^>]+)>", flags=re.UNICODE).sub(" ", s)
def _check_stop_list(stop):
if stop == "english":
return ENGLISH_STOP_WORDS
elif isinstance(stop, six.string_types):
raise ValueError("not a built-in stop list: %s" % stop)
else: # assume it's a collection
return stop
class VectorizerMixin(object):
"""Provides common code for text vectorizers (tokenization logic)."""
_white_spaces = re.compile(r"\s\s+")
def decode(self, doc):
"""Decode the input into a string of unicode symbols
The decoding strategy depends on the vectorizer parameters.
"""
if self.input == 'filename':
with open(doc, 'rb') as fh:
doc = fh.read()
elif self.input == 'file':
doc = doc.read()
if isinstance(doc, bytes):
doc = doc.decode(self.encoding, self.decode_error)
if doc is np.nan:
raise ValueError("np.nan is an invalid document, expected byte or "
"unicode string.")
return doc
def _word_ngrams(self, tokens, stop_words=None):
"""Turn tokens into a sequence of n-grams after stop words filtering"""
# handle stop words
if stop_words is not None:
tokens = [w for w in tokens if w not in stop_words]
# handle token n-grams
min_n, max_n = self.ngram_range
if max_n != 1:
original_tokens = tokens
tokens = []
n_original_tokens = len(original_tokens)
for n in xrange(min_n,
min(max_n + 1, n_original_tokens + 1)):
for i in xrange(n_original_tokens - n + 1):
tokens.append(" ".join(original_tokens[i: i + n]))
return tokens
def _char_ngrams(self, text_document):
"""Tokenize text_document into a sequence of character n-grams"""
# normalize white spaces
text_document = self._white_spaces.sub(" ", text_document)
text_len = len(text_document)
ngrams = []
min_n, max_n = self.ngram_range
for n in xrange(min_n, min(max_n + 1, text_len + 1)):
for i in xrange(text_len - n + 1):
ngrams.append(text_document[i: i + n])
return ngrams
def _char_wb_ngrams(self, text_document):
"""Whitespace sensitive char-n-gram tokenization.
Tokenize text_document into a sequence of character n-grams
excluding any whitespace (operating only inside word boundaries)"""
# normalize white spaces
text_document = self._white_spaces.sub(" ", text_document)
min_n, max_n = self.ngram_range
ngrams = []
for w in text_document.split():
w = ' ' + w + ' '
w_len = len(w)
for n in xrange(min_n, max_n + 1):
offset = 0
ngrams.append(w[offset:offset + n])
while offset + n < w_len:
offset += 1
ngrams.append(w[offset:offset + n])
if offset == 0: # count a short word (w_len < n) only once
break
return ngrams
def build_preprocessor(self):
"""Return a function to preprocess the text before tokenization"""
if self.preprocessor is not None:
return self.preprocessor
# unfortunately python functools package does not have an efficient
# `compose` function that would have allowed us to chain a dynamic
# number of functions. However the cost of a lambda call is a few
# hundreds of nanoseconds which is negligible when compared to the
# cost of tokenizing a string of 1000 chars for instance.
noop = lambda x: x
# accent stripping
if not self.strip_accents:
strip_accents = noop
elif callable(self.strip_accents):
strip_accents = self.strip_accents
elif self.strip_accents == 'ascii':
strip_accents = strip_accents_ascii
elif self.strip_accents == 'unicode':
strip_accents = strip_accents_unicode
else:
raise ValueError('Invalid value for "strip_accents": %s' %
self.strip_accents)
if self.lowercase:
return lambda x: strip_accents(x.lower())
else:
return strip_accents
def build_tokenizer(self):
"""Return a function that splits a string into a sequence of tokens"""
if self.tokenizer is not None:
return self.tokenizer
token_pattern = re.compile(self.token_pattern)
return lambda doc: token_pattern.findall(doc)
def get_stop_words(self):
"""Build or fetch the effective stop words list"""
return _check_stop_list(self.stop_words)
def build_analyzer(self):
"""Return a callable that handles preprocessing and tokenization"""
if callable(self.analyzer):
return self.analyzer
preprocess = self.build_preprocessor()
if self.analyzer == 'char':
return lambda doc: self._char_ngrams(preprocess(self.decode(doc)))
elif self.analyzer == 'char_wb':
return lambda doc: self._char_wb_ngrams(
preprocess(self.decode(doc)))
elif self.analyzer == 'word':
stop_words = self.get_stop_words()
tokenize = self.build_tokenizer()
return lambda doc: self._word_ngrams(
tokenize(preprocess(self.decode(doc))), stop_words)
else:
raise ValueError('%s is not a valid tokenization scheme/analyzer' %
self.analyzer)
def _validate_vocabulary(self):
vocabulary = self.vocabulary
if vocabulary is not None:
if not isinstance(vocabulary, Mapping):
vocab = {}
for i, t in enumerate(vocabulary):
if vocab.setdefault(t, i) != i:
msg = "Duplicate term in vocabulary: %r" % t
raise ValueError(msg)
vocabulary = vocab
else:
indices = set(six.itervalues(vocabulary))
if len(indices) != len(vocabulary):
raise ValueError("Vocabulary contains repeated indices.")
for i in xrange(len(vocabulary)):
if i not in indices:
msg = ("Vocabulary of size %d doesn't contain index "
"%d." % (len(vocabulary), i))
raise ValueError(msg)
if not vocabulary:
raise ValueError("empty vocabulary passed to fit")
self.fixed_vocabulary_ = True
self.vocabulary_ = dict(vocabulary)
else:
self.fixed_vocabulary_ = False
def _check_vocabulary(self):
"""Check if vocabulary is empty or missing (not fit-ed)"""
msg="%(name)s - Vocabulary wasn't fitted."
check_is_fitted(self, 'vocabulary_', msg=msg),
if len(self.vocabulary_) == 0:
raise ValueError("Vocabulary is empty")
@property
@deprecated("The `fixed_vocabulary` attribute is deprecated and will be "
"removed in 0.18. Please use `fixed_vocabulary_` instead.")
def fixed_vocabulary(self):
return self.fixed_vocabulary_
class HashingVectorizer(BaseEstimator, VectorizerMixin):
"""Convert a collection of text documents to a matrix of token occurrences
It turns a collection of text documents into a scipy.sparse matrix holding
token occurrence counts (or binary occurrence information), possibly
normalized as token frequencies if norm='l1' or projected on the euclidean
unit sphere if norm='l2'.
This text vectorizer implementation uses the hashing trick to find the
token string name to feature integer index mapping.
This strategy has several advantages:
- it is very low memory scalable to large datasets as there is no need to
store a vocabulary dictionary in memory
- it is fast to pickle and un-pickle as it holds no state besides the
constructor parameters
- it can be used in a streaming (partial fit) or parallel pipeline as there
is no state computed during fit.
There are also a couple of cons (vs using a CountVectorizer with an
in-memory vocabulary):
- there is no way to compute the inverse transform (from feature indices to
string feature names) which can be a problem when trying to introspect
which features are most important to a model.
- there can be collisions: distinct tokens can be mapped to the same
feature index. However in practice this is rarely an issue if n_features
is large enough (e.g. 2 ** 18 for text classification problems).
- no IDF weighting as this would render the transformer stateful.
The hash function employed is the signed 32-bit version of Murmurhash3.
Parameters
----------
input: string {'filename', 'file', 'content'}
If 'filename', the sequence passed as an argument to fit is
expected to be a list of filenames that need reading to fetch
the raw content to analyze.
If 'file', the sequence items must have a 'read' method (file-like
object) that is called to fetch the bytes in memory.
Otherwise the input is expected to be the sequence strings or
bytes items are expected to be analyzed directly.
encoding : string, 'utf-8' by default.
If bytes or files are given to analyze, this encoding is used to
decode.
decode_error : {'strict', 'ignore', 'replace'}
Instruction on what to do if a byte sequence is given to analyze that
contains characters not of the given `encoding`. By default, it is
'strict', meaning that a UnicodeDecodeError will be raised. Other
values are 'ignore' and 'replace'.
strip_accents: {'ascii', 'unicode', None}
Remove accents during the preprocessing step.
'ascii' is a fast method that only works on characters that have
        a direct ASCII mapping.
'unicode' is a slightly slower method that works on any characters.
None (default) does nothing.
analyzer: string, {'word', 'char', 'char_wb'} or callable
Whether the feature should be made of word or character n-grams.
Option 'char_wb' creates character n-grams only from text inside
word boundaries.
If a callable is passed it is used to extract the sequence of features
out of the raw, unprocessed input.
preprocessor: callable or None (default)
Override the preprocessing (string transformation) stage while
preserving the tokenizing and n-grams generation steps.
tokenizer: callable or None (default)
Override the string tokenization step while preserving the
preprocessing and n-grams generation steps.
ngram_range: tuple (min_n, max_n)
The lower and upper boundary of the range of n-values for different
n-grams to be extracted. All values of n such that min_n <= n <= max_n
will be used.
stop_words: string {'english'}, list, or None (default)
If 'english', a built-in stop word list for English is used.
If a list, that list is assumed to contain stop words, all of which
will be removed from the resulting tokens.
lowercase: boolean, default True
Convert all characters to lowercase before tokenizing.
token_pattern: string
Regular expression denoting what constitutes a "token", only used
if `analyzer == 'word'`. The default regexp selects tokens of 2
or more alphanumeric characters (punctuation is completely ignored
and always treated as a token separator).
n_features : integer, optional, (2 ** 20) by default
The number of features (columns) in the output matrices. Small numbers
of features are likely to cause hash collisions, but large numbers
will cause larger coefficient dimensions in linear learners.
norm : 'l1', 'l2' or None, optional
Norm used to normalize term vectors. None for no normalization.
binary: boolean, False by default.
If True, all non zero counts are set to 1. This is useful for discrete
probabilistic models that model binary events rather than integer
counts.
dtype: type, optional
Type of the matrix returned by fit_transform() or transform().
non_negative : boolean, optional
Whether output matrices should contain non-negative values only;
effectively calls abs on the matrix prior to returning it.
When True, output values can be interpreted as frequencies.
When False, output values will have expected value zero.
See also
--------
CountVectorizer, TfidfVectorizer
"""
def __init__(self, input='content', encoding='utf-8',
decode_error='strict', strip_accents=None,
lowercase=True, preprocessor=None, tokenizer=None,
stop_words=None, token_pattern=r"(?u)\b\w\w+\b",
ngram_range=(1, 1), analyzer='word', n_features=(2 ** 20),
binary=False, norm='l2', non_negative=False,
dtype=np.float64):
self.input = input
self.encoding = encoding
self.decode_error = decode_error
self.strip_accents = strip_accents
self.preprocessor = preprocessor
self.tokenizer = tokenizer
self.analyzer = analyzer
self.lowercase = lowercase
self.token_pattern = token_pattern
self.stop_words = stop_words
self.n_features = n_features
self.ngram_range = ngram_range
self.binary = binary
self.norm = norm
self.non_negative = non_negative
self.dtype = dtype
def partial_fit(self, X, y=None):
"""Does nothing: this transformer is stateless.
This method is just there to mark the fact that this transformer
can work in a streaming setup.
"""
return self
def fit(self, X, y=None):
"""Does nothing: this transformer is stateless."""
# triggers a parameter validation
self._get_hasher().fit(X, y=y)
return self
def transform(self, X, y=None):
"""Transform a sequence of documents to a document-term matrix.
Parameters
----------
X : iterable over raw text documents, length = n_samples
Samples. Each sample must be a text document (either bytes or
unicode strings, file name or file object depending on the
constructor argument) which will be tokenized and hashed.
y : (ignored)
Returns
-------
X : scipy.sparse matrix, shape = (n_samples, self.n_features)
Document-term matrix.
"""
analyzer = self.build_analyzer()
X = self._get_hasher().transform(analyzer(doc) for doc in X)
if self.binary:
X.data.fill(1)
if self.norm is not None:
X = normalize(X, norm=self.norm, copy=False)
return X
# Alias transform to fit_transform for convenience
fit_transform = transform
def _get_hasher(self):
return FeatureHasher(n_features=self.n_features,
input_type='string', dtype=self.dtype,
non_negative=self.non_negative)
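# Hedged usage sketch (illustrative only, not exercised anywhere in this
# module): how the stateless HashingVectorizer defined above is typically
# driven. The toy corpus is invented for the example.
def _hashing_vectorizer_example():
    corpus = ["the quick brown fox", "jumped over the lazy dog"]
    vectorizer = HashingVectorizer(n_features=2 ** 10, norm='l2')
    # returns a scipy.sparse matrix of shape (2, 1024); no fit step is needed
    # because the vectorizer keeps no vocabulary state
    return vectorizer.transform(corpus)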
def _document_frequency(X):
"""Count the number of non-zero values for each feature in sparse X."""
if sp.isspmatrix_csr(X):
return bincount(X.indices, minlength=X.shape[1])
else:
return np.diff(sp.csc_matrix(X, copy=False).indptr)
class CountVectorizer(BaseEstimator, VectorizerMixin):
"""Convert a collection of text documents to a matrix of token counts
This implementation produces a sparse representation of the counts using
scipy.sparse.coo_matrix.
If you do not provide an a-priori dictionary and you do not use an analyzer
that does some kind of feature selection then the number of features will
be equal to the vocabulary size found by analyzing the data.
Parameters
----------
input : string {'filename', 'file', 'content'}
If 'filename', the sequence passed as an argument to fit is
expected to be a list of filenames that need reading to fetch
the raw content to analyze.
If 'file', the sequence items must have a 'read' method (file-like
object) that is called to fetch the bytes in memory.
Otherwise the input is expected to be the sequence strings or
bytes items are expected to be analyzed directly.
encoding : string, 'utf-8' by default.
If bytes or files are given to analyze, this encoding is used to
decode.
decode_error : {'strict', 'ignore', 'replace'}
Instruction on what to do if a byte sequence is given to analyze that
contains characters not of the given `encoding`. By default, it is
'strict', meaning that a UnicodeDecodeError will be raised. Other
values are 'ignore' and 'replace'.
strip_accents : {'ascii', 'unicode', None}
Remove accents during the preprocessing step.
'ascii' is a fast method that only works on characters that have
        a direct ASCII mapping.
'unicode' is a slightly slower method that works on any characters.
None (default) does nothing.
analyzer : string, {'word', 'char', 'char_wb'} or callable
Whether the feature should be made of word or character n-grams.
Option 'char_wb' creates character n-grams only from text inside
word boundaries.
If a callable is passed it is used to extract the sequence of features
out of the raw, unprocessed input.
preprocessor : callable or None (default)
Override the preprocessing (string transformation) stage while
preserving the tokenizing and n-grams generation steps.
tokenizer : callable or None (default)
Override the string tokenization step while preserving the
preprocessing and n-grams generation steps.
ngram_range : tuple (min_n, max_n)
The lower and upper boundary of the range of n-values for different
n-grams to be extracted. All values of n such that min_n <= n <= max_n
will be used.
stop_words : string {'english'}, list, or None (default)
If 'english', a built-in stop word list for English is used.
If a list, that list is assumed to contain stop words, all of which
will be removed from the resulting tokens.
If None, no stop words will be used. max_df can be set to a value
in the range [0.7, 1.0) to automatically detect and filter stop
words based on intra corpus document frequency of terms.
lowercase : boolean, True by default
Convert all characters to lowercase before tokenizing.
token_pattern : string
Regular expression denoting what constitutes a "token", only used
        if `analyzer == 'word'`. The default regexp selects tokens of 2
or more alphanumeric characters (punctuation is completely ignored
and always treated as a token separator).
max_df : float in range [0.0, 1.0] or int, optional, 1.0 by default
When building the vocabulary ignore terms that have a document
frequency strictly higher than the given threshold (corpus-specific
stop words).
If float, the parameter represents a proportion of documents, integer
absolute counts.
This parameter is ignored if vocabulary is not None.
min_df : float in range [0.0, 1.0] or int, optional, 1 by default
When building the vocabulary ignore terms that have a document
frequency strictly lower than the given threshold. This value is also
called cut-off in the literature.
If float, the parameter represents a proportion of documents, integer
absolute counts.
This parameter is ignored if vocabulary is not None.
max_features : optional, None by default
        If not None, build a vocabulary that only considers the top
max_features ordered by term frequency across the corpus.
This parameter is ignored if vocabulary is not None.
vocabulary : Mapping or iterable, optional
Either a Mapping (e.g., a dict) where keys are terms and values are
indices in the feature matrix, or an iterable over terms. If not
given, a vocabulary is determined from the input documents. Indices
in the mapping should not be repeated and should not have any gap
between 0 and the largest index.
binary : boolean, False by default.
If True, all non zero counts are set to 1. This is useful for discrete
probabilistic models that model binary events rather than integer
counts.
dtype : type, optional
Type of the matrix returned by fit_transform() or transform().
Attributes
----------
vocabulary_ : dict
A mapping of terms to feature indices.
stop_words_ : set
Terms that were ignored because they either:
- occurred in too many documents (`max_df`)
- occurred in too few documents (`min_df`)
- were cut off by feature selection (`max_features`).
This is only available if no vocabulary was given.
See also
--------
HashingVectorizer, TfidfVectorizer
Notes
-----
The ``stop_words_`` attribute can get large and increase the model size
when pickling. This attribute is provided only for introspection and can
be safely removed using delattr or set to None before pickling.
"""
def __init__(self, input='content', encoding='utf-8',
decode_error='strict', strip_accents=None,
lowercase=True, preprocessor=None, tokenizer=None,
stop_words=None, token_pattern=r"(?u)\b\w\w+\b",
ngram_range=(1, 1), analyzer='word',
max_df=1.0, min_df=1, max_features=None,
vocabulary=None, binary=False, dtype=np.int64):
self.input = input
self.encoding = encoding
self.decode_error = decode_error
self.strip_accents = strip_accents
self.preprocessor = preprocessor
self.tokenizer = tokenizer
self.analyzer = analyzer
self.lowercase = lowercase
self.token_pattern = token_pattern
self.stop_words = stop_words
self.max_df = max_df
self.min_df = min_df
if max_df < 0 or min_df < 0:
raise ValueError("negative value for max_df of min_df")
self.max_features = max_features
if max_features is not None:
if (not isinstance(max_features, numbers.Integral) or
max_features <= 0):
raise ValueError(
"max_features=%r, neither a positive integer nor None"
% max_features)
self.ngram_range = ngram_range
self.vocabulary = vocabulary
self.binary = binary
self.dtype = dtype
def _sort_features(self, X, vocabulary):
"""Sort features by name
Returns a reordered matrix and modifies the vocabulary in place
"""
sorted_features = sorted(six.iteritems(vocabulary))
map_index = np.empty(len(sorted_features), dtype=np.int32)
for new_val, (term, old_val) in enumerate(sorted_features):
map_index[new_val] = old_val
vocabulary[term] = new_val
return X[:, map_index]
def _limit_features(self, X, vocabulary, high=None, low=None,
limit=None):
"""Remove too rare or too common features.
        Prune features that are nonzero in more documents than ``high`` or in
        fewer documents than ``low``, modifying the vocabulary and restricting
        it to at most the ``limit`` most frequent features.
This does not prune samples with zero features.
"""
if high is None and low is None and limit is None:
return X, set()
# Calculate a mask based on document frequencies
dfs = _document_frequency(X)
tfs = np.asarray(X.sum(axis=0)).ravel()
mask = np.ones(len(dfs), dtype=bool)
if high is not None:
mask &= dfs <= high
if low is not None:
mask &= dfs >= low
if limit is not None and mask.sum() > limit:
mask_inds = (-tfs[mask]).argsort()[:limit]
new_mask = np.zeros(len(dfs), dtype=bool)
new_mask[np.where(mask)[0][mask_inds]] = True
mask = new_mask
new_indices = np.cumsum(mask) - 1 # maps old indices to new
removed_terms = set()
for term, old_index in list(six.iteritems(vocabulary)):
if mask[old_index]:
vocabulary[term] = new_indices[old_index]
else:
del vocabulary[term]
removed_terms.add(term)
kept_indices = np.where(mask)[0]
if len(kept_indices) == 0:
raise ValueError("After pruning, no terms remain. Try a lower"
" min_df or a higher max_df.")
return X[:, kept_indices], removed_terms
def _count_vocab(self, raw_documents, fixed_vocab):
"""Create sparse feature matrix, and vocabulary where fixed_vocab=False
"""
if fixed_vocab:
vocabulary = self.vocabulary_
else:
# Add a new value when a new vocabulary item is seen
vocabulary = defaultdict()
vocabulary.default_factory = vocabulary.__len__
analyze = self.build_analyzer()
j_indices = _make_int_array()
indptr = _make_int_array()
indptr.append(0)
for doc in raw_documents:
for feature in analyze(doc):
try:
j_indices.append(vocabulary[feature])
except KeyError:
# Ignore out-of-vocabulary items for fixed_vocab=True
continue
indptr.append(len(j_indices))
if not fixed_vocab:
# disable defaultdict behaviour
vocabulary = dict(vocabulary)
if not vocabulary:
raise ValueError("empty vocabulary; perhaps the documents only"
" contain stop words")
j_indices = frombuffer_empty(j_indices, dtype=np.intc)
indptr = np.frombuffer(indptr, dtype=np.intc)
values = np.ones(len(j_indices))
X = sp.csr_matrix((values, j_indices, indptr),
shape=(len(indptr) - 1, len(vocabulary)),
dtype=self.dtype)
X.sum_duplicates()
return vocabulary, X
def fit(self, raw_documents, y=None):
"""Learn a vocabulary dictionary of all tokens in the raw documents.
Parameters
----------
raw_documents : iterable
An iterable which yields either str, unicode or file objects.
Returns
-------
self
"""
self.fit_transform(raw_documents)
return self
def fit_transform(self, raw_documents, y=None):
"""Learn the vocabulary dictionary and return term-document matrix.
This is equivalent to fit followed by transform, but more efficiently
implemented.
Parameters
----------
raw_documents : iterable
An iterable which yields either str, unicode or file objects.
Returns
-------
X : array, [n_samples, n_features]
Document-term matrix.
"""
# We intentionally don't call the transform method to make
# fit_transform overridable without unwanted side effects in
# TfidfVectorizer.
self._validate_vocabulary()
max_df = self.max_df
min_df = self.min_df
max_features = self.max_features
vocabulary, X = self._count_vocab(raw_documents,
self.fixed_vocabulary_)
if self.binary:
X.data.fill(1)
if not self.fixed_vocabulary_:
X = self._sort_features(X, vocabulary)
n_doc = X.shape[0]
max_doc_count = (max_df
if isinstance(max_df, numbers.Integral)
else max_df * n_doc)
min_doc_count = (min_df
if isinstance(min_df, numbers.Integral)
else min_df * n_doc)
if max_doc_count < min_doc_count:
            raise ValueError(
                "max_df corresponds to fewer documents than min_df")
X, self.stop_words_ = self._limit_features(X, vocabulary,
max_doc_count,
min_doc_count,
max_features)
self.vocabulary_ = vocabulary
return X
def transform(self, raw_documents):
"""Transform documents to document-term matrix.
Extract token counts out of raw text documents using the vocabulary
fitted with fit or the one provided to the constructor.
Parameters
----------
raw_documents : iterable
An iterable which yields either str, unicode or file objects.
Returns
-------
X : sparse matrix, [n_samples, n_features]
Document-term matrix.
"""
if not hasattr(self, 'vocabulary_'):
self._validate_vocabulary()
self._check_vocabulary()
# use the same matrix-building strategy as fit_transform
_, X = self._count_vocab(raw_documents, fixed_vocab=True)
if self.binary:
X.data.fill(1)
return X
def inverse_transform(self, X):
"""Return terms per document with nonzero entries in X.
Parameters
----------
X : {array, sparse matrix}, shape = [n_samples, n_features]
Returns
-------
X_inv : list of arrays, len = n_samples
List of arrays of terms.
"""
self._check_vocabulary()
if sp.issparse(X):
# We need CSR format for fast row manipulations.
X = X.tocsr()
else:
# We need to convert X to a matrix, so that the indexing
# returns 2D objects
X = np.asmatrix(X)
n_samples = X.shape[0]
terms = np.array(list(self.vocabulary_.keys()))
indices = np.array(list(self.vocabulary_.values()))
inverse_vocabulary = terms[np.argsort(indices)]
return [inverse_vocabulary[X[i, :].nonzero()[1]].ravel()
for i in range(n_samples)]
def get_feature_names(self):
"""Array mapping from feature integer indices to feature name"""
self._check_vocabulary()
return [t for t, i in sorted(six.iteritems(self.vocabulary_),
key=itemgetter(1))]
def _make_int_array():
"""Construct an array.array of a type suitable for scipy.sparse indices."""
return array.array(str("i"))
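# Illustrative sketch (not part of the original module): a minimal end-to-end
# use of the CountVectorizer defined above on a tiny made-up corpus.  It only
# runs when this file is executed directly.
if __name__ == "__main__":  # pragma: no cover - example only
    _example_docs = ["the cat sat on the mat",
                     "the dog sat on the log",
                     "cats and dogs"]
    _vectorizer = CountVectorizer(ngram_range=(1, 1), min_df=1)
    _counts = _vectorizer.fit_transform(_example_docs)
    print(_vectorizer.get_feature_names())  # sorted vocabulary terms
    print(_counts.toarray())                # one row of counts per document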
class TfidfTransformer(BaseEstimator, TransformerMixin):
"""Transform a count matrix to a normalized tf or tf-idf representation
Tf means term-frequency while tf-idf means term-frequency times inverse
document-frequency. This is a common term weighting scheme in information
retrieval, that has also found good use in document classification.
The goal of using tf-idf instead of the raw frequencies of occurrence of a
token in a given document is to scale down the impact of tokens that occur
very frequently in a given corpus and that are hence empirically less
informative than features that occur in a small fraction of the training
corpus.
The actual formula used for tf-idf is tf * (idf + 1) = tf + tf * idf,
instead of tf * idf. The effect of this is that terms with zero idf, i.e.
that occur in all documents of a training set, will not be entirely
ignored. The formulas used to compute tf and idf depend on parameter
settings that correspond to the SMART notation used in IR, as follows:
Tf is "n" (natural) by default, "l" (logarithmic) when sublinear_tf=True.
Idf is "t" when use_idf is given, "n" (none) otherwise.
Normalization is "c" (cosine) when norm='l2', "n" (none) when norm=None.
Parameters
----------
norm : 'l1', 'l2' or None, optional
Norm used to normalize term vectors. None for no normalization.
use_idf : boolean, optional
Enable inverse-document-frequency reweighting.
smooth_idf : boolean, optional
Smooth idf weights by adding one to document frequencies, as if an
extra document was seen containing every term in the collection
exactly once. Prevents zero divisions.
sublinear_tf : boolean, optional
Apply sublinear tf scaling, i.e. replace tf with 1 + log(tf).
References
----------
.. [Yates2011] `R. Baeza-Yates and B. Ribeiro-Neto (2011). Modern
Information Retrieval. Addison Wesley, pp. 68-74.`
.. [MRS2008] `C.D. Manning, P. Raghavan and H. Schuetze (2008).
Introduction to Information Retrieval. Cambridge University
Press, pp. 118-120.`
"""
def __init__(self, norm='l2', use_idf=True, smooth_idf=True,
sublinear_tf=False):
self.norm = norm
self.use_idf = use_idf
self.smooth_idf = smooth_idf
self.sublinear_tf = sublinear_tf
def fit(self, X, y=None):
"""Learn the idf vector (global term weights)
Parameters
----------
X : sparse matrix, [n_samples, n_features]
a matrix of term/token counts
"""
if not sp.issparse(X):
X = sp.csc_matrix(X)
if self.use_idf:
n_samples, n_features = X.shape
df = _document_frequency(X)
# perform idf smoothing if required
df += int(self.smooth_idf)
n_samples += int(self.smooth_idf)
# log1p instead of log makes sure terms with zero idf don't get
# suppressed entirely
idf = np.log(float(n_samples) / df) + 1.0
self._idf_diag = sp.spdiags(idf,
diags=0, m=n_features, n=n_features)
return self
def transform(self, X, copy=True):
"""Transform a count matrix to a tf or tf-idf representation
Parameters
----------
X : sparse matrix, [n_samples, n_features]
a matrix of term/token counts
copy : boolean, default True
Whether to copy X and operate on the copy or perform in-place
operations.
Returns
-------
vectors : sparse matrix, [n_samples, n_features]
"""
if hasattr(X, 'dtype') and np.issubdtype(X.dtype, np.float):
# preserve float family dtype
X = sp.csr_matrix(X, copy=copy)
else:
# convert counts or binary occurrences to floats
X = sp.csr_matrix(X, dtype=np.float64, copy=copy)
n_samples, n_features = X.shape
if self.sublinear_tf:
np.log(X.data, X.data)
X.data += 1
if self.use_idf:
check_is_fitted(self, '_idf_diag', 'idf vector is not fitted')
expected_n_features = self._idf_diag.shape[0]
if n_features != expected_n_features:
raise ValueError("Input has n_features=%d while the model"
" has been trained with n_features=%d" % (
n_features, expected_n_features))
# *= doesn't work
X = X * self._idf_diag
if self.norm:
X = normalize(X, norm=self.norm, copy=False)
return X
@property
def idf_(self):
if hasattr(self, "_idf_diag"):
return np.ravel(self._idf_diag.sum(axis=0))
else:
return None
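# Illustrative numeric sketch (not part of the original module): with norm=None
# and smooth_idf=False, TfidfTransformer applies exactly the tf * (idf + 1)
# weighting described above, where idf = ln(n_samples / df).  The counts below
# are made up for illustration; the block only runs when executed directly.
if __name__ == "__main__":  # pragma: no cover - example only
    _counts = np.array([[3, 0, 1],
                        [2, 0, 0],
                        [3, 0, 0],
                        [4, 0, 0]])
    _tfidf = TfidfTransformer(norm=None, smooth_idf=False)
    # Term 0 occurs in every document, so idf = ln(4/4) = 0 and its weighted
    # values equal the raw counts (tf * (0 + 1)); rarer terms are scaled up.
    print(_tfidf.fit_transform(_counts).toarray())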
class TfidfVectorizer(CountVectorizer):
"""Convert a collection of raw documents to a matrix of TF-IDF features.
Equivalent to CountVectorizer followed by TfidfTransformer.
Parameters
----------
input : string {'filename', 'file', 'content'}
If 'filename', the sequence passed as an argument to fit is
expected to be a list of filenames that need reading to fetch
the raw content to analyze.
If 'file', the sequence items must have a 'read' method (file-like
object) that is called to fetch the bytes in memory.
Otherwise the input is expected to be a sequence of items that can be
of type string or bytes, which are analyzed directly.
encoding : string, 'utf-8' by default.
If bytes or files are given to analyze, this encoding is used to
decode.
decode_error : {'strict', 'ignore', 'replace'}
Instruction on what to do if a byte sequence is given to analyze that
contains characters not of the given `encoding`. By default, it is
'strict', meaning that a UnicodeDecodeError will be raised. Other
values are 'ignore' and 'replace'.
strip_accents : {'ascii', 'unicode', None}
Remove accents during the preprocessing step.
'ascii' is a fast method that only works on characters that have
a direct ASCII mapping.
'unicode' is a slightly slower method that works on any characters.
None (default) does nothing.
analyzer : string, {'word', 'char'} or callable
Whether the feature should be made of word or character n-grams.
If a callable is passed it is used to extract the sequence of features
out of the raw, unprocessed input.
preprocessor : callable or None (default)
Override the preprocessing (string transformation) stage while
preserving the tokenizing and n-grams generation steps.
tokenizer : callable or None (default)
Override the string tokenization step while preserving the
preprocessing and n-grams generation steps.
ngram_range : tuple (min_n, max_n)
The lower and upper boundary of the range of n-values for different
n-grams to be extracted. All values of n such that min_n <= n <= max_n
will be used.
stop_words : string {'english'}, list, or None (default)
If a string, it is passed to _check_stop_list and the appropriate stop
list is returned. 'english' is currently the only supported string
value.
If a list, that list is assumed to contain stop words, all of which
will be removed from the resulting tokens.
If None, no stop words will be used. max_df can be set to a value
in the range [0.7, 1.0) to automatically detect and filter stop
words based on intra corpus document frequency of terms.
lowercase : boolean, default True
Convert all characters to lowercase before tokenizing.
token_pattern : string
Regular expression denoting what constitutes a "token", only used
if `analyzer == 'word'`. The default regexp selects tokens of 2
or more alphanumeric characters (punctuation is completely ignored
and always treated as a token separator).
max_df : float in range [0.0, 1.0] or int, optional, 1.0 by default
When building the vocabulary ignore terms that have a document frequency
strictly higher than the given threshold (corpus specific stop words).
If float, the parameter represents a proportion of documents, integer
absolute counts.
This parameter is ignored if vocabulary is not None.
min_df : float in range [0.0, 1.0] or int, optional, 1 by default
When building the vocabulary ignore terms that have a document frequency
strictly lower than the given threshold.
This value is also called cut-off in the literature.
If float, the parameter represents a proportion of documents, integer
absolute counts.
This parameter is ignored if vocabulary is not None.
max_features : optional, None by default
If not None, build a vocabulary that only consider the top
max_features ordered by term frequency across the corpus.
This parameter is ignored if vocabulary is not None.
vocabulary : Mapping or iterable, optional
Either a Mapping (e.g., a dict) where keys are terms and values are
indices in the feature matrix, or an iterable over terms. If not
given, a vocabulary is determined from the input documents.
binary : boolean, False by default.
If True, all non-zero term counts are set to 1. This does not mean
outputs will have only 0/1 values, only that the tf term in tf-idf
is binary. (Set idf and normalization to False to get 0/1 outputs.)
dtype : type, optional
Type of the matrix returned by fit_transform() or transform().
norm : 'l1', 'l2' or None, optional
Norm used to normalize term vectors. None for no normalization.
use_idf : boolean, optional
Enable inverse-document-frequency reweighting.
smooth_idf : boolean, optional
Smooth idf weights by adding one to document frequencies, as if an
extra document was seen containing every term in the collection
exactly once. Prevents zero divisions.
sublinear_tf : boolean, optional
Apply sublinear tf scaling, i.e. replace tf with 1 + log(tf).
Attributes
----------
idf_ : array, shape = [n_features], or None
The learned idf vector (global term weights)
when ``use_idf`` is set to True, None otherwise.
stop_words_ : set
Terms that were ignored because they either:
- occurred in too many documents (`max_df`)
- occurred in too few documents (`min_df`)
- were cut off by feature selection (`max_features`).
This is only available if no vocabulary was given.
See also
--------
CountVectorizer
Tokenize the documents and count the occurrences of token and return
them as a sparse matrix
TfidfTransformer
Apply Term Frequency Inverse Document Frequency normalization to a
sparse matrix of occurrence counts.
Notes
-----
The ``stop_words_`` attribute can get large and increase the model size
when pickling. This attribute is provided only for introspection and can
be safely removed using delattr or set to None before pickling.
"""
def __init__(self, input='content', encoding='utf-8',
decode_error='strict', strip_accents=None, lowercase=True,
preprocessor=None, tokenizer=None, analyzer='word',
stop_words=None, token_pattern=r"(?u)\b\w\w+\b",
ngram_range=(1, 1), max_df=1.0, min_df=1,
max_features=None, vocabulary=None, binary=False,
dtype=np.int64, norm='l2', use_idf=True, smooth_idf=True,
sublinear_tf=False):
super(TfidfVectorizer, self).__init__(
input=input, encoding=encoding, decode_error=decode_error,
strip_accents=strip_accents, lowercase=lowercase,
preprocessor=preprocessor, tokenizer=tokenizer, analyzer=analyzer,
stop_words=stop_words, token_pattern=token_pattern,
ngram_range=ngram_range, max_df=max_df, min_df=min_df,
max_features=max_features, vocabulary=vocabulary, binary=binary,
dtype=dtype)
self._tfidf = TfidfTransformer(norm=norm, use_idf=use_idf,
smooth_idf=smooth_idf,
sublinear_tf=sublinear_tf)
# Broadcast the TF-IDF parameters to the underlying transformer instance
# for easy grid search and repr
@property
def norm(self):
return self._tfidf.norm
@norm.setter
def norm(self, value):
self._tfidf.norm = value
@property
def use_idf(self):
return self._tfidf.use_idf
@use_idf.setter
def use_idf(self, value):
self._tfidf.use_idf = value
@property
def smooth_idf(self):
return self._tfidf.smooth_idf
@smooth_idf.setter
def smooth_idf(self, value):
self._tfidf.smooth_idf = value
@property
def sublinear_tf(self):
return self._tfidf.sublinear_tf
@sublinear_tf.setter
def sublinear_tf(self, value):
self._tfidf.sublinear_tf = value
@property
def idf_(self):
return self._tfidf.idf_
def fit(self, raw_documents, y=None):
"""Learn vocabulary and idf from training set.
Parameters
----------
raw_documents : iterable
an iterable which yields either str, unicode or file objects
Returns
-------
self : TfidfVectorizer
"""
X = super(TfidfVectorizer, self).fit_transform(raw_documents)
self._tfidf.fit(X)
return self
def fit_transform(self, raw_documents, y=None):
"""Learn vocabulary and idf, return term-document matrix.
This is equivalent to fit followed by transform, but more efficiently
implemented.
Parameters
----------
raw_documents : iterable
an iterable which yields either str, unicode or file objects
Returns
-------
X : sparse matrix, [n_samples, n_features]
Tf-idf-weighted document-term matrix.
"""
X = super(TfidfVectorizer, self).fit_transform(raw_documents)
self._tfidf.fit(X)
# X is already a transformed view of raw_documents so
# we set copy to False
return self._tfidf.transform(X, copy=False)
def transform(self, raw_documents, copy=True):
"""Transform documents to document-term matrix.
Uses the vocabulary and document frequencies (df) learned by fit (or
fit_transform).
Parameters
----------
raw_documents : iterable
an iterable which yields either str, unicode or file objects
copy : boolean, default True
Whether to copy X and operate on the copy or perform in-place
operations.
Returns
-------
X : sparse matrix, [n_samples, n_features]
Tf-idf-weighted document-term matrix.
"""
check_is_fitted(self, '_tfidf', 'The tfidf vector is not fitted')
X = super(TfidfVectorizer, self).transform(raw_documents)
return self._tfidf.transform(X, copy=False)
| bsd-3-clause |
JVillella/tensorflow | tensorflow/contrib/factorization/python/ops/gmm.py | 47 | 5877 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implementation of Gaussian mixture model (GMM) clustering using tf.Learn."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib import framework
from tensorflow.contrib.factorization.python.ops import gmm_ops
from tensorflow.contrib.framework.python.framework import checkpoint_utils
from tensorflow.contrib.framework.python.ops import variables
from tensorflow.contrib.learn.python.learn.estimators import estimator
from tensorflow.contrib.learn.python.learn.estimators import model_fn as model_fn_lib
from tensorflow.python.framework import constant_op
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops.control_flow_ops import with_dependencies
def _streaming_sum(scalar_tensor):
"""Create a sum metric and update op."""
sum_metric = framework.local_variable(constant_op.constant(0.0))
sum_update = sum_metric.assign_add(scalar_tensor)
return sum_metric, sum_update
class GMM(estimator.Estimator):
"""An estimator for GMM clustering."""
SCORES = 'scores'
ASSIGNMENTS = 'assignments'
ALL_SCORES = 'all_scores'
def __init__(self,
num_clusters,
model_dir=None,
random_seed=0,
params='wmc',
initial_clusters='random',
covariance_type='full',
config=None):
"""Creates a model for running GMM training and inference.
Args:
num_clusters: number of clusters to train.
model_dir: the directory to save the model results and log files.
random_seed: Python integer. Seed for PRNG used to initialize centers.
params: Controls which parameters are updated in the training process.
Can contain any combination of "w" for weights, "m" for means,
and "c" for covars.
initial_clusters: specifies how to initialize the clusters for training.
See gmm_ops.gmm for the possible values.
covariance_type: one of "full", "diag".
config: See Estimator
"""
self._num_clusters = num_clusters
self._params = params
self._training_initial_clusters = initial_clusters
self._covariance_type = covariance_type
self._training_graph = None
self._random_seed = random_seed
super(GMM, self).__init__(
model_fn=self._model_builder(), model_dir=model_dir, config=config)
def predict_assignments(self, input_fn=None, batch_size=None, outputs=None):
"""See BaseEstimator.predict."""
results = self.predict(input_fn=input_fn,
batch_size=batch_size,
outputs=outputs)
for result in results:
yield result[GMM.ASSIGNMENTS]
def score(self, input_fn=None, batch_size=None, steps=None):
"""Predict total sum of distances to nearest clusters.
Note that this function is different from the corresponding one in sklearn
which returns the negative of the sum of distances.
Args:
input_fn: see predict.
batch_size: see predict.
steps: see predict.
Returns:
Total sum of distances to nearest clusters.
"""
results = self.evaluate(input_fn=input_fn, batch_size=batch_size,
steps=steps)
return np.sum(results[GMM.SCORES])
def weights(self):
"""Returns the cluster weights."""
return checkpoint_utils.load_variable(
self.model_dir, gmm_ops.GmmAlgorithm.CLUSTERS_WEIGHT)
def clusters(self):
"""Returns cluster centers."""
clusters = checkpoint_utils.load_variable(
self.model_dir, gmm_ops.GmmAlgorithm.CLUSTERS_VARIABLE)
return np.squeeze(clusters, 1)
def covariances(self):
"""Returns the covariances."""
return checkpoint_utils.load_variable(
self.model_dir, gmm_ops.GmmAlgorithm.CLUSTERS_COVS_VARIABLE)
def _parse_tensor_or_dict(self, features):
if isinstance(features, dict):
return array_ops.concat([features[k] for k in sorted(features.keys())],
1)
return features
def _model_builder(self):
"""Creates a model function."""
def _model_fn(features, labels, mode):
"""Model function."""
assert labels is None, labels
(all_scores, model_predictions, losses, training_op) = gmm_ops.gmm(
self._parse_tensor_or_dict(features), self._training_initial_clusters,
self._num_clusters, self._random_seed, self._covariance_type,
self._params)
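      # gmm_ops.gmm returns the per-cluster scores, the predicted cluster
      # assignments, the per-example losses and the training (EM update) op.
      # Below, the losses are summed into a single scalar loss, the global-step
      # increment is chained onto the training op, and the scores/assignments
      # are exposed as the estimator's predictions and eval metric.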
incr_step = state_ops.assign_add(variables.get_global_step(), 1)
loss = math_ops.reduce_sum(losses)
training_op = with_dependencies([training_op, incr_step], loss)
predictions = {
GMM.ALL_SCORES: all_scores[0],
GMM.ASSIGNMENTS: model_predictions[0][0],
}
eval_metric_ops = {
GMM.SCORES: _streaming_sum(loss),
}
return model_fn_lib.ModelFnOps(mode=mode, predictions=predictions,
eval_metric_ops=eval_metric_ops,
loss=loss, train_op=training_op)
return _model_fn
| apache-2.0 |
mrkowalski/kaggle_santander | scikit/src/commons.py | 1 | 7185 | import pandas as pd
import numpy as np
from sklearn.externals import joblib
from sklearn.preprocessing import LabelEncoder
from functools import partial
import re
num_months = 4
chunk_size = 1000000
indicators = ['ind_ahor_fin_ult1', 'ind_aval_fin_ult1', 'ind_cco_fin_ult1', 'ind_cder_fin_ult1', 'ind_cno_fin_ult1',
'ind_ctju_fin_ult1', 'ind_ctma_fin_ult1', 'ind_ctop_fin_ult1', 'ind_ctpp_fin_ult1', 'ind_deco_fin_ult1',
'ind_deme_fin_ult1', 'ind_dela_fin_ult1', 'ind_ecue_fin_ult1', 'ind_fond_fin_ult1', 'ind_hip_fin_ult1',
'ind_plan_fin_ult1', 'ind_pres_fin_ult1', 'ind_reca_fin_ult1', 'ind_tjcr_fin_ult1', 'ind_valo_fin_ult1',
'ind_viv_fin_ult1', 'ind_nomina_ult1', 'ind_nom_pens_ult1', 'ind_recibo_ult1']
column_prefix = 'bd'
main_dataframe = 'dataframe.pkl'
dataframe_dir = 'dataframes'
main_csv = 'stuff_6mo_all_indicators.csv'
def read_dataframe(): return pd.read_pickle(dataframe_dir + "/" + main_dataframe)
def read_dataframe_for_feature(feature): return pd.read_pickle(dataframe_dir + "/" + feature + ".pkl")
def save_dataframe_for_feature(df, feature): df.to_pickle(dataframe_dir + "/" + feature + ".pkl")
def save_dataframe(df): df.to_pickle(dataframe_dir + "/" + main_dataframe)
def predicted_column_name(name): return "{}{}_{}".format(column_prefix, num_months, name)
def save_model(clf, feature): joblib.dump(clf, "models/" + feature + ".pkl")
def read_model(feature): return joblib.load("models/" + feature + ".pkl")
def dtypes_range(name, upto, dtype): return {("bd{}_{}".format(i, name), dtype) for i in range(1, upto + 1)}
dtypes = {
'bd1_renta': np.int32,
'bd1_sexo': np.int8,
'bd1_pais_residencia': np.int16,
'bd1_canal_entrada': np.int16,
'bd1_ind_nuevo': np.bool
}
dtypes.update(dtypes_range('segmento_individual', num_months, np.bool))
dtypes.update(dtypes_range('segmento_vip', num_months, np.bool))
dtypes.update(dtypes_range('segmento_graduate', num_months, np.bool))
for i in indicators: dtypes.update(dtypes_range(i, num_months, np.bool))
dtypes.update(dtypes_range('cod_prov', num_months, np.uint8))
dtypes.update(dtypes_range('ind_empleado', num_months, np.int8))
dtypes.update(dtypes_range('age', num_months, np.int))
dtypes.update(dtypes_range('indrel_99', num_months, np.bool))
dtypes.update(dtypes_range('ind_actividad_cliente', num_months, np.bool))
dtypes.update(dtypes_range('antiguedad', num_months, np.int))
dtypes.update(dtypes_range('tipodom', num_months, np.bool))
dtypes.update(dtypes_range('indfall', num_months, np.bool))
dtypes.update(dtypes_range('indext', num_months, np.bool))
dtypes.update(dtypes_range('indresi', num_months, np.bool))
dtypes.update(dtypes_range('indrel_1mes', num_months, np.int8))
dtypes.update(dtypes_range('tiprel_1mes', num_months, np.int8))
rx_prefix = re.compile('bd\\d_')
le = LabelEncoder()
field_values = {
'sexo': LabelEncoder().fit(['V', 'H']),
'pais_residencia': LabelEncoder().fit(
['ES', 'CA', 'CH', 'CL', 'IE', 'AT', 'NL', 'FR', 'GB', 'DE', 'DO', 'BE', 'AR', 'VE', 'US', 'MX',
'BR', 'IT', 'EC', 'PE', 'CO', 'HN', 'FI', 'SE', 'AL', 'PT', 'MZ', 'CN', 'TW', 'PL', 'IN', 'CR',
'NI', 'HK', 'AD', 'CZ', 'AE', 'MA', 'GR', 'PR', 'RO', 'IL', 'RU', 'GT', 'GA', 'NO', 'SN',
'MR', 'UA', 'BG', 'PY', 'EE', 'SV', 'ET', 'CM', 'SA', 'CI', 'QA', 'LU', 'PA', 'BA', 'BO', 'AU',
'BY', 'KE', 'SG', 'HR', 'MD', 'SK', 'TR', 'AO', 'CU', 'GQ', 'EG', 'ZA', 'DK', 'UY', 'GE',
'TH', 'DZ', 'LB', 'JP', 'NG', 'PK', 'TN', 'TG', 'KR', 'GH', 'RS', 'VN', 'PH', 'KW', 'NZ',
'MM', 'KH', 'GI', 'SL', 'GN', 'GW', 'OM', 'CG', 'LV', 'LT', 'ML', 'MK', 'HU', 'IS', 'LY', 'CF',
'GM', 'KZ', 'CD', 'BZ', 'ZW', 'DJ', 'JM', 'BM', 'MT'
]),
'ind_empleado': LabelEncoder().fit(['N', 'A', 'B', 'F', 'S']),
'canal_entrada': LabelEncoder().fit(
['KHL', 'KHE', 'KHD', 'KFA', 'KFC', 'KAT', 'KAZ', 'RED', 'KHC', 'KHK', 'KGN', 'KHM', 'KHO', 'KDH',
'KEH', 'KAD', 'KBG', 'KGC', 'KHF', 'KFK', 'KHN', 'KHA', 'KAF', 'KGX', 'KFD', 'KAG', 'KFG', 'KAB',
'KCC', 'KAE', 'KAH', 'KAR', 'KFJ', 'KFL', 'KAI', 'KFU', 'KAQ', 'KFS', 'KAA', 'KFP', 'KAJ', 'KFN',
'KGV', 'KGY', 'KFF', 'KAP', 'KDE', 'KFV', '013', 'K00', 'KAK', 'KCK', 'KCL', 'KAY', 'KBU', 'KDR',
'KAC', 'KDT', 'KCG', 'KDO', 'KDY', 'KBQ', 'KDA', 'KBO', 'KCI', 'KEC', 'KBZ', 'KES', 'KDX', 'KAS',
'007', 'KEU', 'KCA', 'KAL', 'KDC', 'KAW', 'KCS', 'KCB', 'KDU', 'KDQ', 'KCN', 'KCM', '004', 'KCH',
'KCD', 'KCE', 'KEV', 'KBL', 'KEA', 'KBH', 'KDV', 'KFT', 'KEY', 'KAO', 'KEJ', 'KEO', 'KEI', 'KEW',
'KDZ', 'KBV', 'KBR', 'KBF', 'KDP', 'KCO', 'KCF', 'KCV', 'KAM', 'KEZ', 'KBD', 'KAN', 'KBY', 'KCT',
'KDD', 'KBW', 'KCU', 'KBX', 'KDB', 'KBS', 'KBE', 'KCX', 'KBP', 'KBN', 'KEB', 'KDS', 'KEL', 'KDG',
'KDF', 'KEF', 'KCP', 'KDM', 'KBB', 'KDW', 'KBJ', 'KFI', 'KBM', 'KEG', 'KEN', 'KEQ', 'KAV', 'KFH',
'KFM', 'KAU', 'KED', 'KFR', 'KEK', 'KFB', 'KGW', 'KFE', 'KGU', 'KDI', 'KDN', 'KEE', 'KCR', 'KCQ',
'KEM', 'KCJ', 'KHQ', 'KDL', '025', 'KHP', 'KHR', 'KHS']),
'indrel_1mes': LabelEncoder().fit(['1', '2', '3', '4', 'P']),
'tiprel_1mes': LabelEncoder().fit(['A', 'I', 'P', 'R', 'N']),
'indfall': LabelEncoder().fit(['N', 'S'])
}
def col_to_int(range, v):
if v is None:
return -1
else:
v = v.strip()
if len(v) == 0:
return -1
else:
return field_values[range].transform([v])[0]
def col_to_intvalue(default, v):
if v is None:
return default
else:
try:
return int(v)
except:
return default
def feature_range_list(name, upto): return ["bd{}_{}".format(i, name) for i in range(1, upto + 1)]
cols_as_integers = set(['segmento_', 'cod_prov', 'ind_', 'ind_empleado', 'age'])
cols_to_convert = ['bd1_sexo', 'bd1_pais_residencia', 'bd1_ind_empleado', 'bd1_canal_entrada'] + feature_range_list(
'indrel_1mes', num_months) + feature_range_list('indfall', num_months) + feature_range_list(
'segmento_individual', num_months) + feature_range_list('tiprel_1mes', num_months) + feature_range_list(
'segmento_graduate', num_months) + feature_range_list(
'segmento_vip', num_months) + feature_range_list('cod_prov', num_months) + feature_range_list('age', num_months)
for i in indicators: cols_to_convert = cols_to_convert + feature_range_list(i, num_months)
def make_converters():
converters = {}
for c in cols_to_convert:
if rx_prefix.match(c[:4]):
if any([c[4:].startswith(token) for token in cols_as_integers]):
converters[c] = partial(col_to_intvalue, 0)
else:
converters[c] = partial(col_to_int, c[4:])
else:
converters[c] = partial(col_to_int, c)
return converters
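# Hypothetical smoke test (not part of the original script): shows how the
# generated converters map raw CSV strings to encoded integers for columns
# named with the bd<N>_<field> convention used above.  It runs only when this
# module is executed directly.
if __name__ == '__main__':
    _converters = make_converters()
    print(_converters['bd1_sexo']('V'))  # label-encoded value for 'V'
    print(_converters['bd1_age']('42'))  # integer-style columns use int()
    print(_converters['bd1_age'](''))    # blanks fall back to the default 0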
def fillna_range(data, name, upto, v):
for i in range(1, upto + 1):
data["bd{}_{}".format(i, name)].fillna(v, inplace=True)
def read_csv(source, names=None): return pd.read_csv(source, sep=',', nrows=chunk_size, converters=make_converters(),
dtype=dtypes, names=names)
| mit |
sinhrks/scikit-learn | benchmarks/bench_plot_nmf.py | 90 | 5742 | """
Benchmarks of Non-Negative Matrix Factorization
"""
from __future__ import print_function
from collections import defaultdict
import gc
from time import time
import numpy as np
from scipy.linalg import norm
from sklearn.decomposition.nmf import NMF, _initialize_nmf
from sklearn.datasets.samples_generator import make_low_rank_matrix
from sklearn.externals.six.moves import xrange
def alt_nnmf(V, r, max_iter=1000, tol=1e-3, init='random'):
'''
A, S = nnmf(X, r, tol=1e-3, R=None)
Implement Lee & Seung's algorithm
Parameters
----------
V : 2-ndarray, [n_samples, n_features]
input matrix
r : integer
number of latent features
max_iter : integer, optional
maximum number of iterations (default: 1000)
tol : double
tolerance threshold for early exit (when the update factor is within
tol of 1., the function exits)
init : string
Method used to initialize the procedure.
Returns
-------
A : 2-ndarray, [n_samples, r]
Component part of the factorization
S : 2-ndarray, [r, n_features]
Data part of the factorization
Reference
---------
"Algorithms for Non-negative Matrix Factorization"
by Daniel D Lee, Sebastian H Seung
(available at http://citeseer.ist.psu.edu/lee01algorithms.html)
'''
# Nomenclature in the function follows Lee & Seung
eps = 1e-5
n, m = V.shape
W, H = _initialize_nmf(V, r, init, random_state=0)
for i in xrange(max_iter):
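        # Lee & Seung multiplicative updates: each factor is rescaled by the
        # ratio of the data term to the current reconstruction term,
        #   H <- H * (W^T V) / (W^T W H),   W <- W * (V H^T) / (W H H^T),
        # with eps guarding against division by zero.  These updates keep W, H
        # non-negative and do not increase the Frobenius reconstruction error.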
updateH = np.dot(W.T, V) / (np.dot(np.dot(W.T, W), H) + eps)
H *= updateH
updateW = np.dot(V, H.T) / (np.dot(W, np.dot(H, H.T)) + eps)
W *= updateW
if i % 10 == 0:
max_update = max(updateW.max(), updateH.max())
if abs(1. - max_update) < tol:
break
return W, H
def report(error, time):
print("Frobenius loss: %.5f" % error)
print("Took: %.2fs" % time)
print()
def benchmark(samples_range, features_range, rank=50, tolerance=1e-5):
timeset = defaultdict(lambda: [])
err = defaultdict(lambda: [])
for n_samples in samples_range:
for n_features in features_range:
print("%2d samples, %2d features" % (n_samples, n_features))
print('=======================')
X = np.abs(make_low_rank_matrix(n_samples, n_features,
effective_rank=rank, tail_strength=0.2))
gc.collect()
print("benchmarking nndsvd-nmf: ")
tstart = time()
m = NMF(n_components=30, tol=tolerance, init='nndsvd').fit(X)
tend = time() - tstart
timeset['nndsvd-nmf'].append(tend)
err['nndsvd-nmf'].append(m.reconstruction_err_)
report(m.reconstruction_err_, tend)
gc.collect()
print("benchmarking nndsvda-nmf: ")
tstart = time()
m = NMF(n_components=30, init='nndsvda',
tol=tolerance).fit(X)
tend = time() - tstart
timeset['nndsvda-nmf'].append(tend)
err['nndsvda-nmf'].append(m.reconstruction_err_)
report(m.reconstruction_err_, tend)
gc.collect()
print("benchmarking nndsvdar-nmf: ")
tstart = time()
m = NMF(n_components=30, init='nndsvdar',
tol=tolerance).fit(X)
tend = time() - tstart
timeset['nndsvdar-nmf'].append(tend)
err['nndsvdar-nmf'].append(m.reconstruction_err_)
report(m.reconstruction_err_, tend)
gc.collect()
print("benchmarking random-nmf")
tstart = time()
m = NMF(n_components=30, init='random', max_iter=1000,
tol=tolerance).fit(X)
tend = time() - tstart
timeset['random-nmf'].append(tend)
err['random-nmf'].append(m.reconstruction_err_)
report(m.reconstruction_err_, tend)
gc.collect()
print("benchmarking alt-random-nmf")
tstart = time()
W, H = alt_nnmf(X, r=30, init='random', tol=tolerance)
tend = time() - tstart
timeset['alt-random-nmf'].append(tend)
err['alt-random-nmf'].append(np.linalg.norm(X - np.dot(W, H)))
report(norm(X - np.dot(W, H)), tend)
return timeset, err
if __name__ == '__main__':
from mpl_toolkits.mplot3d import axes3d # register the 3d projection
axes3d
import matplotlib.pyplot as plt
samples_range = np.linspace(50, 500, 3).astype(np.int)
features_range = np.linspace(50, 500, 3).astype(np.int)
timeset, err = benchmark(samples_range, features_range)
for i, results in enumerate((timeset, err)):
        fig = plt.figure('scikit-learn Non-Negative Matrix Factorization '
                         'benchmark results')
ax = fig.gca(projection='3d')
for c, (label, timings) in zip('rbgcm', sorted(results.iteritems())):
X, Y = np.meshgrid(samples_range, features_range)
Z = np.asarray(timings).reshape(samples_range.shape[0],
features_range.shape[0])
# plot the actual surface
ax.plot_surface(X, Y, Z, rstride=8, cstride=8, alpha=0.3,
color=c)
# dummy point plot to stick the legend to since surface plot do not
# support legends (yet?)
ax.plot([1], [1], [1], color=c, label=label)
ax.set_xlabel('n_samples')
ax.set_ylabel('n_features')
zlabel = 'Time (s)' if i == 0 else 'reconstruction error'
ax.set_zlabel(zlabel)
ax.legend()
plt.show()
| bsd-3-clause |
GunoH/intellij-community | python/helpers/pydev/pydev_ipython/inputhook.py | 21 | 19415 | # coding: utf-8
"""
Inputhook management for GUI event loop integration.
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2008-2011 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import sys
import select
#-----------------------------------------------------------------------------
# Constants
#-----------------------------------------------------------------------------
# Constants for identifying the GUI toolkits.
GUI_WX = 'wx'
GUI_QT = 'qt'
GUI_QT4 = 'qt4'
GUI_QT5 = 'qt5'
GUI_GTK = 'gtk'
GUI_TK = 'tk'
GUI_OSX = 'osx'
GUI_GLUT = 'glut'
GUI_PYGLET = 'pyglet'
GUI_GTK3 = 'gtk3'
GUI_NONE = 'none' # i.e. disable
#-----------------------------------------------------------------------------
# Utilities
#-----------------------------------------------------------------------------
def ignore_CTRL_C():
"""Ignore CTRL+C (not implemented)."""
pass
def allow_CTRL_C():
"""Take CTRL+C into account (not implemented)."""
pass
#-----------------------------------------------------------------------------
# Main InputHookManager class
#-----------------------------------------------------------------------------
class InputHookManager(object):
"""Manage PyOS_InputHook for different GUI toolkits.
    This class installs various hooks under ``PyOS_InputHook`` to handle
GUI event loop integration.
"""
def __init__(self):
self._return_control_callback = None
self._apps = {}
self._reset()
self.pyplot_imported = False
def _reset(self):
self._callback_pyfunctype = None
self._callback = None
self._current_gui = None
def set_return_control_callback(self, return_control_callback):
self._return_control_callback = return_control_callback
def get_return_control_callback(self):
return self._return_control_callback
def return_control(self):
return self._return_control_callback()
def get_inputhook(self):
return self._callback
def set_inputhook(self, callback):
"""Set inputhook to callback."""
        # In the context of the PyDev console we don't actually set PyOS_InputHook;
        # instead, this callback is run while we are waiting for input over xmlrpc.
self._callback = callback
def clear_inputhook(self, app=None):
"""Clear input hook.
Parameters
----------
app : optional, ignored
This parameter is allowed only so that clear_inputhook() can be
called with a similar interface as all the ``enable_*`` methods. But
the actual value of the parameter is ignored. This uniform interface
makes it easier to have user-level entry points in the main IPython
app like :meth:`enable_gui`."""
self._reset()
def clear_app_refs(self, gui=None):
"""Clear IPython's internal reference to an application instance.
Whenever we create an app for a user on qt4 or wx, we hold a
reference to the app. This is needed because in some cases bad things
can happen if a user doesn't hold a reference themselves. This
method is provided to clear the references we are holding.
Parameters
----------
gui : None or str
If None, clear all app references. If ('wx', 'qt4') clear
the app for that toolkit. References are not held for gtk or tk
as those toolkits don't have the notion of an app.
"""
if gui is None:
self._apps = {}
elif gui in self._apps:
del self._apps[gui]
def enable_wx(self, app=None):
"""Enable event loop integration with wxPython.
Parameters
----------
app : WX Application, optional.
Running application to use. If not given, we probe WX for an
existing application object, and create a new one if none is found.
Notes
-----
This methods sets the ``PyOS_InputHook`` for wxPython, which allows
the wxPython to integrate with terminal based applications like
IPython.
If ``app`` is not given we probe for an existing one, and return it if
found. If no existing app is found, we create an :class:`wx.App` as
follows::
import wx
app = wx.App(redirect=False, clearSigInt=False)
"""
import wx
from distutils.version import LooseVersion as V
wx_version = V(wx.__version__).version # @UndefinedVariable
if wx_version < [2, 8]:
raise ValueError("requires wxPython >= 2.8, but you have %s" % wx.__version__) # @UndefinedVariable
from pydev_ipython.inputhookwx import inputhook_wx
self.set_inputhook(inputhook_wx)
self._current_gui = GUI_WX
if app is None:
app = wx.GetApp() # @UndefinedVariable
if app is None:
app = wx.App(redirect=False, clearSigInt=False) # @UndefinedVariable
app._in_event_loop = True
self._apps[GUI_WX] = app
return app
def disable_wx(self):
"""Disable event loop integration with wxPython.
This merely sets PyOS_InputHook to NULL.
"""
if GUI_WX in self._apps:
self._apps[GUI_WX]._in_event_loop = False
self.clear_inputhook()
def enable_qt(self, app=None):
from pydev_ipython.qt_for_kernel import QT_API, QT_API_PYQT5
if QT_API == QT_API_PYQT5:
self.enable_qt5(app)
else:
self.enable_qt4(app)
def enable_qt4(self, app=None):
"""Enable event loop integration with PyQt4.
Parameters
----------
app : Qt Application, optional.
Running application to use. If not given, we probe Qt for an
existing application object, and create a new one if none is found.
Notes
-----
This methods sets the PyOS_InputHook for PyQt4, which allows
the PyQt4 to integrate with terminal based applications like
IPython.
If ``app`` is not given we probe for an existing one, and return it if
found. If no existing app is found, we create an :class:`QApplication`
as follows::
            from PyQt4 import QtGui
app = QtGui.QApplication(sys.argv)
"""
from pydev_ipython.inputhookqt4 import create_inputhook_qt4
app, inputhook_qt4 = create_inputhook_qt4(self, app)
self.set_inputhook(inputhook_qt4)
self._current_gui = GUI_QT4
app._in_event_loop = True
self._apps[GUI_QT4] = app
return app
def disable_qt4(self):
"""Disable event loop integration with PyQt4.
This merely sets PyOS_InputHook to NULL.
"""
if GUI_QT4 in self._apps:
self._apps[GUI_QT4]._in_event_loop = False
self.clear_inputhook()
def enable_qt5(self, app=None):
from pydev_ipython.inputhookqt5 import create_inputhook_qt5
app, inputhook_qt5 = create_inputhook_qt5(self, app)
self.set_inputhook(inputhook_qt5)
self._current_gui = GUI_QT5
app._in_event_loop = True
self._apps[GUI_QT5] = app
return app
def disable_qt5(self):
if GUI_QT5 in self._apps:
self._apps[GUI_QT5]._in_event_loop = False
self.clear_inputhook()
def enable_gtk(self, app=None):
"""Enable event loop integration with PyGTK.
Parameters
----------
app : ignored
Ignored, it's only a placeholder to keep the call signature of all
gui activation methods consistent, which simplifies the logic of
supporting magics.
Notes
-----
This methods sets the PyOS_InputHook for PyGTK, which allows
the PyGTK to integrate with terminal based applications like
IPython.
"""
from pydev_ipython.inputhookgtk import create_inputhook_gtk
self.set_inputhook(create_inputhook_gtk(self._stdin_file))
self._current_gui = GUI_GTK
def disable_gtk(self):
"""Disable event loop integration with PyGTK.
This merely sets PyOS_InputHook to NULL.
"""
self.clear_inputhook()
def enable_tk(self, app=None):
"""Enable event loop integration with Tk.
Parameters
----------
app : toplevel :class:`Tkinter.Tk` widget, optional.
Running toplevel widget to use. If not given, we probe Tk for an
existing one, and create a new one if none is found.
Notes
-----
If you have already created a :class:`Tkinter.Tk` object, the only
thing done by this method is to register with the
:class:`InputHookManager`, since creating that object automatically
sets ``PyOS_InputHook``.
"""
self._current_gui = GUI_TK
if app is None:
try:
import Tkinter as _TK
except:
# Python 3
import tkinter as _TK # @UnresolvedImport
app = _TK.Tk()
app.withdraw()
self._apps[GUI_TK] = app
from pydev_ipython.inputhooktk import create_inputhook_tk
self.set_inputhook(create_inputhook_tk(app))
return app
def disable_tk(self):
"""Disable event loop integration with Tkinter.
This merely sets PyOS_InputHook to NULL.
"""
self.clear_inputhook()
def enable_glut(self, app=None):
""" Enable event loop integration with GLUT.
Parameters
----------
app : ignored
Ignored, it's only a placeholder to keep the call signature of all
gui activation methods consistent, which simplifies the logic of
supporting magics.
Notes
-----
This methods sets the PyOS_InputHook for GLUT, which allows the GLUT to
integrate with terminal based applications like IPython. Due to GLUT
limitations, it is currently not possible to start the event loop
without first creating a window. You should thus not create another
window but use instead the created one. See 'gui-glut.py' in the
docs/examples/lib directory.
The default screen mode is set to:
glut.GLUT_DOUBLE | glut.GLUT_RGBA | glut.GLUT_DEPTH
"""
import OpenGL.GLUT as glut # @UnresolvedImport
from pydev_ipython.inputhookglut import glut_display_mode, \
glut_close, glut_display, \
glut_idle, inputhook_glut
if GUI_GLUT not in self._apps:
glut.glutInit(sys.argv)
glut.glutInitDisplayMode(glut_display_mode)
# This is specific to freeglut
if bool(glut.glutSetOption):
glut.glutSetOption(glut.GLUT_ACTION_ON_WINDOW_CLOSE,
glut.GLUT_ACTION_GLUTMAINLOOP_RETURNS)
glut.glutCreateWindow(sys.argv[0])
glut.glutReshapeWindow(1, 1)
glut.glutHideWindow()
glut.glutWMCloseFunc(glut_close)
glut.glutDisplayFunc(glut_display)
glut.glutIdleFunc(glut_idle)
else:
glut.glutWMCloseFunc(glut_close)
glut.glutDisplayFunc(glut_display)
glut.glutIdleFunc(glut_idle)
self.set_inputhook(inputhook_glut)
self._current_gui = GUI_GLUT
self._apps[GUI_GLUT] = True
def disable_glut(self):
"""Disable event loop integration with glut.
This sets PyOS_InputHook to NULL and set the display function to a
dummy one and set the timer to a dummy timer that will be triggered
very far in the future.
"""
import OpenGL.GLUT as glut # @UnresolvedImport
from glut_support import glutMainLoopEvent # @UnresolvedImport
glut.glutHideWindow() # This is an event to be processed below
glutMainLoopEvent()
self.clear_inputhook()
def enable_pyglet(self, app=None):
"""Enable event loop integration with pyglet.
Parameters
----------
app : ignored
Ignored, it's only a placeholder to keep the call signature of all
gui activation methods consistent, which simplifies the logic of
supporting magics.
Notes
-----
This methods sets the ``PyOS_InputHook`` for pyglet, which allows
pyglet to integrate with terminal based applications like
IPython.
"""
from pydev_ipython.inputhookpyglet import inputhook_pyglet
self.set_inputhook(inputhook_pyglet)
self._current_gui = GUI_PYGLET
return app
def disable_pyglet(self):
"""Disable event loop integration with pyglet.
This merely sets PyOS_InputHook to NULL.
"""
self.clear_inputhook()
def enable_gtk3(self, app=None):
"""Enable event loop integration with Gtk3 (gir bindings).
Parameters
----------
app : ignored
Ignored, it's only a placeholder to keep the call signature of all
gui activation methods consistent, which simplifies the logic of
supporting magics.
Notes
-----
This methods sets the PyOS_InputHook for Gtk3, which allows
the Gtk3 to integrate with terminal based applications like
IPython.
"""
from pydev_ipython.inputhookgtk3 import create_inputhook_gtk3
self.set_inputhook(create_inputhook_gtk3(self._stdin_file))
self._current_gui = GUI_GTK
def disable_gtk3(self):
"""Disable event loop integration with PyGTK.
This merely sets PyOS_InputHook to NULL.
"""
self.clear_inputhook()
def enable_mac(self, app=None):
""" Enable event loop integration with MacOSX.
        We call pyplot.pause, which updates and displays the active figure
        during the pause. It is not MacOSX-specific, but it lets us avoid
        input hooks in the native MacOSX backend.
        We also should not import pyplot until the user does, because the
        backend can only be chosen before pyplot is imported for the first
        time.
"""
def inputhook_mac(app=None):
if self.pyplot_imported:
pyplot = sys.modules['matplotlib.pyplot']
try:
pyplot.pause(0.01)
except:
pass
else:
if 'matplotlib.pyplot' in sys.modules:
self.pyplot_imported = True
self.set_inputhook(inputhook_mac)
self._current_gui = GUI_OSX
def disable_mac(self):
self.clear_inputhook()
def current_gui(self):
"""Return a string indicating the currently active GUI or None."""
return self._current_gui
inputhook_manager = InputHookManager()
enable_wx = inputhook_manager.enable_wx
disable_wx = inputhook_manager.disable_wx
enable_qt = inputhook_manager.enable_qt
enable_qt4 = inputhook_manager.enable_qt4
disable_qt4 = inputhook_manager.disable_qt4
enable_qt5 = inputhook_manager.enable_qt5
disable_qt5 = inputhook_manager.disable_qt5
enable_gtk = inputhook_manager.enable_gtk
disable_gtk = inputhook_manager.disable_gtk
enable_tk = inputhook_manager.enable_tk
disable_tk = inputhook_manager.disable_tk
enable_glut = inputhook_manager.enable_glut
disable_glut = inputhook_manager.disable_glut
enable_pyglet = inputhook_manager.enable_pyglet
disable_pyglet = inputhook_manager.disable_pyglet
enable_gtk3 = inputhook_manager.enable_gtk3
disable_gtk3 = inputhook_manager.disable_gtk3
enable_mac = inputhook_manager.enable_mac
disable_mac = inputhook_manager.disable_mac
clear_inputhook = inputhook_manager.clear_inputhook
set_inputhook = inputhook_manager.set_inputhook
current_gui = inputhook_manager.current_gui
clear_app_refs = inputhook_manager.clear_app_refs
# We maintain this as stdin_ready so that the individual inputhooks
# can diverge as little as possible from their IPython sources
stdin_ready = inputhook_manager.return_control
set_return_control_callback = inputhook_manager.set_return_control_callback
get_return_control_callback = inputhook_manager.get_return_control_callback
get_inputhook = inputhook_manager.get_inputhook
# Convenience function to switch amongst them
def enable_gui(gui=None, app=None):
"""Switch amongst GUI input hooks by name.
This is just a utility wrapper around the methods of the InputHookManager
object.
Parameters
----------
gui : optional, string or None
If None (or 'none'), clears input hook, otherwise it must be one
of the recognized GUI names (see ``GUI_*`` constants in module).
app : optional, existing application object.
For toolkits that have the concept of a global app, you can supply an
existing one. If not given, the toolkit will be probed for one, and if
none is found, a new one will be created. Note that GTK does not have
this concept, and passing an app if ``gui=="GTK"`` will raise an error.
Returns
-------
The output of the underlying gui switch routine, typically the actual
PyOS_InputHook wrapper object or the GUI toolkit app created, if there was
one.
"""
if get_return_control_callback() is None:
raise ValueError("A return_control_callback must be supplied as a reference before a gui can be enabled")
guis = {GUI_NONE: clear_inputhook,
GUI_OSX: enable_mac,
GUI_TK: enable_tk,
GUI_GTK: enable_gtk,
GUI_WX: enable_wx,
GUI_QT: enable_qt,
GUI_QT4: enable_qt4,
GUI_QT5: enable_qt5,
GUI_GLUT: enable_glut,
GUI_PYGLET: enable_pyglet,
GUI_GTK3: enable_gtk3,
}
try:
gui_hook = guis[gui]
except KeyError:
if gui is None or gui == '':
gui_hook = clear_inputhook
else:
e = "Invalid GUI request %r, valid ones are:%s" % (gui, guis.keys())
raise ValueError(e)
return gui_hook(app)
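# Minimal usage sketch (illustrative, not part of the original module): a host
# application first registers a callback that reports when control should be
# returned, then selects a toolkit by name.  GUI_NONE is used here so that no
# GUI toolkit needs to be installed.
if __name__ == '__main__':
    set_return_control_callback(lambda: True)
    enable_gui(GUI_NONE)   # clears any existing input hook
    print(current_gui())   # -> None, since no toolkit was enabled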
__all__ = [
"GUI_WX",
"GUI_QT",
"GUI_QT4",
"GUI_QT5",
"GUI_GTK",
"GUI_TK",
"GUI_OSX",
"GUI_GLUT",
"GUI_PYGLET",
"GUI_GTK3",
"GUI_NONE",
"ignore_CTRL_C",
"allow_CTRL_C",
"InputHookManager",
"inputhook_manager",
"enable_wx",
"disable_wx",
"enable_qt",
"enable_qt4",
"disable_qt4",
"enable_qt5",
"disable_qt5",
"enable_gtk",
"disable_gtk",
"enable_tk",
"disable_tk",
"enable_glut",
"disable_glut",
"enable_pyglet",
"disable_pyglet",
"enable_gtk3",
"disable_gtk3",
"enable_mac",
"disable_mac",
"clear_inputhook",
"set_inputhook",
"current_gui",
"clear_app_refs",
"stdin_ready",
"set_return_control_callback",
"get_return_control_callback",
"get_inputhook",
"enable_gui"]
| apache-2.0 |
rs2/pandas | pandas/tests/generic/methods/test_reorder_levels.py | 2 | 2804 | import numpy as np
import pytest
from pandas import DataFrame, MultiIndex, Series
import pandas._testing as tm
class TestReorderLevels:
@pytest.mark.parametrize("klass", [Series, DataFrame])
def test_reorder_levels(self, klass):
index = MultiIndex(
levels=[["bar"], ["one", "two", "three"], [0, 1]],
codes=[[0, 0, 0, 0, 0, 0], [0, 1, 2, 0, 1, 2], [0, 1, 0, 1, 0, 1]],
names=["L0", "L1", "L2"],
)
df = DataFrame({"A": np.arange(6), "B": np.arange(6)}, index=index)
obj = df if klass is DataFrame else df["A"]
# no change, position
result = obj.reorder_levels([0, 1, 2])
tm.assert_equal(obj, result)
# no change, labels
result = obj.reorder_levels(["L0", "L1", "L2"])
tm.assert_equal(obj, result)
# rotate, position
result = obj.reorder_levels([1, 2, 0])
e_idx = MultiIndex(
levels=[["one", "two", "three"], [0, 1], ["bar"]],
codes=[[0, 1, 2, 0, 1, 2], [0, 1, 0, 1, 0, 1], [0, 0, 0, 0, 0, 0]],
names=["L1", "L2", "L0"],
)
expected = DataFrame({"A": np.arange(6), "B": np.arange(6)}, index=e_idx)
expected = expected if klass is DataFrame else expected["A"]
tm.assert_equal(result, expected)
result = obj.reorder_levels([0, 0, 0])
e_idx = MultiIndex(
levels=[["bar"], ["bar"], ["bar"]],
codes=[[0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0]],
names=["L0", "L0", "L0"],
)
expected = DataFrame({"A": np.arange(6), "B": np.arange(6)}, index=e_idx)
expected = expected if klass is DataFrame else expected["A"]
tm.assert_equal(result, expected)
result = obj.reorder_levels(["L0", "L0", "L0"])
tm.assert_equal(result, expected)
def test_reorder_levels_swaplevel_equivalence(
self, multiindex_year_month_day_dataframe_random_data
):
ymd = multiindex_year_month_day_dataframe_random_data
result = ymd.reorder_levels(["month", "day", "year"])
expected = ymd.swaplevel(0, 1).swaplevel(1, 2)
tm.assert_frame_equal(result, expected)
result = ymd["A"].reorder_levels(["month", "day", "year"])
expected = ymd["A"].swaplevel(0, 1).swaplevel(1, 2)
tm.assert_series_equal(result, expected)
result = ymd.T.reorder_levels(["month", "day", "year"], axis=1)
expected = ymd.T.swaplevel(0, 1, axis=1).swaplevel(1, 2, axis=1)
tm.assert_frame_equal(result, expected)
with pytest.raises(TypeError, match="hierarchical axis"):
ymd.reorder_levels([1, 2], axis=1)
with pytest.raises(IndexError, match="Too many levels"):
ymd.index.reorder_levels([1, 2, 3])
| bsd-3-clause |
markr622/moose | examples/ex14_pps/plot.py | 14 | 1194 | #!/usr/bin/env python
import matplotlib.pyplot as plt
import numpy as np
import csv
# Python 2.7 str objects have no isnumeric() (only unicode does), so use a helper.
def isInt(string):
try:
int(string)
return True
except ValueError:
return False
# Format of the CSV file is:
# time,dofs,integral
# 1,221,2.3592493758695,
# 2,841,0.30939803328432,
# 3,3281,0.088619511656913,
# 4,12961,0.022979021365857,
# 5,51521,0.0057978748995635,
# 6,205441,0.0014528130907967,
reader = csv.reader(open('out.csv'))
dofs = []
errs = []
for row in reader:
if row and isInt(row[0]): # Skip rows that don't start with numbers.
dofs.append(int(row[1]))
errs.append(float(row[2]))
# Construct data to be plotted
xdata = np.log10(np.sqrt(dofs))
ydata = np.log10(errs)
fig = plt.figure()
ax1 = fig.add_subplot(111)
ax1.plot(xdata, ydata, 'bo-')
ax1.set_xlabel('log (1/h)')
ax1.set_ylabel('log (L2-error)')
# Create linear curve fits of the data, but just the last couple data
# point when we are in the asymptotic regime.
fit = np.polyfit(xdata[2:-1], ydata[2:-1], 1)
fit_msg = 'Slope ~ ' + '%.2f' % fit[0]
ax1.text(2.0, -1.0, fit_msg)
plt.savefig('plot.pdf', format='pdf')
# Local Variables:
# python-indent: 2
# End:
| lgpl-2.1 |
gdementen/xlwings | xlwings/conversion/__init__.py | 2 | 1220 | # -*- coding: utf-8 -*-
try:
import pandas as pd
except ImportError:
pd = None
try:
import numpy as np
except ImportError:
np = None
from .framework import ConversionContext, Options, Pipeline, Converter, accessors, Accessor
from .standard import (DictConverter, Accessor, RangeAccessor, RawValueAccessor, ValueAccessor,
AdjustDimensionsStage, CleanDataForWriteStage, CleanDataFromReadStage, Ensure2DStage,
ExpandRangeStage, ReadValueFromRangeStage, TransposeStage, WriteValueToRangeStage,
Options, Pipeline)
if np:
from .numpy_conv import NumpyArrayConverter
if pd:
from .pandas_conv import PandasDataFrameConverter, PandasSeriesConverter
def read(rng, value, options):
convert = options.get('convert', None)
pipeline = accessors.get(convert, convert).reader(options)
ctx = ConversionContext(rng=rng, value=value)
pipeline(ctx)
return ctx.value
def write(value, rng, options):
convert = options.get('convert', None)
pipeline = accessors.get(convert, convert).router(value, rng, options).writer(options)
ctx = ConversionContext(rng=rng, value=value)
pipeline(ctx)
return ctx.value
| bsd-3-clause |
jdvelasq/pytimeseries | pytimeseries/pytimeseries_test.py | 1 | 1410 | """
Test for pytimeseries library
"""
from transformer import transformer
from AR import AR
from AR import AR_Ridge_2
from AR import AR_Lasso
from AR import AR_ElasticNet
from HoltWinters import HoltWinters
import pandas
import matplotlib
ts = pandas.Series.from_csv('champagne.csv', index_col = 0, header = 0)
#ts = pandas.Series.from_csv('champagne_short.csv', index_col = 0, header = 0)
#mytransform = transformer(trans = 'boxcox')
#transformed = mytransform.fit_transform(ts)
#model = AR(p = 3)
#model = model.fit(transformed)
#ahead = model.forecast(transformed, periods = 2)
#original = mytransform.restore(ahead)
#model_1 = AR_Lasso(p = 3)
#model_1 = model_1.fit(ts)
#result_1 = model_1.predict(ts)
#model_2 = AR(p = 3)
#model_2 = model_2.fit(ts)
#result_2 = model_2.predict(ts)
#model_3 = AR_Ridge_2(p = 3, alpha=0.1)
#model_3 = model_3.fit(ts)
#result_3 = model_3.predict(ts)
#model_4 = AR_Ridge_2(p = 3, alpha=0.5)
#model_4 = model_4.fit(ts)
#result_4 = model_4.predict(ts)
model = HoltWinters(alpha = 0.9, beta = False, gamma = False)
result = model.predict(ts)
model_2 = HoltWinters(alpha = 0.9, beta = 0.1, gamma = False)
result_2 = model_2.predict(ts)
matplotlib.pyplot.plot(ts)
matplotlib.pyplot.plot(result)
#matplotlib.pyplot.plot(result_1)
matplotlib.pyplot.plot(result_2)
#matplotlib.pyplot.plot(result_3)
#matplotlib.pyplot.plot(result_4)
matplotlib.pyplot.show()
| mit |
Kate-Willett/Climate_Explorer | PYTHON/PlotAllRegionsTimeSeries_APR2015.py | 1 | 27217 | #!/usr/local/sci/bin/python
#***************************************
# 27 April 2015 KMW - v1
# For TEMPERATURE ONLY
# Plots a time series for any or all regions
# Option to overplot the same region from another source too
# Option to add trends of each source
# Option to add correlation of each source
#
#************************************************************************
# START
#************************************************************************
# USE python2.7
# python2.7 PlotAllRegionsTimeSeries_APR2015.py
#
# REQUIRES
# RandomsRanges.py
# LinearTrends.py
#************************************************************************
# Set up python imports
import matplotlib.pyplot as plt
import matplotlib.colors as mc
import matplotlib.cm as mpl_cm
import numpy.ma as ma
import numpy as np
import sys, os
import scipy.stats
import struct
import os.path
import math
from mpl_toolkits.basemap import Basemap
import datetime as dt
from matplotlib.dates import date2num,num2date
#from netCDF4 import Dataset
from scipy.io import netcdf
from scipy.stats.stats import pearsonr
from RandomsRanges import LetterRange
from LinearTrends import MedianPairwise
# Set up initial run choices
timetype='monthly' #'monthly', 'annual'
fitlin=True # False - fit a linear trend?
addcor=True # False - correlate the data?
plotdiff=True
# What are the units?
unitees='$^{o}$C' #'deg C'
# Number of sources to plot
nsources=6 # this can be one to three
# Source Names for Title
Namey=['Globe (70$^{o}$S-70$^{o}$N)','N. Hemisphere (20$^{o}$N-70$^{o}$N)','Tropics (20$^{o}$S-20$^{o}$N)','S. Hemisphere (70$^{o}$S-20$^{o}$S)']
SourceNames=['HadISDH.2.0.1.2014p (adj)','HadISDH.2.0.1.2014p (raw)','CRUTEM4.3.0.0','GHCNM3','GISTEMP','BERKELEY EARTH']
ShortSourceNames=['HadISDH adj','HadISDH raw','CRUTEM','GHCNM','GISS','BERKELEY']
# Infiles
INFIL1='/data/local/hadkw/HADCRUH2/UPDATE2014/STATISTICS/TIMESERIES/HadISDH.landT.2.0.1.2014p_FLATgridIDPHA5by5_JAN2015_areaTS_19732014.nc'
INFIL2='/data/local/hadkw/HADCRUH2/UPDATE2014/STATISTICS/TIMESERIES/HadISDH.landT.2.0.1.2014p_FLATgridRAW5by5_JAN2015_areaTS_19732014.nc'
INFIL3='/data/local/hadkw/HADCRUH2/UPDATE2014/STATISTICS/TIMESERIES/CRUTEM4_T_areaTS_19732014.nc' #CRUTEM.4.3.0.0.anomalies_areaTS_18502014.nc
INFIL4='/data/local/hadkw/HADCRUH2/UPDATE2014/STATISTICS/TIMESERIES/GHCNM_T_areaTS_19732014.nc' #GHCNM_18802014_areaTS_18802014.nc
INFIL5='/data/local/hadkw/HADCRUH2/UPDATE2014/STATISTICS/TIMESERIES/GISS_T_areaTS_19732014.nc'
INFIL6='/data/local/hadkw/HADCRUH2/UPDATE2014/STATISTICS/TIMESERIES/BERKELEY_T_areaTS_19732014.nc'
INFIL9='/data/local/hadkw/HADCRUH2/UPDATE2014/STATISTICS/TIMESERIES/CRUTEM4_T_HadISDHMASKareaTS_19732014.nc' #CRUTEM.4.3.0.0.anomalies_areaTSMSK7605_19732014.nc
INFIL10='/data/local/hadkw/HADCRUH2/UPDATE2014/STATISTICS/TIMESERIES/GHCNM_T_HadISDHMASKareaTS_19732014.nc' #GHCNM_18802014_areaTSMSK7605_19732014.nc
INFIL11='/data/local/hadkw/HADCRUH2/UPDATE2014/STATISTICS/TIMESERIES/GISS_T_HadISDHMASKareaTS_19732014.nc'
INFIL12='/data/local/hadkw/HADCRUH2/UPDATE2014/STATISTICS/TIMESERIES/BERKELEY_T_HadISDHMASKareaTS_19732014.nc'
infilee=list([INFIL1,INFIL2,INFIL3,INFIL4,INFIL5,INFIL6,INFIL1,INFIL2,INFIL9,INFIL10,INFIL11,INFIL12])
# What gridbox to pull out (use long (-180 to 180) and lat (-90 to 90) centres?
# If this is greater than one element then make multiple time series
nREGs=4
Regions=list(['glob_T_anoms','nhem_T_anoms','trop_T_anoms','shem_T_anoms'])
RegionsOTH=list(['glob_anoms','nhem_anoms','trop_anoms','shem_anoms'])
OtherInfo=list([]) # could be trends or correlations or ratios
# Outfiles
if (plotdiff):
OUTFIL='/data/local/hadkw/HADCRUH2/UPDATE2014/IMAGES/OTHER/AllRegionSourceTimeSeriesDIFFSHadISDH.landT.2.0.1.2014p' # add long box, lat box and .eps, .png
else:
OUTFIL='/data/local/hadkw/HADCRUH2/UPDATE2014/IMAGES/OTHER/AllRegionSourceTimeSeriesHadISDH.landT.2.0.1.2014p' # add long box, lat box and .eps, .png
OUTFILC='/data/local/hadkw/HADCRUH2/UPDATE2014/IMAGES/OTHER/AllCorrsMatrixHadISDH.landT.2.0.1.2014p' # correlation matrix for each region
# Set up time info
#styr=list([1973,1973,1973,1850,1880,1973,1973,1973,1973,1973,1973,1973,1973]) # actual start year, start year of source 1, start year of source 2 etc.
styr=list([1973,1973,1973,1973,1973,1973,1973,1973,1973,1973,1973,1973,1973]) # actual start year, start year of source 1, start year of source 2 etc.
edyr=list([2014,2014,2014,2014,2014,2014,2014,2014,2014,2014,2014,2014,2014]) # actual end year, end year of source 1, end year of source 2 etc.
#styr=list([1973,1973,1973,1973,1973]) # actual start year, start year of source 1, start year of source 2 etc.
#edyr=list([2014,2014,2014,2014,2014]) # actual end year, end year of source 1, end year of source 2 etc.
nyrs=(edyr[0]-styr[0])+1
nmons=(nyrs)*12
climst=1981
climed=2010
stcl=climst-styr[0]
edcl=climed-styr[0]
# Set up variables
mdi=-1e30
#************************************************************************
# Subroutines
#************************************************************************
# ExtractREG
def ExtractREG(FileName,TheRegions,REGarr,YrStart,YrEnd,TheRCount,ThisSource):
''' Read in netCDF grid and pull out specific region time series '''
st=(YrStart[0]-YrStart[ThisSource+1])*12
# Check when source start is later than desired start
if st < 0:
st=0
ed=((YrEnd[0]+1)-YrStart[ThisSource+1])*12
print(st,ed)
f=netcdf.netcdf_file(FileName,'r')
for loo in range(TheRCount):
var=f.variables[TheRegions[loo]]
tmpvar=np.array(np.transpose(var.data[:]))
# Can cope when source begins earlier or later than desired start
REGarr[loo,len(REGarr[loo,:])-(ed-st):]=tmpvar[st:ed+1]
f.close()
return REGarr # ExtractREG
#************************************************************************
# CALCANOMS
def CalcAnoms(REGarr,StClim,EdClim,TheMDI,TheRCount):
''' Calculate climatology for each month if 50% of months present '''
''' Get climate anomalies for all months '''
for loo in range(TheRCount):
tmparr=np.reshape(REGarr[loo,:],(len(REGarr[loo,:])/12,12))
for mm in range(12):
subarr=tmparr[:,mm]
climarr=tmparr[StClim:EdClim+1,mm]
climgots=np.where(climarr != TheMDI)[0]
gots=np.where(subarr != TheMDI)[0]
if len(climgots) >= 15:
subarr[gots]=subarr[gots]-np.mean(climarr[climgots])
else:
subarr[:]=TheMDI
tmparr[:,mm]=subarr
REGarr[loo,:]=np.reshape(tmparr,np.size(tmparr))
return REGarr # CalcAnoms
#************************************************************************
# AVERAGEANNUALS
def AverageAnnuals(REGarr,TheMDI,TheRCount):
''' Calculate the annual mean anomalies where at least 75 % of months are present '''
AnnArr=np.empty((TheRCount,len(REGarr[0,:])/12))
AnnArr.fill(TheMDI)
for loo in range(TheRCount):
tmparr=np.reshape(REGarr[loo,:],(len(REGarr[loo,:])/12,12))
for yy in range(len(AnnArr[loo,:])):
subarr=tmparr[yy,:]
gots=np.where(subarr != TheMDI)[0]
if len(gots) >= 9:
AnnArr[loo,yy]=np.mean(subarr[gots])
return AnnArr # AverageAnnuals
#************************************************************************
# FITTREND
def FitTrend(REGarr,TypeTime,TheMDI,TheRCount):
''' Use MedianPairwise to fit a trend '''
''' return the 5th, median and 95th percentile slopes '''
''' If annual then multiply by 10, if monthly then multiply by 120 '''
TrendStats=np.empty((TheRCount,3))
for loo in range(TheRCount):
TrendStats[loo,:]=MedianPairwise(REGarr[loo,:],TheMDI,TrendStats[loo,:])
if TypeTime == 'monthly':
TrendStats[loo,:]=np.array(TrendStats[loo,:])*120.
else:
TrendStats[loo,:]=np.array(TrendStats[loo,:])*10.
return TrendStats # FitTrend
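# A minimal sketch (illustration only; this helper is not called anywhere in
# the script): the *120 and *10 factors above turn the per-timestep slope
# returned by MedianPairwise into a per-decade trend, since monthly series
# have 120 steps per decade and annual series have 10.
def DemoDecadalScaling(SlopePerStep,TypeTime='monthly'):
    ''' Convert a per-month or per-year slope into a per-decade trend '''
    if TypeTime == 'monthly':
        return SlopePerStep*120.
    else:
        return SlopePerStep*10.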
#************************************************************************
# GETCORR
def GetCorr(ALLarr,TypeTime,TheMDI,TheRCount,TheSCount):
''' Get simple correlation over months present '''
CorrCount=sum(range(TheSCount)) # number of unique correlating pairs
CorrStats=np.empty((CorrCount,TheRCount))
point1=0 # source number
point2=1 # source number
for cloo in range(CorrCount):
print('Source points: ',point1,point2)
for rloo in range(TheRCount):
tmp1=ALLarr[point1,rloo,:]
tmp2=ALLarr[point2,rloo,:]
gots=np.where((tmp1 > TheMDI) & (tmp2 > TheMDI))
CorrStats[cloo,rloo]=np.corrcoef(tmp1[gots],tmp2[gots])[0,1]
point2=point2+1
if (point2 == TheSCount): # loop through
point1=point1+1
point2=1+point1
return CorrStats # GetCorr
#************************************************************************
# PlotTimeSeries
def PlotTimeSeries(TheFile,ALLarr,trendALL,corrALL,ALLarrMSK,trendALLMSK,corrALLMSK,ExtraInfo,TheUnits,
MonCount,YrCount,TypeTime,YrStart,YrEnd,TheMDI,SourceCount,
Titlee,ShortNameSource,TheRCount):
''' Plot a multi-panel and overlay the time series '''
''' Add a legend with colours and sources '''
''' Annotate with trends and otherinfo '''
''' This will be an 8 panel plot - Non-masked on the left, masked on the right '''
''' Save as png and eps '''
# for clearer plotting
MonCount=MonCount+1
YrCount=YrCount+1
# Mask the time series
MSKALLarr=ma.masked_where(ALLarr <= -999.,ALLarr) # a fudge here as floating point precision issues in MDI
MSKALLarrMSK=ma.masked_where(ALLarrMSK <= -999.,ALLarrMSK) # a fudge here as floating point precision issues in MDI
# set up x axes
if TypeTime == 'monthly':
TheMonths=[]
yr=YrStart[0]
mon=1
for m in range(MonCount):
TheMonths.append(dt.date(yr,mon,1))
mon=mon+1
if mon == 13:
mon=1
yr=yr+1
TheMonths=np.array(TheMonths)
else:
TheMonths=[]
yr=YrStart[0]
mon=1
for y in range(YrCount):
TheMonths.append(dt.date(yr,mon,1))
yr=yr+1
TheMonths=np.array(TheMonths)
xtitlee='Years'
ytitlee='Anomalies ('+TheUnits+')'
# set up number of panels and number of lines
nplots=TheRCount
print('PLOT NUMBERS: ',nplots)
nlines=[]
for n in range(nplots):
nlines.append(SourceCount)
Letteree=[]
Letteree=LetterRange(0,nplots*2)
# set up dimensions and plot - this is a 2 column nvar rows plot
xpos=[]
ypos=[]
xfat=[]
ytall=[]
totalyspace=0.90 # start 0.08 end 0.98
totalxspace=0.40 # start 0.12 end 0.98
for n in range(nplots):
xpos.append(0.07)
ypos.append(0.98-((n+1)*(totalyspace/nplots)))
xfat.append(totalxspace)
ytall.append(totalyspace/nplots)
# set up dimensions and plot - this is a 3 column nvar rows plot
if (SourceCount == 6):
cols=['DimGrey','Firebrick','DodgerBlue','DarkOrange','MediumBlue','HotPink']
else:
cols=['Firebrick','DodgerBlue','DarkOrange','MediumBlue','HotPink']
f,axarr=plt.subplots(TheRCount*2,figsize=(16,12),sharex=False) #6,18
for pp in range(nplots):
print('Plot: ',pp)
#axarr[pp].set_size(14)
axarr[pp].set_position([xpos[pp],ypos[pp],xfat[pp],ytall[pp]])
if TypeTime == 'monthly':
axarr[pp].set_xlim([TheMonths[0],TheMonths[MonCount-1]])
else:
axarr[pp].set_xlim([TheMonths[0],TheMonths[YrCount-1]])
if pp < nplots-1:
axarr[pp].set_xticklabels([])
miny=(math.floor(10.*np.min(np.append(MSKALLarr,MSKALLarrMSK))))/10.
maxy=(math.ceil(10.*np.max(np.append(MSKALLarr,MSKALLarrMSK))))/10.
axarr[pp].set_ylim([miny,maxy])
if TypeTime == 'monthly':
for sn in range(SourceCount):
axarr[pp].plot(TheMonths[0:len(TheMonths)-1],MSKALLarr[sn,pp,:],c=cols[sn],linewidth=1)
else:
for sn in range(SourceCount):
axarr[pp].plot(TheMonths[0:len(TheMonths)-1],MSKALLarr[sn,pp,:],c=cols[sn],linewidth=2)
axarr[pp].annotate(Letteree[pp]+') '+Titlee[pp],xy=(0.02,0.90),xycoords='axes fraction',size=16)
# get linear trend and annotate (does this work with masked arrays?)
if trendALL[0,pp,0] != TheMDI:
scaletrend=([0.32,0.26,0.2,0.14,0.08,0.02]) #([0.88,0.82,0.76,0.70])
if (SourceCount == 5):
scaletrend=scaletrend=([0.26,0.2,0.14,0.08,0.02])
for sn in range(SourceCount):
linstr='{0:5.2f}'.format(trendALL[sn,pp,0])+' ('+'{0:5.2f}'.format(trendALL[sn,pp,1])+','+'{0:5.2f}'.format(trendALL[sn,pp,2])+') '+'{:s}'.format(TheUnits)+' dec$^{-1}$'
namstr=ShortNameSource[sn]+' ('+'{0:5.2f}'.format(np.std(MSKALLarr[sn,pp,:]))+')'
#linstr="%19s %5.2f (%5.2f to %5.2f) %s decade$^{-1}$ " % (NameSource[sn],trendGB[sn,0],trendGB[sn,1],trendGB[sn,2],TheUnits)
axarr[pp].annotate(namstr,xy=(0.5,scaletrend[sn]),xycoords='axes fraction',size=9,ha='left',color=cols[sn])
axarr[pp].annotate(linstr,xy=(0.72,scaletrend[sn]),xycoords='axes fraction',size=9,ha='left',color=cols[sn])
## get correlation and annotate (does this work with masked arrays?)
# if corrALL[0,pp] != TheMDI:
# scaletrend=([0.02,0.08,0.14,0.2,0.26,0.32])
# point1=0
# point2=1
# for sn in range(len(corrALL[:,0])):
# linstr='r = '+'{0:6.3f}'.format(corrALL[sn,pp])
# #linstr="%19s %5.2f (%5.2f to %5.2f) %s decade$^{-1}$ " % (NameSource[sn],trendGB[sn,0],trendGB[sn,1],trendGB[sn,2],TheUnits)
# axarr[pp].annotate(ShortNameSource[point1]+','+ShortNameSource[point2],xy=(0.6,scaletrend[sn]),xycoords='axes fraction',size=10,ha='left',color='Black')
# axarr[pp].annotate(linstr,xy=(0.85,scaletrend[sn]),xycoords='axes fraction',size=10,ha='left',color='Black')
# point2=point2+1
# if (point2 == SourceCount): # loop through
# point1=point1+1
# point2=1+point1
# Plot a line at zero
if TypeTime == 'monthly':
axarr[pp].hlines(0,TheMonths[0],TheMonths[MonCount-1],color='black',linewidth=0.5)
else:
axarr[pp].hlines(0,TheMonths[0],TheMonths[YrCount-1],color='black',linewidth=0.5)
axarr[pp].set_ylabel(ytitlee,fontsize=12)
axarr[pp].set_xlabel(xtitlee,fontsize=12)
# NOW DO THE MASKED PLOTS
# set up dimensions and plot - this is a 2 column nvar rows plot
xpos=[]
ypos=[]
xfat=[]
ytall=[]
totalyspace=0.90 # start 0.08 end 0.98
totalxspace=0.40 # start 0.12 end 0.98
for n in range(nplots):
xpos.append(0.57)
ypos.append(0.98-((n+1)*(totalyspace/nplots)))
xfat.append(totalxspace)
ytall.append(totalyspace/nplots)
# set up dimensions and plot - this is a 3 column nvar rows plot
# f,axarr=plt.subplots(TheRCount*2,figsize=(16,12),sharex=False) #6,18
for pp in range(nplots):
print('Plot: ',pp)
#axarr[pp].set_size(14)
axarr[pp+4].set_position([xpos[pp],ypos[pp],xfat[pp],ytall[pp]])
if TypeTime == 'monthly':
axarr[pp+4].set_xlim([TheMonths[0],TheMonths[MonCount-1]])
else:
axarr[pp+4].set_xlim([TheMonths[0],TheMonths[YrCount-1]])
if pp < nplots-1:
axarr[pp+4].set_xticklabels([])
# miny=(math.floor(10.*np.min(MSKALLarr)))/10.
# maxy=(math.ceil(10.*np.max(MSKALLarr)))/10.
axarr[pp+4].set_ylim([miny,maxy])
if TypeTime == 'monthly':
for sn in range(SourceCount):
axarr[pp+4].plot(TheMonths[0:len(TheMonths)-1],MSKALLarrMSK[sn,pp,:],c=cols[sn],linewidth=1)
else:
for sn in range(SourceCount):
axarr[pp+4].plot(TheMonths[0:len(TheMonths)-1],MSKALLarrMSK[sn,pp,:],c=cols[sn],linewidth=2)
axarr[pp+4].annotate(Letteree[pp+4]+') '+Titlee[pp],xy=(0.02,0.9),xycoords='axes fraction',size=16)
# get linear trend and annotate (does this work with masked arrays?)
if trendALLMSK[0,pp,0] != TheMDI:
scaletrend=([0.32,0.26,0.2,0.14,0.08,0.02]) #([0.88,0.82,0.76,0.70])
if (SourceCount == 5):
scaletrend=scaletrend=([0.26,0.2,0.14,0.08,0.02])
for sn in range(SourceCount):
if (sn > 1):
mush=' (masked)'
else:
mush=''
linstr='{0:5.2f}'.format(trendALLMSK[sn,pp,0])+' ('+'{0:5.2f}'.format(trendALLMSK[sn,pp,1])+','+'{0:5.2f}'.format(trendALLMSK[sn,pp,2])+') '+'{:s}'.format(TheUnits)+' dec$^{-1}$'
namstr=ShortNameSource[sn]+' ('+'{0:5.2f}'.format(np.std(MSKALLarrMSK[sn,pp,:]))+')'
#linstr="%19s %5.2f (%5.2f to %5.2f) %s decade$^{-1}$ " % (NameSource[sn],trendGB[sn,0],trendGB[sn,1],trendGB[sn,2],TheUnits)
axarr[pp+4].annotate(namstr,xy=(0.5,scaletrend[sn]),xycoords='axes fraction',size=9,ha='left',color=cols[sn])
axarr[pp+4].annotate(linstr,xy=(0.72,scaletrend[sn]),xycoords='axes fraction',size=9,ha='left',color=cols[sn])
## get correlation and annotate (does this work with masked arrays?)
# if corrALL[0,pp] != TheMDI:
# scaletrend=([0.02,0.08,0.14,0.2,0.26,0.32])
# point1=0
# point2=1
# for sn in range(len(corrALL[:,0])):
# linstr='r = '+'{0:6.3f}'.format(corrALL[sn,pp])
# #linstr="%19s %5.2f (%5.2f to %5.2f) %s decade$^{-1}$ " % (NameSource[sn],trendGB[sn,0],trendGB[sn,1],trendGB[sn,2],TheUnits)
# axarr[pp+4].annotate(ShortNameSource[point1]+','+ShortNameSource[point2],xy=(0.6,scaletrend[sn]),xycoords='axes fraction',size=10,ha='left',color='Black')
# axarr[pp+4].annotate(linstr,xy=(0.85,scaletrend[sn]),xycoords='axes fraction',size=10,ha='left',color='Black')
# point2=point2+1
# if (point2 == SourceCount): # loop through
# point1=point1+1
# point2=1+point1
# Plot a line at zero
if TypeTime == 'monthly':
axarr[pp+4].hlines(0,TheMonths[0],TheMonths[MonCount-1],color='black',linewidth=0.5)
else:
axarr[pp+4].hlines(0,TheMonths[0],TheMonths[YrCount-1],color='black',linewidth=0.5)
axarr[pp+4].set_ylabel(ytitlee,fontsize=12)
axarr[pp+4].set_xlabel(xtitlee,fontsize=12)
# Figure Watermark and Labels
# watermarkstring="/".join(os.getcwd().split('/')[4:])+'/'+os.path.basename( __file__ )+" "+dt.datetime.strftime(dt.datetime.now(), "%d-%b-%Y %H:%M")
# plt.figtext(0.01,0.01,watermarkstring,size=6)
#plt.show()
plt.savefig(TheFile+".eps")
plt.savefig(TheFile+".png")
return #PlotTimeSeries
#************************************************************************
# PlotCorrMatrix
def PlotCorrMatrix(Filee,CorrAll,CorrAllMSK,TheRegions,TheNames):
''' This plots a correlation matrix for each region '''
''' Upper triangle is non-masked '''
''' Lower triangle is masked to HadISDH '''
''' Shades of YlOrRd from 0.8 to 1 '''
# make matrix for each region
RegCount=len(TheRegions)
SourceCount=len(TheNames)
GlobMat=np.empty((SourceCount,SourceCount))
NHemMat=np.empty((SourceCount,SourceCount))
TropMat=np.empty((SourceCount,SourceCount))
SHemMat=np.empty((SourceCount,SourceCount))
np.fill_diagonal(GlobMat,1.)
np.fill_diagonal(NHemMat,1.)
np.fill_diagonal(TropMat,1.)
np.fill_diagonal(SHemMat,1.)
print('FILLED DIAGONALS')
point1=0
point2=1
for cc in range(len(CorrAll)):
print(cc,point1,point2)
GlobMat[point1,point2]=CorrAll[cc,0]
NHemMat[point1,point2]=CorrAll[cc,1]
TropMat[point1,point2]=CorrAll[cc,2]
SHemMat[point1,point2]=CorrAll[cc,3]
GlobMat[point2,point1]=CorrAllMSK[cc,0]
NHemMat[point2,point1]=CorrAllMSK[cc,1]
TropMat[point2,point1]=CorrAllMSK[cc,2]
SHemMat[point2,point1]=CorrAllMSK[cc,3]
point2=point2+1
if (point2 == 6):
point1=point1+1
point2=point1+1
print('GOT MATRICES')
# set up colours
cmap=plt.get_cmap('YlOrRd')
cmaplist=[cmap(i) for i in range(cmap.N)]
print('CHOSEN COLOURS')
# remove the darkest and lightest (white and black) - and reverse
for loo in range(30):
cmaplist.remove(cmaplist[0])
cmap=cmap.from_list('this_cmap',cmaplist,cmap.N)
print('REFINED COLOURS')
bounds=np.array([0.9,0.91,0.92,0.93,0.94,0.95,0.96,0.97,0.98,0.99,1,1.00001])
strbounds=["%4.3g" % i for i in bounds]
norm=mpl_cm.colors.BoundaryNorm(bounds,cmap.N)
print('SORTED COLOURS')
# set up plot space and dimensions and plot - this is a 2 column nvar rows plot
# xpos=list([0.12,0.62,0.12,0.62])
# ypos=list([0.54,0.54,0.08,0.08])
# xfat=list([0.36,0.36,0.36,0.36])
# ytall=list([0.32,0.32,0.32,0.32])
xpos=list([0.02,0.52,0.02,0.52])
ypos=list([0.66,0.66,0.2,0.2])
xfat=list([0.36,0.36,0.36,0.36])
ytall=list([0.30,0.3,0.3,0.3])
miny=0
maxy=6
xinds=range(7)
yinds=range(6,-1,-1)
ArrX,ArrY=np.meshgrid(xinds,yinds)
Letteree=[]
Letteree=LetterRange(0,len(TheRegions))
f,axarr=plt.subplots(len(TheRegions),figsize=(12,13),sharex=False) #6,18
for pp in range(len(TheRegions)):
print('Plot: ',pp)
#axarr[pp].set_size(14)
axarr[pp].set_position([xpos[pp],ypos[pp],xfat[pp],ytall[pp]])
axarr[pp].set_xticklabels([])
axarr[pp].set_yticklabels([])
axarr[pp].set_ylim([miny,maxy])
axarr[pp].set_xlim([miny,maxy])
if (pp == 0):
ChosenMat=GlobMat
if (pp == 1):
ChosenMat=NHemMat
if (pp == 2):
ChosenMat=TropMat
if (pp == 3):
ChosenMat=SHemMat
print('MAKING THE GRIDS')
print(ChosenMat)
grids=axarr[pp].pcolor(ArrX,ArrY,ChosenMat,cmap=cmap,norm=norm, edgecolor='0.2',linewidth=0.5) #,latlon='TRUE'
for sn in range(len(TheNames)):
NamePos=sn*(1./6.)+(1./12.)
Topsies=-0.02
Sidesies=1.02
print(sn,NamePos,Topsies,Sidesies)
axarr[pp].annotate(TheNames[sn],xy=(NamePos,Topsies),xycoords='axes fraction',rotation=90,size=12,ha='center',va='top')
axarr[pp].annotate(TheNames[sn],xy=(Sidesies,1-NamePos),xycoords='axes fraction',rotation=0,size=12,ha='left',va='center')
axarr[pp].annotate(Letteree[pp]+') '+TheRegions[pp],xy=(0.0,1.03),xycoords='axes fraction',size=16)
# loop through plots (a to d) label sources
# add colour bar along righthandside
cbax=f.add_axes([0.1,0.05,0.8,0.03])
cb=f.colorbar(grids,cax=cbax,orientation='horizontal',ticks=bounds) #, extend=extend
cb.ax.tick_params(labelsize=12)
cb.ax.set_xticklabels(strbounds)
plt.figtext(0.5,0.01,'Correlation',size=16,ha='center')
# save to file
#plt.show()
plt.savefig(Filee+".eps")
plt.savefig(Filee+".png")
return #PlotCorrMatrix
#************************************************************************
# MAIN PROGRAM
#************************************************************************
# Set up arrays
if (timetype == 'monthly'):
REGts=np.empty((nsources,nREGs,nmons))
REGtsMSK=np.empty((nsources,nREGs,nmons))
DIFFts=np.empty((nsources-1,nREGs,nmons))
DIFFtsMSK=np.empty((nsources-1,nREGs,nmons))
moop='MON'
else:
REGts=np.empty((nsources,nREGs,nyrs))
REGtsMSK=np.empty((nsources,nREGs,nyrs))
DIFFts=np.empty((nsources-1,nREGs,nyrs))
DIFFtsMSK=np.empty((nsources-1,nREGs,nyrs))
moop='ANN'
REGts.fill(mdi)
REGtsMSK.fill(mdi)
DIFFts.fill(mdi)
DIFFtsMSK.fill(mdi)
if not(plotdiff):
ALLtrends=np.zeros((nsources,nREGs,3)) # row for each source, columns for trend, 5thpct, 95thpct
ALLcorrs=np.zeros((sum(range(nsources)),nREGs)) # row for each source, columns for trend, 5thpct, 95thpct
ALLtrendsMSK=np.zeros((nsources,nREGs,3)) # row for each source, columns for trend, 5thpct, 95thpct
ALLcorrsMSK=np.zeros((sum(range(nsources)),nREGs)) # row for each source, columns for trend, 5thpct, 95thpct
else:
ALLtrends=np.zeros((nsources-1,nREGs,3)) # row for each source, columns for trend, 5thpct, 95thpct
ALLcorrs=np.zeros((sum(range(nsources-1)),nREGs)) # row for each source, columns for trend, 5thpct, 95thpct
ALLtrendsMSK=np.zeros((nsources-1,nREGs,3)) # row for each source, columns for trend, 5thpct, 95thpct
ALLcorrsMSK=np.zeros((sum(range(nsources-1)),nREGs)) # row for each source, columns for trend, 5thpct, 95thpct
ALLtrends.fill(mdi)
ALLcorrs.fill(mdi)
ALLtrendsMSK.fill(mdi)
ALLcorrsMSK.fill(mdi)
# Can now establish the filename
plottee=OUTFIL+'_'+moop
if not(plotdiff):
plotteeC=OUTFILC+'_'+moop
# Loop through sources
for ns in range(nsources):
# set up tmparr for gridbox data
tmp=np.empty((nREGs,nmons))
tmp.fill(mdi)
tmpMSK=np.empty((nREGs,nmons))
tmpMSK.fill(mdi)
# Read in netCDF set of grids and extract gridbox
if (ns > 1):
TheRegions=RegionsOTH
else:
TheRegions=Regions
tmp=ExtractREG(infilee[ns],TheRegions,tmp,styr,edyr,nREGs,ns)
bads=np.where(tmp < -999.)[0]
tmp[bads]=mdi
tmpMSK=ExtractREG(infilee[ns+6],TheRegions,tmpMSK,styr,edyr,nREGs,ns+6)
bads=np.where(tmpMSK < -999.)[0]
tmpMSK[bads]=mdi
# Reanomalies to climatology period
tmp=CalcAnoms(tmp,stcl,edcl,mdi,nREGs) # should modify tmpGB
tmpMSK=CalcAnoms(tmpMSK,stcl,edcl,mdi,nREGs) # should modify tmpGB
# If timetype is annual then average to annual
if (timetype == 'annual'):
REGts[ns,:,:]=AverageAnnuals(tmp,mdi,nREGs)
REGtsMSK[ns,:,:]=AverageAnnuals(tmpMSK,mdi,nREGs)
else:
REGts[ns,:,:]=tmp
REGtsMSK[ns,:,:]=tmpMSK
if not(plotdiff):
# Fit a linear trend if desired
if (fitlin):
ALLtrends[ns,:,:]=FitTrend(REGts[ns,:,:],timetype,mdi,nREGs)
ALLtrendsMSK[ns,:,:]=FitTrend(REGtsMSK[ns,:,:],timetype,mdi,nREGs)
if not(plotdiff):
# Get correlations if desired (necesary for plotting correlation matrices)
if (addcor):
ALLcorrs=GetCorr(REGts,timetype,mdi,nREGs,nsources)
ALLcorrsMSK=GetCorr(REGtsMSK,timetype,mdi,nREGs,nsources)
# Pass all sources and trends and otherinfo to plotter
print('Plotting...')
PlotCorrMatrix(plotteeC,ALLcorrs,ALLcorrsMSK,Namey,ShortSourceNames)
PlotTimeSeries(plottee,REGts,ALLtrends,ALLcorrs,REGtsMSK,ALLtrendsMSK,ALLcorrsMSK,OtherInfo,unitees,nmons,nyrs,timetype,styr,edyr,mdi,nsources,Namey,ShortSourceNames,nREGs)
else:
# Now sort out difference arrays, trends and source names
nsources=nsources-1
NewShortSourceNames=ShortSourceNames[1:nsources+1]
for ns in range(nsources):
for nr in range(nREGs):
gots=np.where((REGts[0,nr,:] != mdi) & (REGts[ns+1,nr,:] != mdi))[0]
DIFFts[ns,nr,gots]=REGts[ns+1,nr,:]-REGts[0,nr,gots]
gots=np.where((REGtsMSK[0,nr,:] != mdi) & (REGtsMSK[ns+1,nr,:] != mdi))[0]
DIFFtsMSK[ns,nr,gots]=REGtsMSK[ns+1,nr,:]-REGtsMSK[0,nr,gots]
# Fit a linear trend if desired
if (fitlin):
ALLtrends[ns,:,:]=FitTrend(DIFFts[ns,:,:],timetype,mdi,nREGs)
ALLtrendsMSK[ns,:,:]=FitTrend(DIFFtsMSK[ns,:,:],timetype,mdi,nREGs)
PlotTimeSeries(plottee,DIFFts,ALLtrends,ALLcorrs,DIFFtsMSK,ALLtrendsMSK,ALLcorrsMSK,OtherInfo,unitees,nmons,nyrs,timetype,styr,edyr,mdi,nsources,Namey,NewShortSourceNames,nREGs)
stop()
print("And, we are done!")
| cc0-1.0 |
IndraVikas/scikit-learn | sklearn/covariance/__init__.py | 389 | 1157 | """
The :mod:`sklearn.covariance` module includes methods and algorithms to
robustly estimate the covariance of features given a set of points. The
precision matrix defined as the inverse of the covariance is also estimated.
Covariance estimation is closely related to the theory of Gaussian Graphical
Models.
"""
from .empirical_covariance_ import empirical_covariance, EmpiricalCovariance, \
log_likelihood
from .shrunk_covariance_ import shrunk_covariance, ShrunkCovariance, \
ledoit_wolf, ledoit_wolf_shrinkage, \
LedoitWolf, oas, OAS
from .robust_covariance import fast_mcd, MinCovDet
from .graph_lasso_ import graph_lasso, GraphLasso, GraphLassoCV
from .outlier_detection import EllipticEnvelope
__all__ = ['EllipticEnvelope',
'EmpiricalCovariance',
'GraphLasso',
'GraphLassoCV',
'LedoitWolf',
'MinCovDet',
'OAS',
'ShrunkCovariance',
'empirical_covariance',
'fast_mcd',
'graph_lasso',
'ledoit_wolf',
'ledoit_wolf_shrinkage',
'log_likelihood',
'oas',
'shrunk_covariance']
| bsd-3-clause |
kyoren/https-github.com-h2oai-h2o-3 | h2o-py/h2o/model/dim_reduction.py | 1 | 2653 | """
DimReduction Models
"""
from metrics_base import *
class H2ODimReductionModel(ModelBase):
def __init__(self, dest_key, model_json):
super(H2ODimReductionModel, self).__init__(dest_key, model_json,H2ODimReductionModelMetrics)
def num_iterations(self):
"""
Get the number of iterations that it took to converge or reach max iterations.
:return: number of iterations (integer)
"""
o = self._model_json["output"]
return o["model_summary"].cell_values[0][o["model_summary"].col_header.index('number_of_iterations')]
def objective(self):
"""
Get the final value of the objective function from the GLRM model.
:return: final objective value (double)
"""
o = self._model_json["output"]
return o["model_summary"].cell_values[0][o["model_summary"].col_header.index('final_objective_value')]
def final_step(self):
"""
Get the final step size from the GLRM model.
:return: final step size (double)
"""
o = self._model_json["output"]
return o["model_summary"].cell_values[0][o["model_summary"].col_header.index('final_step_size')]
def archetypes(self):
"""
:return: the archetypes (Y) of the GLRM model.
"""
o = self._model_json["output"]
yvals = o["archetypes"].cell_values
archetypes = []
for yidx, yval in enumerate(yvals):
archetypes.append(list(yvals[yidx])[1:])
return archetypes
def screeplot(self, type="barplot", **kwargs):
"""
Produce the scree plot
:param type: type of plot. "barplot" and "lines" currently supported
:param show: if False, the plot is not shown. matplotlib show method is blocking.
:return: None
"""
# check for matplotlib. exit if absent.
try:
imp.find_module('matplotlib')
import matplotlib
if 'server' in kwargs.keys() and kwargs['server']: matplotlib.use('Agg', warn=False)
import matplotlib.pyplot as plt
except ImportError:
print "matplotlib is required for this function!"
return
variances = [s**2 for s in self._model_json['output']['importance'].cell_values[0][1:]]
plt.xlabel('Components')
plt.ylabel('Variances')
plt.title('Scree Plot')
plt.xticks(range(1,len(variances)+1))
if type == "barplot": plt.bar(range(1,len(variances)+1), variances)
elif type == "lines": plt.plot(range(1,len(variances)+1), variances, 'b--')
if not ('server' in kwargs.keys() and kwargs['server']): plt.show()
| apache-2.0 |
zihua/scikit-learn | examples/cluster/plot_birch_vs_minibatchkmeans.py | 333 | 3694 | """
=================================
Compare BIRCH and MiniBatchKMeans
=================================
This example compares the timing of Birch (with and without the global
clustering step) and MiniBatchKMeans on a synthetic dataset having
100,000 samples and 2 features generated using make_blobs.
If ``n_clusters`` is set to None, the data is reduced from 100,000
samples to a set of 158 clusters. This can be viewed as a preprocessing
step before the final (global) clustering step that further reduces these
158 clusters to 100 clusters.
"""
# Authors: Manoj Kumar <[email protected]>
# Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
print(__doc__)
from itertools import cycle
from time import time
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.colors as colors
from sklearn.preprocessing import StandardScaler
from sklearn.cluster import Birch, MiniBatchKMeans
from sklearn.datasets.samples_generator import make_blobs
# Generate centers for the blobs so that it forms a 10 X 10 grid.
xx = np.linspace(-22, 22, 10)
yy = np.linspace(-22, 22, 10)
xx, yy = np.meshgrid(xx, yy)
n_centres = np.hstack((np.ravel(xx)[:, np.newaxis],
np.ravel(yy)[:, np.newaxis]))
# Generate blobs to do a comparison between MiniBatchKMeans and Birch.
X, y = make_blobs(n_samples=100000, centers=n_centres, random_state=0)
# Use all colors that matplotlib provides by default.
colors_ = cycle(colors.cnames.keys())
fig = plt.figure(figsize=(12, 4))
fig.subplots_adjust(left=0.04, right=0.98, bottom=0.1, top=0.9)
# Compute clustering with Birch with and without the final clustering step
# and plot.
birch_models = [Birch(threshold=1.7, n_clusters=None),
Birch(threshold=1.7, n_clusters=100)]
final_step = ['without global clustering', 'with global clustering']
for ind, (birch_model, info) in enumerate(zip(birch_models, final_step)):
t = time()
birch_model.fit(X)
time_ = time() - t
print("Birch %s as the final step took %0.2f seconds" % (
info, (time() - t)))
# Plot result
labels = birch_model.labels_
centroids = birch_model.subcluster_centers_
n_clusters = np.unique(labels).size
print("n_clusters : %d" % n_clusters)
ax = fig.add_subplot(1, 3, ind + 1)
for this_centroid, k, col in zip(centroids, range(n_clusters), colors_):
mask = labels == k
ax.plot(X[mask, 0], X[mask, 1], 'w',
markerfacecolor=col, marker='.')
if birch_model.n_clusters is None:
ax.plot(this_centroid[0], this_centroid[1], '+', markerfacecolor=col,
markeredgecolor='k', markersize=5)
ax.set_ylim([-25, 25])
ax.set_xlim([-25, 25])
ax.set_autoscaley_on(False)
ax.set_title('Birch %s' % info)
# Compute clustering with MiniBatchKMeans.
mbk = MiniBatchKMeans(init='k-means++', n_clusters=100, batch_size=100,
n_init=10, max_no_improvement=10, verbose=0,
random_state=0)
t0 = time()
mbk.fit(X)
t_mini_batch = time() - t0
print("Time taken to run MiniBatchKMeans %0.2f seconds" % t_mini_batch)
mbk_means_labels_unique = np.unique(mbk.labels_)
ax = fig.add_subplot(1, 3, 3)
for this_centroid, k, col in zip(mbk.cluster_centers_,
range(n_clusters), colors_):
mask = mbk.labels_ == k
ax.plot(X[mask, 0], X[mask, 1], 'w', markerfacecolor=col, marker='.')
ax.plot(this_centroid[0], this_centroid[1], '+', markeredgecolor='k',
markersize=5)
ax.set_xlim([-25, 25])
ax.set_ylim([-25, 25])
ax.set_title("MiniBatchKMeans")
ax.set_autoscaley_on(False)
plt.show()
| bsd-3-clause |
mjudsp/Tsallis | examples/linear_model/plot_ard.py | 29 | 2828 | """
==================================================
Automatic Relevance Determination Regression (ARD)
==================================================
Fit regression model with Bayesian Ridge Regression.
See :ref:`bayesian_ridge_regression` for more information on the regressor.
Compared to the OLS (ordinary least squares) estimator, the coefficient
weights are slightly shifted toward zeros, which stabilises them.
The histogram of the estimated weights is very peaked, as a sparsity-inducing
prior is implied on the weights.
The estimation of the model is done by iteratively maximizing the
marginal log-likelihood of the observations.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
from sklearn.linear_model import ARDRegression, LinearRegression
###############################################################################
# Generating simulated data with Gaussian weights
# Parameters of the example
np.random.seed(0)
n_samples, n_features = 100, 100
# Create Gaussian data
X = np.random.randn(n_samples, n_features)
# Create weights with a precision lambda_ of 4.
lambda_ = 4.
w = np.zeros(n_features)
# Only keep 10 weights of interest
relevant_features = np.random.randint(0, n_features, 10)
for i in relevant_features:
w[i] = stats.norm.rvs(loc=0, scale=1. / np.sqrt(lambda_))
# Create noise with a precision alpha of 50.
alpha_ = 50.
noise = stats.norm.rvs(loc=0, scale=1. / np.sqrt(alpha_), size=n_samples)
# Create the target
y = np.dot(X, w) + noise
###############################################################################
# Fit the ARD Regression
clf = ARDRegression(compute_score=True)
clf.fit(X, y)
ols = LinearRegression()
ols.fit(X, y)
###############################################################################
# Plot the true weights, the estimated weights and the histogram of the
# weights
plt.figure(figsize=(6, 5))
plt.title("Weights of the model")
plt.plot(clf.coef_, color='darkblue', linestyle='-', linewidth=2,
label="ARD estimate")
plt.plot(ols.coef_, color='yellowgreen', linestyle=':', linewidth=2,
label="OLS estimate")
plt.plot(w, color='orange', linestyle='-', linewidth=2, label="Ground truth")
plt.xlabel("Features")
plt.ylabel("Values of the weights")
plt.legend(loc=1)
plt.figure(figsize=(6, 5))
plt.title("Histogram of the weights")
plt.hist(clf.coef_, bins=n_features, color='navy', log=True)
plt.scatter(clf.coef_[relevant_features], 5 * np.ones(len(relevant_features)),
color='gold', marker='o', label="Relevant features")
plt.ylabel("Features")
plt.xlabel("Values of the weights")
plt.legend(loc=1)
plt.figure(figsize=(6, 5))
plt.title("Marginal log-likelihood")
plt.plot(clf.scores_, color='navy', linewidth=2)
plt.ylabel("Score")
plt.xlabel("Iterations")
plt.show()
| bsd-3-clause |
szbokhar/AmethystBaloon | util.py | 1 | 5528 | import matplotlib.pyplot as plt
import numpy as np
import scipy.spatial
import scipy.ndimage.filters
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import animation
from pprint import pprint
from numpy.random import choice, rand, randint
from RL_Config import *
def center_points(pts):
dx = np.sum(pts, 0)/pts.shape[0]
pts = pts - dx
return (pts, dx)
def smooth_path(path, alpha, beta):
spath = np.zeros(path.shape)
spath[0, :] = path[0, :]
v = np.array([0.0, 0.0, 0.0])
for i in range(1, len(path)):
spath[i, :] = spath[i-1, :] + v
r = path[i, :] - spath[i, :]
spath[i, :] = spath[i, :] + alpha * r
v = v + beta * r
return spath
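# A minimal sketch of how smooth_path behaves on a synthetic path (this helper
# is illustrative only and is not called anywhere else). smooth_path is an
# alpha-beta tracker: each prediction (previous estimate plus velocity) is
# corrected toward the raw point by alpha, and the velocity by beta.
def _demo_smooth_path():
    raw = np.cumsum(np.ones((20, 3)), axis=0)       # toy straight-line path
    return smooth_path(raw, alpha=0.2, beta=0.1)    # same shape, smoothed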
def block_path(path, block_size):
bpath = np.round(path/block_size, 0).astype(int)
return bpath
def make_voxel_grid(pts, colors, block_size, person, paths=None, alpha=0.2, beta=0.1):
grid_coords = np.round(pts/block_size, 0).astype(int)
mx = np.min(grid_coords[:,0], 0)
mz = np.min(grid_coords[:,1], 0)
my = np.min(grid_coords[:,2], 0)
grid_coords[:,0] = grid_coords[:,0] - mx
grid_coords[:,1] = grid_coords[:,1] - mz
grid_coords[:,2] = grid_coords[:,2] - my
if paths is not None:
allpts = np.zeros((0,3))
for i in range(len(paths)):
paths[i].smooth_points = smooth_path(paths[i].points, alpha, beta)
paths[i].block_points = block_path(paths[i].smooth_points, block_size)
paths[i].points = np.copy(paths[i].block_points)
paths[i].points[:,0] = paths[i].points[:,0] - mx
paths[i].points[:,1] = paths[i].points[:,1] - mz
paths[i].points[:,2] = paths[i].points[:,2] - my
allpts = np.concatenate((allpts, paths[i].raw_points), axis=0)
t1 = np.mean(allpts, axis=0) - person[0]/5
t2 = t1 + person[0]
b1 = np.round(t1/block_size, 0) - np.array([mx, mz, my])
b2 = np.round(t2/block_size, 0) - np.array([mx, mz, my])
low = int(b1[1])
high = int(b2[1])
mx = np.max(grid_coords[:,0], 0)
mz = np.max(grid_coords[:,1], 0)
my = np.max(grid_coords[:,2], 0)
grid = np.zeros((mx+1, mz+1, my+1))
for i in range(grid_coords.shape[0]):
p = grid_coords[i,:]
grid[p[0], p[1], p[2]] += 1
grid = np.log(grid+1)
grid = scipy.ndimage.filters.gaussian_filter(grid, 0.5/block_size)
return (grid, list(range(low, high+1)), b2-b1)
def do_qlearn(rl_config, num_iter, rand_count):
sars = rl_config.total_SARSA_list
qshape = rl_config.voxel_grid.shape
alpha = rl_config.alpha
gamma = rl_config.gamma
state_size = len(rl_config.rl_state_ids.keys())
print(rl_config.total_SARSA_list)
Q = np.zeros(rl_config.q_shape)
vals = []
for t in range(num_iter):
#print('-----', t)
idx = choice(sars.shape[0], rand_count, replace=True)
S = sars[idx,:]
for i in range(len(S)):
s = S[i, 0:state_size].astype(int)
act = S[i, state_size]
ns = S[i, (state_size+2):(2*state_size+2)].astype(int)
st = tuple(s.tolist() + [act])
nst = tuple(ns.tolist() + [[x for x in range(Q.shape[-1])]])
R = S[i, state_size+1]
Q[st] = Q[st] + alpha*(R + gamma*np.max(Q[nst]) - Q[st])
if t % 10 == 0:
vals.append(np.sum(Q))
return (Q, vals)
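# A minimal sketch of the tabular Q-learning update used above, on a tiny
# made-up table (illustrative only, not called anywhere else):
#   Q(s,a) <- Q(s,a) + alpha * (R + gamma * max_a' Q(s',a') - Q(s,a))
def _demo_q_update(alpha=0.5, gamma=0.9):
    Q = np.zeros((2, 2, 3))             # toy 2x2 state grid with 3 actions
    s, act, ns, R = (0, 0), 2, (1, 0), 1.0
    st = s + (act,)                     # indexes a single (state, action) cell
    Q[st] = Q[st] + alpha * (R + gamma * np.max(Q[ns]) - Q[st])
    return Q[st]                        # 0.5 after one update from zero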
def do_explore_qlearn(rl_config, num_iter=2000, rand_count=500, reset_episode=100, memory=10000):
memcount = len(rl_config.total_SARSA_list)
qshape = rl_config.voxel_grid.shape
alpha = rl_config.alpha
gamma = rl_config.gamma
epsilon = rl_config.epsilon
state_size = len(rl_config.rl_state_ids.keys())
rid2rl_actions = rl_config.rl_actions
rl_actions2rid = {v: k for k, v in rid2rl_actions.items()}
Q = np.zeros(rl_config.q_shape)
umap = np.zeros(rl_config.q_shape)
sars = np.zeros((memory, rl_config.total_SARSA_list.shape[1]))
sars[0:memcount,:] = np.copy(rl_config.total_SARSA_list)
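    # sars acts as a fixed-size replay memory: it is seeded with the recorded
    # SARSA tuples and, once full, each new exploration step overwrites a
    # randomly chosen old entry (the randint() branch inside the loop below).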
vals = []
e_state = np.zeros(state_size)
next_e_state = np.zeros(state_size)
e_length = 0
for t in range(num_iter):
if e_length % reset_episode == 0:
e_state = rl_config.get_random_state(rl_config)
print("Step:", t)
(next_e_state, new_sarsa_state, isFinished) = rl_config.explore_step(rl_config, Q, e_state, epsilon=epsilon)
if memcount >= sars.shape[0]:
ridx = randint(sars.shape[0])
sars[ridx] = new_sarsa_state
else:
sars[memcount] = new_sarsa_state
e_state = np.copy(next_e_state)
if isFinished:
e_length = 0
else:
e_length += 1
#print(new_sarsa_state)
#print('-----', t)
idx = choice(min(sars.shape[0], memcount), rand_count, replace=True)
S = sars[idx,:]
for i in range(len(S)):
s = S[i, 0:state_size].astype(int)
act = S[i, state_size]
ns = S[i, (state_size+2):(2*state_size+2)].astype(int)
st = tuple(s.tolist() + [act])
nst = tuple(ns.tolist() + [[x for x in range(Q.shape[-1])]])
R = S[i, state_size+1]
Q[st] = Q[st] + alpha*(R + gamma*np.max(Q[nst]) - Q[st])
umap[st] += 1
if t % 50 == 0:
# print(vals[-1] if len(vals) > 1 else None)
vals.append(np.sum(Q))
memcount = memcount+1 if memcount < sars.shape[0] else memcount
return (Q, vals, umap)
| mit |
aaltay/beam | sdks/python/apache_beam/io/parquetio_test.py | 1 | 18619 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# pytype: skip-file
from __future__ import absolute_import
import json
import logging
import os
import shutil
import tempfile
import unittest
import hamcrest as hc
import pandas
import pytest
from parameterized import param
from parameterized import parameterized
from apache_beam import Create
from apache_beam import Map
from apache_beam.io import filebasedsource
from apache_beam.io import source_test_utils
from apache_beam.io.iobase import RangeTracker
from apache_beam.io.parquetio import ReadAllFromParquet
from apache_beam.io.parquetio import ReadAllFromParquetBatched
from apache_beam.io.parquetio import ReadFromParquet
from apache_beam.io.parquetio import ReadFromParquetBatched
from apache_beam.io.parquetio import WriteToParquet
from apache_beam.io.parquetio import _create_parquet_sink
from apache_beam.io.parquetio import _create_parquet_source
from apache_beam.testing.test_pipeline import TestPipeline
from apache_beam.testing.util import assert_that
from apache_beam.testing.util import equal_to
from apache_beam.transforms.display import DisplayData
from apache_beam.transforms.display_test import DisplayDataItemMatcher
# TODO(BEAM-8371): Use tempfile.TemporaryDirectory.
from apache_beam.utils.subprocess_server_test import TemporaryDirectory
try:
import pyarrow as pa
import pyarrow.lib as pl
import pyarrow.parquet as pq
except ImportError:
pa = None
pl = None
pq = None
@unittest.skipIf(pa is None, "PyArrow is not installed.")
@pytest.mark.uses_pyarrow
class TestParquet(unittest.TestCase):
def setUp(self):
    # Reducing the size of thread pools. Without this, test execution may fail
    # in environments with a limited amount of resources.
filebasedsource.MAX_NUM_THREADS_FOR_SIZE_ESTIMATION = 2
self.temp_dir = tempfile.mkdtemp()
self.RECORDS = [{
'name': 'Thomas', 'favorite_number': 1, 'favorite_color': 'blue'
},
{
'name': 'Henry',
'favorite_number': 3,
'favorite_color': 'green'
},
{
'name': 'Toby',
'favorite_number': 7,
'favorite_color': 'brown'
},
{
'name': 'Gordon',
'favorite_number': 4,
'favorite_color': 'blue'
},
{
'name': 'Emily',
'favorite_number': -1,
'favorite_color': 'Red'
},
{
'name': 'Percy',
'favorite_number': 6,
'favorite_color': 'Green'
}]
self.SCHEMA = pa.schema([('name', pa.string()),
('favorite_number', pa.int64()),
('favorite_color', pa.string())])
self.SCHEMA96 = pa.schema([('name', pa.string()),
('favorite_number', pa.timestamp('ns')),
('favorite_color', pa.string())])
def tearDown(self):
shutil.rmtree(self.temp_dir)
def _record_to_columns(self, records, schema):
col_list = []
for n in schema.names:
column = []
for r in records:
column.append(r[n])
col_list.append(column)
return col_list
def _records_as_arrow(self, schema=None, count=None):
if schema is None:
schema = self.SCHEMA
if count is None:
count = len(self.RECORDS)
len_records = len(self.RECORDS)
data = []
for i in range(count):
data.append(self.RECORDS[i % len_records])
col_data = self._record_to_columns(data, schema)
col_array = [pa.array(c, schema.types[cn]) for cn, c in enumerate(col_data)]
return pa.Table.from_arrays(col_array, schema.names)
def _write_data(
self,
directory=None,
schema=None,
prefix=tempfile.template,
row_group_size=1000,
codec='none',
count=None):
if directory is None:
directory = self.temp_dir
with tempfile.NamedTemporaryFile(delete=False, dir=directory,
prefix=prefix) as f:
table = self._records_as_arrow(schema, count)
pq.write_table(
table,
f,
row_group_size=row_group_size,
compression=codec,
use_deprecated_int96_timestamps=True)
return f.name
def _write_pattern(self, num_files):
assert num_files > 0
temp_dir = tempfile.mkdtemp(dir=self.temp_dir)
for _ in range(num_files):
self._write_data(directory=temp_dir, prefix='mytemp')
return temp_dir + os.path.sep + 'mytemp*'
def _run_parquet_test(
self,
pattern,
columns,
desired_bundle_size,
perform_splitting,
expected_result):
source = _create_parquet_source(pattern, columns=columns)
if perform_splitting:
assert desired_bundle_size
sources_info = [
(split.source, split.start_position, split.stop_position)
for split in source.split(desired_bundle_size=desired_bundle_size)
]
if len(sources_info) < 2:
raise ValueError(
'Test is trivial. Please adjust it so that at least '
'two splits get generated')
source_test_utils.assert_sources_equal_reference_source(
(source, None, None), sources_info)
else:
read_records = source_test_utils.read_from_source(source, None, None)
self.assertCountEqual(expected_result, read_records)
def test_read_without_splitting(self):
file_name = self._write_data()
expected_result = [self._records_as_arrow()]
self._run_parquet_test(file_name, None, None, False, expected_result)
def test_read_with_splitting(self):
file_name = self._write_data()
expected_result = [self._records_as_arrow()]
self._run_parquet_test(file_name, None, 100, True, expected_result)
def test_source_display_data(self):
file_name = 'some_parquet_source'
source = \
_create_parquet_source(
file_name,
validate=False
)
dd = DisplayData.create_from(source)
expected_items = [
DisplayDataItemMatcher('compression', 'auto'),
DisplayDataItemMatcher('file_pattern', file_name)
]
hc.assert_that(dd.items, hc.contains_inanyorder(*expected_items))
def test_read_display_data(self):
file_name = 'some_parquet_source'
read = \
ReadFromParquet(
file_name,
validate=False)
read_batched = \
ReadFromParquetBatched(
file_name,
validate=False)
expected_items = [
DisplayDataItemMatcher('compression', 'auto'),
DisplayDataItemMatcher('file_pattern', file_name)
]
hc.assert_that(
DisplayData.create_from(read).items,
hc.contains_inanyorder(*expected_items))
hc.assert_that(
DisplayData.create_from(read_batched).items,
hc.contains_inanyorder(*expected_items))
def test_sink_display_data(self):
file_name = 'some_parquet_sink'
sink = _create_parquet_sink(
file_name,
self.SCHEMA,
'none',
1024 * 1024,
1000,
False,
'.end',
0,
None,
'application/x-parquet')
dd = DisplayData.create_from(sink)
expected_items = [
DisplayDataItemMatcher('schema', str(self.SCHEMA)),
DisplayDataItemMatcher(
'file_pattern',
'some_parquet_sink-%(shard_num)05d-of-%(num_shards)05d.end'),
DisplayDataItemMatcher('codec', 'none'),
DisplayDataItemMatcher('row_group_buffer_size', str(1024 * 1024)),
DisplayDataItemMatcher('compression', 'uncompressed')
]
hc.assert_that(dd.items, hc.contains_inanyorder(*expected_items))
def test_write_display_data(self):
file_name = 'some_parquet_sink'
write = WriteToParquet(file_name, self.SCHEMA)
dd = DisplayData.create_from(write)
expected_items = [
DisplayDataItemMatcher('codec', 'none'),
DisplayDataItemMatcher('schema', str(self.SCHEMA)),
DisplayDataItemMatcher('row_group_buffer_size', str(64 * 1024 * 1024)),
DisplayDataItemMatcher(
'file_pattern',
'some_parquet_sink-%(shard_num)05d-of-%(num_shards)05d'),
DisplayDataItemMatcher('compression', 'uncompressed')
]
hc.assert_that(dd.items, hc.contains_inanyorder(*expected_items))
def test_sink_transform_int96(self):
with tempfile.NamedTemporaryFile() as dst:
path = dst.name
# pylint: disable=c-extension-no-member
with self.assertRaises(pl.ArrowInvalid):
# Should throw an error "ArrowInvalid: Casting from timestamp[ns] to
# timestamp[us] would lose data"
with TestPipeline() as p:
_ = p \
| Create(self.RECORDS) \
| WriteToParquet(
path, self.SCHEMA96, num_shards=1, shard_name_template='')
def test_sink_transform(self):
with TemporaryDirectory() as tmp_dirname:
path = os.path.join(tmp_dirname + "tmp_filename")
with TestPipeline() as p:
_ = p \
| Create(self.RECORDS) \
| WriteToParquet(
path, self.SCHEMA, num_shards=1, shard_name_template='')
with TestPipeline() as p:
# json used for stable sortability
readback = \
p \
| ReadFromParquet(path) \
| Map(json.dumps)
assert_that(readback, equal_to([json.dumps(r) for r in self.RECORDS]))
def test_batched_read(self):
with TemporaryDirectory() as tmp_dirname:
path = os.path.join(tmp_dirname + "tmp_filename")
with TestPipeline() as p:
_ = p \
| Create(self.RECORDS, reshuffle=False) \
| WriteToParquet(
path, self.SCHEMA, num_shards=1, shard_name_template='')
with TestPipeline() as p:
# json used for stable sortability
readback = \
p \
| ReadFromParquetBatched(path)
assert_that(readback, equal_to([self._records_as_arrow()]))
@parameterized.expand([
param(compression_type='snappy'),
param(compression_type='gzip'),
param(compression_type='brotli'),
param(compression_type='lz4'),
param(compression_type='zstd')
])
def test_sink_transform_compressed(self, compression_type):
if compression_type == 'lz4' and int(pa.__version__.split('.')[0]) == 1:
return unittest.skip(
"Writing with LZ4 compression is not supported in "
"pyarrow 1.x")
with TemporaryDirectory() as tmp_dirname:
path = os.path.join(tmp_dirname + "tmp_filename")
with TestPipeline() as p:
_ = p \
| Create(self.RECORDS) \
| WriteToParquet(
path, self.SCHEMA, codec=compression_type,
num_shards=1, shard_name_template='')
with TestPipeline() as p:
# json used for stable sortability
readback = \
p \
| ReadFromParquet(path + '*') \
| Map(json.dumps)
assert_that(readback, equal_to([json.dumps(r) for r in self.RECORDS]))
def test_read_reentrant(self):
file_name = self._write_data(count=6, row_group_size=3)
source = _create_parquet_source(file_name)
source_test_utils.assert_reentrant_reads_succeed((source, None, None))
def test_read_without_splitting_multiple_row_group(self):
file_name = self._write_data(count=12000, row_group_size=1000)
# We expect 12000 elements, split into batches of 1000 elements. Create
    # a list of pa.Table instances to model this expectation
expected_result = [
pa.Table.from_batches([batch]) for batch in self._records_as_arrow(
count=12000).to_batches(chunksize=1000)
]
self._run_parquet_test(file_name, None, None, False, expected_result)
def test_read_with_splitting_multiple_row_group(self):
file_name = self._write_data(count=12000, row_group_size=1000)
# We expect 12000 elements, split into batches of 1000 elements. Create
    # a list of pa.Table instances to model this expectation
expected_result = [
pa.Table.from_batches([batch]) for batch in self._records_as_arrow(
count=12000).to_batches(chunksize=1000)
]
self._run_parquet_test(file_name, None, 10000, True, expected_result)
def test_dynamic_work_rebalancing(self):
file_name = self._write_data(count=120, row_group_size=20)
source = _create_parquet_source(file_name)
splits = [split for split in source.split(desired_bundle_size=float('inf'))]
assert len(splits) == 1
source_test_utils.assert_split_at_fraction_exhaustive(
splits[0].source, splits[0].start_position, splits[0].stop_position)
def test_min_bundle_size(self):
file_name = self._write_data(count=120, row_group_size=20)
source = _create_parquet_source(
file_name, min_bundle_size=100 * 1024 * 1024)
splits = [split for split in source.split(desired_bundle_size=1)]
self.assertEqual(len(splits), 1)
source = _create_parquet_source(file_name, min_bundle_size=0)
splits = [split for split in source.split(desired_bundle_size=1)]
self.assertNotEqual(len(splits), 1)
def _convert_to_timestamped_record(self, record):
timestamped_record = record.copy()
timestamped_record['favorite_number'] =\
pandas.Timestamp(timestamped_record['favorite_number'])
return timestamped_record
def test_int96_type_conversion(self):
file_name = self._write_data(
count=120, row_group_size=20, schema=self.SCHEMA96)
orig = self._records_as_arrow(count=120, schema=self.SCHEMA96)
expected_result = [
pa.Table.from_batches([batch])
for batch in orig.to_batches(chunksize=20)
]
self._run_parquet_test(file_name, None, None, False, expected_result)
def test_split_points(self):
file_name = self._write_data(count=12000, row_group_size=3000)
source = _create_parquet_source(file_name)
splits = [split for split in source.split(desired_bundle_size=float('inf'))]
assert len(splits) == 1
range_tracker = splits[0].source.get_range_tracker(
splits[0].start_position, splits[0].stop_position)
split_points_report = []
for _ in splits[0].source.read(range_tracker):
split_points_report.append(range_tracker.split_points())
# There are a total of four row groups. Each row group has 3000 records.
# When reading records of the first group, range_tracker.split_points()
# should return (0, iobase.RangeTracker.SPLIT_POINTS_UNKNOWN)
self.assertEqual(
split_points_report,
[
(0, RangeTracker.SPLIT_POINTS_UNKNOWN),
(1, RangeTracker.SPLIT_POINTS_UNKNOWN),
(2, RangeTracker.SPLIT_POINTS_UNKNOWN),
(3, 1),
])
def test_selective_columns(self):
file_name = self._write_data()
orig = self._records_as_arrow()
expected_result = [
pa.Table.from_arrays([orig.column('name')], names=['name'])
]
self._run_parquet_test(file_name, ['name'], None, False, expected_result)
def test_sink_transform_multiple_row_group(self):
with TemporaryDirectory() as tmp_dirname:
path = os.path.join(tmp_dirname + "tmp_filename")
with TestPipeline() as p:
# writing 623200 bytes of data
_ = p \
| Create(self.RECORDS * 4000) \
| WriteToParquet(
path, self.SCHEMA, num_shards=1, codec='none',
shard_name_template='', row_group_buffer_size=250000)
self.assertEqual(pq.read_metadata(path).num_row_groups, 3)
def test_read_all_from_parquet_single_file(self):
path = self._write_data()
with TestPipeline() as p:
assert_that(
p \
| Create([path]) \
| ReadAllFromParquet(),
equal_to(self.RECORDS))
with TestPipeline() as p:
assert_that(
p \
| Create([path]) \
| ReadAllFromParquetBatched(),
equal_to([self._records_as_arrow()]))
def test_read_all_from_parquet_many_single_files(self):
path1 = self._write_data()
path2 = self._write_data()
path3 = self._write_data()
with TestPipeline() as p:
assert_that(
p \
| Create([path1, path2, path3]) \
| ReadAllFromParquet(),
equal_to(self.RECORDS * 3))
with TestPipeline() as p:
assert_that(
p \
| Create([path1, path2, path3]) \
| ReadAllFromParquetBatched(),
equal_to([self._records_as_arrow()] * 3))
def test_read_all_from_parquet_file_pattern(self):
file_pattern = self._write_pattern(5)
with TestPipeline() as p:
assert_that(
p \
| Create([file_pattern]) \
| ReadAllFromParquet(),
equal_to(self.RECORDS * 5))
with TestPipeline() as p:
assert_that(
p \
| Create([file_pattern]) \
| ReadAllFromParquetBatched(),
equal_to([self._records_as_arrow()] * 5))
def test_read_all_from_parquet_many_file_patterns(self):
file_pattern1 = self._write_pattern(5)
file_pattern2 = self._write_pattern(2)
file_pattern3 = self._write_pattern(3)
with TestPipeline() as p:
assert_that(
p \
| Create([file_pattern1, file_pattern2, file_pattern3]) \
| ReadAllFromParquet(),
equal_to(self.RECORDS * 10))
with TestPipeline() as p:
assert_that(
p \
| Create([file_pattern1, file_pattern2, file_pattern3]) \
| ReadAllFromParquetBatched(),
equal_to([self._records_as_arrow()] * 10))
if __name__ == '__main__':
logging.getLogger().setLevel(logging.INFO)
unittest.main()
| apache-2.0 |